code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: seashore_streets # language: python # name: seashore_streets # --- # # Measure orientation of seashore streets in relation to SW wind # # Computational notebook 06 for Climate adaptation plans in the context of coastal settlements: the case of Portugal. # # Date: 27/06/2020 # # --- # # This notebook computes deviation of seashore street orientation from SW wind direction (45 degrees). # # Requires attribute `case` in `name_str` capturing which LineStrings form the seashore street itself. (1 - True) (already used in `03_Calculate_contextual_characters.ipynb`. # # Structure of GeoPackages: # # ``` # ./data/ # atlantic.gpkg # name_blg - Polygon layers # name_str - LineString layers # name_case - Polygon layers # name_tess - Polygon layers # name_blocks - Polygon layers # ... # preatl.gpkg # name_blg # name_str # name_case # ... # premed.gpkg # name_blg # name_str # name_case # ... # med.gpkg # name_blg # name_str # name_case # ... # ``` # # CRS of the original data is EPSG:3763. 
# Notebook 06: deviation of seashore street orientation from the SW wind (45°).
# Requires attribute `case` in the `name_str` layers marking which LineStrings
# form the seashore street itself (1 = True). Data CRS: EPSG:3763
# (ETRS89 / Portugal TM06).

import numpy as np
import pandas as pd


def wind_issue(line, wind_angle=45):
    """Return the normalized deviation (0..1) of a street from the wind direction.

    The street orientation is taken as the azimuth of the chord connecting the
    line's endpoints. The deviation |azimuth - wind_angle| is folded into the
    0-90 degree range and divided by 90, so 0 means parallel to the wind and
    1 means perpendicular.

    Parameters
    ----------
    line : shapely LineString (anything exposing ``.coords``)
    wind_angle : float, default 45
        Wind direction in degrees (45 = SW wind for this study).
    """
    coords = list(line.coords)
    # Azimuth of the endpoint-to-endpoint chord, in degrees.
    azimuth = np.degrees(
        np.arctan2(coords[-1][0] - coords[0][0], coords[-1][1] - coords[0][1])
    )
    if azimuth < wind_angle:
        azimuth += 180
    deviation = abs(azimuth - wind_angle)
    # Fold angles above 90 back into the 0-90 range (az - 2*(az-90) == 180-az).
    if 90 < deviation <= 180:
        deviation = 180 - deviation
    return deviation / 90


def main(folder="./data/"):
    """Compute wind deviation for every place and write wind_relation.csv.

    ``folder`` was defined in an earlier notebook of the original pipeline and
    was an undefined name here; it is now a parameter with the documented
    default layout (./data/*.gpkg) so this script is self-contained.
    """
    # Heavy geo dependencies are imported lazily so wind_issue stays importable.
    import fiona
    import geopandas as gpd
    from shapely.ops import linemerge

    wind = pd.DataFrame(columns=["place", "winddev"])
    ix = 0
    for part in ["atlantic", "preatl", "premed", "med"]:
        path = folder + part + ".gpkg"
        layers = [x[:-4] for x in fiona.listlayers(path) if "blg" in x]
        for l in layers:
            streets = gpd.read_file(path, layer=l + "_str")
            seashore = streets[streets.case == 1].geometry.to_list()
            merged = linemerge(seashore)
            if merged.geom_type != "LineString":
                # linemerge returned a MultiLineString: keep the longest part.
                # ``.geoms`` is required for shapely >= 2.0, where multi-part
                # geometries are no longer directly iterable or indexable.
                longest = max(merged.geoms, key=lambda seg: seg.length)
                wind.loc[ix] = [l, wind_issue(longest)]
            else:
                wind.loc[ix] = [l, wind_issue(merged)]
            ix += 1
    wind.to_csv(folder + "wind_relation.csv")


if __name__ == "__main__":
    main()
06_Orientation_towards_wind.ipynb
# TP1 - fonction de coût en régression linéaire simple (notebook script).
# Reference implementation: the exercise stubs (``h = None``, ``cost = None``,
# ``df = None``) are filled in so the notebook runs end to end instead of
# raising TypeError in every downstream cell.

import numpy as np
import pandas as pd


def hypothesis(x, theta_0, theta_1):
    """Simple linear regression model h(x) = theta_0 + theta_1 * x.

    Works on scalars and numpy arrays alike (broadcasting).
    """
    return theta_0 + theta_1 * x


def cost_function(x, y, theta_0, theta_1):
    """Mean squared error cost J = 1/(2m) * sum((h(x_i) - y_i)^2)."""
    assert len(x) == len(y)
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    residuals = hypothesis(x, theta_0, theta_1) - y
    return float(np.sum(residuals ** 2) / (2 * len(x)))


def main():
    """Run the TP end to end: simulated data, manual fits, cost curves and
    surfaces, then the advertising dataset."""
    import matplotlib.pyplot as plt
    import seaborn as sns

    sns.set(style="darkgrid", rc={"figure.figsize": (11.7, 8.27)})

    # --- 1 - Simulated dataset -------------------------------------------
    m = 200
    theta_0_th, theta_1_th = -0.5, 0.75
    x = np.random.uniform(1, 10, m)
    y = theta_0_th + theta_1_th * x + np.random.randn(m)
    fig, ax = plt.subplots()
    ax.scatter(x, y)
    ax.set_xlabel("x")
    ax.set_ylabel("y")

    # --- 3 - Manual fit (exercise 2: the generating thetas are a good fit) -
    theta_0, theta_1 = -0.5, 0.75
    reg_x = np.linspace(1, 10, 10)
    reg_y = hypothesis(reg_x, theta_0, theta_1)
    fig, ax = plt.subplots()
    ax.scatter(x, y)
    ax.plot(reg_x, reg_y, color="g")
    ax.set_xlabel("x")
    ax.set_ylabel("y")

    # --- 4 - Cost as a function of theta_1 (theta_0 fixed at 0) ------------
    thetas = np.linspace(-1, 2, 20)
    J = [cost_function(x, y, 0, t) for t in thetas]
    fig, ax = plt.subplots()
    ax.scatter(thetas, J)
    ax.set_xlabel(r"$\theta_1$")
    ax.set_ylabel("Coût")

    # --- 5 - Full model: cost surface --------------------------------------
    theta_0s = np.linspace(-4, 2, 50)
    theta_1s = np.linspace(-1, 2, 50)
    mesh = np.zeros((50, 50))
    for i, t0 in enumerate(theta_0s):
        for j, t1 in enumerate(theta_1s):
            # FIX: rows of ``mesh`` must index theta_1 (the Y axis of the
            # contour plot), hence mesh[j, i]. The original filled mesh[i, j],
            # transposing the surface relative to section 6-3 below.
            mesh[j, i] = cost_function(x, y, t0, t1)
    fig, ax = plt.subplots()
    XX, YY = np.meshgrid(theta_0s, theta_1s)
    CS = ax.contour(XX, YY, mesh, levels=[1, 2, 5, 10, 20, 50, 100])
    ax.clabel(CS, inline=1, fontsize=10)
    ax.set_xlabel(r"$\theta_0$")
    ax.set_ylabel(r"$\theta_1$")

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.view_init(20, 70)
    ax.plot_wireframe(XX, YY, mesh, rstride=1, cstride=1)
    ax.set_xlabel(r"$\theta_0$")
    ax.set_ylabel(r"$\theta_1$")
    ax.set_zlabel("J")

    # --- 6 - Advertising dataset -------------------------------------------
    df = pd.read_csv("advertising-univariate.csv")
    x = df["TV"].values / 100  # rescaling gives nicer axis labels
    y = df["sales"].values
    ax = sns.scatterplot(x=x, y=y, s=60)
    ax.set_xlabel("Budget de publicité TV (en 100 000 USD)", fontsize=14)
    ax.set_ylabel("Ventes (en millier d'unités)", fontsize=14)

    # Exercise 7: manual fit on the advertising data.
    theta_0, theta_1 = 7.0, 4.8
    reg_x = np.linspace(0, 3, 10)
    reg_y = hypothesis(reg_x, theta_0, theta_1)
    fig, ax = plt.subplots()
    ax.scatter(x, y)
    ax.plot(reg_x, reg_y, color="g")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    J = cost_function(x, y, theta_0, theta_1)
    ax.text(0, 25, f"J={J}", color="g", fontsize=24)

    # 6-3 Simplified model (theta_0 = 0): h(x) = theta_1 * x.
    thetas = np.linspace(7, 12, 20)
    J = [cost_function(x, y, 0, t) for t in thetas]
    fig, ax = plt.subplots()
    ax.scatter(thetas, J)
    ax.set_xlabel(r"$\theta_1$")
    ax.set_ylabel(r"$J(\theta_1)$")

    # 6-3 Full model cost surface.
    theta_0s = np.linspace(4, 9, 50)
    theta_1s = np.linspace(4.5, 5.5, 50)
    mesh = np.zeros((50, 50))
    for i, t0 in enumerate(theta_0s):
        for j, t1 in enumerate(theta_1s):
            mesh[j, i] = cost_function(x, y, t0, t1)
    fig, ax = plt.subplots()
    XX, YY = np.meshgrid(theta_0s, theta_1s)
    CS = ax.contour(XX, YY, mesh, levels=40)
    ax.clabel(CS, inline=1, fontsize=10)
    ax.set_xlabel(r"$\theta_0$")
    ax.set_ylabel(r"$\theta_1$")
    plt.show()


if __name__ == "__main__":
    main()
nbs/01-regression-lineaire-simple/01-TP1.ipynb
# Titanic survival prediction with logistic regression (Kaggle notebook script).

import pandas as pd


def main():
    """Train a logistic-regression model on train.csv and write
    titanic_submission.csv with predictions for test.csv."""
    import seaborn as sns
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import classification_report
    from sklearn.model_selection import train_test_split

    # --- Training data ----------------------------------------------------
    train = pd.read_csv("train.csv")
    # Age has missing values; impute with the median.
    train["Age"] = train["Age"].fillna(train["Age"].median())

    # Quick exploration plots.
    sns.countplot(x="Survived", hue="Pclass", data=train)
    sns.countplot(x="Survived", hue="Sex", data=train)

    # Cabin is mostly missing; drop it, then drop remaining incomplete rows.
    train.drop("Cabin", inplace=True, axis=1)
    train.dropna(inplace=True)

    # One-hot encode categoricals (drop_first avoids collinearity).
    sex = pd.get_dummies(train["Sex"], drop_first=True)
    embarked = pd.get_dummies(train["Embarked"], drop_first=True)
    pclass = pd.get_dummies(train["Pclass"], drop_first=True)
    train = pd.concat([train, pclass, sex, embarked], axis=1)
    train.drop(
        ["PassengerId", "Pclass", "Name", "Sex", "Ticket", "Embarked"],
        axis=1,
        inplace=True,
    )

    X = train.drop("Survived", axis=1)
    y = train["Survived"]
    # FIX: the original passed train_size=0.3, training on only 30% of the
    # data; a conventional 70/30 train/test split was almost certainly meant.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

    logmodel = LogisticRegression(solver="lbfgs", max_iter=1000)
    logmodel.fit(X_train, y_train)
    predictions = logmodel.predict(X_test)
    print(classification_report(y_test, predictions))
    print("accuracy:", logmodel.score(X_test, y_test))

    # --- Test data: same cleaning/encoding pipeline -----------------------
    test = pd.read_csv("test.csv")
    test.drop(["Name", "Ticket", "Cabin"], axis=1, inplace=True)
    test["Age"] = test["Age"].fillna(test["Age"].median())
    test["Fare"] = test["Fare"].fillna(test["Fare"].median())

    sex = pd.get_dummies(test["Sex"], drop_first=True)
    embarked = pd.get_dummies(test["Embarked"], drop_first=True)
    pclass = pd.get_dummies(test["Pclass"], drop_first=True)
    test = pd.concat([test, pclass, sex, embarked], axis=1)
    test.drop(["Pclass", "Sex", "Embarked"], axis=1, inplace=True)

    passenger_ids = test.PassengerId
    test.drop(["PassengerId"], axis=1, inplace=True)

    predictions = logmodel.predict(test)
    submission = pd.concat(
        [passenger_ids, pd.DataFrame(data=predictions, columns=["Survived"])],
        axis=1,
    )
    submission.to_csv("titanic_submission.csv", index=False)


if __name__ == "__main__":
    main()
titanic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="VMkFdzuSH8JY" papermill={"duration": 0.037092, "end_time": "2022-03-14T23:53:25.450486", "exception": false, "start_time": "2022-03-14T23:53:25.413394", "status": "completed"} tags=[] # # Emotion Detection using DistilBERT # --- # Twitter user bio is analyzed to find emotion probabilities on 28 metrices. I'm using DistilBERT transformer pre-trained on GoEmotions dataset. The model card can be found [here](https://huggingface.co/joeddav/distilbert-base-uncased-go-emotions-student?text=I+feel+lucky+to+be+here.). # + [markdown] id="k7TB4rbiHimj" papermill={"duration": 0.0369, "end_time": "2022-03-14T23:53:25.524940", "exception": false, "start_time": "2022-03-14T23:53:25.488040", "status": "completed"} tags=[] # ### Setup # # Transformers installation from [HuggingFace](https://github.com/huggingface/transformers). # # Plotly upgraded to latest version and pyyaml downgraded to fix yaml loader issue. 
# # emoji package to demojize tweets # + executionInfo={"elapsed": 52731, "status": "ok", "timestamp": 1646923452448, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="kfuCTZVyGtsE" outputId="5b7c879a-34b7-435c-ed42-6a98a840f81a" papermill={"duration": 25.004098, "end_time": "2022-03-14T23:53:50.570567", "exception": false, "start_time": "2022-03-14T23:53:25.566469", "status": "completed"} tags=[] # !pip install transformers # !pip install --upgrade plotly # #!pip install pyyaml==5.4.1 # !pip install emoji # + [markdown] id="i_ZeReb1LVU7" papermill={"duration": 0.037982, "end_time": "2022-03-14T23:53:50.647293", "exception": false, "start_time": "2022-03-14T23:53:50.609311", "status": "completed"} tags=[] # ### Import Necessary Packages # # + executionInfo={"elapsed": 10780, "status": "ok", "timestamp": 1646923501317, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="3ceejs2jLrIk" outputId="c6bcd034-dd6c-4f13-98e6-9f5905924bd0" papermill={"duration": 12.006929, "end_time": "2022-03-14T23:54:02.692451", "exception": false, "start_time": "2022-03-14T23:53:50.685522", "status": "completed"} tags=[] #Models and Core Packages from transformers import AutoTokenizer, TFAutoModel, pipeline import pandas as pd #For Preprocessing from pathlib import Path import re # RegEx for removing non-letter characters import nltk # natural language processing import emoji # processing emojis nltk.download("stopwords") nltk.download('punkt') from nltk.corpus import stopwords from nltk.stem.porter import * #For data visualization import matplotlib.pyplot as plt import matplotlib.patches as mpatches # %matplotlib inline pd.options.plotting.backend = "plotly" pd.options.display.max_colwidth=160 # + [markdown] id="VyExttTaH7cZ" papermill={"duration": 0.038013, "end_time": 
# HuggingFace installation check: run the default sentiment pipeline once
# (per the huggingface installation guide), then load the DistilBERT base
# uncased GoEmotions student model and plot the score distribution for a
# sample sentence.
# Model card: https://huggingface.co/joeddav/distilbert-base-uncased-go-emotions-student

# Default pipeline smoke test.
pipeline("sentiment-analysis")("we love you")

# DistilBERT GoEmotions student classifier returning all 28 emotion scores.
classifier = pipeline(
    "text-classification",
    model="joeddav/distilbert-base-uncased-go-emotions-student",
    framework="tf",
    return_all_scores=True,
)
predictions = classifier("I feel lucky to be here.")
df = pd.DataFrame(predictions[0])
df.plot(x="label", y="score", kind="bar")
# Utility: HTML progress bar for notebook output
# (adapted from https://stackoverflow.com/a/46939639/9573439).
from IPython.display import HTML, display
import time


def progress(done, total):
    """Return an HTML <progress> widget showing done/total completion."""
    percent = int(100 * done // total)
    return HTML(
        """
    <span>
        Progress:
        <progress value='{percent}' max='100', style='width: 50%' >
            {percent}
        </progress>
        {done}/{total} Complete
    </span>
    """.format(percent=percent, done=done, total=total)
    )


# Demo: animate the bar, stopping at the halfway point.
out = display(progress(0, 100), display_id=True)
for step in range(0, 100, 1):
    time.sleep(0.01)
    out.update(progress(step + 1, 100))
    if step == 50:
        break
# #### Verdict
def verdict(predictions):
    """Return the label carrying the maximum predicted probability.

    Parameters
    ----------
    predictions : list of ``{'label': str, 'score': float}`` dicts, as
        produced by a HuggingFace text-classification pipeline with
        ``return_all_scores=True``.

    Returns '' for an empty prediction list (same as the original
    accumulator-based loop).
    """
    max_score = 0
    best_label = ''
    for entry in predictions:
        if entry['score'] > max_score:
            max_score = entry['score']
            best_label = entry['label']
    return best_label


if __name__ == "__main__":
    # Smoke test using the ``predictions`` produced by the classifier cell.
    print(verdict(predictions[0]))


# #### Contraction Mapping
# Dictionary containing valid english contractions from wikipedia.
contraction_mapping = {
    "ain't": "is not", "aren't": "are not", "can't": "cannot",
    "'cause": "because", "could've": "could have", "couldn't": "could not",
    "didn't": "did not", "doesn't": "does not", "don't": "do not",
    "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
    "he'd": "he would", "he'll": "he will", "he's": "he is",
    "how'd": "how did", "how'd'y": "how do you", "how'll": "how will",
    "how's": "how is", "I'd": "I would", "I'd've": "I would have",
    "I'll": "I will", "I'll've": "I will have", "I'm": "I am",
    "I've": "I have", "i'd": "i would", "i'd've": "i would have",
    "i'll": "i will", "i'll've": "i will have", "i'm": "i am",
    "i've": "i have", "isn't": "is not", "it'd": "it would",
    "it'd've": "it would have", "it'll": "it will",
    "it'll've": "it will have", "it's": "it is", "let's": "let us",
    "ma'am": "madam", "mayn't": "may not", "might've": "might have",
    "mightn't": "might not", "mightn't've": "might not have",
    "must've": "must have", "mustn't": "must not",
    "mustn't've": "must not have", "needn't": "need not",
    "needn't've": "need not have", "o'clock": "of the clock",
    "oughtn't": "ought not", "oughtn't've": "ought not have",
    "shan't": "shall not", "sha'n't": "shall not",
    "shan't've": "shall not have", "she'd": "she would",
    "she'd've": "she would have", "she'll": "she will",
    "she'll've": "she will have", "she's": "she is",
    "should've": "should have", "shouldn't": "should not",
    "shouldn't've": "should not have", "so've": "so have", "so's": "so as",
    "this's": "this is", "that'd": "that would",
    "that'd've": "that would have", "that's": "that is",
    "there'd": "there would", "there'd've": "there would have",
    "there's": "there is", "here's": "here is", "they'd": "they would",
    "they'd've": "they would have", "they'll": "they will",
    "they'll've": "they will have", "they're": "they are",
    "they've": "they have", "to've": "to have", "wasn't": "was not",
    "we'd": "we would", "we'd've": "we would have", "we'll": "we will",
    "we'll've": "we will have", "we're": "we are", "we've": "we have",
    "weren't": "were not", "what'll": "what will",
    "what'll've": "what will have", "what're": "what are",
    "what's": "what is", "what've": "what have", "when's": "when is",
    "when've": "when have", "where'd": "where did", "where's": "where is",
    "where've": "where have", "who'll": "who will",
    "who'll've": "who will have", "who's": "who is", "who've": "who have",
    "why's": "why is", "why've": "why have", "will've": "will have",
    "won't": "will not", "won't've": "will not have",
    "would've": "would have", "wouldn't": "would not",
    "wouldn't've": "would not have", "y'all": "you all",
    "y'all'd": "you all would", "y'all'd've": "you all would have",
    "y'all're": "you all are", "y'all've": "you all have",
    "you'd": "you would", "you'd've": "you would have",
    "you'll": "you will", "you'll've": "you will have",
    "you're": "you are", "you've": "you have",
    'u.s': 'america', 'e.g': 'for example',
}

# #### Text Processing
# Regex patterns.
url_regx = r"(quick\s*link[s*]\s*:\s*)*((http://)[^ ]*|(https://)[^ ]*|( www\.)[^ ]*)"
user_regx = r'@[^\s]+'
hashtag_regx = r'#[^\s]+'
special_quotes_regx = r'[’|‘|´|`]+'
alpha_regx = r"[^a-zA-Z0-9'\"\.\,\?\!\&\%\$\/\-+]"
valid_punc_regx = r"([\"\.\,\?\!\&\%\$\/\-+])"
break_alphanum_regx = r'([0-9]+)'
sequence_regx = r"(.)\1\1+"
seq_replace_regx = r"\1\1"
is_empty_str_regx = r'.*[a-zA-Z0-9]+.*'

# NLTK stopwords are only needed by the (currently disabled) stopword-removal
# branch inside process_text; fall back to an empty set when the nltk corpus
# is unavailable so the rest of the module still loads.
try:
    stop_words = set(nltk.corpus.stopwords.words('english'))
except Exception:
    stop_words = set()


def process_text(s):
    """Normalize a tweet/bio string for the emotion classifier.

    Strips links, mentions, hashtags and emojis, normalizes punctuation and
    repeated letters, expands contractions, and returns '' when fewer than
    two tokens survive the cleaning.
    """
    # Remove links (including any "Quick Links:" prefix).
    s = re.sub(url_regx, ' ', s)
    # Replace tabs with whitespace.
    s = s.replace('\t', ' ')
    # Replace all emojis.
    s = emoji.replace_emoji(s, ' ')
    # Replace @USERNAME with ' '.
    s = re.sub(user_regx, ' ', s)
    # Replace #HASHTAG with ' '.
    s = re.sub(hashtag_regx, ' ', s)
    # Replace special quotation marks with an ASCII apostrophe.
    s = re.sub(special_quotes_regx, "'", s)
    # Collapse runs of a valid punctuation character to a single occurrence.
    s = re.sub(valid_punc_regx + r"\1+", r"\1", s)
    # Replace 3 or more consecutive identical characters with 2.
    s = re.sub(sequence_regx, seq_replace_regx, s)
    # Drop non-english alphabets, digits and invalid punctuation.
    s = re.sub(alpha_regx, " ", s)
    # Put space between punctuation and letters/digits.
    s = re.sub(valid_punc_regx, r" \1 ", s)
    # Break alphanumeric tokens into words and numbers (missing-space fix).
    s = re.sub(break_alphanum_regx, r' \1 ', s)
    # Bail out if no alphabet/digit remains.
    if re.match(is_empty_str_regx, s) is None:
        return ''
    # Tokenize on whitespace (nltk.word_tokenize would split contractions).
    tokens = s.split()
    valid_bag = []
    for w in tokens:
        # Stopword removal disabled — may be important for emotion signals.
        # if w in stop_words:
        #     continue
        if w in contraction_mapping:
            # FIX: expand the contraction *instead of* keeping the raw token;
            # the original also appended ``w`` unconditionally, so every
            # contracted word appeared twice in the output.
            valid_bag.extend(contraction_mapping[w].split())
        else:
            valid_bag.append(w)
    # Require at least 2 words, or 1 word and 1 punctuation token.
    if len(valid_bag) < 2:
        return ''
    return ' '.join(valid_bag)
# Emoji limitation demo: compare verdicts for the raw-emoji, emoji-stripped,
# hand-translated, and demojized variants of the same sentences.
examples = [
    "❤️ you",
    "meh 😒",
    "did not bring the charger 😢",
    "lol 😂",
    "can you help me with the loan? 😊",
]
demojized = [emoji.demojize(s) for s in examples]
demojized_eng = [
    "heart you",
    "meh unamused",
    "did not bring the charger crying",
    "lol tears of joy",
    "can you help me with the loan? smiling",
]
noemoji = [
    "you",
    "meh",
    "did not bring the charger",
    "lol",
    "can you help me with the loan?",
]

# One row per variant, grouped by example in the same order as the original
# hand-unrolled table (raw, no-emoji, english translation, demojized) —
# replaces 40 repeated literal lines with a single comprehension.
sentences = [
    s for group in zip(examples, noemoji, demojized_eng, demojized) for s in group
]
df = pd.DataFrame(
    {
        "sentence": sentences,
        "verdict": [verdict(classifier(s)[0]) for s in sentences],
    }
)
display(df)
id="dP_LcxucddaF" papermill={"duration": 0.049099, "end_time": "2022-03-14T23:55:29.553840", "exception": false, "start_time": "2022-03-14T23:55:29.504741", "status": "completed"} tags=[] # #### Training Limitations # From the model card for the DistilBERT model [here](https://huggingface.co/joeddav/distilbert-base-uncased-go-emotions-student), the model is trained using the zero-shot pipeline provided by huggingface with unlabeled GoEmotions dataset. The model uses the same classes from the GoEmotions dataset. # # This model may underperform compared to a full supervised model. No accuracy/F1 score is shared by the author. # + [markdown] id="9-_JBWR6eMew" papermill={"duration": 0.04838, "end_time": "2022-03-14T23:55:29.653227", "exception": false, "start_time": "2022-03-14T23:55:29.604847", "status": "completed"} tags=[] # ### Pre-processing Bios # # The file contains the userids and bios of twitter users. Suitable techniques from below are used to clean and prepare the data. # # #### Tweet Cleaning Strategy (in order of recommended execution) - # 1. **Lowercase** the bios # 2. **Duplicates** removed. # 3. **Remove links** and any "Quick Links: " text. # 4. **html** code check, remove if found in the dataset. # 5. **Tabs** replaced with whitespace. # 6. **Emoji** to text, ascii emoji to text. Replace consecutive same emoji with a single one. If accuracy suffers then removed. # 7. **Mentions** remove or replace with mask *USER* depending on accuracy. # 8. **HashTags** convertion to valid words if possible. If not possible remove or replaced with mask *HASHTAG* depending on accuracy. # 9. **Special quotation** marks replaced with proper ones. # 10. **Contraction mapping** and short forms (u, lol etc.) expansion. # 11. **Consecutive letters** if 3 or more then replaced with only 2 (*heeyyyy* to *heeyy*). # 12. **Acronyms** expansion. # 13. **English letters, digits, valid punctuations** kept, everything else removed. # 14. 
**Break Alphanumeric words** by adding space between letters and numbers (assuming missing space mistake). # 15. **Stopwords** removed depending on the hit on accuracy (may be important for emotions?). # 16. **Space between words and punctuations**. Must be after ascii emoji to text is done and unnecessary ones removed. # 17. **Spelling correction** based on valid dictionary. # 18. **POS** generation, tokenization # 19. **Lemmatization** depending on accuracy. # 20. **Remove multiple spaces**. # 21. **Empty sentences** removed from dataset. # + [markdown] id="RFnmMynHdPv8" papermill={"duration": 0.048764, "end_time": "2022-03-14T23:55:29.751304", "exception": false, "start_time": "2022-03-14T23:55:29.702540", "status": "completed"} tags=[] # #### Mounting Google drive # If using Google Colab with Google Drive as storage. # + executionInfo={"elapsed": 15656, "status": "ok", "timestamp": 1646923368778, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="XD80uS1HeXav" outputId="675af7fc-dd70-434b-9257-3f2ce28ac8e4" papermill={"duration": 0.054936, "end_time": "2022-03-14T23:55:29.856994", "exception": false, "start_time": "2022-03-14T23:55:29.802058", "status": "completed"} tags=[] # from google.colab import drive # drive.mount('/content/drive') # + [markdown] id="VMv99DqM4zvq" papermill={"duration": 0.04806, "end_time": "2022-03-14T23:55:29.957023", "exception": false, "start_time": "2022-03-14T23:55:29.908963", "status": "completed"} tags=[] # #### Checking file location # + executionInfo={"elapsed": 1065, "status": "ok", "timestamp": 1646923564167, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="EBczY4CxfawI" outputId="dca7224f-6948-41e4-bf29-0c846ef912ca" papermill={"duration": 0.87474, "end_time": "2022-03-14T23:55:30.878378", "exception": false, 
"start_time": "2022-03-14T23:55:30.003638", "status": "completed"} tags=[] user_bios = '/kaggle/input/twitter-user-bios/users_bio.csv' # !head /kaggle/input/twitter-user-bios/users_bio.csv # + [markdown] id="yHCLPY1k44UL" papermill={"duration": 0.049018, "end_time": "2022-03-14T23:55:30.975617", "exception": false, "start_time": "2022-03-14T23:55:30.926599", "status": "completed"} tags=[] # #### Processing dataset # + [markdown] id="z7XxeTb0EFue" papermill={"duration": 0.047924, "end_time": "2022-03-14T23:55:31.072375", "exception": false, "start_time": "2022-03-14T23:55:31.024451", "status": "completed"} tags=[] # There is line ending issues in the word file. If directly read using pandas, it causes multiple lines to be considered as the same line. Manual loading of the file is required. # # **Step 1:** Taking advantage of this we are removing empty bios and converting to lowercase. # + executionInfo={"elapsed": 10188, "status": "ok", "timestamp": 1646881738485, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="ukzw9cjNEXJe" papermill={"duration": 7.581639, "end_time": "2022-03-14T23:55:38.702007", "exception": false, "start_time": "2022-03-14T23:55:31.120368", "status": "completed"} tags=[] ids = [] bios = [] with open(user_bios, mode='r') as fin: for l in fin: l = l.strip() parts = l.split('\t') # Empty string or only id and no bio if len(parts) < 2 or len(parts[1]) == 0: continue ids.append(parts[0]) # Lowercase bios during load bios.append(parts[1].lower()) # + [markdown] id="byyUr391m_WD" papermill={"duration": 0.048074, "end_time": "2022-03-14T23:55:38.799117", "exception": false, "start_time": "2022-03-14T23:55:38.751043", "status": "completed"} tags=[] # **Step 2:** Duplicates removed # + executionInfo={"elapsed": 12108, "status": "ok", "timestamp": 1646881760016, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="FavAVV6Cf9Yw" outputId="61c6c8da-8788-4f55-bfde-ac261057fa91" papermill={"duration": 6.654043, "end_time": "2022-03-14T23:55:45.500829", "exception": false, "start_time": "2022-03-14T23:55:38.846786", "status": "completed"} tags=[] df = pd.DataFrame({'ids': ids, 'bios': bios}) df.drop_duplicates(inplace=True) df.dropna(inplace=True) df['length'] = [len(s) for s in df['bios']] df.info() # + executionInfo={"elapsed": 249, "status": "ok", "timestamp": 1646881765225, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="ZYGvDQZYBfyM" outputId="7f55c415-bea7-4165-c327-adeb14b08010" papermill={"duration": 0.061655, "end_time": "2022-03-14T23:55:45.609885", "exception": false, "start_time": "2022-03-14T23:55:45.548230", "status": "completed"} tags=[] df.head() # + executionInfo={"elapsed": 266, "status": "ok", "timestamp": 1646881769855, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="aGp0tLuVBgv5" outputId="0dc042d5-bb49-4e62-bddf-17f3ab72134e" papermill={"duration": 0.065984, "end_time": "2022-03-14T23:55:45.724528", "exception": false, "start_time": "2022-03-14T23:55:45.658544", "status": "completed"} tags=[] df[df.length == df.length.max()].head(1) # + executionInfo={"elapsed": 250, "status": "ok", "timestamp": 1646881783201, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="gn0LqSMwW48v" outputId="893c3a6e-814c-46df-ab16-de5dadd7e111" papermill={"duration": 0.07215, "end_time": "2022-03-14T23:55:45.846680", "exception": false, "start_time": "2022-03-14T23:55:45.774530", "status": "completed"} tags=[] df[df.length == df.length.min()].head(1) # + 
[markdown] id="kdq6fGVCxaFL" papermill={"duration": 0.050145, "end_time": "2022-03-14T23:55:45.945430", "exception": false, "start_time": "2022-03-14T23:55:45.895285", "status": "completed"} tags=[] # **Step 3, 5-16:** links, mentions, hashtags, emojis, special quotations, contraction mapping, consecutive letters, ~~acronyms~~, non-english/non-number removal, break alphanumeric words, ~~remove stopwords~~, space between words and punctuations, ~~spelling correction~~. # + [markdown] id="giKI9MqHEJoH" papermill={"duration": 0.049907, "end_time": "2022-03-14T23:55:46.044287", "exception": false, "start_time": "2022-03-14T23:55:45.994380", "status": "completed"} tags=[] # Test run and Inspection # + executionInfo={"elapsed": 255, "status": "ok", "timestamp": 1646881791358, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="13c83VB1v0YU" outputId="666de575-fe78-42ec-e31f-b2a4289813a7" papermill={"duration": 0.130561, "end_time": "2022-03-14T23:55:46.223900", "exception": false, "start_time": "2022-03-14T23:55:46.093339", "status": "completed"} tags=[] for s in df['bios'].to_list()[:5]: print(f'Before: {s}\n') print(f'After: {process_text(s)}\n\n') # + [markdown] id="bbCAcKpEEGzh" papermill={"duration": 0.048845, "end_time": "2022-03-14T23:55:46.322621", "exception": false, "start_time": "2022-03-14T23:55:46.273776", "status": "completed"} tags=[] # Final processing and saved in file # + executionInfo={"elapsed": 139402, "status": "ok", "timestamp": 1646881938825, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="3HNBrySeE3EM" outputId="6dd16e9d-00fc-44e6-b201-9eb8e21f8aec" papermill={"duration": 123.560769, "end_time": "2022-03-14T23:57:49.932353", "exception": false, "start_time": "2022-03-14T23:55:46.371584", "status": "completed"} tags=[] df['processed'] = 
[process_text(s) for s in df['bios']] df['length_processed'] = [len(s) for s in df['processed']] df.info() # + executionInfo={"elapsed": 2367, "status": "ok", "timestamp": 1646882032051, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="PAALQ7U7Jhzl" outputId="db3368cd-2f72-4789-bbfe-482449dc3a51" papermill={"duration": 1.247638, "end_time": "2022-03-14T23:57:51.229913", "exception": false, "start_time": "2022-03-14T23:57:49.982275", "status": "completed"} tags=[] df.drop(df[df.length_processed < 2].index, inplace=True) df.info() # + executionInfo={"elapsed": 250, "status": "ok", "timestamp": 1646882039613, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="tmUCG4aVLONn" outputId="3bf7f2f9-613a-4c88-dd45-3c853aacc017" papermill={"duration": 0.067263, "end_time": "2022-03-14T23:57:51.346161", "exception": false, "start_time": "2022-03-14T23:57:51.278898", "status": "completed"} tags=[] df[df.length_processed == df.length_processed.max()].head(1) # + executionInfo={"elapsed": 255, "status": "ok", "timestamp": 1646882044939, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="og30PUEgeu7Q" outputId="227f4987-45d6-4c23-8602-7b79aac66a6a" papermill={"duration": 0.067941, "end_time": "2022-03-14T23:57:51.467917", "exception": false, "start_time": "2022-03-14T23:57:51.399976", "status": "completed"} tags=[] df[df.length_processed == df.length_processed.min()].head(1) # + [markdown] id="UuzkqqOeb0Gk" papermill={"duration": 0.050324, "end_time": "2022-03-14T23:57:51.570743", "exception": false, "start_time": "2022-03-14T23:57:51.520419", "status": "completed"} tags=[] # #### Save Processed Bios to File # + executionInfo={"elapsed": 6173, "status": "ok", 
"timestamp": 1646882065268, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="N30hK0vRb9pP" papermill={"duration": 4.619219, "end_time": "2022-03-14T23:57:56.240391", "exception": false, "start_time": "2022-03-14T23:57:51.621172", "status": "completed"} tags=[] processed_bios = '/kaggle/working/users_bio_processed.csv' df.to_csv(processed_bios, sep='\t', columns=['ids', 'processed'], index=False) # + [markdown] id="RO2Y-b1m5Qnu" papermill={"duration": 0.050515, "end_time": "2022-03-14T23:57:56.342188", "exception": false, "start_time": "2022-03-14T23:57:56.291673", "status": "completed"} tags=[] # ### Predictions in Batch # + executionInfo={"elapsed": 5980, "status": "ok", "timestamp": 1646923582438, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="dPoAFkWIpZAz" outputId="54562bba-020a-41f9-f863-5ae13d5c7890" papermill={"duration": 5.171615, "end_time": "2022-03-14T23:58:01.564369", "exception": false, "start_time": "2022-03-14T23:57:56.392754", "status": "completed"} tags=[] df = pd.read_csv(processed_bios, sep='\t', dtype={"ids": "string", "processed": "string"}) df.info() # + [markdown] papermill={"duration": 0.095543, "end_time": "2022-03-14T23:58:01.750820", "exception": false, "start_time": "2022-03-14T23:58:01.655277", "status": "completed"} tags=[] # To circumvent the time limitation on single run, each time we start from the end point of last run. This requires us to update the dataset before each run with the output of the last run. 
# + papermill={"duration": 10.667254, "end_time": "2022-03-14T23:58:12.483698", "exception": false, "start_time": "2022-03-14T23:58:01.816444", "status": "completed"} tags=[] # !cp '/kaggle/input/twitter-user-bios/users_bio_distilbert_27.csv' 'users_bio_distilbert_27.csv' predicted_bios = 'users_bio_distilbert_27.csv' # + executionInfo={"elapsed": 2010, "status": "ok", "timestamp": 1646923674840, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03056090282051064294"}, "user_tz": 300} id="OoIA11qdty1F" outputId="2cf30610-8a62-47a9-c7e8-df386dadc4ab" papermill={"duration": 2.757293, "end_time": "2022-03-14T23:58:15.308628", "exception": false, "start_time": "2022-03-14T23:58:12.551335", "status": "completed"} tags=[] out_path = Path(predicted_bios) start_index = 0 if out_path.is_file(): with open(out_path, mode='r') as fin: start_index = len(fin.readlines()) - 1 print(f'Start processing from: {start_index}') # + id="puTx-XEH5gi5" outputId="e36ac8db-3662-42d5-c5d4-0312fdd696ce" papermill={"duration": 0.232011, "end_time": "2022-03-14T23:58:15.593293", "exception": false, "start_time": "2022-03-14T23:58:15.361282", "status": "completed"} tags=[] with open(predicted_bios, mode='a') as fout: if start_index == 0: fout.write('ids,admiration,amusement,anger,annoyance,approval,'+\ 'caring,confusion,curiosity,desire,disappointment,disapproval,'+\ 'disgust,embarrassment,excitement,fear,gratitude,grief,joy,love,'+\ 'nervousness,optimism,pride,realization,relief,remorse,sadness,surprise,neutral,verdict\n') n = len(df['processed']) step_size = 1 # Using 1000 for actual runs out = display(progress(0, n), display_id=True) for i in range(start_index, n, step_size): predictions = classifier(df['processed'][i : (i + step_size)].to_list()) for j in range(len(predictions)): fout.write(f'{df["ids"][i + j]},') for d in predictions[j]: fout.write(f"{d['score']}, ") fout.write(f'{verdict(predictions[j])}\n') 
out.update(progress(i + step_size, n)) break # Breaking instead of completing the run # + id="w8dyDO9QfYL6" papermill={"duration": 0.059831, "end_time": "2022-03-14T23:58:15.705628", "exception": false, "start_time": "2022-03-14T23:58:15.645797", "status": "completed"} tags=[] # drive.flush_and_unmount()
distilbert-goemotions-ed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Erasmus+ ICCT project (2018-1-SI01-KA203-047081) # Toggle cell visibility from IPython.display import HTML tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''') display(tag) # - # %matplotlib notebook import numpy as np import control as control import matplotlib.pyplot as plt import matplotlib.image as mpimg from ipywidgets import widgets from ipywidgets import interact import scipy.signal as signal import sympy as sym # ## Mechanical systems # # #### General mass-spring-damper model # > The mass-spring-damper model consists of discrete mass nodes distributed throughout an object and interconnected via a network of springs and dampers. This model is well-suited for modelling object with complex material properties such as nonlinearity and viscoelasticity. (source: [Wikipedia](https://en.wikipedia.org/wiki/Mass-spring-damper_model "Mass-spring-model")) # #### 1/4 car model # > 1/4 car model is used to analyze the ride quality of automotive suspension systems. Mass $m_1$ is the "sprung mass", which is one-quarter of the vehicle mass that is supported by the suspension system. Mass $m_2$ is the "unsprung mass", which is lumped mass composed of one wheel and half-axle assembly, plus the shock absorber and suspensison springs. The stiffness and damping of the suspension system are modeled by the ideal spring constant $k_1$ and friction coefficient $B$, respecitvely. Tire stifness is modeled by spring constant $k_2$. 
(source: [Chegg Study](https://www.chegg.com/homework-help/questions-and-answers/figure-p230-shows-1-4-car-model-used-analyze-ride-quality-automotive-suspension-systems-ma-q26244005 "1/4 car model")) # # --- # # ### How to use this notebook? # 1. Toggle between *mass-spring-damper* and *1/4 car model* system by clicking on a corresponding button. # 2. Toggle betweeen *step function*, *impulse function*, *ramp function*, and *sine function* to select the function of the force $F$. # 3. Move the sliders to change the values of the mass ($m$; $m_1$ and $m_2$), spring coefficients ($k$; $k_1$ and $k_2$), damping constant ($B$), input signal amplification and initial conditions ($x_0$, $\dot{x}_0$, $y_0$, $\dot{y}_0$). # <table> # <tr> # <th style="text-align:center">Mass-spring-damper</th> # <th style="text-align:center">1/4 car model</th> # </tr> # <tr> # <td style="width:170px; height:150px"><img src='img/mass-spring-damper.png'></td> # <td style="width:280px; height:150px"><img src='img/car_model.png'></td> # </tr> # <tr> # </tr> # </table> # + # create figure fig = plt.figure(figsize=(9.8, 4),num='Mechanical systems') # add sublot ax = fig.add_subplot(111) ax.set_title('Time Response') ax.set_ylabel('input, output') ax.set_xlabel('$t$ [s]') ax.grid(which='both', axis='both', color='lightgray') inputf, = ax.plot([], []) responsef, = ax.plot([], []) responsef2, = ax.plot([], []) arrowf, = ax.plot([],[]) style = {'description_width': 'initial'} selectSystem=widgets.ToggleButtons( options=[('mass-spring-damper',0),('1/4 car model',1)], description='Select system: ', style=style) # define toggle buttons selectForce = widgets.ToggleButtons( options=[('step function', 0), ('impulse function', 1), ('ramp function', 2), ('sine function', 3)], description='Select $F$ function: ', style=style) display(selectSystem) display(selectForce) def build_model(M,K,B,M1,M2,B1,K1,K2,amp,x0,xpika0,select_System,index): num_of_samples = 1000 total_time = 25 t = np.linspace(0, total_time, 
num_of_samples) # time for which response is calculated (start, stop, step) global inputf, responsef, responsef2, arrowf if select_System==0: system0 = control.TransferFunction([1], [M, B, K]) if index==0: inputfunc = np.ones(len(t))*amp time, response, xx = control.forced_response(system0, t, inputfunc, X0=[xpika0,x0*M]) inputfunc[0]=0 elif index==1: inputfunc=signal.unit_impulse(1000, 0)*amp time, response, xx = control.forced_response(system0, t, inputfunc, X0=[xpika0,x0*M]) elif index==2: inputfunc=t; time, response, xx = control.forced_response(system0, t, inputfunc, X0=[xpika0,x0*M]) elif index==3: inputfunc=np.sin(t)*amp time, response, xx = control.forced_response(system0, t, inputfunc, X0=[xpika0,x0*M]) elif select_System==1: system1 = control.TransferFunction([B1*K2, K1*K2],[M1*M2, M1*B1+M2*B1, M1*K1+M1*K2+M2*K1, B1*K2, K1*K2]) system2 = control.TransferFunction([M1*K2, B1*K2, K1*K2],[M1*M2, M1*B1+M2*B1, M1*K1+M1*K2+M2*K1, B1*K2, K1*K2]) if index==0: inputfunc = np.ones(len(t))*amp time, response, xx = control.forced_response(system1, t, inputfunc, X0=[0,0,0,0]) time2, response2, xx2 = control.forced_response(system2, t, inputfunc, X0=[0,0,0,0]) inputfunc[0]=0 elif index==1: inputfunc=signal.unit_impulse(1000, 0)*amp time, response, xx = control.forced_response(system1, t, inputfunc, X0=[0,0,0,0]) time2, response2, xx2 = control.forced_response(system2, t, inputfunc, X0=[0,0,0,0]) elif index==2: inputfunc=t; time, response, xx = control.forced_response(system1, t, inputfunc, X0=[0,0,0,0]) time2, response2, xx2 = control.forced_response(system2, t, inputfunc, X0=[0,0,0,0]) elif index==3: inputfunc=np.sin(t)*amp time, response, xx = control.forced_response(system1, t, inputfunc, X0=[0,0,0,0]) time2, response2, xx2 = control.forced_response(system2, t, inputfunc, X0=[0,0,0,0]) ax.lines.remove(responsef) ax.lines.remove(inputf) ax.lines.remove(responsef2) ax.lines.remove(arrowf) inputf, = ax.plot(t,inputfunc,label='$F$',color='C0') responsef, = ax.plot(time, 
response,label='$x$',color='C3') if select_System==1: responsef2, = ax.plot(time, response2,label='$y$',color='C2') elif select_System==0: responsef2, = ax.plot([],[]) if index==1: if amp>0: arrowf, = ax.plot([-0.1,0,0.1],[amp-((amp*0.05)/2),amp,amp-((amp*0.05)/2)],color='C0',linewidth=4) elif amp==0: arrowf, = ax.plot([],[]) elif amp<0: arrowf, = ax.plot([-0.1,0,0.1],[amp-((amp*0.05)/2),amp,amp-(amp*(0.05)/2)],color='C0',linewidth=4) else: arrowf, = ax.plot([],[]) ax.relim() ax.autoscale_view() ax.legend() def update_sliders(index): global m1_slider, b1_slider, k1_slider, m21_slider, m22_slider, b2_slider, k21_slider, k22_slider global x0_slider, xpika0_slider m1val = [0.1,0.1,0.1,0.1] k1val = [1,1,1,1] b1val = [0.1,0.1,0.1,0.1] m21val = [0.1,0.1,0.1,0.1] m22val = [0.1,0.1,0.1,0.1] b2val = [0.1,0.1,0.1,0.1] k21val = [1,1,1,1] k22val = [1,1,1,1] x0val = [0,0,0,0] xpika0val = [0,0,0,0] y0val = [0,0,0,0] ypika0val = [0,0,0,0] m1_slider.value = m1val[index] k1_slider.value = k1val[index] b1_slider.value = b1val[index] m21_slider.value = m21val[index] m22_slider.value = m22val[index] b2_slider.value = b2val[index] k21_slider.value = k21val[index] k22_slider.value = k22val[index] x0_slider.value = x0val[index] xpika0_slider.value = xpika0val[index] def draw_controllers(type_select,index): global m1_slider, b1_slider, k1_slider, m21_slider, m22_slider, b2_slider, k21_slider, k22_slider global x0_slider, xpika0_slider if type_select==0: amp_slider = widgets.FloatSlider(value=1.,min=-2.,max=2.,step=0.1, description='Input signal amplification:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f',style=style) m1_slider = widgets.FloatSlider(value=.1, min=.01, max=1., step=.01, description='$m$ [kg]:',disabled=False,continuous_update=False, orientation='horizontal',readout=True,readout_format='.2f',) k1_slider = widgets.FloatSlider(value=1.,min=0.,max=20.,step=.1, description='$k$ [N/m]:',disabled=False,continuous_update=False, 
orientation='horizontal',readout=True,readout_format='.1f',) b1_slider = widgets.FloatSlider(value=.1,min=0.0,max=0.5,step=.01, description='$B$ [Ns/m]:',disabled=False,continuous_update=False, rientation='horizontal',readout=True,readout_format='.2f',) m21_slider = widgets.FloatSlider(value=.1,min=.01,max=1.,step=.01, description='$m_1$ [kg]:',disabled=True,continuous_update=False,orientation='horizontal',readout=True,readout_format='.2f', ) m22_slider = widgets.FloatSlider(value=.1,min=.0,max=1.,step=.01, description='$m_2$ [kg]:',disabled=True,continuous_update=False,orientation='horizontal',readout=True,readout_format='.2f', ) b2_slider = widgets.FloatSlider(value=.1,min=0.0,max=2,step=.01, description='$B$ [Ns/m]:',disabled=True,continuous_update=False,orientation='horizontal',readout=True,readout_format='.2f', ) k21_slider = widgets.FloatSlider(value=1.,min=0.,max=20.,step=.1, description='$k_1$ [N/m]:',disabled=True,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f', ) k22_slider = widgets.FloatSlider(value=1.,min=0.,max=20.,step=.1, description='$k_2$ [N/m]:',disabled=True,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f', ) x0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, description='$x_0$ [dm]:',disabled=False,continuous_update=False, orientation='horizontal',readout=True,readout_format='.2f',) xpika0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, description='${\dot{x}}_0$ [dm/s]:',disabled=False,continuous_update=False, orientation='horizontal',readout=True,readout_format='.2f',) #y0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, # description='$y_0$ [dm]:',disabled=True,continuous_update=False, # orientation='horizontal',readout=True,readout_format='.2f',) #ypika0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, # description='${\dot{y}}_0$ [dm/s]:',disabled=True,continuous_update=False, # 
orientation='horizontal',readout=True,readout_format='.2f',) elif type_select==1: amp_slider = widgets.FloatSlider(value=1.,min=-2.,max=2.,step=0.1, description='Input signal amplification:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f',style=style) m1_slider = widgets.FloatSlider(value=.1, min=.01, max=1., step=.01, description='$m$ [kg]:',disabled=True,continuous_update=False, orientation='horizontal',readout=True,readout_format='.2f',) k1_slider = widgets.FloatSlider(value=1.,min=0.,max=20.,step=.1, description='$k$ [N/m]:',disabled=True,continuous_update=False, orientation='horizontal',readout=True,readout_format='.1f',) b1_slider = widgets.FloatSlider(value=.1,min=0.0,max=0.5,step=.01, description='$B$ [Ns/m]:',disabled=True,continuous_update=False, rientation='horizontal',readout=True,readout_format='.2f',) m21_slider = widgets.FloatSlider(value=.1,min=.01,max=1.,step=.01, description='$m_1$ [kg]:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.2f', ) m22_slider = widgets.FloatSlider(value=.1,min=.0,max=1.,step=.01, description='$m_2$ [kg]:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.2f', ) b2_slider = widgets.FloatSlider(value=.1,min=0.0,max=2,step=.01, description='$B$ [Ns/m]:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.2f', ) k21_slider = widgets.FloatSlider(value=1.,min=0.,max=20.,step=.1, description='$k_1$ [N/m]:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f', ) k22_slider = widgets.FloatSlider(value=1.,min=0.,max=20.,step=.1, description='$k_2$ [N/m]:',disabled=False,continuous_update=False,orientation='horizontal',readout=True,readout_format='.1f', ) x0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, description='$x_0$ [dm]:',disabled=True,continuous_update=False, 
orientation='horizontal',readout=True,readout_format='.2f',) xpika0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, description='${\dot{x}}_0$ [dm/s]:',disabled=True,continuous_update=False, orientation='horizontal',readout=True,readout_format='.2f',) #y0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, # description='$y_0$ [dm]:',disabled=True,continuous_update=False, # orientation='horizontal',readout=True,readout_format='.2f',) #ypika0_slider=widgets.FloatSlider(value=0, min=-1, max=1., step=.1, # description='${\dot{y}}_0$ [dm/s]:',disabled=True,continuous_update=False, # orientation='horizontal',readout=True,readout_format='.2f',) input_data = widgets.interactive_output(build_model, {'M':m1_slider, 'K':k1_slider, 'B':b1_slider, 'M1':m21_slider, 'M2':m22_slider, 'B1':b2_slider, 'K1':k21_slider, 'K2':k22_slider, 'amp':amp_slider, 'x0':x0_slider,'xpika0':xpika0_slider, 'select_System':selectSystem,'index':selectForce}) input_data2 = widgets.interactive_output(update_sliders, {'index':selectForce}) box_layout = widgets.Layout(border='1px solid black', width='auto', height='', flex_flow='row', display='flex') buttons1=widgets.HBox([widgets.VBox([amp_slider],layout=widgets.Layout(width='auto')), widgets.VBox([x0_slider,xpika0_slider])],layout=box_layout) display(widgets.VBox([widgets.Label('Select the values of the input signal amplification and intial conditions:'), buttons1])) display(widgets.HBox([widgets.VBox([m1_slider,k1_slider,b1_slider], layout=widgets.Layout(width='45%')), widgets.VBox([m21_slider,m22_slider,k21_slider,k22_slider,b2_slider], layout=widgets.Layout(width='45%'))]), input_data) widgets.interactive_output(draw_controllers, {'type_select':selectSystem,'index':selectForce})
ICCT_en/examples/02/.ipynb_checkpoints/TD-03-Mechanical-systems-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: XBT test 20200128 # language: python # name: xbt_test_20200128 # --- # # Reproducing previous Machine-learning paper # # The first attempt at the Met Office to apply Machine Learning techniques to the problem of missing metadata in the XBT dataset was done by <NAME> and <NAME> working with <NAME> and <NAME>. # # https://journals.ametsoc.org/doi/full/10.1175/JTECH-D-18-0012.1?af=R&mobileUi=0 # # They trained neural network classifier on the XBT data, using the same features, country, date of profile and maximum depth, as the iMeta algorithm. They did not train on the whole dataset, but rather year by year. # In this notebook we are replicating the results in the published paper as a starting point, and then consider some basic extensions, like training on larger subsets or the whole dataset, using additional features and doing hyper parameter tuning. 
import os
import sys
import pathlib
import functools
import ipywidgets
import time

# +
import pandas
import numpy
import matplotlib
import matplotlib.pyplot
# -

import sklearn
import sklearn.model_selection
import sklearn.preprocessing
import sklearn.neural_network
import sklearn.metrics

import warnings
# NOTE(review): silences *all* warnings for the whole notebook (including
# sklearn/pandas deprecation warnings) to keep the cell output clean.
warnings.filterwarnings('ignore')

# Make the repository root importable so the local `xbt` package is found
# regardless of where the notebook server was started.
root_repo_dir = pathlib.Path().absolute().parent
sys.path = [os.path.join(root_repo_dir)] + sys.path

import xbt.dataset
from xbt.dataset import XbtDataset, UNKNOWN_STR, cat_output_formatter, check_value_found
from xbt.imeta import imeta_classification, XBT_MAX_DEPTH

# Set up some site specific parameters for the notebook
# XBT_ENV_NAME selects which site-specific paths below are used; default to
# the 'pangeo' deployment when the variable is not set.
try:
    environment = os.environ['XBT_ENV_NAME']
except KeyError:
    environment = 'pangeo'

# Root of the XBT data tree, per site.
root_data_dirs = {
    'MO_scitools': '/data/users/shaddad/xbt-data/',
    'pangeo': '/data/misc/xbt-data/',
}
# (start, end) year range of profiles to load, per site.
env_date_ranges = {
    'MO_scitools': (1966,2015),
    'pangeo': (1966,2015)
}

# Set up some dataset specific parameters
root_data_dir = root_data_dirs[environment]
year_range = env_date_ranges[environment]
experiment_name = 'mlp_first_paper_reproduction'
# scikit-learn scorer strings used for cross-validation metrics.
cv_metric_names = ['f1_weighted','precision_weighted','recall_weighted']
# Input features shared by the iMeta algorithm and the paper's classifier.
input_feature_names = ['country','max_depth', 'year']
# Train/test proportions — presumably consumed by XbtDataset.train_test_split();
# they are not referenced directly below. TODO confirm against xbt.dataset.
train_fraction = 0.75
test_fraction = 1 - train_fraction
input_dir_name = 'csv_with_imeta'
exp_out_dir_name = 'experiment_outputs'
xbt_input_dir = os.path.join(root_data_dir, input_dir_name)
xbt_output_dir = os.path.join(root_data_dir, exp_out_dir_name, experiment_name)

# ## Preparing the data

# %%time
xbt_full_dataset = XbtDataset(xbt_input_dir, year_range)

# Keep only profiles that carry instrument metadata labels.
xbt_labelled = xbt_full_dataset.filter_obs({'labelled': 'labelled'})

# ## Calculating iMeta results
#
# For comparison, we calculate the output from the intelligent metadata algorithm to compare results with ML artificial neural network results.
# %%time
# Run the iMeta (intelligent metadata) algorithm over every labelled profile;
# each call returns a (model, manufacturer) tuple.
imeta_classes = xbt_labelled.xbt_df.apply(imeta_classification, axis=1)

# Convert the iMeta outputs into categorical series that share the category
# lists of the labelled data, so encoders and metrics line up between iMeta
# and the ML classifiers.
# FIX: the original used .astype('category', catergories=...) — the keyword is
# misspelled and the astype(..., categories=...) signature was removed from
# pandas, so the category list was never applied (and modern pandas raises
# TypeError). pandas.CategoricalDtype is the supported way to pin categories.
# (A stray no-op `imeta_classes.apply(lambda t1: t1[0])` statement whose
# result was discarded has also been removed.)
imeta_model = imeta_classes.apply(lambda t1: t1[0]).astype(
    pandas.CategoricalDtype(categories=xbt_labelled['model'].cat.categories))
imeta_manufacturer = imeta_classes.apply(lambda t1: t1[1]).astype(
    pandas.CategoricalDtype(categories=xbt_labelled['manufacturer'].cat.categories))
imeta_instrument = imeta_classes.apply(lambda t1: f'XBT: {t1[0]} ({t1[1]})').astype(
    pandas.CategoricalDtype(categories=xbt_labelled['instrument'].cat.categories))

# ## Training and evaluating neural networks for each year
#
# To replicate the paper results, we use the scikit-learn MLPClassifier class, training one classifier for year in the XBT dataset. For each of those years, we split the profiles into training and test sets, train the classifier, then calculate the classification metrics on the train and test sets. The main metric used in the paper was recall.

# The results are then collated in a dataframe, together with the classification metrics for the iMeta classification output.

# First add a train/test split field to the labelled data frame. This is so we can easily either train and evaluate on different periods, e.g. train on whole dataset and look at performance per year

xbt_train_all, xbt_test_all = xbt_labelled.train_test_split()

# Create the feature encoders on the whole dataset so comparisons are possible. This will create encoders based on the whole dataset. When we subset the data using filter_obs or filter_features, the encoders will be passed to the subset and used there. This way we know whatever subset we process, the encodings will be the same so the results can be easily compared.
_ = xbt_labelled.get_ml_dataset(return_data = False) def classify_subset(classifier_class, xbt_subset, x_features, y_feature): xbt_train_year, xbt_test_year = xbt_subset.train_test_split() (X_train, _, _, _) = xbt_train_year.filter_features(x_features).get_ml_dataset() (X_test, _, _, _) = xbt_test_year.filter_features(x_features).get_ml_dataset() (y_train, _, _, _) = xbt_train_year.filter_features([y_feature]).get_ml_dataset() (y_test, _, _, _) = xbt_test_year.filter_features([y_feature]).get_ml_dataset() clf_mlp1 = classifier_class() clf_mlp1.fit(X_train, y_train) y_res_train = clf_mlp1.predict(X_train) metric_train = sklearn.metrics.precision_recall_fscore_support(y_train, y_res_train) y_res_test = clf_mlp1.predict(X_test) metric_test = sklearn.metrics.precision_recall_fscore_support(y_test, y_res_test) return {'classifier':clf_mlp1,'metrics_train': metric_train, 'metrics_test': metric_test} classify_nnet = functools.partial(classify_subset, sklearn.neural_network.MLPClassifier) results_model1 = {} results_manufacturer1 = {} results_instrument1 = {} training_progress = ipywidgets.IntProgress(min=env_date_ranges[environment][0], max= env_date_ranges[environment][1], description='Training', bar_style='info') training_progress # + training_progress.value = env_date_ranges[environment][0] for year in range(env_date_ranges[environment][0],env_date_ranges[environment][1]+1): xbt_subset = xbt_labelled.filter_obs({'year': year}) try: results_model1[year] = classify_nnet(xbt_subset, input_feature_names, 'model') except Exception as e1: print(f'Error processing model {year} - {e1}') try: results_manufacturer1[year] = classify_nnet(xbt_subset, input_feature_names, 'manufacturer') except Exception as e1: print(f'Error processing manufacturer {year} - {e1}') training_progress.value = year try: results_instrument1[year] = classify_nnet(xbt_subset, input_feature_names, 'instrument') except Exception as e1: print(f'Error processing manufacturer {year} - {e1}') 
training_progress.value = year # - eval_progress = ipywidgets.IntProgress(min=env_date_ranges[environment][0], max= env_date_ranges[environment][1], description='Evaluating', bar_style='info') eval_progress imeta_results = [] eval_progress.value = env_date_ranges[environment][0] for year in range(env_date_ranges[environment][0],env_date_ranges[environment][1]+1): xbt_subset = xbt_labelled.filter_obs({'year':year}) xbt_model1, enc_model1, _, _ = xbt_subset.filter_features(['model']).get_ml_dataset() enc_model1 = enc_model1['model'] xbt_manufacturer1, enc_manuf1, _, _ = xbt_subset.filter_features(['manufacturer']).get_ml_dataset() enc_manuf1 = enc_manuf1['manufacturer'] xbt_instrument1, enc_instr1, _, _ = xbt_subset.filter_features(['instrument']).get_ml_dataset() enc_instr1 = enc_instr1['instrument'] imeta_output = imeta_classes[xbt_labelled.xbt_df.year == year] imeta_instr1 = enc_instr1.transform(pandas.DataFrame({'imeta_instrument': imeta_instrument[xbt_labelled.xbt_df.year == year]}) ) imeta_model1 = enc_model1.transform(pandas.DataFrame({'imeta_model': imeta_model[xbt_labelled.xbt_df.year == year]}) ) imeta_manuf1 = enc_manuf1.transform(pandas.DataFrame({'imeta_manufacturer': imeta_manufacturer[xbt_labelled.xbt_df.year == year]}) ) if enc_model1.classes_.shape[0] == 1: (im_pr_model, im_rec_model, im_f1_model, im_sup_model) = (1.0, 1.0, 1.0, xbt_subset.shape[0]) else: (im_pr_model, im_rec_model, im_f1_model, im_sup_model) = sklearn.metrics.precision_recall_fscore_support(xbt_model1, imeta_model1,average='micro') if enc_manuf1.classes_.shape[0] == 1: (im_pr_manuf, im_rec_manuf, im_f1_manuf, im_sup_manuf) = (1.0, 1.0, 1.0, xbt_subset.shape[0]) else: (im_pr_manuf, im_rec_manuf, im_f1_manuf, im_sup_manuf) = sklearn.metrics.precision_recall_fscore_support(xbt_manufacturer1, imeta_manuf1,average='micro') (im_pr_instr, im_rec_instr, im_f1_instr, im_sup_instr) = sklearn.metrics.precision_recall_fscore_support(xbt_instrument1, imeta_instr1,average='micro') imeta_results 
+= [{'year': year, 'imeta_model_recall': im_rec_model, 'imeta_model_precision': im_pr_model, 'imeta_manuf_recall': im_rec_manuf, 'imeta_manuf_precision': im_pr_manuf, 'imeta_instr_recall': im_rec_instr, 'imeta_instr_precision': im_pr_instr, }] eval_progress.value = year imeta_res_df = pandas.DataFrame.from_records(imeta_results) # ## Classification Performance # # The results for classification of model and manufacturer separately is shown below. We see that recall is better for manufacturer than model, which is not surprising as it is principally a 2 class problem, with almost all profles being from probes manufactured by Sippican or TSK. # # The current iMeta performance does not match that shown in the paper and needs further investigation. # + res_manuf_df = pandas.DataFrame({ 'year' :list( results_manufacturer1.keys()), 'recall_manuf_train_per_year' : [ numpy.sum(numpy.array(results_manufacturer1[year1]['metrics_train'][1]) * numpy.array(results_manufacturer1[year1]['metrics_train'][3])) / sum(results_manufacturer1[year1]['metrics_train'][3]) for year1 in results_manufacturer1.keys()], 'precision_manuf_train_per_year' : [ numpy.sum(numpy.array(results_manufacturer1[year1]['metrics_train'][0]) * numpy.array(results_manufacturer1[year1]['metrics_train'][3])) / sum(results_manufacturer1[year1]['metrics_train'][3]) for year1 in results_manufacturer1.keys()], 'recall_manuf_test_per_year' : [ numpy.sum(numpy.array(results_manufacturer1[year1]['metrics_test'][1]) * numpy.array(results_manufacturer1[year1]['metrics_test'][3])) / sum(results_manufacturer1[year1]['metrics_test'][3]) for year1 in results_manufacturer1.keys()], 'precision_manuf_test_per_year' : [ numpy.sum(numpy.array(results_manufacturer1[year1]['metrics_test'][0]) * numpy.array(results_manufacturer1[year1]['metrics_test'][3])) / sum(results_manufacturer1[year1]['metrics_test'][3]) for year1 in results_manufacturer1.keys()], }) res_manuf_df.sort_values(by='year', inplace=True) # + res_model_df = 
pandas.DataFrame({ 'year' : list(results_model1), 'recall_model_train_per_year' : [ numpy.sum(numpy.array(results_model1[year1]['metrics_train'][1]) * numpy.array(results_model1[year1]['metrics_train'][3])) / sum(results_model1[year1]['metrics_train'][3]) for year1 in results_model1.keys()], 'precision_model_train_per_year' : [ numpy.sum(numpy.array(results_model1[year1]['metrics_train'][0]) * numpy.array(results_model1[year1]['metrics_train'][3])) / sum(results_model1[year1]['metrics_train'][3]) for year1 in results_model1.keys()], 'recall_model_test_per_year' : [ numpy.sum(numpy.array(results_model1[year1]['metrics_test'][1]) * numpy.array(results_model1[year1]['metrics_test'][3])) / sum(results_model1[year1]['metrics_test'][3]) for year1 in results_model1.keys()], 'precision_model_test_per_year' : [ numpy.sum(numpy.array(results_model1[year1]['metrics_test'][0]) * numpy.array(results_model1[year1]['metrics_test'][3])) / sum(results_model1[year1]['metrics_test'][3]) for year1 in results_model1.keys()], }) res_model_df.sort_values(by='year', inplace=True) # - res_instr_df = pandas.DataFrame({ 'year' :list( results_instrument1.keys()), 'recall_instr_train_per_year' : [ numpy.sum(numpy.array(results_instrument1[year1]['metrics_train'][1]) * numpy.array(results_instrument1[year1]['metrics_train'][3])) / sum(results_instrument1[year1]['metrics_train'][3]) for year1 in results_instrument1.keys()], 'precision_instr_train_per_year' : [ numpy.sum(numpy.array(results_instrument1[year1]['metrics_train'][0]) * numpy.array(results_instrument1[year1]['metrics_train'][3])) / sum(results_instrument1[year1]['metrics_train'][3]) for year1 in results_instrument1.keys()], 'recall_instr_test_per_year' : [ numpy.sum(numpy.array(results_instrument1[year1]['metrics_test'][1]) * numpy.array(results_instrument1[year1]['metrics_test'][3])) / sum(results_instrument1[year1]['metrics_test'][3]) for year1 in results_instrument1.keys()], 'precision_instr_test_per_year' : [ 
numpy.sum(numpy.array(results_instrument1[year1]['metrics_test'][0]) * numpy.array(results_instrument1[year1]['metrics_test'][3])) / sum(results_instrument1[year1]['metrics_test'][3]) for year1 in results_instrument1.keys()], }) res_instr_df.sort_values(by='year', inplace=True) results_df = pandas.merge(res_model_df, res_manuf_df).merge(res_instr_df).merge(imeta_res_df).merge( pandas.DataFrame.from_dict({ 'year': xbt_labelled['year'].value_counts(sort=False).index, 'num_samples': xbt_labelled['year'].value_counts(sort=False).values, })) # ## Paper plots # # Here will replicate the key plots in the paper to demonstrate the we have reproduced the results. # # Firstly we will reproduce the left hand plot in figure 1. This shows the recall for classification of instrument type for the iMeta and Neural Network approaches. # * figure 1 left- Recall for NN and iMeta per year (figure 1 left in paper) # * plot 2 - percentage imporvement in recall per year fig_recall_per_year = matplotlib.pyplot.figure('fig_recall_per_year', figsize=(16, 16)) ax_recall_instr = fig_recall_per_year.add_subplot(111, title='instrument recall per year (figure 1 left)') _ = results_df.plot.line(ax=ax_recall_instr, x='year',y=['recall_instr_train_per_year','recall_instr_test_per_year','imeta_instr_recall'], ) results_df['improvement'] = results_df.apply(lambda r1: ((r1['recall_instr_test_per_year'] / r1['imeta_instr_recall'])-1)*100.0 , axis=1) # This plot splits up figure 1 right in the paper onto 2 separate axes, with the same color scheme. The purple line shows the number of samples per year in the dataset. The green line shows the improvement in recall for each year. These plots are consistent with those in the paper. 
# Reproduce figure 1 (right) from the paper, split over two axes but keeping
# the paper's colour scheme: purple for the number of labelled profiles per
# year, green for the percentage recall improvement over iMeta.
fig_num_samples_per_year = matplotlib.pyplot.figure('fig_num_samples_per_year', figsize=(24,12))
panel_specs = [
    (121, 'number of samples per year (figure 1 right)', 'num_samples', 'purple'),
    (122, 'improvement per year (figure 1 right)', 'improvement', 'green'),
]
for position, panel_title, column, colour in panel_specs:
    panel_axes = fig_num_samples_per_year.add_subplot(position, title=panel_title)
    _ = results_df.plot.line(ax=panel_axes, x='year', y=[column], c=colour)

# ## Summary
#
# These plots match the ones published in the paper, so the paper's results
# have been successfully reproduced inside this code framework. Subsequent
# experiments can therefore be compared against these numbers knowing that the
# comparison is a fair one.
notebooks/first_ml_paper.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="lJz6FDU1lRzc" """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. 5. Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies # !pip install wget # !apt-get install sox libsndfile1 ffmpeg # !pip install unidecode # !pip install matplotlib>=3.3.2 ## Install NeMo BRANCH = 'main' # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all] ## Grab the config we'll use in this example # !mkdir configs # !wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/asr/conf/config.yaml """ Remember to restart the runtime for the kernel to pick up any upgraded packages (e.g. matplotlib)! Alternatively, you can uncomment the exit() below to crash and restart the kernel, in the case that you want to use the "Run All Cells" (or similar) option. """ # exit() # + [markdown] id="v1Jk9etFlRzf" # # Telephony speech (8 kHz) # This notebook covers general recommendations for using NeMo models with 8 kHz speech. All the pretrained models currently available through NeMo are trained with audio at 16 kHz. This means that if the original audio was sampled at a different rate, it's sampling rate was converted to 16 kHz through upsampling or downsampling. 
One of the common applications for ASR is to recognize telephony speech which typically consists of speech sampled at 8 kHz. # # - # # Mixed sample rate # Most of the pretrained English models distributed with NeMo are trained with mixed sample rate data, i.e. the training data typically consists of data sampled at both 8 kHz and 16 kHz. As an example pretrained Citrinet model "stt_en_citrinet_1024" was trained with the following datasets. # * Librispeech 960 hours of English speech # * Fisher Corpus # * Switchboard-1 Dataset # * WSJ-0 and WSJ-1 # * National Speech Corpus - 1 # * Mozilla Common Voice # # Among these, Fisher and Switchboard datasets are conversational telephone speech datasets with audio sampled at 8 kHz while the other datasets were originally sampled at least 16 kHz. Before training, all audio files from Fisher and Switchboard datasets were upsampled to 16 kHz. Because of this mixed sample rate training, our models can be used to recognize both narrowband (8kHz) and wideband speech (16kHz) # # Inference with NeMo # NeMo ASR currently supports inference of audio in .wav format. Internally, the audio file is resampled to 16 kHz before inference is called on the model, so there is no difference running inference on 8 kHz audio compared to say 16 kHz or any other sampling rate audio with NeMo. Let's look at an example for running inference on 8 kHz audio. # This is where the an4/ directory will be placed. # Change this if you don't want the data to be extracted in the current directory. data_dir = '.' # + import glob import os import subprocess import tarfile import wget # Download the dataset. This will take a few moments... 
print("******")
# Download the AN4 tarball only if it is not already present.
if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):
    an4_url = 'http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz'
    an4_path = wget.download(an4_url, data_dir)
    print(f"Dataset downloaded at: {an4_path}")
else:
    print("Tarfile already exists.")
    an4_path = data_dir + '/an4_sphere.tar.gz'

if not os.path.exists(data_dir + '/an4/'):
    # Untar and convert .sph to .wav (using sox)
    # `with` closes the archive even if extraction fails — the original left
    # the tarfile handle open.
    with tarfile.open(an4_path) as tar:
        tar.extractall(path=data_dir)
    print("Converting .sph to .wav...")
    sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)
    for sph_path in sph_list:
        # Swap the extension explicitly instead of slicing off 4 characters.
        wav_path = os.path.splitext(sph_path)[0] + '.wav'
        cmd = ["sox", sph_path, wav_path]
        # check=True: fail loudly if sox is missing or a conversion fails,
        # instead of silently producing an incomplete dataset.
        subprocess.run(cmd, check=True)
    print("Finished conversion.\n******")
# -

# Audio in an4 dataset is sampled at 22 kHz. Let's first downsample an audio file to 16 kHz.

# +
import librosa
import IPython.display as ipd
import librosa.display
import matplotlib.pyplot as plt

# Load and listen to the audio file
example_file = data_dir + '/an4/wav/an4_clstk/mgah/cen2-mgah-b.wav'
# NOTE(review): librosa.load resamples to its default 22050 Hz unless sr=None
# is passed, so the printed rate is librosa's default rather than necessarily
# the file's native rate — confirm if the native rate matters here.
audio, sample_rate = librosa.load(example_file)
print(sample_rate)
# Keyword arguments: the positional resample(y, orig_sr, target_sr) form was
# removed in librosa 0.10 (the rates are keyword-only there).
audio_16kHz = librosa.core.resample(audio, orig_sr=sample_rate, target_sr=16000)

import numpy as np
# Get spectrogram using Librosa's Short-Time Fourier Transform (stft)
spec = np.abs(librosa.stft(audio_16kHz))
spec_db = librosa.amplitude_to_db(spec, ref=np.max)  # Decibels
# Use log scale to view frequencies
librosa.display.specshow(spec_db, y_axis='log', x_axis='time', sr=16000)
plt.colorbar()
plt.title('Audio Spectrogram');
plt.ylim([0, 8000])
# -

# Now, let's downsample the audio to 8 kHz

# +
audio_8kHz = librosa.core.resample(audio, orig_sr=sample_rate, target_sr=8000)
spec = np.abs(librosa.stft(audio_8kHz))
spec_db = librosa.amplitude_to_db(spec, ref=np.max)  # Decibels
# Use log scale to view frequencies
librosa.display.specshow(spec_db, y_axis='log', x_axis='time', sr=8000)
plt.colorbar()
plt.title('Audio Spectrogram');
plt.ylim([0, 8000])
# -

import soundfile as sf
sf.write(data_dir + '/audio_16kHz.wav', audio_16kHz, 16000)
sample, sr = librosa.core.load(data_dir + '/audio_16kHz.wav')
ipd.Audio(sample, rate=sr)

sf.write(data_dir + '/audio_8kHz.wav', audio_8kHz, 8000)
sample, sr = librosa.core.load(data_dir + '/audio_8kHz.wav')
ipd.Audio(sample, rate=sr)

# # Let's look at inference results using one of the pretrained models on the original, 16 kHz and 8 kHz versions of the example file we chose above.

from nemo.collections.asr.models import ASRModel
import torch

# FIX: fall back to CPU when no GPU is present — the original only assigned
# `device` inside the CUDA branch, so from_pretrained(map_location=device)
# raised a NameError on CPU-only machines. (The pointless f-string prefix on
# the constant 'cuda:0' is also gone.)
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
asr_model = ASRModel.from_pretrained(model_name='stt_en_citrinet_1024', map_location=device)

# As discussed above, there are no changes required for inference based on the sampling rate of audio and as we see below the pretrained Citrinet model gives accurate transcription even for audio downsampled to 8 kHz.

print(asr_model.transcribe(paths2audio_files=[example_file]))
print(asr_model.transcribe(paths2audio_files=[data_dir + '/audio_16kHz.wav']))
print(asr_model.transcribe(paths2audio_files=[data_dir + '/audio_8kHz.wav']))

# # Training / fine-tuning with 8 kHz data
# For training a model with new 8 kHz data, one could take two approaches. The first approach, **which is recommended**, is to finetune a pretrained 16 kHz model by upsampling all the data to 16 kHz. Note that upsampling offline before training is not necessary but recommended as online upsampling during training is very time consuming and may slow down training significantly. The second approach is to train an 8 kHz model from scratch. **Note**: For the second approach, in our experiments we saw that loading the weights of a 16 kHz model as initialization helps the model to converge faster with better accuracy.
#
# To upsample your 8 kHz data to 16 kHz command line tools like sox or ffmpeg are very useful.
Here is the command to upsample an audio file using sox:
# ```shell
# sox input_8k.wav -r 16000 output_16k.wav
# ```
# Now to finetune a pre-trained model with this upsampled data, you can just restore the model weights from the pre-trained model and call trainer with the upsampled data. As an example, here is how one would fine-tune a Citrinet model:
# ```python
# python examples/asr/script_to_bpe.py \
# --config-path="examples/asr/conf/citrinet" \
# --config-name="citrinet_512.yaml" \
# model.train_ds.manifest_filepath="<path to manifest file with upsampled 16kHz data>" \
# model.validation_ds.manifest_filepath="<path to manifest file>" \
# trainer.gpus=-1 \
# trainer.max_epochs=50 \
# +init_from_pretrained_model="stt_en_citrinet_512"
# ```
#
# To train an 8 kHz model, just change the sample rate in the config to 8000 as follows:
#
# ```python
# python examples/asr/script_to_bpe.py \
# --config-path="examples/asr/conf/citrinet" \
# --config-name="citrinet_512.yaml" \
# model.sample_rate=8000 \
# model.train_ds.manifest_filepath="<path to manifest file with 8kHz data>" \
# model.validation_ds.manifest_filepath="<path to manifest file>" \
# trainer.gpus=-1 \
# trainer.max_epochs=50 \
# +init_from_pretrained_model="stt_en_citrinet_512"
# ```
tutorials/asr/ASR_for_telephony_speech.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div style="display: flex; background-color: #3F579F;"> # <h1 style="margin: auto; padding: 30px 30px 0px 30px;">Design an application for public health - Project 3</h1> # </div> # <div style="display: flex; background-color: #3F579F; margin: auto; padding: 5px 30px 0px 30px;"> # <span style="width: 100%; text-align: center; font-size:20px; font-weight: bold; float: left;">| Analysis notebook |</span> # </div> # <div style="display: flex; background-color: #3F579F; margin: auto; padding: 10px 30px 30px 30px;"> # <span style="width: 100%; text-align: center; font-size:26px; float: left;">Data Scientist course - OpenClassrooms</span> # </div> # <div class="alert alert-block alert-info"> # This <b>notebook</b> is optimized to <b>be used</b> with: # <ul style="list-style-type: square;"> # <li>JupyterLab 3.0.14</li> # <li>JupyterLab Theme: JupyterLab Dark</li> # <li>Extension: @jupyter-widgets/jupyterlab-manager</li> # </ul> # </div> # + [markdown] tags=[] # <div style="background-color: #506AB9;" > # <h2 style="margin: auto; padding: 20px; color:#fff; ">1. Libraries and functions</h2> # </div> # - # <div style="background-color: #6D83C5;" > # <h3 style="margin: auto; padding: 20px; color:#fff; ">1.1. Libraries and functions</h3> # </div> # + import os import io import gc from math import prod import pandas as pd import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import seaborn as sns # %matplotlib inline sns.set_theme(style="darkgrid") # - # <div style="background-color: #6D83C5;" > # <h3 style="margin: auto; padding: 20px; color:#fff; ">1.2. Functions declaration</h3> # </div> def df_analysis(df, name_df, columns, *args, **kwargs): """ Method used for analyzing on the DataFrame. 
Parameters: ----------------- df (pandas.DataFrame): Dataset to analyze name_df (str): Dataset name columns (list): Dataframe keys in list format *args, **kwargs: ----------------- flag (str): Flag to show complete information about the dataset to analyse "complete" shows all information about the dataset Returns: ----------------- None. Print the analysis on the Dataset. """ # Getting the variables flag = kwargs.get("flag", None) ORDERING_COMPLETE = [ "name", "type", "records", "unique", "# NaN", "% NaN", "mean", "min", "25%", "50%", "75%", "max", "std" ] # Calculating the memory usage based on dataframe.info() buf = io.StringIO() df.info(buf=buf) memory_usage = buf.getvalue().split('\n')[-2] if df.empty: print("The", name_df, "dataset is empty. Please verify the file.") else: empty_cols = [col for col in df.columns if df[col].isna().all()] # identifying empty columns df_rows_duplicates = df[df.duplicated()] #identifying full duplicates rows # Creating a dataset based on Type object and records by columns type_cols = df.dtypes.apply(lambda x: x.name).to_dict() df_resume = pd.DataFrame(list(type_cols.items()), columns = ["name", "type"]) df_resume["records"] = list(df.count()) df_resume["# NaN"] = list(df.isnull().sum()) df_resume["% NaN"] = list(((df.isnull().sum() / len(df.index))*100).round(2)) print("\nAnalysis of", name_df, "dataset") print("--------------------------------------------------------------------") print("- Dataset shape: ", df.shape[0], "rows and", df.shape[1], "columns") print("- Total of NaN values: ", df.isna().sum().sum()) print("- Percentage of NaN: ", round((df.isna().sum().sum() / prod(df.shape)) * 100, 2), "%") print("- Total of full duplicates rows: ", df_rows_duplicates.shape[0]) print("- Total of empty rows: ", df.shape[0] - df.dropna(axis="rows", how="all").shape[0]) if df.dropna(axis="rows", how="all").shape[0] < df.shape[0] else \ print("- Total of empty rows: 0") print("- Total of empty columns: ", len(empty_cols)) print(" + The 
empty column is: ", empty_cols) if len(empty_cols) == 1 else \ print(" + The empty column are: ", empty_cols) if len(empty_cols) >= 1 else None print("- Unique indexes: ", df.index.is_unique) print("\n- The key(s):", columns, "is not present multiple times in the dataframe.\n It CAN be used as a primary key.") if df.size == df.drop_duplicates(columns).size else \ print("\n- The key(s):", columns, "is present multiple times in the dataframe.\n It CANNOT be used as a primary key.") pd.set_option("display.max_rows", None) # show full of showing rows pd.set_option("display.max_columns", None) # show full of showing cols pd.set_option("max_colwidth", None) # show full width of showing cols if flag is None or flag != "complete": print("\n- Type object and records by columns (",memory_usage,")") print("--------------------------------------------------------------------") elif flag == "complete": df_resume["unique"] = list(df.nunique()) df_desc = pd.DataFrame(df.describe().T).reset_index() df_desc = df_desc.rename(columns={"index": "name"}) df_resume = df_resume.merge(right=df_desc[["name", "mean", "min", "25%", "50%", "75%", "max", "std"]], on="name", how="left") df_resume = df_resume[ORDERING_COMPLETE] print("\n- Type object and records by columns (",memory_usage,")") print("---------------------------------------------------------------------------------------------------------------------------------") display(df_resume.sort_values("records", ascending=False)) pd.reset_option("display.max_rows") # reset max of showing rows pd.reset_option("display.max_columns") # reset max of showing cols pd.reset_option("display.max_colwidth") # reset width of showing cols # deleting dataframe to free memory if flag == "complete": del [[df_resume, df_desc]] gc.collect() df_resume, df_desc = (pd.DataFrame() for i in range(2)) else: del df_resume gc.collect() df_resume = pd.DataFrame() # + [markdown] tags=[] # <div style="background-color: #506AB9;" > # <h2 style="margin: auto; 
padding: 20px; color:#fff; ">2. Importing files</h2> # </div> # - # <div style="background-color: #6D83C5;" > # <h3 style="margin: auto; padding: 20px; color:#fff; ">2.1. Importing and preparing files</h3> # </div> data = pd.read_csv("datasets\dataset_cleaned.csv") data.head(5) df_analysis(data, "data", "code", flag="complete") # <div class="alert alert-block alert-danger"> # <b>Flag position</b> # </div> # + [markdown] tags=[] # <div style="background-color: #506AB9;" > # <h2 style="margin: auto; padding: 20px; color:#fff; ">3. Statistic analysis</h2> # </div> # - VARIABLES = ["energy-kcal_100g", "proteins_100g", "fat_100g", "sugars_100g"] for var in VARIABLES: plt.figure(figsize=[12,10]) sns.boxplot(x=data[var], y=data["renamed_category"], orient="h", showmeans=True, meanprops={"markerfacecolor":"white", "markeredgecolor":"blue"}) plt.title("Amount of " + var + " in all categories", size=15) plt.ylabel("Categories", size=12) plt.xlabel("Amount of " + var, size=12) plt.show() # <div style="background-color: #6D83C5;" > # <h3 style="margin: auto; padding: 20px; color:#fff; ">3.1. Select a subset of data</h3> # </div> # <div class="alert alert-block alert-info"> # To limit the analysis to be performed, we are going to work following the next premises # <ul style="list-style-type: square;"> # <li>Select the category who has more records</li> # <li>Select only 4 variables to work</li> # <ul style="list-style-type: disc;"> # <li>energy-kcal_100g</li> # <li>proteins_100g</li> # <li>fat_100g</li> # <li>sugars_100g</li> # </ul> # </ul> # </div> data.groupby("renamed_category")["code"].count().sort_values(ascending=False) df_subset = data[data["renamed_category"] == "Snacks"].copy() df_subset.shape VARIABLES = ["energy-kcal_100g", "proteins_100g", "fat_100g", "sugars_100g"] # <div style="background-color: #6D83C5;" > # <h3 style="margin: auto; padding: 20px; color:#fff; ">3.1. 
Select a subset of data</h3> # </div> import itertools for a, b in itertools.combinations(VARIABLES, 2): plt.figure(figsize=[12,10]) sns.scatterplot(data=df_subset, x=a, y=b) plt.show() # + [markdown] tags=[] # <div style="background-color: #506AB9;" > # <h2 style="margin: auto; padding: 20px; color:#fff; ">2. Importing files</h2> # </div> # - data # <div class="alert alert-block alert-info"> # This <b>notebook</b> is optimized to <b>be used</b> with: # <ul style="list-style-type: square;"> # <li>Analisis bivariable</li> # <li>Anova</li> # </ul> # </div>
temporal_copies/analysis_notebook-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # NTF Value Prediction (CSCI-4850 Project)
# Full-color model

# ## Setup

import tensorflow.keras as keras
from tensorflow.keras import backend as K
import numpy as np
from IPython.display import display
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from random import randint
import os
from PIL import Image
import json
# NOTE: `import urllib` alone does not import the `urllib.request` submodule;
# import it explicitly so `urllib.request.urlopen` below cannot AttributeError.
import urllib.request
# %matplotlib inline

# +
# Currency helpers: fetch the current Coinbase exchange-rate table for ETH
# once at startup, then convert/format amounts with it.
ETH_API = 'https://api.coinbase.com/v2/exchange-rates?currency=ETH'
ETH_SYM = 'Ξ'
USD_SYM = '$'
BTC_SYM = '₿'

with urllib.request.urlopen(ETH_API) as data:
    C_EXCHANGE = json.load(data)['data']['rates']

def exchange(amt: float, frm: str, to: str = None):
    """Convert `amt` from ETH into `frm`, or from `frm` into `to` (via ETH)."""
    if to is None:
        return amt * float(C_EXCHANGE[frm])
    else:
        return amt / float(C_EXCHANGE[frm]) * float(C_EXCHANGE[to])

def dollars(val: float):
    """Format a USD amount for display."""
    return f'{USD_SYM} {val:,.2f}'

def eths(val: float):
    """Format an ETH amount for display."""
    return f'{val:,.4f} {ETH_SYM}'

def coins(val: float):
    """Format a BTC amount for display."""
    return f'{val:,.4f} {BTC_SYM}'

print(f"current rate ({ETH_SYM}->{USD_SYM}) {dollars(exchange(1.0, 'USD'))}")
print(f"current rate ({USD_SYM}->{ETH_SYM}) {eths(exchange(1.0, 'USD', 'ETH'))}")
print(f"current rate ({ETH_SYM}->{BTC_SYM}) {coins(exchange(1.0, 'BTC'))}")
print(f"current rate ({BTC_SYM}->{ETH_SYM}) {eths(exchange(1.0, 'BTC', 'ETH'))}")

# +
# Load the NFT image dataset: RGB arrays in X, single-channel grayscale in XG,
# the ETH value parsed from the filename in Y1, and a placeholder label in Y2.
acceptable = ('.jpg', '.jpeg', '.bmp', '.png')
preferred_size = (128, 128)

X = []
XG = []
Y1 = []
Y2 = []
for root, dirs, files in os.walk("./Nft dataset"):
    for name in filter(lambda x: x.endswith(acceptable), files):
        full_path = os.path.join(root, name)
        print(f'found {repr(full_path)}')
        temp = Image.open(full_path)
        print(f'\timage dimensions: {temp.size}')
        if temp.size != preferred_size:
            print(f'\tresizing...')
            temp = temp.resize(preferred_size, Image.LANCZOS)
            print(f'\tnew image dimensions: {temp.size}')
        temp = temp.convert("RGB")
        arr = np.array(temp)
        print(f'\tarray shape: {arr.shape}')
        X.append(arr)
        temp = temp.convert("L")
        # append a trailing channel axis -- vectorized replacement for the
        # original per-pixel nested list comprehension (same shape and dtype)
        arr = np.array(temp)[:, :, np.newaxis]
        print(f'\tarray shape: {arr.shape}')
        XG.append(arr)
        # assumes the filename begins with the ETH value followed by a space,
        # e.g. "0.5 ape.png" -- TODO confirm against the dataset
        val = float(name.rsplit('.', 1)[0].rsplit(' ', 1)[0])
        print(f'\tvalue: {eths(val)}')
        Y1.append(val)
        Y2.append(0.0)
        print()

X = np.array(X)
XG = np.array(XG)
Y1 = np.array(Y1)
Y2 = np.array(Y2)
print(X.shape)
print(Y1.shape)
print(Y2.shape)

# +
class ModelInfo:
    """A snapshot of a model's weights and the epoch count it was taken at."""

    def __init__(self, weights, epochs=None):
        self.weights = weights
        self.epochs = epochs

    def load_model(self, model):
        """Restore this snapshot's weights into `model`."""
        model.set_weights(self.weights)

    def store_model(self, model, epochs=None):
        """Overwrite this snapshot with `model`'s current weights."""
        self.epochs = epochs if epochs is not None else self.epochs
        self.weights = model.get_weights()

class ModelHistory:
    """An ordered collection of ModelInfo snapshots."""

    def __init__(self):
        self.history = []

    def __iter__(self):
        return iter(self.history)

    def stash_model(self, model, epochs=None):
        """Append a snapshot of `model`'s current weights."""
        self.history.append(ModelInfo(model.get_weights(), epochs))

def extend_history(old=None, new=None):
    """Merge a new Keras History into `old` (in place) and return it.

    `old` is None on the first call, in which case `new` becomes the running
    history object.
    """
    if old is None:
        return new
    oh, nh = old.history, new.history
    for key in nh:
        if key in oh:
            oh[key].extend(nh[key])
        else:
            oh[key] = list(nh[key])
    return old
# -

# ## Model Creation

# +
x = keras.layers.Input(X.shape[1:], name="Input_Layer")
y = x
y = keras.layers.Conv2D(128, kernel_size=(8, 8), activation='relu', name="First_Convolution")(y)
y = keras.layers.Conv2D(64, kernel_size=(6, 6), activation='relu', name="Second_Convolution")(y)
y = keras.layers.Conv2D(32, kernel_size=(4, 4), activation='relu', name="Third_Convolution")(y)
y = keras.layers.MaxPooling2D(pool_size=(8, 8), name="Max_Pooling")(y)
y = keras.layers.Dropout(0.05)(y)
y = keras.layers.Flatten()(y)

# Residual dense stack. (An earlier dual-head variant with an extra sigmoid
# "Rug_Prob" output was abandoned; only the ETH-value head remains.)
z = keras.layers.Dense(128, activation='relu')(y)
stack = 2
for _ in range(stack):
    a = keras.layers.Dense(128, activation='relu')(z)
    z = keras.layers.Add()([a, z])

# Output: the predicted Ethereum value of the NFT
z = keras.layers.Dense(1, name="ETH_Val")(z)

model = keras.Model(x, z, name="NFT_Valuator")

# light data augmentation for training
generator = keras.preprocessing.image.ImageDataGenerator(
    width_shift_range=0.05,
    height_shift_range=0.05,
    rotation_range=4,
    zoom_range=0.1
)

model.compile(
    loss=keras.losses.MeanSquaredError(),
    optimizer=keras.optimizers.Adam(),
    # `metrics` expects a list of metrics
    metrics=[keras.metrics.MeanAbsoluteError()]
)

model.summary()
keras.utils.plot_model(model, show_shapes=True, expand_nested=True)
# -

model.save("NFTValuator2")

# ## Training

history = None
model_history = ModelHistory()
model = keras.models.load_model("NFTValuator2")

# +
batch_size = 32
epochs = 4
trials = 200
hr_w = 80
hr = '-' * hr_w

generator.fit(X)

for t in range(trials):
    print(hr)
    print(f'TRIAL {t+1:,}'.center(hr_w))
    print(hr)
    # batch_size is set on the generator's flow(); passing it to fit() as well
    # is rejected for generator inputs by recent tf.keras versions
    history = extend_history(
        history,
        model.fit(
            generator.flow(X, Y1, batch_size=batch_size),
            epochs=epochs,
            verbose=1
        )
    )
    model_history.stash_model(model, epochs)
# -

model.save("NFTValuator2")

plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['mean_absolute_error'])
plt.ylabel('Negative Accuracy')
plt.xlabel('Epoch')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.tight_layout()
plt.show()

# ----
# ## Testing the Model
# This step is covered in [Project_Demo.ipynb](../Project_Demo.ipynb)
Project_Network_RGB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # C/C++ __stdcall DLL, Excel VBA # # * Change the current working directory into `./demo` # %cd demo # %pwd # ## 1 C/C++ __stdcall DLL # # For `Visual Basic` applications (or applications in other languages such as Pascal or Fortran) to call functions in a C/C++ DLL, the functions must be exported using the correct calling convention without any name decoration done by the compiler. # # VBA can **only** call `__stdcall` functions, not `__cdecl` functions. # # * `__stdcall` creates the correct calling convention for the function (the called function cleans up the stack and parameters are passed from right to left) # # * ` __declspec(dllexport)` is used on an `exported` function in a DLL # # Below is an example of techniques which facilitate the use of MinGW to create DLLs, exporting functions which may be called from Visual Basic applications such as Excel. # # Example: Equations for Region4 of [IAPWS-IF97](http://www.iapws.org/relguide/IF97-Rev.pdf) # # * 8 Equations for Region 4 # # * 8.1 The Saturation-Pressure Equation (Basic Equation) P33,Eq30 # # * 8.2 The Saturation-Temperature Equation (Backward Equation) P35, Eq31 # # ### 1.1 Create your DLL with `__stdcall` calling convention # # Create a DLL with the following code: # # * region4.h # # * region4.c # # For Windows, export all functions as `__stdcall`. # # ```c # #define DLLPORT __declspec(dllexport) double __stdcall # ``` # **NOTE**: The following header `region4.h` declares the interface for # # * building the **Windows/Linux** shared library # # * building an executable that uses the shared library.
# # + # %%file ./include/region4.h #pragma once #ifdef __cplusplus extern "C" { #endif #ifdef BUILD_DLL #ifdef WIN32 #define PORT __declspec(dllexport) double __stdcall #else #define PORT double #endif #else #ifdef WIN32 #define PORT __declspec(dllimport) double __stdcall #else #define PORT double #endif #endif PORT pSat(double T); PORT TSat(double p); #ifdef __cplusplus } #endif # - # When you create header files for your DLLs, use # # * ` __declspec(dllexport) ` adds the `export` directive to the object fileworks # # * ` __declspec(dllimport)` on the declarations of the public symbols # + # %%file ./src/region4.c #include <math.h> #include "region4.h" // // Initialize coefficients for region 4 // static double n[11] = {0, 0.11670521452767E+04, -0.72421316703206E+06, -0.17073846940092E+02, 0.12020824702470E+05, -0.32325550322333E+07, 0.14915108613530E+02, -0.48232657361591E+04, 0.40511340542057E+06, -0.23855557567849E+00, 0.65017534844798E+03}; PORT pSat(double T) // saturation pressure of water // pSat in MPa // T :temperaturein K // // pSat = -1: temperature outside range // { double pS; if (T < 273.15 || T > 647.096) // tc_water=647.096 pS = -1.0; else { double del = T + n[9] / (T - n[10]); double aco = del * (del + n[1]) + n[2]; double bco = del * (n[3] * del + n[4]) + n[5]; double cco = del * (n[6] * del + n[7]) + n[8]; pS = pow(2 * cco / (-bco + sqrt(bco * bco - 4 * aco * cco)), 4); } return pS; } PORT TSat(double p) // saturation temperature of water // TSat in K // p :pressure MPa // // TSat=-1: pressure outside range // { double TS; if (p < 0.000611212677 || p > 22.064) TS = -1.0; else { double bet = pow(p, 0.25); double eco = bet * (bet + n[3]) + n[6]; double fco = bet * (n[1] * bet + n[4]) + n[7]; double gco = bet * (n[2] * bet + n[5]) + n[8]; double dco = 2.0 * gco / (-fco - sqrt(fco * fco - 4.0 * eco * gco)); TS = 0.5 * (n[10] + dco - sqrt((n[10] + dco) * (n[10] + dco) - 4.0 * (n[9] + n[10] * dco))); } return TS; } # - # !gcc -c -DBUILD_DLL -o 
./obj/region4.o ./src/region4.c -I./include # !gcc -shared -o ./bin/libregion4.dll ./obj/region4.o -Wl,--add-stdcall-alias # * -DBUILD_DLL: # # * -Dname: `Predefine name as a macro`, with definition # # # * -Wl,option # # Pass **option** as an option to the **linker**. If option contains commas, it is split into multiple options at the commas. # # # * --add-stdcall-alias: # # This adds an undecorated alias for the `exported function names` that is simply **the name of the function** # ### 1.2 Add -static-libgcc ,-output-def=libregion4.def # # Links the GNU `libgcc` library `statically` # !gcc -c -DBUILD_DLL -o ./obj/region4.o ./src/region4.c -I./include # !gcc -shared -o ./bin/libregion4.dll -static-libgcc ./obj/region4.o -Wl,--add-stdcall-alias,-output-def=./bin/libregion4.def # * -static-libgcc # # This option links the GNU `libgcc` library **statically** # # # * -output-def=libregion4.def # # Name of `.def` file to be created. # # **def:** A module-definition file is a text file containing one or more module statements that describe various attributes of a DLL # %load ./bin/libregion4.def # + # %%file ./makefile-region4.mk CC=gcc CFLAGS=-DBUILD_DLL SRCDIR= ./src/ OBJDIR= ./obj/ BINDIR= ./bin/ INCDIR=./include/ all: libregion4 libregion4: obj $(CC) -shared -o $(BINDIR)libregion4.dll -static-libgcc $(OBJDIR)region4.o -Wl,--add-stdcall-alias,-output-def=libregion4.def obj: $(CC) -c $(CFLAGS) -o $(OBJDIR)region4.o $(SRCDIR)region4.c -I$(INCDIR) # - # !make -f makefile-region4.mk # ### 1.3 Call Dll from C/C++ # + # %%file ./src/mainReg4.c #include <stdio.h> #include "region4.h" int main() { double T=300.0; printf("Saturation P is %f\n", pSat(T)); return 0; } # - # !gcc -c -o ./obj/mainReg4.o ./src/mainReg4.c # !gcc -o ./bin/mainReg4 ./obj/mainReg4.o -I./include/ -L./bin/ -lregion4 # !.\bin\mainReg4 # ### 1.4 Python API # # * `__stdcall` calling convention: **windll, WINFUNCTYPE** # + # %%file ./src/region4.py from ctypes import windll,c_double,WINFUNCTYPE flib = 
windll.LoadLibrary('./bin/libregion4.dll') prototype = WINFUNCTYPE(c_double,c_double) def pSat(T): f = prototype(("pSat", flib),) return f(T) def TSat(p): f = prototype(("TSat", flib),) return f(p) # - # **add `mathfuns.py` into the interperte search path** import sys sys.path.append('./src') # + # %%file ./src/test4.py import unittest from region4 import * class Region4Test (unittest.TestCase): def setUp(self): # IF97-dev,Table35 Page 34 : T(K) p(MPa) self.tab35=[[300, 0.353658941e-2], [500, 0.263889776e1], [600, 0.123443146e2]] # IF97-dev, Table 36 Page 36 : p(MPa) T(K) self.tab36=[[0.1, 0.372755919e3], [ 1, 0.453035632e3], [ 10, 0.584149488e3]] def test_pSat(self): places = 6 for item in self.tab35: self.assertAlmostEqual(pSat(item[0]),item[1],places) def test_TSat(self): places = 6 for item in self.tab36: self.assertAlmostEqual(TSat(item[0]),item[1],places) if __name__ == '__main__': unittest.main() # - # %run ./src/test4.py # ## 2 Call DLL from Excel VBA(64bits) # # Do as the following steps: # # ### 2.1 Put DLL in the default path of Windows'dll # # # `libregion4.dll` in `C:\windows\system` # # # ### 2.2 Create the xlsm # # `demo-r4.xlsm` in `./demo/ExcelVBA/` # # # ### 2.3 Create the `VBA module` to call the DLL. # # There are a few ways to open up the **VBA Editor** in Excel. # # * From the `Developer Tools` tab, you can click the `Visual Basic` button. 
# # * A keyboard shortcut: press "Alt+F11" : # # ![vba](./img/vba.jpg) # # Into **VBA** Editor, then, create the module **mathfuns** to call library: # # ```VBA # Declare PtrSafe Function pSat Lib "libregion4" (ByVal T As Double) As Double # Declare PtrSafe Function TSat Lib "libregion4" (ByVal p As Double) As Double # # Public Function CalpSat(ByVal T As Double) As Double # CalpSat = pSat(T) # End Function # # Public Function CalTSat(ByVal p As Double) As Double # CalTSat = TSat(p) # End Function # ``` # # ### 2.4 call VBA methods in cells # # ![demo-r4](./img/demo-r4.jpg) # ## 3 Excel4Engineering # # Examples of Excel VBA for Engineering: Analysizing the Ideal Rankine Cycle, Monitoring Industrial Process # # https://github.com/thermalogic/Excel4Engineering # ## Reference # # ### C/C++ # # * ctypes http://docs.python.org/3/library/ctypes.html # # ### Microsoft Excel # # * Excel VBA Programming: http://www.homeandlearn.org/the_excel_vba_editor.html # # * [Language reference for Visual Basic for Applications(VBA)](https://docs.microsoft.com/en-us/office/vba/api/overview/language-reference) # # * [Excel VBA reference](https://docs.microsoft.com/en-us/office/vba/api/overview/excel) # # * [Excel add-in tutorial](https://docs.microsoft.com/en-us/office/dev/add-ins/tutorials/excel-tutorial) # # * DLLs in Visual C++ https://msdn.microsoft.com/en-us/library/1ez7dh12.aspx # # * Calling DLL Functions from Visual Basic Applications https://msdn.microsoft.com/en-us/library/dt232c9t.aspx # # #
notebook/Unit8-3-DLL_VBA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="7pI7PJ8XATdT" outputId="423517a6-4d35-49c8-c02e-18bde66f6c32" from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/"} id="F9qVDQd_BBqS" outputId="1b3bd01b-9a5b-4d80-f2d4-36fc7d46a26b" # %cd /content/drive/MyDrive/Neural_Tangent_Kernel/ # + id="BWIyC9Ip_bcq" import numpy as np import pandas as pd import torch import torchvision from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from matplotlib import pyplot as plt from myrmsprop import MyRmsprop from utils import plot_decision_boundary,attn_avg,plot_analysis from synthetic_dataset import MosaicDataset1 from eval_model import calculate_attn_loss,analyse_data # %matplotlib inline torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # + id="lGVy-1EllAc_" train_data = np.load("train_type4_data.npy",allow_pickle=True) test_data = np.load("test_type4_data.npy",allow_pickle=True) # + id="uL771xuGZC5Q" mosaic_list_of_images = train_data[0]["mosaic_list"] mosaic_label = train_data[0]["mosaic_label"] fore_idx = train_data[0]["fore_idx"] test_mosaic_list_of_images = test_data[0]["mosaic_list"] test_mosaic_label = test_data[0]["mosaic_label"] test_fore_idx = test_data[0]["fore_idx"] # + id="uf76JwkxZCT0" batch = 3000 train_dataset = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx) train_loader = DataLoader( train_dataset,batch_size= batch ,shuffle=False) #batch = 2000 #test_dataset = MosaicDataset1(test_mosaic_list_of_images, test_mosaic_label, test_fore_idx) #test_loader = DataLoader(test_dataset,batch_size= batch ,shuffle=False) # + [markdown] id="P-Lv8nHoB8z-" 
# # NTK # + id="hmGjlMfTBp3F" data = np.load("NTK_1.npy",allow_pickle=True) # H = data[0] # + id="beyk_-qYB_Ab" colab={"base_uri": "https://localhost:8080/"} outputId="02e3a961-8825-461a-a4a8-d25eef8121e8" print(data[0].keys()) H = torch.tensor(data[0]["NTK"]) # + id="9ULTAsyF6G6a" lr_1 = 1/1470559.2 # + id="xnqbuxdO2U4j" # p_vec = nn.utils.parameters_to_vector(where_func.parameters()) # p, = p_vec.shape # n_m, n_obj,_ = inputs.shape # number of mosaic images x number of objects in each mosaic x d # # this is the transpose jacobian (grad y(w))^T) # features = torch.zeros(n_m*n_obj, p, requires_grad=False) # k = 0 # for i in range(27000): # out = where_func(inpp[i]) # where_func.zero_grad() # out.backward(retain_graph=False) # p_grad = torch.tensor([], requires_grad=False) # for p in where_func.parameters(): # p_grad = torch.cat((p_grad, p.grad.reshape(-1))) # features[k,:] = p_grad # k = k+1 # tangent_kernel = features@features.T # + id="SInPc5gk9XDH" # class Module1(nn.Module): # def __init__(self): # super(Module1, self).__init__() # self.linear1 = nn.Linear(2,100) # self.linear2 = nn.Linear(100,1) # def forward(self,x): # x = F.relu(self.linear1(x)) # x = self.linear2(x) # return x # + id="VW0lzy6i9wk0" # from tqdm import tqdm as tqdm # + id="cti_LAbE8-dn" # inputs,_,_ = iter(train_loader).next() # inputs = torch.reshape(inputs,(27000,2)) # inputs = (inputs - torch.mean(inputs,dim=0,keepdims=True) )/torch.std(inputs,dim=0,keepdims=True) # where_net = Module1() # outputs = where_net(inputs) # + id="e-03FnsNP5bk" # feature1 = torch.zeros((27000,200)) # feature2 = torch.zeros((27000,100)) # for i in tqdm(range(27000)): # where_net.zero_grad() # outputs[i].backward(retain_graph=True) # par = [] # j = 0 # for p in where_net.parameters(): # if j%2 == 0: # vec = torch.nn.utils.parameters_to_vector(p) # p_grad = p.grad.reshape(-1) # par.append(p_grad) # j = j+1 # feature1[i,:] = par[0] # feature2[i,:] = par[1] # + id="eI20WxiR-zCi" # H = <EMAIL> + <EMAIL> # + [markdown] 
id="OWIBQfQly25h" # # Models # + id="HbrMidFCla6h" class Module2(nn.Module): def __init__(self): super(Module2, self).__init__() self.linear1 = nn.Linear(2,100) self.linear2 = nn.Linear(100,3) def forward(self,x): x = F.relu(self.linear1(x)) x = self.linear2(x) return x # + id="QXpnLkMoCocj" colab={"base_uri": "https://localhost:8080/"} outputId="dd805add-9c41-4e7f-c4f9-8bfe15161213" print(H) # + id="FRDhoG3rEp_w" # + id="rRqj2VELllkX" torch.manual_seed(1234) what_net = Module2().double() what_net.load_state_dict(torch.load("type4_what_net.pt")) what_net = what_net.to("cuda") # + id="oc1pKMEVfhat" # + id="DOpZfj1bq7wN" n_batches = 3000//batch bg = [] for i in range(n_batches): torch.manual_seed(i) betag = torch.randn(3000,9)#torch.ones((250,9))/9 bg.append( betag.requires_grad_() ) # + [markdown] id="76PwzSMACDDj" # # training # + id="S633XgMToeN3" criterion = nn.CrossEntropyLoss() # + id="7lrDkUUaDFCR" optim1 = [] H= H.to("cpu") for i in range(n_batches): optim1.append(MyRmsprop([bg[i]],H=H,lr=1)) # instantiate what net optimizer optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.0001)#, momentum=0.9)#,nesterov=True) # + id="qPaYaojinMTA" colab={"base_uri": "https://localhost:8080/"} outputId="e19f7a3a-34af-4cc8-a251-afe12a32a0a2" acti = [] analysis_data_tr = [] analysis_data_tst = [] loss_curi_tr = [] loss_curi_tst = [] epochs = 2500 # calculate zeroth epoch loss and FTPT values running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(train_loader,bg,what_net,criterion) print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy)) loss_curi_tr.append(running_loss) analysis_data_tr.append(anlys_data) # training starts for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 what_net.train() for i, data in enumerate(train_loader, 0): # get the inputs inputs, labels,_ = data inputs = inputs.double() beta = bg[i] # alpha for ith batch 
#print(labels) inputs, labels,beta = inputs.to("cuda"),labels.to("cuda"),beta.to("cuda") # zero the parameter gradients optimizer_what.zero_grad() optim1[i].zero_grad() # forward + backward + optimize avg,alpha = attn_avg(inputs,beta) outputs = what_net(avg) loss = criterion(outputs, labels) # print statistics running_loss += loss.item() #alpha.retain_grad() loss.backward(retain_graph=False) optimizer_what.step() optim1[i].step() running_loss_tr,anls_data,correct,total,accuracy = calculate_attn_loss(train_loader,bg,what_net,criterion) analysis_data_tr.append(anls_data) loss_curi_tr.append(running_loss_tr) #loss per epoch print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tr,correct,total,accuracy)) if running_loss_tr<=0.08: break print('Finished Training run ') analysis_data_tr = np.array(analysis_data_tr) # + id="AciJnAh5nfug" columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ] df_train = pd.DataFrame() df_test = pd.DataFrame() df_train[columns[0]] = np.arange(0,epoch+2) df_train[columns[1]] = analysis_data_tr[:,-2]/30 df_train[columns[2]] = analysis_data_tr[:,-1]/30 df_train[columns[3]] = analysis_data_tr[:,0]/30 df_train[columns[4]] = analysis_data_tr[:,1]/30 df_train[columns[5]] = analysis_data_tr[:,2]/30 df_train[columns[6]] = analysis_data_tr[:,3]/30 # + id="NoQpS_6scRsC" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="e44b904f-8e86-4fc2-d980-24b72a5085a6" df_train # + id="EY_j8B274vuH" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="dfd9dd63-ae6d-4d43-b5e0-18e04dca10a3" # %cd /content/ plot_analysis(df_train,columns,[0,500,1000,1500,2000,2500]) # + id="VCnS6r2_3WdU" aph = [] for i in bg: aph.append(F.softmax(i,dim=1).detach().numpy()) aph = np.concatenate(aph,axis=0) # torch.save({ # 'epoch': 500, # 'model_state_dict': what_net.state_dict(), # 
#'optimizer_state_dict': optimizer_what.state_dict(), # "optimizer_alpha":optim1, # "FTPT_analysis":analysis_data_tr, # "alpha":aph # }, "type4_what_net_500.pt") # + id="KVzrDOGS4UxU" colab={"base_uri": "https://localhost:8080/"} outputId="e3c3494c-5263-4ece-8fdc-543e098c171f" aph[0] # + id="7Ut6ZTAXbvqx" avrg = [] avrg_lbls = [] with torch.no_grad(): for i, data1 in enumerate(train_loader): inputs , labels , fore_idx = data1 inputs = inputs.double() inputs = inputs.to("cuda") beta = bg[i] beta = beta.to("cuda") avg,alpha = attn_avg(inputs,beta) avrg.append(avg.detach().cpu().numpy()) avrg_lbls.append(labels.numpy()) avrg= np.concatenate(avrg,axis=0) avrg_lbls = np.concatenate(avrg_lbls,axis=0) # + id="2KQFYlmTLG0N" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="9aa45b00-feb5-4eac-b950-41c954239a5e" # %cd /content/drive/MyDrive/Neural_Tangent_Kernel/ data = np.load("type_4_data.npy",allow_pickle=True) # %cd /content/ plot_decision_boundary(what_net,[1,8,2,9],data,bg,avrg,avrg_lbls) # + id="oqS70WNvOcIw" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="56dc4970-40c0-44cd-91ba-ee5435af8e62" plt.plot(loss_curi_tr) # + [markdown] id="8GmyEWKD92_T" # # + id="8ZQMzXXJa8lB" # + id="C6WUeyvNO3iP" # + id="fQRNesx7O3Xr" # + id="vLPSDVK_QWId" # + [markdown] id="hw2rtHfzFyFA" # # + id="l3lfGywVHL6S"
1_mosaic_data_attention_experiments/3_stage_wise_training/Attention_weights_for_every_data/type4_data/init_1/ntk/both_pretrained_what/lr_1/type4_attn_ewts_NTK.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pupil preprocessing # A tutorial by <NAME> (<EMAIL>) and <NAME> (<EMAIL>) # + import numpy as np import scipy as sp import scipy.signal as signal import matplotlib import matplotlib.pyplot as pl # %matplotlib inline import seaborn as sn sn.set(style="ticks") # extra dependencies of this notebook, for data loading and fitting of kernels import pandas as pd from lmfit import minimize, Parameters, Parameter, report_fit import sys import os workingDir = os.getcwd()[:-5] sys.path.append(workingDir) from fir import FIRDeconvolution # - # ### Load data # Let's load some raw pupil data, sampled at 1000 Hz. We also load the times at which blinks and saccades occurred. # + sample_rate = 1000.0 eye_dict = pd.read_csv('data/eye_dict.csv') blink_dict = pd.read_csv('data/blink_dict.csv') sac_dict = pd.read_csv('data/sac_dict.csv') # variables to work with: start_time = eye_dict.timepoints[0] timepoints = eye_dict.timepoints - start_time pupil = eye_dict.pupil blink_starts = np.array(blink_dict.start_timestamp - start_time, dtype=int) blink_ends = np.array(blink_dict.end_timestamp - start_time, dtype=int) sac_starts = np.array(sac_dict.start_timestamp - start_time, dtype=int) sac_ends = np.array(sac_dict.end_timestamp - start_time, dtype=int) # - # Let's plot the raw pupil timeseries: x = np.arange(timepoints.shape[0]) / sample_rate f = pl.figure(figsize = (10,3.5)) pl.plot(x, pupil) pl.xlabel('Time (s)') pl.ylabel('Pupil size') sn.despine(offset=10) # The periods where the timeseries drop to 0 correspond to blinks. Let's linearly interpolate these blinks. 
# Linearly interpolate the pupil trace across every blink, bridging a
# `margin`-sample pad on either side to cover the sharp on/offset artifacts.
margin = 100  # pad around each blink, in ms
margin = int((margin * sample_rate) / 1000)  # convert to samples

pupil_interpolated = np.array(pupil.copy())
for b in np.arange(blink_starts.shape[0]):
    # Clamp to the valid index range so a blink at the very start/end of the
    # recording cannot produce a negative (wrapping) index.
    blink_start = max(np.where(timepoints == blink_starts[b])[0][0] - margin + 1, 0)
    blink_end = min(np.where(timepoints == blink_ends[b])[0][0] + margin + 1,
                    pupil_interpolated.shape[0] - 1)
    interpolated_signal = np.linspace(pupil_interpolated[blink_start],
                                      pupil_interpolated[blink_end],
                                      blink_end - blink_start, endpoint=False)
    pupil_interpolated[blink_start:blink_end] = interpolated_signal

f = pl.figure(figsize=(10, 3.5))
pl.plot(x, pupil_interpolated)
pl.xlabel('Time (s)')
pl.ylabel('Pupil size')
sn.despine(offset=10)

# To see what happened, let's zoom in on one interpolated blink:

# +
f = pl.figure(figsize=(10, 3.5))
pl.axvspan((-margin + blink_starts[7]) / sample_rate,
           (margin + blink_ends[7]) / sample_rate, alpha=0.15, color='k')
pl.axvline((-margin + blink_starts[7]) / sample_rate, color='k', alpha=0.5, lw=1.5)
pl.axvline((margin + blink_ends[7]) / sample_rate, color='k', alpha=0.5, lw=1.5)
pl.plot(x, pupil, label='raw pupil')
pl.plot(x, pupil_interpolated, label='interpolated pupil')
pl.xlim((-margin + blink_starts[7] - 1000) / sample_rate,
        (margin + blink_ends[7] + 1000) / sample_rate)
pl.xlabel('Time (s)')
pl.ylabel('Pupil size')
pl.legend(loc=3)
sn.despine(offset=10)
# -

# Let's filter blink interpolated pupil timeseries now. We'll construct a low pass (<10Hz), and a band-pass (0.01-10Hz) signal. And again, let's plot the results.
# +
def _butter_lowpass(data, highcut, fs, order=5):
    """Zero-phase Butterworth low-pass (keeps frequencies below `highcut` Hz).

    `fs` is the sampling rate in Hz; filtfilt is used so no phase shift is
    introduced.
    """
    nyq = 0.5 * fs
    high = highcut / nyq
    b, a = signal.butter(order, high, btype='lowpass')
    y = signal.filtfilt(b, a, data)
    return y

def _butter_highpass(data, lowcut, fs, order=5):
    """Zero-phase Butterworth high-pass (keeps frequencies above `lowcut` Hz)."""
    nyq = 0.5 * fs
    low = lowcut / nyq
    b, a = signal.butter(order, low, btype='highpass')
    y = signal.filtfilt(b, a, data)
    return y

def _butter_bandpass(data, lowcut, highcut, fs, order=5):
    """Zero-phase band-pass: high-pass at `lowcut`, then low-pass at `highcut`.

    Reuses the two filters above (the original duplicated the low-pass design
    inline and computed an unused `low` coefficient).
    """
    data_hp = _butter_highpass(data, lowcut, fs, order)
    return _butter_lowpass(data_hp, highcut, fs, order)
# +
# High pass:
pupil_interpolated_hp = _butter_highpass(data=pupil_interpolated, lowcut=0.01, fs=1000, order=3)

# Low pass:
pupil_interpolated_lp = _butter_lowpass(data=pupil_interpolated, highcut=10, fs=1000, order=3)

# Band pass:
pupil_interpolated_bp = _butter_bandpass(data=pupil_interpolated, lowcut=0.01, highcut=10, fs=1000, order=3)

f = pl.figure(figsize=(16, 3.5))
# plot against time in seconds (the original plotted sample index under an
# x-label of seconds)
pl.plot(x, pupil_interpolated_lp, label='low pass')
pl.plot(x, pupil_interpolated_hp, label='high pass')
pl.plot(x, pupil_interpolated_bp, label='band pass')
pl.xlabel('Time (s)')
pl.ylabel('Pupil size')
pl.legend()
sn.despine(offset=10)
# -

# The band-pass filtered signal we can use now to estimate pupil responses to blinks and saccades. You can think of these of simple event related averages. However, to account for temporally adjacent event, and hence overlapping responses (due to slow pupil IRF), here we will rely on deconvolution.
# +
downsample_rate = 100
new_sample_rate = sample_rate / downsample_rate
interval = 6  # deconvolution window, in seconds

# events: blink and saccade offsets, in seconds
events = [(blink_ends / sample_rate), (sac_ends / sample_rate)]

# compute blink and sac kernels with deconvolution (on downsampled timeseries):
a = FIRDeconvolution(
    signal=sp.signal.decimate(pupil_interpolated_bp, downsample_rate, 1),
    events=events,
    event_names=['blinks', 'sacs'],
    sample_frequency=new_sample_rate,
    deconvolution_frequency=new_sample_rate,
    deconvolution_interval=[0, interval],
)
a.create_design_matrix()
a.regress()
a.betas_for_events()
blink_response = np.array(a.betas_per_event_type[0]).ravel()
sac_response = np.array(a.betas_per_event_type[1]).ravel()

# Baseline each kernel to its own first sample. (The original subtracted
# blink_response[0] from sac_response AFTER re-baselining blink_response,
# i.e. it subtracted 0 and left the saccade kernel un-baselined.)
blink_response = blink_response - blink_response[0]
sac_response = sac_response - sac_response[0]

# plot:
x = np.linspace(0, interval, len(blink_response))
f = pl.figure(figsize=(10, 3.5))
pl.plot(x, blink_response, label='blink response')
pl.plot(x, sac_response, label='sac response')
pl.xlabel('Time from event (s)')
pl.ylabel('Pupil size')
pl.axhline(0, color='k', lw=0.5, alpha=0.5)
pl.legend(loc=2)
sn.despine(offset=10)
# -

# Let's fit kernels to these:

# +
def single_pupil_IRF(params, x):
    """Single gamma-shaped pupil impulse-response function."""
    s1 = params['s1']
    n1 = params['n1']
    tmax1 = params['tmax1']
    return s1 * ((x**n1) * (np.e**((-n1*x)/tmax1)))

def single_pupil_IRF_ls(params, x, data):
    """Residuals of the single-gamma IRF against `data`, for lmfit.minimize."""
    s1 = params['s1'].value
    n1 = params['n1'].value
    tmax1 = params['tmax1'].value
    model = s1 * ((x**n1) * (np.e**((-n1*x)/tmax1)))
    return model - data

def double_pupil_IRF(params, x):
    """Sum of two gamma-shaped pupil impulse-response functions."""
    s1 = params['s1']
    s2 = params['s2']
    n1 = params['n1']
    n2 = params['n2']
    tmax1 = params['tmax1']
    tmax2 = params['tmax2']
    return s1 * ((x**n1) * (np.e**((-n1*x)/tmax1))) + s2 * ((x**n2) * (np.e**((-n2*x)/tmax2)))

def double_pupil_IRF_ls(params, x, data):
    """Residuals of the double-gamma IRF against `data`, for lmfit.minimize."""
    s1 = params['s1'].value
    s2 = params['s2'].value
    n1 = params['n1'].value
    n2 = params['n2'].value
    tmax1 = params['tmax1'].value
    tmax2 = params['tmax2'].value
    model = s1 * ((x**n1) * (np.e**((-n1*x)/tmax1))) + s2 * ((x**n2) * (np.e**((-n2*x)/tmax2)))
    return model - data

# create a set of Parameters
params = Parameters()
params.add('s1', value=-1, min=-np.inf, max=-1e-25)
params.add('s2', value=1, min=1e-25, max=np.inf)
params.add('n1', value=10, min=9, max=11)
params.add('n2', value=10, min=8, max=12)
params.add('tmax1', value=0.9, min=0.5, max=1.5)
params.add('tmax2', value=2.5, min=1.5, max=4)

# do fit, here with powell method: double-gamma for blinks, single for saccades
blink_result = minimize(double_pupil_IRF_ls, params, method='powell', args=(x, blink_response))
blink_kernel = double_pupil_IRF(blink_result.params, x)
sac_result = minimize(single_pupil_IRF_ls, params, method='powell', args=(x, sac_response))
sac_kernel = single_pupil_IRF(sac_result.params, x)

# plot:
f = pl.figure(figsize=(10, 3.5))
pl.plot(x, blink_response, label='blink response')
pl.plot(x, blink_kernel, label='blink fit')
pl.plot(x, sac_response, label='sac response')
pl.plot(x, sac_kernel, label='sac fit')
pl.xlabel('Time from event (s)')
pl.ylabel('Pupil size')
pl.axhline(0, color='k', lw=0.5, alpha=0.5)
pl.legend(loc=4)
sn.despine(offset=10)
# -

# Now, with a GLM let's regress these responses to blinks and saccades from our pupil timeseries.
# +
# upsample the fitted kernels to the original sample rate
# (linspace needs an integer count; interval * sample_rate is a float)
x = np.linspace(0, interval, int(interval * sample_rate))
blink_kernel = double_pupil_IRF(blink_result.params, x)
# the saccade response was fit with the single-gamma model, so evaluate that
# same model here (the original evaluated double_pupil_IRF on a single-IRF fit)
sac_kernel = single_pupil_IRF(sac_result.params, x)

# regressors: delta functions at event offsets, convolved with the kernels
blink_reg = np.zeros(len(pupil))
blink_reg[blink_ends] = 1
blink_reg_conv = sp.signal.fftconvolve(blink_reg, blink_kernel, 'full')[:-(len(blink_kernel)-1)]
sac_reg = np.zeros(len(pupil))
sac_reg[sac_ends] = 1  # was blink_ends: the saccade regressor marked blink events
sac_reg_conv = sp.signal.fftconvolve(sac_reg, sac_kernel, 'full')[:-(len(sac_kernel)-1)]
regs = [blink_reg_conv, sac_reg_conv]

# GLM: ordinary least squares via the normal equations
design_matrix = np.matrix(np.vstack([reg for reg in regs])).T
betas = np.array(((design_matrix.T * design_matrix).I * design_matrix.T) * np.matrix(pupil_interpolated_bp).T).ravel()
explained = np.sum(np.vstack([betas[i]*regs[i] for i in range(len(betas))]), axis=0)

# clean pupil: remove the blink- and saccade-evoked responses
pupil_clean_bp = pupil_interpolated_bp - explained

# plot:
f = pl.figure(figsize=(10, 3.5))
x = np.arange(timepoints.shape[0]) / sample_rate
pl.plot(x, pupil_interpolated_bp, 'r', label='band-passed')
pl.plot(x, pupil_clean_bp, 'k', label='blinks/sacs regressed out')
pl.xlabel('Time (s)')
pl.ylabel('Pupil size')
pl.axhline(0, color='k', lw=0.5, alpha=0.5)
pl.legend()
sn.despine(offset=10)
# -

# Finally, let's add back the slow drift, which is meaningful part of the signal!

# +
# re-attach the frequencies below the band-pass cutoff (the slow drift)
pupil_clean_lp = pupil_clean_bp + (pupil_interpolated_lp - pupil_interpolated_bp)

f = pl.figure(figsize=(10, 3.5))
x = np.arange(timepoints.shape[0]) / sample_rate
pl.plot(x, pupil, 'k', label='rawest version')
pl.plot(x, pupil_interpolated, label='blink-interpolated')  # was mislabeled 'band-passed'
pl.plot(x, pupil_clean_lp, label='blinks/sacs regressed out')
pl.xlabel('Time (s)')
pl.ylabel('Pupil size')
pl.axhline(0, color='k', lw=0.5, alpha=0.5)
pl.legend()
sn.despine(offset=10)
# -

# #### Preprocessing done
# From here, one can do standard epoch-based regression and averaging analyses.
notebooks/pupil/pupil_preprocess_python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Fully Convolutional Networks (FCN) # Notes from Udacity's Self-Driving Car Nanodegree # - Encoder extracts features that the decoder uses layer # # Pieces: # - Pre-train encoder on VGG/ResNet # - Do a 1x1 convolution # - Tansposed convolutions to upsample # # Skip connections are added. If VGG is used then only 3rd and 4th pooling layers are used as skip connections. Too many skip connections can lead to an explosion of the model size. import numpy as np import tensorflow as tf import collections # ### 1- Replace Fully Connected (FC) with 1x1 convolutions # + # custom init with the seed set to 0 by default def custom_init(shape, dtype=tf.float32, partition_info=None, seed=0): return tf.random_normal(shape, dtype=dtype, seed=seed) # TODO: Use `tf.layers.conv2d` to reproduce the result of `tf.layers.dense`. # Set the `kernel_size` and `stride`. def conv_1x1(x, num_outputs): kernel_size = 1 stride = 1 return tf.layers.conv2d(x, num_outputs, kernel_size, stride, kernel_initializer=custom_init) # + num_outputs = 2 x = tf.constant(np.random.randn(1, 2, 2, 1), dtype=tf.float32) # `tf.layers.dense` flattens the input tensor if the rank > 2 and reshapes it back to the original rank # as the output. dense_out = tf.layers.dense(x, num_outputs, kernel_initializer=custom_init) conv_out = conv_1x1(x, num_outputs) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) a = sess.run(dense_out) b = sess.run(conv_out) print("Dense Output =", a) print("Conv 1x1 Output =", b) print("Same output? 
=", np.allclose(a, b, atol=1.e-5))
# -

a.shape

b.shape

# ### 2- Upsampling through transposed convolution
# - Reverse convolution in which forward and backward passes are swapped
# - aka deconvolution
# - Differentiability retained and training exactly the same as before
# - https://dspguru.com/dsp/faqs/multirate/interpolation/
# - https://github.com/vdumoulin/conv_arithmetic

# <img src="https://d17h27t6h515a5.cloudfront.net/topher/2017/October/59d8670c_transposed-conv/transposed-conv.png">
#

# +
def upsample(x):
    """
    Apply a two times upsample on x and return the result.
    :x: 4-Rank Tensor
    :return: TF Operation
    """
    # TODO: Use `tf.layers.conv2d_transpose`
    # stride 2 + 'SAME' padding doubles the spatial dimensions; the channel
    # count (x.shape[3]) is preserved.
    return tf.layers.conv2d_transpose(x, x.shape[3], kernel_size=(3, 3), strides=2, padding='SAME')


x = tf.constant(np.random.randn(1, 4, 4, 3), dtype=tf.float32)
conv = upsample(x)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    result = sess.run(conv)
    print('Input Shape: {}'.format(x.get_shape()))
    print('Output Shape: {}'.format(result.shape))
# -

# ### 3- Skip connection
# - Retrain information
# - Use info from multiple resolutions

# ### Semantic Segmentation
# - Bounding boxes for object detection, easier than segmentation
# - [YOLO](https://pjreddie.com/darknet/yolo/) and [SSD](https://github.com/balancap/SSD-Tensorflow) which work well:
#     - High frames per second (FPS)
#     - Can detect cars, people, traffic signs, etc
#
# - Semantic segmentation
#     - Pixel level
#     - Scene understanding
#     - Multiple decoders for different tasks (e.g.
# segmentation, depth)
#
# #### Intersection over Union (IoU)
# - Intersection => TP
# - Union => classified T (TP + FP) + actually T (TP + FN)

# #### TensorFlow Implementation

# +
truth = np.array(
    [[0, 0, 0, 0],
     [1, 1, 1, 1],
     [2, 2, 2, 2],
     [3, 3, 3, 3]
     ]
)
prediction = np.array([
    [0, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 2, 2, 1],
    [3, 3, 0, 3]
])


def iou1(truth, pred):
    """Mean intersection-over-union across classes, in plain numpy.

    Per class: IoU = TP / (TP + FP + FN).  Labels are shifted by +1 so that
    class 0 is not wiped out by the zero-masking multiplications below.

    :truth: 2-D integer array of ground-truth labels
    :pred: 2-D integer array of predicted labels (same shape)
    :return: mean of the per-class IoU values (float)
    """
    # FIX: `Counter` was used without ever being imported (the file only has
    # `import collections`), so calling iou1 raised NameError.
    from collections import Counter
    t = truth + 1
    p = pred + 1
    a = ((t == p) * t).flatten()
    tp = Counter(a[a > 0])  # true positives per (shifted) class
    b = ((t != p) * t).flatten()
    fn = Counter(b[b > 0])  # false negatives: truth label where mismatched
    c = ((t != p) * p).flatten()
    fp = Counter(c[c > 0])  # false positives: predicted label where mismatched
    # tp + fp + fn counts the per-class union.  Default TP to 0 so a class
    # that is never predicted correctly yields IoU 0 instead of a TypeError
    # (tp.get would return None for it).
    ious = {
        class_: tp.get(class_, 0) / count
        for class_, count in (tp + fp + fn).items()
    }
    print(ious)
    return sum(ious.values()) / len(ious)
# -

iou1(truth, prediction)

# #### Tensorflow implementation

# +
def mean_iou(ground_truth, prediction, num_classes):
    """Thin wrapper around tf.metrics.mean_iou; returns (metric, update_op)."""
    # TODO: Use `tf.metrics.mean_iou` to compute the mean IoU.
    iou, iou_op = tf.metrics.mean_iou(ground_truth, prediction, num_classes)
    return iou, iou_op


ground_truth = tf.constant([
    [0, 0, 0, 0],
    [1, 1, 1, 1],
    [2, 2, 2, 2],
    [3, 3, 3, 3]], dtype=tf.float32)
prediction = tf.constant([
    [0, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 2, 2, 1],
    [3, 3, 0, 3]], dtype=tf.float32)

# TODO: use `mean_iou` to compute the mean IoU
iou, iou_op = mean_iou(ground_truth, prediction, 4)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # need to initialize local variables for this to run `tf.metrics.mean_iou`
    sess.run(tf.local_variables_initializer())
    sess.run(iou_op)
    # should be 0.53869
    print("Mean IoU =", sess.run(iou))
# -

# ### FCN-8
# https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf
# https://github.com/udacity/CarND-Object-Detection-Lab
deep-learning/fully-convolutional-networks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ============================== loading libraries =========================================== import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score # ============================================================================================= # ============================== data preprocessing =========================================== # loading the dataset data = pd.read_csv('data.csv') # rows * columns data.shape # column labels data.columns # data of top 5 rows data.head() # unique values of the column which is to be predicted by the classifier data['diagnosis'].unique() # total number of null values in every column data.isna().sum() # dropping the null values data.dropna(axis=1, inplace=True) # Malignant = 0 # Benign = 1 data['diagnosis'] = data.diagnosis.map(lambda x: 0 if x == 'M' else 1) data['diagnosis'].unique() # convert the dataset into numpy's ndarray (X and y) y = data['diagnosis'].values data.drop(['diagnosis', 'id'], inplace=True, axis=1) X = data.values print(type(X)) print(type(y)) # split the data set into train and test X1, X_test, y1, y_test = train_test_split(X, y, test_size=0.2) # split the train data set into cross validation train and cross validation test X_train, X_cv, y_train, y_cv = train_test_split(X1, y1, test_size=0.2) # ============================================================================================= # + # ====================== Finding the optimal value of K for K-NN =============================== # list to store accuracy_score accuracy = [] for i in range(1,30,2): # instantiate the K-NN classifier with k = i clf = 
KNeighborsClassifier(n_neighbors=i) # fitting the model with training data clf.fit(X_train, y_train) # append the accuracy_score of cross_validation data into accuracy list accuracy.append(accuracy_score(y_cv, clf.predict(X_cv)) * float(100)) # plot the accuracy and the value of K to findout optimal value for K plt.xlabel('Value of K') plt.ylabel('Accuracy Score') k = [i for i in range(1,30,2)] plt.plot(k,accuracy) # ============================================================================================= # - # ====================== Fitting the model with optimal value of K ============================ # since, k=9 has highest accuracy # therefore, train the K-NN model using k=9 clf = KNeighborsClassifier(n_neighbors=9) clf.fit(X_train, y_train) print(accuracy_score(y_test, clf.predict(X_test)) * float(100)) # =============================================================================================
breast _cancer_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from xgboost import XGBRegressor

# +
# Read the data
X = pd.read_csv('./input/train.csv', index_col='Id')
X_test = pd.read_csv('./input/test.csv', index_col='Id')

# Remove rows with missing target, separate target from predictors
X.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X.SalePrice
X.drop(['SalePrice'], axis=1, inplace=True)

# To keep things simple, we'll drop columns with missing values
# (columns that are NaN-free in train but not in test are imputed below)
cols_with_missing = [col for col in X.columns if X[col].isnull().any()]
X.drop(cols_with_missing, axis=1, inplace=True)
X_test.drop(cols_with_missing, axis=1, inplace=True)

# +
object_cols = [col for col in X.columns if X[col].dtype == "object"]

# Columns that will be one-hot encoded
low_cardinality_cols = [col for col in object_cols if X[col].nunique() < 10]

# Columns that will be dropped from the dataset
high_cardinality_cols = list(set(object_cols)-set(low_cardinality_cols))

print('Categorical columns that will be one-hot encoded:', low_cardinality_cols)
print('\nCategorical columns that will be dropped from the dataset:', high_cardinality_cols)

# Impute remaining missing values in the test set using statistics computed
# from the *training* column (mode for categoricals, median for numerics).
nullCol=[col for col in X_test.columns if X_test[col].isnull().any()]
for col in nullCol:
    if X_test[col].dtype=='object':
        # NOTE(review): max(set(...), key=list.count) is an O(n^2) way to get
        # the mode; X[col].mode()[0] would be clearer and faster.
        most_frequent=max(set(list(X[col])),key=list(X[col]).count)
        X_test[col]=X_test[col].fillna(most_frequent)
    else:
        X_test[col]=X_test[col].fillna(X[col].median())

# One-hot encode the low-cardinality categoricals; handle_unknown='ignore'
# zero-fills categories that appear only in the test set.
one_hot_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(one_hot_encoder.fit_transform(X[low_cardinality_cols]))
OH_cols_test = pd.DataFrame(one_hot_encoder.transform(X_test[low_cardinality_cols]))

# One-hot encoding dropped the index; restore it before concatenation.
OH_cols_train.index = X.index
OH_cols_test.index = X_test.index

# Replace all original categorical columns with the encoded ones.
num_X_train = X.drop(object_cols, axis=1)
num_X_test = X_test.drop(object_cols, axis=1)
OH_X_train = pd.concat([OH_cols_train,num_X_train], axis=1)
OH_X_test = pd.concat([OH_cols_test,num_X_test], axis=1)
# -

# Fit gradient-boosted trees on all training data, predict on the test set
# and write the Kaggle submission file.
model=XGBRegressor(n_estimators=200,learning_rate=0.005)
model.fit(OH_X_train,y)
preds_test=model.predict(OH_X_test)
output = pd.DataFrame({'Id': X_test.index,'SalePrice': preds_test})
output.to_csv('submission1.csv', index=False)
print("SUCCESS!")
house-prices-ML/Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="BjHU0Kfync7_" from google.colab import drive drive.mount('/content/drive') # + [markdown] colab_type="text" id="eulvfJWl7ueY" # # Lab 1 # # # ## Part 1: Bilingual dictionary induction and unsupervised embedding-based MT (30%) # *Note: this homework is based on materials from yandexdataschool [NLP course](https://github.com/yandexdataschool/nlp_course/). Feel free to check this awesome course if you wish to dig deeper.* # # *Refined by [<NAME>](https://www.linkedin.com/in/nikolay-karpachev-b0146a104/)* # + [markdown] colab_type="text" id="fV4rIjxa7uei" # **In this homework** **<font color='red'>YOU</font>** will make machine translation system without using parallel corpora, alignment, attention, 100500 depth super-cool recurrent neural network and all that kind superstuff. # # But even without parallel corpora this system can be good enough (hopefully), in particular for similar languages, e.g. Ukrainian and Russian. # + [markdown] colab_type="text" id="idSYq2GU7uew" # ### Frament of the Swadesh list for some slavic languages # # The Swadesh list is a lexicostatistical stuff. It's named after American linguist Morris Swadesh and contains basic lexis. This list are used to define subgroupings of languages, its relatedness. # # So we can see some kind of word invariance for different Slavic languages. 
# # # | Russian | Belorussian | Ukrainian | Polish | Czech | Bulgarian | # |-----------------|--------------------------|-------------------------|--------------------|-------------------------------|-----------------------| # | женщина | жанчына, кабета, баба | жінка | kobieta | žena | жена | # | мужчина | мужчына | чоловік, мужчина | mężczyzna | muž | мъж | # | человек | чалавек | людина, чоловік | człowiek | člověk | човек | # | ребёнок, дитя | дзіця, дзіцёнак, немаўля | дитина, дитя | dziecko | dítě | дете | # | жена | жонка | дружина, жінка | żona | žena, manželka, choť | съпруга, жена | # | муж | муж, гаспадар | чоловiк, муж | mąż | muž, manžel, choť | съпруг, мъж | # | мать, мама | маці, матка | мати, матір, неня, мама | matka | matka, máma, 'стар.' mateř | майка | # | отец, тятя | бацька, тата | батько, тато, татусь | ojciec | otec | баща, татко | # | много | шмат, багата | багато | wiele | mnoho, hodně | много | # | несколько | некалькі, колькі | декілька, кілька | kilka | několik, pár, trocha | няколко | # | другой, иной | іншы | інший | inny | druhý, jiný | друг | # | зверь, животное | жывёла, звер, істота | тварина, звір | zwierzę | zvíře | животно | # | рыба | рыба | риба | ryba | ryba | риба | # | птица | птушка | птах, птиця | ptak | pták | птица | # | собака, пёс | сабака | собака, пес | pies | pes | куче, пес | # | вошь | вош | воша | wesz | veš | въшка | # | змея, гад | змяя | змія, гад | wąż | had | змия | # | червь, червяк | чарвяк | хробак, черв'як | robak | červ | червей | # | дерево | дрэва | дерево | drzewo | strom, dřevo | дърво | # | лес | лес | ліс | las | les | гора, лес | # | палка | кій, палка | палиця | patyk, pręt, pałka | hůl, klacek, prut, kůl, pálka | палка, пръчка, бастун | # + [markdown] colab_type="text" id="cNM3_fjr7ue2" # But the context distribution of these languages demonstrates even more invariance. And we can use this fact for our for our purposes. 
# + [markdown] colab_type="text" id="YLppwa527ue6" # ## Data # + colab={} colab_type="code" id="lYBGKAUn7ue_" import gensim import numpy as np from gensim.models import KeyedVectors # + [markdown] colab_type="text" id="MwGoVhRA7ufP" # In this notebook we're going to use pretrained word vectors - FastText (original paper - https://arxiv.org/abs/1607.04606). # # You can download them from the official [website](https://fasttext.cc/docs/en/crawl-vectors.html). We're going to need embeddings for Russian and Ukrainian languages. Please use word2vec-compatible format (.text). # + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" executionInfo={"elapsed": 66479, "status": "ok", "timestamp": 1584216312779, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="Bl_CQJApowMA" outputId="cb2a5b40-6fd5-40e5-86ba-f17b24e2bdfa" # !wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.uk.300.vec.gz # + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" executionInfo={"elapsed": 55809, "status": "ok", "timestamp": 1584216560511, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="Y432J4ONo4Ug" outputId="bcff4130-0cfe-4eca-b827-88fb2dd42500" # !wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.ru.300.vec.gz # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 62223, "status": "ok", "timestamp": 1584216645549, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="FeY1yB1moFBn" outputId="55d75bed-fa76-45de-d10f-0f295731b68d" # !gunzip cc.uk.300.vec.gz # !gunzip cc.ru.300.vec.gz # + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 578215, "status": "ok", "timestamp": 1584217253557, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="u1JjQv_97ufT" outputId="2caf5e79-60d2-40cb-8f18-497bffc89aca" 
uk_emb = KeyedVectors.load_word2vec_format("cc.uk.300.vec") # + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 578472, "status": "ok", "timestamp": 1584217840161, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="ffzuept_7ufd" outputId="10acb6f6-27af-4627-d1aa-4a0474e072a7" ru_emb = KeyedVectors.load_word2vec_format("cc.ru.300.vec") # + colab={"base_uri": "https://localhost:8080/", "height": 241} colab_type="code" executionInfo={"elapsed": 7463, "status": "ok", "timestamp": 1584217912641, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="nTkXfT0W7ufk" outputId="28711e9f-5772-42d5-d363-1726d29faecc" ru_emb.most_similar([ru_emb["август"]], topn=10) # + colab={"base_uri": "https://localhost:8080/", "height": 241} colab_type="code" executionInfo={"elapsed": 5293, "status": "ok", "timestamp": 1584217920664, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="vdBA8lcg7ufs" outputId="79bf4028-cb00-46fc-d43e-64e8537ee863" uk_emb.most_similar([uk_emb["серпень"]]) # + colab={"base_uri": "https://localhost:8080/", "height": 241} colab_type="code" executionInfo={"elapsed": 715, "status": "ok", "timestamp": 1584217923285, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="_yJvcKXO7uf0" outputId="8e8970d7-00f0-46ec-a259-cdf6a7e90f22" ru_emb.most_similar([uk_emb["серпень"]]) # + [markdown] colab_type="text" id="pNdYAR1q7uf6" # Load small dictionaries for correspoinding words pairs as trainset and testset. 
# + colab={} colab_type="code" id="35d_DAK67uf8" def load_word_pairs(filename): uk_ru_pairs = [] uk_vectors = [] ru_vectors = [] with open(filename, "r") as inpf: for line in inpf: uk, ru = line.rstrip().split("\t") if uk not in uk_emb or ru not in ru_emb: continue uk_ru_pairs.append((uk, ru)) uk_vectors.append(uk_emb[uk]) ru_vectors.append(ru_emb[ru]) return uk_ru_pairs, np.array(uk_vectors), np.array(ru_vectors) # + colab={"base_uri": "https://localhost:8080/", "height": 377} colab_type="code" executionInfo={"elapsed": 3279, "status": "ok", "timestamp": 1584217938649, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="wkNL602WHJyO" outputId="191a6314-c19c-44b4-8265-63fe740e87b7" # !wget -O ukr_rus.train.txt http://tiny.cc/jfgecz # + colab={"base_uri": "https://localhost:8080/", "height": 377} colab_type="code" executionInfo={"elapsed": 5269, "status": "ok", "timestamp": 1584217941657, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -180} id="uoclU6JcHCcn" outputId="057dd6dd-de78-440c-81fc-833f74ee332b" # !wget -O ukr_rus.test.txt http://tiny.cc/6zoeez # + colab={} colab_type="code" id="05BqsdSK7ugD" uk_ru_train, X_train, Y_train = load_word_pairs("ukr_rus.train.txt") # + colab={} colab_type="code" id="zQOZw51r7ugL" uk_ru_test, X_test, Y_test = load_word_pairs("ukr_rus.test.txt") # + [markdown] colab_type="text" id="-ZBBNvpz7ugQ" # ## Embedding space mapping (0.3 pts) # + [markdown] colab_type="text" id="x_Dhk5gL7ugS" # Let $x_i \in \mathrm{R}^d$ be the distributed representation of word $i$ in the source language, and $y_i \in \mathrm{R}^d$ is the vector representation of its translation. Our purpose is to learn such linear transform $W$ that minimizes euclidian distance between $Wx_i$ and $y_i$ for some subset of word embeddings. Thus we can formulate so-called Procrustes problem: # # $$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$$ # or # $$W^*= \arg\min_W ||WX - Y||_F$$ # # where $||*||_F$ - Frobenius norm. 
# + [markdown] colab_type="text" id="acOjDdtL7ugY"
# $W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$ looks like simple multiple linear regression (without intercept fit). So let's code.

# + colab_type="code" id="Lb-KN1be7uga"
from sklearn.linear_model import LinearRegression

# Fit W as an unconstrained least-squares mapping uk-space -> ru-space.
mapping = LinearRegression(fit_intercept=False)
mapping.fit(X_train, Y_train)

# + [markdown] colab_type="text" id="X7tqJwoY7ugf"
# Let's take a look at neigbours of the vector of word _"серпень"_ (_"август"_ in Russian) after linear transform.

# + colab_type="code" id="31SrFSbn7ugi"
august = mapping.predict(uk_emb["серпень"].reshape(1, -1))
ru_emb.most_similar(august)

# + [markdown] colab_type="text" id="okSkjk597ugo"
# We can see that neighbourhood of this embedding cosists of different months, but right variant is on the ninth place.

# + [markdown] colab_type="text" id="o2uY6Y9B7ugt"
# As quality measure we will use precision top-1, top-5 and top-10 (for each transformed Ukrainian embedding we count how many right target pairs are found in top N nearest neighbours in Russian embedding space).

# + colab_type="code" id="zptuho8LAfIE"
def precision(pairs, mapped_vectors, topn=1):
    """
    :args:
        pairs = list of right word pairs [(uk_word_0, ru_word_0), ...]
        mapped_vectors = list of embeddings after mapping from source embedding space to destination embedding space
        topn = the number of nearest neighbours in destination embedding space to choose from
    :returns:
        precision_val, float in [0, 1]: the fraction of pairs whose correct
        translation appears among the top-K nearest neighbours.
    """
    assert len(pairs) == len(mapped_vectors)
    num_matches = 0
    for i, (_, ru) in enumerate(pairs):
        # most_similar returns [(word, similarity), ...]; column 0 is the word.
        most_similar = np.array(ru_emb.most_similar(
            mapped_vectors[i].reshape(1, -1), topn=topn))
        num_matches += int(ru in most_similar[:, 0])
    precision_val = num_matches / len(pairs)
    return precision_val


# Sanity checks: "август" is the 9th neighbour of the mapped "серпень".
# + colab_type="code" id="duhj9hpv7ugy"
assert precision([("серпень", "август")], august, topn=5) == 0.0
assert precision([("серпень", "август")], august, topn=9) == 1.0
assert precision([("серпень", "август")], august, topn=10) == 1.0

# + colab_type="code" id="0-iyd5gP7ug5"
assert precision(uk_ru_test, X_test) == 0.0
assert precision(uk_ru_test, Y_test) == 1.0

# + colab_type="code" id="U-ssEJ3x7uhA"
precision_top1 = precision(uk_ru_test, mapping.predict(X_test), 1)
precision_top5 = precision(uk_ru_test, mapping.predict(X_test), 5)

# + colab={"base_uri": "https://localhost:8080/", "height": 51}
# colab_type="code" id="7K-hy7a6Ksn2"
print(precision_top1)
print(precision_top5)

# + [markdown] colab_type="text" id="hf6Ou8bx7uhH"
# ## Making it better (orthogonal Procrustean problem) (0.3 pts)

# + [markdown] colab_type="text" id="4oLs-drN7uhK"
# It can be shown (see original paper) that a self-consistent linear mapping between semantic spaces should be orthogonal.
# We can restrict transform $W$ to be orthogonal. Then we will solve next problem:
#
# $$W^*= \arg\min_W ||WX - Y||_F \text{, where: } W^TW = I$$
#
# $$I \text{- identity matrix}$$
#
# Instead of making yet another regression problem we can find optimal orthogonal transformation using singular value decomposition. It turns out that optimal transformation $W^*$ can be expressed via SVD components:
# $$X^TY=U\Sigma V^T\text{, singular value decompostion}$$
# $$W^*=UV^T$$

# + colab_type="code" id="_KSaRJFGMFiJ"
import numpy as np

# + colab_type="code" id="DdFQ7qti7uhL"
def learn_transform(X_train, Y_train):
    """
    Orthogonal Procrustes solution: SVD of X^T Y, then W* = U V^T
    (the singular values are discarded).

    :returns: W* : float matrix[emb_dim x emb_dim] as defined in formulae above
    """
    u, s, vt = np.linalg.svd(X_train.T @ Y_train)
    mapping = u @ vt
    return mapping


# + colab_type="code" id="7X7QfYDd7uhQ"
W = learn_transform(X_train, Y_train)

# + colab_type="code" id="OVOFYYa37uhX"
ru_emb.most_similar([np.matmul(uk_emb["серпень"], W)])

# + colab={"base_uri": "https://localhost:8080/", "height": 105}
# "photoUrl": "" id="r297sYP37uhb"
# Precision of the orthogonal mapping on the test dictionary (top-1, top-5).
print(precision(uk_ru_test, np.matmul(X_test, W)))
print(precision(uk_ru_test, np.matmul(X_test, W), 5))

# + [markdown] colab_type="text" id="hvUZ72U5AfJg"
# ## Unsupervised embedding-based MT (0.4 pts)

# + [markdown] colab_type="text" id="LLyuVfHBLrJn"
# Now, let's build our word embeddings-based translator!

# + [markdown] colab_type="text" id="tPAURW1CMuP7"
# Firstly, download OPUS Tatoeba corpus.

# + colab_type="code" id="F80kUKzQMsDu"
# !wget https://object.pouta.csc.fi/OPUS-Tatoeba/v20190709/mono/uk.txt.gz

# + colab_type="code" id="0CGFZoxCUVf1"
# !gzip -d ./uk.txt.gz

# + colab_type="code" id="2MV3VvoVUX5U"
with open('./uk.txt', 'r') as f:
    uk_corpus = f.readlines()

# + colab_type="code" id="tU7nPVf0UhbI"
# To save your time and CPU, feel free to use first 1000 sentences of the corpus
uk_corpus = uk_corpus[:1000]

# + colab_type="code" id="FLN8dBOXAfJ1"
from nltk.tokenize import wordpunct_tokenize

def preprocess(corpus):
    """Tokenize each sentence and re-join the tokens with single spaces, so
    punctuation is separated from words before word-by-word translation."""
    corpus_prepr = []
    for sentence in corpus:
        corpus_prepr.append(" ".join(wordpunct_tokenize(sentence)))
    return corpus_prepr


# + colab_type="code" id="h2MSTsuA0UsI"
uk_corpus = preprocess(uk_corpus)

# + colab_type="code" id="sXDFD5iFy1x3"
uk_corpus[0]

# + colab_type="code" id="FGksC7l_NMi9"
def translate(sentence):
    """
    :args:
        sentence - sentence in Ukrainian (str)
    :returns:
        translation - sentence in Russian (str)

    * find ukrainian embedding for each word in sentence
    * transform ukrainian embedding vector
    * find nearest russian word and replace
    """
    translated = []
    for word in sentence.split():
        # Out-of-vocabulary words are replaced with a literal 'UNK' token.
        if word not in uk_emb:
            translated.append('UNK')
            continue
        embedding = uk_emb[word].reshape(1, -1)
        # Map into the Russian embedding space with the orthogonal W.
        transformed = np.matmul(embedding, W)
        translated.append(ru_emb.most_similar(transformed, topn=1)[0][0])
    return " ".join(translated)


# + colab_type="code" id="4hbbMy-tNxlf"
assert translate(".") == "."
assert translate("1 , 3") == "1 , 3"
assert translate("кіт зловив мишу") == "кот поймал мышку"

# + [markdown] colab_type="text" id="ia6I2ce7O_HI"
# Now you can play with your model and try to get as accurate translations as possible. **Note**: one big issue is out-of-vocabulary words. Try to think of various ways of handling it (you can start with translating each of them to a special **UNK** token and then move to more sophisticated approaches). Good luck!

# + colab_type="code" id="ap1W7ZCeOAVU"
for sent in uk_corpus[::10]:
    print(translate(sent))

# + [markdown] colab_type="text" id="vT-kq8_SnXir"
# Great!
# See second notebook for the Neural Machine Translation assignment.
homeworks/Lab1_NLP/Lab1_NLP_part_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import scipy as sp
from scipy.optimize import minimize

# ## Main problem
# We want to minimize the quantities $$K_1=(4b+c-4a)C_1$$ and $$K_2=(4b+c-4a)C_2$$
# coming from the function
# $$K(\tau)=K_1+K_2\tau^{1/2}-\tfrac{2}{3\sqrt{3}}(\tau-1)^{3/2}.$$
# To that end we pose the minimization problem $(P)$ given by
# $$\min_{(a, b, c, C_1, C_2)\in X}\ K_1^2+K_2^2$$
# where $X\subset \mathbb{R}^5$ is the constraint set of the problem.

# ## Optimization problem
#
# Below, the objective function of the problem is defined, followed by its constraints.
#
# *Remark*: for convenience, the code works with $C_1=A^{-1}, C_2=B^{-1}$ and $C_3=C^{1/2}$.

def objective(x):
    """K_1^2 + K_2^2 in the (a, b, c, A, B) parametrization, C_1=1/A, C_2=1/B."""
    a, b, c, A, B = x
    spread = 4*b + c - 4*a
    return spread**2 * (1/(A**2) + 1/(B**2))


def constraint1(x):
    """First inequality constraint; feasible iff the returned value is >= 0."""
    a, b, c, A, B = x
    gap = b - 2*a
    return (14*a*gap**4
            - 1.5*(b**3)*(A**2)
            - 1.5*(b + c - a)*a*b*(A**2)
            - 1.5*a*(b**2)*(B**2)
            - 0.75*(b + c - a)*(A**4)
            - 0.125*b*(A**2)*(B**2))


def constraint2(x):
    """Second inequality constraint; feasible iff the returned value is >= 0."""
    a, b, c, A, B = x
    gap = b - 2*a
    return 6*a*gap**2 - 1.5*b*(A**2) - 0.75*a*(B**2)


def constraint3(x):
    """Third inequality constraint; reads the module-level constant C (= C_3^2)."""
    a, b, c, A, B = x
    gap = b - 2*a
    return (4*gap**3
            - 0.25*(b + c - a)*(A**2)
            - 3*(a**2)/(gap*C)
            - 2*gap*(B**2))


def constraint4(x):
    """Requires b - 2a >= 0 (keeps the `gap` terms above well defined)."""
    return x[1] - 2*x[0]


# An initial point $x_0$ is chosen and the problem's constraints are assembled
# in the dict format expected by scipy.optimize.minimize.

# +
x0 = [1, 8, 0.5, 3, 3]

# constraints (all inequalities; feasible when the function value is >= 0)
con1 = {'type': 'ineq', 'fun': constraint1}
con2 = {'type': 'ineq', 'fun': constraint2}
con3 = {'type': 'ineq', 'fun': constraint3}
con4 = {'type': 'ineq', 'fun': constraint4}
cons = [con1, con2, con3, con4]
# -

# ### Primer acercamiento
#
# Fijamos $C_3$ suficientemente grande para resolver el problema.
Debido a la libertad de las variables, resolvemos el problema $(P)$ a medida que iteramos sobre las cotas inferiores de $C_1$ y $C_2$. Por la naturaleza del problema original, podemos fijar cotas inferiores para $a, b$ y $c$. # + C=10000000 # 10^7 lista=np.linspace(1, 80, 400).tolist() val_p_min=100 sol_p_min=[10, 10, 10, 10, 10] for j in lista: b1=(1, j) b=(0.01, None) # Cotas de las variables bnds=(b, b, b, b1, b1) # No definimos tipo de solver sol=minimize(objective, x0, bounds=bnds, constraints=cons) # Nos aseguramos de que, en efecto, se satisfagan las restricciones # Guardamos la mínima solución if sol.fun<val_p_min and constraint1(sol.x)>0 and constraint2(sol.x)>0 and constraint3(sol.x)>0 and constraint4(sol.x)>0: val_p_min=sol.fun sol_p_min=sol.x print("---------------------------- Solución ----------------------------") print("val(P)=", val_p_min) print("Solución del problema: a=%s, b=%s, c=%s, C1=1/%s, C2=1/%s" %tuple(sol_p_min)) print("El valor de K1 es ", (4*sol_p_min[1]+sol_p_min[2]-4*sol_p_min[0])*(1/sol_p_min[3])) print("El valor de K2 es ", (4*sol_p_min[1]+sol_p_min[2]-4*sol_p_min[0])*(1/sol_p_min[4])) # - print(objective(sol_p_min)) print(constraint1(sol_p_min)) print(constraint2(sol_p_min)) print(constraint3(sol_p_min)) # ### Comportamiento de $val(P)$ # Al disminuir la cota inferior de $C_1$ y $C_2$ vemos que $val(P)$ tiene chance de decrecer, debido a que la función objetivo es decreciente respecto a dichos parámetros. En efecto, en la siguiente iteración, fijando como cota inferior $10^3$ podemos apreciar tal comportamiento, y a su vez, obtener la misma conclusión tanto para $K_1$ como para $K_2$. 
# Sweep the upper bound of A and B up to 1000 (default 50 linspace points),
# printing every feasible solution rather than only the best one.
lista=np.linspace(1, 1000).tolist()

for j in lista:
    b1=(1, j)
    b=(0.01, None)
    # Bounds of the variables (a, b, c, A, B)
    bnds=(b, b, b, b1, b1)
    # We do not set a solver type
    sol=minimize(objective, x0, bounds=bnds, constraints=cons)
    # Make sure the constraints are indeed satisfied
    if constraint1(sol.x)>0 and constraint2(sol.x)>0 and constraint3(sol.x)>0 and constraint4(sol.x)>0:
        print("---------------------------- Nueva iteración ----------------------------")
        print("val(P)=", sol.fun)
        print("El valor de K1 es ", (4*sol.x[1]+sol.x[2]-4*sol.x[0])*(1/sol.x[3]))
        print("El valor de K2 es ", (4*sol.x[1]+sol.x[2]-4*sol.x[0])*(1/sol.x[4]))

# ### Second approach
# We conjecture that $val(P)>71.22$ (based on a lower bound of $10^{-3}$ for $C_1, C_2$). When the lower bound of $C_1$ and $C_2$ decreases, the improvements obtained in the lower bound of the parameter $\eta$ are of order $10^{-3}$, which we do not consider significant. Note also that the solver may not give good results with numbers of smaller order.
#
# Within this range, it turns out to be more meaningful to decrease $K_2$ without (significantly) increasing $K_1$, so we essentially consider a new optimization problem $(S)$ (depending on $C_1$) whose objective function is $K_2$, with the same constraints as problem $(P)$ plus a new constraint that captures the range of minimal values of the initial problem. In other words $$(S)\ \min_{(a, b, c, C_2)\in Y}\ K_2$$
# where $Y\subset \mathbb{R}^4$ es el espacio de restricciones del problema.
# +
# Reduced problem (S): minimize K2 = (4b + c - 4a)/B with C1 = 1/A fixed per
# outer-loop iteration. These definitions intentionally shadow the 5-variable
# versions used for problem (P); the constraints read the module-level globals
# `A` (the outer loop variable) and `C` at call time.
def objective(x):
    # K2 with x = (a, b, c, B), where C2 = 1/B.
    a=x[0]
    b=x[1]
    c=x[2]
    B=x[3]
    return (4*b+c-4*a)/B

def constraint1(x):
    # Inequality constraint g1(x) >= 0 (A is a global here).
    a=x[0]
    b=x[1]
    c=x[2]
    B=x[3]
    return 14*a*(b-2*a)**4-1.5*(b**3)*(A**2)-1.5*(b+c-a)*a*b*(A**2)-1.5*a*(b**2)*(B**2)-0.75*(b+c-a)*(A**4)-0.125*b*(A**2)*(B**2)

def constraint2(x):
    # Inequality constraint g2(x) >= 0.
    a=x[0]
    b=x[1]
    c=x[2]
    B=x[3]
    return 6*a*(b-2*a)**2-1.5*b*(A**2)-0.75*a*(B**2)

def constraint3(x):
    # Inequality constraint g3(x) >= 0 (uses global C as well).
    a=x[0]
    b=x[1]
    c=x[2]
    B=x[3]
    return 4*(b-2*a)**3-0.25*(b+c-a)*(A**2)-3*(a**2)/((b-2*a)*C)-2*(b-2*a)*(B**2)

def constraint4(x):
    # b - 2a >= 0.
    return x[1]-2*x[0]

def constraint5(x):
    # New constraint capturing the near-optimal range of (P):
    # K1 = (4b + c - 4a)/A <= 7.6.
    a=x[0]
    b=x[1]
    c=x[2]
    B=x[3]
    return 7.6*A-(4*b+c-4*a)

x0=[1, 8, 0.5, 3]

# We add the new constraint
con1={'type':'ineq', 'fun': constraint1}
con2={'type':'ineq', 'fun': constraint2}
con3={'type':'ineq', 'fun': constraint3}
con4={'type':'ineq', 'fun': constraint4}
con5={'type':'ineq', 'fun': constraint5}

cons=[con1, con2, con3, con4, con5]

C=10000000 # 10^7
lista=np.linspace(1, 80, 400).tolist()
list_A=np.linspace(1.5, 2, 6).tolist()

val_p_min=100
sol_p_min=[10, 10, 10, 10]
# BUG FIX: remember the value of A at which the best solution was found.
# Previously the reports below used the *last* loop value of A (always 2.0),
# not the one that produced sol_p_min.
A_min=list_A[0]

for A in list_A:
    for j in lista:
        b1=(1, j)
        b=(0.01, None)
        # Bounds of the variables (a, b, c, B)
        bnds=(b, b, b, b1)
        # We do not set a solver type
        sol=minimize(objective, x0, bounds=bnds, constraints=cons)
        # Make sure the constraints are indeed satisfied, and
        # keep the smallest feasible solution found so far.
        if sol.fun<val_p_min and constraint1(sol.x)>0 and constraint2(sol.x)>0 and constraint3(sol.x)>0 and constraint4(sol.x)>0 and constraint5(sol.x)>0:
            val_p_min=sol.fun
            sol_p_min=sol.x
            A_min=A

print("---------------------------- Solución ----------------------------")
print("val(P)=", val_p_min)
# BUG FIX: the original `print("C1=1/%s", A)` passed the format string and A as
# two separate arguments, printing the literal "C1=1/%s" followed by A; use
# %-formatting, and report A_min (the A of the best solution).
print("C1=1/%s" % A_min)
print("Solución del problema: a=%s, b=%s, c=%s, C2=1/%s" %tuple(sol_p_min))
print("El valor de K1 es ", (4*sol_p_min[1]+sol_p_min[2]-4*sol_p_min[0])*(1/A_min))
print("El valor de K2 es ", (4*sol_p_min[1]+sol_p_min[2]-4*sol_p_min[0])*(1/sol_p_min[3]))
# -

# We iterate between $A=1.5$ and $A=2$ with a step of $0.1$, since beyond that no improvement in $val(S)$ is obtained.
# # Con lo siguiente vemos que una elección aproximada de $argmin(S)$ satisface las restricciones iniciales. sol=[0.4768, 4.2744, 0.01, 4.0622] print("argmin(P) aproximado:", sol) print("El valor de K1 es ", (4*sol[1]+sol[2]-4*sol[0])*(1/A)) print("El valor de K2 es ", (4*sol[1]+sol[2]-4*sol[0])*(1/sol[3])) print("val(P) app-val(P):", objective(sol)-objective(sol_p_min)) print("C1(argmin(P)) aproximado:", constraint1(sol)) print("C2(argmin(P)) aproximado:", constraint2(sol)) print("C3(argmin(P)) aproximado:", constraint3(sol)) print("C4(argmin(P)) aproximado:", constraint4(sol)) # Asimismo, vemos que con dicha elección no estamos muy lejos de $val(P)$ original. sol_app=[0.4768, 4.2744, 0.01, 2, 4.0622] def objP(x): a=x[0] b=x[1] c=x[2] A=x[3] B=x[4] return ((4*b+c-4*a)**2)*(1/(A**2)+1/(B**2)) print(objP(sol_app)) # ## Conclusiones # Para $C_1, C_2\geq 1/80$, seleccionamos los parámetros aproximados a cuatro decimales # $$a=0.4768,\ b=4.2744,\ c=0.01,\ C_1=1/2,\ C_2=1/4.0622.$$ # Con dicha elección la función objetivo evaluada en estos puntos es mayor a $val(P)$ y difiere en aproximadamente $1.1\cdot 10^{-4}$. A partir de ello, en nuestro problema principal (en el cual interfieren otros parámetros que no se mencionan acá) $\eta\geq 23.25$.
# Source notebook: Carleman estimates/1 Left_BC_Carleman_Polynomial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="3yqLqibr0M_8" # <center> # <h4>Universidad Nacional de Córdoba - Diplomatura en Ciencia de Datos, Aprendizaje Automático y sus Aplicaciones</h4> # <h3> Análisis y Visualización de Datos </h3> # </center> # # + [markdown] colab_type="text" id="FATjz1ST0RRZ" # # Tipos de datos # # En esta notebook vamos a analizar los tipos de datos estadísticos de nuestro dataset, y comparalos con los tipos de datos computacionales de pandas. # + colab={"base_uri": "https://localhost:8080/", "height": 91} colab_type="code" id="yBWTBtS8Mp1D" outputId="f35828e4-596f-4386-9677-61ca34e66acc" import io import pandas import seaborn print(seaborn.__version__) # + colab={"base_uri": "https://localhost:8080/", "height": 141} colab_type="code" id="nDcbnarNOmYY" outputId="0fc8943c-4d74-4085-8767-8534f60df733" df = pandas.read_csv('~/Downloads/generated_dataset.csv') df.columns = ['timestamp', 'gender', 'age', 'zodiac_sign', 'profession', 'junior_programmer_salary', 'senior_programmer_salary'] df[:3] # + [markdown] colab_type="text" id="5o4INtyGSTxJ" # ## Tipos de variables aleatorias # # En base a la definición de variable aleatoria que vimos en el teórico, podemos hablar de que cada columna de nuestro dataset es un **variable aleatoria**, y que su valor en cada respuesta es una **realización** de dicha variable. Pero, ¿qué tipo tienen esas variables? # # Tenemos que distinguir dos conceptos con el mismo nombre y significado similar, pero que no son iguales: # - **tipo de la variable aleatoria** es el tipo teórico que tienen nuestros datos. # - **tipo de datos** es un concepto de programación que indica en qué formato se representa la información. 
# # Analicemos el tipo de cada variable aleatoria en base a la clasificación que vimos en el teórico: # # - `gender`: ?? # - `age`: ?? # - `zodiac_sign`: ?? # - `profession`: ?? # - `junior_programmer_salary`: ?? # - `senior_programmer_salary`: ?? # # Preguntar para discutir: # # 1. El tipo de `age`, ¿es discreto o continuo? # 2. ¿Qué valores pueden tomar `junior_programmer_salary` y `senior_programmer_salary`? ¿Qué valores toman en la encuenta? # 2. Los signos del zodiaco siguen un orden (al igual que los días y los meses del año). ¿Podríamos decir que `zodiac_sign` tiene tipo ordinal? # # # + [markdown] colab_type="text" id="12GWuJocsY5o" # ### Rango y valores posibles # # Siempre es útil saber qué valores puede tomar una variable para entender qué tipo tiene. Sin embargo, si sospechamos que una variable es continua, no tiene sentido listar todos sus valores posibles. En ese caso, nos conviene mostrar sólo el rango, o tomar una pequeña muestra. Veamos algunos ejemplos: # # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="4V1UspMStvuq" outputId="10363e1c-3fad-4059-c90e-2251a239ef07" df.gender.unique() # Listar todos los valores posibles # + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="5O7S-oHRtX4N" outputId="9d9ca1a8-ddcb-436b-c6c6-974c24e69f74" df.age.max(), df.age.min() # Variable numérica # + colab={"base_uri": "https://localhost:8080/", "height": 213} colab_type="code" id="wZo5XaM2tbRv" outputId="c631c246-4473-4c77-c085-af7018ccf4e0" df.junior_programmer_salary.sample(n=10) # Tomamos una muestra # + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="aHRzQL81uD8g" outputId="46bc5357-b61b-4d2a-8922-6e3735db28fc" df.profession.unique()[:10] # + [markdown] colab_type="text" id="CBDd_sJ0rXmJ" # ## Tipos de datos en programación # # Ahora pasemos al tipo de datos. 
Podemos saber con Pandas cómo está guardando la información de cada columna:
#

# + colab={"base_uri": "https://localhost:8080/", "height": 160} colab_type="code" id="D4HILmylmAO6" outputId="8522273f-d5ba-4c02-84de-5e3222588b31"
# How pandas stores each column internally (computational data types).
df.dtypes

# + [markdown] colab_type="text" id="dn9MEK0wrrJq"
# In this case, the `object` type refers to a string, but it could also be a date, a list, or anything more complex than a number. The data type does not always correlate with the variable's type. For example, if we store a satisfaction score from 1 to 5, even though we use numbers, that variable is conceptually ordinal and not numeric.
# Source notebook: 04_tipos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="JCkPHee-alKM" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="cvrknnPsaqaM" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="2Zt0JEl38SWX" # # Magnitude-based weight pruning with Keras # + [markdown] colab_type="text" id="IwBSTsryazfT" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/model_optimization/guide/pruning/pruning_with_keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="L7IGLmaw5DjA" # ## Overview # # Welcome to the tutorial for 
*weight pruning*, part of the TensorFlow Model Optimization toolkit. # # #### What is weight pruning? # # Weight pruning means literally that: eliminating unnecessary values in the weight tensor. We are practically setting neural network parameters' values to zero to remove low-weight connections between the *layers* of a neural network. # # #### Why is useful? # # Tensors with several values set to zero can be considered *sparse*. This results in important benefits: # * *Compression*. Sparse tensors are amenable to compression by only keeping the non-zero values and their corresponding coordinates. # * *Speed*. Sparse tensors allow us to skip otherwise unnecessary computations involving the zero values. # # #### How does it work? # # Our Keras-based weight pruning API is designed to iteratively remove connections based on their magnitude, during training. For more details on the usage of the API, please refer to the GitHub page. # # In this tutorial, we'll walk you through an end-to-end example of using the weight pruning API on a simple MNIST model. We will show that by simply using a generic file compression algorithm (e.g. zip) the Keras model will be reduced in size, and that this size reduction persists when converted to a Tensorflow Lite format. # # Two things worth clarifying: # * The technique and API are not TensorFlow Lite specific --we just show its application on the TensorFlow Lite backend, as it covers size-sensitive use-cases. # * By itself, a sparse model will not be faster to execute. It just enables backends with such capability. In the near future, however, TensorFlow Lite will take advantage of the sparsity to speed up computations. # # To recap, in the tutorial we will: # 1. Train a MNIST model with Keras from scratch. # 2. Train a pruned MNIST with the pruning API. # 3. Compare the size of the pruned model and the non-pruned one after compression. # 4. Convert the pruned model to Tensorflow Lite format and verify that accuracy persists. # 5. 
Show how the pruned model works with other optimization techniques, like post-training quantization. # + [markdown] colab_type="text" id="P8qFbkru8FKu" # ## Setup # + [markdown] colab_type="text" id="q8zIQsT9mUTw" # To use the pruning API, you need to install the `tensorflow-model-optimization` and `tf-nightly` packages. # # Since you will train a few models in this tutorial, install the `tensorflow-gpu` package to speed up things. Enable the GPU with: *Runtime > Change runtime type > Hardware accelator* and make sure GPU is selected. # + colab={} colab_type="code" id="Pn836LSTNSHA" # ! pip uninstall -y tensorflow # ! pip uninstall -y tf-nightly # ! pip install -U tf-nightly-gpu # ! pip install tensorflow-model-optimization # + colab={} colab_type="code" id="1ykjgo4UNXmD" # %load_ext tensorboard import tensorboard # + colab={} colab_type="code" id="mydXeQlDNbnR" import tensorflow as tf tf.enable_eager_execution() import tempfile import zipfile import os # + [markdown] colab_type="text" id="gBYltugp-MdR" # ## Prepare the training data # + colab={} colab_type="code" id="P_MJqxz5z2dh" batch_size = 128 num_classes = 10 epochs = 10 # input image dimensions img_rows, img_cols = 28, 28 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() if tf.keras.backend.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class 
matrices y_train = tf.keras.utils.to_categorical(y_train, num_classes) y_test = tf.keras.utils.to_categorical(y_test, num_classes) # + [markdown] colab_type="text" id="OmdnYKPK--5L" # ## Train a MNIST model without pruning # + [markdown] colab_type="text" id="r1p_y3gPWeW2" # ### Build the MNIST model # + colab={} colab_type="code" id="uLg0SGdYp2Q6" l = tf.keras.layers model = tf.keras.Sequential([ l.Conv2D( 32, 5, padding='same', activation='relu', input_shape=input_shape), l.MaxPooling2D((2, 2), (2, 2), padding='same'), l.BatchNormalization(), l.Conv2D(64, 5, padding='same', activation='relu'), l.MaxPooling2D((2, 2), (2, 2), padding='same'), l.Flatten(), l.Dense(1024, activation='relu'), l.Dropout(0.4), l.Dense(num_classes, activation='softmax') ]) model.summary() # + [markdown] colab_type="text" id="5dHynoso_xXF" # ### Train the model to reach an accuracy >99% # + [markdown] colab_type="text" id="KGo0UICmA8J3" # # Load [TensorBoard](https://www.tensorflow.org/tensorboard) to monitor the training process # + colab={} colab_type="code" id="WJyS_IcQBBqg" logdir = tempfile.mkdtemp() print('Writing training logs to ' + logdir) # + colab={} colab_type="code" id="QfgjuOLj_mFu" # %tensorboard --logdir={logdir} # + colab={} colab_type="code" id="A7c-fsQpTzOr" callbacks = [tf.keras.callbacks.TensorBoard(log_dir=logdir, profile_batch=0)] model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=callbacks, validation_data=(x_test, y_test)) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # + [markdown] colab_type="text" id="eLgMavDVCde5" # ### Save the original model for size comparison later # + colab={} colab_type="code" id="8E6U7GUIx5r9" # Backend agnostic way to save/restore models _, keras_file = tempfile.mkstemp('.h5') print('Saving model to: ', keras_file) 
tf.keras.models.save_model(model, keras_file, include_optimizer=False) # + [markdown] colab_type="text" id="gWiQbobKC0NZ" # ## Train a pruned MNIST # # We provide a `prune_low_magnitude()` API to train models with removed connections. The Keras-based API can be applied at the level of individual layers, or the entire model. We will show you the usage of both in the following sections. # # At a high level, the technique works by iteratively removing (i.e. zeroing out) connections between layers, given an schedule and a target sparsity. # # For example, a typical configuration will target a 75% sparsity, by pruning connections every 100 steps (aka epochs), starting from step 2,000. For more details on the possible configurations, please refer to the github documentation. # + [markdown] colab_type="text" id="FfJ6Tm3KXMFY" # ### Build a pruned model layer by layer # In this example, we show how to use the API at the level of layers, and build a pruned MNIST solver model. # # In this case, the `prune_low_magnitude(`) # receives as parameter the Keras layer whose weights we want pruned. # # This function requires a pruning params which configures the pruning algorithm during training. Please refer to our github page for detailed documentation. The parameter used here means: # # # 1. **Sparsity.** PolynomialDecay is used across the whole training process. We start at the sparsity level 50% and gradually train the model to reach 90% sparsity. X% sparsity means that X% of the weight tensor is going to be pruned away. # 2. **Schedule**. Connections are pruned starting from step 2000 to the end of training, and runs every 100 steps. The reasoning behind this is that we want to train the model without pruning for a few epochs to reach a certain accuracy, to aid convergence. Furthermore, we give the model some time to recover after each pruning step, so pruning does not happen on every step. We set the pruning frequency to 100. 
# # # + colab={} colab_type="code" id="Ip8qCtSlU3TQ" from tensorflow_model_optimization.sparsity import keras as sparsity # + [markdown] colab_type="text" id="tA9rnRUrebTE" # To demonstrate how to save and restore a pruned keras model, in the following example we first train the model for 10 epochs, save it to disk, and finally restore and continue training for 2 epochs. With gradual sparsity, four important parameters are begin_sparsity, final_sparsity, begin_step and end_step. The first three are straight forward. Let's calculate the end step given the number of train example, batch size, and the total epochs to train. # + colab={} colab_type="code" id="Rrs-xoB2cSSQ" import numpy as np epochs = 12 num_train_samples = x_train.shape[0] end_step = np.ceil(1.0 * num_train_samples / batch_size).astype(np.int32) * epochs print('End step: ' + str(end_step)) # + colab={} colab_type="code" id="Shz-r2RiqFca" pruning_params = { 'pruning_schedule': sparsity.PolynomialDecay(initial_sparsity=0.50, final_sparsity=0.90, begin_step=2000, end_step=end_step, frequency=100) } pruned_model = tf.keras.Sequential([ sparsity.prune_low_magnitude( l.Conv2D(32, 5, padding='same', activation='relu'), input_shape=input_shape, **pruning_params), l.MaxPooling2D((2, 2), (2, 2), padding='same'), l.BatchNormalization(), sparsity.prune_low_magnitude( l.Conv2D(64, 5, padding='same', activation='relu'), **pruning_params), l.MaxPooling2D((2, 2), (2, 2), padding='same'), l.Flatten(), sparsity.prune_low_magnitude(l.Dense(1024, activation='relu'), **pruning_params), l.Dropout(0.4), sparsity.prune_low_magnitude(l.Dense(num_classes, activation='softmax'), **pruning_params) ]) pruned_model.summary() # + [markdown] colab_type="text" id="YczppQ6vEPJg" # Load Tensorboard # + colab={} colab_type="code" id="eIO8UpZyEYkp" logdir = tempfile.mkdtemp() print('Writing training logs to ' + logdir) # + colab={} colab_type="code" id="KKb8XpDkA8TN" # %tensorboard --logdir={logdir} # + [markdown] colab_type="text" 
id="z2166laKE_N6" # ### Train the model # # Start pruning from step 2000 when accuracy >98% # + colab={} colab_type="code" id="GGoOTwQRzEP4" pruned_model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) # Add a pruning step callback to peg the pruning step to the optimizer's # step. Also add a callback to add pruning summaries to tensorboard callbacks = [ sparsity.UpdatePruningStep(), sparsity.PruningSummaries(log_dir=logdir, profile_batch=0) ] pruned_model.fit(x_train, y_train, batch_size=batch_size, epochs=10, verbose=1, callbacks=callbacks, validation_data=(x_test, y_test)) score = pruned_model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # + [markdown] colab_type="text" id="8Nzrm5pN1viP" # ### Save and restore the pruned model # # Continue training for two epochs: # + colab={} colab_type="code" id="Rd8G7srV2dcI" _, checkpoint_file = tempfile.mkstemp('.h5') print('Saving pruned model to: ', checkpoint_file) # saved_model() sets include_optimizer to True by default. Spelling it out here # to highlight. tf.keras.models.save_model(pruned_model, checkpoint_file, include_optimizer=True) with sparsity.prune_scope(): restored_model = tf.keras.models.load_model(checkpoint_file) restored_model.fit(x_train, y_train, batch_size=batch_size, epochs=2, verbose=1, callbacks=callbacks, validation_data=(x_test, y_test)) score = restored_model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # + [markdown] colab_type="text" id="1vV7pZrW5TQh" # In the example above, a few things to note are: # # # * When saving the model, include_optimizer must be set to True. We need to preserve the state of the optimizer across training sessions for pruning to work properly. # * When loading the pruned model, you need the prune_scope() for deseriazliation. 
# # # + [markdown] colab_type="text" id="tMTFhyc0vAA3" # ### Strip the pruning wrappers from the pruned model before export for serving # Before exporting a serving model, you'd need to call the `strip_pruning` API to strip the pruning wrappers from the model, as it's only needed for training. # + colab={} colab_type="code" id="jyCjjpUjvImz" final_model = sparsity.strip_pruning(pruned_model) final_model.summary() # + colab={} colab_type="code" id="B63tViKp_qLK" _, pruned_keras_file = tempfile.mkstemp('.h5') print('Saving pruned model to: ', pruned_keras_file) # No need to save the optimizer with the graph for serving. tf.keras.models.save_model(final_model, pruned_keras_file, include_optimizer=False) # + [markdown] colab_type="text" id="GhXHuBAGOBvY" # ### Compare the size of the unpruned vs. pruned model after compression # + colab={} colab_type="code" id="Hk4DoZTIy2uU" _, zip1 = tempfile.mkstemp('.zip') with zipfile.ZipFile(zip1, 'w', compression=zipfile.ZIP_DEFLATED) as f: f.write(keras_file) print("Size of the unpruned model before compression: %.2f Mb" % (os.path.getsize(keras_file) / float(2**20))) print("Size of the unpruned model after compression: %.2f Mb" % (os.path.getsize(zip1) / float(2**20))) _, zip2 = tempfile.mkstemp('.zip') with zipfile.ZipFile(zip2, 'w', compression=zipfile.ZIP_DEFLATED) as f: f.write(pruned_keras_file) print("Size of the pruned model before compression: %.2f Mb" % (os.path.getsize(pruned_keras_file) / float(2**20))) print("Size of the pruned model after compression: %.2f Mb" % (os.path.getsize(zip2) / float(2**20))) # + [markdown] colab_type="text" id="dayb_w_GqWs_" # ### Prune a whole model # # The `prune_low_magnitude` function can also be applied to the entire Keras model. # # In this case, the algorithm will be applied to all layers that are ameanable to weight pruning (that the API knows about). Layers that the API knows are not ameanable to weight pruning will be ignored, and unknown layers to the API will cause an error. 
# # *If your model has layers that the API does not know how to prune their weights, but are perfectly fine to leave "un-pruned", then just apply the API in a per-layer basis.* # # Regarding pruning configuration, the same settings apply to all prunable layers in the model. # # Also noteworthy is that pruning doesn't preserve the optimizer associated with the original model. As a result, it is necessary to re-compile the pruned model with a new optimizer. # + [markdown] colab_type="text" id="I-W7Sj8fZjeb" # Before we move forward with the example, lets address the common use case where you may already have a serialized pre-trained Keras model, which you would like to apply weight pruning on. We will take the original MNIST model trained previously to show how this works. In this case, you start by loading the model into memory like this: # + colab={} colab_type="code" id="qJm1SJxfqy2e" # Load the serialized model loaded_model = tf.keras.models.load_model(keras_file) # + [markdown] colab_type="text" id="etYrWnrSMpB5" # Then you can prune the model loaded and compile the pruned model for training. In this case training will restart from step 0. Given the model we loadded already reached a satisfactory accuracy, we can start pruning immediately. As a result, we set the begin_step to 0 here, and only train for another four epochs. 
# + colab={} colab_type="code" id="CabnOldzrSN2" epochs = 4 end_step = np.ceil(1.0 * num_train_samples / batch_size).astype(np.int32) * epochs print(end_step) new_pruning_params = { 'pruning_schedule': sparsity.PolynomialDecay(initial_sparsity=0.50, final_sparsity=0.90, begin_step=0, end_step=end_step, frequency=100) } new_pruned_model = sparsity.prune_low_magnitude(model, **new_pruning_params) new_pruned_model.summary() new_pruned_model.compile( loss=tf.keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy']) # + [markdown] colab_type="text" id="9qCipfCnaY7g" # Load tensorboard # + colab={} colab_type="code" id="VPBlcTXWB9tx" logdir = tempfile.mkdtemp() print('Writing training logs to ' + logdir) # + colab={} colab_type="code" id="X43ix4NZCDNS" # %tensorboard --logdir={logdir} # + [markdown] colab_type="text" id="r2hLPO7KNKq_" # ### Train the model for another four epochs # + colab={} colab_type="code" id="36hymxokrnbw" # Add a pruning step callback to peg the pruning step to the optimizer's # step. 
Also add a callback to add pruning summaries to tensorboard callbacks = [ sparsity.UpdatePruningStep(), sparsity.PruningSummaries(log_dir=logdir, profile_batch=0) ] new_pruned_model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=callbacks, validation_data=(x_test, y_test)) score = new_pruned_model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # + [markdown] colab_type="text" id="Z0yphbuRarfT" # ### Export the pruned model for serving # + colab={} colab_type="code" id="anFHUpCXrxMe" final_model = sparsity.strip_pruning(pruned_model) final_model.summary() # + colab={} colab_type="code" id="4CmEtxHEso7g" _, new_pruned_keras_file = tempfile.mkstemp('.h5') print('Saving pruned model to: ', new_pruned_keras_file) tf.keras.models.save_model(final_model, new_pruned_keras_file, include_optimizer=False) # + [markdown] colab_type="text" id="YT8YqXAza3Tt" # The model size after compression is the same as the one pruned layer-by-layer # + colab={} colab_type="code" id="AtKANC0hs2RR" _, zip3 = tempfile.mkstemp('.zip') with zipfile.ZipFile(zip3, 'w', compression=zipfile.ZIP_DEFLATED) as f: f.write(new_pruned_keras_file) print("Size of the pruned model before compression: %.2f Mb" % (os.path.getsize(new_pruned_keras_file) / float(2**20))) print("Size of the pruned model after compression: %.2f Mb" % (os.path.getsize(zip3) / float(2**20))) # + [markdown] colab_type="text" id="zXrLGUPfIwvV" # ## Convert to TensorFlow Lite # # Finally, you can convert the pruned model to a format that's runnable on your targeting backend. Tensorflow Lite is an example format you can use to deploy to mobile devices. 
To convert to a Tensorflow Lite graph, you need to use the TFLiteConverter as below: # + [markdown] colab_type="text" id="1f9Eb2K0bcJG" # ### Convert the model with TFLiteConverter # + colab={} colab_type="code" id="Ctqfiix-H-x7" tflite_model_file = '/tmp/sparse_mnist.tflite' converter = tf.lite.TFLiteConverter.from_keras_model_file(pruned_keras_file) tflite_model = converter.convert() with open(tflite_model_file, 'wb') as f: f.write(tflite_model) # + [markdown] colab_type="text" id="fdIiNnrPANcw" # ### Size of the TensorFlow Lite model after compression # + colab={} colab_type="code" id="iYMSXAU1AUYI" _, zip_tflite = tempfile.mkstemp('.zip') with zipfile.ZipFile(zip_tflite, 'w', compression=zipfile.ZIP_DEFLATED) as f: f.write(tflite_model_file) print("Size of the tflite model before compression: %.2f Mb" % (os.path.getsize(tflite_model_file) / float(2**20))) print("Size of the tflite model after compression: %.2f Mb" % (os.path.getsize(zip_tflite) / float(2**20))) # + [markdown] colab_type="text" id="vBqUX1qopV1k" # ### Evaluate the accuracy of the TensorFlow Lite model # + colab={} colab_type="code" id="F5AY-TuivmbP" import numpy as np interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] def eval_model(interpreter, x_test, y_test): total_seen = 0 num_correct = 0 for img, label in zip(x_test, y_test): inp = img.reshape((1, 28, 28, 1)) total_seen += 1 interpreter.set_tensor(input_index, inp) interpreter.invoke() predictions = interpreter.get_tensor(output_index) if np.argmax(predictions) == np.argmax(label): num_correct += 1 if total_seen % 1000 == 0: print("Accuracy after %i images: %f" % (total_seen, float(num_correct) / float(total_seen))) return float(num_correct) / float(total_seen) print(eval_model(interpreter, x_test, y_test)) # + [markdown] colab_type="text" id="Zalu5Ng7D7xi" # ### Post-training 
quantize the TensorFlow Lite model # # You can combine pruning with other optimization techniques like post training quantization. As a recap, post-training quantization converts weights to 8 bit precision as part of model conversion from keras model to TFLite's flat buffer, resulting in a 4x reduction in the model size. # # In the following example, we take the pruned keras model, convert it with post-training quantization, check the size reduction and validate its accuracy. # + colab={} colab_type="code" id="wbTNqf1KER0z" converter = tf.lite.TFLiteConverter.from_keras_model_file(pruned_keras_file) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] tflite_quant_model = converter.convert() tflite_quant_model_file = '/tmp/sparse_mnist_quant.tflite' with open(tflite_quant_model_file, 'wb') as f: f.write(tflite_quant_model) # + colab={} colab_type="code" id="s6a8bH_-EXeR" _, zip_tflite = tempfile.mkstemp('.zip') with zipfile.ZipFile(zip_tflite, 'w', compression=zipfile.ZIP_DEFLATED) as f: f.write(tflite_quant_model_file) print("Size of the tflite model before compression: %.2f Mb" % (os.path.getsize(tflite_quant_model_file) / float(2**20))) print("Size of the tflite model after compression: %.2f Mb" % (os.path.getsize(zip_tflite) / float(2**20))) # + [markdown] colab_type="text" id="lMuuxZs_QxMt" # The size of the quantized model is roughly 1/4 of the orignial one. # + colab={} colab_type="code" id="Dkv9mauCEami" interpreter = tf.lite.Interpreter(model_path=str(tflite_quant_model_file)) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] print(eval_model(interpreter, x_test, y_test)) # + [markdown] colab_type="text" id="ygzK3KZcoZ-w" # ## Conclusion # # In this tutorial, we showed you how to create *sparse models* with the TensorFlow model optimization toolkit weight pruning API. 
Right now, this allows you to create models that take significantly less space on disk. The resulting model can also be more efficiently implemented to avoid computation; in the future TensorFlow Lite will provide such capabilities. # # More specifically, we walked you through an end-to-end example of training a simple MNIST model that used the weight pruning API. We showed you how to convert it to the Tensorflow Lite format for mobile deployment, and demonstrated how with simple file compression the model size was reduced 5x. # # We encourage you to try this new capability on your Keras models, which can be particularly important for deployment in resource-constrained environments. # #
tensorflow_model_optimization/g3doc/guide/pruning/pruning_with_keras.ipynb
class Student:
    """A minimal student record holding a name and a roll number.

    Instances carry exactly two attributes, ``name`` and ``rollNo``, both
    stored in the instance ``__dict__`` (which the notebook inspects below
    to illustrate how ``__init__`` populates per-instance state).
    """

    def __init__(self, name, rollNo):
        # Bind constructor arguments straight onto the instance; nothing
        # else is stored, so __dict__ == {"name": ..., "rollNo": ...}.
        self.name = name
        self.rollNo = rollNo
02.Data-Structures-and-Algorithms/04.OOPS-1/03.Init-method.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="S10Vu9D_HTxK" colab_type="text" # # Perdiction Pipeline | Ensemble Pipeline # # In this notebook, we will ensemble our models to make a prediction pipeline. # The keys steps would be as following : # # 1. Text Preprocessing for inference. # 2. Load classifiers iteratively in a list. # 3. Load test-data & pre-process. # 4. Set classifier threshold and run it through Ensemble # # # + id="tWSz_ii0ZUiL" colab_type="code" outputId="43fe2961-cb2f-4696-a47d-bc355c564d9a" executionInfo={"status": "ok", "timestamp": 1578936597412, "user_tz": -330, "elapsed": 4974, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} colab={"base_uri": "https://localhost:8080/", "height": 306} # First let's check what has Google given us ! Thank you Google for the GPU # !nvidia-smi # + id="Dj2NshHKZ5gX" colab_type="code" outputId="133f579e-5097-45d3-ea21-3555818a8bde" executionInfo={"status": "ok", "timestamp": 1578936631470, "user_tz": -330, "elapsed": 34486, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} colab={"base_uri": "https://localhost:8080/", "height": 122} # Let's mount our G-Drive. Hey !! 
Because for GPU you now give your data to Google from google.colab import drive drive.mount('/content/drive') # + id="nK7yth0BZ7T-" colab_type="code" outputId="c545adf6-062c-4835-e9db-a48b34fadba4" executionInfo={"status": "ok", "timestamp": 1578936704782, "user_tz": -330, "elapsed": 74590, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # Install necessary packages and restart the environment # !pip install tiny-tokenizer # !pip install flair # + id="TaDyGj3-Z9G_" colab_type="code" outputId="4ec43f8f-12aa-400a-b71a-a33231f952b7" executionInfo={"status": "ok", "timestamp": 1578936728004, "user_tz": -330, "elapsed": 6877, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} colab={"base_uri": "https://localhost:8080/", "height": 63} # Let's import our packages ! import pandas as pd from tqdm import tqdm import html import re from bs4 import BeautifulSoup import re from sklearn.model_selection import train_test_split # import flair import pickle from torch.optim.adam import Adam # The sentence objects holds a sentence that we may want to embed or tag from flair.data import Sentence from flair.data import Corpus from flair.datasets import ClassificationCorpus from flair.embeddings import WordEmbeddings, FlairEmbeddings, DocumentRNNEmbeddings from flair.models import TextClassifier from flair.trainers import ModelTrainer from flair.samplers import ImbalancedClassificationDatasetSampler # + [markdown] id="2TiTzjVxGGva" colab_type="text" # # 1. Text Pre-processing # # ### Note - # We need to be careful to apply the same set of text transformations during inference as we did during training. 
# Matches HTML/XML tags. Compiled once at module level so the per-document
# prediction loop below does not recompile it on every call.
clean = re.compile('<.*?>')


def preprocess_text(text):
    """Normalise one raw document before it is fed to the classifiers.

    Applies, in order: strip HTML tags, unescape HTML entities, replace
    newlines/tabs/carriage-returns with spaces, collapse runs of spaces,
    and trim leading/trailing whitespace.

    NOTE: this must mirror the transformations used at training time —
    any divergence degrades the model's predictions.

    Args:
        text: raw document string (may contain HTML markup).

    Returns:
        str: the cleaned text. On a stage failure the text from the last
        successful stage is carried forward (best-effort, as before).
    """
    try:
        text = re.sub(clean, '', text)
        text = html.unescape(text)
    except Exception:
        # Best-effort: log and keep the text as-is for the next stage.
        print("Error in HTML Processing ...")
        print(text)

    try:
        # BUG FIX: maketrans requires equal-length arguments. The original
        # mapped three characters onto a single space, which raised
        # ValueError on every call, so newlines were never removed and each
        # call printed a spurious error plus the whole document.
        text = text.translate(text.maketrans("\n\t\r", "   "))
    except Exception:
        print("Error in removing extra lines ...")
        print(text)

    try:
        # Collapse multiple spaces (including those introduced above).
        text = re.sub(' +', ' ', text)
        text = text.strip()
    except Exception:
        print("Error in extra whitespace removal ...")
        print(text)

    return text
# + id="j3WJPPPEam39" colab_type="code" colab={} test_data = '/content/drive/My Drive/ICDMAI_Tutorial/notebook/training_data/70_30_split/test.txt' with open(test_data,'r',encoding='utf-8') as f : data = f.readlines() # prefix from the Training Data Format prefix = '__label__' # + id="e_5OHx2bfNR4" colab_type="code" outputId="4fee29ee-49a5-448b-ac50-b5be7a713f69" executionInfo={"status": "ok", "timestamp": 1577418263037, "user_tz": -330, "elapsed": 6068, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} colab={"base_uri": "https://localhost:8080/", "height": 34} test_text = [] test_label = [] for doc in tqdm(data) : splits = doc.split() labels = [] idx = 0 for word in splits : if prefix in word : labels.append(word[9:].strip()) idx += len(word) else : text = doc[idx:].strip() break test_text.append(text) test_label.append(labels) # break test_df = pd.DataFrame(list(zip(test_text,test_label)),columns = ['text','original_labels']) test_df['predicted_labels'] = None # + id="0qOd1JBJfUSd" colab_type="code" colab={} test_df.to_pickle('/content/drive/My Drive/ICDMAI_Tutorial/notebook/training_data/70_30_split/test.pkl') test_df.head() # + [markdown] id="LjbU5xK3PjX4" colab_type="text" # # 4. Run the Ensemble Pipeline # # Once we load the test-data, we can iteratively run it via all the classifiers and store the predictions for evaluating the performance. # # ### Coding Exercise # 1. Multi-thread/process the prediction pipeline. # 2. Find a redundant line of code in the cell below # 3. 
def predict_ensemble(sentence, threshold=0.1):
    """Run `sentence` through every loaded group classifier and pool labels.

    Relies on the module-level ``classifiers`` list populated earlier in the
    notebook (one flair ``TextClassifier`` per tag group).

    Args:
        sentence: a flair ``Sentence`` built from preprocessed text.
        threshold: multi-label confidence threshold applied to every
            classifier before predicting.

    Returns:
        list[str]: label values accumulated across all classifiers.
    """
    labels = []
    ## Iterate through each classifier or prediction
    for classifier in classifiers:
        # Apply the shared confidence threshold before predicting.
        classifier.multi_label_threshold = threshold
        # NOTE(review): flair's predict() attaches labels to `sentence` in
        # place; this loop appears to assume labels from earlier classifiers
        # persist on the sentence so later iterations re-append them —
        # confirm against the flair version in use, as this may duplicate
        # labels in the returned list.
        classifier.predict(sentence)
        for label in sentence.labels:
            ## Append labels from all classifiers
            labels.append(label.value)
    return labels
# + id="I-jgM2QLtgWy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="777e91db-e4fa-4e7f-9468-88e3f6f5d73f" executionInfo={"status": "ok", "timestamp": 1578938687702, "user_tz": -330, "elapsed": 1596, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} text1 = preprocess_text("How to handle memory locking ?") # create example sentence & tokenize sentence1 = Sentence(text1) # predict labels = predict_ensemble(sentence1,threshold=0.8) print(labels) # + id="BD3c-9ij_fow" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9e5ff8fe-fc83-4f0a-f07e-00255968655e" executionInfo={"status": "ok", "timestamp": 1578938681991, "user_tz": -330, "elapsed": 1626, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} text2 = preprocess_text("How to handle memory locking in java ?") # create example sentence & tokenize sentence2 = Sentence(text2) # predict labels = predict_ensemble(sentence2,threshold=0.8) print(labels) # + id="TA9km3X6_406" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="616376bb-d0c3-4dc0-938c-ecd5c7c35e6f" executionInfo={"status": "ok", "timestamp": 1578938692789, "user_tz": -330, "elapsed": 745, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} text3 = preprocess_text("How to handle memory locking in java python ?") # create example sentence & tokenize sentence3 = Sentence(text3) # predict labels = predict_ensemble(sentence3,threshold=0.8) print(labels) # + id="Se5TGVXBAOnH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} 
outputId="26192290-0c01-4506-a25b-577835d63e18" executionInfo={"status": "ok", "timestamp": 1578938709270, "user_tz": -330, "elapsed": 1595, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCNCK3WDEC63aFcnBVJrUB3M3cs4J2bImVRkNad8A=s64", "userId": "15805199744168269697"}} text4 = preprocess_text("This post is not about java") # create example sentence & tokenize sentence4 = Sentence(text4) # predict labels = predict_ensemble(sentence4,threshold=0.8) print(labels)
6_inference_pipeline.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # cell_metadata_filter: tags,-all # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 📈 Snorkel Intro Tutorial: Data Augmentation # In this tutorial, we will walk through the process of using *transformation functions* (TFs) to perform data augmentation. # Like the labeling tutorial, our goal is to train a classifier to YouTube comments as `SPAM` or `HAM` (not spam). # In the [previous tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb), # we demonstrated how to label training sets programmatically with Snorkel. # In this tutorial, we'll assume that step has already been done, and start with labeled training data, # which we'll aim to augment using transformation functions. # # + [markdown] tags=["md-exclude"] # * For more details on the task, check out the [labeling tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb) # * For an overview of Snorkel, visit [snorkel.org](https://snorkel.org) # * You can also check out the [Snorkel API documentation](https://snorkel.readthedocs.io/) # # - # Data augmentation is a popular technique for increasing the size of labeled training sets by applying class-preserving transformations to create copies of labeled data points. # In the image domain, it is a crucial factor in almost every state-of-the-art result today and is quickly gaining # popularity in text-based applications. # Snorkel models the data augmentation process by applying user-defined *transformation functions* (TFs) in sequence. # You can learn more about data augmentation in # [this blog post about our NeurIPS 2017 work on automatically learned data augmentation](https://snorkel.org/tanda/). # # The tutorial is divided into four parts: # 1. 
**Loading Data**: We load a [YouTube comments dataset](http://www.dt.fee.unicamp.br/~tiago//youtubespamcollection/). # 2. **Writing Transformation Functions**: We write Transformation Functions (TFs) that can be applied to training data points to generate new training data points. # 3. **Applying Transformation Functions to Augment Our Dataset**: We apply a sequence of TFs to each training data point, using a random policy, to generate an augmented training set. # 4. **Training a Model**: We use the augmented training set to train an LSTM model for classifying new comments as `SPAM` or `HAM`. # + [markdown] tags=["md-exclude"] # This next cell takes care of some notebook-specific housekeeping. # You can ignore it. # + tags=["md-exclude"] import os import random import numpy as np # Make sure we're running from the spam/ directory if os.path.basename(os.getcwd()) == "snorkel-tutorials": os.chdir("spam") # Turn off TensorFlow logging messages os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # For reproducibility seed = 0 os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(0) random.seed(0) # + [markdown] tags=["md-exclude"] # If you want to display all comment text untruncated, change `DISPLAY_ALL_TEXT` to `True` below. # + tags=["md-exclude"] import pandas as pd DISPLAY_ALL_TEXT = False pd.set_option("display.max_colwidth", 0 if DISPLAY_ALL_TEXT else 50) # + [markdown] tags=["md-exclude"] # This next cell makes sure a spaCy English model is downloaded. # If this is your first time downloading this model, restart the kernel after executing the next cell. # + tags=["md-exclude"] # Download the spaCy english model # ! python -m spacy download en_core_web_sm # - # ## 1. Loading Data # We load the Kaggle dataset and create Pandas DataFrame objects for each of the sets described above. # The two main columns in the DataFrames are: # * **`text`**: Raw text content of the comment # * **`label`**: Whether the comment is `SPAM` (1) or `HAM` (0). 
# # For more details, check out the [labeling tutorial](https://github.com/snorkel-team/snorkel-tutorials/blob/master/spam/01_spam_tutorial.ipynb). # + from utils import load_spam_dataset df_train, _, df_valid, df_test = load_spam_dataset(load_train_labels=True) # We pull out the label vectors for ease of use later Y_valid = df_valid["label"].values Y_train = df_train["label"].values Y_test = df_test["label"].values # - df_train.head() # ## 2. Writing Transformation Functions (TFs) # # Transformation functions are functions that can be applied to a training data point to create another valid training data point of the same class. # For example, for image classification problems, it is common to rotate or crop images in the training data to create new training inputs. # Transformation functions should be atomic e.g. a small rotation of an image, or changing a single word in a sentence. # We then compose multiple transformation functions when applying them to training data points. # # Common ways to augment text includes replacing words with their synonyms, or replacing names entities with other entities. # More info can be found # [here](https://towardsdatascience.com/data-augmentation-in-nlp-2801a34dfc28) or # [here](https://towardsdatascience.com/these-are-the-easiest-data-augmentation-techniques-in-natural-language-processing-you-can-think-of-88e393fd610). # Our basic modeling assumption is that applying these operations to a comment generally shouldn't change whether it is `SPAM` or not. # # Transformation functions in Snorkel are created with the # [`transformation_function` decorator](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.transformation_function.html#snorkel.augmentation.transformation_function), # which wraps a function that takes in a single data point and returns a transformed version of the data point. # If no transformation is possible, a TF can return `None` or the original data point. 
# If all the TFs applied to a data point return `None`, the data point won't be included in # the augmented dataset when we apply our TFs below. # # Just like the `labeling_function` decorator, the `transformation_function` decorator # accepts `pre` argument for `Preprocessor` objects. # Here, we'll use a # [`SpacyPreprocessor`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/preprocess/snorkel.preprocess.nlp.SpacyPreprocessor.html#snorkel.preprocess.nlp.SpacyPreprocessor). # + from snorkel.preprocess.nlp import SpacyPreprocessor spacy = SpacyPreprocessor(text_field="text", doc_field="doc", memoize=True) # + import names from snorkel.augmentation import transformation_function # Pregenerate some random person names to replace existing ones with # for the transformation strategies below replacement_names = [names.get_full_name() for _ in range(50)] # Replace a random named entity with a different entity of the same type. @transformation_function(pre=[spacy]) def change_person(x): person_names = [ent.text for ent in x.doc.ents if ent.label_ == "PERSON"] # If there is at least one person name, replace a random one. Else return None. if person_names: name_to_replace = np.random.choice(person_names) replacement_name = np.random.choice(replacement_names) x.text = x.text.replace(name_to_replace, replacement_name) return x # Swap two adjectives at random. @transformation_function(pre=[spacy]) def swap_adjectives(x): adjective_idxs = [i for i, token in enumerate(x.doc) if token.pos_ == "ADJ"] # Check that there are at least two adjectives to swap. if len(adjective_idxs) >= 2: idx1, idx2 = sorted(np.random.choice(adjective_idxs, 2, replace=False)) # Swap tokens in positions idx1 and idx2. 
def get_synonym(word, pos=None):
    """Get synonym for word given its part-of-speech (pos).

    Looks only at the first wordnet synset for (word, pos). Returns the
    synset's first lemma (with '_' separators turned into spaces), or None
    when wordnet has no synsets or the top lemma is the word itself.
    """
    synsets = wn.synsets(word, pos=pos)
    if not synsets:
        # wordnet knows no synonym sets for this word/pos combination.
        return None
    lemma_names = [lemma.name() for lemma in synsets[0].lemmas()]
    if lemma_names[0].lower() == word.lower():
        # The "synonym" is just the word itself — nothing useful to return.
        return None
    # Multi-word synonyms in wordnet use '_' as a separator, e.g. reckon_with.
    return lemma_names[0].replace("_", " ")


def replace_token(spacy_doc, idx, replacement):
    """Replace the token at position idx with `replacement`, returning text.

    Rebuilds the sentence from the spans before and after the token, so
    original spacing is normalised to single spaces at the seams.
    """
    before = spacy_doc[:idx].text
    after = spacy_doc[1 + idx:].text
    return " ".join([before, replacement, after])
@transformation_function(pre=[spacy])
def replace_adjective_with_synonym(x):
    """Replace one randomly chosen adjective in x.text with a wordnet synonym.

    Returns the modified data point, or None (implicitly) when the sentence
    has no adjectives or wordnet offers no usable synonym — the TF applier
    then skips this transformation for the data point.
    """
    # Get indices of adjective tokens in sentence.
    adjective_idxs = [i for i, token in enumerate(x.doc) if token.pos_ == "ADJ"]
    if adjective_idxs:
        # Pick random adjective idx to replace.
        idx = np.random.choice(adjective_idxs)
        synonym = get_synonym(x.doc[idx].text, pos="a")
        # If there's a valid adjective synonym, replace it. Otherwise, return None.
        if synonym:
            x.text = replace_token(x.doc, idx, synonym)
            return x
# We'll start with a [`RandomPolicy`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.RandomPolicy.html) # that samples `sequence_length=2` TFs to apply uniformly at random per data point. # The `n_per_original` argument determines how many augmented data points to generate per original data point. # + from snorkel.augmentation import RandomPolicy random_policy = RandomPolicy( len(tfs), sequence_length=2, n_per_original=2, keep_original=True ) # - # In some cases, we can do better than uniform random sampling. # We might have domain knowledge that some TFs should be applied more frequently than others, # or have trained an [automated data augmentation model](https://snorkel.org/tanda/) # that learned a sampling distribution for the TFs. # Snorkel supports this use case with a # [`MeanFieldPolicy`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.MeanFieldPolicy.html), # which allows you to specify a sampling distribution for the TFs. # We give higher probabilities to the `replace_[X]_with_synonym` TFs, since those provide more information to the model. # + from snorkel.augmentation import MeanFieldPolicy mean_field_policy = MeanFieldPolicy( len(tfs), sequence_length=2, n_per_original=2, keep_original=True, p=[0.05, 0.05, 0.3, 0.3, 0.3], ) # - # To apply one or more TFs that we've written to a collection of data points according to our policy, we use a # [`PandasTFApplier`](https://snorkel.readthedocs.io/en/master/packages/_autosummary/augmentation/snorkel.augmentation.PandasTFApplier.html) # because our data points are represented with a Pandas DataFrame. 
# + tags=["md-exclude-output"] from snorkel.augmentation import PandasTFApplier tf_applier = PandasTFApplier(tfs, mean_field_policy) df_train_augmented = tf_applier.apply(df_train) Y_train_augmented = df_train_augmented["label"].values # - print(f"Original training set size: {len(df_train)}") print(f"Augmented training set size: {len(df_train_augmented)}") # We have almost doubled our dataset using TFs! # Note that despite `n_per_original` being set to 2, our dataset may not exactly triple in size, # because sometimes TFs return `None` instead of a new data point # (e.g. `change_person` when applied to a sentence with no persons). # If you prefer to have exact proportions for your dataset, you can have TFs that can't perform a # valid transformation return the original data point rather than `None` (as they do here). # ## 4. Training A Model # # Our final step is to use the augmented data to train a model. We train an LSTM (Long Short Term Memory) model, which is a very standard architecture for text processing tasks. # + [markdown] tags=["md-exclude"] # The next cell makes Keras results reproducible. You can ignore it. # + tags=["md-exclude"] import tensorflow as tf session_conf = tf.compat.v1.ConfigProto( intra_op_parallelism_threads=1, inter_op_parallelism_threads=1 ) tf.compat.v1.set_random_seed(0) sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf) tf.compat.v1.keras.backend.set_session(sess) # - # Now we'll train our LSTM on both the original and augmented datasets to compare performance. 
# + tags=["md-exclude-output"] from utils import featurize_df_tokens, get_keras_lstm, get_keras_early_stopping X_train = featurize_df_tokens(df_train) X_train_augmented = featurize_df_tokens(df_train_augmented) X_valid = featurize_df_tokens(df_valid) X_test = featurize_df_tokens(df_test) def train_and_test( X_train, Y_train, X_valid=X_valid, Y_valid=Y_valid, X_test=X_test, Y_test=Y_test, num_buckets=30000, ): # Define a vanilla LSTM model with Keras lstm_model = get_keras_lstm(num_buckets) lstm_model.fit( X_train, Y_train, epochs=25, validation_data=(X_valid, Y_valid), callbacks=[get_keras_early_stopping(5)], verbose=0, ) preds_test = lstm_model.predict(X_test)[:, 0] > 0.5 return (preds_test == Y_test).mean() acc_augmented = train_and_test(X_train_augmented, Y_train_augmented) acc_original = train_and_test(X_train, Y_train) # - print(f"Test Accuracy (original training data): {100 * acc_original:.1f}%") print(f"Test Accuracy (augmented training data): {100 * acc_augmented:.1f}%") # So using the augmented dataset indeed improved our model! # There is a lot more you can do with data augmentation, so try a few ideas # our on your own!
spam/02_spam_data_augmentation_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # [Airbnb](https://www.airbnb.com/a/?af=43720035&c=.pi0.pk25650614176_308605552029_c_12026464216&sem_position=1t1&sem_target=kwd-12026464216&location_of_interest=&location_physical=9003566&ghost=true&gclid=CjwKCAjwnrjrBRAMEiwAXsCc457x4tEYXmnyq0dHqOnQKFupepnawnEhBV-VFrCi8rkSyYXHKX9RHBoC2aYQAvD_BwE) is a marketplace for short term rentals that allows you to list part or all of your living space for others to rent. You can rent everything from a room in an apartment to your entire house on AirBnB. Because most of the listings are on a short-term basis, AirBnB has grown to become a popular alternative to hotels. The company itself has grown from it's founding in 2008 to a 30 billion dollar valuation in 2016 and is currently worth more than any hotel chain in the world. # # One challenge that hosts looking to rent their living space face is determining the optimal nightly rent price. In many areas, renters are presented with a good selection of listings and can filter on criteria like price, number of bedrooms, room type and more. Since AirBnB is a marketplace, the amount a host can charge on a nightly basis is closely linked to the dynamics of the marketplace. # # As a host, if we try to charge above market price for a living space we'd like to rent, then renters will select more affordable alternatives which are similar to ours. If we set our nightly rent price too low, we'll miss out on potential revenue. # # One strategy we could use is to: # # * Find a few listings that are similar to ours, # * Average the listed price for the ones most similar to ours, # * Set our listing price to this calculated average price. 
# # The link to dataset we will be using is [here](C:\Users\miself\Desktop\my_dataset\Airbnb) import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline df = pd.read_csv('listings.csv') airbnb = df[['host_response_rate','host_acceptance_rate','host_listings_count','latitude','longitude','city','zipcode','state','accommodates','room_type','bedrooms','bathrooms','beds','price','cleaning_fee','security_deposit','minimum_nights','maximum_nights','number_of_reviews']] # For our purpose we are going to keep only those rows that are useful for our analysis. airbnb.head() # + np.random.seed(1) #Randomize order of row in df shuffled_ind = np.random.permutation(airbnb.index) airbnb = airbnb.reindex(shuffled_ind) #Sorting values according to accommodation airbnb.sort_values('accommodates') # - airbnb.isnull().sum() #Cleaning Price column airbnb['price'] = airbnb['price'].str.replace(',','').str.replace('$','').astype(float) airbnb['price'].mean() airbnb.isnull().sum() # These valuses doesn't make sense for analyzing our data, so we are going to remove these columns. # * room_type,states are non-mumerical values # * latitude, longitude, zipcode are random values # * host_response_rate, host_acceptance_rate, host_listings_count has nothing to do with accomodation # airbnb = airbnb.drop(['host_response_rate', 'host_acceptance_rate', 'host_listings_count','room_type', 'latitude', 'longitude', 'city', 'zipcode', 'state'],axis = 1) # There are too many values in cleaning_fee and security_deposit that are missing and its impossible to clean them, so we will remove them as well. airbnb = airbnb.drop(['cleaning_fee', 'security_deposit'], axis = 1) # Now we are going to drop all the rows with missing values. airbnb = airbnb.dropna() airbnb.isnull().sum() # Normalize all columnns to range from 0 to 1 except the target column. 
# Keep an unscaled copy of the target so it is not standardized with the features.
price_col = airbnb['price']

# Z-score standardization of every column, then restore the raw target.
num_airbnb = (airbnb - airbnb.mean())/ (airbnb.std())
num_airbnb['price'] = price_col

n_airbnb = num_airbnb.copy()
n_airbnb.head()

from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error

# # Predicting Model

# +
# First k-NN model: k=5, brute-force neighbor search, two features.
knn = KNeighborsRegressor(n_neighbors = 5, algorithm ='brute')

# Holdout split on the (already shuffled) frame.
# NOTE(review): 2792 is a hard-coded row index -- presumably ~75% of the
# cleaned data; confirm against len(n_airbnb).
train_df = n_airbnb.iloc[0:2792]
test_df = n_airbnb.iloc[2792:]

train_features = train_df[['accommodates','bathrooms']]
train_target = train_df['price']

knn.fit(train_features, train_target)
predictions = knn.predict(test_df[['accommodates','bathrooms']])

# RMSE on the holdout set (same units as price).
mse = mean_squared_error(test_df['price'], predictions)
rmse = np.sqrt(mse)
rmse
# -

# ## Tuning models for different k values

# Sweep k = 1..20 with four features and record the holdout MSE for each.
k_values = list(range(1,21))
mse_values = []

for val in k_values:
    knn = KNeighborsRegressor(n_neighbors = val,algorithm= 'brute')
    knn.fit(train_df[['accommodates','bedrooms','bathrooms','number_of_reviews']], train_df['price'])
    prediction = knn.predict(test_df[['accommodates','bedrooms','bathrooms','number_of_reviews']])
    mse = mean_squared_error(test_df['price'], prediction)
    mse_values.append(mse)

plt.scatter(k_values, mse_values)

# From the above graph it can be seen that k value of 6 gives us the minimum mse.
#
# # Univariate Model

# +
def knn_train_test(feature, target, df):
    """Fit k-NN on a single feature for k in {1,3,5,7,9}; return {k: test RMSE}."""
    # Re-shuffle and split 50/50 into train/test.
    shuffled_index = np.random.permutation(df.index)
    rand_df = df.reindex(shuffled_index)
    last_train_row = int(len(rand_df)/2)
    train_df = rand_df[0:last_train_row]
    test_df = rand_df[last_train_row:]
    k_values = [1,3,5,7,9]
    k_rmse = {}
    for k in k_values:
        knn = KNeighborsRegressor(n_neighbors= k)
        knn.fit(train_df[[feature]], train_df[target])
        predict = knn.predict(test_df[[feature]])
        mse = mean_squared_error(test_df[target], predict)
        rmse = np.sqrt(mse)
        k_rmse[k] = rmse
    return k_rmse

k_rmse_result = {}
# BUG FIX: the feature list was bound to `features_col` but the loop read
# `features_cols`, raising NameError; use one consistent name.
feature_cols = n_airbnb.columns.drop('price')
for col in feature_cols:
    rmse_val = knn_train_test(col,'price', n_airbnb)
    k_rmse_result[col] = rmse_val
# -

k_rmse_result

# One RMSE-vs-k curve per feature.
for k, v in k_rmse_result.items():
    x = list(v.keys())
    y = list(v.values())
    plt.plot(x,y)
    plt.xlabel('k_values')
    plt.ylabel('RMSE')

# +
# Average RMSE across k for each feature; lowest averages are the best features.
features_avg_rmse = {}
for k,v in k_rmse_result.items():
    avg_rmse = (np.mean(list(v.values())))
    features_avg_rmse[k] = avg_rmse
series_avg_rmse = pd.Series(features_avg_rmse)
series_avg_rmse.sort_values()
# -

# # Multivariate Model

# +
def knn_train_test(feature, target, df):
    """Fit a default k-NN (k=5) on a list of features; return the test RMSE."""
    shuffled_index = np.random.permutation(df.index)
    rand_df = df.reindex(shuffled_index)
    last_train_col = int(len(rand_df)/2)
    train_df = rand_df[0:last_train_col]
    test_df = rand_df[last_train_col:]
    knn = KNeighborsRegressor()
    knn.fit(train_df[feature], train_df[target])
    predict = knn.predict(test_df[feature])
    mse = mean_squared_error(test_df[target], predict)
    rmse = np.sqrt(mse)
    return rmse

rmse_results = {}

two_best_features = ['bedrooms','maximum_nights']
rmse = knn_train_test(two_best_features, 'price', n_airbnb)
rmse_results['two_best_features'] = rmse

three_best_features = ['bedrooms','maximum_nights','number_of_reviews']
rmse = knn_train_test(three_best_features,'price',n_airbnb)
rmse_results['three_best_features'] = rmse

four_best_features = ['bedrooms','maximum_nights','number_of_reviews', 'accommodates']
rmse = knn_train_test(four_best_features, 'price', n_airbnb)
rmse_results['four_best_features'] = rmse

five_best_features = ['bedrooms','maximum_nights','number_of_reviews', 'accommodates','bathrooms']
rmse = knn_train_test(five_best_features, 'price', n_airbnb)
rmse_results['five_best_features'] = rmse

rmse_results
# -

# +
def knn_train_test(feature, target, df):
    """Sweep k = 1..24 for a list of features; return {k: test RMSE}."""
    # Randomizing the index
    shuffled_df = np.random.permutation(df.index)
    rand_df = df.reindex(shuffled_df)
    # Dividing number of rows in two halves
    last_train_col = int(len(rand_df)/2)
    # Splitting data into train and test
    train_df = rand_df[0:last_train_col]
    test_df = rand_df[last_train_col:]
    # BUG FIX: the sweep was assigned to `K_values` (capital K) while the
    # loop iterated `k_values`, silently falling back to the stale global
    # range(1, 21) from the earlier tuning cell; use one consistent local.
    k_values = list(range(1,25))
    k_rmse = {}
    for k in k_values:
        knn = KNeighborsRegressor(n_neighbors= k)
        knn.fit(train_df[feature], train_df[target])
        predict = knn.predict(test_df[feature])
        mse = mean_squared_error(test_df[target],predict)
        rmse = np.sqrt(mse)
        k_rmse[k] = rmse
    return k_rmse

k_rmse_results = {}

three_best_features = ['bedrooms','maximum_nights','number_of_reviews']
rmse = knn_train_test(three_best_features,'price',n_airbnb)
k_rmse_results['three_best_features'] = rmse

four_best_features = ['bedrooms','maximum_nights','number_of_reviews', 'accommodates']
rmse = knn_train_test(four_best_features, 'price', n_airbnb)
k_rmse_results['four_best_features'] = rmse

five_best_features = ['bedrooms','maximum_nights','number_of_reviews', 'accommodates','bathrooms']
rmse = knn_train_test(five_best_features, 'price', n_airbnb)
k_rmse_results['five_best_features'] = rmse

k_rmse_results
# -

# Now Using KFold to analyse our data.
# + from sklearn.model_selection import KFold, cross_val_score num_folds = [3, 5, 7, 9, 10, 11, 13, 15, 17, 19, 21, 23] for fold in num_folds: kf = KFold(fold,shuffle = True, random_state= 1) knn = KNeighborsRegressor() mse = cross_val_score(knn, n_airbnb[['accommodates']], n_airbnb['price'], cv = kf, scoring= 'neg_mean_squared_error') rmse = np.sqrt(np.abs(mse)) avg_rmse = np.mean(rmse) avg_std = np.std(rmse) print(str(fold), 'folds:', "avg_rmse:", str(avg_rmse), "avg_std:", str(avg_std)) # - # As it can be seen 3 folds gives us the lowest avg rmse and avg std deviation.
Airbnb_marketplace_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/SwetaSengupta/DS-Unit-2-Linear-Models/blob/master/Sweta_Sengupta__LS_DS_214_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9mlR1lEpgB5T" colab_type="text" # Lambda School Data Science # # *Unit 2, Sprint 1, Module 4* # # --- # + [markdown] colab_type="text" id="7IXUfiQ2UKj6" # # Logistic Regression # # # ## Assignment 🌯 # # You'll use a [**dataset of 400+ burrito reviews**](https://srcole.github.io/100burritos/). How accurately can you predict whether a burrito is rated 'Great'? # # > We have developed a 10-dimensional system for rating the burritos in San Diego. ... Generate models for what makes a burrito great and investigate correlations in its dimensions. # # - [ ] Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later. # - [ ] Begin with baselines for classification. # - [ ] Use scikit-learn for logistic regression. # - [ ] Get your model's validation accuracy. (Multiple times if you try multiple iterations.) # - [ ] Get your model's test accuracy. (One time, at the end.) # - [ ] Commit your notebook to your fork of the GitHub repo. # # # ## Stretch Goals # # - [ ] Add your own stretch goal(s) ! # - [ ] Make exploratory visualizations. # - [ ] Do one-hot encoding. # - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html). # - [ ] Get and plot your coefficients. # - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html). 
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys

# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/'
    # !pip install category_encoders==2.*

# If you're working locally:
else:
    DATA_PATH = '../data/'

# + id="hbeaAqfDgB5h" colab_type="code" colab={}
# Load data downloaded from https://srcole.github.io/100burritos/
import pandas as pd
df = pd.read_csv(DATA_PATH+'burritos/burritos.csv')

# + id="vfUGyMcagB5l" colab_type="code" colab={}
# Derive binary classification target:
# We define a 'Great' burrito as having an
# overall rating of 4 or higher, on a 5 point scale.
# Drop unrated burritos.
df = df.dropna(subset=['overall'])
df['Great'] = df['overall'] >= 4

# + id="iIS1trbwgB5p" colab_type="code" colab={}
# Clean/combine the Burrito categories
df['Burrito'] = df['Burrito'].str.lower()
california = df['Burrito'].str.contains('california')
asada = df['Burrito'].str.contains('asada')
surf = df['Burrito'].str.contains('surf')
carnitas = df['Burrito'].str.contains('carnitas')
df.loc[california, 'Burrito'] = 'California'
df.loc[asada, 'Burrito'] = 'Asada'
df.loc[surf, 'Burrito'] = 'Surf & Turf'
df.loc[carnitas, 'Burrito'] = 'Carnitas'
df.loc[~california & ~asada & ~surf & ~carnitas, 'Burrito'] = 'Other'

# + id="O4fKRPwrgB5u" colab_type="code" colab={}
# Drop some high cardinality categoricals
df = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL', 'Neighborhood'])

# + id="AUoHTO6mgB5x" colab_type="code" colab={}
# Drop some columns to prevent "leakage"
df = df.drop(columns=['Rec', 'overall'])

# + id="Etmr23zfgB54" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} outputId="64fc468d-a316-4053-fff8-2ecf65e4a8c4"
print(df.shape)
df.head()

# + id="LQXjWDNhl3ic" colab_type="code" colab={}
# BUG FIX: year/month derivation moved up -- it previously ran *after*
# df.year.value_counts(), which raises AttributeError when the notebook
# is executed top to bottom.
df['year'] = pd.DatetimeIndex(df['Date']).year
df['month'] = pd.DatetimeIndex(df['Date']).month

# + id="qXjJKgSpiZLd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="50dfe95c-f237-4812-eed9-e92a68cb4064"
df.year.value_counts()

# + [markdown] id="ZAHwLvDWiaMo" colab_type="text"
# ###Train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.

# + id="SJawj8ehl3RB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6ced1c13-a733-4f03-ffaa-66762a9bbc5e"
condition = df['year']<=2016
train = df[condition]
train.shape

# + id="Gr0BVxpzl3LG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="94f87496-e0b1-498f-9525-e9227154c4d1"
condition_1 = df['year']==2017
val=df[condition_1]
val.shape

# + id="pX5xy7HzrDGy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d191a8bd-8ff5-40af-ff04-b1d5f1516fc3"
condition_2 = df['year']>=2018
test = df[condition_2]
test.shape

# + [markdown] id="tUC9YJ7Mry3Q" colab_type="text"
# ###Baselines for classification

# + id="3K8cHjVYr2Y7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="1afabe73-276b-4974-a450-d5c765ee5062"
# Determine majority class
target='Great'
y_train = train[target]
y_train.value_counts(normalize= True)

# + id="pGMVivocsyS_" colab_type="code" colab={}
# guessing the majority class for every prediction
majority_class = y_train.mode()[0]
y_pred_train = [majority_class]*len(y_train)

# + id="tsp9YM1RtCnn" colab_type="code" colab={}
# baseline accuracy if we guessed the majority class for every prediction

# + id="zjhbO5QhtCjm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="aa7fb09d-b826-40cf-9d8d-795e1080bd24"
from sklearn.metrics import accuracy_score
accuracy_score(y_train, y_pred_train)

# + id="Nss9_nO7tCfd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3cf38b06-48d4-48d4-d247-7692418fdf64"
# Same majority-class baseline, evaluated on the validation set.
y_val = val[target]
y_pred = [majority_class]*len(y_val)
accuracy_score(y_val, y_pred)

# + [markdown] id="Jsf2sczHtVxS" colab_type="text"
# ###Linear regression

# + id="59fn0UsatVcN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 334} outputId="fbb046e9-b48a-4b3b-b7af-e6138e35515e"
train.describe()

# + id="664SgA0btVWD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="534969a9-ed22-49d9-902a-76a72005572f"
# 1. Import estimator class
from sklearn.linear_model import LinearRegression

# 2. Instantiate this class
linear_reg = LinearRegression()

# 3. Arrange X feature matrices (already did y target vectors)
features = ['Yelp', 'Cost', 'Hunger']
X_train = train[features]
X_val = val[features]

# Impute missing values
from sklearn.impute import SimpleImputer
imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train)
X_val_imputed = imputer.transform(X_val)

# 4. Fit the model
# NOTE(review): the target is the boolean 'Great' column, so this linear
# model outputs a continuous 0-1 score rather than a class label.
linear_reg.fit(X_train_imputed, y_train)

# 5. Apply the model to new data.
# The predictions look like this ...
linear_reg.predict(X_val_imputed) # + id="VJmX-I0XtVTO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ec55d7df-aa16-49b4-eb89-0b1c89530926" features # + id="Ft0AnSJ2tVP2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="991b14c0-04c3-4b04-f2aa-ed7b35be2a29" # Get coefficients pd.Series(linear_reg.coef_, features) # + id="BOPjHCWfu4Ub" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="23fa7a08-2077-41d6-bb82-21963332df6e" test_case = [[3.5, 7.00, 3]] linear_reg.predict(test_case) # + [markdown] id="GQ2R9i2ovUcC" colab_type="text" # ###Logistic Regression # + id="jsSOkwx0vZ_u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="34ece4f2-2143-4a18-8f06-f43f2668bc48" from sklearn.linear_model import LogisticRegression log_reg = LogisticRegression() log_reg.fit(X_train_imputed, y_train) print('Validation Accuracy', log_reg.score(X_val_imputed, y_val)) # + id="5lZH1cYQvaF5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="54011e33-2ca9-4824-d928-d63a8ce50ffc" # The predictions look like this log_reg.predict(X_val_imputed) # + id="lW8sSv4DvaDX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="379d2642-a459-4ae2-f6ed-9774fbea090c" log_reg.predict(test_case) # + id="Ef5Z_ZWTvvez" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="86e34213-7473-4433-8cfd-de131d9a4ac7" log_reg.predict_proba(test_case)[0] # + id="XqcBajPvv0_2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0d4bf15a-b063-4ddf-de09-838a80b79880" # What's the math? 
log_reg.coef_

# + id="pkohJFxXv4DL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ff3ab5f3-942d-49a8-b6b7-76952f2dddda"
log_reg.intercept_

# + [markdown] id="mBqth18TwKT4" colab_type="text"
# ###Validation accuracy.

# + id="ymJKw25bxCz5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 148} outputId="f622647a-5f80-4338-aa8d-a5524540b5de"
train.head(2)

# + id="YG8EvT-wv3_n" colab_type="code" colab={}
# Use every rating dimension plus the burrito category.
features = ['Burrito', 'Yelp','Google','Chips','Cost', 'Hunger','Tortilla','Temp', 'Meat', 'Fillings','Meat:filling','Uniformity','Salsa','Synergy','Wrap']
target ="Great"

# + id="xtyIJuKuv379" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="97bfc5e7-4da7-49e0-ff90-9393a5add738"
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_train.shape, X_val.shape

# + id="oDyDYBHav34d" colab_type="code" colab={}
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegressionCV

# + id="qWCyR936yXoZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="796984ff-e956-4434-c2ed-b87b694991cc"
# stretch goal - one hot encoding Burrito column
encoder = ce.one_hot.OneHotEncoder(use_cat_names=True)
X_train_enc = encoder.fit_transform(X_train)
X_val_enc = encoder.transform(X_val)
X_train_enc.shape, X_val_enc.shape

# + id="Iz7bJwGyyb_1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="ee31ecee-c623-46c7-f9c3-00d072a2f9c2"
X_val_enc.head()

# + id="yiPQ7TrCyky3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e950aa3b-b258-44eb-cac8-c15d668a9133"
# filling the missing values (imputer fit on the training set only)
imputer = SimpleImputer(strategy='mean')
X_train_imp = imputer.fit_transform(X_train_enc)
X_val_imp = imputer.transform(X_val_enc)
X_train_imp.shape, X_val_imp.shape

# + id="S-3WeeMQykrK" colab_type="code" colab={}
# SimpleImputer returns an ndarray, therefore converting back to a DataFrame
X_train_imp = pd.DataFrame(X_train_imp, columns=X_train_enc.columns)
X_val_imp = pd.DataFrame(X_val_imp, columns = X_val_enc.columns)

# + id="PlAYvlIXykng" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 436} outputId="01c9cbec-412e-458a-b9e6-8b3f513cf5dc"
X_val_imp

# + id="nA7HM_mZ2Wg3" colab_type="code" colab={}
# Stretch goals - Feature scaling using standard scaler

# + id="bPnCdtATykc1" colab_type="code" colab={}
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train_imp)
X_val_sc = scaler.transform(X_val_imp)

# + id="D2WBx-R71WtL" colab_type="code" colab={}
# StandardScaler returns an ndarray, therefore converting to a DataFrame
X_train_sc = pd.DataFrame(X_train_sc, columns=X_train_enc.columns)
X_val_sc = pd.DataFrame(X_val_sc, columns = X_val_enc.columns)

# + id="gARP9nxp1aMN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="f99a618e-1da1-41cd-c729-739c9b792cd1"
X_train_sc.head()

# + [markdown] id="d2nwgvos3klD" colab_type="text"
# ##Logistic regression CV
#

# + id="NXfu2Oew1e1b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="1db3d9e3-2c3b-4edd-96fd-248f7d36ab84"
model = LogisticRegressionCV()
model.fit(X_train_sc, y_train)

# + id="fh4wx4ea1hzb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="40fdf72a-4c2b-4c3d-b1c1-56f42e7d2f09"
print(f'Validation score: {model.score(X_val_sc, y_val)}')

# + id="dom3fopHXDcH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7edf632b-7a34-4c80-be74-328a89d11360"
# testing with Cs=9 (still fit on the training set only)
model = LogisticRegressionCV(Cs=9)
model.fit(X_train_sc, y_train)
print(f'Validation score: {model.score(X_val_sc, y_val)}')

# + [markdown] id="gncjBOWk1t9D" colab_type="text"
# #Stretch Goals

# + [markdown] id="RMF684_y1vjL" colab_type="text"
# ##1)Plot coefficients:

# + id="s3ILfQOT1znJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="c082a85e-bbec-43b9-dfb6-a181e495a261"
coefs = pd.Series(model.coef_[0], X_train_sc.columns)
coefs

# + id="BMZVqvIe2AuD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="3c021ca5-a6f1-455f-abe8-1ef4a0a87f5e"
coefs.sort_values().plot.barh()

# + [markdown] id="0Rh1kkcvQYCS" colab_type="text"
# ###Test Accuracy

# + id="FF14dc5aHSvA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9b8e7da0-61fa-4294-d7a0-a8ed019a1aa6"
# Transform the test set with the encoder/imputer/scaler fit on train.
X_test = test[features]
X_test_enc = encoder.transform(X_test)
X_test_imp = imputer.transform(X_test_enc)
X_test_scaled = scaler.transform(X_test_imp)
X_test_scaled

# + id="EVCVwO4DQ7i2" colab_type="code" colab={}
target = 'Great'
y_test = test[target]

# + id="UarwbZ0WQ7Q6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7f14dbae-fa18-44a8-e239-531d615a0164"
# BUG FIX: the original refit LogisticRegressionCV on the *test* set and
# scored it on the same data, leaking test labels and inflating the
# reported accuracy. Score the train-fitted model instead (test once,
# at the end, as the assignment specifies).
print(f'Test accuracy score: {model.score(X_test_scaled, y_test)}')

# + [markdown] id="NwvnuE_aQ3Cz" colab_type="text"
# ##Kaggle Submission

# + id="xMfdNHdfHYrh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="37130ee4-27bb-4091-f951-3a5d6f091838"
y_pred = model.predict(X_test_scaled)
y_pred

# + id="0caOxzmXIgSO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f31ac831-67e5-4a7a-818a-db68800c0dbd"
submission = test[['Cost']].copy()
submission

# + id="ub7n9AvXHkpz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="25fe7afa-9e91-46b5-8e43-c1eba33b015b"
submission['Great'] = y_pred
submission

# + id="3xQO_EaQJFz7" colab_type="code" colab={}

# + id="47yfzEs1JFv6" colab_type="code" colab={}
Sweta_Sengupta__LS_DS_214_assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # %run ../setup/nb_setup # # Orbital and Galactic Evolution 1: The Influence of Non-axisymmetric Structures and Perturbers # # Author(s): <NAME> # # # ## Introduction # # Adrian's orbit workshop already covered a lot of the theory of orbits, orbit integration and potentials, so here we just assume this is known. You should do his first if you haven't! # # There are (to my knowledge) three main Galactic dynamics packages for python: Galpy, Gala and Agama. They all have their individual strengths and weaknesses which will be covered by Adrian on Friday! At their core, they can all handle various gravitational potentials, orbit integration and parameter estimation. # # https://github.com/jobovy/galpy (Bovy 2015, ApJS, 216, 29) # # https://github.com/adrn/gala (Price-Whelan 2017, The Journal of Open Source Software, 2, 388) # # https://github.com/GalacticDynamics-Oxford/Agama (Vasiliev 2019, MNRAS, 482, 1525) # # This workshop/tutorial will be in the galactic dynamics package Galpy, to give some variety in the options available compared to yesterday. Feel to convert any of the cells to your preferred package if you are already an experienced user # # While this tutorial is in Galpy, it's worth remembering we have the author of Gala here at the school (Adrian) so you may want to make use of him in your later project work! # # Finally, there are a few '$\textbf{exercises}$' and a few '$\textbf{stretch goal exercises}$' scattered below. Please have a go at the 'exercises' as you go! They should be directly related to examples from other cells. # # It's OK if you don't finish everything, hopefully this will provide some useful examples for your future work. 
It's also OK if you get stuck, have a go yourself at things first, but ask for help if it's not working. Your mentors and your peers will be happy to help! # # If you do finish the whole notebook go back and pick from the stretch goals, whatever you find most interesting. They will require more effort, and some new coding! # # If you manage to finish the stretch goals, wow well done you! Please go and help your peers :) # # # ### Notebook Setup and Package Imports import numpy as np import matplotlib.pyplot as plt from galpy.util import bovy_coords, bovy_conversion, multi, bovy_plot from galpy.orbit import Orbit from galpy.potential import ( MovingObjectPotential, PlummerPotential, ChandrasekharDynamicalFrictionForce, plotRotcurve, vcirc, ) from astropy import units as u # ## Example: Potentials # # Like Gala, Galpy has several pre-built potentials for you to make use of # # (https://docs.galpy.org/en/v1.6.0/reference/potential.html#potential-api) # # To start we need a potential for the Milky Way. The standard 'Milky Way like' potential in Galpy is MWPotential2014 which is fit to various observational constraints # # Several other Milky Way like potenitals exist, and can be found at https://docs.galpy.org/en/v1.6.0/reference/potential.html#milky-way-like-potentials # # But, for simplicity we'll mainly stick with MWPotential2014 (and varients of it) in this notebook. MWPotential2014 is a list of potential components comprised of a Power Sphere bulge, a Miyamoto Nagai disc and a NFW halo potential # # We can just import the pre-built version: from galpy.potential import MWPotential2014 # We can plot the rotation curve for the potential in full, or examine the individual components with their list index. 
# # Note: Galpy uses internal length units of $R_0$, which is the Solar radius (set to 8 kpc as default) and $v_0$, which is the circular velocity at $R_0$ (set to 220 km s$^{-1}$ by default), such that the Sun is at $R/R_0=1$, and $v_{\mathrm{circ}}/v_0=1$ at $R/R_0=1$. # + plotRotcurve(MWPotential2014, label="MWPotential2014") plotRotcurve(MWPotential2014[0], overplot=True, label="Bulge") plotRotcurve(MWPotential2014[1], overplot=True, label="Disc") plotRotcurve(MWPotential2014[2], overplot=True, label="Halo") plt.legend() print( vcirc(MWPotential2014, 1) ) # Get the circular velocity as R/R0=1 (the Solar radius) shows v_circ=1 print( vcirc(MWPotential2014, 2) ) # At 2 Rsun (16 kpc), it's less owing to the falling rotation curve # - # Alternatively, we can construct out own "Milky Way-like" potential by combining different components. # # Note that the normalize values should add up to 1. such that the circular velocity will be 1 at R=1. # # Here's an example with a flat rotation curve, feel free to change the parameters below and see what you get. 
We won't use this one again so don't worry if you break it ;) The docs for each part of the potential are below, so you can see what the parameters will change: # # https://docs.galpy.org/en/v1.6.0/reference/potentialmiyamoto.html # # https://docs.galpy.org/en/v1.6.0/reference/potentialnfw.html # # https://docs.galpy.org/en/v1.6.0/reference/potentialhernquist.html # + from galpy.potential import MiyamotoNagaiPotential, NFWPotential, HernquistPotential mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=0.6) nfp = NFWPotential(a=4.5, normalize=0.35) hp = HernquistPotential(a=0.6 / 8, normalize=0.05) pot = hp + mp + nfp # Or equivalently pot=[mp,np,hp] # + plotRotcurve(pot, label="Our potential") plotRotcurve(pot[0], overplot=True, label="Bulge") plotRotcurve(pot[1], overplot=True, label="Disc") plotRotcurve(pot[2], overplot=True, label="Halo") plt.legend() print(vcirc(pot, 1)) print( vcirc(pot, 2) ) # For my values, it's now closer to flat, as you can see from the plot below. # - # You can also evaluate the potential, or density values at specific points, and visualise them # + from galpy.potential import evaluatePotentials, plotPotentials, plotDensities print(evaluatePotentials(MWPotential2014, 1.0, 0.0)) # Evaluate at R=1, z=0 print(evaluatePotentials(pot, 1.0, 0.0)) plotPotentials(MWPotential2014, rmin=0.01, aspect="equal") plotDensities( MWPotential2014, rmin=0.1, zmax=0.25, zmin=-0.25, nrs=101, nzs=101, aspect="equal" ); # - # ## Example: Orbit integration # # Now that we have a potential for our galaxy, we can integrate some orbits in it. 
# # You can set orbit parameters yourself in an array of [$R,v_R,v_{\phi},z,v_z,\phi$] # # # # But, as above, they must be in natural units, so divide physical values by 8 or 220 for position or velocities # # There are many other ways to initialise an orbit instance in varying coordinate systems (including astropy's skycoord), see doc's below # # (https://docs.galpy.org/en/v1.6.0/reference/orbitinit.html) # # (https://docs.galpy.org/en/v1.6.0/orbit.html#initialization-from-observed-coordinates-or-astropy-skycoord) # + # For example, the coordinates of the Sagittarius dwarf galaxy from Vasiliev (2020) in cylindrical coordinates R = 17.68 vR = 232.07 vphi = 57.7 z = -6.5 vz = 209.0 phi = 3.0 ro = 8.0 vo = 220.0 Sgr_Vas = Orbit(vxvv=[R / ro, vR / vo, vphi / vo, z / ro, vz / vo, phi]) # - # Alternatively, you can initialise orbits for any named object from the Simbad data service # # (Note that Orbits initialized using Orbit.from_name have physical output turned on by default, so methods will return outputs in physical units unless you do Sgr_sim.turn_physical_off() which we won't here.) Sgr_Sim = Orbit.from_name( "SDG" ) # Where, for example, SDG is the Sagittarius dwarf galaxy # Now, can we integrate them backwards in time and see where Sagittarius came from?... # # We need a 'time' array to tell us how long to go for, and the timesteps, and we need a potential in which to calculate the orbits # + # Let's go back 4 Gyr, in 1000 steps. 
Again, we need to convert into internal Galpy units time = np.linspace(0, -4.0 / bovy_conversion.time_in_Gyr(220.0, 8.0), 1000) Sgr_Vas.integrate(time, MWPotential2014) Sgr_Sim.integrate(time, MWPotential2014) # - # And let's take a look at the orbits: # # You can plot a variety of orbital parameters (see link to docs) # # (https://docs.galpy.org/en/v1.6.0/reference/orbit.html) # + # An example of the orbits in R and z # Note that the 'from name' routine is (inconsistently) already in physical units plt.figure() plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Vas.r(time) * ro, label="Vasiliev (2020)", color="blue", ) plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Sim.r(time), label="Simbad", color="red", ) plt.xlim(-4, 0) plt.ylim(0, 110) plt.xlabel(r"$\mathrm{Time\ (Gyr)}$") plt.ylabel(r"$R\ (\mathrm{kpc})$") plt.legend() plt.show() plt.figure() plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Vas.z(time) * ro, label="Vasiliev (2020)", color="blue", ) plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Sim.z(time), label="Simbad", color="red", ) plt.plot([-7, 7], [0, 0], ls=":", color="k") plt.xlim(-4, 0) plt.ylim(-100, 100) plt.xlabel(r"$\mathrm{Time\ (Gyr)}$") plt.ylabel(r"$z\ (\mathrm{kpc})$") plt.legend() plt.show() # - # Even in the same galactic potential, the difference in initial conditions makes a significant effect on the resulting orbit! 
# # ## Example: Dynamical Friction # # However, we are still missing an important part of the orbital modelling, which is dynamical friction # # To implement dynamical friction we need a mass and density profile for the dwarf galaxy (and the host galaxy) # # (https://docs.galpy.org/en/v1.6.0/reference/potentialchandrasekhardynfric.html) cdf = ChandrasekharDynamicalFrictionForce( GMs=1e10 * u.Msun, rhm=8.0 * u.kpc, dens=MWPotential2014 ) # And now let's integrate the orbits with dynamical friction included # # You can add the 'cdf' potential object to the MWPotential2014 list # # (You can ignore the warnings, a lot of the potential integration is done in C, but not implemented for the dynamical friction force so it's falling back on python) # + Sgr_Vas_DF = Orbit(vxvv=[R / ro, vR / vo, vphi / vo, z / ro, vz / vo, phi]) Sgr_Sim_DF = Orbit.from_name("SDG") Sgr_Vas_DF.integrate(time, MWPotential2014 + cdf) Sgr_Sim_DF.integrate(time, MWPotential2014 + cdf) # + plt.figure() plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Vas_DF.r(time) * ro, label="Vasiliev (2020)", color="blue", ) plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Sim_DF.r(time), label="Simbad", color="red", ) plt.xlim(-4, 0) plt.ylim(0, 110) plt.xlabel(r"$\mathrm{Time\ (Gyr)}$") plt.ylabel(r"$R\ (\mathrm{kpc})$") plt.legend() plt.show() plt.figure() plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Vas_DF.z(time) * ro, label="Vasiliev (2020)", color="blue", ) plt.plot( time * bovy_conversion.time_in_Gyr(220.0, 8.0), Sgr_Sim_DF.z(time), label="Simbad", color="red", ) plt.plot([-7, 7], [0, 0], ls=":", color="k") plt.xlim(-4, 0) plt.ylim(-125, 125) plt.xlabel(r"$\mathrm{Time\ (Gyr)}$") plt.ylabel(r"$z\ (\mathrm{kpc})$") plt.legend() plt.show() # - # ### Exercise: # # Play around with the mass and half-mass radius of the satellite in the example cell below and see how the change in dynamical friction affects the orbit. 
# This isn't supposed to be difficult coding, but to help you build intuition about the infall
#
# So, before you start, how do you think a more massive satellite will fall in to the MW?

# +
# A (deliberately tiny) satellite: vary GMs and rhm and re-run this cell.
cdf2 = ChandrasekharDynamicalFrictionForce(
    GMs=100 * u.Msun, rhm=0.01 * u.kpc, dens=MWPotential2014
)

Sgr_Vas_DF2 = Orbit(vxvv=[R / ro, vR / vo, vphi / vo, z / ro, vz / vo, phi])
Sgr_Sim_DF2 = Orbit.from_name("SDG")

# FIX: integrate in MWPotential2014 (the potential cdf2's dens refers to),
# not the earlier toy potential `pot` -- the exercise varies only the
# satellite's mass and half-mass radius.
Sgr_Vas_DF2.integrate(time, MWPotential2014 + cdf2)
Sgr_Sim_DF2.integrate(time, MWPotential2014 + cdf2)

plt.figure()
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Vas_DF2.r(time) * ro,
    label="Vasiliev (2020)",
    color="blue",
)
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Sim_DF2.r(time),
    label="Simbad",
    color="red",
)
plt.xlim(-4, 0)
plt.ylim(0, 110)
plt.xlabel(r"$\mathrm{Time\ (Gyr)}$")
plt.ylabel(r"$R\ (\mathrm{kpc})$")
plt.legend()
plt.show()

plt.figure()
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Vas_DF2.z(time) * ro,
    label="Vasiliev (2020)",
    color="blue",
)
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Sim_DF2.z(time),
    label="Simbad",
    color="red",
)
plt.plot([-7, 7], [0, 0], ls=":", color="k")
plt.xlim(-4, 0)
plt.ylim(-125, 125)
plt.xlabel(r"$\mathrm{Time\ (Gyr)}$")
plt.ylabel(r"$z\ (\mathrm{kpc})$")
plt.legend()
plt.show()
# -

# In addition, the halo potential in MWPotential2014 is known to be on the lower end of mass estimates for the Milky Way's dark matter halo
#
# Because the potential is a list of components, you can increase the mass of the halo by multiplying that part of the potential (but let's make a copy so we don't lose the original)
#
# As suggested in the Galpy documentation, increasing the halo mass by 50% brings it more in line with recent measurements

# +
import copy

# Deep-copy so MWPotential2014 itself is untouched; index 2 is the NFW halo.
MWPotential2014_heavy = copy.deepcopy(MWPotential2014)
MWPotential2014_heavy[2] *= 1.5

cdf3 = ChandrasekharDynamicalFrictionForce(
    GMs=1e10 * u.Msun, rhm=1.0 * u.kpc, dens=MWPotential2014_heavy
)

Sgr_Vas_DF2 = Orbit(vxvv=[R / ro, vR / vo, vphi / vo, z / ro, vz / vo, phi])
Sgr_Sim_DF2 = Orbit.from_name("SDG")

Sgr_Vas_DF2.integrate(time, MWPotential2014_heavy + cdf3)
Sgr_Sim_DF2.integrate(time, MWPotential2014_heavy + cdf3)

plt.figure()
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Vas_DF2.r(time) * ro,
    label="Vasiliev (2020)",
    color="blue",
)
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Sim_DF2.r(time),
    label="Simbad",
    color="red",
)
plt.xlim(-4, 0)
plt.ylim(0, 110)
plt.xlabel(r"$\mathrm{Time\ (Gyr)}$")
plt.ylabel(r"$R\ (\mathrm{kpc})$")
plt.legend()
plt.show()

plt.figure()
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Vas_DF2.z(time) * ro,
    label="Vasiliev (2020)",
    color="blue",
)
plt.plot(
    time * bovy_conversion.time_in_Gyr(220.0, 8.0),
    Sgr_Sim_DF2.z(time),
    label="Simbad",
    color="red",
)
plt.plot([-7, 7], [0, 0], ls=":", color="k")
plt.xlim(-4, 0)
plt.ylim(-125, 125)
plt.xlabel(r"$\mathrm{Time\ (Gyr)}$")
plt.ylabel(r"$z\ (\mathrm{kpc})$")
plt.legend()
plt.show()
# -

# In addition, an actual satellite falling into the Milky Way will be losing mass, which we did not account for here in the dynamical friction force calculation. Take a moment to consider how that would change the infall?
#
# So, considering the variation in the orbits for our levels of uncertainty, and the lack of mass loss, how much do we trust our orbits here?... Perhaps it's better we don't talk about that!
#
# Regardless, moving forwards we can select a Fiducial orbit for the rest of the notebook. In the rest of the infall examples I pick Vasiliev's initial conditions, the heavier halo, and set GMs=5e8 M$_{\odot}$ and rhm=5 kpc
#
# Feel free to select a different one going forwards, and perhaps choose different values and compare with other students! Or come back here later and run through things with a different orbit/potential
#
# ### Exercise (Stretch goal): Correct for the mass loss
#
# e.g.
see CDF doc page above, but you have to manually edit the mass and integrate segments of the orbits separately
# - Come up with some mass loss scheme, you can assume a flat relation for simplicity, or look at Vasiliev's 2020 paper for an estimation
# - Compute the orbit in fragments changing the mass
# - Compare to the fixed mass orbit
#
# ## Example: The effect of the dwarf on stellar orbits
#
# So far we were only examining the orbit of Sgr in a Milky Way-like potential. However, as the dwarf falls in, it will also have an impact on the stars (and dark matter) in our galaxy.
#
# We'll take a look at the effect on a few disc stars. First, let's set up a multi-star orbit object, similar to above (Feel free to edit the numbers to make your own unique orbits, but remember they must be in internal units!)
#
#

# Set up an array of N individual orbits (these are random, feel free to edit. Each is [R,vR,vphi,z,vz,phi] in normalised units)
vxvvs = np.array(
    [
        [1.0, 0.1, 1.0, 0.1, -0.2, 1.5],
        [0.1, 0.3, 1.1, -0.3, 0.4, 2.0],
        [0.2, 0.3, 1.1, -0.3, 0.4, 2.0],
        [1.0, 0.3, 1.1, -0.3, 0.4, 2.0],
        [0.5, 0.3, 1.1, -0.3, 0.4, 2.0],
    ]
)

# Turn them into an orbit object
orbits = Orbit(vxvvs)

# Then, we integrate them similar to how we did the satellite, except this time, let's go forward in time from -4 to 0

# +
# Integrate it (time runs from -4 Gyr, in internal units, to the present)
time2 = np.linspace(-4.0 / bovy_conversion.time_in_Gyr(220.0, 8.0), 0.0, 1000)
orbits.integrate(time2, MWPotential2014)

# The 'orbit' class also has plotting functionality built in. Default is R-z
orbits.plot()

# You can set other dimensions manually
plt.figure(figsize=(5, 5))
orbits.plot(d1="x", d2="y", overplot=True)
plt.xlim(-2, 2)
plt.xlabel(r"$x\ (\mathrm{kpc})$")
plt.ylabel(r"$y\ (\mathrm{kpc})$");
# -

# You can also animate the orbits (although it's a little slow...)
#
# It should work for any of the orbit properties, as described above
#
# Try hovering over the graph once it's done!
orbits.animate(d1=["x", "R"], d2=["y", "z"], width=800) # They are nice and regular! # # Now, let's put Sagittarius along the orbit we computed earlier, and see what it does to them? # # Firstly, we need a potential object for the dwarf galaxy itself. We'll use a Plummer Sphere: # # (https://docs.galpy.org/en/v1.6.0/reference/potentialplummer.html) plum = PlummerPotential(amp=5e9 * u.Msun, b=5.0 * u.kpc) # Now, let's initialise another Sagittarius orbit and make a 'moving object potential' where we let the plummer sphere follow the orbital path # # The moving object potential can be used to make any galpy potential object follow along any galpy orbit instance # # (https://docs.galpy.org/en/latest/reference/potentialmovingobj.html) # + # Set up the next orbit cdf4 = ChandrasekharDynamicalFrictionForce( GMs=5e9 * u.Msun, rhm=5.0 * u.kpc, dens=MWPotential2014_heavy ) Sgr_Vas_DF4 = Orbit(vxvv=[R / ro, vR / vo, vphi / vo, z / ro, vz / vo, phi]) Sgr_Vas_DF4.integrate(time, MWPotential2014_heavy + cdf4) satpot = MovingObjectPotential(Sgr_Vas_DF4, plum) # - # Now, let's integrate those same orbits again in the time evolving potential and see what Sgr has done! # + # Turn them into an orbit object perturbed_orbits = Orbit(vxvvs) # Integrate it perturbed_orbits.integrate(time2, MWPotential2014_heavy + satpot) # The 'orbit' class also has plotting functionality built in. Default is R-z perturbed_orbits.plot() # You can set other dimensions plt.figure(figsize=(5, 5)) perturbed_orbits.plot(d1="x", d2="y", overplot=True) plt.xlabel(r"$x\ (\mathrm{kpc})$") plt.ylabel(r"$y\ (\mathrm{kpc})$"); # - # Ok, that wasn't much was it! (although they are now not completely regular) But most of these stars from my demo orbits are in the inner galaxy... You may have had something more dramatic if you edited the orbits # # ### Exercise: What if Sgr was 10 times more massive? 
# # Using the above cells, you should be able to redo this with a heavier dwarf, and call it more_perturbed_orbits to work with the below cells # + # Set up the next dwarf # Set up the new orbit # Create the moving object pot # Initialise the orbit object # Integrate it # Plot it # If interesting, animate it! If not interesting, try different orbits/perturbers # - # Let's check the energy for these orbits, as discussed in Adrian's orbit workshop (and Helmer's upcoming one) and see the change (note that 'more_perturbed_orbits' is my heavier Sgr orbit instance which I have taken out!) # # The energy for the non-interacting case should be (effectively) constant orbits.plot(d1="R", d2="E") perturbed_orbits.plot(d1="R", d2="E") more_perturbed_orbits.plot(d1="R", d2="E"); # For the perturbed cases you can see the impacts of Sgr in the orbital energy. The larger the dwarf, the larger the change in energy: # + carray = ["blue", "red", "green", "orange", "purple"] # Plot just the first one orbits.plot(d1="time", d2="E", color=carray[0]) perturbed_orbits.plot(d1="time", d2="E", overplot=True, color=carray[0], ls="--") more_perturbed_orbits.plot(d1="time", d2="E", overplot=True, color=carray[0], ls=":") # Plot all five for i in range(0, 5): if i == 0: orbits[i].plot(d1="time", d2="E", color=carray[i]) else: orbits[i].plot(d1="time", d2="E", overplot=True, color=carray[i]) perturbed_orbits[i].plot(d1="time", d2="E", overplot=True, color=carray[i], ls="--") more_perturbed_orbits[i].plot( d1="time", d2="E", overplot=True, color=carray[i], ls=":" ) # - # Now we know how to integrate and examine multiple objects, let's do some real ones from the MW # # The Orbit.from_name method also allows you to load some collections of objects in a simple manner. Currently, three collections are supported: ‘MW globular clusters’, ‘MW satellite galaxies’, and ‘solar system’. 
#
# We'll make use of the Orbit.from_name routine again to get all of the Milky-Way globular clusters with data from Gaia DR2 (using the Vasiliev 2019 catalog):

GC_orbits = Orbit.from_name("MW globular clusters")
print(GC_orbits.name)

# +
# Integrate all clusters at once in the heavy-halo potential and show R-z and x-y views
GC_orbits.integrate(time, MWPotential2014_heavy)

plt.figure()
GC_orbits.plot()
plt.xlim(0, 300.0)
plt.ylim(-200.0, 200.0)

plt.figure(figsize=(5, 5))
GC_orbits.plot(d1="x", d2="y", overplot=True)
plt.xlim(-200.0, 200.0)
plt.ylim(-200.0, 200.0)
plt.xlabel(r"$x\ (\mathrm{kpc})$")
plt.ylabel(r"$y\ (\mathrm{kpc})$");
# -

# (Or in 3D while we're here...)

GC_orbits.plot3d(alpha=0.4)
plt.xlim(-100.0, 100.0)
plt.ylim(-100.0, 100.0)
plt.gca().set_zlim3d(-100.0, 100.0);

# Some of them are (awkwardly) unbound
#
# This is unlikely to be true, but instead easily explainable by an incorrect potential, or observational error in the cluster orbital parameters
#
# Regardless, what happens if you put Sgr through this lot?

# +
perturbed_GC_orbits = Orbit.from_name("MW globular clusters")
perturbed_GC_orbits.integrate(time, MWPotential2014_heavy + satpot)

plt.figure()
perturbed_GC_orbits.plot()
plt.xlim(0, 300.0)
plt.ylim(-200.0, 200.0)
plt.show()

plt.figure(figsize=(5, 5))
perturbed_GC_orbits.plot(d1="x", d2="y", overplot=True)
plt.xlim(-200.0, 200.0)
plt.ylim(-200.0, 200.0)
plt.xlabel(r"$x\ (\mathrm{kpc})$")
plt.ylabel(r"$y\ (\mathrm{kpc})$")
plt.show()

perturbed_GC_orbits.plot3d(alpha=0.4)
plt.xlim(-100.0, 100.0)
plt.ylim(-100.0, 100.0)
# NOTE: the original called plt.zlim(-100.0, 100.0) here, but matplotlib.pyplot
# has no 'zlim' function (that call raises AttributeError). The z-axis limit is
# correctly set on the 3D axes below with set_zlim3d.
plt.gca().set_zlim3d(-100.0, 100.0);
# -

# Now they are not unbound. Hmm. Ok. I wasn't expecting that! The options are:
# - A fluke! A complete coincidence, helped by observational error and an uncertain Sgr orbit
# - We know there are a bunch of GCs that are related to Sgr. Maybe these are those?
# - It could be that some recent interaction with Sgr has set them on their current orbits which is not feasible otherwise?
#
# ### Exercise (stretch goal): Track those clusters down!
see what they are and where they come from?
# - Find unbound GCs
# - Compare their orbits to Sgr's orbit
#
# ### Exercise (stretch goal): Dynamical Friction and MW Satellites
# - Load the Milky Way dwarf galaxies: You can get them all at once with Orbit.from_name('MW satellite galaxies')
# - Calculate their orbits without DF
# - Add dynamical friction and redo
# - Which ones change and why?
# - How about if they were 10x more massive?

# ## Example: Orbits in a barred potential
#
# While Sagittarius is a perturbing force that comes from outside our galaxy, there are non-axisymmetric structures that arise in a self-gravitating disc such as a bar or spiral arms, which also affect the orbits of stars in the disc
#
# (Note that it's possible such features are induced by external perturbers, but not required)
#
# To start, we need a potential for our bar. We'll use the DehnenBarPotential which is a simple $m=2$ bar potential visualised below.
#
# (https://docs.galpy.org/en/latest/reference/potentialdehnenbar.html)

# +
from galpy.potential import DehnenBarPotential

# We set up our bar parameters. This is a long-slow bar similar to the model of Perez-Villegas et al.
(2017)
tform = -10.0  # number of bar periods in the past that it formed
tsteady = 5.0  # We grow it slowly over 5 bar periods to avoid 'shocking' the potential
# (which is completely pointless here, but vital for other applications so shown for completeness)
omega = 1.3  # Pattern speed of the bar in units of the local circular frequency (220./8=27.5 km/s /kpc)
angle = np.deg2rad(25.0)  # Bar Angle with respect to the sun (in radians)
length = 5.0  # Half-length of the bar
strength = 2.4  # In 'percent of the radial force at the Solar neighborhood'

# Initialise the bar potential, and visualise it
long_bar = DehnenBarPotential(
    omegab=omega,
    rb=length / 8.0,
    Af=(strength / 75.0),
    tform=tform,
    tsteady=tsteady,
    barphi=angle,
)
plotPotentials(
    long_bar,
    xy=True,
    rmin=-2,
    rmax=2,
    nrs=200,
    zmin=-2,
    zmax=2,
    ncontours=20,
    nzs=200,
    cntrcolors="none",
    aspect="equal",
)

# Add it to our MW potential
long_bar_pot = [MWPotential2014, long_bar]
# -

# First, let's set up an orbit and just see where that orbit goes without the bar:

# +
# Set up a particle orbit
long_bar_orbit1 = Orbit(vxvv=[1.0, 0.1, 0.86805, 0.0, 0.0, 0.0])

# A new time array
long_bar_time = np.linspace(0, 4.0 / bovy_conversion.time_in_Gyr(vo, ro), 10000)

# Integrate
long_bar_orbit1.integrate(long_bar_time, MWPotential2014, method="odeint")

plt.figure(figsize=(5, 5))
plt.plot(
    long_bar_orbit1.x(long_bar_time), long_bar_orbit1.y(long_bar_time), color="orange"
)
plt.xlim(-1.25, 1.25)
plt.ylim(-1.25, 1.25)
plt.xlabel(r"$X\ (\mathrm{kpc})$")
plt.ylabel(r"$Y\ (\mathrm{kpc})$")
plt.show()
# -

# It's nice and regular! What happens if we do the same in the barred potential?
# + long_bar_orbit2 = Orbit(vxvv=[1.0, 0.1, 0.86805, 0.0, 0.0, 0.0]) long_bar_orbit2.integrate(long_bar_time, long_bar_pot, method="odeint") plotPotentials( long_bar, xy=True, rmin=-2, rmax=2, nrs=200, zmin=-2, zmax=2, ncontours=20, nzs=200, cntrcolors="none", aspect="equal", ) plt.plot( long_bar_orbit2.x(long_bar_time), long_bar_orbit2.y(long_bar_time), color="orange" ) plt.xlim(-1.25, 1.25) plt.ylim(-1.25, 1.25) plt.xlabel(r"$X\ (\mathrm{kpc})$") plt.ylabel(r"$Y\ (\mathrm{kpc})$") plt.show() # - # Not so regular anymore! But this orbit is also interesting if we look in the reference frame of the rotating bar. # # We know how fast the bar rotates, so we can convert the x-y coordinates into the rotating frame by taking off the bar rotation multiplied by the time # # We'll do it for both the unperturbed and perturbed orbit for illustration (even though it only really makes sense if there is a bar!) # + rotation = long_bar_time * -1.3 # Where 1.3 is the pattern speed, see above lbx1 = ( long_bar_orbit1.x(long_bar_time) * np.cos(rotation) - long_bar_orbit1.y(long_bar_time) * np.sin(rotation) ) lby1 = ( long_bar_orbit1.x(long_bar_time) * np.sin(rotation) + long_bar_orbit1.y(long_bar_time) * np.cos(rotation) ) lbx2 = ( long_bar_orbit2.x(long_bar_time) * np.cos(rotation) - long_bar_orbit2.y(long_bar_time) * np.sin(rotation) ) lby2 = ( long_bar_orbit2.x(long_bar_time) * np.sin(rotation) + long_bar_orbit2.y(long_bar_time) * np.cos(rotation) ) plt.figure(figsize=(5, 5)) plt.plot(lbx2, lby2, color="orange") plt.plot(lbx1, lby1, color="teal", ls="dotted") plt.xlim(-1.25, 1.25) plt.ylim(-1.25, 1.25) plt.xlabel(r"$X\ (\mathrm{kpc})$") plt.ylabel(r"$Y\ (\mathrm{kpc})$") # This is the bar major axis as a black line. # It doesn't mean it's thin but feels unnecessary to redraw the potential each time! 
plt.plot([-0.566, 0.566], [-0.26, 0.26], color="black")
plt.show()
# -

# Note that while the unperturbed orbit (blue-dotted) covers all azimuth in the rotating frame, the perturbed (red) orbit remains on one side of the bar.
#
# This is because it is close to the bar co-rotation resonance (it's not perfect, Jason just eyeballed it...) and it ends up librating around one of the bar lagrange points (e.g. Perez-Villegas et al. 2017).
#
# If the bar is long and slow (as in this example) then this resonance will be close to the Solar neighbourhood and could cause substructure in our local kinematics, but there are plenty of other resonances too
#
# For example, the corotation resonance for a rigidly rotating structure with pattern speed $\Omega_{\mathrm{p}}$ occurs when $\Omega_{\mathrm{p}}-\Omega_{\phi}=0$. The inner (ILR) and outer (OLR) 2:1 Lindblad resonances occur when $\Omega_{\mathrm{p}}-\Omega_{\phi}\pm\Omega_{\mathrm{R}}/2=0$, and similarly for the 4:1 ILR and OLR, when $\Omega_{\mathrm{p}}-\Omega_{\phi}\pm\Omega_{\mathrm{R}}/4=0$, and the 1:1 ILR and OLR when $\Omega_{\mathrm{p}}-\Omega_{\phi}\pm\Omega_{\mathrm{R}}=0$ (-ve is outer, +ve is inner for each case)
#
#
# A commonly examined bar resonance is the OLR. If the bar is short and fast, then this resonance will be close to the Solar neighbourhood instead (e.g. Dehnen 2000). Let's make that bar and see the orbit structure:

# +
omega = 1.85  # Faster bar
length = 3.5  # Shorter bar
strength = 1.0  # Less force from the shorter weaker bar!
short_bar = DehnenBarPotential(
    omegab=omega,
    rb=length / 8.0,
    Af=(strength / 75.0),
    tform=tform,
    tsteady=tsteady,
    barphi=angle,
)
short_bar_pot = [MWPotential2014, short_bar]

# We'll set up two orbits this time (0.153)
short_bar_orbit1 = Orbit(vxvv=[1.0, 0.2, 0.9, 0.0])
short_bar_orbit2 = Orbit(vxvv=[1.0, -0.15, 0.9, 0.0])

short_bar_time = np.linspace(0, 0.5 / bovy_conversion.time_in_Gyr(vo, ro), 1000)

short_bar_orbit1.integrate(short_bar_time, short_bar_pot, method="odeint")
short_bar_orbit2.integrate(short_bar_time, short_bar_pot, method="odeint")

# Plot them in the standard frame
plt.figure(figsize=(5, 5))
plt.plot(
    short_bar_orbit1.x(short_bar_time), short_bar_orbit1.y(short_bar_time), color="red"
)
plt.plot(
    short_bar_orbit2.x(short_bar_time), short_bar_orbit2.y(short_bar_time), color="blue"
)
plt.xlim(-1.25, 1.25)
plt.ylim(-1.25, 1.25)
plt.xlabel(r"$X\ (\mathrm{kpc})$")
plt.ylabel(r"$Y\ (\mathrm{kpc})$")
plt.plot([-0.4, 0.4], [-0.18, 0.18], color="black")
plt.show()
# -

# And in the rotating frame:

# +
rotation = short_bar_time * -1.85  # -1 * the short-bar pattern speed

sbx1 = short_bar_orbit1.x(short_bar_time) * np.cos(rotation) - short_bar_orbit1.y(
    short_bar_time
) * np.sin(rotation)
sby1 = short_bar_orbit1.x(short_bar_time) * np.sin(rotation) + short_bar_orbit1.y(
    short_bar_time
) * np.cos(rotation)
sbx2 = short_bar_orbit2.x(short_bar_time) * np.cos(rotation) - short_bar_orbit2.y(
    short_bar_time
) * np.sin(rotation)
sby2 = short_bar_orbit2.x(short_bar_time) * np.sin(rotation) + short_bar_orbit2.y(
    short_bar_time
) * np.cos(rotation)

plt.figure(figsize=(5, 5))
plt.plot(sbx1, sby1, color="red")
plt.plot(sbx2, sby2, color="blue")
plt.xlim(-1.25, 1.25)
plt.ylim(-1.25, 1.25)
plt.xlabel(r"$X\ (\mathrm{kpc})$")
plt.ylabel(r"$Y\ (\mathrm{kpc})$")
plt.plot([-0.4, 0.4], [-0.18, 0.18], color="black")
plt.title(r"$\mathrm{Figure\ 1 - For\ later\ discussion}$")
plt.show()
# -

# Now you see clearly the 2:1 morphology of the orbits at the resonance (in the frame of the bar). E.g.
they go in and out twice for every once around the galaxy. The two orbits were chosen to show the bar-aligned and anti-aligned resonant orbit (although they're not perfectly aligned...)
#
# Again, Jason just totally eyeballed these, they're certainly not lying perfectly at the resonances, so are they actually stable structures? Let's run that again for 10 Gyr

# +
short_bar_orbit3 = Orbit(vxvv=[1.0, 0.2, 0.9, 0.0])
short_bar_orbit4 = Orbit(vxvv=[1.0, -0.15, 0.9, 0.0])

# Much longer baseline this time: 10 Gyr
short_bar_time = np.linspace(0, 10.0 / bovy_conversion.time_in_Gyr(vo, ro), 1000)
rotation = short_bar_time * -1.85

short_bar_orbit3.integrate(short_bar_time, short_bar_pot, method="odeint")
short_bar_orbit4.integrate(short_bar_time, short_bar_pot, method="odeint")

sbx3 = short_bar_orbit3.x(short_bar_time) * np.cos(rotation) - short_bar_orbit3.y(
    short_bar_time
) * np.sin(rotation)
sby3 = short_bar_orbit3.x(short_bar_time) * np.sin(rotation) + short_bar_orbit3.y(
    short_bar_time
) * np.cos(rotation)
sbx4 = short_bar_orbit4.x(short_bar_time) * np.cos(rotation) - short_bar_orbit4.y(
    short_bar_time
) * np.sin(rotation)
sby4 = short_bar_orbit4.x(short_bar_time) * np.sin(rotation) + short_bar_orbit4.y(
    short_bar_time
) * np.cos(rotation)

plt.figure(figsize=(5, 5))
plt.plot(sbx3, sby3, color="red")
plt.plot(sbx4, sby4, color="blue")
plt.xlim(-1.25, 1.25)
plt.ylim(-1.25, 1.25)
plt.xlabel(r"$X\ (\mathrm{kpc})$")
plt.ylabel(r"$Y\ (\mathrm{kpc})$")
plt.plot([-0.4, 0.4], [-0.18, 0.18], color="black")
plt.show()
# -

# You can see that while the blue orbit is stable and retains its shape over long timescales, the red orbit does not! This is known, that one alignment is stable while the other is not (e.g. See Fux 2000). Also note that over long timescales the blue orbit librates around the resonance, appearing aligned with the bar axis over multiple orbits!
#
# But, are they actually resonant?
We know from the equations above that the OLR should be where $\Omega_{\mathrm{p}}-\Omega_{\phi}-\Omega_{\mathrm{R}}/2=0$
#
# Galpy can calculate actions, angles and frequencies with a variety of methods (see docs below). The Staeckel approximation is overkill here, but a useful example for other applications

# +
from galpy.actionAngle import (
    actionAngleStaeckel,
    estimateDeltaStaeckel,
    actionAngleIsochroneApprox,
)

# The focal length of the potential local to the orbit
delta = estimateDeltaStaeckel(MWPotential2014, 1.0, 0.0000001, no_median=True)

# Set up the Staeckel object for the calculation
aAS = actionAngleStaeckel(pot=MWPotential2014, delta=delta)

# Calculate actions, angles and frequencies with [R,vR,vT,z,vz,phi]
# Note that it doesn't work when perfectly planar, so do small z & vz e.g. 0.0000001
(
    jR1,
    lz1,
    jz1,
    O_R1,
    O_phi1,
    O_z1,
    theta_R1,
    theta_phi1,
    theta_z1,
) = aAS.actionsFreqsAngles(1.0, 0.2, 0.9, 0.00001, 0.000001, 0.0, delta=delta)
(
    jR2,
    lz2,
    jz2,
    O_R2,
    O_phi2,
    O_z2,
    theta_R2,
    theta_phi2,
    theta_z2,
) = aAS.actionsFreqsAngles(1.0, -0.15, 0.9, 0.00001, 0.000001, 0.0, delta=delta)

# +
# They're close to zero, but not exact, which is what I'd expect given my guesswork!
print(1.85 - O_phi1 - O_R1 / 2)
print(1.85 - O_phi2 - O_R2 / 2)
# -

# In addition, these are the actions calculated in the ${\it axisymmetric}$ potential, so they are not the ${\it true}$ actions. They are generally good enough to get close to the right answer for simplistic applications. (For a rigidly rotating pattern you can move to 'fast' and 'slow' actions, but we don't have time to cover that here.
See Binney & Tremaine for some discussion)
#
# However, because the resonances are a relation between the orbital frequencies of the stars, and the rigidly rotating bar pattern, if the potential changes, so do the frequencies, and so do the resonances:
#
# Let's redo the same orbits in the heavier MWPotential2014_heavy:

# +
short_bar_pot_heavy = [MWPotential2014_heavy, short_bar]

short_bar_orbit5 = Orbit(vxvv=[1.0, 0.2, 0.9, 0.0])
short_bar_orbit6 = Orbit(vxvv=[1.0, -0.15, 0.9, 0.0])

short_bar_time = np.linspace(0, 0.5 / bovy_conversion.time_in_Gyr(vo, ro), 1000)
rotation = short_bar_time * -1.85

short_bar_orbit5.integrate(short_bar_time, short_bar_pot_heavy, method="odeint")
short_bar_orbit6.integrate(short_bar_time, short_bar_pot_heavy, method="odeint")

# We'll just plot the rotating frame this time
sbx5 = short_bar_orbit5.x(short_bar_time) * np.cos(rotation) - short_bar_orbit5.y(
    short_bar_time
) * np.sin(rotation)
sby5 = short_bar_orbit5.x(short_bar_time) * np.sin(rotation) + short_bar_orbit5.y(
    short_bar_time
) * np.cos(rotation)
sbx6 = short_bar_orbit6.x(short_bar_time) * np.cos(rotation) - short_bar_orbit6.y(
    short_bar_time
) * np.sin(rotation)
sby6 = short_bar_orbit6.x(short_bar_time) * np.sin(rotation) + short_bar_orbit6.y(
    short_bar_time
) * np.cos(rotation)

plt.figure(figsize=(5, 5))
plt.plot(sbx5, sby5, color="red")
plt.plot(sbx6, sby6, color="blue")
plt.xlim(-1.25, 1.25)
plt.ylim(-1.25, 1.25)
plt.xlabel(r"$X\ (\mathrm{kpc})$")
plt.ylabel(r"$Y\ (\mathrm{kpc})$")
plt.plot([-0.4, 0.4], [-0.18, 0.18], color="black")
plt.show()
# -

# Now it's almost a 3:1, just from changing the halo potential!
#
# ### Exercise: But are they stable, and are they resonant?
# - Why don't you check the frequencies from the equations above?
# - You can integrate it further in time to see what happens. Before you do, what do you expect?

# +
# Calculate actions

# Check frequencies compared to the equations above!
# Integrate further # - # Regardless of what you found, we would not expect the 3:1 resonance to be strong in a bar with a pure $m=2$ component # # When doing a fourier decomposition on real bars in nature or simulation we find they have a range of structure, and are rarely pure $m=2$ (e.g. Buta 2006). # # But, the even fourier components are usually much stronger than the odd components owing to the bar symmetry. So, let's construct a bar with more complex morphology with the cosmphi disc potential. Let's do $m=4$ which is (usually/always?) the second strongest # # (https://docs.galpy.org/en/latest/reference/potentialcosmphidisk.html) # + from galpy.potential import ( CosmphiDiskPotential, SolidBodyRotationWrapperPotential, DehnenSmoothWrapperPotential, ) omega = 1.4 length = 4.0 strength = 1.0 # This is the base m=2 component for the bar, but we could recreate it with the cosmphi potential below too dp = DehnenBarPotential( omegab=omega, rb=length / 8.0, Af=(strength / 75.0), tform=tform, tsteady=tsteady, barphi=angle, ) # Now we add a m=4 fourier potential component. # We then wrap it with a smooth growth wrapper, and then a rotating potential wrapper cosm4 = SolidBodyRotationWrapperPotential( pot=DehnenSmoothWrapperPotential( pot=CosmphiDiskPotential(amp=0.05, m=4.0, phib=angle, p=-5.0, rb=length / 8.0), tform=dp.tform(), ), omega=omega, ) # m4_bar_pot = [MWPotential2014, dp, cosm4] # Jason only had time to make 1 (almost) resonant orbit... 
Will try and fix before school
m4_bar_orbit1 = Orbit(vxvv=[1.0, 0.1, 0.9553, 0.0])
# m4_bar_orbit2=Orbit(vxvv=[1.,0.1,0.9553,0.])

m4_bar_time = np.linspace(0, 1.62 / bovy_conversion.time_in_Gyr(vo, ro), 10000)

m4_bar_orbit1.integrate(m4_bar_time, m4_bar_pot, method="odeint")
# m4_bar_orbit2.integrate(m4_bar_time,m4_bar_pot,method='odeint')

# Rotate into the frame of the m=4 bar (pattern speed 1.4, see above)
deg = m4_bar_time * -1.4

b4x3 = m4_bar_orbit1.x(m4_bar_time) * np.cos(deg) - m4_bar_orbit1.y(
    m4_bar_time
) * np.sin(deg)
b4y3 = m4_bar_orbit1.x(m4_bar_time) * np.sin(deg) + m4_bar_orbit1.y(
    m4_bar_time
) * np.cos(deg)
# b4x4=m4_bar_orbit2.x(m4_bar_time)*np.cos(deg)-m4_bar_orbit2.y(m4_bar_time)*np.sin(deg)
# b4y4=m4_bar_orbit2.x(m4_bar_time)*np.sin(deg)+m4_bar_orbit2.y(m4_bar_time)*np.cos(deg)

plt.figure(figsize=(5, 5))
plt.plot(b4x3, b4y3, color="red")
# plt.plot(b4x4,b4y4,color='blue')
plt.xlim(-1.25, 1.25)
plt.ylim(-1.25, 1.25)
plt.xlabel(r"$X\ (\mathrm{kpc})$")
plt.ylabel(r"$Y\ (\mathrm{kpc})$")
plt.plot([-0.453, 0.453], [-0.211, 0.211], color="black")
plt.show()
# -

# And voila, a 4:1 orbit!

# ## Example: Backwards integration
#
# Often, we're more interested in a distribution of orbits instead of the parameters/morphology of a single one
#
# A simple way to visualise the local velocity distribution without integrating a large number of orbits is the so called 'Backwards integration technique' (Dehnen 2000, AJ, 119, 800).
#
# Essentially this involves integrating a grid of orbits in phase space backwards in time in the desired potential, and then assigning them present day 'weights' based on the density at the location they end up at in the 'past'.
#
# This works well for a quick visualisation of what you expect today, but completely breaks down in the presence of chaos!
Still, as the number of particles needed to resolve local phase space is high in any particle based simulation, this is a useful tool, providing you remember the caveats

# +
from galpy.potential import LogarithmicHaloPotential
from galpy.df import evolveddiskdf
from galpy.df import dehnendf
from scipy import ndimage as nd

# We'll make a new bar here, this is the short_bar again for the discussion below
omega = 1.85  # Pattern speed of the bar
angle = 25.0 / 180.0 * np.pi  # Bar Angle
length = 3.5

# Initiate potentials
dp = DehnenBarPotential(
    omegab=omega,
    rb=length / 8.0,
    Af=(1.0 / 75.0),
    tform=tform,
    tsteady=tsteady,
    barphi=angle,
)
lhp = LogarithmicHaloPotential(normalize=1.0)
pot = [lhp, dp]

# Initiate Dehnen distribution function (Dehnen 1999, AJ, 118, 1201)
dfc = dehnendf(beta=0.0, correct=False, profileParams=(1.0 / 3.0, 1.0, 0.15))

# Integrate backwards following (Dehnen 2000, AJ, 119, 800)
edf = evolveddiskdf(dfc, pot, to=dp.tform())

# Calculate the velocity field upon a grid (at R/R0=1, phi=0, at the present day)
mvr, grid = edf.meanvR(
    1.0, phi=0.0, deg=False, t=0.0, grid=True, returnGrid=True, gridpoints=101
)
# -

# And plot the resulting local velocity distribution for a system with the chosen potential

plt.figure(figsize=(6, 6))
bovy_plot.bovy_dens2d(
    nd.gaussian_filter(grid.df[::-1].T, 1.0),
    origin="lower",
    contours=True,
    xrange=[grid.vRgrid[0], grid.vRgrid[-1]],
    yrange=[grid.vTgrid[0], grid.vTgrid[-1]],
    xlabel=r"$v_R$",
    ylabel=r"$v_T$",
    gcf=True,
    cmap="viridis",
)
bovy_plot._add_ticks()

# Assuming you kept the parameters of the short bar above, you'll see a bimodal distribution with a weaker group in the lower left, and a sharp spikey bit going towards the right. This is caused by the 2:1 Outer Lindblad resonance.
#
# In the past, people have suggested this to be the origin of the division between the 'Hercules stream' and the main part of the distribution (e.g. Dehnen 2000).
But recent more direct measurements of the bar pattern speed and length argue against this.

# Here, the 'Hercules-like' group in the lower left is primarily composed of the 'red' orbits from 'Figure 1' above, and the sharp part is primarily composed of the 'blue' orbits (see Fragkoudi 2019 for a more thorough discussion)

# ### Exercise: What do the other bars leave as imprints in the local kinematics?
# - How do a long slow bar and a bar with m=4 and an intermediate pattern compare to the local observed kinematics?
# - You can find the real $v_R-v_{\phi}$ plane in https://ui.adsabs.harvard.edu/abs/2018A%26A...616A..11G/abstract
# - Can you relate the orbit structure above to the morphology of the velocity distribution?

# ### Exercise (stretch goal): What about the spiral arms?
# - Try adding a density wave spiral arm potential from galpy's list of potentials
# - Try adding transient winding spirals (see https://ui.adsabs.harvard.edu/abs/2018MNRAS.481.3794H/abstract for what is hopefully an understandable example!)
# - How do they compare?

# $\textbf{The end. You made it! Hopefully some of this was useful/interesting to you (if not I apologise!)}$

# $\textbf{Now you should:}$
# - Go back through and pick one or more of those stretch goal exercises!
# - Assist your peers!
# - (Go for lunch?)
3-Orbital-and-galactic-evolution/1-Interactions-and-orbit-evolution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ### NYU # + root = '/home/wuyiming/git/Hand' caffe_root = root + '/caffe' import sys sys.path.insert(0, caffe_root + '/python') sys.path.insert(0, 'lib') sys.path.insert(0, 'lib/layers/') sys.path.insert(0, 'lib/data/') sys.path.insert(0, 'lib/util/') import caffe import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # %matplotlib inline import h5py from matplotlib.patches import Circle import scipy.io as scio import os import time # + fx = 588.03 fy = 587.07 ux = 320 uy = 240 def jointsImgTo3D(sample): """ Normalize sample to metric 3D :param sample: joints in (x,y,z) with x,y in image coordinates and z in mm :return: normalized joints in mm """ ret = np.zeros((sample.shape[0], 3), np.float32) for i in range(sample.shape[0]): ret[i] = jointImgTo3D(sample[i]) return ret def jointImgTo3D(sample): """ Normalize sample to metric 3D :param sample: joints in (x,y,z) with x,y in image coordinates and z in mm :return: normalized joints in mm """ ret = np.zeros((3,), np.float32) # convert to metric using f, see Thomson et al. ret[0] = (sample[0] - ux) * sample[2] / fx ret[1] = (uy - sample[1]) * sample[2] / fy ret[2] = sample[2] return ret def joints3DToImg(sample): ret = np.zeros((sample.shape[0], 3), np.float32) for i in range(sample.shape[0]): ret[i] = joint3DToImg(sample[i]) return ret def joint3DToImg(sample): ret = np.zeros((3,),np.float32) #convert to metric using f, see Thomson et.al. 
if sample[2] == 0.:
    # Guard against division by zero at the camera plane: return the principal point
    ret[0] = ux
    ret[1] = uy
    return ret
ret[0] = sample[0]/sample[2]*fx+ux
ret[1] = uy-sample[1]/sample[2]*fy
ret[2] = sample[2]
return ret


def loadPredFile(filepath):
    # Load a whitespace-separated prediction file; returns an (n, joints, 3) array.
    # Python 2: map() returns a list and d/3 is integer division here.
    import os
    assert os.path.isfile(filepath), "{} is not exists or is not a file!".format(filepath)
    with open(filepath, 'r') as f:
        lines = f.readlines()
        for index, line in enumerate(lines):
            lines[index] = map(float, line.split())
        joints = np.array(lines)
        n, d = joints.shape
        return joints.reshape(n, d/3, 3)


def getNumFrameWithinMaxDist(gt, joints, dist):
    # Count frames whose worst (max over joints) Euclidean error is <= dist (mm)
    return (np.nanmax(np.sqrt(np.square(gt - joints).sum(axis=2)), axis=1) <= dist).sum()


def plotAccuracy(gt, joints, thresh = 80, label='ours'):
    # Plot the fraction of frames with all joints within each distance threshold
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([getNumFrameWithinMaxDist(gt, joints, j)/ float(joints.shape[0]) * 100.
             for j in range(0, 80)], label=label)
    ax.grid(True)
    plt.xlabel('Distance threshold / mm')
    plt.ylabel('Fraction of frames within threshold / %')
    plt.xlim((0, 80))
    plt.ylim((0.0, 100.0))


def predict_joints(model_name, weights_num):
    """predict joints"""
    model_def = 'models/NYU/hand_' + model_name + '/hand_' + model_name + '.prototxt'
    model_weights = 'weights/NYU/hand_' + model_name + '/hand_' + model_name + '_iter_' + weights_num + '.caffemodel'
    print model_def
    print model_weights
    net = caffe.Net(model_def, model_weights, caffe.TEST)

    file_name = 'result/OURS/NYU/hand_' + model_name + '_' + weights_num + '.txt'
    import os
    if os.path.isfile(file_name):
        # Cached result already exists; skip the forward passes
        return file_name
    print file_name

    t_start = time.time()
    # 8252 test frames in the NYU test set; fill predictions indexed by frame id
    predicted_joints = np.array([None]* 8252)
    for i in xrange(np.int(np.ceil(8252./ net.blobs['inds'].data.shape[0]))):
        net.forward()
        print 'iter = ', i
        for j, ind in enumerate(net.blobs['inds'].data):
            row = j / 32
            col = j % 32
            if predicted_joints[int(ind) - 1] == None:
                if model_name == 'baseline':
                    if ind <= 2440:
                        # test 1
                        predicted_joints[int(ind) - 1] = (net.blobs['joint_pred'].data[j].reshape(14, 3) * \
                            300 / 2 + net.blobs['com'].data[j].reshape(1, 3)).copy()
                    else:
                        # test 2
predicted_joints[int(ind) - 1] = (net.blobs['joint_pred'].data[j].reshape(14, 3) * \ 300 * 0.87 / 2 + net.blobs['com'].data[j].reshape(1, 3)).copy() else: predicted_joints[int(ind) - 1] = (net.blobs['pred_joint'].data[row][col].reshape(14, 3) * \ net.blobs['config'].data[j][0]/2 + net.blobs['com'].data[j].reshape(1, 3)).copy() t_end = time.time() print 'time elapse {}'.format((t_end - t_start) / 8252) with open(file_name, 'w') as f: for i in xrange(predicted_joints.shape[0]): for item in predicted_joints[i].reshape(14*3): f.write("%s "% item) f.write("\n") return file_name def vis_square(data): """Take an array of shape (n, height, width) or (n, height, width, 3) and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)""" # normalize data for display data = (data - data.min()) / (data.max() - data.min()) # force the number of filters to be square n = int(np.ceil(np.sqrt(data.shape[0]))) padding = (((0, n ** 2 - data.shape[0]), (0, 1), (0, 1)) # add some space between filters + ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one) data = np.pad(data, padding, mode='constant', constant_values=1) # pad with ones (white) # tile the filters into an image data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1))) data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:]) plt.imshow(data); plt.axis('off') # - # ### train # + caffe.set_device(0) caffe.set_mode_gpu() solver = None solver = caffe.AdamSolver('./models/NYU/hand_lstm/solver_hand_lstm.prototxt') # - solver.net.forward() solver.test_nets[0].forward() print solver.net.blobs['inds'].data print solver.test_nets[0].blobs['inds'].data # visulize 3D i = 0 print 'ind = ', solver.net.blobs['inds'].data[i] depth = solver.net.blobs['depth'].data[i] dpt3D = solver.net.blobs['dpt3D'].data[i] plt.axis('off') plt.imshow(depth.reshape(128,128)) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') d,x,y = 
np.where(dpt3D==1) ax.scatter(x,y,8-d) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('d') plt.axis('off') solver.test_nets[0].forward() print solver.test_nets[0].blobs['inds'].data joint_data = solver.test_nets[0].blobs['joint'].data inds_data = solver.test_nets[0].blobs['inds'].data img = (solver.net.blobs['depth'].data[0].reshape(128,128)) plt.imshow(img) (solver.net.blobs['joint'].data[0]).reshape(14,3) # ### test caffe.set_device(0) caffe.set_mode_gpu() net = caffe.Net('models/NYU/hand_lstm_small_frame_size/hand_lstm_small_frame_size.prototxt', 'weights/NYU/hand_lstm_small_frame_size/hand_lstm_small_frame_size_iter_10000.caffemodel', caffe.TEST) net.forward() vis_square(net.blobs['depth'].data[0]) vis_square(net.blobs['conv1_1'].data[0]) vis_square(net.blobs['pool1'].data[0]) vis_square(net.blobs['pool2'].data[0]) vis_square(net.blobs['pool3'].data[0]) vis_square(net.blobs['lstm'].data.reshape(3, 32, 32)) # ## Plot the accuracy of hand pose estimation # ### read the ground truth gt_file = '/mnt/data/NYU-Hands-v2/test/joint_data.mat' data = scio.loadmat(gt_file) kinect_index = 0 image_index = 0 joint_uvd = data['joint_uvd'][kinect_index, :, :, :] joint_xyz = data['joint_xyz'][kinect_index, :, :, :] restrictedJoint = [0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32] joint_name = data['joint_names'].reshape(36, 1) # #### read the prediction(CVWW 2015) pred_file_CVWW15 = '../result/CVWW15/CVWW15_NYU_Prior.txt' pred_joints = loadPredFile(pred_file_CVWW15) pred_joints3D = [] for joints in pred_joints: joints3D = jointsImgTo3D(joints) pred_joints3D.append(joints3D) pred_joints3D = np.asarray(pred_joints3D) plotAccuracy(joint_xyz[:, restrictedJoint], pred_joints3D, label='CVWW15') pred_file_CVWW15_refine = '../result/CVWW15/CVWW15_NYU_Prior-Refinement.txt' pred_joints = loadPredFile(pred_file_CVWW15_refine) pred_joints3D = [] for joints in pred_joints: joints3D = jointsImgTo3D(joints) pred_joints3D.append(joints3D) pred_joints3D = np.asarray(pred_joints3D) 
plotAccuracy(joint_xyz[:, restrictedJoint], pred_joints3D, label='CVWW15') pred_file_ICCV = '../result/ICCV15/ICCV15_NYU_Feedback.txt' pred_joints = loadPredFile(pred_file_ICCV) pred_joints3D = [] for joints in pred_joints: joints3D = jointsImgTo3D(joints) pred_joints3D.append(joints3D) pred_joints3D = np.asarray(pred_joints3D) plotAccuracy(joint_xyz[:, restrictedJoint], pred_joints3D, label='ICCV') # #### predicted by ourselves model_num = 'baseline' weights_num = '60000' pred_ours_file = predict_joints(model_num, weights_num) pred_joints = loadPredFile(pred_ours_file) plotAccuracy(joint_xyz[:, restrictedJoint], pred_joints, label='ours')
hand-NYU.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cwmarris/pull-request-monitor/blob/master/OH_Introduction_To_NLP_01.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="KBW4gr9l1CTw"
# # Introduction to NLP 01: Overview

# + [markdown] id="HMvDz4e4ZhHY"
# * Author: <NAME>
# * Date: July 2020

# + [markdown] id="Z3tSKUwN0fln"
# ## What is NLP?

# + [markdown] id="gagz458u2yml"
# Natural Language Processing (NLP) is a field of data science that gives the machines the ability to read, understand and derive meanings from human languages.

# + [markdown] id="2zC3kxVz0m0g"
# ## What are NLP's use cases?

# + [markdown] id="O_cEI9jj09wL"
# * Sentiment Analysis
# * Topic Modeling (unsupervised)
# * Named Entity Recognition (NER)
# * Part of Speach (POS)
# * Language Translation
# * Language Generation
# * Text Summarization
# * Text Classification (supervised)
# * Text Segmentation (unsupervised)
# * Speech to Text and Text to Speech
# * Chatbot
#

# + [markdown] id="ILSuWNnnD1bP"
# ## NLP Terminologies

# + [markdown] id="12tUiwcLEEIw"
# * Stop Words
# * Tokenization
# * Stemming
# * Lemmatization
# * Count Vectorization
# * TF-IDF: Tf-idf stands for term frequency-inverse document frequency. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus TF-IDF=TF(t)*IDF(t)
# * TF: Term Frequency, which measures how frequently a term occurs in a document. Since every document is different in length, it is possible that a term would appear many more times in long documents than shorter ones. Thus, the term frequency is often divided by the document length (aka. the total number of terms in the document) as a way of normalization.
# * IDF: Inverse Document Frequency, which measures how important a term is. While computing TF, all terms are considered equally important. However it is known that certain terms, such as "is", "of", and "that", may appear a lot of times but have little importance. Thus we need to weigh down the frequent terms while scale up the rare ones, by computing the following.
#
# * Bag of Words
# * Word Embedding: for example, king-man+woman=queen

# + [markdown] id="ASn_zwEOFMug"
# ## Hands-on Exercise

# + [markdown] id="Mn09U7_DfdlV"
# ### Stopwords

# + id="J_GBB0PB0F2D"
# Sample sentence used throughout the stopword/stemming/lemmatization demos.
text = 'HBAP students benefit from world-class instruction in courses designed by esteemed Harvard faculty and collaborate with diverse peers in highly interactive online classes. '

# + id="fD0mQD9nMh60" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="5d4008a0-2223-4a85-b1be-895cff55db3f"
import nltk
nltk.download('stopwords')
# NOTE(review): 'word_tokenize' is not a downloadable NLTK resource id — the
# tokenizer models ship as 'punkt' (downloaded below), so this call only
# prints a download error; confirm and drop it.
nltk.download('word_tokenize')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
STOPWORDS = set(stopwords.words('english'))

# + id="5IAGHkceMnwB" colab={"base_uri": "https://localhost:8080/", "height": 425} outputId="5cbdff2d-c4db-41c0-cc48-fc4dbf2de9be"
# Tokenization
tokens = word_tokenize(text)
tokens

# + id="OygcIfDENsel" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="8ff40e24-1aac-48e1-e4b1-caa43fc9398a"
# Remove Stopping Words
text_no_stopwords = [w for w in tokens if not w in STOPWORDS]
text_no_stopwords

# + [markdown] id="7LxLrO5kfqeh"
# ### Stemming

# + id="ogozBQKfPkmf" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="e48b32c8-2913-44d0-bebc-52222413484c"
# Stemming
from nltk.stem import PorterStemmer
text_stemmed = [PorterStemmer().stem(w) for w in text_no_stopwords]
text_stemmed

# + [markdown] id="BQFXPVhpft-M"
# ### Lemmatization

# + id="3ASrccGqRooH" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="1ec390d3-31ef-4ffe-b994-e486f389bc65"
# Lemmatization
nltk.download('wordnet')
wn = nltk.WordNetLemmatizer()
text_lemma = [wn.lemmatize(w) for w in text_no_stopwords]
text_lemma

# + [markdown] id="fqdNVSmuTCjh"
# Tip: Choose Stemming for speed and lemmatization for accuracy

# + [markdown] id="UefToaSjfyet"
# ### Count Vectorization

# + id="Xkln07VMVX2l"
# Count Vectorization
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()

# + id="FCNwWPORavmP"
# Two-document toy corpus for the vectorizer demos.
text1 = ['Data science is fun.', 'Data science helps us to make data driven decisions.']

# + id="rFvjx8jOawg8" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="107bf6e7-9fe0-4f18-d8b8-f1a3ca72f2a2"
vectorizer.fit(text1)

# + id="1fJ2r_Vza3Kb" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="5da0002f-d48f-469d-f5a1-b204c4596d35"
print('Vocabulary: ')
print(vectorizer.vocabulary_)

# + id="Jr8cOoNFbDIZ"
vector = vectorizer.transform(text1)

# + id="czBfOMsGbDEg" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="e43cfe1d-781d-4a6c-e76b-32a079046361"
print('Full vector: ')
print(vector.toarray())

# + [markdown] id="WgamgYAqf2NH"
# ### TFIDF

# + id="Hjos3-SSbDAN"
# TFIDF Vectorization
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()

# + id="wbvuraaCbC79" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="7ac75d94-5326-4472-edd4-b91d32b83601"
tfidf.fit(text1)

# + id="MCkaa-D5bC3-" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6a595f4c-9def-422b-c2b5-2cb75d2838de"
print('Vocabulary: ')
print(tfidf.vocabulary_)

# + id="pcm6RrcVbC0R" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="f71b2114-a7ab-4d82-d50d-f187bf3ded8a"
vector_tfidf = tfidf.transform(text1)
print('Full vector: ')
print(vector_tfidf.toarray())

# + [markdown] id="2KWTXhA4eGhi"
# ## NLP Learning Materials

# + [markdown] id="bdTSiB_6ey-g"
# * NLP with Deep Learning from Stanford: https://www.youtube.com/watch?v=8rXD5-xhemo&list=PLoROMvodv4rOhcuXMZkNm7j3fVwBBY42z
# * NLP with Python: https://www.udemy.com/course/nlp-natural-language-processing-with-python/
# * spaCy documentation: https://spacy.io/api/doc
# * NLTK documenation: https://www.nltk.org/
#

# + id="p1vBMS3DbCv7"


# + id="LRNRLbb0bCsN"


# + id="XKart2ZQbCn8"
OH_Introduction_To_NLP_01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# # "Azure Text Summarization"
# > "How to use Azure Text Summarization with PDF, TXT and simple text"
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - categories: [azure, cognitive services, summarization]
# - hide: true
# - search_exclude: false
# - metadata_key1: metadata_value1
# - metadata_key2: metadata_value2

# %pip install azure-ai-textanalytics pdfplumber Unidecode python-dotenv

from typing import List

import pdfplumber
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient
from azure.ai.textanalytics import ExtractSummaryAction
from dotenv import load_dotenv
import os
from unidecode import unidecode

DOTENV_FILEPATH = ''

# BUG FIX: load the .env file *before* reading the credentials. The original
# called os.getenv() at the top and load_dotenv() only further down, so
# CS_ENDPOINT / CS_KEY were always None unless already in the environment.
load_dotenv(DOTENV_FILEPATH)
CS_ENDPOINT = os.getenv('CV_ENDPOINT')
CS_KEY = os.getenv('CV_KEY')


# +
# https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/textanalytics/azure-ai-textanalytics/samples/sample_extract_summary.py
def pdf_parser(
        filepath: str,
        x_tolerance=1,
        y_tolerance=1
) -> List[str]:
    """Extract one ASCII-transliterated text string per PDF page.

    :param filepath: path of the PDF to parse.
    :param x_tolerance: horizontal tolerance passed to pdfplumber's
        character-grouping heuristic.
    :param y_tolerance: vertical tolerance for the same heuristic.
    :return: list with the text of each page, in page order.
    """
    texts = []
    with pdfplumber.open(filepath) as pdf:
        for page in pdf.pages:
            # unidecode folds non-ASCII glyphs so the service gets clean text.
            texts.append(unidecode(page.extract_text(x_tolerance=x_tolerance,
                                                     y_tolerance=y_tolerance)))
    return texts


def split_in_chunks(lst, chunk_size: int):
    """Split `lst` into consecutive chunks of at most `chunk_size` items.

    The Text Analytics API caps the number of documents per request, so
    callers batch pages before submitting them.
    """
    chunked_list = list()
    for i in range(0, len(lst), chunk_size):
        chunked_list.append(lst[i:i+chunk_size])
    return chunked_list


def az_summary(
        texts: List[str],
        cs_endpoint: str,
        cs_key: str,
        language: str
):
    """Run Azure extractive summarization over a batch of texts.

    :param texts: documents to summarize (one string each).
    :param cs_endpoint: Cognitive Services endpoint URL.
    :param cs_key: Cognitive Services API key.
    :param language: ISO language code sent with every document.
    :return: list of non-error ExtractSummaryResult objects.
    """
    # BUG FIX: the original built this document list with a stray `break`
    # (so only the first document was wrapped) and then never used it,
    # which silently ignored the `language` argument. Build it fully and
    # actually send it.
    az_docs = [
        {"id": str(i), "language": language, "text": text}
        for i, text in enumerate(texts)
    ]

    text_analytics_client = TextAnalyticsClient(
        endpoint=cs_endpoint,
        credential=AzureKeyCredential(cs_key),
    )

    poller = text_analytics_client.begin_analyze_actions(
        documents=az_docs,
        actions=[
            ExtractSummaryAction(order_by='rank'),
        ],
    )

    extract_summary_results = []
    # poller.result() yields one list of action results per document.
    for result in poller.result():
        for action_result in result:
            if not action_result['is_error']:
                extract_summary_results.append(action_result)
    return extract_summary_results


def summarize(summaries, thr=0):
    """Collect the sentences whose rank score is at least `thr`.

    Duplicates are removed via set(); note this does not preserve the
    original sentence order.
    """
    sentences = []
    for sr in summaries:
        for sentence in sr.sentences:
            if sentence.rank_score >= thr:
                sentences.append(sentence.text)
    sentences = list(set(sentences))
    return sentences


def summarize_pdf(
        filepath: str,
        cs_endpoint: str,
        cs_key: str,
        language: str,
        thr=0
):
    """Summarize a PDF: parse pages, batch them, and merge the summaries."""
    pdf_text = pdf_parser(filepath=filepath)
    # 25 documents per request keeps each batch under the service limit.
    chunks = split_in_chunks(
        lst=pdf_text,
        chunk_size=25
    )
    summaries = []
    for texts in chunks:
        st = az_summary(
            texts=texts,
            cs_endpoint=cs_endpoint,
            cs_key=cs_key,
            language=language
        )
        summaries.extend(st)
    sentences = summarize(summaries, thr)
    return sentences


def summarize_txt(
        filepath: str,
        cs_endpoint: str,
        cs_key: str,
        language: str,
        thr=0
):
    """Summarize the whole contents of a UTF-8 text file as one document."""
    with open(filepath, 'r', encoding='utf-8') as fh:
        contents = fh.read()
    summary = az_summary(
        texts=[contents],
        cs_endpoint=cs_endpoint,
        cs_key=cs_key,
        language=language
    )
    sentences = summarize(summary, thr)
    return sentences
# -

summary_pdf = summarize_pdf(
    filepath='my_sample.pdf',
    cs_endpoint=CS_ENDPOINT,
    cs_key=CS_KEY,
    language='en',
    thr=0.5
)
print(summary_pdf)

summary_txt = summarize_txt(
    filepath='my_sample.txt',
    cs_endpoint=CS_ENDPOINT,
    cs_key=CS_KEY,
    language='en',
    thr=0.5
)
print(summary_txt)

# BUG FIX: az_summary() takes no `thr` parameter — passing one (as the
# original did) raised TypeError. Apply the threshold via summarize() instead.
summary_text = summarize(
    az_summary(
        texts=["""My sample text"""],
        cs_endpoint=CS_ENDPOINT,
        cs_key=CS_KEY,
        language='en'
    ),
    thr=0.5
)
print(summary_text)
temp/notebooks/2021-11-04-azure_summarization01 copy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Experiments with a min-selection / pruning connected-components algorithm
# on PySpark RDDs (appears to follow the CRACKER family of algorithms —
# each iteration contracts neighborhoods toward their minimum node and
# accumulates a propagation tree T; seeds are the component representatives).
# Three variants are explored below, differing in how seeds are found.

from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("pyspark")
sc = SparkContext(conf=conf)

# +
#btc_raw = sc.textFile("graph_datasets/soc-sign-bitcoinalpha.csv")
#btc_raw.take(10)
#btc = btc_raw.map(lambda x: x.split(",")).map(lambda x: (int(x[0]), int(x[1]))).flatMap(lambda x: [x, (x[1], x[0])]).groupByKey().mapValues(lambda x: set(x))
# -

# # Finding seeds from tree (3 different findSeed functions)

# Small test graph: two connected components {0,1,2,3,4,5,6,7,8} and {10,11,12,13}.
btc_raw = sc.parallelize([(0,1), (1,2), (2,5), (5,8), (7,8), (3,7), (3,4), (3,6), (10,11), (10,12), (12,13)])
# Undirected adjacency as (node, set-of-neighbors) pairs.
G = btc_raw.flatMap(lambda x: [x, (x[1], x[0])]).groupByKey().mapValues(lambda x: set(x))

# +
def Min_Selection_Step(G):
    # Connect every node of each closed neighborhood to the neighborhood's
    # minimum node.
    #dictionary format RDD
    v_min = G.map(lambda x: (x[0], min(x[1] | {x[0]})))
    NN_G_u = G.map(lambda x: (x[0], (x[1] | {x[0]})))
    #Broadcasting
    v_min_bc = sc.broadcast(dict(v_min.collect()))
    addEdge = NN_G_u.map(lambda x: (x[0], (x[1], v_min_bc.value[x[0]]))).flatMap(lambda x: [(y, x[1][1]) for y in x[1][0]])
    #Without broadcasting
    #addEdge = NN_G_u.join(v_min).flatMap(lambda x: [(y, x[1][1]) for y in x[1][0]])
    #H = addEdge.groupByKey().map(lambda x: (x[0], set(x[1])))
    H = addEdge.groupByKey().mapValues(lambda x: set(x))
    return H

def Pruning_Step(H, T):
    # Deactivate non-minimum nodes, shrink the graph for the next round,
    # and record parent links in the propagation tree T.
    #minimum node of the neighborhood: shared for following parts
    v_min = H.map(lambda x: (x[0], min(x[1])))
    v_min_bc = sc.broadcast(dict(v_min.collect())) #Broadcasting v_min
    #---------------G construction-------------------
    H_filtered = H.filter(lambda x: len(x[1]) > 1)
    #NN_H_u = H_filtered.map(lambda x: (x[0], x[1] - {min(x[1])} ))
    NN_H_u = H_filtered.mapValues(lambda x: x - {min(x)} )
    #With Broadcasting
    addEdge2=NN_H_u.map(lambda x:(x[0],(x[1],v_min_bc.value[x[0]]))).flatMap(lambda x:[(x[1][1],y) for y in x[1][0]])
    #Without broadcasting
    #addEdge2 = NN_H_u.join(v_min).flatMap(lambda x: [(x[1][1], y) for y in x[1][0]])
    G = addEdge2.flatMap(lambda x: [x, (x[1], x[0])]).groupByKey().mapValues(lambda x: set(x))
    #---------------Tree construction--------------
    # Nodes not in their own neighborhood are deactivated; link each one
    # to its neighborhood minimum in T.
    deactiveNodes = H.filter(lambda x: x[0] not in x[1]).mapValues(lambda x: None)
    #Without broadcasting
    #addEdge3 = deactiveNodes.join(v_min).map(lambda x: (x[1][1], x[0]))
    #With Broadcasting
    addEdge3 = deactiveNodes.map(lambda x: (x[0], (x[1], v_min_bc.value[x[0]]))).map(lambda x: (x[1][1], x[0]))
    T = T.union(addEdge3)
    return [G, T]

#Finding seeds
def findSeeds(T):
    # Seeds are tree nodes that appear as parents but never as children.
    keys = T
    values = T.map(lambda x:(x[1], x[0]))
    return keys.subtractByKey(values).keys().distinct()

def findSeeds1(T):
    # Variant: de-duplicate before the subtract instead of after.
    keys = T.keys().distinct().map(lambda x:(x,1))
    values = T.values().distinct().map(lambda x:(x,1))
    return keys.subtractByKey(values).keys()

def findSeeds2(T):
    # Variant: left outer join, keep keys with no match on the child side.
    T_inv = T.map(lambda x:(x[1], x[0]))
    A = T.keys().distinct().map(lambda x:(x,1)) #Each distinct is a reduceByKey
    B = T_inv.keys().distinct().map(lambda x:(x,1))
    return A.leftOuterJoin(B).filter(lambda x: not x[1][1]).keys()
# -

def Cracker(G):
    # Iterate min-selection + pruning until the graph is empty, then
    # extract the seeds from the accumulated tree.
    n = 0
    T = sc.parallelize([])
    while G.take(1):
        n += 1
        H = Min_Selection_Step(G)
        G, T = Pruning_Step(H, T)
    return findSeeds2(T)

# %%time
#Cracker with findSeeds
Cracker(G).collect()

# # Tracking activeness and seeds

btc_raw = sc.parallelize([(0,1), (1,2), (2,5), (5,8), (7,8), (3,7), (3,4), (3,6), (10,11), (10,12), (12,13)])
# Values are (neighbor-set, active-flag) pairs in this variant.
G2 = btc_raw.flatMap(lambda x: [x, (x[1], x[0])]).groupByKey().mapValues(lambda x: (set(x), True))

# +
def Min_Selection_Step(G):
    # Same as above but carries an activeness boolean alongside each
    # neighbor set; a node stays active only if all contributors were.
    #dictionary format RDD
    v_min = G.map(lambda x: (x[0], min(x[1][0] | {x[0]})))
    NN_G_u = G.map(lambda x: (x[0], (x[1][0] | {x[0]}, x[1][1])))
    #Broadcasting
    v_min_bc = sc.broadcast(dict(v_min.collect()))
    addEdge = NN_G_u.map(lambda x: (x[0], (x[1][0], v_min_bc.value[x[0]], x[1][1])))
    addEdge1 = addEdge.flatMap(lambda x: [(y, (x[1][1], x[1][2])) for y in x[1][0]])
    temp = addEdge1.groupByKey().map(lambda x: (x[0], (list(x[1]))))
    H = temp.map(lambda x: (x[0], list(zip(*x[1])))).mapValues(lambda x: (set(x[0]), all(x[1])))
    return H

def Pruning_Step(H, T, Seeds):
    # Prune as before, and additionally collect seeds on the fly.
    #minimum node of the neighborhood: shared for following parts
    v_min = H.map(lambda x: (x[0], min(x[1][0])))
    v_min_bc = sc.broadcast(dict(v_min.collect())) #Broadcasting v_min
    #---------------G construction-------------------
    H_filtered = H.filter(lambda x: len(x[1][0]) > 1)
    NN_H_u = H_filtered.mapValues(lambda x: (x[0] - {min(x[0])}, x[1] ))
    #With Broadcasting
    addEdge2=NN_H_u.map(lambda x:(x[0],(x[1][0],v_min_bc.value[x[0]], x[1][1]))).flatMap(lambda x:[(x[1][1],(y, x[1][2])) for y in x[1][0]])
    temp = addEdge2.flatMap(lambda x: [x, (x[1][0], (x[0],x[1][1]))]).groupByKey().mapValues(lambda x: list(x))
    G = temp.mapValues(lambda x: list(zip(*x))).mapValues(lambda x: (set(x[0]), all(x[1])))
    #---------------Tree construction--------------
    #The deactivated Nodes do not appear in G_{t+1}
    deactiveNodes = H.filter(lambda x: x[0] not in x[1][0]).mapValues(lambda x: False)
    #With Broadcasting
    addEdge3 = deactiveNodes.map(lambda x: (x[0], (x[1], v_min_bc.value[x[0]]))).map(lambda x: (x[1][1], x[0]))
    T = T.union(addEdge3)
    #--------------Find Seed-----------------
    #Elements in H with neighborhood from G_{t+1}
    NN_G_H = H.cogroup(G).mapValues(lambda x: arr( (list(x[0]), list(x[1])) ) )
    #---->Not sure if it is necessary to use True/False
    #deactivated = NN_G_H.cogroup(deactiveNodes).map(lambda x: (x[0], arr2( (list(x[1][0]), list(x[1][1])) ) ) )
    #seed = deactivated.filter(lambda x: (len(x[1][0]) <= 1) & (x[0] in x[1][0]) & x[1][1])
    # A node whose merged neighborhood is only itself is a component seed.
    seed = NN_G_H.filter(lambda x: (len(x[1][0]) <= 1) & (x[0] in x[1][0]))
    Seeds = Seeds.union(seed)
    return [G, T, Seeds]

def arr(value):
    # Merge a node's H entry with its (optional) G entry: union the
    # neighbor sets and AND the activeness flags.
    if not value[1]:
        return value[0][0]
    else:
        temp = list(zip(*[value[0][0], value[1][0]]))
        return temp[0][0].union(temp[0][1]), all(temp[1])

def arr2(value):
    # Mark an entry as inactive when a matching deactivation record exists.
    if not value[1]:
        return value[0][0]
    else:
        return (value[0][0][0], False)
# -

def Cracker(G):
    # Variant that accumulates seeds during pruning instead of deriving
    # them from the tree afterwards.
    n = 0
    T = sc.parallelize([])
    Seeds = sc.parallelize([])
    while G.take(1):
        n += 1
        H = Min_Selection_Step(G)
        G, T, Seeds = Pruning_Step(H, T, Seeds)
    return Seeds.keys()

# %%time
Cracker(G2).collect()

# # Tracking seeds without activeness

btc_raw = sc.parallelize([(0,1), (1,2), (2,5), (5,8), (7,8), (3,7), (3,4), (3,6), (10,11), (10,12), (12,13)])
G = btc_raw.flatMap(lambda x: [x, (x[1], x[0])]).groupByKey().mapValues(lambda x: set(x))

# +
def Min_Selection_Step(G):
    # Plain-set variant (no activeness flag), as in the first section.
    #dictionary format RDD
    v_min = G.map(lambda x: (x[0], min(x[1] | {x[0]})))
    NN_G_u = G.map(lambda x: (x[0], x[1] | {x[0]}))
    #Broadcasting
    v_min_bc = sc.broadcast(dict(v_min.collect()))
    addEdge = NN_G_u.map(lambda x: (x[0], (x[1], v_min_bc.value[x[0]])) )
    addEdge1 = addEdge.flatMap(lambda x: [(y, x[1][1]) for y in x[1][0]])
    #Without broadcasting
    #addEdge = NN_G_u.join(v_min).flatMap(lambda x: [(y, x[1][1]) for y in x[1][0]])
    H = addEdge1.groupByKey().mapValues(lambda x: set(x))
    return H

def Pruning_Step(H, T, Seeds):
    # Prune and collect seeds on the fly, without tracking activeness.
    #minimum node of the neighborhood: shared for following parts
    v_min = H.mapValues(lambda x: min(x))
    v_min_bc = sc.broadcast(dict(v_min.collect())) #Broadcasting v_min
    #---------------G construction-------------------
    H_filtered = H.filter(lambda x: len(x[1]) > 1)
    NN_H_u = H_filtered.mapValues(lambda x: x - {min(x)} )
    #With Broadcasting
    addEdge2=NN_H_u.map(lambda x:(x[0],(x[1],v_min_bc.value[x[0]]))).flatMap(lambda x:[(x[1][1],y) for y in x[1][0]])
    #Without broadcasting
    #addEdge2 = NN_H_u.join(v_min).flatMap(lambda x: [(x[1][1], y) for y in x[1][0]])
    G = addEdge2.flatMap(lambda x: [x, (x[1], x[0])]).groupByKey().mapValues(lambda x: set(x))
    #---------------Tree construction--------------
    #The deactivated Nodes do not appear in G_{t+1}
    deactiveNodes = H.filter(lambda x: x[0] not in x[1]).mapValues(lambda x: False)
    #Without broadcasting
    #addEdge3 = deactiveNodes.join(v_min).map(lambda x: (x[1][1], x[0]))
    #With Broadcasting
    addEdge3 = deactiveNodes.map(lambda x: (x[0], (x[1], v_min_bc.value[x[0]]))).map(lambda x: (x[1][1], x[0]))
    T = T.union(addEdge3)
    #--------------Find Seed-----------------
    #Elements in H with neighborhood from G_{t+1}
    NN_G_H = H.cogroup(G).mapValues(lambda x: (list(x[0]), list(x[1])) ).mapValues(lambda x: set_join(x) )
    #Not sure is necessary to use True/False
    #deactivated = NN_G_H.cogroup(deactiveNodes).map(lambda x: (x[0], (list(x[1][0]), list(x[1][1])) ))
    #seed = deactivated.filter(lambda x: (len(x[1][0]) <= 1) & (x[0] in x[1][0]) & x[1][1])
    # A node whose merged neighborhood is only itself is a component seed.
    seed = NN_G_H.filter(lambda x: (len(x[1]) <= 1) & (x[0] in x[1]))
    Seeds = Seeds.union(seed)
    return [G, T, Seeds]

def set_join(value):
    # Union a node's H neighbor set with its (optional) G neighbor set.
    if not value[1]:
        return value[0][0]
    else:
        return value[0][0] | value[1][0]
# -

def Cracker(G):
    # Same driver as the activeness variant: returns the seed nodes.
    n = 0
    T = sc.parallelize([])
    Seeds = sc.parallelize([])
    while G.take(1):
        n += 1
        H = Min_Selection_Step(G)
        G, T, Seeds = Pruning_Step(H, T, Seeds)
    return Seeds.keys()

# %%time
Cracker(G).collect()
rdd_seeds_optim.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Exploring Rossmann Drug Store Sales Data
#
# NOTE: this notebook originally mixed the long-removed module-level pandas
# window functions (`pd.rolling_mean`, `pd.expanding_mean`, `pd.expanding_sum`,
# `resample(..., how=...)`) with the modern method chains it also used in
# places. All calls below use the modern `.resample(...).agg()` /
# `.rolling(...).mean()` / `.expanding()...` API so the notebook runs on
# pandas >= 0.23.

# +
import pandas as pd

data = pd.read_csv('../../assets/dataset/rossmann.csv', skipinitialspace=True, low_memory=False)
data
# -

# Because we are most interested in the `Date` column that contains the date of sales for each store, we will make sure to process that as a `DateTime` type, and make that the index of our dataframe.

# +
data['Date'] = pd.to_datetime(data['Date'])
data.set_index('Date', inplace=True)

# Convenience columns for grouping/plotting by calendar period.
data['Year'] = data.index.year
data['Month'] = data.index.month
# -

store1_data = data[data.Store == 1]
store1_data

# ### Data Exploration and Mining

# To compare sales on holidays, we can compare the sales using box-plots, which allows us to compare the distribution of sales on holidays against all other days. On state holidays the store is closed (and as a nice sanity check there are 0 sales), and on school holidays the sales are relatively similar.

# +
import seaborn as sb
# %matplotlib inline

# NOTE(review): `factorplot` was renamed `catplot` in seaborn >= 0.9; rename
# these calls if the environment's seaborn no longer provides the alias.
sb.factorplot(
    x='SchoolHoliday',
    y='Sales',
    data=store1_data,
    kind='box'
)
# -

sb.factorplot(
    x='StateHoliday',
    y='Sales',
    data=store1_data,
    kind='box'
)

# > Check: See if there is a difference affecting sales on promotion days.

# +
sb.factorplot(
    col='Promo',
    x='DayOfWeek',
    y='Sales',
    data=store1_data,
    kind='box',
)
# -

# Lastly, we want to identify larger-scale trends in our data. How did sales change from 2014 to 2015? Were there any particularly interesting outliers in terms of sales or customer visits?

# Filter to days store 1 was open
store1_open_data = store1_data[store1_data.Open==1]
store1_open_data[['Sales']].plot()

store1_open_data[['Customers']].plot()

# In pandas we can compute rolling averages using the `.rolling(...).mean()` (or `.rolling(...).median()`) methods.

# ### Data REFINING Using Time Series Statistics

# ### Autocorrelation
# To measure how much the sales are correlated with each other, we want to compute the _autocorrelation_ of the 'Sales' column. In pandas, we do this we with the `autocorr` function.
#
# `autocorr` takes one argument, the `lag` - which is how many prior data points should be used to compute the correlation. If we set the `lag` to 1, we compute the correlation between every point and the point directly preceding it, while setting `lag` to 10, computes the correlation between every point and the point 10 days earlier.

data['Sales'].resample('D').mean().autocorr(lag=1)

# ### Rolling Averages
# If we want to investigate trends over time in sales, as always, we will start by computing simple aggregates. We want to know what the mean and median sales were for each month and year.
#
# In Pandas, this is performed using the `resample` command, which is very similar to the `groupby` command. It allows us to group over different time intervals.
#
# We can use `data.resample` and provide as arguments:
# - The level on which to roll-up to, 'D' for day, 'W' for week, 'M' for month, 'A' for year
# - What aggregation to perform: 'mean', 'median', 'sum', etc.

print(data[['Sales']].resample('M').mean().head())
print(data[['Sales']].resample('M').median().head())

# While identifying the monthly averages are useful, we often want to compare the sales data of a date to a smaller window. To understand holidays sales, we don't want to compare late December with the entire month, but perhaps a few days surrounding it. We can do this using rolling averages.
#
# In pandas, we compute rolling averages with the `.rolling(...).mean()` method.

# +
data[['Sales']].resample('D').mean().rolling(window=3, center=True).mean().head()
# -

# `.rolling(...)` takes these important parameters:
# - `window` is the number of days to include in the average
# - `center` is whether the window should be centered on the date or use data prior to that date
# The roll-up frequency is set beforehand with `.resample('D').mean()` (use `M` for month, `A` for year, etc.).

# Instead of plotting the full timeseries, we can plot the rolling mean instead, which smooths random changes in sales as well as removing outliers, helping us identify larger trends.

data[['Sales']].resample('D').mean().rolling(window=10, center=True).mean().plot()

# With window=1 the "rolling mean" is just the daily series itself.
data[['Sales']].resample('D').mean().rolling(window=1, center=True).mean()

# ### Pandas Window functions
# Rolling mean and median are only two examples of Pandas window function capabilities. Window functions operate on a set of N consecutive rows (a window) and produce an output: sum, min, max and many more are available through `.rolling(...)`.
#
# Another common one is `diff`, which takes the difference over time. `.diff` takes one argument, `periods`, which is how many prior rows to use for the difference.

data['Sales'].diff(periods=1).head()

store1_open_data['Sales'].diff(periods=7).plot()

store1_open_data['Sales'].diff(periods=1).plot()

data[['Sales']].resample('D').mean().rolling(window=10, center=True).mean().plot()

# ### Pandas expanding functions
#
# In addition to rolling windows, Pandas provides `expanding` windows, which, instead of using a window of N values, use all values up until that time.

# computes the average sales, from the first date _until_ the date specified.
data[['Sales']].resample('D').mean().expanding().mean().head()

data['Sales']

# ## Exercises

# > Plot the distribution of sales by month and compare the effect of promotions

sb.factorplot(
    col='Open',
    hue='Promo',
    x='Month',
    y='Sales',
    data=store1_data,
    kind='box'
)

# > Are sales more correlated with the prior date, a similar date last year, or a similar date last month?

# +
average_daily_sales = data[['Sales', 'Open']].resample('D').mean()

print('Correlation with last day: {}'.format(average_daily_sales['Sales'].autocorr(lag=1)))
print('Correlation with last month: {}'.format(average_daily_sales['Sales'].autocorr(lag=30)))
print('Correlation with last year: {}'.format(average_daily_sales['Sales'].autocorr(lag=365)))
# -

# > Plot the 15 day rolling mean of customers in the stores

data[['Customers']].resample('D').mean().rolling(window=15).mean().plot()

# > Identify the date with largest drop in sales from the same date in the previous week

# +
average_daily_sales = data[['Sales', 'Open']].resample('D').mean()
average_daily_sales['DiffVsLastWeek'] = average_daily_sales[['Sales']].diff(periods=7)

average_daily_sales.sort_values(by='DiffVsLastWeek').head()

# Unsurprisingly, this day is Dec. 25 and Dec. 26 in 2014 and 2015. When the store is closed and there are many sales in the preceding week. How about when the store is open?

# +
average_daily_sales[average_daily_sales.Open == 1].sort_values(by='DiffVsLastWeek').head()

# The top values are Dec. 24 and then 2013-12-09 and 2013-10-14 where on average sales were 4k lower than the same day in the previous week.
# -

average_daily_sales = data[['Sales', 'Open']].resample('D').mean()
average_daily_sales

# > Compute the total sales up until Dec. 2014

# +
total_daily_sales = data[['Sales']].resample('D').sum()
total_daily_sales.expanding().sum().loc['2014-12'].head()

# Note that this is **not** the same as expanding over the raw (per-store)
# rows, since we first total across stores per day.
# -

# > When were the largest differences between 15-day moving/rolling averages?
# > HINT: Using a rolling mean and `diff`

# +
data[['Sales']].resample('D').mean().rolling(window=15).mean().diff(1).sort_values(by='Sales').head()

# Unsurprisingly, they occur at the beginning of every year after the holiday season.
# -
general_assembly/15_time_series_analysis/solution-code-15.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Single servo, driven with raw PWM.
import asyncio
from obniz import Obniz

# Servo pulse widths in ms: min 0.5, max 2.4, middle 1.4


async def onconnect(obniz):
    # Power the servo: io0 = GND, io1 = VCC, io2 = signal (PWM).
    obniz.io0.output(False)
    obniz.io1.output(True)
    pwm = obniz.get_free_pwm()
    pwm.start({"io": 2})
    pwm.freq(100)

    def read_state(state):
        # Map the on-board switch position to a pulse width.
        if state == "right":
            val = 1.0
            pwm.pulse(val)
            print("r")
        elif state == "left":
            val = 1.8
            pwm.pulse(val)
            print("l")
        elif state == "none":
            val = 1.4
            pwm.pulse(val)

    obniz.switch.onchange = read_state

obniz = Obniz('4542-8598')
obniz.onconnect = onconnect
asyncio.get_event_loop().run_forever()

# +
# Four servos via the ServoMotor part driver, moved slowly.
import asyncio
from obniz import Obniz

# Servo pulse widths in ms: min 0.5, max 2.4, middle 1.4


async def onconnect(obniz):
    obniz.io0.output(False)
    obniz.io1.output(True)
    pwm = obniz.get_free_pwm()
    #pwm.start({"io": 2})
    #pwm.freq(100)
    servo2 = obniz.wired("ServoMotor", {"pwm": pwm})
    # BUG FIX: the wiring dicts previously used bare names (gnd:3, vcc:4,
    # signal:5), which raised NameError at runtime; obniz.wired() expects
    # string keys.
    servo3 = obniz.wired("ServoMotor", {"gnd": 3, "vcc": 4, "signal": 5})
    servo4 = obniz.wired("ServoMotor", {"gnd": 6, "vcc": 7, "signal": 8})
    servo5 = obniz.wired("ServoMotor", {"gnd": 9, "vcc": 10, "signal": 11})

    # Exercise servo2 across its range, ending at the center position.
    servo2.angle(180.0)
    servo2.angle(0.0)
    servo2.angle(90.0)

    def read_state(state):
        # Map the on-board switch position to a servo angle.
        if state == "right":
            print("r")
            servo2.angle(180.0)
        elif state == "left":
            print("l")
            servo2.angle(0.0)
        elif state == "none":
            servo2.angle(90.0)

    obniz.switch.onchange = read_state

obniz = Obniz('4542-8598')
obniz.onconnect = onconnect
asyncio.get_event_loop().run_forever()
# -
skeleton.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Spleen 3D segmentation with MONAI
# This tutorial demonstrates how MONAI can be used in conjunction with the [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) framework.
#
# We demonstrate use of the following MONAI features:
# 1. Transforms for dictionary format data.
# 2. Loading Nifti images with metadata.
# 3. Add channel dim to the data if no channel dimension.
# 4. Scaling medical image intensity with expected range.
# 5. Cropping out a batch of balanced images based on the positive / negative label ratio.
# 6. Cache IO and transforms to accelerate training and validation.
# 7. Use of a 3D UNet model, Dice loss function, and mean Dice metric for a 3D segmentation task.
# 8. The sliding window inference method.
# 9. Deterministic training for reproducibility.
#
# The training Spleen dataset used in this example can be downloaded from http://medicaldecathlon.com/
#
# ![spleen](http://medicaldecathlon.com/img/spleen0.png)
#
# Target: Spleen
# Modality: CT
# Size: 61 3D volumes (41 Training + 20 Testing)
# Source: Memorial Sloan Kettering Cancer Center
# Challenge: Large ranging foreground size

# In addition to the usual MONAI requirements you will need Lightning installed.

# ! pip install pytorch-lightning
# ! pip install ipywidgets
# ! jupyter nbextension enable --py widgetsnbextension

# +
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import glob
import numpy as np
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import monai
from monai.transforms import \
    Compose, LoadNiftid, AddChanneld, ScaleIntensityRanged, RandCropByPosNegLabeld, \
    CropForegroundd, RandAffined, Spacingd, Orientationd, ToTensord
from monai.data import list_data_collate
from monai.inferers import sliding_window_inference
from monai.networks.layers import Norm
from monai.metrics import compute_meandice
from monai.utils import set_determinism
from pytorch_lightning import LightningModule, Trainer, loggers
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint

monai.config.print_config()
# -

# ## Define the LightningModule
#
# The LightningModule contains a refactoring of your training code. The following module is a refactoring of the code in `spleen_segmentation_3d.ipynb`:


class Net(LightningModule):
    """3D UNet spleen segmentation wrapped as a PyTorch Lightning module.

    NOTE(review): written against an early MONAI / Lightning API
    (LoadNiftid, interp_order=, checkpoint_callback=, gpus=); the names have
    since been renamed upstream — pin the versions before reuse.
    """

    def __init__(self):
        super().__init__()
        # 5-level residual 3D UNet: 1 input channel (CT), 2 output channels
        # (background / spleen), batch norm.
        self._model = monai.networks.nets.UNet(dimensions=3, in_channels=1, out_channels=2,
                                               channels=(16, 32, 64, 128, 256), strides=(2, 2, 2, 2),
                                               num_res_units=2, norm=Norm.BATCH)
        self.loss_function = monai.losses.DiceLoss(to_onehot_y=True, softmax=True)
        # Best mean-Dice seen so far and the epoch it occurred at.
        self.best_val_dice = 0
        self.best_val_epoch = 0

    def forward(self, x):
        """Run the UNet on a (N, 1, D, H, W) image batch."""
        return self._model(x)

    def prepare_data(self):
        """Build train/val file lists and the cached, transformed datasets."""
        # set up the correct data path
        data_root = '/workspace/data/medical/Task09_Spleen'
        train_images = sorted(glob.glob(os.path.join(data_root, 'imagesTr', '*.nii.gz')))
        train_labels = sorted(glob.glob(os.path.join(data_root, 'labelsTr', '*.nii.gz')))
        data_dicts = [{'image': image_name, 'label': label_name}
                      for image_name, label_name in zip(train_images, train_labels)]
        # Last 9 volumes are held out for validation.
        train_files, val_files = data_dicts[:-9], data_dicts[-9:]

        # set deterministic training for reproducibility
        set_determinism(seed=0)

        # define the data transforms
        train_transforms = Compose([
            LoadNiftid(keys=['image', 'label']),
            AddChanneld(keys=['image', 'label']),
            # Resample to a common voxel spacing; cubic for image, nearest for label.
            Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)),
            Orientationd(keys=['image', 'label'], axcodes='RAS'),
            # Window the CT intensities to the soft-tissue range, scale to [0, 1].
            ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
            CropForegroundd(keys=['image', 'label'], source_key='image'),
            # randomly crop out patch samples from big image based on pos / neg ratio
            # the image centers of negative samples must be in valid image area
            RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', size=(96, 96, 96), pos=1,
                                   neg=1, num_samples=4, image_key='image', image_threshold=0),
            # user can also add other random transforms
            # RandAffined(keys=['image', 'label'], mode=('bilinear', 'nearest'), prob=1.0, spatial_size=(96, 96, 96),
            #             rotate_range=(0, 0, np.pi/15), scale_range=(0.1, 0.1, 0.1)),
            ToTensord(keys=['image', 'label'])
        ])
        # Same deterministic pipeline as training, minus the random crop.
        val_transforms = Compose([
            LoadNiftid(keys=['image', 'label']),
            AddChanneld(keys=['image', 'label']),
            Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)),
            Orientationd(keys=['image', 'label'], axcodes='RAS'),
            ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
            CropForegroundd(keys=['image', 'label'], source_key='image'),
            ToTensord(keys=['image', 'label'])
        ])

        # we use cached datasets - these are 10x faster than regular datasets
        self.train_ds = monai.data.CacheDataset(
            data=train_files, transform=train_transforms, cache_rate=1.0, num_workers=4
        )
        self.val_ds = monai.data.CacheDataset(
            data=val_files, transform=val_transforms, cache_rate=1.0, num_workers=4
        )
        #self.train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
        #self.val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)

    def train_dataloader(self):
        """Training loader; list_data_collate flattens the num_samples crops."""
        train_loader = DataLoader(self.train_ds, batch_size=2, shuffle=True, num_workers=4,
                                  collate_fn=list_data_collate)
        return train_loader

    def val_dataloader(self):
        """Validation loader; one whole volume per batch."""
        val_loader = DataLoader(self.val_ds, batch_size=1, num_workers=4)
        return val_loader

    def configure_optimizers(self):
        """Adam at a fixed 1e-4 learning rate."""
        optimizer = torch.optim.Adam(self._model.parameters(), 1e-4)
        return optimizer

    def training_step(self, batch, batch_idx):
        """One optimisation step: forward, Dice loss, log the scalar."""
        images, labels = batch['image'], batch['label']
        output = self.forward(images)
        loss = self.loss_function(output, labels)
        tensorboard_logs = {'train_loss': loss.item()}
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_idx):
        """Sliding-window inference over a full volume; returns loss + Dice."""
        images, labels = batch['image'], batch['label']
        roi_size = (160, 160, 160)
        sw_batch_size = 4
        outputs = sliding_window_inference(images, roi_size, sw_batch_size, self.forward)
        loss = self.loss_function(outputs, labels)
        value = compute_meandice(y_pred=outputs, y=labels, include_background=False,
                                 to_onehot_y=True, mutually_exclusive=True)
        return {'val_loss': loss, 'val_dice': value}

    def validation_epoch_end(self, outputs):
        """Aggregate per-batch Dice into an epoch mean and track the best."""
        val_dice = 0
        num_items = 0
        for output in outputs:
            val_dice += output['val_dice'].sum().item()
            num_items += len(output['val_dice'])
        mean_val_dice = val_dice / num_items
        tensorboard_logs = {'val_dice': mean_val_dice}
        if mean_val_dice > self.best_val_dice:
            self.best_val_dice = mean_val_dice
            self.best_val_epoch = self.current_epoch
        print('current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}'.format(
            self.current_epoch, mean_val_dice, self.best_val_dice, self.best_val_epoch))
        return {'log': tensorboard_logs}


# ## Run the training

# +
# initialise the LightningModule
net = Net()

# set up loggers and checkpoints
tb_logger = loggers.TensorBoardLogger(save_dir='logs')
checkpoint_callback = ModelCheckpoint(filepath='logs/{epoch}-{val_loss:.2f}-{val_dice:.2f}')

# initialise Lightning's trainer.
trainer = Trainer(gpus=[0],
                  max_epochs=600,
                  logger=tb_logger,
                  checkpoint_callback=checkpoint_callback,
                  show_progress_bar=False,
                  num_sanity_val_steps=1
                  )

# train
trainer.fit(net)
# -

print('train completed, best_metric: {:.4f} at epoch {}'.format(net.best_val_dice, net.best_val_epoch))

# ## View training in tensorboard

# %load_ext tensorboard
# %tensorboard --logdir='logs'

# ## Check best model output with the input image and label

# + jupyter={"outputs_hidden": true}
net.eval()
device = torch.device('cuda:0')
with torch.no_grad():
    for i, val_data in enumerate(net.val_dataloader()):
        roi_size = (160, 160, 160)
        sw_batch_size = 4
        val_outputs = sliding_window_inference(val_data['image'].to(device), roi_size, sw_batch_size, net)
        # plot the slice [:, :, 80]
        plt.figure('check', (18, 6))
        plt.subplot(1, 3, 1)
        plt.title('image ' + str(i))
        plt.imshow(val_data['image'][0, 0, :, :, 80], cmap='gray')
        plt.subplot(1, 3, 2)
        plt.title('label ' + str(i))
        plt.imshow(val_data['label'][0, 0, :, :, 80])
        plt.subplot(1, 3, 3)
        plt.title('output ' + str(i))
        plt.imshow(torch.argmax(val_outputs, dim=1).detach().cpu()[0, :, :, 80])
        plt.show()
examples/notebooks/spleen_segmentation_3d_lightning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h1 style=font-size:40px>Predicting Material Backorders in Inventory Management</h1>

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import accuracy_score, roc_curve, auc
import warnings
warnings.filterwarnings("ignore")
# -

# constants calculated from eda & feature engineering
lead_time_mean = float(np.load('lead_time_mean.npy'))
potential_issue_probability_matrix = pd.read_csv('potential_issue_probability_matrix.csv')
deck_risk_probability_matrix = pd.read_csv('deck_risk_probability_matrix.csv')
oe_constraint_probability_matrix = pd.read_csv('oe_constraint_probability_matrix.csv')
ppap_risk_probability_matrix = pd.read_csv('ppap_risk_probability_matrix.csv')
stop_auto_buy_probability_matrix = pd.read_csv('stop_auto_buy_probability_matrix.csv')
rev_stop_probability_matrix = pd.read_csv('rev_stop_probability_matrix.csv')

data = pd.read_csv("test_dataset_v2.csv")
y = data['went_on_backorder']
x = data.drop('went_on_backorder', axis=1)

# Raw feature order expected when a single datapoint arrives as a dict.
_INPUT_COLUMNS = ['sku', 'national_inv', 'lead_time', 'in_transit_qty',
                  'forecast_3_month', 'forecast_6_month', 'forecast_9_month',
                  'sales_1_month', 'sales_3_month', 'sales_6_month',
                  'sales_9_month', 'min_bank', 'potential_issue',
                  'pieces_past_due', 'perf_6_month_avg', 'perf_12_month_avg',
                  'local_bo_qty', 'deck_risk', 'oe_constraint', 'ppap_risk',
                  'stop_auto_buy', 'rev_stop']

# Categorical flag -> response-probability encoding table (built during EDA).
_PROBABILITY_MATRICES = {
    'potential_issue': potential_issue_probability_matrix,
    'deck_risk': deck_risk_probability_matrix,
    'oe_constraint': oe_constraint_probability_matrix,
    'ppap_risk': ppap_risk_probability_matrix,
    'stop_auto_buy': stop_auto_buy_probability_matrix,
    'rev_stop': rev_stop_probability_matrix,
}


def _preprocess_features(dataframe):
    """Apply the shared feature pipeline used by final_fun_1 and final_fun_2.

    Drops 'sku', removes a trailing all-NaN row, mean-imputes lead_time,
    maps Yes/No to 1/0, adds the two binary indicator columns, and replaces
    each categorical flag with its precomputed backorder probability.
    Returns a new DataFrame; the caller's frame is not mutated.
    """
    dataframe = dataframe.drop('sku', axis=1)  # identifier, not a feature
    # Some exported CSVs carry a trailing summary row of NaNs — drop it.
    if dataframe.iloc[-1].isna().all():
        dataframe = dataframe[:-1]
    dataframe = dataframe.fillna(lead_time_mean)  # mean imputation (lead_time is the only NaN source)
    dataframe = dataframe.replace({'Yes': 1, 'No': 0})

    # Binary indicators: 1 when the quantity is strictly positive.
    for src, dst in (('pieces_past_due', 'binary_pieces_past_due'),
                     ('local_bo_qty', 'binary_local_bo_qty')):
        dataframe[dst] = np.select([dataframe[src] == 0, dataframe[src] > 0], [0, 1])

    # Probability-encode every categorical flag from its EDA lookup table.
    for column, matrix in _PROBABILITY_MATRICES.items():
        conditions = [dataframe[column] == 0, dataframe[column] == 1]
        values = [matrix['No'][0], matrix['No'][1]]
        dataframe[column] = np.select(conditions, values)
    return dataframe


def _load_best_model():
    """Load the trained random forest, closing the file handle properly.

    BUG FIX: the original pickle.load(open(...)) leaked the file object.
    """
    with open('best_model_forest.h5', 'rb') as model_file:
        return pickle.load(model_file)


# # final_func_1

def final_fun_1(x):
    """Predict whether products went on backorder (0 = no, 1 = yes).

    Accepts either a DataFrame of raw rows or a dict for a single datapoint.
    Returns an array of predictions, or a plain int for a single datapoint.
    """
    if isinstance(x, dict):
        dataframe = pd.DataFrame(x, index=[0], columns=_INPUT_COLUMNS)
    else:
        dataframe = x

    dataframe = _preprocess_features(dataframe)
    predictions = _load_best_model().predict(dataframe)
    if len(predictions) == 1:
        predictions = int(predictions[0])
    return predictions


a = final_fun_1(x)  # taking entire dataframe as input

one_datapoint = dict(x.loc[0])
print(one_datapoint)

final_fun_1(one_datapoint)  # taking one datapoint(dict) as input


# # final_func_2

def final_fun_2(x, y):
    """Predict on x, score against labels y, and plot the ROC curve.

    Prints accuracy and AUC. BUG FIXES vs. the original: uses pd.isna
    (np.isnan raises TypeError on the 'Yes'/'No' strings), and no longer
    mutates the caller's y via inplace replace.
    """
    # Drop a trailing NaN label that pairs with the all-NaN summary row.
    if pd.isna(y.iloc[-1]):
        y = y[:-1]
    y = y.replace({'Yes': 1, 'No': 0})

    features = _preprocess_features(x)
    best_model = _load_best_model()
    predictions = best_model.predict(features)

    # printing metrics
    print('Accuracy:', accuracy_score(y, predictions))

    # plotting the ROC curve from predicted probabilities
    y_pred = best_model.predict_proba(features)[:, 1]
    fpr, tpr, thresholds = roc_curve(y, y_pred)
    print('AUC:', auc(fpr, tpr))
    plt.plot(fpr, tpr, label="AUC =" + ' ' + str(auc(fpr, tpr)))
    plt.legend()
    plt.xlabel("FPR")
    plt.ylabel("TPR")
    plt.title("ROC-AUC Curve")
    plt.grid()
    plt.show()


final_fun_2(x, y)
final_updated.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.2 64-bit
#     name: python38264bit440a9b05b92d4257b31359ad5d640545
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/HenryLiangzy/COMP9417_Project/blob/master/Henry_s_work.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

# # Henry's Work
# Using the Decision tree to train the data
# while using the three Bag of word models:
# * Count Vectorizer
# * TF-IDF Vectorizer
# * Hashing Vectorizer

# + id="yRWj8y16mGl1" colab_type="code" colab={}
import datetime
import pickle

import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn import tree
from sklearn import preprocessing
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt

# + id="VR9NDtB4mcTX" colab_type="code" colab={}
np.random.seed(1)

TRAINING_FILE = "training.csv"
TEST_FILE = "test.csv"
time_format = '%Y-%m-%d*%H:%M:%S'  # used for default model file names

# Topic label -> integer class code; 0 is the "irrelevant" catch-all.
topic_code = {
    'ARTS CULTURE ENTERTAINMENT': 1,
    'BIOGRAPHIES PERSONALITIES PEOPLE': 2,
    'DEFENCE': 3,
    'DOMESTIC MARKETS': 4,
    'FOREX MARKETS': 5,
    'HEALTH': 6,
    'MONEY MARKETS': 7,
    'SCIENCE AND TECHNOLOGY': 8,
    'SHARE LISTINGS': 9,
    'SPORTS': 10,
    'IRRELEVANT': 0
}


# +
def preprocess(df):
    """Attach a numeric topic_code column and keep only the modelling columns."""
    df['topic_code'] = df['topic'].apply(lambda x: topic_code[x])
    return df[['article_words', 'topic_code']]


def bag_of_word(train_set, test_set, model):
    """Vectorise train/test article words with an already-fitted vectorizer.

    Returns (train_x, train_y, test_x, test_y).
    """
    vector = model
    train_x = vector.transform(train_set['article_words'].values)
    train_y = train_set['topic_code']
    test_x = vector.transform(test_set['article_words'].values)
    test_y = test_set['topic_code']
    return train_x, train_y, test_x, test_y
# -

# ## Test for the model saving in program

# +
def save_model(model, file_name=None):
    """Serialise *model* to <file_name>.model; timestamped name when omitted.

    BUG FIX: the original wrote bytes(model, 'utf-8'), which raises TypeError
    for any sklearn estimator — pickle is the correct serialisation format.
    """
    if file_name is None:
        file_name = datetime.datetime.now().strftime(time_format)
    with open(file_name + '.model', 'wb') as model_file:
        pickle.dump(model, model_file)
    print("Save {} file successfully".format(file_name + '.model'))


def load_model(file_name):
    """Load and return a model previously written by save_model."""
    with open(file_name, 'rb') as model_file:
        model = pickle.load(model_file)
    print("Load model file {} successful".format(file_name))
    return model
# -

# ## Main program of Decision tree training

# + id="Az3ZImk-pijn" colab_type="code" outputId="b1202ce8-e728-49a6-a9a8-3e9212449daf" colab={"base_uri": "https://localhost:8080/", "height": 255}
# load data from file
df = pd.read_csv(TRAINING_FILE)

# pre process the y
df = preprocess(df)

# split the data
train_set, test_set = train_test_split(df, test_size=0.1)

# Using different models to convert words; TF-IDF performed best.
vector_model = TfidfVectorizer().fit(df['article_words'])
#train_x, train_y, test_x, test_y = bag_of_word(train_set, test_set, CountVectorizer().fit(df['article_words']))
train_x, train_y, test_x, test_y = bag_of_word(train_set, test_set, vector_model)
#train_x, train_y, test_x, test_y = bag_of_word(train_set, test_set, HashingVectorizer())

print(train_x.shape)
print(train_y.shape[0])
print(test_x.shape)
print(test_y.shape[0])

# +
# %%time
# train a baseline tree
test_record = []
dtc = DecisionTreeClassifier(min_samples_leaf=2)
dtc.fit(train_x, train_y)
print("The accuracy of Training set is:", dtc.score(train_x, train_y))
print("The accuracy of Test set is:", dtc.score(test_x, test_y))

# +
pred_y = dtc.predict(test_x)
# -

# Multi-class AUC (one-vs-rest) of the baseline tree.
predict_proba = dtc.predict_proba(test_x)
auc = roc_auc_score(test_y, predict_proba, multi_class='ovr')
print("Current auc is:", auc)

# +
# Sweep min_samples_leaf and keep the model with the best test AUC.
test_record = []
best_model = None
min_samples_leaf_value = 0
max_auc = 0
leaf_value_range = range(25, 41)

for leaf_value in leaf_value_range:
    print("fiting with leaf value = {} ...... ".format(leaf_value), end="")
    model = DecisionTreeClassifier(min_samples_leaf=leaf_value)
    model.fit(train_x, train_y)
    print("Done!")

    prediction = model.predict_proba(test_x)
    auc_score = roc_auc_score(test_y, prediction, multi_class='ovr')
    test_record.append(auc_score)
    if auc_score > max_auc:
        max_auc = auc_score
        min_samples_leaf_value = leaf_value
        best_model = model

print("The optimal number of min_samples_leaf by TEST set is:", min_samples_leaf_value)
print("With max AUC by TEST is:", max_auc)
print("The accuracy of BEST model in Training set is:", best_model.score(train_x, train_y))
print("The accuracy of BEST model in Test set is:", best_model.score(test_x, test_y))

plt.style.use('ggplot')
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(leaf_value_range, test_record)
ax1.set_title('AUC in TEST set')
ax1.set_xlabel('Min_samples_leaf_value')
ax1.set_ylabel('AUC')
plt.show()
# -

# ### Passing Test set to test

# +
test_file_raw = pd.read_csv(TEST_FILE)
test_file = preprocess(test_file_raw)

# Reuse the vectorizer fitted on the training corpus.
test_set_x = vector_model.transform(test_file['article_words'])
test_set_y = test_file['topic_code']

print("The accuracy of BEST model in TEST FILE set is:", best_model.score(test_set_x, test_set_y))
# -
Henry_s_work.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.7.1
#     language: julia
#     name: julia-1.7
# ---

# Minimal reproduction of a GraphPlot node-coloring issue: the same plot is
# built once with 99 nodes and once with 100 nodes to compare rendering.
# NOTE(review): appears intended to show a discrepancy between the two sizes —
# confirm against the tracking issue before changing either cell.

using GraphPlot, Compose, Colors, Plots, Cairo, Fontconfig

include("../src/networks/Network-Graphs.jl")

# Case 1: 99-node graph, every node red, explicit black stroke.
my_graph = SimpleGraph(99)
add_edge!(my_graph, 1, 3)
colorz = [colorant"red" for i = 1:nv(my_graph)]
my_gp = gplot(my_graph, nodefillc=colorz, nodestrokec=colorant"black", nodestrokelw=1)

# Case 2: 100-node graph, first node recolored black, default stroke.
my_graph = SimpleGraph(100)
add_edge!(my_graph, 1, 3)
colorz = [colorant"red" for i = 1:nv(my_graph)]
colorz[1] = colorant"black"
my_gp = gplot(my_graph, nodefillc=colorz)

# Render the second plot to a PDF for inspection.
draw(PDF("test.pdf", 16cm, 16cm), my_gp)
test/Color_Plot_Error_Graph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Py3 research env # language: python # name: py3_research # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Victornovikov/ml-class/blob/main/kNN_practice.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] tags=["pdf-title"] id="IGRedJQuNAWy" # # k-Nearest Neighbor (kNN) implementation # # *Credits: this notebook is deeply based on Stanford CS231n course assignment 1. Source link: http://cs231n.github.io/assignments2019/assignment1/* # # The kNN classifier consists of two stages: # # - During training, the classifier takes the training data and simply remembers it # - During testing, kNN classifies every test image by comparing to all training images and transfering the labels of the k most similar training examples # - The value of k is cross-validated # # In this exercise you will implement these steps and understand the basic Image Classification pipeline and gain proficiency in writing efficient, vectorized code. # # We will work with the handwritten digits dataset. Images will be flattened (8x8 sized image -> 64 sized vector) and treated as vectors. # + colab={"base_uri": "https://localhost:8080/"} id="sJ1kc8CNNAW5" outputId="79993e38-f762-4c9a-d4d1-a21d9c58ad91" ''' If you are using Google Colab, uncomment the next line to download `k_nearest_neighbor.py`. You can open and change it in Colab using the "Files" sidebar on the left. 
''' # # !wget https://raw.githubusercontent.com/girafe-ai/ml-mipt/basic_s20/homeworks_basic/assignment0_01_kNN/k_nearest_neighbor.py # # !wget https://github.com/girafe-ai/msai-ml/blob/master/homeworks/assignment0_01_kNN/k_nearest_neighbor.py # !wget https://raw.githubusercontent.com/Victornovikov/ml-class/main/k_nearest_neighbor.py # + colab={"base_uri": "https://localhost:8080/"} id="8ro9oxg4NAW6" outputId="785f7ce4-509c-4f45-b04a-f1f0a5f4b90a" from sklearn import datasets dataset = datasets.load_digits() print(dataset.DESCR) # + colab={"base_uri": "https://localhost:8080/"} id="rNxnNllaNAW6" outputId="d43ce051-68e9-492a-9b2d-4f4f32f24301" # First 100 images will be used for testing. This dataset is not sorted by the labels, so it's ok # to do the split this way. # Please be careful when you split your data into train and test in general. test_border = 100 X_train, y_train = dataset.data[test_border:], dataset.target[test_border:] X_test, y_test = dataset.data[:test_border], dataset.target[:test_border] print('Training data shape: ', X_train.shape) print('Training labels shape: ', y_train.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) num_test = X_test.shape[0] # + tags=["pdf-ignore"] id="aAItNvF6NAW7" # Run some setup code for this notebook. import random import numpy as np import matplotlib.pyplot as plt # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. 
# %matplotlib inline plt.rcParams['figure.figsize'] = (14.0, 12.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 # + tags=["pdf-ignore"] colab={"base_uri": "https://localhost:8080/", "height": 561} id="zA3YOw4fNAW8" outputId="f991dc8e-9e08-4443-ac08-6bf421710bc6" # Visualize some examples from the dataset. # We show a few examples of training images from each class. classes = list(np.arange(10)) num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(X_train[idx].reshape((8, 8)).astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # + [markdown] id="6vdBnF-NNAW8" # Autoreload is a great stuff, but sometimes it does not work as intended. The code below aims to fix than. __Do not forget to save your changes in the `.py` file before reloading the `KNearestNeighbor` class.__ # + tags=["pdf-ignore"] id="1Tz1OFHJNAW8" # This dirty hack might help if the autoreload has failed for some reason try: del KNearestNeighbor except: pass from k_nearest_neighbor import KNearestNeighbor # Create a kNN classifier instance. # Remember that training a kNN classifier is a noop: # the Classifier simply remembers the data and does no further processing classifier = KNearestNeighbor() classifier.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="REPqtwooNAW9" outputId="ea1837fc-cd9a-4992-a2e2-82a3e0561f6f" X_train.shape # + [markdown] id="P23l6jnZNAW9" # We would now like to classify the test data with the kNN classifier. 
Recall that we can break down this process into two steps: # # 1. First we must compute the distances between all test examples and all train examples. # 2. Given these distances, for each test example we find the k nearest examples and have them vote for the label # # Lets begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example. # # **Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.** # # First, open `k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time. # + colab={"base_uri": "https://localhost:8080/"} id="3WHyabNNNAW-" outputId="509a6e86-3454-42cc-dcda-bc7912c28414" # Open k_nearest_neighbor.py and implement # compute_distances_two_loops. # Test your implementation: dists = classifier.compute_distances_two_loops(X_test) print(dists.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 81} id="UYZT3fZ-NAW-" outputId="30feecb9-5698-4590-caf5-a1b4e8f95429" # We can visualize the distance matrix: each row is a single test example and # its distances to training examples plt.imshow(dists, interpolation='none') plt.show() # + [markdown] tags=["pdf-inline"] id="l5Hpq2j-NAW-" # **Inline Question 1** # # Notice the structured patterns in the distance matrix, where some rows or columns are visible brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.) # # - What in the data is the cause behind the distinctly bright rows? # - What causes the columns? 
# # $\color{blue}{\textit Your Answer:}$ Distinctly bright rows -- are test digits that are written in a way that gives us very few "similar" digits in the train set. Similarly, bright colums -- digits from train that don't have digits in test that are very "close" # # # + colab={"base_uri": "https://localhost:8080/"} id="mcxQcTekNAW-" outputId="94274703-aa04-4a7e-ba6d-ec618a6b35db" # Now implement the function predict_labels and run the code below: # We use k = 1 (which is Nearest Neighbor). y_test_pred = classifier.predict_labels(dists, k=1) # Compute and print the fraction of correctly predicted examples num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) # + [markdown] id="IZg_EltVNAW_" # You should expect to see approximately `95%` accuracy. Now lets try out a larger `k`, say `k = 5`: # + colab={"base_uri": "https://localhost:8080/"} id="ADUdf3PHNAW_" outputId="e093a682-ce5c-4f62-9c2a-12d2fe1d8911" y_test_pred = classifier.predict_labels(dists, k=5) num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)) # + [markdown] id="nb-zGB1WNAXA" # Accuracy should slightly decrease with `k = 5` compared to `k = 1`. # + [markdown] tags=["pdf-inline"] id="C2hzDUHfNAXA" # **Inline Question 2** # # We can also use other distance metrics such as L1 distance. # For pixel values $p_{ij}^{(k)}$ at location $(i,j)$ of some image $I_k$, # # the mean $\mu$ across all pixels over all images is $$\mu=\frac{1}{nhw}\sum_{k=1}^n\sum_{i=1}^{h}\sum_{j=1}^{w}p_{ij}^{(k)}$$ # And the pixel-wise mean $\mu_{ij}$ across all images is # $$\mu_{ij}=\frac{1}{n}\sum_{k=1}^np_{ij}^{(k)}.$$ # The general standard deviation $\sigma$ and pixel-wise standard deviation $\sigma_{ij}$ is defined similarly. 
# # Which of the following preprocessing steps will not change the performance of a Nearest Neighbor classifier that uses L1 distance? Select all that apply. # 1. Subtracting the mean $\mu$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu$.) # 2. Subtracting the per pixel mean $\mu_{ij}$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu_{ij}$.) # 3. Subtracting the mean $\mu$ and dividing by the standard deviation $\sigma$. # 4. Subtracting the pixel-wise mean $\mu_{ij}$ and dividing by the pixel-wise standard deviation $\sigma_{ij}$. # 5. Rotating the coordinate axes of the data. # # $\color{blue}{\textit Your Answer:}$ # Will NOT chagne: 5 # # $\color{blue}{\textit Your Explanation:}$ # 5: no changes to distances # + tags=["pdf-ignore-input"] colab={"base_uri": "https://localhost:8080/"} id="cuGQWyDKNAXA" outputId="118e456c-11b1-4a70-9d30-0be422f7b017" # Now lets speed up distance matrix computation by using partial vectorization # with one loop. Implement the function compute_distances_one_loop and run the # code below: dists_one = classifier.compute_distances_one_loop(X_test) # To ensure that our vectorized implementation is correct, we make sure that it # agrees with the naive implementation. There are many ways to decide whether # two matrices are similar; one of the simplest is the Frobenius norm. In case # you haven't seen it before, the Frobenius norm of two matrices is the square # root of the squared sum of differences of all elements; in other words, reshape # the matrices into vectors and compute the Euclidean distance between them. difference = np.linalg.norm(dists - dists_one, ord='fro') print('One loop difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! 
The distance matrices are different') # + id="xK7VevhfKHm1" # We can visualize the distance matrix: each row is a single test example and # its distances to training examples plt.imshow(dists_one, interpolation='none') plt.show() # + tags=["pdf-ignore-input"] colab={"base_uri": "https://localhost:8080/"} id="YpPUEkLqNAXB" outputId="01a536ee-7acb-4166-85ab-75d8e8445eaf" # Now implement the fully vectorized version inside compute_distances_no_loops # and run the code dists_two = classifier.compute_distances_no_loops(X_test) # check that the distance matrix agrees with the one we computed before: difference = np.linalg.norm(dists - dists_two, ord='fro') print('No loop difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # + colab={"base_uri": "https://localhost:8080/", "height": 81} id="TnqwDiIfBjAB" outputId="481947b6-d369-4e42-f12f-34b8164685ff" # + [markdown] id="GIgJeirdNAXD" # ### Comparing handcrafted and `sklearn` implementations # In this section we will just compare the performance of handcrafted and `sklearn` kNN algorithms. The predictions should be the same. No need to write any code in this section. 
# + id="jNpye7pkNAXE" from sklearn import neighbors # + id="N9vCfTPqNAXE" implemented_knn = KNearestNeighbor() implemented_knn.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="Gvl9lK_VNAXE" outputId="f1da0f64-a539-4202-9604-7b331f302f18" n_neighbors = 1 external_knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors) external_knn.fit(X_train, y_train) print('sklearn kNN (k=1) implementation achieves: {} accuracy on the test set'.format( external_knn.score(X_test, y_test) )) y_predicted = implemented_knn.predict(X_test, k=n_neighbors).astype(int) accuracy_score = sum((y_predicted==y_test).astype(float)) / num_test print('Handcrafted kNN (k=1) implementation achieves: {} accuracy on the test set'.format(accuracy_score)) assert np.array_equal( external_knn.predict(X_test), y_predicted ), 'Labels predicted by handcrafted and sklearn kNN implementations are different!' print('\nsklearn and handcrafted kNN implementations provide same predictions') print('_'*76) n_neighbors = 5 external_knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors) external_knn.fit(X_train, y_train) print('sklearn kNN (k=5) implementation achieves: {} accuracy on the test set'.format( external_knn.score(X_test, y_test) )) y_predicted = implemented_knn.predict(X_test, k=n_neighbors).astype(int) accuracy_score = sum((y_predicted==y_test).astype(float)) / num_test print('Handcrafted kNN (k=5) implementation achieves: {} accuracy on the test set'.format(accuracy_score)) assert np.array_equal( external_knn.predict(X_test), y_predicted ), 'Labels predicted by handcrafted and sklearn kNN implementations are different!' print('\nsklearn and handcrafted kNN implementations provide same predictions') print('_'*76) # + [markdown] id="tetwzjk0NAXF" # ### Measuring the time # Finally let's compare how fast the implementations are. 
def time_function(f, *args):
    """Call ``f(*args)`` once and return the elapsed time in seconds.

    Uses ``time.perf_counter`` rather than ``time.time`` because it is a
    monotonic, high-resolution clock intended for benchmarking and is not
    affected by system clock adjustments.
    """
    import time
    tic = time.perf_counter()
    f(*args)
    toc = time.perf_counter()
    return toc - tic
The training error of a 1-NN will always be lower than that of 5-NN. # 3. The test error of a 1-NN will always be lower than that of a 5-NN. # 4. The time needed to classify a test example with the k-NN classifier grows with the size of the training set. # 5. None of the above. # # $\color{blue}{\textit Your Answer:}$ # 1. False # 2. False # 3. False # 4. False # 5. True # # $\color{blue}{\textit Your Explanation:}$ # 1. We do not have a hyper plane (e.g. ONE rule) that we would rely to separate elements of one class from another, in each case we decide individually. # 2. It will depend on the data # 3. It will depend on the data # 4. ? those are two different data sets not dependent on each other? # # + [markdown] id="cibTOTRjNAXG" # ### Submitting your work # To submit your work you need to log into Yandex contest (link will be provided later) and upload the `k_nearest_neighbor.py` file for the corresponding problem
kNN_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Machine Learning with PySpark on the "Titanic: Machine Learning from Disaster" challenge dataset # # This notebook shows how to learn statistical models for predicting the survival rate of the passengers in the Titanic cruise ship. The dataset's statistical analysis and visualization is in a separate notebook named **"Data Analysis and Visualizations.ipynb"** found in the same directory as this one. Here we'll only tackle the use of Spark's Python wrapper library **pyspark** for learning a model to predict if a passengers survived of nor the shipwreck. # # We'll use several popular methods for statistical learning for modeling our data, such as: # # - Logistic Regression # - Decision Trees # - Random Forests # - Gradient Boosting Trees # - Neural networks # # Then, we'll combine all models and create an ensemble of models to predict the survival label on a test set. # # This notebook is structured as follow: # # 1. **Load data** # # 1.1. Import libraries # # 1.2. Load preprocessed files # # 1.3. Setup training and testing data # # 2. **Model selection, training and evaluation** # # 2.1. Import models from scikit-learn # # 2.2. Setup models' parameters # # 2.3. Train the models using cross validation # # 2.4. Analyse the models accuracy # # 2.5. (optional): plot the best features of all models for visual analysis # # 3. **Model prediction** # # 3.1. Predict and Submit results # ## 1. Load data # ### 1.1. 
Import libraries # + # Import standard libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline sns.set_style('whitegrid') # - # Import a Spark session from pyspark.sql import SparkSession # create spark session spark = SparkSession.builder.appName('Titanic').getOrCreate() # ### 1.2. Load preprocessed files # Load pre-processed train and test data sets train = spark.read.csv('data/train_processed.csv', inferSchema=True, header=True) test = spark.read.csv('data/test_processed.csv', inferSchema=True, header=True) train.printSchema() # ### 1.3. Setup feature and label columns # # Pyspark uses a Dense vector to represent a set of features that are then used by the statistical models from the MLlib library. Therefore, the features need to be set up w.r.t. the format thet models expect the data to be in. # Import vector assembler function for grouping columns into a vector from pyspark.ml.feature import VectorAssembler # Drop unused features and create an assembler of features feature_columns = [feature for feature in train.columns if feature not in ['_c0', 'Name', 'Survived', 'PassengerId', 'Family_size']] feature_assembler = VectorAssembler(inputCols=feature_columns, outputCol='features') # # 2. Model Selection, training and evaluation # # This section deals with selecting a model for training on the training set and then evaluating its performance against other models. # # We'll use the most of popular models from the pyspark's MLlib library for classification. Namely, we'll use: # # - Logistic Regression # - Decision Trees # - Random Forests # - Gradient Boosting # - Neural networks # # This notebook doesn't focus on evaluating every single model from the MLlib library but only the (arguably) most popular models for classification tasks available for use. # # > Note: Any other model that has not been used here can easily be integrated in this notebook with little effort. # ### 2.1. 
# Import models for classification
from pyspark.ml.classification import (
    LogisticRegression,
    DecisionTreeClassifier,
    RandomForestClassifier,
    GBTClassifier,
    MultilayerPerceptronClassifier
)

# Import parameters grid builder (for grid search)
from pyspark.ml.tuning import ParamGridBuilder

# Random seed for the train procedure
random_seed = 2

# Setup models: every model predicts the 'Survived' label from the
# assembled 'features' vector column produced by the VectorAssembler.
lr_model = LogisticRegression(featuresCol='features', labelCol='Survived')
dtree_model = DecisionTreeClassifier(featuresCol='features', labelCol='Survived')
rforest_model = RandomForestClassifier(featuresCol='features', labelCol='Survived')
gbt_model = GBTClassifier(featuresCol='features', labelCol='Survived')
nn_model = MultilayerPerceptronClassifier(featuresCol='features', labelCol='Survived')

# Setup models + parameter grids for the cross-validated grid search.
# NOTE(fix): ParamGridBuilder.addGrid expects a Param object such as
# ``model.maxDepth``; the original code passed the *getter method*
# ``model.getMaxDepth`` for the two tree models, which is not a Param and
# breaks (or silently mis-configures) the grid search.
classifiers = {
    "Logistic_Regression": {
        "model": lr_model,
        "alias": 'LReg',
        "params": (ParamGridBuilder()
                   .addGrid(lr_model.regParam, [0.1, 0.01])
                   .addGrid(lr_model.maxIter, [100, 200, 300])
                   .build())
    },
    "Decision_Trees": {
        "model": dtree_model,
        "alias": 'DTree',
        "params": (ParamGridBuilder()
                   .addGrid(dtree_model.maxDepth, [5, 10, 15])
                   .addGrid(dtree_model.maxBins, [16, 32, 48, 64])
                   .build())
    },
    "Random_Forest": {
        "model": rforest_model,
        "alias": 'RFT',
        "params": (ParamGridBuilder()
                   .addGrid(rforest_model.maxDepth, [5, 10, 15])
                   .addGrid(rforest_model.maxBins, [16, 32, 48, 64])
                   .addGrid(rforest_model.numTrees, [20, 50, 100, 150, 200])
                   .build())
    },
    "Gradient_Boosting": {
        "model": gbt_model,
        "alias": 'GBT',
        "params": (ParamGridBuilder()
                   .addGrid(gbt_model.maxIter, [10, 20, 30, 40, 50])
                   .addGrid(gbt_model.maxDepth, [2, 4, 6, 8, 10])
                   .build())
    },
    "Neural_Network": {
        "model": nn_model,
        "alias": 'NN',
        # Layer sizes: 67 input features, 2 output classes.
        "params": (ParamGridBuilder()
                   .addGrid(nn_model.layers, [[67, 256, 128, 2], [67, 128, 2],
                                              [67, 256, 2], [67, 128, 256, 2]])
                   .addGrid(nn_model.stepSize, [0.3, 0.1, 0.01, 0.001])
                   .addGrid(nn_model.maxIter, [100, 200])
                   .addGrid(nn_model.blockSize, [128, 256])
                   .build())
    },
}
# Compute the mean + std for all models results_mean_std = [] for model_name in results: # Get the model random/grid search results results_model = results[model_name] # Compute mean + std and add it to a list results_mean_std.append({ "model": model_name, "mean": np.mean(results_model.avgMetrics), "std": np.std(results_model.avgMetrics) }) # Create a Pandas DataFrame with the mean+std results accuracy_df = pd.DataFrame(results_mean_std, columns=['model', 'mean', 'std']) # Show the accuracy dataframe accuracy_df # Plot the mean accuracy for all models fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,5)) g = sns.barplot(data=accuracy_df, y='model', x='mean', orient='h', **{'xerr':accuracy_df['std']}, palette='viridis', ax=ax) g.set_ylabel('Algorithms') g.set_xlabel('Mean accuracy') g.set_title('Cross validation accuracy scores') # Here we have the result of the accuracy of all models trained. From these, the model with the best accuracy was the **Logistic_Regression** model with 87.97% accuracy with 0.07% standard deviation. # Create a prediction of all models on the test set predictions_all = {} for model_name in results: # Get best estimator best_model = results[model_name].bestModel # Predict test labels predictions = best_model.transform(test) # Convert to pandas DataFrame predictions_df = predictions.select('prediction').toPandas() # Save predictions to a list predictions_all[model_name] = predictions_df['prediction'] # Creat a DataFrame for the predictions pred = pd.DataFrame(predictions_all) # Plot a heatmap of all correlations to see how the models are correlated with each other fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9,6)) g = sns.heatmap(pred.corr(), annot=True, cmap='coolwarm', ax=ax) g.set_title('Correlation of the test set label prediction between models') # We see that the models are all very correlated in their predictions. This indicates that the models predict similarly the same way and are comparable in performance. # ### 2.5. 
(optional): plot the best features of all models for visual analysis # # In this subsection, the feature importance is plotted for all models in order to visually see which features were more important for each model # + # Create figure fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(15,15)) # get a list of flat axes if type(axes[0]) == np.ndarray: flat_axes = [ax for axes_ in axes for ax in axes_] else: flat_axes = [ax for ax in axes] # Get model names model_names = list(results.keys()) # Get feature names feature_names = feature_columns # Cycle all models and plot them for i, model_name in enumerate(results): if not model_name in ['Neural_Network']: # Model estimator model = results[model_name].bestModel.stages[1] try: model_features = model.coefficients except AttributeError: model_features = model.featureImportances # Get the top 40 features model_features = np.array(list(model_features)) indices = np.argsort(model_features)[::-1][:40] feats = model_features[indices][:40] labels = list(np.array(feature_columns)[indices][:40]) # Plot the features sorted by their importance g = sns.barplot(y=labels, x = feats, orient='h', ax=flat_axes[i]) g.set_xlabel("Relative importance", fontsize=12) g.set_ylabel("Features", fontsize=12) g.tick_params(labelsize=9) g.set_title(model_name) plt.tight_layout() # - # Here we have a list of the top-40 features for all algorithms. We can see that all have different selections of features and different importance levels for each. However, some top-performing fields are common to all like the title of a passenger for example. # ## 3. Model prediction # ### 3.1. 
Predict and Submit results # + # Select the best model and predict the survival label for passengers of the test set # Here we'll use the **Random Forest model** for prediction because it offers # better accuracy compared to **Logistic_Regression** bestModel = results['Random_Forest'].bestModel # Compute the final predictions final_predictions = bestModel.transform(test) # + # Combine the passenger ID with the Survived labels final_predictions_df = final_predictions.select('prediction').toPandas() # Rename colum to 'Survived' final_predictions_df.columns = ['Survived'] final_predictions_df['Survived'] = final_predictions_df['Survived'].astype(int) # Load the test set as a pandas DataFrame to get the 'PassengerId' column test_df = pd.read_csv('data/test.csv') test_results = pd.concat([test_df['PassengerId'], final_predictions_df], axis=1) # Export predictions test_results.to_csv("data/pyspark_prediction_random_forest.csv", index=False)
notebooks/Machine Learning with PySpark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: speechEnv # language: python # name: speechenv # --- # + import os import sys module_path = os.path.abspath(os.path.join('../../src')) print(module_path) if module_path not in sys.path: sys.path.append(module_path) # + import csv from pathlib import Path from os import listdir import pickle from labeling_utils import load_labels import numpy as np from sklearn.metrics import precision_recall_fscore_support from sklearn.model_selection import train_test_split import pandas as pd tags=["Songbird","Water Bird","Insect","Running Water","Rain","Cable","Wind","Aircraft"] from tabulate import tabulate tag_set=tags[:] import torch # %matplotlib inline from torch.utils.data import Dataset, DataLoader # - # * Count only highest ranking tag (what if both of them exist) # * Bird tag is confusing, it can be also waterbird, how to handle ? 
# Join "Vehicle" and "Aircraft" into a single "Aircraft" tag.
# NOTE(fix): the original comprehension iterated the global ``tags`` list
# instead of this file's label list ``tagshere``, and rebound the loop-local
# name without writing back, so ``humanresults`` was never actually modified.
for file_name, tagshere in humanresults.items():
    humanresults[file_name] = ["Aircraft" if tag == "Vehicle" else tag
                               for tag in tagshere]
def print_results(results, y_true_dict, prob_threshold=0.5):
    """Pretty-print per-tag precision/recall/F-score as a text table.

    Parameters
    ----------
    results : dict
        Maps tag -> (precision, recall, fscore, support), as returned by
        ``precision_recall_fscore_support``.
    y_true_dict : dict
        Maps tag -> binary ground-truth vector (one entry per sample).
    prob_threshold : float, optional
        Decision threshold used to produce the predictions; only echoed in
        the header line.  NOTE(fix): this used to be read from a global that
        is never defined in this notebook, which raised ``NameError`` when
        the function was called; it is now a defaulted keyword parameter.
    """
    headers = ["Label", "Positive", "Precision", "Recall", "Fscore"]
    table = []
    # All label vectors have the same length, so peek at any one of them.
    sample_count = len(next(iter(y_true_dict.values())))
    print("Total sample:", sample_count, "And threshold is", prob_threshold)
    for tag in tag_set:
        positive_count = sum(y_true_dict[tag])
        # Drop the trailing 'support' entry from the metrics tuple.
        table.append([tag, positive_count, *results[tag][:-1]])
    print(tabulate(table, headers=headers))
def augment_data(data, augmentad_size):
    """Grow *data* to at least ``augmentad_size`` samples by splicing halves.

    New samples are built by taking the first 5 seconds of one randomly
    chosen clip and the last 5 seconds of another (clips are 10 x 128
    embeddings).  Exact duplicates are dropped after each round, so the loop
    repeats until the pool is large enough.  If the input is already big
    enough it is returned unchanged (as a view).
    """
    if data.shape[0] >= augmentad_size:
        return data[:]
    pool = data[:]
    while pool.shape[0] < augmentad_size:
        needed = augmentad_size - pool.shape[0]
        # Never draw more splice pairs than there are clips available.
        batch = min(needed, pool.shape[0])
        spliced = np.empty((batch, pool.shape[1], pool.shape[2]))
        left_src = pool[torch.randperm(pool.shape[0])[:batch], :, :].reshape(-1, 10, 128)
        right_src = pool[torch.randperm(pool.shape[0])[:batch], :, :].reshape(-1, 10, 128)
        spliced[:, 0:5, :] = left_src[:, 0:5, :]
        spliced[:, 5:10, :] = right_src[:, 5:10, :]
        # np.unique removes accidental duplicates (it also sorts rows).
        pool = np.unique(np.concatenate([pool, spliced]), axis=0)
    return pool
y_true_aug=np.concatenate([np.ones(augmentad_pos.shape[0]),np.zeros(augmentad_neg.shape[0])]).astype("int64") MULTI_SEGMENT = False X_augmented_mean=X_augmented.mean(axis=1) X_train, X_test, y_train, y_test = train_test_split( X_augmented_mean, y_true_aug, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=42) # X_train=X_train.reshape(X_train.shape[0],-1,128) # X_test=X_test.reshape(X_test.shape[0],-1,128) # X_val=X_val.reshape(X_test.shape[0],-1,128) # + # AUGMENTATION Experiment # from "Shuffling and Mixing Data Augmentation for Environmental Sound Classification" by <NAME>. al MULTI_SEGMENT = False FLAT=False X_train, X_test, y_train, y_test = train_test_split( X.reshape(X.shape[0],-1), y_true, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=42) X_train=X_train.reshape(X_train.shape[0],-1,128) X_test=X_test.reshape(X_test.shape[0],-1,128) X_val=X_val.reshape(X_test.shape[0],-1,128) pos_index= (y_train==1) neg_index= (y_train==0) # shuffle each sample sound within itself, granularity is 1 second, samples are 10 second # so change order of seconds X_shuffled = X_train[:,torch.randperm(X_train.shape[1]),:] # no shuffle # X_shuffled = X_train[:,:,:] X_shuffled_pos=X_shuffled[pos_index,:,:] X_shuffled_neg=X_shuffled[neg_index,:,:] augmentation_ratio=1.2 augmentation_ratio=(1/augmentation_ratio) augmentad_pos=augment_data(X_shuffled_pos,int(X_shuffled_pos.shape[0]//augmentation_ratio)) # augmentad_neg=augment_data(X_shuffled_neg,int(X_shuffled_neg.shape[0]//augmentation_ratio)) augmentad_neg=augment_data(X_shuffled_neg,X_shuffled_neg.shape[0]) X_train_augmented=np.concatenate([augmentad_pos,augmentad_neg]).astype("float32") y_train_aug=np.concatenate([np.ones(augmentad_pos.shape[0]),np.zeros(augmentad_neg.shape[0])]).astype("int64") X_train=X_train_augmented[:] y_train=y_train_aug[:] 
X_train=X_train.mean(axis=1) X_test=X_test.mean(axis=1) X_val=X_val.mean(axis=1) # + # concat inputs MULTI_SEGMENT = False FLAT=True X_train, X_test, y_train, y_test = train_test_split( X.reshape(X.shape[0],-1), y_true, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=42) X_train=X_train.reshape(X_train.shape[0],-1,128) X_test=X_test.reshape(X_test.shape[0],-1,128) X_val=X_val.reshape(X_test.shape[0],-1,128) # - # + # MEAN MULTI_SEGMENT = False FLAT=False X_train, X_test, y_train, y_test = train_test_split( X_mean, y_true, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=42) # X_train=X_train.reshape(X_train.shape[0],-1,128) # X_test=X_test.reshape(X_test.shape[0],-1,128) # X_val=X_val.reshape(X_test.shape[0],-1,128) # - # + # BEST model came from this cell # seperate, # different AUC calculation PART, to be fair (do max of 10 predictions) MULTI_SEGMENT = True FLAT=False X_train, X_test, y_train, y_test = train_test_split( X.reshape(X.shape[0],-1), y_true, test_size=0.2, random_state=42) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=0.25, random_state=42) # use 1 second as samples X_train=X_train.reshape(-1,128) X_test=X_test.reshape(-1,128) X_val=X_val.reshape(-1,128) # repeat labels y_train=np.repeat(y_train,10) y_test=np.repeat(y_test,10) y_val=np.repeat(y_val,10) # - # ### From here, run all cells: moving data to device, model creation and training # + X_train=torch.from_numpy(X_train).to(device) X_test=torch.from_numpy(X_test).to(device) X_val=torch.from_numpy(X_val).to(device) # birds y_val=torch.from_numpy(y_val).to(device) y_test=torch.from_numpy(y_test).to(device) y_train=torch.from_numpy(y_train).to(device) # - class audioDataset(Dataset): """Face Landmarks dataset.""" def __init__(self,X,y, transform=None): """ Args: csv_file (string): Path to the csv file with 
class Net(nn.Module):
    """Small fully connected classifier over VGGish-style audio embeddings.

    Input is either one 128-d embedding per sample (module-level ``FLAT``
    false) or a flattened 10x128 clip embedding (``FLAT`` true).  Output is
    log-probabilities over 2 classes.
    NOTE(review): the training cell pairs this log-softmax output with
    ``nn.CrossEntropyLoss``, which applies log-softmax again internally;
    ``nn.NLLLoss`` would be the matching criterion — confirm intent.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Input width depends on whether clips are fed flattened (10x128)
        # or one 128-d second at a time (module-level FLAT flag).
        if FLAT:
            self.fc1 = nn.Linear(1280, 10)
        else:
            self.fc1 = nn.Linear(128, 10)
        torch.nn.init.xavier_normal_(self.fc1.weight)
        self.fc1_bn = nn.BatchNorm1d(10)
        self.fc2 = nn.Linear(10, 10)
        torch.nn.init.xavier_normal_(self.fc2.weight)
        self.fc2_bn = nn.BatchNorm1d(10)
        self.fc3 = nn.Linear(10, 10)
        torch.nn.init.xavier_normal_(self.fc3.weight)
        # NOTE(fix): was BatchNorm1d(5), which mismatches fc3's 10-d output.
        # Still unused in forward(); kept so the attribute set is unchanged.
        self.fc3_bn = nn.BatchNorm1d(10)
        self.fc4 = nn.Linear(10, 2)
        torch.nn.init.xavier_normal_(self.fc4.weight)
        self.drop = nn.Dropout(p=0.2)

    def forward(self, x):
        # Flatten to the width fc1 expects.
        if FLAT:
            x = x.view(-1, 1280)
        else:
            x = x.view(-1, 128)
        x = F.relu(self.fc1_bn(self.fc1(x)))
        x = self.drop(x)
        # NOTE(fix): the original applied fc2_bn directly to fc1's output and
        # never ran fc2 at all, leaving fc2 as dead (but initialized/trained)
        # weights.  Apply the layer before its batch norm, as intended.
        x = F.relu(self.fc2_bn(self.fc2(x)))
        x = self.drop(x)
        x = F.relu(self.fc3(x))
        x = self.drop(x)
        x = self.fc4(x)
        # Log-probabilities; use exp() to recover class probabilities.
        return F.log_softmax(x, dim=1)
criterion = nn.CrossEntropyLoss() # criterion = nn.NLLLoss() optimizer = optim.Adam(net.parameters(),weight_decay=0.001) # - import copy # + from IPython import display import time best_acc1=0 for epoch in range(10000): # loop over the dataset multiple times running_loss = 0.0 for i, data in enumerate(training_generator, 0): # get the inputs; data is a list of [inputs, labels] inputs, labels = data # print(inputs) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize # print(inputs.shape) outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics # running_loss += loss.item() # if epoch % 20 == 0: # print every 2000 mini-batches net.eval() test_loss,test_auc=nogradmetrics(X_val,y_val,net,multi_segment=MULTI_SEGMENT) train_loss,train_auc=nogradmetrics(X_train,y_train,net,multi_segment=MULTI_SEGMENT) net.train() loss_values["test"].append(test_loss) loss_values["train"].append(train_loss) loss_values["test_auc"].append(test_auc) loss_values["train_auc"].append(train_auc) if epoch % 20 == 0: # print every 2000 mini-batches print('[%d] test : %.3f train: %.3f test auc%.3f train auc%.3f' % (epoch ,test_loss, train_loss,test_auc,train_auc)) is_best = test_auc > best_acc1 best_acc1 = max(test_auc, best_acc1) if is_best: best_model=copy.deepcopy(net) # display.clear_output(wait=True) # display.display(results.plot()) print('Finished Training') # - best_acc1 # raw embeddings, slim network, 3 layers 10 neuron each best_acc1 # raw embeddings, 5 neurons, 0.2 dropout best_acc1 # new v2 best_acc1 # old, raw embeddings, 100 neuron, 0.2 dropout # + # save best model, change name accordingly, by adding validation accuracy import time timestr = time.strftime("%Y%m%d-%H%M%S") torch.save(best_model.state_dict(), "../../data/models/bird_FC_089valid_"+timestr+".pth") # - # visualize results results=pd.DataFrame(loss_values) results[["test","train"]].plot(),results[["test_auc","train_auc"]].plot(), # results 
on test dataset, loss and AUC nogradmetrics(X_test,y_test,best_model,multi_segment=MULTI_SEGMENT) test_set=audioDataset(X_test,y_test) test_generator = DataLoader(test_set, **params) # Following linse are for comparing sklearn MLP and this model X_last=X[:] # X_last=X_last.reshape(X_last.shape[0],-1) X_last=X_last.reshape(-1,128) X_last_sklearn=X_last[:] X_last=torch.from_numpy(X_last).to(device) X_last.shape y_pred_10.shape,y_test_10.shape X_val.shape,y_val.shape # + best_model.eval() with torch.no_grad(): y_pred = best_model(X_val) y_pred_10 =y_pred.reshape(-1,10,2) indices=torch.max(y_pred[:,1].reshape(-1,10),dim=1).indices y_pred_10 = y_pred_10[range(y_pred_10.shape[0]),indices,:].reshape(-1,2) y_val_10 = torch.max(y_val.reshape(-1,10),dim=1).values # loss = criterion(y_pred_10, y_true) train_auc=roc_auc_score(y_val_10.cpu().numpy(), y_pred_10[:,1].cpu().numpy()) print(train_auc) # - y_pred=y_pred_10.cpu() y_pred=torch.exp(y_pred[:,1]) y_pred[y_pred>=0.5]=1 y_pred[y_pred<0.5]=0 # + from sklearn.metrics import confusion_matrix tn, fp, fn, tp=confusion_matrix(y_val_10.cpu().numpy(), y_pred).ravel() tn, fp, fn, tp (164, 23, 21, 52) sklearn pytorch tp 52 68 fp 23 67 tn 164 120 fn 21 5 # - total=0 for i,mm in enumerate(y_true): predict= 1 if y_pred[i]>0.5 else 0 if mm!=predict: total+=1 print(humanresults_keys[i]) print(total) # #### compare with sklearn # + import pickle # and later you can load it with open('../Visualizations/raw_many2one_NN.pkl', 'rb') as f: clf = pickle.load(f) # - def many2one_predict(X,clf): result_count=(X.shape[0]//10) if X.shape[0]%10==0 else (X.shape[0]//10)+1 results=np.empty(result_count) for i in range(0,X.shape[0],10): result10=clf.predict(X[i:i+10,:]) results[(i//10)] = np.max(result10) return results X_last_sklearn.shape #TEST samples=np.ones((200,128)) y_pred_sklearn=many2one_predict(X_val.cpu().numpy(),clf['Neural Net_Songbird']) # + from sklearn.metrics import confusion_matrix tn, fp, fn, 
tp=confusion_matrix(y_val_10.cpu().numpy(), y_pred_sklearn).ravel() tn, fp, fn, tp # - total=0 for i,mm in enumerate(y_true): predict= 1 if y_pred[i]>0.5 else 0 predict_sklearn=int(y_pred_sklearn[i]) if mm!=predict and mm==1: total+=1 print(humanresults_keys[i],mm,predict,predict_sklearn) print(total) # + from sklearn.metrics import confusion_matrix confusion_matrix(y_true, y_pred) # - np.exp([-6.7866, -4.4634]) F.softmax(torch.Tensor([0.5403, 0.4597]),dim=0),torch.exp(F.log_softmax(torch.Tensor([ 0.1147, -0.0307]),dim=0))
notebooks/model_exp/MLP_pytorch-Copy1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C00_Img00_logo.png?raw=true" width="350" /> # </p> # <h1 align="center">ST0256 - Análisis Numérico</h1> # <h1 align="center">Capítulo 6: Ecuaciones Diferenciales Ordinarias</h1> # <h1 align="center">2021/01</h1> # <h1 align="center">MEDELLÍN - COLOMBIA </h1> # <table> # <tr align=left><td><img align=left src="./images/CC-BY.png"> # <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license.(c) <NAME></td> # </table> # *** # # ***Docente:*** <NAME>, I.C. D.Sc. # # ***e-mail:*** <EMAIL> # # ***skype:*** carlos.alberto.alvarez.henao # # ***Herramienta:*** [Jupyter notebook](http://jupyter.org/) # # ***Kernel:*** Python 3.8 # # # *** # <a id='TOC'></a> # + [markdown] toc=true # <h1>Tabla de Contenidos<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Ecuaciones-diferenciales-Ordinarias" data-toc-modified-id="Ecuaciones-diferenciales-Ordinarias-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Ecuaciones diferenciales Ordinarias</a></span><ul class="toc-item"><li><span><a href="#Introducción" data-toc-modified-id="Introducción-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Introducción</a></span></li><li><span><a href="#Problemas-de-valor-inicial-(PVI)" data-toc-modified-id="Problemas-de-valor-inicial-(PVI)-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Problemas de valor inicial (PVI)</a></span></li><li><span><a href="#Solución-numérica-de-una-EDO" data-toc-modified-id="Solución-numérica-de-una-EDO-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Solución numérica de una EDO</a></span></li><li><span><a 
href="#Existencia,-unicidad-y-problemas-bien-planteados" data-toc-modified-id="Existencia,-unicidad-y-problemas-bien-planteados-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Existencia, unicidad y problemas bien planteados</a></span><ul class="toc-item"><li><span><a href="#Condición-de-Lipschitz" data-toc-modified-id="Condición-de-Lipschitz-1.4.1"><span class="toc-item-num">1.4.1&nbsp;&nbsp;</span>Condición de Lipschitz</a></span></li><li><span><a href="#Conjunto-convexo" data-toc-modified-id="Conjunto-convexo-1.4.2"><span class="toc-item-num">1.4.2&nbsp;&nbsp;</span>Conjunto convexo</a></span></li><li><span><a href="#Teorema-de-suficiencia" data-toc-modified-id="Teorema-de-suficiencia-1.4.3"><span class="toc-item-num">1.4.3&nbsp;&nbsp;</span>Teorema de suficiencia</a></span></li><li><span><a href="#Teorema-de-existencia-y-unicidad" data-toc-modified-id="Teorema-de-existencia-y-unicidad-1.4.4"><span class="toc-item-num">1.4.4&nbsp;&nbsp;</span>Teorema de existencia y unicidad</a></span></li><li><span><a href="#Problemas-bien-planteados" data-toc-modified-id="Problemas-bien-planteados-1.4.5"><span class="toc-item-num">1.4.5&nbsp;&nbsp;</span>Problemas bien planteados</a></span></li><li><span><a href="#EDOs-estables-e-inestables" data-toc-modified-id="EDOs-estables-e-inestables-1.4.6"><span class="toc-item-num">1.4.6&nbsp;&nbsp;</span>EDOs estables e inestables</a></span></li></ul></li></ul></li><li><span><a href="#Métodos-de-Runge---Kutta" data-toc-modified-id="Métodos-de-Runge---Kutta-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Métodos de Runge - Kutta</a></span><ul class="toc-item"><li><span><a href="#Método-de-Euler" data-toc-modified-id="Método-de-Euler-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Método de Euler</a></span><ul class="toc-item"><li><span><a href="#Introducción" data-toc-modified-id="Introducción-2.1.1"><span class="toc-item-num">2.1.1&nbsp;&nbsp;</span>Introducción</a></span></li><li><span><a 
href="#Análisis-de-error-para-el-método-de-Euler" data-toc-modified-id="Análisis-de-error-para-el-método-de-Euler-2.1.2"><span class="toc-item-num">2.1.2&nbsp;&nbsp;</span>Análisis de error para el método de Euler</a></span></li><li><span><a href="#Ejemplo-de-aplicación-del-método-de-Euler" data-toc-modified-id="Ejemplo-de-aplicación-del-método-de-Euler-2.1.3"><span class="toc-item-num">2.1.3&nbsp;&nbsp;</span>Ejemplo de aplicación del método de Euler</a></span></li></ul></li><li><span><a href="#Método-de-Heun" data-toc-modified-id="Método-de-Heun-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Método de Heun</a></span></li><li><span><a href="#Generalización-de-los-métodos-de-Runge---Kutta" data-toc-modified-id="Generalización-de-los-métodos-de-Runge---Kutta-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Generalización de los métodos de <em>Runge - Kutta</em></a></span><ul class="toc-item"><li><span><a href="#Métodos-de-Runge---Kutta-de-segundo-orden" data-toc-modified-id="Métodos-de-Runge---Kutta-de-segundo-orden-2.3.1"><span class="toc-item-num">2.3.1&nbsp;&nbsp;</span>Métodos de <em>Runge - Kutta</em> de segundo orden</a></span></li><li><span><a href="#Método-de-Runge---Kutta-de-tercer-orden" data-toc-modified-id="Método-de-Runge---Kutta-de-tercer-orden-2.3.2"><span class="toc-item-num">2.3.2&nbsp;&nbsp;</span>Método de <em>Runge - Kutta</em> de tercer orden</a></span></li><li><span><a href="#Método-de-Runge---Kutta-de-cuarto-orden" data-toc-modified-id="Método-de-Runge---Kutta-de-cuarto-orden-2.3.3"><span class="toc-item-num">2.3.3&nbsp;&nbsp;</span>Método de <em>Runge - Kutta</em> de cuarto orden</a></span></li></ul></li></ul></li></ul></div> # - # ## Ecuaciones diferenciales Ordinarias # ### Introducción # Las [Ecuaciones Diferenciales ordinarias](https://en.wikipedia.org/wiki/Differential_equation) (*ODE*) se emplean para modelar matemáticamente problemas de la ciencia (exactas, sociales, etc) o la ingeniería, que implican el cambio de alguna 
variable (*dependiente*) respecto a otra (*independiente*). # # Si $y$ representa a una función $f$ que puede tener como parámetros un conjunto de variables independientes $t_1,t_2,\ldots,t_𝑛$, entonces, una *ODE* es una igualdad en la que intervienen alguna o algunas de las derivadas de la función $f$ con respecto a alguna de las variables independientes $t$, además de los valores y variables: # # <a id='Ec6_1'></a> # \begin{equation*} # y, t, \frac{dy}{dt},\frac{d^2y}{dt^2},\ldots, \frac{d^ny}{dt^n} # \label{eq:Ec6_1} \tag{6.1} # \end{equation*} # # Una ecuación diferencial ordinaria $y'=f(t,y)$ por sí misma no determina una única función que sea solución, porque la ecuación simplemente especifica las pendientes de los componentes de la solución $y'(t)$ en cada punto, pero no el valor real de la solución $y(t)$ en cualquier punto. Por tanto, en general, existe una familia infinita de funciones que satisfacen la ecuación diferencial, siempre que $f$ sea lo suficientemente suave. # # # <a id='Ej6_1'></a> # - ***Ejemplo: 1*** # # Considere la ecuación diferencial ordinaria $y' = y$. Esta es una *EDO* de la forma $y'=f(t,y)$, donde $f(t,y)=y$. La familia de soluciones para esta ecuación viene dada por $y(t)=ce^t$, donde $c$ es cualquier constante real. Si imponemos una condición inicial, $y(t_0)=y_0$, se determinará la única solución particular que satisface la condición inicial. Para este ejemplo, si $t_0=0$, obtenemos $c=y_0$, lo que significa que la solución es $y(t)=y_0e^t$. # # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img01_EDO1_1.PNG?raw=true" width="250" /> # </p> # # <div style="text-align: right"> Fuente: <a href="https://heath.cs.illinois.edu/scicomp/"><NAME>. Scientific Computing. 
An introductory survey</a> </div> # # [Volver a la Tabla de Contenido](#TOC) # ### Problemas de valor inicial (PVI) # Para determinar una solución particular, debemos especificar el valor, generalmente denotado por $y_0$, de la función solución en algún punto, usualmente denotado por $t_0$. Por lo tanto, parte de los datos del problema dados es el requisito de que # $$y(t_0) = y_0$$ # # Este requisito adicional determina una solución única para la *EDO*, siempre que $f$ sea continuamente diferenciable. Debido a que la variable independiente $t$ generalmente representa el tiempo, pensamos en $t_0$ como el tiempo inicial y en $y_0$ como el valor inicial. Por tanto, esto se denomina [problema de valor inicial](https://en.wikipedia.org/wiki/Initial_value_problem) (*PVI*). La *EDO* gobierna la evolución dinámica del sistema en el tiempo desde su estado inicial $y_0$ en el tiempo $t_0$ en adelante, y buscamos una función $y(t)$ que describa el estado del sistema en función del tiempo. # # En este capítulo nos centraremos en aproximar la solucion de $y(t)$ para un problema de la forma # # <a id='Ec6_2'></a> # \begin{equation*} # \frac{dy}{dt} = f(t,y), \quad a \le t \le b # \label{eq:Ec6_2} \tag{6.2} # \end{equation*} # # sujeto a la condición inicial $y(a = t_0) = y_0$. # [Volver a la Tabla de Contenido](#TOC) # ### Solución numérica de una EDO # Para resolver de forma numérica una *EDO* de orden $1$, se debe conocer de antemano los valores iniciales $𝑦(𝑥_0 )=𝑦_0$ y el intervalo $[𝑎,𝑏]$. La solución de la *EDO* es un conjunto finito de puntos # # <a id='Ec6_3'></a> # \begin{equation*} # \{(𝑥_0,𝑓(𝑥_0 )),(𝑥_1,𝑓(𝑥_1 )),…,(𝑥_𝑛,𝑓(𝑥_𝑛 ))\} # \label{eq:Ec6_3} \tag{6.3} # \end{equation*} # # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img01_EDO2.PNG?raw=true" width="500" /> # </p> # # <div style="text-align: right"> Fuente: <a href="http://artemisa.unicauca.edu.co/~cardila/Chapra.pdf"><NAME>., <NAME>. 
Métodos Numéricos para ingenieros, 5a Ed. <NAME>. 2007</a> </div> # # [Volver a la Tabla de Contenido](#TOC) # ### Existencia, unicidad y problemas bien planteados # Para poder determinar la existencia y unicidad de la solución de una *EDO*, es necesario introducir algunos conceptos que serán de utilidad. # [Volver a la Tabla de Contenido](#TOC) # #### Condición de Lipschitz # Una función $f(t,y)$ satisface la [condición de Lipschitz](https://en.wikipedia.org/wiki/Lipschitz_continuity) (llamada así en honor al matemático alemán [<NAME>](https://en.wikipedia.org/wiki/Rudolf_Lipschitz)) en la variable $y$ si existe una constante $L>0$ tal que # # <a id='Ec6_4'></a> # \begin{equation*} # |f(t,y_1)-f(t,y_2)| \le L|y_1-y_2|, # \label{eq:Ec6_4} \tag{6.4} # \end{equation*} # # en una región $R \subset \mathbb{R}^2$ tal que $(t,y_1), (t,y_2) \in R$. # # El anterior resultado implica que, si $\partial f(t,y)/\partial y$ existe, estará acotada por un valor $L$. De forma similar, si $\partial f/ \partial y$ está acotada en un intervalo $[a,b]$ entonces $f$ staisface la condición de *Lipschitz*. # [Volver a la Tabla de Contenido](#TOC) # #### Conjunto convexo # Se dice que un conjunto $D \subset \mathbb{R}^2$ es convexo si para dos puntos arbitrarios $P_1=(t_1,y_1)$, $P_2=(t_2,y_2)$ pertenecen a, o están contenidos en, $D$. Es decir, si $\forall s \in [0,1], sP_1+(1-s)P_s \in D$. # # Geométricamente se tiene que un conjunto es convexo siempre que dos puntos que pertenezcan a un conjunto, todo segmento de línea recta entre los dos puntos también pertenece al conjunto. 
# # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img03_Convex.PNG?raw=true" width="250" /> # </p> # # <div style="text-align: right"> Fuente: <a href="http://jaramillo_herman.workfolio.com/"><NAME>., Página personal</a> </div> # [Volver a la Tabla de Contenido](#TOC) # #### Teorema de suficiencia # Suponga que $f(t,y)$ se define sobre un conjunto convexo $D \subset \mathbb{R}^2$, existe una constante $L>0$ con # # <a id='Ec6_5'></a> # \begin{equation*} # \left| \frac{\partial f(t,y)}{\partial y} \right| \le L, \forall (t,y) \in D # \label{eq:Ec6_5} \tag{6.5} # \end{equation*} # # entonces $f$ satisface la condición *Lipschitz* en $D$ en la variable $y$ con constante $L$ de *Lipschitz*. # [Volver a la Tabla de Contenido](#TOC) # <a id='TEU'></a> # #### Teorema de existencia y unicidad # Suponga que $D= \{(t,y)| a\le t\le b \text{ y } -\infty < y < \infty \}$ y que $f(t,y)$ es continua en $D$. Si $f$ satisface la condición de *Lipschitz* en $D$ en la variable $y$, entonces el problema de valor inicial # # <a id='Ec6_6'></a> # \begin{equation*} # \frac{dy}{dt}=f(t,y), \quad a\le t \le b, \quad y(a)=\alpha # \label{eq:Ec6_6} \tag{6.6} # \end{equation*} # # tiene solución única $y(t)$ para $a\le t \le b$. # [Volver a la Tabla de Contenido](#TOC) # #### Problemas bien planteados # Un problema de valor inicial, dado por la ecuación ([6.6](#Ec6_6)) se dice que está bien planteado si cumple las siguientes dos condiciones: # # # - Existe solución única # # # - La solución depende de la forma continua de los datos, es decir, de las condiciones iniciales. Con esto, pequeños cambios en los datos iniciales implica pequeños cambios en la solución. 
# # # Este tipo de condición se denomina [*problema perturbado*](https://en.wikipedia.org/wiki/Stability_theory), es decir, existen constantes $\varepsilon_0>0$ y $k>0$, tales que para cualquier $\varepsilon$ en $(0,\varepsilon_0)$, siempre que $\delta(t)$ es continua con $|\delta(t)|<\varepsilon \quad \forall t \in [a,b]$, y cuando $|\delta_0|<\varepsilon$, el problema de valor inicial # # <a id='Ec6_7'></a> # \begin{equation*} # \frac{dz}{dt}=f(t,z)+\delta(t), \quad a\le t \le b, \quad z(a)=\alpha+\delta_0 # \label{eq:Ec6_7} \tag{6.7} # \end{equation*} # # tiene solución única $z(t)$ que satisface # # \begin{equation*} # |z(t)-y(t)|<k\varepsilon \quad \forall t \in [a,b] # \end{equation*} # # Adicional a lo anterior, si $f$ satisface las condiciones del [teorema de existencia y unicidad](#TEU) y la ecuación ([6.6](#Ec6_6)), se dice entonces que el problema de valor inicial dado está bien planteado. # [Volver a la Tabla de Contenido](#TOC) # #### EDOs estables e inestables # Concluyendo, en términos generales, si los miembros de la familia de soluciones para una *EDO* se alejan unos de otros con el tiempo, se dice que la ecuación es inestable; pero si los miembros de la familia de soluciones se acercan entre sí con el tiempo, se dice que la ecuación es estable. Si las curvas solución no convergen ni divergen (es decir, permanecen cerca pero en realidad no se unen), se dice que la ecuación es neutralmente estable. Esta definición de estabilidad para las *EDO* refleja la sensibilidad de una solución de la *EDO* a las perturbaciones. A una pequeña perturbación en una solución de una ecuación estable se amortiguará con el tiempo porque las curvas de la solución están convergiendo, mientras que para una ecuación inestable la perturbación aumentará con el tiempo porque las curvas de la solución son divergentes. # # Retomando el [ejemplo 1](#Ej6_1), se consideró la EDO escalar $y'=y$ y se graficó la familia de curvas que son solución, $y=ce^t$. 
A partir del crecimiento exponencial de las soluciones, sabemos que las curvas de la solución para esta ecuación se alejan entre sí a medida que aumenta el tiempo. Por tanto, podemos concluir que la ecuación es inestable. Más rigurosamente, observamos que el [*jacobiano*](https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant) de $f$ (es decir, $\partial f / \partial y$) es positivo (de hecho, es la constante 1), por lo que la ecuación es inestable. # # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img01_EDO1_1.PNG?raw=true" width="250" /> # </p> # # <div style="text-align: right"> Fuente: <a href="https://heath.cs.illinois.edu/scicomp/"><NAME>. Scientific Computing. An introductory survey</a> </div> # # Consideremos ahora una ecuación escalar diferente, $y'=-y$. La familia de soluciones para esta ecuación viene dada por $y(t)=ce^{-t}$, donde $c$ es cualquier constante real. Para esta ecuación vemos que el *jacobiano* de $f$ es negativo ($\partial f / \partial y = -1$), por lo que la ecuación es estable. También podemos ver esto en el decaimiento exponencial de las soluciones, como se muestra en la siguiente imágen. # # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img02_EDO2.PNG?raw=true" width="350" /> # </p> # # <div style="text-align: right"> Fuente: <a href="https://heath.cs.illinois.edu/scicomp/"><NAME>. Scientific Computing. 
An introductory survey</a> </div> # [Volver a la Tabla de Contenido](#TOC) # ## Métodos de Runge - Kutta # En el primer capítulo del curso, se utilizó un esquema numérico para resolver la ecuación del paracaidista de la forma: # # $$\text{Nuevo valor = valor anterior + pendiente} \times \text{tamaño de paso}$$ # # en términos matemáticos: # # <a id='Ec6_8'></a> # \begin{equation*} # y_{i+1} = y_i + \phi h # \label{eq:Ec6_8} \tag{6.8} # \end{equation*} # # La pendiente estimada $\phi$ se usa para extrapolar desde un valor anterior $y_i$ a un nuevo valor $y_{i+1}$ en una distancia $h$. Esta fórmula se puede aplicar paso a paso para calcular el valor en el futuro y, por tanto, trazar la trayectoria de la solución. # # A esta familia de métodos se les denomina [Métodos de Runge-Kutta](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) # [Volver a la Tabla de Contenido](#TOC) # <a id='Euler'></a> # ### Método de Euler # #### Introducción # Empezando por el método más simple como introducción a los esquemas más complejos, el [método de Euler](https://en.wikipedia.org/wiki/Euler_method). En este método, la primera derivada proporciona una estimación directa de la pendiente en $t_i$: # # <a id='Ec6_9'></a> # \begin{equation*} # \phi=f(t_i,y_i) = y'_i # \label{eq:Ec6_9} \tag{6.9} # \end{equation*} # # donde $f(t_i,y_i)$ es la *EDO* evaluada en $x_i$ y $t_i$. Esta estimación se puede sustituir en la ecuación [(6.8)](#Ec6_8) # # <a id='Ec6_10'></a> # \begin{equation*} # y_{i+1}=y_i+f(t_i,y_i)h # \label{eq:Ec6_10} \tag{6.10} # \end{equation*} # # En el método de Euler, se predice un nuevo valor de $y$ por medio de la pendiente, igual a la primera derivada en el valor original de $t$, que habrá de extrapolarse en forma lineal sobre el tamaño de paso $h$. 
# # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img04_Euler01.PNG?raw=true" width="500" /> # </p> # [Volver a la Tabla de Contenido](#TOC) # #### Análisis de error para el método de Euler # La solución numérica de las *EDO's* involucra dos tipos de errores: # # - ***Truncamiento:*** Causado por la naturaleza de las técnicas empleadas para aproximar los valores de $y$ y a su vez se dividen en dos tipos, *Local*, que es la aplicación del método en cuestión sobre un paso sencillo, y *propagado*, que consiste en las aproximaciones producidas durante los pasos previos. La suma de los dos es el error de truncamiento global. # # # - ***Redondeo:*** Se puede obtener cierto conocimiento acerca de la magnitud y propiedades del error de truncamiento al derivar el método de Euler directamente de la expansión en series de Taylor. Es fácil determinar que el error absoluto en el método de Euler está dado por: # # <a id='Ec6_11'></a> # \begin{equation*} # E_a=\frac{f'(t_i,y_i)}{2}h^2 # \label{eq:Ec6_11} \tag{6.11} # \end{equation*} # [Volver a la Tabla de Contenido](#TOC) # #### Ejemplo de aplicación del método de Euler # Retomando el [ejemplo 1](#Ej6_1), para un tamaño de paso $h=0.1$, se avanzará en la solución desde el tiempo $t_0=0$ hasta el tiempo $t_n=t_0+n\times h$, de la siguiente manera, reemplazando en la ecuación ([6.10](#Ec6_10)) # # \begin{equation*} # y_1=y_0+f(t_0,y_0)h \\ # y_2=y_1+f(t_1,y_1)h \\ # y_3=y_2+f(t_2,y_2)h \\ # \vdots \\ # y_n=y_{n-1}+f(t_{n-1},y_{n-1})h # \end{equation*} # # reemplazando valores y tabulando los cálculos, # # |$$t_i$$ | Euler | Error | # |:------:|:-------------------------------------------------:|:------:| # |$$0.0$$ |$$y_0=1.0$$ | - | # |$$0.1$$ |$$y_1=1.000000+(1.000000) \times 0.1=1.100000$$ |$$0.004679$$| # |$$0.2$$ |$$y_2=1.100000+(1.100000) \times 0.1=1.210000$$ |$$0.009336$$| # |$$0.3$$ |$$y_3=1.210000+(1.210000) \times 0.1=1.331000$$ |$$0.013971$$| # |$$0.4$$ 
|$$y_4=1.331000+(1.331000) \times 0.1=1.464100$$ |$$0.018584$$| # |$$0.5$$ |$$y_5=1.464100+(1.464100) \times 0.1=1.610510$$ |$$0.023176$$| # |$$0.6$$ |$$y_6=1.610510+(1.610510) \times 0.1=1.771561$$ |$$0.027747$$| # |$$0.7$$ |$$y_7=1.771561+(1.771561) \times 0.1=1.948717$$ |$$0.032296$$| # |$$0.8$$ |$$y_8=2.143589+(2.143589) \times 0.1=2.143589$$ |$$0.036823$$| # |$$0.9$$ |$$y_9=2.143589+(2.143589) \times 0.1=2.357948$$ |$$0.041330$$| # |$$1.0$$ |$$y_{10}=2.357948+(2.357948) \times 0.1=2.593742$$ |$$0.045815$$| # # aprovechando que conocemos la respuesta exacta, el error será calculado como error relativo. A seguir, se realizará una implementación computacional para el método de Euler. import numpy as np import matplotlib.pyplot as plt def plots(t,y, method): plt.plot(t,y, 'ro', label=method) x = np.linspace(t[0],t[-1],100) plt.plot(x,fex(x),'-', label='Exacta') plt.grid(True) plt.legend() plt.show() def euler(y0, t0, tn, h): n = int((tn - t0) / h) y = [y0] t = [t0] print('| t | y | Exacta | Error(%)|') print('|{0:6.6f} |{1:6.6f} |{2:6.6f} | - | '.format(t[0],y[0],y[0])) for i in range(n): yi = y[i] + ode(t[i],y[i]) * h y.append(yi) ti = t[i] + h t.append(ti) fexi = fex(ti) erri = abs((fexi - yi)/fexi) print('|{0:6.6f} |{1:6.6f} |{2:6.6f} |{3:6.6f} | '.format(ti,yi,fexi,erri)) return t,y def ode(t,y): return y def fex(x): return np.exp(x) #Datos iniciales t0 = 0.0 y0 = 1.0 tn = 1 h = 0.1 t, y = euler(y0, t0, tn, h) plots(t, y, 'Euler') # [Volver a la Tabla de Contenido](#TOC) # ### Método de Heun # En el [método de Euler](#Euler), la pendiente al inicio del intervalo (ecuación [6.9](#Ec6_9)) se usa para extrapolar linealmente a $y_{i+1}$ (ecuación [6.10](#Ec6_10)). El método se detiene aquí. Un método para mejorar la estimación de la pendiente en el [método de Euler](#Euler) involucra la determinación y promediado de dos derivadas para el intervalo. 
En el [método de Heun](https://en.wikipedia.org/wiki/Heun%27s_method), la pendiente calculada en la ecuación [6.10](#Ec6_10) no es la respuesta final, sino una predicción intermedia. Esta ecuación es llamada *predictor*. Mejora una estimación de $y_{i+1}$ que permite el cálculo de una estimación de la pendiente al final del intervalo: # # <a id='Ec6_12'></a> # \begin{equation*} # y'_{i+1}=f(t_{i+1},y_{i+1}^0) # \label{eq:Ec6_12} \tag{6.12} # \end{equation*} # # Las dos pendientes, representadas en las ecuaciones [6.9](#Ec6_9) y [6.12](#Ec6_12), se promedian en el intervalo # # <a id='Ec6_13'></a> # \begin{equation*} # \bar{y}'=\frac{y_i'+y_{i+1}'}{2}=\frac{f(t_{i},y_{i})+f(t_{i+1},y_{i+1}^0)}{2} # \label{eq:Ec6_13} \tag{6.13} # \end{equation*} # # Esta pendiente promedio se utiliza para extrapolar linealmente desde $y_i$ hasta $y_{i+1}$ usando el método de *Euler*: # # <a id='Ec6_14'></a> # \begin{equation*} # y_{i+1}=y_i+\frac{f(t_{i},y_{i})+f(t_{i+1},y_{i+1}^0)}{2}h # \label{eq:Ec6_14} \tag{6.14} # \end{equation*} # # esta última ecuación es llamada *corrector*. el método de *Heun* es un procedimmiento [*predictor - corrector*](https://en.wikipedia.org/wiki/Predictor%E2%80%93corrector_method). 
Gráficamente, el método de *Heun* indica: # # <p float="center"> # <img src="https://github.com/carlosalvarezh/Analisis_Numerico/blob/master/images/C06_Img05_Heun.PNG?raw=true" width="750" /> # </p> # # [Volver a la Tabla de Contenido](#TOC) # + # Implementación computacional del método de Heun # - # [Volver a la Tabla de Contenido](#TOC) # ### Generalización de los métodos de *Runge - Kutta* # Existen muchas variaciones de los métodos de [Runge-Kutta](https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods), pero todas se pueden denotar en la forma generalizada de la ecuación: # # # <a id='Ec6_15'></a> # \begin{equation*} # y_{i+1} = y_i + f(t_i,y_i,h)h # \label{eq:Ec6_15} \tag{6.15} # \end{equation*} # # donde $f(t_i,y_i,h)$ es conocida como función incremento, la cual puede interpretarse como una pendiente representativa sobre el intervalo. # # <a id='Ec6_16'></a> # \begin{equation*} # f = a_1k_1+ a_2k_2 +\ldots+ a_nk_n # \label{eq:Ec6_16} \tag{6.16} # \end{equation*} # # donde las $a_i$ son constantes y las $k_i$ son expresiones del tipo: # # <a id='Ec6_17'></a> # \begin{equation*} # \begin{split} # k_1 &= f(t_i,y_i) \\ # k_2 &= f(t_i+p_1h,y_i+q_{11}k_1h) \\ # k_3 &= f(t_i+p_2h,y_i+q_{21}k_1h+q_{22}k_2h) \\ # \vdots # \end{split} # \label{eq:Ec6_17} \tag{6.17} # \end{equation*} # # Observe que las $k$ son relaciones de recurrencia, esto es, $k_1$ aparece en la ecuación para $k_2$, la cual aparece en la ecuación para $k_3$, etc. Como cada $k$ es una evaluación funcional, esta recurrencia hace que los métodos *RK* sean eficientes para la programación. Existen varios tipos de métodos *RK* al emplear diferentes números de términos en la función incremento como la especificada por $n$. # # - $n = 1$, es el *método de Euler*. Una vez se elige $n$, se evalúan las $a$, $p$ y $q$ al igualar la ecuación [6.16](#Ec6_16) a los términos en la expansión de la *serie de Taylor*. 
# # [Volver a la Tabla de Contenido](#TOC) # #### Métodos de *Runge - Kutta* de segundo orden # La versión de segundo orden para la ecuación [(6.15)](#Ec6_15) es: # # <a id='Ec6_18'></a> # \begin{equation*} # \begin{split} # y_{i+1} = y_i + (a_1k_1+a_2k_2)h # \end{split} # \label{eq:Ec6_18} \tag{6.18} # \end{equation*} # # donde # # <a id='Ec6_19'></a> # \begin{equation*} # \begin{split} # k_1 &= f(t_i, y_i)\\ # k_2 &= f(t_i+p_1h, y_i+q_{11}k_1h) # \end{split} # \label{eq:Ec6_19} \tag{6.19} # \end{equation*} # # Los valores de $a_1$, $a_2$, $p_1$ y $q_{11}$ son evaluados al igualar el término de segundo orden de la ecuación [6.18](#Ec6_18) con la expansión de la serie de Taylor. Desarrollando tres ecuaciones para evaluar las cuatro incógnitas: # # <a id='Ec6_20'></a> # \begin{equation*} # \begin{split} # a_1+a_2 &=1 \\ # a_2p_2 &= 1/2 \\ # a_2q_{11} &= 1/2 # \end{split} # \label{eq:Ec6_20} \tag{6.20} # \end{equation*} # # # Como se tienen tres ecuaciones con cuatro incógnitas se tiene que suponer el valor de una de ellas. Suponiendo que se especificó un valor para $a_2$, se puede resolver de manera simultánea la ecuación [6.20](#Ec6_20): # # <a id='Ec6_21'></a> # \begin{equation*} # \begin{split} # a_1 &= 1 – a_2 \\ # p_1 &= q_{11} = \frac{1}{2a_2} # \end{split} # \label{eq:Ec6_21} \tag{6.21} # \end{equation*} # # Como se puede elegir un número infinito de valores para $a_2$, hay un número infinito de métodos *RK* de segundo orden. Cada versión podría dar exactamente los mismos resultados si la solución de la *EDO* fuera cuadrática, lineal o una constante. # # si # # # - $a_2 = 1/2$: Método de Heun con un solo corrector, donde. # # <a id='Ec6_22'></a> # \begin{equation*} # \begin{split} # y_{i+1} &= y_i + (k_1/2+k_2/2)h \\ # k_1 &= f(t_i, y_i) \\ # k_2 &= f(t_i+h, y_i+k_1h) # \end{split} # \label{eq:Ec6_22} \tag{6.22} # \end{equation*} # # # # - $a_2 = 1$: Método del punto medio. 
# # <a id='Ec6_23'></a> # \begin{equation*} # \begin{split} # y_{i+1} &= y_i + k_2h \\ # k_1 &= f(t_i, y_i) \\ # k_2 &= f(t_i+h/2, y_i+k_1h/2) # \end{split} # \label{eq:Ec6_23} \tag{6.23} # \end{equation*} # # # - $a_2 = 2/3$: Método de Ralston. # # <a id='Ec6_24'></a> # \begin{equation*} # \begin{split} # y_{i+1} &= y_i + (k_1/3+2k_2/3)h \\ # k_1 &= f(t_i, y_i) \\ # k_2 &= f(t_i+3h/4, y_i+3k_1h/4) # \end{split} # \label{eq:Ec6_24} \tag{6.24} # \end{equation*} # # [Volver a la Tabla de Contenido](#TOC) # #### Método de *Runge - Kutta* de tercer orden # Siguiendo el mismo razonamiento para $n = 3$, el resultado es seis ecuaciones con ocho incógnitas, por lo tanto se deben suponer dos valores con antelación para poder desarrollar el sistema de ecuaciones. Una versión ampliamente usada es: # # <a id='Ec6_25'></a> # \begin{equation*} # \begin{split} # y_{i+1} &= y_i + 1/6 (k_1 + 4k_2 + k_3)h \\ # k_1 &= f(t_i, y_i) \\ # k_2 &= f(t_i+h/2, y_i+k_1h/2) \\ # k_3 &= f(t_i+h, y_i – k_1h + 2k_2h) # \end{split} # \label{eq:Ec6_25} \tag{6.25} # \end{equation*} # # Si la derivada es solo una función de $x$, el método se reduce a la *regla de Simpson1/3* vista en el capítulo anterior. # # Una implementación computacional podría ser: import numpy as np import matplotlib.pyplot as plt def rk3(y0, t0, tn, h): n = int((tn - t0) / h) y = [y0] t = [t0] print('| t | y | Exacta | Error(%)|') print('|{0:6.6f} |{1:6.6f} |{2:6.6f} | - | '.format(t[0],y[0],y[0])) for i in range(n): k1 = ode(t[i],y[i]) k2 = ode(t[i] + h / 2, y[i] + k1 * h / 2) k3 = ode(t[i] + h, y[i] - k1 * h + 2 * k2 * h) yi = y[i] + h * (k1 + 4 * k2 + k3) / 6 y.append(yi) ti = t[i] + h t.append(ti) fexi = fex(ti) erri = abs((fexi - yi)/fexi) print('|{0:6.6f} |{1:6.6f} |{2:6.6f} |{3:6.6f} | '.format(ti,yi,fexi,erri)) return t,y t, y = rk3(y0, t0, tn, h) plots(t,y, 'RK3') # [Volver a la Tabla de Contenido](#TOC) # #### Método de *Runge - Kutta* de cuarto orden # Es el más popular de los métodos *RK*. 
También cuenta con infinitas versiones. La más usada es: # # <a id='Ec6_26'></a> # \begin{equation*} # \begin{split} # y_{i+1} &= y_i + 1/6 (k_1 + 2k_2 + 2k_3 + k_4)h \\ # k_1 &= f(t_i, y_i) \\ # k_2 &= f(t_i+h/2, y_i+k_1h/2) \\ # k_3 &= f(t_i+h/2, y_i + k_2h/2) \\ # k_4 &= f(t_i+h, y_i + k_3h) # \end{split} # \label{eq:Ec6_26} \tag{6.26} # \end{equation*} # # Una posible implementación computacional sería: def rk4(y0, t0, tn, h): n = int((tn - t0) / h) y = [y0] t = [t0] print('| t | y | Exacta | Error(%)|') print('|{0:6.6f} |{1:6.6f} |{2:6.6f} | - | '.format(t[0],y[0],y[0])) for i in range(n): k1 = ode(t[i],y[i]) k2 = ode(t[i] + h / 2, y[i] + k1 * h / 2) k3 = ode(t[i] + h / 2, y[i] + k2 * h / 2) k4 = ode(t[i] + h, y[i] + k3 * h) yi = y[i] + h * (k1 + 2 * k2 + 2 * k3 + k4) / 6 y.append(yi) ti = t[i] + h t.append(ti) fexi = fex(ti) erri = abs((fexi - yi)/fexi) print('|{0:6.6f} |{1:6.6f} |{2:6.6f} |{3:6.6f} | '.format(ti,yi,fexi,erri)) return t,y t, y = rk4(y0, t0, tn, h) plots(t,y, 'RK4') # [Volver a la Tabla de Contenido](#TOC) from IPython.core.display import HTML def css_styling(): styles = open('./nb_style.css', 'r').read() return HTML(styles) css_styling()
Cap06_EDO.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd

# # https://stackoverflow.com/questions/50394873/import-pandas-datareader-gives-importerror-cannot-import-name-is-list-like
# pd.core.common.is_list_like = pd.api.types.is_list_like
# import pandas_datareader as pdr
# from datetime import datetime as dt
import quandl as qdl
import matplotlib.pyplot as plt

# %matplotlib inline
# -

# Daily quotes for several symbols, indexed by date; only the columns this
# analysis needs are loaded.
data = pd.read_csv(
    'historical.csv',
    sep=',',
    parse_dates=True,
    index_col='Date',
    usecols=['Close', 'Date', 'Symbol'],
)

# Row selectors for the two tickers of interest.
tcs_mask = data.Symbol == 'TCS'
infy_mask = data.Symbol == 'INFY'

# explicit copy to avoid SettingWithCopyWarning due to hidden chained index assignment later on
tcs_df = data.loc[tcs_mask].copy()
tcs_df.head()

# alternatively if we don't want to make a copy of the dataframe, and want to operate on the original
# data.loc['% Change'] = pd.Series(None)
data.loc[tcs_mask].head()
# data.loc[infy_mask].head()

# Placeholder column, overwritten with the real percentage change just below.
tcs_df['% Change'] = pd.Series(None, index=tcs_df.index)
tcs_df.head()

# percentage change
tcs_df['% Change'] = tcs_df['Close'].pct_change()
tcs_df = tcs_df.dropna()
tcs_df.head()

# simple moving average
tcs_df['SMA 365'] = tcs_df['Close'].rolling(365).mean()
# NOTE(review): the result of dropna() is discarded here — the author likely
# meant `tcs_df = tcs_df.dropna()`; kept as-is to preserve behavior.
tcs_df.dropna()
tcs_df.head()

tcs_df.tail()

tcs = qdl.get('NSE/TCS', start_date="2016-10-01", end_date="2018-09-15")
movavg/smatest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # ## Ejemplo de uso de pyparsing # # Vamos a utilizar [pyparsing](http://pyparsing.wikispaces.com/home) para # procesar un fichero csv. Para hacer las cosas # más interesantes, supondremos que el formato # de los ficheros es muy laxo; por ejemplo, podemos # encontrarnos con lo siguiente: # # - Los campos de texto pueden venir entrecomillados o no # # - Los campos de tipo fecha pueden venir en dos formatos: YYYY-MM-DD o DD/MM/YYYY # # - Los campos de tipo booleano pueden venir con los valores ``1``, ``S``, ``Y`` o ``True`` para el valor lógico verdadero, y ``0``, ``N``, ``F`` y ``False`` para el valor lógico falso # # - Los números decimales pueden venir con una coma o con un punto como separador decimal. # # - La primera línea contiene los nombres de los campos, el resto los datos; en los dos casos se separa cada valor por el caracter ``;`` # # + [markdown] deletable=true editable=true # ### Ejemplo de datos a procesar # # Algo como esto: # # Comentario;Activo;Fecha;Importe # Texto sin comillas porque yo lo valgo;Y;2017-08-08;4292.00 # "Ahora si que pongo comillas";F;25/8/2014;3200.00 # Ya ves, todo vale;True;1/1/1970;4532,02 # # # + [markdown] deletable=true editable=true # ### Una gramatica para estos ficheros # # La gramática podría ser algo así: # # root -> header + lines # header -> \.+ # ignorar la linea # lines -> line+ # line -> text + sep + date + sep + bool + sep + cost # text -> '"' + literal + '"' | literal # date -> year + '-' + month + '-' + day | day + '/' + month + '/' + year # cost -> \d+[\.|,]\d{2} # bool -> '1' | 'S' | 'Y' | 'T' | 'True' | '0' | 'N' | 'F' | 'False' # year -> \d{4} # month -> 1|2|3|4|5|6|7|8|9|10|11|12 # day -> \d{1,2} # # + [markdown] deletable=true editable=true # ### 
Implementación con pyparsing # + [markdown] deletable=true editable=true # Las gramáticas pueden ser un poco intimidantes la primera vez que las ves. Lo bueno de pyparsing es que nos permite testear y modificar las distintas partes de la gramática como piezas sueltas. Así podemos crear el parser poco a poco, ensamblando las distinas piezas, con la confianza de que estas funcionan. # + [markdown] deletable=true editable=true # ### Parseando fechas # + [markdown] deletable=true editable=true # Por ejemplo, para las fechas, que pueden venir en dos formatos, tenemos el siguiente fragmento de la gramática (pasado a la sintaxis de pyparsing, y por tanto cambiando el orden, la regla inicial sería la última): # # + deletable=true editable=true from pyparsing import Literal, Regex, oneOf, StringEnd, Group, ParseException dash = Literal('-') slash = Literal('/') year = Regex('\d{4}') month = Regex('\d{1,2}') day = Regex('\d{1,2}') date = year + dash + month + dash + day ^ day + slash + month + slash + year # pyparsong sobrecarga el operador ^ para indicar alternancia # + [markdown] deletable=true editable=true # La clase ``Literal`` sirve para indicar una expresión o token literal, que queremos detectar durante la fase de # parseo. Así, definimos ``dash`` y ``slash`` para detectar los literales ``-`` y ``/``. La clase ``Regex`` nos permite definir tokens usando expresiones regulares. Estos objetos, así como el resto de los que veremos, derivan de la clase ``ParserElement`` de pyparsing, que sobrecarga varios operadores para poder expresar las reglas de la gramática. 
Así, la regla: # # date -> year + '-' + month + '-' + day | day + '/' + month + '/' + year # # Se puede expresar en Python con los operadores ``+`` y ``^``: # # date = year + dash + month + dash + day ^ day + slash + month + slash + year # # Aparte de cambios como el uso del operador ``^``, o la definición de los literales ``dash`` y ``slash``, podemos ver que la gramática se mapea de forma casi directa a expresiones Python. # # Veamos que tal funciona este mini-parser: # + deletable=true editable=true try: date.parseString('hola') except ParseException: pass # Ok, no es una fecha print(date.parseString('25/8/2016')) print(date.parseString('2017-12-08')) # + [markdown] deletable=true editable=true # pyparsing define su propia clase de excepciones para errores de Parseo, ``parseException``. Si nuestro parser es muy complicado puede ser interesante usar esta misma excepción para indicar nuestros propios errores. Por ejemplo, ahora mismo aceptamos para el día cualquier combinación de dos dígitos, e igualmente para el mes: # + deletable=true editable=true print(date.parseString('99/88/2016')) # opps, esto no debería valer, pero vale # + [markdown] deletable=true editable=true # Más adelante veremos que podemos tratar estos casos y elevar errores explicativos que provoquen el fallo del parser. # + [markdown] deletable=true editable=true # Por ahora, poco más de lo que podríamos hacer simplemente con expresiones regulares. # # Podemos realizar una pequeña mejora. Observemos un detalle de los separadores usados en el formato de fechas, definidos como ``dash`` y ``slash``; en realidad, sus valores no nos interesan. Podemos calcular el valor de la fecha sin necesidad de saber que caracteres se usaron como separador. Estos elementos son necesarios para el parser, pero no tienen más utilidad. # # Existe una clase en pyparsing llamada ``Suppress`` que funciona exactamente igual que ``Literal``, pero que retira el token, de forma que nos evitamos procesarlo. 
Cambiemos la gramática para redefinir ``dash`` y ``slash`` usando ``Suppress`` en vez de ``Literal``: # + deletable=true editable=true from pyparsing import Suppress dash = Suppress('-') slash = Suppress('/') year = Regex('\d{4}') month = Regex('\d{1,2}') day = Regex('\d{1,2}') date = year + dash + month + dash + day ^ day + slash + month + slash + year # + deletable=true editable=true print(date.parseString('23/9/2016')) # bien, el separador desaparece # + [markdown] deletable=true editable=true # No está mal, pero la mejora realmente interesante sería que nos devolviera algo más elaborado, un objeto de tipo fecha, objetos de tipo ``datetime.datetime``, por ejemplo. Vamos a ello. Para eso, necesitamos usar las **reglas de parseo**. # # ### Reglas de parseo # # Podemos asociar acciones a las reglas de parseo (en este daso, ``date``) para que se ejecuten cada vez qe se active la regla. # # Vamos a asociar una función que no haga nada, solo imprimir un valor para ver que, efectivamente, se ejecuta cuando la regla de parseo se activa. Usaremos el método ``setParseAction``: # + deletable=true editable=true def very_simple_action(): print('OK, se ha ejecutado la acción') date.setParseAction(very_simple_action) print(date.parseString('25/8/2016')) print(date.parseString('2017-12-08')) # + [markdown] deletable=true editable=true # De la documentación de pyparsing, podemos obtener más información sobre como definir y usar estas acciones: # # Podemos definir uno o varias acciones a realizar cuando se produce una coincidencia que activa la regla del parser. Estas acciones pueden ser cualquier objeto de tipo *callable* de python; es decir, funciones, métodos u objetos instanciados de clases que definan el método mágico ``__call__``. # # Las acciones pueden aceptar desde cero hasta tres argumentos, es decir que, dependiendo de como definamos la acción ``fn``, esta será llamada como ``fn()``, ``fn(toks)``, ``fn(loc, toks)`` o ``fn(s, loc, toks)``. 
El significado de estos parámetros es el siguiente: # # - ``s``: es la string original que activó el patrón de la regla # # - ``loc``: es la localización, dentro del texto, de la substring ``s`` (Útil para generar mensajes de error) # # - ``toks``: Una lista de los tokens encontrados, empaquetados en forma de objeto de tipo ``ParseResults`` # # Si la función quiere modificar los tokens, debe devolver un nuevo valor como resultado de la función, con lo que la lista de tokens devueltos reemplazaría a la original. Si no queremos realizar ningún cambio, la función no debe # retornar ningún valor. # # Definamos una acción, solo para ver que estos parámetros se pasan efectivamente: # + deletable=true editable=true def I_just_wanna_see(s, loc, tokens): print('s:', s) print('loc:', loc) print('tokens:', tokens) print() date.setParseAction(I_just_wanna_see) print(date.parseString('25/8/2016')) # + [markdown] deletable=true editable=true # **Nota**: Podemos asignar varias acciones usando el método ``addParseAction``. en ese caso, las acciones se ejecutan de forma anidada, siendo la primera en ejecutarse la primera en añadirse. Cada acción recibe como entrada el resultado de la anterior y pasa su resultado a la siguiente. Veamos un ejemplo: # + deletable=true editable=true token = Literal('hola') token.addParseAction(lambda tokens: 'ei' + tokens[0] + 'ai') token.addParseAction(lambda tokens: tokens[0].upper()) print(token.parseString('hola')) # + [markdown] deletable=true editable=true # Si cambiamos el orden en que se añaden las acciones, el resultado puede diferir, lógicamente: # + deletable=true editable=true token = Literal('hola') token.addParseAction(lambda tokens: tokens[0].upper()) token.addParseAction(lambda tokens: 'ei' + tokens[0] + 'ai') print(token.parseString('hola')) # + [markdown] deletable=true editable=true # Con esto ya podemos definir una acción que nos devuelva un objeto ``date``. 
Usaremos una acción con un solo parámetro, ``tokens``, la lista de los tokens detectados, ya que no necesitamos los otros parámetros. # # Como devolvemos un valor, el parser sustituirá la lista de tokens detectados por ese nuevo valor. # + deletable=true editable=true import datetime def get_as_date(tokens): first_element = tokens[0] if len(first_element) == 4: # Formato YYYY-MM-DD d = datetime.date( int(tokens[0]), # Year int(tokens[1]), # Month int(tokens[2]), # Day ) else: d = datetime.date( int(tokens[2]), # Year int(tokens[1]), # Month int(tokens[0]), # Day ) return d date.setParseAction(get_as_date) print(date.parseString('25/8/2016')) print(date.parseString('2017-12-08')) # + [markdown] deletable=true editable=true # ### Vamos a parsear valores lógicos # + [markdown] deletable=true editable=true # Podemos hacer algo similar con los objetos booleanos. Usaremos una de las funciones auxiliares de # pyparsing, ``oneOf``, que nos permite definir de forma rápida un conjunto de literales # alternativos. Además se asegura de que siempre intentará capturar el literal más grande, en caso de # que haya conflicto entre alguno de ellos; por ejemplo, entre ``<`` y ``<=`` primero intentará # encontrar una correspondencia con el más largo, ``<=``, y si no la encuentra lo intentará con ``<``. # # + deletable=true editable=true from pyparsing import oneOf boolean = oneOf('1 S Y T True 0 N F False') print(boolean.parseString('True')) # + [markdown] deletable=true editable=true # Con un poco de mágia en forma de acción asociada a la regla obtendremos valores booleanos de Python. 
LA API # de pyparsing es fluida, por lo que podemos definir la regla y asociar la acción en una sola línea: # + deletable=true editable=true def get_as_bool(tokens): return tokens[0] in ('1', 'S', 'Y', 'T', 'True') boolean = oneOf('1 S Y T True 0 N F False').setParseAction(get_as_bool) # simple tests assert boolean.parseString('1').pop() is True assert boolean.parseString('S').pop() is True assert boolean.parseString('Y').pop() is True assert boolean.parseString('T').pop() is True assert boolean.parseString('True').pop() is True assert boolean.parseString('0').pop() is False assert boolean.parseString('N').pop() is False assert boolean.parseString('F').pop() is False assert boolean.parseString('False').pop() is False # + [markdown] deletable=true editable=true # ### Parsear los importes # # Nos queda el problema de los importes, que pueden usar como separador decimal la coma, al estilo # español, o el punto, al estilo internacional, y las cadenas de textos, que pueden venir # limitadas por comillas o no. Los dos casos son fáciles de tratar: # + deletable=true editable=true from pyparsing import nums, Word cost = Word(nums) + oneOf('. ,') + Regex('\d\d') cost.parseString('3819.24') # + [markdown] deletable=true editable=true # La clase ``Word`` nos permite definir una palabra, pasandole uno o dos parámetros, que son vocabularios. Los vocabularios se pueden indicar con una string de símbolos, como ``'aeiou'``. ``nums`` es solo una constante definida en pyparsing que vale ``'0123456789'``. Podiamos haber usado una expresión regular, pero ``Word`` es bastante interesante. # # Si a ``Word`` se le pasa un solo vocabulario, define una palabra como una secuencia de n caracteres tomados de los símbolos definidos en el vocabulario. # # Si se le pasan dos vocabularios, define una palabra como una secuencia donde el primer # caracter debe pertenecer al primer vocabulario y el resto, si los hubiera, al segundo. 
Por ejemplo, podemos definir un parser para los nombres válidos de Python, que permiten el uso de carateres alfanuméricos y el carácter subrayado, pero no no se permiten que empiece por un dígito: # + deletable=true editable=true from pyparsing import alphas, nums, alphanums assert alphas == 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' assert alphanums == alphas + nums var_name = Word(alphas + '_', alphanums + '_') var_name.parseString('a') var_name.parseString('a1') var_name.parseString('alp_ha') try: var_name.parseString('1uno') except ParseException: # Oops, no se permite el caracter '_' pass # + [markdown] deletable=true editable=true # Definamos otra patrón, incluyendo el símbolo dolar como caracter válido al principio, PERL-style: # + deletable=true editable=true var_name_plus = Word(alphas + '$', alphanums + '_') assert var_name_plus.parseString('a').pop() == 'a' assert var_name_plus.parseString('$_alpha').pop() == '$_alpha' assert var_name_plus.parseString('$Alpha_plus').pop() == '$Alpha_plus' # + [markdown] deletable=true editable=true # Hecha esta disgresión, volvamos al problema de tratar los importes. Añadamos # una acción para obtener un número de tipo Decimal: # + deletable=true editable=true from decimal import Decimal def get_as_decimal(tokens): int_part, dec_part = tokens return Decimal('{}.{}'.format(int_part, dec_part)) cost = Word(nums) + Suppress(oneOf('. 
,')) + Regex('\d\d') cost.setParseAction(get_as_decimal) cost.parseString('484432,23') num_esp = cost.parseString('484432,23').pop() num_int = cost.parseString('484432.23').pop() assert num_esp == num_int == Decimal('484432.23') print(cost.parseString('3.14')) # + [markdown] deletable=true editable=true # ### Cadenas de texto con o sin delimitador # # Para poder procesar las cadenas de texto, ignorando si procede las comillas delimitadoras opcionales, podemos hacer: # + deletable=true editable=true quote = Suppress('"') content = Regex("[^\";]+") # Cualquier secuencia de caracteres, excepto ; y " text = quote + content + quote ^ content assert text.parseString('Texto sin comillas').pop() == 'Texto sin comillas' assert text.parseString('"Texto con comillas"').pop() == 'Texto con comillas' # + [markdown] deletable=true editable=true # Bueno, ha sido un viaje un poco largo, con paradas en algunos puntos interesantes, pero ya podemos escribir la gramática completa, junto al parser y las acciones aplicadas: # + deletable=true editable=true from pyparsing import OneOrMore # Funciones de conversion def get_as_decimal(s, lok, tokens): int_part, _sep, dec_part = tokens return Decimal('{}.{}'.format(int_part, dec_part)) def get_as_date(s, loc, tokens): a, b, c = tokens if len(a) == 4: # Formato YYYY-MM-DD return datetime.date(int(a), int(b), int(c)) else: # Formato DD/MM/YYYY return datetime.date(int(c), int(b), int(a)) def get_as_bool(s, loc, tokens): return tokens[0] in ('1', 'S', 'Y', 'T', 'True') sep = Suppress(';') quote = Suppress('"') # Texto content = Regex("[^\";]+") # Cualquier secuencia de caracteres, excepto ; y " text = quote + content + quote ^ content text.setParseAction(lambda tokens: tokens[0].strip()) boolean = oneOf('1 S Y T True 0 N F False') # Valores lógicos boolean.setParseAction(get_as_bool) dash = Suppress('-') # Fechas slash = Suppress('/') year = Regex('\d{4}') month = Regex('\d{1,2}') day = Regex('\d{1,2}') date = year + dash + month + dash + day 
^ day + slash + month + slash + year date.setParseAction(get_as_date) cost = Word(nums) + oneOf('. ,') + Regex('\d\d') # Importes cost.setParseAction(get_as_decimal) line = Group(text + sep + boolean + sep + date + sep + cost) # One Line lines = OneOrMore(line) # Lines lines.setParseAction(lambda tokens: list(tokens)) header = Suppress(Regex('.+')) # Header parser = header + lines + StringEnd() # First rule # + [markdown] deletable=true editable=true # - La clase ``OneOrMore`` nos permite implementar las reglas de *una secuencia de uno o más elementos repetidos*, como su mismo nombre indica. Pyparsing define muchos más clases de este tipo, como ``ZeroOrMore``, ``Optional``, (Uno o cero), ``OnlyOne``... # # - La clase ``Group`` nos permite agrupar varios tokens en un solo resultado, normalmente porque vamos a tratarlos todos juntos. # # - La clase ``StringEnd`` nos permite indicar que el parser, al consumir este token, debería de haber terminado, es decir, que todo el texto a parserar debe consumirse íntegramente. 
# # Vamos a hacer unas pruebas parseando líneas individuales: # + deletable=true editable=true print(line.parseString('Texto sin comillas porque yo lo valgo;Y;2017-08-08;4292.00')) # + deletable=true editable=true print(line.parseString('"Ahora si que pongo comillas";F;25/8/2014;3200.00')) # + deletable=true editable=true print(line.parseString('Ya ves, todo vale;True;1/1/1970;4532,02')) # + deletable=true editable=true active="" # print(header.parseString('Comentario;Activo;Fecha;Importe')) # Ignoramos la cabecera # - # Y la prueba de fuego, un fichero completo: # + deletable=true editable=true source = '''Comentario;Activo;Fecha;Importe Texto sin comillas porque yo lo valgo;Y;2017-08-08;4292.00 "Ahora si que pongo comillas";F;25/8/2014;3200.00 Ya ves, todo vale;True;1/1/1970;4532,02 ''' g = parser.parseString(source) for item in g: print(item) # + [markdown] deletable=true editable=true # Unsado el método ``parseFile`` podemos procesar un fichero, si especificamos el nombre, un fichero abierto, o cualquier objeto que implemente una interfaz similar a ``File``: # + deletable=true editable=true with open('ejemplo.csv', 'r') as stream: g = parser.parseFile(stream) for item in g: print(item) # + [markdown] deletable=true editable=true # ## Ventajas de Pyparsing # + [markdown] deletable=true editable=true # - **Robusto y sencillo de usar**. Pyparsing lleva más de una década de desarrollo, y se basa en el uso de gramáticas para la definición formal de lenguajes. El paso de la gramática a código Python es casi directo. # # # - **Desarrollo incremental, facilmente testeable**. El parser final se puede ir construyendo paso a paso. # # - No se ve en los ejemplo, pero podemos añadir **validaciones y mensajes de error explicativos** que simplifican la resolución de problemas --incluyendo, por ejemplo, número de línea y posición del error. 
# Podemos asignar nombres a los resultados de los tokens para que sean más sencillos de referenciar, y muchas otras funcionalidades que no hemos podido ver aquí.
#
# - **Flexible**, comparado con un parser hecho a mano o en base a un montón de expresiones regulares. A modo de ejemplo, véanse los **ejercicios para el lector**.
#
# + [markdown] deletable=true editable=true
# ### Ejercicios para el lector
#
# - Añadir otro formato válido para las fechas, por ejemplo, ``10/abr/2017`` (10 puntos)
#
# - Permitir que la última columna, el importe, acepte también un valor entero, es decir, sin parte decimal (20 puntos)
#
# - Permitir que los textos puedan venir sin comillas, con comillas simples o con comillas dobles (20 puntos)
#
# - El jefe ha modificado el formato, el fichero tiene ahora una última línea donde va el total acumulado de todos los importes previos, algo así:
#
#        Comentario;Activo;Fecha;Importe
#        Texto sin comillas porque yo lo valgo;Y;2017-08-08;4292.00
#        "Ahora si que pongo comillas";F;25/8/2014;3200.00
#        Ya ves, todo vale;True;1/1/1970;4532,02
#        12024.02
#
#   El parser debe adaptarse a este cambio, y comprobar que la suma de los importes
#   coincide con el dato final. Sí, ya sé que, técnicamente, esto ha dejado de ser
#   un CSV. Estas cosas pasan. (100 puntos y una gran satisfacción personal)
# + [markdown] deletable=true editable=true
# ### Más información
#
# - La página web de pyparsing: <http://pyparsing.wikispaces.com/>
#
# - Parsing In Python: Tools And Libraries <https://tomassetti.me/parsing-in-python/>
# -
Ejemplo-uso-pyparsing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Now You Code 2: House Depot Paint Estimator
#
# The big-box hardware store *House Depot* as contracted you to write an App to estimate the amount of paint required to paint a room. Given that 1 gallon of paint covers 400 square feet, you must output the number of paint cans required to paint a rectangular room.
#
# The program should input:
#
# - the length of the room in feet,
# - the width of the room in feet,
# - the height of the room in feet
#
# The program should output:
#
# - total area to be painted ( sum of each of the 4 walls)
# - the number of gallons of paint required to paint the room.
#
# Example:
#
# ```
# Enter length of room: 12
# Enter width of room: 14
# Enter height of room: 8
# Total area to be painted: 416.00
# Total gallons of paint required: 2
# ```
#
# HINT: Use the `math.ceil()` function to round your number of gallons up to the nearest whole number
#

# ## Step 1: Problem Analysis
#
# Inputs: Length of the room in feet, width of the room in feet, the height of the room in feet
#
# Outputs: Total area to be painted, the number of gallons of paint required to paint the room
#
# Algorithm (Steps in Program):
#
# 1. Read length, width and height as floats.
# 2. Wall area = 2 * height * (length + width)  (the four walls).
# 3. Gallons = area / 400, rounded UP with math.ceil (paint is sold in whole cans).
# 4. Print the area and the gallon count.
#

# +
import math # you need this to use math.ceil(), so we wrote this part of the code for you!!! LEAVE IT HERE!

# Step 2: write code here

GALLON_COVERAGE_SQFT = 400  # one gallon of paint covers 400 square feet


def paint_estimate(length, width, height, coverage=GALLON_COVERAGE_SQFT):
    """Return (total_wall_area, gallons_needed) for a rectangular room.

    The paintable area is the sum of the four walls: 2 * height * (length + width).
    Gallons are rounded up with math.ceil because paint is sold in whole cans.
    `coverage` generalizes the square feet covered per gallon (default 400).
    """
    area = 2 * height * (length + width)
    gallons = math.ceil(area / coverage)
    return area, gallons


if __name__ == '__main__':
    length = float(input("Enter length of room: "))
    width = float(input("Enter width of room: "))
    height = float(input("Enter height of room: "))
    area, gallons = paint_estimate(length, width, height)
    # Labels fixed to match the spec: the old code printed the gallon count
    # under the misleading label "Total surface Area".
    print("Total area to be painted: %.2f" % area)
    print("Total gallons of paint required: %d" % gallons)
# -

# ## Step 3: Questions
#
# 1. Why does the program still run when you enter a negative number for length?
# The program still runs when you enter a negative number for length because the input is converted to a float, and floats can be negative numbers.
# 2. Does the output make sense when you enter a negative length? What type of error is this?
# This is an input (user) error: entering a negative length is a human mistake. The output does not make sense, because a room cannot have a negative length even though the program accepts it.
# 3. Why do we use `math.ceil()` in this program? In other words are you allowed to buy 3.75 gallons of paint?
# We use 'math.ceil()' because it always rounds the answer up when it has a decimal part. This matters with paint because if you rounded down, you wouldn't have enough paint to finish painting the room.
# ## Reminder of Evaluation Criteria
#
# 1. Was the problem attempted (analysis, code, and answered questions)?
# 2. Was the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
#
content/lessons/03/Now-You-Code/NYC2-House-Depot-Paint-Estimator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# GCD ("PGCD") of two integers via Euclid's algorithm.
#
# Principle (translated from the original French notes):
#   - the GCD of 0 and an integer a is a;
#   - the GCD of two integers a and b equals the GCD of b and a mod b,
#     i.e. PGCD(a, b) = PGCD(b, a mod b).
# Since a mod b is strictly smaller than b and non-negative, repeating the
# second rule always terminates on the first one — which is exactly why a
# `while` loop (and not a fixed number of copy-pasted steps) is the right tool.
#
# The original cell was broken: `from Math import.math.* ()` is a SyntaxError,
# `pgcd(0,a)=a` assigns to a call, `if(b%d=0)` uses `=` inside a condition,
# the loop ran an arbitrary 50 iterations, and `if b<a and b>0:` had no body.
# The rewrite below implements the algorithm correctly; no `b < a` check is
# needed because the first iteration swaps the operands automatically.


def pgcd(a, b):
    """Return the greatest common divisor (PGCD) of integers a and b.

    Iterative Euclid: replace (a, b) by (b, a % b) until b == 0.
    Works for any integers (including 0); the result is non-negative.
    """
    a, b = abs(a), abs(b)
    while b:
        a, b = b, a % b
    return a


def calcule(a, b):
    """Kept from the original notebook: thin wrapper around pgcd()."""
    return pgcd(a, b)


if __name__ == '__main__':
    a = int(input("Donnez moi la première valeur pour calculer leur pgcd :"))
    b = int(input("Donnez moi la deuxieme valeur pour calculer le pgcd :"))
    print(calcule(a, b))
# -
nanp/nan/exercice/icon.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import os

# %matplotlib inline
# -

# KPI identifier used to build the prediction / training / score file paths.
kpi = '<KEY>'

# +
# Donut model predictions for this KPI, indexed by timestamp; drop the
# leftover CSV index column.
preds = pd.read_csv('donut/' + kpi + '_predictions.csv', index_col='timestamp')
preds = preds.drop(['Unnamed: 0'], axis=1)

# Training df (contains labels)
train = pd.read_csv('../train/KPI/train_' + kpi + '.csv', index_col='timestamp')

# Per-point scores saved by the model; attached as the 'prob' column.
scores = np.load('../PICKLE/donut/models/' + kpi + '/test_matrix_initial_size.npy')
preds['prob'] = scores
# -

# Last 50 rows after sorting by score descending, i.e. the lowest scores.
df_mod = preds.sort_values(by=['prob'], ascending=False).tail(50)

# Plot values and (scaled) predictions; 1440 is presumably minutes per day.
# NOTE(review): the two windows differ (20*1440 vs 40*1440) — confirm intended.
preds['value'].head(20 * 1440).plot(kind='line', figsize=(14, 8))
plt.plot(preds['predictions'].head(40 * 1440) * 0.1, alpha=.5)

train.plot(kind='line', figsize=(14, 8))

train.groupby(train.label).count()

preds.groupby(preds.predictions).count()

# Ratio of the two prediction-class row counts.
preds.groupby(preds.predictions).count().value.values[1] / preds.groupby(preds.predictions).count().value.values[0]

preds
THU--Advanced_Network_Management/GP/DATASETS/gen_donut_pred.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Breakeven Analysis: 3D Printing vs. Injection Molding ##
#
# Determine the breakeven point when comparing the production of the plastic enclosure for the SomniCloud

# - __Given__: Enclosure volume is $2.57 in^3$

part_vol = 2.57  # in^3 — enclosure volume

# ### 3D Printing Specs ###
#
# - \$4.25 / cubic inch of ABS
# - Tooling Cost: \$0.00
# - Machine Time Cost: \$12.00 / hour
# - Print Time: 4 hours

# Set 3D ABS variable constants
usd_in3_3d = 4.25  # $/in^3
setup_3d = 0.00    # $ Setup Cost
mc_3d = 12.00      # $/hr
time_3d = 4.0      # hrs

# ### Injection Mold Data ###
#
# - \$4.50 / lb ABS
# - Mold Cost: \$6500.00
# - Machine Time Cost: \$80.00 / hour
# - Cycle Time: 30 seconds
#
# NOTE(review): the constants below disagree with the list above (6.50 vs
# 4.50 $/lb, 50000 vs 6500 setup, 120 vs 80 $/hr, 60 s vs 30 s). The code's
# values are preserved unchanged — confirm which set is intended.

# Set Injection Mold ABS variable constants
usd_lb_im = 6.50     # $/lb
setup_im = 50000.00  # $ Setup Cost
mc_im = 120.00       # $/hr
time_im = 60 / 3600  # hrs (cycle time)

# ### Step 1: Convert \$ / lb to \$ / cubic inch ###
# - Density of ABS: $\rho_{ABS} = 1.07 g/cm^{3}$
#
#
# \begin{equation}
# \left(\frac{\$4.50}{lb}\right) \
# \left(\frac{1.07g}{cm^{3}}\right) \
# \left(\frac{2.2lb}{1000g}\right) \
# \left(\frac{2.54cm}{1in}\right)^{3} \
# = X \frac{\$}{in_{3}}
# \end{equation}
#

# Create variables for given constants
rho_abs = 1.07  # g/cm^3, density of ABS

# Calculate $ per in^3 of injection molded ABS:
# ($/lb) * (g/cm^3) * (lb/g) * (cm/in)^3
usd_in3_im = (usd_lb_im) * (rho_abs) * (2.2 / 1000) * ((2.54 / 1)**3)  # $/in^3
print(f"Injection Molded ABS [$/in^3]: {usd_in3_im}")

# ### Step 2: Calculate Cost Functions ###
#
# - $Total\ Cost = (Cost\ per\ Unit) * (Number\ of\ Units) + (Setup\ Cost)$
#
#
# - $Cost\ per\ Unit = (ABS\ Cost\ per\ Unit) + (Machine\ Time\ Cost\ per\ Unit)$
#
#
# - $ABS\ Cost\ per\ Unit = (ABS\ Cost\ per\ in^{3}) * (Part Volume)$
#
#
# - $Machine\ Time\ Cost\ per\ Unit = (Machine\ Time\ Cost\ per\ Hour) * (Machine\ Time)$
#

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math

# Production quantities over which both cost functions are evaluated.
N = np.linspace(0, 1000, 10000)

# ### Cost Function for 3D Printing ###

# +
# Cost per unit for 3D printing
abs_cpu_3d = usd_in3_3d * part_vol  # ABS Cost per Unit
mc_cpu_3d = mc_3d * time_3d         # Machine Cost per Unit
t_cpu_3d = abs_cpu_3d + mc_cpu_3d   # Total per unit cost

# Cost function for 3D printing
C_3d = (t_cpu_3d) * N + setup_3d

print(f"3D Printing Cost per Unit: ${t_cpu_3d:.2f}")
print(f"3D Printing Setup Cost: ${setup_3d:.2f}")
# -

# ### Cost Function for Injection Molding ###

# +
# Cost per unit for injection molding (comment fixed: previously said "3D printing")
abs_cpu_im = usd_in3_im * part_vol  # ABS Cost per Unit
mc_cpu_im = mc_im * time_im         # Machine Cost per Unit
t_cpu_im = abs_cpu_im + mc_cpu_im   # Total per unit cost

# Cost function for injection molding
C_im = (t_cpu_im) * N + setup_im

# Labels fixed: the original printed these numbers under "3D Printing ..."
# (copy-paste bug).
print(f"Injection Molding Cost per Unit: ${t_cpu_im:.2f}")
print(f"Injection Molding Setup Cost: ${setup_im:.2f}")
# -

# ### Step 3: Plot Results ###

# +
fig, ax = plt.subplots(figsize=(12, 8))

ax.plot(N, C_3d, label=r"3D Printing")
ax.plot(N, C_im, label=r"Injection Molding")

# Indices where the sign of (C_3d - C_im) changes, i.e. where the curves cross.
idx = np.argwhere(np.diff(np.sign(C_3d - C_im)) != 0).reshape(-1) + 0
ax.plot(N[idx], C_3d[idx], 'o', markersize=8, color='black')

ax.legend(loc=2)  # upper left corner
ax.set_xlabel(r'# of Units', fontsize=18)
ax.set_ylabel(r'Total Cost', fontsize=18)
ax.set_title('Breakeven Analysis')
ax.set_xlim([min(N), max(N)])
ax.set_ylim([0, max(C_3d)])

plt.show()

# math.ceil needs a scalar: take the first crossing explicitly instead of
# passing the whole index array (newer NumPy rejects arrays here).
print(f"Breakeven point = {math.ceil(N[idx][0])} units")
# -
Breakeven Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 0 - Information # # 1 - Packages # ## 1.1 - Import of classical packages # + # Math packages import numpy as np # Progress bar from tqdm import tqdm # - # ## 1.2 - Import of personal packages # + # Import different tools functions from Modules.Utils.Dropout import * from Modules.Utils.ImportData import * from Modules.Utils.Normalisation import * from Modules.Utils.Preprocessing import * from Modules.Utils.Predictions import * from Modules.Utils.Transform import * # Import functions for the data augmentation from Modules.DataAugmentations.NoAugmentation import * from Modules.DataAugmentations.ComplementarySequences import * from Modules.DataAugmentations.PertubatedSequences import * # Import functions for the embedding from Modules.Embeddings.NoEmbedding import * from Modules.Embeddings.SpectrumEmbedding import * from Modules.Embeddings.DimismatchEmbedding import * from Modules.Embeddings.MotifEmbedding import * from Modules.Embeddings.WeightedDegreeEmbedding import * from Modules.Embeddings.HotEncodingEmbedding import * from Modules.Embeddings.FiguresEmbedding import * from Modules.Embeddings.TraidEmbedding import * # from Modules.Embeddings.HMMEmbedding import * # Import functions for the selection of the model from Modules.ModelSelection.CrossValidation import * from Modules.ModelSelection.GridSearch import * # Import functions for the kernels from Modules.Kernels.LinearKernel import * from Modules.Kernels.PolyKernel import * from Modules.Kernels.DimismatchPolyKernel import * from Modules.Kernels.CKN import * from Modules.Kernels.GaussianKernel import * from Modules.Kernels.SpectrumKernel import * from Modules.Kernels.HMM import * # Import function of model from Modules.Models.KernelLogisticRegression import * from Modules.Models.KernelSVM 
import * # - # # 2 - Data Import # + # Extraction of the dataset df_mat_dict = ImportData("./Data/Optionnal/", "./Data/", suffix="_mat100") df_dict = ImportData("./Data/", "./Data/", header=0, sep=",") # Display one of the datasets extracted (Xtr0_mat100) display(df_dict[0][1].head()) # - # # 3 - Determining Best Model a = np.arange(9, dtype=np.uint8).reshape((3, 3)) b = np.random.randint(0, 100, size=(3, 3), dtype=np.uint8) result = np.zeros((3, 3)) np.dot(a, b, result) # + import numpy as np from numba import njit def PolyKernel(X, Y, k=2, add_ones=False): """Compute the K matrix in the case of the linear kernel.""" # Shape of X n, _ = np.shape(X) d, _ = np.shape(Y) # Convert X and Y X = np.array(X, dtype=np.float) Y = np.array(Y, dtype=np.float) @njit def subPolyKernel(X, Y): """Apply the dot product to X and Y.""" return np.dot(X, Y.T) # Count the dot product result = subPolyKernel(X, Y) # Test if add ones if add_ones: # Compute results result = (np.array(result) + np.ones((n, d))) ** k else: # Compute results result = np.array(result) ** k return result # + # Hyperparameters for DataAugmentation hyperparameters_data_augmentation = { NoAugmentation: {}, # PertubatedSequences: {"n": [2], "add_compl": [True, False]} } # Hyperparameters for the embedding hyperparameters_embedding = { # NoEmbedding: {} SpectrumEmbedding: {"d_l": [[5, 7, 12]]} # FiguresEmbedding: {}, # DimismatchEmbedding: {"d": [[5, 6, 7]]} } # Hyperparameters of the kernels hyperparameters_kernels = { # SpectrumKernel: {"d_l": [[5, 7, 12]]} PolyKernel: { "k": [2], "add_ones": [True] } # DimismatchPolyKernel: { # "m" : [3], # "k": [2], # "add_ones": [True], # "d_l": [[5, 6, 7]] # } # GaussianKernelBIS: {"sigma": [None, 10000, 10]} } # Hyper-parameters of the models hyperparamters_models = { KernelLogisticRegression: {"lamda": [10e-12], "informations": [False], "preprocessing": [None], "normalisation": [None], "max_iter": [15], }, # KernelSVM: # {"lamda": [1, 0.01], # "max_iter": [10e4], # "tol": 
[10e-6], # "informations": [False], # "preprocessing": [Preprocessing, None]} } # GridSearch [best_score, best_parameters_names, best_parameters_values] = GridSearch(df_dict, hyperparameters_data_augmentation, hyperparameters_embedding, hyperparamters_models, hyperparameters_kernels, cv=5) # Display result print("Best Score: ", best_score) print("Best Parameters: ", best_parameters_names) # - # # 4 - Computation of the Predictions for the Best Parameters # ## 4.1 - Computation of the best parameters # + # Definition of the data augmentation function data_aug = DataAugmentationDefault(NoAugmentation, {}) # Defintion of the embedding embedding = EmbeddingDefault(SpectrumEmbedding , {"d_l": [5, 7, 12]}) # # Definition of the kernel kernel = KernelDefault(PolyKernel, {"k": 2}) # Definition of the model model = KernelLogisticRegression(kernel, informations=True, lamda=10, max_iter=15, preprocessing=None) # Defintion of best parameters values best_parameters_values = {"Data Augmentation": {"Function": data_aug}, "Embedding": {"Function": embedding}, "Kernel": {"Function": kernel}, "Model": {"Function": model}} # + # Computation of the predicition predictions = Prediction(best_parameters_values, df_dict) # Display predicitons predictions # - np.mean(predictions, axis=0) # ## 4.2 - Save predicitons into a csv np.savetxt("./Resultats/Predictions_Test_Spectrum.csv", predictions, fmt='%i', delimiter=",", header="Id,Bound", comments='') # # Testing # ## 1 - Study the result # + df = pd.read_csv("./Resultats/grid_search_res.csv", sep='\t') # Display df df.sort_values("score", inplace=True, ascending=False) df # + # from sklearn.feature_extraction.text import TfidfTransformer # counts = spectrumEmbedding(df_dict[0][0], len_sq=4) # new_X_train = transformer.fit_transform(counts).toarray() # # transformer = TfidfTransformer(smooth_idf=False) # True # for i in [5, 10]: # counts = spectrumEmbedding(df_dict[0][0], len_sq=i) # new_X_train = transformer.fit_transform(counts).toarray() 
# model = SVC(gamma="scale", C=100) # # Execute a cross validation on the model # gdm = GridSearchCV(model, hp, scoring="accuracy", cv=5) # # Fit the model and find the best parameters and score # gdm.fit(new_X_train, y_train) # print("Iteration {}, score {}".format(i, gdm.best_score_)) # - # # 888 - CKN
Tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import glob import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from operator import itemgetter from PIL import Image import os import json import cv2 # - # # DATA ANALYSIS # + #get annotations direc=os.path.join("/kaggle/input/face-mask-detection-dataset/Medical mask/Medical mask/Medical Mask/annotations/") anno=[] for filn in os.listdir(direc): with open(direc+filn) as f: data=json.load(f) anno.append({'filename':data['FileName'],'num_Anno':data['NumOfAnno'],'Annotations':data['Annotations']}) # - #sorting the list anno=sorted(anno, key=itemgetter('filename')) #read train.csv train_csv=pd.read_csv(os.path.join("/kaggle/input/face-mask-detection-dataset/train.csv")) train_csv.head() #sorting the dataset train_csv.sort_values(by='name',inplace=True) train_csv.head() #length of annotaion from train_csv print(train_csv.name.unique().shape) #length of annotaion from train_csv print(train_csv.classname.unique().shape) #image files images=os.listdir(os.path.join("/kaggle/input/face-mask-detection-dataset/Medical mask/Medical mask/Medical Mask/images")) images.sort() print(images[:10]) print(images[1698:1709]) print(len(images)) #splitting images into train test train=images[1698:] test=images[:1698] # cd '/kaggle/working' #getting all test objects f= open("test.txt","w+") for ele in test: f.write('/content/darknet/data/img/'+ele+'\n') f.close() #getting height and width direc = os.path.abspath('/kaggle/input/face-mask-detection-dataset/Medical mask/Medical mask/Medical Mask/images/') dimen=[] for filn in os.listdir(direc): img = Image.open(os.path.join(direc,filn)) width=img.size[0] 
height=img.size[1] dimen.append([filn,width,height]) #merging dimen_df=pd.DataFrame(dimen,columns=['name','width','height']) dimen_df.head() os.makedirs('/kaggle/working/img') #getting all train objects f= open("train.txt","w+") for ele in train: f.write('/content/darknet/data/img/'+ele+'\n') f.close() #creating obj.names file f= open("obj.names","w+") for ele in labels: f.write(ele+'\n') f.close() df=train_csv.merge(dimen_df, on='name') df.head() #converting x1 x2 y1 y2 -> x_center, y_center, w, h def convert_df(df): x1 = np.array(df['x1'].values) x2 = np.array(df['x2'].values) y1 = np.array(df['y1'].values) y2 = np.array(df['y2'].values) w = np.array(df['width'].values) h=np.array(df['height'].values) dw = 1./w dh = 1./h x = (y1 + x1)/2.0 y = (y2 + x2)/2.0 w = y1-x1 h = y2-x2 x = x*dw w = w*dw y = y*dh h = h*dh data = {'x':x, 'y':y, 'w':w, 'h':h} df1 = pd.DataFrame(data=data) return pd.concat([df, df1], axis=1) df_final=convert_df(df) df_final.head() #get labels from sklearn.preprocessing import LabelEncoder lb_make=LabelEncoder() df_final["labels"] = lb_make.fit_transform(df_final["classname"]) #getting list of labels with index as their categorical value labels=list(lb_make.inverse_transform([_ for _ in range(20)])) df_final.head() #converting labels to dict label={i:labels[i] for i in range(len(labels))} label #shape df print(df_final.shape) #unique values print(len(df_final['name'].unique())) #show df_final['name'].unique() # cd '/kaggle/working' #get .txt file for every image for row in df_final.values: filename = str(row[0][:4])+'.txt' if os.path.exists(filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not f = open(filename,append_write) f.write('{0} {1} {2} {3} {4}\n'.format(row[-1],row[-5],row[-4],row[-3],row[-2])) f.close() name=[] for row in df_final.values: name.append(row[0]) name_unique=list(set(name)) matchin_name=[] for item in name_unique: matchin_name.append(item[:4]) 
matchin_name_unique=list(set(matchin_name)) len(matchin_name_unique)==len(matchin_name) #get duplicate import collections print([item for item, count in collections.Counter(matchin_name).items() if count > 1]) # ### I created a obj.names file obj.data file train.txt file and .txt file for every image # ##### Now i will use these files to train on darknet using yolov3 in colab which is in different file
Mask Detector/model/maskDetection_getdata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # language: python # name: python3 # --- # # MUSHROOMS # ## Binary Classification # ### Imports # + import os import pandas as pd import numpy as np import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt # - # ### Load Data # + DATA_PATH = '../DATA/' FILE_NAME = 'mushrooms.csv' def load_data(data_path=DATA_PATH, file_name=FILE_NAME): csv_path = os.path.join(data_path, file_name) return pd.read_csv(csv_path) dataset = load_data() # - # ### View Data and Informations dataset.head() dataset.info() # + edible, poisonous = dataset['class'].value_counts() print("Edible:\t ", edible,"\nPoisonous:", poisonous) # + # Categorical to numerical labels = {'e': 0, 'p': 1} dataset['class'].replace(labels, inplace=True) edible, poisonous = dataset['class'].value_counts() print("0 - Edible: ", edible,"\n1 - Poisonous:", poisonous) # - # ### Split Dataset # #### Get the Labels # + X, y = dataset.drop('class', axis=1), dataset['class'].copy() print("X:",X.shape,"\ny:",y.shape) # - # #### Train Set and Test Set from sklearn.model_selection import train_test_split X_white = pd.DataFrame() X_not_white = pd.DataFrame() y_white = pd.Series(dtype='float64') y_not_white = pd.Series(dtype='float64') for i in range(0,len(X)): if X.loc[i,"stalk-root"] == "r": X_white = X_white.append(X.iloc[i,:]) y_white = y_white.append(pd.Series(y.iloc[i])) else: X_not_white = X_not_white.append(X.iloc[i,:]) y_not_white = y_not_white.append(pd.Series(y.iloc[i])) # + X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=37) # print(X_test_white) X_train_white = (X_train_not_white) X_test_white = X_white.append(X_test_not_white) y_train_white = 
(y_train_not_white) y_test_white = y_white.append(y_test_not_white) # - from sklearn.utils import shuffle X_train_full = shuffle(X_train_white, random_state=37) X_test = shuffle(X_test_white, random_state=37) y_train_full = shuffle(y_train_white, random_state=37) y_test = shuffle(y_test_white, random_state=37) # + # print(X_test[:5]) # print(y_test.loc[:,"0"]) # from sklearn.model_selection import train_test_split # X_train_full, X_test, y_train_full, y_test = train_test_split(X, y, test_size=0.15, random_state=37) # print("85% - X_train size:", X_train_full.shape[0], " y_train size:", y_train_full.shape[0]) # print("15% - X_test size: ", X_test.shape[0], " y_test size: ", y_test.shape[0]) # - # #### Validation Set # + X_valid, X_train = X_train_full[:500], X_train_full[500:] y_valid, y_train = y_train_full[:500], y_train_full[500:] print("X_train:", X_train.shape[0], "y_train", y_train.shape[0]) print("X_valid: ", X_valid.shape[0], "y_valid ", y_valid.shape[0]) print("X_test: ", X_test.shape[0]) # - # ### Prepare the Data # #### Data Transformation # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import OrdinalEncoder from sklearn.compose import ColumnTransformer cat_attr_pipeline = Pipeline([ ('encoder', OrdinalEncoder()) ]) cols = list(X) pipeline = ColumnTransformer([ ('cat_attr_pipeline', cat_attr_pipeline, cols) ]) X_train = pipeline.fit_transform(X_train) X_valid = pipeline.fit_transform(X_valid) X_test = pipeline.fit_transform(X_test) # - # ### Neural Network # #### Model from tensorflow.keras.models import Sequential from tensorflow.keras.layers import InputLayer, Dense tf.random.set_seed(37) model = Sequential([ InputLayer(input_shape=(22,)), # input layer Dense(45, activation='relu'), # hidden layer Dense(1, activation='sigmoid') # output layer ]) model.summary() # #### Compile the Model model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) # #### Prepare Callbacks # + from tensorflow.keras.callbacks import 
ModelCheckpoint, EarlyStopping checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5', save_best_only=True) early_stopping_cb = EarlyStopping(patience=3, restore_best_weights=True) # - # ### Training # + train_model = model.fit(X_train, y_train, epochs=100, validation_data=(X_valid, y_valid), callbacks=[checkpoint_cb, early_stopping_cb]) # - # #### Learning Curves pd.DataFrame(train_model.history).plot(figsize=(8,5)) plt.grid(True) plt.gca().set_ylim(0,1) plt.show() # ### Evaluate the Best Model on Test Set results = model.evaluate(X_test, y_test) print("test loss, test acc:", results) # #### Confusion Matrix # + import seaborn as sns #Parameters title = 'Confusion Matrix' custom_color = '#ffa600' #Function for drawing confusion matrix def draw_confusion_matrix(cm, title = title, color = custom_color): palette = sns.light_palette(color, as_cmap=True) ax = plt.subplot() sns.heatmap(cm, annot=True, ax=ax, fmt='d', cmap=palette) # Title ax.set_title('\n' + title + '\n', fontweight='bold', fontstyle='normal', ) # x y labels ax.set_xlabel('Predicted', fontweight='bold') ax.set_ylabel('Actual', fontweight='bold'); # Classes names x_names = ['Poisonous', 'Edible'] y_names = ['Poisonous', 'Edible'] ax.xaxis.set_ticklabels(x_names, ha = 'center') ax.yaxis.set_ticklabels(y_names, va = 'center') # + from sklearn.metrics import confusion_matrix y_test_pred = (model.predict(X_test) > 0.5).astype("int32") cm = confusion_matrix(y_test, y_test_pred) draw_confusion_matrix(cm) # - # #### ROC Curve #Function for plotting the ROC curve def plot_roc_curve(fpr, tpr, roc_auc): plt.plot(fpr, tpr, custom_color, label='Area: %0.3f' %roc_auc, linewidth=2) plt.plot([0, 1], [0, 1], 'k--') plt.title('ROC Curve') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate - Recall') plt.legend(loc='lower right') plt.show() # + from sklearn.metrics import roc_curve, auc y_test_prob = model.predict(X_test) fpr, tpr, _ = roc_curve(y_test, y_test_prob) roc_auc = auc(fpr, tpr) 
plot_roc_curve(fpr, tpr, roc_auc) # - # ### Make Some Predictions X_new = X_test[:5] y_prob = model.predict(X_new) # print(y_prob.round(3)) y_pred = (model.predict(X_new) > 0.5).astype("int32") # print(y_pred) # ## KL Divergence # X_new = X_test[:5] X_df = pd.DataFrame(model.predict(X_test)) y_test_pred = pd.DataFrame(y_test_pred).reset_index(drop=True) X_df = pd.concat([X_df, y_test_pred], axis=1) y_test = y_test.reset_index(drop=True) X_df = pd.concat([X_df, y_test], axis=1) X_df.columns = ["X_pred","y_pred","y_actual"] print(X_df) # + import math table = pd.DataFrame(columns=["KL_div","abs distance","correctness"]) for i in range(0,len(X_df)): # KL divergence p = X_df.loc[i,"X_pred"] kl = -(p*math.log(p) + (1-p)*math.log(1-p)) table.loc[i,"KL_div"] = kl # absolute distance abs_dist = 2*abs(0.5-p) table.loc[i,"abs distance"] = abs_dist # correctness y_pred = X_df.loc[i,"y_pred"] y_act = X_df.loc[i,"y_actual"] if y_pred == y_act: table.loc[i,"correctness"] = 1 # correct prediction else: table.loc[i,"correctness"] = 0 # wrong prediction print(table) # - table["count"] = 1 correctness = table[["correctness","count"]].groupby(pd.cut(table["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum) correctness["percent"] = 100*(correctness["correctness"]/correctness["count"]) print(correctness) index = [] for i in (correctness.index): index.append(str(i)) plt.bar(index,correctness["percent"], width=0.7) for index,data in enumerate(correctness["percent"]): plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center') plt.ylim(0,110) plt.xlabel("KL Divergence") plt.ylabel("% correct") # ### Confidence kl = table[["correctness","count"]].groupby(pd.cut(table["KL_div"], np.arange(0, 0.8, 0.05))).apply(sum) kl["percent"] = (kl["correctness"]/kl["count"]) kl.dropna(inplace=True) plt.scatter(np.arange(0, 0.70, 0.05), kl["percent"]) # print(kl) # print(np.arange(0, 0.7, 0.05)) # + # Linear Regression from sklearn.linear_model import LinearRegression x = 
np.arange(0, 0.70, 0.05).reshape((-1, 1)) y = kl["percent"] model = LinearRegression().fit(x,y) # - print('intercept(alpha):', model.intercept_) print('slope(theta):', model.coef_)
Notebook/stalkroot_rooted_0.5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import matplotlib.pyplot as plt import argparse import os import time # # Input data for physics-informed generative model solving PDE equations # ## 1. Laplace equation # $$ \frac {\partial^2 T}{\partial x^2} + \frac {\partial^2 T}{\partial x^2} = 0 $$ # # This is an eclipitic equation and could have two types of boundary condition, Neumann BC and Dirichelet BC # # For Dirichlet BC, the value of the variable T is fixed, while on Neumann BC the value is related to relation between neighbour nodes. # # ### Dirichlet boundary condition: # # # + T = torch.zeros(1,1,32,32) top, bottom, left, right = 100, 10, 100, 50 T[:,:,0,:] = top T[:,:,-1,:] = bottom T[:,:,:,0] = left T[:,:,:,-1] = right T[:,:,0,0] = (top + left) / 2 T[:,:,0,-1] = (top + right) / 2 T[:,:,-1,0] = (bottom + left) / 2 T[:,:,-1,-1] = (bottom + right) / 2 plt.imshow(T.cpu().detach().numpy()[0,0,:,:], vmin = 0, vmax = 100, cmap = plt.cm.viridis) # - # ### Neumann boundary condition: # # Supose one of the boundary, say, left boundary, contains Neumann BC: # $$\frac{\partial T}{\partial x} = a$$ # On top the original setup for Dirichlet BC, we make use of the most externel nodes to be the indicator of the information from Neumann BC # # Normalize all the data into 0 to 1. 
And fixed the geometry of the domain to be L = 1 L = 1 dtype = torch.FloatTensor print(dtype) def get_solution(input_T, isNeum, dtype = torch.FloatTensor): "FDM method to solve laplace eqn" "a denotes the Neumann boundary condition at X = 0" maxIter = 1e8 padT = input_T[0,0,:,:].numpy() output_T = input_T.clone().numpy() # READ NEUMANN BC FROM INPUT_T if isNeum[0]: nbc_left = padT[2:-2,0] # print(nbc_left) if isNeum[1]: nbc_upper = padT[0,2:-2] if isNeum[2]: nbc_right = padT[2:-2,-1] if isNeum[3]: nbc_bottom = padT[-1,2:-2] # Acquire the real compute domain of T T = padT[1:-1,1:-1] h = L / np.size(padT[0,:]) T_new = np.copy(T) iteration = 0 while iteration < maxIter: T_new[1:-1, 1:-1] = ((T_new[0:-2, 1:-1] + T_new[2:, 1:-1]) + (T_new[1:-1,0:-2] + T_new[1:-1, 2:]))*0.25 if isNeum[0]: T_new[1:-1,0] = 1/3 * (4*T_new[1:-1,1] - T_new[1:-1, 2] - 2*h*nbc_left) err = (T_new - T).flat err = np.sqrt(np.dot(err,err)) if err <= 1e-12: output_T[0,0,1:-1,1:-1] = T_new return torch.from_numpy(output_T).type(dtype) T = np.copy(T_new) iteration += 1 output_T[0,0,1:-1,1:-1] = T_new return torch.from_numpy(output_T).type(dtype) # + size = 32 T = torch.zeros(1,1,size,size) isNeum = [True, False, False, False] nbc = [0.001,0,0,0] top, bottom, left, right = 0.9, 0.1, 0.1, 0.5 T[:,:,1,1:-1] = top T[:,:,-2,1:-1] = bottom T[:,:,1:-1,1] = left T[:,:,1:-1,-2] = right T[:,:,1,1] = (top + left) / 2 T[:,:,1,-2] = (top + right) / 2 T[:,:,-2,1] = (bottom + left) / 2 T[:,:,-2,-2] = (bottom + right) / 2 if isNeum[0]: T[:,:,1:-1,1] = torch.linspace(top,bottom,steps=size-2) T[:,:,2:-2,0] = nbc[0] fig, (ax1, ax2) = plt.subplots(1, 2, figsize = [12, 6]) ax1.imshow(T.detach().numpy()[0,0,:,:], vmin=0, vmax=1, cmap=plt.cm.inferno) ax1.set_title("Example of training data") ax1.set_xlabel("X") ax1.set_ylabel("Y") sol = get_solution(T,[True, False, False, False]) ax2.imshow(sol.cpu().detach().numpy()[0,0,:,:], vmin=0, vmax=1, cmap=plt.cm.inferno) ax2.set_title("Corresponding solution") ax2.set_xlabel("X") 
ax2.set_ylabel("Y") # - bd = T[:,:,1:-1,0:3].type(dtype) fig, (ax1, ax2) = plt.subplots(1,2) ax1.imshow(bd.cpu().detach().numpy()[0,0,:,:], vmin=0, vmax=1, cmap=plt.cm.inferno) bdsol = sol[:,:,1:-1,0:3].type(dtype) ax2.imshow(bdsol.cpu().detach().numpy()[0,0,:,:], vmin=0, vmax=1, cmap=plt.cm.inferno) fig.suptitle('Take a look at the left boundary', fontsize=16)
Visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Preface # # In class I talked a bit about mapping out steps to executing projects. This would look like this: identify question and dataset(s) that may answer the question; import data; manipulate data; and then try and answer the question. The question part is hard, but this is more conceptual, not coding. The manipulation part is where coding skills are helpful. Specifically, cleaning, merging, shaping the data to that the data set is usable to answer the question at hand. # # ### Cleaning and String Methods on Dataframes # # This notebook works through some cleaning examples that will probably help you in your project. Here we describe features of Pandas that allow us to clean data that, for reasons beyond our control, comes in a form that's not immediately amendable to analysis. This is the first of several such notebooks. # # #### The Question (or want)... # # We need to know what we're trying to do---what we want the data to look like. To borrow a phrase from our friend <NAME>, we say that we apply the want operator. Some problems we've run across that ask to be solved: # # - We have too much data, would prefer to choose a subset. # - Row and column labels are contaminated. # - Numerical data is contaminated by commas (marking thousands); dollar signs; other non-numerical values, etc. # - Missing values are marked erratically. # # What we want in each case is the opposite of what we have: we want nicely formatted numbers, clean row and column labels, and so on. 
import pandas as pd # data package import matplotlib.pyplot as plt # graphics module import datetime as dt # date and time module import numpy as np # foundation for pandas # ### Example: Chipotle data # # This data comes from a New York Times story about the number of calories in a typical order at Chipotle. The topic doesn't particularly excite us, but the data raises a number of issues that come up repeatedly. We adapt some code written by <NAME>. # + url = "https://raw.githubusercontent.com/mwaugh0328/Data_Bootcamp_Fall_2017/master/data_bootcamp_1106/orders_dirty.csv" #path = "C://data_bootcamp//Data_Bootcamp_Fall_2017//data_bootcamp_1106//orders_dirty.csv" # Double forward slashes for windows machines. chp = pd.read_csv(url) print("Variable dtypes:\n", chp.dtypes, sep='') # Lets checkout the datatypes that we have... are they what you expect? chp.head() #chp.tail() #chp.shape # - chp.tail() # ### Issue #1: We have too much data, want to work with a subset. # # Ok, so this is not really an issue here. This is about 5000 rows, width is small too. Not huge. But lets imagine that it was huge and we don't want deal with continually manipulating a big data set. We already know how to do this...we just use the `nrows` command when we read in the dataset. # + chp = pd.read_csv(url, nrows = 500) print("Variable dtypes:\n", chp.dtypes, sep='') # Lets checkout the datatypes that we have... are they what you expect? chp.head() chp.tail() chp.shape # - # Now the shape indicates that we only have 500 rows. Just as we specified. This was easy. # # One strategy is to write and test your code on only a subset of the data. Again the upside is that the code may run faster, its easier too look at and analyze. Then once you have everything sorted out, you simply change the code above and scale it up. # # **Here is the issue to be mindful of: the subset may not be "representative" of the entire data set.** For example, there may be issues in say row 1458 (e.g. 
missing values, different data types), that will only arise when the full data set is imported. Moreover, your results (graphic, statistics, etc.) may not be the same one the entire data set is read in. This is just something to be mindful of when pursuing this approach. # # --- # ### Issue #2: Row and column labels are contaminated. # # Return to the head and the `dyypes` and look at the variable names... # + chp = pd.read_csv(url, nrows = 500) print("Variable dtypes:\n", chp.dtypes, sep='') # Lets checkout the datatypes that we have... are they what you expect? chp.head() #chp["order store id 1"].unique() # - # Here we see several issues that may slow us down, if fixed could help things. # # - Notice how the variable names are separated and then they have these numerical values in them (as if the person constructing the data wanted to help us by telling us the column number). We could simply slice the data set accordingly, or we could change the column names in a simpler way. Lets follow the later approach. # # - Second, notice that the "order store id 1" value gives us a order number (note how one order has several entries) and then store id. This is could be cumbersome for many reasons, lets explore this series using `unique()` and `value_counts()`. The code is below... unique_values = pd.DataFrame(chp["order store id 1"].unique()) # This will grabe the unique values and create a new dataframe out of it... unique_values.shape # Now here is an important observations...there are 500 rows, but only 209 unique store, so what this is saying is for each order, there are multiple entries. Now here is another way to see what is going on with this by checking the value counts associated with each uniqie value. chp["order store id 1"].value_counts().head() # Lets now see what is up with order 205... chp[chp["order store id 1"]== "205 Bucks County"] # What we learned is that this is for the same country (Bucks County). Thus is provides no information at all. 
Lets also change the entries in that column and remove it. # **First step: Fix the column names.** # + # One way to fix the names is just to rename them by hand like this... #new_name_list = ["order_id", "quantity", "item_name", "choice_desc", "item_price"] #chp.columns = new_name_list # + # Another way is to use string methods on the column names and create something more usable. # Here is a test run, what does this do? test = "order store id 1" test.rsplit(maxsplit=1)[0].replace(" ","_") # So this splits the string into a list. The max split doess... # Then the bracket says, take the first entry. # Then the next part says replace the space with an underscore, # this will help us call a column name more easily. # What if we did not have max split? # + # Now lets fix this all up for the data from new_name_list = [] for var in chp.columns: new_name_list.append(var.rsplit(maxsplit=1)[0].replace(" ","_")) # How would you do this in list comprehension format... # Then rename everything... chp.columns = new_name_list chp.head() # - # Great work! # # **Second step: Change the individual column entries.** # # So this fixed some issues with the columns, lets use the same idea to fix the issue with the order store id, so get the "Bucks County" out of there. # + # Again, lets test this out... # Step one, pull off the number... test = "1 Bucks County" test2 = test.rsplit()[0] # same idea, don't use the max split option.... print(test2) print(type(test2)) # I want this numerical, but its not... # Step two, convert to floating point... #test2 = float(test2) #print(type(test2)) # - # This gives a general idea to fixing the the order numbers. Here is the problem: We need to perform this operation on every single entry of a particular column. This is different than just editing the column names. To perform this operation, we need to use **Pandas string methods.** # # We can do the same thing to all the observations of a variable with so-called string methods. 
We append `.str` to a variable in a DataFrame and then apply the string method of our choice. If this is part of converting a number-like entry that has mistakenly been given `dtype` object, we then convert its `dtype` with the `astype` method. # # **Aside** Below we will see several examples of string methods on the dataframe. Below is a link to a resournce with a more comprehensive treatment of string methods in pandas: # # [Strings in Pandas](https://github.com/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.10-Working-With-Strings.ipynb) # + chp.head() chp.columns chp.order_store_id.head() # Just to verify we are doing what we think we are... chp.order_store_id = chp.order_store_id.str.rsplit().str[0].astype(int) # Note that we need two str's here: one to do the split, the other to extract the first element. # Then the last part of the code `astype` converts it to a string... # note nothing changes unless we reassign everything. # - chp.head(20) print("Variable dtypes:\n", chp.dtypes, sep='') # Great work. We now have a numerical value for each order number. Key lesson from this was using `.str` on the dataframe to used string methods on individual entries. # # --- # ### Issue #3: Numerical data is contaminated by commas (marking thousands); dollar signs; other non-numerical values, etc. # # We sorted out issue with labels on the rows and columns. We still have the following issue that the item price is not a numerical value. Check above, the type of `item_price` is an object, not a float. If we want to do some kind of numerical calculation on this, then we need to convert it. # # **Why is `item_price` not a numerical value?** ITs those damm dollar signs. Someone put them their thinking they were being helpful, but it is giving us a headache. **How do we fix it?** Dude, in a very similar way above. # # #### Exercise: Can you use the methods above to... # # - Remove the dollar sign # # - Check the type # # - Convert the type to a float. 
Note: if its not working, you are proabably doing it right. Can you figure out what the issue is? # # --- # # #### Replacing corrupted entries with missing values # # The issue that we faced in the exercise above is that while we did replace the dollar sign, we could not convert the column to a floating point number because there were some entries in the column that are not numbers (e.g. the gift card values). So Python/Pandas kicks back an error. How do we do this? The natural way to do this is to replace all these entries with a `NaN` value. # # Below is another method to replace whole entries and assign them an missing value. (This will set us up for the next issue. # + chp.item_price.replace(to_replace=["gift card"], value=[np.nan], inplace = True) # So lets walk through what this does, it takes the column, then uses the replace # comand, to_replace = ["what we want to replace"], then the value # that we want to replace it with. We are goning to use the numpy NaN value # which the dataframe will proplerly recognice as not a number. # Note this could be a huge pain if there were differing random # strings floating around. chp.item_price.unique() # simmilar, but just reports the unqiue occurances # chp.item_price.astype? # + chp.item_price = chp.item_price.astype(float) # Now convert it to a floating point number. print("Variable dtypes:\n", chp.dtypes, sep='') # - # ### Important Comment # # Unlike the string methods we described earlier, this use of replace affects **complete entries**, not **elements of string entries**. For example, suppose we tried to use replace to get rid of the dollar signs. If would not work because `replace` is looking for an entry that only has a `$` to replace it. # # --- # ### Issue #4: Missing values are marked erratically. # # It's important to label missing values, so that Pandas doesn't interpret entries as strings. Pandas is also smart enough to ignore things labeled missing when it does calculations or graphs. 
If we compute, for example, the mean of a variable, the default is to ignore missing values. # # We've seen that we can label certain entries as missing values in read statements: read_csv, read_excel, and so on. Moreover, in the operations above, we showed how to take entries that were hard to make sense of and called them missing values using the `replace` command and `np.nan`. # # **Working with missing values** Here are some operations we can do... chp.order_store_id[chp.item_price.isnull()] # These are the order numbers with null values # The next command of use is `.dropna` The one thing to note is that Pandas (when it computes things or plots) automatically drops stuff. So here is an example, the mean with the NaNs there and the mean without. They are the same. print(chp.item_price.dropna().mean()) print(chp.item_price.mean()) # ----- # # ### Some Analysis # # Now that we have our data set clean, lets just do a couple of things to check it out. # # - # + has_guac = chp[chp.item_name == "Chicken Burrito"].choice_description has_guac = pd.DataFrame(has_guac) list(has_guac.loc[16]) #chp[chp.item_name == "<NAME>"][has_guac].item_price.mean() # - # ### Summary # We've learned the following. we learned how to clean data dealing with several key issues: (i) Too much data (ii) rows, columns, or specific entries have contaminated data (iii) numerical values are contaminated and (iv) missing values. Then we quickly analyzed the Chipoltle data and practice the `gropuby` command and `contains` string method. Great work! # # - **For practice:** What if you did the same analysis on the whole data set? Is this as easy as simply changing `nrows = 500` and running it again? Why or why not?
cleaning_data/cleaning_chipoltle_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import datetime
import seaborn as sb
import time
#from ipywidgets import interact, fixed
from libs.CovidTimeseriesModel import CovidTimeseriesModel
from libs.CovidTimeseriesModelSIR import CovidTimeseriesModelSIR
from libs.CovidDatasets import JHUDataset

# Display floats with two decimal places in DataFrame output.
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# %load_ext autoreload
# %autoreload 2

# +
# Shared dataset handle used by model_state below (Johns Hopkins COVID data).
Dataset = JHUDataset()

def model_state(country, state, interventions=None):
    """Run the SIR timeseries model for one country/state and return its forecast.

    Parameters
    ----------
    country, state :
        Region identifiers understood by the dataset (e.g. 'USA', 'TX').
    interventions :
        Optional mapping of datetime.date -> effective r0 in force from that
        date (see INTERVENTIONS below), or None for no interventions.

    Returns
    -------
    Whatever CovidTimeseriesModelSIR().forecast_region produces — presumably a
    DataFrame of projected cases/hospitalizations; confirm against the library.
    """
    ## Constants
    # NOTE(review): start_time is recorded but never used afterwards.
    start_time = time.time()
    HOSPITALIZATION_RATE = .0727
    HOSPITALIZED_CASES_REQUIRING_ICU_CARE = .1397
    TOTAL_INFECTED_PERIOD = 12
    MODEL_INTERVAL = 4
    r0 = 2.4
    POP = Dataset.get_population_by_country_state(country, state)
    # Pack all of the assumptions and parameters into a dict that can be passed into the model
    MODEL_PARAMETERS = {
        # Pack the changeable model parameters
        'timeseries': Dataset.get_timeseries_by_country_state(country, state, MODEL_INTERVAL),
        'beds': Dataset.get_beds_by_country_state(country, state),
        'population': POP,
        'projection_iterations': 25,  # Number of iterations into the future to project
        'r0': r0,
        'interventions': interventions,
        'hospitalization_rate': HOSPITALIZATION_RATE,
        'case_fatality_rate': .0109341104294479,
        'hospitalized_cases_requiring_icu_care': HOSPITALIZED_CASES_REQUIRING_ICU_CARE,
        # Assumes that anyone who needs ICU care and doesn't get it dies
        'case_fatality_rate_hospitals_overwhelmed': HOSPITALIZATION_RATE * HOSPITALIZED_CASES_REQUIRING_ICU_CARE,
        'hospital_capacity_change_daily_rate': 1.05,
        'max_hospital_capacity_factor': 2.07,
        'initial_hospital_bed_utilization': .6,
        'model_interval': 4,  # In days
        'total_infected_period': 12,  # In days
        'rolling_intervals_for_current_infected': int(round(TOTAL_INFECTED_PERIOD / MODEL_INTERVAL, 0)),
        'estimated_new_cases_per_death': 32,
        'estimated_new_cases_per_confirmed': 20,
        # For new model
        'incubation_period': 5,  # In days
        'duration_mild_infections': 10,  # In days
        'icu_time_death': 7,  # Time from ICU admission to death, in days
        'hospital_time_recovery': 11,  # Duration of hospitalization, in days
        'use_harvard': False,  # If True use the harvard parameters directly, if not calculate off the above
    }
    return CovidTimeseriesModelSIR().forecast_region(model_parameters=MODEL_PARAMETERS)

# NOTE(review): this module-level r0 shadows the constant inside model_state; it is
# used only to terminate the intervention schedules below (return to baseline r0).
r0 = 2.4
# Each intervention scenario maps start dates to the effective r0 in force from that date.
INTERVENTIONS = [
    None,
    {
        datetime.date(2020, 3, 23): 1.3,
        datetime.date(2020, 4, 20): 1.1,
        datetime.date(2020, 5, 22): 0.8,
        datetime.date(2020, 6, 23): r0
    },
    {
        datetime.date(2020, 3, 23): 1.7,
        datetime.date(2020, 6, 23): r0
    },
    {
        datetime.date(2020, 3, 23): 1.3,
        datetime.date(2020, 3, 31): 0.3,
        datetime.date(2020, 4, 28): 0.2,
        datetime.date(2020, 5, 6): 0.1,
        datetime.date(2020, 5, 10): 0.35,
        datetime.date(2020, 5, 18): r0
    }
]

# +
# Dataset.get_timeseries_by_country_state('USA', 'CA', 1)[['date','cases','deaths']]
# -

# Run the model for Texas with no interventions (scenario 0).
model_state('USA', 'TX', INTERVENTIONS[0])
analyses/notebook_runtime.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ENGR418 Project Stage 2 Group 31
#
# By: <NAME> (63586572), <NAME> (75469692)
#

import numpy as np
import pandas as pd
import os
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageFilter
import PIL
from sklearn.neighbors import KNeighborsClassifier
import sklearn

# ## Single Function Call
#
# The function in the cell below can be called to run the entire algorithm. Before running,
# be sure to run the cells containing the functions at the bottom of this page, or else
# errors will be thrown.

# +
# NOTE(review): these imports duplicate the cell above so this cell can run standalone.
import numpy as np
import pandas as pd
import os
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageFilter
import PIL
from sklearn.neighbors import KNeighborsClassifier
import sklearn

# first param is the relative training data directory
# second param is the relative testing data directory
training_data_relative_dir = "../data/training"
testing_data_relative_dir = "../data/testing"

# this will take 1-2 minutes to run (depending on device capabilities)
# NOTE: test_function is defined near the bottom of this notebook.
test_function(training_data_relative_dir, testing_data_relative_dir)
# -

# ## Setting Tuning Parameters
#
# Before starting, these parameters must be set; they can be tuned to optimize performance.
# The optimal values found are used as defaults below.

# +
# Best observed configuration: image_size=64, filter_value=4, angles 0->180 in steps of 2,
# giving 100% (training) & 96.30% (testing) accuracy.

# image size dictates the size each image will be reshaped to later, used for tuning
image_size = 64

# filter value sets a threshold on the edge-detection image, used for tuning
filter_value = 4

# list of angles that the algorithm will rotate through, used for tuning
angles = []
for i in range(90):
    angles.append(2*i)
# -

# ### Feature Engineering
#
# Next, all image data is scraped from the image files in their respective relative
# directory. Refer to the get_image_feature_data function for a line-by-line description.
# In essence, each image gets 4 features: maximum, minimum, average, and median Lego brick
# length, measured on the rotated image (see the individual functions for details).

# gets all training data from relative directory.
# refer to functions at bottom for line-by-line commenting
x,y = get_image_feature_data("../data/training", image_size, filter_value, angles)

# gets all testing data from relative directory.
xt, yt = get_image_feature_data("../data/testing", image_size, filter_value, angles)

# ### Classifier Training
#
# Now with the feature vectors selected and image data collected, a classifier can be
# trained. After testing, we opted to use a k-neighbors classifier, which provided the best
# results: it classifies a point based on the classification of points near it, making it
# simple and quick to calculate. With strong engineered features it gives high accuracy.

# +
# creates a non-linear k-nearest-neighbor classifier that considers the 8 nearest
# neighbors, where each neighbor is weighted by distance
from sklearn.neighbors import KNeighborsClassifier
k_neighbors = KNeighborsClassifier(n_neighbors=8, weights="distance")

# fits data to the classifier
k_neighbors.fit(x,y);
# -

# ## **Prediction, Confusion Matrices, and Accuracy of Classifier**
#
# ### Classifier Prediction: Training Data
#
# Now the classifier can be tested, first with the training image data. The confusion
# matrix and the accuracy of the algorithm are shown below.

# feeds the training data back into the classifier for prediction
pred = k_neighbors.predict(x)

# formats the prediction values to string labels (refer to function below)
predicted = confusion_format(pred)

# formats the actual labels to string labels (refer to function below)
actual = confusion_format(y)

# prints the confusion matrix using string labels
print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))

# prints the error percentage (refer to function below)
print(f"Percentage of correct classification from model on training data set: {100-error_percentage(pred,y):.2f}%\n")

# ### Classifier Prediction: Testing Data
#
# Next it is tested with the testing image data. The testing set is a better measure of the
# classifier's accuracy since it contains images the model has never seen before. The
# confusion matrix and the accuracy of the algorithm are shown below.
# feeds the testing data into the classifier for prediction
pred = k_neighbors.predict(xt)

# formats the prediction values to string labels (refer to function below)
predicted = confusion_format(pred)

# formats the actual labels to string values (refer to function below)
actual = confusion_format(yt)

# prints the confusion matrix using string labels
print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))

# prints the testing accuracy.
# BUG FIX: testing predictions must be scored against the testing labels (yt);
# the original compared them against the training labels (y).
print(f"Percentage of correct classification from model on testing data set: {100-error_percentage(pred,yt):.2f}%")

# ---
# ---
# ---
# ---
# # **Functions**
#
# All of these functions **must** be run before anything else. Each function's purpose is
# discussed below, and each is well commented.

# ### edge_image
#
# Takes a raw Lego brick image and returns a filtered, binary, edge-detected version: a
# monochrome image containing only 0/1, where 1 marks an edge of the Lego brick and 0 marks
# blank space. All noise outside the brick's edge must be filtered out so that get_len can
# measure the correct brick length; noise reaching get_len would make the classifier
# unreliable.

def edge_image(image, image_size, filter_value):
    """Return a binary (0/1) edge-detected version of a raw Lego-brick photo.

    The image is converted to monochrome, edge-detected, shrunk to 16x16 (to
    blur away speckle noise), resized back up to (image_size, image_size),
    then thresholded so 1 marks an edge pixel and 0 marks background.

    Parameters
    ----------
    image : PIL.Image.Image
        Raw input photograph of a single Lego brick.
    image_size : int
        Side length (pixels) of the square output image.
    filter_value : int
        Monochrome intensity threshold; pixels at or below it become 0.

    Returns
    -------
    PIL.Image.Image
        Binary edge image of size (image_size, image_size).
    """
    # monochrome, then edge detection (this intermediate image is full of noise)
    image = image.convert("L")
    image = image.filter(ImageFilter.FIND_EDGES)
    # compress down to 18x18; the heavy downscale blurs out pixel-level noise
    image = image.resize((16 + 2, 16 + 2))
    # FIND_EDGES marks the image border itself as an "edge"; slice off the 1-pixel
    # frame to remove that artifact, leaving a 16x16 image
    image = PIL.Image.fromarray(np.array(image)[1:image.height - 1, 1:image.width - 1])
    # resize from 16x16 back up to the desired output size
    image = image.resize((image_size, image_size))
    # FIX: np.array (not np.asarray) guarantees a writable copy — newer Pillow
    # returns a read-only buffer from asarray, which breaks the in-place threshold.
    data = np.array(image)
    # filter out any remaining low-intensity noise, then binarize for easy interpretation
    data[data <= filter_value] = 0
    data[data > 0] = 1
    # back to a Pillow image object for further use
    return PIL.Image.fromarray(data)

# ### get_len
#
# Measures the distance between the top-most and bottom-most edge pixel of the image
# produced by edge_image. The caller rotates the image through many angles and records this
# length at each step: circles stay roughly constant as they rotate, rectangles reach a
# large maximum, and squares fall in between. max/min/avg/median of these lengths are used
# later as features.

def get_len(image):
    """Return the vertical extent (pixels) of the brick in a binary edge image.

    This is the row distance between the highest and lowest non-zero pixel.

    Parameters
    ----------
    image : PIL.Image.Image
        Binary edge image as produced by edge_image (pixels are 0 or 1).

    Returns
    -------
    int
        max_row - min_row over all edge pixels; 0 if the image is blank.
    """
    # row index of every edge pixel; replaces the original O(width*height)
    # Python double loop with a single vectorized pass
    rows = np.nonzero(np.array(image))[0]
    if rows.size == 0:
        # BUG FIX: a fully blank image previously returned -image.height
        # (the untouched min/max sentinels); 0 is the sensible "no extent" value.
        return 0
    return int(rows.max() - rows.min())

# ### get_image_feature_data
#
# Iterates through a directory, gathering feature data and the correct label for each
# image. Largely an accumulation of edge_image and get_len applied per image. Returns
# x (feature vectors) and y (labels).

def get_image_feature_data(rel_dir, image_size, filter_value, angles):
    """Build feature vectors and labels for every image file in rel_dir.

    Each image is edge-detected (edge_image) and rotated through `angles`;
    the vertical extent at each angle (get_len) is summarized by four
    statistics — max, min, average, median — each normalized by image height.
    The label comes from the first letter of the file name:
    'c' -> 0 (circle), 'r' -> 1 (rectangle), anything else -> 2 (square).

    Parameters
    ----------
    rel_dir : str
        Relative directory containing the image files.
    image_size, filter_value : int
        Passed through to edge_image.
    angles : sequence of int
        Rotation angles (degrees) to measure at; must be non-empty.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        x of shape (n_images, 4) and y of shape (n_images,).
    """
    x = []
    y = []
    for pic in os.listdir(rel_dir):
        # open, then reduce to a filtered binary edge image
        image = PIL.Image.open(f"{rel_dir}/{pic}")
        image = edge_image(image, image_size, filter_value)
        # vertical extent of the brick at every rotation angle
        lengths = np.array([get_len(image.rotate(angle)) for angle in angles])
        # summary statistics, normalized by image height:
        # max identifies rectangles; avg/median identify circles
        height = image.height
        vec = [np.max(lengths) / height,
               np.min(lengths) / height,
               np.average(lengths) / height,
               np.median(lengths) / height]
        # label from the first letter of the file name
        first = pic[0].lower()
        if first == "c":
            y.append(0)
        elif first == "r":
            y.append(1)
        else:
            y.append(2)
        x.append(vec)
    # arrays are easier to feed to the classifier than lists
    return np.array(x), np.array(y)

# Converts decimal labels to strings: 0 => Circle, 1 => Rectangle, 2 => Square.
def confusion_format(labels):
    """Return a numpy array of shape names corresponding to numeric labels."""
    names = {0: "Circle", 1: "Rectangle"}
    return np.array([names.get(i, "Square") for i in labels])

# Calculates the error percentage from the differences between predicted and actual labels.
def error_percentage(pred, y):
    """Return the percentage (0-100) of predictions in pred that disagree with y."""
    # vectorized mismatch count replaces the original manual loop
    return np.count_nonzero(pred != y) / pred.size * 100

# Demonstrates all of the code above in a single function call. Refer to the cells above,
# or the individual functions, for in-depth / line-by-line descriptions.
def test_function(training_dir, testing_dir):
    """Run the full pipeline: feature extraction, training, and evaluation.

    Prints a confusion matrix and accuracy for the training set, then for the
    testing set.

    Parameters
    ----------
    training_dir, testing_dir : str
        Relative directories containing the training and testing images.
    """
    image_size = 64
    filter_value = 4
    angles = [2 * i for i in range(90)]

    x, y = get_image_feature_data(training_dir, image_size, filter_value, angles)
    xt, yt = get_image_feature_data(testing_dir, image_size, filter_value, angles)

    k_neighbors = KNeighborsClassifier(8, weights="distance")
    k_neighbors.fit(x, y)

    # evaluate on the training set
    pred = k_neighbors.predict(x)
    predicted = confusion_format(pred)
    actual = confusion_format(y)
    print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))
    print(f"Percentage of correct classification from model on training data set: {100-error_percentage(pred,y):.2f}%\n")

    # evaluate on the testing set
    pred = k_neighbors.predict(xt)
    predicted = confusion_format(pred)
    actual = confusion_format(yt)
    print(pd.crosstab(actual, predicted, rownames=["Shape Actual"], colnames=["Shape Predicted"]))
    # BUG FIX: testing accuracy is measured against the testing labels (yt), not y
    print(f"Percentage of correct classification from model on testing data set: {100-error_percentage(pred,yt):.2f}%")
analysis/ENGR418_project_group_31_stage_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
## this script will be used to calculate the score using bayes average

####### to run this you will want to:
# 1. update the file to be your data file
# 2. update the trial increments
# 3. update the location of the file you are writing to

import pandas as pd

fake_data = pd.read_csv(r"C:\Users\allym\Documents\MQP\MQP\app\fake_data.csv")
# fake_data = pd.read_csv(r"C:\Users\allym\Documents\MQP\MQP\app\cross_val_fake_data.csv")

# +
# update this set of trials list with the start and end point
# of the DIFFERENT trials that you run
sets_of_trials = [[20,30],[31,41]]
# sets_of_trials = [[0,99]]

# accumulators and per-set values, populated below
this_ped_occupied = 0
this_area = 0
this_evaluation_metric = 0
total_ped_occupied = 0
total_area = 0
total_evaluation_metric = 0
avg_ped_occupied = 0
avg_area = 0
avg_evaluation_metric = 0
list_of_scores = []
# new output column, filled in by the second loop below
fake_data['bayes_weighted_avg_score'] = ''
ij_index = 0
m = 0

# calculating the averages to be used later on
total_area = sum(fake_data['width_i']*fake_data['width_ii'])
avg_area = total_area/len(fake_data['width_i'])
# print(avg_area)

# cell weights per pedestrian type: Child=1, Adult=2, AdultBackpack=4, AdultBike=14
total_ped_occupied = sum(fake_data['max[\'Child\']']+(2*fake_data['max[\'Adult\']'])+(4*fake_data['max[\'AdultBackpack\']'])+(14*fake_data['max[\'AdultBike\']']))
avg_ped_occupied = total_ped_occupied/len(fake_data['max[\'Child\']'])
# print('avg_ped_occupied',avg_ped_occupied)

total_evaluation_metric = sum(fake_data['evaluation_metric'])
avg_evaluation_metric = total_evaluation_metric/len(fake_data['evaluation_metric'])
# print('avg_evaluation_metric',avg_evaluation_metric)

# for each set of trials from the list, get the index of the
# first trial to calculate this_ped_occupied and this_area
index = fake_data.index
for [i,j] in sets_of_trials:
    seed = fake_data['seed']
    x = i
    eval_sum = 0
    conditioni = seed == i
    seedi_index = index[conditioni].tolist()

    # this_ped_occupied is the number of cells occupied by pedestrians
    # so multiply num peds by number of cells that type of ped takes
    # up for the given set of trials
    # NOTE(review): selecting by seedi_index yields a (presumably one-element) Series,
    # not a scalar — confirm each seed value matches exactly one row.
    this_ped_occupied = fake_data['max[\'Child\']'][seedi_index]+(2*fake_data['max[\'Adult\']'][seedi_index])+(4*fake_data['max[\'AdultBackpack\']'][seedi_index])+(14*fake_data['max[\'AdultBike\']'][seedi_index])
    # print('this_ped_occupied',this_ped_occupied)

    # this_area is the area of the grid for the given set of trials
    this_area = fake_data['width_i'][seedi_index]*fake_data['width_ii'][seedi_index]
    # print('this_area',this_area)

    # this_evaluation_metric is the average of the evaluation metric
    # for the given set of trials
    while (x <= j):
        seed = fake_data['seed']
        conditionx = seed == x
        seedx_index = index[conditionx].tolist()
        # print(seedx_index)
        # print(seed[seedx_index])
        # NOTE(review): int(Series) only works when the selection holds exactly one row.
        eval_sum = int(eval_sum) + int(fake_data['evaluation_metric'][seedx_index])
        x+=1
    # print(eval_sum)
    this_evaluation_metric = eval_sum/(j-i+1)
    # print('this eval metric',[i,j],this_evaluation_metric)

    # calculating the weight for bayes weighted average (based on ped occupied and area)
    weight_ped_occupied = this_ped_occupied/(this_ped_occupied+avg_ped_occupied)
    weight_area = this_area/(this_area+avg_area)
    weight = weight_ped_occupied*weight_area

    # this is the final score calculated using bayes weighted average
    # lower score is better
    # NOTE(review): float(score) relies on score being a one-element Series.
    score = (weight*this_evaluation_metric)+((1-weight)*avg_evaluation_metric)
    print(float(score))
    list_of_scores.append(float(score))

# writing the score to be the last column of the data csv
for [i,j] in sets_of_trials:
    seed = fake_data['seed']
    conditioni = seed == i
    seedi_index = index[conditioni]
    conditionj = seed == j
    seedj_index = index[conditionj]
    # NOTE(review): m (an int) is compared against pandas Index objects here; this only
    # evaluates to a single boolean when each Index holds exactly one position — verify.
    while ((m<=seedj_index) and (m>=seedi_index)):
        # NOTE(review): chained indexing assignment — pandas recommends
        # fake_data.loc[m, 'bayes_weighted_avg_score'] to avoid SettingWithCopyWarning.
        fake_data['bayes_weighted_avg_score'][m] = list_of_scores[ij_index]
        m = m + 1
    ij_index = ij_index + 1

fake_data.to_csv(r'fake_data.csv', index=False)
# fake_data.to_csv(r'cross_val_fake_data.csv', index=False)
app/.ipynb_checkpoints/bayes_weighted_average-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Course introduction

# ## A. Overview

# ### Am I ready to take this course?
#
# Yes. Probably. Some programming experience will help, but is not required. If you have no programming experience, I strongly encourage you to go through the first handful of modules on the [Codecademy Python course](https://www.codecademy.com/learn/learn-python) as soon as possible. While that course utilizes Python 2, we will be using Python 3 in our course here. BUT...many of the basics are identical between the two versions.
#
# There are <b>a lot</b> of online resources that you can use to supplement things we learn in class. Some examples are:
#
# * [python.org Tutorial](https://docs.python.org/3/tutorial/index.html)
# * [python Tutorial from Microsoft](https://docs.microsoft.com/en-us/learn/modules/intro-to-python/)
# * [Learn Python](https://www.learnpython.org/)
# * [Google](http://google.com) (Just type in your question and follow the first [stackoverflow](http://stackoverflow.com) link. This is surprisingly effective; do this first.)

# ### What computational resources do I need for class?
#
# You will need a laptop that will provide you access to the course (i.e. internet access) and a Python environment to follow along.

# ### How is this for geosciences specifically?
#
# The goal of this class is to provide information for all fields in geoscience. To that end, I will try to cover topics from geology, geography, atmospheric sciences, and oceanography. Specifically, I will focus on 1D timeseries, and 2D geospatial (i.e., on a map) analysis. If you have any topics you would like to cover, please let me know, and I will do my best to accommodate.

# ## Class setup
#
# ### Class format
#
# We will go through course materials during class time.
# You should bring a computer to class so that you can follow along and participate in exercises. Also, course materials are interactive, so you can learn by running code snippets as we go and asking questions. Much like learning a new spoken language, hands-on coding is one of the <b>best</b> ways to learn a new language.
#
#
# ### Course materials
#
# The course materials are available in the [class repository](https://github.com/snifflesnrumjum/python4geosciences). They are in the form of [Jupyter notebooks](http://jupyter.org/). More information on notebooks in the next section.
#
# You'll do your work either on your own computer, in a Google Colab notebook, or through the VOAL provided by Texas A&M University. To access the VOAL when off campus, you need to first set up a VPN connection. Set this up for your computer by visiting `https://connect.tamu.edu` and follow instructions there. You'll need to sign in with your NetID, and click on the little blue link that says "AnyConnect VPN" if and when you find that "Web-based installation was unsuccessful" to install Cisco AnyConnect (you will no longer use the web-based installer after this). When you open the Cisco application on your computer, you will need to fill in "connect.tamu.edu" in the little box, then use your NetID and university password to connect. Then you can run this application to use your computer as if you are on campus.
#
#
# ### Course textbook
#
# There is no textbook for the course. But if you'd like an outside resource, here are three recommendations:
#
# 1. Learning Python by <NAME> (available electronically through TAMU Library http://library.tamu.edu/)
# 2. Beginning Python by <NAME> (available electronically through TAMU Library http://library.tamu.edu/)
# 3. <NAME> has written a number of books on Python and related scientific subjects. And as a bonus, they are free (digital versions): http://greenteapress.com/wp/. In particular you would want to check out Think Python (2nd edition).
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## B. Jupyter notebooks
#
# This file format makes it easy to seamlessly combine text and code. The text can be plain or formatted with [Markdown](https://daringfireball.net/projects/markdown/). The code can be written in over 40 languages including Python, R, and Scala. Most importantly, the code can be interacted with when the notebook is opened in a local (that is, on your computer) iPython server. Alternatively, it can simply be viewed through a github repository (like [this very notebook](https://github.com/snifflesnrumjum/python4geosciences/blob/master/materials/0_intro.ipynb)) or through [nbviewer](http://nbviewer.ipython.org/).
#
# You'll be able to run class materials (in the form of Jupyter notebooks) on your own computer, on Google Colab or the VOAL via your web browser as well as create and work on homework assignments. If you prefer, you are welcome to run Python on your own computer, but you will need to do that mostly on your own. If you go that route, I recommend using Python 3 (which we will be using in class) and a distribution from [Anaconda](https://www.anaconda.com/products/individual).
#
#
# ### Create a new notebook
#
# Start up your local notebook server in your new repo and create a new Jupyter notebook from the local server page.
#
# ### Choose syntax for a cell
#
# Notebooks are built of cells. Cells can have multiple built-in formats including code and Markdown for text. You can select the desired format from a dropdown menu at the top.
#
# If you want to type words, use "Markdown"; if you want to write code, choose "code".
#
# ### Move between cells
#
# To run a given cell, type `[shift-enter]` while active in that cell. You can run all of the cells with Cell > Run all; other variations are available in that drop down menu.
#
#
# ### Homework
#
# We'll discuss homework soon and go through details.
# It will be in the form of Jupyter notebooks and will be submitted through the Canvas LMS.

# -

# ---
#
# The material below is bonus for any students that are interested in using a terminal window on their own computer for running Python. We may go through it in class.

# ## Command-line interface
#
# A command-line interface is a way to interact with your computer using text instead of a Graphical User Interface (GUI), a GUI being visually based with icons etc. We will use these in this class. On a Macintosh or Linux machine, this is a terminal window. On a PC this is often called a command prompt.
#
# Here are some commonly-used commands:
#
# * `cd [path]`: change directory from current location to [path]. `cd ..` can be used to move up a single directory, and `cd ../..` moves up two directories, etc.
# * `pwd`: print working directory, as in write out the current location in the terminal window.
# * `ls`: list files in current directory. `ls -l` lists files in long format to include more information, `ls -a` to list all files even those that are usually not shown because they have a `.` in front, `ls -h` to show file sizes in human readable format. Flags can always be combined to use multiple options at once, as in `ls -ah` to show all files in human readable format.
# * [tab]: Tab completion. You can always push tab in the terminal window to see available options. As you have some letters entered and push tab, the options will be limited to those that fit the pattern you have started.
# * `mkdir [dirname]`: make directory called dirname.
# * `rm [filename]`: remove a file called filename. To remove a directory called dirname, use `rm -r [dirname]`.

# ## Short git and GitHub tutorial (optional)
#
# Class materials are available on a [GitHub](http://github.org) repository. GitHub is a way to share and access code online which has been version-controlled using git.
Version control allows changes in code to be tracked over time; this is important for reproducibility, retrieving code in case of accidents, and working on code in groups. Git is one way to version control your code — other methods include subversion (svn), cvs, and mercurial. More information on this is provided below. # # Remember: you can always google to learn more! Google is an infinite resource that you can ask at any time of the day. Here we summarize a brief overview of how to use git. GitHub has a [cheatsheet](https://education.github.com/git-cheat-sheet-education.pdf) available. # # To get changes in a file in a local version of a repository tracked, saved, and then shared with the internet (on github), do the following: # # * `git add` to initially tell the system to track your file and subsequently to tell the system that you want to take into account new changes to the file (you can also add more than one file in this process). Then # * `git commit -m [commit note]` to save the changes. Then # * `git push` to share the changes with the version of your repository on github. Now you should be able to look at your repo on github and see your updated file there. # # **GitHub Desktop** # # After you have made your repository on GitHub (the website), you should clone it to GitHub Desktop (which is on your local machine). (This should be very easy if you are properly signed into your github account in GitHub Desktop.) Then to get your file tracked and pushed to GitHub (the website): # # * While inspecting the relevant repository, any untracked files or changes to tracked files are shown in the middle window (it is white with horizontal lines). To do the equivalent of `git add`, you should check the box of the file. # * To commit your changes, fill out the form at the bottom of the same window. One short window leaves space for a "Summary" of your changes, and if you have more to say you can put it in the "Description" box. 
# * To push your local changes out to GitHub online, use the Sync button on the upper right hand corner of the window. As a side note, this sync button is also how you should pull down changes to a repository you are following (the equivalent of `git pull`). # # # Note that you do not want to have a directory covered by two git repositories. So for file structure for this class, for example, you might want to have one directory for the class ("Python_x89") which contains two version-controlled subdirectories: the course materials (python4geosciences) and your homework repository ("homework"). That will keep everything properly separated. # # ![XKCD](https://imgs.xkcd.com/comics/git.png) # # [Git as explained by XKCD](https://xkcd.com/1597/) # # ### `git status` # # Type this in a git-monitored subdirectory on your computer to see the status of files that are under git version control and which files are not being monitored. # # ### `git add` # # Use this to add local files to your git repository. `git add [filename]`. # # ### `git commit` # # Use this to save to your repository local file changes. First you need to add the file with `git add`, then you can `git commit -m [commit message]`, where the commit message is a concise and useful note to explain what changes you have made. You may skip the `git add` step if you would like to commit at once all changes that have been made (you can see what changes would be committed by first consulting `git status`) with `git commit -am [commit message]`. The `-a` flag stands for "all" as in commit all changes. # # ### `git push` # # To move your local changes to github so that they are saved and also for sharing with others, you need to `git push`. After making whatever commits you want to make, you can run this command to finalize your changes on github. 
# # ### `git merge` # # If changes have been made in multiple places (say, by another person working on the same code base, or by yourself on two different machines) between `git push`es, git will try to merge if possible — which it will do if the changes don't overlap and therefore don't require your input. If the changes do overlap, you will have to merge the two versions of the code together. You probably won't need to do this in this class. # # # ### Get set up with GitHub # # You'll need an account on github if you don't already have one, and to have git installed on your computer. We will interact with github through a terminal window in class and through the website. You may also download the [GitHub Desktop application](https://desktop.github.com/) to use if you prefer, though we won't be able to help you much with the details of it. # # ### Create a new repo # # In your account on the github webpage, click on the Repositories tab and then the green "New" button in the upper right. You'll need to keep this repo public. After creating this new repo, follow the instructions on the next page for quick setup or to create a new repository on the command line. This makes it so that you have the repo both on github and on your local machine. # # ### Clone a repo # # In a terminal window in the location you want to save the repo materials, type: `git clone [repo address, e.g. https://github.com/snifflesnrumjum/python4geosciences]`.
materials/0_intro_origin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import pulse2percept as p2p import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + deletable=true editable=true axon_bundles = p2p.utils.parfor(p2p.retina.jansonius2009, np.linspace(-180, 180, 501)) # + deletable=true editable=true plt.figure(figsize=(10, 6)) for ax in axon_bundles: plt.plot(ax[:, 0], ax[:, 1]) # + deletable=true editable=true def find_closest_axon(pos_xy, axon_bundles): xneuron, yneuron = pos_xy # find the nearest axon to this pixel dist = [min((ax[:, 0] - xneuron) ** 2 + (ax[:, 1] - yneuron) ** 2) for ax in axon_bundles] axon_id = np.argmin(dist) # find the position on the axon ax = axon_bundles[axon_id] dist = (ax[:, 0] - xneuron) ** 2 + (ax[:, 1] - yneuron) ** 2 pos_id = np.argmin(dist) # add all positions: from `pos_id` to the optic disc return axon_bundles[axon_id][pos_id:0:-1, :] # + deletable=true editable=true def assign_axons(xg, yg, axon_bundles, engine='joblib', scheduler='threading', n_jobs=-1): # Let's say we want a neuron at every pixel location. 
# We loop over all (x, y) locations and find the closest axon: # pos_xy = [(x, y) for x, y in zip(xg.ravel(), yg.ravel())] pos_xy = np.column_stack((xg.ravel(), yg.ravel())) return p2p.utils.parfor(find_closest_axon, pos_xy, func_args=[axon_bundles]) # + deletable=true editable=true xg, yg = np.meshgrid(np.linspace(-10, 10, 101), np.linspace(-10, 10, 101), indexing='xy') print('grid step: %f dva, %f um' % (xg[0, 1] - xg[0, 0], p2p.retina.dva2ret(xg[0, 1] - xg[0, 0]))) # + deletable=true editable=true axons = assign_axons(xg, yg, axon_bundles) # + deletable=true editable=true plt.figure(figsize=(10, 8)) n_axons = np.minimum(50, len(axons)) idx_axons = np.arange(len(axons)) np.random.seed(42) np.random.shuffle(idx_axons) idx_axons = idx_axons[:n_axons] for ax, x, y in zip(np.array(axons)[idx_axons], xg.ravel()[idx_axons], yg.ravel()[idx_axons]): plt.plot(ax[:, 0], ax[:, 1]) plt.plot(x, y, 's', markersize=8, alpha=0.5) plt.plot(ax[0, 0], ax[0, 1], 'o') for e in p2p.implants.ArgusI(): plt.plot(p2p.retina.ret2dva(e.x_center), p2p.retina.ret2dva(e.y_center), 'ok', markersize=30, alpha=0.4) plt.axis('equal'); # + [markdown] deletable=true editable=true # In words: For every axon, there is a function that describes how sensitive the local tissue is. This is a function of the distance from the soma. Possibilites are: # - drops exponentially with distance # - is the Fried function # # This function needs to be multiplied with the current spread. Then what do you do? # - You could sum all these values: this is the contribution of this axon at the pixel location of the soma. This is basically the dot product that we implement now. # - You could take the effective current to be the max of this element-wise product. # - You could walk along the axon from the optic disc to the soma. The axon is on if we reach some threshold at any point. This is binary. It's kinda weird, but isn't this how things work? 
# + deletable=true editable=true
def axon_sensitivity(dist, rule='decay', decay_const=3.0):
    """Tissue sensitivity as a function of distance ``dist`` from the soma.

    'decay': exponential fall-off with space constant ``decay_const``.
    'fried': Gaussian bump plus plateau, zeroed out on the soma side.
    Raises ValueError for any other rule.
    """
    if rule.lower() == 'decay':
        return np.exp(-dist / decay_const)
    elif rule.lower() == 'fried':
        mu_gauss = p2p.retina.ret2dva(50.0)
        std_gauss = p2p.retina.ret2dva(20.0)
        bell = 0.7 * np.exp(-(dist - mu_gauss) ** 2 / (2 * std_gauss ** 2))
        plateau = 0.3
        soma = np.maximum(mu_gauss - dist, 0)
        return np.maximum(0, bell - 0.001 * dist + plateau - soma)
    else:
        raise ValueError('Unknown rule "%s"' % rule)


# + deletable=true editable=true
# Compare both sensitivity rules over 0..1000 um (converted to dva).
plt.figure(figsize=(12, 5))
plt.subplot(121)
dist = np.linspace(0, p2p.retina.ret2dva(1000), 1000)
for decay_const in [0.01, 0.1, 1.0, 2.0, 10.0]:
    plt.plot(dist, axon_sensitivity(dist, rule='decay', decay_const=decay_const), linewidth=3, label='$\lambda$=' + str(decay_const))
plt.legend()
plt.xlabel('dist (dva^2)')
plt.title('Decay rule')
plt.subplot(122)
plt.plot(dist, axon_sensitivity(dist, rule='fried'), linewidth=3)
plt.xlabel('dist (dva^2)')
plt.title('Fried rule');

# + deletable=true editable=true
# Mock "current spread": an isotropic Gaussian centered at (5, 5).
std = 1.0
cs = np.exp(-((xg - 5) ** 2 + (yg - 5) ** 2) / (2 * std ** 2))
plt.imshow(np.flipud(cs))

# + deletable=true editable=true
# KD-tree over all grid positions for fast nearest-pixel lookups.
from scipy.spatial import cKDTree
# pos_xy = np.vstack((xg.ravel(), yg.ravel())).T
pos_xy = np.column_stack((xg.ravel(), yg.ravel()))
tree = cKDTree(pos_xy)

# + deletable=true editable=true
# Walk through the computation once, for the axon closest to (2, 5).
_, plot_axon = tree.query((2, 5))
print('idx_plot: ', plot_axon)
axon = axons[plot_axon]
_, idx_neuron = tree.query(axon[0, :])
# Consider only pixels within the grid
idx_valid = (axon[:, 0] >= xg.min()) * (axon[:, 0] <= xg.max())
idx_valid *= (axon[:, 1] >= yg.min()) * (axon[:, 1] <= yg.max())
# For these, find the xg, yg coordinates
_, idx_cs = tree.query(axon[idx_valid, :])
# Drop duplicates
_, idx_cs_unique = np.unique(idx_cs, return_index=True)
idx_cs = idx_cs[np.sort(idx_cs_unique)]
idx_dist = np.insert(idx_cs, 0, idx_neuron, axis=0)
idx_cs, idx_dist

# + deletable=true editable=true
# Per-segment step lengths along the axon (soma pixel was prepended above).
dist = np.sqrt(np.diff(xg.ravel()[idx_dist]) ** 2 + np.diff(yg.ravel()[idx_dist]) ** 2)
dist

# + deletable=true editable=true
plt.plot(np.cumsum(dist))
plt.ylabel('dist (deg^2)')
plt.xlabel('axon segment')

# + deletable=true editable=true
plt.plot(axon_sensitivity(np.cumsum(dist), rule='decay'))
plt.xlabel('axon segment')
plt.ylabel('sensitivity')

# + deletable=true editable=true
plt.plot(cs.ravel()[idx_cs])
plt.xlabel('axon segment')
plt.ylabel('electric field "current spread"')

# + deletable=true editable=true
# Effective current = sensitivity along the axon x current spread per pixel.
axon_weights = axon_sensitivity(np.cumsum(dist), rule='decay') * cs.ravel()[idx_cs]
plt.plot(axon_weights)
plt.xlabel('axon segment')
plt.ylabel('effective current')

# + deletable=true editable=true
axon_weights.mean(), axon_weights.max()


# -
def distance_from_soma(axon, tree, xg, yg):
    """Return (grid pixel indices along ``axon``, distance-from-soma array)."""
    # Consider only pixels within the grid
    idx_valid = (axon[:, 0] >= xg.min()) * (axon[:, 0] <= xg.max())
    idx_valid *= (axon[:, 1] >= yg.min()) * (axon[:, 1] <= yg.max())
    # For these, find the xg, yg coordinates
    _, idx_cs = tree.query(axon[idx_valid, :])
    if len(idx_cs) == 0:
        return 0, np.inf
    # Drop duplicates
    _, idx_cs_unique = np.unique(idx_cs, return_index=True)
    idx_cs = idx_cs[np.sort(idx_cs_unique)]
    _, idx_neuron = tree.query(axon[0, :])
    # NOTE(review): this branch is unreachable -- idx_cs was already checked
    # for emptiness above and deduplication cannot empty a non-empty array.
    if len(idx_cs) == 0:
        return idx_neuron, 0.0
    else:
        # For distance calculation, add a pixel at the location of the soma
        idx_dist = np.insert(idx_cs, 0, idx_neuron, axis=0)
        # Calculate distance from soma
        xdiff = np.diff(xg.ravel()[idx_dist])
        ydiff = np.diff(yg.ravel()[idx_dist])
        # NOTE(review): sqrt of the cumulative sum of squared steps, whereas
        # the exploratory cell above used cumsum of per-step sqrt lengths --
        # confirm which definition of path length is intended.
        dist = np.sqrt(np.cumsum(xdiff ** 2 + ydiff ** 2))
        return idx_cs, dist


# Precompute (pixel indices, distances) for every assigned axon, in parallel.
axons_dist = p2p.utils.parfor(distance_from_soma, axons, func_args=[tree, xg, yg])


def get_axon_contribution(axon_dist, cs, sensitivity_rule='fried', activation_rule='max', min_contribution=0.01):
    """Return (soma pixel index, effective current) or None if below threshold.

    ``axon_dist`` is one element of ``axons_dist``; ``cs`` is the current
    spread on the grid.  ``activation_rule`` collapses the per-segment
    effective currents with max or mean.
    """
    idx_cs, dist = axon_dist
    # Find effective current
    axon_weights = axon_sensitivity(dist, rule=sensitivity_rule) * cs.ravel()[idx_cs]
    if activation_rule == 'max':
        axon_contribution = axon_weights.max()
    elif activation_rule == 'mean':
        axon_contribution = axon_weights.mean()
    else:
        raise ValueError('Unknown activation rule "%s"' % activation_rule)
    if axon_contribution < min_contribution:
        return None
    else:
        if len(idx_cs) > 1:
            idx_neuron = idx_cs[0]
        else:
            idx_neuron = idx_cs
        return idx_neuron, axon_contribution


# +
# Effective-current map for every sensitivity/activation rule combination.
sensitivity_rules = ['decay', 'fried']
activity_rules = ['mean', 'max']
idx_plot = 1
plt.figure(figsize=(14, 8))
for sens_rule in sensitivity_rules:
    for act_rule in activity_rules:
        contrib = p2p.utils.parfor(get_axon_contribution, axons_dist, func_args=[cs], func_kwargs={'sensitivity_rule': sens_rule, 'activation_rule': act_rule}, engine='joblib')
        plt.subplot(len(sensitivity_rules), len(activity_rules), idx_plot)
        px_contrib = list(filter(None, contrib))
        ecs = np.zeros_like(cs)
        for i, e in px_contrib:
            ecs.ravel()[i] = e
        plt.imshow(np.flipud(ecs))
        plt.colorbar(fraction=0.046, pad=0.04)
        plt.title('%s, %s rule' % (sens_rule, act_rule))
        idx_plot += 1
# -
np.argmax(ecs)
examples/notebooks/internal-0.2-axon-map.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_tensorflow_p27 # language: python # name: conda_tensorflow_p27 # --- import os import numpy as np import matplotlib.pyplot as plt import cv2 # !aws s3 cp s3://lowresolutionhighresolution/dataset/ . --recursive # !conda install python==3.7 cv2.__version__ # !pip install --upgrade opencv-python==4.3.0.38 from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, Conv2DTranspose, UpSampling2D, add from tensorflow.keras.models import Model from tensorflow.keras import regularizers import tensorflow as tf from sagemaker import get_execution_role role = get_execution_role() bucket = 'arn:aws:s3:::lowresolutionhighresolution' # + import boto3 conn = boto3.client('s3') response = conn.list_buckets() # Output the bucket names print('Existing buckets:') for bucket in response['Buckets']: print(bucket["Name"]) # contents = conn.list_objects(Bucket=bucket)['Contents'] # conn.list_objects(Bucket='lowresolutionhighresolution') # + def load_data(path): high_res_images = [] low_res_images = [] for dirname, _, filenames in os.walk(path+'low_res'): for filename in filenames: img = cv2.imread(os.path.join(dirname, filename)) img = process_image(img) low_res_images.append(img) for dirname, _, filenames in os.walk(path+'high_res'): for filename in filenames: img = cv2.imread(os.path.join(dirname, filename)) img = process_image(img) high_res_images.append(img) return np.array(low_res_images), np.array(high_res_images) def process_image(image): return image/255 # - base_dir = '' train_x, train_y = load_data(base_dir+'train/') val_x, val_y = load_data(base_dir+'val/') train_x.shape train_x[1] val_x.shape fig, (ax1, ax2) = plt.subplots(1, 2) fig.suptitle('Image Comparison') ax1.imshow(train_x[1]) ax1.title.set_text("low-res image ") ax2.imshow(train_y[1]) ax2.title.set_text("high-res image ")
low_resolution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## __XDF latency analysis of LSL data streams: Unity (triggered) vs EEG (measured)__ # # ### __Situation__ # #### Every 500ms a beep sound is played and the background color changes one frame from black to white. # # #### __Unity (90 FPS):__ # - Color change (black or white background) # - Beep sound (audio playing or not) # # #### __EEG (1024 Hz):__ # - Photodiode (light sensor) # - Microphone (audio sensor) # # #### __TODO__ # * [x] Read XDF file and header and select the right data (timestamps and values) # * [x] Compute the timestamps from 0 # * [x] Visualize the data: unity audio vs microphone and unity color vs photodiode # * [x] Compare the timestamps (length, duration, sample count..): Original vs Calculated vs FileInfo # * [x] Descriptive statistics of timestamps distribution and plot # * [x] Actual latency test: select the microphone and photodiode peaks (starting points) and compare with the unity ones # * [x] Test all recordings # * [x] Make and test long recordings (half an hour) and check with two computers (local network setup) # * [ ] Find out why sometimes Unity timestamps start before the EEG ones # * [ ] Find out why sometimes there are two Diode spikes during one colour change # * [ ] ... 
# #### __Dependencies__ import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import pyxdf from scipy.signal import find_peaks import seaborn as sns # #### __Files (recordings)__ # + files = os.listdir("data") # get all files from the folder "data" files.sort() # sort them alphabetically recordings = [] for file in files: if file.startswith("."): # filter hidden/config files files.remove(file) # remove hidden/config file for i, file in enumerate(files): # store and display all files recordings.append(file) print(f"recordings[{i}] = {file}") # - # #### __Helper functions__ # + a_ch_name = "Audio" c_ch_name = "Diode" e_ch_name = "openvibeSignal" def select_streams(data): global s_channels s_channels = {data[i]["info"]["name"][0]: i for i in range(len(data))} # Time values a = s_channels[a_ch_name] # unity audio stream channel c = s_channels[c_ch_name] # unity color stream channel e = s_channels[e_ch_name] # eeg stream channel (diode and microphone) return a, c, e # - # #### __Checking if EEG data was received before Unity data for all recordings__ print("EEG received first (✔/✗):") for file in recordings: # check all files streams, fileheader = pyxdf.load_xdf(f"data/{file}") # load a XDF file a_ch, c_ch, e_ch = select_streams(streams) # select the data stream channels a_t = streams[a_ch]["time_stamps"][0] # get the first unity timestamp e_t = streams[e_ch]["time_stamps"][0] # get the first eeg timestamp if a_t - e_t < 0: # unity received first (negative difference) print(f"✗ {file}") else: # eeg received first (positive difference) print(f"✔ {file}") # #### __Read XDF data__ file = recordings[11] # select a file print(f"File: {file}") # display the file name streams, fileheader = pyxdf.load_xdf(f"data/{file}") # load the XDF file fileheader # just a dict describing the version and format of the XDF file # #### __Automatically select the stream channels__ a_ch, c_ch, e_ch = select_streams(streams) s_channels # #### __Read EEG and Unity 
# timestamps and sensor data__

# + slideshow={"slide_type": "slide"}
u_ts = streams[a_ch]["time_stamps"]  # unity timestamps
e_ts = streams[e_ch]["time_stamps"]  # eeg timestamps

# Diode values
eeg = np.transpose(streams[e_ch]["time_series"])

# select the photodiode and microphone sensor information
# there's recordings with diode data on channels 65 and 66
# so we check which is the right one for this recording
if max(eeg[64]) != 0.0:
    e_color = eeg[64]  # channel 65 of the ANT amplifier
else:
    e_color = eeg[65]  # channel 66 of the ANT amplifier
e_audio = eeg[69]  # channel 70 of the ANT amplifier

# select unity audio and background color change markers
# format: [currentFrame, value, timestamp]
u_color = np.transpose(streams[c_ch]["time_series"])
u_audio = np.transpose(streams[a_ch]["time_series"])

e_color = -e_color  # invert diode data polarity, easier to visualize
# -

# #### __Preprocess data: calculate meaningful timestamps__

# + slideshow={"slide_type": "slide"}
# calculate time values for unity and eeg from 0
e_time = [0]
length = len(e_ts)
[e_time.append(e_ts[i + 1] - e_ts[0]) for i in range(length) if i < length - 1]
u_time = [0]
length = len(u_ts)
[u_time.append(u_ts[i + 1] - u_ts[0]) for i in range(length) if i < length - 1]

# calculate the diff and shift the values left (negative) or right (positive)
diff = u_ts[0] - e_ts[0]
u_time = [i + diff for i in u_time]

# if diff is negative unity data was received before eeg
if diff < 0:
    print("Unity data received first ✗")
    if diff < -0.98:  # so if the difference cannot be explained by normal EEG sampling
        print("Something went wrong with this recording")
else:
    print("EEG data received first ✔")
# -

# #### __Data preview__

# + slideshow={"slide_type": "slide"}
# interactive: widget, not interactive: inline
# %matplotlib inline
sns.set(rc={"figure.figsize": (14, 5)})  # set figure size
sns.set_style("darkgrid")  # set seaborn plotting style

f_n = -0.2  # starting point (s)
s_n = 0.1  # ending point (s)
start_e = 1024 * f_n  # eeg sampling rate = 1024
start_u = 90 * f_n  # unity sampling rate = 90
five_sec = 1024 * s_n  # N of eeg in 5 s
f_sec = 90 * s_n  # N of unity in 5 s
u_height = 3500  # factor to improve unity (true/1) values visualization

e_t = np.array(e_time)
u_t = np.array(u_time)

# select range of timestamps, diode and microphone values (eeg)
e_time_selection = e_t[(e_t > f_n) & (e_t < s_n)]
e_color_selection = e_color[(e_t > f_n) & (e_t < s_n)]
e_audio_selection = e_audio[(e_t > f_n) & (e_t < s_n)]

# select a range of timestamps, color and audio values (unity)
u_time_selection = u_t[(u_t > f_n) & (u_t < s_n)]
u_color_selection = u_color[(u_t > f_n) & (u_t < s_n)]
u_audio_selection = u_audio[1][(u_t > f_n) & (u_t < s_n)]

# plot the selected range to compare eeg vs unity values
plt.plot(e_time_selection, e_color_selection * 0.05)
plt.plot(e_time_selection, e_audio_selection)
plt.plot(u_time_selection, u_color_selection * u_height, marker="o")
plt.plot(u_time_selection, u_audio_selection * u_height, marker="x")
plt.title(f"Sample: N = {five_sec}")
plt.ylabel("Sensor value")
plt.xlabel("Time (s)")
plt.xticks(np.arange(f_n, s_n, step=0.5))
labels = ["photosensor", "microphone", "color", "audio"]
plt.legend(labels, loc="upper right")  # set the legend
plt.show()
# -

# #### __Timestamps comparison (original vs computed vs file info)__

# +
# store unity and eeg timestamps as pandas series
# dataframe is not needed since it's 1D array
eeg_t = pd.Series(streams[e_ch]["time_stamps"])
unity_t = pd.Series(streams[a_ch]["time_stamps"])

print("Original timestamps")
print("===================")
u_start = u_ts[0]
u_end = u_ts[-1]
e_start = e_ts[0]
e_end = e_ts[-1]
u_length = u_end - u_start
e_length = e_end - e_start
print(f"EEG first timestamp: {e_start}")
print(f"EEG last timestamp: {e_end}")
print(f"EEG length: {e_length}")
print(f"EEG sample count: {len(e_ts)}")
print(f"Unity first timestamp: {u_start}")
print(f"Unity last timestamp: {u_end}")
print(f"Unity length: {u_length}")
print(f"Unity sample count: {len(u_ts)}")
print(f"Start difference: {abs(u_start - e_start)}")
print(f"Length difference: {abs(u_length - e_length)}")
print("")

print("Computed timestamps")
print("====================")
u_start = u_time[0]  # [-1:] returns the index and the type as well but [-1:].values[0] also works
u_end = u_time[-1]
e_start = e_time[0]
e_end = e_time[-1]
u_length = u_end - u_start
e_length = e_end - e_start
print(f"EEG first timestamp: {e_start}")
print(f"EEG last timestamp: {e_end}")
print(f"EEG length: {e_length}")
print(f"EEG sample count: {len(e_time)}")
print(f"Unity first timestamp: {u_start}")
print(f"Unity last timestamp: {u_end}")
print(f"Unity length: {u_length}")
print(f"Unity sample count: {len(u_time)}")
print(f"Start difference: {abs(u_start - e_start)}")
print(f"Length difference: {abs(u_length - e_length)}")
print("")

print("File info")
print("========")
e_info = streams[e_ch]["info"]
e_footer = streams[e_ch]["footer"]["info"]
u_info = streams[a_ch]["info"]
u_footer = streams[a_ch]["footer"]["info"]
print(f"EEG stream created at: {e_info['created_at'][0]}")
print(f"Unity stream created at: {u_info['created_at'][0]}")
print(f"EEG first timestamp: {e_footer['first_timestamp'][0]}")
print(f"EEG last timestamp: {e_footer['last_timestamp'][0]}")
print(f"EEG sample count: {e_footer['sample_count'][0]}")
print(f"Unity first timestamp: {u_footer['first_timestamp'][0]}")
print(f"Unity last timestamp: {u_footer['last_timestamp'][0]}")
print(f"Unity sample count: {u_footer['sample_count'][0]}")
# -

# #### __Descriptive statistics: EEG timestamps__

# +
# intervals between successive samples of each stream
e_time_dist = [e_ts[i + 1] - e_ts[i] for i in range(len(e_ts) - 1)]
u_time_dist = [u_ts[i + 1] - u_ts[i] for i in range(len(u_ts) - 1)]
e_time_dist = pd.DataFrame(np.array(e_time_dist), columns=["eeg"])
u_time_dist = pd.DataFrame(np.array(u_time_dist), columns=["unity"])
e_time_dist.describe()
# -

# The EEG samples look really constant over time
#
# #### __Descriptive statistics: Unity timestamps__
u_time_dist.describe() # It does not seem the case for the unity samples # #### __Time sampling plot comparison__ # %matplotlib inline sns.set(rc={"figure.figsize": (3, 9)}) # set figure size sns.set_style("whitegrid") # set seaborn plotting style p = sns.boxplot(x=u_time_dist, orient="v") p.set_title("Time distribution (s)") plt.show() # #### __Calculating the Latencies__ # ###### __Diode__ # + # get all the first peaks of each of the four recordings e_col_peaks = find_peaks(e_color, height=10000, distance=400) # here the len of unity is one longer than the len of u_col_peaks = find_peaks(u_color) # since we are only intersted in the position of the peaks not the height, lets only take the first column ec_peak = e_col_peaks[0] uc_peak = u_col_peaks[0] # now we have the column where the peak occurs, now we need the corresponding time stamp ec_time = [e_time[e] for e in ec_peak] uc_time = [u_time[e] for e in uc_peak] # calculate the differneces between EEG and unity c_diff = np.empty(len(uc_time)) c_diff[:] = np.nan c_diff = [] length = len(uc_time) # to make sure we do not start with j = 0 if EEG starts before Unity if np.array(uc_time)[0] > 0.25: j = 1 else: j = 0 for i in range(length): if (uc_time[i] - ec_time[j] > -0.25) and (uc_time[i] - ec_time[j] < 0): # add the difference between EEG and unity peak c_diff.append(uc_time[i] - ec_time[j]) if j < len(ec_time): j = j + 1 else: # add nan if there is no EEG peak c_diff.append(np.nan) # check the nan values (and compare them to the graph) nan_val = [] # get the indices of all nan values so we can check if there a diode is actually missing nan_val.append(np.argwhere(np.isnan(c_diff))) n = np.ravel(nan_val) # to make it look nicer # contains the untiy timestamps when the diode is missing --> to check in graph time_st = np.array(uc_time)[np.array(n)] print(time_st) # - # ###### __Speaker__ # + # get all the first peaks of each of the four recordings e_audio_peaks = find_peaks(e_audio, height=2100, distance=400) # 
here the len of unity is one longer than the len of u_audio_peaks = find_peaks(u_audio[1]) # since we are only intersted in the position of the peaks not the height, lets only take the first column ea_peak = e_audio_peaks[0] ua_peak = u_audio_peaks[0] # now we have the column where the peak occurs, now we need the corresponding time stamp ea_time = [e_time[e] for e in ea_peak] ua_time = [u_time[e] for e in ua_peak] # calculate the differneces between EEG and unity a_diff = [] length = len(ua_time) # to make sure we do not start with j = 0 if EEG starts before Unity if np.array(uc_time)[0] > 0.25: j = 1 else: j = 0 for i in range(length): if (ua_time[i] - ea_time[j] > -0.3) and (ua_time[i] - ea_time[j] < 0): # print(uc_time[i] - ec_time[j]) a_diff.append(ua_time[i] - ea_time[j]) if j < len(ea_time): j = j + 1 else: a_diff.append(np.nan) nan_val = [] # get the indices of all nan values so we can check if there a diode is actually missing nan_val.append(np.argwhere(np.isnan(a_diff))) n = np.ravel(nan_val) # to make it look nicer time_st = np.array(ua_time)[np.array(n)] # contains the untiy timestamps when the diode is missing --> to check in graph print(time_st) # - # #### __Data Preview__ # + # interactive: widget, not interactive: inline # %matplotlib inline sns.set(rc={"figure.figsize": (14, 5)}) # set figure size sns.set_style("darkgrid") # set seaborn plotting style f_n = 0.2 # starting point (s) s_n = 0.5 # ending point (s) start_e = 1024 * f_n # eeg sampling rate = 1024 start_u = 90 * f_n # unity sampling rate = 90 five_sec = 1024 * s_n # N of eeg in 5 s f_sec = 90 * s_n # N of unity in 5 s u_height = 3500 # factor to improve unity (true/1) values visualization e_t = np.array(e_time) u_t = np.array(u_time) # select range of timestamps, diode and microphone values (eeg) e_time_selection = e_t[(e_t > f_n) & (e_t < s_n)] e_color_selection = e_color[(e_t > f_n) & (e_t < s_n)] e_audio_selection = e_audio[(e_t > f_n) & (e_t < s_n)] # select a range of timestamps, 
color and audio values (unity) u_time_selection = u_t[(u_t > f_n) & (u_t < s_n)] u_color_selection = u_color[(u_t > f_n) & (u_t < s_n)] u_audio_selection = u_audio[1][(u_t > f_n) & (u_t < s_n)] # plot the selected range to compare eeg vs unity values plt.plot(e_time_selection, e_color_selection * 0.05) plt.plot(e_time_selection, e_audio_selection) plt.plot(u_time_selection, u_color_selection * u_height, marker="o") plt.plot(u_time_selection, u_audio_selection * u_height, marker="x") plt.title(f"Sample: N = {five_sec}") plt.ylabel("Sensor value") plt.xlabel("Time (s)") plt.xticks(np.arange(f_n, s_n, step=0.5)) labels = ["photosensor", "microphone", "color", "audio"] plt.legend(labels, loc="upper right") # set the legend plt.show() # - # #### __Descriptive Statistics__ # Descriptive Statistics of colour peak diff c_diff_data = pd.DataFrame(c_diff) c_diff_data.describe() # * ftest1: -0.080 till -0.073 # * ftest2: -0.078 till -0.073 # * ftest3: -0.080 till -0.074 # * test: -0.100 till -0.072 # * ftest_build1: -0.077 till -0.074 # * ftest_build2: -0.080 till -0.074 # * ftest_build3: -0.080 till -0.074 # * ftest_lsl12: - # * final test: -0.076 till -0.074 # Descriptive Statistics of audio peak diff a_diff_data = pd.DataFrame(a_diff) a_diff_data.describe()
analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Letters that a word must contain to "use all" of them.
string = ("a", "e", "i", "o", "u", "y")


def uses_all(word, string):
    """Return True if every letter in *string* occurs in *word*.

    BUG FIX: the original returned True inside the loop as soon as the
    FIRST letter was found in the word (`else: return True`), so any word
    containing the first vowel passed.  We may only conclude True after
    checking every letter.
    """
    for letter in string:
        if letter not in word:
            return False
    return True


# Guard the interactive part so the module can be imported (and tested)
# without blocking on stdin; behaviour when run as a script is unchanged.
if __name__ == "__main__":
    word = input("Enter a word: ")
    uses_all(word, string)
Week5/Guided projects/Project2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} # # Information Flow # # In this chapter, we explore in depth how to track information flows in python by tainting input strings, and tracking the taint across string operations. # - # Some material on `eval` exploitation is adapted from the excellent [blog post](https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html) by <NAME>. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # **Prerequisites** # # * You should have read the [chapter on coverage](Coverage.ipynb). # * Some knowledge of inheritance in Python is required. # - # We first setup our infrastructure so that we can make use of previously defined functions. import fuzzingbook_utils from ExpectError import ExpectError import inspect import enum # Say we want to implement a *calculator* service in Python. A rather easy way to do that is to rely on the `eval()` function in Python. However, unrestricted `eval()` can be used by users to execute arbitrary commands. Since we want to restrict our users to using only the *calculator* functionality, and do not want the users to trash our server, we use `eval()` with empty `locals` and `globals` (as recommended [elsewhere](https://www.programiz.com/python-programming/methods/built-in/eval)). def my_calculator(my_input): result = eval(my_input, {}, {}) print("The result of %s was %d" % (my_input, result)) # It works as expected: my_calculator('1+2') # Does it? with ExpectError(): my_calculator('__import__("os").popen("ls").read()') # As you can see from the error, `eval()` completed successfully, with the system command `ls` executing successfully. It is easy enough for the user to see the output if needed. 
my_calculator("1 if __builtins__['print'](__import__('os').popen('pwd').read()) else 0") # The problem is that the Python `__builtins__` is [inserted by default](https://docs.python.org/3/library/functions.html#eval) when one uses `eval()`. We can avoid this by restricting `__builtins__` in `eval` explicitly (again as recommended [elsewhere](http://lybniz2.sourceforge.net/safeeval.html)). def my_calculator(my_input): result = eval(my_input, {"__builtins__":None}, {}) print("The result of %s was %d" % (my_input, result)) # Does it help? with ExpectError(): my_calculator("1 if __builtins__['print'](__import__('os').popen('pwd').read()) else 0") # But does it actually? my_calculator("1 if [x['print'](x['__import__']('os').popen('pwd').read()) for x in ([x for x in (1).__class__.__base__.__subclasses__() if x.__name__ == 'Sized'][0].__len__.__globals__['__builtins__'],)] else 0") # The problem here is that when the user has a way to inject **uninterpreted strings** that can reach a dangerous routine such as `eval()` or an `exec()`, it makes it possible for them to inject dangerous code. What we need is a way to restrict the ability of uninterpreted input string fragments from reaching dangerous portions of code. # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## A Simple Taint Tracker # - # For capturing information flows we need a new string class. The idea is to use the new tainted string class `tstr` as a wrapper on the original `str` class. However, `str` is an *immutable* class. Hence, it does not call its `__init__` method after being constructed. This means that any subclasses of `str` also will not get the `__init__` called. If we want to get our initialization routine called, we need to [hook into `__new__`](https://docs.python.org/3/reference/datamodel.html#basic-customization) and return an instance of our own class. 
# # We need to write the `__new__()` method because we want to track the parent object responsible for the taint during our initialization `tstr.__init__()`. Hence, we define a class `tstr_` that subclasses `str`, and enables its subclasses to initialize using `__init__()`. class tstr_(str): def __new__(cls, value, *args, **kw): return super(tstr_, cls).__new__(cls, value) # There are various levels of taint tracking that one can perform. The simplest is to track that a string fragment originated in an untrusted environment, and has not undergone a taint removal process. For this, we simply need to wrap the original string in the untrusted environment with `tstr`, and produce `tstr` instances on each operation that results in another string frament. Distinguishing various untrusted sources may be accomplished by tainting each instances as separate instances (called *colors* in dynamic taint research). You will see an instance of this technique in the chapter on [Grammar Mining](GrammarMining.ipynb). # # In this chapter, we carry *character level* taints. That is, given a fragment that resulted from a portion of the original tainted string, one will be able to tell which portion of the input string the fragment was taken from. In essence, each input character index from a tainted source gets its own color. # # More complex tainting such as *bitmap taints* are possible where a single character may result from multiple tainted character indexes (such as *checksum* operations on strings). We do not consider these in this chapter. # We now define our initialization code in `__init__()`. # The variable `taint` contains non-overlapping taints mapped to the original string. The variable `parent` holds a reference to the `tstr` instance from which this instance was derived. 
class tstr(tstr_): def __init__(self, value, taint=None, parent=None, **kwargs): self.parent = parent l = len(self) if not taint: taint = 0 self.taint = list(range(taint, taint + l)) if isinstance(taint, int) else taint assert len(self.taint) == l def __repr__(self): return str.__repr__(self) def __str__(self): return str.__str__(self) # For example, if we wrap `hello` in `tstr`, then we should be able to access its taint in indices `0..4` t = tstr('hello') t.taint # We can also specify the starting taint as below -- `6..10` t = tstr('world', taint = 6) t.taint # `repr()` and `str()` returns an untainted `str` instance. type(str(t)) # By default, when we wrap a string, it is tainted. Hence we also need a way to `untaint` the string. One way is to simply return a `str` instance as above. However, one may sometimes wish to remove taint from an existing instance. This is accomplished with `untaint()`. During `untaint()`, we simply set the taint indexes to `-1`. This method comes with a pair method `has_taint()` which checks whether a `tstr` instance is currently tainted. class tstr(tstr): def untaint(self): self.taint = [-1] * len(self) return self def has_taint(self): return any(True for i in self.taint if i >= 0) t = tstr('hello world') t.untaint() t.has_taint() # While the basic tainted string creation works, we have not completed the taint transition. For example, getting a substring does not transfer taint from the original string. with ExpectError(): t = tstr('hello world') t[0:5].has_taint() # In Python, the substring as shown above is implemented using `slice`. We implement this next. # ### Create # We need to create new substrings that are wrapped in `tstr`. However, we also want to allow our subclasses to create their own instances. Hence we provide a `create()` method that produces a new `tstr` instance. 
class tstr(tstr): def create(self, res, taint): return tstr(res, taint, self) hello = tstr('hello') world = hello.create('world', 6) world.parent.taint, world.taint # ### Index # In Python, indexing is provided through `__getitem__()`. Indexing on positive integers is simple enough. However, it has two additional wrinkles. The first is that, if the index is negative, that many characters are counted from the end of the string which lies just after the last character. That is, the last character has a negative index `-1` class tstr(tstr): def __getitem__(self, key): res = super().__getitem__(key) if type(key) == int: key = len(self) + key if key < 0 else key return self.create(res, [self.taint[key]]) elif type(key) == slice: return self.create(res, self.taint[key]) else: assert False hello = tstr('hello') hello[0], hello[-1] # The other wrinkle is that `__getitem__()` can accept a slice. We discuss this next. # ### Slice # The Python `slice` operator `[n:m]` relies on the object being an `iterator`. Hence, we define the `__iter__()` method, which returns a custom `iterator`. class tstr(tstr): def __iter__(self): return tstr_iterator(self) # #### The iterator class # The `__iter__()` method requires a supporting `iterator` object. The `iterator` is used to save the state of the current iteration, which it does by keeping a reference to the original `tstr`, and the current index of iteration `_str_idx`. class tstr_iterator(): def __init__(self, tstr): self._tstr = tstr self._str_idx = 0 def __next__(self): if self._str_idx == len(self._tstr): raise StopIteration # calls tstr getitem should be tstr c = self._tstr[self._str_idx] assert type(c) is tstr self._str_idx += 1 return c # Bringing all these together: t = tstr('hello world') t[0:5].has_taint() # ### Concatenation # If two tainted strings are concatenated together, it may be desirable to transer the taints from each to the corresponding portion of the resulting string. 
The concatenation of strings is accomplished by overriding `__add__()`. class tstr(tstr): def __add__(self, other): if type(other) is tstr: return self.create(str.__add__(self, other), (self.taint + other.taint)) else: return self.create(str.__add__(self, other), (self.taint + [-1 for i in other])) # Testing concatenations between two `tstr` instances: my_str1 = tstr("hello") my_str2 = tstr("world", taint=6) v = my_str1 + my_str2 print(v.taint) # What if a `tstr` is concatenated with a `str`? my_str3 = "bye" w = my_str1 + my_str3 + my_str2 print(w.taint) # One wrinkle here is that when adding a `tstr` and a `str`, the user may place the `str` first, in which case, the `__add__()` method will be called on the `str` instance. Not on the `tstr` instance. However, Python provides a solution. If one defines `__radd__()` on the `tstr` instance, that method will be called rather than `str.__add__()` class tstr(tstr): def __radd__(self, other): taint = other.taint if type(other) is tstr else [-1 for i in other] return self.create(str.__add__(other, self), (taint + self.taint)) # We test it out: my_str1 = "hello" my_str2 = tstr("world") v = my_str1 + my_str2 v.taint # These methods: `slicing` and `concatenation` is sufficient to implement other string methods that result in a string, and does not change the character underneath (i.e no case change). Hence, we look at a helper method next. # ### Extract tainted string. # Given a specific input index, the method `x()` extracts the corresponding tainted portion from a `tstr`. As a convenience it supports `slices` along with `ints`. 
class tstr(tstr):
    class TaintException(Exception):
        """Raised when a taint query cannot be answered."""
        pass

    def x(self, i=0):
        """Extract the characters of this string whose taint index is *i*
        (an int), or falls within *i* (a slice)."""
        if not self.taint:
            # BUG FIX: was `raise taint.TaintException(...)` -- there is no
            # name `taint` in scope (NameError at runtime); the exception
            # class is nested inside this class, so reach it via the instance.
            raise self.TaintException('Invalid request idx')
        if isinstance(i, int):
            return [self[p] for p in [k for k, j in enumerate(self.taint) if j == i]]
        elif isinstance(i, slice):
            r = range(i.start or 0, i.stop or len(self), i.step or 1)
            return [self[p] for p in [k for k, j in enumerate(self.taint) if j in r]]

my_str = tstr('abcdefghijkl', taint=100)
my_str.x(101)

my_str.x(slice(101, 105))

# ### Replace

# The `replace()` method replaces a portion of the string with another.

class tstr(tstr):
    def replace(self, a, b, n=None):
        """Taint-aware `str.replace`: replace (up to *n*) occurrences of *a*
        with *b*, splicing *b*'s taint (or -1s for a plain `str`) into the
        taint list at each replacement site."""
        old_taint = self.taint
        b_taint = b.taint if type(b) is tstr else [-1] * len(b)
        mystr = str(self)
        i = 0
        while True:
            if n and i >= n:
                break
            # NOTE(review): find() restarts from index 0 each pass, so a
            # replacement whose result re-contains `a` would loop forever --
            # confirm callers never pass such a/b pairs.
            idx = mystr.find(a)
            if idx == -1:
                break
            last = idx + len(a)
            mystr = mystr.replace(a, b, 1)
            partA, partB = old_taint[0:idx], old_taint[last:]
            old_taint = partA + b_taint + partB
            i += 1
        return self.create(mystr, old_taint)

my_str = tstr("aa cde aa")
res = my_str.replace('aa', 'bb')
res, res.taint

# ### Split

# We essentially have to re-implement split operations, and split by space is slightly different from other splits.
class tstr(tstr): def _split_helper(self, sep, splitted): result_list = [] last_idx = 0 first_idx = 0 sep_len = len(sep) for s in splitted: last_idx = first_idx + len(s) item = self[first_idx:last_idx] result_list.append(item) first_idx = last_idx + sep_len return result_list def _split_space(self, splitted): result_list = [] last_idx = 0 first_idx = 0 sep_len = 0 for s in splitted: last_idx = first_idx + len(s) item = self[first_idx:last_idx] result_list.append(item) v = str(self[last_idx:]) sep_len = len(v) - len(v.lstrip(' ')) first_idx = last_idx + sep_len return result_list def rsplit(self, sep=None, maxsplit=-1): splitted = super().rsplit(sep, maxsplit) if not sep: return self._split_space(splitted) return self._split_helper(sep, splitted) def split(self, sep=None, maxsplit=-1): splitted = super().split(sep, maxsplit) if not sep: return self._split_space(splitted) return self._split_helper(sep, splitted) # + my_str = tstr('ab cdef ghij kl') ab, cdef, ghij, kl = my_str.rsplit(sep=' ') print(ab.taint, cdef.taint, ghij.taint, kl.taint) my_str = tstr('ab cdef ghij kl', taint=100) ab, cdef, ghij, kl = my_str.rsplit() print(ab.taint, cdef.taint, ghij.taint, kl.taint) # + my_str = tstr('ab cdef ghij kl', taint=list(range(0, 15))) ab, cdef, ghij, kl = my_str.split(sep=' ') print(ab.taint, cdef.taint, kl.taint) my_str = tstr('ab cdef ghij kl', taint=list(range(0, 20))) ab, cdef, ghij, kl = my_str.split() print(ab.taint, cdef.taint, kl.taint) # - # ### Strip class tstr(tstr): def strip(self, cl=None): return self.lstrip(cl).rstrip(cl) def lstrip(self, cl=None): res = super().lstrip(cl) i = self.find(res) return self[i:] def rstrip(self, cl=None): res = super().rstrip(cl) return self[0:len(res)] my_str1 = tstr(" abc ") v = my_str1.strip() v, v.taint my_str1 = tstr(" abc ") v = my_str1.lstrip() v, v.taint my_str1 = tstr(" abc ") v = my_str1.rstrip() v, v.taint # ### Expand Tabs class tstr(tstr): def expandtabs(self, n=8): parts = self.split('\t') res = 
super().expandtabs(n) all_parts = [] for i, p in enumerate(parts): all_parts.extend(p.taint) if i < len(parts) - 1: l = len(all_parts) % n all_parts.extend([p.taint[-1]] * l) return self.create(res, all_parts) my_tstr = tstr("ab\tcd") my_str = str("ab\tcd") v1 = my_str.expandtabs(4) v2 = my_tstr.expandtabs(4) print(len(v1), repr(my_tstr), repr(v2), v2.taint) class tstr(tstr): def join(self, iterable): mystr = '' mytaint = [] sep_taint = self.taint lst = list(iterable) for i, s in enumerate(lst): staint = s.taint if type(s) is tstr else [-1] * len(s) mytaint.extend(staint) mystr += str(s) if i < len(lst)-1: mytaint.extend(sep_taint) mystr += str(self) res = super().join(iterable) assert len(res) == len(mystr) return self.create(res, mytaint) my_str = tstr("ab cd", taint=100) (v1, v2), v3 = my_str.split(), 'ef' print(v1.taint, v2.taint) v4 = tstr('').join([v2,v3,v1]) print(v4, v4.taint) my_str = tstr("ab cd", taint=100) (v1, v2), v3 = my_str.split(), 'ef' print(v1.taint, v2.taint) v4 = tstr(',').join([v2,v3,v1]) print(v4, v4.taint) # ### Partitions class tstr(tstr): def partition(self, sep): partA, sep, partB = super().partition(sep) return ( self.create(partA, self.taint[0:len(partA)]), self.create(sep, self.taint[len(partA): len(partA) + len(sep)]), self.create(partB, self.taint[len(partA) + len(sep):])) def rpartition(self, sep): partA, sep, partB = super().rpartition(sep) return (self.create(partA, self.taint[0:len(partA)]), self.create(sep, self.taint[len(partA): len(partA) + len(sep)]), self.create(partB, self.taint[len(partA) + len(sep):])) # ### Justify class tstr(tstr): def ljust(self, width, fillchar=' '): res = super().ljust(width, fillchar) initial = len(res) - len(self) if type(fillchar) is tstr: t = fillchar.x() else: t = -1 return self.create(res, [t] * initial + self.taint) def rjust(self, width, fillchar=' '): res = super().rjust(width, fillchar) final = len(res) - len(self) if type(fillchar) is tstr: t = fillchar.x() else: t = -1 return 
self.create(res, self.taint + [t] * final) # ### String methods that do not change taint # + def make_str_wrapper_eq_taint(fun): def proxy(*args, **kwargs): res = fun(*args, **kwargs) return args[0].create(res, args[0].taint) return proxy for name, fn in inspect.getmembers(str, callable): if name in ['swapcase', 'upper', 'lower', 'capitalize', 'title']: setattr(tstr, name, make_str_wrapper_eq_taint(fn)) # - a = tstr('aa', taint=100).upper() a, a.taint # ### General wrappers # These are not strictly needed for operation, but can be useful for tracing # + def make_str_wrapper(fun): def proxy(*args, **kwargs): res = fun(*args, **kwargs) return res return proxy import types tstr_members = [name for name, fn in inspect.getmembers(tstr,callable) if type(fn) == types.FunctionType and fn.__qualname__.startswith('tstr')] for name, fn in inspect.getmembers(str, callable): if name not in set(['__class__', '__new__', '__str__', '__init__', '__repr__','__getattribute__']) | set(tstr_members): setattr(tstr, name, make_str_wrapper(fn)) # - # ### Methods yet to be translated # These methods generate strings from other strings. However, we do not have the right implementations for any of these. Hence these are marked as dangerous until we can generate the right translations. # + code_folding=[] def make_str_abort_wrapper(fun): def proxy(*args, **kwargs): raise TaintException('%s Not implemented in TSTR' % fun.__name__) return proxy for name, fn in inspect.getmembers(str, callable): if name in ['__format__', '__rmod__', '__mod__', 'format_map', 'format', '__mul__','__rmul__','center','zfill', 'decode', 'encode', 'splitlines']: setattr(tstr, name, make_str_abort_wrapper(fn)) # - # ## EOF Tracker # Sometimes we want to know where an empty string came from. That is, if an empty string is the result of operations on a tainted string, we want to know the best guess as to what the taint index of the preceding character is. # ### Slice # # For detecting EOF, we need to carry the cursor. 
# The main idea is the cursor indicates the taint of the character in front of it.

class eoftstr(tstr):
    def create(self, res, taint):
        """Make derived strings `eoftstr` too, so the cursor survives slicing."""
        return eoftstr(res, taint, self)

    def __getitem__(self, key):
        def get_interval(key):
            # Normalize a slice's (start, stop) with the usual defaults.
            return ((0 if key.start is None else key.start),
                    (len(res) if key.stop is None else key.stop))

        res = super().__getitem__(key)
        if type(key) == int:
            key = len(self) + key if key < 0 else key
            return self.create(res, [self.taint[key]])
        elif type(key) == slice:
            if res:
                return self.create(res, self.taint[key])
            # Result is an empty string
            t = self.create(res, self.taint[key])
            key_start, key_stop = get_interval(key)
            cursor = 0
            if key_start < len(self):
                assert key_stop < len(self)
                cursor = self.taint[key_stop]
            else:
                if len(self) == 0:
                    # if the original string was empty, we assume that any
                    # empty string produced from it should carry the same taint.
                    cursor = self.x()
                else:
                    # Key start was not in the string. We can reply only
                    # if the key start was just outside the string, in
                    # which case, we guess.
                    if key_start != len(self):
                        # BUG FIX: was `taint.TaintException` -- the name
                        # `taint` does not exist; use the nested exception
                        # class inherited from `tstr`.
                        raise self.TaintException('Can\'t guess the taint')
                    cursor = self.taint[len(self) - 1] + 1
            # _tcursor gets created only for empty strings.
            t._tcursor = cursor
            return t
        else:
            assert False

# We add an additional method `t()` that takes in a taint index, and returns the taint at that index. If it is an empty string, it gives you a possible location of that empty string.

class eoftstr(eoftstr):
    def t(self, i=0):
        """Return the taint at index *i*; for an empty string, return the
        best-guess cursor position instead."""
        if self.taint:
            return self.taint[i]
        else:
            if i != 0:
                # BUG FIX: was `taint.TaintException` (undefined name).
                raise self.TaintException('Invalid request idx')
            # self._tcursor gets created only for empty strings.
            # use the exception to determine which ones need it.
            return self._tcursor

t = eoftstr('hello world')
print(repr(t[11:]))

print(t[11:].taint, t[11:].t())

# ## A Comparison Tracker

# Sometimes, we also want to know what each character in an input was compared to.
# ### Operators # + class Op(enum.Enum): LT = 0 LE = enum.auto() EQ = enum.auto() NE = enum.auto() GT = enum.auto() GE = enum.auto() IN = enum.auto() NOT_IN = enum.auto() IS = enum.auto() IS_NOT = enum.auto() FIND_STR = enum.auto() COMPARE_OPERATORS = { Op.EQ: lambda x, y: x == y, Op.NE: lambda x, y: x != y, Op.IN: lambda x, y: x in y, Op.NOT_IN: lambda x, y: x not in y, Op.FIND_STR: lambda x, y: x.find(y) } Comparisons = [] # - # ### Instructions class Instr: def __init__(self, o, a, b): self.opA = a self.opB = b self.op = o def o(self): if self.op == Op.EQ: return 'eq' elif self.op == Op.NE: return 'ne' else: return '?' def opS(self): if not self.opA.has_taint() and type(self.opB) is tstr: return (self.opB, self.opA) else: return (self.opA, self.opB) @property def op_A(self): return self.opS()[0] @property def op_B(self): return self.opS()[1] def __repr__(self): return "%s,%s,%s" % (self.o(), repr(self.opA), repr(self.opB)) def __str__(self): if self.op == Op.EQ: if str(self.opA) == str(self.opB): return "%s = %s" % (repr(self.opA), repr(self.opB)) else: return "%s != %s" % (repr(self.opA), repr(self.opB)) elif self.op == Op.NE: if str(self.opA) == str(self.opB): return "%s = %s" % (repr(self.opA), repr(self.opB)) else: return "%s != %s" % (repr(self.opA), repr(self.opB)) elif self.op == Op.IN: if str(self.opA) in str(self.opB): return "%s in %s" % (repr(self.opA), repr(self.opB)) else: return "%s not in %s" % (repr(self.opA), repr(self.opB)) elif self.op == Op.NOT_IN: if str(self.opA) in str(self.opB): return "%s in %s" % (repr(self.opA), repr(self.opB)) else: return "%s not in %s" % (repr(self.opA), repr(self.opB)) else: assert False # ### Equivalance class ctstr(eoftstr): def create(self, res, taint): o = ctstr(res, taint, self) o.comparisons = self.comparisons return o def with_comparisons(self, comparisons): self.comparisons = comparisons return self class ctstr(ctstr): def __eq__(self, other): if len(self) == 0 and len(other) == 0: 
self.comparisons.append(Instr(Op.EQ, self, other)) return True elif len(self) == 0: self.comparisons.append(Instr(Op.EQ, self, other[0])) return False elif len(other) == 0: self.comparisons.append(Instr(Op.EQ, self[0], other)) return False elif len(self) == 1 and len(other) == 1: self.comparisons.append(Instr(Op.EQ, self, other)) return super().__eq__(other) else: if not self[0] == other[0]: return False return self[1:] == other[1:] t = ctstr('hello world', taint=100).with_comparisons([]) print(t.comparisons) t == 'hello' for c in t.comparisons: print(repr(c)) class ctstr(ctstr): def __ne__(self, other): return not self.__eq__(other) t = ctstr('hello', taint=100).with_comparisons([]) print(t.comparisons) t != 'bye' for c in t.comparisons: print(repr(c)) class ctstr(ctstr): def __contains__(self, other): self.comparisons.append(Instr(Op.IN, self, other)) return super().__contains__(other) class ctstr(ctstr): def find(self, sub, start=None, end=None): if start == None: start_val = 0 if end == None: end_val = len(self) self.comparisons.append(Instr(Op.IN, self[start_val:end_val], sub)) return super().find(sub, start, end) # ### In # This requires some surgery on the module. 
# + def substrings(s, l): for i in range(len(s)-(l-1)): yield s[i:i+l] class ctstr(ctstr): def in_(self, s): # c in '0123456789' # to # __fn(c).in_('0123456789') # ensure that all characters are compared result = [self == c for c in substrings(s, len(self))] return any(result) # + def my_fn(c, s): if (c in s): return c else: return s class __fn: def __init__(self, s): self.s = s def in_(self, v): if isinstance(self.s, ctstr): return self.s.in_(v) else: return self.s in v # - import ast import inspect # #### Get the source code # from fuzzingbook_utils import unparse pass # + class InRewrite(ast.NodeTransformer): def visit_Compare(self, tree_node): left = tree_node.left if not tree_node.ops or not isinstance(tree_node.ops[0], ast.In): return tree_node mod_val = ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id='__fn', ctx=ast.Load()), args=[left], keywords=[]), attr='in_', ctx=left.ctx), args=tree_node.comparators, keywords=[]) return mod_val def rewrite_in(fn): fn_ast = ast.parse(inspect.getsource(fn)) return compile(ast.fix_missing_locations(InRewrite().visit(fn_ast)), filename='', mode='exec') # - my_new_fn = rewrite_in(my_fn) exec(my_new_fn) abcd = 'ABCD' c = ctstr('C').with_comparisons([]) c.comparisons my_fn(c,abcd) c.comparisons # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Lessons Learned # # * One can track the information flow form input to the internals of a system. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ## Next Steps # # _Link to subsequent chapters (notebooks) here:_ # - # ## Background # # \cite{Lin2008} # + [markdown] button=false new_sheet=true run_control={"read_only": false} # ## Exercises # # _Close the chapter with a few exercises such that people have things to do. 
To make the solutions hidden (to be revealed by the user), have them start with_ # # ```markdown # **Solution.** # ``` # # _Your solution can then extend up to the next title (i.e., any markdown cell starting with `#`)._ # # _Running `make metadata` will automatically add metadata to the cells such that the cells will be hidden by default, and can be uncovered by the user. The button will be introduced above the solution._ # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Exercise 1: _Title_ # # _Text of the exercise_ # + cell_style="center" # Some code that is part of the exercise pass # + [markdown] solution2="hidden" solution2_first=true # _Some more text for the exercise_ # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** _Some text for the solution_ # + cell_style="split" slideshow={"slide_type": "skip"} solution2="hidden" # Some code for the solution 2 + 2 # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # _Some more text for the solution_ # + [markdown] button=false new_sheet=false run_control={"read_only": false} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # ### Exercise 2: _Title_ # # _Text of the exercise_ # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** _Solution for the exercise_
notebooks/InformationFlow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Boruta

# ## Import the required libraries

import pandas as pd
import numpy as np
import imblearn
from imblearn.pipeline import make_pipeline as make_pipeline_imbfinal
from imblearn.over_sampling import SMOTE
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report
# BUG FIX: `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `train_test_split` now lives in `sklearn.model_selection`.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
#from sklearn import svm
from sklearn.metrics import *
import pickle
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()

df = pd.read_csv('ckd.csv')

df_train, df_test = train_test_split(df, train_size=0.7, random_state=42)

drop_col_list = ['RBC_normal', 'PCC_present', 'BA_present', 'CAD_yes', 'Ane_yes',
                 'PC_normal', 'Appet_good', 'age', 'pot', 'wc', 'PE_yes', 'bp', 'su']

x_train = df_train.iloc[:, :24]
x_train.drop(drop_col_list, axis=1, inplace=True)
y_train = df_train['Classification_ckd']
scaler.fit(x_train)
x_train_sc = scaler.transform(x_train)

x_test = df_test.iloc[:, :24]
x_test.drop(drop_col_list, axis=1, inplace=True)
y_test = df_test['Classification_ckd']
# BUG FIX: the scaler must NOT be re-fit on the test set (data leakage);
# transform the test data with the statistics learned from the training set.
x_test_sc = scaler.transform(x_test)

x_train_sc

x_test

df_train, df_test = train_test_split(df, train_size=0.7, random_state=42)
x_train = df_train.iloc[:, :24]
y_train = df_train['Classification_ckd']
scaler.fit(x_train)
x_train_sc = scaler.transform(x_train)
x_test = df_test.iloc[:, :24]
y_test = df_test['Classification_ckd']
# BUG FIX: same leakage issue as above -- reuse the train-fit scaler.
x_test_sc = scaler.transform(x_test)

# **Implementing Boruta**

# +
import pandas as pd
#from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy

# load X and y
# NOTE BorutaPy accepts numpy arrays only, hence the .values attribute
X = x_train_sc
# BUG FIX: the comment above was not honoured -- pass the underlying
# numpy array, not the pandas Series.
y = y_train.values

# define random forest classifier, with utilising all cores and
# sampling in proportion to y labels
rfc = RandomForestClassifier(n_estimators=50, random_state=0)

# define Boruta feature selection method
feat_selector_rf = BorutaPy(rfc, n_estimators='auto', verbose=2)

# find all relevant features
feat_selector_rf.fit(X, y)
# -

feat_selector_rf.support_

# Retrieving important features after executing Boruta.
x_train.columns[feat_selector_rf.support_]
Kidney Disease/Boruta.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import datetime as dt
import numpy as np
import pandas as pd

import warnings
warnings.filterwarnings('ignore')

from statsmodels.tsa.ar_model import AR

# FIX: a bare "% matplotlib inline" is a SyntaxError in a pure-Python
# jupytext file; IPython magics must be written as "# %..." comments.
# %matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = 12, 4
matplotlib.rcParams['figure.dpi'] = 300
import matplotlib.pyplot as plt
plt.style.use('bmh')

from matplotlib.dates import DateFormatter
from matplotlib.ticker import FuncFormatter
# -

# Restrict the H-1B case records to four full visa years.
data = pd.read_csv('post2009_naics.csv', parse_dates=['CASE_SUBMITTED'])
data = data[(data['CASE_SUBMITTED'] >= '2013-06-01') & (data['CASE_SUBMITTED'] < '2017-06-01')]


def autoplot_test(data, title='Forecast of Demand for H-1B Visas', ylim=(-0.1e5, 1.1e5)):
    """
    Fit an AR(52) model to weekly H-1B demand, plot a one-year dynamic
    forecast against the observed series, and print error metrics.

    Parameters
    ----------
    data : pd.DataFrame
        Raw case records with a 'CASE_SUBMITTED' date column and a
        'TOTAL_WORKERS' count column.
    title : str
        Title for the resulting plot.
    ylim : tuple
        (lower, upper) y-axis limits for the plot.

    Side effects: draws on the current matplotlib axes and prints RMSE,
    MAPE, a 75% confidence interval and R2 to stdout.
    """
    # Sum Total Workers for Each Date
    data = data.groupby('CASE_SUBMITTED').sum()

    # Convert Index to DatetimeIndex
    data.index = pd.to_datetime(data.index)

    # Resample to weekly, Wednesday-anchored totals
    data = data.resample('W-WED', closed='left', label='left').sum()

    # Remove NaNs
    # FIX: the original passed the whole DataFrame to np.nan_to_num, which
    # yields a 2-D (n, 1) array; operate on the column itself instead.
    data['TOTAL_WORKERS'] = np.nan_to_num(data['TOTAL_WORKERS'])

    # Model: hold out the last 54 weekly points and forecast them dynamically
    END = data.shape[0] - 1
    START = END - 53
    model = AR(data['TOTAL_WORKERS']).fit(maxlag=52)
    y_hat = model.predict(START, END, dynamic=True)
    # Anchor the forecast to the last observed value.
    # FIX: use .iloc[START, 0] instead of chained .iloc[START][0]
    # (chained positional indexing is deprecated in modern pandas).
    y_hat.loc[data.index[START]] = data.iloc[START, 0]
    y_true = data.iloc[START:].values
    y_pred = np.array(y_hat).reshape(len(y_hat), 1)

    # Re-index the forecast onto explicit weekly dates for plotting.
    START = '2016-05-25'
    END = '2017-05-31'
    y_hat.index = pd.date_range(START, END, freq='W-WED')
    y_hat.index = [dt.datetime.strftime(date, '%Y-%m-%d') for date in y_hat.index]
    y_hat.index = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in y_hat.index]

    # ¯\_(ツ)_/¯ Code Needs This to Work
    data.index = [dt.datetime.strftime(date, '%Y-%m-%d') for date in data.index]
    data.index = [dt.datetime.strptime(date, '%Y-%m-%d').date() for date in data.index]

    # Visualization
    ax = plt.gca()
    ax.plot(
        data['TOTAL_WORKERS'],
        label='y_true',
        alpha=0.75,
        marker='.',
        mfc='red'
    )
    ax.plot(
        y_hat,
        label='y_pred',
        alpha=0.75,
        marker='.',
        mfc='red'
    )
    ax.set_xlim(dt.date(data.index[0].year, 1, 1), dt.date(y_hat.index[-1].year + 1, 1, 1))
    ax.xaxis.set_major_formatter(DateFormatter('%b %Y'))
    ax.set_ylim(ylim[0], ylim[1])
    ax.get_yaxis().set_major_formatter(FuncFormatter(lambda x, p: format(int(x), ',')))
    ax.set_title(title)
    ax.legend(loc='best');

    # Metrics (drop the anchor point, which is observed, not forecast)
    y_true = y_true[1:]
    y_pred = y_pred[1:]

    # Error
    E = y_pred - y_true

    # Root-Mean-Square Error
    SE = np.square(E)
    MSE = np.mean(SE)
    RMSE = np.sqrt(MSE)

    # Mean Absolute Percentage Error
    AE = np.abs(E)
    APE = AE / y_true
    MAPE = np.mean(APE) * 100

    # Standard Absolute Percentage Error
    SAPE = np.std(APE) * 100

    # 75% Confidence Interval (z ~= 1.15)
    CI_75 = (MAPE - 1.15 * SAPE, MAPE + 1.15 * SAPE)

    # Coefficient of Determination
    y_bar = np.mean(y_true)
    SST = np.mean(np.square(y_true - y_bar))
    SSR = np.mean(np.square(E))
    R2 = 1 - SSR / SST

    print('RMSE: {:>5.0f} — Root-Mean-Square Error'.format(RMSE))
    print('MAPE: {:>5.1f}% — Mean Absolute Percentage Error'.format(MAPE))
    print('SE: {:>5.1f}% — Standard (Absolute Percentage) Error'.format(SAPE))
    print()
    print('75% Confidence Interval')
    print('Upper Limit: {:>5.1f}%'.format(min(CI_75[1], 100)))
    print('Lower Limit: {:>5.1f}%'.format(max(CI_75[0], 0.0)))
    print()
    print('Coefficient of Determination')
    print('R2: {:.3f}'.format(R2))


autoplot_test(data)
src/model/modified_ar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Od4azuOJmXDi" # # **Obtaining Regional Multi-Year SST Maps from GODAS** # # by Ding # + [markdown] id="krOvAdE2r5Pb" # **Section 1 Data Analysis and Visualization** # + [markdown] id="S4kqhYmdWgj3" # We use the following dataset to forecast marine heatwaves. # # [GODAS](https://www.cpc.ncep.noaa.gov/products/GODAS/) # + [markdown] id="Pn4DkN0tmeCA" # Connect Google Drive with Colab. # + colab={"base_uri": "https://localhost:8080/"} id="3nNHvMgvmSjV" outputId="3b82b7b7-2930-4169-8d74-544e8f2e8d60" from google.colab import drive drive.mount('/gdrive', force_remount=True) # + [markdown] id="fFmcIuvkm964" # Import the data analysis libraries. # + colab={"base_uri": "https://localhost:8080/"} id="a6-800atm9iQ" outputId="1912ca05-9377-42d3-d9e0-04fd2ad039e0" # !pip install netcdf4 # !pip install h5netcdf # + id="PDKSErKKnGOu" from netCDF4 import Dataset import matplotlib.pyplot as plt import xarray as xr import glob import h5netcdf.legacyapi as netCDF4 # + [markdown] id="qQ8VTmkLnbLD" # Read one GODAS data file (the global marine potential temperatures in 1980) as an xarray dataset type. # + id="m1F85dtBnW0V" pottmp_1980 = xr.open_dataset('/gdrive/My Drive/GODAS_pottmp/pottmp.1980.nc', decode_times=False) # + [markdown] id="ex8PchN0niOl" # Have a look at the imported data. # + colab={"base_uri": "https://localhost:8080/", "height": 308} id="TMa_SX5Dnm4o" outputId="5daaa2aa-bec3-4c31-a718-08d394456a67" pottmp_1980 # + [markdown] id="GkdNFWc8n687" # xarray provides a convinient way to read all files in one directory and combines them into one xarray dataset. # # Read all data files (the global marine potential temperatures since 1980) as an integrated xarray dataset type. 
# + colab={"base_uri": "https://localhost:8080/"} id="rENTTqqRofQn" outputId="d3fb8653-9a41-42da-cb96-3315eb5c5298" pottmp_all = xr.open_mfdataset('/gdrive/My Drive/GODAS_pottmp/*.nc', decode_times=False) # + [markdown] id="wQT7aRWLohlZ" # Have a look at it! # # "time: 492" means 492 months. # + colab={"base_uri": "https://localhost:8080/", "height": 308} id="5qh1Lrqoo2hA" outputId="dd06c3a0-35fb-4534-c04d-df013c732c82" pottmp_all # + [markdown] id="OfCT_azbo4W5" # Visualize the global potential temperatures at the level 5 and the time 73048. # + colab={"base_uri": "https://localhost:8080/", "height": 314} id="QqNigixVpNJ0" outputId="9803ea56-9bef-481f-c97c-dfce8da3573f" pottmp_all.pottmp.isel(level=0,time=240).plot() # + [markdown] id="gzVamIURpPeV" # Visualize the potential temperature time series at the lattitude -71.2, the longitude 170.5 and the level 5. # + colab={"base_uri": "https://localhost:8080/", "height": 325} id="qneSNGi_pUPr" outputId="8d55a1a1-f4ae-4b36-899a-0ef865265af3" pottmp_all.pottmp.isel(lat=10,lon=170,level=0).plot(marker="o") # + [markdown] id="Z9-Ee2J1pjJs" # We select a small area to create a few baseline models. # # Extract the ocean region next to southeastern Australia. # # $lat \in (-35, -45)$ # # $lon \in (145, 155)$ # # We also denote the potential temperature at the level 5 as the sea surface temperature (SST). # # $level = 5$ # + id="cuffm0p-qXVa" pottmp_seau = pottmp_all.where(pottmp_all.lat < -35, drop=True) pottmp_seau = pottmp_seau.where(pottmp_seau.lat > -45, drop=True) pottmp_seau = pottmp_seau.where(pottmp_seau.lon < 155, drop=True) pottmp_seau = pottmp_seau.where(pottmp_seau.lon > 145, drop=True) pottmp_seau = pottmp_seau.where(pottmp_seau.level == 5.0, drop=True) # + colab={"base_uri": "https://localhost:8080/", "height": 308} id="RUWQeqXkqZ8_" outputId="3393ef1a-440f-4031-8801-47c2b21bd52b" pottmp_seau # + [markdown] id="TfX5akQ4qngU" # Visualize the SST in this small region at the time 73048. 
# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="BnER2VeQrVnS" outputId="281a587f-609e-49d0-cba9-61fc6977ca86" pottmp_seau.pottmp.isel(level=0,time=240).plot() # + [markdown] id="Vaps_gN7rXqt" # There are 492 time points. We select the prvious 394 (80%) for training and the latter 98 (20%) for validation. # + id="TpVLpGRFrXwQ" pottmp_seau_train = pottmp_seau.where(pottmp_seau.time[0:394], drop=True) pottmp_seau_val = pottmp_seau.where(pottmp_seau.time[394:], drop=True) # + [markdown] id="KPvb4RQar0jg" # **Section 2 Data Preprocessing** # + id="bNR9Me98sh-z" import numpy as np # + [markdown] id="ciBQMqK4sHG7" # Based on the analysis, create (empty) numpy arrays with the shapes for modeling. # + id="F2WMgx23r__q" train_set = np.zeros((394,1,30,10)) val_set = np.zeros((98,1,30,10)) # + [markdown] id="Hw_SbFeIsmsr" # Load the data from the xarray type to the numpy array type. # + id="Qh2bdK38suGS" train_set[:,:,:,:] = pottmp_seau_train.variables['pottmp'][0:394,:,:,:] val_set[:,:,:,:] = pottmp_seau_val.variables['pottmp'][0:98,:,:,:] # + [markdown] id="Gpfc724ftRLq" # Look at their shapes, which is important for machine learning models. # + colab={"base_uri": "https://localhost:8080/"} id="IWmGMhvutkZv" outputId="3806946f-f0b5-4672-8964-e576f317331d" print(train_set.shape) print(val_set.shape) # + [markdown] id="lsXnirwktr0f" # For convenience, convert "nans" to zeroes. # + id="zWp_InWZtpNS" train_set = np.where(np.isnan(train_set), 0, train_set) val_set = np.where(np.isnan(val_set), 0, val_set) # + [markdown] id="5ty1S4Cft4fz" # Remove the unnecessary dimension of levels, which contains only one value 5. # + id="CC8W9aR5uFf7" train_set = train_set[:,0,:,:] val_set = val_set[:,0,:,:] # + colab={"base_uri": "https://localhost:8080/"} id="G79JNP3TuLYX" outputId="f2084b5f-622e-4175-bb11-fc0cf5a65492" print(train_set.shape) print(val_set.shape) # + [markdown] id="cHsbS_MguUkv" # Check the matrix at one timepoint. # # The temperature unit is kelvin. 
# + colab={"base_uri": "https://localhost:8080/"} id="3Y0JYx86uXug" outputId="07129d63-1d8f-4d74-977b-e8e86ae282ae" val_set[1] # + [markdown] id="Zeelgw3julAB" # Exclude the zeros and check the mean. # + colab={"base_uri": "https://localhost:8080/"} id="krL8NmUCuwb-" outputId="9cbbc040-e2e4-4bae-81d1-fe3a45fe107b" np.nanmean(np.where(val_set[1]!=0, val_set[1], np.nan)) # + [markdown] id="TKPLKnN5uPlL" # We want to use the SST maps in three consecutive months to predict the area mean SST (one value) in the fourth month. # # Convert the sets into the following format. # # From [Month 1], [Month 2], [Month 3], ..., to [[Month 1], [Month 2], [Month 3]], [[Month 2], [Month 3], [Month 4]], [[Month 3], [Month 4], [Month 5]], ... # # Create the label sets in the following format accordingly. # # Month 4 Mean, Month 5 Mean, Month 6 Mean, ... # + id="PVZ6VwpOwenx" train_set_3_list = [] train_label_list = [] val_set_3_list = [] val_label_list = [] for i in range(len(train_set) - 3): train_set_3_list.append([train_set[i], train_set[i+1], train_set[i+2]]) train_label_list.append(np.nanmean(np.where(train_set[i+3]!=0, train_set[i+3], np.nan))) for i in range(len(val_set) - 3): val_set_3_list.append([val_set[i], val_set[i+1], val_set[i+2]]) val_label_list.append(np.nanmean(np.where(val_set[i+3]!=0, val_set[i+3], np.nan))) # + [markdown] id="WIKVb0uhxfo7" # Convert the list type into the numpy array type. # + id="8tEfTV3Pxi5P" train_set_3 = np.array(train_set_3_list) train_label = np.array(train_label_list) val_set_3 = np.array(val_set_3_list) val_label = np.array(val_label_list) # + [markdown] id="pmrmeGgOxdvu" # Look at their shapes. # + colab={"base_uri": "https://localhost:8080/"} id="WBeEc_jfxsdE" outputId="fbc3a626-9099-4aa4-adee-89227140da59" print(train_set_3.shape) print(val_set_3.shape) print(train_label.shape) print(val_label.shape) # + [markdown] id="CDyOQOYfLSPq" # Put the four tensors into one list for saving. 
# # + id="A8IFSgwWMKwp" data_sets = [train_set_3.tolist(), val_set_3.tolist(), train_label.tolist(), val_label.tolist()] # + [markdown] id="uPrX488nKJ_0" # Save this list in Google Drive for further use. # + id="lIZ0KiMtKNsB" import json with open('/gdrive/My Drive/GODAS/data_sets.txt', 'w') as out_file: json.dump(data_sets, out_file)
preprocessing_a.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A basic example

# +
import sys

from matplotlib import pyplot as plt
import numpy as np
from scipy.stats import lognorm

# to give us access to the root when climate metrics is not installed locally
sys.path.append('..')

from climate_metrics import (
    GWP,
    GTP,
    cumulative_radiative_forcing,
    temperature_response,
)
# -

# ### An emission of 1 kg CO2 or CH4.

# +
# Shared inputs: a 100-year horizon and a single 1 kg pulse emission.
time_period = 100
time_horizon = np.arange(time_period + 1)
emission = 1  # 1 kg of CO2

# Pulse of CO2.
GHG = 'co2'
gwp = GWP(time_period, emission, GHG)
gtp = GTP(time_period, emission, GHG)
print(f'GWP {time_period} for emitting 1 kg {GHG}: {gwp:.0f} (kg CO2 eq)')
print(f'GTP {time_period} for emitting 1 kg {GHG}: {gtp:.0f} (kg CO2 eq)')

# Pulse of CH4, for comparison.
GHG = 'ch4'
gwp = GWP(time_period, emission, GHG)
gtp = GTP(time_period, emission, GHG)
print(f'GWP {time_period} for emitting 1 kg {GHG}: {gwp:.0f} (kg CO2 eq)')
print(f'GTP {time_period} for emitting 1 kg {GHG}: {gtp:.0f} (kg CO2 eq) ')
# -

# ### For an emission scenario

# +
# Two annual CO2 emission profiles: a constant baseline and a project whose
# emissions decay following a lognormal survival curve.
baseline_emission_scenario = np.ones(time_period + 1)
project_emission_scenario = lognorm.sf(time_horizon, s=np.log(2.5), loc=0, scale=40)
GHG = 'CO2'

# Metric calculations for both scenarios.
gwp_baseline = GWP(time_period, baseline_emission_scenario, GHG, annual=False)
gtp_baseline = GTP(time_period, baseline_emission_scenario, GHG, annual=False)
gwp_project = GWP(time_period, project_emission_scenario, GHG, annual=False)
gtp_project = GTP(time_period, project_emission_scenario, GHG, annual=False)

print(f'GWP {time_period} of baseline scenario: {gwp_baseline:.0f} kg CO2 eq')
print(f'GTP {time_period} of baseline scenario: {gtp_baseline:.0f} kg CO2 eq')
print(f'GWP {time_period} of project scenario: {gwp_project:.0f} kg CO2 eq')
print(f'GTP {time_period} of project scenario: {gtp_project:.0f} kg CO2 eq')

# Draw both emission profiles on one axes.
plt.plot(baseline_emission_scenario, label='baseline')
plt.plot(project_emission_scenario, label='project')
plt.title("emission scenarios", size=16)
plt.ylim((0, 1.05))
plt.ylabel('Annual emissions CO2 (kg)')
plt.xlabel('years')
_ = plt.legend()
# -

# ### Going under the hood of GWP and GTP
#
# We can look at the underlying physical response of the models used to generate GWP and GTP. GWP is a relative measure of cumulative radiative forcing (measured in Watts * meter<sup>-2</sup> * yr) which captures the sum of the energy imbalance at the tropopause caused by a GHG emission. GTP is a relative measure of the global average temperature response (measured in Kelvin) caused by a GHG emission.

# +
crf_baseline = cumulative_radiative_forcing(
    time_period, baseline_emission_scenario, 'CO2', step_size=1, annual=True)
crf_project = cumulative_radiative_forcing(
    time_period, project_emission_scenario, 'CO2', step_size=1, annual=True)

plt.plot(crf_baseline, label='baseline')
plt.plot(crf_project, label='project')
plt.ylabel('cumulative radiative forcing ($W m^{-2} yr$)')
plt.xlabel('year')
plt.title('Cumulative radiative forcing')
_ = plt.legend()

# +
temp_baseline = temperature_response(
    time_period, baseline_emission_scenario, 'CO2', step_size=1, annual=True)
temp_project = temperature_response(
    time_period, project_emission_scenario, 'CO2', step_size=1, annual=True)

plt.plot(temp_baseline, label='baseline')
plt.plot(temp_project, label='project')
plt.ylabel('temperature response ($K$)')
plt.xlabel('year')
plt.title('Temperature response')
_ = plt.legend()
# -

# ### GWP and GTP and different time points
#
# While it is common to use GWP 100, we can look at these metrics at different time horizons.

# annual=True returns the metric evaluated at every year up to the horizon.
annual_gwp_baseline = GWP(time_period, baseline_emission_scenario, GHG, annual=True)
annual_gwp_project = GWP(time_period, project_emission_scenario, GHG, annual=True)
plt.plot(annual_gwp_baseline, label='baseline')
plt.plot(annual_gwp_project, label='project')
plt.title("")
plt.ylabel('GWP (kg CO2eq)')
plt.xlabel('time horizon (years)')
_ = plt.legend()

annual_gtp_baseline = GTP(time_period, baseline_emission_scenario, GHG, annual=True)
annual_gtp_project = GTP(time_period, project_emission_scenario, GHG, annual=True)
plt.plot(annual_gtp_baseline, label='baseline')
plt.plot(annual_gtp_project, label='project')
plt.ylabel('GTP (kg CO2eq)')
plt.xlabel('time horizon (years)')
_ = plt.legend()
notebooks/A-motivating-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: env_python39 # language: python # name: env_python39 # --- # + language="html" # <style> # body { # font-family: "Times New Roman"; # font-size: 12pt; # } # </style> # + """ Created on Tue Nov 2 20:31:38 2018 @author: Dr. <NAME> European Space Agency (ESA) European Space Research and Technology Centre (ESTEC) Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands Email: <EMAIL> GitHub: mnguenther Twitter: m_n_guenther Web: www.mnguenther.com """ from __future__ import print_function, division, absolute_import #::: plotting settings import seaborn as sns sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True) sns.set_style({"xtick.direction": "in","ytick.direction": "in"}) sns.set_context(rc={'lines.markeredgewidth': 1}) #::: modules import numpy as np import os import sys import csv import ipywidgets as widgets from IPython.display import display, HTML, Markdown, clear_output, Javascript display(HTML("<style>.container { width:80% !important; }</style>")) from multiprocessing import cpu_count if sys.version_info[0] == 3: # for Python3 from tkinter import Tk, filedialog elif sys.version_info[0] == 2: # for Python2 from Tkinter import Tk import tkFileDialog as filedialog import warnings def custom_formatwarning(msg, *args, **kwargs): return str(msg) + '\n' warnings.formatwarning = custom_formatwarning #::: allesfitter modules #::: somehow jupyter notebooks don't allow relative imports, so it needs a little hack... 
# Make the repository root importable so the local allesfitter package is found.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
import allesfitter
from allesfitter.utils.latex_printer import round_txt_separately
from allesfitter.priors.transform_priors import get_cosi_from_i, get_Rsuma_from_a_over_Rstar, get_Rsuma_from_Rstar_over_a
from allesfitter.priors.estimate_noise import estimate_noise

# %load_ext autoreload
# %autoreload 2
# -

# Inject a toggle button that hides/shows all raw code cells in the notebook.
HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> <form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')

Markdown('Your Python path (make sure this matches the Python environment you want to use for allesfitter): {}'.format(sys.executable))

#::: globals
# Shared mutable state between the widget callbacks below:
#   INPUT     - user selections and derived settings
#   VBOXES/BUTTONS/DROPDOWNS - widget registries keyed by setting name
global INPUT
global VBOXES
global BUTTONS
global DROPDOWNS
INPUT = {}
VBOXES = {}
BUTTONS = {}
DROPDOWNS = {}

# Common ipywidgets layout presets.
layout = {'width': '180px'}
layout_wide = {'width': '360px'}
layout_textbox = {'width': '120px'}
layout_checkbox = {}

# ![allesfitter](./_static/_logos/logo_circ.png)

# +
#:::: clean up csv file
def clean_up_csv(fname, N_last_rows=0):
    """Rewrite the CSV file *fname* with its last *N_last_rows* rows removed."""
    with open(fname, "r") as f:
        params_csv = list(csv.reader(f))
    with open(fname, "w") as f:
        writer = csv.writer(f)
        for i in range(len(params_csv)-N_last_rows):
            row = params_csv[i]
            writer.writerow(row)

#:::: append a row into csv file
def fwrite_params_line(text):
    """Append one raw line to the params file named in INPUT['fname_params']."""
    with open(INPUT['fname_params'], 'a') as f:
        f.write(text+'\n')

#:::: write params into csv file
def fwrite_params(key, label, unit, physical_bounds, return_str=False):
    """
    Compose one params.csv row for parameter *key* from the widget state in
    INPUT, clipping the prior bounds to *physical_bounds*, and either append
    it to the params file (default) or return it as a string.

    NOTE(review): if the bounds-type dropdown ever holds a value other than
    the four handled below, `bounds` is never assigned and the final string
    concatenation raises NameError — confirm the dropdown options are
    restricted to exactly these four.
    """
    if INPUT[key+'_bounds_type'].value == 'uniform':
        bounds = 'uniform ' \
                 + str( np.max( [physical_bounds[0], float(INPUT[key+'_median'].value)-float(INPUT[key+'_lerr'].value)] ) ) + ' ' \
                 + str( np.min( [physical_bounds[1], float(INPUT[key+'_median'].value)+float(INPUT[key+'_uerr'].value)] ) )
    elif INPUT[key+'_bounds_type'].value == 'uniform * 5':
        bounds = 'uniform ' \
                 + str( np.max( [physical_bounds[0], float(INPUT[key+'_median'].value)-5*float(INPUT[key+'_lerr'].value)] ) ) + ' ' \
                 + str( np.min( [physical_bounds[1], float(INPUT[key+'_median'].value)+5*float(INPUT[key+'_uerr'].value)] ) )
    elif INPUT[key+'_bounds_type'].value == 'trunc_normal':
        bounds = 'trunc_normal ' \
                 + str(physical_bounds[0]) + ' ' \
                 + str(physical_bounds[1]) + ' ' \
                 + str(INPUT[key+'_median'].value) + ' ' \
                 + str(np.max( [ float(INPUT[key+'_lerr'].value), float(INPUT[key+'_uerr'].value) ] ))
    elif INPUT[key+'_bounds_type'].value == 'trunc_normal * 5':
        bounds = 'trunc_normal ' \
                 + str(physical_bounds[0]) + ' ' \
                 + str(physical_bounds[1]) + ' ' \
                 + str(INPUT[key+'_median'].value) + ' ' \
                 + str(5*np.max( [ float(INPUT[key+'_lerr'].value), float(INPUT[key+'_uerr'].value) ] ))
    string = key + ',' + str(INPUT[key+'_median'].value) + ',' + str(int(INPUT[key+'_fit'].value)) + ',' + bounds + ',' + label + ',' + unit
    if not return_str:
        fwrite_params_line(string)
    else:
        return string

#unique
def unique(array):
    """Return the unique elements of *array* in order of first appearance
    (unlike np.unique, which sorts)."""
    uniq, index = np.unique(array, return_index=True)
    return uniq[index.argsort()]
# -

# # 1. working directory
# Select the working directory for this fit, for example `/Users/me/TESS-1b/`. Then you can run a fit using `allesfitter.ns_fit('/Users/me/TESS-1b/')`.
# +
# Step 1 UI: a button that opens a native directory chooser plus a read-only
# text field echoing the chosen path.
BUTTONS['datadir'] = widgets.Button(description='Select directory', button_style='')
text_af_directory = widgets.Text(value='', placeholder='for example: /Users/me/TESS-1b/', disable=True)
hbox = widgets.HBox([BUTTONS['datadir'], text_af_directory])
display(hbox)

def select_datadir(change):
    """Open a Tk directory dialog, store the choice in INPUT['datadir'],
    and trigger re-execution of the following cells to reveal step 2."""
    root = Tk()
    root.withdraw()
    # Keep the dialog on top of the browser window.
    root.call('wm', 'attributes', '.', '-topmost', True)
    INPUT['datadir'] = filedialog.askdirectory()
    # %gui tk
    if INPUT['datadir'] != '':
        text_af_directory.value = INPUT['datadir']
        BUTTONS['datadir'].style.button_color = 'lightgreen'
        INPUT['show_step_2a'] = True
        display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))

BUTTONS['datadir'].on_click(select_datadir)
# -

# # 2. settings

# Step 2a: only rendered after a working directory has been chosen.
if 'show_step_2a' in INPUT and INPUT['show_step_2a'] == True:
    display(Markdown('### General settings'))
    DROPDOWNS['planet_or_EB'] = widgets.Dropdown(options=['Planets', 'EBs'])
    display( widgets.HBox([widgets.Label(value='Fitting planets or EBs?', layout=layout), DROPDOWNS['planet_or_EB']]) )

    display(Markdown('Give the companion letters and instruments, space-separated. Leave empty if not applicable.'))
    hbox_list = []
    text_companions_phot = widgets.Text(value='', placeholder='for example: b')
    hbox_list.append( widgets.HBox([widgets.Label(value='Companions in photometry', layout=layout), text_companions_phot]) )
    text_companions_rv = widgets.Text(value='', placeholder='for example: b c')
    hbox_list.append( widgets.HBox([widgets.Label(value='Companions in RV', layout=layout), text_companions_rv]) )
    text_inst_phot = widgets.Text(value='', placeholder='for example: TESS NGTS')
    hbox_list.append( widgets.HBox([widgets.Label(value='Instruments for photometry', layout=layout), text_inst_phot]) )
    text_inst_rv = widgets.Text(value='', placeholder='for example: HARPS Coralie')
    hbox_list.append( widgets.HBox([widgets.Label(value='Instruments for RV', layout=layout), text_inst_rv]) )
    display(widgets.VBox(hbox_list))

    def confirm(change):
        """Parse the space-separated text fields into INPUT lists and
        advance the notebook to step 2b."""
        #::: set stuff
        if len(text_inst_phot.value):
            INPUT['inst_phot'] = str(text_inst_phot.value).split(' ')
        else:
            INPUT['inst_phot'] = []
        if len(text_inst_rv.value):
            INPUT['inst_rv'] = str(text_inst_rv.value).split(' ')
        else:
            INPUT['inst_rv'] = []
        if len(text_companions_phot.value):
            INPUT['companions_phot'] = str(text_companions_phot.value).split(' ')
        else:
            INPUT['companions_phot'] = []
        if len(text_companions_rv.value):
            INPUT['companions_rv'] = str(text_companions_rv.value).split(' ')
        else:
            INPUT['companions_rv'] = []
        INPUT['companions_all'] = list(np.unique(INPUT['companions_phot']+INPUT['companions_rv'])) #sorted by b, c, d...
        INPUT['inst_all'] = list(unique(INPUT['inst_phot']+INPUT['inst_rv'])) #sorted like user input
        button_2a.style.button_color = 'lightgreen'
        INPUT['show_step_2b'] = True
        display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))

    button_2a = widgets.Button(description='Confirm', button_style='')
    display(button_2a)
    button_2a.on_click(confirm)

# Step 2b: advanced per-instrument settings, grouped into an Accordion.
if 'show_step_2b' in INPUT and INPUT['show_step_2b'] == True:
    display(Markdown('### Advanced settings'))
    vbox_list = []

    #::: Fitting & performance
    hbox_list = []
    max_cores = cpu_count()
    DROPDOWNS['multiprocessing'] = widgets.Dropdown(options=['No'] + ['on '+str(i)+' of my '+str(max_cores)+' cores' for i in range(2,max_cores)] + ['always on all - 1 cores on any system'])
    hbox_list.append(widgets.HBox([widgets.Label(value='Multiprocessing', layout=layout), DROPDOWNS['multiprocessing']]))
    DROPDOWNS['fit_type'] = widgets.Dropdown(options=['Transit (fast)', 'Transit and occultation (fast)', 'Full lightcurve (slow)'])
    hbox_list.append(widgets.HBox([widgets.Label(value='Fit type', layout=layout), DROPDOWNS['fit_type']]))
    DROPDOWNS['shift_epoch'] = widgets.Dropdown(options=['Yes', 'No'])
    hbox_list.append(widgets.HBox([widgets.Label(value='Automatically shift epoch?', layout=layout), DROPDOWNS['shift_epoch']]))
    DROPDOWNS['mcmc_settings'] = widgets.Dropdown(options=['Default'])
    hbox_list.append(widgets.HBox([widgets.Label(value='MCMC settings', layout=layout), DROPDOWNS['mcmc_settings']]))
    DROPDOWNS['ns_settings'] = widgets.Dropdown(options=['Default'])
    hbox_list.append(widgets.HBox([widgets.Label(value='Nested Sampling settings', layout=layout), DROPDOWNS['ns_settings']]))
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Limb darkening
    hbox_list = []
    for inst in INPUT['inst_phot']:
        DROPDOWNS['host_ld_law_'+inst] = widgets.Dropdown(options=['None','Linear','Quadratic','Sing'], value='Quadratic')
        hbox_list.append( widgets.HBox([widgets.Label(value='Host limb darkening '+inst, layout=layout), DROPDOWNS['host_ld_law_'+inst]]) )
        # For eclipsing binaries the companions also get limb-darkening laws.
        if DROPDOWNS['planet_or_EB'].value == 'EBs':
            for companion in INPUT['companions_all']:
                DROPDOWNS[companion+'_ld_law_'+inst] = widgets.Dropdown(options=['None','Linear','Quadratic','Sing'])
                hbox_list.append( widgets.HBox([widgets.Label(value=companion+' limb darkening '+inst, layout=layout), DROPDOWNS[companion+'_ld_law_'+inst]]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Baseline settings
    hbox_list = []
    for inst in INPUT['inst_phot']:
        DROPDOWNS['baseline_flux_'+inst] = widgets.Dropdown(options=['sample_offset', 'sample_linear', 'sample_GP_Matern32', 'sample_GP_SHO', 'sample_GP_real', 'sample_GP_complex', 'hybrid_offset', 'hybrid_poly_1', 'hybrid_poly_2', 'hybrid_poly_3', 'hybrid_poly_4', 'hybrid_spline'], value='hybrid_offset')
        hbox_list.append( widgets.HBox([widgets.Label(value='Baseline flux '+inst, layout=layout), DROPDOWNS['baseline_flux_'+inst]]) )
    for inst in INPUT['inst_rv']:
        DROPDOWNS['baseline_rv_'+inst] = widgets.Dropdown(options=['sample_offset', 'sample_linear', 'sample_GP_Matern32', 'sample_GP_SHO', 'sample_GP_real', 'sample_GP_complex', 'hybrid_offset', 'hybrid_poly_1', 'hybrid_poly_2', 'hybrid_poly_3', 'hybrid_poly_4', 'hybrid_spline'], value='hybrid_offset')
        hbox_list.append( widgets.HBox([widgets.Label(value='Baseline RV '+inst, layout=layout), DROPDOWNS['baseline_rv_'+inst]]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Error settings
    hbox_list = []
    for inst in INPUT['inst_phot']:
        DROPDOWNS['error_flux_'+inst] = widgets.Dropdown(options=['sample', 'hybrid'], value='sample')
        hbox_list.append( widgets.HBox([widgets.Label(value='Error flux '+inst, layout=layout), DROPDOWNS['error_flux_'+inst]]) )
    for inst in INPUT['inst_rv']:
        DROPDOWNS['error_rv_'+inst] = widgets.Dropdown(options=['sample', 'hybrid'], value='sample')
        hbox_list.append( widgets.HBox([widgets.Label(value='Error RV '+inst, layout=layout), DROPDOWNS['error_rv_'+inst]]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Exposure time interpolation
    hbox_list = []
    for inst in INPUT['inst_all']:
        DROPDOWNS['t_exp_'+inst] = widgets.Text( placeholder='None' )
        hbox_list.append( widgets.HBox([widgets.Label(value='Exposure time '+inst, layout=layout), DROPDOWNS['t_exp_'+inst], widgets.Label(value='days', layout=layout)]) )
    for inst in INPUT['inst_all']:
        DROPDOWNS['t_exp_n_int_'+inst] = widgets.Text( placeholder='None' )
        hbox_list.append( widgets.HBox([widgets.Label(value='Interpolation points '+inst, layout=layout), DROPDOWNS['t_exp_n_int_'+inst], widgets.Label(value='(integer)', layout=layout)]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Number of spots
    hbox_list = []
    for inst in INPUT['inst_all']:
        DROPDOWNS['host_N_spots_'+inst] = widgets.Text( placeholder='None' )
        hbox_list.append( widgets.HBox([widgets.Label(value='host: Nr. of spots '+inst, layout=layout), DROPDOWNS['host_N_spots_'+inst], widgets.Label(value='(integer)', layout=layout)]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Number of flares
    hbox_list = []
    DROPDOWNS['N_flares'] = widgets.Text( placeholder='None' )
    hbox_list.append( widgets.HBox([widgets.Label(value='Nr. of flares', layout=layout), DROPDOWNS['N_flares'], widgets.Label(value='(integer)', layout=layout)]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Fit TTVs?
    hbox_list = []
    DROPDOWNS['fit_ttvs'] = widgets.Dropdown(options=["yes","no"], value="no")
    hbox_list.append( widgets.HBox([widgets.Label(value='Fit TTVs?', layout=layout), DROPDOWNS['fit_ttvs']]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Stellar grid (e.g. use "sparse" to speed up intense spot computations)
    hbox_list = []
    for inst in INPUT['inst_all']:
        DROPDOWNS['host_grid_'+inst] = widgets.Dropdown(options=["very_sparse", "sparse", "default", "fine", "very_fine"], value="default")
        hbox_list.append( widgets.HBox([widgets.Label(value='Host grid '+inst, layout=layout), DROPDOWNS['host_grid_'+inst]]) )
        if DROPDOWNS['planet_or_EB'].value == 'EBs':
            for companion in INPUT['companions_all']:
                DROPDOWNS[companion+'_grid_'+inst] = widgets.Dropdown(options=["very_sparse", "sparse", "default", "fine", "very_fine"], value="default")
                hbox_list.append( widgets.HBox([widgets.Label(value=companion+' grid '+inst, layout=layout), DROPDOWNS[companion+'_grid_'+inst]]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Stellar shape (e.g. use "roche" for ellipsoidal variablity)
    hbox_list = []
    for inst in INPUT['inst_all']:
        DROPDOWNS['host_shape_'+inst] = widgets.Dropdown(options=["roche", "roche_v", "sphere", "poly1p5", "poly3p0", "love"], value="sphere")
        hbox_list.append( widgets.HBox([widgets.Label(value='Host shape '+inst, layout=layout), DROPDOWNS['host_shape_'+inst]]) )
        if DROPDOWNS['planet_or_EB'].value == 'EBs':
            for companion in INPUT['companions_all']:
                DROPDOWNS[companion+'_shape_'+inst] = widgets.Dropdown(options=["roche", "roche_v", "sphere", "poly1p5", "poly3p0", "love"], value="sphere")
                hbox_list.append( widgets.HBox([widgets.Label(value=companion+' shape '+inst, layout=layout), DROPDOWNS[companion+'_shape_'+inst]]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: Flux weighted RVs ("Yes" for Rossiter-McLaughlin effect)
    hbox_list = []
    for inst in INPUT['inst_rv']:
        for companion in INPUT['companions_rv']:
            DROPDOWNS[companion+'_flux_weighted_'+inst] = widgets.Dropdown(options=['No', 'Yes'])
            hbox_list.append( widgets.HBox([widgets.Label(value=companion+' flux weighted RV '+inst, layout=layout), DROPDOWNS[companion+'_flux_weighted_'+inst]]) )
    vbox_list.append( widgets.VBox(hbox_list) )

    #::: accordion
    # One accordion section per vbox appended above; titles must stay in the
    # same order as the vbox_list.append calls.
    accordion = widgets.Accordion(children=vbox_list)
    accordion.set_title(0, 'Fitting & performance')
    accordion.set_title(1, 'Limb darkening laws')
    accordion.set_title(2, 'Baseline sampling')
    accordion.set_title(3, 'Error sampling')
    accordion.set_title(4, 'Exposure time interpolation')
    accordion.set_title(5, 'Number of spots')
    accordion.set_title(6, 'Number of flares')
    accordion.set_title(7, 'TTVs')
    accordion.set_title(8, 'Stellar grid (e.g. use "very_sparse" to speed up computations)')
    accordion.set_title(9, 'Stellar shape (e.g. use "roche" for ellipsoidal variablity)')
    accordion.set_title(10, 'Flux weighted RVs (e.g. use "true" for Rossiter-McLaughlin effect)')
    display(accordion)

    #::: confirm button
    button_2b = widgets.Button(description='Confirm', button_style='')
    display(button_2b)

    def confirm(change):
        """Mark step 2b as done and advance the notebook to step 2c."""
        button_2b.style.button_color = 'lightgreen'
        INPUT['show_step_2c'] = True
        display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))

    button_2b.on_click(confirm)

# Step 2c: write settings.csv into the chosen working directory.
if 'show_step_2c' in INPUT and INPUT['show_step_2c'] == True:
    BUTTONS['2c'] = widgets.Button(description='Create settings.csv', button_style='')
    checkbox_2c = widgets.Checkbox(description='Overwrite old settings.csv (if existing)', value=False)
    display(widgets.HBox([BUTTONS['2c'], checkbox_2c]))

    def create_settings_file(change):
        clear_output()
        display(widgets.HBox([BUTTONS['2c'], checkbox_2c]))
        go_ahead = True
        if 'datadir' not in INPUT:
            # NOTE(review): "woking" is a typo in this user-facing warning
            # (runtime string; left untouched here).
            warnings.warn('No allesfitter woking directory selected yet. Please go back to step 1) and fill in all fields.')
            go_ahead = False
        # NOTE(review): this second check still reads INPUT['datadir'] even
        # when the key is missing (go_ahead is already False), which raises
        # KeyError before the friendly warning can matter — confirm intent.
        if os.path.exists(os.path.join(INPUT['datadir'],'settings.csv')) and (checkbox_2c.value==False):
            warnings.warn('The selected working directory '+os.path.join(INPUT['datadir'],'settings.csv')+' already exists.
To proceed, give permission to overwrite it.') go_ahead = False if go_ahead: fname_settings = os.path.join(INPUT['datadir'], 'settings.csv') with open(fname_settings, 'w+') as f: f.write('#name,value\n') def fwrite_settings(text): with open(fname_settings, 'a') as f: f.write(text+'\n') fwrite_settings('###############################################################################,') fwrite_settings('# General settings,') fwrite_settings('###############################################################################,') fwrite_settings('companions_phot,'+text_companions_phot.value) fwrite_settings('companions_rv,'+text_companions_rv.value) fwrite_settings('inst_phot,'+text_inst_phot.value) fwrite_settings('inst_rv,'+text_inst_rv.value) fwrite_settings('###############################################################################,') fwrite_settings('# Fit performance settings,') fwrite_settings('###############################################################################,') if DROPDOWNS['multiprocessing'].value=='No': fwrite_settings('multiprocess,False') elif DROPDOWNS['multiprocessing'].value=='always on all - 1 cores on any system': fwrite_settings('multiprocess,True') fwrite_settings('multiprocess_cores,all') else: fwrite_settings('multiprocess,True') fwrite_settings('multiprocess_cores,'+DROPDOWNS['multiprocessing'].value.split(' ')[1]) if DROPDOWNS['fit_type'].value=='Transit (fast)': fwrite_settings('fast_fit,True') fwrite_settings('fast_fit_width,0.3333333333333333') fwrite_settings('secondary_eclipse,False') fwrite_settings('phase_curve,False') elif DROPDOWNS['fit_type'].value=='Transit and occultation (fast)': fwrite_settings('fast_fit,True') fwrite_settings('fast_fit_width,0.3333333333333333') fwrite_settings('secondary_eclipse,True') fwrite_settings('phase_curve,False') elif DROPDOWNS['fit_type'].value=='Full lightcurve (slow)': fwrite_settings('fast_fit,False') fwrite_settings('fast_fit_width,') fwrite_settings('secondary_eclipse,True') 
fwrite_settings('phase_curve,True') fwrite_settings('phase_curve_style,GP') if DROPDOWNS['shift_epoch'].value=='Yes': fwrite_settings('shift_epoch,True') for companion in INPUT['companions_all']: fwrite_settings('inst_for_'+companion+'_epoch,all') fwrite_settings('###############################################################################,') fwrite_settings('# MCMC settings,') fwrite_settings('###############################################################################,') if DROPDOWNS['mcmc_settings'].value=='Default': fwrite_settings('mcmc_nwalkers,100') fwrite_settings('mcmc_total_steps,2000') fwrite_settings('mcmc_burn_steps,1000') fwrite_settings('mcmc_thin_by,1') fwrite_settings('###############################################################################,') fwrite_settings('# Nested Sampling settings,') fwrite_settings('###############################################################################,') if DROPDOWNS['ns_settings'].value=='Default': fwrite_settings('ns_modus,dynamic') fwrite_settings('ns_nlive,500') fwrite_settings('ns_bound,single') fwrite_settings('ns_sample,rwalk') fwrite_settings('ns_tol,0.01') fwrite_settings('###############################################################################,') fwrite_settings("# Limb darkening law per object and instrument,") fwrite_settings("# if 'lin' one corresponding parameter called 'ldc_q1_inst' has to be given in params.csv,") fwrite_settings("# if 'quad' two corresponding parameter called 'ldc_q1_inst' and 'ldc_q2_inst' have to be given in params.csv,") fwrite_settings("# if 'sing' three corresponding parameter called 'ldc_q1_inst'; 'ldc_q2_inst' and 'ldc_q3_inst' have to be given in params.csv,") fwrite_settings('###############################################################################,') def translate_ld(x): if x=='None': return '' elif x=='Linear': return 'lin' elif x=='Quadratic': return 'quad' elif x=='Sing': return 'sing' for inst in INPUT['inst_phot']: 
fwrite_settings('host_ld_law_'+inst+','+translate_ld(DROPDOWNS['host_ld_law_'+inst].value)) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: fwrite_settings(companion+'_ld_law_'+inst+','+translate_ld(DROPDOWNS[companion+'_ld_law_'+inst].value)) fwrite_settings('###############################################################################,') fwrite_settings("# Baseline settings per instrument,") fwrite_settings("# baseline params per instrument: sample_offset / sample_linear / sample_GP / hybrid_offset / hybrid_poly_1 / hybrid_poly_2 / hybrid_poly_3 / hybrid_pol_4 / hybrid_spline / hybrid_GP,") fwrite_settings("# if 'sample_offset' one corresponding parameter called 'baseline_offset_key_inst' has to be given in params.csv,") fwrite_settings("# if 'sample_linear' two corresponding parameters called 'baseline_a_key_inst' and 'baseline_b_key_inst' have to be given in params.csv,") fwrite_settings("# if 'sample_GP' two corresponding parameters called 'baseline_gp1_key_inst' and 'baseline_gp2_key_inst' have to be given in params.csv,") fwrite_settings('###############################################################################,') for inst in INPUT['inst_phot']: fwrite_settings('baseline_flux_'+inst+','+DROPDOWNS['baseline_flux_'+inst].value) for inst in INPUT['inst_rv']: fwrite_settings('baseline_rv_'+inst+','+DROPDOWNS['baseline_rv_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings("# Error settings per instrument,") fwrite_settings("# errors (overall scaling) per instrument: sample / hybrid,") fwrite_settings("# if 'sample' one corresponding parameter called 'ln_err_key_inst' (photometry) or 'ln_jitter_key_inst' (RV) has to be given in params.csv,") fwrite_settings('###############################################################################,') for inst in INPUT['inst_phot']: 
fwrite_settings('error_flux_'+inst+','+DROPDOWNS['error_flux_'+inst].value) for inst in INPUT['inst_rv']: fwrite_settings('error_rv_'+inst+','+DROPDOWNS['error_rv_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Exposure times for interpolation,') fwrite_settings('# needs to be in the same units as the time series,') fwrite_settings('# if not given the observing times will not be interpolated leading to biased results,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('t_exp_'+inst+','+DROPDOWNS['t_exp_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Number of points for exposure interpolation,') fwrite_settings('# Sample as fine as possible; generally at least with a 2 min sampling for photometry,') fwrite_settings('# n_int=5 was found to be a good number of interpolation points for any short photometric cadence t_exp;,') fwrite_settings('# increase to at least n_int=10 for 30 min phot. 
cadence,') fwrite_settings('# the impact on RV is not as drastic and generally n_int=5 is fine enough,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('t_exp_n_int_'+inst+','+DROPDOWNS['t_exp_n_int_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Number of spots per object and instrument,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('host_N_spots_'+inst+','+DROPDOWNS['host_N_spots_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Number of flares (in total),') fwrite_settings('###############################################################################,') fwrite_settings('N_flares'+','+DROPDOWNS['N_flares'].value) fwrite_settings('###############################################################################,') fwrite_settings('# TTVs,') fwrite_settings('###############################################################################,') if DROPDOWNS['fit_ttvs'].value == 'no': fwrite_settings('fit_ttvs'+',False') elif DROPDOWNS['fit_ttvs'].value == 'yes': fwrite_settings('fit_ttvs'+',True') fwrite_settings('###############################################################################,') fwrite_settings('# Stellar grid per object and instrument,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('host_grid_'+inst+','+DROPDOWNS['host_grid_'+inst].value) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: fwrite_settings(companion+'_grid_'+inst+','+DROPDOWNS[companion+'_grid_'+inst].value) 
fwrite_settings('###############################################################################,') fwrite_settings('# Stellar shape per object and instrument,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('host_shape_'+inst+','+DROPDOWNS['host_shape_'+inst].value) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: fwrite_settings(companion+'_shape_'+inst+','+DROPDOWNS[companion+'_shape_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Flux weighted RVs per object and instrument,') fwrite_settings('# ("Yes" for Rossiter-McLaughlin effect),') fwrite_settings('###############################################################################,') for inst in INPUT['inst_rv']: for companion in INPUT['companions_rv']: fwrite_settings(companion+'_flux_weighted_'+inst+','+DROPDOWNS[companion+'_flux_weighted_'+inst].value) BUTTONS['2c'].style.button_color = 'lightgreen' print('Done.') INPUT['show_step_3'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) BUTTONS['2c'].on_click(create_settings_file) # # 3. 
parameters # + if 'show_step_3' in INPUT and INPUT['show_step_3'] == True: #::: placeholder placeholder = widgets.Label(value='', visible=False, layout=layout) #::: helper function def add_row(key, label, hbox_list, median=0, lerr=0, uerr=0, transform='trunc_normal * 5', fit_value=False): INPUT[key+'_median'] = widgets.FloatText(value=median, placeholder='NaN', layout=layout_textbox) INPUT[key+'_lerr'] = widgets.FloatText(value=lerr, placeholder='NaN', layout=layout_textbox) INPUT[key+'_uerr'] = widgets.FloatText(value=uerr, placeholder='NaN', layout=layout_textbox) INPUT[key+'_bounds_type'] = widgets.Dropdown(options=['uniform', 'uniform * 5', 'trunc_normal', 'trunc_normal * 5'], value=transform, layout=layout) INPUT[key+'_fit'] = widgets.Checkbox(value=fit_value, description='fit?', layout=layout_checkbox) buf = placeholder if key in [ companion+'_rsuma' for companion in INPUT['companions_all'] ]: INPUT[key+'_input_type'] = widgets.Dropdown(options=['(R_comp + R_host) / a', 'R_host / a', 'a / R_host'], layout=layout) buf = INPUT[key+'_input_type'] elif key in [ companion+'_cosi' for companion in INPUT['companions_all'] ]: INPUT[key+'_input_type'] = widgets.Dropdown(options=['cos(i)', 'i (degree)', 'i (rad)'], layout=layout) buf = INPUT[key+'_input_type'] hbox_list.append( widgets.HBox([widgets.Label(value=label, layout=layout), INPUT[key+'_median'], widgets.Label(value="-"), INPUT[key+'_lerr'], widgets.Label(value="+"), INPUT[key+'_uerr'], buf, INPUT[key+'_bounds_type'], INPUT[key+'_fit']]) ) #::: start display(Markdown('### Initial guess and error bars')) display(Markdown('These values will be converted into either uniform or truncated normal priors (with physical boundaries). 
The errors can be blown up by a factor of 5.')) display(Markdown('#### Astrophysical params per companion')) vbox_list = [] for companion in INPUT['companions_all']: # display(Markdown('##### Companion '+companion)) hbox_list = [] add_row(companion+'_rsuma', 'Radii & semi-major axis:', hbox_list) add_row(companion+'_rr', '$R_'+companion+' / R_\star$:', hbox_list) add_row(companion+'_cosi', 'Inclination:', hbox_list) add_row(companion+'_epoch', 'Epoch (d):', hbox_list) add_row(companion+'_period', 'Period (d):', hbox_list) if companion in INPUT['companions_rv']: add_row(companion+'_K', 'K (km/s):', hbox_list) add_row(companion+'_f_c', '$\sqrt{e} \cos{\omega}$:', hbox_list) add_row(companion+'_f_s', '$\sqrt{e} \sin{\omega}$:', hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) tab = widgets.Tab(children=vbox_list) for i, comp in enumerate(INPUT['companions_all']): tab.set_title(i, 'Companion '+comp) display(tab) # else: # print('Complete previous steps first.') # + if 'show_step_3' in INPUT and INPUT['show_step_3'] == True: display(Markdown('### Advanced params')) vbox_list = [] #::: Dilution per instrument hbox_list = [] for inst in INPUT['inst_phot']: add_row('dil_'+inst, 'Dilution '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Limb darkening per object and instrument hbox_list = [] for inst in INPUT['inst_phot']: if DROPDOWNS['host_ld_law_'+inst].value=='None': pass elif DROPDOWNS['host_ld_law_'+inst].value=='Linear': add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS['host_ld_law_'+inst].value=='Quadratic': add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row('host_ldc_q2_'+inst, 'host LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS['host_ld_law_'+inst].value=='Sing': add_row('host_ldc_q1_'+inst, 'host LD 
q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row('host_ldc_q2_'+inst, 'host LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row('host_ldc_q3_'+inst, 'host LD q3 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_phot']: if DROPDOWNS[companion+'_ld_law_'+inst].value=='None': pass elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Linear': add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Quadratic': add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row(companion+'_ldc_q2_'+inst, companion+' LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Sing': add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row(companion+'_ldc_q2_'+inst, companion+' LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row(companion+'_ldc_q3_'+inst, companion+' LD q3 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) vbox_list.append( widgets.VBox(hbox_list) ) #::: Surface brightness ratio per system and instrument hbox_list = [] for inst in INPUT['inst_all']: for companion in INPUT['companions_all']: add_row(companion+'_sbratio_'+inst, companion+' sbratio '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Geometric albedo per object and instrument hbox_list = [] for inst in INPUT['inst_all']: add_row('host_geom_albedo_'+inst, 'host geom. alb. 
'+inst, hbox_list) for companion in INPUT['companions_all']: add_row(companion+'_geom_albedo_'+inst, companion+' geom. alb. '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Gravity darkening per object and instrument hbox_list = [] for inst in INPUT['inst_all']: add_row('host_gdc_'+inst, 'host grav. dark. '+inst, hbox_list) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_all']: add_row(companion+'_gdc_'+inst, companion+' grav. dark. '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Stellar spots per object and instrument hbox_list = [] for inst in INPUT['inst_all']: if len(DROPDOWNS['host_N_spots_'+inst].value): N_spots = int(DROPDOWNS['host_N_spots_'+inst].value) for i in range(1,N_spots+1): add_row('host_spot_'+str(i)+'_lat_'+inst, 'host spot '+str(i)+' lat. '+inst+' (deg)', hbox_list) add_row('host_spot_'+str(i)+'_long_'+inst, 'host spot '+str(i)+' long. '+inst+' (deg)', hbox_list) add_row('host_spot_'+str(i)+'_size_'+inst, 'host spot '+str(i)+' size '+inst+' (deg)', hbox_list) add_row('host_spot_'+str(i)+'_brightness_'+inst,'host spot '+str(i)+' brightness '+inst, hbox_list) # To keep the GUI simplistic, spots on companions are only available by manually editing the params.csv and settings.csv files # if DROPDOWNS['planet_or_EB'].value == 'EBs': # for companion in INPUT['companions_all']: # if len(DROPDOWNS[companion+'_N_spots_'+inst].value): # N_spots = int(DROPDOWNS[companion+'_N_spots_'+inst].value) # for i in range(1,N_spots+1): # add_row(companion+'_spot_'+str(i)+'_lat_'+inst, companion+' spot '+str(i)+' lat. '+inst+' (deg)', hbox_list) # add_row(companion+'_spot_'+str(i)+'_long_'+inst, companion+' spot '+str(i)+' long. 
'+inst+' (deg)', hbox_list) # add_row(companion+'_spot_'+str(i)+'_size_'+inst, companion+' spot '+str(i)+' size '+inst+' (deg)', hbox_list) # add_row(companion+'_spot_'+str(i)+'_brightness_'+inst, companion+' spot '+str(i)+' brightness '+inst, hbox_list) if len(hbox_list)==0: pass #hbox_list.append(widgets.Label(value='N_spots was set to "None" for all objects and instruments.')) vbox_list.append( widgets.VBox(hbox_list) ) #::: Flares hbox_list = [] if len(DROPDOWNS['N_flares'].value): N_flares = int(DROPDOWNS['N_flares'].value) for i in range(1,N_flares+1): add_row('flare_tpeak_'+str(i), 'Flare tpeak '+str(i), hbox_list) add_row('flare_fwhm_'+str(i), 'Flare fwhm '+str(i), hbox_list) add_row('flare_ampl_'+str(i), 'Flare ampl '+str(i), hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: TTV per transit hbox_list = [] if (DROPDOWNS['fit_ttvs'].value)=='yes': for companion in INPUT['companions_all']: add_row(companion+'_ttv_per_transit', 'TTV per transit', hbox_list, median=0, lerr=0.00347222, uerr=0.00347222, transform='uniform', fit_value=True) vbox_list.append( widgets.VBox(hbox_list) ) #::: Errors per instrument hbox_list = [] for inst in INPUT['inst_phot']: if DROPDOWNS['error_flux_'+inst].value == 'sample': add_row('ln_err_flux_'+inst, 'ln err flux '+inst, hbox_list, median=-7, lerr=8, uerr=7, transform='uniform', fit_value=True) else: pass #hbox_list.append(widgets.Label(value='Not applicable, error sampling was set to "hybrid".')) for inst in INPUT['inst_rv']: if DROPDOWNS['error_rv_'+inst].value == 'sample': add_row('ln_jitter_rv_'+inst, 'ln jitter rv '+inst, hbox_list, median=-3, lerr=12, uerr=3, transform='uniform', fit_value=True) else: pass #hbox_list.append(widgets.Label(value='Not applicable, error sampling was set to "hybrid".')) vbox_list.append( widgets.VBox(hbox_list) ) #::: Baselines per instrument hbox_list = [] for inst in INPUT['inst_all']: if inst in INPUT['inst_phot']: key = 'flux' elif inst in INPUT['inst_rv']: key = 'rv' if 
DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_Matern32': add_row('baseline_gp_matern32_lnsigma_'+key+'_'+inst, 'baseline gp Matern32 lnsigma '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_matern32_lnrho_'+key+'_'+inst, 'baseline gp Matern32 lnrho '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_SHO': add_row('baseline_gp_sho_lnS0_'+key+'_'+inst, 'baseline gp SHO lnS0 '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_sho_lnQ_'+key+'_'+inst, 'baseline gp SHO lnQ '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_sho_lnomega0_'+key+'_'+inst, 'baseline gp SHO lnomega0 '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_real': add_row('baseline_gp_real_lna_'+key+'_'+inst, 'baseline gp real lna '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_real_lnc_'+key+'_'+inst, 'baseline gp real lnc '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_complex': add_row('baseline_gp_complex_lna_'+key+'_'+inst, 'baseline gp complex lna '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_complex_lnc_'+key+'_'+inst, 'baseline gp complex lnc '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_complex_lnb_'+key+'_'+inst, 'baseline gp complex lnb '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_complex_lnd_'+key+'_'+inst, 'baseline gp complex lnd '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', 
fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_offset': add_row('baseline_offset_'+key+'_'+inst, 'baseline offset '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_linear': add_row('baseline_offset_'+key+'_'+inst, 'baseline offset '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True) add_row('baseline_slope_'+key+'_'+inst, 'baseline slope '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True) vbox_list.append( widgets.VBox(hbox_list) ) #::: accordion accordion = widgets.Accordion(children=vbox_list) accordion.set_title(0, 'Dilution') accordion.set_title(1, 'Limb darkening') accordion.set_title(2, 'Surface brightness ratio') accordion.set_title(3, 'Geometric albedo') accordion.set_title(4, 'Gravity darkening') accordion.set_title(5, 'Stellar spots') accordion.set_title(6, 'Flares') accordion.set_title(7, 'TTVs') accordion.set_title(8, 'Errors & jitter') accordion.set_title(9, 'Baselines') display(accordion) # - if 'show_step_3' in INPUT and INPUT['show_step_3'] == True: nan_fields = False button_create_params_file = widgets.Button(description='Create params.csv', button_style='') checkbox_overwrite_params_file = widgets.Checkbox(description='Overwrite old params.csv (if existing)', value=False) hbox_params_file = widgets.HBox([button_create_params_file, checkbox_overwrite_params_file]) display(hbox_params_file) def create_params_file(change): clear_output() display(hbox_params_file) print('Calculating... this might take a few seconds. Please be patient, you will get notified once everything is completed.') go_ahead = True if 'datadir' not in INPUT: warnings.warn('No allesfitter woking directory selected yet. 
Please go back to step 1) and fill in all fields.') go_ahead = False if os.path.exists(os.path.join(INPUT['datadir'],'params.csv')) and (checkbox_overwrite_params_file.value==False): warnings.warn('The selected working directory '+os.path.join(INPUT['datadir'],'params.csv')+' already exists. To proceed, give permission to overwrite it.') go_ahead = False if go_ahead: INPUT['fname_params'] = os.path.join(INPUT['datadir'], 'params.csv') with open(INPUT['fname_params'], 'w+') as f: f.write('#name,value,fit,bounds,label,unit\n') def get_median_and_error_strings(text_median, text_lerr, text_uerr): if (text_median.value == ''): median = 'NaN' nan_fields = True else: median = text_median.value if (text_lerr.value == '') or (text_uerr.value == ''): err = 'NaN' nan_fields = True else: err = str( 5.* np.max( [float(text_lerr.value), float(text_uerr.value)] ) ) median, err, _ = round_txt_separately( float(median), float(err), float(err) ) return median, err #:::: astrophysical parameters per system for companion in INPUT['companions_all']: fwrite_params_line('#companion '+companion+' astrophysical params,,,,,') #::: rr fwrite_params(companion+'_rr', '$R_'+companion+' / R_\star$', '', [0,1]) #::: rsuma if INPUT[companion+'_rsuma_input_type'].value=='(R_comp + R_host) / a': pass elif INPUT[companion+'_rsuma_input_type'].value=='R_host / a': Rstar_over_a = [ float(INPUT[companion+'_rsuma_median'].value), float(INPUT[companion+'_rsuma_lerr'].value), float(INPUT[companion+'_rsuma_uerr'].value) ] Rp_over_Rstar = [ float(INPUT[companion+'_rr_median'].value), float(INPUT[companion+'_rr_lerr'].value), float(INPUT[companion+'_rr_uerr'].value) ] INPUT[companion+'_rsuma_median'].value, INPUT[companion+'_rsuma_lerr'].value, INPUT[companion+'_rsuma_uerr'].value \ = get_Rsuma_from_Rstar_over_a(Rstar_over_a, Rp_over_Rstar) INPUT[companion+'_rsuma_input_type'].value = '(R_comp + R_host) / a' elif INPUT[companion+'_rsuma_input_type'].value=='a / R_host': a_over_Rstar = [ 
float(INPUT[companion+'_rsuma_median'].value), float(INPUT[companion+'_rsuma_lerr'].value), float(INPUT[companion+'_rsuma_uerr'].value) ] Rp_over_Rstar = [ float(INPUT[companion+'_rr_median'].value), float(INPUT[companion+'_rr_lerr'].value), float(INPUT[companion+'_rr_uerr'].value) ] INPUT[companion+'_rsuma_median'].value, INPUT[companion+'_rsuma_lerr'].value, INPUT[companion+'_rsuma_uerr'].value \ = get_Rsuma_from_a_over_Rstar(a_over_Rstar, Rp_over_Rstar) INPUT[companion+'_rsuma_input_type'].value = '(R_comp + R_host) / a' else: raise ValueError('Oops, something went wrong.') fwrite_params(companion+'_rsuma', '$(R_\star + R_'+companion+') / a_'+companion+'$', '', [0,1]) #::: cosi if INPUT[companion+'_cosi_input_type'].value=='cos(i)': pass elif INPUT[companion+'_cosi_input_type'].value=='i (degree)': incl = [ float(INPUT[companion+'_cosi_median'].value), float(INPUT[companion+'_cosi_lerr'].value), float(INPUT[companion+'_cosi_uerr'].value) ] INPUT[companion+'_cosi_median'].value, INPUT[companion+'_cosi_lerr'].value, INPUT[companion+'_cosi_uerr'].value \ = get_cosi_from_i(incl) INPUT[companion+'_cosi_input_type'].value = 'cos(i)' elif INPUT[companion+'_cosi_input_type'].value=='i (rad)': incl = [ float(INPUT[companion+'_cosi_median'].value)/180.*np.pi, float(INPUT[companion+'_cosi_lerr'].value)/180.*np.pi, float(INPUT[companion+'_cosi_uerr'].value)/180.*np.pi ] INPUT[companion+'_cosi_median'].value, INPUT[companion+'_cosi_lerr'].value, INPUT[companion+'_cosi_uerr'].value \ = get_cosi_from_i(incl) INPUT[companion+'_cosi_input_type'].value = 'cos(i)' fwrite_params(companion+'_cosi', '$\cos{i_'+companion+'}$', '', [0,1]) #::: epoch fwrite_params(companion+'_epoch', '$T_{0;'+companion+'}$', '$\mathrm{BJD}$', [-1e12,1e12]) #::: period fwrite_params(companion+'_period', '$P_'+companion+'$', '$\mathrm{d}$', [-1e12,1e12]) #::: RV semi-amplitude if companion in INPUT['companions_rv']: fwrite_params(companion+'_K', '$K_'+companion+'$', '$\mathrm{km/s}$', [-1e12,1e12]) #::: 
eccentricity f_c fwrite_params(companion+'_f_c', '$\sqrt{e_'+companion+'} \cos{\omega_'+companion+'}$', '', [-1,1]) #::: eccentricity f_s fwrite_params(companion+'_f_s', '$\sqrt{e_'+companion+'} \sin{\omega_'+companion+'}$', '', [-1,1]) #::: dilution per instrument if len(INPUT['inst_phot']): fwrite_params_line('#dilution per instrument,,,,,') for inst in INPUT['inst_phot']: fwrite_params('dil_'+inst, '$D_\mathrm{0; '+inst+'}$', '', [0,1]) #fwrite_params('dil_'+inst+',0,0,trunc_normal 0 1 0 0,$D_\mathrm{0; '+inst+'}$,') #::: limb darkening coefficients per instrument if len(INPUT['inst_phot']): fwrite_params_line('#limb darkening coefficients per instrument,,,,,') for inst in INPUT['inst_phot']: #::: host if DROPDOWNS['host_ld_law_'+inst].value=='None': pass elif DROPDOWNS['host_ld_law_'+inst].value=='Linear': fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS['host_ld_law_'+inst].value=='Quadratic': fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params('host_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS['host_ld_law_'+inst].value=='Sing': fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params('host_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params('host_ldc_q3_'+inst, '$q_{3; \mathrm{'+inst+'}}$', '', [0,1]) #::: companion (if EB) if DROPDOWNS['planet_or_EB']=='EBs': if DROPDOWNS[companion+'_ld_law_'+inst].value=='None': pass elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Linear': fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Quadratic': fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params(companion+'_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Sing': fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', 
'', [0,1]) fwrite_params(companion+'_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params(companion+'_ldc_q3_'+inst, '$q_{3; \mathrm{'+inst+'}}$', '', [0,1]) #::: brightness ratio per system and instrument if len(INPUT['inst_all']): fwrite_params_line('#surface brightness per instrument and companion,,,,,') for companion in INPUT['companions_all']: for inst in INPUT['inst_all']: fwrite_params(companion+'_sbratio_'+inst, '$J_{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: geometric albedo per system and instrument if len(INPUT['inst_all']): fwrite_params_line('#albedo per instrument and companion,,,,,') for inst in INPUT['inst_all']: fwrite_params('host_geom_albedo_'+inst, '$A_{\mathrm{geom}; host; \mathrm{'+inst+'}}$', '', [0,1]) for companion in INPUT['companions_all']: for inst in INPUT['inst_all']: fwrite_params(companion+'_geom_albedo_'+inst, '$A_{\mathrm{geom}; '+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: gravity darkening per object and instrument if len(INPUT['inst_all']): fwrite_params_line('#gravity darkening per instrument and companion,,,,,') for inst in INPUT['inst_all']: #::: host fwrite_params('host_gdc_'+inst, '$Grav. dark._{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: companion (if EB) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_all']: fwrite_params(companion+'_sbratio_'+inst, '$Grav. dark._{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: spots per object and instrument if len(INPUT['inst_all']): fwrite_params_line('#spots per instrument and companion,,,,,') for inst in INPUT['inst_all']: if len(DROPDOWNS['host_N_spots_'+inst].value): N_spots = int(DROPDOWNS['host_N_spots_'+inst].value) for i in range(1,N_spots+1): #::: host fwrite_params('host_spot_'+str(i)+'_long_'+inst, '$\mathrm{host: spot '+str(i)+' long. '+inst+'}$', '\mathrm{deg}', [0,360]) fwrite_params('host_spot_'+str(i)+'_lat_'+inst, '$\mathrm{host: spot '+str(i)+' lat. 
'+inst+'}$', '\mathrm{deg}', [-90,90]) fwrite_params('host_spot_'+str(i)+'_size_'+inst, '$\mathrm{host: spot '+str(i)+' size '+inst+'}$', '\mathrm{deg}', [0,30]) fwrite_params('host_spot_'+str(i)+'_brightness_'+inst, '$\mathrm{host: spot '+str(i)+' brightness '+inst+'}$', '', [0,1]) #::: companion (if EB) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_all']: if len(DROPDOWNS[companion+'_N_spots_'+inst].value): N_spots = int(DROPDOWNS[companion+'_N_spots_'+inst].value) fwrite_params(companion+'_spot_'+str(i)+'_long_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' long. '+inst+'}$', '\mathrm{deg}', [0,360]) fwrite_params(companion+'_spot_'+str(i)+'_lat_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' lat. '+inst+'}$', '\mathrm{deg}', [-90,90]) fwrite_params(companion+'_spot_'+str(i)+'_size_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' size '+inst+'}$', '\mathrm{deg}', [0,30]) fwrite_params(companion+'_spot_'+str(i)+'_brightness_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' brightness '+inst+'}$', '', [0,1]) #::: flares if len(DROPDOWNS['N_flares'].value): fwrite_params_line('#flares,,,,,') N_flares = int(DROPDOWNS['N_flares'].value) for i in range(1,N_flares+1): fwrite_params('flare_tpeak_'+str(i), '$t_\mathrm{peak; flare '+str(i)+'}$', '$\mathrm{BJD}$', [-1e12,1e12]) fwrite_params('flare_ampl_'+str(i), '$A_\mathrm{flare '+str(i)+'}$', '$\mathrm{rel. 
flux.}$', [-1e12,1e12]) fwrite_params('flare_fwhm_'+str(i), '$FWHM_\mathrm{flare '+str(i)+'}$', '$\mathrm{BJD}$', [-1e12,1e12]) #::: TTV per instrument if (DROPDOWNS['fit_ttvs'].value=='yes'): fwrite_params_line('#TTV per transit,,,,,') warnings.warn('TTV priors in params.csv will not be set until you also complete step 4 (adding the data files).') # for inst in INPUT['inst_phot']: # fwrite_params('ttv_'+inst, '$\mathrm{TTV_'+inst+'}$', '$\mathrm{d}$', [-1e12,1e12]) #::: errors and baselines - keep track of rows INPUT['N_last_rows'] = 0 #::: errors per instrument if any( [ 'sample' in DROPDOWNS['error_flux_'+inst].value for inst in INPUT['inst_phot'] ] ) \ or any( [ 'sample' in DROPDOWNS['error_rv_'+inst].value for inst in INPUT['inst_rv'] ] ): fwrite_params_line('#errors per instrument,') INPUT['N_last_rows'] += 1 for inst in INPUT['inst_phot']: if 'hybrid' not in DROPDOWNS['error_flux_'+inst].value: fwrite_params('ln_err_flux_'+inst, '$\ln{\sigma_\mathrm{'+inst+'}}$', '$\ln{ \mathrm{rel. 
flux.} }$', [-15,0]) INPUT['N_last_rows'] += 1 for inst in INPUT['inst_rv']: if 'hybrid' not in DROPDOWNS['error_rv_'+inst].value: fwrite_params('ln_jitter_rv_'+inst, '$\ln{\sigma_\mathrm{jitter; '+inst+'}}$', '$\ln{ \mathrm{km/s} }$', [-15,0]) INPUT['N_last_rows'] += 1 #::: baseline if any( [ 'sample' in DROPDOWNS['baseline_flux_'+inst].value for inst in INPUT['inst_phot'] ] ) \ or any( [ 'sample' in DROPDOWNS['baseline_rv_'+inst].value for inst in INPUT['inst_rv'] ] ): fwrite_params_line('#baseline per instrument,') INPUT['N_last_rows'] += 1 for inst in INPUT['inst_all']: if inst in INPUT['inst_phot']: key = 'flux' elif inst in INPUT['inst_rv']: key = 'rv' if DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_Matern32': fwrite_params('baseline_gp_matern32_lnsigma_'+key+'_'+inst, '$\mathrm{gp: \ln{\sigma} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_matern32_lnrho_'+key+'_'+inst, '$\mathrm{gp: \ln{\\rho} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 2 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_SHO': fwrite_params('baseline_gp_sho_lnS0_'+key+'_'+inst, '$\mathrm{gp: \ln{S_0} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_sho_lnQ_'+key+'_'+inst, '$\mathrm{gp: \ln{Q} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_sho_lnomega0_'+key+'_'+inst, '$\mathrm{gp: \ln{\omega_0} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 3 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_real': fwrite_params('baseline_gp_real_lna_'+key+'_'+inst, '$\mathrm{gp: \ln{a} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnc_'+key+'_'+inst, '$\mathrm{gp: \ln{c} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 2 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_complex': fwrite_params('baseline_gp_real_lna_'+key+'_'+inst, '$\mathrm{gp: \ln{a} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnc_'+key+'_'+inst, '$\mathrm{gp: \ln{c} ('+inst+')}$', '', [-15,15]) 
fwrite_params('baseline_gp_real_lnb_'+key+'_'+inst, '$\mathrm{gp: \ln{b} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnd_'+key+'_'+inst, '$\mathrm{gp: \ln{d} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 4 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_offset': fwrite_params('baseline_offset_flux_'+inst, 'offset ('+inst+')', '', [-1e12,1e12]) INPUT['N_last_rows'] += 1 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_linear': fwrite_params('baseline_a_flux_'+inst, 'lin. a ('+inst+')', '', [-1e12,1e12]) fwrite_params('baseline_b_flux_'+inst, 'lin. b ('+inst+')', '', [-1e12,1e12]) INPUT['N_last_rows'] += 2 #::: continue button_create_params_file.style.button_color = 'lightgreen' print('Done.') INPUT['show_step_4'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) if nan_fields: warnings.warn('You left some fields empty. These will be set NaN in params.csv. Make sure to fix this manually later.') button_create_params_file.on_click(create_params_file) # # 4. data files # Please put all data files into the selected directory, and click the button to confirm. # + if 'show_step_4' in INPUT and INPUT['show_step_4']==True: BUTTONS['confirm_data_files'] = widgets.Button(description='Confirm', button_style='') display(BUTTONS['confirm_data_files']) def check_data_files(change): clear_output() display(BUTTONS['confirm_data_files']) all_data_exists = True for inst in INPUT['inst_all']: if not os.path.exists( os.path.join(INPUT['datadir'], inst+'.csv') ): warnings.warn('Data file '+os.path.join(INPUT['datadir'], inst+'.csv')+' does not exist. 
Please include the data file into the directory and then repeat this step.') all_data_exists = False if all_data_exists: BUTTONS['confirm_data_files'].style.button_color = 'lightgreen' INPUT['show_step_5'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) BUTTONS['confirm_data_files'].on_click(check_data_files) # else: # print('Complete previous steps first.') # + ############################################################################ #::: time to include those TTV lines into the folder! ############################################################################ if 'show_step_5' in INPUT and INPUT['show_step_5']==True and DROPDOWNS['fit_ttvs'].value=='yes': from allesfitter import config config.init(INPUT['datadir']) new_lines = '' for companion in INPUT['companions_all']: N_observed_transits = len(config.BASEMENT.data[companion+'_tmid_observed_transits']) for i in range(N_observed_transits): string = fwrite_params(companion+'_ttv_per_transit', 'TTV$_\mathrm{'+str(i+1)+'}}$', '$\mathrm{d}$', [-15,15], return_str=True) + '\n' string = string.replace('per_transit', 'transit_'+str(i+1)) new_lines += string with open(INPUT['fname_params'], "r") as f: contents = f.readlines() for i, line in enumerate(contents): line = line.rstrip() # remove '\n' at end of line if line == '#TTV per transit,,,,,': index = i+1 contents.insert(index, new_lines) with open(INPUT['fname_params'], "w") as f: contents = "".join(contents) f.write(contents) print('TTVs per transit were added to params.csv.') print('params.csv and settings.csv are now ready to use.') # - # # 5. 
# check

# Step 5: plot the initial guess. Only runs after step 5 was unlocked by the
# previous cell (INPUT['show_step_5'] flag).
if 'show_step_5' in INPUT and INPUT['show_step_5']==True:
    from allesfitter.general_output import show_initial_guess
    import matplotlib.pyplot as plt
    # Build the initial-guess figures from the data directory and show them inline.
    fig_list = show_initial_guess(INPUT['datadir'], do_logprint=False, return_figs=True)
    for fig in fig_list:
        plt.show(fig)

# +
if 'show_step_5' in INPUT and INPUT['show_step_5']==True:
    # Confirmation button: the user visually approves the initial-guess plots.
    BUTTONS['confirm_plots'] = widgets.Button(description='Looks good', button_style='')
    display(BUTTONS['confirm_plots'])

    def check_plots(change):
        """Button callback: mark the plots as approved, unlock step 6 and
        re-execute the notebook cells below so the next step appears."""
        clear_output()
        display(BUTTONS['confirm_plots'])
        BUTTONS['confirm_plots'].style.button_color = 'lightgreen'  # green = confirmed
        INPUT['show_step_6'] = True
        # Re-run all following cells so the newly unlocked step renders.
        display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))

    BUTTONS['confirm_plots'].on_click(check_plots)
# else:
#     print('Complete previous steps first.')
# -

# # 6. tighter priors on errors and baselines
# This will take a couple of minutes. Make sure your initial guess above is very good. This will subtract the model from the data and evaluate the remaining noise patterns to estimate errors, jitter and GP baselines.

# +
if 'show_step_6' in INPUT and INPUT['show_step_6']==True:

    def estimate_tighter_priors(change):
        """Button callback: run a noise-estimation fit, then replace the default
        error/baseline rows in params.csv with data-driven truncated-normal priors.

        Side effects: writes to params.csv via fwrite_params/fwrite_params_line,
        reads the summary CSVs produced by estimate_noise, unlocks step 7.
        """
        print('\nEstimating errors and baselines... this will take a couple of minutes. Please be patient, you will get notified once everything is completed.\n')

        #::: run MCMC fit to estimate errors and baselines
        estimate_noise(INPUT['datadir'])

        #::: delete the rows containing the default (zero) errors and baselines from the params.csv file
        # N_last_rows was tracked while params.csv was first written (step 3).
        clean_up_csv( os.path.join( INPUT['datadir'], 'params.csv' ), N_last_rows=INPUT['N_last_rows'] )

        #::: write new rows into params.csv
        #::: errors
        fwrite_params_line('#errors per instrument,')
        for i, inst in enumerate(INPUT['inst_phot']):
            #::: read in the summary file
            summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_phot.csv' )
            priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
            # np.genfromtxt returns scalars for single-row files; force 1-d arrays.
            priors = {}
            for key in priors2.dtype.names:
                priors[key] = np.atleast_1d(priors2[key])
            median = priors['ln_yerr_median'][i]
            # prior width: 5x the larger of the lower/upper credible-interval bounds
            err = 5.*np.max([ float(priors['ln_yerr_ll'][i]), float(priors['ln_yerr_ul'][i]) ])
            median, err, _ = round_txt_separately(median,err,err)
            fwrite_params_line('ln_err_flux_'+inst+','+median+',1,trunc_normal -15 0 '+median+' '+err+',$\ln{\sigma_\mathrm{'+inst+'}}$,')
        for i, inst in enumerate(INPUT['inst_rv']):
            #::: read in the summary file
            summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_rv.csv' )
            priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
            priors = {}
            for key in priors2.dtype.names:
                priors[key] = np.atleast_1d(priors2[key])
            median = priors['ln_yerr_median'][i]
            err = 5.*np.max([ float(priors['ln_yerr_ll'][i]), float(priors['ln_yerr_ul'][i]) ])
            median, err, _ = round_txt_separately(median,err,err)
            fwrite_params('ln_jitter_rv_'+inst+','+median+',1,trunc_normal -15 0 '+median+' '+err+',$\ln{\sigma_\mathrm{jitter; '+inst+'}}$,')

        #::: write new rows into params.csv
        #::: baselines
        fwrite_params_line('#baseline per instrument,')
        for i, inst in enumerate(INPUT['inst_phot']):
            #::: read in the summary file
            summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_phot.csv' )
            priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None)
            priors = {}
            for key in priors2.dtype.names:
                priors[key] = np.atleast_1d(priors2[key])
            # GP Matern-3/2 amplitude (ln sigma) prior
            median = priors['gp_ln_sigma_median'][i]
            err = 5.*np.max([ float(priors['gp_ln_sigma_ll'][i]), float(priors['gp_ln_sigma_ul'][i]) ])
            median, err, _ = round_txt_separately(median,err,err)
            fwrite_params_line('baseline_gp1_flux_'+inst+','+median+',1,trunc_normal -15 15 '+median+' '+err+',$\mathrm{gp: \ln{\sigma} ('+inst+')}$,')
            # GP Matern-3/2 timescale (ln rho) prior
            median = priors['gp_ln_rho_median'][i]
            err = 5.*np.max([ float(priors['gp_ln_rho_ll'][i]), float(priors['gp_ln_rho_ul'][i]) ])
            median, err, _ = round_txt_separately(median,err,err)
            fwrite_params_line('baseline_gp2_flux_'+inst+','+median+',1,trunc_normal -15 15 '+median+' '+err+',$\mathrm{gp: \ln{\\rho} ('+inst+')}$,')

        #::: confirm
        BUTTONS['estimate_tighter_priors'].style.button_color = 'lightgreen'
        print('Done.')
        INPUT['show_step_7'] = True
        display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))

    def skip(change):
        """Button callback: skip the prior-tightening step and unlock step 7 directly."""
        BUTTONS['skip'].style.button_color = 'lightgreen'
        print('Skipped.')
        INPUT['show_step_7'] = True
        display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())'))
# else:
#     print('Complete previous steps first.')
# -

# Render the two step-6 buttons and wire them to the callbacks defined above.
if 'show_step_6' in INPUT and INPUT['show_step_6']==True:
    BUTTONS['estimate_tighter_priors'] = widgets.Button(value=False, description='Estimate tighter priors')
    BUTTONS['skip'] = widgets.Button(value=False, description='Skip')
    display( widgets.HBox([BUTTONS['estimate_tighter_priors'],BUTTONS['skip']]))
    BUTTONS['estimate_tighter_priors'].on_click(estimate_tighter_priors)
    BUTTONS['skip'].on_click(skip)

# # 7.
# run the fit

# +
# Step 7: launch the actual fit (Nested Sampling or MCMC) via allesfitter.
if 'show_step_7' in INPUT and INPUT['show_step_7']==True:
    # `reload` moved between modules across Python versions; try both locations.
    try:
        from importlib import reload
    except:
        pass
    try:
        from imp import reload
    except:
        pass
    import allesfitter
    # Reload so code edits made while the notebook is open take effect.
    reload(allesfitter)

    button_run_ns_fit = widgets.Button(description='Run NS fit', button_style='')
    button_run_mcmc_fit = widgets.Button(description='Run MCMC fit', button_style='')
    hbox = widgets.HBox([button_run_ns_fit, button_run_mcmc_fit])
    display(hbox)

    def run_ns_fit(change):
        """Button callback: run a Nested Sampling fit and produce its output."""
        button_run_ns_fit.style.button_color = 'lightgreen'
        allesfitter.ns_fit(INPUT['datadir'])
        allesfitter.ns_output(INPUT['datadir'])

    def run_mcmc_fit(change):
        """Button callback: run an MCMC fit and produce its output."""
        button_run_mcmc_fit.style.button_color = 'lightgreen'
        allesfitter.mcmc_fit(INPUT['datadir'])
        allesfitter.mcmc_output(INPUT['datadir'])

    button_run_ns_fit.on_click(run_ns_fit)
    button_run_mcmc_fit.on_click(run_mcmc_fit)
# else:
#     print('Complete previous steps first.')
# -
allesfitter/GUI.ipynb
-- --- -- jupyter: -- jupytext: -- text_representation: -- extension: .hs -- format_name: light -- format_version: '1.5' -- jupytext_version: 1.14.4 -- kernelspec: -- display_name: Haskell -- language: haskell -- name: haskell -- --- -- # 7. Higher-order functions -- ## 7.1 Basic concepts -- ## 7.2 Processing lists -- ## 7.3 The `foldr` function -- ## 7.4 The `foldl` function -- ## 7.5 The composition operator -- ## 7.6 Binary string transmitter -- ## 7.7 Voting algorithms -- ## 7.8 Chapter remarks -- ## 7.9 Exercises
07 Higher-order functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysis of neural tangent kernel performance
#
# Given the pre-generated neural tangent kernel (NTK) output from the main code (by default in the directory `'./kernel_output'`), we examine the classification performance on the MNIST dataset of the exact, sparsified, and diagonal NTKs. Additionally, for the quantum algorithms of sparsified and diagonal NTKs, the condition number and the number of measurements required for post-selection/readout are verified to be bounded by $O(\log n)$.

# +
import numpy as np
import glob

from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'svg')

import matplotlib
import seaborn as sns
sns.set(font_scale=1.3)
sns.set_style("whitegrid", {"axes.facecolor": ".97"})
import matplotlib.pyplot as plt
# -

# ## Sparsity pattern
# First, a sparsity pattern is constructed in $\tilde O(n)$ time. In the proposed quantum algorithm, this is performed once when the data is stored in a binary QRAM data structure (also in $\tilde O(n)$ time). Given a sparsity pattern with at most $s = O(\log n)$ nonzero elements in any row or column, multiple neural networks (of different architectures) can be efficiently trained in logarithmic time using the same sparsity pattern.

# +
def get_target_sparsity(m):
    """
    Get expected matrix sparsity, chosen to be O(log n).

    `m` is the (square) n x n NTK matrix; only its shape is used.
    """
    return np.log(m.shape[0])

def block_diagonal(m):
    """
    Prepare a block diagonal matrix [[1, 0], [0, 1]] corresponding to the two data classes in the NTK matrix.

    Returns (class_0, class_1) masks. Assumes a balanced ordering: the first
    half of the rows belong to class 0 and the second half to class 1
    (consistent with how the NTK output files are generated).
    """
    class_size = m.shape[0]//2
    ones_class = np.ones((class_size, class_size))
    zeros_class = np.zeros((class_size, class_size))
    # class_0 masks the upper-left block, class_1 the lower-right block
    class_0 = np.block([[ones_class, zeros_class], [zeros_class, zeros_class]])
    class_1 = np.block([[zeros_class, zeros_class], [zeros_class, ones_class]])
    return class_0, class_1

def get_sparsity_pattern(m):
    """
    Prepare in O(n log n) time a sparsity pattern over the n x n matrix with a pseudorandom generator.

    Returns a symmetric 0/1 mask with ones on the diagonal and, in
    expectation, O(log n) nonzero entries per row.
    """
    target_sparsity = get_target_sparsity(m)
    # procedure produces an equivalent distribution of 1s and 0s as sampling individual
    # matrix elements i.i.d. from binomial distribution
    # since we'll take half of the generated indices, we set the probability of a nonzero
    # element to be double the target sparsity
    p_one = min(2*target_sparsity/m.shape[0], 1.0)
    # for each row, sample the binomial distribution to get the number of nonzero indices
    # matches in expectation get_target_sparsity(m), i.e. O(log n)
    # reference the upper triangular indices according to the lower triangular indices
    # can be done efficiently by mapping indices instead of copying matrix elements
    one_filter = np.zeros(m.shape)
    for i in range(m.shape[0]):
        # find O(log n) indices
        # (note: `num_nonzero` holds the sampled column indices themselves)
        num_nonzero = np.random.randint(m.shape[0], size=np.random.binomial(m.shape[0], p_one))
        one_filter[i][num_nonzero] = 1
    # symmetrize: keep the lower triangle and mirror it above the diagonal
    one_filter = np.tril(one_filter) + np.tril(one_filter, -1).T
    # set all NTK matrix elements from opposite classes to be zero
    # since the NTK is larger for more similar data examples, this biases the sparse
    # matrix towards selecting more important examples
    class_0, class_1 = block_diagonal(m)
    one_filter = one_filter * (class_0 + class_1)
    # make sure the diagonal is ones
    np.fill_diagonal(one_filter, 1)
    return one_filter

def sparsify_unbiased(m, sparsity_pattern):
    """
    Sparsify NTK matrix `m` using a given sparsity pattern. Used for the fully-connected network.
    """
    return m * sparsity_pattern

def sparsify_biased(m, sparsity_pattern, t0, t1):
    """
    Sparsify NTK matrix `m` using a given sparsity pattern, then additionally sparsify by setting elements below `t0` and `t1` in classes 0 and 1 respectively to 0. Used for the convolutional network.
    """
    class_0, class_1 = block_diagonal(m)
    # keep only pattern entries that also exceed the per-class threshold
    one_filter = sparsity_pattern * ((m > t0) * class_0 + (m > t1) * class_1)
    np.fill_diagonal(one_filter, 1)
    kernel_train_sparse = m * one_filter
    # we expect a factor of ~target_sparsity by Gershgorin's theorem
    # empirically, the well-conditioning of the kernel makes it scale better than this
    f = 0.76 * get_target_sparsity(m)**0.9
    # boost the diagonal to keep the sparsified kernel well conditioned
    conditioning = f * np.diag(kernel_train_sparse)*np.eye(kernel_train_sparse.shape[0])
    kernel_train_conditioned = kernel_train_sparse + conditioning
    return kernel_train_conditioned

def compute_class_percentiles(m, percentile):
    """
    Compute the truncation thresholds for `sparsify_biased`. This is evaluated over a small subset (n = 16) of the training set to efficiently bias the sparsification towards large off-diagonal elements.
    """
    class_size = m.shape[0]//2
    ones_class = np.ones((class_size, class_size))
    zeros_class = np.zeros((class_size, class_size))
    # exclude the diagonal so thresholds reflect off-diagonal magnitudes only
    class_0 = np.block([[ones_class - np.eye(class_size), zeros_class], [zeros_class, zeros_class]])
    class_1 = np.block([[zeros_class, zeros_class], [zeros_class, ones_class - np.eye(class_size)]])
    t0 = np.percentile(np.abs(m * class_0), percentile)
    t1 = np.percentile(np.abs(m * class_1), percentile)
    return t0, t1

def get_sparsity(m):
    """
    Get maximum number of nonzero elements in any row or column.

    (Computed over columns; equivalent for the symmetric patterns used here.)
    """
    return np.amax(np.sum(m != 0, axis=0))
# -

# We verify that the sparsity pattern does indeed scale like $O(\log n)$.
# + Ns = [16, 32, 64, 128, 256, 512] sparsity_trials = 100 sparsities = np.zeros(len(Ns)) sparsities_std = np.zeros(len(Ns)) for i in range(len(Ns)): N = Ns[i] sparsities_N = [] for t in range(sparsity_trials): sparsity_pattern = get_sparsity_pattern(np.zeros((N, N))) s = get_sparsity(sparsity_pattern) sparsities_N.append(s) sparsities[i] = np.mean(sparsities_N) sparsities_std[i] = np.std(sparsities_N)/np.sqrt(len(sparsities_N)) plt.figure(figsize=(5, 4)) plt.errorbar(Ns, sparsities, yerr=2*sparsities_std, fmt='o', c='C1') plt.xlabel('Training set size') plt.ylabel('Sparsity') plt.xscale('log') plt.xticks(Ns) plt.minorticks_off() plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) plt.tight_layout() plt.show() # - # ## Neural network performance # # Four quantities characterize the infinite-width neural network and its sparsified and diagonal approximations: # * Binary classification accuracy: all three networks are evaluated on a balanced sample of the MNIST test set (separate from the training set). # * Condition number: to invert the sparsified NTK $\tilde K$ efficiently with a quantum linear systems algorithm, the condition number $\kappa(\tilde K)$ (defined to be the ratio of the largest to smallest singular values) must be bounded by $O(\log n)$. # * Post-selection measurements: to prepare the quantum state $|k_*\rangle = \frac{1}{\sqrt{P}} \sum_{i=0}^{n-1} k_i |i\rangle$ of the NTK evaluated between test data $\mathbf x_*$ and the training data $\{\mathbf x_i\}$, we require $O(1/P)$ measurements for $P = \sum_i k_i^2$. Here, $k_i$ corresponds to the kernel $k(\mathbf x_*, \mathbf x_i)$ normalized and clipped to lie within $-1 \leq k_i \leq 1$. To efficiently prepare the state, the number of measurements must be bounded by $O(\log n)$. 
# * Readout measurements: to perform the final readout, we estimate the sign of state overlap $o = \langle k_* | y \rangle$ (for the diagonal approximation) or $o = \langle k_* | \tilde K^{-1} | y\rangle$ (for the sparsified approximation). This requires $O(1/|o|^2)$ measurements, which must be bounded by $O(\log n)$ for efficient readout.

# +
def classify(ntk_mean):
    """
    Classify raw output of the NTK on the test dataset, assuming the test data is sampled i.i.d. from the underlying data distribution (i.e. balanced).
    """
    # threshold at the median so half the predictions fall in each class
    thresh = np.median(ntk_mean)
    out = np.sign(ntk_mean - thresh)
    return out

def get_file_prefix(fp, seed, N, trial):
    """
    NTK output filename
    """
    return fp + '_seed' + str(seed) + '_data' + str(N) + '_trial' + str(trial) + '_'

def analyze(file_prefix, Ns, sparsify_fnc, sparsify_args=(), sparsity_bootstraps=3, plot_margin=0):
    """
    Plot the accuracy, condition number, number of measurements for post-selection, and number of measurements for readout.

    Parameters:
        file_prefix: path prefix of the pre-generated NTK output files.
        Ns: training set sizes to analyze.
        sparsify_fnc: sparsification function (sparsify_unbiased or sparsify_biased).
        sparsify_args: extra positional args forwarded to sparsify_fnc.
        sparsity_bootstraps: number of random sparsity patterns per data file.
        plot_margin: horizontal offset between series in the plots.
    """
    Ns = np.array(Ns)
    accs_mean = []
    accs_std = []
    measurements = []
    post_selections = []
    measurements_std = []
    post_selections_std = []
    all_kappas = []
    for n_ind in range(len(Ns)):
        N = Ns[n_ind]
        # load data
        prefix = get_file_prefix(file_prefix, '*', N, '*')
        suffixes = ['kernel_train.npy', 'kernel_test.npy', 'kernel_test_normalized.npy', 'train_label.npy', 'test_label.npy']
        files = []
        for suffix in suffixes:
            files.append(sorted(glob.glob(prefix + '*' + suffix)))
        all_dense = []
        all_sparse = []
        all_identity = []
        all_scale = []
        trial_p = []
        trial_overlaps_diag = []
        trial_overlaps_sparse = []
        kappas = []
        for i in range(len(files[0])):
            # load files
            kernel_train = np.load(files[0][i])
            kernel_test = np.load(files[1][i])
            kernel_test_normalized = np.load(files[2][i])
            train_label = np.load(files[3][i])
            test_label = np.load(files[4][i])
            # bootstrap over different sparsity patterns
            for s in range(sparsity_bootstraps):
                # randomize sparsity pattern
                sparsity_pattern = get_sparsity_pattern(kernel_train)
                # sparsify kernel
                kernel_train_sparse = sparsify_fnc(kernel_train, sparsity_pattern, *sparsify_args)
                kernel_train_identity = np.diag(kernel_train)*np.eye(kernel_train.shape[0])
                # calculate condition number
                eigs = np.linalg.eigvals(kernel_train_sparse)
                kappa = np.amax(np.abs(eigs))/np.amin(np.abs(eigs))
                kappas.append(kappa)
                # solve A^{-1}y for A being the exact NTK, sparsified NTK, and diagonal NTK
                # (each solution is normalized to unit Euclidean norm)
                inv_y_dense = np.linalg.inv(kernel_train) @ train_label
                inv_y_dense /= np.sqrt(np.sum(inv_y_dense**2))
                inv_y_sparse = np.linalg.inv(kernel_train_sparse) @ train_label
                inv_y_sparse /= np.sqrt(np.sum(inv_y_sparse**2))
                inv_y_diag = np.linalg.inv(kernel_train_identity) @ train_label
                inv_y_diag /= np.sqrt(np.sum(inv_y_diag**2))
                # prepare |k_*> state
                ki = kernel_test_normalized / np.amax(np.abs(kernel_test_normalized))
                p = np.sum(ki**2, axis=1)
                ki = ki / np.sqrt(p[:, np.newaxis])
                # prepare |y> state
                ny = len(train_label)
                y = train_label / np.sqrt(ny)
                trial_p.append(p)  # for post-selection measurements
                trial_overlaps_diag.append(ki @ y)  # <k_*|y>
                trial_overlaps_sparse.append(ki @ inv_y_sparse)  # <k_*|\tilde K^{-1}|y>
                # classify with the exact, sparsified, and diagonal NTKs
                mean_dense = kernel_test @ inv_y_dense
                mean_sparse = kernel_test_normalized @ inv_y_sparse
                mean_identity = kernel_test_normalized @ inv_y_diag
                correct_dense = classify(mean_dense) == test_label
                correct_sparse = classify(mean_sparse) == test_label
                correct_identity = classify(mean_identity) == test_label
                all_dense = np.concatenate((all_dense, correct_dense))
                all_sparse = np.concatenate((all_sparse, correct_sparse))
                all_identity = np.concatenate((all_identity, correct_identity))
        # collect the per-trial statistics for this training set size
        # NOTE(review): placement inferred — indentation was lost in this file;
        # appending once per N (after the file loop) is the only reading that
        # avoids duplicated entries in the np.concatenate below. Confirm against
        # the original notebook.
        all_scale.append([trial_p, trial_overlaps_diag, trial_overlaps_sparse])
        # compute the mean and standard deviation of all quantities
        all_out = [all_dense, all_sparse, all_identity]
        accs_mean_s = []
        accs_std_s = []
        for i in range(len(all_out)):
            correct = all_out[i]
            accs_mean_s.append(np.mean(correct))
            accs_std_s.append(np.std(correct)/np.sqrt(len(correct)))
        accs_mean.append(accs_mean_s)
        accs_std.append(accs_std_s)
        scale = np.concatenate(all_scale, axis=1)
        p = scale[0, :, :].flatten()
        # number of measurements needed to prepare |k_*> ~ N/P
        post_measurements = N/p
        post_selections.append(np.median(post_measurements))
        bootstraps = 5
        # Poisson bootstrapping
        medians = np.zeros(bootstraps)
        for b in range(bootstraps):
            r = np.random.poisson(size=post_measurements.shape)
            pm = r * post_measurements
            medians[b] = np.median(pm)
        post_selections_std.append(np.std(medians)/np.sqrt(bootstraps))
        overlaps = scale[1:, :, :].reshape(2, -1)
        # enough measurements for stdev to be O(overlap)
        these_measurements = 1/overlaps**2 - 1
        measurements.append(np.median(these_measurements, axis=1))
        bootstraps = 5
        # Poisson bootstrapping
        medians = np.zeros((bootstraps, 2))
        for b in range(bootstraps):
            r = np.random.poisson(size=these_measurements.shape)
            pm = r * these_measurements
            medians[b] = np.median(pm, axis=1)
        measurements_std.append(np.std(medians, axis=0)/np.sqrt(bootstraps))
        all_kappas.append(kappas)
    accs_mean = np.array(accs_mean)
    accs_std = np.array(accs_std)
    post_selections = (np.array(post_selections), np.array(post_selections_std))
    measurements = (np.array(measurements), np.array(measurements_std))
    kappa = []
    kappa_std = []
    for row in all_kappas:
        kappa.append(np.mean(row))
        kappa_std.append(np.std(row)/np.sqrt(len(row)))
    kappa = np.array(kappa)
    kappa_std = np.array(kappa_std)
    # plot everything
    plt.figure(figsize=(5, 4))
    plt.errorbar(Ns - Ns*plot_margin, accs_mean[:, 0], yerr=2*accs_std[:, 0], label='Exact NTK', fmt='o')
    plt.errorbar(Ns, accs_mean[:, 1], yerr=2*accs_std[:, 1], label='Sparse NTK', fmt='o')
    plt.errorbar(Ns + Ns*plot_margin, accs_mean[:, 2], yerr=2*accs_std[:, 2], label='Diagonal NTK', fmt='o')
    plt.xlabel('Training set size')
    plt.ylabel('Accuracy')
    plt.xscale('log')
    plt.xticks(Ns)
    plt.minorticks_off()
    plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.legend(loc='lower right')
    plt.tight_layout()
    plt.show()
    plt.figure(figsize=(5, 4))
    plt.errorbar(Ns, kappa, yerr=2*kappa_std, fmt='o', c='C1')
    plt.xlabel('Training set size')
    plt.ylabel('Condition number')
    plt.xscale('log')
    plt.xticks(Ns)
    plt.minorticks_off()
    plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.gca().get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter( useOffset=False))
    plt.tight_layout()
    plt.show()
    plt.figure(figsize=(5, 4))
    plt.errorbar(Ns, post_selections[0], yerr=2*post_selections[1], fmt='o')
    plt.xlabel('Training set size')
    plt.ylabel('Measurements (post-selection)')
    plt.xscale('log')
    plt.xticks(Ns)
    plt.minorticks_off()
    plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.tight_layout()
    plt.show()
    plt.figure(figsize=(5, 4))
    plt.errorbar(Ns - Ns*plot_margin/2, measurements[0][:, 1], yerr=2*measurements[1][:, 1], label='Sparse NTK', c='C1', fmt='o')
    plt.errorbar(Ns + Ns*plot_margin/2, measurements[0][:, 0], yerr=2*measurements[1][:, 0], label='Diagonal NTK', c='C2', fmt='o')
    plt.xlabel('Training set size')
    plt.ylabel('Measurements (readout)')
    plt.xscale('log')
    plt.xticks(Ns)
    plt.minorticks_off()
    plt.gca().get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.legend()
    plt.tight_layout()
    plt.show()
# -

# Plot the results for the fully-connected neural network.

analyze('kernel_output/fully-connected', Ns, sparsify_unbiased, plot_margin=1/8)

# Estimate the appropriate normalization threshold for preparing $|k_*\rangle$ based on a small subset ($n=16$) of the training set, and then plot the results for the convolutional neural network.

fp = 'kernel_output/convolutional'
base_n = 16
# use the first matching small-kernel file to derive the truncation thresholds
base_ntk = np.load(sorted(glob.glob(get_file_prefix(fp, '*', base_n, '*') + 'kernel_train.npy'))[0])
sparsify_args = compute_class_percentiles(base_ntk, 90)
analyze(fp, Ns, sparsify_biased, sparsify_args=sparsify_args, plot_margin=1/8)
analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: desdeo-emo
#     language: python
#     name: desdeo-emo
# ---

# # The River Pollution Problem

# +
from desdeo_emo.EAs.RVEA import RVEA
from desdeo_problem import variable_builder, ScalarObjective, VectorObjective, MOProblem

import numpy as np
import pandas as pd

from desdeo_emo.utilities.plotlyanimate import animate_init_, animate_next_

# +
# create the problem
# Each objective evaluator takes a 2-D array of decision variables
# (rows = individuals, columns = x_1, x_2) and returns one value per row.
def f_1(x):
    return 4.07 + 2.27 * x[:, 0]

def f_2(x):
    return 2.60 + 0.03*x[:, 0] + 0.02*x[:, 1] + 0.01 / (1.39 - x[:, 0]**2) + 0.30 / (1.39 - x[:, 1]**2)

def f_3(x):
    return 8.21 - 0.71 / (1.09 - x[:, 0]**2)

def f_4(x):
    return 0.96 - 0.96 / (1.09 - x[:, 1]**2)

# def f_5(x):
#     return -0.96 + 0.96 / (1.09 - x[:, 1]**2)

# Chebyshev-style distance of (x_1, x_2) from (0.65, 0.65); minimized.
def f_5(x):
    return np.max([np.abs(x[:, 0] - 0.65), np.abs(x[:, 1] - 0.65)], axis=0)
# -

# Wrap the evaluators: f1-f4 are maximized, f5 is minimized.
f1 = ScalarObjective(name="f1", evaluator=f_1, maximize=True)
f2 = ScalarObjective(name="f2", evaluator=f_2, maximize=True)
f3 = ScalarObjective(name="f3", evaluator=f_3, maximize=True)
f4 = ScalarObjective(name="f4", evaluator=f_4, maximize=True)
f5 = ScalarObjective(name="f5", evaluator=f_5, maximize=False)

varsl = variable_builder(["x_1", "x_2"], initial_values=[0.5, 0.5], lower_bounds=[0.3, 0.3], upper_bounds=[1.0, 1.0] )

problem = MOProblem(variables=varsl, objectives=[f1, f2, f3, f4, f5])

# Interactive RVEA: 5 interaction rounds of 100 generations each.
evolver = RVEA(problem, interact=True, n_iterations=5, n_gen_per_iter=100)

figure = animate_init_(evolver.population.objectives, filename="river.html")

pref, plot = evolver.start()

print(plot.content["dimensions_data"])

print(pref[0].content['message'])

# Provide a reference point (one value per objective) as the preference.
pref[2].response = pd.DataFrame([[6.3,3.3,7,-2,0.3]], columns=pref[2].content['dimensions_data'].columns)

# +
# Run one interaction round and append the new front to the animation.
pref, plot = evolver.iterate(pref[2])
figure = animate_next_(
    plot.content['data'].values,
    figure,
    filename="river.html",
    generation=evolver._iteration_counter,
)

message = (f"Current generation number:{evolver._current_gen_count}. "
           f"Is looping back recommended: {'Yes' if evolver.continue_evolution() else 'No'}")
print(message)
# -
docs/notebooks/River_Pollution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Pyramid wavefront-sensor exploration with the CEO GMT simulator: propagate a
# source through the GMT model onto a pyramid WFS and an imager, and inspect
# the detector frames for different modulation/separation settings.
# FIX: removed a dangling `pym.camera.` expression that made this file a
# SyntaxError when executed as a script.

# + jupyter={"outputs_hidden": false}
import numpy as np
import ceo
from ceo.pyramid import Pyramid as Pym
import matplotlib.pyplot as plt
# %matplotlib inline

# + jupyter={"outputs_hidden": false}
# Pupil sampling: 92 lenslets across, 32 pixels per lenslet.
N_SIDE_LENSLET=92
n = N_SIDE_LENSLET*32
gmt = ceo.GMT_MX()

# + jupyter={"outputs_hidden": false}
# On-axis V-band source; `>>` sets the propagation path, `+src` propagates.
src = ceo.Source('V',zenith=0,azimuth=0,
                 rays_box_size=25.5,
                 rays_box_sampling=n,
                 rays_origin=[0,0,25])
src>>(gmt,)
+src
src.wavefront.rms(-9)

# + jupyter={"outputs_hidden": false}
pym = Pym(N_SIDE_LENSLET,n,modulation=0.0,separation=2)
src>>(gmt,pym)
# -

# Target pupil separation in lenslet units (display only).
132/92

pym.camera.reset()
pym.separation = 132/92

# %%time
+src

fig = plt.figure()
fig.set_size_inches(10,10)
plt.imshow(pym.camera.frame.host())
plt.colorbar()

# Grid lines at quarter-frame spacing to delineate the four pupil images.
u = np.arange(0,pym.camera.frame.host().shape[0]*5/4,pym.camera.frame.host().shape[0]/4)
plt.xticks(u)
plt.yticks(u)
plt.grid(alpha=0.5)

pym.camera.frame.host().shape[0]

# Rotate the ray bundle and re-acquire a frame.
src.rays.rot_angle = np.pi/12
pym.camera.reset()
+src

fig = plt.figure()
fig.set_size_inches(10,10)
plt.imshow(pym.camera.frame.host())
plt.colorbar()

# Reference imager to compare photometry against the pyramid detector.
imgr = ceo.Imaging(N_PX_PUPIL=n-1,N_PX_IMAGE=64)
imgr.reset()
src>>(gmt,imgr)
+src
imgr.readOut(1e-3,0)

plt.imshow(imgr.frame.host())#/imgr.frame.host_data.max())
plt.colorbar()
#plt.clim([0,0.5])

# Photon bookkeeping; NOTE(review): 357 looks like the collecting area in m^2 -- confirm.
imgr.frame.host_data.sum()/src.nPhoton/357

# Unmodulated pyramid read-out.
src>>(gmt,pym)
pym.camera.reset()
pym.modulation = 0
+src
pym.camera.readOut(1e-6,15)

pym.camera.frame.host().std()

plt.imshow(pym.camera.frame.host())
plt.colorbar()

pym.camera.frame.host_data.sum()/(1e-6*src.nPhoton*357)

# Modulated pyramid read-out.
pym.camera.reset()
pym.modulation = 3
+src
pym.camera.readOut(1e-7,15)

plt.imshow(pym.camera.frame.host())
plt.colorbar()

pym.modulation_sampling

pym.camera.N_FRAME
pyramid/Pyramid.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Neutron-collimator geometry study: back-project simulated tracks onto a
# "Siemens star" test object, discard the tracks that hit it, and histogram
# the surviving impact positions on the detector plane.

import numpy as np
import matplotlib.pyplot as plt
import random
import math
import time
from mpl_toolkits.mplot3d import Axes3D
import os


# +
def convert_to_alphaXY(theta, phi):
    """Convert spherical direction angles (theta, phi) into the projected
    inclination angles (ZX, ZY) in the XZ and YZ planes."""
    A = np.sqrt(1 + np.tan(phi)**2)
    # Directions pointing into the back half-plane get a flipped sign.
    if math.pi/2 < phi <= 3*math.pi/2:
        ZX = - np.arctan(np.tan(theta)/A)
        ZY = - np.arctan(np.tan(theta)*np.tan(phi)/A)
    else:
        ZX = np.arctan(np.tan(theta)/A)
        ZY = np.arctan(np.tan(theta)*np.tan(phi)/A)
    return (ZX, ZY)


def calculate_impact_to_object(dist_to_object, bursts, PosXX, PosYY, Theta, Phi):
    """Return True when the track starting at (PosXX, PosYY) with direction
    (Theta, Phi) hits the test object `dist_to_object` upstream.

    The object is a Siemens-star pattern: a circle of radius 10 centred at
    (10, 10), split into `bursts` angular sectors of which every other one
    is solid.
    """
    AngXX, AngYY = convert_to_alphaXY(Theta, Phi)
    # calculate intermediate point: back-project the track onto the object plane
    a = PosXX - dist_to_object * np.tan(AngXX)
    b = PosYY - dist_to_object * np.tan(AngYY)
    # shape condition: circle centred at (10,10) with radius 10 (100 = 10**2),
    # restricted to alternating angular sectors.
    centreX = 10
    centreY = 10
    # NOTE(review): a == centreX divides by zero below; with numpy scalars this
    # produces +/-inf (arctan -> +/-pi/2) and a RuntimeWarning rather than an
    # exception -- confirm this edge case is intended.
    if 100 > ((a - centreX)**2 + (b - centreY)**2) and math.floor(np.arctan((b - centreY)/(a - centreX))*bursts/math.pi) % 2 == 1:
        impact = True
    else:
        impact = False
    '''
    if 5 < a < 15 and 5 < b < 15:
        impact = True
    else:
        impact = False
    '''
    # return the result
    return impact
# -

# Detector-plane offsets applied to the raw simulated positions.
Xbias = 6
Ybias = 6
Foldername = "/eos/home-o/osanspla/SWAN_projects/Neutron Collimator Geometry/Pavia0"
# Three files (theta, phi, positions) are written per simulation run.
# NOTE(review): number_of_files is currently unused -- the loop below runs range(1).
number_of_files = int(len(os.listdir(Foldername)[1:])/3)
finalposition = []
for elem in range(1):
    countpos, countneg = 0, 0
    titleTheta = Foldername + "/Testrun-of-full-sim-AngleTheta-index" + str(elem) + ".txt"
    with open(titleTheta, 'r') as file:
        AnTheta = np.array(eval(file.read()))  # read list string and convert to array
    titlePhi = Foldername + "/Testrun-of-full-sim-AnglePhi-index" + str(elem) + ".txt"
    with open(titlePhi, 'r') as file:
        AnPhi = np.array(eval(file.read()))  # read list string and convert to array
    titlePos = Foldername + "/Testrun-of-full-sim-positionXY-index" + str(elem) + ".txt"
    with open(titlePos, 'r') as file:
        Position = np.array(eval(file.read()))  # read list string and convert to array
    # Keep only the tracks that MISS the object ("positive" counts survivors).
    for i in range(len(AnPhi)):
        impact = calculate_impact_to_object(10, 50, Position[i,0]-Xbias, Position[i,1]-Ybias, AnTheta[i], AnPhi[i])
        if not impact:
            finalposition.append(Position[i,:])
            countpos += 1
        else:
            countneg += 1
    print("Iteration "+str(elem)+" Number of points: "+str(len(finalposition))+" Positive: "+str(countpos)+" Negative "+str(countneg))

# Bias-corrected surviving positions.
PosXX_after = np.zeros(len(finalposition))
PosYY_after = np.zeros(len(finalposition))
for i in range(len(finalposition)):
    PosXX_after[i] = finalposition[i][0] - Xbias
    PosYY_after[i] = finalposition[i][1] - Ybias
#hist,xax,yax,image=plt.hist2d(PosXX_after,PosYY_after,400,range=[[-5, 15], [0, 20]],cmap="hot")
hist,xax,yax,image=plt.hist2d(PosXX_after,PosYY_after,400)

# +
# Same positions without the bias correction (overwrites the arrays above).
PosXX_after = np.zeros(len(finalposition))
PosYY_after = np.zeros(len(finalposition))
for i in range(len(finalposition)):
    PosXX_after[i] = finalposition[i][0]
    PosYY_after[i] = finalposition[i][1]
# -

plt.figure(figsize=(10,10))
#hist,xax,yax,image=plt.hist2d(PosXX_after,PosYY_after,400,range=[[-5, 15], [0, 20]],cmap="hot")
hist,xax,yax,image=plt.hist2d(PosXX_after,PosYY_after,400)
plt.show()
plt.close()

# Profile cuts through the 2-D histogram (inner/outer radii of the star pattern).
#plt.plot(hist[125])
plt.plot(hist[200][280:300])
plt.plot(hist[60][230:250])

print("Inner")
print(hist[200][280:300])
print("Outer")
print(hist[60][230:250])

# +
plt.figure(figsize=(20,20))
# NOTE(review): `histtotal` is not defined anywhere in this notebook --
# presumably a histogram accumulated over several runs in a previous session;
# as written this cell raises NameError. Confirm where it is supposed to come from.
#plt.imshow(histtotal[50:250,100:300],cmap="hot")
plt.imshow(histtotal[100:200,150:250],cmap="binary",vmax = 350)
cbar = plt.colorbar()
cbar.set_label("Counts",rotation=270)
plt.show()
plt.close()
# -

# Prepare output folders for further runs; os.makedirs with exist_ok=True is
# portable and does not error if a directory already exists (os.system("mkdir")
# did both).
for i in range(10):
    os.makedirs("Pavia" + str(i), exist_ok=True)
NeDiMo/Siemens-star-move.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: py35
#     language: python
#     name: py35
# ---

# # <center> Metered Loads: Data Visualization </center>

import os

import pandas as pd

# %matplotlib inline

# Hourly metered-load data lives one directory up, indexed by UTC timestamp.
csv_path = os.path.join(os.pardir, 'formatted_data', 'hourly_loads.csv')
df = pd.read_csv(csv_path,
                 parse_dates=['DATE_UTC'],
                 index_col=['DATE_UTC'])

# Quick sanity check: first and last few rows of the series.
df.head()

df.tail()

# Plot every load column against the timestamp index.
df.plot();
data/metered_loads/notebooks/MeteredLoadsVisualization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Download ACLED conflict events for Morocco (ISO 504) page by page, then map
# the event locations with Bokeh on a web-mercator tile background.

#import libraries
import requests
from datetime import datetime as dt
from datetime import timedelta
import pandas as pd

# +
# get events from n days ago
iso = 504 #Pike - changed the ISO code to download data for Morocco instead of Niger
# NOTE(review): `limit` is never used; the API's default page size of 500 drives
# the pagination loop below.
limit = 400

api_url = 'https://api.acleddata.com/acled/read?terms=accept&iso={}'.format(iso)
print (api_url, type(api_url)) #creates request according to ACLED format specifications - p. 13
# -

response = requests.get(api_url)
data = response.json()

data.keys()

data['count']

# ### From the documentation we know this is the max return --- How can we get all the results?

# +
# Let's make a function that updates our search to get the new pages
def ping_acled(api_url):
    '''
    Takes one parameter
    search term for API
    '''
    response = requests.get(api_url)
    data = response.json()
    return data


# +
results = [] # empty data structure to store results
num_results = 500 # condition to continue adding pages
count = 0 # tracker of results
page = 1 #Per the documentation each page will give us more results

while num_results == 500: #if less 500 or 0 we know we have all the results
    print ("starting ", page, " ", num_results) #just to see our progress
    api_url = 'https://api.acleddata.com/acled/read?terms=accept&iso={}&page={}'.format(iso,page) #the search
    data = ping_acled(api_url) #call the previous function
    results.append(data['data']) #store in our results
    count += data['count'] #Track number of results
    num_results = data['count'] #update our condition
    page += 1 #update our page variable

print ("Total Results ", count) #Track our progress
# -

#Now I want to put them together into one giant result
super_list = []
for res in results:
    super_list += res

print (len(super_list))

#convert it into a pandas data structure and do more stuff
morocco_res = pd.DataFrame(super_list) #creating new pandas data frame for the Morocco data

morocco_res.head()

# ### Do the right thing, take some time to look at the codebook and see what these columns are

morocco_res.columns

# ### Homework --- Make a map of some ACLED Data (absolutely use the code from the Global Terrorism Database exercise)

#Imports necessary aspects of Bokeh for plotting on a map
from bokeh.tile_providers import get_provider, Vendors
from pyproj import Transformer
tile_provider = get_provider('STAMEN_TERRAIN')
import math ##Pike - we will need to import math and some bokeh.plotting tools
from bokeh.plotting import figure, output_notebook, show #builds interactive graphs for python

# Take the data reduced to a country and get the lat/long of the attacks.
# NOTE(review): `data_id` is ACLED's unique per-event id, not a group/actor
# name, so the grouping below is effectively one entry per event -- confirm
# whether `actor1` was intended instead.
# FIX: .copy() makes an independent frame, so the column conversions below no
# longer raise pandas' SettingWithCopyWarning.
morocco_map = morocco_res[["latitude", 'longitude', 'data_id']].copy()

# +
#PIKE - we need to convert the data in the Pandas data frame from strings to floats for mapping
morocco_map['latitude'] = morocco_map['latitude'].astype(float)
morocco_map['longitude'] = morocco_map['longitude'].astype(float)
morocco_map['data_id'] = morocco_map['data_id'].astype(float)
# -

#create pyproj transformer to convert from lat/long to web mercator
transformer = Transformer.from_crs('epsg:4326','epsg:3857')

# +
#Pike - changed the names to match the niger_map data frame
map_dict = {} # empty dictionary to track group attacks by lat long
nan_count = {} # some data doesn't have a lat/long so we need to know what we are losing

# Iterate through tables and associate group with lat/long
for idx, row in morocco_map.iterrows():
    if row['data_id'] in map_dict.keys():
        if math.isnan(row["latitude"]): #This counts no data
            if row['data_id'] in nan_count.keys():
                nan_count[row['data_id']] += 1
            else:
                nan_count[row['data_id']] = 1
        else: #This has to convert the lat/long to a mercator projection
            point = transformer.transform(row["latitude"],row["longitude"])
            map_dict[row['data_id']].append([point[0],point[1]])
    #BOTH the if and else statement do the same thing but since it is a dictionary one needs to add the group name first
    else:
        if math.isnan(row["latitude"]):
            nan_count[row['data_id']] = 1
        else:
            point = transformer.transform(row["latitude"],row["longitude"])
            map_dict[row['data_id']] =[[point[0],point[1]]]

#This tells how many attacks we are losing
nan_count
# -

pts = [(27.4,-13.5), (36.1, -0.9)] #Pike - ensured this bounding box encapsulated all of Morocco
bbox = []
for pt in transformer.itransform(pts):
    bbox.append(pt)

# Flatten the per-event point lists into x/y coordinate arrays for plotting.
NPA_x = []
NPA_y = []
for k,v in map_dict.items():
    for pt in v:
        NPA_x.append(pt[0])
        NPA_y.append(pt[1])

# +
#Plots the bounding box
p = figure(x_range=(bbox[0][0], bbox[1][0]),y_range=(bbox[0][1], bbox[1][1]),x_axis_type="mercator", y_axis_type="mercator")

#add the map from the Bokeh map vendor in this case Stamen_Terrain --- see documentation
p.add_tile(tile_provider)

# Places a circle for each converted lat/long attack
p.circle(x = NPA_x, y = NPA_y, color= "firebrick")

#shows the plot
show(p)

# +
##PIKE - running all this new code will correctly map the ACLED data for Morocco
Session 5/Acled - Niger.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python
#     language: python3
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Foundations of Computational Economics #17
#
# by <NAME>, ANU
#
# <img src="_static/img/dag3logo.png" style="width:256px;">

# + [markdown] slideshow={"slide_type": "fragment"}
# ## Linear regression using Pandas and Numpy
#
# <img src="_static/img/lab.png" style="width:64px;">

# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="_static/img/youtube.png" style="width:65px;">
#
# [https://youtu.be/LafDXp28IRE](https://youtu.be/LafDXp28IRE)
#
# Description: Using Numpy and Pandas to estimate simple regression.

# + [markdown] slideshow={"slide_type": "slide"}
# ### Linear regression
#
# Recall the classic linear regression model with data in columns of
# $ (X,y) $, where $ X $ are independent variables and $ y $ is
# the dependent variable.
# Parameter vector to be estimated is $ \beta $, and we assume that
# errors follow $ \varepsilon \sim N(0, \sigma) $
#
# $$
# y = X \beta + \varepsilon \quad \quad \varepsilon \sim N(0, \sigma)
# $$
#
# Let $ \hat{\beta} $ denote the estimate of the parameters $ \beta $.
# To find it, we minimize the sum of squares of the residuals
# $ e = y - X \hat{\beta} $, i.e. $ e'e \longrightarrow_{\hat{\beta}} \min $,
# which leads to the well known OLS formula
#
# $$
# \hat{\beta} = (X'X)^{-1} X' y
# $$
#
# The mean standard error (MSE) of the regression is calculated as $ s = \sqrt{\frac{1}{n-k} e'e} $,
# where $ n $ is the number of observations and $ k $ is the number of parameters (elements in $ \beta $).
#
# The variance-covariance matrix of the estimates is given by $ \hat{\Sigma} = s^2 (X'X)^{-1} $.
# The square root of the diagonal elements of this matrix are the standard deviations of the estimates,
# and give us the measure of the accuracy of the estimated parameters.
#
# [<NAME> “Econometric Analysis”](https://books.google.com.au/books?id=LWQuAAAAQBAJ&dq=greene%20econometric%20analysis)

# + hide-output=false slideshow={"slide_type": "slide"}
import numpy as np

# Exercise placeholder: the stub is filled in further down; the two test cells
# below will fail until the full implementation replaces it.
def ols(X,y,addConstant=True,verbose=True):
    '''Return the OLS estimates and their variance-covariance matrix for the given data X,y
       When addConstant is True, constant is added to X
       When verbose is True, a report is printed
    '''
    pass

# + hide-output=false slideshow={"slide_type": "slide"}
# test on small dataset
X = np.array([[5, 3], [2, 3], [3, 1], [2, 8], [4.5, 2.5], [2.5, 1.5], [4.3, 4.2], [0.5, 3.5], [1, 5], [3, 8]])
truebeta = np.array([1.234,-0.345])[:,np.newaxis]  # column vector
y = X @ truebeta + 2.5 + np.random.normal(size=(X.shape[0],1),scale=0.2)
beta,S=ols(X,y)
beta,S=ols(X,y,addConstant=False)

# + hide-output=false slideshow={"slide_type": "slide"}
# test with one dimensional arrays
X = np.array([1,2,3,4,5,6,7,8,9,10])
y = np.array([9.4,8.1,7.7,6.3,5.7,4.4,3.0,2.1,1.1,0.8])
beta,S=ols(X,y)
beta,S=ols(X,y,addConstant=False)

# + hide-output=false slideshow={"slide_type": "slide"}
import numpy as np

def ols(X,y,addConstant=True,verbose=True):
    '''Return the OLS estimates and their variance-covariance matrix for the given data X,y
       When addConstant is True, constant is added to X
       When verbose is True, a report is printed
    '''
    y = y.squeeze() # we are better off if y is one-dimensional
    # Normalize X to a 2-D matrix, prepending a constant column if requested.
    # FIX: removed the dead `k = ...` assignments here -- k is (re)computed
    # below from X.shape, and the old `k = X.shape[1]+1` branch was wrong
    # anyway (it counted the constant column twice).
    if addConstant and X.ndim==1:
        X = np.hstack((np.ones(X.shape[0])[:,np.newaxis],X[:,np.newaxis]))
    elif addConstant and X.ndim>1:
        X = np.hstack((np.ones(X.shape[0])[:,np.newaxis],X))
    elif X.ndim==1:
        X = X[:,np.newaxis]
    xxinv = np.linalg.inv(X.T@X) # inv(X'X)
    beta = xxinv @ X.T@y # OLS estimates
    e = y - X@beta # residuals
    n,k = X.shape # number of observations and parameters
    s2 = e.T@e / (n-k) # squared mean standard error
    Sigma = s2*xxinv # variance-covariance matrix of the estimates
    if verbose: # report the estimates
        print('Number of observations: {:d}\nNumber of parameters: {:d}'.format(n,k))
        print('Parameter estimates (std in brackets)')
        for b,s in zip(beta,np.sqrt(np.diag(Sigma))):
            print('{:10.5f} ({:10.5f})'.format(b,s))
        print('MSE = {:1.5f}\n'.format(np.sqrt(s2)))
    return beta,Sigma

# + [markdown] slideshow={"slide_type": "slide"}
# ### Data on median wages
#
# **The Economic Guide To Picking A College Major**
#
# Data dictionary available at
#
# [https://github.com/fivethirtyeight/data/tree/master/college-majors](https://github.com/fivethirtyeight/data/tree/master/college-majors)

# + hide-output=false slideshow={"slide_type": "fragment"}
import pandas as pd
# same data as in video 15
data = pd.read_csv('./_static/data/recent-grads.csv')

# + hide-output=false slideshow={"slide_type": "slide"}
data.info()

# + hide-output=false slideshow={"slide_type": "slide"}
data.head(n=15)

# + hide-output=false slideshow={"slide_type": "slide"}
import matplotlib.pyplot as plt
# %matplotlib inline
data.plot(x='ShareWomen', y='Median', kind='scatter', figsize=(10, 8), color='red')
plt.xlabel('Share of women')
plt.ylabel('Median salary')
# add a linear regression line to the plot

# + hide-output=false slideshow={"slide_type": "slide"}
print(data[['Median','ShareWomen']].isnull().sum()) # check if there are NaNs in the data!
data1 = data[['Median','ShareWomen']].dropna() # drop NaNs
data1.plot(x='ShareWomen', y='Median', kind='scatter', figsize=(10, 8), color='red')
plt.xlabel('Share of women')
plt.ylabel('Median salary')
# add a linear regression line to the plot
# FIX: fit on the NaN-free data1; fitting on `data` propagates NaNs into beta
# and the line would not be drawn.
b,_ = ols(X=data1['ShareWomen'],y=data1['Median'],verbose=False)
fn = lambda x: b[0]+b[1]*x
xx = np.linspace(0,1,100)
plt.plot(xx,fn(xx),color='navy',linewidth=3)
plt.show()

# + hide-output=false slideshow={"slide_type": "slide"}
# create fraction variables
data.drop(index=data[data['Total']==0].index,inplace=True) # drop zero Totals
data.drop(index=data[data['Employed']==0].index,inplace=True) # drop zero Employed
data['Employment rate'] = data['Employed'] / data['Total']
data['Fulltime rate'] = data['Full_time'] / data['Employed']
data2 = data[['Median','ShareWomen','Employment rate','Fulltime rate']].dropna() # drop NaNs
y = data2['Median']/1000 # rescale salary

# + hide-output=false slideshow={"slide_type": "slide"}
# run the full model
ols(data2[['ShareWomen','Employment rate','Fulltime rate']],y);

# + [markdown] slideshow={"slide_type": "slide"}
# #### Further learning resources
#
# - Regression analysis using `sklearn` library
#   [https://datascience.quantecon.org/applications/regression.html](https://datascience.quantecon.org/applications/regression.html)
17_linear_reg.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Plotting boundaries of HxC planes with different number of bins.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# +
lag = 512 # {32, 64, 128, 256, 512} Choose one of them
# NOTE: `lag`, `cotas` and `noise` are re-assigned inside the loop below, so
# after the loop they hold the values for the LAST lag (512) -- which is what
# panel b) relies on.
base = pd.read_pickle('../pkl_datasets/mamiraua_dataset_ACF_' + str(lag) + '.gzip')
cotas = pd.read_csv('./boundary_files/Cotas_HxC_bins_' + str(int(lag)) + '.csv')
noise = pd.read_csv('./coloredNoises/coloredNoises_' + str(int(lag)) + '.csv')

# +
plt.figure(figsize=(24,10))
plt.rc('font', size=22)
plt.rc('axes', titlesize=22)

# Panel a): theoretical HxC boundaries and colored-noise curves for every lag.
plt.subplot(1,2,1)
lags = [32, 64, 128, 256, 512]
for lag in lags:
    cotas = pd.read_csv('./boundary_files/Cotas_HxC_bins_' + str(int(lag)) + '.csv')
    noise = pd.read_csv('./coloredNoises/coloredNoises_' + str(int(lag)) + '.csv')
    # Attach legend labels only once (first iteration) so each curve family
    # gets a single legend entry.
    if lag == 32:
        plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = 'HxC boundaries')
        plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = 'Colored noises')
    else:
        plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = '')
        plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = '')

# Manually placed lag labels next to each boundary curve...
plt.text(0.7, 0.475, '512', fontsize= 18)
plt.text(0.7, 0.445, '256', fontsize= 18)
plt.text(0.7, 0.415, '128', fontsize= 18)
plt.text(0.7, 0.376, '64', fontsize= 18)
plt.text(0.7, 0.34, '32', fontsize= 18)

# ...and next to each colored-noise curve.
plt.text(0.58, 0.27, '512', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.6, 0.254, '256', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.62, 0.238, '128', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.64, 0.223, '64', fontsize= 16, color='blue', backgroundcolor='0.99')
plt.text(0.66, 0.207, '32', fontsize= 16, color='blue', backgroundcolor='0.99')

plt.xlim([0, 1])
plt.ylim([0, np.max(cotas['Complexity'])+0.01])
plt.ylabel('Complexity [C]')
plt.xlabel('Entropy [H]')
plt.legend(loc = 'upper left', frameon=False)
plt.title('a)')

# Panel b): the 512-bin boundaries with the dataset scattered on top, colored
# by JSD, plus reference lines and three highlighted points p1-p3.
plt.subplot(1,2,2)
plt.plot(cotas['Entropy'],cotas['Complexity'], '--k', label = 'HxC boundaries')
plt.plot(noise['Entropy'],noise['Complexity'], '--b', label = 'Colored noises')
plt.xlim([0, 1])
plt.ylim([0, np.max(cotas['Complexity'])+0.01])
plt.ylabel('Complexity [C]')
plt.xlabel('Entropy [H]')
plt.legend(loc = 'upper left', frameon=False)
plt.scatter(base['H'], base['C'], marker='.', s=15, c=base['JSD'], norm=plt.Normalize(vmax=1, vmin=0), cmap = 'tab20c')
plt.axvline(x=0.7, ymin=0, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.40, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.37, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.axhline(y=.34, xmin=0, xmax=0.7, linewidth=1.2, color='r', ls='-.')
plt.plot(.7, .40, 'o', color='r', linewidth=1)
plt.annotate('$p_1$', xy=(.71, .40))
plt.plot(.7, .37, 'o', color='r', linewidth=1)
plt.annotate('$p_2$', xy=(.71, .37))
plt.plot(.7, .34, 'o', color='r', linewidth=1)
plt.annotate('$p_3$', xy=(.71, .34))
plt.title('b)')

plt.show()
# -
Fig1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# ### osu!nn #2: rhythm estimator
#
# Builds up a rhythm model to estimate the rhythm using music and timing data.
#
# Synthesis of "flowData"
# * rhythmData x 1
# * (Audio) x 3
# * (Classifier) x 1
#
# Synthesis Time: ~5 mins
#
# Final edit: 2019/4/22

# + [markdown] deletable=true editable=true
# First of all, Import the wheels.
#
# "root" points to the folder that stores all the .npz map data, where all files in .npz extension are read.

# + deletable=true editable=true
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os, re

# + deletable=true editable=true
root = "mapdata/";

# updated! now we can set divisor to any number (needs to be consistent with other pages)
divisor = 4; # this is a global variable!
# Number of consecutive map ticks grouped into one training sequence.
time_interval = 16;

# + deletable=true editable=true
# lst file, [TICK, TIME, NOTE, IS_CIRCLE, IS_SLIDER, IS_SPINNER, IS_SLIDER_END, IS_SPINNER_END,
#            0,    1,    2,    3,         4,         5,          6,             7,
#           SLIDING, SPINNING, MOMENTUM, ANGULAR_MOMENTUM, EX1, EX2, EX3], length MAPTICKS
#           8,       9,        10,       11,               12,  13,  14,
# wav file, [len(snapsize), MAPTICKS, 2, fft_size//4]
def read_npz(fn):
    """Load one map .npz: returns (audio features, divisor features, labels)."""
    with np.load(fn) as data:
        wav_data = data["wav"];
        wav_data = np.swapaxes(wav_data, 2, 3);
        train_data = wav_data;
        div_source = data["lst"][:, 0];
        div_source2 = data["lst"][:, 12:15];
        div_data = np.concatenate([divisor_array(div_source), div_source2], axis=1);
        lst_data = data["lst"][:, 2:10];
        # Change the 0/1 data to -1/1 to use tanh instead of softmax in the NN.
        # Somehow tanh works much better than softmax, even if it is a linear combination. Maybe because it is alchemy!
        lst_data = 2 * lst_data - 1;
        train_labels = lst_data;
    return train_data, div_data, train_labels;

def divisor_array(t):
    """One-hot encode each tick's position within the beat divisor."""
    d_range = list(range(0, divisor));
    return np.array([[int(k % divisor == d) for d in d_range] for k in t]);

def read_npz_list():
    """Return paths of all .npz files under `root`."""
    npz_list = [];
    for file in os.listdir(root):
        if file.endswith(".npz"):
            npz_list.append(os.path.join(root, file));
    # return npz_list;
    return npz_list;

def prefilter_data(train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered):
    """Drop slider/spinner-end ticks and the two *_END label columns."""
    # Filter out slider ends from the training set, since we cannot reliably decide if a slider end is on a note.
    # Another way is to set 0.5 for is_note value, but that will break the validation algorithm.
    # Also remove the IS_SLIDER_END, IS_SPINNER_END columns which are left to be zeros.
    # Before: NOTE, IS_CIRCLE, IS_SLIDER, IS_SPINNER, IS_SLIDER_END, IS_SPINNER_END, SLIDING, SPINNING
    #         0,    1,         2,         3,          4,             5,              6,       7
    # After: NOTE, IS_CIRCLE, IS_SLIDER, IS_SPINNER, SLIDING, SPINNING
    #        0,    1,         2,         3,          4,       5
    non_object_end_indices = [i for i,k in enumerate(train_labels_unfiltered) if k[4] == -1 and k[5] == -1];
    train_data = train_data_unfiltered[non_object_end_indices];
    div_data = div_data_unfiltered[non_object_end_indices];
    train_labels = train_labels_unfiltered[non_object_end_indices][:, [0, 1, 2, 3, 6, 7]];
    # should be (X, 7, 32, 2) and (X, 6) in default sampling settings
    # (X, fft_window_type, freq_point, magnitude/phase)
    return train_data, div_data, train_labels;

def preprocess_npzs(train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered):
    """Prefilter, truncate to a multiple of time_interval, and reshape into
    (sequences, time_interval, ...) batches for the LSTM."""
    train_data, div_data, train_labels = prefilter_data(train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered);
    # In this version, the train data is already normalized, no need to do it again here
    # mean = train_data.mean(axis=0)
    # std = train_data.std(axis=0)
    # train_data = (train_data - np.tile(mean, (train_data.shape[0], 1,1,1))) / np.tile(std, (train_data.shape[0], 1,1,1))
    # Make time intervals from training data
    if train_data.shape[0]%time_interval > 0:
        train_data = train_data[:-(train_data.shape[0]%time_interval)];
        div_data = div_data[:-(div_data.shape[0]%time_interval)];
        train_labels = train_labels[:-(train_labels.shape[0]%time_interval)];
    train_data2 = np.reshape(train_data, (-1, time_interval, train_data.shape[1], train_data.shape[2], train_data.shape[3]))
    div_data2 = np.reshape(div_data, (-1, time_interval, div_data.shape[1]))
    train_labels2 = np.reshape(train_labels, (-1, time_interval, train_labels.shape[1]))
    return train_data2, div_data2, train_labels2;

def get_data_shape():
    """Peek at the first non-empty .npz to learn the per-sample shapes;
    falls back to the default shapes if none is found."""
    for file in os.listdir(root):
        if file.endswith(".npz"):
            train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered = read_npz(os.path.join(root, file));
            train_data, div_data, train_labels = prefilter_data(train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered);
            # should be (X, 7, 32, 2) and (X, 6) in default sampling settings
            # (X, fft_window_type, freq_point, magnitude/phase)
            # X = 76255
            # print(train_data.shape, train_labels.shape);
            if train_data.shape[0] == 0:
                continue;
            return train_data.shape, div_data.shape, train_labels.shape;
    print("cannot find npz!! using default shape");
    return (-1, 7, 32, 2), (-1, 3 + divisor), (-1, 6);

def read_some_npzs_and_preprocess(npz_list):
    """Read and concatenate the given .npz files, skipping files whose audio
    shape disagrees with the reference train_shape, then preprocess."""
    td_list = [];
    dd_list = [];
    tl_list = [];
    for fp in npz_list:
        if fp.endswith(".npz"):
            _td, _dd, _tl = read_npz(fp);
            if _td.shape[1:] != train_shape[1:]:
                print("Warning: something wrong found in {}! shape = {}".format(fp, _td.shape));
                continue;
            td_list.append(_td);
            dd_list.append(_dd);
            tl_list.append(_tl);
    train_data_unfiltered = np.concatenate(td_list);
    div_data_unfiltered = np.concatenate(dd_list);
    train_labels_unfiltered = np.concatenate(tl_list);

    train_data2, div_data2, train_labels2 = preprocess_npzs(train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered);
    return train_data2, div_data2, train_labels2;

def train_test_split(train_data2, div_data2, train_labels2, test_split_count=233):
    """Hold out the last `test_split_count` sequences as the test set."""
    new_train_data = train_data2[:-test_split_count];
    new_div_data = div_data2[:-test_split_count];
    new_train_labels = train_labels2[:-test_split_count];
    test_data = train_data2[-test_split_count:];
    test_div_data = div_data2[-test_split_count:];
    test_labels = train_labels2[-test_split_count:];
    return (new_train_data, new_div_data, new_train_labels), (test_data, test_div_data, test_labels);

# (train_data_unfiltered, div_data_unfiltered, train_labels_unfiltered) = read_all_npzs();
train_file_list = read_npz_list();
train_shape, div_shape, label_shape = get_data_shape();

# + [markdown] deletable=true editable=true
# Now build the model.
# + deletable=true editable=true
from tensorflow.keras.models import Model;

def build_model():
    """Build and compile the rhythm model: per-timestep CNN over the audio
    spectrogram features, an LSTM across the time_interval axis, then dense
    layers over [LSTM output, divisor features] producing tanh labels."""
    model1 = keras.Sequential([
        keras.layers.TimeDistributed(keras.layers.Conv2D(16, (2, 2), data_format='channels_last'),
                                     input_shape=(time_interval, train_shape[1], train_shape[2], train_shape[3])),
        keras.layers.TimeDistributed(keras.layers.MaxPool2D((1, 2), data_format='channels_last')),
        keras.layers.TimeDistributed(keras.layers.Activation(activation=tf.nn.relu)),
        keras.layers.TimeDistributed(keras.layers.Dropout(0.3)),
        keras.layers.TimeDistributed(keras.layers.Conv2D(16, (2, 3), data_format='channels_last')),
        keras.layers.TimeDistributed(keras.layers.MaxPool2D((1, 2), data_format='channels_last')),
        keras.layers.TimeDistributed(keras.layers.Activation(activation=tf.nn.relu)),
        keras.layers.TimeDistributed(keras.layers.Dropout(0.3)),
        keras.layers.TimeDistributed(keras.layers.Flatten()),
        keras.layers.LSTM(64, activation=tf.nn.tanh, return_sequences=True)
    ])
    # Second input: per-tick divisor / extra features, concatenated with the LSTM output.
    input2 = keras.layers.InputLayer(input_shape=(time_interval, div_shape[1]));
    conc = keras.layers.concatenate([model1.output, input2.output]);
    dense1 = keras.layers.Dense(71, activation=tf.nn.tanh)(conc);
    dense2 = keras.layers.Dense(71, activation=tf.nn.relu)(dense1);
    dense3 = keras.layers.Dense(label_shape[1], activation=tf.nn.tanh)(dense2);
    # TF2 vs TF1 optimizer API compatibility.
    try:
        optimizer = tf.optimizers.RMSprop(0.001) #Adamoptimizer?
    except:
        optimizer = tf.train.RMSPropOptimizer(0.001) #Adamoptimizer?
    final_model = Model(inputs=[model1.input, input2.input], outputs=dense3);
    final_model.compile(loss='mse',
                        optimizer=optimizer,
                        metrics=[keras.metrics.mae])
    return final_model

model = build_model()
model.summary()

def plot_history(history):
    """Plot train/validation MAE and loss over the training epochs."""
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [Limitless]')
    plt.plot(history.epoch, np.array(history.history['mean_absolute_error']),
             label='Train MAE')
    plt.plot(history.epoch, np.array(history.history['val_mean_absolute_error']),
             label = 'Val MAE')
    plt.plot(history.epoch, np.array(history.history['loss']),
             label='Train Loss')
    plt.plot(history.epoch, np.array(history.history['val_loss']),
             label = 'Val Loss')
    plt.legend()
    plt.show()

# Display training progress by printing a single dot for each completed epoch.
class PrintDot(keras.callbacks.Callback):
    def on_epoch_end(self,epoch,logs):
        if epoch % 100 == 0: print('')
        print('.', end='')

early_stop = keras.callbacks.EarlyStopping(monitor='mean_absolute_error', patience=20)

# + [markdown] deletable=true editable=true
# Round and round (the training loop).
#
# it seems that with GPU training, "batch_size" must be set to a smaller value like 10 (default is 32), otherwise it will crash. probably because there is not enough GPU memory.

# + deletable=true editable=true
# Don't worry, it will successfully overfit after those 16 epochs.
EPOCHS = 16

# since each map npz is about 6mb, this amounts to around 1200mb of RAM.
too_many_maps_threshold = 200
data_split_count = 80

# if there is too much data, reduce epoch count (hmm)
if len(train_file_list) >= too_many_maps_threshold:
    EPOCHS = 6

if len(train_file_list) < too_many_maps_threshold:
    # Small dataset: load everything once and train in a single fit() call.
    train_data2, div_data2, train_labels2 = read_some_npzs_and_preprocess(train_file_list);

    # Split some test data out
    (new_train_data, new_div_data, new_train_labels), (test_data, test_div_data, test_labels) = train_test_split(train_data2, div_data2, train_labels2);

    # Store training stats
    history = model.fit([new_train_data, new_div_data], new_train_labels, epochs=EPOCHS,
                        validation_split=0.2, verbose=0, #batch_size=10,
                        callbacks=[early_stop, PrintDot()])

    # For development! may cause bug in some environment.
    plot_history(history)
else: # too much data! read it every turn.
    # Large dataset: stream map batches from disk each epoch; the test split
    # is taken from the first batch only.
    for epoch in range(EPOCHS):
        for map_batch in range(np.ceil(len(train_file_list) / data_split_count).astype(int)):
            # hmmmmm
            if map_batch == 0:
                train_data2, div_data2, train_labels2 = read_some_npzs_and_preprocess(train_file_list[map_batch * data_split_count : (map_batch+1) * data_split_count]);
                (new_train_data, new_div_data, new_train_labels), (test_data, test_div_data, test_labels) = train_test_split(train_data2, div_data2, train_labels2);
            else:
                new_train_data, new_div_data, new_train_labels = read_some_npzs_and_preprocess(train_file_list[map_batch * data_split_count : (map_batch+1) * data_split_count]);

            history = model.fit([new_train_data, new_div_data], new_train_labels, epochs=1,
                                validation_split=0.2, verbose=0, #batch_size=10,
                                callbacks=[])
            # Manually print the dot
            print('.', end='');
        print('');

[loss, mae] = model.evaluate([test_data, test_div_data], test_labels, verbose=0)

print("\nTesting set Mean Abs Error: {}".format(mae))
# print(test_predictions)
# print(test_labels)
# print(test_predictions - list(test_labels))
# print("Mean Abs Error: "+str(np.mean(np.abs(test_predictions - test_labels))))

# + [markdown] deletable=true editable=true
# Print the testing accuracy of the model (using F1-score), and compare with the accuracy of a random result, then in addition print the accuracy of individual columns.
#
# For the Sota dataset, it should get around 0.6 overall score, 0.77 for is_note, and something much smaller for is_circle, is_slider and is_spinner. It may also throw a warning because there is no spinner predicted/actually present.
#
# This is not a very high accuracy - but it is not really a problem; map rhythm does not fully correlate to the music itself. There are overmaps, innovative rhythms... and we can also learn from some of them!

# + deletable=true editable=true
from sklearn.metrics import f1_score

test_predictions = model.predict([test_data, test_div_data]).reshape((-1, time_interval, label_shape[1]))

flat_test_preds = test_predictions.reshape(-1, label_shape[1]);
flat_test_labels = test_labels.reshape(-1, label_shape[1]);

# Map tanh outputs / -1,1 labels back to 0/1 via the sign.
pred_result = (np.sign(flat_test_preds) + 1) / 2
actual_result = (flat_test_labels + 1) / 2
# Random baseline for comparison.
random_result = (1 + np.sign(-1 + 2 * np.random.random(size=pred_result.shape))) / 2;

is_obj_pred = (1 + np.sign(flat_test_preds[:, 0:1])) / 2;
# Argmax over the three object-type columns, expressed via sign (2 for the max, 0 otherwise).
obj_type_pred = np.sign(flat_test_preds[:, 1:4] - np.tile(np.expand_dims(np.max(flat_test_preds[:, 1:4], axis=1), 1), (1, 3))) + 1;
# Looser threshold (-0.5) for the sliding/spinning columns.
others_pred = (1 + np.sign(flat_test_preds[:, 4:label_shape[1]] + 0.5)) / 2;

# Only predict obj_type when there is an object!
another_pred_result = np.concatenate([is_obj_pred, is_obj_pred * obj_type_pred, others_pred], axis=1);

print(f1_score(actual_result.flatten(), pred_result.flatten()));
print(f1_score(actual_result.flatten(), another_pred_result.flatten()));
print(f1_score(actual_result.flatten(), random_result.flatten()));

# Individual column predictions
column_names = ["is_note", "is_circle", "is_slider", "is_spinner", "is_sliding", "is_spinning"];
for i, k in enumerate(column_names):
    print("{} f1_score: {} from {}".format(k, f1_score(another_pred_result[:, i], actual_result[:, i]), f1_score(random_result[:, i], actual_result[:, i])))

# + [markdown] deletable=true editable=true
# Done! now save the model to the disk.

# + deletable=true editable=true
tf.keras.models.save_model(
    model,
    "saved_rhythm_model",
    overwrite=True,
    include_optimizer=True,
    save_format="h5"
);
# WARNING:tensorflow:TensorFlow optimizers do not make it possible to access optimizer attributes or optimizer
# state after instantiation. As a result, we cannot save the optimizer as part of the model save file.You will
# have to compile your model again after loading it. Prefer using a Keras optimizer instead (see keras.io/optimizers).
v6.2/02_osurhythm_estimator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/codefupanda/customer_interaction_summary/blob/master/Build_On_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="Fa_qqZjwg6j_" # The basics import numpy as np import pandas as pd import os import subprocess # + [markdown] id="8_hCeMLah2Dy" # **Mount G-Drive to download or save files** # + id="MJfdLzT4goUe" outputId="6ef4b49b-30d0-4084-ca29-1cc873f96b91" colab={"base_uri": "https://localhost:8080/", "height": 34} from google.colab import drive drive.mount('/content/gdrive', force_remount=True) # + [markdown] id="UzLqCdQBiJWh" # **Copy Kaggle configuration JSON file to download data from kaggle** # + id="VvUte8IDlLAp" # !mkdir /root/.kaggle/ # + id="W_q5FyBnhEnq" # !cp "/content/gdrive/My Drive/Colab Notebooks/.kaggle/kaggle.json" '/root/.kaggle/kaggle.json' # + [markdown] id="c1t4fyGNiP1N" # **Clone the code from github** # + id="hqhpjCDFhcrm" outputId="c441845f-c4b6-44c7-aef6-554efe1ed76e" colab={"base_uri": "https://localhost:8080/", "height": 34} # !git clone https://github.com/codefupanda/customer_interaction_summary.git # + [markdown] id="Kh4I4nijicA_" # **Init step**: Download required data and dependencies # + id="ZBnaF3HMgwUP" # !cd customer_interaction_summary && make requirements && make data # + [markdown] id="JEV5tioAijO_" # **TRAIN**: Train the model, src /models/model_configs.py as the configuration for which models to train # + id="QO6Z1fYr8-hc" outputId="c1d59fcf-8917-488f-f923-4768cdc116be" colab={"base_uri": "https://localhost:8080/", "height": 204} # !cd customer_interaction_summary && git pull # + id="qTmEp5s1Mwrh" outputId="ad2fe437-cbec-4656-f8d6-0ac3415d39bc" 
colab={"base_uri": "https://localhost:8080/", "height": 102} # !cd customer_interaction_summary && python3 src/data/make_dataset.py data/raw data/processed # + id="JeWq0IweDOxv" # !cd customer_interaction_summary && rm -rf random_search # + id="FrTjHi_3hGQE" outputId="abfe5ebf-9668-4c1d-bc8c-bf288bd11f6f" colab={"base_uri": "https://localhost:8080/", "height": 1000} # !cd customer_interaction_summary && make train # + [markdown] id="kyd6NyWzjLOP" # **Results are ready** # + id="3fh70EMDhMTT" final_report = pd.read_csv("./customer_interaction_summary/models/final_report.csv") # + id="kgAzlE3GlbPO" outputId="bb194bed-0a0b-4616-e3aa-04379a536b18" colab={"base_uri": "https://localhost:8080/", "height": 235} final_report[final_report['Unnamed: 1'] == 'f1-score'] # + id="uh8Sgo6xsbhp" outputId="7613f38d-1653-4c18-f15e-3d7d88171d90" colab={"base_uri": "https://localhost:8080/", "height": 235} final_report[final_report['Unnamed: 1'] == 'recall'] # + id="rDTFj96Du_Kj" outputId="02484c89-1c60-4e6b-dc42-6135fb6d3fb2" colab={"base_uri": "https://localhost:8080/", "height": 235} final_report[final_report['Unnamed: 1'] == 'precision'] # + id="9tyt4oKV2MIU" # + id="b9eg6kaj2OXf" # !cp "./customer_interaction_summary/models/final_report.csv" "/content/gdrive/My Drive/Colab Notebooks/.kaggle/" # + id="LzHbHAXL9M1f" outputId="4c83354b-5292-462e-8978-0e8dcd8d5a0b" colab={"base_uri": "https://localhost:8080/", "height": 34} # !ls "/content/gdrive/My Drive/Colab Notebooks/.kaggle/" # + id="LlCoNjIx9URv" # from google.colab import files # files.download('./customer_interaction_summary/models/final_report.csv') # + id="bOSHMbAqIy_F" outputId="c28b3d8f-0b5f-4b6d-bd94-b64ba888cfb0" colab={"base_uri": "https://localhost:8080/", "height": 34} # !ls "./customer_interaction_summary/models/" # + id="vHdprvARCSVG" outputId="72b13dff-cc6a-4c36-d712-0af0056ce5f6" colab={"base_uri": "https://localhost:8080/", "height": 34} # !cat 
"./customer_interaction_summary/models/StackedBiLSTM_hyperparameters.json" # + id="kzU1LHtxCXsw"
Build_On_Colab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Get The Data

# ## Download The Data

# Import all the necessary packages
import os
import tarfile
# FIX: the original `import urllib` does not import the `request` submodule,
# so `urllib.request.urlretrieve` below can raise AttributeError in a fresh
# interpreter. Importing the submodule explicitly is backward compatible.
import urllib.request

# Path to the dataset directory and the archive URL
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("dataset", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"


def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    """Download the housing tarball and extract it under *housing_path*.

    Parameters
    ----------
    housing_url : str
        URL of the ``housing.tgz`` archive to download.
    housing_path : str
        Local directory (created if missing) where the archive is saved
        and extracted.
    """
    os.makedirs(housing_path, exist_ok=True)
    tgz_path = os.path.join(housing_path, "housing.tgz")
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Context manager guarantees the archive is closed even if extraction fails
    # (the original opened/closed it manually and leaked on error).
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)


fetch_housing_data()

# Loading the data using pandas
import pandas as pd


def load_housing_data(housing_path=HOUSING_PATH):
    """Return the extracted ``housing.csv`` as a pandas DataFrame."""
    csv_path = os.path.join(housing_path, "housing.csv")
    return pd.read_csv(csv_path)


# ## Take A Quick Look At The Data Structure

# Get an understanding of the data
housing = load_housing_data()
housing.head()

# Check the type of data we are dealing with
housing.info()

# We see that attribute "total_bedrooms" is missing some values. 
#Checking The Categories housing["ocean_proximity"].value_counts() housing.describe() #Display The Data In The Form Of Histogram # %matplotlib inline import matplotlib.pyplot as plt housing.hist(bins=50, figsize=(20,15)) plt.show() # ## Create A Test Set #To Get The Same Shuffled Indices On Every Run import numpy as np np.random.seed(42) #Create A Function To Split Data def split_train_test(data, test_ratio): shuffled_indices = np.random.permutation(len(data)) test_set_size = int(len(data) * test_ratio) test_indices = shuffled_indices[:test_set_size] train_indices = shuffled_indices[test_set_size:] return data.iloc[train_indices], data.iloc[test_indices] # We split the data into training set(80%) and test set(20%). train_set, test_set = split_train_test(housing, 0.2) len(train_set) len(test_set) # + from zlib import crc32 def test_set_check(identifier, test_ratio): return crc32(np.int64(identifier)) & 0xffffffff < test_ratio *2**32 def split_train_test_by_id(data, test_ratio, id_column): ids = data[id_column] in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio)) return data.loc[~in_test_set], data.loc[in_test_set] # - housing_with_id = housing.reset_index() #Adds An 'Index' Column train_set, test_set = split_train_test_by_id(housing_with_id,0.2, "index") test_set.head() housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"] train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id") test_set.head() from sklearn.model_selection import train_test_split train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42) test_set.head() housing["median_income"].hist() # We convert the "median_income" attribute, which is a continuous attribute into a catergorical attribute. With labels as 1, 2, 3, 4, 5. In the cells below. 
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0., 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5]) housing["income_cat"].hist() # + from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(housing, housing["income_cat"]): strat_train_set = housing.loc[train_index] strat_test_set = housing.loc[test_index] # - strat_test_set["income_cat"].value_counts() / len(strat_test_set) #Remove The income_cat attribute for set_ in (strat_train_set, strat_test_set): set_.drop("income_cat", axis=1, inplace=True) # # Discover And Visualize The Data To Gain Insights # ## Visualizing Geographical Data #Create A Copy housing = strat_train_set.copy() #Create A Scatter Plot To Visualize The Data housing.plot(kind="scatter", x="longitude",y="latitude") #It Is Hard To Visualize The Data Just Like That #We Visualize Area With High Density housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1) #We Make A More Informative Scatter Plot housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4, s=housing["population"]/100, label="population", figsize=(10,7), c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True, sharex=False) plt.legend() # ## Looking For Correlations #Looking For Correlations corr_matrix = housing.corr() corr_matrix["median_house_value"].sort_values(ascending=False) # We see that there is a strong positive correlation between between median_house_value and median_income. # + #Using Pandas scatter_matrix() To Check Correlation from pandas.plotting import scatter_matrix attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"] scatter_matrix(housing[attributes], figsize=(12,8)) # - # We plot a scatter matrix of 4 attributes (median_house_value, median_income, total_rooms, and housing_median_age). # We also get a histogram of each attribute in the diagonal. 
#Have A Closer Look At Median Income housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1) # ## Experimenting With Attribute Combinations housing["rooms_per_household"]=housing["total_rooms"]/housing["households"] housing["bedrooms_per_room"]=housing["total_bedrooms"]/housing["total_rooms"] housing["population_per_household"]=housing["population"]/housing["households"] corr_matrix = housing.corr() corr_matrix["median_house_value"].sort_values(ascending=False) # # Prepare The Data For Machine Learning Algorithms housing = strat_train_set.drop("median_house_value", axis=1) housing_labels = strat_train_set["median_house_value"].copy() # ## Data Cleaning #We Can Drop The Missing Values housing.dropna(subset=["total_bedrooms"]) # We Can Find The Median Value And Substitute It For The Missing Values median = housing["total_bedrooms"].median() housing["total_bedrooms"].fillna(median, inplace=True) housing from sklearn.impute import SimpleImputer imputer = SimpleImputer(strategy = "median") # We Have To Remove The Text Attribute, Since Median Calculates Numerical Attributes Only housing_num = housing.drop("ocean_proximity", axis = 1) imputer.fit(housing_num) imputer.statistics_ # Checking If The Values Are The Same When Calculated Manually housing_num.median().values # Transform The Training Set X = imputer.transform(housing_num) X # Since This Is In NumPy Array We Can Convert It Into pandas DataFrame housing_tr = pd.DataFrame(X, columns = housing_num.columns, index=housing_num.index) housing_tr # ## Handling Text And Categorical Attributes housing_cat = housing[["ocean_proximity"]] housing_cat.head(10) from sklearn.preprocessing import OrdinalEncoder ordinal_encoder = OrdinalEncoder() housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat) housing_cat_encoded[:10] ordinal_encoder.categories_ from sklearn.preprocessing import OneHotEncoder cat_encoder = OneHotEncoder() housing_cat_1hot = cat_encoder.fit_transform(housing_cat) 
housing_cat_1hot housing_cat_1hot.toarray() # ## Custom Transformers # ### Creating A Custom Transformer To Add Extra Attributes # + from sklearn.base import BaseEstimator, TransformerMixin # column index rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6 class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self # nothing else to do def transform(self, X): rooms_per_household = X[:, rooms_ix] / X[:, households_ix] population_per_household = X[:, population_ix] / X[:, households_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) # - housing_extra_attribs = pd.DataFrame( housing_extra_attribs, columns=list(housing.columns)+["rooms_per_household", "population_per_household"], index=housing.index) housing_extra_attribs.head() # ## Transformation Pipelines # + from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler num_pipeline = Pipeline([ ('imputer', SimpleImputer(strategy='median')), ('attribs_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler()) ]) housing_num_tr = num_pipeline.fit_transform(housing_num) housing_num_tr # + from sklearn.compose import ColumnTransformer num_attribs = list(housing_num) cat_attribs = ["ocean_proximity"] full_pipeline = ColumnTransformer([ ('num', num_pipeline, num_attribs), ('cat', OneHotEncoder(), cat_attribs), ]) housing_prepared = full_pipeline.fit_transform(housing) housing_prepared # - print("Shape:", housing_prepared.shape) # # Selecting And Training A Model # ### Training And Evaluating On The 
Training Set # + # We Use A Linear Regression Model from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(housing_prepared, housing_labels) # - # Checking The Model On Some Training Instances some_data = housing.iloc[:5] some_labels = housing_labels.iloc[:5] some_data_prepared = full_pipeline.transform(some_data) print("Predictions:", lin_reg.predict(some_data_prepared)) print("Labels:", list(some_labels)) # + # Calculating The Root Mean Squared Error from sklearn.metrics import mean_squared_error housing_predictions = lin_reg.predict(housing_prepared) lin_mse = mean_squared_error(housing_labels, housing_predictions) lin_rmse = np.sqrt(lin_mse) lin_rmse # + # Using A Decision Tree Regression Model from sklearn.tree import DecisionTreeRegressor tree_reg = DecisionTreeRegressor() tree_reg.fit(housing_prepared, housing_labels) # - # Checking Our Model On Some Training Instances housing_predictions = tree_reg.predict(housing_prepared) tree_mse = mean_squared_error(housing_labels, housing_predictions) tree_rmse = np.sqrt(tree_mse) tree_rmse # ## Better Evaluation Using Cross-Validation # # + # Cross Validation On Decision Tree Regression from sklearn.model_selection import cross_val_score scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10) tree_rmse_scores = np.sqrt(-scores) # - # Creating A Function To Print The scores, mean, standard deviation def display_scores(scores): print("Scores:", scores) print("Mean:", scores.mean()) print("Standard Deviation", scores.std()) display_scores(tree_rmse_scores) # Cross Validation On Linear Regression lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10) lin_rmse_scores = np.sqrt(-lin_scores) display_scores(lin_rmse_scores) # + # Using Random Forest Regression from sklearn.ensemble import RandomForestRegressor forest_reg = RandomForestRegressor(n_estimators=100, random_state=42) 
forest_reg.fit(housing_prepared, housing_labels) # - housing_predictions = forest_reg.predict(housing_prepared) forest_mse = mean_squared_error(housing_labels, housing_predictions) forest_rmse = np.sqrt(forest_mse) forest_rmse # + from sklearn.model_selection import cross_val_score forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10) forest_rmse_scores = np.sqrt(-forest_scores) display_scores(forest_rmse_scores) # - # ## Fine Tuning The Model # ### Grid Search # + from sklearn.model_selection import GridSearchCV param_grid = [ # try 12 (3×4) combinations of hyperparameters {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]}, # then try 6 (2×3) combinations with bootstrap set as False {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, ] forest_reg = RandomForestRegressor(random_state=42) # train across 5 folds, that's a total of (12+6)*5=90 rounds of training grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', return_train_score=True) grid_search.fit(housing_prepared, housing_labels) # - grid_search.best_params_ grid_search.best_estimator_ cvres = grid_search.cv_results_ for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]): print(np.sqrt(-mean_score), params) pd.DataFrame(grid_search.cv_results_) # ## Ensemble Methods # ### Analyse The Best Models And Their Errors feature_importances = grid_search.best_estimator_.feature_importances_ feature_importances extra_attribs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_rooms"] cat_encoder = full_pipeline.named_transformers_["cat"] cat_one_hot_attribs = list(cat_encoder.categories_[0]) attributes = num_attribs + extra_attribs +cat_one_hot_attribs sorted(zip(feature_importances, attributes), reverse = True) # ### Evaluating The System On The Test Set # + final_model = grid_search.best_estimator_ X_test = strat_test_set.drop("median_house_value", axis = 1) y_test 
= strat_test_set["median_house_value"].copy() X_test_prepared = full_pipeline.transform(X_test) final_predictions = final_model.predict(X_test_prepared) final_mse = mean_squared_error(y_test, final_predictions) final_rmse = np.sqrt(final_mse) # - final_rmse from scipy import stats confidence = 0.95 squared_errors = (final_predictions - y_test)**2 np.sqrt(stats.t.interval(confidence, len(squared_errors) - 1, loc=squared_errors.mean(), scale=stats.sem(squared_errors)))
Housing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Prediction (out of sample) # %matplotlib inline # + import numpy as np import matplotlib.pyplot as plt import statsmodels.api as sm plt.rc("figure", figsize=(16, 8)) plt.rc("font", size=14) # - # ## Artificial data nsample = 50 sig = 0.25 x1 = np.linspace(0, 20, nsample) X = np.column_stack((x1, np.sin(x1), (x1 - 5) ** 2)) X = sm.add_constant(X) beta = [5.0, 0.5, 0.5, -0.02] y_true = np.dot(X, beta) y = y_true + sig * np.random.normal(size=nsample) # ## Estimation olsmod = sm.OLS(y, X) olsres = olsmod.fit() print(olsres.summary()) # ## In-sample prediction ypred = olsres.predict(X) print(ypred) # ## Create a new sample of explanatory variables Xnew, predict and plot x1n = np.linspace(20.5, 25, 10) Xnew = np.column_stack((x1n, np.sin(x1n), (x1n - 5) ** 2)) Xnew = sm.add_constant(Xnew) ynewpred = olsres.predict(Xnew) # predict out of sample print(ynewpred) # ## Plot comparison # + import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.plot(x1, y, "o", label="Data") ax.plot(x1, y_true, "b-", label="True") ax.plot(np.hstack((x1, x1n)), np.hstack((ypred, ynewpred)), "r", label="OLS prediction") ax.legend(loc="best") # - # ## Predicting with Formulas # Using formulas can make both estimation and prediction a lot easier # + from statsmodels.formula.api import ols data = {"x1": x1, "y": y} res = ols("y ~ x1 + np.sin(x1) + I((x1-5)**2)", data=data).fit() # - # We use the `I` to indicate use of the Identity transform. Ie., we do not want any expansion magic from using `**2` res.params # Now we only have to pass the single variable and we get the transformed right-hand side variables automatically res.predict(exog=dict(x1=x1n))
examples/notebooks/predict.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import geemap Map = geemap.Map() Map countries_shp = '../data/countries.shp' countries = geemap.shp_to_ee(countries_shp) Map.addLayer(countries, {}, 'Countries') states_shp = '../data/us-states.shp' states = geemap.shp_to_ee(states_shp) Map.addLayer(states, {}, 'US States') cities_shp = '../data/us-cities.shp' cities = geemap.shp_to_ee(cities_shp) Map.addLayer(cities, {}, 'US Cities') geemap.ee_to_shp(countries, filename='../data/countries_new.shp') geemap.ee_export_vector(states, filename='../data/states.csv') geemap.ee_export_vector(states, filename='../data/states.json') geemap.ee_export_vector(states, filename='../data/states.kml') geemap.ee_export_vector(states, filename='../data/states.kmz')
examples/notebooks/shapefiles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Test Windowed Positions in SEP # Both SExtractor and SEP windowed positions fail when sources have nearby neighbors. This notebook illustrates the problem and a solution that works for most sources unless they have a very nearby neighbor. # + from __future__ import division, print_function import os import numpy as np from astropy.io import fits from astropy.table import Table import astropy.wcs from astropy.nddata import extract_array import sep from astropyp.db_utils import index import astropyp % matplotlib inline import matplotlib import matplotlib.pyplot as plt # Astropy gives a lot of warnings that # are difficult to filter individually import warnings from astropy.utils.exceptions import AstropyWarning warnings.simplefilter('ignore', category=AstropyWarning) warnings.simplefilter('ignore', category=UserWarning) # 9x9 Gaussian Filter conv_filter = np.array([ [0.030531, 0.065238, 0.112208, 0.155356, 0.173152, 0.155356, 0.112208, 0.065238, 0.030531], [0.065238, 0.139399, 0.239763, 0.331961, 0.369987, 0.331961, 0.239763, 0.139399, 0.065238], [0.112208, 0.239763, 0.412386, 0.570963, 0.636368, 0.570963, 0.412386, 0.239763, 0.112208], [0.155356, 0.331961, 0.570963, 0.790520, 0.881075, 0.790520, 0.570963, 0.331961, 0.155356], [0.173152, 0.369987, 0.636368, 0.881075, 0.982004, 0.881075, 0.636368, 0.369987, 0.173152], [0.155356, 0.331961, 0.570963, 0.790520, 0.881075, 0.790520, 0.570963, 0.331961, 0.155356], [0.112208, 0.239763, 0.412386, 0.570963, 0.636368, 0.570963, 0.412386, 0.239763, 0.112208], [0.065238, 0.139399, 0.239763, 0.331961, 0.369987, 0.331961, 0.239763, 0.139399, 0.065238], [0.030531, 0.065238, 0.112208, 0.155356, 0.173152, 0.155356, 0.112208, 0.065238, 0.030531] ]) # Location of files basepath = 
'/media/data-beta/users/fmooleka/2016decam' temp_path = '/media/data-beta/users/fmooleka/temp' aper_radius=8 # Set filenames new_img_filename = os.path.join(temp_path, 'F100_i_7.fits') new_dqmask_filename = os.path.join(temp_path, 'F100_i_7.dqmask.fits') # - # #Run SEP to extract sources # + # Load the data from the file img_data = fits.getdata(new_img_filename, 1) dqmask_data = fits.getdata(new_dqmask_filename, 1) # Convert data array from Big Endian to Little Endian img_data = img_data.byteswap().newbyteorder() dqmask_data = dqmask_data.byteswap().newbyteorder() # Subtract the background bkg = sep.Background(img_data, mask=dqmask_data) bkg.subfrom(img_data) thresh = 1.5 * bkg.globalrms # SEP 'extract' detection parameters sep_params = { 'extract': { 'thresh': 50, 'conv': conv_filter, 'deblend_cont': 0.001, }, } # Extract sources using SEP objs = sep.extract(img_data, mask=dqmask_data, **sep_params['extract']) # Calculate the Kron Radius kronrad, krflag = sep.kron_radius(img_data, objs['x'], objs['y'], objs['a'], objs['b'], objs['theta'], aper_radius) # Calculate the equivalent of FLUX_AUTO flux, fluxerr, flag = sep.sum_ellipse(img_data, objs['x'], objs['y'], objs['a'], objs['b'], objs['theta'], 2.5*kronrad,subpix=1) r, flag = sep.flux_radius(img_data, objs['x'], objs['y'], aper_radius*objs['a'], 0.5, normflux=flux, subpix=5) # Convert the flux to a magnitude mag = -2.5*np.log10(flux) # Calculate the windowed positions sig = 2. 
/ 2.35 * r xwin, ywin, flag = sep.winpos(img_data, objs['x'], objs['y'], sig) # Convert the observations into a table and add columns for the windowed positions objs = Table(objs) objs['xwin'] = xwin objs['ywin'] = ywin # - # #SEP Results # For this image there are only two sources with a winpos flag set, so the plots are nearly identical, but several positions are still off by tens of pixels # + # Remove sources flagged by SEP extract cuts = objs['flag']==0 # Change in an objects position dr = np.sqrt((objs['x']-objs['xwin'])**2+(objs['y']-objs['ywin'])**2) # Plot the change in all sources positions plt.plot(mag[cuts],dr[cuts], '.') plt.xlabel('Instrumental Magnitude (mag)') plt.ylabel('Change from X to XWIN (px)') plt.show() # Remove sources with bad windowed positions cuts = cuts & (flag==0) # Plot the change in sources with with good windowed positions plt.plot(mag[cuts],dr[cuts], '.') plt.title('Winpos with FLAGS_WIN==0') plt.xlabel('Instrumental Magnitude (mag)') plt.ylabel('Change from X to XWIN (px)') plt.show() # - # # Relationship between Kron Radius and Bad Windowed Positions # The top plot shows that there is a very small difference in Kron Radii in the magnitude range covered by the detector, with a handfull of sources that have particularly large Kron Radii. # # The middle plot shows that almost all sources with a change in position of more than 1 pixel are likely to have unsusually large Kron Radii. # # The bottom plots shows that with only two exceptions, all of the sources with a change in position of more than 1 pix are bad windowed positions, likely due to nearby objects that are much brighter that confuse the routine to calculate the Kron Radius. 
# + # Plot the different Kron radii as a function of magnitude plt.plot(mag[cuts], r[cuts], '.') plt.xlabel('Mag Auto') plt.ylabel('Kron radius') plt.show() # Plot the change in position as a function of Kron Radius plt.plot(r[cuts], dr[cuts], '.') # Plot the line for position change = 2 max_diff = 1.0 plt.plot([0,20],[max_diff,max_diff]) plt.plot(r[cuts&(dr>max_diff)], dr[cuts&(dr>max_diff)], '.') plt.xlabel('Kron Radius') plt.ylabel('Change in Position (px)') plt.show() bigdiff = (dr>max_diff)&cuts print('Total non-flagged objects with a change of more than {0} pix: {1}'.format( max_diff, np.sum(bigdiff))) # Plot patch centered on each source with a change in pixel position greater than max_diff for obj in objs[bigdiff]: img = extract_array(img_data,(51,51), (obj['y'],obj['x']), mode='trim') yrad,xrad = img.shape[0]/2.,img.shape[1]/2. xrad = 50-xrad yrad = 50-yrad plt.imshow(img,interpolation='none') plt.scatter(xrad,yrad, c='none', edgecolors='r', s=200) plt.scatter(xrad-(obj['x']-obj['xwin']),yrad-(obj['y']-obj['ywin']), c='none', edgecolors='r', s=200) plt.show() # - # #Fix bad windowed positions # Find a reasonable maximum Kron Radius and calculate the mean Kron Radius for all of the sources below the max value. Set all of the sources with a Kron Radius > max_radius to the mean value. # + max_radius = 2.2#np.mean(r) rcut = r>max_radius new_radius = np.mean(r[~rcut]) plt.plot(mag[cuts], r[cuts], '.') plt.plot(mag[cuts&rcut], r[cuts&rcut], 'r.') plt.xlabel('Mag Auto') plt.ylabel('Kron radius') plt.show() modified_r = r.copy() modified_r[rcut] = new_radius plt.plot(mag[cuts], modified_r[cuts], '.') plt.plot(mag[cuts&rcut], modified_r[cuts&rcut], 'r.') plt.xlabel('Mag Auto') plt.ylabel('Kron radius') plt.show() # - # #Recalculate windowed positions # Using the new Kron Radii, recalculate the windowed positions # Re-calculate the windowed positions sig = 2. 
/ 2.35 * modified_r xwin, ywin, flag = sep.winpos(img_data, objs['x'], objs['y'], sig) # Convert the observations into a table and add columns for the windowed positions objs['xwin'] = xwin objs['ywin'] = ywin # #Results using the New Winpos # Using the new winpos there are significantly fewer sources with positions undergoing a significant change (ie. more than a pixel). # # Most of the sources that had bad positions previously have now been corrected. # + # Remove sources flagged by SEP extract cuts = objs['flag']==0 # Change in an objects position dr = np.sqrt((objs['x']-objs['xwin'])**2+(objs['y']-objs['ywin'])**2) # Remove sources with bad windowed positions cuts = cuts & (flag==0) # Plot the change in sources with with good windowed positions plt.plot(mag[cuts],dr[cuts], '.') plt.title('Winpos with FLAGS_WIN==0') plt.xlabel('Instrumental Magnitude (mag)') plt.ylabel('Change from X to XWIN (px)') plt.show() # Plot the sources that previously had bad windowed positions for obj in objs[bigdiff]: img = extract_array(img_data,(51,51), (obj['y'],obj['x']), mode='trim') yrad,xrad = img.shape[0]/2.,img.shape[1]/2. xrad = 50-xrad yrad = 50-yrad plt.imshow(img,interpolation='none') plt.scatter(xrad,yrad, c='none', edgecolors='r', s=200) plt.scatter(xrad-(obj['x']-obj['xwin']),yrad-(obj['y']-obj['ywin']), c='none', edgecolors='r', s=200) plt.show() # - # #Other bad windowed positions # Looking at the sources that still appear to have bad windowed positions we see that the majority of them have other sources nearby. This appears to be a problem with the SExtractor algorithm and indicates that one should be cautious about using windowed positions in crowded fields or for sources with nearby neighbors. 
# + bigdiff = (dr>max_diff)&cuts print('Total non-flagged objects with a change of more than {0} pix: {1}'.format( max_diff, np.sum(bigdiff))) # Plot patch centered on each source with a change in pixel position greater than max_diff for obj in objs[bigdiff]: img = extract_array(img_data,(51,51), (obj['y'],obj['x']), mode='trim') yrad,xrad = img.shape[0]/2.,img.shape[1]/2. xrad = 50-xrad yrad = 50-yrad plt.imshow(img,interpolation='none') plt.scatter(xrad,yrad, c='none', edgecolors='r', s=200) plt.scatter(xrad-(obj['x']-obj['xwin']),yrad-(obj['y']-obj['ywin']), c='none', edgecolors='r', s=200) plt.show() # -
examples/.ipynb_checkpoints/test_winpos-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/HisakaKoji/pytorch_advanced/blob/master/7_nlp_sentiment_transformer/7-2_torchtext.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="aqXrzGJZkUFF" colab_type="text" # # 7.2 torchtextでのDataset、DataLoaderの実装方法 # # - 本ファイルでは、torchtextを使用してDatasetおよびDataLoaderを実装する方法を解説します。 # # + [markdown] id="SAroFDqXkUFg" colab_type="text" # ※ 本章のファイルはすべてUbuntuでの動作を前提としています。Windowsなど文字コードが違う環境での動作にはご注意下さい。 # + [markdown] id="6IPVp0bIkUFu" colab_type="text" # # 7.2 学習目標 # # 1. torchtextを用いてDatasetおよびDataLoaderの実装ができる # + [markdown] id="woudDI-1kUF8" colab_type="text" # # 事前準備 # # - 書籍の指示に従い、本章で使用するデータを用意します # # - torchtextをインストールします # # - pip install torchtext # # # + id="4I7TDQ8ymAKe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="af9636bc-e05b-496f-e83b-059cdfcee631" # !git clone https://github.com/HisakaKoji/pytorch_advanced.git # + id="ftlvhXakmDc_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b62b03e7-531a-4d5c-883d-7c499b64a01c" # %cd /content/pytorch_advanced/7_nlp_sentiment_transformer # + id="QxQtx1Z-lWFB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="b85d01ab-a067-44d2-e6b8-ea8f4aab41ec" # !pip install janome torchtext # + [markdown] id="2P0ls6DckUGN" colab_type="text" # # 1 . 
前処理と単語分割の関数を実装 # # + id="KHto4-FzkUGe" colab_type="code" colab={} # 単語分割にはJanomeを使用 from janome.tokenizer import Tokenizer j_t = Tokenizer() def tokenizer_janome(text): return [tok for tok in j_t.tokenize(text, wakati=True)] # + id="Ii9_eqHDkUHE" colab_type="code" colab={} # 前処理として正規化をする関数を定義 import re def preprocessing_text(text): # 半角・全角の統一 # 今回は無視 # 英語の小文字化 # 今回はここでは無視 # output = output.lower() # 改行、半角スペース、全角スペースを削除 text = re.sub('\r', '', text) text = re.sub('\n', '', text) text = re.sub(' ', '', text) text = re.sub(' ', '', text) # 数字文字の一律「0」化 text = re.sub(r'[0-9 0-9]', '0', text) # 数字 # 記号と数字の除去 # 今回は無視。半角記号,数字,英字 # 今回は無視。全角記号 # 特定文字を正規表現で置換する # 今回は無視 return text # + id="i6NZiuGUkUHn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e4f4c004-1657-4220-ed40-3d41e7602b44" # 前処理とJanomeの単語分割を合わせた関数を定義する def tokenizer_with_preprocessing(text): text = preprocessing_text(text) # 前処理の正規化 ret = tokenizer_janome(text) # Janomeの単語分割 return ret # 動作確認 text = "昨日は とても暑く、気温が36度もあった。" print(tokenizer_with_preprocessing(text)) # + [markdown] id="cmPe-MQ0kUIG" colab_type="text" # # 2. 
文章データの読み込み

# + id="dc5YgTmhkUIS" colab_type="code" colab={}
import torchtext

# Define how rows read from tsv/csv files are processed, one Field for the
# sentence text and one for the label.
max_length = 25
TEXT = torchtext.data.Field(sequential=True, tokenize=tokenizer_with_preprocessing,
                            use_vocab=True, lower=True, include_lengths=True,
                            batch_first=True, fix_length=max_length)
LABEL = torchtext.data.Field(sequential=False, use_vocab=False)

# Meaning of the arguments:
# sequential: is the data variable-length? Sentences vary, so True; labels False
# tokenize: function applied to each sentence on load (preprocessing + split)
# use_vocab: whether to add the words to the vocabulary (built later)
# lower: whether to lower-case alphabetic characters
# include_lengths: whether to keep the token count of each sentence
# batch_first: put the mini-batch dimension first
# fix_length: pad/trim every sentence to the given length

# + id="VaNicdkbkUIz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 89} outputId="46ef5708-3346-4554-ddbf-bebef1f7ace9"
# data.TabularDataset details:
# https://torchtext.readthedocs.io/en/latest/examples.html?highlight=data.TabularDataset.splits

# Read each tsv file from the "data" folder and build Datasets;
# `fields` declares that every row consists of TEXT and LABEL columns.
train_ds, val_ds, test_ds = torchtext.data.TabularDataset.splits(
    path='./data/', train='text_train.tsv',
    validation='text_val.tsv', test='text_test.tsv', format='tsv',
    fields=[('Text', TEXT), ('Label', LABEL)])

# Sanity check.
print('訓練データの数', len(train_ds))
print('1つ目の訓練データ', vars(train_ds[0]))
print('2つ目の訓練データ', vars(train_ds[1]))

# + [markdown] id="WOlrcouVkUJV" colab_type="text"
# # Numericalising the words

# + id="ryk9MRh7kUJf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 885} outputId="275c509f-2928-4d52-abe9-8ed7d826d2a3"
# Build the vocabulary from the words in the training data, keeping words
# whose frequency is at least min_freq.
TEXT.build_vocab(train_ds, min_freq=1)

# Show words and their frequencies in the training data
# (words above the min_freq threshold are listed).
TEXT.vocab.freqs  # display

# + id="jALbPpv6kUKC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 937} outputId="041a3b40-42ca-4b83-8d47-52bf6516097d"
# Show the mapping from vocabulary words to ids.
# Words rarer than min_freq become the unknown token <unk>.
TEXT.vocab.stoi  # display; "string to identifiers", i.e. word -> id

# + [markdown] id="svcFrx57kUKg" colab_type="text"
# # Building the DataLoader

# + id="JkVeYGiGkUKq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="03919329-2480-468f-cb40-b8e2f67f6d91"
# Create the DataLoaders (in torchtext these are simply called iterators).
train_dl = torchtext.data.Iterator(train_ds, batch_size=2, train=True)
val_dl = torchtext.data.Iterator(
    val_ds, batch_size=2, train=False, sort=False)
test_dl = torchtext.data.Iterator(
    test_ds, batch_size=2, train=False, sort=False)

# Sanity check with the validation dataset.
batch = next(iter(val_dl))
print(batch.Text)
print(batch.Label)

# + [markdown] id="UTA2_NjHkULN" colab_type="text"
# Done.
7_nlp_sentiment_transformer/7-2_torchtext.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <p><font size="6"><b>03 - Pandas: Indexing and selecting data</b></font></p> # # # > *DS Data manipulation, analysis and visualisation in Python* # > *December, 2017* # # > *© 2016, <NAME> and <NAME> (<mailto:<EMAIL>>, <mailto:<EMAIL>>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)* # # --- # + run_control={"frozen": false, "read_only": false} import pandas as pd # + run_control={"frozen": false, "read_only": false} # redefining the example objects # series population = pd.Series({'Germany': 81.3, 'Belgium': 11.3, 'France': 64.3, 'United Kingdom': 64.9, 'Netherlands': 16.9}) # dataframe data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'], 'population': [11.3, 64.3, 81.3, 16.9, 64.9], 'area': [30510, 671308, 357050, 41526, 244820], 'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']} countries = pd.DataFrame(data) countries # - # Setting the index to the country names: # + run_control={"frozen": false, "read_only": false} countries = countries.set_index('country') countries # - # # Selecting data # <div class="alert alert-warning" style="font-size:120%"> # <b>ATTENTION!</b>: <br><br> # # One of pandas' basic features is the labeling of rows and columns, but this makes indexing also a bit more complex compared to numpy. <br><br> We now have to distuinguish between: # # <ul> # <li>selection by **label** (using the row and column names)</li> # <li>selection by **position** (using integers)</li> # </ul> # </div> # ## `data[]` provides some convenience shortcuts # For a DataFrame, basic indexing selects the columns (cfr. 
the dictionaries of pure python) # # Selecting a **single column**: # + run_control={"frozen": false, "read_only": false} countries['area'] # single [] # - # or multiple **columns**: # + run_control={"frozen": false, "read_only": false} countries[['area', 'population']] # double [[]] # - # But, slicing or boolean indexing accesses the **rows**: # + run_control={"frozen": false, "read_only": false} countries['France':'Netherlands'] # - countries[countries['population'] > 50] # <div class="alert alert-danger"> # <b>NOTE</b>: # # <ul> # <li>Unlike slicing in numpy, the end label is **included**!</li> # </ul> # </div> # <div class="alert alert-info" style="font-size:120%"> # <b>REMEMBER</b>: <br><br> # # So as a summary, `[]` provides the following convenience shortcuts: # # <ul> # <li>**Series**: selecting a **label**: `s[label]`</li> # <li>**DataFrame**: selecting a single or multiple **columns**: `df['col']` or `df[['col1', 'col2']]`</li> # <li>**DataFrame**: slicing or filtering the **rows**: `df['row_label1':'row_label2']` or `df[mask]`</li> # </ul> # </div> # ## Systematic indexing with `loc` and `iloc` # When using `[]` like above, you can only select from one axis at once (rows or columns, not both). For more advanced indexing, you have some extra attributes: # # * `loc`: selection by label # * `iloc`: selection by position # # Both `loc` and `iloc` use the following pattern: `df.loc[ <selection of the rows> , <selection of the columns> ]`. # # This 'selection of the rows / columns' can be: a single label, a list of labels, a slice or a boolean mask. # Selecting a single element: # + run_control={"frozen": false, "read_only": false} countries.loc['Germany', 'area'] # - # But the row or column indexer can also be a list, slice, boolean array (see next section), .. 
# + run_control={"frozen": false, "read_only": false} countries.loc['France':'Germany', ['area', 'population']] # - # --- # Selecting by position with `iloc` works similar as **indexing numpy arrays**: # + run_control={"frozen": false, "read_only": false} countries.iloc[0:2,1:3] # - # The different indexing methods can also be used to **assign data**: # + run_control={"frozen": false, "read_only": false} countries2 = countries.copy() countries2.loc['Belgium':'Germany', 'population'] = 10 # + run_control={"frozen": false, "read_only": false} countries2 # - # <div class="alert alert-info" style="font-size:120%"> # <b>REMEMBER</b>: <br><br> # # Advanced indexing with **loc** and **iloc** # # <ul> # <li>**loc**: select by label: `df.loc[row_indexer, column_indexer]`</li> # <li>**iloc**: select by position: `df.iloc[row_indexer, column_indexer]`</li> # </ul> # </div> # ## Boolean indexing (filtering) # Often, you want to select rows based on a certain condition. This can be done with 'boolean indexing' (like a where clause in SQL) and comparable to numpy. # # The indexer (or boolean mask) should be 1-dimensional and the same length as the thing being indexed. # + run_control={"frozen": false, "read_only": false} countries['area'] > 100000 # + run_control={"frozen": false, "read_only": false} countries[countries['area'] > 100000] # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <p> # <ul> # <li>Add the population density as column to the DataFrame.</li> # </ul> # </p> # Note: the population column is expressed in millions. 
# </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data15.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Select the capital and the population column of those countries where the density is larger than 300</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data16.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>Add a column 'density_ratio' with the ratio of the population density to the average population density for all countries.</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data17.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>Change the capital of the UK to Cambridge</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data18.py # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # # <ul> # <li>Select all countries whose population density is between 100 and 300 people/km²</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data19.py # - # # Some other essential methods: `isin` and `string` methods # The `isin` method of Series is very useful to select rows that may contain certain values: # + run_control={"frozen": false, "read_only": false} s = countries['capital'] # + run_control={"frozen": false, "read_only": false} # s.isin? 
# + run_control={"frozen": false, "read_only": false} s.isin(['Berlin', 'London']) # - # This can then be used to filter the dataframe with boolean indexing: # + run_control={"frozen": false, "read_only": false} countries[countries['capital'].isin(['Berlin', 'London'])] # - # Let's say we want to select all data for which the capital starts with a 'B'. In Python, when having a string, we could use the `startswith` method: string = 'Berlin' # + run_control={"frozen": false, "read_only": false} string.startswith('B') # - # In pandas, these are available on a Series through the `str` namespace: # + run_control={"frozen": false, "read_only": false} countries['capital'].str.startswith('B') # - # For an overview of all string methods, see: http://pandas.pydata.org/pandas-docs/stable/api.html#string-handling # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>Select all countries that have capital names with more than 7 characters</li> # </ul> # # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data27.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>Select all countries that have capital names that contain the character sequence 'am'</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data28.py # - # # Pitfall: chained indexing (and the 'SettingWithCopyWarning') # + run_control={"frozen": false, "read_only": false} countries.loc['Belgium', 'capital'] = 'Ghent' # + run_control={"frozen": false, "read_only": false} countries # + run_control={"frozen": false, "read_only": false} countries['capital']['Belgium'] = 'Antwerp' # + run_control={"frozen": false, "read_only": false} countries # + run_control={"frozen": false, "read_only": false} countries[countries['capital'] == 'Antwerp']['capital'] = 'Brussels' # + run_control={"frozen": false, "read_only": false} countries # - 
countries.loc[countries['capital'] == 'Antwerp', 'capital'] = 'Brussels' countries # <div class="alert alert-info" style="font-size:120%"> # # <b>REMEMBER!</b><br><br> # # What to do when encountering the *value is trying to be set on a copy of a slice from a DataFrame* error? # # <ul> # <li>Use `loc` instead of chained indexing **if possible**!</li> # <li>Or `copy` explicitly if you don't want to change the original data.</li> # </ul> # # </div> # # Exercises using the Titanic dataset df = pd.read_csv("../data/titanic.csv") df.head() # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>Select all rows for male passengers and calculate the mean age of those passengers. Do the same for the female passengers.</li> # </ul> # </div> # + clear_cell=true # # %load _solutions/pandas_03_selecting_data38.py # + clear_cell=true # # %load _solutions/pandas_03_selecting_data39.py # + clear_cell=true # # %load _solutions/pandas_03_selecting_data40.py # - # We will later see an easier way to calculate both averages at the same time with groupby. # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>How many passengers older than 70 were on the Titanic?</li> # </ul> # </div> # + clear_cell=true # # %load _solutions/pandas_03_selecting_data41.py # + clear_cell=true # # %load _solutions/pandas_03_selecting_data42.py # - # # [OPTIONAL] more exercises # For the quick ones among you, here are some more exercises with some larger dataframe with film data. These exercises are based on the [PyCon tutorial of <NAME>](https://github.com/brandon-rhodes/pycon-pandas-tutorial/) (so all credit to him!) and the datasets he prepared for that. You can download these data from here: [`titles.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKajNMa1pfSzN6Q3M) and [`cast.csv`](https://drive.google.com/open?id=0B3G70MlBnCgKal9UYTJSR2ZhSW8) and put them in the `/data` folder. 
# + run_control={"frozen": false, "read_only": false} cast = pd.read_csv('../data/cast.csv') cast.head() # + run_control={"frozen": false, "read_only": false} titles = pd.read_csv('../data/titles.csv') titles.head() # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>How many movies are listed in the titles dataframe?</li> # </ul> # # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data45.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>What are the earliest two films listed in the titles dataframe?</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data46.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>How many movies have the title "Hamlet"?</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data47.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>List all of the "Treasure Island" movies from earliest to most recent.</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data48.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>How many movies were made from 1950 through 1959?</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data49.py # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data50.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>How many roles in the movie "Inception" are NOT ranked by an "n" value?</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load 
_solutions/pandas_03_selecting_data51.py # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data52.py # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data53.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>But how many roles in the movie "Inception" did receive an "n" value?</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data54.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>Display the cast of the "Titanic" (the most famous 1997 one) in their correct "n"-value order, ignoring roles that did not earn a numeric "n" value.</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data55.py # - # <div class="alert alert-success"> # # <b>EXERCISE</b>: # # <ul> # <li>List the supporting roles (having n=2) played by <NAME> in the 1990s, in order by year.</li> # </ul> # </div> # + clear_cell=true run_control={"frozen": false, "read_only": false} # # %load _solutions/pandas_03_selecting_data56.py # - # # Acknowledgement # # # > The optional exercises are based on the [PyCon tutorial of <NAME>](https://github.com/brandon-rhodes/pycon-pandas-tutorial/) (so all credit to him!) and the datasets he prepared for that. # # ---
_solved/notebooks/pandas_03_selecting_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/alxiom/Basic-NLP/blob/main/NLP_03_Attention.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="NbCX7udCJi-8" outputId="427260c7-5111-4d76-9cc0-c13efe6f16f7" # !pip install tokenizers # !git clone https://github.com/alxiom/Basic-NLP.git # + id="CLYsE8TdJwSN" import math import random import numpy as np import pandas as pd import torch from torch import nn from torch.nn import functional as ftn from torch.utils.data import Dataset, DataLoader from tokenizers import CharBPETokenizer from bokeh.layouts import column from bokeh.plotting import figure, show from bokeh.io import output_notebook output_notebook() # + id="wa6cnjhjJyh6" random.seed(42) np.random.seed(42) torch.manual_seed(42) special = ["<pad>", "<unk>", "<bos>", "<eos>", "<sep>", "<cls>", "<mask>"] device = "cpu" train_seq2seq_attention = True # + id="4Jhk01MPKBD6" tokenizer = CharBPETokenizer(vocab="Basic-NLP/data/vocab.json", merges="Basic-NLP/data/merges.txt") # + colab={"base_uri": "https://localhost:8080/"} id="pjfXH-FtKG10" outputId="a28e86f2-0fe9-46e4-ef37-b608d67c3ca6" train_data = pd.read_csv("Basic-NLP/data/chat_sample.csv", header=0) print(train_data.head(5)) print(len(train_data)) print("--") # + id="WJVhwkl6KMn7" query_tokens = [] answer_tokens = [] for i in range(len(train_data)): row = train_data.loc[i] query = row["Q"] answer = row["A"] tokenize_query = tokenizer.encode(query) tokenize_answer = tokenizer.encode(answer) query_tokens.append(tokenize_query.ids) answer_tokens.append(tokenize_answer.ids) # + id="ydP7zRCfKgXt" class LoadDataset(Dataset): def __init__(self, 
x_data, y_data): super(LoadDataset, self).__init__() self.x_data = x_data self.y_data = y_data def __getitem__(self, item): return self.x_data[item], self.y_data[item] def __len__(self): return len(self.y_data) class MaxPadBatch: def __init__(self, max_len=24): super(MaxPadBatch, self).__init__() self.max_len = max_len def __call__(self, batch): batch_x = [] batch_y = [] for x, y in batch: batch_x.append(torch.tensor(x).long()) batch_y.append(torch.tensor([special.index("<bos>")] + y + [special.index("<eos>")]).long()) pad_index = special.index("<pad>") pad_x = [ftn.pad(item, [0, self.max_len - item.shape[0]], value=pad_index).detach() for item in batch_x] pad_y = [ftn.pad(item, [0, self.max_len - item.shape[0]], value=pad_index).detach() for item in batch_y] return torch.stack(pad_x), torch.stack(pad_y), len(batch) max_seq_length = 20 chat_dataset = LoadDataset(query_tokens, answer_tokens) chat_data_loader = DataLoader(chat_dataset, batch_size=32, collate_fn=MaxPadBatch(max_seq_length)) # + id="CQ9sb13pKilQ" class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size): super(Encoder, self).__init__() self.input_size = input_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.rnn = nn.GRU(self.embedding_size, self.hidden_size, batch_first=True) def forward(self, x, embedding): # x: [batch, seq_length] x = embedding(x) x, hidden = self.rnn(x) return x, hidden # + id="i-AUVqi_KlDG" class Decoder(nn.Module): def __init__(self, output_size, embedding_size, hidden_size): super(Decoder, self).__init__() self.output_size = output_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.rnn = nn.GRU(self.embedding_size, self.hidden_size, batch_first=True) def forward(self, x, hidden, embedding): # x: [batch] --> need second dimension as 1 # hidden: [encoder_layers = 1, batch, hidden_dim] x = x.unsqueeze(1) x = embedding(x) x, hidden = self.rnn(x, hidden) return x, hidden # + id="KPTNgbvxKmCn" class 
Attention(nn.Module):
    """Dot-product attention of decoder outputs over all encoder outputs."""

    def __init__(self):
        super(Attention, self).__init__()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, encoder_output, decoder_output):
        # Measure how strongly the current decoder output relates to each
        # encoder output: dot product of the decoder output with every
        # encoder output -> sequence of scalars (= attention scores).
        # attention scores -> softmax -> attention weights.
        # Weighted sum of all encoder outputs by those weights -> context vector.
        attention_score = torch.bmm(decoder_output, encoder_output.transpose(1, 2))
        attention_weight = self.softmax(attention_score)
        context_vector = torch.bmm(attention_weight, encoder_output)
        return context_vector


# + id="Qk783WUYKol3"
class Seq2SeqAttention(nn.Module):
    """GRU encoder-decoder with dot-product attention and a shared embedding."""

    def __init__(self, encoder, decoder, attention):
        super(Seq2SeqAttention, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.attention = attention
        # One embedding table shared by encoder and decoder (assumes source
        # and target use the same vocabulary -- TODO confirm).
        self.embedding = nn.Embedding(self.encoder.input_size, self.encoder.embedding_size)
        self.target_vocab_size = self.decoder.output_size
        # Projects [decoder output ; context] onto the target vocabulary.
        self.linear = nn.Linear(self.encoder.hidden_size + self.decoder.hidden_size, self.target_vocab_size)

    def forward(self, source, target, teacher_forcing=0.5):
        # source: [batch, seq_length]
        # target: [batch, seq_length]
        batch_size = target.shape[0]
        target_seq_length = target.shape[1]
        encoder_output, hidden = self.encoder(source, self.embedding)
        # Decoding starts from the <bos> token for every batch element.
        decoder_input = torch.tensor([special.index("<bos>")] * batch_size).long()
        # Slot t=0 stays all-zeros; positions 1..seq_length-1 are filled below.
        attention_outputs = torch.zeros(batch_size, target_seq_length, self.target_vocab_size)
        for t in range(1, target_seq_length):
            decoder_output, hidden = self.decoder(decoder_input, hidden, self.embedding)
            # Build the context for this decoding step from the encoder
            # outputs and the current decoder output, then derive the
            # attention-applied output and use it for greedy decoding.
            context = self.attention(encoder_output, decoder_output)
            attention_output = self.linear(torch.cat([decoder_output, context], dim=2).squeeze(1))
            attention_outputs[:, t, :] = attention_output
            # Teacher forcing: with probability `teacher_forcing` feed the
            # ground-truth token, otherwise the model's own argmax prediction.
            teacher = target[:, t]
            top1 = attention_output.argmax(1)
            decoder_input = teacher if random.random() < teacher_forcing else top1
        return attention_outputs


# + colab={"base_uri": "https://localhost:8080/"} id="v4srYREhKrFw" outputId="fdafbefe-bb6b-444a-fc4d-0d1e29a8dc5d"
embedding_dim = 32
hidden_dim = 32
enc = Encoder(tokenizer.get_vocab_size(), embedding_dim, hidden_dim)
dec = Decoder(tokenizer.get_vocab_size(), embedding_dim, hidden_dim)
att = Attention()
seq2seq_att = Seq2SeqAttention(enc, dec, att)
# A <bos>-prefixed, fully padded dummy target used only to drive decoding
# at inference time (teacher forcing disabled there).
decode_test = torch.tensor([[special.index("<bos>")] + [special.index("<pad>")] * (max_seq_length - 1)]).long()

if train_seq2seq_attention:
    learning_rate = 2e-3
    optimizer = torch.optim.Adam(seq2seq_att.parameters(), lr=learning_rate)
    # Padding positions are excluded from the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=special.index("<pad>"))
    for epoch in range(300):
        seq2seq_att.train()
        epoch_loss = 0.0
        for batch_source, batch_target, batch_length in chat_data_loader:
            optimizer.zero_grad()
            seq2seq_attention_output = seq2seq_att(batch_source, batch_target)
            seq2seq_attention_output_dim = seq2seq_attention_output.shape[-1]
            # Drop position 0 (the never-filled <bos> slot) before the loss.
            seq2seq_attention_output_drop = seq2seq_attention_output[:, 1:, :].reshape(-1, seq2seq_attention_output_dim)
            batch_target_drop = batch_target[:, 1:].reshape(-1)
            loss = criterion(seq2seq_attention_output_drop, batch_target_drop)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item() / batch_length
        if epoch % 10 == 0:
            print(f"{epoch} epoch loss: {epoch_loss:.4f} / ppl: {math.exp(epoch_loss):.4f}")

seq2seq_att.eval()
test = "썸 타는 것도 귀찮아."
test_token = tokenizer.encode(test) test_tensor = torch.tensor(test_token.ids).long().unsqueeze(0) test_output = seq2seq_att(test_tensor, decode_test, 0.0)[:, 1:, :].squeeze(0).argmax(1).detach().tolist() recover_test_output = tokenizer.decode(test_output) print(recover_test_output.split("<eos>")[0]) test = "죽을거 같네" test_token = tokenizer.encode(test) test_tensor = torch.tensor(test_token.ids).long().unsqueeze(0) test_output = seq2seq_att(test_tensor, decode_test, 0.0)[:, 1:, :].squeeze(0).argmax(1).detach().tolist() recover_test_output = tokenizer.decode(test_output) print(recover_test_output.split("<eos>")[0]) test = "한심해서 죽고싶다" test_token = tokenizer.encode(test) test_tensor = torch.tensor(test_token.ids).long().unsqueeze(0) test_output = seq2seq_att(test_tensor, decode_test, 0.0)[:, 1:, :].squeeze(0).argmax(1).detach().tolist() recover_test_output = tokenizer.decode(test_output) print(recover_test_output.split("<eos>")[0]) torch.save(seq2seq_att.state_dict(), "Basic-NLP/checkpoint/seq2seq_attention.pt")
colab/NLP_03_Attention.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Backtester Examples # ## Data files # + import os import sys BACKTESTER_DIR = os.path.realpath(os.path.join(os.getcwd(), '..', '..')) TEST_DATA_DIR = os.path.join(BACKTESTER_DIR, 'backtester', 'test', 'test_data') SAMPLE_STOCK_DATA = os.path.join(TEST_DATA_DIR, 'test_data_stocks.csv') SAMPLE_OPTIONS_DATA = os.path.join(TEST_DATA_DIR, 'test_data_options.csv') sys.path.append(BACKTESTER_DIR) # Add backtester base dir to $PYTHONPATH # - # ## Sample backtest from backtester import Backtest, Stock, Type, Direction from backtester.datahandler import HistoricalOptionsData, TiingoData from backtester.strategy import Strategy, StrategyLeg # First we construct an options datahandler. options_data = HistoricalOptionsData(SAMPLE_OPTIONS_DATA) options_schema = options_data.schema # Next, we'll create a toy options strategy. 
# + sample_strategy = Strategy(options_schema) leg1 = StrategyLeg('leg_1', options_schema, option_type=Type.CALL, direction=Direction.BUY) leg1.entry_filter = ((options_schema.contract == 'SPX170317C00300000') & (options_schema.dte == 73)) | ((options_schema.contract == 'SPX170421C00500000') & (options_schema.dte == 51)) leg1.exit_filter = (options_schema.dte == 44) | (options_schema.dte == 18) leg2 = StrategyLeg('leg_2', options_schema, option_type=Type.PUT, direction=Direction.BUY) leg2.entry_filter = ((options_schema.contract == 'SPX170317P00300000') & (options_schema.dte == 73)) | ((options_schema.contract == 'SPX170421P01375000') & (options_schema.dte == 51)) leg2.exit_filter = (options_schema.dte == 44) | (options_schema.dte == 18) sample_strategy.add_legs([leg1, leg2]) sample_strategy.add_exit_thresholds(profit_pct=0.2, loss_pct=0.2) # - # We do the same for stocks: create a datahandler together with a list of the stocks we want in our inventory and their corresponding weights. stocks_data = TiingoData(SAMPLE_STOCK_DATA) stocks = [Stock('VOO', 0.4), Stock('TUR', 0.1), Stock('RSX', 0.5)] # We set our portfolio allocation, i.e. how much of our capital will be invested in stocks, options and cash. allocation = {'stocks': 0.5, 'options': 0.5, 'cash': 0.0} # Finally, we create the `Backtest` object. # + bt = Backtest(allocation, initial_capital=1_000_000) bt.stocks = stocks bt.stocks_data = stocks_data bt.options_strategy = sample_strategy bt.options_data = options_data # - # And run the backtest with a rebalancing period of one month. bt.run(rebalance_freq=1) # The trade log (`bt.trade_log`) shows we executed 4 trades: we bought 2 calls and 2 puts on _2017-01-03_ and _2017-03-01_, and exited those positions on _2017-02-01_ and _2017-04-03_ respectively. 
# The balance data structure shows how our positions evolved in time: # - We started with $1000000 on _2017-01-02_ # - `total capital` is the sum of `cash`, `stocks capital` and `options capital` # - `% change` shows the inter day change in `total capital` # - `accumulated return` gives the compounded return in `total capital` since the start of the backtest bt.balance # ## Statistics and Plots from backtester.statistics import * # Summary table of our options strategy. summary(bt.trade_log, bt.balance) # Plot of the accumulated returns over time. returns_chart(bt.balance) # Daily returns histogram. returns_histogram(bt.balance) monthly_returns_heatmap(bt.balance)
backtester/examples/backtester_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Introduction
#
# This notebook illustrates how to extract Open States Bills meta data, and scrape content for one of the states using the [GraphQL](https://docs.openstates.org/en/latest/api/v2/index.html) API v2 interface.
#
# It does the following:
#
# - Extracts jurisdictions and sessions, so you know their names
# - For one of these (Rhode Island), it extracts all bills meta data for a session
# - Using the link for the bill in this data, it downloads the PDF and extracts text
# - Saves all data to a Pandas dataframe for analysis
#
# Note: In this notebook, we developed a simple scraper for getting Bill text for simplicity, but Open States [provide scrapers also](https://docs.openstates.org/en/latest/contributing/getting-started.html?highlight=scraping#running-our-first-scraper)
#
# ## Before you start
#
# You will need to be provided an API key, and use it to populate the 'headers' variable below. To get this key, [register](https://openstates.org/api/register/).
#
# # Setup

# +
import pandas as pd
import os
import sys
import json
from newspaper import fulltext
from bs4 import BeautifulSoup
import re
from newspaper import Article
import PyPDF2
import wget
import requests
import pickle
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import traceback
import subprocess
import logging

# Openstates API key (they sent me this on request).
# FIX: `requests` expects headers as a mapping of header-name -> value; the
# previous `{"..."}` was a *set* literal and would fail when passed to
# requests.post(..., headers=headers). Open States authenticates requests
# with the X-API-KEY header.
headers = {"X-API-KEY": "PUT YOUR OPENSTATE API KEY HERE"}
# -

# # Analysis

# ## Finding jurisdictions and sessions
#
# We are after bills which have been labelled with [Subjects](https://docs.openstates.org/en/latest/policies/categorization.html?highlight=categories#subjects) such as 'Agriculture and Food'. In the API I couldn't find any bills with this set, in either V1 or V2 API.
# So we'll scan a sample across all states, sessions to see if it's set anywhere

# +
# Get jurisdictions and sessions
params = {'query': '''
{
  jurisdictions {
    edges {
      node {
        id
        name
        legislativeSessions {
          edges {
            node { identifier name classification startDate endDate }
          }
        }
      }
    }
  }
}
'''}

response = requests.post("https://openstates.org/graphql", params=params, headers=headers)
payload = json.loads(response.content)
jurisdiction_edges = payload["data"]["jurisdictions"]["edges"]
#display(jurisdiction_edges)

# Flatten to one row per (jurisdiction, legislative session); tag each row
# with the legislature's name so rows stay distinguishable after flattening.
session_rows = [
    dict(session_edge["node"], legislature=jur_edge["node"]["name"])
    for jur_edge in jurisdiction_edges
    for session_edge in jur_edge["node"]["legislativeSessions"]["edges"]
]
sessions = pd.DataFrame(session_rows)

# Filter for 2017-2018 (Note, for some states where startDate isn't populated, this excludes 2017 data)
sessions = sessions[sessions['startDate'].str.match('2017')]
sessions = sessions.sort_values(by='legislature')
display(sessions)
# -

# ## Finding sessions which have the 'subject' field
#
# Bills can have a 'Subject' set, which includes a list of tags such as 'Taxation' (see description in [API v1 documentation](https://docs.openstates.org/en/latest/policies/categorization.html?highlight=subject#subjects)). However, not all states have this set. The following code is a simple example of looping through jurisdictions and sessions and looking at a few bills in each to see if the subject field is set.
# +
params_template = {'query': '''
{
  search_1: bills(first: 4, session: "SESSION_NAME", jurisdiction: "JURISDICTION_NAME",classification:"bill") {
    edges {
      node { id identifier subject title }
    }
    pageInfo { hasNextPage hasPreviousPage endCursor startCursor }
    totalCount }
}
'''}

counts = []
has_subjects = []

for index, row in sessions.iterrows():
    state = row["legislature"]
    session = row["identifier"]
    print(f"\n\nState: {state}; Session identifier: {session}")

    # Substitute this row's state/session into the query template.
    query = (params_template['query']
             .replace("SESSION_NAME", session)
             .replace("JURISDICTION_NAME", state)
             .replace("\n", ""))
    response = requests.post("https://openstates.org/graphql",
                             params={'query': query}, headers=headers)
    result = json.loads(response.content)["data"]["search_1"]

    # Stash page Info, for pagination
    count = result["totalCount"]
    counts.append(count)

    if count == 0:
        print("No bills found")
        has_subjects.append("No")
        continue

    print(f"{count} bills found")
    # Peek at the first returned bill only — enough to tell whether this
    # jurisdiction populates the subject field at all.
    first_subjects = result["edges"][0]["node"]["subject"]
    if first_subjects:
        has_subjects.append("Yes")
        display(first_subjects)
    else:
        has_subjects.append("No")
        print("No subjects found")

sessions["billsCount"] = counts
sessions["has_subjects"] = has_subjects
display(sessions)
print("Done")
# -

# ## Extract Bills meta data and scrape text for Rhode Island
#
# The following section uses the GraphQL API to extract all bills for a specific session for Rhode Island. It also scrapes bill text, but as mentioned above Open states [provide scrapers also](https://docs.openstates.org/en/latest/contributing/getting-started.html?highlight=scraping#running-our-first-scraper).
# +
#
# A little parser of PDFs, tuned using Rhode Island bills, not tested on other states
#

def _clean_pdf_text(text):
    """Clean raw text extracted from a bill PDF.

    Strips bare line numbers, re-joins hyphenated words, and collapses the
    artificial line breaks the PDF parser introduces into paragraph breaks.
    """
    # Remove lines which just have a number on them
    text = re.sub(r'^[0-9]*\n', '', text, flags=re.M)
    text = re.sub(r'^\n', '', text, flags=re.M)
    # Hyphenated words
    text = re.sub(r'\n^-', '-', text, flags=re.M)
    text = re.sub(r'-( |)\n', '-', text, flags=re.M)
    # Mark every remaining newline, then map long runs back to paragraph
    # breaks and drop the isolated ones.
    text = re.sub(r'\n', 'DOUBLE', text, flags=re.M)
    # Line breaks introduced by PDF parser
    text = re.sub(r'DOUBLE DOUBLE DOUBLE DOUBLE', '\n\n', text, flags=re.M)
    text = re.sub(r'DOUBLE DOUBLE DOUBLE', '\n\n', text, flags=re.M)
    text = re.sub(r'DOUBLE', '', text, flags=re.M)
    return text


def extractPDF(url):
    """Download the PDF at *url*, extract its text, and return it cleaned.

    The temporary download is always deleted, even when extraction fails
    (previously a parse error would leak the downloaded file).
    """
    filename = wget.download(url)
    try:
        pdfReader = PyPDF2.PdfFileReader(filename)
        # Join once at the end instead of quadratic string concatenation.
        text = "".join(pdfReader.getPage(i).extractText()
                       for i in range(pdfReader.numPages))
    finally:
        os.remove(filename)
    return _clean_pdf_text(text)


jurisdiction = "Rhode Island"
session = "2017"

params_template = {'query': '''
{
  search_1: bills(first: 4, session: "SESSION", jurisdiction: "JURISDICTION", classification:"bill" AFTER_CLAUSE) {
    edges {
      node {
        id
        identifier
        subject
        title
        abstracts { abstract note date }
        openstatesUrl
        sponsorships { name entityType primary classification }
        classification
        subject
        otherIdentifiers { identifier scheme note }
        updatedAt
        createdAt
        legislativeSession { identifier jurisdiction { name } }
        actions { date description classification }
        documents { date note links { url } }
        versions { date note links { url } }
        sources { url note }
      }
    }
    pageInfo { hasNextPage hasPreviousPage endCursor startCursor }
    totalCount
  }
}
'''}

# Bake the fixed jurisdiction/session into the template; only AFTER_CLAUSE
# is substituted per page below.
params = params_template.copy()
params["query"] = params["query"].replace("JURISDICTION", jurisdiction)
params["query"] = params["query"].replace("SESSION", session)
params_template = params.copy()

# Paginate API calls
hasNextPage = True
endCursor = ""
bill_array = []
count = 0
print("Starting API calls and PDF scraping for " + jurisdiction + " " + session + " ...")
while hasNextPage == True:
    params = params_template.copy()
    if endCursor != "":
        # Resume after the cursor returned by the previous page.
        params["query"] = params["query"].replace("AFTER_CLAUSE", ',after:"' + endCursor + '"')
    else:
        params["query"] = params["query"].replace("AFTER_CLAUSE", '')
    response = requests.post("https://openstates.org/graphql", params=params, headers=headers)
    bills = json.loads(response.content)
    bills = bills["data"]["search_1"]

    # Stash page Info, for pagination
    pageInfo = bills["pageInfo"]
    hasNextPage = pageInfo["hasNextPage"]
    if hasNextPage == True:
        endCursor = pageInfo["endCursor"]

    # Loop through results
    for r in bills["edges"]:
        b = r["node"]
        try:
            url = b["versions"][0]["links"][0]["url"]
            #display(url)
            b["billText"] = extractPDF(url)
        except Exception as e:
            # A bill with no published version, or an unparseable PDF, should
            # not abort the whole scrape; record it with empty text instead.
            print("Could not scrape text for " + b["identifier"] + ": " + str(e))
            b["billText"] = ""
        #print(b["billText"])
        bill_array.append(b)
        count = count + 1
        if count % 100 == 0:
            print("Processed " + str(count) + " bills")

df = pd.DataFrame(bill_array)
display(df)
# -

# Save our data ... (context manager so the pickle file handle is closed)
with open("rhode_island.pkl", "wb") as fh:
    pickle.dump(df, fh)

# .
#
notebooks/scrapers/Openstates_GraphQL_API_extract.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np # import matplotlib.pyplot as plt # import seaborn as sns # sns.set(context = 'notebook', #mostly controls relative sizes of things on plot # #The base context is “notebook”, and the other contexts are “paper”, “talk”, and “poster” # style = 'darkgrid', #dict, None, or one of {darkgrid, whitegrid, dark, white, ticks} # palette = 'deep', # Should be something that color_palette() can process. # font_scale = 1, # color_codes = False, # rc = None) # from IPython.core.interactiveshell import InteractiveShell # InteractiveShell.ast_node_interactivity = 'last_expr' # setting = "all" allows multiple outputs to be displayed for a given input cell. don't use w plotting! from IPython.display import display # # %matplotlib notebook # #%matplotlib inline pd.__version__ , np.__version__ #, matplotlib.__version__, sns.__version__ # - # + from sklearn.model_selection import train_test_split, learning_curve, cross_val_score, KFold, StratifiedKFold, \ ShuffleSplit, GridSearchCV, RandomizedSearchCV from sklearn.metrics import roc_auc_score from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.neural_network import MLPClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier # - pwd # + # #cd '/Users/DonBunk/Desktop/Google Drive/data_science/Python_Projects/Home_Credit_Default_Risk/' # - from Home_Credit_package.master_pipeline import master_pipeline # # load this # + [markdown] heading_collapsed=true # ## load df. 
# + hidden=true

# + hidden=true
path = ''

# Wrangled base features and level-1 meta-features, both keyed on SK_ID_CURR.
original_cleaned_df = pd.read_csv(path + 'complete_initial_wrangled_data.csv',
                                  index_col='SK_ID_CURR')
level_1_metafeatures_df = pd.read_csv(path + 'level_2_input_FINAL_level_1_meta_features_df.csv',
                                      index_col='SK_ID_CURR')

# Outer join on the shared index keeps every applicant from either file.
total_df = pd.merge(original_cleaned_df,
                    level_1_metafeatures_df,
                    left_index=True,
                    right_index=True,
                    how='outer')

# + hidden=true

# + hidden=true
total_df.info(verbose=True, null_counts=True);

# + hidden=true
# CHECK: this should be empty if everything is non null
null_columns = total_df.isnull().any()
null_columns[null_columns]

# + hidden=true
# -

# # models with only EXT SOURCES + level 1 final scores.

minimal_feats = ['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3',
                 'pwr_rescale_RanFor_EXTpoly', 'pwr_rescale_RanFor_AllFeats',
                 'pwr_rescale_LogReg_EXTpoly', 'pwr_rescale_LogReg_AllFeats',
                 'pwr_rescale_MLP_AllFeats']

total_df_piped, final_feature_list, total_pipeline, trans_list = master_pipeline(
    df_in=total_df[minimal_feats],
    int_cutoff=20,
    poly_deg=4,
    feats_with_interaction=[],
)

# + [markdown] heading_collapsed=true
# ### DONE. random forest grid search.
# + hidden=true # + hidden=true active="" # Results: # # input: # param_dist_dict = { 'max_depth' : [10,15,20], # 'min_samples_leaf' : [20,30,40,50], # 'min_samples_split' : [2,3], # 'n_estimators': [40], # } # output: # {'max_depth': 10, # 'min_samples_leaf': 40, # 'min_samples_split': 2, # 'n_estimators': 40} # T_score = 0.7952861875687605, V_score = 0.7734364674660258 # # input: # param_dist_dict = { 'max_depth' : [5,6,7,8,9,10], # 'min_samples_leaf' : [35,37,40,42,45], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 5, # 'min_samples_leaf': 35, # 'min_samples_split': 2, # 'n_estimators': 40} # T_score = 0.7688404542108025, V_score = 0.7750880606507247 # # input: # param_dist_dict = { 'max_depth' : [2,3,4,5], # 'min_samples_leaf' : [10,20,30,35], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 4, # 'min_samples_leaf': 10, # 'min_samples_split': 2, # 'n_estimators': 40} # T_score = 0.7672619344174749, V_score = 0.7749036104756363 # # input: # param_dist_dict = { 'max_depth' : [3,4,5], # 'min_samples_leaf' : [5,8,10,15], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 4, # 'min_samples_leaf': 10, # 'min_samples_split': 2, # 'n_estimators': 40} # T_score = 0.7672619344174749, V_score = 0.7749036104756363 # # input: # param_dist_dict = { 'max_depth' : [3,4,5], # 'min_samples_leaf' : [9,10,11,12,13,14], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 4, # 'min_samples_leaf': 12, # 'min_samples_split': 2, # 'n_estimators': 40} # T_score = 0.7672598098889133, V_score = 0.7749204442111646 # # ****************************************************************** # Switching to training on full set. And some slightly new level 1 results. 
# ****************************************************************** # # input: # param_dist_dict = { 'max_depth' : [3,4,5], # 'min_samples_leaf' : [9,10,11,12,13,14], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # # {'max_depth': 5, # 'min_samples_leaf': 9, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = .76...., train_score = 0.7705122375237419 # # input: # param_dist_dict = { 'max_depth' : [5,7,9], # 'min_samples_leaf' : [4,7,9], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 5, # 'min_samples_leaf': 7, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7685749094437057, train_score = 0.7705273370296688 # # input: # param_dist_dict = { 'max_depth' : [3,4,5,6], # 'min_samples_leaf' : [5,6,7,8], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 5, # 'min_samples_leaf': 7, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7685749094437057, train_score = 0.7705273370296688 # # ****************************************************************** # Restarting (Fix leaked level 1 results. 
) # ****************************************************************** # # # input: # param_dist_dict = { 'max_depth' : [5,10,20], # 'min_samples_leaf' : [10,20,30,40,50], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: {'max_depth': 5, # 'min_samples_leaf': 20, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7682630901407216, train_score = 0.7704197848891641 # # input: # param_dist_dict = { 'max_depth' : [4,5,7,9], # 'min_samples_leaf' : [10,15,20,25,30], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 5, # 'min_samples_leaf': 15, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7682822695581677, train_score = 0.7703977883032412 # # input: # param_dist_dict = { 'max_depth' : [4,5,6,7], # 'min_samples_leaf' : [12,14,15,16,18], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 6, # 'min_samples_leaf': 12, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7683195371814959, train_score = 0.7723275482946468 # # input: # param_dist_dict = { 'max_depth' : [4,5,6,7], # 'min_samples_leaf' : [9,10,11,12,13], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 6, # 'min_samples_leaf': 11, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7683421720049418, train_score = 0.7723052463371459 # # + hidden=true # + hidden=true param_dist_dict = { 'max_depth' : [4,5,6,7], 'min_samples_leaf' : [9,10,11,12,13], 'min_samples_split' : [2], 'n_estimators': [40], } forest_reg = RandomForestClassifier(random_state=0, class_weight = None) my_grid_search = GridSearchCV(forest_reg, param_dist_dict, cv = 3, return_train_score = True , scoring = 'roc_auc', n_jobs = -1, refit = True, verbose = 51, ) my_grid_search.fit(total_df_piped, total_df['TARGET']) # + hidden=true # + hidden=true pd.set_option('display.max_colwidth', -1) pd.DataFrame(my_grid_search.cv_results_)[['params','mean_test_score']] # + hidden=true # 
+ hidden=true my_grid_search.best_score_ # + hidden=true # + hidden=true RF_optimized_parms = my_grid_search.best_params_ RF_optimized_parms # + hidden=true # + hidden=true train_scores = [x[1] for x in my_grid_search.predict_proba(total_df_piped)] roc_auc_score(total_df['TARGET'], train_scores) # + hidden=true # + hidden=true del my_grid_search # + hidden=true # + [markdown] heading_collapsed=true # ### DONE. log reg grid search. # + hidden=true # + hidden=true active="" # Results: # # input: param_grid = [{ 'C' : [21,22,23,24,25,26,27,28] }] # output: # {'C': 22} # T_score = 0.76631882528102, V_score = 0.7752745675559647 # # input: # param_grid = [{ 'C' : [21.2,21.5,21.7,22,22.2,22.5,22.7] }] # output: # {'C': 22} # T_score = 0.76631882528102, V_score = 0.7752745675559647 # # input: # param_grid = [{ 'C' : [21.8,21.9,22,22.1] }] # output: # {'C': 22} # T_score = 0.76631882528102, V_score = 0.7752745675559647 # # ****************************************************************** # Switching to training on full set. And some slightly new level 1 results. # ****************************************************************** # # input: # param_grid = [{ 'C' : [21.8,21.9,22,22.1] }] # output: # {'C': 22.1} # best_score = 0.7687337315528854, train_score = 0.7689685724410957 # # input: # param_grid = [{ 'C' : [22,23,25,30] }] # output: # {'C': 23} # best_score = 0.7687359741746989, train_score = 0.7689615070006149 # # input: # param_grid = [{ 'C' : [22.5,23,23.5,24] }] # output: # {'C': 23.5} # best_score = 0.7687422095719723, train_score = 0.7689692835023063 # # # # ****************************************************************** # Restarting (Fix leaked level 1 results. 
) # ****************************************************************** # # input: # param_grid = [{ 'C' : [15,20,25,30] }] # output: # {'C': 30} # best_score = 0.7689873606423704, train_score = 0.7691956939415567 # # input: # param_grid = [{ 'C' : [25,30,40,50,100] }] # output: {'C': 100} # best_score = 0.7689889876758225, train_score = 0.7692006625352015 # # input: # param_grid = [{ 'C' : [100,200,300,400,500,1000] }] # output:{'C': 100} # best_score = 0.7689889876758225, train_score = 0.7692006625352015 # # input: # param_grid = [{ 'C' : [60,70,80,90,100,110,120,130,140,50,160,170,180,190] }] # output: # {'C': 80} # best_score = 0.7689906617338039, train_score = 0.7692014935790852 # # input: # param_grid = [{ 'C' : [72,74,76,78,80,82,84,86,88] }] # output:{'C': 88} # best_score = 0.768991307672436, train_score = 0.769200101381084 # + hidden=true # + hidden=true param_grid = [{ 'C' : [72,74,76,78,80,82,84,86,88] }] my_LgRg = LogisticRegression(penalty= 'l2', random_state = 0, class_weight = None) my_LgRg_grid_search = GridSearchCV(my_LgRg, param_grid, cv = 3, # for integer inputs, Stratified folding is default. return_train_score = True , scoring = 'roc_auc', n_jobs = -1, refit = True, verbose = 51, ) my_LgRg_grid_search.fit(total_df_piped, total_df['TARGET']) # + hidden=true # + hidden=true pd.set_option('display.max_colwidth', -1) pd.DataFrame(my_LgRg_grid_search.cv_results_)[['params','mean_test_score']] # + hidden=true # + hidden=true my_LgRg_grid_search.best_score_ # + hidden=true # + hidden=true LgRg_optimized_parms = my_LgRg_grid_search.best_params_ LgRg_optimized_parms # + hidden=true # + hidden=true train_scores = [x[1] for x in my_LgRg_grid_search.predict_proba(total_df_piped)] roc_auc_score(total_df['TARGET'], train_scores) # + hidden=true # + hidden=true del my_LgRg_grid_search # + hidden=true # + [markdown] heading_collapsed=true # ### DONE. MLP Classifier. 
# + hidden=true # + hidden=true active="" # {'alpha': 0.13, 'hidden_layer_sizes': (70,)} # ' # # Results: # # input: # param_dist_dict = { 'alpha' : [.001,.01,.1], # 'hidden_layer_sizes' : [(100, ),(110, ),(120, )], # } # # output: {'alpha': 0.01, 'hidden_layer_sizes': (100,)} # T_score = 0.767409457872488, V_score = 0.7749587075199571 # # input: # param_dist_dict = { 'alpha' : [.002,.005,.007,.01,.02,.05,.07,.1], # 'hidden_layer_sizes' : [(50, ),(70, ),(80, ),(100, )], # } # output: # {'alpha': 0.1, 'hidden_layer_sizes': (70,)} # T_score =0.7661859533460901, V_score = 0.7748665787312043 # # input: # param_dist_dict = { 'alpha' : [.08,.1,.2,1], # 'hidden_layer_sizes' : [(50, ),(55, ),(70, ),(75, ),(80, )], # } # # output: # {'alpha': 0.1, 'hidden_layer_sizes': (70,)} # T_score = 0.7661859533460901, V_score = 0.7748665787312043 # # input: # param_dist_dict = { 'alpha' : [.09,.1,.13,.17], # 'hidden_layer_sizes' : [(55, ),(60, ),(70, ),(73, )], # } # output: # {'alpha': 0.13, 'hidden_layer_sizes': (70,)} # T_score = 0.765805255095316, V_score = 0.7740633407933621 # # input: # param_dist_dict = { 'alpha' : [.11,.12,.13,.15], # 'hidden_layer_sizes' : [(62, ),(65, ),(67, ),(70, ),(72, )], # } # output: # {'alpha': 0.13, 'hidden_layer_sizes': (70,)} # T_score = 0.765805255095316, V_score = 0.7740633407933621 # # input: # param_dist_dict = { 'alpha' : [.12,.13,.14], # 'hidden_layer_sizes' : [(68, ),(69, ),(70, ),(71, )], # } # output: # {'alpha': 0.13, 'hidden_layer_sizes': (70,)} # T_score=0.765805255095316, V_score = 0.7740633407933621 # # ****************************************************************** # Switching to training on full set. And some slightly new level 1 results. # ****************************************************************** # # # # ****************************************************************** # Restarting (Fix leaked level 1 results. 
) # ****************************************************************** # # input: # param_dist_dict = { 'alpha' : [.05,.1,.15,.2], # 'hidden_layer_sizes' : [(50, ),(60, ),(70, ),(80, )], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (70,)} # best_score = 0.7688137480208642, train_score = 0.76932884834681 # # input: # param_dist_dict = { 'alpha' : [.07,.1,.13], # 'hidden_layer_sizes' : [(65, ),(70, ),(75, )], # } # output: # {'alpha': 0.13, 'hidden_layer_sizes': (70,)} # best_score = 0.7687719119930789, train_score = 0.7689363690631118 # # input: # param_dist_dict = { 'alpha' : [.11,.13,.15], # 'hidden_layer_sizes' : [(67, ),(70, ),(73, )], # } # # output: # {'alpha': 0.15, 'hidden_layer_sizes': (67,)} # best_score = 0.7689326925827549, train_score = 0.7689057213274595 # + hidden=true # + hidden=true param_dist_dict = { 'alpha' : [.11,.13,.15], 'hidden_layer_sizes' : [(67, ),(70, ),(73, )], } # + hidden=true # + hidden=true my_MLP = MLPClassifier(random_state=0, tol=0.0001, # verbose=51, warm_start=False, momentum=0.9) my_grid_search = GridSearchCV(my_MLP, param_dist_dict, cv = 3, return_train_score = True , scoring = 'roc_auc', n_jobs = -1, refit = True, verbose = 51, ) my_grid_search.fit(total_df_piped, total_df['TARGET']) # + hidden=true # + hidden=true pd.set_option('display.max_colwidth', -1) pd.DataFrame(my_grid_search.cv_results_)[['params','mean_test_score']] # + hidden=true # + hidden=true my_grid_search.best_score_ # + hidden=true # + hidden=true my_grid_search_optimized_parms = my_grid_search.best_params_ my_grid_search_optimized_parms # + hidden=true # + hidden=true train_scores = [x[1] for x in my_grid_search.predict_proba(total_df_piped)] roc_auc_score(total_df['TARGET'], train_scores) # + hidden=true # + hidden=true del my_grid_search # + hidden=true # - # # models with all features total_df_piped, final_feature_list, total_pipeline, trans_list = master_pipeline(df_in = total_df, int_cutoff=20, poly_deg=4, feats_with_interaction=[] ) # + 
[markdown] heading_collapsed=true # ### DONE random forest grid search # + hidden=true # + hidden=true active="" # Results: # # input: # param_dist_dict = { 'max_depth' : [10,20,30], # 'min_samples_leaf' : [300,400,500,600], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 20, # 'min_samples_leaf': 600, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7677964449764604, test_score = 0.7846835207980667 # # input: # param_dist_dict = { 'max_depth' : [10,20,30], # 'min_samples_leaf' : [600,700,800,900,1000], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 20, # 'min_samples_leaf': 600, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7677964449764604, train_score = 0.7846835207980667 # # input: # param_dist_dict = { 'max_depth' : [15,20,25], # 'min_samples_leaf' : [550,600,625,650], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 20, # 'min_samples_leaf': 600, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7677964449764604, test_score = 0.7846835207980667 # # input: # param_dist_dict = { 'max_depth' : [16,20,24], # 'min_samples_leaf' : [570,585,600,615], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 20, # 'min_samples_leaf': 585, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7678954093859139, test_score = 0.7850927301123214 # # input: # param_dist_dict = { 'max_depth' : [17,19,21,23], # 'min_samples_leaf' : [575,580,585,590,595], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 19, # 'min_samples_leaf': 595, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7679452249753776, test_score = 0.7848303877123949 # # input: # param_dist_dict = { 'max_depth' : [18,19,20,21,22,23,24], # 'min_samples_leaf' : [591,592,593,594,595,596,597,598,599,600], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # 
{'max_depth': 19, # 'min_samples_leaf': 597, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7679517809833616, test_score = 0.7848144488264958 # # ****************************************************************** # Switching to training on full set. And some slightly new level 1 results. # ****************************************************************** # # input: # param_dist_dict = { 'max_depth' : [18,19,20,21], # 'min_samples_leaf' : [594,595,596,597,598,599,600], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 18, # 'min_samples_leaf': 598, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7678558950584005, train_score = 0.7851159325098603 # # input: # param_dist_dict = { 'max_depth' : [10,15,20], # 'min_samples_leaf' : [590,595,596,597,598,599,600,610], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 20, # 'min_samples_leaf': 590, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7679840053193745, train_score = 0.7851673942468692 # # input: # param_dist_dict = { 'max_depth' : [20,30,40,50], # 'min_samples_leaf' : [200,400,500,525,550,575,600,700,800], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 20, # 'min_samples_leaf': 525, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.768073429697852, train_score = 0.787580700228428 # # input: # param_dist_dict = { 'max_depth' : [16,18,20,22,24], # 'min_samples_leaf' : [505,510,515,520,525,530,535,540,545], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 16, # 'min_samples_leaf': 510, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.768248916714676, train_score = 0.787587817537903 # # input: # param_dist_dict = { 'max_depth' : [5,10,14,16,18,20], # 'min_samples_leaf' : [505,507,510,512,515], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 16, # 'min_samples_leaf': 
512, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7682950251471831, train_score = 0.7874740751036958 # # input: # # param_dist_dict = { 'max_depth' : [14,15,16,17,18], # 'min_samples_leaf' : [510,511,512,513,514,515], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # {'max_depth': 17, # 'min_samples_leaf': 511, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7682996744974617, train_score = 0.7879583895814456 # # # ****************************************************************** # Restarting (Fix leaked level 1 results. ) # ****************************************************************** # # input: # param_dist_dict = { 'max_depth' : [10,15,20,30], # 'min_samples_leaf' : [400,500,550,600,650], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 20, # 'min_samples_leaf': 550, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7681624453883024, train_score = 0.7864507247869006 # # # input: # param_dist_dict = { 'max_depth' : [15,20,22,24,26], # 'min_samples_leaf' : [520,530,540,550,560,570,580,590], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 20, # 'min_samples_leaf': 550, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7681624453883024, train_score = 0.7864507247869006 # # # input: # param_dist_dict = { 'max_depth' : [16,18,20,22], # 'min_samples_leaf' : [543,547,550,553,556], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # # output: # {'max_depth': 16, # 'min_samples_leaf': 550, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7681837088411191, train_score = 0.7865378262227534 # # input: # param_dist_dict = { 'max_depth' : [14,16,18], # 'min_samples_leaf' : [546,548,550,552,554], # 'min_samples_split' : [2], # 'n_estimators': [40], # } # output: # # {'max_depth': 16, # 'min_samples_leaf': 550, # 'min_samples_split': 2, # 'n_estimators': 40} # best_score = 0.7681837088411191, 
# train_score = 0.7865378262227534

# + hidden=true

# + hidden=true
# Final refinement of the random-forest grid around the best region found above.
param_dist_dict = {
    'max_depth': [14, 16, 18],
    'min_samples_leaf': [546, 548, 550, 552, 554],
    'min_samples_split': [2],
    'n_estimators': [40],
}

rf_estimator = RandomForestClassifier(random_state=0, class_weight=None)

my_grid_search = GridSearchCV(rf_estimator,
                              param_dist_dict,
                              cv=3,
                              return_train_score=True,
                              scoring='roc_auc',
                              n_jobs=-1,
                              refit=True,
                              verbose=51)
my_grid_search.fit(total_df_piped, total_df['TARGET'])

# + hidden=true

# + hidden=true
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(my_grid_search.cv_results_)[['params', 'mean_test_score']]

# + hidden=true

# + hidden=true
RF_optimized_parms = my_grid_search.best_params_
RF_optimized_parms

# + hidden=true

# + hidden=true
my_grid_search.best_score_

# + hidden=true

# + hidden=true
# AUC of the refit best estimator on the full training set.
train_scores = my_grid_search.predict_proba(total_df_piped)[:, 1]
roc_auc_score(total_df['TARGET'], train_scores)

# + hidden=true

# + hidden=true
del my_grid_search

# + hidden=true

# + [markdown] heading_collapsed=true
# ### DONE.
log reg grid search # + hidden=true # + hidden=true active="" # Results: # # input: # param_grid = [{ 'C' : [36,40,50,70] }] # # output: # {'C': 70} # best_score = 0.771463923141084, train_score = 0.7746624299958278 # # # input: # param_grid = [{ 'C' : [70,90,100,120,150,200] }] # # output: # {'C': 200} # best_score = 0.7715501709165813, train_score = 0.7746619781370906 # # input: # param_grid = [{ 'C' : [200,300,400,500,700] }] # output: # {'C': 200} # best_score = 0.7715501709165813, train_score = 0.7746619781370906 # # input: # param_grid = [{ 'C' : [175,185,200,225,250,275] }] # output: # {'C': 175} # best_score = 0.7718108441473754, train_score = 0.7745536856521934 # # input: # param_grid = [{ 'C' : [160,165,170,175,180] }] # output: # {'C': 175} # best_score = 0.7718108441473754, test_score = 0.7745536856521934 # # ****************************************************************** # Switching to training on full set. And some slightly new level 1 results. # ****************************************************************** # # input: # param_grid = [{ 'C' : [171,172,173,174,175,176,177,178,179] }] # output: # {'C': 174} # best_score = 0.7714033017857964, train_score = 0.7747486686109132 # # # # ****************************************************************** # Restarting (Fix leaked level 1 results. 
) # ****************************************************************** # # # input: # param_grid = [{ 'C' : [170,175,180] }] # output: # {'C': 175} # best_score = 0.7713735163180996, train_score = 0.7746764505839284 # # input: # param_grid = [{ 'C' : [171,172,173,174,175,176,177,178,179] }] # output: # {'C': 179} # best_score = 0.7715860050402779, train_score = 0.7738151103627916 # # input: # param_grid = [{ 'C' : [177,178,179,180,181] }] # output: # {'C': 179} # bset_score = 0.7715860050402779, train_score = 0.7738151103627916 # + hidden=true # + hidden=true param_grid = [{ 'C' : [177,178,179,180,181] }] my_LgRg = LogisticRegression(penalty= 'l2', random_state = 0, class_weight = None) my_LgRg_grid_search = GridSearchCV(my_LgRg, param_grid, cv = 3, # for integer inputs, Stratified folding is default. return_train_score = True , scoring = 'roc_auc', n_jobs = -1, refit = True, verbose = 51, ) my_LgRg_grid_search.fit(total_df_piped, total_df['TARGET']) # + hidden=true # + hidden=true pd.set_option('display.max_colwidth', -1) pd.DataFrame(my_LgRg_grid_search.cv_results_)[['params','mean_test_score']] # + hidden=true # + hidden=true my_LgRg_grid_search.best_score_ # + hidden=true # + hidden=true LgRg_optimized_parms = my_LgRg_grid_search.best_params_ LgRg_optimized_parms # + hidden=true # + hidden=true train_scores = [x[1] for x in my_LgRg_grid_search.predict_proba(total_df_piped)] roc_auc_score(total_df['TARGET'], train_scores) # + hidden=true # + hidden=true del my_LgRg_grid_search # + hidden=true # + [markdown] heading_collapsed=true # ### DONE. 
MLP Classifier # + hidden=true # + hidden=true active="" # Results: # # input: # param_dist_dict = { 'alpha' : [.1,.5,1], # 'hidden_layer_sizes' : [(40, ),(50, ),(100, ),(150, ),(200, )], # } # # output: # {'alpha': 0.1, 'hidden_layer_sizes': (40,)} # best_score = 0.7670088768612681, train_score = 0.7706656650820902 # # input: # param_dist_dict = { 'alpha' : [.1,.3,.5], # 'hidden_layer_sizes' : [(10, ),(20, ),(30, ),(40, )], # } # # output: # {'alpha': 0.1, 'hidden_layer_sizes': (40,)} # best_score =0.7670088768612681, test_score = 0.7706656650820902 # # input: # param_dist_dict = { 'alpha' : [.05,.07,.1], # 'hidden_layer_sizes' : [(35, ),(40, ),(45, )], # } # output: # {'alpha': 0.1, 'hidden_layer_sizes': (40,)} # best_score = 0.7670088768612681, train_score = 0.7706656650820902 # # input: # param_dist_dict = { 'alpha' : [.08,.1,.2,.4,1], # 'hidden_layer_sizes' : [(37, ),(40, ),(43, )], # } # output: # {'alpha': 0.1, 'hidden_layer_sizes': (40,)} # best_score = 0.7670088768612681, test_score = 0.7706656650820902 # # ****************************************************************** # Switching to training on full set. And some slightly new level 1 results. 
# ****************************************************************** # # input: # param_dist_dict = { 'alpha' : [.09,.10,.12,.14,.16,.18], # 'hidden_layer_sizes' : [(38, ),(39, ),(40, ),(41, ),(42, ),(43, )], # } # output: # {'alpha': 0.09, 'hidden_layer_sizes': (43,)} # best_score = 0.7675439018233958, train_score = 0.7715119433453217 # # input: # param_dist_dict = { 'alpha' : [.01,.05,.10], # 'hidden_layer_sizes' : [(40, ),(43, ),(50, ),(70, ),(100, )], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (100,)} # best_score = 0.7676977241901013, train_score = 0.7742670832402382 # # input: # param_dist_dict = { 'alpha' : [.05], # 'hidden_layer_sizes' : [(80, ),(100, ),(120, ),(140, ),(160, )], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (100,)} # best_score = , train_score = 0.7742670832402382 # # input: # param_dist_dict = { 'alpha' : [.03,.05,.08], # 'hidden_layer_sizes' : [(90,),(100, ),(110, ),(120,)], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (100,)} # best_score = 0.7676977241901013, train_score = 0.7742670832402382 # # input: # param_dist_dict = { 'alpha' : [.03,.05,.07], # 'hidden_layer_sizes' : [(93,),(97, ),(100, ),(103,),(107, )], # } # # output: # {'alpha': 0.05, 'hidden_layer_sizes': (100,)} # best_score = 0.7676977241901013, train_score = 0.7742670832402382 # # # # ****************************************************************** # Restarting (Fix leaked level 1 results. 
) # ****************************************************************** # # input: # param_dist_dict = { 'alpha' : [.01,.05,.1], # 'hidden_layer_sizes' : [(30, ),(40, ),(50, ),(70, ),(100, )], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (50,)} # best_score = 0.7675229837136625, train_score = 0.7733901342992994 # # input: # param_dist_dict = { 'alpha' : [.03,.05,.07], # 'hidden_layer_sizes' : [(45, ),(50, ),(55, )], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (55,)} # best_score = 0.7678962576420044, train_score = 0.7760116364383361 # # input: # param_dist_dict = { 'alpha' : [.04,.05,.06], # 'hidden_layer_sizes' : [(52,),(55, ),(60, ),(65, )], # } # output: # {'alpha': 0.05, 'hidden_layer_sizes': (55,)} # best_score = 0.7678962576420044, train_score = 0.7760116364383361 # # + hidden=true # + hidden=true param_dist_dict = { 'alpha' : [.04,.05,.06], 'hidden_layer_sizes' : [(52,),(55, ),(60, ),(65, )], } # + hidden=true # + hidden=true my_MLP = MLPClassifier(random_state=0, tol=0.0001, # verbose=51, warm_start=False, momentum=0.9) my_grid_search = GridSearchCV(my_MLP, param_dist_dict, cv = 3, return_train_score = True , scoring = 'roc_auc', n_jobs = -1, refit = True, verbose = 51, ) my_grid_search.fit(total_df_piped, total_df['TARGET']) # + hidden=true # + hidden=true pd.set_option('display.max_colwidth', -1) pd.DataFrame(my_grid_search.cv_results_)[['params','mean_test_score']] # + hidden=true # + hidden=true my_grid_search.best_score_ # + hidden=true # + hidden=true my_grid_search_optimized_parms = my_grid_search.best_params_ my_grid_search_optimized_parms # + hidden=true # + hidden=true train_scores = [x[1] for x in my_grid_search.predict_proba(total_df_piped)] roc_auc_score(total_df['TARGET'], train_scores) # + hidden=true # + hidden=true del my_grid_search # + hidden=true # + hidden=true
Kaggle_Home_Credit_Default_Risk/level_2_ensembling/final_level_2_hyp_par_optimization.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.4.5 # language: julia # name: julia-0.4 # --- using PyPlot # + abstract Kernel # squared exponential kernel type SEKernel <: Kernel params::AbstractArray # log(σ_f^2), log(l^2), log(σ_y^2) SEKernel(l::Float64,σ::Float64,σy::Float64) = new([log(σ^2); log(l^2); log(σy^2)]) SEKernel(params::AbstractArray) = new(params) end # kernel defintion for SE kernel function kern(se::SEKernel, x::AbstractArray,x_::AbstractArray) diff = x - x_ k = exp(se.params[1]) * exp(-diff'*diff/(2*exp(se.params[2])))[1] return k end # dk/dx (not x_) function d_kern(se::SEKernel, x::AbstractArray,x_::AbstractArray) δ = x - x_ result = -exp(se.params[1]) * δ * exp(-δ'*δ/(2exp(se.params[2]))) / exp(se.params[2]) return result[1] end # d^2k/dx^2 (not x_) function d2_kern(se::SEKernel, x::AbstractArray,x_::AbstractArray) δ = x - x_ outer_product = δ*δ' result = -(eye(outer_product) - outer_product/exp(se.params[2]))* exp(se.params[1]) * exp(-δ'*δ/(2exp(se.params[2])))/exp(se.params[2]) return result[1] end # d^3k/dx^3 (not x_) # note that we assume that x and x_ are # 1x1 arrays here so we don't have to return a tensor! 
# Third derivative of the SE kernel w.r.t. x, specialised to 1x1 inputs so the
# result is a scalar rather than a rank-3 tensor (see the note above).
function d3_kern(se::SEKernel, x::AbstractArray,x_::AbstractArray)
    δ = x - x_
    return (exp(se.params[1]) * (3δ - δ^3/exp(se.params[2])) * exp(-δ'*δ/(2exp(se.params[2])))[1] / exp(se.params[2])^2)[1]
end

# create covariance (Gram) matrix K
# X,X_ have rows of data points
# NOTE(review): the observation-noise term exp(params[3]) is added whenever the
# result happens to be square — this assumes K is only called with X == X_ when
# the row counts match (true in this notebook, but K(se, t, t_) with
# equal-length t and t_ would wrongly get noise added; verify if reused).
function K(kernel::Kernel, X::AbstractArray,X_::AbstractArray)
    K_ = zeros(size(X)[1], size(X_)[1])
    for i=1:size(K_)[1]
        for j=1:size(K_)[2]
            @inbounds K_[i,j] = kern(kernel, X[i,:], X_[j,:])
        end
    end
    if size(K_)[1] == size(K_)[2] && typeof(kernel) == SEKernel
        K_ += exp(kernel.params[3])*eye(K_)
    end
    return K_
end

# First-derivative Gram matrix.  d_kern differentiates w.r.t. its *first*
# argument, but here the test point is the *second* argument; for the
# stationary SE kernel (a function of x - x_) we have ∂/∂x_ = -∂/∂x, hence
# the leading minus sign for this odd-order derivative.
function d_K(kernel::Kernel, X::AbstractArray,X_::AbstractArray)
    K_ = zeros(size(X)[1], size(X_)[1])
    for i=1:size(K_)[1]
        for j=1:size(K_)[2]
            @inbounds K_[i,j] = -d_kern(kernel, X[i,:], X_[j,:])
        end
    end
    return K_
end

# Second-derivative Gram matrix (even order, so no sign flip is needed).
function d2_K(kernel::Kernel, X::AbstractArray,X_::AbstractArray)
    K_ = zeros(size(X)[1], size(X_)[1])
    for i=1:size(K_)[1]
        for j=1:size(K_)[2]
            @inbounds K_[i,j] = d2_kern(kernel, X[i,:], X_[j,:])
        end
    end
    return K_
end

# Third-derivative Gram matrix (odd order: sign flips, as for d_K).
function d3_K(kernel::Kernel, X::AbstractArray,X_::AbstractArray)
    K_ = zeros(size(X)[1], size(X_)[1])
    for i=1:size(K_)[1]
        for j=1:size(K_)[2]
            @inbounds K_[i,j] = -d3_kern(kernel, X[i,:], X_[j,:])
        end
    end
    return K_
end

# Scalar convenience overload: wrap the two floats as 1-element arrays.
function K(se::SEKernel, x::Float64,x_::Float64)
    return kern(se, [x], [x_])
end
# -

# __Algorithm 15.1:__ GP regression:
# (Murphy pp. 526)
# 1. $L = \mathrm{cholesky}(K + \sigma_y^2 I)$
# 2. $\alpha = L^T \backslash (L \backslash y)$
# 3. $\mathbb{E}[f_*] = k_*^T\alpha$
# 4. $v = L \backslash k_*$
# 5. $\mathrm{var}[f_*] = \kappa(x_*,x_*) - v^Tv$
# 6. $\log p(y|X) = -\frac{1}{2}y^T\alpha - \sum_i \log L_{ii} - \frac{N}{2}\log(2\pi)$

# +
σ = 2.
l = 0.75
σ_y = 1.5
se = SEKernel(l,σ,σ_y)

# define some fake data
t = collect(0:0.5:10)
y_true = t.*sin(0.25*2π*t) + t
y = y_true + randn!(zeros(t))*2

println("==> Construct Kernels")

t_ = collect(-1:0.01:15)

K_ = K(se, t, t)
k_ = K(se, t, t_)
k__ = K(se, t_,t_)

println("==> Run GP Regression")

# GP Regression algorithm
# -- Machine Learning: A Probabilistic Perspective
# <NAME>, pp. 526
#
# BUGFIX: Julia's `chol` returns the *upper* triangular factor U with
# K = U'U, so α = K⁻¹y must be solved as U\(U'\y).  The previous order,
# L'\(L\y), computes (UU')⁻¹y, which is not K⁻¹y.  The later GP cells in
# this notebook already use the corrected order; this also makes the
# reported marginal likelihood correct and consistent with them.
# (v = L'\k_ was already correct: v'v = k_' U⁻¹ U'⁻¹' k_ = k_' K⁻¹ k_.)
L = chol(K_)
α = L\(L'\y)
μ = k_'*α
v = L'\k_
σ = sqrt(diag(k__) - diag(v'*v))
logP = -y'*α/2 - sum(log(diag(L))) - size(y)[1]*log(2π)/2
println("-- done.")
println("==> Marginal Likelihood: log P(y|X) = $(logP[1])")

# +
plot(t,y, alpha=0.6) #label="\$f(t) = t\\sin(\\pi t/2) + t + \\epsilon\$", alpha=0.6)
plot(t_, μ, label="\$\\mathbb{E}[f_*]\$", color="#FF425B")
fill_between(t_, μ-2σ, μ+2σ, color="#dddddd", alpha=0.3)
plot(t_, t_.*sin(0.25*2π*t_) + t_, label="Ground Truth", color="#84FF80")
title("Gaussian Process Regression")
xlabel("\$t\$")
ylabel("\$f\$")
legend(loc="upper left")
xlim(-1,15)
# -

# (Murphy pp. 520)
#
# Our Gaussian Process is given by the joint density
#
# $$
# \begin{pmatrix}y\\f_* \end{pmatrix} \sim \mathcal{N}\left( 0, \begin{pmatrix} K_y & K_* \\ K_*^T & K_{**} \end{pmatrix} \right)
# $$
#
# Note we assume the mean $\mu = 0$.
Then our posterior predictive distribution is given by # # $$ # \begin{align*} # p(f_*|x_*,X,y) &= \mathcal{N}(f_*|\mu_*,\Sigma_*)\\ # \mu &= K_*^TK_y^{-1}y\\ # \Sigma_* &= K_{**} - K_*^TK_y^{-1}K_* # \end{align*} # $$ # # When we have a single test input this becomes # # $$ # p(f_*|x_*,X,y) = \mathcal{N}(f_*| k_*^TK_y^{-1}y,\; k_{**} - k_*^TK_y^{-1}k_*), # $$ # # where the posterior mean can be written # # $$ # \overline f_* = k_*^TK_y^{-1}y = \sum_{i=1}^N \alpha_i \kappa(x_i, x_*)\qquad\alpha = K_y^{-1}y # $$ # # This gives the first derivative # # $$ # \frac{\partial \overline f_*}{\partial x_*} = \sum_{i=1}^N \alpha_i \kappa'(x_i,x_*), # $$ # # For our Squared Exponential Kernel, # # $$ # \kappa(x,x') = \sigma_f^2 \exp\left( -\frac{1}{2}(x-x')^T\Lambda^{-1}(x-x') \right) = \sigma_f^2 \exp\left( -\frac{1}{2}||x-x' ||_{\Lambda^{-1}}^2 \right). # $$ # # We have can differentiate this, as shown in ([<NAME>Hutchon 2013](http://mlg.eng.cam.ac.uk/mchutchon/DifferentiatingGPs.pdf) pp. 6), giving (with the third derivative in one dimension $x\in\mathbb{R}$ to avoid a tensor) # # $$ # \begin{align*} # \frac{\partial \kappa(x,x')}{\partial x} &= -\sigma_f^2\Lambda^{-1}(x-x')\exp\left(-\frac{1}{2}||x-x'||_{\Lambda^{-1}}^2\right)\\ # &= -\Lambda^{-1}(x-x')\kappa(x,x')\\ # \frac{\partial^2 \kappa(x,x')}{\partial x^2} &= -\Lambda^{-1}\left(I - \Lambda^{-1}(x-x')(x-x')^T\right)\kappa(x,x')\\ # \frac{\partial^3 \kappa(x,x')}{\partial x^3} &= \lambda^{-1}\left[ 3(x-x') - \lambda^{-1}(x-x')^3 \right]\kappa(x,x')\\ # \end{align*} # $$ # + # define some fake data t = collect(0:0.5:10) y_true = t.*sin(0.25*2π*t) + t y = y_true + randn!(zeros(t))*2 println("==> Construct Kernels") σ = 2. l = 0.75 σ_y = 1. 
se = SEKernel(l,σ,σ_y) t_ = collect(-1:0.01:15) K_ = K(se, t, t) k_ = K(se, t, t_) dk_ = d_K(se,t,t_) d2k_ = d2_K(se,t,t_) d3k_ = d3_K(se,t,t_) k__ = K(se, t_,t_) println("==> Run GP Regression with Derivatives") # GP Regression algorithm + derivatives # -- Machine Learning: A Probabalistic Perspective # <NAME>, pp. 526 L = chol(K_) α = L\(L'\y) μ = k_'*α dμ = dk_'*α d2μ = d2k_'*α d3μ = d3k_'*α v = L'\k_ σ = sqrt(diag(k__) - diag(v'*v)) logP = -y'*α/2 - sum(log(diag(L))) - size(y)[1]*log(2π)/2 println("-- done.") println("==> Marginal Likelihood: log P(y|X) = $(logP[1])") println("==> Plot Results") plot(t,y, alpha=0.6) plot(t_, μ, label="\$\\mathbb{E}[f_*]\$", color="#FF425B") plot(t_, dμ, label="\$\\mathbb{E}\\left[\\frac{\\partial f_*}{\\partial x}\\right]\$", color="black") plot(t_, d2μ, label="\$\\mathbb{E}\\left[\\frac{\\partial^2 f_*}{\\partial x^2}\\right]\$", color="green") plot(t_, d3μ, label="\$\\mathbb{E}\\left[\\frac{\\partial^3 f_*}{\\partial x^3}\\right]\$", color="cyan") fill_between(t_, μ-2σ, μ+2σ, color="#dddddd", alpha=0.3) #plot(t_, t_.*sin(0.25*2π*t_) + t_, label="Ground Truth", color="#84FF80") title("Gaussian Process Regression") xlabel("\$t\$") ylabel("\$f\$") legend(loc="upper right") grid() xlim(-1,15) # - # ### Financial Data using DataFrames using Dates # will throw warning for some reason # + tsla_raw = readtable("../data/raw/financial/tsla.csv") tsla_raw[:_Date] = Date(tsla_raw[:_Date],"d-u-y") tsla_raw[:Date] = [Float64(datetime2rata(DateTime(date))) for date in tsla_raw[:_Date]] sort!(tsla_raw, cols=[:Date]) head(tsla_raw) # + t = collect(tsla_raw[:Date]) t = (t - mean(t)) / std(t) tsla = collect(tsla_raw[:Open]) tsla = (tsla - mean(tsla)) / std(tsla) t_ = collect(minimum(t):1e-2:maximum(t)) println("==> Run GP Regression over TSLA Open Prices") println("-- between May 5, 2015 and May 4, 2016") σ = 0.75 l = 0.2 σ_y = 0.2 se = SEKernel(l,σ,σ_y) println("==> Construct Kernels") K_ = K(se, t, t) k_ = K(se, t, t_) dk_ = d_K(se,t,t_) d2k_ 
= d2_K(se,t,t_) k__ = K(se, t_,t_) println("-- done.") # - println("==> Run GP Regression with Derivatives") # GP Regression algorithm + derivatives # -- Machine Learning: A Probabalistic Perspective # <NAME>, pp. 526 L = chol(K_) α = L\(L'\tsla) μ = k_'*α dμ = dk_'*α d2μ = d2k_'*α v = L'\k_ σ = sqrt(diag(k__) - diag(v'*v)) logP = -tsla'*α/2 - sum(log(diag(L))) - size(tsla)[1]*log(2π)/2 println("-- done.") println("==> Marginal Likelihood: log P(y|X) = $(logP[1])") # + println("==> Plot Results") plot(t,tsla) plot(t_, μ, label="\$\\mathbb{E}[f_*]\$", color="#FF425B") plot(t_, dμ, label="\$\\mathbb{E}\\left[\\frac{\\partial f_*}{\\partial x}\\right]\$", color="black") #plot(t_, d2μ, label="\$\\mathbb{E}\\left[\\frac{\\partial^2 f_*}{\\partial x^2}\\right]\$") fill_between(t_, μ-2σ, μ+2σ, color="#dddddd", alpha=0.3) title("TSLA Open Prices") xlabel("\$t\$") ylabel("\$f\$") legend(loc="lower left") grid() xlim(minimum(t), maximum(t)) # - # ### Numerical Gradient Check # # Check the first derivative numerically off of the mean. Check the second derivative numerically off of the analytic first derivative. eps = 0.1 println("Checking First and Second Derivatives with ϵ=$(eps)") for i=2:Integer(floor(size(t_)[1]/10)):size(t_)[1] pass = true # check first derivative if abs(dμ[i] - (μ[i+1]-μ[i-1])/(t_[i+1]-t_[i-1])) > eps pass = false end # check second derivative if abs(d2μ[i] - (dμ[i+1]-dμ[i-1])/(t_[i+1]-t_[i-1])) > eps pass = false end if pass println("t=$(t_[i])\t [√]") else println("t=$(t_[i])\t [x]") end end # ### Finding Critical Points # # We use Newton's Method to find critical points of the time series, where $\mathbb{E}\left[\frac{\partial f^*}{\partial x}\right] = 0$. We first approximate $f$ quadratically about our current guess $x_n$: # # $$ # f \approx f(x_n) + \nabla f(x_0)^T(x-x_n) + \frac{1}{2}(x-x_n)^T\nabla^2 f(x_0)(x-x_n). # $$ # # where $\nabla^2 f(x_n)$ is the Hessian of $f$ at $x_n$. 
Now, we find the critical point $x_{n+1}$ from our current quadratic approximation of $f$. Taking the first derivative we find # # $$ # \begin{align*} # \frac{\partial f}{\partial x} &= \nabla f(x_n) + \nabla^2 f(x_0)(x-x_n) = 0\\ # x_{n+1} &= x_n - \nabla^2 f(x_n)^{-1}\nabla f(x_n), # \end{align*} # $$ # # giving us our desired update rule. # # # Algorithm 2: Newton-Raphson Method # 1. Input: Initial Guess $x_0$ # 2. Repeat Until Convergence ( $||\nabla f(x_n)||_2 \leq \epsilon$ ): # 3. $x_{n+1} = x_n - \nabla^2 f(x_n)^{-1}\nabla f(x_n)$ # 4. Output Optimal $x_{n+1}$ # + # find critical points of TSLA data ϵ = 1e-6 # convergence tolerance N = 50 # number of starting points t_opt = maximum(t)*2*(rand(N,1)-0.5) ∇f = d_K(se,t,t_opt)'*α ∇2f = d2_K(se,t,t_opt)'*α i = 0 @time while any(∇f.*∇f .> ϵ^2) && i < 100 t_opt = t_opt - ∇f ./ ∇2f if i > 5 # drop points that aren't converging quickly t_opt = t_opt[∇f - mean(∇f) .< 2std(∇f)] # remove any of t out of the range of data t_opt = t_opt[!(t_opt .> maximum(t))] t_opt = t_opt[!(t_opt .< minimum(t))] end ∇f = d_K(se,t,t_opt)'*α ∇2f = d2_K(se,t,t_opt)'*α i = i+1 end println("==> Done.") println("-- Steps: $(i)") println("-- Maximum? $(∇2f[1] < 0)") # - unique(t_opt) # + println("==> Plot Results of Critical Point Optimization") t_opt = unique(t_opt[!isnan(t_opt)]) f_opt = K(se, t, t_opt)'*α ∇f_opt = d_K(se, t, t_opt)'*α plot(t,tsla) plot(t_, μ, label="\$\\mathbb{E}[f_*]\$", color="#FF425B") scatter(t_opt, f_opt, label="\$\\mathbb{E}\\left[\\frac{\\partial f^*}{\\partial x}\\right] = 0\$") fill_between(t_, μ-2σ, μ+2σ, color="#dddddd", alpha=0.3) title("TSLA Open Prices") xlabel("\$t\$") ylabel("\$f\$") legend(loc="lower left") grid() xlim(minimum(t), maximum(t)) # - # ### Approximate Generation of Gram Matrix # # We note that the kernel matrix $K_y$ and $k_{\star\star}$ are both heavily concentrated on the diagonal. We try to exploit this implicit structure given by sorting our times $t$ prior to constructing the matrix. 
plt[:cm][:copper] imshow(K_, cmap=plt[:cm][:copper]) colorbar() # #### Bayesian Optimization of Hyperparameters # (Murphy pp. 523) # # We can optimize out the (non-convex) [marginal likelihood](https://en.wikipedia.org/wiki/Marginal_likelihood) of the model by maximizing the $\log$ marginal likelihood. We have # # $$ # p(y|X) = \int p(y|f,X)p(f|X)df. # $$ # # One can show that # $$ # \log p(y|X) = \log \mathcal{N}(y|0,K_y) = -\frac{1}{2}yK_Y^{-1}y - \frac{1}{2}\log|K_y| - \frac{N}{2}\log(2\pi) # $$ # and, with the kernel hyperparameters denoted $\theta$, # $$ # \frac{\partial}{\partial \theta_j}\log p(y|X) = \frac{1}{2}\mathrm{tr}\left( (\alpha\alpha^T - K_y^{-1})\frac{\partial K_y}{\partial \theta_j} \right) = \frac{1}{2}\mathrm{tr}\left(A\frac{\partial K_y}{\partial \theta_j}\right). # $$ # # With this expression for the gradient, we can optimize our hyperparameters in a Bayesian way using any standard gradient-based method like gradient descent. # # For the squared exponential kernel in the 1-D case, letting # $$ # \begin{align*} # \theta_1 &= \log(\sigma_f^2)\\ # \theta_2 &= \log(l^2)\\ # \theta_3 &= \log(\sigma_y^2) # \end{align*} # $$ # which gives # # $$ # \begin{align*} # \kappa(x,x') &= \exp\left(\theta_1 -\frac{1}{2\exp(\theta_2)}(x-x')^2 \right) + \exp(\theta_3)\mathbb{1}\{x=x'\}\\ # \frac{\partial \kappa}{\partial \theta_1} &= \exp\left(\theta_1 -\frac{1}{2\exp(\theta_2)}(x-x')^2 \right)\\ # \frac{\partial \kappa}{\partial \theta_2} &= \frac{1}{2}(x-x')^2 \exp\left(\theta_1 - \theta_2 -\frac{1}{2\exp(\theta_2)}(x-x')^2\right)\\ # \frac{\partial \kappa}{\partial \theta_3} &= \exp(\theta_3)\mathbb{1}\{x=x'\} # \end{align*} # $$ # + ## Perform Gradient Descent on Parameters ## first define gradients function d_σf(se::SEKernel, X::AbstractArray,X_::AbstractArray) K_ = zeros(size(X)[1], size(X_)[1]) for i=1:size(K_)[1] for j=1:size(K_)[2] diff = X[i,:] - X_[j,:] @inbounds K_[i,j] = exp(se.params[1] - diff'*diff*exp(-se.params[2])/2)[1] end end return K_ end 
function d_l(se::SEKernel, X::AbstractArray,X_::AbstractArray) K_ = zeros(size(X)[1], size(X_)[1]) for i=1:size(K_)[1] for j=1:size(K_)[2] diff = X[i,:] - X_[j,:] δ2 = diff'*diff @inbounds K_[i,j] = (δ2[1]*exp(se.params[1]-se.params[2] - δ2*exp(-se.params[2])/2))[1]/2 end end return K_ end function d_σy(se::SEKernel, X::AbstractArray,X_::AbstractArray) return exp(se.params[3])*eye(size(X)[1], size(X_)[1]) end # derivs returns a vector and the marginal likelihood # (dlogP/dθ, logP) function derivs(se::SEKernel, X::AbstractArray, y::AbstractVector) d_params = zeros(3,1) # calculate common factors from data K_ = K(se, X,X) L = chol(K_) α = L\(L'\y) logP = -y'*α/2 - sum(log(diag(L))) - size(y)[1]*log(2π)/2 L_inv = inv(L) A = α*α' - L_inv*L_inv' d_params[1] = trace(A*d_σf(se,X,X))/2 d_params[2] = trace(A*d_l(se,X,X))/2 d_params[3] = trace(A*d_σy(se,X,X))/2 return d_params, logP[1] end # + # run gradient descent σ = 0.75 l = 0.2 σ_y = 0.2 params = [log(σ^2); log(l^2); log(σ_y^2)]'' # params = randn(3,1) # random instantiation converges much slower! se = SEKernel(params) n_iters = 100 lr = 0.05 β = 0.75 ∇logP = [0.; 0.; 0.] logP = zeros(n_iters) for i=1:n_iters ∇logP, logP[i] = derivs(se, t, tsla) # we want to maximize logP so we # will step `lr/i` in the direction of # the graditent se.params += (lr/(β*sqrt(i)))*∇logP if i%25 == 0 println("==> logP[$(i)] = $(logP[i])\n |∇logP| = $(norm(∇logP))") end end plot(collect(1:n_iters), logP) title("Learning Curve") xlabel("Iterations") ylabel("Marginal Likelihood \$\\log\\; P(\\mathbf{y}|\\mathbf{X})\$") # - ∇logP # + # plot results K_ = K(se, t, t) k_ = K(se, t, t_) dk_ = d_K(se,t,t_) d2k_ = d2_K(se,t,t_) k__ = K(se, t_,t_) println("==> Run GP Regression") # GP Regression algorithm + derivatives # -- Machine Learning: A Probabalistic Perspective # <NAME>, pp. 
526 L = chol(K_) α = L\(L'\tsla) μ = k_'*α v = L'\k_ σ = sqrt(diag(k__) - diag(v'*v)) logP_ = -tsla'*α/2 - sum(log(diag(L))) - size(tsla)[1]*log(2π)/2 println("-- done.") println("==> Marginal Likelihood: log P(y|X) = $(logP_[1])") println("==> Plot Results") plot(t,tsla) plot(t_, μ, label="\$\\mathbb{E}[f_*]\$", color="#FF425B") fill_between(t_, μ-2σ, μ+2σ, color="#dddddd", alpha=0.3) title("TSLA Open Prices with Optimized Hyperparameters") xlabel("\$t\$") ylabel("\$f\$") legend(loc="lower left") grid() xlim(minimum(t), maximum(t)) # + ## we are going to plot a surface over the ## marginal likelihood to see the nonconvexity # adapted from https://github.com/gizmaa/Julia_Examples/blob/master/pyplot_surfaceplot.jl n = 10 σ_y = linspace(0.01,0.5,n) σ_f = 0.75 l = linspace(0.25,3,n) σgrid = repmat(σ_y',n,1) lgrid = repmat(l,1,n) z = zeros(n,n) for i in 1:n for j in 1:n # calculate marginal likelihood for this # pair of parameters se = SEKernel(σ_f,l[j],σ_y[i]) K_ = K(se, t, t) L = chol(K_) α = L\(L'\tsla) logP = -tsla'*α/2 - sum(log(diag(L))) - size(tsla)[1]*log(2π)/2 z[i:i,j:j] = logP if j%10 + i%10 == 0 println("==> Finished (i,j)=($(i),$(j))") end end end cp = plt[:contourf](σgrid, lgrid, z, levels=linspace(minimum(z),maximum(z),100), cmap=plt[:cm][:jet]) xlabel("\$\\sigma_y\$") ylabel("\$l\$") title("Marginal Likelihood for Different Hyperparameters") colorbar()
notebooks/1.0-gp-regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Does the recording's duration affect the quality of spike sorting? # # This notebook investigates if and how the duration of the recording affects spike sorting. # # Obviously, each sorter engine needs a minimum number of events to detect a "cluster", and therefore a unit. # If a neuron doesn't fire enough during a recording it won't be detected. # The number of event per units depends on the recording duration and the each individual firing rates. # # In order to test this phenomenon, we use the same dataset (with the same neurons and firing rates), but we vary the # duration of the recording. # # The simulated recording is generated with [MEArec](https://github.com/alejoe91/MEArec) using a Neuronexus-32 probe. # This specific dataset seems *relatively* easy to sort. The "SYNTH_MEAREC_NEURONEXUS" dataset in # [SpikeForest](https://spikeforest.flatironinstitute.org/) (which uses the same probe), in fact, shows quite good # results for all sorters. The original duration is 600s (10 min). # # Here we have generated a new but similar recording with a duration of 1800s. Then we have shortened it to 60s, 300s, # 600s and 1800s (original). The recording can be downloaded from Zenodo: https://doi.org/10.5281/zenodo.4058272 # # The dataset name is: **recordings_10cells_Neuronexus-32_1800.0_10.0uV_2020-02-28.h5**. It contains 10 neurons recorded on a Neuronexus-32 probe. # The duration is 1800s and the noise level is 10uV. # # Let's see if spike sorters are robust to fewer events and if are able to deal with long durations or they end up # finding too many events. 
# # Author: [<NAME>](https://github.com/samuelgarcia), CRNL, Lyon # ### Requirements # # For this need you will need the following Python packages: # # - numpy # - pandas # - matplotlib # - seaborn # - spikeinterface # # To run the MATLAB-based sorters, you would also need a MATLAB license. # For other sorters, please refer to the documentation on [how to install sorters](https://spikeinterface.readthedocs.io/en/latest/sortersinfo.html). # ### Installation and imports # + import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from pathlib import Path import spikeinterface as si import spikeinterface.extractors as se import spikeinterface.sorters as ss import spikeinterface.widgets as sw from spikeinterface.comparison import GroundTruthStudy # + # clone and install MATLAB sorters # kilosort2 # !git clone https://github.com/MouseLand/Kilosort2.git kilosort2_path = './Kilosort2' ss.Kilosort2Sorter.set_kilosort2_path(kilosort2_path) # kilosort # !git clone https://github.com/cortex-lab/KiloSort.git kilosort_path = './KiloSort' ss.KilosortSorter.set_kilosort_path(kilosort_path) # ironclust # !git clone https://github.com/flatironinstitute/ironclust.git ironclust_path = './ironclust' ss.IronclustSorter.set_ironclust_path(ironclust_path) # + # %matplotlib inline # some matplotlib hack to prettify figure SMALL_SIZE = 12 MEDIUM_SIZE = 14 BIGGER_SIZE = 16 plt.rc('font', size=SMALL_SIZE) # controls default text sizes plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize plt.rc('figure', figsize=(10.0, 8.0)) # figsize plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title def clear_axes(ax): ax.spines['top'].set_visible(False) 
ax.spines['right'].set_visible(False) # - # ## Check spikeinterface version and sorter version si.print_spikeinterface_version() ss.print_sorter_versions() # ## Setup global path # Change this path to point to where you downloaded the dataset p = Path('/home/samuel/Documents/DataSpikeSorting/mearec/') study_folder = p / 'study_mearec_neuronexus_several_durations/' # ## Setup ground truth study mearec_filename = p / 'recordings_10cells_Neuronexus-32_1800.0_10.0uV_2020-02-28.h5' # + rec = se.MEArecRecordingExtractor(mearec_filename, locs_2d=True) gt_sorting = se.MEArecSortingExtractor(mearec_filename) fs = rec.get_sampling_frequency() gt_dict = {} durations = [60, 300, 600, 1800] for duration in durations: sub_rec = se.SubRecordingExtractor(rec, start_frame=0, end_frame=int(duration*fs)) sub_sorting = se.SubSortingExtractor(gt_sorting, start_frame=0, end_frame=int(duration*fs)) gt_dict[f'rec{duration}'] = (sub_rec, sub_sorting) study = GroundTruthStudy.create(study_folder, gt_dict) # - # ## Run all sorters sorter_list = ['herdingspikes', 'ironclust', 'kilosort2', 'kilosort', 'mountainsort4', 'spykingcircus', 'tridesclous'] study = GroundTruthStudy(study_folder) sorter_params = {} study.run_sorters(sorter_list, sorter_params=sorter_params, mode='keep', verbose=True) # ## Get signal to noise ratio for all units # # Units are the same in each recording so the snr is the same lets take from the longest one # + study = GroundTruthStudy(study_folder) snr = study.get_units_snr(rec_name='rec1800') snr # - fig, ax = plt.subplots() ax.hist(snr['snr'].values, bins=10) ax.set_xlabel('GT units SNR') # ## Run comparison with ground truth and retreive result tables # + # this copy sorting is necessary to copy results from sorter # into a centralize folder with all results study.copy_sortings() # this run all comparison sto GT study.run_comparisons(exhaustive_gt=True, match_score=0.1, overmerged_score=0.2) # - # this retrieve results comparisons = study.comparisons dataframes = 
study.aggregate_dataframes() # ## Run times run_times = dataframes['run_times'] run_times # insert durations run_times['duration'] = run_times['rec_name'].apply(lambda s: float(s.replace('rec', ''))) g = sns.catplot(data=run_times, x='duration', y='run_time', hue='sorter_name', kind='bar') # ## Accuracy vs duration perf = dataframes['perf_by_units'] perf # insert durations perf['duration'] = perf['rec_name'].apply(lambda s: float(s.replace('rec', ''))) g = sns.catplot(data=perf, x='duration', y='accuracy', hue='sorter_name', kind='bar') # ## Count good, bad, false positive units vs duration count_units = dataframes['count_units'] count_units # insert durations count_units['duration'] = count_units['rec_name'].apply(lambda s: float(s.replace('rec', ''))) # ### num_well_detected vs duration # # the more the better # + g = sns.catplot(data=count_units, x='duration', y='num_well_detected', hue='sorter_name', kind='bar') # - # ### num_false_positive vs duration # # the less the better g = sns.catplot(data=count_units, x='duration', y='num_false_positive', hue='sorter_name', kind='bar') # same as previous but with other limits g = sns.catplot(data=count_units, x='duration', y='num_false_positive', hue='sorter_name', kind='bar') g.fig.axes[0].set_ylim(0, 10) # ### num_redundant vs duration # # the less the better g = sns.catplot(data=count_units, x='duration', y='num_redundant', hue='sorter_name', kind='bar') # ## Conlusion # # For this simple simulated dataset we have observed that: # # * Focusing on the average accuracy, all sorters have similar performance for long or short recordings. # The only exception is Kilosort: it has a clear drop in performence for the shortest duration (60s). # # * Very surprinsingly, some sorters (e.g. tridesclous, ironclust) have better performence at 60s than 300s. This could be specific to this dataset and have to be instigate more. 
#
# * Looking at the number of "num_false_positive" and "num_well_detected" the situation is the following:
#
#   * kilosort is not affected by the duration
#   * herdingspikes (one of the most affected): the longer the duration, the more "num_false_positive"
#   * ironclust seems to have a slight increase in "num_false_positive" for longer duration
#   * kilosort2 has random oscillations of "num_false_positive" across durations
#   * tridesclous has a few more "num_false_positive" for long durations
#   * mountainsort is heavily affected by the duration
#   * spykingcircus is affected by long durations as more "num_false_positive" units are found
#
posts/check-if-duration-affects-spike-sorting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Data Structures in Python

# ### Builtin Datatypes
# - Strings
# - Lists
# - Tuples
# - Dictionaries
# - Sets

# ## Lists
# - List is like an array, but heterogeneous: elements may have different types

myList = [ 1,2,3.5,"Hello"]
print(myList)
print(type(myList))

# list() copies an existing iterable into a new list
l2 = list([1,2,3])
print(l2)
print(type(l2))

# +
l3 = list(l2)
print(l3)

# `+` builds a new list, while extend() mutates l3 in place
l3 = l3 + l2
print(l3)

l3.extend(l2)
print(l3)
# -

# List of Square of the numbers from 1 to 5
l4 = [i*i for i in range(1,6)]
print(l4)

# ## List Slicing
#

print(l4[0:3])
print(l4[-3:])

# ## Insertion and Deletion
# - append
# - insert
#
# - remove
# - pop
# - del

# +
l = [1,2]
l.append(3)
l.append([1.0,2.1])   # appends the list itself as a single, nested element
l += [4,5,6]
print(l)              # [1, 2, 3, [1.0, 2.1], 4, 5, 6]
# -

l.insert(2,20)
print(l)              # [1, 2, 20, 3, [1.0, 2.1], 4, 5, 6]
# BUGFIX: after insert(2, 20) the nested list shifted from index 3 to index 4;
# l[3] is now the int 3, so l[3][1] raised TypeError.  Index 4 is the nested
# list, so this prints 2.1 as intended.
print(l[4][1])

del l[0]
print(l)

l.pop()
print(l)

l = ([1,2,3])
l = l*4
print(l)

l = ["Apple","mango","guava",80]
80 in l

for i in range(len(l)):
    print(l[i])

for x in l:
    print(x)

# ## More functions on lists : Searching & Sorting

l = [4,3,2,16,18]
print(max(l))
print(min(l))

#Linear Search
print(l.index(16))

# sorted() returns a new sorted list; sort() sorts in place
l = sorted(l)
print(l)

l = [4,5,1,3,2]
l.sort(reverse=True)
print(l)

## Read a list of Numbers (space-separated on one input line), squaring each
numbers = [int(number)*int(number) for number in input().split()]
print(numbers)
print(type(numbers))
ml_repo/1. Python Programming/python_lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Challenge Notebook # ## Problem: Find the shortest path between two nodes in a graph. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # * [Solution Notebook](#Solution-Notebook) # ## Constraints # # * Is the graph directed? # * Yes # * Is the graph weighted? # * No # * Can we assume we already have Graph and Node classes? # * Yes # * Are the inputs two Nodes? # * Yes # * Is the output a list of Node keys that make up the shortest path? # * Yes # * If there is no path, should we return None? # * Yes # * Can we assume this is a connected graph? # * Yes # * Can we assume the inputs are valid? # * Yes # * Can we assume this fits memory? # * Yes # ## Test Cases # # Input: # * `add_edge(source, destination, weight)` # # ``` # graph.add_edge(0, 1) # graph.add_edge(0, 4) # graph.add_edge(0, 5) # graph.add_edge(1, 3) # graph.add_edge(1, 4) # graph.add_edge(2, 1) # graph.add_edge(3, 2) # graph.add_edge(3, 4) # ``` # # Result: # * search_path(start=0, end=2) -> [0, 1, 3, 2] # * search_path(start=0, end=0) -> [0] # * search_path(start=4, end=5) -> None # ## Algorithm # # Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/graph_path_exists/path_exists_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
# ## Code

# %run ../graph/graph.py
# %load ../graph/graph.py


class GraphShortestPath(Graph):
    """Graph with an unweighted shortest-path query between two node keys."""

    def shortest_path(self, source_key, dest_key):
        """Return the list of node keys on a shortest path from source to dest.

        Breadth-first search finds a minimum-hop path in an unweighted
        directed graph. Returns [source_key] when source == dest, and None
        when dest is unreachable.
        """
        from collections import deque

        if source_key is None or dest_key is None:
            return None
        if source_key == dest_key:
            return [source_key]
        # Predecessor map; also doubles as the visited set.
        prev_node_keys = {source_key: None}
        queue = deque([source_key])
        while queue:
            node_key = queue.popleft()
            # NOTE(review): assumes the Graph API from ../graph/graph.py:
            # self.nodes maps key -> Node, and Node.adj_nodes maps neighbor
            # key -> Node -- confirm against graph.py.
            for adj_key in self.nodes[node_key].adj_nodes:
                if adj_key in prev_node_keys:
                    continue
                prev_node_keys[adj_key] = node_key
                if adj_key == dest_key:
                    # Walk the predecessor chain back to the source, then
                    # reverse to get source -> dest order.
                    path = [adj_key]
                    while prev_node_keys[path[-1]] is not None:
                        path.append(prev_node_keys[path[-1]])
                    path.reverse()
                    return path
                queue.append(adj_key)
        return None


# ## Unit Test

# **The following unit test is expected to fail until you solve the challenge.**

# +
# # %load test_shortest_path.py
import unittest


class TestShortestPath(unittest.TestCase):

    def test_shortest_path(self):
        nodes = []
        graph = GraphShortestPath()
        for id in range(0, 6):
            nodes.append(graph.add_node(id))
        graph.add_edge(0, 1)
        graph.add_edge(0, 4)
        graph.add_edge(0, 5)
        graph.add_edge(1, 3)
        graph.add_edge(1, 4)
        graph.add_edge(2, 1)
        graph.add_edge(3, 2)
        graph.add_edge(3, 4)
        self.assertEqual(graph.shortest_path(nodes[0].key, nodes[2].key), [0, 1, 3, 2])
        self.assertEqual(graph.shortest_path(nodes[0].key, nodes[0].key), [0])
        self.assertEqual(graph.shortest_path(nodes[4].key, nodes[5].key), None)
        print('Success: test_shortest_path')


def main():
    test = TestShortestPath()
    test.test_shortest_path()


if __name__ == '__main__':
    main()
# -

# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/graph_path_exists/path_exists_solution.ipynb) for a discussion on algorithms and code solutions.
graphs_trees/graph_shortest_path_unweighted/shortest_path_challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: python3-azureml
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

# # 物体检测
#
# *物体检测*是计算机视觉的一种形式,在这种形式中,机器学习模型被训练为对图像中的各个物体实例进行分类,并指示一个标记其位置的*边界框*。可以将其视为从*图像分类*(在此阶段模型回答问题“这是什么物体的图像”)到构建解决方案(在此阶段我们可以问模型“这个图像中有什么物体,它们在什么位置?”)的过程。
#
# ![正在识别水果的机器人](./images/object-detection.jpg)
#
# 例如,一家商店也许可以使用物体检测模型来实现自动结帐系统,该系统使用摄像头扫描传送带并能识别特定商品,而无需把每个商品都放在传送带上单独扫描。
#
# Microsoft Azure 中的**自定义视觉**认知服务为创建和发布自定义物体检测模型提供了基于云的解决方案。
#
# ## 创建自定义视觉资源
#
# 要使用自定义视觉服务,需要具有可用于训练模型的 Azure 资源,以及可用于发布模型以供应用程序使用的资源。在完成这些任务时,可以使用相同的资源,也可以为每项任务使用不同的资源以单独分配成本(如果两个资源在同一区域中创建)。用于其中一个(或两个)任务的资源可以是常规的**认知服务**资源,也可以是特定的**自定义视觉**资源。请按照以下说明创建一个新的**自定义视觉**资源(你也可以使用现有的资源)。
#
# 1. 在新的浏览器标签页中打开 Azure 门户 ([https://portal.azure.com](https://portal.azure.com)),使用与你的 Azure 订阅关联的 Microsoft 帐户进行登录。
# 2. 选择“**&#65291;创建资源**”按钮,搜索“*自定义视觉*”并以如下设置创建**自定义视觉**资源:
#     - **创建选项**:均可
#     - **订阅**: *你的 Azure 订阅*
#     - **资源组**: *选择或创建具有唯一名称的资源组*
#     - **名称**: *输入一个唯一名称*
#     - **训练位置**: *选择任何可用区域*
#     - **训练定价层**:F0
#     - **预测位置**: *与训练位置保持一致*
#     - **预测定价层**:F0
#
#     > **备注**:如果在你的订阅中已有 F0 自定义视觉服务,此处请选择“**S0**”。
#
# 3. 等待资源创建完成。
#
# ## 创建自定义视觉项目
#
# 要训练物体检测模型,需要根据训练资源创建自定义视觉项目。为此,需要使用自定义视觉门户。
#
# 1. 在新的浏览器选项卡中打开自定义视觉门户 ([https://customvision.ai](https://customvision.ai)),使用与你的 Azure 订阅关联的 Microsoft 帐户进行登录。
# 2. 新建一个项目,设置如下:
#     - **名称**:商品检测
#     - **说明**:针对商品的物体检测。
#     - **资源**: *你之前创建的自定义视觉资源*
#     - **项目类型**:物体检测
#     - **领域**:常规
# 3. 等待项目创建完毕并在浏览器中打开。
#
# ## 添加图像并进行标记
#
# 要训练物体检测模型,需要上传包含你希望模型识别的类的图像,并对这些图像进行标记以指示每个物体实例的边界框。
#
# 1.从 https://aka.ms/fruit-objects 下载并提取训练图像。提取的文件夹包含一个水果的图像集合。**备注**:如果你无法访问训练图像,临时的应变方法是转到 https://www.github.com,然后转到 https://aka.ms/fruit-objects。
# 2. 
在自定义视觉门户 [https://customvision.ai](https://customvision.ai) 中,确保你正在处理物体检测项目 _Grocery Detection_。然后选择“**添加图像**”,并上传提取文件夹中的所有图像。 # # ![通过单击“添加图像”上传下载的图像。](./images/fruit-upload.jpg) # # 3. 上传图像后,选择第一个图像将其打开。 # 4. 将鼠标悬停在图像中的任何物体上,直到显示一个自动检测到的区域,如下图所示。然后选择物体,并根据需要调整该区域大小,使其包围所选物体。 # # ![物体的默认区域](./images/object-region.jpg) # # 也可以简单地围绕该物体进行拖动,创建一个区域。 # # 5. 当该区域包围所选物体时,添加一个具有适当物体类型的新标签(“*苹果*”、“*香蕉*”或“*橙子*”),如下所示: # # ![图像中带有标签的物体](./images/object-tag.jpg) # # 6. 在图像中选择各个物体并为其添加标签,根据需要调整区域大小并添加新标签。 # # ![图像中两个带有标签的物体](./images/object-tags.jpg) # # 7. 使用右侧的“**>**”链接转至下一个图像,并为图像中的物体添加标签。然后按照这样的步骤继续处理整个图像集合,为每个苹果、香蕉和橙子添加标签。 # # 8. 标记完最后一个图像后,关闭“**图像细节**”编辑器,并在“**训练图像**”页面上的“**标签**”下选择“**已标记**”以查看所有带有标签的图像: # # ![项目中带有标签的图像](./images/tagged-images.jpg) # # ## 训练和测试模型 # # 你已为项目中的图像添加标签,现在可以训练模型了。 # # 1. 在自定义视觉项目中,单击“**训练**”以使用带有标签的图像训练物体检测模型。选择“**快速训练**”选项。 # 2. 等待训练完成(可能需要 10 分钟左右),然后检查*精度*、*召回*率和 *mAP* 性能指标 - 这些指标用于衡量分类模型的预测准确度,且应该都很高。 # 3. 单击页面右上角的“**快速测试**”,然后在“**图像 URL**”框中输入 `https://aka.ms/apple-orange` 并查看生成的预测结果。然后关闭“**快速测试**”窗口。 # # ## 发布并使用物体检测模型 # # 现在即可发布已训练的模型并在客户端应用程序中使用它。 # # 1. 单击“**性能**”页面左上角的“**&#128504; 发布**”来发布已训练的模型,设置如下: # - **模型名称**:detect-produce # - **预测资源**:*你的自定义视觉**预测**资源*。 # # ### (!)签入 # 是否使用了相同的模型名称“**detect-produce**”? # # 2. 发布后,单击“**性能**”页面右上角的“*设置*”(&#9881;) 图标以查看项目设置。然后在左侧的“**常规**”下复制**项目 ID**。向下滚动并将其粘贴到步骤 5 下的代码单元格中,替换“**YOUR_PROJECT_ID**”。 # # > (*如果在本练习开始时你没有创建**自定义视觉**资源,而是使用**认知服务**资源,可以在项目设置的右侧复制其密钥和终结点,并将其粘贴至下方的代码单元格中,然后运行它以查看结果。否则请继续完成以下步骤,获取自定义视觉预测资源的密钥和终结点。*) # # 3. 单击“**项目设置**”页面左上角的“*项目库*”(&#128065;) 图标以返回到自定义视觉门户主页,此处现在会列出你的项目。 # # 4. 单击自定义视觉门户主页右上角的“*设置*”(&#9881;) 图标,查看自定义视觉服务的设置。然后在“**资源**”下展开预测资源(不是训练资源),并将资源的**密钥**和**终结点**值复制到步骤 5 下面的代码单元格中,分别替换“**YOUR_KEY**”和“**YOUR_ENDPOINT**”。 # # ### (!)签入 # 如果你使用的是**自定义视觉**资源,那你是否使用过**预测**资源(不是训练资源)? # # 5. 
# Run the code cell below by clicking its **Run cell** <span>&#9655;</span> button (to the left of the cell) to set the variables to your own project ID, key, and endpoint values.

# + gather={"logged": 1599692485387}
project_id = 'YOUR_PROJECT_ID' # Replace with your project ID
cv_key = 'YOUR_KEY' # Replace with your prediction resource primary key
cv_endpoint = 'YOUR_ENDPOINT' # Replace with your prediction resource endpoint

model_name = 'detect-produce' # this must match the model name you set when publishing your model iteration exactly (including case)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
# -

# Now you can use the key and endpoint with a Custom Vision client to connect to your Custom Vision object detection model.
#
# Run the code cell below, which uses your model to detect individual produce items in an image.
#
# > **Note**: Don't worry too much about the details of the code. It uses the Python SDK for the Custom Vision service to submit an image to your model and retrieve predictions for detected objects. Each prediction consists of a class name (*apple*, *banana*, or *orange*) and *bounding-box* coordinates that indicate where in the image the predicted object was detected. The code then uses this information to draw a labelled box around each object on the image.

# + gather={"logged": 1599692585672}
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
# %matplotlib inline

# NOTE(review): ImageFont is imported but never used in this cell.

# Load a test image and get its dimensions
test_img_file = os.path.join('data', 'object-detection', 'produce.jpg')
test_img = Image.open(test_img_file)
# NOTE(review): assumes the image decodes to an H x W x C array; a
# single-channel (grayscale) image would unpack to 2 values and raise
# ValueError here -- confirm the test asset is RGB.
test_img_h, test_img_w, test_img_ch = np.array(test_img).shape

# Get a prediction client for the object detection model
credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key})
predictor = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials)

print('Detecting objects in {} using model {} in project {}...'.format(test_img_file, model_name, project_id))

# Detect objects in the test image
with open(test_img_file, mode="rb") as test_data:
    results = predictor.detect_image(project_id, model_name, test_data)

# Create a figure to display the results
fig = plt.figure(figsize=(8, 8))
plt.axis('off')

# Display the image with boxes around each detected object
draw = ImageDraw.Draw(test_img)
# Box line width scaled to 1% of the image width
lineWidth = int(np.array(test_img).shape[1]/100)
object_colors = {
    "apple": "lightgreen",
    "banana": "yellow",
    "orange": "orange"
}
for prediction in results.predictions:
    color = 'white' # default for 'other' object tags
    # Only draw predictions with more than 50% probability
    if (prediction.probability*100) > 50:
        if prediction.tag_name in object_colors:
            color = object_colors[prediction.tag_name]
        # Bounding-box values are fractions of the image size, so scale
        # them to pixel coordinates before drawing.
        left = prediction.bounding_box.left * test_img_w
        top = prediction.bounding_box.top * test_img_h
        height = prediction.bounding_box.height * test_img_h
        width = prediction.bounding_box.width * test_img_w
        points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height),(left,top))
        draw.line(points, fill=color, width=lineWidth)
        plt.annotate(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100),(left,top), backgroundcolor=color)
plt.imshow(test_img)
# -

# Review the resulting predictions, which show the objects detected and the probability of each prediction.
03 - Object Detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Merge Overlapping Intervals
#
# [The Original Question](https://mp.weixin.qq.com/s/IjHjUgHaaeRr2UIB98P91A)

# ## Question
#
# You are given an array of intervals - that is, an array of tuples `(start, end)`. The array may not be sorted, and could contain overlapping intervals. Return another array where the overlapping intervals are merged.

# ## Example
#
# ```python3
# [(1, 3), (5, 8), (4, 10), (20, 25)]
# ```
#
# This input should return `[(1, 3), (4, 10), (20, 25)]` since `(5, 8)` and `(4, 10)` can be merged into `(4, 10)`.

def merge(intervals):
    """Merge overlapping (inclusive) intervals.

    Arguments:
    intervals - iterable of (start, end) tuples; may be unsorted and
        may contain overlapping or touching intervals.

    Returns:
    A list of disjoint (start, end) tuples sorted by start.

    BUG FIX: the previous implementation expanded each interval into a set
    of all integers it covers and only removed subset/superset pairs, so
    partially overlapping intervals such as (1, 5) and (3, 8) were never
    merged, and memory grew with the width of the intervals. This version
    sorts by start and sweeps once, merging any interval whose start lies
    at or before the end of the interval under construction -- O(n log n)
    time, O(n) space.
    """
    merged = []
    # iv[0]/iv[-1] (rather than tuple unpacking) tolerates the same loose
    # inputs as the original, which also indexed with [0] and [-1].
    for iv in sorted(intervals, key=lambda iv: (iv[0], iv[-1])):
        start, end = iv[0], iv[-1]
        if merged and start <= merged[-1][1]:
            # Overlaps (or touches) the interval being built: extend it.
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

print(merge([(1, 3), (5, 8), (4, 10), (20, 25)]))
February/Week5/33.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_tensorflow_p36) # language: python # name: conda_tensorflow_p36 # --- from shutil import copyfile import os # + img_dir = r"./clipart_new/" k = 0 s = 0 for filename in os.listdir(img_dir): if s < 100: filepath = os.path.join(img_dir, filename) copyfile(filepath, '/home/ubuntu/storage/clipart/' + filename) s += 1 k += 1 # -
download_and_clean_data_scripts/Copy_50_files_to_another_folder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Phenolopy # ## Load packages # ### Set up a dask cluster # + # %matplotlib inline # %load_ext autoreload import os, sys import xarray as xr import numpy as np import pandas as pd import datacube import matplotlib.pyplot as plt from scipy.signal import savgol_filter, wiener from scipy.stats import zscore from statsmodels.tsa.seasonal import STL as stl from datacube.drivers.netcdf import write_dataset_to_netcdf sys.path.append('../Scripts') from dea_datahandling import load_ard from dea_dask import create_local_dask_cluster from dea_plotting import display_map, rgb sys.path.append('./scripts') import phenolopy # - # initialise the cluster. paste url into dask panel for more info. create_local_dask_cluster() # open up a datacube connection dc = datacube.Datacube(app='phenolopy') # ## Study area and data setup # ### Set study area and time range # + # set lat, lon (y, x) dictionary of testing areas for gdv project loc_dict = { 'yan_full': (-22.750, 119.10), 'yan_full_1': (-22.725, 119.05), 'yan_full_2': (-22.775, 119.15), 'roy_sign_1': (-22.618, 119.989), 'roy_full': (-22.555, 120.01), 'roy_full_1': (-22.487, 119.927), 'roy_full_2': (-22.487, 120.092), 'roy_full_3': (-22.623, 119.927), 'roy_full_4': (-22.623, 120.092), 'oph_full': (-23.280432, 119.859309), 'oph_full_1': (-23.375319, 119.859309), 'oph_full_2': (-23.185611, 119.859309), 'oph_full_3': (-23.233013, 119.859309), 'oph_full_4': (-23.280432, 119.859309), 'oph_full_5': (-23.327867, 119.859309), 'test': (-31.6069288, 116.9426373) } # set buffer length and height (x, y) buf_dict = { 'yan_full': (0.15, 0.075), 'yan_full_1': (0.09, 0.025), 'yan_full_2': (0.05, 0.0325), 'roy_sign_1': (0.15, 0.21), 'roy_full': (0.33, 0.27), 'roy_full_1': (0.165209/2, 0.135079/2), 'roy_full_2': (0.165209/2, 
0.135079/2), 'roy_full_3': (0.165209/2, 0.135079/2), 'roy_full_4': (0.165209/2, 0.135079/2), 'oph_full': (0.08, 0.11863), 'oph_full_1': (0.08, 0.047452/2), 'oph_full_2': (0.08, 0.047452/2), 'oph_full_3': (0.08, 0.047452/2), 'oph_full_4': (0.08, 0.047452/2), 'oph_full_5': (0.08, 0.047452/2), 'test': (0.05, 0.05) } # + # select location from dict study_area = 'roy_full_2' # set buffer size in lon, lat (x, y) lon_buff, lat_buff = buf_dict[study_area][0], buf_dict[study_area][1] # select time range. for a specific year, set same year with month 01 to 12. multiple years will be averaged. time_range = ('2016-11', '2018-02') # + # select a study area from existing dict lat, lon = loc_dict[study_area][0], loc_dict[study_area][1] # combine centroid with buffer to form study boundary lat_extent = (lat - lat_buff, lat + lat_buff) lon_extent = (lon - lon_buff, lon + lon_buff) # display onto interacrive map display_map(x=lon_extent, y=lat_extent) # - # ### Load sentinel-2a, b data for above parameters # # + # set measurements (bands) measurements = [ 'nbart_blue', 'nbart_green', 'nbart_red', 'nbart_nir_1', 'nbart_swir_2' ] # create query from above and expected info query = { 'x': lon_extent, 'y': lat_extent, 'time': time_range, 'measurements': measurements, 'output_crs': 'EPSG:3577', 'resolution': (-10, 10), 'group_by': 'solar_day', } # load sentinel 2 data ds = load_ard( dc=dc, products=['s2a_ard_granule', 's2b_ard_granule'], min_gooddata=0.90, dask_chunks={'time': 1}, **query ) # display dataset print(ds) # display a rgb data result of temporary resampled median #rgb(ds.resample(time='1M').median(), bands=['nbart_red', 'nbart_green', 'nbart_blue'], col='time', col_wrap=12) # - # ### Conform DEA band names # + # takes our dask ds and conforms (renames) bands ds = phenolopy.conform_dea_band_names(ds) # display dataset print(ds) # - # ### Calculate vegetation index # + # takes our dask ds and calculates veg index from spectral bands ds = phenolopy.calc_vege_index(ds, 
index='mavi', drop=True) # display dataset print(ds) # - # ## Pre-processing phase # ### Temporary - load MODIS dataset # + #ds = phenolopy.load_test_dataset(data_path='./data/') # - # resample to bimonth ds = phenolopy.resample(ds, interval='1M', reducer='median') # interp ds = ds.chunk({'time': -1}) ds = phenolopy.interpolate(ds=ds, method='interpolate_na') # drop years ds = ds.where(ds['time.year'] == 2017, drop=True) # ### Group data by month and reduce by median # + # take our dask ds and group and reduce dataset in median weeks (26 for one year) ds = phenolopy.group(ds, group_by='month', reducer='median') # display dataset print(ds) # - # show times ds = ds.compute() # ### Remove outliers from dataset on per-pixel basis # + # chunk dask to -1 to make compatible with this function ds = ds.chunk({'time': -1}) # takes our dask ds and remove outliers from data using median method ds = phenolopy.remove_outliers(ds=ds, method='median', user_factor=2, z_pval=0.05) # display dataset print(ds) # - # ### Resample dataset down to bi-monthly medians # + # takes our dask ds and resamples data to bi-monthly medians ds = phenolopy.resample(ds, interval='1W', reducer='median') # display dataset print(ds) # - # ### Interpolate missing (i.e. 
nan) values linearly # + # chunk dask to -1 to make compatible with this function ds = ds.chunk({'time': -1}) # takes our dask ds and interpolates missing values ds = phenolopy.interpolate(ds=ds, method='interpolate_na') # display dataset print(ds) # - # ### Smooth data on per-pixel basis # + # chunk dask to -1 to make compatible with this function ds = ds.chunk({'time': -1}) # take our dask ds and smooth using savitsky golay filter ds = phenolopy.smooth(ds=ds, method='savitsky', window_length=3, polyorder=1) # display dataset print(ds) # - # ### Upper envelope correction # todo # + # todo # - # ### Calculate number of seasons # + # chunk dask to -1 to make compatible with this function ds = ds.chunk({'time': -1}) # take our dask ds and smooth using savitsky golay filter da_num_seasons = phenolopy.calc_num_seasons(ds=ds) # display dataset print(da_num_seasons) # - # ## Calculate Phenolometrics # compute ds = ds.compute() print(ds) # + # %autoreload # calc phenometrics via phenolopy! ds_phenos = phenolopy.calc_phenometrics(da=ds['veg_index'], peak_metric='pos', base_metric='vos', method='seasonal_amplitude', factor=0.2, thresh_sides='two_sided', abs_value=0.1) # + # set the metric you want to view metric_name = 'lios_values' # plot this on map ds_phenos[metric_name].plot(robust=True, cmap='Spectral') # - from datacube.drivers.netcdf import write_dataset_to_netcdf write_dataset_to_netcdf(ds_phenos, 'roy_2017_1w_phenos.nc') # ### Testing # + # set up params import random import shutil # set output filename filename = 'roy_2_p_pos_b_vos_seas_amp_f_015' # set seed random.seed(50) # gen random x and y lists for specified num pixels (e.g. 
# 250 x, 250 y)
n_pixels = 200
x_list = random.sample(range(0, len(ds_phenos['x'])), n_pixels)
y_list = random.sample(range(0, len(ds_phenos['y'])), n_pixels)


# +
def run_test(ds_raw, ds_phen, filename, x_list, y_list):
    """Plot raw veg_index curves with their phenometrics for sampled pixels.

    For each (x, y) index pair, saves a .jpg of the annotated time series
    into ./testing, then zips the folder to <filename>.zip and empties it.

    Arguments:
    ds_raw - dataset with a 'veg_index' variable over time (the smoothed
        series computed above)
    ds_phen - matching phenometrics dataset (pos/vos/sos/eos/bse/... values
        and times per pixel)
    filename - base name (no extension) for the per-pixel jpgs and the zip
    x_list, y_list - pixel indices to sample, paired element-wise
    """
    # loop through each pixel pair
    for x, y in zip(x_list, y_list):

        # get pixel and associate phenos pixel
        v = ds_raw.isel(x=x, y=y)
        p = ds_phen.isel(x=x, y=y)

        # create fig
        fig = plt.figure(figsize=(12, 5))

        # plot main trend
        plt.plot(v['time.dayofyear'], v['veg_index'], linestyle='solid', marker='.', color='black')

        # plot pos vals and times
        plt.plot(p['pos_times'], p['pos_values'], marker='o', linestyle='', color='blue', label='POS')
        plt.annotate('POS', (p['pos_times'], p['pos_values']))

        # plot vos vals and times
        plt.plot(p['vos_times'], p['vos_values'], marker='o', linestyle='', color='darkred', label='VOS')
        plt.annotate('VOS', (p['vos_times'], p['vos_values']))

        # plot bse vals
        plt.axhline(p['bse_values'], marker='', linestyle='dashed', color='red', label='BSE')
        # add legend

        # plot sos vals and times
        plt.plot(p['sos_times'], p['sos_values'], marker='s', linestyle='', color='green', label='SOS')
        plt.annotate('SOS', (p['sos_times'], p['sos_values']))

        # plot eos vals and times
        plt.plot(p['eos_times'], p['eos_values'], marker='s', linestyle='', color='orange', label='EOS')
        plt.annotate('EOS', (p['eos_times'], p['eos_values']))

        # plot aos vals
        plt.axvline(p['pos_times'], marker='', color='magenta', linestyle='dotted', label='AOS')

        # plot los vals
        plt.axhline((p['sos_values'] + p['eos_values']) / 2, marker='', color='yellowgreen', linestyle='dashdot', label='LOS')

        # plot sios
        plt.fill_between(v['time.dayofyear'], v['veg_index'], y2=p['bse_values'], color='red', alpha=0.1, label='SIOS')

        # plot lios
        t = ~v.where((v['time.dayofyear'] >= p['sos_times']) & (v['time.dayofyear'] <= p['eos_times'])).isnull()
        plt.fill_between(v['time.dayofyear'], v['veg_index'], where=t['veg_index'], color='yellow', alpha=0.2, label='LIOS')

        # plot siot
        plt.fill_between(v['time.dayofyear'], v['veg_index'], y2=p['bse_values'], color='aqua', alpha=0.3, label='SIOT')

        # plot liot
        plt.fill_between(v['time.dayofyear'], v['veg_index'], color='aqua', alpha=0.1, label='LIOT')

        # add legend
        plt.legend(loc='best')

        # create output filename
        out = os.path.join('testing', filename + '_x_' + str(x) + '_y_' + str(y) + '.jpg')

        # save to file without plotting
        fig.savefig(out)
        plt.close()

    # export as zip
    # BUG FIX: make_archive appends the format extension itself, so passing
    # filename + '.zip' as base_name produced '<filename>.zip.zip'.
    shutil.make_archive(filename, 'zip', './testing')

    # clear all files in dir
    for root, dirs, files in os.walk('./testing'):
        for file in files:
            os.remove(os.path.join(root, file))

# perform test
run_test(ds_raw=ds, ds_phen=ds_phenos, filename=filename, x_list=x_list, y_list=y_list)

# +
from datacube.utils.cog import write_cog
write_cog(geo_im=ds_phenos['lios_values'],
          fname='lios.tif',
          overwrite=True)
# -

# ### Working

# + jupyter={"source_hidden": true}
# different types of detection, using stl residuals - remove outlier method
#from scipy.stats import median_absolute_deviation
#v = ds.isel(x=0, y=0, time=slice(0, 69))
#v['veg_index'].data = data
#v_med = remove_outliers(v, method='median', user_factor=1, num_dates_per_year=24, z_pval=0.05)
#v_zsc = remove_outliers(v, method='zscore', user_factor=1, num_dates_per_year=24, z_pval=0.1)
#stl_res = stl(v['veg_index'], period=24, seasonal=5, robust=True).fit()
#v_rsd = stl_res.resid
#v_wgt = stl_res.weights
#o = v.copy()
#o['veg_index'].data = v_rsd
#w = v.copy()
#w['veg_index'].data = v_wgt
#m = xr.where(o > o.std('time'), True, False)
#o = v.where(m)
#m = xr.where(w < 1e-8, True, False)
#w = v.where(m)
#fig = plt.figure(figsize=(18, 7))
#plt.plot(v['time'], v['veg_index'], color='black', marker='o')
#plt.plot(o['time'], o['veg_index'], color='red', marker='o', linestyle='-')
#plt.plot(w['time'], w['veg_index'], color='blue', marker='o', linestyle='-')
#plt.axhline(y=float(o['veg_index'].std('time')))
#plt.show()

# + jupyter={"source_hidden": true}
# working method for stl outlier
dection. can't quite get it to match timesat results? # need to speed this up - very slow for even relatively small datasets #def func_stl(vec, period, seasonal, jump_l, jump_s, jump_t): #resid = stl(vec, period=period, seasonal=seasonal, #seasonal_jump=jump_s, trend_jump=jump_t, low_pass_jump=jump_l).fit() #return resid.resid #def do_stl_apply(da, multi_pct, period, seasonal): # calc jump size for lowpass, season and trend to speed up processing #jump_l = int(multi_pct * (period + 1)) #jump_s = int(multi_pct * (period + 1)) #jump_t = int(multi_pct * 1.5 * (period + 1)) #f = xr.apply_ufunc(func_stl, da, #input_core_dims=[['time']], #output_core_dims=[['time']], #vectorize=True, dask='parallelized', #output_dtypes=[ds['veg_index'].dtype], #kwargs={'period': period, 'seasonal': seasonal, #'jump_l': jump_l, 'jump_s': jump_s, 'jump_t': jump_t}) #return f # chunk up to make use of dask parallel #ds = ds.chunk({'time': -1}) # calculate residuals for each vector stl #stl_resids = do_stl_apply(ds['veg_index'], multi_pct=0.15, period=24, seasonal=13) #s = ds['veg_index'].stack(z=('x', 'y')) #s = s.chunk({'time': -1}) #s = s.groupby('z').map(func_stl) #out = out.unstack() #s = ds.chunk({'time': -1}) #t = xr.full_like(ds['veg_index'], np.nan) #out = xr.map_blocks(func_stl, ds['veg_index'], template=t).compute() #stl_resids = stl_resids.compute() # + jupyter={"source_hidden": true} # working double logistic - messy though # https://colab.research.google.com/github/1mikegrn/pyGC/blob/master/colab/Asymmetric_GC_integration.ipynb#scrollTo=upaYKFdBGEAo # see for asym gaussian example #da = v.where(v['time.year'] == 2016, drop=True) #def logi(x, a, b, c, d): #return a / (1 + xr.ufuncs.exp(-c * (x - d))) + b # get date at max veg index #idx = int(da['veg_index'].argmax()) # get left and right of peak of season #da_l = da.where(da['time'] <= da['time'].isel(time=idx), drop=True) #da_r = da.where(da['time'] >= da['time'].isel(time=idx), drop=True) # must sort right curve (da_r) 
descending to flip data #da_r = da_r.sortby(da_r['time'], ascending=False) # get indexes of times (times not compat with exp) #da_l_x_idxs = np.arange(1, len(da_l['time']) + 1, step=1) #da_r_x_idxs = np.arange(1, len(da_r['time']) + 1, step=1) # fit curve #popt_l, pcov_l = curve_fit(logi, da_l_x_idxs, da_l['veg_index'], method="trf") #popt_r, pcov_r = curve_fit(logi, da_r_x_idxs, da_r['veg_index'], method="trf") # apply fit to original data #da_fit_l = logi(da_l_x_idxs, *popt_l) #da_fit_r = logi(da_r_x_idxs, *popt_r) # flip fitted vector back to original da order #da_fit_r = np.flip(da_fit_r) # get mean of pos value, remove overlap between l and r #pos_mean = (da_fit_l[-1] + da_fit_r[0]) / 2 #da_fit_l = np.delete(da_fit_l, -1) #da_fit_r = np.delete(da_fit_r, 1) # concat back together with mean val inbetween #da_logi = np.concatenate([da_fit_l, pos_mean, da_fit_r], axis=None) # smooth final curve with mild savgol #da_logi = savgol_filter(da_logi, 3, 1) #fig = plt.subplots(1, 1, figsize=(6, 4)) #plt.plot(da['time'], da['veg_index'], 'o') #plt.plot(da['time'], da_logi) # + jupyter={"source_hidden": true} #from scipy.signal import find_peaks #x, y = 0, 1 #v = da.isel(x=x, y=y) #height = float(v.quantile(dim='time', q=0.75)) #distance = math.ceil(len(v['time']) / 4) #p = find_peaks(v, height=height, distance=distance)[0] #p_dts = v['time'].isel(time=p) #for p_dt in p_dts: #plt.axvline(p_dt['time'].dt.dayofyear, color='black', linestyle='--') #count_peaks = len(num_peaks[0]) #if count_peaks > 0: #return count_peaks #else: #return 0 #plt.plot(v['time.dayofyear'], v) # + jupyter={"source_hidden": true} # flip to get min closest to pos # if we want closest sos val to pos we flip instead to trick argmin #flip = dists_sos_v.sortby(dists_sos_v['time'], ascending=False) #min_right = flip.isel(time=flip.argmin('time')) #temp_pos_cls = da.isel(x=x, y=0).where(da['time'] == min_right['time'].isel(x=x, y=0)) #plt.plot(temp_pos_cls.time, temp_pos_cls, marker='o', color='black', 
alpha=0.25)
superseded/Phenolopy_old.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# THIS PROGRAM GENERATES ALL THE PRIMES UP TO A SPECIFIED NUMBER
import math
import numpy as np

# SECURITY FIX: int() instead of eval() -- eval executes arbitrary input,
# and the prompt asks for a positive integer anyway.
N = int(input("Enter a positive integer: "))

# Set comprehension: x is prime when no y in [2, sqrt(x)] divides it.
prime = {x for x in range(2, N) if all(x % y != 0 for y in range(2,int(math.sqrt(x))+1))}

# Bare expression: displays only when run as a notebook cell.
prime, len(prime)

# +
# THIS PROGRAM COUNTS THE NUMBER OF SUBSETS OF A GIVEN SIZE OF A FINITE SET OF A GIVEN SIZE
import math
import numpy as np

def pascalfunction(A):
    """Given one row of Pascal's triangle, return the next row."""
    S = []
    S.append(1)
    for k in range(1, len(A)):
        S.append(A[k-1] + A[k])
    S.append(1)
    return S

n = int(input("Enter the size of the set: "))
k = int(input("Enter the size of the subset: "))

A = [1]
for m in range(1, n+1):
    # BUG FIX: this called the undefined name `pascaluglymf`, which raised
    # NameError; `pascalfunction` (defined above) was clearly intended.
    B = pascalfunction(A)
    A = B

print("The answer you seek is: ", A[k])

# +
import numpy as np

def pascalfunction(A):
    """Given one row of Pascal's triangle, return the next row."""
    S = []
    S.append(1)
    for k in range(1, len(A)):
        S.append(A[k-1] + A[k])
    S.append(1)
    return S

# +
A = [1]
for m in range(1, n+1):
    B = pascalfunction(A)  # BUG FIX: was the undefined `pascaluglymf`
    print(B)
    A = B
# -

# Copyright (c) 2020 TRIPODS/GradStemForAll 2020 Team
Other Notebooks/SimpleMathPrograms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # IHA1 - Assignment
# Welcome to the first individual home assignment!
#
# This assignment consists of two parts:
# * Python and NumPy exercises
# * Build a deep neural network for forward propagation
#
# The focus of this assignment is for you to gain practical knowledge with implementing forward propagation of deep neural networks without using any deep learning framework. You will also gain practical knowledge in two of Python's scientific libraries [NumPy](https://docs.scipy.org/doc/numpy-1.13.0/index.html) and [Matplotlib](https://matplotlib.org/devdocs/index.html).
#
# Skeleton code is provided for most tasks and every part you are expected to implement is marked with **TODO**
#
# We expect you to search and learn by yourself any commands you think are useful for these tasks. Don't limit yourself to only what was taught in CL1. Use the help function, [stackoverflow](https://stackoverflow.com/), google, the [python documentation](https://docs.python.org/3.5/library/index.html) and the [NumPy](https://docs.scipy.org/doc/numpy-1.13.0/index.html) documentation to your advantage.
#
# **IMPORTANT NOTE**: The tests available are not exhaustive, meaning that if you pass a test you have avoided the most common mistakes, but it is still not guaranteed that your solution is 100% correct.
#
# Lets start by importing the necessary libraries below

import numpy as np
import matplotlib.pyplot as plt
from utils.tests.iha1Tests import *

# ## 1. Lists and arrays introduction
# First, we will warm up with a Python exercise and few NumPy exercises

# ### 1.1 List comprehensions
# Examine the code snippet provided below

# +
myList = []
for i in range(25):
    if i % 2 == 0:
        myList.append(i**2)
print(myList)
# -

# This is not a very "[pythonic](http://docs.python-guide.org/en/latest/writing/style/)" way of writing. Lets re-write the code above using a [list comprehension](https://docs.python.org/2/tutorial/datastructures.html#list-comprehensions). The result will be less code, more readable and elegant. Your solution should be able to fit into one line of code.

# range(0, 25, 2) yields exactly the even i of range(25), so this matches
# the loop-and-append version above.
myList = [x**2 for x in range(0,25,2)]# TODO
print(myList)

# +
# sample output from cell above for reference
# -

# ### 1.2 Numpy array vs numpy vectors
# Run the cell below to create a numpy array.

myArr = np.array([1, 9, 25, 49, 81, 121, 169, 225, 289, 361, 441, 529])
print(myArr)
print(myArr.shape)

# One of the core features of numpy is to efficiently perform linear algebra operations.
# There are two types of one-dimensional representations in numpy: arrays of shape (x,) and vectors of shape (x,1)
#
# The the above result indicates that **myArr** is an array of 12 elements with shape (12,).
#
# Numpy's arrays and vectors both have the type of `numpy.ndarray` but have in some cases different characteristics and it is important to separate the two types because it will save a lot of debugging time later on. Read more about numpy shapes [here](https://stackoverflow.com/a/22074424)
#
# Run the code below to see how the transpose operation behaves differently between an array and vector

# +
# print the shape of an array and the shape of a transposed array
print('myArr is an array of shape:')
print(myArr.shape)
print('The transpose of myArr has the shape:')
print(myArr.T.shape)

# print the shape of a vector and the transpose of a vector
myVec = myArr.reshape(12,1)
print('myVec is a vector of shape:')
print(myVec.shape)
print('The transpose of myVec has the shape:')
print(myVec.T.shape)
# -

# ### 1.3 Numpy exercises
# Now run the cell below to create the numpy array `numbers` and then complete the exercises sequentially

numbers = np.arange(24)
print(numbers)

# TODO: reshape numbers into a 6x4 matrix
numbers = numbers.reshape(6,4)
print(numbers)

# +
# sample output from cell above for reference
# -

# test case
test_numpy_reshape(numbers)

# TODO: set the element of the last row of the last column to zero
# Hint: Try what happends when indices are negative
numbers[-1, -1] = 0
print(numbers)

# +
# sample output from cell above for reference
# -

# test case
test_numpy_neg_ix(numbers)

# TODO: set every element of the 0th row to 0
numbers[0,:] = 0
print(numbers)

# +
# sample output from cell above for reference
# -

# test case
test_numpy_row_ix(numbers)

# TODO: append a 1x4 row vector of zeros to `numbers`,
# resulting in a 7x4 matrix where the new row of zeros is the last row
# Hint: A new matrix must be created in the procedure. Numpy arrays are not dynamic.

# Allocate the larger matrix and copy the old rows in; note this also
# narrows the dtype to int16 (safe here -- all values fit in int16).
temp = np.zeros([7,4], dtype = np.int16)
temp[:6,:] = numbers
numbers = temp
print(numbers)
print(numbers.shape)

# +
# sample output from cell above for reference
# -

# test case
test_numpy_append_row(numbers)

# TODO: set all elements above 10 to the value 1
# Boolean-mask assignment: numbers > 10 selects the elements to overwrite.
numbers[numbers>10]=1
print(numbers)

# +
# sample output from cell above for reference
# -

# test case
test_numpy_bool_matrix(numbers)

# TODO: compute the sum of every row and replace `numbers` with the answer
# `numbers` will be a (7,) array as a result
numbers = np.sum(numbers, axis = 1)
print(numbers.shape)
print(numbers)

# +
# sample output from cell above for reference
# -

# test case
test_numpy_sum(numbers)

# ## 2 Building your deep neural network
# It is time to start implementing your first feed-forward neural network. In this lab you will only focus on implementing the forward propagation procedure.
#
# When using a neural network, you can not forward propagate the entire dataset at once. Therefore, you divide the dataset into a number of sets/parts called batches. A batch will make up for the first dimension of every input to a layer and the notation `(BATCH_SIZE, NUM_FEATURES)` simply means the dimension of a batch of samples where every sample has `NUM_FEATURES` features

# ### 2.1 activation functions
# You will start by defining a few activation functions that are later needed by the neural network.

# #### 2.1.1 ReLU
# The neural network will use the ReLU activation function in every layer except for the last. ReLU does element-wise comparison of the input matrix. For example, if the input is `X`, and `X[i,j] == 2` and `X[k,l] == -1`, then after applying ReLU, `X[i,j] == 2` and `X[k,l] == 0` should be true.
# # The formula for implementing ReLU for a single neuron $i$ is:
# \begin{equation}
# relu(z_i) =
# \begin{cases}
# 0, & \text{if}\ z_i \leq 0 \\
# z_i, & \text{otherwise}
# \end{cases}
# \end{equation}
#
# Now implement `relu` in vectorized form

def relu(z):
    """
    Rectified linear unit, applied element-wise.

    Arguments:
    z - activation-function input, a `numpy.ndarray`

    Returns:
    a - `numpy.ndarray` of the same shape as `z`, with every negative
        entry clamped to zero
    """
    # np.maximum broadcasts the scalar 0 against every entry of z.
    return np.maximum(z, 0)

# test case
test_relu(relu)

# #### 2.1.2 Sigmoid
# The sigmoid activation function is common for binary classification. This is because it squashes its input to the range [0,1].
# Implement the activation function `sigmoid` using the formula:
# \begin{equation}
# \sigma(z) = \frac{1}{1 + e^{-z}}
# \end{equation}

def sigmoid(z):
    """
    Logistic sigmoid, applied element-wise.

    Arguments:
    z - activation-function input, a `numpy.ndarray`

    Returns:
    a - `numpy.ndarray` of the same shape as `z`, squashed into (0, 1)
    """
    denom = 1 + np.exp(-z)
    return np.reciprocal(denom)

# test case
test_sigmoid(sigmoid)

# #### 2.1.3 Visualization
# Make a plot using matplotlib to visualize the activation functions between the input interval [-3,3].
# The plot should have the following properties
# * one plot should contain a visualization of both `ReLU` and `sigmoid`
# * x-axis: range of values between [-3,3], **hint**: np.linspace
# * y-axis: the value of the activation functions at a given input `x`
# * a legend explaining which line represents which activation function

# TODO: make a plot of ReLU and sigmoid values in the interval [-3,3]
# Draw ReLU first, then sigmoid, each with its own legend label.
x = np.linspace(-3, 3, num=100)
for activation, label in ((relu, 'relu'), (sigmoid, 'sigmoid')):
    plt.plot(x, activation(x), label=label)
plt.legend()
plt.show()

# +
# sample output from cell above for reference
# -

# #### 2.1.4 Softmax
# You will use the softmax activation function / classifier as the final layer of your neural network later in the assignment. Implement `softmax` according the the formula below. The subtraction of the maximum value is there solely to avoid overflows in a practical implementation.
# \begin{equation}
# softmax(z_i) = \frac{e^{z_i - max(\mathbf{z})}}{ \sum^j e^{z_j - max(\mathbf{z})}}
# \end{equation}
#

def softmax(z):
    """
    Row-wise softmax.

    Arguments:
    z - input of shape (BATCH_SIZE, FEATURES), type `numpy.ndarray`

    Returns:
    a - `numpy.ndarray` of shape (BATCH_SIZE, FEATURES) where every row
        is a probability distribution (non-negative, sums to 1)
    """
    # Shift each row by its own max before exponentiating; the shift
    # cancels in the ratio but keeps np.exp from overflowing.
    shifted = z - np.amax(z, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)

# test case
test_softmax(softmax)

# ### 2.2 Initialize weights
# You will implement a helper function that takes the shape of a layer as input, and returns an initialized weight matrix $\mathbf{W}$ and bias vector $\mathbf{b}$ as output. $\mathbf{W}$ should be sampled from a normal distribution of mean 0 and standard deviation 2, and $\mathbf{b}$ should be initialized to all zeros.
def initialize_weights(layer_shape):
    """
    Implement initialization of the weight matrix and biases.

    Arguments:
    layer_shape - a tuple of length 2, type (int, int), that determines the dimensions of the weight matrix: (input_dim, output_dim)

    Returns:
    w - a weight matrix with dimensions of `layer_shape`, (input_dim, output_dim), that is normally distributed with properties mu = 0, stddev = 2. Has a type of `numpy.ndarray`
    b - a vector of initialized biases with shape (1,output_dim), all of value zero. Has a type of `numpy.ndarray`
    """
    # np.random.normal(loc=0, scale=2): mean 0, standard deviation 2, as specified.
    w = np.random.normal(0,2,size=[layer_shape[0],layer_shape[1]])# TODO
    # Reshaped to a (1, output_dim) row vector so it broadcasts over a batch.
    b = np.zeros(layer_shape[1]).reshape(1,layer_shape[1])# TODO
    return w, b

# test case
test_initialize_weights(initialize_weights)

# ### 2.3 Feed-forward neural network layer module
# To build a feed-forward neural network of arbitrary depth you are going to define a neural network layer as a module that can be used to stack upon each other.
#
# Your task is to complete the `Layer` class by following the descriptions in the comments.
#
# Recall the formula for forward propagation of an arbitrary layer $l$:
#
# \begin{equation}
# \mathbf{a}^{[l]} = g(\mathbf{z}^{[l]}) = g(\mathbf{a}^{[l-1]}\mathbf{w}^{[l]} +\mathbf{b}^{[l]})
# \end{equation}
#
# $g$ is the activation function given by `activation_fn`, which can be relu, sigmoid or softmax.

class Layer:
    """
    A single fully-connected feed-forward layer: an affine transform followed
    by an activation function.

    TODO: Build a class called Layer that satisfies the descriptions of the methods
    Make sure to utilize the helper functions you implemented before
    """
    def __init__(self, input_dim, output_dim, activation_fn=relu):
        """
        Arguments:
        input_dim - the number of inputs of the layer. type int
        output_dim - the number of outputs of the layer. type int
        activation_fn - a reference to the activation function to use. Should be `relu` as a default
                        possible values are the `relu`, `sigmoid` and `softmax` functions you implemented earlier.
                        Has the type `function`

        Attributes:
        w - the weight matrix of the layer, should be initialized with `initialize_weights` and has the shape (INPUT_FEATURES, OUTPUT_FEATURES) and type `numpy.ndarray`
        b - the bias vector of the layer, should be initialized with `initialize_weights` and has the shape (1, OUTPUT_FEATURES) and type `numpy.ndarray`
        activation_fn - a reference to the activation function to use. Has the type `function`
        """
        self.w, self.b = initialize_weights([input_dim, output_dim]) # TODO
        self.activation_fn = activation_fn # TODO

    def forward_prop(self, a_prev):
        """
        Implement the forward propagation module of the neural network layer.
        Should use whatever activation function that `activation_fn` references to.

        Arguments:
        a_prev - the input to the layer, which may be the data `X`, or the output from the previous layer. a_prev has the shape of (BATCH_SIZE, INPUT_FEATURES) and the type `numpy.ndarray`

        Returns:
        a - the output of the layer when performing forward propagation. Has the type `numpy.ndarray`
        """
        # z = a_prev · W + b; b broadcasts across the batch dimension.
        z = np.dot(a_prev,self.w) + self.b
        a = self.activation_fn(z) # TODO
        return a

# test case, be sure that you pass the previous activation function tests before running this test
test_layer(Layer, relu, sigmoid, softmax)

# ### 2.4 Logistic regression
# Binary logistic regression is a classifier where classification is performed by applying the sigmoid activation function to a linear combination of input values. You will now try out your neural network layer by utilizing it as a linear combination of input values and apply the sigmoid activation function to classify a simple problem.
#
# The cell below defines a dataset of 5 points of either class `0` or class `1`. Your assignment is to:
# 1. Create an instance of a `Layer` with sigmoid activation function
# 2. Manually tune the weights `w` and `b` of your layer
#
# You can use `test_logistic` to visually inspect how your classifier is performing.
# +
# Run this cell to create the dataset
X_s = np.array([[1, 2], [5, 3], [8, 8], [7, 5], [3, 6]])
Y_s = np.array([0,0,1,0,1])
test_logistic(X_s, Y_s)

# +
# create an instance of layer
l = Layer(2,1,sigmoid)

# TODO: manually tune weights
# NOTE(review): these assignments replace the initialized ndarray weights with
# a plain Python list / scalar; np.dot and broadcasting still accept them.
l.w = [-0.5, 1]
l.b = -3

# testing your choice of weights with this function
test_logistic(X_s,Y_s,l,sigmoid)

# +
# sample output from cell above for reference
# -

# ### 2.5 Feed-forward neural network
# Now define the actual neural network class. It is an L-layer neural network, meaning that the number of layers and neurons in each layer is specified as input by the user. Once again, you will only focus on implementing the forward propagation part.
#
# Read the descriptions in the comments and complete the todos

class NeuralNetwork:
    """
    TODO: Implement an L-layer neural network class by utilizing the Layer module defined above
    Each layer should use `relu` activation function, except for the output layer, which should use `softmax`
    """
    def __init__(self, input_n, layer_dims):
        """
        Arguments:
        input_n - the number of inputs to the network. Should be the same as the length of a data sample. Has type int
        layer_dims - a python list or tuple of the number of neurons in each layer.
                     Layer `l` should have a weight matrix with the shape (`layer_dims[l-1]`, `layer_dims[l]`).
                     `layer_dims[-1]` is the dimension of the output layer.
                     Layer 1 should have the dimensions (`input_n`, `layer_dims[0]`).
                     len(layer_dims) is the depth of the neural network

        Attributes:
        input_n - the number of inputs to the network. Has type int
        layers - a python list of each layer in the network. Each layer should use the `relu` activation function, except for the last layer, which should use `softmax`.
                 Has type `list` containing layers of type `Layer`
        """
        self.input_n = input_n # TODO
        # Prepend the input size so consecutive pairs give each layer's (in, out) dims.
        list_layers = [input_n] + layer_dims
        # relu everywhere, softmax appended for the output layer.
        # NOTE(review): index 0 of list_activation_fn is never used (layers are
        # built for i in 1..len-1), so the list is one element longer than needed.
        list_activation_fn =[relu for _ in range(len(list_layers)-1)]
        list_activation_fn.append(softmax)
        self.layers = [Layer(list_layers[i-1],list_layers[i],list_activation_fn[i] ) for i in range(1,len(list_layers)) ] # TODO

    def forward_prop(self, x):
        """
        Implement the forward propagation procedure through the entire network, from input to output.
        You will now connect each layer's forward propagation function into a chain of layer-wise forward propagations.

        Arguments:
        x - the input data, which has the shape (BATCH_SIZE, NUM_FEATURES) and type `numpy.ndarray`

        Returns:
        a - the output of the last layer after forward propagating through the every layer in `layers`. Should have the dimension (BATCH_SIZE, layers[-1].w.shape[1]) and type `numpy.ndarray`
        """
        # Feed each layer's output into the next; the final activation is softmax.
        a = x
        for i in range(len(self.layers)):
            a = self.layers[i].forward_prop(a)
        return a

# test case
test_neuralnetwork(NeuralNetwork)

# ## 3 Making predictions with a neural network
# In practice, it's common to load weights to your neural network that has already been trained.
# In this section, you will create an instance of your neural network, load trained weights from disk, and perform predictions.
#
# ### 3.1 Load weights from disk
# Create an instance of `NeuralNetwork` with input size $28 \times 28 = 784$, two hidden layers of size 100 and an output layer of size 10. Thereafter, load the weights contained in `./utils/ann_weights.npz` to your network.

# +
ann = NeuralNetwork(784,[100,100,10]) # TODO: create instance of ann

# load weights
# NOTE(review): the archive stores each layer's parameters under keys
# 'w<l>' / 'b<l>'; the loop variable `l` reuses the name of the Layer
# instance created in the logistic-regression cell above.
weights = np.load('./utils/ann_weights.npz')
for l in range(len(ann.layers)):
    ann.layers[l].w = weights['w' + str(l)]
    ann.layers[l].b = weights['b' + str(l)]
# -

# ### 3.2 Prediction
# Now, implement the function `predict_and_correct` which does the following:
# 1. Load `./utils/test_data.npz` from disk
# 2. Extract test data `X` and `Y` from file
# 3. Perform for every pair of data:
# a. plot the image `x`
# b. make a prediction using your neural network by forward propagating and picking the most probable class
# c. check whether the prediction is correct (compare with the ground truth number `y`)
# d. print the predicted label and whether it was correct or not

# +
def predict_and_correct(ann):
    """
    Load test data from file and predict using your neural network.
    Make a prediction for every data sample and print it along with whether it was a correct prediction or not.

    Arguments:
    ann - the neural network to use for prediction. Has type `NeuralNetwork`

    Returns: # for test case purposes
    A `numpy.ndarray` of predicted classes (integers [0-9]) with shape (11,)
    """
    data = np.load('./utils/test_data.npz')
    X, cls = data['X'], data['Y']
    # argmax over the softmax output picks the most probable digit class.
    cls_preds = [np.argmax(ann.forward_prop(X[i,:])) for i in range(len(cls))] # TODO: make a predicted number for every image in X
    for i in range(len(X)):
        # Each sample is a flattened 28x28 grayscale image.
        plt.imshow(X[i].reshape(28,28), cmap='gray')
        plt.show()
        correct = cls_preds[i] == cls[i]
        print('The prediction was {0}, it was {1}!'.format(cls_preds[i], 'correct' if correct else 'incorrect'))
    return cls_preds

cls_pred = predict_and_correct(ann)
# -

# final test case
test_predict_and_correct_answer(cls_pred)

# +
# sample output from cell above for reference
# -

# ## Congratulations!
# You have successfully implemented a neural network from scratch using only NumPy!
Home Assignments/IHA1/IHA1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import numpy as np
import cv2
from os import listdir, mkdir
from os.path import isfile, join, isdir
import dlib
from PIL import Image
from numba import njit

# +
# One-off dataset housekeeping cells (kept for reference, disabled):
# renumber images sequentially, then remove '*feature_refined*' files.
# i = 1
# dirs = os.listdir('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/test')
# dirs.sort()
# for dir in dirs:
#     for name in os.listdir(os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/test',dir)):
#         src_path = os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/test',dir,name)
#         des_path = os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/test',dir,str(i).zfill(6)+'.jpg')
#         os.rename(src_path,des_path)
#         i+=1

# i = 1
# dirs = os.listdir('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/train')
# dirs.sort()
# for dir in dirs:
#     for name in os.listdir(os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/train',dir)):
#         src_path = os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/train',dir,name)
#         des_path = os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/train',dir,str(i).zfill(6)+'.jpg')
#         os.rename(src_path,des_path)
#         i+=1

# dirs = os.listdir('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_train/clr')
# dirs.sort()
# for dir in dirs:
#     for name in os.listdir(os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_train/clr',dir)):
#         src_path = os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_train/clr',dir,name)
#         if 'feature_refined' in name:
#             os.remove(src_path)

# Delete every '*feature_refined*' file under the test clr tree.
# NOTE(review): `dir` shadows the builtin of the same name.
dirs = os.listdir('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_test/clr')
dirs.sort()
for dir in dirs:
    for name in os.listdir(os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_test/clr',dir)):
        src_path = os.path.join('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_test/clr',dir,name)
        if 'feature_refined' in name:
            os.remove(src_path)

# +
def get_lndm(path_img, path_out, start_id = 0, dlib_path=""):
    """
    Detect a face in every .jpg under path_img/<folder>/, crop a region
    centred between the eyes (dlib landmarks 39 and 42), resize it to
    256x256 and write it to path_out/clr/<folder>/; the untouched frame is
    written to path_out/orig/<folder>/. Output directories are created on
    demand.

    Arguments:
    path_img - root directory containing one sub-folder of numbered .jpg images per identity
    path_out - root directory for the output layers ('msk', 'orig', 'clr', 'lndm')
    start_id - skip the first `start_id` images of every folder
    dlib_path - directory containing shape_predictor_68_face_landmarks.dat
    """
    dir_proc = {'msk':'msk', 'org':'orig', 'clr':'clr', 'lnd':'lndm'}
    for dir_it in dir_proc:
        if os.path.isdir(path_out + dir_proc[dir_it]) == False:
            os.mkdir(path_out + dir_proc[dir_it])
    folder_list = [f for f in listdir(path_img)]
    folder_list.sort()
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(dlib_path+"shape_predictor_68_face_landmarks.dat")
    line_px = 1  # NOTE(review): unused in this trimmed version
    res_w = 256
    res_h = 256
    for fld in folder_list[:]:
        # filenames are numeric (e.g. 000001.jpg), hence the sort(key=int)
        imglist_all = [f[:-4] for f in listdir(join(path_img, fld)) if isfile(join(path_img, fld, f)) and f[-4:] == ".jpg"]
        imglist_all.sort(key=int)
        imglist_all = imglist_all[start_id:]
        for dir_it in dir_proc:
            if os.path.isdir(join(path_out, dir_proc[dir_it], fld)) == False:
                os.mkdir(join(path_out, dir_proc[dir_it], fld))
        land_mask = True  # NOTE(review): unused in this trimmed version
        crop_coord = []
        for it in range(len(imglist_all)):
            clr = cv2.imread(join(path_img, fld, imglist_all[it]+".jpg"), cv2.IMREAD_ANYCOLOR)
            img = clr.copy()
            img_dlib = np.array(clr[:, :, :], dtype=np.uint8)
            dets = detector(img_dlib, 1)
            for k_it, d in enumerate(dets):
                # only the first detected face is processed
                if k_it != 0:
                    continue
                landmarks = predictor(img_dlib, d)
                # centering: midpoint between the inner eye corners (39, 42)
                c_x = int((landmarks.part(42).x + landmarks.part(39).x) / 2)
                c_y = int((landmarks.part(42).y + landmarks.part(39).y) / 2)
                # crop half-extents scaled from the inter-eye distance
                # NOTE(review): this first w_r is dead — it is overwritten two
                # lines below by the aspect-ratio-derived value.
                w_r = int((landmarks.part(42).x - landmarks.part(39).x)*4)
                h_r = int((landmarks.part(42).x - landmarks.part(39).x)*5)
                w_r = int(h_r/res_h*res_w)
                w, h = int(w_r * 2), int(h_r * 2)
                pd = int(w) # padding size
                # pad with edge replication so crops near the border stay in bounds
                img_p = np.zeros((img.shape[0]+pd*2, img.shape[1]+pd*2, 3), np.uint8) * 255
                img_p[:, :, 0] = np.pad(img[:, :, 0], pd, 'edge')
                img_p[:, :, 1] = np.pad(img[:, :, 1], pd, 'edge')
                img_p[:, :, 2] = np.pad(img[:, :, 2], pd, 'edge')
                visual = img_p[c_y - h_r+pd:c_y + h_r+pd, c_x - w_r+pd:c_x + w_r+pd]
                crop_coord.append([c_y - h_r, c_y + h_r, c_x - w_r, c_x + w_r, pd, imglist_all[it]+".jpg"])
                t_x, t_y = int(c_x - w_r), int(c_y - h_r)
                ratio_w, ratio_h = res_w/w, res_h/h
                visual = cv2.resize(visual, dsize=(res_w, res_h), interpolation=cv2.INTER_CUBIC)
                cv2.imwrite(join(path_out, dir_proc['clr'], fld, imglist_all[it]+".jpg"), visual) #saving crop
                cv2.imwrite(join(path_out, dir_proc['org'], fld, imglist_all[it]+".jpg"), clr) # saving original
                #np.save(join(path_out, dir_proc['org'], fld, 'crop_coord.npy'), crop_coord) #crop coordinates
        print("folder done",fld)

# get_lndm('/home/junjie/DeIDVideo/CelebA/clr_test',
#          '/home/junjie/DeIDVideo/SemanticImageSynthesis/ciagan_segmentation/CeleBAT/',
#          dlib_path='/home/junjie/DeIDVideo/ciagan/source/')
get_lndm('/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience/test',
         '/home/qiuyang/anonymous/ciagan_semantic/dataset/Adience_test/',
         dlib_path='/home/junjie/DeIDVideo/ciagan/source/')
# -
generate_clr_img.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Local Directional Pattern (LDP): convolve the image with eight compass
# masks, keep the k=3 strongest absolute responses per pixel, and pack the
# resulting 8-bit code into an output image.

# + id="TOEGNyZTwUSa"
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

# + id="1PQBbWEAysZc"
# flag 0 -> load as grayscale
img = cv.imread('/content/christian_bale.jpg',0)

# + id="It81CqOvyy0D" outputId="edf5bce1-2de9-4785-bc97-30b98a313e6c" colab={"base_uri": "https://localhost:8080/", "height": 252}
plt.imshow(img,cmap='gray')
plt.xticks([]),plt.yticks([])
plt.show()

# + id="gzAWgOwKy_ZD" outputId="b49d1364-7a6c-4f67-910c-472fab55e884" colab={"base_uri": "https://localhost:8080/", "height": 34}
img.shape

# + id="ood8TlsUzGtq"
# per-direction response images (same shape/dtype as img)
s_k = np.zeros_like(img)
n_k = np.zeros_like(img)
nw_k = np.zeros_like(img)
ne_k = np.zeros_like(img)
sw_k = np.zeros_like(img)
se_k = np.zeros_like(img)
w_k = np.zeros_like(img)
e_k = np.zeros_like(img)

# + id="tavHalNrzJ57"
# Kirsch-style 3x3 compass masks (weights -3/0/5).
# NOTE(review): the name/direction pairing looks inconsistent (e.g. `ka` is
# later convolved into `e_k`, and there is no `ea`) — verify against the
# intended Kirsch mask orientations before relying on direction labels.
na= np.array([[-3,-3,5],[-3,0,5],[-3,-3,5]])
wa= np.array([[5,5,5],[-3,0,-3],[-3,-3,-3]])
sa= np.array([[5,-3,-3],[5,0,-3],[5,-3,-3]])
nea= np.array([[-3,-3,-3],[-3,0,5],[-3,5,5]])
nwa= np.array([[-3,5,5],[-3,0,5],[-3,-3,-3]])
sea= np.array([[-3,-3,-3],[5,0,-3],[5,5,-3]])
swa= np.array([[5,5,-3],[5,0,-3],[-3,-3,-3]])
ka= np.array([[-3,-3,-3],[-3,0,-3],[5,5,5]])

# + id="8o7oEeYTzMSr"
import scipy
from scipy import ndimage

# + id="_o9goOG4zPHz"
# Edge responses in all eight directions (cval is ignored with mode='nearest').
e_k=ndimage.convolve(img,ka,mode='nearest',cval=0.0)
n_k=ndimage.convolve(img,na,mode='nearest',cval=0.0)
s_k=ndimage.convolve(img,sa,mode='nearest',cval=0.0)
w_k=ndimage.convolve(img,wa,mode='nearest',cval=0.0)
ne_k=ndimage.convolve(img,nea,mode='nearest',cval=0.0)
nw_k=ndimage.convolve(img,nwa,mode='nearest',cval=0.0)
se_k=ndimage.convolve(img,sea,mode='nearest',cval=0.0)
sw_k=ndimage.convolve(img,swa,mode='nearest',cval=0.0)

# + id="4SA6TrX7zj_L"
ldp_mat=np.zeros_like(img)

# + id="U6r6-xLBznNP"
# Per pixel: set the bits of the 3 largest |responses| and pack them
# MSB-first into the LDP code.
for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        lst=[se_k[i][j],s_k[i][j],sw_k[i][j],w_k[i][j],nw_k[i][j],n_k[i][j],ne_k[i][j],e_k[i][j]]
        l=[abs(h) for h in lst]
        # argsort ascending, then reverse -> indices of strongest responses first
        marr=np.argsort(l)
        marr1=marr[::-1]
        binary=np.zeros(8,dtype="uint8")
        binary[marr1[0]]=1
        binary[marr1[1]]=1
        binary[marr1[2]]=1
        d_no=binary[0]*2**7+binary[1]*2**6+binary[2]*2**5+binary[3]*2**4+binary[4]*2**3+binary[5]*2**2+binary[6]*2**1+binary[7]*2**0
        ldp_mat[i][j]=d_no

# + id="0NC5R_GFznRO" outputId="0371bed3-fa9a-4d4a-f208-f2ac461a3350" colab={"base_uri": "https://localhost:8080/", "height": 252}
plt.imshow(ldp_mat,cmap='gray')
plt.xticks([]),plt.yticks([])
plt.show()
Local_Diretional_Pattern.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Year-over-year sales / traffic / TIV growth per region, first sketched with
# matplotlib and then rendered as an interactive 3x3 bokeh grid.

# from sq_sql import DBClient
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
plt.style.use('seaborn-whitegrid')
# %matplotlib inline

file_name = '/home/rcruz/Projects/Okay-Bokeh/nine_charts_data.csv'
# file_name = '/home/rcruz/Projects/Okay-Bokeh/nine_charts_data_2017_10_22.csv'

data = pd.read_csv(file_name)

data.head()

data['weekend_date'] = pd.to_datetime(data['weekend_date'])

# Build a national aggregate (regioncode 100) by summing over all regions.
# national = data[['period', 'weekend_date', 'cw_reported_sales', 'py_actual_sales', 'cw_traffic', 'py_traffic']].groupby(['period', 'weekend_date']).sum().reset_index()
national = data[['period', 'weekend_date', 'cw_sales', 'py_sales', 'cw_traffic', 'py_traffic', 'TIV']].groupby(['period', 'weekend_date']).sum().reset_index()

# growth = (current week - prior year) / prior year
national['sales_growth'] = (national['cw_sales'] - national['py_sales']) / national['py_sales']
national['traffic_growth'] = (national['cw_traffic'] - national['py_traffic']) / national['py_traffic']
national['region_order'] = 0
national['regioncode'] = 100

data = data[['region_order', 'regioncode', 'period', 'weekend_date', 'cw_sales', 'py_sales', 'sales_growth', 'cw_traffic', 'py_traffic', 'traffic_growth','TIV']]
national = national[['region_order', 'regioncode', 'period', 'weekend_date', 'cw_sales', 'py_sales', 'sales_growth', 'cw_traffic', 'py_traffic', 'traffic_growth','TIV']]

# NOTE(review): DataFrame.append is deprecated/removed in modern pandas —
# pd.concat([data, national]) is the replacement.
data = data.append(national)

# Quick matplotlib sanity plots for a single region and the national rollup.
plt.plot(data[data['regioncode'] == 26]['weekend_date'], data[data['regioncode'] == 26]['sales_growth'], marker = 'o', label = 'Sales')
plt.plot(data[data['regioncode'] == 26]['weekend_date'], data[data['regioncode'] == 26]['traffic_growth'], marker = 'o', label = 'Traffic')
plt.legend(prop={'size': 10}, bbox_to_anchor=(1, 0.5), loc='upper left', borderaxespad=0.,)
plt.xticks(rotation=90, size = 10)
plt.show()

plt.plot(data[data['regioncode'] == 100]['weekend_date'], data[data['regioncode'] == 100]['sales_growth'], marker = 'o', label = 'Sales')
plt.plot(data[data['regioncode'] == 100]['weekend_date'], data[data['regioncode'] == 100]['traffic_growth'], marker = 'o', label = 'Traffic')
plt.legend(prop={'size': 10}, bbox_to_anchor=(1, 0.5), loc='upper left', borderaxespad=0.,)
plt.xticks(rotation=90, size = 10)
plt.show()

plt.plot(data[data['regioncode'] == 24]['weekend_date'], data[data['regioncode'] == 24]['TIV'], marker = 'o', label = 'TIV')
plt.legend(prop={'size': 10}, bbox_to_anchor=(1, 0.5), loc='upper left', borderaxespad=0.,)
plt.xticks(rotation=90, size = 10)
plt.show()

# +
from bokeh.io import output_notebook, output_file, show
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from bokeh.models import HoverTool, ColumnDataSource, CDSView, BooleanFilter, Legend
from bokeh.models import Range1d
# from bokeh.models.widgets.tables import TableColumn
import bokeh.plotting as bp
from bokeh.models import PrintfTickFormatter
# -

output_notebook()

import datetime

# Pre-format strings for the hover tooltips (bokeh shows raw column values).
data['formatted_date'] = data['weekend_date']\
    .apply(lambda x: datetime.datetime.strftime(x, '%Y-%m-%d'))

data.head()

# Convert ratios to percentage points for display.
data['sales_growth'] = data['sales_growth'] * 100
data['traffic_growth'] = data['traffic_growth'] * 100
data['TIV'] = data['TIV'] * 100

data['formatted_sales_growth'] = [str(x) + '%' for x in (data['sales_growth']).round(2)]
data['formatted_traffic_growth'] = [str(x) + '%' for x in (data['traffic_growth']).round(2)]
data['formatted_tiv'] = [str(x) + '%' for x in (data['TIV']).round(2)]

data.head()

data.dtypes

source24 = data[data['regioncode'] == 24]
# source24.sort_values('weekend_date')
source24.tail(16)

# +
TOOLS='pan,wheel_zoom,box_zoom,reset'

# @column references pull values from the ColumnDataSource per glyph.
hover = HoverTool(tooltips=[
    ("date", "@formatted_date"),
    ("YOY sales growth", "@formatted_sales_growth"),
    ("YOY traffic growth", "@formatted_traffic_growth"),
    ("YOY TIV", "@formatted_tiv"),
])

# 3x3 layout of region codes; None leaves that grid cell empty.
regions = [[24,26,32], [34, 36,42], [44,48, None]]

grid_body = []
for region_row in regions:
    grid_row = []
    for region_number in region_row:
        if region_number is not None:
            region_name = "region " + str(region_number)
            region_data = data[data['regioncode'] == region_number]
            source = ColumnDataSource(data=region_data)
            # x-range limited to the most recent 16 weekends
            latest_rows = region_data.tail(16)
            min_date = latest_rows['weekend_date'].min()
            max_date = latest_rows['weekend_date'].max()
            # NOTE(review): `legend=` and `plot_width/plot_height` are
            # legacy bokeh (<2.x) keywords — `legend_label=` / `width`/`height`
            # in current releases.
            plot_grid = bp.figure(tools=[hover,TOOLS], x_axis_type="datetime", title=region_name, x_range=(min_date, max_date))
            plot_grid.line(x='weekend_date', y='sales_growth', source=source, color = 'black', legend = 'sales')
            plot_grid.circle(x="weekend_date", y="sales_growth", size=3, color='black', hover_color="red", source=source, legend = 'sales')
            plot_grid.line(x='weekend_date', y='traffic_growth', source=source, color = 'red', legend = 'traffic')
            plot_grid.circle(x="weekend_date", y="traffic_growth", size=3, color='red', hover_color="red", source=source, legend = 'traffic')
            plot_grid.line(x='weekend_date', y='TIV', source=source, color = '#ED7D31', legend = 'tiv')
            plot_grid.square(x="weekend_date", y="TIV", size=3, color='#ED7D31', hover_color="orange", source=source, legend = 'tiv')
            plot_grid.yaxis[0].formatter = PrintfTickFormatter(format='%0.0f %%')
            plot_grid.yaxis.axis_label='YOY'
            plot_grid.legend.location = "top_left"
            plot_grid.legend.click_policy="hide"
            grid_row.append(plot_grid)
        else:
            grid_row.append(None)
    grid_body.append(grid_row)
    grid_row = []

p = gridplot(grid_body, plot_width=600, plot_height=350)
output_file('daily sales.html')
show(p)

# +
# Minimal bokeh gridplot example (kept from the bokeh docs for reference).
from bokeh.io import output_file, show
from bokeh.layouts import gridplot
from bokeh.plotting import figure

# output_file("layout.html")

x = list(range(11))
y0 = x
y1 = [10-i for i in x]
y2 = [abs(i-5) for i in x]

# create a new plot
s1 = figure(width=250, plot_height=250, title=None)
s1.circle(x, y0, size=10, color="navy", alpha=0.5)

# create another one
s2 = figure(width=250, height=250, title=None)
s2.triangle(x, y1, size=10, color="firebrick", alpha=0.5)

# create and another
s3 = figure(width=250, height=250, title=None)
s3.square(x, y2, size=10, color="olive", alpha=0.5)

# put all the plots in a grid layout
p = gridplot([[s1, s2], [None, s3]])

# show the results
show(p)
# -
nine_charts_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_pytorch_p36
#     language: python
#     name: conda_pytorch_p36
# ---

# Object-detection evaluation: load predicted/ground-truth mask images,
# extract bounding boxes, and compute AP / precision / recall at several
# IoU thresholds.

# +
# # !pip install shapely
# -

import cv2
from tqdm.notebook import tqdm
from metrics import *
from copy import deepcopy

# Usage:
# evaluate(predictions, ground_truths, findBoundingBoxs=findBoundingboxs, threshold=0.5, iou_threshold=0.5):
"""
Given predictions and ground truths, calculate AP, Precision and Recall.
:param predictions: An array of prediction images.
:param ground_truths: An array of ground truth images.
:param findBoundingBoxes: A method that takes in an image, and return its bounding boxes and their corresponding confidence scores.
:param threshold: Convert image to a binary image using this threshold.
:param iou_threshold: IOU threshold for classifying a True Positive.
:return AP, precision, recall, and calculation table of dataset.
"""
# NOTE(review): the string above documents `evaluate`, which is presumably
# provided by `from metrics import *` — this cell itself is a no-op.
pass

base_dir = "/home/ec2-user/SageMaker/benchmarks/dataset/ynet/"

# Load 10k prediction/ground-truth mask pairs; cv2.imread returns None for
# missing files, which downstream loops skip explicitly.
predictions = []
ground_truths = []
for i in tqdm(range(90000, 100000)):
    prediction_path = "prediction/predict_{}.png".format(i)
    train_mask_path = "Y/train_mask_{}.png".format(i)
    prediction = cv2.imread(base_dir + prediction_path)
    gt = cv2.imread(base_dir + train_mask_path)
    predictions.append(prediction)
    ground_truths.append(gt)

# +
# example
AP, precision, recall, data = evaluate(predictions, ground_truths, findBoundingBoxs=findBoundingboxs, threshold=0.5, iou_threshold=0.8)
# -

threshold=0.5
iou_threshold=0.9

# Re-derive the PR curve from the per-detection table returned by evaluate().
# NOTE(review): `total_num_GT` is not defined in this notebook — presumably
# exported by `from metrics import *`; confirm.
data['total_num_GT'] = total_num_GT
data["TP"] = (data["iou"] >= iou_threshold) * 1
data["FP"] = (data["iou"] < iou_threshold) * 1
data = data.sort_values(by=["confidenceScore"], ascending=[False])
data["Acc TP"] = np.cumsum(data["TP"])
data["Acc FP"] = np.cumsum(data["FP"])
data["Precision"] = data["Acc TP"] / (data["Acc TP"] + data["Acc FP"])
data["Recall"] = data["Acc TP"] / data['total_num_GT']

plt.plot(list(data["Recall"]), list(data["Precision"]))

# Monotone (right-to-left max) interpolation of the precision curve,
# as used for interpolated AP.
interpolation = list(data["Precision"])
for i in reversed(range(len(interpolation) - 1)):
    if interpolation[i+1]>interpolation[i]:
        interpolation[i]=interpolation[i+1]

plt.plot(list(data["Recall"]), interpolation)

# NOTE(review): `metrics.auc` requires the `metrics` module object itself to
# be in scope, which `from metrics import *` alone does not provide — verify.
AP = metrics.auc(list(data["Recall"]), interpolation)
precision = max(list(data["Precision"]))
recall = max(list(data["Recall"]))
print("At IOU", iou_threshold * 100)
print("AP:", AP)
print("Precision:", precision)
print("Recall:", recall)

import pickle
with open("synthetic_eval.pickle", "wb") as f:
    pickle.dump(data, f)

# +
cv=cv2
def findBoundingboxss(image, correction_margin=0):
    """
    Extract axis-aligned bounding boxes [y1, x1, y2, x2] (expanded by
    correction_margin) and per-box confidence scores from a binary mask.
    The mask is cleaned with a morphological open + erode before contour
    extraction.
    """
    #print('A')
    ##################### no trans#############################
    #contours, _ = cv.findContours(image.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    ##################### morph trans #########################
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
    closed = cv.morphologyEx(image, cv.MORPH_OPEN, kernel)
    closed = cv.erode(closed, None, iterations = 3)
    contours, _ = cv.findContours(closed.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    ##################### Watershed ############################
    #_, imgray = cv.threshold(closed, 0, 255, cv.THRESH_BINARY)
    # closed = image
    # #t=time.time()
    # Distance = ndimage.distance_transform_edt(closed)
    # D = ((Distance - Distance.min()) / (Distance.max() - Distance.min()) * 255).astype(np.uint8)
    # _, D = cv.threshold(D, 0, 255, cv.THRESH_BINARY)
    # localMax = peak_local_max(D, indices=False, min_distance=1, labels=closed)
    # markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
    # labels = watershed(-D, markers, mask=closed)
    # boundingBoxs = []
    # scores = []
    # for i in range(1, labels.max()+1):
    #     mask_temp = np.where(labels==i, np.uint8(255), np.uint8(0))
    #     contours, _ = cv.findContours(mask_temp.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    #     for contour in contours:
    #         rect = cv.minAreaRect(contour)
    #         poly = cv.boxPoints(rect)
    #         box = np.int0(poly)
    #         boundingBoxs.append(box)
    #         scores.append(get_confidence_score(image, box))
    # #print(t-time.time())
    # return boundingBoxs, scores
    # comment up to watershed and uncomment bellow as it is
    #contours, _ = cv.findContours(closed.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    boundingBoxs = []
    scores = []
    for contour in contours:
        rect = cv.minAreaRect(contour)
        ############ polygone bboxes ##############
        # The rotated box corners are only used for the confidence score.
        poly = cv.boxPoints(rect)
        box = np.int0(poly)
        #boundingBoxs.append(box)
        ############ Up Right bboxes ##############
        x, y, w, h = cv.boundingRect(contour)
        temp = [y - correction_margin, x - correction_margin, y + h + correction_margin, x + w + correction_margin]
        boundingBoxs.append(temp)
        scores.append(get_confidence_score_(image, box))
    return boundingBoxs, scores

def normalize_to_gray(im):
    """Convert to grayscale, Otsu-binarize, and rescale to the range [0, 1]."""
    if len(im.shape) > 2:
        im = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
    _, im = cv.threshold(((im*255).astype(np.uint8)).copy(),0 ,255 ,cv.THRESH_OTSU+cv.THRESH_BINARY)
    im = im / im.max()
    return im

def calc_iou_individual(pred_box, gt_box):
    """Intersection-over-union of two [y1, x1, y2, x2] boxes (0.0 if disjoint)."""
    x1_t, y1_t, x2_t, y2_t = gt_box
    x1_p, y1_p, x2_p, y2_p = pred_box
    # if (x1_p > x2_p) or (y1_p > y2_p):
    #     raise AssertionError(
    #         "Prediction box is malformed? pred box: {}".format(pred_box))
    # if (x1_t > x2_t) or (y1_t > y2_t):
    #     raise AssertionError(
    #         "Ground Truth box is malformed? true box: {}".format(gt_box))
    if (x2_t < x1_p or x2_p < x1_t or y2_t < y1_p or y2_p < y1_t):
        return 0.0
    far_x = np.min([x2_t, x2_p])
    near_x = np.max([x1_t, x1_p])
    far_y = np.min([y2_t, y2_p])
    near_y = np.max([y1_t, y1_p])
    # +1 terms treat coordinates as inclusive pixel indices
    inter_area = (far_x - near_x + 1) * (far_y - near_y + 1)
    true_box_area = (x2_t - x1_t + 1) * (y2_t - y1_t + 1)
    pred_box_area = (x2_p - x1_p + 1) * (y2_p - y1_p + 1)
    iou = inter_area / (true_box_area + pred_box_area - inter_area)
    return iou

def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
    """
    Greedy one-to-one matching of predictions to ground truths at iou_thr.
    Returns {'true_pos', 'false_pos', 'false_neg'} counts for one image.
    """
    all_pred_indices = range(len(pred_boxes))
    all_gt_indices = range(len(gt_boxes))
    if len(all_pred_indices) == 0:
        #print('A')
        # no predictions: every ground truth is a miss
        tp = 0
        fp = 0
        fn = len(gt_boxes)
        #pp.pprint({'true_pos': tp, 'false_pos': fp, 'false_neg': fn})
        return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
    if len(all_gt_indices) == 0:
        #print('B')
        # no ground truths: every prediction is spurious
        tp = 0
        fp = len(pred_boxes)
        fn = 0
        #pp.pprint({'true_pos': tp, 'false_pos': fp, 'false_neg': fn})
        return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
    # collect every (gt, pred) pair above the threshold
    gt_idx_thr = []
    pred_idx_thr = []
    ious = []
    for ipb, pred_box in enumerate(pred_boxes):
        for igb, gt_box in enumerate(gt_boxes):
            iou = calc_iou_individual(pred_box, gt_box)
            if iou > iou_thr:
                gt_idx_thr.append(igb)
                pred_idx_thr.append(ipb)
                ious.append(iou)
                #print(iou)
    # match pairs greedily from highest IoU down
    args_desc = np.argsort(ious)[::-1]
    if len(args_desc) == 0:
        #print('C')
        # No matches
        tp = 0
        fp = len(pred_boxes)
        fn = len(gt_boxes)
    else:
        #print('D')
        gt_match_idx = []
        pred_match_idx = []
        for idx in args_desc:
            gt_idx = gt_idx_thr[idx]
            pr_idx = pred_idx_thr[idx]
            # If the boxes are unmatched, add them to matches
            if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
                gt_match_idx.append(gt_idx)
                pred_match_idx.append(pr_idx)
        tp = len(gt_match_idx)
        fp = len(pred_boxes) - len(pred_match_idx)
        fn = len(gt_boxes) - len(gt_match_idx)
    #pp.pprint({'true_pos': tp, 'false_pos': fp, 'false_neg': fn})
    return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}

def get_confidence_score_(im_mask, bb):
    """
    Average mask value over the pixels inside the (rotated) box `bb`.
    NOTE(review): Polygon/Point are not imported here — presumably shapely
    via `from metrics import *`; the 400 bound presumably matches the mask
    size — confirm both.
    """
    cnt = 0
    pixel_acc = 0
    im_w = im_mask.shape[0]
    im_h = im_mask.shape[1]
    poly = Polygon(bb)
    minx, miny, maxx, maxy = list(np.int_(poly.bounds))
    for row in range(minx, min( 400, maxx)):
        for col in range(miny, min(400, maxy)):
            p_temp = Point(row, col)
            if p_temp.within(poly):
                cnt += 1
                pixel_acc = pixel_acc + im_mask[col, row]#/im_mask.max()
    if cnt == 0:
        return 0
    avg_pix_value = pixel_acc/cnt
    return avg_pix_value

# +
def result_dict_pred(predictions, findBoundingBoxss=findBoundingboxss, threshold=0.5, correction_margin=0):
    """
    Binarize each image and collect its boxes/scores into
    {'img_<k>': {'boxes': [...], 'scores': [...]}}. Images that failed to
    load (None) are skipped but still consume an index k.
    """
    k = 0
    pred_boxs = {}
    for prediction in tqdm(predictions):
        if prediction is None:
            k+=1
            continue
        prediction = (normalize_to_gray(prediction) > threshold).astype(np.uint8)
        bbs_pred, confidences = findBoundingBoxss(prediction, correction_margin)
        # Pred Boxes dict
        pred_boxs["img_{:d}".format(k)]= {"boxes": bbs_pred, "scores": confidences}
        k+=1
    return pred_boxs

def gt_extract(gt_dict):
    """
    Flatten a boxes/scores dict to img_id -> boxes list.
    NOTE(review): this mutates gt_dict IN PLACE (which is what the later
    get_avg_precision_at_iou3 calls rely on) and returns the never-filled
    `gt_ex`, i.e. an empty dict — the return value is effectively unused.
    """
    gt_ex = {}
    for key in gt_dict:
        gt_dict[key] = gt_dict[key]['boxes']
    return gt_ex
# -

pred_boxes_dict = result_dict_pred(predictions, findBoundingBoxss=findBoundingboxss, threshold=0.5, correction_margin=4)
gt_boxes_dict = result_dict_pred(ground_truths, findBoundingBoxss=findBoundingboxss, threshold=0.5, correction_margin=4)
# Side effect: converts gt_boxes_dict values to plain box lists (see NOTE above).
copy_gt = gt_extract(gt_boxes_dict)

# +
def get_model_scores_map(pred_boxes):
    """Map each confidence score to the list of image ids that contain it."""
    model_scores_map = {}
    for img_id, val in pred_boxes.items():
        for score in val['scores']:
            if score not in model_scores_map.keys():
                model_scores_map[score] = [img_id]
            else:
                model_scores_map[score].append(img_id)
    return model_scores_map

def get_avg_precision_at_iou3(gt_boxes, pred_boxes, iou_thr=0.5):
    """
    Sweep the confidence threshold over every observed score, recompute
    per-image TP/FP/FN at each step, and return the 11-point interpolated
    average precision together with the raw precision/recall/threshold
    arrays. `gt_boxes` maps img_id -> list of boxes; `pred_boxes` maps
    img_id -> {'boxes', 'scores'} (its lists are re-sorted in place).
    """
    model_scores_map = get_model_scores_map(pred_boxes)
    sorted_model_scores = sorted(model_scores_map.keys())
    # Sort the predicted boxes in descending order (lowest scoring boxes first):
    for img_id in pred_boxes.keys():
        arg_sort = np.argsort(pred_boxes[img_id]['scores'])
        pred_boxes[img_id]['scores'] = np.array(pred_boxes[img_id]['scores'])[arg_sort].tolist()
        pred_boxes[img_id]['boxes'] = np.array(pred_boxes[img_id]['boxes'])[arg_sort].tolist()
    pred_boxes_pruned = deepcopy(pred_boxes)
    precisions = []
    recalls = []
    model_thrs = []
    img_results = {}
    # Loop over model score thresholds and calculate precision, recall
    for ithr, model_score_thr in tqdm(enumerate(sorted_model_scores[:-1]), total=len(sorted_model_scores)):
        # On first iteration, define img_results for the first time:
        img_ids = gt_boxes.keys() if ithr == 0 else model_scores_map[model_score_thr]
        for img_id in img_ids:
            gt_boxes_img = gt_boxes[img_id]
            if img_id in pred_boxes_pruned:
                box_scores = pred_boxes_pruned[img_id]['scores']
            else:
                box_scores = []
                pred_boxes_pruned[img_id] = {}
                pred_boxes_pruned[img_id]['scores'] = []
                pred_boxes_pruned[img_id]['boxes'] = []
            start_idx = 0
            for score in box_scores:
                if score <= model_score_thr:
                    # NOTE(review): this bare subscript is a no-op statement —
                    # presumably leftover from an edit; only start_idx matters.
                    pred_boxes_pruned[img_id]
                    start_idx += 1
                else:
                    break
            # Remove boxes, scores of lower than threshold scores:
            pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]
            pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]
            # Recalculate image results for this image
            img_results[img_id] = get_single_image_results(
                gt_boxes_img, pred_boxes_pruned[img_id]['boxes'], iou_thr)
        prec, rec = calc_precision_recall(img_results)
        precisions.append(prec)
        recalls.append(rec)
        model_thrs.append(model_score_thr)
    precisions = np.array(precisions)
    recalls = np.array(recalls)
    # 11-point interpolated AP: best precision at recall >= each level.
    prec_at_rec = []
    for recall_level in np.linspace(0.0, 1.0, 11):
        try:
            args = np.argwhere(recalls >= recall_level).flatten()
            prec = max(precisions[args])
        except ValueError:
            # no operating point reaches this recall level
            prec = 0.0
        prec_at_rec.append(prec)
    avg_prec = np.mean(prec_at_rec)
    return {
        'avg_prec': avg_prec,
        'precisions': precisions,
        'recalls': recalls,
        'model_thrs': model_thrs}

def calc_precision_recall(img_results):
    """Aggregate per-image TP/FP/FN dicts into one (precision, recall) pair."""
    true_pos = 0; false_pos = 0; false_neg = 0
    for _, res in img_results.items():
        true_pos += res['true_pos']
        false_pos += res['false_pos']
        false_neg += res['false_neg']
    try:
        precision = true_pos/(true_pos + false_pos)
    except ZeroDivisionError:
        precision = 0.0
    try:
        recall = true_pos/(true_pos + false_neg)
    except ZeroDivisionError:
        recall = 0.0
    return (precision, recall)
# -

# +
results = get_avg_precision_at_iou3(gt_boxes_dict, pred_boxes_dict, iou_thr=0.5)
ap = results['avg_prec']
p = results['precisions']
r = results ['recalls']
print('AP = ', ap)
print('prec = ', p[-1])
print('rec = ', r[-1])
# +
results = get_avg_precision_at_iou3(gt_boxes_dict, pred_boxes_dict, iou_thr=0.9)
ap = results['avg_prec']
p = results['precisions']
r = results ['recalls']
print('AP = ', ap)
print('prec = ', p[-1])
print('rec = ', r[-1])
# +
# visual check: draw predicted (red) and ground-truth (green) boxes on both masks
target_image_idx = 367#np.random.randint(start_idx, end_idx)
#pp.pprint(pred_boxes_dict['img_{:d}'.format(target_image_idx)])
pred_bboxes_coord = pred_boxes_dict['img_{:d}'.format(target_image_idx)]['boxes']
pred_bboxes_scores = pred_boxes_dict['img_{:d}'.format(target_image_idx)]['scores']
gt_bboxes_coord = gt_boxes_dict['img_{:d}'.format(target_image_idx)]
xx = (np.squeeze(ground_truths[target_image_idx]*255).astype(np.uint8)).copy()
# xx = cv.cvtColor(xx, cv.COLOR_GRAY2RGB)
xx_pred = (np.squeeze(predictions[target_image_idx]*255).astype(np.uint8)).copy()
# xx_pred = cv.cvtColor(xx_pred, cv.COLOR_GRAY2RGB)
# boxes are stored [y1, x1, y2, x2]; cv.rectangle takes (x, y, w, h) here
for bbox_pred in pred_bboxes_coord:
    cv.rectangle(xx, (bbox_pred[1], bbox_pred[0], bbox_pred[3]-bbox_pred[1], bbox_pred[2]-bbox_pred[0]), (255, 0, 0), 1)
    cv.rectangle(xx_pred, (bbox_pred[1], bbox_pred[0], bbox_pred[3]-bbox_pred[1], bbox_pred[2]-bbox_pred[0]), (255, 0, 0), 1)
for bbox_gt in gt_bboxes_coord:
    cv.rectangle(xx, (bbox_gt[1], bbox_gt[0], bbox_gt[3]-bbox_gt[1], bbox_gt[2]-bbox_gt[0]), (0, 255, 0), 1)
    cv.rectangle(xx_pred, (bbox_gt[1], bbox_gt[0], bbox_gt[3]-bbox_gt[1], bbox_gt[2]-bbox_gt[0]), (0, 255, 0), 1)
show_images2(xx_pred, xx, title1='Mask RCNN pred mask, RED = Predictions - GREEN = Ground Truths', title2 = 'gt mask, RED = Predictions - GREEN = Ground Truths')
# -

def show_images2 (Im1, Im2, title1="", title2 = ""):
    """Display two grayscale images side by side with the given titles."""
    display_window_edge_size = 15
    width = display_window_edge_size
    height = display_window_edge_size
    fig = plt.figure(figsize=[width,height])
    axis1 = fig.add_subplot(121)
    axis1.imshow(Im1, cmap="gray")
    axis1.set(xticks=[],yticks=[],title =title1)
    axis2 = fig.add_subplot(122)
    axis2.imshow(Im2, cmap="gray")
    axis2.set(xticks=[],yticks=[],title =title2)

im = predictions[0]

# +
# Pixel-level accuracy: fraction of pixels whose binarized prediction and
# ground truth differ by less than 0.5 (images assumed 400x400; skip pairs
# with a missing image).
total_pixels = 0
correct = 0
for i in tqdm(range(len(predictions))):
    if (predictions[i] is None) or ground_truths[i] is None:
        continue
    pred = normalize_to_gray(predictions[i])
    gt = normalize_to_gray(ground_truths[i])
    diff = np.abs(pred - gt)
    total_pixels += 400*400
    correct += len(diff[np.where(diff < 0.5)])
# -

im = normalize_to_gray(im)

correct / total_pixels

len(diff[np.where(diff < 0.5)])

diff.shape
samples/barcode/eval_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # `JSONASOBJ`
# An extension to the python `json` library that exposes parsed JSON as a
# first-class python object: members are reachable both as attributes
# (`obj.name`) and with plain dictionary syntax (`obj['name']`).
#
# ## Requirements
#
# * Python (3.0 or later)
#
# ## Installation
#
# ```bash
# pip install jsonasobj
# ```
#
# ## Short example

# +
import jsonasobj
from pprint import PrettyPrinter

# Bound method used at the end to dump the plain-dict form of the object.
pretty = PrettyPrinter().pprint

# A small JSON-LD document used to demonstrate attribute-style access.
test_json = """{
  "@context": {
    "name": "http://xmlns.com/foaf/0.1/name",
    "knows": "http://xmlns.com/foaf/0.1/knows",
    "menu": {
      "@id": "name:foo",
      "@type": "@id"
    }
  },
  "@id": "http://me.markus-lanthaler.com/",
  "name": "<NAME>",
  "knows": [
    {
      "name": "<NAME>",
      "menu": "something",
      "modelDate" : "01/01/2015"
    }
  ]
}"""

# Parse, then mutate the resulting object in place: attribute assignment,
# list append, and dictionary-style deletion all work on the parsed tree.
py_obj = jsonasobj.loads(test_json)
py_obj.knows[0].extra = {'age': 17}
py_obj.knows.append({'name': '<NAME>'})
del py_obj.knows[0]['menu']

# Attribute access and subscript access are interchangeable; keys that are
# not valid python identifiers (e.g. "@context") require the subscript form.
print(py_obj.name)
print(py_obj['name'])
print(py_obj.knows[0].name)
print(py_obj['@context'].name)
print(jsonasobj.as_json(py_obj))
pretty(jsonasobj.as_dict(py_obj))
# -
notebooks/readme.ipynb