code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Initial Data Cleaning and Exploration
# Code for the initial data cleaning and exploration done before modeling
# _Author: <NAME>_
# _Email: <EMAIL>_

# # Directory & Packages

import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import spacy
import pysentiment
from textstat.textstat import textstat
from wordcloud import WordCloud
import nltk
from bs4 import BeautifulSoup

# The default directory is the code subdirectory. Changing to the main repo
# directory above. (os.chdir returns None, so there is nothing to capture.)
os.chdir("..")


# # Helper Functions

def pd_tab(df, col, sort_by='count', asc=False):
    """Return a frequency table for ``df[col]``.

    Includes NaN as its own category, and adds a ``percent`` column with
    each category's share of all rows. Sorted by ``sort_by`` (default the
    raw count), descending unless ``asc`` is True.
    """
    tab = df[col].value_counts(dropna=False).reset_index(name='count')
    tab.columns = [col, 'count']
    tab['percent'] = tab['count'] / tab['count'].sum()
    tab.sort_values(by=sort_by, inplace=True, ascending=asc)
    return tab


# # Upload Data

raw_data = pd.read_csv("./raw_data/Reviews.csv")

raw_data.head()

# ### Inspecting the Raw Features

raw_data.columns

len(raw_data)

# **Data Key**
#
# * **product/productId:** asin, e.g. amazon.com/dp/B001E4KFG0
# * **review/userId:** id of the user, e.g. A3SGXH7AUHU8GW
# * **review/profileName:** name of the user
# * **review/helpfulness:** fraction of users who found the review helpful
# * **review/score:** rating of the product
# * **review/time:** time of the review (unix time)
# * **review/summary:** review summary
# * **review/text:** text of the review

# ##### ID

raw_data.Id.is_unique

# ##### Product ID

len(raw_data.ProductId.unique())

len(raw_data.ProductId.unique()) / len(raw_data)

pd_tab(raw_data, 'ProductId').head(10)

pd_tab(raw_data, 'ProductId').tail(10)

# ##### UserID

len(raw_data.UserId.unique())

len(raw_data.UserId.unique()) / len(raw_data)

pd_tab(raw_data, 'UserId').head(10)

pd_tab(raw_data, 'UserId').tail(10)

# ##### Profile Name

# ignoring

# ##### Helpfulness Numerator

raw_data.HelpfulnessNumerator.isnull().sum()

np.sum(raw_data.HelpfulnessNumerator == 0)

np.sum(raw_data.HelpfulnessNumerator == 0) / len(raw_data)

# At least 53% are not helpful

raw_data.HelpfulnessNumerator.describe()

g = sns.distplot(raw_data.HelpfulnessNumerator)
g.axes.set_ylim(0,)
g.axes.set_xlim(0,)
g.axes.set_title('Number Found Helpful\n', fontsize=20)
g.set_xlabel('Counts', fontsize=15)

# Very skewed

g = sns.distplot(raw_data[raw_data.HelpfulnessNumerator > 0].HelpfulnessNumerator)
g.axes.set_ylim(0,)
g.axes.set_xlim(0,)
g.axes.set_title('Number Found Helpful\n(Non-Zero Counts)', fontsize=20)
g.set_xlabel('Counts', fontsize=15)

g = sns.distplot(raw_data[raw_data.HelpfulnessNumerator < 100].HelpfulnessNumerator)
g.axes.set_ylim(0,)
g.axes.set_xlim(0,)
g.axes.set_title('Number Found Helpful\n(Counts Less than 100)', fontsize=20)
g.set_xlabel('Counts', fontsize=15)

# ##### Helpfulness Denominator

raw_data.HelpfulnessDenominator.isnull().sum()

np.sum(raw_data.HelpfulnessDenominator == 0)

np.sum(raw_data.HelpfulnessDenominator == 0) / len(raw_data)

raw_data.HelpfulnessDenominator.describe()

raw_data[raw_data.HelpfulnessDenominator > 100].HelpfulnessDenominator.describe()

g = sns.distplot(raw_data.HelpfulnessDenominator)
g.axes.set_ylim(0,)
g.axes.set_xlim(0,)
g.axes.set_title('Number Found Helpful or Unhelpful\n', fontsize=20)
g.set_xlabel('Counts', fontsize=15)

# ##### Helpfulness Numerator/Denominator

# Rows where more users found the review helpful than voted at all are
# logically impossible -- inspect them, then drop them.
len(raw_data[raw_data.HelpfulnessDenominator < raw_data.HelpfulnessNumerator])

raw_data[raw_data.HelpfulnessDenominator < raw_data.HelpfulnessNumerator]

raw_data = raw_data.loc[~(raw_data.HelpfulnessDenominator < raw_data.HelpfulnessNumerator)]

raw_data['Unhelpful'] = raw_data.HelpfulnessDenominator - raw_data.HelpfulnessNumerator

g = sns.regplot(x="HelpfulnessNumerator", y="Unhelpful",
                data=raw_data[raw_data.HelpfulnessDenominator < 100], fit_reg=False)
g.axes.set_ylim(0,)
g.axes.set_xlim(0,)
g.axes.set_title('Number Found Helpful vs Unhelpful\n', fontsize=20)
g.set_xlabel('No. Found Helpful', fontsize=15)
g.set_ylabel('No. Found Unhelpful', fontsize=15)

# Proportion of voters who found the review helpful; reviews with no votes
# at all are treated as 0% helpful.
raw_data['ppt_helpful'] = raw_data.HelpfulnessNumerator / raw_data.HelpfulnessDenominator
# DataFrame.ix was deprecated and removed from pandas; .loc is the
# supported label/boolean indexer and is behaviorally identical here.
raw_data.loc[raw_data.HelpfulnessDenominator == 0, 'ppt_helpful'] = 0

g = sns.distplot(raw_data.ppt_helpful)
g.axes.set_ylim(0,)
g.axes.set_xlim(0,)
g.axes.set_title('Percent Helpful\n', fontsize=20)
g.set_xlabel('Percent', fontsize=15)

probs = list(np.linspace(start=0, stop=1, num=20))
raw_data.ppt_helpful.describe(percentiles=probs)

probs = list(np.linspace(start=0, stop=1, num=20))
for p in probs:
    ppt = np.sum(raw_data.ppt_helpful < p) / len(raw_data)
    print('Less than {}% Helpful: {}%'.format(round(p * 100, 2),
                                              round(ppt * 100, 2)))

for p in probs:
    ppt = np.sum(raw_data.ppt_helpful >= p) / len(raw_data)
    print('At Least {}% Helpful: {}%'.format(round(p * 100, 2),
                                             round(ppt * 100, 2)))

np.sum((raw_data.ppt_helpful >= 0.5) & (raw_data.ppt_helpful <= .8)) / len(raw_data)

np.sum((raw_data.ppt_helpful >= 0.7) & (raw_data.ppt_helpful <= .8)) / len(raw_data)

np.sum((raw_data.ppt_helpful >= 0.8) & (raw_data.ppt_helpful <= .9)) / len(raw_data)

np.sum((raw_data.ppt_helpful >= 0.7) & (raw_data.ppt_helpful <= .9)) / len(raw_data)

np.sum((raw_data.ppt_helpful >= 0.9) & (raw_data.ppt_helpful <= 1)) / len(raw_data)

# Will probably define helpful reviews as those +90%

g = sns.regplot(x="HelpfulnessDenominator", y="ppt_helpful",
                data=raw_data[raw_data.HelpfulnessDenominator < 100], fit_reg=False)
g.axes.set_ylim(0, 1)
g.axes.set_xlim(0,)
g.axes.set_title('Percent Helpful vs Total Found Helpful or Unhelpful\n', fontsize=20)
g.set_xlabel('No. Found Helpful or Unhelpful', fontsize=15)
g.set_ylabel('Percent Found Helpful', fontsize=15)

# Interesting pattern. Difficult to understand what this means.

# ##### Product Rating

raw_data.Score.isnull().sum()

raw_data.Score.describe()

pd_tab(raw_data, 'Score', sort_by='Score')

g = sns.lmplot(x="HelpfulnessDenominator", y="ppt_helpful",
               data=raw_data[raw_data.HelpfulnessDenominator < 100],
               hue='Score', fit_reg=False)

# For this analysis I will assume that the helpfulness prediction will be
# made without the produce score

# ##### Time

raw_data.Time.head()

raw_data['date_time'] = pd.to_datetime(raw_data['Time'], unit='s')
# NOTE(review): `unit` is ignored when the input is already datetime-like,
# so 'date' is simply a copy of 'date_time' here; the raw timestamps appear
# to be day-resolution anyway, so downstream daily groupbys still work.
raw_data['date'] = pd.to_datetime(raw_data['date_time'], unit='d')

raw_data.date_time.describe()

raw_data.date.describe()

# Median Percent Helpful
ts = raw_data[['date', 'ppt_helpful']].copy()
ts['ppt_helpful'] = ts.groupby(['date']).ppt_helpful.transform('median')
ts.set_index(['date'], inplace=True)
ts.sort_index(inplace=True)
ts.plot()

# Average Helpfulness
ts = raw_data[['date', 'ppt_helpful']].copy()
ts['ppt_helpful'] = ts.groupby(['date']).ppt_helpful.transform('mean')
ts.set_index(['date'], inplace=True)
ts.sort_index(inplace=True)
ts.plot()

# Count Reviews
ts = raw_data[['date', 'ppt_helpful']].copy()
ts['ppt_helpful'] = ts.groupby(['date']).ppt_helpful.transform('count')
ts.set_index(['date'], inplace=True)
ts.sort_index(inplace=True)
ts.plot()

ts.head()

ts.tail()

len(raw_data[raw_data.date >= pd.to_datetime('2010-01-01')])

len(raw_data[raw_data.date >= pd.to_datetime('2012-01-01')])

raw_data['year'] = raw_data.date.dt.year

pd_tab(raw_data, 'year', sort_by='year')

tab = raw_data.groupby(['year']).ppt_helpful.mean().reset_index().sort_values(by='year')
tab

# The helpfulness scores are definitely non-stationary across years. I will
# just use the data from 2012.

# # Building Model Training Data

# ### Limiting Sample

raw_data_2 = raw_data[(raw_data.year == 2012)].copy()

# ### Defining Predictor

# Binary target: 1.0 if at least 90% of voters found the review helpful.
raw_data_2['helpful'] = (raw_data_2.ppt_helpful >= 0.9).astype(float)

pd_tab(raw_data_2, 'helpful')

del raw_data

# ### Minor Prelim Text Cleaning

# Strip embedded HTML markup from the review text before parsing.
raw_data_2['Text'] = raw_data_2['Text'].apply(lambda x: BeautifulSoup(x, 'lxml').get_text())

# ### New Features

nlp = spacy.load('en')

# Stable 0-based row id used to merge the document vectors back in below.
# Deliberately built with linspace (float dtype) so the merge key's dtype
# matches the float matrix the vectors are stacked into.
raw_data_2['doc_id'] = (np.linspace(start=1, stop=len(raw_data_2), num=len(raw_data_2)) - 1)

raw_data_2['doc_id'].head()

parse_doc_list = []
parse_doc_list_id = []
i = 0
for doc in nlp.pipe(raw_data_2.Text.astype(str), batch_size=10000, n_threads=4):
    parse_doc_list.append(doc)
    parse_doc_list_id.append(i)
    i = i + 1

raw_data_2['parsed_text'] = parse_doc_list

type(parse_doc_list[0])

# ##### Document Vector

doc_vecs = np.row_stack([doc.vector for doc in parse_doc_list])
doc_vecs = np.column_stack((doc_vecs, parse_doc_list_id))

doc_vecs.shape

len(raw_data_2)

doc_vecs = pd.DataFrame(doc_vecs)
cols = ['vec' + str(s) for s in doc_vecs.columns]
cols[-1] = 'doc_id'
doc_vecs.columns = cols

doc_vecs.to_pickle('./clean_data/doc_vecs.pkl')

raw_data_2 = pd.merge(raw_data_2, doc_vecs, how='left', on=['doc_id'])


# ##### Word and Sentence Count

# +
def sent_count(X):
    """Number of sentences in a parsed spaCy doc."""
    return len([x for x in X.sents])


def word_count(X):
    """Number of tokens in a parsed spaCy doc."""
    return len(X)
# -

raw_data_2['num_sents'] = raw_data_2['parsed_text'].apply(sent_count)
raw_data_2['num_words'] = raw_data_2['parsed_text'].apply(word_count)

# ##### Readability

raw_data_2['readability'] = raw_data_2['Text'].apply(textstat.automated_readability_index)

# ##### Sentiment

from nltk.sentiment.vader import SentimentIntensityAnalyzer
sent_analyzer = SentimentIntensityAnalyzer()

raw_data_2['sentiment_dict'] = raw_data_2['Text'].apply(sent_analyzer.polarity_scores)
raw_data_2['neg_senti'] = raw_data_2['sentiment_dict'].apply(lambda x: x['neg'])
raw_data_2['pos_senti'] = raw_data_2['sentiment_dict'].apply(lambda x: x['pos'])
raw_data_2['neu_senti'] = raw_data_2['sentiment_dict'].apply(lambda x: x['neu'])
raw_data_2['comp_senti'] = raw_data_2['sentiment_dict'].apply(lambda x: x['compound'])


# ##### Word Count Type Features

def return_lemma_text(text):
    '''
    Return space separated lemmas, excluding spaces, urls, #s, emails,
    stop words, and proper nouns
    '''
    return ' '.join([t.lemma_.lower() for t in text
                     if (t.is_punct == False) &
                        (t.is_space == False) &
                        (t.like_url == False) &
                        (t.like_num == False) &
                        (t.like_email == False) &
                        (t.is_stop == False) &
                        (t.pos_ != 'PROPN')])


raw_data_2['text_lemma'] = raw_data_2['parsed_text'].apply(return_lemma_text)

raw_data_2['Text'].head()

raw_data_2['text_lemma'].head()

# spaCy Doc objects cannot be pickled with the frame; drop before saving.
del raw_data_2['parsed_text']

raw_data_2.to_pickle('./clean_data/raw_data_post_parse.pkl')

# ### The Obligatory NLP Word Clouds

# All Reviews
text = ' '.join(raw_data_2.text_lemma)
wordcloud = WordCloud(max_font_size=40).generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.savefig('./plots/all_reviews_word_cloud.png', bbox_inches='tight')
plt.show()

# Helpful Reviews
text = ' '.join(raw_data_2[raw_data_2.helpful == 1].text_lemma)
wordcloud = WordCloud(max_font_size=40).generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.savefig('./plots/helpful_reviews_word_cloud.png', bbox_inches='tight')
plt.show()

# Unhelpful Reviews
text = ' '.join(raw_data_2[raw_data_2.helpful == 0].text_lemma)
wordcloud = WordCloud(max_font_size=40).generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.savefig('./plots/unhelpful_reviews_word_cloud.png', bbox_inches='tight')
plt.show()
code/data_cleaning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

# # Train Models
#
# A central goal of machine learning is to train predictive models that
# applications can use. In Azure Machine Learning you can use scripts that
# leverage common frameworks such as Scikit-Learn, TensorFlow, PyTorch and
# SparkML to train models, run those scripts as experiments, and track
# metrics and outputs (including the trained model).
#
# ## Install the Azure Machine Learning SDK
#
# The Azure Machine Learning SDK is updated frequently. Run the cell below
# to upgrade to the latest release, together with the additional package
# that supports notebook widgets.

# !pip install --upgrade azureml-sdk azureml-widgets

# ## Connect to your workspace
#
# With the latest version of the SDK installed, you can connect to your
# workspace.
#
# > **Note**: If you have not already established an authenticated session
# > with your Azure subscription, you will be prompted to sign in by
# > clicking a link and entering an authentication code.

# +
import azureml.core
from azureml.core import Workspace

# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -

# ## Create a training script
#
# You will use a Python script to train a machine learning model based on
# the diabetes data. Start by creating a folder for the script and the data
# file.

# +
import os, shutil

# Create a folder for the experiment files
training_folder = 'diabetes-training'
os.makedirs(training_folder, exist_ok=True)

# Copy the data file into the experiment folder
shutil.copy('data/diabetes.csv', os.path.join(training_folder, "diabetes.csv"))
# -

# Now you are ready to create the training script and save it to the folder.
#
# > **Note**: this code *creates* the script -- it does not run it!

# +
# %%writefile $training_folder/diabetes_training.py
# Import libraries
from azureml.core import Run
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# Get the experiment run context
run = Run.get_context()

# Load the diabetes dataset
print("Loading Data...")
diabetes = pd.read_csv('diabetes.csv')

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Set the regularization hyperparameter
reg = 0.01

# Train a logistic regression model.
# NOTE: np.float was deprecated and removed from NumPy (1.24+); the
# builtin float is the correct, equivalent type here.
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)

# Calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))

# Calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test, y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

# Save the trained model in the outputs folder
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')

run.complete()
# -

# ## Run the training script as an experiment
#
# Now you can run the script as an experiment. Note that the default
# environment does not include the **scikit-learn** package, so you need to
# add it to the configuration explicitly. The conda environment is built on
# demand the first time the experiment is run, and cached for future runs
# that use the same configuration -- so the first run takes a little longer.

# +
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.widgets import RunDetails

# Create a Python environment for the experiment
sklearn_env = Environment("sklearn-env")

# Ensure the required packages are installed (scikit-learn and the Azure ML
# defaults are needed)
packages = CondaDependencies.create(pip_packages=['scikit-learn','azureml-defaults'])
sklearn_env.python.conda_dependencies = packages

# Create a script config
script_config = ScriptRunConfig(source_directory=training_folder,
                                script='diabetes_training.py',
                                environment=sklearn_env)

# Submit the experiment run
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)

# Show the running experiment run in the notebook widget
RunDetails(run).show()

# Block until the experiment run has completed
run.wait_for_completion()
# -

# You can retrieve the metrics and outputs from the **Run** object.

# Get logged metrics and files
metrics = run.get_metrics()
for key in metrics.keys():
    print(key, metrics.get(key))
print('\n')
for file in run.get_file_names():
    print(file)

# ## Register the trained model
#
# Note that the outputs of the experiment include the trained model file
# (**diabetes_model.pkl**). You can register this model in your Azure
# Machine Learning workspace, making it possible to track model versions
# and retrieve them later.

# +
from azureml.core import Model

# Register the model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context':'Script'},
                   properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})

# List registered models
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print('\t', tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print('\t', prop_name, ':', prop)
    print('\n')
# -

# ## Create a parameterized training script
#
# You can make the training experiment more flexible by adding parameters
# to the script, allowing the same experiment to be repeated with different
# settings. In this case, you will add a parameter for the regularization
# rate used by the logistic regression algorithm when training the model.
#
# Create a folder for the parameterized script and training data again.

# +
import os, shutil

# Create a folder for the experiment files
training_folder = 'diabetes-training-params'
os.makedirs(training_folder, exist_ok=True)

# Copy the data file into the experiment folder
shutil.copy('data/diabetes.csv', os.path.join(training_folder, "diabetes.csv"))
# -

# Next, create the script with an argument for the regularization rate
# hyperparameter. Arguments are read using a Python
# **argparse.ArgumentParser** object.

# +
# %%writefile $training_folder/diabetes_training.py
# Import libraries
from azureml.core import Run
import pandas as pd
import numpy as np
import joblib
import os
import argparse
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# Get the experiment run context
run = Run.get_context()

# Read the regularization hyperparameter from the command line
parser = argparse.ArgumentParser()
parser.add_argument('--reg_rate', type=float, dest='reg', default=0.01)
args = parser.parse_args()
reg = args.reg

# Load the diabetes dataset
print("Loading Data...")
diabetes = pd.read_csv('diabetes.csv')

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Train a logistic regression model.
# NOTE: np.float was removed from NumPy; use the builtin float.
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)

# Calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', float(acc))

# Calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test, y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

# Save the trained model in the outputs folder
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')

run.complete()
# -

# ## Run the script with arguments
#
# Run the script as an experiment as before, reusing the environment you
# created; this time you must supply the **--reg_rate** parameter the
# script expects as an argument.

# +
# Create a script config
script_config = ScriptRunConfig(source_directory=training_folder,
                                script='diabetes_training.py',
                                arguments = ['--reg_rate', 0.1],
                                environment=sklearn_env)

# Submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
# -

# Once again, you can retrieve the metrics and outputs from the completed
# run.

# Get logged metrics
metrics = run.get_metrics()
for key in metrics.keys():
    print(key, metrics.get(key))
print('\n')
for file in run.get_file_names():
    print(file)

# ## Register a new version of the model
#
# Now that you have trained a new model, you can register it as a new
# version in the workspace.

# +
from azureml.core import Model

# Register the model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context':'Parameterized script'},
                   properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})

# List registered models
for model in Model.list(ws):
    print(model.name, 'version:', model.version)
    for tag_name in model.tags:
        tag = model.tags[tag_name]
        print('\t', tag_name, ':', tag)
    for prop_name in model.properties:
        prop = model.properties[prop_name]
        print('\t', prop_name, ':', prop)
    print('\n')
# -

# You can also view the registered models in your workspace on the
# **Models** page of [Azure Machine Learning studio](https://ml.azure.com).
#
# When you have finished exploring, you can close this notebook and shut
# down your compute instance.
03B - Training Models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/micida33/python_codes/blob/main/git.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="N3Qi1ACBNuC8"
# # Simple Linux commands

# + id="xoQauMO_EcJF" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7b37415e-e972-4ee3-d1b8-0c25c95f51a1"
# List the current directory
# ! ls

# + id="4ivT3TXCEqH1"
# Show a file's contents
# ! cat hello.py

# + [markdown] id="iMaj8kDOOkpo"
# # Steps to push to GitHub
#
# - Mount Google Drive
# - clone
# - config
# - add
# - commit
# - set origin
# - push

# + id="3_874BwfN5Cl" colab={"base_uri": "https://localhost:8080/"} outputId="aeec14ae-e3c9-48ca-ff2d-118ff11f7df6"
# Mount Google Drive
from google.colab import drive
drive.mount('/content/gdrive')

# + id="PNjSk3t6PIag" colab={"base_uri": "https://localhost:8080/"} outputId="75b01a82-f0f0-4e2c-fd4e-7484b017db2c"
# Git clone -- change into the repository directory
# %cd /content/gdrive/MyDrive/Gitdir/Colabdata/python_codes

# + colab={"base_uri": "https://localhost:8080/"} id="7fWFCkrL_PTE" outputId="24dda336-d6c7-4433-f1e2-eacc425c284e"
# %ls

# + colab={"base_uri": "https://localhost:8080/"} id="ehfsp9HT_MAX" outputId="ae852aff-40f5-4659-ba31-4e7fd53aa8a2"
# Git clone
# %cd /content/gdrive/MyDrive/Gitdir/Colabdata
# The whole repository folder is cloned
# !git clone https://github.com/micida33/python_codes.git

# + id="_uExYge9RVZw"
# config
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "micida33"

# + id="p8RJhQ9dRer4" colab={"base_uri": "https://localhost:8080/"} outputId="97ed6d38-05ce-4ec1-8813-827d60e452eb"
# add & commit (must be run inside the repository folder -- cd there first)
# !git add R_git.ipynb
# !git commit -m "Added new file."

# + id="3nh3fPw_Rs_v" colab={"base_uri": "https://localhost:8080/"} outputId="6620d799-349c-4d12-ff10-1e5458fd7d13"
# Set origin (did not work)
# SECURITY: never embed a real password or token in a remote URL -- it gets
# persisted in the notebook and in .git/config. The credential that used to
# be hard-coded here has been removed; export a personal access token as
# GITHUB_TOKEN in the environment (and revoke the leaked password).
# !git remote set-url origin https://micida33:$GITHUB_TOKEN@github.com/micida33/python_codes.git
# !git push origin master

# + [markdown] id="Ok1wbqwDR8y4"
# # GitHub setup (abbreviated)

# + id="l5m4uoNJSKpr"
# Mount Google Drive
from google.colab import drive
drive.mount('/content/gdrive')

# + id="RRw7BUTyQklr" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="7ebb23a3-9ac5-4498-81e7-3598e541e853"
# Clone using a token from the environment (the hard-coded password that was
# here has been removed -- see the security note above).
# !git clone https://micida33:$GITHUB_TOKEN@github.com/micida33/test.git "gdrive/My Drive/Gitdir"

# + id="VjUR84ZnSPPX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c506126d-f00e-41e5-e0c6-684c216dd230"
# push
# !git push origin master

# + [markdown] id="_db713gBCNIF"
# # Using GitHub

# + id="ONRDXEUl55-h" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3295977d-8529-4c06-a0b3-2a1e81f271a9"
# Check the git version
# ! git --version

# + id="HkHHO2dH63Gh"
# Create a folder in the current directory
# ! mkdir awesome

# + id="4cCaR71U7GPv"
# Move into that folder (NOTE: each "!cd" runs in its own subshell, so this
# does not change the notebook's working directory -- see os.chdir below)
# ! cd /content/awesome

# + id="xo7JM-TU6gRD" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="75df79eb-3485-44b4-9543-6776cd2832d0"
# Create a new git repository: the current directory becomes a git repo
# ! git init

# + id="95Y2M-L79SQw"
# Add a file to the index
# ! git add hello.html

# + id="71TE0KNiAImY" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="c07ccc31-bf1c-482b-829f-b3f62a071a61"
# Record the change
# ! git commit -m "add new file"

# + id="jPgqQpeaAWGw" colab={"base_uri": "https://localhost:8080/"} outputId="c36adb0c-c6d5-4dcb-c67b-ab6074ab54e4"
# Check file status
# ! git status

# + id="-jP2tzAzAqUf" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a4ba64ca-c89d-47a9-f959-e2d915a59095"
# Remote repository information
# ! git remote add origin https://github.com/micida33/test/awesome.git

# + id="k1M-5bgMA00f" colab={"base_uri": "https://localhost:8080/"} outputId="58f324fa-6412-497a-a08e-dadd7fff0725"
# Push
# ! git push origin master

# + id="aSFMeTvX71JQ" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a095fd86-e919-4965-a05f-c45fb4b76475"
import os
os.getcwd()

# + id="No7aLTl_9LmH"
os.chdir("/content/awesome")

# + colab={"base_uri": "https://localhost:8080/"} id="LmIwJtQPI9Tm" outputId="88b388d4-7872-4293-cafe-534e593494b1"
# Pull using a token from the environment (hard-coded password removed)
# !git pull https://micida33:$GITHUB_TOKEN@github.com/micida33/python_codes.git
git.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Solving MNIST with an MLP

# +
# #%load_ext autoreload
# #%autoreload 2
# %matplotlib inline
from importlib import reload
# Project-local modules: fa provides BP/feedback-alignment linear layers,
# ap provides the SOAP activity-perturbation layer; train/utils supply
# train_net, mnist_data, flatten, torch/nn/plt, etc. (star imports).
import fa
import ap
from train import *
from utils import *
# -

# Training and held-out MNIST datasets.
mnist = mnist_data()
mnist_test = mnist_data(is_train=False)

# ##### Second Order Activity Perturbation

# +
reload(fa)
reload(ap)

# Hyperparameters for the BP vs SO-BP comparison.
seed = 1
h1 = 100
h2 = 10
soap_lr = 1
soap_bias = 1e-3
isize = 28 * 28
train_args = {'regression':False, 'data': mnist, 'epochs': 0.2, 'optimizer':'SGD', 'sample_every':5, 'lr':0.05}

# Baseline: plain backprop MLP.
torch.manual_seed(1)
mlp1 = nn.Sequential(flatten(), fa.BPLinear(isize, h1), nn.Tanh(), fa.BPLinear(h1, h2))
loss1 = train_net(mlp1, **train_args)['loss']

# Same MLP with a SOAP layer inserted; trained with updates=2.
torch.manual_seed(1)
mlp2 = nn.Sequential(flatten(), fa.BPLinear(isize, h1), ap.SOAP(soap_lr, soap_bias), nn.Tanh(), fa.BPLinear(h1, h2))
loss2 = train_net(mlp2, **train_args, updates=2)['loss']

plt.figure(0)
plt.title("training curves")
plt.semilogy(loss1);
plt.semilogy(loss2);
plt.legend(['BP', 'SO-BP']);
# -

# ##### Feedback Alignment vs Back Propogation

# +
reload(fa)
reload(ap)

# Wider hidden layer and longer training for the four-way comparison.
seed = 2
h1 = 256
h2 = 10
soap_lr = 1.0
soap_bias = 1e-3
isize = 28 * 28
train_args = {'regression':False, 'data': mnist, 'epochs': 2, 'optimizer':'SGD', 'sample_every':5, 'lr':0.01}

# BP baseline.
torch.manual_seed(seed)
mlp1 = nn.Sequential(flatten(), fa.BPLinear(isize, h1), nn.Tanh(), fa.BPLinear(h1, h2))
loss1 = train_net(mlp1, **train_args)['loss']

# BP + SOAP.
torch.manual_seed(seed)
mlp2 = nn.Sequential(flatten(), fa.BPLinear(isize, h1), ap.SOAP(soap_lr, soap_bias), nn.Tanh(), fa.BPLinear(h1, h2))
loss2 = train_net(mlp2, **train_args, updates=2)['loss']

# Feedback-alignment output layer.
torch.manual_seed(seed)
mlp3 = nn.Sequential(flatten(), fa.BPLinear(isize, h1), nn.Tanh(), fa.FALinear(h1, h2))
loss3 = train_net(mlp3, **train_args)['loss']

# Feedback alignment + SOAP.
torch.manual_seed(seed)
mlp4 = nn.Sequential(flatten(), fa.BPLinear(isize, h1), ap.SOAP(soap_lr, soap_bias), nn.Tanh(), fa.FALinear(h1, h2))
loss4 = train_net(mlp4, **train_args, updates=2)['loss']

plt.figure(0)
plt.title("training curves")
plt.semilogy(loss1);
plt.semilogy(loss2);
plt.semilogy(loss3);
plt.semilogy(loss4);
plt.legend(['BP', 'SO-BP', 'FA', 'SO-FA']);
# -

# # Convnets

# ### BP vs SO-BP

# +
reload(fa)
reload(ap)

seed = 1
soap_lr = 0.1
# NOTE(review): this comparison trains on mnist_test ('data': mnist_test),
# unlike the MLP sections which train on mnist -- confirm this is intended.
train_args = {'regression':False, 'data': mnist_test, 'epochs': 2, 'optimizer':'SGD', 'sample_every':5, 'lr':0.01}

# LeNet-style convnet trained with plain backprop.
torch.manual_seed(seed)
conv1 = nn.Sequential(
    nn.Conv2d(1, 6, 5),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(6, 16, 5),
    nn.ReLU(),
    nn.MaxPool2d(2),
    flatten(),
    nn.Linear(256, 120),
    nn.ReLU(),
    nn.Linear(120, 84),
    nn.ReLU(),
    nn.Linear(84, 10)
)

# Same architecture with SOAP layers after each conv and the first linear.
torch.manual_seed(seed)
conv2 = nn.Sequential(
    nn.Conv2d(1, 6, 5),
    ap.SOAP(soap_lr),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(6, 16, 5),
    ap.SOAP(soap_lr),
    nn.ReLU(),
    nn.MaxPool2d(2),
    flatten(),
    nn.Linear(256, 120),
    ap.SOAP(soap_lr),
    nn.ReLU(),
    nn.Linear(120, 84),
    nn.ReLU(),
    nn.Linear(84, 10)
)

loss1 = train_net(conv1, **train_args)['loss']
loss2 = train_net(conv2, updates=2, **train_args)['loss']

plt.figure(0)
plt.title("training curves")
plt.semilogy(loss1);
plt.semilogy(loss2);
plt.legend(['BP','SO-BP'])
# -

# ### Measure accuracy

# Held-out accuracy of the two MLPs from the first section.
test_categorical_accuracy(mlp1, mnist_test)

test_categorical_accuracy(mlp2, mnist_test)
train_mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:dftools]
#     language: python
#     name: conda-env-dftools-py
# ---

# # Basic Example
#
# This example is a basic introduction to using ``pydftools``. It mimics example 1 of ``dftools``.

# +
# Import relevant libraries
# %matplotlib inline
import pydftools as df
import time

# Make figures a little bigger in the notebook
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 120

# For displaying equations
from IPython.display import display, Markdown
# -

# Parameters shared by every step of the example.
n = 1000
seed = 1234
sigma = 0.5

# A Schechter mass function is the generative model; keep its default
# parameter vector around as the ground truth for the later plots.
model = df.model.Schechter()
p_true = model.p0

# Generate mock data with observing errors:

data, selection, model, other = df.mockdata(n=n, seed=seed, sigma=sigma, model=model, verbose=True)

# Create a fitting object (the fit is lazy -- nothing runs until the
# ``fit`` attribute is first accessed):

survey = df.DFFit(data=data, selection=selection, model=model)

# Perform the fit, timing it, and show the best-fit parameters:

t0 = time.time()
print(survey.fit.p_best)
print("Time for fitting: ", time.time() - t0, " seconds")

# Plot the covariances:

fig = df.plotting.plotcov([survey], p_true=p_true, figsize=1.3)

# Plot the mass function itself:

fig, ax = df.mfplot(survey, xlim=(1e7, 2e12), ylim=(1e-4, 2), p_true=p_true, bin_xmin=7.5, bin_xmax=12)

# Write out fitted parameters with (Gaussian) uncertainties:

display(Markdown(survey.fit_summary(format_for_notebook=True)))
docs/example_notebooks/basic_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <NAME>
# HW5, Q2

# + pycharm={"name": "#%%\n", "is_executing": false}
import jenkspy
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cm as cm
import plot_utils
import seaborn as sns
import statistics
from sklearn import tree, metrics, preprocessing
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, silhouette_score, silhouette_samples
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# -

# ## a

# + pycharm={"name": "#%%\n", "is_executing": false}
# Load the banknote dataset.
df = pd.read_csv("Banknote.csv")
df

# + [markdown] pycharm={"name": "#%% md\n"}
# ## b

# + pycharm={"name": "#%%\n", "is_executing": false}
# Normalize the various missing-value spellings to NaN, rename the columns,
# mean-impute, then min-max scale every column to [0, 1].
df=df.replace('MISS',np.nan)
df=df.replace('missing',np.nan)
df=df.replace('NULL',np.nan)
df=df.replace("''",np.nan)
df=df.replace("?",np.nan)
df.columns=['c1','c2','c3','c4','goal']
df.c1 = df.c1.fillna(df.c1.mean())
df.c2 = df.c2.fillna(df.c2.mean())
df.c3 = df.c3.fillna(df.c3.mean())
df.c4 = df.c4.fillna(df.c4.mean())
x = df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df = pd.DataFrame(x_scaled,index=df.index, columns=df.columns)
df = df.astype({"goal": int})
df

# + [markdown] pycharm={"name": "#%% md\n"}
# ## c

# + pycharm={"name": "#%%\n", "is_executing": false}
# PCA projects the four features to 2D (for plotting only); KMeans with
# k=2 is fit on the original 4D features.
clo = df.values[:, 0:4]
fit_m = PCA(n_components=2).fit(clo)
new_clo = fit_m.transform(clo)
k_means = KMeans(init="k-means++", n_clusters=2)
k_means.fit(clo)
k_predict = k_means.predict(clo)

# + [markdown] pycharm={"name": "#%% md\n"}
# ## d

# + pycharm={"name": "#%%\n", "is_executing": false}
# Columns of d: [pc1, pc2, cluster label]; centroids are projected into the
# same PCA plane for plotting.
d = np.column_stack((new_clo, k_predict))
centroids = fit_m.transform(k_means.cluster_centers_)
centroids

# + [markdown] pycharm={"name": "#%% md\n"}
# ## e

# + pycharm={"name": "#%%\n", "is_executing": false}
# Scatter of the two clusters in PCA space, with centroids marked by 'x'.
plt.scatter(d[d[:,2] == 0][:,0], d[d[:,2] == 0][:,1], color = 'r',label = 'goal 0')
plt.scatter(d[d[:,2] == 1][:,0], d[d[:,2] == 1][:,1], color = 'b',label = 'goal 1')
plt.scatter(centroids[:, 0], centroids[:, 1], marker="x", s=169, linewidths=3, color="yellow", zorder=10)
plt.legend()
plt.show()

# + [markdown] pycharm={"name": "#%% md\n"}
# ## g :inertia

# + pycharm={"name": "#%%\n", "is_executing": false}
k_means.inertia_

# + [markdown] pycharm={"name": "#%% md\n"}
# ## h

# + pycharm={"name": "#%%\n", "is_executing": false}
# Elbow curve: inertia for k = 1..5.
# NOTE(review): clustering here is done on `d`, which already contains the
# k=2 labels as a column (and PCA coords, not the raw features) -- confirm
# this is what the assignment intends.
inertia_list = []
for i in range(1,6):
    k_means = KMeans(init="k-means++", n_clusters=i)
    k_means.fit(d)
    inertia_list.append(k_means.inertia_)
inertia_list
# -

# ## i

# + pycharm={"name": "#%%\n", "is_executing": false}
plt.legend(['k-means inertia'], loc='lower right')
plt.plot(inertia_list)

# + [markdown] pycharm={"name": "#%% md\n"}
# ## j

# + pycharm={"name": "#%%\n", "is_executing": false}
# Silhouette analysis for k = 2..5: left panel per-sample silhouette bars,
# right panel the clustered points in PCA space with centroids.
silhouette_list = []
for i in range(2,6):
    fig, (first, second) = plt.subplots(1, 2)
    fig.set_size_inches(15, 5)
    K_means_cluster = KMeans(n_clusters = i, random_state = 20)
    Predict_cluster = K_means_cluster.fit_predict(d)
    score_silhouette = silhouette_score(d, Predict_cluster)
    silhouette_list.append(score_silhouette)
    value_for_plot = silhouette_samples(d, Predict_cluster)
    y_l = 10
    for j in range(i):
        # Sorted silhouette values for cluster j, stacked vertically.
        counter_cluster = \
            value_for_plot[Predict_cluster == j]
        counter_cluster.sort()
        counter_shape = counter_cluster.shape[0]
        y_u = y_l + counter_shape
        color = cm.nipy_spectral(float(j) / i)
        first.fill_betweenx(np.arange(y_l, y_u), 0, counter_cluster, facecolor=color, edgecolor=color, alpha=0.7)
        first.text(-0.05, y_l + 0.5 * counter_shape, str(i))
        y_l = y_u + 10
    first.set_title("Cluster's plot of the silhouette")
    first.set_xlabel("coefficient")
    first.set_ylabel("label")
    # Vertical line at the mean silhouette score for this k.
    first.axvline(x=score_silhouette, color="blue", linestyle="--")
    first.set_yticks([])
    first.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    my_color = cm.nipy_spectral(Predict_cluster.astype(float) / i)
    second.scatter(new_clo[:, 0], new_clo[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=my_color, edgecolor='k')
    cc = fit_m.transform(K_means_cluster.cluster_centers_)
    second.scatter(cc[:, 0], cc[:, 1], marker='x', c="white", alpha=0.5, s=300, edgecolor='k')
    for j, k in enumerate(cc):
        second.scatter(k[0], k[1], marker='$%d$' % j, alpha=1, s=50, edgecolor='k')
    second.set_title("clustered data visualization : ")
    second.set_xlabel("C1")
    second.set_ylabel("C2")
    plt.suptitle(("KMeans clustering " "with n_clusters = %d" % i), fontsize=16)
    plt.show()
print(silhouette_list)
plt.figure()
plt.plot(silhouette_list)
Homeworks/HW5/Q2_HW5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from subprocess import check_output import math df = pd.read_csv('./input/cs448b_ipasn.csv') # - # ## date 별로 그룹 묶기 df['date'] = pd.to_datetime(df['date']) df = df.groupby(['date', 'l_ipn'], as_index=False).sum() # date와 l_ipn 별로 합을 구함 df['date'] df['yday'] = df['date'].dt.dayofyear df['wday'] = df['date'].dt.dayofweek # ## 그래프 그리기 # l_ipn 기준으로 그룹 묶기 ip = [] maxF = [] for i in range(0, 10): ip.insert(i, df[df['l_ipn']==i]) maxF.insert(i, np.max(ip[i]['f'])) ip[0].head() count, division = np.histogram(ip[0]['f'], bins=10) count division # IP 별 f(날짜 별 connection 수) 분포도 f, axarray = plt.subplots(5, 2, figsize=(15, 20)) # 5x2로 10개의 flow를 보일 것이다 for i in range(10): count, division = np.histogram(ip[i]['f'], bins=10) # bins는 x축의 간격 row, col = math.floor(i/2), i%2 g = sns.barplot(x=division[0:len(division)-1], y=count, ax=axarray[row, col]) # (0, 0) 위치에 그래프 넣을 것 axarray[row, col].set_title(f'Local IP {i} Flow') # 1년간 connection 수 흐름 f, axarray = plt.subplots(5, 2, figsize=(15, 20)) for i in range(10): row, col = math.floor(i/2), i%2 axarray[row, col].plot(ip[i]['yday'], ip[i]['f']) axarray[row, col].plot(ip[i]['yday'], [ip[i]['f'].mean() + 3*ip[i]['f'].std()]*len(ip[i]['yday']), color='g') ip[0] = df[df['l_ipn']==0] maxF[0] = np.max(ip[0]['f']) ip[1] = df[df['l_ipn']==1][0:len(ip[1]['f'])-5] maxF[1] = np.max(ip[1]['f']) ip[2] = df[df['l_ipn']==2] maxF[2] = np.max(ip[2]['f']) ip[3] = df[df['l_ipn']==3] maxF[3] = np.max(ip[3]['f']) ip[4] = df[df['l_ipn']==4][0:len(ip[4]['f'])-7] # ## 예측 - RNN # keras 함수들을 사용하기 위해 import import math from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM from keras.layers import GRU from 
sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error # 정규화 for i in range(10): fv = [float(v)/float(maxF[i]) for v in ip[i]['f'].values] ip[i].loc[:, 'f'] = np.array(fv).reshape(-1, 1) # feature 리스트와 우리 모델의 target을 만든다 # look_back: 지금으로부터 이전 몇 개의 데이터를 볼지 결정 def create_dataset(dataset, look_back=1): dataX, dataY = [], [] for i in range(len(dataset)-look_back-1): a = dataset[i:(i+look_back)].values dataX.append(a) dataY.append(dataset['f'].iloc[i+look_back]) return np.array(dataX), np.array(dataY) # RNN 훈련시킴 def trainModel(data): data['f'] = data['f'].astype('float32') train = data[0:look_back*5].copy() trainX, trainY = create_dataset(train, look_back) trainX = np.reshape(trainX, (trainX.shape[0], look_back, 2)) model = Sequential() # model(NN) 초기화 # GRU 모델 사용 model.add(GRU(64, input_shape=(trainX.shape[1], trainX.shape[2]), return_sequences=True)) model.add(GRU(32)) # Dense(1): 출력 뉴런 수=1 # 입력 뉴런 수를 따로 지정 안해주면 이전 입력된 수 그대로 이번 입력 뉴런 수로 설정됨 model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='sgd') model.fit(trainX, trainY, epochs=100, batch_size=16, verbose=0) return model # 훈련된 모델로 예측 수행 def predictFlow(_model, data): ypredFlow = [0] * look_back for k in range(len(data)-look_back): pattern = data[k:k+look_back].values x = np.reshape(pattern, (1, len(pattern), 2)) ypredFlow.append(_model.predict(x)[0][0]) # ypredFlow = [v*_max for v in ypredFlow] return ypred # + # 예측 결과 보기 m = [] for i in range(10): m[i] = trainModel(ip[i][['f', 'wday']].copy()) f, axarray = plt.subplots(5, 2, figsize=(15, 20)) ypred, ipF = [], [] for i in range(10): ypred[i] = np.multiply(predictFlow(m[i], ip[i][['f', 'wday']].copy()), max[i]) ipF[i] = np.multiply(ip[i]['f'], max[i]) row, col = math.floor(i/2), i%2
Botnet Host Prediction using RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Horovod-Infiniband-Benchmark # # ## Introduction # # This recipe shows how to reproduce [Horovod distributed training benchmarks](https://github.com/uber/horovod/blob/master/docs/benchmarks.md) using Azure Batch AI. # # Currently Batch AI has no native support for Horovod framework, but it's easy to run it using customtoolkit and job preparation command line. # # # ## Details # # - Official Horovod Benchmark [scripts](https://github.com/alsrgv/benchmarks/tree/master/scripts/tf_cnn_benchmarks) will be used; # - The job will be run on standard tensorflow container ```tensorflow/tensorflow:1.4.0-gpu```; # - Horovod framework and IntelMPI will be installed in the container using job preparation command line. Note, you can build your own docker image containing tensorflow and horovod instead. # - Benchmark scripts will be downloaded to GPU nodes using job preparation command line as well. # - This sample needs to use at least two `STANDARD_NC24r` nodes, please be sure you have enough quota # - Standard output of the job will be stored on Azure File Share. # ## Instructions # # ### Install Dependencies and Create Configuration file. # Follow [instructions](/recipes) to install all dependencies and create configuration file. # ### Read Configuration and Create Batch AI client # + nbpresent={"id": "bfa11f00-8866-4051-bbfe-a9646e004910"} from __future__ import print_function from datetime import datetime import sys from azure.storage.file import FileService import azure.mgmt.batchai.models as models # utilities.py contains helper functions used by different notebooks sys.path.append('../../') import utilities cfg = utilities.Configuration('../../configuration.json') client = utilities.create_batchai_client(cfg) # - # ## 1. 
Prepare Training Dataset and Script in Azure Storage # ### Create File Share # # For this example we will create a new File Share with name `batchaisample` under your storage account. This share will be populated with sample scripts and will contain job's output. # # **Note** You don't need to create new file share for every cluster. We are doing this in this sample to simplify resource management for you. azure_file_share_name = 'batchaisample' service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.create_share(azure_file_share_name, fail_on_exist=False) print('Done') # ### Deploy Job Preparation Script to Azure File Share # Create a folder in the file share and upload the sample script to it. samples_dir = 'horovod_samples' service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.create_directory( azure_file_share_name, samples_dir, fail_on_exist=False) print('Done') # Upload the job preparation script, that does the following tasks: # - Install essential packages for infiniband support # - Download benchmark sample # - Install IntelMPI binary # - Install honovod framework service.create_file_from_path( azure_file_share_name, samples_dir, 'jobprep_benchmark.sh', 'jobprep_benchmark.sh') # ## 2. Create Azure Batch AI Compute Cluster # ### Configure Compute Cluster # # - For this example we will use a GPU cluster of `STANDARD_NC24r` nodes, which equip with infiniband device. Number of nodes in the cluster is configured with `nodes_count` variable, and 2 nodes will be used by default; # - Please be sure you have enough core quota to create at least two `STANDARD_NC24r` nodes # - We need to use the latest `UbuntuServer 16.04-LTS` as the host image, which is compatible with infiniband. # - We will mount file share at folder with name `afs`. Full path of this folder on a computer node will be `$AZ_BATCHAI_MOUNT_ROOT/afs`; # - We will call the cluster `nc24r`. 
# # # So, the cluster will have the following parameters: # + azure_file_share = 'afs' nodes_count = 2 cluster_name = 'nc24r' volumes = models.MountVolumes( azure_file_shares=[ models.AzureFileShareReference( account_name=cfg.storage_account_name, credentials=models.AzureStorageCredentialsInfo( account_key=cfg.storage_account_key), azure_file_url = 'https://{0}.file.core.windows.net/{1}'.format( cfg.storage_account_name, azure_file_share_name), relative_mount_path=azure_file_share) ] ) parameters = models.ClusterCreateParameters( location=cfg.location, vm_size='STANDARD_NC24r', virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( publisher='Canonical', offer='UbuntuServer', sku='16.04-LTS', version='latest')), scale_settings=models.ScaleSettings( manual=models.ManualScaleSettings(target_node_count=nodes_count) ), node_setup=models.NodeSetup( mount_volumes=volumes ), user_account_settings=models.UserAccountSettings( admin_user_name=cfg.admin, admin_user_password=cfg.admin_password, admin_user_ssh_public_key=cfg.admin_ssh_key ) ) # - # ### Create Compute Cluster _ = client.clusters.create(cfg.resource_group, cluster_name, parameters).result() # ### Monitor Cluster Creation # # utilities.py contains a helper function allowing to wait for the cluster to become available - all nodes are allocated and finished preparation. cluster = client.clusters.get(cfg.resource_group, cluster_name) utilities.print_cluster_status(cluster) # ## 3. Run Azure Batch AI Training Job # - The job needs to know where to find train_mnist.py script (the chainer will download MNIST dataset on its own). So, we will configure an input directory for the script: input_directories = [ models.InputDirectory( id='SCRIPTS', path='$AZ_BATCHAI_MOUNT_ROOT/{0}/{1}'.format(azure_file_share, samples_dir))] # The job will be able to reference those directories using ```$AZ_BATCHAI_INPUT_SCRIPT``` environment variable. 
# ### Configure Output Directories # We will store standard and error output of the job in File Share: std_output_path_prefix = '$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(azure_file_share) # ### Configure Job # # - Will use configured previously input and output directories; # - We will use custom toolkit job to run tensorflow_mnist.py on multiple nodes (use node_count parameter to specify number of nodes). Note, Batch AI will create a host list for the job, it can be found via ```$AZ_BATCH_HOST_LIST``` environment variable; # - Horovod framework, IntelMPI and benchmark sample scripts will be installed by job preparation command line; # - Will output standard output and error streams to file share. # - If you are interested using TCP instead, please replace ```-env I_MPI_FABRICS=dapl -env I_MPI_DAPL_PROVIDER=ofa-v2-ib0 -env I_MPI_DYNAMIC_CONNECTION=0``` with ```-env I_MPI_FABRICS=tcp``` in the command line numWorkers = nodes_count * 4 parameters = models.job_create_parameters.JobCreateParameters( location=cfg.location, cluster=models.ResourceId(id=cluster.id), node_count=nodes_count, input_directories=input_directories, std_out_err_path_prefix=std_output_path_prefix, container_settings=models.ContainerSettings( image_source_registry=models.ImageSourceRegistry(image='tensorflow/tensorflow:1.4.0-gpu')), job_preparation=models.JobPreparation( command_line='bash $AZ_BATCHAI_INPUT_SCRIPTS/jobprep_benchmark.sh'), custom_toolkit_settings = models.CustomToolkitSettings( command_line='source /opt/intel/compilers_and_libraries_2017.4.196/linux/mpi/intel64/bin/mpivars.sh; cd $AZ_BATCHAI_JOB_TEMP/benchmarks/; mpirun -n {0} -ppn 4 -hosts $AZ_BATCH_HOST_LIST -env I_MPI_FABRICS=dapl -env I_MPI_DAPL_PROVIDER=ofa-v2-ib0 -env I_MPI_DYNAMIC_CONNECTION=0 python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --model resnet101 --batch_size 64 --variable_update horovod'.format(str(numWorkers)))) # ### Create a training Job and wait for Job completion # # - Wait for job to complete, and keep 
streaming the stdout log # - When the job completes, you will see the number of images processed per second by the end of the log job_name = datetime.utcnow().strftime('hvdbenchmark_%m_%d_%Y_%H%M%S') job = client.jobs.create(cfg.resource_group, job_name, parameters).result() print('Created Job: {}'.format(job.name)) # ### Wait for Job to Finish # The job will start running when the cluster will have enough idle nodes. The following code waits for job to start running printing the cluster state. During job run, the code prints current content of stderr.txt. # # **Note** Execution may take several minutes to complete. utilities.wait_for_job_completion(client, cfg.resource_group, job_name, cluster_name, 'stdouterr', 'stdout.txt') # ### Download stdout.txt and stderr.txt files for the Job and job preparation command files = client.jobs.list_output_files(cfg.resource_group, job_name, models.JobsListOutputFilesOptions(outputdirectoryid='stdouterr')) for f in list(files): if f.download_url: utilities.download_file(f.download_url, f.name) print("All files downloaded") # ## 4. Clean Up (Optional) # ### Delete the Job _ = client.jobs.delete(cfg.resource_group, job_name) # ### Delete the Cluster # When you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code. _ = client.clusters.delete(cfg.resource_group, cluster_name) # ### Delete File Share # When you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code. service = FileService(cfg.storage_account_name, cfg.storage_account_key) service.delete_share(azure_file_share_name)
recipes/Horovod/Horovod-Infiniband-Benchmark/Horovod-Infiniband-benchmark.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # IMDB: TEXT Classification + BERT + Ax # ## Librairies # + # # !pip install transformers==4.8.2 # # !pip install datasets==1.7.0 # # !pip install ax-platform==0.1.20 # # !pip install ipywidgets # # !jupyter nbextension enable --py widgetsnbextension # - import os import sys # + import io import re import pickle from timeit import default_timer as timer import numpy as np import torch import torch.nn as nn import torch.optim as optim from datasets import load_dataset, Dataset, concatenate_datasets from transformers import AutoTokenizer from transformers import BertModel from transformers.data.data_collator import DataCollatorWithPadding from ax import optimize from ax.plot.contour import plot_contour from ax.plot.trace import optimization_trace_single_method from ax.service.managed_loop import optimize from ax.utils.notebook.plotting import render, init_notebook_plotting import esntorch.core.reservoir as res import esntorch.core.learning_algo as la import esntorch.core.merging_strategy as ms import esntorch.core.esn as esn # - # %config Completer.use_jedi = False # %load_ext autoreload # %autoreload 2 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device SEED = 42 # ## Global variables # + RESULTS_PATH = '~/Results/Ax_results/ESN' # path of your result folder CACHE_DIR = '~/Data/huggignface/' # path of your folder PARAMS_FILE = 'imdb_params.pkl' RESULTS_FILE = 'imdb_results.pkl' # - # ## Dataset # + # rename correct column as 'labels': depends on the dataset you load def load_and_enrich_dataset(dataset_name, split, cache_dir): dataset = load_dataset(dataset_name, split=split, cache_dir=CACHE_DIR) dataset = dataset.rename_column('label', 'labels') # cf 'imdb' dataset dataset = dataset.map(lambda e: tokenizer(e['text'], 
truncation=True, padding=False), batched=True) dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels']) def add_lengths(sample): sample["lengths"] = sum(sample["input_ids"] != 0) return sample dataset = dataset.map(add_lengths, batched=False) return dataset # + tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') full_train_dataset = load_and_enrich_dataset('imdb', split='train', cache_dir=CACHE_DIR).sort("lengths") # toriving/sst5 train_val_datasets = full_train_dataset.train_test_split(train_size=0.8, shuffle=True) train_dataset = train_val_datasets['train'].sort("lengths") val_dataset = train_val_datasets['test'].sort("lengths") test_dataset = load_and_enrich_dataset('imdb', split='test', cache_dir=CACHE_DIR).sort("lengths") dataset_d = { 'full_train': full_train_dataset, 'train': train_dataset, 'val': val_dataset, 'test': test_dataset } dataloader_d = {} for k, v in dataset_d.items(): dataloader_d[k] = torch.utils.data.DataLoader(v, batch_size=128,#256, reduced for bi-direction collate_fn=DataCollatorWithPadding(tokenizer)) # - dataset_d # ## Optimization def fitness(leaking_rate, spectral_radius, input_scaling, bias_scaling, alpha, reservoir_dim, dataset_d, dataloader_d, seed_l=[1991, 420, 666, 1979, 7], # 5 seeds return_test_acc=False): acc_l = [] time_l = [] for seed in seed_l: # parameters esn_params = { 'embedding_weights': 'bert-base-uncased', # TEXT.vocab.vectors, 'distribution' : 'uniform', # uniform, gaussian 'input_dim' : 768, # dim of encoding! 
'reservoir_dim' : reservoir_dim, 'bias_scaling' : bias_scaling, 'sparsity' : 0.99, 'spectral_radius' : spectral_radius, 'leaking_rate': leaking_rate, 'activation_function' : 'tanh', 'input_scaling' : input_scaling, 'mean' : 0.0, 'std' : 1.0, 'learning_algo' : None, 'criterion' : None, 'optimizer' : None, 'merging_strategy' : 'mean', 'lexicon' : None, 'bidirectional' : False, # False 'device' : device, 'seed' : seed } # model ESN = esn.EchoStateNetwork(**esn_params) ESN.learning_algo = la.RidgeRegression(alpha = alpha)# , mode='normalize') ESN = ESN.to(device) # warm up (new) nb_sentences = 3 for i in range(nb_sentences): sentence = dataset_d["train"].select([i]) dataloader_tmp = torch.utils.data.DataLoader(sentence, batch_size=1, collate_fn=DataCollatorWithPadding(tokenizer)) for sentence in dataloader_tmp: ESN.warm_up(sentence) # predict if return_test_acc: t0 = timer() LOSS = ESN.fit(dataloader_d["full_train"]) # changed back full_train => train t1 = timer() time_l.append(t1 - t0) acc = ESN.predict(dataloader_d["test"], verbose=False)[1].item() else: LOSS = ESN.fit(dataloader_d["train"]) acc = ESN.predict(dataloader_d["val"], verbose=False)[1].item() acc_l.append(acc) # clean objects del ESN.learning_algo del ESN.criterion del ESN.merging_strategy del ESN torch.cuda.empty_cache() if return_test_acc: return np.mean(acc_l), np.std(acc_l), np.mean(time_l), np.std(time_l) else: return np.mean(acc_l) # + # # %%time # fitness(leaking_rate=0.2, spectral_radius=1.1, input_scaling=0.8, bias_scaling=1.0, alpha=10, reservoir_dim=100, dataset_d=dataset_d, dataloader_d=dataloader_d) # - def wrapped_fitness(d, return_test_acc=False): return fitness(leaking_rate=d['leaking_rate'], spectral_radius=d['spectral_radius'], input_scaling=d['input_scaling'], bias_scaling=d['bias_scaling'], alpha=d['alpha'], reservoir_dim=d['reservoir_dim'], # will be in the loop dataset_d=dataset_d, dataloader_d=dataloader_d, return_test_acc=return_test_acc) # + # *** WARNING *** DO NO EXECUTE NEXT 
CELLS IF BIDIRECTIONAL MODE (OPTIM ALREADY DONE) # + best_params_d = {} for res_dim in [500, 1000, 3000, 5000]: best_parameters, best_values, experiment, model = optimize( parameters=[ { "name": "leaking_rate", "value_type": "float", "type": "range", "bounds": [0.0, 0.999], }, { "name": "spectral_radius", "value_type": "float", "type": "range", "bounds": [0.2, 1.7], }, { "name": "input_scaling", "value_type": "float", "type": "range", "bounds": [0.1, 3.0], }, { "name": "bias_scaling", "value_type": "float", "type": "range", "bounds": [0.1, 3.0], }, { "name": "alpha", "value_type": "float", "type": "range", "log_scale": True, "bounds": [1e-3, 1e3], }, { "name": "reservoir_dim", "value_type": "int", "type": "fixed", "value": res_dim, } ], # Booth function evaluation_function = wrapped_fitness, minimize = False, objective_name = 'val_accuracy', total_trials = 40 ) # results best_params_d[res_dim] = {} best_params_d[res_dim]['best_parameters'] = best_parameters best_params_d[res_dim]['best_values'] = best_values best_params_d[res_dim]['experiment'] = experiment # best_params_d[res_dim]['model'] = model # - # ## Results # + # best parameters with open(os.path.join(RESULTS_PATH, PARAMS_FILE), 'wb') as fh: pickle.dump(best_params_d, fh) # + # # load results # with open(os.path.join(RESULTS_PATH, PARAMS_FILE), 'rb') as fh: # best_params_d = pickle.load(fh) # + # best_params_d # + # results results_d = {} for res_dim in [500, 1000, 3000, 5000]: best_parameters = best_params_d[res_dim]['best_parameters'] acc, acc_std, time, time_std = wrapped_fitness(best_parameters, return_test_acc=True) results_d[res_dim] = acc, acc_std, time, time_std print("Experiment finished.") # - results_d # + # save results with open(os.path.join(RESULTS_PATH, RESULTS_FILE), 'wb') as fh: pickle.dump(results_d, fh) # -
notebooks_paper_2021/ESNs/IMDB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this notebook we do a first exploration of some audio data, to extract individual noise samples, gather them into a dataset, and train a neural net to recognize them. There are only two types of noises so far: the isolated consonant sounds 't' and 'p'. Note by 'noise' here and throughout we mean percussive mouth noises, not background static noise. # # Data loading and cleaning # + # load the audio files, and give each noise a label # we'll assume each file contains multiple, spaced reptitions of the same noise from pydub import AudioSegment directory = "./recordings/sennheiser/" noise_audio = { 't': AudioSegment.from_file(directory + 't1.m4a'), 'p': AudioSegment.from_file(directory + 'p1.m4a') } # get some basic properties frame_rate = { n: noise_audio[n].frame_rate for n in noise_audio } print("Frame rates:", frame_rate) # print(noise_t_audio.duration_seconds) # print(noise_t_audio.duration_seconds * noise_t_audio.frame_rate == noise_t_audio.frame_count()) # print(noise_t_audio.sample_width) # print(noise_t_audio.channels) # convert to mono, so each frame has only one sample noise_audio_mono = { n: noise_audio[n].set_channels(1) for n in noise_audio } # finally, convert to a more workable data type import pandas as pd noise_sample_array = {n: noise_audio_mono[n].get_array_of_samples() for n in noise_audio } noise_series = { n: pd.Series(noise_sample_array[n], name=n) for n in noise_audio } noises = pd.concat(noise_series.values(), axis=1) # - # We may need these later import numpy as np import matplotlib.pyplot as plt # visualize, to make sure things are sensible so far length = 15 * frame_rate['t'] noises[:length].plot() plt.figure() noises['t'][:length].plot() plt.figure() noises['p'][:length].plot() # Zooming in on a couple representative 
spikes, we see they # * are quite distinct, # * begin abruptly, and # * last <~ 3000 frames ~ 0.07s # + start = 83000 duration = 10000 t_example = noises['t'][start:start+duration] t_example /= t_example.apply(abs).max() t_example.plot() print("Duration shown:", duration / frame_rate['t']) # play the sample. sounddevice is easier to use than pydub for this import sounddevice as sd data = t_example / 10 fr = frame_rate['t'] if False: sd.play(data, fr) sd.wait() start = 47000 duration = 10000 plt.figure() p_example = noises['p'][start:start+duration] p_example /= p_example.max() p_example.plot() print("Duration shown:", duration / frame_rate['t']) data = p_example / 10 if False: sd.play(data, fr) # - # We want to isolate each of these spikes from the recording automatically, as individual "observations" of the desired noise. To do so we need to estimate when each noise starts, and how long it lasts. We'll assume short, percussive noises for now (not drawn out, like a hisssss). # + # To isolate the spikes, look for points with (absolute value) at least 20% of the maximum (absolute value) SPIKE_THRESHOLD_FRACTION = .20 noises_abs = noises.apply(abs) spike_thresholds = { n: noises_abs[n].max() * SPIKE_THRESHOLD_FRACTION for n in noises } print(spike_thresholds) # How does this look? for n in noises: plt.figure(figsize=(15,2)) noises_abs[n].plot() x = range(len(noises)) yval = spike_thresholds[n] plt.plot(x, [yval] * len(x), label=n) # - # It seems to catch most of them for 't', but misses a lot of them for 'p'. Looking in terms of dB isn't much more enlightening: # + noises_abs_dB = noises.apply(lambda x: 20 * np.log(abs(x) + 0.001)) start = 0 end = 10**6 for n in noises: plt.figure(figsize=(15,2)) noises_abs_dB[n][start:end].plot() # - # To find the spikes, then, let's scan and look for peaks in amplitude. 
We observed earlier that each spike lasts a few 10s of ms, so let's scan in batches of ~0.02 sec, looking for with the amplitude in one batch is substantially larger than the previous. This should also help to avoid loud but non-percussive noises (like a breath or word) that might otherwise clutter the data. # + BATCH_DURATION = 0.02 # look at BATCH_DURATION (seconds) at a time THRESHOLD_MULTIPLIER = 5 # detect a spike when the next batch is at least THRESHOLD_MULTIPLIER times bigger spike_locations = { n: [] for n in noises } batch_amplitudes = { n: [] for n in noises } spike_amplitudes = { n: [] for n in noises } for n in noises: data = noises[n].apply(abs) data /= data.max() # normalize batch_size = round(BATCH_DURATION * frame_rate[n]) number_of_batches = len(data) // batch_size last_amplitude = 1 # initialize variable for b in range(number_of_batches): start = batch_size * b end = batch_size * (b + 1) amplitude = data[start:end].max() # batch_amplitudes[n].append(amplitude) if amplitude > THRESHOLD_MULTIPLIER * last_amplitude: spike_locations[n].append(start) spike_amplitudes[n].append(amplitude) last_amplitude = amplitude # visualize the spike locations plt.figure(figsize=(15,2)) data.plot() for i in range(len(spike_locations[n])): x = spike_locations[n][i] y = spike_amplitudes[n][i] plt.plot([x], [y], marker='o', markersize=3, color="red") # - # At least by visual inspection, this seems to have caught most volume spikes relatively well. Let's now grab a window around each spike, that we'll use for analysis and training. This will create one dataframe per isolated noise. We'll also normalize them while we're at it, to have the same average amplitude. 
# + BEFORE = 0.5 * BATCH_DURATION # the time (sec) to look before the spike location AFTER = 3 * BATCH_DURATION # the time (sec) to look after the spike location noises_isolated = {} def normalize(series): mean = series.apply(abs).mean() return series / mean for n in noises: df = pd.DataFrame() b = round(BEFORE * frame_rate[n]) a = round(AFTER * frame_rate[n]) LABEL = '_obs' for s in spike_locations[n]: obs = noises[n].iloc[ s-b : s+a ].reset_index(drop=True) df[n + LABEL + str(s)] = normalize(obs) noises_isolated[n] = df noises_isolated[n].plot(legend=False, title=n) # - # These observations are all the same length, and have no missing data. This is dependent on two things: # * the before and after intervals were identical because the frame rates were identical (different frame rates might yield off by one differences because of rounding) # * the spikes are all sufficiently displaced from the ends of the data that there is always enough data to grab a full before and after interval # # The former we could control by matching all the frame rates before proceeding. The latter will be important for training anyways, so we can instruct users to leave enough quiet space at the beginning and ends of the recordings. This will not be hard, because we only need 32ms per observation. # There is no missing data [ noises_isolated[n].isnull().any().any() for n in noises ] # Let's listen to the samples, to see if they sound reasonable. They generally sound pretty good, though the 'p' noise seems often dominated by air blowing into the microphone rather than the lip pop itself. That will likely vary quite a bit based on the microphone and microphone position used, so we should keep an eye on this. for n in noises: fr = frame_rate[n] for obs_label, obs_data in noises_isolated[n].iteritems(): obs_data /= 10 if False: sd.play(obs_data, fs) sd.wait() # # Classification by spectrograms # To classify these, we will compute their spectrograms and classify the resulting images. 
To begin, we will use Mel Spectrograms (which show more distinct features), and a Convolutional Neural Net (CNN) to classify them, since CNN's have shown particular success in image classification. We will use PyTorch for both of these tasks. (Ultimately we might wish to use PyTorch for the original audio processing, as well.) # # Note: With a dataset as small as ours, it may make more sense to use a simpler method for image classification: deep learning with a CNN may overfit, or otherwise be an unnecessarily large hammer for this problem. We'll proceed anyways to try it out. If it struggles for lack of data, we could even go a transfer learning route, leveraging existing nets trained on much larger image sets. import torch import torchaudio # Let's take a look at a couple spectrograms to settle on what we want to compare. We will also need to convert to our noises to torch tensors for manipulation. # + # Here are the full audio files tfull = torch.tensor(noises['t'], dtype=torch.float) pfull = torch.tensor(noises['p'], dtype=torch.float) # and a couple isolated examples t0 = torch.tensor(noises_isolated['t'].iloc[:,0]) p0 = torch.tensor(noises_isolated['p'].iloc[:,0]) # + # some normal and mel spectrograms n_mels = 28 specgram_t = torchaudio.transforms.Spectrogram()(t0) melspecgram_t = torchaudio.transforms.MelSpectrogram( sample_rate=frame_rate['t'], n_mels=n_mels)(t0) specgram_p = torchaudio.transforms.Spectrogram()(p0) melspecgram_p = torchaudio.transforms.MelSpectrogram( sample_rate=frame_rate['p'], n_mels=n_mels)(p0) fig, ax = plt.subplots(1, 4) ax[0].imshow(specgram_t.log2().numpy()) ax[1].imshow(melspecgram_t.log2().numpy()) ax[2].imshow(specgram_p.log2().numpy()) ax[3].imshow(melspecgram_p.log2().numpy()) # - # For some reason using n_mels too large (>= 47, for the parameters we chosen so far) results in white bars, some mel filterbanks that are identically zero at all times. 
I don't quite understand this, but a smaller image is probably sufficient anyways, so we'll table this for now. We'll use the mel spectrograms (the second and fourth images) for classification. # Let's compute these mel spectrograms for all of our samples. Let's also take note of how long it takes, since eventually we'll want to do this or something similar in real-time. We note it seems that the runtime is independent of n_mels, the number of mel filterbanks in the spectrogram. # + import time start = time.time() noise_spectrograms = { n: [] for n in noises } n_mels = 28 for n in noises: fr = frame_rate[n] for obs_label, obs_data in noises_isolated[n].iteritems(): obs_data = torch.tensor(obs_data) mel = torchaudio.transforms.MelSpectrogram( sample_rate=fr, n_mels=n_mels)(obs_data) noise_spectrograms[n].append(mel.log2()) end = time.time() total_time = end - start n_samples = sum([ len(df.columns) for n, df in noises_isolated.items() ]) avg_time = total_time / n_samples print("The total time elapsed is", total_time, "sec.") print("The average time per spectrogram is", avg_time, "sec.") # - # # Preparing the datasets # Now, let's move on to building and training a model. First we build our dataset. Ultimately we may wish for this dataset class to also do all of the preprocessing we've done above, but for now we'll just use the spectrograms we've made. The simple dataset class here is adapted from https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel. # # For training, it may also be helpful to augment our data by randomly shifting the window around each volume spike. We'll leave that for another time, though. 
# + from torch.utils.data import Dataset, DataLoader # for each type of noise, do the following: # - assign an integer label to the noise string (noise_int_to_str: dict) # - append all corresponding spectrograms to a list (spectrograms: list) # - build a tensor with appropriate noise (integer) labels at corresponding indices (spectrograph_labels: list) noise_int_to_str, all_spectrograms, all_spectrogram_labels, i = {}, [], [], 0 for n in noise_spectrograms: noise_int_to_str[i] = n all_spectrograms += noise_spectrograms[n] all_spectrogram_labels += [i] * len(noise_spectrograms[n]) i += 1 # the CNN will expect the first tensor dimension to be the channel all_spectrograms = [ s.unsqueeze(0) for s in all_spectrograms ] class NoisesDataset(Dataset): """Noises dataset.""" def __init__(self, list_of_indices): 'Initialization' self.spectrogram_indices = list_of_indices def __len__(self): 'Denotes the total number of samples' return len(self.spectrogram_indices) def __getitem__(self, sample_index): 'Generates one sample of data' # Select sample spectrogram_index = self.spectrogram_indices[sample_index] # Load data and get label X = all_spectrograms[spectrogram_index] y = all_spectrogram_labels[spectrogram_index] return X, y # - # We split into training and testing data sets, adapted from https://stackoverflow.com/questions/53916594/typeerror-object-of-type-numpy-int64-has-no-len # + full_dataset = NoisesDataset(range(len(all_spectrograms))) training_fraction = 0.8 train_size = round(training_fraction * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) train_params = { 'batch_size': 10, 'shuffle': True, 'num_workers': 1, } train_loader = DataLoader(dataset=train_dataset, **train_params) test_loader = DataLoader(dataset=test_dataset) # - # Let's observe a few of these, to make sure all is in order # + # get some random training spectrograms train_dataiter = 
iter(train_loader)
# Use the builtin next(); iterator objects in Python 3 (and newer PyTorch
# DataLoader iterators) do not provide a .next() method.
spectrograms, labels = next(train_dataiter)
batch_size = train_params['batch_size']

# show spectrograms
fig, ax = plt.subplots(1, batch_size)
for i in range(len(spectrograms)):
    if batch_size == 1:
        ax.imshow(spectrograms[i][0].numpy()) # the 0 selects the first (only) channel
    else:
        ax[i].imshow(spectrograms[i][0].numpy()) # the 0 selects the first (only) channel

# print labels
print(' '.join('%4s' % noise_int_to_str[labels[j].item()] for j in range(batch_size)))
# -

# Let's also check what balance we achieved in the training set, just to make sure there's a reasonably even number of different types of noise.

# +
def count_noise_types_in_dataset(dataset):
    """Return a {noise name: sample count} dict for the given dataset."""
    counts = {}
    for s, n_int in dataset:
        # Catch only the missing-key case; the original bare `except:` would
        # also swallow unrelated errors (even KeyboardInterrupt).
        try:
            counts[n_int] += 1
        except KeyError:
            counts[n_int] = 1
    return { noise_int_to_str[k]: v for k, v in counts.items() }

print(count_noise_types_in_dataset(train_dataset))
print(count_noise_types_in_dataset(test_dataset))
# -

# Very good, it seems the random split came out pretty even.

# Before we move on to the CNN, let's check the resolution of each spectrogram, and confirm it's the same for all.

# +
image_size = full_dataset[0][0].size() # first image of first batch
for s, l in full_dataset:
    if s.size() != image_size:
        print("Image size does not match.")
print(image_size)
# -

# # Preparing the CNN

# Much of this section is adapted from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py.
#
# Let's begin by trying two convolution layers, and three dense layers.
# + import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self, image_size): super(Net, self).__init__() # image_size is a 2-tuple, the expected dimensions of each spectrogram channels, h, w = image_size # number of output nodes, (square) kernel size, and pool size per convolution layer, # assuming the stride for pooling is the same as the pool size kernels = [3, 3] pool = 2 # compute the number of input nodes for the first dense layer h_out, w_out = h, w for k in kernels: # the convolution. h_out += -k + 1 w_out += -k + 1 # the pool. (from help(torch.nn.MaxPool2d)) h_out = int( (h_out - pool) / pool + 1 ) w_out = int( (w_out - pool) / pool + 1 ) self.image_out = h_out * w_out # number of output nodes for final dense layer: the number of noise types N_noises = len(noise_int_to_str) # the total number of noise types # define the layers. The numbers of nodes chosen do not have deep thought behind them. self.conv0 = nn.Conv2d(1, 32, kernels[0]) self.pool = nn.MaxPool2d(2) self.conv1 = nn.Conv2d(32, 10, kernels[1]) self.fc0 = nn.Linear(10 * self.image_out, 50) self.fc1 = nn.Linear(50, 10) self.fc2 = nn.Linear(10, N_noises) def forward(self, x): x = self.pool(F.relu(self.conv0(x))) x = self.pool(F.relu(self.conv1(x))) x = x.view(-1, 10 * self.image_out) x = F.relu(self.fc0(x)) x = F.relu(self.fc1(x)) x = self.fc2(x) return x net = Net(image_size) # - # There are far more parameters than data. print("There are %d parameters." % ( sum([ p.numel() for p in net.parameters() ]))) print("There are %d data samples." % (len(full_dataset))) # Does the net accept our data, as we hope? for data in train_loader: inputs, labels = data print(net(inputs)) break # Next we need to define a cost function, and an optimizer to determine how our net should learn. # # For cost, we'll choose CrossEntropyLoss, which particularly penalizes high confidence in wrong answers. 
#
# For an optimizer, we'll go with Adam, an adaptive variant of stochastic gradient descent. (The code below uses `optim.Adam`, so the prose now matches it.)

# +
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())
# -

def train_net(net, epochs, epoch_progress=10, batch_progress=50):
    """Train `net` on train_loader for `epochs` epochs.

    Prints the average batch loss every `batch_progress` batches and the
    average epoch loss every `epoch_progress` epochs."""
    epoch_running_loss = 0.0
    batch_num = 0
    for epoch in range(epochs):  # loop over the dataset multiple times
        batch_running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # accrue loss for printing
            batch_running_loss += loss.item()
            epoch_running_loss += loss.item()
            batch_num += 1
            if i % batch_progress == batch_progress-1:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, batch_running_loss / batch_progress))
                batch_running_loss = 0.0
        if epoch % epoch_progress == epoch_progress-1:
            print('[%d] loss: %.3f' %
                  (epoch + 1, epoch_running_loss / batch_num))
            # Reset the accumulator actually used above. The original assigned
            # to a never-read `running_loss`, so `epoch_running_loss` kept
            # growing and the printed "epoch loss" averaged over all of
            # training instead of the last `epoch_progress` epochs.
            epoch_running_loss = 0.0
            batch_num = 0
    print('Finished Training')

train_net(net, 100)

# How does this perform on the testing data?

# +
def accuracy_rating(dataloader, label):
    """Print the classification accuracy of `net` over `dataloader`.

    `label` is only used in the printed message (e.g. 'training', 'test')."""
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in dataloader:
            spectrograms, labels = data
            outputs = net(spectrograms)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the %d %s spectrograms: %d %%' % (
        total, label, 100 * correct / total))

accuracy_rating(train_loader, 'training')
accuracy_rating(test_loader, 'test')
# -

# Very nice. It doesn't even seem to have overfit, and we didn't even need to augment the data.
explorations (development)/Exploration 1 - two noises - cleaning and training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv-datascience # language: python # name: venv-datascience # --- # # Case Study of Wine Quality # # ![wine](wine.jpg) # # <br/><br/> # # - **Wine dataset**: https://archive.ics.uci.edu/ml/datasets/Wine+Quality # - img credit to [dreamstime](https://www.dreamstime.com/types-wine-glasses-flat-vector-cartoon-illustration-isolated-red-white-rose-wines-image155731509) # ## Gather Data import pandas as pd import matplotlib.pyplot as plt white_df = pd.read_csv("Data/winequality-white.csv", sep=";") red_df = pd.read_csv("Data/winequality-red.csv", sep=";") white_df.head() red_df.head() white_df.info() red_df.info() # ## Access Data print("Number of Samples in White Wine dataset: ", white_df.shape[0]) print("Number of Samples in Red Wine dataset: ", red_df.shape[0]) print("Number of columns in White Wine dataset: ", white_df.shape[1]) print("Number of columns in Red Wine dataset: ", red_df.shape[1]) print("Number of missing values in White Wine dataset : ") white_df.isnull().sum() print("Number of missing values in Red Wine dataset : ") red_df.isnull().sum() #check for duplicates print("Number of duplicate rows in White Wine data set: ", sum(white_df.duplicated())) print("Number of duplicate rows in Red Wine data set: ", sum(red_df.duplicated())) #number of uniques values in Quality Feature print("Unique quality for White: ", white_df["quality"].nunique()) print("\nAggregrated per Quality") white_df["quality"].value_counts() #number of uniques values in Quality Feature print("Unique quality for Red: ", red_df["quality"].nunique()) print("\nAggregrated per Quality") red_df["quality"].value_counts() # mean density of the white wine dataset print("Mean density of white wine: ", white_df["density"].mean()) # mean density of the red wine dataset print("Mean density of red wine: ", red_df["density"].mean()) # 
## Appending Data

#add color column to each dataset
white_df["color"] = "White"
white_df.head()

red_df["color"] = "Red"
red_df.head()

# combine both dataset as new df
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported
# equivalent and produces the same stacked frame.
wine_df = pd.concat([white_df, red_df], ignore_index=True)
wine_df.head()

wine_df.tail()

wine_df.info()

# ## Save Combined Dataset

wine_df.to_csv("Data/winequality_cleaned.csv", index=False)

# ---------------
# # Exploring with Visuals

# ## Histograms of various features

wine_df.head()

# +
wine_df["fixed acidity"].plot(kind="hist", bins=20)

#customize with pyplot
plt.grid(axis="y", alpha=0.75)
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.title("Fixed Acidity Histogram")
plt.show()
# +
wine_df["total sulfur dioxide"].plot(kind="hist", bins=20)

#customize with pyplot
plt.grid(axis="y", alpha=0.75)
plt.xlabel('Value')
plt.ylabel('Frequency')
plt.title("Total Sulfur Dioxide Histogram")
plt.show()
# +
wine_df["pH"].plot(kind="hist",bins=20)

#customize with pyplot
plt.grid(axis="y", alpha=0.75)
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.title("pH Histogram")
plt.show()
# +
wine_df["alcohol"].plot(kind="hist", bins=20)

#customize with pyplot
plt.grid(axis="y", alpha=0.75)
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.title("Alcohol Histogram")
plt.show()
# -

# ## Scatterplots of various features & quality

# +
wine_df.plot(kind="scatter", x="volatile acidity", y="quality")
# title typo fixed ("Volatitle" -> "Volatile")
plt.title("Volatile Acidity & Quality correlation")
plt.show()
# +
wine_df.plot(kind="scatter", x="residual sugar", y="quality")
plt.title("Residual Sugar & Quality correlation")
plt.show()
# +
wine_df.plot(kind="scatter", x="pH", y="quality")
plt.title("pH & Quality correlation")
plt.show()
# +
wine_df.plot(kind="scatter", x="alcohol", y="quality")
plt.title("Alcohol & Quality correlation")
plt.show()
# -

# ------------
# ## Drawing Conclusions

# ## Q: Is a certain type of wine (red or white) associated with higher quality?
wine_df.groupby(["color"]).mean()[["quality"]] # ### Conclusion: # White Wine has a slightly higher Average Quality Rating than Red Wine. # ------------------ # ## Q:What level of acidity (pH value) receives the highest average rating? # + # As pH value doesn't have clear catagories, we will create own categories. # View the min, 25%, 50%, 75%, max pH values with Pandas describe wine_df["pH"].describe() # - # Bin edges that will be used to "cut" the data into groups # Fill in this list with five values from descibe bin_edges = [2.72, 3.11, 3.21, 3.32, 4.01] # Labels for the four acidity level groups bin_names = ['High', 'Moderately_High', 'Medium', 'Low'] # + # Creates acidity_levels column wine_df["acidity_levels"] = pd.cut(wine_df["pH"], bin_edges, labels=bin_names) # Checks for successful creation of this column wine_df.head() # - # Find the mean quality of each acidity level with groupby wine_df.groupby(["acidity_levels"]).mean()[["quality"]] # ### Conclusion: # It is found that the higher the quality rating, the lower the acidity level. # ------------ # ## Q: Do wines with higher alcoholic content receive better ratings? 
wine_df.head()

# get the median amount of alcohol content
# (use .median() -- the original called .mean() while every name, comment and
# print statement said "median", so the split below was around the wrong value)
median_alcohol_amt = wine_df["alcohol"].median()
print("Median Alcohol Amount: ", median_alcohol_amt)

# +
# select samples with alcohol content less than the median
below_median_alcohol_wine_df = wine_df.query('alcohol < {}'.format(median_alcohol_amt))

# select samples with alcohol content greater than or equal to the median
above_median_alchohol_wine_df = wine_df.query('alcohol >= {}'.format(median_alcohol_amt))

# ensure these queries included each sample exactly once
wine_df.shape[0] == below_median_alcohol_wine_df.shape[0] + above_median_alchohol_wine_df.shape[0]
# -

# get mean quality rating for the low alcohol and high alcohol groups
mean_below = below_median_alcohol_wine_df["quality"].mean()
mean_above = above_median_alchohol_wine_df["quality"].mean()
print("Mean Quality Rating for Low Alcohol Wine: ", mean_below)
print("Mean Quality Rating for High Alcohol Wine: ", mean_above)

# ### Conclusions:
# It seems like High Alcohol content wine receives a little bit Better Rating than Low Alcohol content wine.

# +
#create bar chart with proper labels
x_positions = [1,2]
labels = ["Low", "High"]
heights = [mean_below, mean_above]

# The list above is named x_positions; the original passed an undefined
# `x_locations` here, which raised a NameError before the chart was drawn.
plt.bar(x_positions, heights, tick_label = labels)
plt.title("Average Quality Rating by Alcohol Content")
plt.xlabel("Alcohol Content")
plt.ylabel("Average Quality Rating")
plt.show()
# -

# -------------------------
# ## Q: Do sweeter wines (more residual sugar) receive better ratings?
# get the median amount of residual sugar
# (use .median() -- the original called .mean() while every name, comment and
# print statement said "median")
median_sugar_amt = wine_df["residual sugar"].median()
print("Median Sugar Amount: ",median_sugar_amt)

# +
# select samples with residual sugar less than the median
below_median_sugar_wine_df = wine_df[wine_df["residual sugar"] < median_sugar_amt]

# select samples with residual sugar greater than or equal to the median
# (the original used a strict '>', which silently dropped every row exactly at
# the cutoff and made the sanity check below evaluate to False)
above_median_sugar_win_df = wine_df[wine_df["residual sugar"] >= median_sugar_amt]

# ensure these queries included each sample exactly once
wine_df.shape[0] == below_median_sugar_wine_df.shape[0] + above_median_sugar_win_df.shape[0]

# +
# get mean quality rating for the low sugar and high sugar groups
below_sugar = below_median_sugar_wine_df["quality"].mean()
above_sugar = above_median_sugar_win_df["quality"].mean()
print("Mean Quality Rating for Low Sugar Wine: ", below_sugar)
print("Mean Quality Rating for High Sugar Wine: ", above_sugar)
# -

# ### Conclusions:
# It seems like Low sugar content wine receive a bit higher quality rating than High sugar content wines.

# +
# Create a bar chart with proper labels
x_positions = [1,2]
labels = ["Low", "High"]
heights = [below_sugar, above_sugar]

plt.bar(x_positions, heights, tick_label = labels)
plt.xlabel("Sugar Content")
plt.ylabel("Average Quality Rating")
plt.title("Average Quality Rating by Sugar Content")
plt.show()
# -

# ---------------
# ## White Vs Red Wine Proportions by Color & Quality
#
# 1. Red bar proportions = counts for each quality rating / total # of red samples
# 2.
White bar proportions = counts for each quality rating / total # of white samples # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.set_style('darkgrid') # - # get counts for each rating and color rating_color_counts = wine_df.groupby(["color", "quality"]).count()["pH"] rating_color_counts # get total counts for each color total_by_color = wine_df.groupby("color").count()["pH"] total_by_color # get proportions by dividing red rating counts by total # of red samples red_proportions = rating_color_counts["Red"] / total_by_color["Red"] red_proportions #As Red wine doesn't have 9 rating group, so add it in the df. so that we can broadcast later. red_proportions["9"] = 0 red_proportions # get proportions by dividing white rating counts by total # of white samples white_proportions = rating_color_counts["White"] / total_by_color["White"] white_proportions # ### Plot proportions on a bar chart # # Set the x coordinate location for each rating group and and width of each bar. # + # the x locations for the groups index = np.arange(len(red_proportions)) # the width of the bars width = 0.35 # + # plot bars red_bars = plt.bar(index, red_proportions, width, color='r', alpha=.7, label='Red Wine') white_bars = plt.bar(index + width, white_proportions, width, color='w', alpha=.7, label='White Wine') # title and labels plt.ylabel('Proportion') plt.xlabel('Quality') plt.title('Proportion by Wine Color and Quality') locations = index + width / 2 # xtick locations labels = ['3', '4', '5', '6', '7', '8', '9'] # xtick labels plt.xticks(locations, labels) # legend plt.legend() plt.show()
Data Analysis/Project - Case Study 1 - Wine Quality/Case Study - Wine Quality.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 ('base') # language: python # name: python3 # --- # # Sequential Monte Carlo # # :::{post} Oct 19, 2021 # :tags: SMC # :category: beginner # ::: # + import aesara.tensor as at import arviz as az import numpy as np import pymc as pm print(f"Running on PyMC v{pm.__version__}") # - az.style.use("arviz-darkgrid") # Sampling from distributions with multiple peaks with standard MCMC methods can be difficult, if not impossible, as the Markov chain often gets stuck in either of the minima. A Sequential Monte Carlo sampler (SMC) is a way to ameliorate this problem. # # As there are many SMC flavors, in this notebook we will focus on the version implemented in PyMC. # # SMC combines several statistical ideas, including [importance sampling](https://en.wikipedia.org/wiki/Importance_sampling), tempering and MCMC. By tempering we mean the use of an auxiliary _temperature_ parameter to control the sampling process. To see how tempering can help let's write the posterior as: # # $$p(\theta \mid y)_{\beta} \propto p(y \mid \theta)^{\beta} \; p(\theta)$$ # # When $\beta=0$ we have that $p(\theta \mid y)_{\beta=0}$ is the prior distribution and when $\beta=1$ we recover the _true_ posterior. We can think of $\beta$ as a knob we can use to gradually _fade up_ the likelihood. This can be useful as in general sampling from the prior is easier than sampling from the posterior distribution. Thus we can use $\beta$ to control the transition from an easy to sample distribution to a harder one. # # A summary of the algorithm is: # # 1. Initialize $\beta$ at zero and stage at zero. # 2. Generate N samples $S_{\beta}$ from the prior (because when $\beta = 0$ the tempered posterior is the prior). # 3. 
Increase $\beta$ in order to make the effective sample size equals some predefined value (we use $Nt$, where $t$ is 0.5 by default). # 4. Compute a set of N importance weights $W$. The weights are computed as the ratio of the likelihoods of a sample at stage $i+1$ and stage $i$. # 5. Obtain $S_{w}$ by re-sampling according to $W$. # 6. Use $W$ to compute the mean and covariance for the proposal distribution, a MVNormal. # 7. For stages other than 0 use the acceptance rate from the previous stage to estimate `n_steps`. # 8. Run N independent Metropolis-Hastings (IMH) chains (each one of length `n_steps`), starting each one from a different sample in $S_{w}$. Samples are IMH as the proposal mean is the of the previous posterior stage and not the current point in parameter space. # 9. Repeat from step 3 until $\beta \ge 1$. # 10. The final result is a collection of $N$ samples from the posterior # # The algorithm is summarized in the next figure, the first subplot shows 5 samples (orange dots) at some particular stage. The second subplot shows how these samples are reweighted according to their posterior density (blue Gaussian curve). The third subplot shows the result of running a certain number of IMH steps, starting from the reweighted samples $S_{w}$ in the second subplot, notice how the two samples with the lower posterior density (smaller circles) are discarded and not used to seed new Markov chains. # # ![SMC stages](smc.png) # # # SMC samplers can also be interpreted in the light of genetic algorithms, which are biologically-inspired algorithms that can be summarized as follows: # # 1. Initialization: set a population of individuals # 2. Mutation: individuals are somehow modified or perturbed # 3. Selection: individuals with high _fitness_ have higher chance to generate _offspring_. # 4. Iterate by using individuals from 3 to set the population in 1. 
#
# If each _individual_ is a particular solution to a problem, then a genetic algorithm will eventually produce good solutions to that problem. One key aspect is to generate enough diversity (mutation step) in order to explore the solution space and hence avoid getting trapped in local minima. Then we perform a _selection_ step to _probabilistically_ keep reasonable solutions while also keeping some diversity. Being too greedy and short-sighted could be problematic: _bad_ solutions in a given moment could lead to _good_ solutions in the future.
#
# For the SMC version implemented in PyMC we set the number of parallel Markov chains $N$ with the `draws` argument. At each stage SMC will use independent Markov chains to explore the _tempered posterior_ (the black arrow in the figure). The final samples, _i.e_ those stored in the `trace`, will be taken exclusively from the final stage ($\beta = 1$), i.e. the _true_ posterior ("true" in the mathematical sense).
#
# The successive values of $\beta$ are determined automatically (step 3). The harder the distribution is to sample the closer two successive values of $\beta$ will be. And the larger the number of stages SMC will take. SMC computes the next $\beta$ value by keeping the effective sample size (ESS) between two stages at a constant predefined value of half the number of draws. This can be adjusted if necessary by the `threshold` parameter (in the interval [0, 1])-- the current default of 0.5 is generally considered as a good default. The larger this value, the higher the target ESS and the closer two successive values of $\beta$ will be. These ESS values are computed from the importance weights (step 4) and not from the autocorrelation like those from ArviZ (for example using `az.ess` or `az.summary`).
#
# Two more parameters that are automatically determined are:
#
# * The number of steps each Markov chain takes to explore the _tempered posterior_ `n_steps`.
This is determined from the acceptance rate from the previous stage. # * The covariance of the MVNormal proposal distribution is also adjusted adaptively based on the acceptance rate at each stage. # # As with other sampling methods, running a sampler more than one time is useful to compute diagnostics, SMC is no exception. PyMC will try to run at least two **SMC _chains_** (do not confuse with the $N$ Markov chains inside each SMC chain). # # Even when SMC uses the Metropolis-Hasting algorithm under the hood, it has several advantages over it: # # * It can sample from distributions with multiple peaks. # * It does not have a burn-in period, it starts by sampling directly from the prior and then at each stage the starting points are already _approximately_ distributed according to the tempered posterior (due to the re-weighting step). # * It is inherently parallel. # ## Solving a PyMC model with SMC # # To see an example of how to use SMC inside PyMC let's define a multivariate Gaussian of dimension $n$ with two modes, the weights of each mode and the covariance matrix. # + n = 4 mu1 = np.ones(n) * (1.0 / 2) mu2 = -mu1 stdev = 0.1 sigma = np.power(stdev, 2) * np.eye(n) isigma = np.linalg.inv(sigma) dsigma = np.linalg.det(sigma) w1 = 0.1 # one mode with 0.1 of the mass w2 = 1 - w1 # the other mode with 0.9 of the mass def two_gaussians(x): log_like1 = ( -0.5 * n * at.log(2 * np.pi) - 0.5 * at.log(dsigma) - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1) ) log_like2 = ( -0.5 * n * at.log(2 * np.pi) - 0.5 * at.log(dsigma) - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2) ) return pm.math.logsumexp([at.log(w1) + log_like1, at.log(w2) + log_like2]) # - with pm.Model() as model: X = pm.Uniform( "X", shape=n, lower=-2.0 * np.ones_like(mu1), upper=2.0 * np.ones_like(mu1), initval=-1.0 * np.ones_like(mu1), ) llk = pm.Potential("llk", two_gaussians(X)) idata_04 = pm.sample_smc(2000) # We can see from the message that PyMC is running four **SMC chains** in parallel. 
As explained before this is useful for diagnostics. As with other samplers one useful diagnostics is the `plot_trace`, here we use `kind="rank_vlines"` as rank plots as generally more useful than the classical "trace" ax = az.plot_trace(idata_04, compact=True, kind="rank_vlines") ax[0, 0].axvline(-0.5, 0, 0.9, color="k") ax[0, 0].axvline(0.5, 0, 0.1, color="k") f'Estimated w1 = {np.mean(idata_04.posterior["X"] < 0).item():.3f}' # From the KDE we can see that we recover the modes and even the relative weights seems pretty good. The rank plot on the right looks good too. One SMC chain is represented in blue and the other in orange. The vertical lines indicate deviation from the ideal expected value, which is represented with a black dashed line. If a vertical line is above the reference black dashed line we have more samples than expected, if the vertical line is below the sampler is getting less samples than expected. Deviations like the ones in the figure above are fine and not a reason for concern. # # As previously said SMC internally computes an estimation of the ESS (from importance weights). Those ESS values are not useful for diagnostics as they are a fixed target value. We can compute the ESS values from the trace returned by `sample_smc`, but this is also not a very useful diagnostics, as the computation of this ESS value takes autocorrelation into account and each SMC run/chain has low autocorrelation by construction, for most problems the values of ESS will be either very close to the number of total samples (i.e. draws x chains). In general it will only be a low number if each SMC chain explores a different mode, in that case the value of ESS will be close to the number of modes. # ## Kill your darlings # # SMC is not free of problems, sampling can deteriorate as the dimensionality of the problem increases, in particular for multimodal posterior or _weird_ geometries as in hierarchical models. To some extent increasing the number of draws could help. 
Increasing the value of the argument `p_acc_rate` is also a good idea. This parameter controls how the number of steps is computed at each stage. To access the number of steps per stage you can check `trace.report.nsteps`. Ideally SMC will take a number of steps lower than `n_steps`. But if the actual number of steps per stage is `n_steps`, for a few stages, this may be signaling that we should also increase `n_steps`. # # Let's see the performance of SMC when we run the same model as before, but increasing the dimensionality from 4 to 80. # + n = 80 mu1 = np.ones(n) * (1.0 / 2) mu2 = -mu1 stdev = 0.1 sigma = np.power(stdev, 2) * np.eye(n) isigma = np.linalg.inv(sigma) dsigma = np.linalg.det(sigma) w1 = 0.1 # one mode with 0.1 of the mass w2 = 1 - w1 # the other mode with 0.9 of the mass def two_gaussians(x): log_like1 = ( -0.5 * n * at.log(2 * np.pi) - 0.5 * at.log(dsigma) - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1) ) log_like2 = ( -0.5 * n * at.log(2 * np.pi) - 0.5 * at.log(dsigma) - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2) ) return pm.math.logsumexp([at.log(w1) + log_like1, at.log(w2) + log_like2]) # - with pm.Model() as model: X = pm.Uniform( "X", shape=n, lower=-2.0 * np.ones_like(mu1), upper=2.0 * np.ones_like(mu1), initval=-1.0 * np.ones_like(mu1), ) llk = pm.Potential("llk", two_gaussians(X)) idata_80 = pm.sample_smc(2000) # We see that SMC recognizes this is a harder problem and increases the number of stages. We can see that SMC still sample from both modes but now the model with higher weight is being oversampled (we get a relative weight of 0.99 instead of 0.9). Notice how the rank plot looks worse than when n=4. 
ax = az.plot_trace(idata_80, compact=True, kind="rank_vlines") ax[0, 0].axvline(-0.5, 0, 0.9, color="k") ax[0, 0].axvline(0.5, 0, 0.1, color="k") f'Estimated w1 = {np.mean(idata_80.posterior["X"] < 0).item():.3f}' # You may want to repeat the SMC sampling for n=80, and change one or more of the default parameters too see if you can improve the sampling and how much time the sampler takes to compute the posterior. # %load_ext watermark # %watermark -n -u -v -iv -w -p xarray
examples/samplers/SMC2_gaussians.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/joyjixu/qm2_resources/blob/main/data_preprocessing/merge_tweets_days.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="VnRbJKhQE-4T" # Since the tweet csvs we hydrated are hour-by-hour, we need to merge them to get one csv per day in April # # First you'll need to upload all 24 csvs for one day onto Colab, and replace the value of the 'day' variable below with the actual day # + id="ocGTGnn_I2c2" day = '01' #format: dd # + id="rqlGq_ghJMgM" import pandas as pd # + colab={"base_uri": "https://localhost:8080/"} id="boQLm4VLE0jW" outputId="697193d3-15f5-4e67-fb3a-2682ff41f2cb" # we initialize the day dataframe to be the one for hour 00, then we will concatenate all the other hours to it df_day = pd.read_csv('coronavirus_tweets_us_04_{day}_00.csv'.format(day=day), engine='python') for i in range(1,24): if i < 10: hour = '0{}'.format(i) else: hour = '{}'.format(i) path = '/content/coronavirus_tweets_us_04_{day}_{hour}.csv'.format(day=day, hour=hour) print(path) df_hour = pd.read_csv(path, engine='python') df_day = pd.concat([df_day,df_hour], ignore_index = True) # + colab={"base_uri": "https://localhost:8080/"} id="v3pmuJTKE9K3" outputId="4c3ef451-b704-47ba-b606-23550927b7c0" df_day.info # + colab={"base_uri": "https://localhost:8080/", "height": 869} id="YhZp3R4f_wZZ" outputId="b47a9f36-dc0b-4cc6-81f3-a243ad5f4135" df_day # + [markdown] id="pokSMpBs-35d" # Finally we can save the day as a csv again (you don't need to change the file name here) # + id="VkYVZvDMOJNQ" df_day.to_csv('daily_coronavirus_tweets_us_04_{day}.csv'.format(day=day)) # + [markdown] id="kZOuWZqh_Lxk" # Make sure 
to download!
data_preprocessing/cleaning_tweets/merge_tweets_days.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Align paralogous contigs to reference

# If you applied the `--keep-paralogs` flag in the SECAPR `find_target_contigs` function, the function will print a text file with paralogous information into the subfolder of each sample. This file contains the information about the locus id (first column) in the reference file, followed by a list of contig headers that were matched to this locus:

# + language="bash"
# head -n 10 ../../data/processed/target_contigs_paralogs/1061/info_paralogous_loci.txt
# -

# We can use the SECAPR `paralogs_to_ref` function to extract the sequences of all these contigs and align them with their respective reference sequence. This will give an idea of where the contigs map on the reference and can help decide if these are truly potential paralogs or more likely the result of non-optimal contig assembly (clusters of homologous reads are broken into separate contigs) or if contigs from other parts of the genome map to the reference due to the presence of e.g. repetitive regions or other common sequence patterns.
#
# We need to provide the function the following input items:
# - path to the de novo contig files
# - path to the reference fasta file, which was used to extract target contigs
# - path to the extracted target contigs (output of `find_target_contigs` function)
#
# The command looks as follows:
#
# `secapr paralogs_to_ref --contigs ../../data/processed/contigs --reference ../../data/raw/palm_reference_sequences.fasta --target_contigs ../../data/processed/target_contigs_paralogs_info --output ../../data/processed/paralogs_to_reference`

# Depending on how many paralogous loci were identified in your samples this can take several minutes. The script will store the final alignments of the contigs and the reference sequences for each sample in the `paralog_alignments` folder in the provided output path. Let's look at one exemplary alignment. You can view alignments using alignment viewers such as e.g. [AliView](http://ormbunkar.se/aliview/):

# Load and display the example alignment screenshot bundled with the docs.
from IPython.display import Image, display
img1 = Image("../../images/paralog_contig_alignment.png",width=1000)
display(img1)

# In this example you see the reference sequence in the first line starts with a repetitive sequence (CTCTCTCTC....). Many contigs that only consist of this repetitive sequence appear to map to it. Only one of the contigs, however, appears to be a true match, as it exceeds the repetitive region and has a lot of overlap with the reference sequence. A locus like this doesn't need to be excluded, since we see that even though it's flagged as paralogous, there are no true signs of paralogy.

# [Back](extract_contigs.ipynb)
docs/notebook/subdocs/align_paralogs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Belaschich/Projeto_Final_SoulCode/blob/main/ProjetoFinal_(Spark_Oficial)_19_11.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="SSkAJhCPmuiL" # # IMPORT BIBLIOTECAS E ACESSO GCP # + id="6LwlkjPVJ1mT" colab={"base_uri": "https://localhost:8080/"} outputId="1ade19c9-8b66-4ea1-934a-0a911b856d3e" pip install gcsfs # + id="43ufK9DYKPVx" #BIBLIOTECA PARA CHAVE DE ACESSO GCP import os # + id="kn-PFsrUK_Ha" #IMPORT PANDAS E STORAGE import pandas as pd from google.cloud import storage # + id="2lr2x2GbKRTz" #IMPORTA BASE DE ACESSO GCP service_account_key = r"/content/chaveprogrupo10-332518-4db3212adcd1.json" os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = service_account_key # + id="M6Pbcy_kvwaS" colab={"base_uri": "https://localhost:8080/"} outputId="f0fd1a8f-0847-4ef4-ab59-5b03c07ec738" pip install pyspark # + id="8RxYrSHTv4Hc" #IMPORT BIBLIOTECAS SPARK from pyspark.sql import SparkSession import pyspark.sql.functions as F from pyspark.sql.window import Window from pyspark.sql.types import StructField, StructType, StringType, IntegerType, DoubleType, DateType # + id="IRcfmIKLv4xx" #PREPARAÇÃO SPARK SESSION spark = (SparkSession.builder .master("local") .appName("novo-dataframes") .config("spark.ui.port", "4050") .getOrCreate()) # + id="srkbbUn6b_MP" #IMPORTAÇÃO BASE "AMERICA DO NORTE" SALVO NO GCP df1 = pd.read_csv('gs://bucket-projeto-g10/saida_tratado/nivel_pandera/Base_ConsumoNA (2.0).csv') # + [markdown] id="pej0FLIDnC9f" # # ALTERAÇÕES PANDAS # # + id="u-Y3ugAIdlqW" #RENOMEIA A COLUNA "ESTADO" PARA "UF" df1 = df1.rename({'Estado': 'UF'}, axis = 1) # + id="iz-YU_4sdnfj" #CRIA UMA 
COLUNA "CLIMA" COM VALORES NaN df1['Clima'] = 'NaN' df1 # + id="6nArvRCbdoas" #INSERE DADOS NA COLUNA "CLIMA" COM CONDIÇÕES for index, row in df1.iterrows(): if row['UF'] == 'Alaska': df1.loc[index,'Clima'] = str('Polar') df1.loc[index,'Clima'] = str('Seco') elif row['UF'] == 'Connecticut' or row['UF'] == 'Massachusetts'or row['UF'] == 'Minnesota' or row['UF'] == 'Dakota do Norte': df1.loc[index,'Clima'] = str('Continental' elif row['UF'] == 'Colorado':) elif row['UF'] == 'Delaware' or row['UF'] == 'Florida' or row['UF'] == 'Illinois' or row['UF'] == 'Kentucky' or row['UF'] == 'Missouri'or row['UF'] == 'Tennessee' or row['UF'] == 'Texas' or row['UF'] == 'Iowa': df1.loc[index,'Clima'] = str('Subtropical') df1 # + id="9WaME988drtH" #VALIDAÇÃO DA COLUNA "CLIMA" pd.unique(df1['Clima']) # + id="fsIebeUwdskw" #VERIFICA DADOS UNICOS DA COLUNA "UF" pd.unique(df1['UF']) # + id="ZZDkrarPdtql" #CRIA UMA COLUNA "ESTAÇÃO DO ANO" COM VALORES NaN df1['EstacaoDoAno'] = 'NaN' df1 # + id="23c6iJDSdwQT" #INSERE DADOS NA COLUNA "ESTAÇÃO DO ANO" COM CONDIÇÕES for index, row in df1.iterrows(): if row['Mes'] == 6 or row['Mes'] == 7 or row['Mes'] == 8: df1.loc[index,'EstacaoDoAno'] = str('Verão') elif row['Mes'] == 12 or row['Mes'] == 1 or row['Mes'] == 2: df1.loc[index,'EstacaoDoAno'] = str('Inverno') elif row['Mes'] == 3 or row['Mes'] == 4 or row['Mes'] == 5: df1.loc[index,'EstacaoDoAno'] = str('Primavera') elif row['Mes'] == 9 or row['Mes'] == 10 or row['Mes'] == 11: df1.loc[index,'EstacaoDoAno'] = str('Outono') df1 # + id="npOJc8b3d0Sx" #RENOMEIA OS DADOS DA COLUNA "UF" df1['UF'].replace('Alaska', 'AK', inplace=True) df1['UF'].replace('Colorado', 'CO', inplace=True) df1['UF'].replace('Connecticut', 'CT', inplace=True) df1['UF'].replace('Delaware', 'DE', inplace=True) df1['UF'].replace('Florida', 'FL', inplace=True) df1['UF'].replace('Illinois', 'IL', inplace=True) df1['UF'].replace('Kentucky', 'KY', inplace=True) df1['UF'].replace('Massachusetts', 'MA', inplace=True) 
df1['UF'].replace('Minnesota', 'MN', inplace=True) df1['UF'].replace('Missouri', 'MO', inplace=True) df1['UF'].replace('Dakota do Norte', 'ND', inplace=True) df1['UF'].replace('Tennessee', 'TN', inplace=True) df1['UF'].replace('Texas', 'TX', inplace=True) # + [markdown] id="dvEInwyOo3lm" # # NÍVEL SPARK (TRATATIVA) # + id="rY_2jUr1w3h9" #CRIAÇÃO DO "STRUCTURED TYPE" PARA OS DADOS DO DATAFRAME "AMÉRICA DO NORTE" customSchema = StructType([ StructField("Ano", IntegerType(), True), StructField("Mes", IntegerType(), True), StructField("MesRef", StringType(), True), StructField("UF", StringType(), True), StructField("Categoria", StringType(), True), StructField("Galoes", IntegerType(), True), StructField("Etanol", IntegerType(), True), StructField("Populacao", IntegerType(), True), StructField("EtanolPerCapita", DoubleType(), True), StructField("Clima", StringType(), True), StructField("EstacaoDoAno", StringType(), True), ]) # + id="w8BPtBy8v_Vd" #CRIA UM DATAFRAME SPARK, COM BASE NOS DADOS DO DF "AN" CRIADO EM PANDAS df1_spark = spark.createDataFrame(df1, schema=customSchema) # + id="gOZtNm7B2plB" #EXIBE OS DADOS DO DF CONVERTIDO df1_spark.show() # + id="s8InVPORNJf8" df1_spark.printSchema() # + id="iNvQJ7d9eE6j" #CRIA UM RANK SOBRE A QUANTIDADE DE ETANOL VENDIDO w0 = Window.orderBy(F.col("Etanol")) df2 = df1_spark.withColumn("rank", F.row_number().over(w0)) df2.select(F.col("Ano"), F.col("Mes"), F.col("UF"), F.col("Etanol") , F.col('Populacao'), F.col("rank")).show(10) # + id="HvhL13CVeGnR" #CRIA UMA COLUNA DE GALÕES PER CAPITA (GALÕES / POPULAÇÃO) df2 = df1_spark.withColumn("GaloesPerCapita", F.col("Galoes") / F.col("Populacao")) df2.show() # + [markdown] id="3H4uKKRTwWUc" # # INSIGHTS SPARK # + id="ZHrXHaK8pwUM" #CRIA UM AGRUPAMENTO DE GALOES PER CAPITA POR "UF" group1 = (df2.groupBy(F.col("UF")).agg( F.sum("GaloesPerCapita").alias("Soma_GaloesPC"), F.avg("GaloesPerCapita").alias("Med_GaloesPC") ) ).orderBy(F.sum('GaloesPerCapita').desc()) group1.show() #INSIGHTS: 
# North Dakota showed the highest consumption (average of ~1 gallon per capita);
# Kentucky showed the lowest (~0.7 gallons per capita).

# + id="U7abGrFfqWio"
# Per-capita gallons grouped by "MesRef" (reference month) and "UF".
group2 = (
    df2.groupBy(F.col("MesRef"), F.col("UF"))
       .agg(
           F.sum("GaloesPerCapita").alias("Soma_GaloesPC"),
           F.avg("GaloesPerCapita").alias("Med_GaloesPC"),
       )
       .orderBy(F.col('MesRef').desc())
)
group2.show(1000)
# INSIGHT: can be used to build a period study with year and UF filters.

# + id="p_h6Oi06sEbL"
# Rank rows by "GaloesPerCapita" per period and UF (descending).
w1 = Window.orderBy(F.col("GaloesPerCapita").desc())
df2_rank = df2.withColumn("rank", F.row_number().over(w1))
df2_rank.select(F.col("Ano"), F.col("Mes"), F.col("UF"), F.col("GaloesPerCapita"), F.col("rank")).show(1000)
# Quick insight into the highest consumption by period and UF.

# + id="2XEuLettuwbs"
# Rank rows by "EtanolPerCapita" sold (descending).
w1 = Window.orderBy(F.col("EtanolPerCapita").desc())
# BUG FIX: the original ranked with `.over(w0)` — the earlier ascending
# "Etanol" window — so the rank column contradicted the EtanolPerCapita
# ordering declared on the line above. It now uses `w1` as intended.
df2_rank = df2.withColumn("rank", F.row_number().over(w1))
df2_rank.select(F.col("Ano"), F.col("Mes"), F.col("UF"), F.col("EtanolPerCapita"), F.col("rank")).show(1000)
# Quick insight into the highest ethanol consumption by period and UF.

# + id="z25Vk2zwvFce"
# Total per-capita gallons sold by climate type.
(
    df2.groupBy(F.col("Clima"))
       .agg(
           F.sum("GaloesPerCapita").alias("Sum_GaloesPerCapita"),
       )
       .show()
)
# INSIGHTS: highest consumption in subtropical regions; lowest in polar regions.

# + [markdown] id="mIPpMPcFweEt"
# # SPARK SQL

# + id="FaMaV6oqyTez"
# Register the dataframe as a temporary SQL view.
df2.createOrReplaceTempView("Tab_AN")

# + id="31KLOpOUxmKl"
# Display the newly created "Tab_AN" view.
spark.sql("SELECT * FROM Tab_AN").show()

# + id="WmpqJsSwzHoq"
# Left-pad MesRef to a fixed MMYYYY width and expose the result as the more
# workable view "Tab_AN_Novo".
spark.sql(
    '''
    SELECT CASE WHEN CHARACTER_LENGTH(MesRef) == 5
                THEN CONCAT('0', MesRef)
                ELSE MesRef
           END AS MesRefNovo,
           UF, Categoria, Galoes, Etanol, Populacao, EtanolPerCapita,
           Clima, EstacaoDoAno, GaloesPerCapita
    FROM Tab_AN
    '''
).createOrReplaceTempView("Tab_AN_Novo")

# + id="buFDH6gA26nz"
# Consumption by population and state.
spark.sql(
    '''
    SELECT UF,
           ROUND(SUM(Populacao), 2) AS SUM_POPULACAO,
           ROUND(SUM(EtanolPerCapita), 2) AS SUM_ETANOLPC,
           ROUND(SUM(GaloesPerCapita), 2) AS SUM_GALOESPC,
           ROUND(AVG(GaloesPerCapita), 2) AS MED_GALOESPC
    FROM Tab_AN_Novo
    GROUP BY UF
    ORDER BY UF
    '''
).show(1000000)
# INSIGHTS: highest consumption — North Dakota (gallons pc) and Delaware
# (ethanol pc); lowest — Kentucky (both measures).

# + id="ao-9kROm4MOM"
# Sum of per-capita gallons by category and state.
spark.sql(
    '''
    SELECT UF, CATEGORIA,
           ROUND(SUM(GaloesPerCapita), 2) AS SUM_GALOESPC
    FROM Tab_AN_Novo
    GROUP BY UF, CATEGORIA
    ORDER BY UF, CATEGORIA
    '''
).show(1000000)
# INSIGHTS: top beer consumer — North Dakota; top spirits consumer — Delaware;
# top wine consumer — Delaware.

# + id="k2X_-mtG5S3y"
# Consumption by year and semester.
spark.sql(
    '''
    SELECT RIGHT(MesRefNovo, 4) AS ANO,
           CASE WHEN LEFT(MesRefNovo, 2) BETWEEN 01 AND 06
                THEN 'Primeiro Semestre'
                ELSE 'Segundo Semestre'
           END AS Semestre,
           ROUND(SUM(EtanolPerCapita), 2) AS SUM_ETANOLPC,
           ROUND(SUM(GaloesPerCapita), 2) AS SUM_GALOESPC,
           ROUND(AVG(GaloesPerCapita), 2) AS MED_GALOESPC
    FROM Tab_AN_Novo
    GROUP BY ANO, Semestre
    ORDER BY ANO, Semestre
    '''
).show(10000)
# INSIGHTS: highest consumption in the second semester of 2020; lowest in the
# first semesters of 2019 and 2020. 2021 is ignored (only 2 months of data).

# + id="JEhqGWzU9LKV"
# Consumption by season of the year.
spark.sql(
    '''
    SELECT RIGHT(MesRefNovo, 4) AS ANO,
           EstacaoDoAno,
           ROUND(SUM(EtanolPerCapita), 2) AS SUM_ETANOLPC,
           ROUND(SUM(GaloesPerCapita), 2) AS SUM_GALOESPC,
           ROUND(AVG(GaloesPerCapita), 2) AS MED_GALOESPC
    FROM Tab_AN_Novo
    GROUP BY ANO, EstacaoDoAno
    ORDER BY ANO, EstacaoDoAno
    '''
).show(10000)
# INSIGHTS: highest overall consumption in summer; lowest in winter.

# + id="cNIGTExZ-a5E"
# Consumption by product (category) in each season.
spark.sql(
    '''
    SELECT EstacaoDoAno, Categoria,
           ROUND(SUM(GaloesPerCapita), 2) AS SUM_GALOESPC
    FROM Tab_AN_Novo
    GROUP BY EstacaoDoAno, Categoria
    ORDER BY EstacaoDoAno, Categoria
    '''
).show(10000)
# INSIGHTS: beer peaks in summer; spirits peak in winter; wine peaks in winter.

# + [markdown] id="Fo-HvSG1Z6-k"
# # PLOTAGEM PANDAS
#

# + id="PgdNq7fTZ_N6"
# Convert the Spark dataframe to pandas for plotting.
df2_plot = df2.toPandas()

# + id="858kohHChwJF"
g2 = df2_plot.groupby(['Ano', 'Mes'])['GaloesPerCapita'].agg('mean')

# + id="ZN4ZdVAKh4Rx"
g2.plot(x="Ano", y=["GaloesPerCapita"], kind="bar", figsize=(20, 8))
# INSIGHTS: highest overall consumption May-August; lowest January-February.

# + id="SounfthBgOLb"
g3 = df2_plot.groupby(['Ano', 'Mes'])['EtanolPerCapita'].agg('mean')

# + id="bkLHiM9mgwBp"
g3.plot(x="Ano", y=["EtanolPerCapita"], kind="bar", figsize=(20, 8))
# INSIGHTS: similar behaviour to the gallons-per-capita series.

# + id="RO5RwOU2jmZj"
filtro_ano = df2_plot.Ano == 2020
df3_plot = df2_plot.loc[filtro_ano]
g3 = df3_plot.groupby(['UF'])['EtanolPerCapita'].agg('mean')

# + id="kheRS0f1knuC"
g3.plot(x="UF", y=["EtanolPerCapita"], kind="line", figsize=(20, 8))
# INSIGHTS: in 2020 Delaware was the largest consumer and Kentucky the smallest.

# + id="jTj4FkQ6k2Yd"
filtro_ano = df2_plot.Ano == 2020
df4_plot = df2_plot.loc[filtro_ano]
g4 = df4_plot.groupby(['UF'])['GaloesPerCapita'].agg('mean')

# + id="Fw96RLYKlAjX"
g4.plot(x="UF", y=["GaloesPerCapita"], kind="barh", figsize=(20, 8))
# INSIGHTS: in 2020 Kentucky and Connecticut were the smallest consumers,
# with values close to each other.

# + [markdown] id="PfBk2hK9wbpB"
# # EXPORTAÇÃO

# + id="vh56V0Asj7Aj"
#df2.toPandas().to_csv('gs://bucket-projeto-g10/saida_tratado/nivel_spark/Base_ConsumoNA (3.0).csv', index=False) # + id="4bztTqNHORA2" #df2.printSchema()
ProjetoFinal_(Spark_Oficial)_19_11.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Spyder) # language: python3 # name: python3 # --- import tensorflow as tf import numpy as np # ### Strings in Tensorflow constant t = tf.constant("kaffee") t print(tf.strings.length(t)) print(tf.strings.unicode_decode(t, "utf-8")) # ### RaggedTensor # + r_ts1 = tf.ragged.constant([[11, 12], [11, 12, 13], [], [8]]) # tensor with 4 rows r_ts2 = tf.ragged.constant([[1, 2], [], [8]]) # tensor with 3 rows r_ts3 = tf.ragged.constant([[3, 4], [], [9]]) # concate two tensors -> append by rows print(tf.concat([r_ts1, r_ts2], axis=0)) # tensor with 7 rows # r_ts2 and r_ts3 have the same shape and can be concated by axis 1 print(tf.concat([r_ts2, r_ts3], axis=1)) # - # links buendig print(r_ts3.to_tensor()) # from [3, 4], [], [9] to [3, 4], [0, 0], [9, 0] # ### Sparse Tensor s_ts = tf.SparseTensor( indices = [[0, 1], [1, 0], [2, 3]], # position for value values = [1, 2, 3], # value on position dense_shape = [3, 4] # tensor shape ) print(tf.sparse.to_dense(s_ts)) # normaly sparse tensor must be reorded to present data in right order s_ts1 = tf.sparse.reorder(s_ts) print(tf.sparse.to_dense(s_ts1))
8_tensorflow_lowlevel_api/2_tensorflow_string.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <font color='blue'>Big Data Real-Time Analytics com Python e Spark</font>

# +
import pyspark
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession

# Local Spark context + session for the examples below.
conf = pyspark.SparkConf().setAppName('Inicial').setMaster('local')
sc = pyspark.SparkContext(conf=conf)
spark = SparkSession(sc)
# -

# Visit http://localhost:4040 whenever you want to follow job execution.

# # Transformações

# A plain Python list.
lista1 = [124, 901, 652, 102, 397]

type(lista1)

# Build an RDD from an in-memory collection.
lstRDD = sc.parallelize(lista1)

type(lstRDD)

lstRDD.collect()

lstRDD.count()

# Build an RDD by loading a file.
autoDataRDD = sc.textFile("carros.csv")

type(autoDataRDD)

# Actions.
autoDataRDD.first()

autoDataRDD.take(5)

# Every action triggers a fresh computation over the data, so we cache the RDD
# to let later actions reuse it without recomputing.
autoDataRDD.cache()

for line in autoDataRDD.collect():
    print(line)

# map() producing a new RDD — a transformation, evaluated lazily.
tsvData = autoDataRDD.map(lambda x: x.replace(",", "\t"))

tsvData.take(5)

autoDataRDD.first()

# filter() producing a new RDD — also a lazy transformation.
toyotaData = autoDataRDD.filter(lambda x: "toyota" in x)

# Actions.
toyotaData.count()

toyotaData.take(20)

# The RDD contents can be saved; Spark hands the data back through the master
# process and an output file is written.
savedRDD = open("carros_v2.csv", "w")
savedRDD.write("\n".join(toyotaData.collect()))
savedRDD.close()

# ## Operações Set

# Set operations.
palavras1 = sc.parallelize(["Big Data", "Data Science", "Analytics", "Visualization"])
palavras2 = sc.parallelize(["Big Data", "R", "Python", "Scala"])

# Union (deduplicated).
for unions in palavras1.union(palavras2).distinct().collect():
    print(unions)

# Intersection.
for intersects in palavras1.intersection(palavras2).collect():
    print(intersects)

rdd01 = sc.parallelize(range(1, 10))
rdd02 = sc.parallelize(range(10, 21))
rdd01.union(rdd02).collect()

rdd01 = sc.parallelize(range(1, 10))
rdd02 = sc.parallelize(range(5, 15))
rdd01.intersection(rdd02).collect()

# ## Left/Right Outer Join

names1 = sc.parallelize(("banana", "uva", "laranja")).map(lambda a: (a, 1))
names2 = sc.parallelize(("laranja", "abacaxi", "manga")).map(lambda a: (a, 1))

names1.join(names2).collect()

names1.leftOuterJoin(names2).collect()

names1.rightOuterJoin(names2).collect()

# ## Distinct

# distinct() removes the duplicated values.
lista1 = [124, 901, 652, 102, 397, 124, 901, 652]
lstRDD = sc.parallelize(lista1)
for numbData in lstRDD.distinct().collect():
    print(numbData)

# ## Transformação e Limpeza

def LimpaRDD(autoStr):
    """Normalise one CSV row of the car data set.

    Integers (reduce accumulators) pass through unchanged; strings are split,
    the door-count field is mapped to a numeric string, and an upper-cased
    copy of field 4 is written into field 5.
    """
    if isinstance(autoStr, int):
        return autoStr
    fields = autoStr.split(",")
    # Map the number of doors to a numeric string.
    fields[3] = "2" if fields[3] == "two" else "4"
    # NOTE(review): kept as in the original — the comment there said
    # "uppercase the car model", yet it copies field 4 (upper-cased) into
    # field 5. Confirm which column index holds the model before changing.
    fields[5] = fields[4].upper()
    return ",".join(fields)

RDD_limpo = autoDataRDD.map(LimpaRDD)

RDD_limpo.collect()

# ## Ações

# reduce() — sum of the values.
lista2 = [124, 901, 652, 102, 397, 124, 901, 652]
lstRDD = sc.parallelize(lista2)

lstRDD.collect()

lstRDD.reduce(lambda x, y: x + y)

# Find the line with the fewest characters.
autoDataRDD.reduce(lambda x, y: x if len(x) < len(y) else y)

def getMPG(autoStr):
    """Extract the MPG field of a row as an int (0 when non-numeric).

    Integers pass through unchanged so the function is safe to apply to the
    running accumulator inside reduce().
    """
    if isinstance(autoStr, int):
        return autoStr
    fields = autoStr.split(",")
    return int(fields[9]) if fields[9].isdigit() else 0

# Average MPG over all cars (the -1 discounts the header line).
autoDataRDD.reduce(lambda x, y: getMPG(x) + getMPG(y)) / (autoDataRDD.count() - 1)

teams = sc.parallelize(("Flamengo", "Vasco", "Botafogo", "Fluminense", "Palmeiras", "Bahia"))

teams.takeSample(True, 3)

teams = sc.parallelize(("Flamengo", "Vasco", "Botafogo", "Fluminense", "Palmeiras", "Bahia", "Bahia", "Vasco"))

teams.map(lambda k: (k, 1)).countByKey().items()

savedRDD = open("SaveRDD.csv", "w")
savedRDD.write("\n".join(autoDataRDD.collect()))
savedRDD.close()
#print(tempFile.name)
Spark_01_Transformacoes_acoes/Spark_transformacoes_acoes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Data Visualization with Modern Data Science # # > Analyzing Data with Tableau # # Kuo, Yao-Jen # + [markdown] slideshow={"slide_type": "subslide"} # ## TL; DR # # > In this lecture, we will release Tableau's plotting capability and use calculated fields if current fields are unable to answer our questions. # + [markdown] slideshow={"slide_type": "slide"} # ## Creating More Plots with Tableau # + [markdown] slideshow={"slide_type": "subslide"} # ## In the last chapter, we've created # # - Scatter to explore correlations # - Histogram to explore distributions # - Bar to explore rankings # - Line to explore trends # - Box to explore distributions based on categories # + [markdown] slideshow={"slide_type": "subslide"} # ## In fact, we might confidently say that MOST(probably 87%?) of the exploratory analysis are done # + [markdown] slideshow={"slide_type": "subslide"} # ## However, there are still plenty of plots that is often used for certain purposes # + [markdown] slideshow={"slide_type": "subslide"} # ## Area works like line, but it involves the information of combinations # + [markdown] slideshow={"slide_type": "subslide"} # ## Using area to explore trends and combination at the same time # # - Create another worksheet # - Change `Year` data type to `Date` # - Dragging `Year` onto Columns # - Dragging `Pop` onto Rows # - Dragging `Continent` to Color # - Click Show Me then click **area charts(continuous)** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple area! # # ![Imgur](https://i.imgur.com/TZdYCsJ.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## What are those (discrete), anyway? 
# # - lines(discrete) # - area charts(discrete) # + [markdown] slideshow={"slide_type": "subslide"} # ## These are designed for the date variables with YYYY-MM-DD information # + [markdown] slideshow={"slide_type": "subslide"} # ## Take area charts(discrete) for a quick example # # Let's get a new data source. # # [Right click to download](https://python4ds.s3-ap-northeast-1.amazonaws.com/fang_volume.csv) # + [markdown] slideshow={"slide_type": "subslide"} # ## Use Visual Studio Code to open `fang_volume.csv` # # ![Imgur](https://i.imgur.com/NVnabMr.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Adding a new data source to our workbook # # ![Imgur](https://i.imgur.com/gPwnndk.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Area(discrete) to explore trends and combination at the same time # # - Create another worksheet # - Dragging `Date` onto Columns # - Dragging `Volume` onto Rows # - Dragging `Symbol` to Color # - Click Show Me then click **area charts(discrete)** # - Drill down `Date` variable # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple area(discrete)! 
# # ![Imgur](https://i.imgur.com/lCoFH8c.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Bullet works like bar, but it involves comparison between 2 measures # + [markdown] slideshow={"slide_type": "subslide"} # ## Get a summarized version of gapminder # # [Right click to download](https://python4ds.s3-ap-northeast-1.amazonaws.com/gapminder_pop_by_cont_year.csv) # + [markdown] slideshow={"slide_type": "subslide"} # ## Use Visual Studio Code to open `gapminder_pop_by_cont_year.csv` # # ![Imgur](https://i.imgur.com/7pDJCSo.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Adding a new data source to our workbook # # ![Imgur](https://i.imgur.com/gPwnndk.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Check if the first row is treated as field names # # ![Imgur](https://i.imgur.com/18vMAEb.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Using bullet to explore the comparison between 2 measures # # - Create another worksheet # - Dragging both `2002` and `2007` onto Columns # - Dragging `Continent` onto Rows # - Click Show Me then click **bullet graph** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple bullet! # # ![Imgur](https://i.imgur.com/uwtM65F.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## To swap the two measures, right-click the axis and select Swap Reference Line Fields # # ![Imgur](https://i.imgur.com/JVUTPy9.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Circle views work like box # + [markdown] slideshow={"slide_type": "subslide"} # ## Using circle views to explore distributions based on categories # # - Create another worksheet # - Dragging `Continent` and `Country` onto Columns # - Dragging `Gdp percap` onto Rows # - Replacing Measure with Dimension # - Dragging `Continent` onto Color # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple circle views! 
# # ![Imgur](https://i.imgur.com/5FaTQCN.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Dual combination integrates a bar and a line with dual axis # + [markdown] slideshow={"slide_type": "subslide"} # ## Using dual combination to explore trends with 2 measures # # - Create another worksheet # - Dragging `Year` onto Columns # - Dragging `Pop` and `Life Exp` onto Rows # - Replacing the measure of `SUM(Life Exp)` with `AVG(Life Exp)` # - Click Show Me then click **dual combination** # - Adjust `SUM(Pop)` with bar, `AVG(Life Exp)` with line # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple dual combination! # # ![Imgur](https://i.imgur.com/llBcMSU.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Dual line integrates 2 lines with dual axis # + [markdown] slideshow={"slide_type": "subslide"} # ## Using dual line to explore trends with 2 measures # # - Create another worksheet # - Dragging `Year` onto Columns # - Dragging `Gdp Percap` and `Life Exp` onto Rows # - Replacing the measure of `SUM()` with `AVG()` # - Click Show Me then click **dual line** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple dual line! 
# # ![Imgur](https://i.imgur.com/oSLjqE9.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Heat maps works like scatter, but it is used to visualize patterns or trends in dense data with many overlapping marks particularly # + [markdown] slideshow={"slide_type": "subslide"} # ## Using heat maps to explore correlation and the density of data points # # - Create another worksheet # - Dragging `Gdp Percap` onto Columns # - Dragging `Life Exp` onto Rows # - Replacing Measure with Dimension # - On the Marks card, select Density from the menu # - Adjusting Color or Size for your preference # + [markdown] slideshow={"slide_type": "subslide"} # ## Selecting Density from the menu # # ![Imgur](https://i.imgur.com/EtXdWbw.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Adjusting Color # # ![Imgur](https://i.imgur.com/SnBhl6m.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Adjusting Size # # ![Imgur](https://i.imgur.com/FTFWomZ.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple heat map! # # ![Imgur](https://i.imgur.com/iXEE5rY.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Highlight tables are similar to heat maps in that they display data in various categories using different colors # + [markdown] slideshow={"slide_type": "subslide"} # ## Using highlight tables to explore ranking # # - Create another worksheet # - Dragging `Year` onto Columns # - Dragging `Continent` and `Country` onto Rows # - Dragging `Gdp Percap` onto Color # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple highlight table! 
# # ![Imgur](https://i.imgur.com/GUCgK3C.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Packed bubble works like bar, but it involves a hierarchy feature and shows measure to with the size of the bubbles # + [markdown] slideshow={"slide_type": "subslide"} # ## Using packed bubble to explore ranking # # - Create another worksheet # - Creating a hierarchy variable with `Continent` and `Country` # - Dragging `Continent, Country` onto Columns # - Dragging `Pop` onto Rows # - Click Show Me then click **packed bubbles** # - Dragging `Year` onto Pages # - Drilling down by clicking the plus sign next to `Continent` # + [markdown] slideshow={"slide_type": "subslide"} # ## Creating a hierarchy variable with `Continent` and `Country` # # ![Imgur](https://i.imgur.com/oT4TfqP.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Drilling down by clicking the plus sign next to Continent # # ![Imgur](https://i.imgur.com/SEWd75R.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple packed bubble! # # ![Imgur](https://i.imgur.com/zFjEnKB.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Pie works like bar, but it shows proportions at the same time # + [markdown] slideshow={"slide_type": "subslide"} # ## Using pie to explore ranking and proportions # # - Create another worksheet # - Dragging `Continent` onto Columns # - Dragging `Pop` onto Rows # - Click Show Me then click **pie charts** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple pie! 
# # ![Imgur](https://i.imgur.com/3GWBMKS.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Side-by-side bar works exactly like bar, but it involves comparison with another dimension # + [markdown] slideshow={"slide_type": "subslide"} # ## Using side-by-side bar to explore ranking and comparison # # - Create another worksheet # - Dragging `Year` and `Continent` onto Columns # - Dragging `Pop` onto Rows # - Dragging `Continent` onto Color # - Click Show Me then click **side-by-side bars** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple side-by-side bar! # # ![Imgur](https://i.imgur.com/roRhffi.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Side-by-side circle views work exactly like circle views, but it involves comparison with another dimension # + [markdown] slideshow={"slide_type": "subslide"} # ## Using side-by-side circle views to explore distributions based on categories # # - Create another worksheet # - Dragging `Year` and `Continent` onto Columns # - Dragging `Gdp Percap` onto Rows # - Replacing Measure with Dimension # - Dragging `Continent` onto Color # - Click Show Me then click **side-by-side circles** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple side-by-side circle! # # ![Imgur](https://i.imgur.com/oScaVFh.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Stacked bar works exactly like bar, but it involves the contribution of individual elements # + [markdown] slideshow={"slide_type": "subslide"} # ## Using stacked bar to explore ranking and composition # # - Create another worksheet # - Dragging `Year` and `Continent` onto Columns # - Dragging `Pop` onto Rows # - Click Show Me then click **stacked bars** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple stacked bar! 
# # ![Imgur](https://i.imgur.com/qKbRjjc.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Text tables display values as numbers in a table(also called cross-tabs or pivot tables) # + [markdown] slideshow={"slide_type": "subslide"} # ## Let's get a new data source. # # [Right click to download](https://python4ds.s3-ap-northeast-1.amazonaws.com/03-18-2020.csv) # + [markdown] slideshow={"slide_type": "subslide"} # ## Use Visual Studio Code to open `03-18-2020.csv` # # ![Imgur](https://i.imgur.com/LN37FFp.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Adding a new data source to our workbook # # ![Imgur](https://i.imgur.com/gPwnndk.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Using text tables to explore summarized numbers in a table # # - Create another worksheet # - Dragging `Confirmed`, `Deaths`, and `Recovered` onto Columns # - Dragging `Country/Region` and `Province/State` onto Rows # - Click Show Me then click **text table** # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple text table! # # ![Imgur](https://i.imgur.com/ESmuh0L.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Treemap works like exactly like stacked bar # + [markdown] slideshow={"slide_type": "subslide"} # ## Using treemap to explore ranking and composition # # - Create another worksheet # - Dragging `Confirmed` onto Columns # - Dragging the default hierarchy variable `Country/Region, Province/State` onto Rows # - Click Show Me then click **treemaps** # - Drilling down by clicking the plus sign next to `Country/Region, Province/State` # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple treemap! 
# # ![Imgur](https://i.imgur.com/cp5MBtp.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Symbol map works like a text table, but it shows values on a geographical map # + [markdown] slideshow={"slide_type": "subslide"} # ## Using symbol map to explore summarized numbers on a geographical map # # - Create another worksheet # - Double-click `Province/State` # - Dragging `Confirmed` onto Size # - Drilling down and fold-up by clicking the plus sign next to `Country/Region, Province/State` # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple symbol map! # # ![Imgur](https://i.imgur.com/xf4FBtU.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Filled map works like symbol map, but it uses a color gradient rather than size to represent the value # # Filled map is more commonly referenced as a polygon map. # + [markdown] slideshow={"slide_type": "subslide"} # ## Using filled map to explore summarized numbers on a geographical map # # - Create another worksheet # - Double-click `Province/State` # - Click the Marks drop-down and select Map # - Dragging `Confirmed` onto Color # - Drilling down and fold-up by clicking the plus sign next to `Country/Region, Province/State` # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple filled map! 
# # ![Imgur](https://i.imgur.com/UpQ3wde.png?1)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## A preview of the Python code we will write in near future

# + slideshow={"slide_type": "-"}
from datetime import date, timedelta
import pandas as pd

# Get yesterday's daily COVID-19 stats: the JHU CSSE repository names each
# daily report file after its date, so build the file name from yesterday.
delta = timedelta(days=-1)
yesterday = date.today() + delta
# The repository uses the MM-DD-YYYY naming convention.
yesterday_str = yesterday.strftime("%m-%d-%Y")
csv_name = yesterday_str + ".csv"
csv_url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{}".format(csv_name)
covid19_daily = pd.read_csv(csv_url)
# Observe NaN in original data
covid19_daily.head()

# + slideshow={"slide_type": "subslide"}
# Replace NaN in Province/State with the corresponding Country/Region.
# Series.fillna does the row-wise imputation in one vectorized call
# (replaces the previous manual zip/list-comprehension), and the
# `column_to_replace` variable is now used consistently throughout.
column_to_replace = "Province/State"
province_state = covid19_daily[column_to_replace].fillna(covid19_daily["Country/Region"])
# Drop and re-insert so the imputed column ends up first, as before.
covid19_daily = covid19_daily.drop(column_to_replace, axis=1)
covid19_daily.insert(0, column_to_replace, province_state)
covid19_daily.to_csv(csv_name, index=False)
# Imputed data
covid19_daily.head()

# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating Calculated Fields

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Why do we need to create calculated fields?
#
# Our underlying data may not include all of the fields we need to answer the questions.

# + [markdown] slideshow={"slide_type": "subslide"}
# ## What are calculated fields, anyway?
#
# We can create new fields in Tableau using calculations and then save them as part of the data source. These fields are called calculated fields.

# + [markdown] slideshow={"slide_type": "subslide"}
# ## When to use calculated fields?
#
# - To segment data
# - To convert the data type of a field
# - To aggregate data
# - To filter results
# - To calculate ratios
# - ...etc.

# + [markdown] slideshow={"slide_type": "subslide"}
# ## What are the types of calculations?
# # - Basic calculations # - Table calculations # - Using functions # + [markdown] slideshow={"slide_type": "subslide"} # ## Basic calculations allow us to transform values or members at # # - a row-level calculation # - an aggregate calculation # + [markdown] slideshow={"slide_type": "subslide"} # ## Let's do a quick row-level calculation # # - Use `03-18-2020` as data source # - Analysis > Create Calculated Field # - Enter a name for the calculated field with `Death Ratio` and a formula # - Edit format to display percentage # + [markdown] slideshow={"slide_type": "subslide"} # ## Analysis > Create Calculated Field # # ![Imgur](https://i.imgur.com/PuuNC46.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Enter a name for the calculated field with `Death Ratio` and a formula # # ![Imgur](https://i.imgur.com/X62KALk.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Edit format to display percentage # # ![Imgur](https://i.imgur.com/8RsaLT1.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a row-level calculation! # # ![Imgur](https://i.imgur.com/jslCel2.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Let's do another quick aggregate calculation # # - Use `03-18-2020` as data source # - Analysis > Create Calculated Field # - Enter a name for the calculated field with `Aggregate Death Ratio` and a formula # - Edit format to display percentage # + [markdown] slideshow={"slide_type": "subslide"} # ## Analysis > Create Calculated Field # # ![Imgur](https://i.imgur.com/PuuNC46.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Enter a name for the calculated field with `Aggregate Death Ratio` and a formula # # ![Imgur](https://i.imgur.com/21bg4Ws.png) # + [markdown] slideshow={"slide_type": "subslide"} # ## Edit format to display percentage # # ![Imgur](https://i.imgur.com/8RsaLT1.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with an aggregate calculation! 
# # ![Imgur](https://i.imgur.com/Twmj1Wz.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Table calculations are calculated based on what is currently in the visualization and do not consider any measures or dimensions that are filtered out of the visualization # + [markdown] slideshow={"slide_type": "subslide"} # ## Let's do a quick table calculations # # - Use `03-18-2020` as data source # - Dragging `Confirmed` onto Columns # - Dragging `Country/Region` onto Rows # - Click on `SUM(Confirmed)` > Quick Table Calculation > Percent of Total # + [markdown] slideshow={"slide_type": "subslide"} # ## Click on `SUM(Confirmed)` > Quick Table Calculation > Percent of Total # # ![Imgur](https://i.imgur.com/27SsdXQ.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## Done with a simple table calculation! # # ![Imgur](https://i.imgur.com/Op9bKGz.png?1) # + [markdown] slideshow={"slide_type": "subslide"} # ## The building blocks of writing formula of calculated fields # # - Functions # - Fields # - Operators # - Constants # + [markdown] slideshow={"slide_type": "subslide"} # ## Using functions when creating calculated fields helps a lot # # Statements used to transform the values or members in a field. # + [markdown] slideshow={"slide_type": "subslide"} # ## Tableau supports many functions including # # - Universal Functions # - [Number Functions](https://help.tableau.com/current/pro/desktop/en-us/functions_functions_number.htm) # - [String Functions](https://help.tableau.com/current/pro/desktop/en-us/functions_functions_string.htm) # - [Date Functions](https://help.tableau.com/current/pro/desktop/en-us/functions_functions_date.htm) # - [Type Conversion](https://help.tableau.com/current/pro/desktop/en-us/functions_functions_typeconversion.htm) # - [Logical Functions](https://help.tableau.com/current/pro/desktop/en-us/functions_functions_logical.htm) # - ...etc. 
# - [Aggregate Functions](https://help.tableau.com/current/pro/desktop/en-us/calculations_calculatedfields_aggregate_create.htm) # + [markdown] slideshow={"slide_type": "subslide"} # ## Function syntax # # ``` # FUNCTION_NAME(expression) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Field in a formula is often surrounded by brackets `[]` # + [markdown] slideshow={"slide_type": "subslide"} # ## Fields syntax # # ``` # [Field Name] # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Operators are symbols that denote an operation # + [markdown] slideshow={"slide_type": "subslide"} # ## Operators we can use in Tableau # # ``` # # +, -, *, /, %, ==, =, >, <, >=, <=, !=, <>, ^, AND, OR, NOT, ( ) # ``` # # Source: <https://help.tableau.com/current/pro/desktop/en-us/functions_operators.htm#Operators> # + [markdown] slideshow={"slide_type": "subslide"} # ## Constants we can use in Tableau # # - Numeric literals are written as numbers # - String literals are written with quotation marks `""` # - Date literals are written with the `#` symbol # - Boolean literals are written as either `true` or `false` # - Null literals are written as `null` # # Source: <https://help.tableau.com/current/pro/desktop/en-us/functions_operators.htm#literals> # + [markdown] slideshow={"slide_type": "subslide"} # ## That's all for today, stay healthy and safe :)
02-analyzing-data-with-tableau.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import cv2  # was `import cv2 as cv2` -- the alias was a redundant self-alias
import numpy as np

# Read the source image with cv2.imread().
# The image should be in the working directory or a full path of image should be given.
image = cv2.imread('LISC Database/More Dataset without Ground Truth/alll/20020515-65.bmp')
# cv2.imread returns None (instead of raising) when the file is missing or
# unreadable -- fail fast with a clear message rather than crashing in cvtColor.
if image is None:
    raise IOError('Could not read input image')

# Convert BGR (OpenCV's default channel order) to a single-channel grayscale image.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Save the grayscale image into the 'graynew' folder with cv2.imwrite().
# First argument is the file name, second argument is the image you want to save.
cv2.imwrite('graynew/gray.jpg', gray)

# Reload gray.jpg from the graynew folder.
# cv2.IMREAD_GRAYSCALE: loads the image in grayscale mode.
im_gray = cv2.imread('graynew/gray.jpg', cv2.IMREAD_GRAYSCALE)

# Binarize: pixels above `thresh` become 255 (white), the rest 0 (black).
# cv2.threshold args: source image (grayscale), threshold value used to
# classify pixel values, and the maxVal assigned when a pixel exceeds the
# threshold. It returns a (retval, image) tuple; we only need the image, hence [1].
thresh = 127
binary = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite('binary/binary.jpg', binary)

# Smooth the binary image with a Gaussian blur (an example of low-pass filtering).
# Args: source image, a tuple describing the kernel shape (k, k), and the std
# deviation for the 2d Gaussian in the x dimension. Passing 0 lets OpenCV
# derive default std deviations for both x and y from the kernel size.
img_blur_small = cv2.GaussianBlur(binary, (5, 5), 0)
cv2.imwrite('blur/blur.jpg', img_blur_small)  # save in blur folder
# -
binarize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Functions and Predicates # Let's start populating the language ```bw``` for describing instances of Blocks World import tarski import tarski.errors as err bw = tarski.language() # Blocks Worlds are made of objects of two sorts: place = bw.sort('place') # and block = bw.sort('block', place) # We populate our instance with a few blocks b1, b2, b3, b4 = [ bw.constant('b_{}'.format(k), block ) for k in (1,2,3,4) ] # and a table table = bw.constant('table', place) # ## Functions # Function symbols $f$ are used to represent mappings between several sorts $\tau$. Formally, we will define $f$ as mappings # # $$ # f : \tau_1, \ldots, \tau_n \mapsto \tau_{n+1} # $$ # # Functions $f$ have _arity_ $n \geq 0$, their _domain_ is the cartesian product $\tau_1 \times \tau_2 \ldots \times \tau_n$ and their _codomain_ is the sort $\tau_{n+1}$. The _signature_ $\sigma_f$ of $f$ corresponds with the tuple # # $$ # (f, \tau_1, \ldots, \tau_n, \tau_{n+1}) # $$ # # and allows to uniquely identify a function: ```Tarski``` doesn't allow languages with functions $f$ and $f'$ such that $\sigma_f$ $=$ $\sigma_{f'}$. # For Blocks World we can define the function $loc: block \mapsto place$, which we use to refer indirectly to the object a given block is _on top of_ at any point in time loc = bw.function('loc', block, place) # We note that the arguments of this method correspond with the components of a function signature, hence print('Domain of {}: {}'.format(loc, loc.domain)) print('Codomain of {}: {}'.format(loc, loc.codomain)) print('Type of {}: {}'.format(loc, loc.sort)) print('Arity of {} : {}'.format(loc, loc.arity)) # Printing function objects indicates the arity (number of arguments) the function was declared with, following the convention typically used in Prolog. 
# ## Predicates as Relation Symbols # Relations between objects and intrinsic properties of objects are modelled by means of _relation symbols_ or _predicates_. clear = bw.predicate('clear', block ) # By default, ```Tarski``` languages do not define implictly any kind of builtin predicate or function. For instance, if we try to write something like try: b1 == b2 except err.LanguageError as e: print("We caught an exception") print(e) # For that we need to explicitly attach _theories_ to our language, as shown in [this advanced tutorial](101_advanced_tutorial_theories.ipynb). # ### Next: [Terms and Formulas](003_terms_and_formulas.ipynb)
notebooks/002_functions_and_predicates.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/atuls01/Projects/blob/main/DC(LR_SVM)_ASSIGNMENT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="qWK-613Y6Ip9"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing

# + id="dU3woey_6PH0"
# Load the patient dataset; 'Class' is the target column.
dat = pd.read_csv("pat.csv")

# + id="eEioWHGO6e-B"
X = dat.drop('Class', axis=1)
y = dat['Class'].values.tolist()

# + id="9TgOWTbG7CAA"
# NOTE(review): the scaler is fit on the full dataset before the split below,
# which leaks test-set statistics into training; ideally it should be fit on
# X_train only and then applied to X_test. Left as-is to keep the pipeline shape.
Scaler = preprocessing.StandardScaler().fit(X)
X = Scaler.transform(X)

# + id="5dYvB_lZ7Zch"
# 75/25 train/test split.
X_train, X_test, Y_train, Y_test = train_test_split(X,y, test_size = 0.25)

# + id="uFB3CqDx7otS"
# BUG FIX: the model was previously fit on the full (X, y), so the held-out
# X_test was part of its own training data and the reported score was inflated.
# Fit on the training split only.
LRclf = LogisticRegression(random_state=0, solver='lbfgs', max_iter=1000).fit(X_train, Y_train)

# + id="fLJMer3S75rw"
# Accuracy of logistic regression on the held-out test set.
score = LRclf.score(X_test, Y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="IYHDV9e58BAv" outputId="b878873a-58f3-4340-ce35-d77245920f37"
print(score)

# + id="n8H5aOoH8E1q"


# + [markdown] id="vAwnQk6X8cuu"
# ## SUPPORT VECTOR MACHINE

# + id="sHFJzf5P8hwX"
from sklearn import svm

# + id="0O3LZ7Qr8qEP"
# Re-read the raw (unscaled) data for the SVM experiments.
data = pd.read_csv("pat.csv")

# + id="ukJF5DOv81Hn"
X = data.drop("Class", axis = 1)

# + id="5bwwiZF99AOg"
y = data["Class"].values.tolist()

# + id="IWprHVUa9GeI"
# Fresh 70/30 split (note: this X is unscaled, unlike the logistic-regression data).
X_train, X_test, Y_train, Y_test = train_test_split(X,y, test_size=0.30)

# + id="2hZrORMo9SGI"
# Three SVM classifiers with different kernels, each fit on the training split.
svmModel1 = svm.SVC(kernel='linear', C=1.0, gamma='scale').fit(X_train, Y_train)

# + id="4zNxaO0_9oZj"
svmModel2 = svm.SVC(kernel='rbf', gamma='scale').fit(X_train, Y_train)

# + id="NuKeQuJd93EI"
svmModel3 = svm.SVC(kernel='poly', degree=3, gamma='scale').fit(X_train, Y_train)

# + 
colab={"base_uri": "https://localhost:8080/"} id="zI0lgTZv-J6g" outputId="3d02e6a2-d2cf-4026-bf4f-c87b8702c256"
# Test-set accuracy of the linear-kernel SVM.
svmModel1.score(X_test, Y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="XXpWBT43-TN_" outputId="b1536813-6803-4027-907a-c4356e0a8d59"
# Test-set accuracy of the RBF-kernel SVM.
svmModel2.score(X_test, Y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="mk-UWrrx-XyI" outputId="4f7b4fec-2c4d-4578-87cd-27a0897c6fe2"
# Test-set accuracy of the degree-3 polynomial-kernel SVM.
svmModel3.score(X_test, Y_test)

# + id="BXI_faTf-c6N"
DC(LR_SVM)_ASSIGNMENT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Setup: Installing packages and getting my bearings import pandas as pd import matplotlib.pyplot as plt import numpy as np from PIL import Image import datetime as datetime import random as random import seaborn as sns import plotly as py # General note to self: # If PyCharm asks for Jupyter notebook token, try "jupyter notebook list" in terminal and copy down the string after "token=". # Check the current working directory - where am I? import os os.getcwd() # # Chicago Crime Dataset # Background thoughts: At the beginning of the class, I planned to make a super cool project - something that might not be especially practical but that would be challenging and fun. However, my goals were slowly squashed as other things began to pile up on my plate. And about a month ago, I realized that I was running short on both time and creativity. Ideally I would've worked on something I could actually use in my lab at the NIH, but my lab conducts fairly non-computational research - i.e. we do mostly bench work. Of course, sometimes data needs to be analyzed, and this is done on the computer in programs like Excel and Prism. However, no particular assay I did was so frequent or followed the same experimental format enough for me to justify making a program to conduct the analysis for me. The main thing that I do is graphing, and I usually used R for that in my previous lab - but what happened was that because I made my plots in code, no one else in the lab could tinker with them (no one else used R), which was a problem. 
The closest frequent "computer" thing we do in my current lab is probably checking DNA sequences, but there already exists software for doing that, and I don't think it would've been within my skillset to check for anything other than complete matches. # # So as time was running short, I figured I would find a super cool dataset to analyze and hopefully come to some interesting conclusions. I searched for a while and found some fairly cool datasets (one of them being a huge dataset of the most common tuples in different languages), but some of them were absolutely too big, and frankly not that many really stood out to me. Then I thought about how I am usually interested in city data, so I looked through the DC open data site as well as the Chicago open data site. Regarding the latter, I went to college in Chicago, so I'm a little more interested in the city than most. # # In the end, I decided to use a dataset that describes incidence of crime in Chicago from 2001 to when I downloaded the dataset, i.e. April 6th, 2019. According to the description, this dataset excludes "murders where data exists for each victim." I'm not sure if this is saying that all murders are excluded; or if there are murders where data exists for each victim and murders where that is not the case, and this dataset only includes one of the two. In looking through the Primary Types of crimes below, I do see a label for "homicide," so maybe it's the latter scenario. # # I downloaded the data set from this website that contains data about various things in Chicago: https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2. # Before I begin, I want to declare that the website where I downloaded the data actually provides some pretty cool interactive graphs of the data (screenshot below). Image.open('./Crimedata_screenshot.png') # + # Read in the data from where I have it saved in my computer. # I could not figure out how to read it from online. 
Either that, or I never stuck around to find out; it was taking forever to process, maybe because the file was too large. # Either way, it was far more manageable to read it directly from my computer. url = '/Users/Shu/PycharmProjects/spring2019-solo-project-shuzhang96/Project1/Chicago_crimes_2001_present.csv' data = pd.read_csv(url) # - # Get an idea of what this data looks like, from the first five rows. # Need the first line of code or pandas will truncate the dataframe and not show some of the columns. pd.set_option('display.max_columns', 30) data.head() # And here's what one entry looks like. data.iloc[1,:] # By the way, this dataset has 6.8 million lines and the file size is 1.8 GB. data.shape # There are about two dozen columns in this data. What are they? # # Things that I'll plan to look at: # - Date: Day and time that the incident happened. # - Primary Type: General description of the incident (e.g. theft) # - Description: More detailed description of the incident (e.g. retail theft) # - Location Description: Description of the location where the crime happened (e.g. apartment) # - Arrest: Was an arrest made: Y/N? # - Domestic: Was the incident domestic-related as defined by the Illinois Domestic Violence Act? # - Community Area: Community area where the incident occurred. # - Year: Year that the incident occurred. # - Latitude, Longitude: Latitude and Longitude of the location where the incident occurred. This is shifted slightly for privacy. # # Things I'm not planning to look at here: # - ID: Unique number assigned to each case. Not sure if there is any meaning to be found here anyway. # - Block: City block where this happened. # - IUCR: Stands for Illinois Uniform Crime Reporting (not the International Union of Crystallography! which was the first thing to pop up on Google). Code describing the type of incident. # - Beat: Describes the beat where the incident occurred. A beat is the smallest police geographic area. 
# - District: Police district where the incident occurred. # - Ward: City Council district where the incident occurred. # - FBI: FBI's crime classification. # - X,Y Coordinate: X and Y coordinates according to the State Plane Illinois East NAD 1983 projection. # - Updated On: When the data point was last updated. # Given that I'm going to exclude many of the columns, I'll read in a copy of the data, called 'fdata', that contains only the columns I care about. fdata = pd.read_csv(url, usecols = ['Date', 'Primary Type', 'Description', 'Location Description', 'Arrest', 'Domestic', 'Community Area', 'Year', 'Latitude', 'Longitude']) # Again, let's get an idea of what this looks like. fdata.head() # # Analysis # Now let's answer some basic questions. The rest of this notebook will be dedicated to this purpose. It involves a rather humble use of Python, but in the future, if I use Python, it'll probably be as a simple data analysis tool rather than a powerful programming language. So even though the usage below is on the simplistic side, it is probably realistically the type of thing I would use Python for. # # Below, I'm numbering things based on the topic/question. # + # 1a. How many crimes happen per day? # There are 6669 days between January 1st, 2001, the first day of the dataset, and April 6th, 2019, the last day of the dataset. days = (datetime.date(2019, 04, 06) - datetime.date(2001, 01, 01)).days # The dataset is 6844979 rows long by the longest count. Each row represents one incident. # I think there are some NaN's for some of the columns, which is why there are different values depending on which column you count. # Note to self: 0 is by column, 1 is by row. crimes = fdata.count(0) # Thus we can divide the two numbers: crimes by days: crimes/days # Taking the largest number, aproximately 1026 crimes happen per day on average. I don't know what would be typical, but that seems like quite a lot! # - # 1b. 
Another way to look at it is: how many crimes happen per day per community area? (Community areas, which are described more below, are big subdivisions of Chicago.)
# There are 77 community areas. They all have different sizes and demographics, but temporarily ignoring this fact: When you do this division, ~13 crimes per community area per day seems like a more palatable number.
crimes/days/77

# +
# 2. What percentage of crimes result in arrest?

# For some quick practice, write a function to make a simple barplot of counts.
# In this dataset, each row is a unique observation, so we count how many observations there are of each type by simply tallying number of rows.
def barplot(element, pcolor, psize, ptitle):
    """
    Plot a normalized bar chart of the value counts of one column of `fdata`.

    Positional arguments, in order:
    element -- name of the column in `fdata` to tally
    pcolor  -- bar color
    psize   -- figure size tuple (width, height)
    ptitle  -- plot title

    Returns the axes object produced by pandas' .plot.
    """
    return fdata[element].value_counts(normalize = True).plot(kind = 'bar', color = pcolor, figsize = psize, title = ptitle)

barplot('Arrest', 'mediumturquoise', (6,4), 'What percent of crimes result in arrest?')

# A little less than 30% of crimes result in arrest.

# +
# 3. What percent of crimes are domestic violence-related?
barplot('Domestic', 'plum', (6,4), 'What percent of crimes are domestic violence related?')

# Approximately 15% of incidents are domestic violence related.

# +
# 4a. What are the most common primary types of crime?
barplot('Primary Type', 'tomato', (15,5), 'Crimes by Primary Type')

# The most common type of incident is theft (>20%), with battery (~18%) coming in at a close second.
# The next three most common are: criminal damage, narcotics, assault.
# Hey, wait a minute: the dataset seemed to suggest that it excludes 'murders,' but I see 'homicide' in the chart below.
# But, maybe this is because of the possible explanation I offered above in the first chunk introducing the dataset, about there being two different types of homicide and this dataset maybe only including one of the types.
# -

# 4b. 
Another hey, wait a minute: one of the primary types of crime is called "Non-criminal." What does that mean? # Note that this dataset is named "Chicago Crime Data," but not everything logged in it is a crime! # First print out all the incident types. fdata['Primary Type'].value_counts().index.tolist() # + # 4c. Locate just the non-criminal incidents (there are two types). # It looks like many of these descriptions are relatively innocuous things like "lost passport," where the police may be called but no wrongdoing has occurred. # So, it may be more accurate to call this a dataset of police-related incidents, and not just crime. # Of course, most of the incidents are crime-related, however. fdata[(fdata['Primary Type'] == 'NON-CRIMINAL') | (fdata['Primary Type'] == 'NON-CRIMINAL (SUBJECT SPECIFIED)')].head() # + # 5a. Moving on: What is the incidence of crimes by Community Area? More explanation is below this cell. barplot('Community Area', 'gold', (15,5), 'Crimes by Community Area') # To understand this, the following image is helpful: img = Image.open('./Chicago_community_areas.png') w, h = img.size img.resize((w/2, h/2)) # - # There's some more writing here, so I'm going to put it in Markdown instead of code comments. # # Chicago is divided into 77 community areas (and these include smaller divisions known as neighborhoods). Their descriptions can be found at https://en.wikipedia.org/wiki/Community_areas_in_Chicago. # # Surprisingly, community area #25 by far has the most incidents associated with it, with twice as many as the second place community area. Community area #25 is named Austin, and it contains the neighborhoods of Galewood and The Island. I didn't really hear much (either good or bad) about these neighborhoods when I was in Chicago, and frankly I don't really have any guesses for why this area might have the most incidents. # # The community area with the second most incidents is #8, named the Near North Side. 
It includes the neighborhoods of Gold Coast, Magnificent Mile (aka the main shopping district of downtown Chicago), River North, Streeterville, and some others. In comparison to the neighborhoods in community area #25, I have actually heard of these neighborhoods and know of them as popular places to shop and dine. In fact, if you look at the map, you can see that #8 includes Navy Pier, which is one of the major tourist attractions. I suppose it makes sense there might be more incidents happening here because there is a high density of people, and especially people who might be vulnerable i.e. tourists. I would predict that thefts are more common here than in other community areas. # # The community area with the fewest incidents is #9, Edison Park. It's in the broader classification known as the Far North Side. I know that in general, the northern part is known to be one of the safer regions of Chicago, and I wonder if this extends to the Far North Side as well. # # I went to college in community area #41, named Hyde Park. The number of incidents there seems to be on the lower side (it's about 2/3 down the graph from the left). I wonder if part of this is because my college had a private police force that handled many of the matters on campus, as anecdotally we certainly had our share of muggings. # # Additionally, I have heard of the Englewood neighborhood getting a bad rap for crime, particularly homicides. Looking at the Wikipedia page, I see that this neighborhood is also its own community area: #68. On this graph, it does to be one of the higher-crime community areas. # + # 5b. Following up on my speculation in the cell above about the Near North Side having a higher incidence of theft than average because it is a touristy area. # In the graph of crimes by primary type above, incidence of theft was only slightly higher than that of the next most common crime type, battery. 
# For community area #8, however, theft is ~3x more common than the next most frequent type of crime (still battery). data8 = fdata[fdata['Community Area'] == 8] data8['Primary Type'].value_counts(normalize = True).plot(kind = 'bar', figsize = (15,5), title = 'Crimes by Primary Type in Near North Side') # + # 6. In what kind of setting do most crimes occur? Show only the top 10 locations. fdata['Location Description'].value_counts(normalize = True).head(10).plot(kind = 'bar', color = 'orchid', figsize = (8,5), title = 'Location Description') # The most common location is on the street (>25%), then at a residence (~17%) (presumably a residence is a house rather than an apartment). # Frankly, some of these distinctions are a little unclear to me, e.g. street (#1) vs sidewalk (#4). I'm speculating that street could be on the road itself plus the sidewalk, whereas sidewalk is specifically the sidewalk? # + # 7. Globally over the years, what has incidence of crime been in Chicago? Make bar widths a little thicker. fdata['Year'].value_counts().sort_index().plot.bar(width = 0.7, color = 'salmon', title = 'Total Crimes in Chicago by Year') # Encouragingly, total crime incidents seem to have decreased since 2001, with a steady decline from 2001-2013 and then a plateau starting in 2014. # I wonder what changed in 2014? Or did we just reach a reasonable amount of crime and plateau there? # (Of course, we can also wonder whether something changed in method of measurement and whether simply fewer incidents were reported and logged. But hopefully the apparent decrease in crime is true.) # The bar for 2019 is low because the year 2019 has not yet completed. # However, if we assume that 1/4 of the year has passed (Jan-Mar), then the final count should be 4x the current bar, which would be consistent with values from 2015-2018. # + # 8a. At what time of day do most incidents happen? # The 'Date' column lists date as 'MM/DD/YYYY HH:MM:SS _M'. 
# I wanted to split the dates into two strings, day and time: 'MM/DD/YYYY' and 'HH:MM:SS *M'.
# (I did this because I thought I had to to convert the time data to 24 hr time manually, and so I needed to separate the strings. I later realized this was not necessary, but I'll continue on this for now.)
# However, I tried to do this for the entire dataset and it still hadn't finished running after at least 20 minutes.
# Instead, I did this for a random sample of 10000 rows from the complete data, called 'sdata'.
sdata = fdata.sample(10000, random_state = 123)

# By the way, these 10000 rows compose 1% of the entire dataset. Note to self: need to make one of the numbers in the division a decimal, or Python 2 will return 0 because that is the closest integer.
10000.0/6844979

# Separate into day and time strings: 'MM/DD/YYYY' and 'HH:MM:SS *M'.
sdata[['Day', 'Time']] = sdata['Date'].str.split(' ', 1).apply(pd.Series)

# +
# Write a function for converting the 12 hr time (used in the time string) 'HH:MM:SS *M' format to 24 hr time.
def convert_to_24(time):
    """ Convert a zero-padded 12 hr time string 'HH:MM:SS AM/PM' to 24 hr 'HH:MM:SS' """
    # 12:xx:xx AM is midnight -> hour becomes 00
    if 'AM' in time and time[:2] == '12':
        return '00' + time[2:-3]
    # Any other AM time keeps its hour; just strip the ' AM' suffix
    elif 'AM' in time:
        return time[:-3]
    # 12:xx:xx PM is noon -> hour stays 12; just strip the ' PM' suffix
    elif 'PM' in time and time[:2] == '12':
        return time[:-3]
    # Remaining PM times: add 12 to the hour, e.g. 03:30:00 PM -> 15:30:00
    # (assumes a zero-padded two-digit hour, per the 'HH:MM:SS' format above)
    else:
        return str(int(time[0:2]) + 12) + time[2:8]

# Testing the function
print(convert_to_24("03:30:00 PM"))

# +
# Convert the 12 hr time to 24 hr time.
# BUG FIX: the result of .apply was previously discarded (the Series was never
# assigned anywhere), so this cell silently did nothing. Assign it back so
# 'Time' actually holds 24 hr strings; pd.to_datetime below parses those to the
# same timestamps it got from the original AM/PM strings, so downstream hour
# extraction is unchanged.
sdata['Time'] = sdata['Time'].apply(convert_to_24)

# Convert to datetime format.
# (Referring to what I hinted at above, at this point I had not realized that datetime already had the capability to convert 12 hr time to 24hr time, and that I did not need to do it myself.)
# (However, I left my clunkier method as a reminder of how to split strings.) # Since the 'Time' column used in the rest of this cell does not contain any day information, datetime automatically fills in with today's date. But that's okay because I am not using the day information here. sdata['Datetime_Time'] = pd.to_datetime(sdata['Time']) # Extract hour. sdata['Hour'] = sdata['Datetime_Time'].dt.hour # Plot number of crimes by hour. sdata['Hour'].value_counts().sort_index().plot(kind = 'bar', title = 'Crimes by Hour', color = 'cornflowerblue') # According to this subsample of data, crime incidence is lowest at 4-5 AM in the morning and generally increases in frequency up to ~7 PM, after which it then decreases slowly, with a drastic drop-off by 1 AM. # This is somewhat expected, as probably few people are awake during what I would consider the "deepest" part of the night, around ~3-4 AM. # Additionally, it kind of makes sense that more people are out and about around "early" nighttime i.e. ~7-8 PM rather than later. This would mean more people who commit crimes and more people to commit crimes against. # Plus, it's generally dark by 8 PM so there's some night cover around that time too. Perhaps that is a happy balance of having people around and having night cover. # The slight spike in crime at noon is a bit hard to explain. Maybe it coincides with lunch break? Or maybe it is an artifact of estimation, where noon is simply a close reference time? # + # 8b. How about frequency of crime at the minute level, i.e. is there a minute on the hour that is most popular? One would expect the answer to be no. sdata['Minute'] = sdata['Datetime_Time'].dt.minute sdata['Minute'].value_counts().sort_index().plot(kind = 'bar', title = 'Crimes by Minute', color = 'darkblue', figsize = (12,4)) # Actually, the answer naive answer is yes - if we look at the graph, there is a minute that is most popular. # However, the result is in reality probably an artifact of rounding. 
# Most incidents are reported to the nearest hour, with the next most common being the nearest half hour, and then the quarter hour.
# This intuitively makes sense for how I would usually estimate time - with a hierarchy of hour > half hour > quarter hour.
# Some incidents are reported to the exact minute, but we can see that this is much less common.
# (By the way, if we take out the top two bars, the remaining bars create an almost perfectly symmetrical bimodal distribution combined with a constant low-value function.)
# (I don't know if this is actually interesting at all, but I feel like maybe it says something about how we approximate guesses of time.)

# +
# 8c. Are there months during which incidents are more common?
sdata['Datetime_Day'] = pd.to_datetime(sdata['Day'])
sdata['Month'] = sdata['Datetime_Day'].dt.month
sdata['Month'].value_counts().sort_index().plot(kind = 'bar', title = 'Incidents by Month', color = 'mediumslateblue')

# Overall, the month-to-month differences aren't huge.
# However, the months with the most incidents are July-August, and the months with the fewest incidents are December and February.
# This overall appears to make sense, since fewer people are out during the colder months, meaning there are fewer people to be victims or perpetrators, and vice versa.

# +
# 9a. Let's just briefly repeat this for just the year 2018, the last year for which the data is complete.
# Taking out just the rows for year 2018.
# .copy() makes fdata2018 an independent DataFrame rather than a view of
# fdata, which avoids the SettingWithCopyWarning on the assignment below.
fdata2018 = fdata[fdata['Year'] == 2018].copy()

# +
# As mentioned above, I later learned that datetime is smart enough to convert 12 hr to 24 hr time, meaning that there was no need for me to do the conversion myself, nor any need to split the 'Date' column name into two strings.
# So, this time I am using the simpler method where I don't do those extra steps.
# Apply datetime function
fdata2018['Datetime'] = pd.to_datetime(fdata2018['Date'])
# (The warning that used to appear here is gone now that fdata2018 is an explicit copy.)

# +
# 9b. Crime by hour
fdata2018['Datetime'].dt.hour.value_counts().sort_index().plot(kind = 'bar', title = 'Crimes by Hour')

# Results are similar: lowest incidence near the late hours of night / very early hours of morning; a peak around 7pm; and a spike around noon.

# +
# 9c. Crime by month
fdata2018['Datetime'].dt.month.value_counts().sort_index().plot(kind = 'bar', title = 'Incidents by Month')

# Also a similar trend.
# From these few graphs, it looks like using the 1% random sample of the 2001-early 2019 data I used above was actually quite similar to the full data from 2018.
# Since it runs more quickly, I'll use sdata for future graphs.

# +
# 10a. How about longitude and latitude information?
# Actually, geography-type information is best analyzed with something aimed at this task, such as GIS.
# But since I don't have access to that, I will do an approximation.

# For some reason, there is an outlier that really distorts my graph (which auto-selects x and y ranges). So I am restricting the latitudes and longitudes that I look at.
# Keep only rows where BOTH coordinates are inside the plausible Chicago
# bounding box. (This needs &: with the original |, a row passing either
# test was kept, so the outlier was never actually filtered out.)
lldata = sdata[(sdata['Latitude'] > 41.65) & (sdata['Longitude'] > -87.9)]

# Plot a histogram of latitude.
lldata['Latitude'].plot.hist(grid=True, bins=20, rwidth=0.9, color='lightseagreen', title = 'Crimes in Chicago by Latitude (South to North)')
# -

# Plot a histogram of longitude.
lldata['Longitude'].plot.hist(grid=True, bins=20, rwidth=0.9, color='lightseagreen', title = 'Crimes in Chicago by Longitude (West to East)')

# Actually, if we do this in seaborn we can get a histogram with a kernel density distribution.
ax = sns.distplot(lldata['Latitude'], color = 'lightseagreen')
ax.set_title('Crimes in Chicago by Latitude (South to North)')

# +
# Using matplotlib, we can also plot a two-way histogram.
# First, convert the latitude and longitude data to a numpy array, since it won't take the original form.
plt.hist2d(np.array(lldata['Longitude']), np.array(lldata['Latitude']), bins = 20)

# +
# Unsurprisingly, the 2D histogram approximates the shape of Chicago, shown below in a screenshot from Google maps.
# (I distorted the map image so it would look a bit more similar.)
img2 = Image.open('./Chicago_map.png')
w, h = img2.size
# Use integer (floor) division: PIL's Image.resize() requires an integer
# (width, height) tuple, and w/2, h/4 are floats under Python 3.
img2.resize((w//2, h//4))

# If we compare the 2D histogram above with the map image below, it seems that the highest density of crime actually happens in downtown Chicago.
# Additionally, it seems to happen in two general circles, one for northern Chicago and one for southern Chicago.
# This was generally surprising to me, since people always talk about crime being especially bad on the South Side.
# However, I think this is probably because this dataset excludes most murders, and the news tends to focus on murders, which do happen more frequently on the South Side according to other sources.
# So, we can probably see this dataset mostly as an analysis of non-murderous crime, and there the distribution looks different.
# -

sns.jointplot(x = np.array(lldata['Longitude']), y = np.array(lldata['Latitude']));

# # Summary
#
# This project analyzed the "Chicago Crime Dataset" downloaded from the City of Chicago website. More specifically, this project aimed to break down crime / police-related incidents in Chicago from 2001 to early 2019, by factors such as incident type, location, etc. Although far from the most powerful use of Python, simple data analysis is probably the most likely application for which I might use Python in the future. (Tentatively, I guess my main long-term goal would be to do statistical analysis for clinical data.) My previous experience has been some basic knowledge of R (enough to make decent graphs and do linear regression) and Stata (also in linear regression, just because my stats classes in college used both R and Stata).
It seems kind of strange to learn how to do much of the same in Python, but I'm hoping this will open the door to learning how to do more complex stuff that can only be done in a "real" programming language like Python. # # ### Takeaways from the Chicago Crime Data: # - Incidence of crime tends to peak with amount of human activity. # - The most common type of crime is theft. # - Crime seems to peak around the downtown Chicago area. # # ### What I learned: # - How to actually run Python! I had downloaded it previously but didn't know about IDEs or how to use it. Having a class helped provide the activation energy to actually use the language, for which I'm very grateful. # - A tiny bit of what GitHub is. I still don't totally get it, nor the frequently obtuse lingo, but I'm better off than when I started. # - This is probably not true for everyone, but I learned that even the simplest thing can require extensive Googling. Sometimes it takes a mountain to move just a foot, man. # # ### The end, thanks for reading!
Project1-Chicago-Crime-Data/Project1_Final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial 3 for JetSeT v1.2.0-rc3 # + from IPython.core.display import display, HTML display(HTML("<style>.container { width:95% !important; }</style>")) import matplotlib from matplotlib import pyplot as plt import matplotlib.colors as mcolors font = {'family' : 'sans-serif', 'weight' : 'normal', 'size' : 18} matplotlib.rc('font', **font) matplotlib.pyplot.rc('font', **font) colors=list(mcolors.BASE_COLORS) import warnings warnings.filterwarnings('ignore') import numpy as np # - # ![EC model](images/jetset_EC_scheme.png) # from jetset.jet_model import Jet from jetset.plot_sedfit import PlotSED # + from jetset.jet_model import Jet my_jet=Jet(name='BLR example',electron_distribution='bkn',beaming_expr='bulk_theta') my_jet.add_EC_component(['EC_BLR','EC_Disk']) my_jet.show_model() my_jet.set_par('disk_type',val='MultiBB') # + my_jet.set_par('L_Disk',val=5E46) my_jet.set_par('gmax',val=1E4) my_jet.set_par('gmin',val=2.) 
my_jet.set_par('R_H',val=3E17)
my_jet.set_par('p',val=1.5)
my_jet.set_par('p_1',val=3.5)
my_jet.set_par('R',val=3E16)
my_jet.set_par('B',val=2.0)
my_jet.set_par('z_cosm',val=0.6)
my_jet.set_par('BulkFactor',val=20)
my_jet.set_par('theta',val=1)
my_jet.set_par('gamma_break',val=2E2)
# Normalize the electron density so the source luminosity matches 1E47 erg/s at 3E12 Hz.
my_jet.set_N_from_nuLnu(nu_src=3E12,nuLnu_src=1E47)
my_jet.set_IC_nu_size(100)
# -

matplotlib.rc('font', **font)
p=PlotSED(figsize=(12,9),frame='src')
my_jet.eval()
p=my_jet.plot_model(plot_obj=p,frame='src')
p.rescale(x_min=10,x_max=26,y_max=50,y_min=42)

p=my_jet.electron_distribution.plot()
p.ax.axvline(np.log10(my_jet.parameters.gamma_break.val),ls='--')

matplotlib.rc('font', **font)
my_jet.add_EC_component('EC_DT')
my_jet.show_model()

matplotlib.rc('font', **font)
p=PlotSED(figsize=(12,9),frame='src')
my_jet.eval()
p=my_jet.plot_model(plot_obj=p,frame='src')
p.rescale(x_min=10,x_max=26,y_max=50,y_min=42)

# +
from jetset.poly_fit import do_log_Parab_FIT

def get_gamma_3p(j):
    """Log-parabolic fit parameters around the peak of gamma^3 * n(gamma)
    for the jet's electron distribution. Returns the fit parameter array."""
    #j.set_gamma_grid_size(1000)
    j.electron_distribution.update()
    x=np.log10(j.electron_distribution.gamma)
    y=np.log10(j.electron_distribution.n_gamma)+3*x
    y_p=y.max()
    x_p=x[np.argmax(y)]
    p,err=do_log_Parab_FIT(x,y,x_p,y_p,-1,x_range=[x_p-0.5,x_p+0.5],dy=np.ones(x.size))
    return p

def get_log_par_peak(x_p,y_p,j,comp):
    """Fit a log-parabola to the SED of spectral component `comp` starting
    from the peak guess (x_p, y_p); a second pass re-fits around the first
    estimate to refine it. Returns (params, errors)."""
    c=j.get_spectral_component_by_name(comp)
    x=np.log10(c.SED.nu.value)
    y=np.log10(c.SED.nuFnu.value)
    p,err=do_log_Parab_FIT(x,y,x_p,y_p,-0.1,x_range=[x_p,x_p+1],dy=np.ones(x.size))
    # Refine: re-fit centred on the first fit's peak estimate.
    p,err=do_log_Parab_FIT(x,y,p[0],p[1],p[2],x_range=[p[0],p[0]+1],dy=np.ones(x.size))
    return p,err
# -

# +
matplotlib.rc('font', **font)
my_jet.set_par('R_H',val=1E17)

size=5
p_dt=PlotSED(figsize=(12,9),frame='src')
p_blr=PlotSED(figsize=(12,9),frame='src')
my_jet.set_nu_grid_size(200)
my_jet.set_IC_nu_size(200)
my_jet.set_gamma_grid_size(100)
nu_IC_curv=np.zeros(size)
nu_IC_curv_err=np.zeros(size)
p1_val=np.zeros(size)
# Scan the high-energy electron index p_1 and record the EC-BLR peak curvature.
for ID,p_1 in enumerate(np.linspace(2.5,3.5,5)):
    p1_val[ID]=p_1
    my_jet.parameters.p_1.val=p_1
    my_jet.set_N_from_nuLnu(nu_src=3E12,nuLnu_src=1E47)
    my_jet.eval()
    my_jet.plot_model(plot_obj=p_blr,comp='Sync',label=None,auto_label=False,line_style='--',color=colors[ID],frame='src')
    my_jet.plot_model(plot_obj=p_dt,comp='Sync',label=None,auto_label=False,line_style='--',color=colors[ID],frame='src')
    #my_jet.plot_model(plot_obj=p_blr,comp='Sum',label='p_1=%2.2f'%p_1,color=colors[ID])
    #my_jet.plot_model(plot_obj=p_dt,comp='Sum',label='p_1=%2.2f'%p_1,color=colors[ID])
    my_jet.plot_model(plot_obj=p_blr,comp='EC_BLR',label='EC BLR p_1=%2.2f'%p_1,color=colors[ID],auto_label=False,frame='src')
    x_p,y_p=my_jet.get_component_peak('EC_BLR',log_log=True)
    (_,_,nu_IC_curv[ID]),err=get_log_par_peak(x_p,y_p,my_jet,'EC_BLR')
    nu_IC_curv_err[ID]=err[2]
    my_jet.plot_model(plot_obj=p_dt,comp='EC_DT',label='EC DT p_1=%2.2f'%p_1,color=colors[ID],auto_label=False,frame='src')
    my_jet.plot_model(plot_obj=p_dt,comp='DT',color=colors[ID+1],frame='src')
    my_jet.plot_model(plot_obj=p_blr,comp='Disk',color=colors[ID+1],frame='src')

p_dt.rescale(x_min=10,x_max=26,y_max=50,y_min=43)
p_blr.rescale(x_min=10,x_max=26,y_max=50,y_min=43)
# -

# +
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(12,8))
ax=fig.add_subplot(111)
ax.errorbar(p1_val,nu_IC_curv,yerr=nu_IC_curv_err,fmt='-o',label='EC BLR fit above the peak')
ax.fill_between(p1_val, nu_IC_curv - nu_IC_curv_err, nu_IC_curv + nu_IC_curv_err, color='gray', alpha=0.2)
ax.set_ylabel(r'curvature')
ax.set_xlabel(r'p_1')
#ax.legend(fontsize='large',loc='lower left')
# -

# +
matplotlib.rc('font', **font)
my_jet.set_par('R_H',val=1E17)

size=5
p_dt=PlotSED(figsize=(12,9),frame='src')
p_blr=PlotSED(figsize=(12,9),frame='src')
my_jet.set_nu_grid_size(200)
my_jet.set_IC_nu_size(200)
my_jet.set_gamma_grid_size(100)
nu_IC_curv=np.zeros(size)
nu_IC_curv_err=np.zeros(size)
T_DT_val=np.zeros(size)
# Scan the dusty-torus temperature T_DT.
for ID,T_DT in enumerate(np.logspace(1,3.5,size)):
    # Record the scanned temperature (was `p1_val[ID]=p_1`, a copy-paste
    # slip from the p_1 scan above that left T_DT_val unused).
    T_DT_val[ID]=T_DT
    my_jet.parameters.T_DT.val=T_DT
    my_jet.set_N_from_nuLnu(nu_src=3E12,nuLnu_src=1E47)
    my_jet.eval()
    my_jet.plot_model(plot_obj=p_blr,comp='Sync',label=None,auto_label=False,line_style='--',color=colors[ID],frame='src')
    my_jet.plot_model(plot_obj=p_dt,comp='Sync',label=None,auto_label=False,line_style='--',color=colors[ID],frame='src')
    my_jet.plot_model(plot_obj=p_dt,comp='EC_DT',label='EC DT T=%2.2f'%T_DT,color=colors[ID],auto_label=False,frame='src')
    my_jet.plot_model(plot_obj=p_dt,comp='DT',color=colors[ID], label='DT T=%2.2f'%T_DT,frame='src')

p_dt.rescale(x_min=10,x_max=26,y_max=50,y_min=43)
# -

# ## Changing the external field transformation
#

my_jet.set_external_field_transf('blob')

my_jet.set_external_field_transf('disk')

# +
def iso_field_transf(L,R,BulckFactor):
    """Blob-frame energy density of an isotropic external field of
    luminosity L evaluated at radius R, for bulk factor `BulckFactor`.
    NOTE(review): `beta` below holds 1 - 1/Gamma^2 (i.e. beta squared), so
    `beta**2` in the formula is beta^4 -- verify against the intended
    Gamma^2 * (1 + beta^2/3) transformation."""
    beta=1.0 - 1/(BulckFactor*BulckFactor)
    return L/(4*np.pi*R*R*3E10)*BulckFactor*BulckFactor*(1+((beta**2)/3))

def external_iso_behind_transf(L,R,BulckFactor):
    """Blob-frame energy density for an external field seen from behind
    (de-boosted) at distance R. Same caveat as above: `beta` is beta^2."""
    beta=1.0 - 1/(BulckFactor*BulckFactor)
    return L/((4*np.pi*R*R*3E10)*(BulckFactor*BulckFactor*(1+beta)**2))
# -

# EC seed photon fields, in the Disk rest frame
#
#

# +
# %matplotlib inline
fig = plt.figure(figsize=(8,6))
ax=fig.subplots(1)

N=50
# G=1: evaluate in the disk rest frame (no boosting).
G=1
R_range=np.logspace(13,25,N)
y=np.zeros((8,N))
my_jet.set_verbosity(0)
my_jet.set_par('R_BLR_in',1E17)
my_jet.set_par('R_BLR_out',1.1E17)
for ID,R in enumerate(R_range):
    my_jet.set_par('R_H',val=R)
    my_jet.set_external_fields()
    my_jet.energetic_report(verbose=False)
    # Disk-rest-frame energy densities (DRF) computed by jetset.
    y[1,ID]=my_jet.energetic_dict['U_BLR_DRF']
    y[0,ID]=my_jet.energetic_dict['U_Disk_DRF']
    y[2,ID]=my_jet.energetic_dict['U_DT_DRF']

# Analytic approximations for comparison (uniform inside, 1/R^2 outside).
y[4,:]=iso_field_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_DT.val,my_jet.parameters.R_DT.val,G)
y[3,:]=iso_field_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_BLR.val,my_jet.parameters.R_BLR_in.val,G)
y[5,:]=external_iso_behind_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_BLR.val,R_range,G)
y[6,:]=external_iso_behind_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_DT.val,R_range,G)
y[7,:]=external_iso_behind_transf(my_jet._blob.L_Disk_radiative,R_range,G)
ax.plot(np.log10(R_range),np.log10(y[0,:]),label='Disk')
ax.plot(np.log10(R_range),np.log10(y[1,:]),'-',label='BLR')
ax.plot(np.log10(R_range),np.log10(y[2,:]),label='DT')
# Dashed curves: the analytic approximations computed above.
ax.plot(np.log10(R_range),np.log10(y[3,:]),'--',label='BLR uniform')
ax.plot(np.log10(R_range),np.log10(y[4,:]),'--',label='DT uniform')
ax.plot(np.log10(R_range),np.log10(y[5,:]),'--',label='BLR 1/R2')
ax.plot(np.log10(R_range),np.log10(y[6,:]),'--',label='DT 1/R2')
ax.plot(np.log10(R_range),np.log10(y[7,:]),'--',label='Disk 1/R2')
ax.set_xlabel('log(R_H) cm')
ax.set_ylabel('log(Uph) erg cm-3 s-1')
ax.legend()
# -

# EC seed photon fields, in the blob rest frame

# +
# %matplotlib inline
fig = plt.figure(figsize=(8,6))
ax=fig.subplots(1)

L_Disk=1E45
N=50
# This time transform to the blob frame, so use the jet's actual bulk factor.
G=my_jet.parameters.BulkFactor.val
R_range=np.logspace(15,22,N)
y=np.zeros((8,N))
my_jet.set_par('L_Disk',val=L_Disk)
# NOTE(review): the two assignments below are repeated verbatim; the second
# pair is redundant.
my_jet._blob.theta_n_int=100
my_jet._blob.l_n_int=100
my_jet._blob.theta_n_int=100
my_jet._blob.l_n_int=100
for ID,R in enumerate(R_range):
    my_jet.set_par('R_H',val=R)
    # Scale the BLR and torus radii with disk luminosity (R ~ L^0.5).
    my_jet.set_par('R_BLR_in',1E17*(L_Disk/1E45)**.5)
    my_jet.set_par('R_BLR_out',1.1E17*(L_Disk/1E45)**.5)
    my_jet.set_par('R_DT',2.5E18*(L_Disk/1E45)**.5)
    my_jet.set_external_fields()
    my_jet.energetic_report(verbose=False)
    # Blob-frame energy densities (no _DRF suffix).
    y[1,ID]=my_jet.energetic_dict['U_BLR']
    y[0,ID]=my_jet.energetic_dict['U_Disk']
    y[2,ID]=my_jet.energetic_dict['U_DT']

# Analytic approximations for comparison (uniform inside, 1/R^2 outside).
y[4,:]=iso_field_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_DT.val,my_jet.parameters.R_DT.val,G)
y[3,:]=iso_field_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_BLR.val,my_jet.parameters.R_BLR_in.val,G)
y[5,:]=external_iso_behind_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_BLR.val,R_range,G)
y[6,:]=external_iso_behind_transf(my_jet._blob.L_Disk_radiative*my_jet.parameters.tau_DT.val,R_range,G)
y[7,:]=external_iso_behind_transf(my_jet._blob.L_Disk_radiative,R_range,G)

ax.plot(np.log10(R_range),np.log10(y[0,:]),label='Disk')
ax.plot(np.log10(R_range),np.log10(y[1,:]),'-',label='BLR')
ax.plot(np.log10(R_range),np.log10(y[2,:]),'-',label='DT')
ax.plot(np.log10(R_range),np.log10(y[3,:]),'--',label='BLR uniform')
ax.plot(np.log10(R_range),np.log10(y[4,:]),'--',label='DT uniform')
ax.plot(np.log10(R_range),np.log10(y[5,:]),'--',label='BLR 1/R2')
ax.plot(np.log10(R_range),np.log10(y[6,:]),'--',label='DT 1/R2')
ax.plot(np.log10(R_range),np.log10(y[7,:]),'--',label='Disk 1/R2')
# Mark the torus radius and the outer BLR radius on the R_H axis.
ax.axvline(np.log10( my_jet.parameters.R_DT.val ))
ax.axvline(np.log10( my_jet.parameters.R_BLR_out.val))
ax.set_xlabel('log(R_H) cm')
ax.set_ylabel('log(Uph`) erg cm-3 s-1')
ax.legend()
# -

# # Exercise
# derive the trend for the Compton dominance (CD) as a function of gmin, B, and BulkFactor
#
# hint: use the get_component_peak to extract the peak of the SED for each component

# ## IC against the CMB
#

# Fresh jet: log-parabola with low-energy power law ('lppl') electrons,
# large emitting region, used to study EC scattering on the CMB.
my_jet=Jet(name='test_equipartition',electron_distribution='lppl',beaming_expr='bulk_theta')
my_jet.set_par('R',val=1E21)
my_jet.set_par('z_cosm',val= 0.651)
my_jet.set_par('B',val=2E-5)
my_jet.set_par('gmin',val=50)
my_jet.set_par('gamma0_log_parab',val=35.0E3)
my_jet.set_par('gmax',val=30E5)
my_jet.set_par('theta',val=12.0)
my_jet.set_par('BulkFactor',val=3.5)
my_jet.set_par('s',val=2.58)
my_jet.set_par('r',val=0.42)
my_jet.set_N_from_nuFnu(5E-15,1E12)
my_jet.add_EC_component('EC_CMB')

# +
from jetset.plot_sedfit import PlotSED
p=PlotSED()

# Compare the total SED for the two external-field transformations while
# scanning the viewing angle theta.
my_jet.set_external_field_transf('blob')
c= ['k', 'g', 'r', 'c']
for ID,theta in enumerate(np.linspace(2,20,4)):
    my_jet.parameters.theta.val=theta
    my_jet.eval()
    my_jet.plot_model(plot_obj=p,comp='Sum',label='blob, theta=%2.2f'%theta,line_style='--',color=c[ID])

my_jet.set_external_field_transf('disk')
for ID,theta in enumerate(np.linspace(2,20,4)):
    my_jet.parameters.theta.val=theta
    my_jet.eval()
    my_jet.plot_model(plot_obj=p,comp='Sum',label='disk, theta=%2.2f'%theta,line_style='',color=c[ID])

p.rescale(y_min=-17.5,y_max=-12.5,x_max=28)
# -

# ## Equipartition

# +
my_jet.parameters.theta.val=12
# Solve for the equipartition magnetic field given the observed synchrotron
# flux at nu_obs, scanning N_pts values of B starting from B_min.
# NOTE(review): exact semantics of the returned (B_min, b_grid, U_B, U_e)
# should be confirmed against the jetset set_B_eq documentation.
B_min,b_grid,U_B,U_e=my_jet.set_B_eq(nuFnu_obs=5E-15,nu_obs=1E12,B_min=1E-9,N_pts=50,plot=True)
my_jet.show_pars()

my_jet.eval()
# -

p=my_jet.plot_model()
p.rescale(y_min=-16.5,y_max=-13.5,x_max=28)
notebooks/Tutorial_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# When the index of a pandas DataFrame is a date, the DataFrame turns into a _time series_.
# (Translated from the original Hungarian note.)

import pandas as pd

# Toy data: one value ('Ertek') per year ('Ev') / month ('Honap') observation.
df=pd.DataFrame([0,1,25,36,4,75,68,7,8])
df.columns=['Ertek']
df['Ev']=[2001,2001,2002,2002,2003,2003,2003,2004,2008]
df['Honap']=[1,2,1,2,5,6,7,8,8]

# Build a proper datetime column ('Szepdatum' = "nice date") from 'YYYY-M' strings.
df['Szepdatum']=pd.to_datetime(df['Ev'].astype(str)+'-'+df['Honap'].astype(str))

# Date-indexed (time-series) view, sorted chronologically.
df.set_index('Szepdatum').sort_index()

# The .dt accessor exposes the datetime components of the column.
df['Szepdatum'].dt.day

df['Szepdatum'].dt.month

df['Szepdatum'].dt.year

df['Szepdatum'].dt.month_name()

# Localized month names require the corresponding locale to be installed on
# the system; fall back to the default (English) names when it is not.
try:
    df['Szepdatum'].dt.month_name('HUN')
except Exception:
    df['Szepdatum'].dt.month_name()

# Series.dt.weekday_name was removed in pandas 1.0; day_name() is the
# drop-in replacement and returns the same weekday names.
df['Szepdatum'].dt.day_name()
present/bi/2019/jupyter/4-datumok.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:.conda-tf-2-gpu] *
#     language: python
#     name: conda-env-.conda-tf-2-gpu-py
# ---

# +
# NOTE(review): this notebook uses the TF 1.x graph API (tf.placeholder,
# tf.Session, tf.train.*); under the TF-2 environment named in the kernelspec
# it would need tf.compat.v1 + disable_eager_execution() -- confirm the
# installed TensorFlow version.
import tensorflow as tf
import numpy as np
from datetime import datetime
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# -

def generate_matrix(n, dirichlet_param=0.1):
    """Draw an n x n row-stochastic ground-truth matrix whose rows are
    i.i.d. samples from a symmetric Dirichlet(dirichlet_param) distribution."""
    alpha = [dirichlet_param for _ in range(n)]
    # np.random.dirichlet draws all n rows in one vectorized call; the result
    # has shape (n, n), identical in distribution to the row-by-row loop.
    return np.random.dirichlet(alpha, size=n)

# +
# Matrix size n -> list of factorization ranks l to try.
nl_dict = {3: [1, 2, 3, 5], 5: [1, 3, 5, 10], 10: [1, 5, 10, 15]}

# Input params
mean = 0.
std = 1.
lr = 0.01
iter_ = 20  # Number of repetitions per GT matrix and per n,l-parameter
n_gt = 10  # Number of GT matrices to generate
n_iter = 30000  # Optimization iterations
min_iter = 2000  # Minimum number of optimization iteration
convergence_tol = 0.  # Convergence threshold
# -

# Graph
def build_graph(n, l):
    """Build the TF graph for one (n, l) setting: a ground-truth placeholder
    plus two rank-l factorizations of it -- (i) normAbsLin: |V W| with row
    normalization, and (ii) Softmax: softmax(V W) -- each with its own Adam
    optimizer on the GT-normalized Frobenius loss.

    Returns (init_op, A_gt, A_tilde_lin, A_tilde_sm, opt_lin, opt_sm,
    loss_norm_lin, loss_norm_sm).
    """
    A_gt = tf.placeholder(dtype=tf.float32, shape=[n, n])
    init_normal = tf.random_normal_initializer(mean, stddev=std)

    # Approach (i) - normAbsLin factorization with normalization
    with tf.variable_scope("%d%d_lin" % (n, l), reuse=tf.AUTO_REUSE):
        V_lin = tf.get_variable("V", shape=[n, l], dtype=tf.float32, initializer=init_normal)
        W_lin = tf.get_variable("W", shape=[l, n], dtype=tf.float32, initializer=init_normal)
    A_tilde_lin = tf.math.abs(tf.matmul(V_lin, W_lin))
    # Normalize each row so A_tilde_lin is row-stochastic like the GT matrix.
    A_tilde_lin /= tf.reduce_sum(A_tilde_lin, axis=1)[:, None]
    A_sum_lin = tf.reduce_sum(A_tilde_lin, axis=1)
    loss_lin = tf.norm(A_gt - A_tilde_lin)
    loss_norm_lin = tf.norm(A_gt - A_tilde_lin) / tf.norm(A_gt)
    #opt_lin = tf.train.GradientDescentOptimizer(lr).minimize(loss_norm_lin)
    with tf.variable_scope("%d%d_lin" % (n, l), reuse=tf.AUTO_REUSE):
        opt_lin = tf.train.AdamOptimizer(lr).minimize(loss_norm_lin)

    # Approach (ii) - Softmax factorization.
    # V_sm/W_sm start from the SAME initial values as V_lin/W_lin so the two
    # approaches are compared from identical initializations.
    with tf.variable_scope("%d%d_sm" % (n, l), reuse=tf.AUTO_REUSE):
        V_sm = tf.Variable(V_lin.initialized_value(), dtype=tf.float32, name="V")#tf.get_variable("V", shape=[n, l], dtype=tf.float32, initializer=init_normal)
        W_sm = tf.Variable(W_lin.initialized_value(), dtype=tf.float32, name="W")
    A_tilde_sm = tf.math.softmax(tf.matmul(V_sm, W_sm))
    A_sum_sm = tf.reduce_sum(A_tilde_sm, axis=1)
    loss_sm = tf.norm(A_gt - A_tilde_sm)
    loss_norm_sm = tf.norm(A_gt - A_tilde_sm) / tf.norm(A_gt)
    #opt_sm = tf.train.GradientDescentOptimizer(lr).minimize(loss_norm_sm)
    with tf.variable_scope("%d%d_sm" % (n, l), reuse=tf.AUTO_REUSE):
        opt_sm = tf.train.AdamOptimizer(lr).minimize(loss_norm_sm)

    #opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    init = tf.global_variables_initializer()
    return init, A_gt, A_tilde_lin, A_tilde_sm, opt_lin, opt_sm, loss_norm_lin, loss_norm_sm

# +
exp_dir = 'matrix_fit_exp_adam' + datetime.now().strftime('%Y%m%d_%H-%M-%S')
os.mkdir('./%s' % exp_dir)
os.mkdir('./%s/savepoints' % exp_dir)

# Result stores, keyed by (gt-index i, matrix size n, rank l).
# (The *_relu dicts are declared for symmetry but no ReLU variant is run here.)
gt_mats = dict()
losses_sm, losses_relu, losses_lin = dict(), dict(), dict()
mean_losses_sm, mean_losses_relu, mean_losses_lin = dict(), dict(), dict()
std_losses_sm, std_losses_relu, std_losses_lin = dict(), dict(), dict()
optimized_mat_sm, optimized_mat_relu, optimized_mat_lin = dict(), dict(), dict()
init_loss_sm, init_loss_relu, init_loss_lin = dict(), dict(), dict()

sess = tf.Session()
for i in range(n_gt):
    # Do the training for n_gt matrices
    # .items(): dict.iteritems() is Python 2 only and raises AttributeError
    # on the Python 3 kernel this notebook declares.
    for n, l_list in nl_dict.items():
        _A_inp = generate_matrix(n)
        gt_mats[(i, n)] = _A_inp
        for l in l_list:
            losses_sm[(i, n, l)], losses_lin[(i, n, l)] = [], []
            optimized_mat_sm[(i, n, l)], optimized_mat_lin[(i, n, l)] = [], []
            init_loss_sm[(i, n, l)], init_loss_lin[(i, n, l)] = [], []
            init, A, A_tilde_lin, A_tilde_sm, opt_lin, opt_sm, loss_norm_lin, loss_norm_sm = build_graph(n, l)
            feed_dict_ = {A: _A_inp}
            print("build graph", i, n, l)
            prev_loss_lin, prev_loss_sm = 0., 0.
            for _ in range(iter_):
                sess.run(init)  # Re-initialize variables (re-draws U, V)
                prev_loss_lin = sess.run([loss_norm_lin], feed_dict=feed_dict_)
                prev_loss_sm = sess.run([loss_norm_sm], feed_dict_)
                init_loss_lin[(i, n, l)].append(prev_loss_lin)
                init_loss_sm[(i, n, l)].append(prev_loss_sm)
                for step in range(n_iter):
                    if step < min_iter:
                        # Warm-up phase: always take both optimizer steps.
                        _, _ = sess.run([opt_lin, opt_sm], feed_dict=feed_dict_)
                    else:
                        # After warm-up, keep stepping only the approaches whose
                        # loss is still changing (with tol=0, "converged" means
                        # the loss is exactly unchanged between steps).
                        opt = []
                        if not ((cur_loss_lin <= prev_loss_lin) and (prev_loss_lin - cur_loss_lin <= convergence_tol)):
                            opt.append(opt_lin)
                        if not ((cur_loss_sm <= prev_loss_sm) and (prev_loss_sm - cur_loss_sm <= convergence_tol)):
                            opt.append(opt_sm)
                        if len(opt) < 1:
                            # All converged
                            print("All converged")
                            break
                        else:
                            sess.run(opt, feed_dict=feed_dict_)
                    if step >= 2:
                        prev_loss_lin, prev_loss_sm = cur_loss_lin, cur_loss_sm
                    cur_loss_lin, cur_loss_sm = sess.run([loss_norm_lin, loss_norm_sm], feed_dict=feed_dict_)
                    if step % 5000 == 0:
                        print(cur_loss_lin, cur_loss_sm)
                optimized_mat_lin[(i, n, l)].append(sess.run([A_tilde_lin]))
                optimized_mat_sm[(i, n, l)].append(sess.run([A_tilde_sm]))
                print(i, n, l, cur_loss_lin, cur_loss_sm)
                losses_lin[(i, n, l)].append(cur_loss_lin)
                losses_sm[(i, n, l)].append(cur_loss_sm)
            mean_losses_sm[(i, n, l)], mean_losses_lin[(i, n, l)] = np.mean(losses_sm[(i, n, l)]), np.mean(losses_lin[(i, n, l)])
            std_losses_sm[(i, n, l)], std_losses_lin[(i, n, l)] = np.std(losses_sm[(i, n, l)]), np.std(losses_lin[(i, n, l)])
            # Periodic checkpoint after each (i, n, l) combination so partial
            # results survive an interrupted run.
            np.save('%s/savepoints/losses_lin.npy' % exp_dir, losses_lin)
            np.save('%s/savepoints/losses_sm.npy'% exp_dir, losses_sm)
            np.save('%s/savepoints/mean_losses_lin.npy'% exp_dir, mean_losses_lin)
            np.save('%s/savepoints/mean_losses_sm.npy'% exp_dir, mean_losses_sm)
            np.save('%s/savepoints/std_losses_sm.npy'% exp_dir, std_losses_sm)
            np.save('%s/savepoints/std_losses_lin.npy'% exp_dir, std_losses_lin)
            np.save('%s/savepoints/optimized_mat_lin.npy'% exp_dir, optimized_mat_lin)
            np.save('%s/savepoints/optimized_mat_sm.npy'% exp_dir, optimized_mat_sm)
            np.save('%s/savepoints/init_loss_sm.npy'% exp_dir, init_loss_sm)
            np.save('%s/savepoints/init_loss_lin.npy'% exp_dir, init_loss_lin)
            np.save('%s/savepoints/gt_mats.npy'% exp_dir,gt_mats)

# Final results of the full experiment.
np.save('%s/losses_lin.npy' % exp_dir, losses_lin)
np.save('%s/losses_sm.npy'% exp_dir, losses_sm)
np.save('%s/mean_losses_lin.npy'% exp_dir, mean_losses_lin)
np.save('%s/mean_losses_sm.npy'% exp_dir, mean_losses_sm)
np.save('%s/std_losses_sm.npy'% exp_dir, std_losses_sm)
np.save('%s/std_losses_lin.npy'% exp_dir, std_losses_lin)
np.save('%s/optimized_mat_lin.npy'% exp_dir, optimized_mat_lin)
np.save('%s/optimized_mat_sm.npy'% exp_dir, optimized_mat_sm)
np.save('%s/init_loss_sm.npy'% exp_dir, init_loss_sm)
np.save('%s/init_loss_lin.npy'% exp_dir, init_loss_lin)
np.save('%s/gt_mats.npy'% exp_dir,gt_mats)

print("Done")
print(exp_dir)
# -
code_dense_hmm/start_matrix_fit_experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="w35zPOcMyNwJ" colab={"base_uri": "https://localhost:8080/"} outputId="cf7f1632-6d72-44e9-ddc6-cbf30aadd2ac" from google.colab import drive drive.mount('/content/drive', force_remount=True) # + id="ph-qNIaajXGT" #drive.flush_and_unmount(timeout_ms=24) # + id="9xc7MvTxyQd7" import numpy as np import pandas as pd import pickle import numpy as np import random import time import os #os.environ["OPENCV_IO_MAX_IMAGE_PIXELS"] = pow(2,40).__str__() import cv2 from tqdm import tqdm import tensorflow as tf from tensorflow.python.keras import Sequential from tensorflow.keras import layers, optimizers from tensorflow.keras.applications import DenseNet121 from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.layers import * from tensorflow.keras.models import Model, load_model from tensorflow.keras.initializers import glorot_uniform from tensorflow.keras.utils import plot_model from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler from IPython.display import display from tensorflow.keras import backend as K import matplotlib.pyplot as plt import matplotlib.image as mpimg from sklearn.model_selection import train_test_split from keras import optimizers from sklearn.metrics import classification_report, confusion_matrix from keras.callbacks import CSVLogger, LambdaCallback # + id="LLy6y_e1yQhC" dataset_dir = "drive/My Drive/Plant_Leaf_MalayaKew_MK_Dataset/" work_dir = "drive/My Drive/Plant_Leaf_MalayaKew_MK_Dataset/Records/" data_instance = 128 # + id="SD4sqW2K0-VE" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="0e854b26-907d-4474-ac88-9fd34e94fb66" ''' list_categories = [f"Class{i}" for i in range(1,45)] num_classes = len(list_categories) print('num_classes =', 
num_classes) #''' # + id="JOgxOIr7FOFY" # + id="UGKp7XZD53Wc" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="7317b7fc-10b2-4f7f-ae3a-465a37475eb2" # Saving files: ''' np.save(work_dir+"np_test_imageset.pkl", np_test_imageset, allow_pickle=True) np.save(work_dir+"np_val_imageset.pkl", np_val_imageset, allow_pickle=True) np.save(work_dir+"np_train_imageset.pkl", np_train_imageset, allow_pickle=True) np.save(work_dir+"np_test_label.pkl", np_test_label, allow_pickle=True) np.save(work_dir+"np_val_label.pkl", np_val_label, allow_pickle=True) np.save(work_dir+"np_train_label.pkl", np_train_label, allow_pickle=True) #''' # + id="s5Owb8dv86lf" # Loading files: np_test_imageset = np.load(f"{dataset_dir}D2_Gray3D_crop_{data_instance}x{data_instance}_test_X.pkl.npy", allow_pickle=True) np_train_imageset = np.load(f"{dataset_dir}D2_Gray3D_crop_{data_instance}x{data_instance}_train_X.pkl.npy", allow_pickle=True) np_test_label_str = np.load(f"{dataset_dir}D2_Gray3D_crop_{data_instance}x{data_instance}_test_y.pkl.npy", allow_pickle=True) np_train_label_str = np.load(f"{dataset_dir}D2_Gray3D_crop_{data_instance}x{data_instance}_train_y.pkl.npy", allow_pickle=True) # + colab={"base_uri": "https://localhost:8080/"} id="mP02FVE39aWn" outputId="7e37a7b4-41b7-42a3-81f3-9baaf06411bc" print(f"np_test_imageset.shape = {np_test_imageset.shape}") print(f"np_train_imageset.shape = {np_train_imageset.shape}") # + id="CNOJ9MQMFmNk" np_train_imageset_2D = np_train_imageset np_test_imageset_2D = np_test_imageset # + colab={"base_uri": "https://localhost:8080/"} id="jUMuryG8DRhY" outputId="7c84ed4c-564f-4682-dd90-f32046184dc1" #''' np_train_imageset_3D_channel_first = np.array([np_train_imageset_2D, np_train_imageset_2D, np_train_imageset_2D]) np_train_imageset_3D = np.moveaxis(np_train_imageset_3D_channel_first, 0, -1) np_train_imageset = np_train_imageset_3D print(f"np_train_imageset_3D_channel_first.shape = {np_train_imageset_3D_channel_first.shape}") 
print(f"np_train_imageset_3D.shape = {np_train_imageset_3D.shape}") print(f"np_train_imageset.shape = {np_train_imageset.shape}\n") np_test_imageset_3D_channel_first = np.array([np_test_imageset_2D, np_test_imageset_2D, np_test_imageset_2D]) np_test_imageset_3D = np.moveaxis(np_test_imageset_3D_channel_first, 0, -1) np_test_imageset = np_test_imageset_3D print(f"np_test_imageset_3D_channel_first.shape = {np_test_imageset_3D_channel_first.shape}") print(f"np_test_imageset_3D.shape = {np_test_imageset_3D.shape}") print(f"np_test_imageset.shape = {np_test_imageset.shape}") #''' # + id="mk51XXBK7q-0" np_test_label_1_based = np_test_label_str.astype(np.int) np_train_label_1_based = np_train_label_str.astype(np.int) # + colab={"base_uri": "https://localhost:8080/"} id="TUz7p_tbst8G" outputId="4332d9df-fb8a-4c08-bcf0-2ef77a492688" classes = np.unique(np_test_label_1_based) num_classes = len(classes) print('num_classes =', num_classes) print(f"Labels: {classes}") # + id="XZMdE_O78uKw" np_test_label = np_test_label_1_based-min(np_test_label_1_based) np_train_label = np_train_label_1_based-min(np_test_label_1_based) # + colab={"base_uri": "https://localhost:8080/"} id="DHSawF_C9ChQ" outputId="941adcd0-5fe1-432a-a661-0f3b38c48e02" classes = np.unique(np_test_label) num_classes = len(classes) print('num_classes =', num_classes) print(f"Zero-based Labels: {classes}") # + id="gA3wNuyQ534T" # + id="OLwF7sUsyRGR" colab={"base_uri": "https://localhost:8080/"} outputId="7f98a81d-069f-45df-e6cd-da33153f27fb" #''' final_test_imageset = np.expand_dims(np_test_imageset, axis = 4) final_train_imageset = np.expand_dims(np_train_imageset, axis = 4) #''' ''' final_test_imageset = np.expand_dims(np_test_imageset, axis = 3) final_train_imageset = np.expand_dims(np_train_imageset, axis = 3) #''' test_label2 = np.expand_dims(np_test_label, axis = 1) train_label2 = np.expand_dims(np_train_label, axis = 1) print('final_test_imageset.shape =', final_test_imageset.shape) 
print('final_train_imageset.shape =', final_train_imageset.shape) print('\ntest_label2.shape =', test_label2.shape) print('train_label2.shape =', train_label2.shape) # + id="OYGbKJikNxEq" # + id="EIaimJqwyQ_c" colab={"base_uri": "https://localhost:8080/"} outputId="50d7fdf9-b2fa-4114-84fd-cadc6d099156" final_test_label = tf.keras.utils.to_categorical(test_label2, num_classes) final_train_label = tf.keras.utils.to_categorical(train_label2, num_classes) print('final_test_label.shape =',final_test_label.shape) print('final_train_label.shape =',final_train_label.shape) # + id="roidu5RmFRJq" # + id="XRwynF9xPBWy" # DenseNet121 ResNet101 DenseNet201 InceptionV3 Xception NASNetLarge ResNet152V2 InceptionResNetV2 EfficientNetB7 impl_type = "TransferLearning3D.DenseNet201" # TransferLearning3D dataset = f"MalayaKew.gray3D.{np_test_imageset.shape[1:]}" # +str(img_size)+"p" # + id="rLVsewD-O3gl" ''' count_no_improvement = 0 epoch_initial = True #''' # + id="OxppPm7hO3l8" colab={"base_uri": "https://localhost:8080/"} outputId="ba238ae1-f4a2-43c3-e348-1454eaba4768" #NUM_NEURONS = 16 #NUM_LAYERS = 3 BATCH_SIZE = 16 # 10 NUM_EPOCHS = 300 epochs_completed = 0 LEARNING_RATE = 0.00001 EPSILON = 1e-4 early_stop_after_epochs = 10 DROPOUT = 0.5 # 0.5 0.0 pad = 0 LOSS = 'categorical_crossentropy' ACTIVATION_FUNCTION = 'elu' # relu sigmoid elu FINAL_ACTIVATION_FUNCTION = 'softmax' validation_split = 0.1 kernel_size=(1,1) pointTrainableAfter = "allDefault" # "allDefault" 160 170 OPTIMIZER = "Adam" # Adam SGD RMSProp init_weights = "imagenet" # "imagenet" None modelExt = ".Dense.2048.2048.2048.4096" # .Dense.128.256.512, .512.512.512 .Dense.512.512.512.512.Res l2_val = 0.001 # # +"_kernel"+str(kernel_size)+"_lr"+str(LEARNING_RATE)+"_batch"+str(BATCH_SIZE)+"_epochs"+str(NUM_EPOCHS) #checkpointer_name = "weights_"+dataset+"_"+impl_type+"_nLayers"+str(NUM_LAYERS)+"_nNeurons"+str(NUM_NEURONS)+".hdf5" ext = f".Flatten.l2.{str(l2_val)}.run_1" # run_1 run_2 .DropAfter .momentum0.9 #''' 
# Checkpoint / log file names encode every hyper-parameter of the run so
# different configurations never overwrite each other.
checkpointer_name = "weights."+dataset+".pad"+str(pad)+"."+impl_type+".wInit."+str(init_weights)+".TrainableAfter."+str(pointTrainableAfter)+\
modelExt+".actF."+ACTIVATION_FUNCTION+".opt."+OPTIMIZER+".drop."+str(DROPOUT)+".batch"+str(BATCH_SIZE)+ext+".hdf5"
# Log name = checkpoint name minus the "weights." prefix (8 chars) and the
# ".hdf5" suffix (5 chars).
log_name = "log."+checkpointer_name[8:-5]+".log"

print('checkpointer_name =', checkpointer_name)
print('log_name =', log_name)
#'''

# + id="hcB3WVaWO6BV"

# + id="XyK6ZbJOOsjV" colab={"base_uri": "https://localhost:8080/"} outputId="5d2aef20-24e3-4ada-ee35-37dad8c121d3"
# Build the transfer-learning model: DenseNet201 backbone (optionally
# ImageNet-initialized, classification top removed) + Flatten + a custom
# L2-regularized Dense head (2048-2048-2048-4096 with dropout) + softmax
# output over num_classes. Commented lines are backbone variants tried
# in other runs.
#'''
#base_model=DenseNet121(weights=None, include_top=False, input_shape=np_train_dataset2.shape[1:]) # `None` (random initialization)
#base_model=ResNet152V2(weights=None, include_top=False, input_shape=np_train_dataset2.shape[1:]) # ResNet152V2 ResNet50 ResNet101 ResNet152 DenseNet201 InceptionV3 Xception NASNetLarge 'imagenet' ResNet152V2 DenseNet121

#inputs = Input(final_train_imageset.shape[1:])
#x = ZeroPadding2D(padding=(pad,pad))(inputs)
#base_model=tf.keras.applications.ResNet50(weights=init_weights, include_top=False, input_tensor=x)
base_model=tf.keras.applications.DenseNet201(weights=init_weights, include_top=False, input_shape=np_train_imageset.shape[1:])

x=base_model.output
x = Flatten()(x)
#'''
x = Dense(2048, kernel_regularizer=tf.keras.regularizers.l2(l2_val), activation=ACTIVATION_FUNCTION)(x)
x = Dropout(DROPOUT)(x)
x = Dense(2048, kernel_regularizer=tf.keras.regularizers.l2(l2_val), activation=ACTIVATION_FUNCTION)(x)
x = Dropout(DROPOUT)(x)
x = Dense(2048, kernel_regularizer=tf.keras.regularizers.l2(l2_val), activation=ACTIVATION_FUNCTION)(x)
x = Dropout(DROPOUT)(x)
x = Dense(4096, kernel_regularizer=tf.keras.regularizers.l2(l2_val), activation=ACTIVATION_FUNCTION)(x)
x = Dropout(DROPOUT)(x)
#'''

outputs=Dense(num_classes,activation='softmax')(x)
model=Model(inputs=base_model.input,outputs=outputs)
model.summary()
#'''

# + id="WgomlDGDqn6-" colab={"base_uri": "https://localhost:8080/", "height": 36}
# outputId="a3c4ad83-ac4c-4693-912a-41093ce76e88"
# Disabled: render the model architecture diagram to model.png.
'''
tf.keras.utils.plot_model(
    model,
    to_file='model.png',
    show_shapes=True,
    show_dtype=False,
    show_layer_names=True,
    rankdir='TB',
    expand_nested=True,
    dpi=32
)
#'''

# + id="ozx7Z-ZiUE-2" colab={"base_uri": "https://localhost:8080/"} outputId="eba34020-0ab3-4d6b-d605-92868daae3b6"
# Freeze/unfreeze layers. "allDefault" trains every layer; an integer value
# freezes all layers before that index and trains the rest.
count_trainable = 0
count_non_trainable = 0
#'''
if pointTrainableAfter == "allDefault":
    for layer in model.layers:
        layer.trainable=True
        count_trainable += 1
elif pointTrainableAfter > 0:
    for layer in model.layers[:pointTrainableAfter]: # [:-pointTrainableAfter]
        layer.trainable=False
        count_non_trainable += 1
    for layer in model.layers[pointTrainableAfter:]: # [-pointTrainableAfter:]
        layer.trainable=True
        count_trainable += 1
#'''
'''
for layer in model.layers:
    layer.trainable=True
    count_trainable += 1
#'''

print("count_non_trainable =", count_non_trainable)
print("count_trainable =", count_trainable)
print("Total number of layers =", count_non_trainable+count_trainable)

# + id="aqwYD5TGPxyV" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="a6debb40-4658-4189-89c0-0a04d8ddff3c"
# Disabled: older checkpoint-name scheme (without the activation tag).
'''
checkpointer_name = "weights."+dataset+".pad"+str(pad)+"."+impl_type+".wInit."+str(init_weights)+".TrainableAfter."+str(pointTrainableAfter)+\
modelExt+".opt."+OPTIMIZER+".drop."+str(DROPOUT)+".batch"+str(BATCH_SIZE)+ext+".hdf5"
log_name = "log."+checkpointer_name[8:-5]+".log"

print('checkpointer_name =', checkpointer_name)
print('log_name =', log_name)
#'''

# + id="3T7w_lC1QCPh" colab={"base_uri": "https://localhost:8080/"} outputId="f2343fed-ceee-41b9-9f39-ac5e41f4fec3"
# Select the optimizer by name.
# NOTE(review): `lr` is the legacy Keras argument name (newer releases expect
# `learning_rate`); kept as-is for the environment this notebook ran in.
# "RMSProp" "SGD" "Adam" "Adamax" "Adadelta" "Adagrad" "SGD"
#optimizer = tf.keras.optimizers.RMSprop(lr = LEARNING_RATE, epsilon=EPSILON)

if OPTIMIZER == "RMSProp":
    optimizer = tf.keras.optimizers.RMSprop(lr = LEARNING_RATE, epsilon=EPSILON)
elif OPTIMIZER == "Adam":
    optimizer = tf.keras.optimizers.Adam(lr = LEARNING_RATE, epsilon=EPSILON, beta_1=0.9, beta_2=0.999)
elif OPTIMIZER == "Adamax":
    optimizer = tf.keras.optimizers.Adamax(lr = LEARNING_RATE, epsilon=EPSILON, beta_1=0.9, beta_2=0.999)
elif OPTIMIZER == "Adadelta":
    optimizer = tf.keras.optimizers.Adadelta(lr = LEARNING_RATE, epsilon=EPSILON, rho=0.95)
elif OPTIMIZER == "Adagrad":
    optimizer = tf.keras.optimizers.Adagrad(lr = LEARNING_RATE, epsilon=EPSILON, initial_accumulator_value=0.1)
elif OPTIMIZER == "SGD":
    optimizer = tf.keras.optimizers.SGD(lr = LEARNING_RATE, momentum=0.9)

model.compile(
    #optimizer=OPTIMIZER,
    optimizer=optimizer,
    loss=LOSS,
    metrics=['accuracy']
)

print("OPTIMIZER =", OPTIMIZER)

# + id="OdySVEG3QCpv"
# save the best model with least validation loss
# `checkpointer` snapshots EVERY epoch (save_best_only=False) so training can
# be resumed; `checkpointer_best` would keep only the lowest-val_loss weights
# (the custom callback below is used for that instead).
checkpointer = ModelCheckpoint(filepath = work_dir+checkpointer_name,
                               #monitor='val_accuracy',
                               monitor='val_loss',
                               save_weights_only=False,
                               mode='auto',
                               verbose = 0, # 0 = silent, 1 = progress bar, 2 = one line per epoch
                               save_best_only =False
                               )

checkpointer_best = ModelCheckpoint(filepath = work_dir+"best_"+checkpointer_name,
                                    monitor='val_loss',
                                    save_weights_only=False,
                                    mode='auto',
                                    verbose = 1,
                                    save_best_only = True
                                    )

early_stopping = EarlyStopping(monitor='loss', patience=early_stop_after_epochs)

# + id="Fq3iXmYXQHNL" colab={"base_uri": "https://localhost:8080/"} outputId="e7434fbe-d1e3-4a9b-a9e2-ab4d8d1d6fb7"
# Custom "save best + early stop" bookkeeping driven from a LambdaCallback;
# state intentionally lives in module globals so it survives notebook re-runs.
'''
if 'count_no_improvement' not in globals():
    count_no_improvement = 0
print("count_no_improvement =", count_no_improvement)
#'''
#'''
count_no_improvement = 0
epoch_initial = False
#'''
min_delta = 0.0009  # minimum val_loss improvement that counts as progress

print("count_no_improvement =", count_no_improvement)

def checkBestPerformance(epoch, logs):
    # Epoch-end hook: save the model when val_loss improves by >= min_delta
    # (or val_accuracy improves at equal val_loss); otherwise count the
    # stagnant epoch and, after `early_stop_after_epochs` of them, delete the
    # callback list to stop training.
    save_filepath = work_dir+"best_"+checkpointer_name
    global epoch_initial
    if epoch_initial == True:
        # First epoch of a fresh run: always save as the initial "best".
        epoch_initial = False
        model.save(filepath = save_filepath)
        print(". Model saved!")
    elif epoch_initial == False:
        global count_no_improvement
        # Compare against the best values recorded so far in the CSV log;
        # values are truncated via str()[:6] to ignore sub-milli noise.
        log_data = pd.read_csv(work_dir+log_name, sep=',', usecols=['val_loss', 'val_accuracy'], engine='python')
        min_val_loss = float(str(min(log_data.val_loss.values))[:6])
        max_val_acc = float(str(max(log_data.val_accuracy.values))[:6])
        current_val_acc = float(str(logs['val_accuracy'])[:6])
        current_val_loss = float(str(logs['val_loss'])[:6])
        if (current_val_loss < min_val_loss) and (abs(current_val_loss-min_val_loss) >= min_delta):
            count_no_improvement = 0
            model.save(filepath = save_filepath)
            print("\nval_loss decreased from",min_val_loss," to",current_val_loss,"( val_accuracy =",current_val_acc,").")
        elif (current_val_loss==min_val_loss) and (current_val_acc>max_val_acc):
            count_no_improvement = 0
            model.save(filepath = save_filepath)
            print("\nval_accuracy increased to", current_val_acc, ".")
        else:
            count_no_improvement += 1
            print(". count_no_improvement =", count_no_improvement)
            if count_no_improvement >= early_stop_after_epochs:
                # NOTE(review): deleting these globals is what stops training,
                # but any later callback invocation would hit a NameError on
                # count_no_improvement -- confirm training stops immediately.
                global list_callbacks
                del list_callbacks, count_no_improvement
    #print("count_no_improvement =", count_no_improvement, "... list_callbacks =", list_callbacks)

# + id="YYewyuDiREFZ" colab={"base_uri": "https://localhost:8080/"} outputId="ac07cdf0-a448-40c6-b1b6-47ed7e6db5d4"
# Resume support: if a CSV log exists, count completed epochs and reload the
# per-epoch checkpoint; otherwise start fresh. Either way register the
# every-epoch checkpointer, the custom best-model hook and the CSV logger.
epochs_completed = 0
list_callbacks = []
csv_logger = CSVLogger(work_dir+log_name, separator=',', append=True)

#if 'list_callbacks' in globals():
#    del list_callbacks

try:
    log_data = pd.read_csv(work_dir+log_name, sep=',', usecols=['epoch'], engine='python')
    epochs_completed = log_data.shape[0]
    #if epochs_completed > 0:
    model = load_model(work_dir+checkpointer_name)
    list_callbacks = [checkpointer, LambdaCallback(on_epoch_end=checkBestPerformance), csv_logger]
    print("epochs_completed =", epochs_completed)
except Exception as error:
    if epochs_completed == 0:
        # list_callbacks = [checkpointer, checkpointer_best, csv_logger, early_stopping]
        list_callbacks = [checkpointer, LambdaCallback(on_epoch_end=checkBestPerformance), csv_logger]
        print("epochs_completed =", epochs_completed)
    elif epochs_completed > 0:
        print(error)

print('checkpointer_name =', checkpointer_name)

# + id="6JKqrHnwRGGz" colab={"base_uri": "https://localhost:8080/"} outputId="2308edf8-9acc-40a5-f42b-1754b805000e"
print('checkpointer_name =', checkpointer_name)
print("Previously completed epochs =", epochs_completed)
print("count_no_improvement =", count_no_improvement, "\n")

#'''
# Train for the remaining epochs; the test set doubles as validation data.
try:
    start_time = time.time()
    history = model.fit(np_train_imageset,
                        final_train_label,
                        shuffle=True,
                        batch_size = BATCH_SIZE,
                        epochs = NUM_EPOCHS - epochs_completed,
                        #steps_per_epoch = 2,
                        #validation_split = validation_split,
                        validation_data = (np_test_imageset, final_test_label),
                        callbacks=list_callbacks
                        )
    elapsed_time = time.time() - start_time
    print("\nTime elapsed: ", elapsed_time)
except Exception as error:
    print("\nError:", error)
#'''

# + id="UJsgsZFzQCs2"
# weights.MalayaKew.gray3D.(128, 128, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.Dense.2048.2048.2048.4096.actF.elu.opt.Adam.drop.0.5.batch16.Flatten.l2.0.001.run_1.hdf5

# +
# + id="l_xKnoFvFQ4F"
# Results scratchpad (kept as an inert string): last-epoch test metrics of
# earlier configurations, for comparison.
'''
Record: ;
---
Test Acc: 0.3947, Test Loss: 3.1064: ep10, weights.MalayaKew.gray.pad10.TransferLearning.InceptionResNetV2.wInit.None.TrainableAfter.allDefault.Dense.512.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.6353, Test Loss: 1.8520: ep30, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.7067, Test Loss: 1.8239: ep32, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.RMSProp.drop.0.0.batch64.Flatten.run_2.hdf5
Test Acc: 0.6276, Test Loss: 1.4814: ep42, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.SGD.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.4453, Test Loss: 2.4178: ep17, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.170.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.6044, Test Loss: 2.3383: ep22, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.Dense.512.512.512.512.actF.elu.opt.Adam.drop.0.5.batch64.Flatten.regularizers.l2.0.0001.run_1.hdf5
Test Acc: 0.6112, Test Loss: 2.5392: ep24, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.Dense.512.512.512.512.Res.actF.elu.opt.Adam.drop.0.5.batch64.Flatten.regularizers.l2.0.0001.run_1.hdf5
Test Acc: 0.6484, Test Loss: 1.8469: ep24, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.6139, Test Loss: 2.3339: ep35, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.Dense.512.512.512.512.actF.elu.opt.SGD.drop.0.0.batch32.Flatten.run_1.hdf5
Test Acc: 0.7291, Test Loss: 4.8518: ep117, weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.Dense.1024.1024.2048.actF.elu.opt.SGD.drop.0.5.batch16.Flatten.l2.0.01.run_1.hdf5

Plant_Leaf_MalayaKew_MK_impl_5_crop_128p_Gray3D_Dense201_Custom2_withImageNet:
#'''

# Evaluate the LAST checkpoint (not necessarily the best one) on the test set.
csv_logger = CSVLogger(work_dir+log_name, separator=',', append=True)
log_data = pd.read_csv(work_dir+log_name, sep=',', usecols=['epoch'], engine='python')
epochs_completed = log_data.shape[0]

result = model.evaluate(final_test_imageset, final_test_label)
print("Test Acc: {}, Test Loss: {}: ep{}, {}\n".format(round(result[1],4), round(result[0],4), epochs_completed, checkpointer_name))

# + id="IQM7JieEFQ1G"
# Load the best-so-far checkpoint saved by checkBestPerformance().
#checkpointer_name = "weights.Fashion.DenseNet121.wInit.None.TrainableAfterallDefault.opt.SGD.drop.0.0.batch32.Flatten.run_1.hdf5"
model_loaded = load_model(work_dir+"best_"+checkpointer_name)
print("Loaded "+work_dir+"best_"+checkpointer_name+".")

# + id="chvVsOEgRfsO"
# Results scratchpad for the corresponding best_ checkpoints.
'''
Record: ;
---
Test Acc: 0.3833, Test Loss: 2.1568: ep10, best_weights.MalayaKew.gray.pad10.TransferLearning.InceptionResNetV2.wInit.None.TrainableAfter.allDefault.Dense.512.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.7168, Test Loss: 1.2828: ep30, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.6815, Test Loss: 1.5833: ep32, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.RMSProp.drop.0.0.batch64.Flatten.run_2.hdf5
Test Acc: 0.6133, Test Loss: 1.4825: ep42, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.SGD.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.4410, Test Loss: 2.0524: ep17, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.170.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.6757, Test Loss: 1.5270: ep22, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.Dense.512.512.512.512.actF.elu.opt.Adam.drop.0.5.batch64.Flatten.regularizers.l2.0.0001.run_1.hdf5
Test Acc: 0.5914, Test Loss: 1.7184: ep24, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.ResNet50.wInit.imagenet.TrainableAfter.allDefault.Dense.512.512.512.512.Res.actF.elu.opt.Adam.drop.0.5.batch64.Flatten.regularizers.l2.0.0001.run_1.hdf5
Test Acc: 0.7659, Test Loss: 0.7799: ep24, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.actF.relu.opt.Adam.drop.0.0.batch64.Flatten.run_1.hdf5
Test Acc: 0.7326, Test Loss: 1.1991: ep35, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.Dense.512.512.512.512.actF.elu.opt.SGD.drop.0.0.batch32.Flatten.run_1.hdf5
Test Acc: 0.7409, Test Loss: 4.803: ep117, best_weights.MalayaKew.gray3D.(64, 64, 3).pad0.TransferLearning3D.DenseNet201.wInit.imagenet.TrainableAfter.allDefault.Dense.1024.1024.2048.actF.elu.opt.SGD.drop.0.5.batch16.Flatten.l2.0.01.run_1.hdf5

Plant_Leaf_MalayaKew_MK_impl_5_crop_128p_Gray3D_Dense201_Custom2_withImageNet:
#'''

'''
csv_logger = CSVLogger(work_dir+log_name, separator=',', append=True)
log_data = pd.read_csv(work_dir+log_name, sep=',', usecols=['epoch'], engine='python')
epochs_completed = log_data.shape[0]
#'''

# Evaluate the best checkpoint on the test set.
result2 = model_loaded.evaluate(final_test_imageset, final_test_label)
#print("nLayers: {}, nNeurons: {}, DROPOUT: {}, Test Acc: {}, Test Loss: {}".format(NUM_LAYERS, NUM_NEURONS, DROPOUT, round(result2[1], 4), round(result2[0], 4)))
print("Test Acc: {}, Test Loss: {}: ep{}, {}\n".format(round(result2[1],4), round(result2[0],4), epochs_completed, "best_"+checkpointer_name))

# + id="z1GzwpODRlRf"
import csv

# Append a one-line summary of this run to Records.csv.
with open(work_dir+'Records.csv', "a") as fp:
    wr = csv.writer(fp, dialect='excel')
    try:
        wr.writerow([checkpointer_name[8:-5], round(result2[1], 4), round(result2[0], 4), elapsed_time])
    except NameError:
        # FIX: was a bare `except:`. elapsed_time only exists when the
        # training cell actually ran in this session; catch exactly that
        # case instead of silently swallowing every other error.
        wr.writerow([checkpointer_name[8:-5], round(result2[1], 4), round(result2[0], 4)])
print("Saved results.")

# + id="Dv35-hm1Rfv5"

# + id="hgY4v21DyQwL"
#Confution Matrix and Classification Report
#'''
# NOTE(review): predict_generator is deprecated in TF2 (use model.predict);
# kept as-is to match the environment this notebook was run in.
Y_pred = model_loaded.predict_generator(final_test_imageset, len(final_test_imageset))
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(np_test_label, y_pred))
#'''

# + id="U5j7b3KcRvwj"
# Precision [TP/TP+FP] = The ratio of correctly predicted positive observations to the total predicted positive observations.
# Recall (Sensitivity) [TP/TP+FN] = The ratio of correctly predicted positive observations to the all observations in actual class - 'yes'.
# F1 score [F1 Score = 2*(Recall * Precision) / (Recall + Precision)] = The weighted average of Precision and Recall.
# Support = The number of samples of the true response that lie in that class.
#''' print('Classification Report:') #target_names = ['Mono', 'Di'] # not ['Di', 'Mono'] print(classification_report(np_test_label, y_pred)) #, target_names=target_names)) #''' # + id="ceGRitu9RvzV" ''' print('Classification Report') print(classification_report(y_test, y_pred, target_names=list_categories)) #''' # + id="ujLQjTf2Rv11" log_data = pd.read_csv(work_dir+log_name, sep=',', engine='python') # + id="To55jgGSRv4a" # Getting the model history keys #history.history.keys() log_data.head() # + id="aytAYSJ4Rv7T" # plot the training artifacts title = "Val loss for "+dataset+" "+impl_type plt.plot(log_data['loss']) plt.plot(log_data['val_loss']) plt.title(title) plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train_loss','val_loss'], loc = 'best') img_path = work_dir+'Images/vLoss_'+checkpointer_name[8:-5]+'.png' plt.savefig(img_path, dpi=600) plt.show() print('img_path =', img_path) # + id="yUq1WaElRv-M" title = "Val acc for "+dataset+" "+impl_type plt.plot(log_data['accuracy']) plt.plot(log_data['val_accuracy']) plt.title(title) plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train_accuracy','val_accuracy'], loc = 'best') img_path = work_dir+'Images/vAcc_'+checkpointer_name[8:-5]+'.png' plt.savefig(img_path, dpi=600) plt.show() print('img_path =', img_path) # + id="w4msuVKMSk8E"
Plant_Leaf_MalayaKew_MK_Dataset/Plant_Leaf_MalayaKew_MK_impl_5_crop_128p_Gray3D_Dense201_Custom2_withImageNet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:Anaconda3]
#     language: python
#     name: conda-env-Anaconda3-py
# ---

# # Counts vs. angle
#
# W vs. angle for Figure 6 in the paper.
#
# Enable interactive plots

# %matplotlib notebook

import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
import pandas as pd
import scipy.io as sio

os.getcwd()

os.chdir('..')
os.getcwd()

sys.path.append('../scripts')
import bicorr_sim as bicorr_sim
import bicorr_plot as bicorr_plot
import bicorr_math as bicorr_math

# Per-dataset W(theta) tables produced upstream: experiment + 4 simulations.
by_angle_e_meas = pd.read_csv(r'Cf072115_to_Cf072215b/datap/by_angle_e_df.csv',index_col=0)
by_angle_e_cgmf = pd.read_csv(r'cgmf/datap/by_angle_e_df.csv',index_col=0)
by_angle_e_freya = pd.read_csv(r'freya/datap/by_angle_e_df.csv',index_col=0)
by_angle_e_ipol = pd.read_csv(r'ipol/datap/by_angle_e_df.csv',index_col=0)
by_angle_e_ipol_noct = pd.read_csv(r'ipol_noct/datap/by_angle_e_df.csv',index_col=0)

# Load `nps`
# Number of fissions per dataset; the measured count is scaled by the
# fission-chamber efficiency.
num_fission_meas = int(int(sio.loadmat('Cf072115_to_Cf072215b/datap/num_fissions.mat')['num_fissions'])*float(sio.loadmat('Cf072115_to_Cf072215b/datap/fc_efficiency.mat')['fc_efficiency']))
num_fission_cgmf = int(sio.loadmat('cgmf/datap/num_fissions.mat')['num_fissions'])
num_fission_freya= int(sio.loadmat('freya/datap/num_fissions.mat')['num_fissions'])
num_fission_ipol = int(sio.loadmat('ipol/datap/num_fissions.mat')['num_fissions'])
num_fission_ipol_noct = int(sio.loadmat('ipol_noct/datap/num_fissions.mat')['num_fissions'])
num_fissions = [num_fission_meas, num_fission_cgmf, num_fission_freya, num_fission_ipol, num_fission_ipol_noct]
print(num_fissions)

# # Set up plot formatting

# +
# Shared series styling; an index into these lists selects one dataset.
by_angle_es = [by_angle_e_meas, by_angle_e_cgmf, by_angle_e_freya, by_angle_e_ipol, by_angle_e_ipol_noct]
legends =['Experiment', 'CGMF', 'FREYA', 'PoliMi', 'PoliMi-No CT']
fmts = ['x', 's', 'D', 'o', '^']
colors = ['#5d269b', '#dd673b', '#80bc31', '#3cbfe0', '#4242f4']
to_plot = [0,1, 2, 3]
# -

line_thickness = 1
ebar_width = 3

# # Normalize by integral

# +
fig = plt.figure(figsize=(4,4))
ax = plt.gca()

for i in to_plot:
    by_angle_df = by_angle_es[i]
    nps = num_fissions[i]
    x = by_angle_df['angle_bin_centers']
    y = by_angle_df['W']
    yerr = by_angle_df['std W']
    # Normalize each curve by its integral above 20 degrees (the shaded
    # region below 20 degrees is excluded from the normalization).
    norm_factor = np.sum(y[x>20])
    print(norm_factor)
    y = y/norm_factor
    yerr = yerr/norm_factor
    plt.errorbar(x, y, yerr=yerr, fmt = fmts[i],
                 markeredgewidth=1, markerfacecolor='none',
                 elinewidth = line_thickness, capthick = line_thickness,
                 capsize = ebar_width, c = colors[i])

leg = plt.legend([legends[i] for i in to_plot])
leg.get_frame().set_edgecolor('w')
ax.axvspan(0,20,facecolor='gray', alpha=0.2)
ax.set_xlabel(r'$\theta$ (degrees)')
ax.set_ylabel(r'$\overline{W}(\theta)$ (arb. units)')
ax.set_xlim([0,180])

# Set up ticks
ax.tick_params(axis='both', which='major', direction='inout', length=6, color='k', bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both', which='minor', direction='in', length=3, bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(45))
#ax.yaxis.set_major_locator(MultipleLocator(0.02))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(15))
#ax.yaxis.set_minor_locator(MultipleLocator(0.005))

ax.text(45,0.12,'(a)', size=15, backgroundcolor='white')
plt.tight_layout()
# -

os.getcwd()

bicorr_plot.save_fig_to_folder('W_normd_by_integral',r'compare\fig')

# # Divide by experimental

def plot_calcs(by_angle_df):
    """Return (angle bin centers, W, std W), with W and its error normalized
    by the integral of W above 20 degrees."""
    x = by_angle_df['angle_bin_centers']
    W = by_angle_df['W']
    stdW = by_angle_df['std W']
    norm_factor = np.sum(W[x>20])
    y = W/norm_factor
    yerr = stdW/norm_factor
    return x, y, yerr

# +
by_angle_df_exp = by_angle_es[0]
to_plot = [1,2,3]

fig = plt.figure(figsize=(4,4))
ax = plt.gca()

x_exp, y_exp, yerr_exp = plot_calcs(by_angle_df_exp)

for i in to_plot:
    by_angle_df = by_angle_es[i]
    x_sim, y_sim, yerr_sim = plot_calcs(by_angle_df)
    y, yerr = bicorr_math.prop_err_division(y_sim,yerr_sim,y_exp,yerr_exp)
    # FIX: plot against this simulation's own bin centers (x_sim); the old
    # code referenced a stale global `x` left over from the previous cell.
    plt.errorbar(x_sim, y, yerr=yerr, fmt = fmts[i],
                 markeredgewidth=1, markerfacecolor='none',
                 elinewidth = line_thickness, capthick = line_thickness,
                 capsize = ebar_width, c = colors[i])

leg = plt.legend([legends[i] for i in to_plot])
leg.get_frame().set_edgecolor('w')
plt.axhline(1.0,color='gray', linewidth=1,linestyle='--')
ax.axvspan(0,20,facecolor='gray', alpha=0.2)
ax.set_xlabel(r'$\theta$ (degrees)')
ax.set_ylabel(r'$\left[\overline{W}(\theta)\right]_{SIM} / \left[\overline{W}(\theta)\right]_{EXP}$')
ax.set_xlim([0,180])

# Set up ticks
ax.tick_params(axis='both', which='major', direction='inout', length=6, color='k', bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both', which='minor', direction='in', length=3, bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(45))
#ax.yaxis.set_major_locator(MultipleLocator(0.02))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(15))
#ax.yaxis.set_minor_locator(MultipleLocator(0.005))

ax.text(45,1.43,'(b)', size=15, backgroundcolor='white')
plt.tight_layout()

bicorr_plot.save_fig_to_folder('W_normd_diff',r'compare\fig')
# -

# # Plot as-is

by_angle_e_meas.head()

by_angle_e_cgmf.head()

by_angle_e_ipol.head()

# +
plt.figure(figsize=(4,4))

for i in to_plot:
    by_angle_df = by_angle_es[i]
    nps = num_fissions[i]
    print(nps)
    x = by_angle_df['angle_bin_centers']
    # y = by_angle_df['W']*nps
    y = by_angle_df['W']*np.sqrt(nps)
    yerr = 0
    plt.errorbar(x, y, yerr=yerr, fmt = fmts[i],
                 markeredgewidth=1, markerfacecolor='none',
                 elinewidth = line_thickness, capthick = line_thickness,
                 capsize = ebar_width, c = colors[i])

plt.legend([legends[i] for i in to_plot])
plt.xlabel(r'$\theta$ (degrees)')
plt.ylabel(r'$\overline{W}(\theta)$ (arb. units)')
plt.tight_layout()
# -

by_angle_df.head()

# +
plt.figure(figsize=(4,4))

for i in to_plot:
    by_angle_df = by_angle_es[i]
    nps = num_fissions[i]
    x = by_angle_df['angle_bin_centers']
    y = np.multiply(by_angle_df['Sd1'], by_angle_df['Sd2'])/nps
    yerr = 0
    plt.errorbar(x, y, yerr=yerr, fmt = fmts[i],
                 markeredgewidth=1, markerfacecolor='none',
                 elinewidth = line_thickness, capthick = line_thickness,
                 capsize = ebar_width, c = colors[i])

plt.legend([legends[i] for i in to_plot])
plt.xlabel(r'$\theta$ (degrees)')
# plt.ylabel(r'$\overline{W}(\theta)$ (arb. units)')
plt.tight_layout()
# -
analysis/compare/compare_W_angle.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Inventory of SaltStack minions: join key status (salt-key) with grains
# (salt ... grains.item) into a single DataFrame.

# !sudo salt-key -L --out=json > /mnt/data/salt-keys.json

# !sudo salt --static --out json '*' grains.item id os_family os osrelease oscodename osfinger ip4_interfaces kernel > /mnt/data/salt-grains.json

# + slideshow={"slide_type": "slide"}
import pandas as pd
import json

tmp = pd.read_json("/mnt/data/salt-grains.json", orient='index')
#https://docs.saltstack.com/en/latest/ref/cli/salt.html
#salt --static --out json '*' grains.item id os_family os osrelease oscodename osfinger ip4_interfaces kernel > /mnt/data/salt-grains.json
df_grains = pd.read_json(tmp['ret'].to_json(), orient='index')
#usable_dataframe = pd.io.json.json_normalize(jsondata)
#rather roll own?

# + slideshow={"slide_type": "slide"}
df_grains

# +
import json
from pprint import pprint

# FIX: the explicit json_data.close() calls that used to sit inside these
# `with` blocks were redundant -- the context manager already closes the
# file on exit -- and have been removed.
with open('/mnt/data/salt-grains.json') as json_data:
    d = json.load(json_data)
pprint(d)

# +
import json

with open('/mnt/data/salt-grains.json') as json_data:
    grains_incoming = json.load(json_data)

# Flatten each minion's grains ("ret" payload) into one dict per minion.
grains_staged = {}
for key, value in grains_incoming.items():
    retval = {}
    retval['id'] = value['ret']['id']
    retval['kernel'] = value['ret']['kernel']
    retval['os'] = value['ret']['os']
    retval['os_family'] = value['ret']['os_family']
    retval['oscodename'] = value['ret']['oscodename']
    retval['osrelease'] = value['ret']['osrelease']
    retval['osfinger'] = value['ret']['osfinger']
    # Collect the first IPv4 address of every non-loopback interface.
    # NOTE(review): assumes each listed interface has at least one address;
    # an interface with an empty address list would raise IndexError.
    tmp = []
    for key2, value2 in value['ret']['ip4_interfaces'].items():
        if key2 not in ['lo']:
            tmp.append(value2[0])
    retval['ip4_interfaces'] = ", ".join(tmp)
    grains_staged[key] = retval
grains_staged

# +
import json

with open('/mnt/data/salt-keys.json') as json_data:
    keys_incoming = json.load(json_data)

# Invert salt-key output: {status: [minion, ...]} -> {minion: status}.
keys_staged={}
for k, col in keys_incoming.items():
    print(col)
    for m in map(str, col):
        keys_staged[m] = k
keys_staged

# +
from pprint import pprint

# Merge: every known key gets its status, plus grains when the minion
# actually returned them.
minions = {}
for k, v in keys_staged.items():
    tmp = {}
    tmp['keystatus'] = v
    if k in grains_staged:
        tmp['grains'] = grains_staged[k]
    minions[k] = tmp
pprint(minions)
# -

df_minions = pd.DataFrame.from_dict(minions, orient='index')
df_minions
data/saltstack-grains-json.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: masterarbeit
#     language: python
#     name: masterarbeit
# ---

# Compare latent vectors y (columns 1:8) with reconstructions y_hat
# (columns 9:17): PCA-project both to 3 components and measure the per-row
# Euclidean distance, colored by the anomaly flag.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import pandas as pd            # (duplicate imports of pandas/numpy removed)
import numpy as np
from scipy.spatial import distance
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.graphics.tsaplots import plot_acf
from matplotlib.pyplot import figure
from sklearn.decomposition import PCA

dataset = pd.read_csv("./total.csv")
dataset.head()

# NOTE(review): `y` and `y_hat` are not defined at this point -- they are
# only created inside the distance loop further down -- so this cell only
# works when the notebook is executed out of order. Confirm and remove/move.
total = y.merge(y_hat, left_on='ID', right_on='ID')
total.head()

# PCA of the original latent space y.
# NOTE(review): iloc[:,1:8] selects 7 columns while y_hat below uses 8
# (9:17) -- verify the column ranges against total.csv's layout.
latent_space_y = dataset.iloc[:,1:8]
pca = PCA(n_components=3)
columns = ["y_pc"+str(i) for i in range(3)]
principalComponents = pca.fit_transform(latent_space_y)
principalDf = pd.DataFrame(data = principalComponents, columns = columns)
status = dataset["anomaly"]
ls_y = pd.concat([principalDf, status], axis = 1)
ls_y.head()

# PCA of the reconstructed latent space y_hat, joined with the y projection.
latent_space_yhat = dataset.iloc[:,9:17]
pca = PCA(n_components=3)
columns = ["y_hatpc"+str(i) for i in range(3)]
principalComponents = pca.fit_transform(latent_space_yhat)
principalDf = pd.DataFrame(data = principalComponents, columns = columns)
finalDf = pd.concat([principalDf, ls_y], axis = 1)
finalDf.head()

# Per-row Euclidean distance between y and y_hat.
# FIX: DataFrame.append inside a loop was deprecated (and removed in pandas
# 2.0) and quadratic -- collect the rows in a list and build the frame once.
rows = []
for index, word in dataset.iterrows():
    y = word.iloc[1:8].values
    y_hat = word.iloc[9:17].values
    distance_pair = distance.euclidean(y, y_hat)
    rows.append({"distance LS": distance_pair, "anomaly": word.anomaly})
distance_df = pd.DataFrame(rows, columns=["distance LS", "anomaly"])
distance_df.head()

ax = sns.scatterplot(x=range(distance_df.shape[0]), y="distance LS", hue="anomaly", data=distance_df)
ideas/analysis_latent_space/new_approach/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## SVM model for 4class audio with 100ms frame size # ## Important Libraries # + import io import time from sklearn import metrics from scipy.stats import zscore from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from keras.models import Sequential from keras.layers.core import Dense, Activation from keras.callbacks import EarlyStopping import tensorflow as tf from sklearn import svm, datasets import matplotlib.pyplot as plt # %matplotlib inline from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report, confusion_matrix from sklearn.model_selection import cross_val_score from sklearn.model_selection import learning_curve import pickle from sklearn.metrics import roc_curve, auc from sklearn.metrics import log_loss from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import pandas as pd import shutil import os import requests import base64 # - # ### Useful Functions # + # Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue) def encode_text_dummy(df, name): dummies = pd.get_dummies(df[name]) for x in dummies.columns: dummy_name = "{}-{}".format(name, x) df[dummy_name] = dummies[x] df.drop(name, axis=1, inplace=True) # Encode text values to a single dummy variable. The new columns (which do not replace the old) will have a 1 # at every location where the original column (name) matches each of the target_values. One column is added for # each target value. 
def encode_text_single_dummy(df, name, target_values):
    """Add one 0/1 indicator column "name-tv" per target value tv, marking
    rows where df[name] (compared as strings) equals tv. The original
    column is kept."""
    for tv in target_values:
        l = list(df[name].astype(str))
        l = [1 if str(x) == str(tv) else 0 for x in l]
        name2 = "{}-{}".format(name, tv)
        df[name2] = l


# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
    """Label-encode column `name` in place; return the class labels so that
    index i of the returned array is the class encoded as i."""
    le = preprocessing.LabelEncoder()
    df[name] = le.fit_transform(df[name])
    return le.classes_


# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
    """Replace df[name] with its z-score. mean/sd default to the column's own
    statistics but can be supplied (e.g. taken from the training set)."""
    if mean is None:
        mean = df[name].mean()
    if sd is None:
        sd = df[name].std()
    df[name] = (df[name] - mean) / sd


# Convert all missing values in the specified column to the median
def missing_median(df, name):
    med = df[name].median()
    df[name] = df[name].fillna(med)


# Convert all missing values in the specified column to the default
def missing_default(df, name, default_value):
    df[name] = df[name].fillna(default_value)


# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    """Split df into (x, y) float32 arrays for TensorFlow. Integer targets
    are one-hot encoded (classification); any other dtype is returned as a
    single regression column."""
    result = [x for x in df.columns if x != target]
    # find out the type of the target column.
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.
    # FIX: DataFrame.as_matrix() was removed in pandas 0.25 -- use the
    # equivalent column selection + to_numpy() instead.
    if target_type in (np.int64, np.int32):
        # Classification
        dummies = pd.get_dummies(df[target])
        return df[result].to_numpy().astype(np.float32), dummies.to_numpy().astype(np.float32)
    # Regression
    return df[result].to_numpy().astype(np.float32), df[[target]].to_numpy().astype(np.float32)


# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format a duration in seconds as "H:MM:SS.ss"."""
    h = int(sec_elapsed / (60 * 60))
    m = int((sec_elapsed % (60 * 60)) / 60)
    s = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(h, m, s)


# Regression chart.
def chart_regression(pred,y,sort=True):
    """Plot regression predictions against expected values on one chart."""
    t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})
    if sort:
        t.sort_values(by=['y'],inplace=True)
    a = plt.plot(t['y'].tolist(),label='expected')
    b = plt.plot(t['pred'].tolist(),label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()

# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
    """Drop rows whose `name` value lies >= sd standard deviations from the mean (in place)."""
    drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]
    df.drop(drop_rows, axis=0, inplace=True)

# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1, data_low=None, data_high=None):
    """Min-max rescale column `name` into [normalized_low, normalized_high] (in place)."""
    if data_low is None:
        data_low = min(df[name])
        data_high = max(df[name])
    df[name] = ((df[name] - data_low) / (data_high - data_low)) \
        * (normalized_high - normalized_low) + normalized_low

# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render `cm` as a colour-mapped image with class names on both axes."""
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(names))
    plt.xticks(tick_marks, names, rotation=45)
    plt.yticks(tick_marks, names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# -

# #### Read MFCCs feature CSV file of audio of 500ms block
path="/home/bsplab/Desktop/manikanta/SVM_Kflod/Traing_100ms/CRYING"
df=pd.read_csv("/home/bsplab/Desktop/manikanta/SVM_Kflod/Traing_100ms/CRYING/individual_100ms.csv",na_values=['NA','?'])
# 13 MFCC coefficients per frame plus the class label.
df.columns=['MFCC0', 'MFCC1','MFCC2','MFCC3','MFCC4','MFCC5','MFCC6','MFCC7','MFCC8', 'MFCC9', 'MFCC10' ,'MFCC11', 'MFCC12', 'Label']
filename_write = os.path.join(path,"7class-out-of-sample_mySVM_100ms.csv")
df.head()

# #### Shuffle
# Fixed seed so the shuffle (and thus the splits below) is reproducible.
np.random.seed(42)
df = df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
df.columns
df.head()

# #### Separating Independent variable and Target Variable
X = df[['MFCC0', 'MFCC1', 'MFCC2', 'MFCC3', 'MFCC4', 'MFCC5', 'MFCC6', 'MFCC7', 'MFCC8', 'MFCC9', 'MFCC10', 'MFCC11', 'MFCC12']]
y = df['Label']

# #### Encode to a 2D matrix for training
# Encode to a 2D matrix for training
# NOTE: this label-encodes df['Label'] in place, but `y` above still holds
# the original string labels — the string comparisons near the end of this
# notebook rely on that.
Label=encode_text_index(df,'Label')
print("Labelling is:{}".format(Label))

# #### Keeping Holdout Data
# 25 % holdout data
x_main, x_holdout, y_main, y_holdout = train_test_split(X, y, test_size=0.25,random_state = 0)
print("Shape of X : {}".format(X.shape))
print("Shape of y : {}".format(y.shape))
print("Shape of x_main : {}".format(x_main.shape))
print("Shape of x_holdout : {}".format(x_holdout.shape))
print("Shape of y_main : {}".format(y_main.shape))
print("Shape of y_holdout : {}".format(y_holdout.shape))

# #### dividing X, y into train and test data
# dividing X, y into train and test data
x_train, x_test, y_train, y_test = train_test_split(x_main, y_main, test_size=0.20,random_state = 0)
print("Shape of x_train : {}".format(x_train.shape))
print("Shape of x_test : {}".format(x_test.shape))
print("Shape of y_train : {}".format(y_train.shape))
print("Shape of y_test : {}".format(y_test.shape))

# #### preprocessing of training data, testing data , holdout data
# preprocessing of training data
# Scaler is fit on the training split only, then applied to all splits.
scaler = preprocessing.StandardScaler().fit(x_train) #scaler
X_train = scaler.transform(x_train)
X_holdout = scaler.transform(x_holdout)
# preprocessing of testing data
X_test= scaler.transform(x_test)

# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# modern environments need `import joblib` instead.
from sklearn.externals import joblib
scaler_file = "my_scaler_100.save"
joblib.dump(scaler, scaler_file)

# ## Model Creation

# ## RBF kernel

# ### Training
## RBF kernel training
#### Training
tic=time.time()
svclassifier_rbf = SVC(kernel='rbf',C=1, max_iter=-1,verbose=True,probability=True)
svclassifier_rbf.fit(X_train, y_train)
# 5-fold cross-validation on the training split (refits the model 5 times).
scores = cross_val_score( svclassifier_rbf, X_train, y_train, cv=5,scoring='f1_macro')
toc=time.time()
print(str(1000*(toc-tic))+"ms")
print("5-Fold score of rbf kernel SVM is: {}".format(scores))

# ### Validation

# +
## Testing
tic=time.time()
y_rbf = svclassifier_rbf.predict(X_test)
toc=time.time()
print(str(1000*(toc-tic))+"ms")
# -

# model accuracy for X_test
accuracy = accuracy_score(y_test, y_rbf)
print (accuracy)

## Evaluation of Algorithm
print(confusion_matrix(y_test, y_rbf))
print(classification_report(y_test, y_rbf))

# ### Saving RBF kernel Trained Model

# save the model to disk
filename = 'SVM_100ms_Rbf_model_mani.save'
joblib.dump(svclassifier_rbf, open(filename, 'wb'))
# load the model from disk
loaded_model_rbf = joblib.load(open(filename, 'rb'))

# ### Holdout Predction

tic=time.time()
holdout_pred_rbf = loaded_model_rbf.predict(X_holdout)
toc=time.time()
print(str(1000*(toc-tic))+"ms")

rbf_score = accuracy_score(y_holdout, holdout_pred_rbf)
print("Holdout accuracy with rbf kernel is: {}".format(rbf_score))

## Turn off the scintific notation
np.set_printoptions(suppress=True)
cm = confusion_matrix(y_holdout, holdout_pred_rbf)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm, Label)
plt.savefig('cm_holdout_mySVM_100ms_rbf_mani.png',dpi=150)

# +
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools

# NOTE: this redefines plot_confusion_matrix (now with per-cell counts).
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # 'd' assumes integer counts when normalize=False; passing a float
    # matrix with normalize=False will make format() raise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# Compute confusion matrix
cnf_matrix = confusion_matrix(y_holdout, holdout_pred_rbf)
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure(figsize=(5,3))
plot_confusion_matrix(cnf_matrix, classes=['ac&fan', 'crying','music', 'speech'], title='Confusion matrix')
plt.savefig('kfold1.png')
# -

cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
# NOTE(review): cm_normalized holds floats but normalize defaults to False,
# so the 'd' format above will raise here — presumably normalize=True was
# intended; verify.
plot_confusion_matrix(cm_normalized, Label, title='Normalized confusion matrix of Holdout Prediction')
plt.savefig('norm_cm_holdout_mySVM_100_rbf_mani.png',dpi=150)
plt.show()

# +
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools

# NOTE: identical redefinition of plot_confusion_matrix (third definition).
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# Compute confusion matrix
cnf_matrix = confusion_matrix(y_holdout, holdout_pred_rbf)
np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plt.figure(figsize=(10,3))
plot_confusion_matrix(cnf_matrix, classes=['ac&fan', 'crying','music', 'speech'], title='Normalized confusion matrix of Holdout Prediction')
plt.savefig('kfold2.png')
# -

print(classification_report(y_holdout, holdout_pred_rbf))

model_prob_rbf = loaded_model_rbf.predict_proba(X_holdout) ##need prob for getting logloss
rbf_log_loss = log_loss(y_holdout, model_prob_rbf)
print("Log loss score of Holdout data for RBF kernel: {}".format(rbf_log_loss))

# ## Loading Saved Model

from sklearn.externals import joblib
import pandas as pd
import numpy as np
import time

filename = '/home/bsplab/Desktop/manikanta/SVM_Kflod/SVM_100ms_Rbf_model_mani.save' # call first saved model file
# load the model from disk
loaded_model_rbf1 = joblib.load(open(filename, 'rb'))

# ### A

# New-environment crying recordings (unlabelled MFCC frames).
df1=pd.read_csv("/home/bsplab/Desktop/manikanta/SVM_Kflod/Testing_100ms/CRYING/crying_testing.csv",na_values=['NA','?'])
df1.columns=['MFCC0', 'MFCC1','MFCC2','MFCC3','MFCC4','MFCC5','MFCC6','MFCC7','MFCC8', 'MFCC9', 'MFCC10' ,'MFCC11', 'MFCC12']
#filename_write = os.path.join(path,"7class-out-of-sample_mySVM_500ms.csv")
df1.head()

scaler_file = "/home/bsplab/Desktop/manikanta/SVM_Kflod/my_scaler_100.save"
scaler = joblib.load(scaler_file)

X = scaler.transform(df1) ## Performing preprocessing on tested data

tic=time.time()
holdout_pred_rbf1 = loaded_model_rbf1.predict(X)
toc=time.time()
print(str(1000*(toc-tic))+"ms")

p1=holdout_pred_rbf1.size
print("The size of prediction " + str (p1))
# Accuracy proxy: fraction of frames whose predicted label is the expected class.
a1=sum(holdout_pred_rbf1=="crying")
print("Total no. of predcited crying "+str(a1))
Acc1=a1/p1*100
print("The accuracy os the new environment crying data is "+ str(Acc1)+ " percent")

# ### V

# +
df6=pd.read_csv("/home/bsplab/Desktop/manikanta/SVM_Kflod/Testing_100ms/FAN_AC/acfan_testing.csv",na_values=['NA','?'])
df6.columns=['MFCC0', 'MFCC1','MFCC2','MFCC3','MFCC4','MFCC5','MFCC6','MFCC7','MFCC8', 'MFCC9', 'MFCC10' ,'MFCC11', 'MFCC12']
#filename_write = os.path.join(path,"7class-out-of-sample_mySVM_500ms.csv")

X6= scaler.transform(df6) ## Direct Do this

tic=time.time()
holdout_pred_rbf6 = loaded_model_rbf1.predict(X6)
toc=time.time()
print(str(1000*(toc-tic))+"ms")

p6=holdout_pred_rbf6.size
print("The size of prediction " + str (p6))
# NOTE(review): compares against "ac" while the class was plotted as
# 'ac&fan' above — confirm this matches the label used in the training CSV.
a6=sum(holdout_pred_rbf6=="ac")
print("Total no. of predcited fan&ac "+str(a6))
Acc6=a6/p6*100
print("The accuracy of the new environment fan&ac data is "+ str(Acc6)+ " percent")
# -

df7=pd.read_csv("/home/bsplab/Desktop/manikanta/SVM_Kflod/Testing_100ms/MUSIC/music_testing.csv",na_values=['NA','?'])
df7.columns=['MFCC0', 'MFCC1','MFCC2','MFCC3','MFCC4','MFCC5','MFCC6','MFCC7','MFCC8', 'MFCC9', 'MFCC10' ,'MFCC11', 'MFCC12']
#filename_write = os.path.join(path,"7class-out-of-sample_mySVM_500ms.csv")
df7.head()

X7= scaler.transform(df7) ## Direct Do this

tic=time.time()
holdout_pred_rbf7 = loaded_model_rbf1.predict(X7)
toc=time.time()
print(str(1000*(toc-tic))+"ms")

p7=holdout_pred_rbf7.size
print("The size of prediction " + str (p7))
a7=sum(holdout_pred_rbf7=="music")
print("Total no. of predcited music "+str(a7))
Acc7=a7/p7*100
print("The accuracy of the new environment music data is "+ str(Acc7)+ " percent")

# +
df4=pd.read_csv("/home/bsplab/Desktop/manikanta/SVM_Kflod/Testing_100ms/SPEECH/speech_testing.csv",na_values=['NA','?'])
df4.columns=['MFCC0', 'MFCC1','MFCC2','MFCC3','MFCC4','MFCC5','MFCC6','MFCC7','MFCC8', 'MFCC9', 'MFCC10' ,'MFCC11', 'MFCC12']

X4= scaler.transform(df4) ## Direct Do this

tic=time.time()
holdout_pred_rbf4 = loaded_model_rbf1.predict(X4)
toc=time.time()
print(str(1000*(toc-tic))+"ms")

p4=holdout_pred_rbf4.size
print("The size of prediction " + str (p4))
a4=sum(holdout_pred_rbf4=="speech")
print("Total no. of predcited speech "+str(a4))
Acc4=a4/p4*100
print("The accuracy of the new environment speech data is "+ str(Acc4)+ " percent")
# -
my_SVM_Kfold_100ms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Interactive `pymf6` examples
#
# To start an example in a Notebook, click: [ex02-tidal](notebooks/ex02-tidal.ipynb)
#
#
# # Scripts
#
# Open a new terminal: *File --> New --> Terminal*
#
# ## Modifying Constant Head Values
#
# Change to the example directory:
#
#     cd examples/ex16-mfnwt2
#
# and type:
#
#     python ../../scripts/chd.py
#
#
# ## Showing Temporal Information
#
# Change to the example directory:
#
#     cd examples/ex02-tidal
#
# and type:
#
#     python ../../scripts/temporal.py
#
# You can also load the scripts here.
# Open a new Notebook: *File --> New --> Notebook* (select "Python 3" as the kernel).
# In a new cell type:
#
#     # %load scripts/chd.py
#
# and in another cell type:
#
#     # %load scripts/temporal.py
#
# to see the content of both scripts.
index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # PyMC4 — Hiding `yield` from the Model Specification API
#
# Please refer to the `README.org` for context and discussion. In this notebook I will outline my proposal.

from datetime import datetime
print("Last run:", datetime.now())

import __future__
import ast
import functools
import inspect
import re
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd

# ## AST Helper Functions
#
# Based on http://code.activestate.com/recipes/578353-code-to-source-and-back/.
#
# The main thing to take away from this cell block is that `uncompile` takes a Python object and returns its source code (along with a bunch of other things, but we're less interested in those), and `recompile` takes either:
#
# 1. the output of `uncompile`, or
# 2. a modified AST, and `compile`s (the Python built-in) it down to bytecode.
#
# The output of `uncompile` can then be `exec`'ed or `eval`'ed.

# +
# Bitmask of all `from __future__` compiler flags, so recompilation keeps
# whatever future-imports were active in the original code object.
PyCF_MASK = sum(v for k, v in vars(__future__).items() if k.startswith("CO_FUTURE"))


def uncompile(c):
    """uncompile(codeobj) -> [source, filename, mode, flags, firstlineno, privateprefix]."""
    if c.co_flags & inspect.CO_NESTED or c.co_freevars:
        raise NotImplementedError("Nested functions not supported")
    if c.co_name == "<lambda>":
        raise NotImplementedError("Lambda functions not supported")
    if c.co_filename == "<string>":
        raise NotImplementedError("Code without source file not supported")
    filename = inspect.getfile(c)
    try:
        lines, firstlineno = inspect.getsourcelines(c)
    except IOError:
        raise RuntimeError("Source code not available")
    source = "".join(lines)
    # __X is mangled to _ClassName__X in methods. Find this prefix:
    privateprefix = None
    for name in c.co_names:
        m = re.match("^(_[A-Za-z][A-Za-z0-9_]*)__.*$", name)
        if m:
            privateprefix = m.group(1)
            break
    return [source, filename, "exec", c.co_flags & PyCF_MASK, firstlineno, privateprefix]


def recompile(source, filename, mode, flags=0, firstlineno=1, privateprefix=None):
    """Recompile output of uncompile back to a code object. source may also be preparsed AST."""
    if isinstance(source, ast.AST):
        a = source
    else:
        a = parse_snippet(source, filename, mode, flags, firstlineno)
    node = a.body[0]
    if not isinstance(node, ast.FunctionDef):
        raise RuntimeError("Expecting function AST node")
    c0 = compile(a, filename, mode, flags, True)
    return c0


def parse_snippet(source, filename, mode, flags, firstlineno, privateprefix_ignored=None):
    """Like ast.parse, but accepts indented code snippet with a line number offset."""
    args = filename, mode, flags | ast.PyCF_ONLY_AST, True
    prefix = "\n"
    try:
        a = compile(prefix + source, *args)
    except IndentationError:
        # Already indented? Wrap with dummy compound statement
        prefix = "with 0:\n"
        a = compile(prefix + source, *args)
        # Peel wrapper
        a.body = a.body[0].body
    ast.increment_lineno(a, firstlineno - 2)
    return a
# -

# ## PyMC4 Backend
#
# Now, let's talk about what the backends need to look like.
#
# First, a helper class to traverse and transform the AST of the user-defined model specification function. Half the magic is in this class: please read the docstring.

class FunctionToGenerator(ast.NodeTransformer):
    """
    This subclass traverses the AST of the user-written, decorated, model
    specification and transforms it into a generator for the model.

    Subclassing in this way is the idiomatic way to transform an AST.

    Specifically:

    1. Add `yield` keywords to all assignments
       E.g. `x = tfd.Normal(0, 1)` -> `x = yield tfd.Normal(0, 1)`
    2. Rename the model specification function to `_pm_compiled_model_generator`.
       This is done out an abundance of caution more than anything.
    3. Remove the @Model decorator. Otherwise, we risk running into an
       infinite recursion.
    """

    def visit_Assign(self, node):
        # TODO: AugAssign and AnnAssign nodes, for completeness.
        # https://greentreesnakes.readthedocs.io/en/latest/nodes.html#AugAssign
        # https://greentreesnakes.readthedocs.io/en/latest/nodes.html#AnnAssign
        new_node = node
        # Wrap the right-hand side in a Yield node: `x = rhs` -> `x = yield rhs`.
        new_node.value = ast.Yield(value=new_node.value)
        # Tie up loose ends in the AST.
        # FIXME: I may be cargo-culting what I've read in docs and tutorials.
        ast.copy_location(new_node, node)
        ast.fix_missing_locations(new_node)
        self.generic_visit(node)
        return new_node

    def visit_FunctionDef(self, node):
        new_node = node
        new_node.name = "_pm_compiled_model_generator"
        new_node.decorator_list = []
        # FIXME: Some more cargo-culting.
        ast.copy_location(new_node, node)
        ast.fix_missing_locations(new_node)
        self.generic_visit(node)
        return new_node

# And now for the `pm.Model` decorator. Instead of a function, our decorator [will be a class](https://realpython.com/primer-on-python-decorators/#classes-as-decorators). This allows us to have a stateful decorator, where we can store model-related things (e.g. the AST and the generator) and even define user-facing functions such as `sample` or `observe`. The other half of the magic is in this class: please read the comments and docstrings.

class Model:
    """
    pm.Model decorator.
    """

    def __init__(self, func):
        self.func = func
        # Introspect wrapped function, instead of the decorator class.
        functools.update_wrapper(self, func)
        # Uncompile wrapped function
        uncompiled = uncompile(func.__code__)
        # Parse AST and modify it
        tree = parse_snippet(*uncompiled)
        tree = FunctionToGenerator().visit(tree)
        uncompiled[0] = tree
        # Recompile wrapped function
        self.recompiled = recompile(*uncompiled)
        # Execute recompiled code (defines `_pm_compiled_model_generator`)
        # in the locals() namespace and assign it to an attribute.
        # Refer to http://lucumr.pocoo.org/2011/2/1/exec-in-python/
        # FIXME: Need to understand locals() and namespaces more.
        exec(self.recompiled, None, locals())
        self.model_generator = locals()["_pm_compiled_model_generator"]

    """
    The following three functions aren't necessary for the rest of the notebook.
    I just want to point out that this would be natural places to define these
    functions. Refer to the "User-Facing API" section (below) for why.
    """

    def __call__(self, *args, **kwargs):
        # Could be something like what we have already:
        # https://github.com/pymc-devs/pymc4/blob/master/pymc4/coroutine_model.py#L63
        raise NotImplementedError("Evaluate model, as in `coroutine_model.py`.")

    def sample(self, *args, **kwargs):
        raise NotImplementedError("George isn't sure how sampling works.")

    def observe(self, *args, **kwargs):
        raise NotImplementedError("George isn't sure how observing works, either.")

# ## User-Facing Model Specification API
#
# And now all users need to see is this:

@Model
def linear_regression(x):
    scale = tfd.HalfCauchy(0, 1)
    coefs = tfd.Normal(tf.zeros(x.shape[1]), 1)
    predictions = tfd.Normal(tf.linalg.matvec(x, coefs), scale)
    return predictions

# ### What else can we do in the `Model` decorator?
#
# 1. If we define `__call__`, then users can run `predictions = linear_regression(tf.zeros([3, 10]))`. I am unsure what we would want this to return. Note that this will **not** be as straightfoward as
#
# ```python
# def __call__(self, *args, **kwargs):
#     return self.func(*args, **kwargs)
# ```
# since (currently) `self.func` is the user-defined function that crashes (just as in @ferrine's example). More bluntly, users will be writing a function that, without the `@Model` decorator, crashes. On the other hand, if we _don't_ implement `__call__`, users will write a function and get back a `Model` object that _cannot be called_, as you would expect a function to be. Tricky situation; food for thought; feedback needed!
#
# 2. If we define `sample`, then users can sample from their model via `linear_regression.sample()`.
#
# 3. If we define `observe`, then users can provide observations to their model via `linear_regression.observe()` (as suggested by @ferrine and @rpgoldman).

# +
# All three statements will raise NotImplementedErrors.
predictions = linear_regression(tf.zeros([3, 10]))
linear_regression.sample()
linear_regression.observe()
# -

# ## PyMC4 Core Engine
#
# We can get the generator in exactly the same way that @ferrine's notebook requires:

linear_regression.model_generator(tf.zeros([3, 10]))

# Success!!
#
# In fact, to demonstrate that it's actually the generator we need (and that there aren't subtle bugs along the way), we can interact with the generator in exactly the same way as in @ferrine's notebook.
#
# I've omitted the "One level deeper" section in the notebook: that is, recursively interacting with the generator. I haven't tested it out, but I expect that it would also work.

# +
# Taken from https://gist.github.com/ferrine/59a63c738e03911eacba515b5be904ad
def interact(gen, state):
    """Drive the model generator, sampling each yielded distribution once.

    `state` carries two dicts: "dists" (name -> distribution) and
    "samples" (name -> drawn value); previously-seen names are replayed
    from "samples" instead of re-sampled.
    """
    control_flow = gen()
    return_value = None
    while True:
        try:
            dist = control_flow.send(return_value)
            if dist.name in state["dists"]:
                # NOTE(review): `preds_dist` is not defined anywhere in this
                # notebook — taking this branch would raise a NameError.
                control_flow.throw(RuntimeError(
                    "We found duplicate names in your cool model: {}, "
                    "so far we have other variables in the model, {}".format(
                        preds_dist.name,
                        set(state["dists"].keys()),
                    )
                ))
            if dist.name in state["samples"]:
                return_value = state["samples"][dist.name]
            else:
                return_value = dist.sample()
                state["samples"][dist.name] = return_value
            state["dists"][dist.name] = dist
        except StopIteration as e:
            # The generator's `return` value arrives as StopIteration args.
            if e.args:
                return_value = e.args[0]
            else:
                return_value = None
            break
    return return_value, state
# -

preds, state = interact(lambda: linear_regression.model_generator(tf.zeros([3, 10])), state=dict(dists=dict(), samples=dict()))

preds

state

# ## Discussion and Next Steps
#
# Please refer to the `README.org`.

# ## Environment

# !python --version

# !cat requirements.txt
python/ast-hiding-yield/00-prototype/hiding-yield.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #!/usr/bin/env python

# Importing the required libraries
import os
import random
import argparse
import shutil

import numpy as np


def is_usable(img_array):
    """Return True when the decoded image has a channel axis (H x W x C).

    Grayscale images decode to 2-D arrays and are rejected, matching the
    original `w, h, d = im.shape` unpack test.
    """
    return img_array.ndim == 3


def main():
    """Scan the --images directory and move channel-less images aside."""
    # Pillow is imported lazily so this module can be imported without it.
    from PIL import Image

    parser = argparse.ArgumentParser(
        description='Checks background images for correct number of channels')
    parser.add_argument('--images', dest='images', required=False,
                        help="Directory of images")
    parser.set_defaults(images=r'E:\Documents\GitHub\Photo_Mosaic\data\Trees')
    args, unknown = parser.parse_known_args()

    moveme = []
    # This tests to make sure the dimensions of all the photos are the same
    for filename in os.listdir(args.images):
        path = os.path.abspath(os.path.join(args.images, filename))
        try:
            with Image.open(path) as img:
                arr = np.array(img)
        except Exception:
            # Not an image (or unreadable) -- skip it, as the original did.
            continue
        if not is_usable(arr):
            moveme.append(filename)

    if moveme:
        # os.makedirs replaces the shelled-out "mkdir" (portable, idempotent).
        os.makedirs('Unusable_Images', exist_ok=True)
        for item in moveme:
            # BUG FIX: the original built the source as `args.images + item`
            # (no path separator) and shelled out to "mv", which fails on
            # Windows and whenever --images lacks a trailing slash.
            src = os.path.join(args.images, item)
            shutil.move(src, os.path.join('Unusable_Images', item))


if __name__ == "__main__":
    main()
scripts/Image_Tester.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.0 64-bit
#     language: python
#     name: python38064bit94409e1e4df94da1b5cc700cc0e6ab29
# ---

# # Product tagging using Machine Learning
#
# ---

# +
# Data processing
import pandas as pd
import numpy as np
from collections import Counter

# Visualisation
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import seaborn as sns

# Language processing
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
# nltk.download('stopwords') # if you haven't downloaded this yet, you need to now.

# Machine Learning - model training
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score

import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# -

filepath = '/Users/wolfsinem/product-tagging/data/flipkart_com-ecommerce_sample.csv'
df = pd.read_csv((filepath))
df.head(1)

# Only the name and description columns are needed for tagging.
model_df = df[['product_name','description']]
model_df.shape

# Silence SettingWithCopyWarning: model_df is a slice of df.
pd.options.mode.chained_assignment = None
model_df['tags'] = ""
model_df

# #### Set all strings to lowercase
# NOTE(review): .str.lower() returns a new Series and the result is
# discarded here, so these two lines do not actually modify the frame.
df['product_name'].str.lower()
df['description'].str.lower()

test_string = model_df['description'][0]
test_string

# ### Function to create tags
def tokenize_string(sentence):
    """Return the 10 most common informative word tokens of `sentence`.

    Tokenizes on word characters, lowercases, removes English stop words
    plus a manual list of boilerplate catalogue words, then keeps the most
    frequent remaining terms.
    """
    tokenizer = nltk.RegexpTokenizer(r"\w+")
    new_words = tokenizer.tokenize(sentence)
    new_words = [token.lower() for token in new_words] # set to a lower case
    stop_words = set(stopwords.words('english'))
    # Catalogue boilerplate that appears in nearly every description.
    manual_filtered_words = {'details','fabric','key','features','sales','number','contents','type','general',
                             'specifications'}
    filtered_sentence = [w for w in new_words if not w in stop_words and not w in manual_filtered_words]
    count_terms = Counter(filtered_sentence).most_common(10) # fill e.g. (5) for most common 5 terms
    return [item[0] for item in count_terms] # this function extracts the first element of each sublist, so only the terms and not how many times it occured

# #### Generating tags with the above function
term_lists = tokenize_string(test_string)
term_lists

# As you can see there are some numbers being used as tags. We will take these out.

# #### Now we need to for-loop this so the function generates tags for each of the comments

# +
# this function takes out every number within a string because we don't need it as tags since it doesnt give any context
# print([x for x in token_lists[] if not any(c.isdigit() for c in x)])
# -

token_lists = []
for i in model_df['description']:
    # Drop any token that contains a digit -- numbers carry no tag meaning.
    token_lists.append([x for x in tokenize_string(str((i))) if not any(c.isdigit() for c in x)])
token_lists

for i in range(len(model_df.index)):
    model_df.at[i,'tags'] = token_lists[i]
model_df

# #### Delete missing values (should be done in the first couple of cells)
model_df.isnull().sum()

model_df.dropna(inplace=True)

# #### dtype
type(model_df['tags'].iloc[0])

# # Some analyzing of the words

# +
# descriptions = model_df.description.str.cat(sep=' ')
# tokens = word_tokenize(descriptions)
# vocab = set(tokens)
# freq_dist = nltk.FreqDist(tokens)
# sorted(freq_dist, key=freq_dist.__getitem__, reverse=True)[0:10]
# stop_words = set(stopwords.words('english'))
# tokens = [w for w in tokens if not w in stop_words]
# -

# Flatten the per-product tag lists into one list of all tags.
all_tags = []
for i in token_lists:
    for j in i:
        all_tags.append(j)

cv = CountVectorizer(tokenizer=lambda x:x.split())
tag_dtm = cv.fit_transform(all_tags)
print(("There are {} unique tags").format(tag_dtm.shape[1]))

# Create and generate a word cloud image of just one string/description
wordcloud = WordCloud(background_color="white").generate(test_string)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()

tags_counted_and_sorted = pd.DataFrame({'tag': all_tags}).groupby('tag').size().reset_index().sort_values(0, ascending=False)
tags_counted_and_sorted

tags_counted_and_sorted[:10].plot.barh(x='tag', y=0, figsize=(12,8))

# # Machine Learning - Multilabel Classification
#
# ---

# ## Baseline
#
# The baseline is actually what I did in the function above. Tags are generated based on the occurunce of a word. The next step is to actually let the Machine Learning Multi-label classification Technique create these tags itself.

# ## Preprocessing

# there are a total of 19998 rows and 3 different columns.
model_df.shape

# Work on a subset of n products to keep training fast.
n = 2000
target_variable = model_df['tags'][:n]

# Binarize the tag lists into a 0/1 label matrix (one column per tag).
mlb = MultiLabelBinarizer()
target_variable = mlb.fit_transform(target_variable)
mlb.classes_

# ### TfidfVectorizer
#
# ---
#
# This method tokenizes documents/texts, learns the vocabulary and inverses the document frequency weightings and allows you to encode new documents.
#
# We will import this method from <b> sklearn.feature_extraction.text </b>.
# There are a lot of different parameters:
#
# - <b>lowercase;</b> Convert all characters to lowercase, which in our case will be handy
# - <b>stop_words;</b> If a string, it is passed to _check_stop_list and the appropriate stop list is returned
# - <b>ngram_range;</b> The lower and upper boundary of the range of n-values for different n-grams to be extracted
# - <b>max_features;</b> If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus
#

# +
# Initialise the vectorizer
vectorizer = TfidfVectorizer(strip_accents='unicode',
                             analyzer='word',
                             ngram_range=(1,3),
                             # max_features=1000,
                             stop_words='english',
                             token_pattern=r'\w{3,}'
                             )

# fit the independent features
independent_variable = vectorizer.fit_transform(model_df['description'][:n])

print('Independent variable shape: {}'.format(independent_variable.shape))
print('Target variable shape: {}'.format(target_variable.shape))
# -

vectorizer.vocabulary_

# #### Train/test sets

# split the data into training en testing sets
X_train, X_test, y_train, y_test = train_test_split(
    independent_variable,
    target_variable,
    test_size=0.2,
    random_state=42,
)

# +
print('X train shape: {}'.format(X_train.shape))
print('y train shape: {}'.format(y_train.shape))
print('------------------------------')
print('X test shape: {}'.format(X_test.shape))
print('y test shape: {}'.format(y_test.shape))
# -

# # Model building
#
# ---
#
# ### Pipeline

# ### LinearSVC

# One-vs-rest wrapper trains an independent binary SVM per tag.
Linear_pipeline = Pipeline([
    ('clf', OneVsRestClassifier(LinearSVC(
        class_weight='balanced',
        random_state=42,
        tol=1e-1,
        C=8.385), n_jobs=-1)),
])

Linear_pipeline.fit(X_train, y_train)

prediction = Linear_pipeline.predict(X_test)
print('Accuracy for LinearSVC is {}'.format(accuracy_score(y_test, prediction)))

# ### Logistic Regression

Logistic_pipeline = Pipeline([
    ('clf', OneVsRestClassifier(LogisticRegression(
        class_weight='balanced',
        random_state=0,
        tol=1e-1,
        C=8.385), n_jobs=-1)),
])

Logistic_pipeline.fit(X_train, y_train)

prediction_2 = Logistic_pipeline.predict(X_test)
# NOTE(review): message says "LinearSVC" but this is the LogisticRegression score.
print('Accuracy for LinearSVC is {}'.format(accuracy_score(y_test, prediction_2)))

# ### MultinomialNB (Naive Bayes)

NB_pipeline = Pipeline([
    ('clf', OneVsRestClassifier(MultinomialNB(
        fit_prior=True, class_prior=None))),
])

# +
# NB_pipeline.fit(X_train, y_train)
# -

# NOTE(review): the fit call above is commented out, so this predict will
# raise NotFittedError unless it is re-enabled first.
prediction_3 = NB_pipeline.predict(X_test)
print('Accuracy for Naive Bayes is {}'.format(accuracy_score(y_test, prediction_3)))

# # Comparison
#

# model_names = ['LinearSVC','LogisticRegression','Naive Bayes']
model_names = ['LinearSVC']
scores_10 = [0.3575,0.233,0.0535] #scores tested with half of the dataset
scores_20 = [0.379166] #scores with the whole dataset
score_frame = pd.DataFrame({'Model': model_names, 'Accuracy Score 20k': scores_20})
score_frame

sns.factorplot(y='Model',x='Accuracy Score 20k',data=score_frame,kind='bar',aspect=2)
plt.show()

# # Model testing

string_1 = ['Key Features of Alisha Solid Womens Cycling Shorts Cotton Lycra Navy, Red, Navy,Specifications of Alisha Solid Womens Cycling Shorts Shorts Details Number of Contents in Sales Package Pack of 3 Fabric Cotton Lycra Type Cycling Shorts General Details Pattern Solid Ideal For Womens Fabric Care Gentle Machine Wash in Lukewarm Water, Do Not Bleach Additional Details Style Code ALTHT_3P_21 In the Box 3 shorts']

model_string_1 = vectorizer.transform(string_1)

Linear_pipeline.predict(model_string_1)

# ### Predicted/generated tags for product description 1

predicted_tags = mlb.inverse_transform(Linear_pipeline.predict(model_string_1))
predicted_tags

# ### Tags created by the baseline

baseline_tags = model_df['tags'][0]
baseline_tags

# ---
# ## Add tags created by the ML to a whole new dataframe

submission_df = model_df.drop(columns=['tags','product_name'])
sample_df = submission_df[:n]
sample_df.head(5)

# NOTE(review): fit_transform RE-FITS the vectorizer, changing the feature
# space the pipeline was trained on — presumably transform() was intended;
# verify before trusting these predictions.
description_variable = vectorizer.fit_transform(sample_df['description'])
prediction_values = mlb.inverse_transform(Linear_pipeline.predict(description_variable))

sample_df['tags'] = np.asarray(prediction_values)
sample_df.head(5)
notebooks/MachineLearningModels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Reading and writing numpy arrays to wav files for sound playback.

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile


def synthetic(name, T, rate, base_freq=1000):
    """Create a synthetic signal.

    Parameters
    ----------
    name : str
        Kind of signal: one of 'tone', 'chirp' or 'laser'.
    T : float
        Duration of the signal in seconds.
    rate : int
        Sampling rate in Hz.
    base_freq : float, optional
        Base frequency of the signal in Hz (default 1000).

    Returns
    -------
    numpy.ndarray
        int16 samples scaled to the full wav amplitude range.

    Raises
    ------
    ValueError
        If `name` is not one of the supported signal kinds.
    """
    valid_names = set(['tone', 'chirp', 'laser'])
    # BUG FIX: `valid_names` was computed but never checked, so an unknown
    # name fell through every branch and crashed with NameError on `y`.
    if name not in valid_names:
        raise ValueError('name must be one of %s, got %r' % (sorted(valid_names), name))
    nsamples = int(round(rate*T))
    t = np.linspace(0, T, nsamples)
    if name == 'tone':
        # A simple tone with just one frequency
        y = np.sin(2*np.pi*base_freq*t)
    elif name == 'chirp':
        # a chirp
        freq = base_freq*t
        y = np.sin(2*np.pi*freq*t)
    elif name == 'laser':
        # a 'laser': 1/t frequency shift
        freq = base_freq/(t+1)
        y = np.sin(2*np.pi*freq*t)
    # linearly rescale raw data to wav range and convert to integers.
    # BUG FIX: scale by 2**15 - 1 (32767).  Scaling by 2**15 overflows int16
    # whenever y hits exactly 1.0, wrapping to -32768 and producing a click.
    scale_fac = 2**15 - 1
    sound = (scale_fac*y).astype(np.int16)
    return sound


def viz_sound(sound, name, npts=1000):
    """Visualize the structure of an audio signal.

    Plots the first `npts` samples next to a spectrogram of the whole signal.
    """
    f, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 4))
    ax0.plot(sound[:npts])
    # NOTE(review): passing Fs=rate here would label the spectrogram axes in
    # Hz/seconds — confirm whether the unlabeled axes are intentional.
    ax1.specgram(sound)


# Generate a synthetic signal
rate = 2*11025  # Hz
T = 2  # s
base_freq = 1000
name = 'tone'
name = 'chirp'
name = 'laser'
sound = synthetic(name, T, rate)

# We can write it to a file on disk in case we'd like to play it with system utilities
fname = 'sample_%s.wav' % name
wavfile.write(fname, rate, sound)

# But we can visualize it and play it directly in the browser, using matplotlib
# and IPython's Audio objects that take advantage of HTML5's audio capabilities:
viz_sound(sound, name)

from IPython.display import Audio
Audio(sound, rate=rate)
exercises/sound_wavfiles.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import sys
import itertools
import functools
import time

import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
import yaml

# NOTE(review): `lab.metrics` shadows `sklearn.metrics` imported above —
# presumably intentional (the lab module provides rprecision_score etc.);
# confirm against the `lab` package.
from lab import metrics, analytics


# +
def load_distinguisher_prediction(rep: int):
    """Load the distinguisher's per-sample predictions for repetition `rep`."""
    return pd.read_csv(f"../results/evaluate-split/distinguisher-predictions-{rep:02d}.csv")


def load_classifier_prediction(path_template: str, classifier: str, rep: int):
    """Load one classifier's prediction CSV for repetition `rep`.

    For "varcnn" the sizes- and time-based sub-model predictions are averaged.
    Columns are renamed to "y_true" plus class labels -1..99.
    """
    if classifier == "varcnn":
        # Var-CNN is an ensemble: average its two sub-models' probabilities.
        return (load_classifier_prediction(path_template, "varcnn-sizes", rep)
                + load_classifier_prediction(path_template, "varcnn-time", rep)) / 2
    return pd.read_csv(
        path_template.format(classifier=classifier, rep=rep),
        header=0, names=["y_true"] + list(range(-1, 100)))


def load_split_prediction(classifier: str, rep: int):
    """Combine TCP and QUIC predictions weighted by the distinguisher.

    The distinguisher probability selects between the QUIC and TCP classifier
    outputs per sample; the result has "y_true" first, then classes -1..99.
    """
    distinguisher = pd.read_csv(f"../results/evaluate-split/distinguisher-predictions-{rep:02d}.csv")
    distinguish_proba = distinguisher["predictions"].to_numpy().reshape((-1, 1))

    classes = np.arange(-1, 100)
    classes_cols = [class_ for class_ in classes]

    # Load the QUIC and TCP Datasets
    tcp_data = load_classifier_prediction(
        "../results/evaluate-split/{classifier}-predictions-tcp-{rep:02d}.csv",
        classifier, rep)
    quic_data = load_classifier_prediction(
        "../results/evaluate-split/{classifier}-predictions-quic-{rep:02d}.csv",
        classifier, rep)
    # Both files must describe the same samples but differ in predictions.
    assert(tcp_data["y_true"].equals(quic_data["y_true"]))
    assert(not tcp_data.equals(quic_data))

    # Extract the probabilities
    quic_proba = quic_data[classes_cols].to_numpy()
    tcp_proba = tcp_data[classes_cols].to_numpy()

    # Convex combination: P = p(quic)*P_quic + (1 - p(quic))*P_tcp.
    result = pd.DataFrame(distinguish_proba * quic_proba + (1-distinguish_proba) * tcp_proba,
                          index=distinguisher.index, columns=np.arange(-1, 100))
    result["y_true"] = tcp_data["y_true"]
    # Move "y_true" (last column) to the front.
    return result[[result.columns[-1]] + result.columns.tolist()[:-1]]


def load_split_data():
    """Load split-strategy predictions for all classifiers and 20 repetitions."""
    keys = [(classifier, rep, "split") for rep in range(20)
            for classifier in ["varcnn", "p1fp", "kfp", "dfnet"]]
    return pd.concat([
        load_split_prediction(classifier, rep) for (classifier, rep, proto) in keys
    ], keys=keys, names=("classifier", "rep", "proto", "sample"))


def load_single_mixed_data():
    """Load quic/tcp/mixed baseline predictions for all classifiers and reps."""
    keys = [(classifier, rep, proto) for rep in range(20)
            for classifier in ["varcnn", "p1fp", "kfp", "dfnet"]
            for proto in ["quic", "tcp", "mixed"]]
    return pd.concat([
        # {classifier}/{rep:02d} stay as template placeholders (doubled braces)
        # and are filled in by load_classifier_prediction; {proto} is filled here.
        load_classifier_prediction(
            f"../../single-and-mixed-analyses/results/dataset-performance/{{classifier}}/predictions-{proto}-{{rep:02d}}.csv",
            classifier, rep)
        for (classifier, rep, proto) in keys
    ], keys=keys, names=("classifier", "rep", "proto", "sample"))


def load_data():
    """Concatenate split and single/mixed datasets into one indexed frame."""
    return pd.concat([load_split_data(), load_single_mixed_data()])


data = load_data()
data
# -

# # Precision-Recall curve


def plot_pr_curve(data):
    """Plot one precision-recall line plot per classifier, styled for the paper."""
    # Scale to percentages and map internal identifiers to display names.
    data = (data
            .transform(lambda x: x*100)
            .sort_index()
            .rename({"dfnet": "DF", "varcnn": "Var-CNN", "p1fp": "$p$-FP(C)", "kfp": "$k$-FP"}, axis=0)
            .rename({"precision": "$\pi_{20}$ (%)", "recall": "Recall (%)"}, axis=1)
            .rename({"mixed": "Mixed", "split": "Split", "quic": "QUIC", "tcp": "TCP"}, axis=0)
            .reset_index())

    with plt.style.context(["./mpl-style/pr-curve.mplstyle", "./mpl-style/pets.mplstyle", {"text.usetex": True}]):
        hue_order = ["TCP", "QUIC", "Mixed", "Split"]
        grid = sns.relplot(
            data=data, x="Recall (%)", y="$\pi_{20}$ (%)", hue="proto", style="proto",
            col="classifier", kind="line", hue_order=hue_order,
            height=1.125, aspect=1.33,
            facet_kws=dict(
                legend_out=True,
                despine=False,
                gridspec_kws=dict(wspace=.1)
            ),
            palette="binary",
            legend=False)
        grid.set_titles(col_template="{col_name}")

        for i, ax in enumerate(np.ravel(grid.axes)):
            # Only the first facet carries the legend.
            if i == 0:
                ax.legend(labels=hue_order)
            ax.set_xticks([0, 50, 100])
            ax.set_yticks([0, 50, 100])
            # Keep tick labels in the plain (non-TeX) font despite usetex.
            ax.xaxis.get_major_formatter()._usetex = False
            ax.yaxis.get_major_formatter()._usetex = False
    return grid


# +
# Drop the background class (-1), compute an r-precision/recall curve per
# (classifier, proto) group, and plot it.
figure = (data.drop(columns=[-1])
          .groupby(["classifier", "proto"])
          .apply(lambda df: pd.DataFrame(analytics.rprecision_recall_curve(df["y_true"], df.drop("y_true", axis=1), ratio=20)._asdict()))
          .set_index("thresholds", append=True)
          .pipe(plot_pr_curve))
figure.savefig("../results/plots/split-classify-prcurve.pdf", dpi=150, bbox_inches="tight")
figure.savefig("../results/plots/split-classify-prcurve.pgf", dpi=150, bbox_inches="tight")
# -

# # Box Plot


# +
def plot(axes, classifier, metric, frame, label: bool = False, title: bool = False):
    """Draw one box plot of `metric` for `classifier` onto `axes`."""
    axes.boxplot(frame.xs([classifier, metric]).values, widths=.6, labels=frame.columns)
    if label:
        axes.set_ylabel(metric.replace("%", "\%"))
    if title:
        axes.set_title(classifier)


def set_ylim(axes, bottom=None, top=100, num=5):
    """Set the y-axis range with a 2% margin and `num` evenly spaced int ticks."""
    if bottom is None:
        bottom = axes.get_ylim()[0]
    lim_range = (top - bottom)
    margin = lim_range * 0.02
    axes.set_ylim(bottom - margin, top + margin)
    y_ticks = np.linspace(start=bottom, stop=top, num=num, endpoint=True, dtype=int)
    axes.set_yticks(y_ticks)


def plot_boxplots(plot_data):
    """Create the 2x4 grid of precision (top row) and recall (bottom row) box plots."""
    figure, axes = plt.subplots(2, 4, figsize=(6, 3), dpi=120)

    results = {}
    # [[0, 2, 1, 3]] reorders the classifiers into the desired column order.
    for i, classifier in enumerate(plot_data.index.unique(level="classifier")[[0, 2, 1, 3]]):
        plot(axes[0][i], classifier, "$\pi_{20}$ (%)", plot_data, label=(i==0), title=True)
        plot(axes[1][i], classifier, "Recall (%)", plot_data, label=(i==0))

        for position in [0, 1]:
            axes[position][i].yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%.0f'))
        results[classifier] = (figure, axes[0][i], axes[1][i])

    figure.tight_layout(pad=1)

    # Per-classifier y-limits, chosen manually to spread out the boxes.
    set_ylim(results["$k$-FP"][1], 80)
    set_ylim(results["DF"][1], 20)
    set_ylim(results["$p$-FP(C)"][1], 20)
    set_ylim(results["Var-CNN"][1], 80)

    set_ylim(results["$k$-FP"][2], 92)
    set_ylim(results["DF"][2], 92)
    set_ylim(results["Var-CNN"][2], 20)
    set_ylim(results["$p$-FP(C)"][2], 20)

    return figure


# +
def score(frame):
    """Compute r-precision (r=20) and recall for one (classifier, proto, rep) group."""
    return pd.Series({
        "precision": metrics.rprecision_score(frame["y_true"], frame["y_pred"], ratio=20),
        "recall": metrics.recall_score(frame["y_true"], frame["y_pred"])
    })


# Hard-decision predictions: the class with the highest combined probability.
plot_data = (data.loc[:, ["y_true"]]
             .assign(y_pred=np.arange(-1, 100)[np.argmax(data.iloc[:, 1:].to_numpy(), axis=1)])
             .groupby(["classifier", "proto", "rep"])
             .apply(score)
             .stack()
             .rename_axis(index=["classifier", "factor", "repetition", "metric"])
             .unstack("factor")
             .reorder_levels([0, 2, 1])
             .rename({"tcp": "TCP", "quic": "QUIC", "mixed": "Mixed", "split": "Split"}, axis=1)
             .rename({"dfnet": "DF", "varcnn": "Var-CNN", "p1fp": "$p$-FP(C)", "kfp": "$k$-FP"}, axis=0)
             .rename({"precision": "$\pi_{20}$ (%)", "recall": "Recall (%)"}, axis=0)
             .transform(lambda x: x*100)
             .sort_index()
             .loc[:, ["TCP", "QUIC", "Mixed", "Split"]])
plot_data
# -

with plt.style.context([
    "mpl-style/white.mplstyle", "mpl-style/pets.mplstyle", "mpl-style/box-plots.mplstyle",
    {"text.usetex": True}
]):
    figure = plot_boxplots(plot_data)
    figure.savefig("../results/plots/split-classify.pgf", dpi=150, bbox_inches="tight")

# # Table Layout and Summary Calculations

table_data = plot_data.stack().unstack("classifier").reorder_levels([0, 2, 1]).sort_index(ascending=[True, False])
table_data


# +
def to_latex_table(data, float_fmt=".1f"):
    """Render `data` as a LaTeX booktabs table of "median (lq--uq)" cells."""
    medians = data.groupby(level=[0, 1]).median()
    iq_range = data.groupby(level=[0, 1]).quantile([0.25, 0.75])
    iq_range.index.names = iq_range.index.names[:2] + ["IQR"]

    columns = data.columns.values
    metrics = data.index.unique(level=0).values
    factors = data.index.unique(level=1).values

    result_lines = []
    result_lines.append(r"\begin{tabular}{l%s}" % ("r" * len(columns)))
    result_lines.append(r"\toprule")
    result_lines.append(r" & %s \\" % " & ".join(columns))
    result_lines.append(r"\midrule")
    for metric in metrics:
        result_lines.append(r"metric = %s \\" % metric.replace("%", r"\%"))
        for factor in factors:
            # e.g. "12.3 (10.0--15.1)" per classifier column.
            fmt_string = f"{{median:{float_fmt}}} ({{lq:{float_fmt}}}--{{uq:{float_fmt}}})"
            values = [fmt_string.format(median=median, lq=lq, uq=uq)
                      for (median, lq, uq) in zip(medians.xs([metric, factor]).values,
                                                  iq_range.xs([metric, factor, 0.25]),
                                                  iq_range.xs([metric, factor, 0.75]))]
            values = " & ".join(values)
            result_lines.append(r"\quad %s & %s \\" % (factor, values))
    result_lines.append(r"\bottomrule")
    result_lines.append(r"\end{tabular}")
    return "\n".join(result_lines)


print(to_latex_table(
    table_data
    .rename({"Precision (%)": "$\pi_{20}$ (%)"})
))
# -

table_data.groupby(["metric", "factor"]).median()

# +
idx = pd.IndexSlice

# Median differences between factors, restricted to the comparisons of interest.
(table_data.groupby("metric").apply(analytics.median_difference, level="factor")
 .loc[idx[:, ["(Mixed - Split)", "(Split - TCP)"]], :])
workflows/distinguish-protocol/notebooks/split-classify.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 卷积神经网络识别手写数字 # + from __future__ import division, print_function, absolute_import import tensorflow as tf # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # - learning_rate = 0.001 num_steps = 500 batch_size = 128 display_step = 10 # 神经网络参数 num_input = 784 num_classes = 10 drop_out = 0.75 X = tf.placeholder(tf.float32, [None, num_input]) Y = tf.placeholder(tf.float32, [None, num_classes]) keep_prob = tf.placeholder(tf.float32) # dropout # Create some wrappers for simplicity def conv2d(x, W, b, strides=1): # Conv2D wrapper, with bias and relu activation x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME') x = tf.nn.bias_add(x, b) return tf.nn.relu(x) # pool 层 def maxpool2d(x, k=2): # 池化层封装 return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding="SAME") # 创建模型 def conv_net(x, weights, biases, dropout): x = tf.reshape(x, [-1, 28, 28, 1]) conv1 = conv2d(x, weights["wc1"], biases["bc1"]) conv1 = maxpool2d(conv1, k=2) conv2 = conv2d(conv1, weights["wc2"], biases["bc2"]) conv2 = maxpool2d(conv2, k=2) #全连接层 fc1 = tf.reshape(conv2, [-1, weights["wd1"].get_shape().as_list()[0]]) fc1 = tf.add(tf.matmul(fc1, weights["wd1"]), biases['bd1']) fc1 = tf.nn.relu(fc1) fc1 = tf.nn.dropout(fc1, dropout) out = tf.add(tf.matmul(fc1, weights['out']), biases['out']) return out # 参数:权重和参数 weights = { #5 * 5卷积层,1个输入,32个输出 'wc1':tf.Variable(tf.random_normal([5, 5, 1, 32])), 'wc2':tf.Variable(tf.random_normal([5, 5, 32, 64])), # 全连接层, 7*7*64个输入, 1024个输出 'wd1':tf.Variable(tf.random_normal([7*7*64, 1024])), 'out':tf.Variable(tf.random_normal([1024, num_classes])) } biases = { 'bc1':tf.Variable(tf.random_normal([32])), 
'bc2':tf.Variable(tf.random_normal([64])), 'bd1':tf.Variable(tf.random_normal([1024])), 'out':tf.Variable(tf.random_normal([num_classes])) } # 构建模型 logits = conv_net(X, weights, biases, keep_prob) prediction = tf.nn.softmax(logits) # 定义损失和优化器 loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op) # 评估模型 correct_pred = tf.equal(tf.arg_max(prediction, 1), tf.arg_max(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # 初始化 init = tf.global_variables_initializer() # 训练过程 with tf.Session() as sess: sess.run(init) for step in range(1, num_steps + 1): batch_x, batch_y = mnist.train.next_batch(batch_size) sess.run(train_op, feed_dict={X:batch_x, Y:batch_y, keep_prob: drop_out}) if step % display_step == 0 or step == 1: loss, acc = sess.run([loss_op, accuracy], feed_dict={X:batch_x, Y:batch_y, keep_prob:1.0}) print("Step" + str(step) + ", Minibatch loss= " + "{:.4f}".format(loss) + "Training Accuracy=" + "{:.3f}".format(acc)) print("Optimization Finshed!") print("Testing Accuracy:" , sess.run(accuracy, feed_dict={X:mnist.test.images[0:256], Y:mnist.test.labels[0:256], keep_prob:1.0})) # ## 使用layer API # + from __future__ import division, print_function, absolute_import # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=False) import tensorflow as tf import matplotlib.pyplot as plt import numpy as np # + # Training Parameters# Train learning_rate = 0.001 num_steps = 200 batch_size = 128 # Network Parameters num_input = 784 # MNIST data input (img shape: 28*28) num_classes = 10 # MNIST total classes (0-9 digits) dropout = 0.25 # Dropout, probability to drop a unit # - def conv_net(x_dict, n_classes, dropout, reuse, is_training): # 定义scope重用变量 with tf.variable_scope("ConvNet", reuse=reuse): # 输入 x = x_dict["images"] x = tf.reshape(x, 
shape=[-1, 28, 28, 1]) conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu) conv1 = tf.layers.max_pooling2d(conv1, 2, 2) conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu) conv2 = tf.layers.max_pooling2d(conv2, 2, 2) fc1 = tf.contrib.layers.flatten(conv2) fc1 = tf.layers.dense(fc1, 1024) fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) out = tf.layers.dense(fc1, n_classes) return out # 参考tf估计模板 定义模型 def model_fn(features, labels, mode): # 构建神经网络 logits_train = conv_net(features, num_classes, dropout, reuse=False, is_training=True) logits_test = conv_net(features, num_classes, dropout, reuse=True, is_training=False) pred_classes = tf.argmax(logits_test, axis = 1) pred_probas = tf.nn.softmax(logits_test) if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode, predictions=pred_classes) loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits_train, labels=tf.cast(labels, dtype=tf.int32))) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step()) acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes) estim_specs = tf.estimator.EstimatorSpec( mode=mode, predictions=pred_classes, loss = loss_op, train_op=train_op, eval_metric_ops={'accuracy':acc_op} ) return estim_specs # 构建 Estimator model = tf.estimator.Estimator(model_fn) # + # 定义输入函数 input_fn = tf.estimator.inputs.numpy_input_fn( x={"images":mnist.train.images}, y=mnist.train.labels, batch_size=batch_size, num_epochs=None,shuffle=True) # 训练模型 model.train(input_fn, steps=num_steps) # - # Evaluate the Model # Define the input function for evaluating input_fn = tf.estimator.inputs.numpy_input_fn( x={'images': mnist.test.images}, y=mnist.test.labels, batch_size=batch_size, shuffle=False) # Use the Estimator 'evaluate' method model.evaluate(input_fn) # + # Predict single images n_images = 4 # Get images from test set test_images = 
mnist.test.images[:n_images] # Prepare the input data input_fn = tf.estimator.inputs.numpy_input_fn( x={'images': test_images}, shuffle=False) # Use the model to predict the images class preds = list(model.predict(input_fn)) # Display for i in range(n_images): plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') plt.show() print("Model prediction:", preds[i]) # -
3-Neural-Networks/03-CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Avd8KH07aVyf"
# ## MARKOV CHAINS
#

# + id="ef1M3dpHadTZ" outputId="04d74cf8-ffde-4bb7-9a0e-6f6a423b5a18" colab={"base_uri": "https://localhost:8080/"}
# !pip install markovify

# + id="ppvIJUn9aYhT"
import random
import markovify


# + id="8er5hdtWadv4"
def markov_text():
    """Build a first-order Markov chain from pick-up-lines.txt.

    Returns a dict mapping each word to the list of words observed to follow
    it in the corpus (digits stripped, newlines treated as spaces).
    """
    data_sample = "pick-up-lines.txt"
    text_data = open(data_sample, 'r').read()
    text_data = ''.join([i for i in text_data if not i.isdigit()]).replace("\n", " ").split(' ')
    index = 1
    markov_gen = {}
    # NOTE(review): word_count is read from the user but never used — TODO
    # either wire it into generation or drop the prompt.
    word_count = int(input('select the number of words you want to generate'))
    for character in text_data[index:]:
        key = text_data[index-1]
        if key in markov_gen:
            markov_gen[key].append(character)
        else:
            markov_gen[key] = [character]
        index += 1
    # BUG FIX: the chain was built and then silently discarded (the function
    # returned None); return it so callers can actually use the table.
    return markov_gen


# + id="OWUIwYaWaoUs"
with open("pick-up-lines.txt") as f:
    data = f.read()

# + id="JyhsvkjAa__P"
data_model = markovify.Text(data)


def pickUpLines():
    """Generate one pick-up line of at most 280 characters (or None)."""
    # BUG FIX: the original wrapped this in `for i in range(3)` but returned
    # on the very first iteration, so the loop was dead code.
    return data_model.make_short_sentence(280)


# + id="MeRo0llTd5rB"
def generate():
    """Generate one line, append it to generated.txt and print it."""
    line = pickUpLines()
    # make_short_sentence() returns None when it cannot build a sentence;
    # guard against that instead of crashing on `None + "\n"`.
    if line is None:
        print("Could not generate a line this time.")
        return
    new_line = line + "\n"
    with open("generated.txt", "a") as f:
        f.write(new_line)
    print(new_line)


# + id="pfM6xyGpgTuz" outputId="7e963a53-29e5-46a5-fde5-7d39cfa7df71" colab={"base_uri": "https://localhost:8080/"}
generate()

# + id="KvnGfVvOhBsR"
version_1.5/MarkovChains.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="cz71fPGrpRiQ"
# # An Introduction to Federated Learning
#
# Welcome to the federated learning tutorial!
#
# In this notebook, we'll build a federated learning system using Flower and PyTorch. In part 1, we use PyTorch for the model training pipeline and data loading. In part 2, we continue to federate the PyTorch-based pipeline using Flower.

# + [markdown] id="mBu1HRRY6bwX"
# ## Part 0: Preparation
#
# Before we begin with any actual code, let's make sure that we have everything we need.

# + [markdown] id="D4KiTMTpiort"
# ### Installing dependencies
#
# Next, we install the necessary packages for PyTorch (`torch` and `torchvision`) and Flower (`flwr`):

# + id="eTrCL2FmC5U5"
# !pip install flwr["simulation"]==0.18.0 torch torchvision matplotlib

# + [markdown] id="3UFT3_A3iz76"
# Now that we have the necessary packages installed, we can import everything we need for this tutorial:

# + id="Tja2N6l-qH-e"
from collections import OrderedDict
from typing import List

import flwr as fl
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split
from torchvision.datasets import CIFAR10

print("flwr", fl.__version__)
print("numpy", np.__version__)
print("torch", torch.__version__)
print("torchvision", torchvision.__version__)

DEVICE = torch.device("cpu")
# DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Training on {DEVICE}")

# + [markdown] id="8D2bnPKG58Gx"
# It is possible to switch to a runtime that has GPU acceleration enabled (on Google Colab: `Runtime > Change runtime type > Hardware accelerator: GPU > Save`). Note, however, that Google Colab is not always able to offer GPU acceleration. If you see an error related to GPU availability in one of the following sections, consider switching back to CPU-based execution by setting `DEVICE = torch.device("cpu")`. If the runtime has GPU acceleration enabled, you should see the output `Training on cuda:0`, otherwise it'll say `Training on cpu`.

# + [markdown] id="JVcgAAiaihnx"
#
# ### Loading the data
#
# Federated learning can be applied to many different types of tasks across different domains. In this tutorial, we introduce federated learning by training a simple convolutional neural network (CNN) on the popular CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that distinguish between images from ten different classes:

# + id="-tpk_Zv37ONm"
CLASSES = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

# + [markdown] id="toxAoOq6fS2h"
# We simulate having multiple datasets from multiple organizations (also called the "cross-silo" setting in federated learning) by splitting the original CIFAR-10 dataset into multiple partitions. Each partition will represent the data from a single organization. We're doing this purely for experimentation purposes, in the real world there's no need for data splitting because each organization already has their own data (so the data is naturally partitioned).
#
# Each organization will act as a client in the federated learning system. So having ten organizations participate in a federation means having ten clients connected to the federated learning server:
#

# + id="q9LhPFDh0S5c"
NUM_CLIENTS = 10

# + [markdown] id="01Zy7yjBPhQd"
#
# Let's now load the CIFAR-10 training and test set, partition them into ten smaller datasets (each split into training and validation set), and wrap the resulting partitions by creating a PyTorch `DataLoader` for each of them:

# + id="J4Em7BPNTXeX"
BATCH_SIZE = 32


def load_datasets():
    """Download CIFAR-10 and build per-client train/val loaders plus a test loader."""
    # Download and transform CIFAR-10 (train and test)
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )
    trainset = CIFAR10("./dataset", train=True, download=True, transform=transform)
    testset = CIFAR10("./dataset", train=False, download=True, transform=transform)

    # Split training set into 10 partitions to simulate the individual dataset
    partition_size = len(trainset) // NUM_CLIENTS
    lengths = [partition_size] * NUM_CLIENTS
    datasets = random_split(trainset, lengths, torch.Generator().manual_seed(42))

    # Split each partition into train/val and create DataLoader
    trainloaders = []
    valloaders = []
    for ds in datasets:
        len_val = len(ds) // 10  # 10 % validation set
        len_train = len(ds) - len_val
        lengths = [len_train, len_val]
        ds_train, ds_val = random_split(ds, lengths, torch.Generator().manual_seed(42))
        trainloaders.append(DataLoader(ds_train, batch_size=BATCH_SIZE, shuffle=True))
        valloaders.append(DataLoader(ds_val, batch_size=BATCH_SIZE))
    testloader = DataLoader(testset, batch_size=BATCH_SIZE)
    return trainloaders, valloaders, testloader


trainloaders, valloaders, testloader = load_datasets()

# + [markdown] id="OBp7kB4G0sPB"
# We now have a list of ten training sets and ten validation sets (`trainloaders` and `valloaders`) representing the data of ten different organizations. Each `trainloader`/`valloader` pair contains 4500 training examples and 500 validation examples. There's also a single `testloader` (we did not split the test set). Again, this is only necessary for building research or educational systems, actual federated learning systems have their data naturally distributed across multiple partitions.
#
# Let's take a look at the first batch of images and labels in the first training set (i.e., `trainloaders[0]`) before we move on:

# + id="E3dag9WeT9VH"
def imshow(img):
    """Display a (de-normalized) image tensor with matplotlib."""
    img = img / 2 + 0.5  # unnormalize
    plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
    plt.show()


# BUG FIX: `iter(...).next()` is Python-2 iterator syntax and raises
# AttributeError on Python 3; use the builtin next() instead.
images, labels = next(iter(trainloaders[0]))
imshow(torchvision.utils.make_grid(images))
# Print one class name per image in the batch (len(labels) == BATCH_SIZE here).
print(' '.join('%5s' % CLASSES[labels[j]] for j in range(len(labels))))

# + [markdown] id="ZGVGbRZ_yEA2"
# The output above shows a random batch of images from the first `trainloader` in our list of ten `trainloaders`. It also prints the labels associated with each image (i.e., one of the ten possible labels we've seen above). If you run the cell again, you should see another batch of images.

# + [markdown] id="4TW4Pzb7p1F9"
# ## Part 1: Centralized Training with PyTorch
#
#

# + [markdown] id="cTjCmmBtqPgM"
# Next, we're going to use PyTorch to define a simple convolutional neural network. This introduction assumes basic familiarity with PyTorch, so it doesn't cover the PyTorch-related aspects in full detail. If you want to dive deeper into PyTorch, we recommend [*DEEP LEARNING WITH PYTORCH: A 60 MINUTE BLITZ*](https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html).
# # + [markdown] id="XYks8IpJL6iK" # ### Defining the model # # We use the simple CNN described in the [PyTorch tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#define-a-convolutional-neural-network): # + id="2X3cVBXMpP6w" class Net(nn.Module): def __init__(self) -> None: super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x # + [markdown] id="tCRhau5cr2Gd" # Let's continue with the usual training and test functions: # + id="xIl8NfAFpyam" def train(net, trainloader, epochs: int, verbose=False): """Train the network on the training set.""" criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(net.parameters()) net.train() for epoch in range(epochs): correct, total, epoch_loss = 0, 0, 0.0 for images, labels in trainloader: images, labels = images.to(DEVICE), labels.to(DEVICE) optimizer.zero_grad() outputs = net(images) loss = criterion(net(images), labels) loss.backward() optimizer.step() # Metrics epoch_loss += loss total += labels.size(0) correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() epoch_loss /= len(testloader.dataset) epoch_acc = correct / total if verbose: print(f"Epoch {epoch+1}: train loss {epoch_loss}, accuracy {epoch_acc}") def test(net, testloader): """Evaluate the network on the entire test set.""" criterion = torch.nn.CrossEntropyLoss() correct, total, loss = 0, 0, 0.0 net.eval() with torch.no_grad(): for images, labels in testloader: images, labels = images.to(DEVICE), labels.to(DEVICE) outputs = net(images) loss += criterion(outputs, labels).item() _, predicted = torch.max(outputs.data, 1) total += 
labels.size(0) correct += (predicted == labels).sum().item() loss /= len(testloader.dataset) accuracy = correct / total return loss, accuracy # + [markdown] id="GDDxh73Sszck" # ### Training the model # # We now have all the basic building blocks we need: a dataset, a model, a training function, and a test function. Let's put them together to train the model on the dataset of one of our organizations (`trainloaders[0]`). This simulates the reality of most machine learning projects today: each organization has their own data and trains models only on this internal data: # + id="WdUTb8WgtRMz" trainloader = trainloaders[0] valloader = valloaders[0] net = Net().to(DEVICE) for epoch in range(5): train(net, trainloader, 1) loss, accuracy = test(net, valloader) print(f"Epoch {epoch+1}: validation loss {loss}, accuracy {accuracy}") loss, accuracy = test(net, testloader) print(f"Final test set performance:\n\tloss {loss}\n\taccuracy {accuracy}") # + [markdown] id="DhLGLdmhOhVr" # Training the simple CNN on our CIFAR-10 split for 5 epochs should result in a test set accuracy of about 41%, which is not good, but at the same time, it doesn't really matter for the purposes of this tutorial. The intent was just to show a simplistic centralized training pipeline that sets the stage for what comes next - federated learning! # + [markdown] id="a6HP2cYCsqxD" # ## Part 2: Federated Learning with Flower # # Part 1 demonstrated a simple centralized training pipeline. All data was in one place (i.e., a single `trainloader` and a single `valloader`). Next, we'll simulate a situation where we have multiple datasets in multiple organizations and where we train a model over these organizations using federated learning. # + [markdown] id="mf-cW093MzeT" # ### Updating model parameters # # In federated learning, the server sends the global model parameters to the client, and the client updates the local model with the parameters received from the server. 
It then trains the model on the local data (which changes the model parameters locally) and sends the updated/changed model parameters back to the server (or, alternatively, it sends just the gradients back to the server, not the full model parameters). # # We need two helper functions to update the local model with parameters received from the server and to get the updated model parameters from the local model: `set_parameters` and `get_parameters`. The following two functions do just that for the PyTorch model above. # # The details of how this works are not really important here (feel free to consult the PyTorch documentation if you want to learn more). In essence, we use `state_dict` to access PyTorch model parameter tensors. The parameter tensors are then converted to/from a list of NumPy ndarray's (which Flower knows how to serialize/deserialize): # + id="1ZxGk6AMNvvV" def get_parameters(net) -> List[np.ndarray]: return [val.cpu().numpy() for _, val in net.state_dict().items()] def set_parameters(net, parameters: List[np.ndarray]): params_dict = zip(net.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) net.load_state_dict(state_dict, strict=True) # + [markdown] id="1lCf3oljdClM" # ### Implementing a Flower client # # With that out of the way, let's move on to the interesting part. Federated learning systems consist of a server and multiple clients. In Flower, we create clients by implementing subclasses of `flwr.client.Client` or `flwr.client.NumPyClient`. We use `NumPyClient` in this tutorial because it is easier to implement and requires us to write less boilerplate. 
# # To implement the Flower client, we create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`: # # * `get_parameters`: Return the current local model parameters # * `fit`: Receive model parameters from the server, train the model parameters on the local data, and return the (updated) model parameters to the server # * `evaluate`: Receive model parameters from the server, evaluate the model parameters on the local data, and return the evaluation result to the server # # We mentioned that our clients will use the previously defined PyTorch components for model training and evaluation. Let's see a simple Flower client implementation that brings everything together: # + id="ye6Jt5p3LWtF" class FlowerClient(fl.client.NumPyClient): def __init__(self, net, trainloader, valloader): self.net = net self.trainloader = trainloader self.valloader = valloader def get_parameters(self): return get_parameters(self.net) def fit(self, parameters, config): set_parameters(self.net, parameters) train(self.net, self.trainloader, epochs=1) return get_parameters(self.net), len(self.trainloader), {} def evaluate(self, parameters, config): set_parameters(self.net, parameters) loss, accuracy = test(self.net, self.valloader) return float(loss), len(self.valloader), {"accuracy": float(accuracy)} # + [markdown] id="Heyxd9MfHOTe" # Our class `FlowerClient` defines how local training/evaluation will be performed and allows Flower to call the local training/evaluation through `fit` and `evaluate`. Each instance of `FlowerClient` represents a *single client* in our federated learning system. Federated learning systems have multiple clients (otherwise there's not much to federate), so each client will be represented by its own instance of `FlowerClient`. If we have, for example, three clients in our workload, then we'd have three instances of `FlowerClient`. 
Flower calls `FlowerClient.fit` on the respective instance when the server selects a particular client for training (and `FlowerClient.evaluate` for evaluation). # # ### Using the Virtual Client Engine # # In this notebook, we want to simulate a federated learning system with 10 clients on a single machine. This means that the server and all 10 clients will live on a single machine and share resources such as CPU, GPU, and memory. Having 10 clients would mean having 10 instances of `FlowerClient` in memory. Doing this on a single machine can quickly exhaust the available memory resources, even if only a subset of these clients participates in a single round of federated learning. # # In addition to the regular capabilities where server and clients run on multiple machines, Flower therefore provides special simulation capabilities that create `FlowerClient` instances only when they are actually necessary for training or evaluation. To enable the Flower framework to create clients when necessary, we need to implement a function called `client_fn` that creates a `FlowerClient` instance on demand. Flower calls `client_fn` whenever it needs an instance of one particular client to call `fit` or `evaluate` (those instances are usually discarded after use, so they should not keep any local state). Clients are identified by a client ID, or short `cid`.
# The `cid` can be used, for example, to load different local data partitions for different clients, as can be seen below:

# + id="qkcwggRYOwWN"
def client_fn(cid: str) -> FlowerClient:
    """Create a Flower client representing a single organization."""
    # Each client id maps to its own CIFAR-10 partition, so every client
    # trains and evaluates on its own unique data.
    partition = int(cid)
    model = Net().to(DEVICE)
    return FlowerClient(model, trainloaders[partition], valloaders[partition])


# + [markdown] id="axzXSMtlfhXU"
# ### Starting the training
#
# We now have the class `FlowerClient` which defines client-side training/evaluation and `client_fn` which allows Flower to create `FlowerClient` instances whenever it needs to call `fit` or `evaluate` on one particular client. The last step is to start the actual simulation using `flwr.simulation.start_simulation`.
#
# The function `start_simulation` accepts a number of arguments, amongst them the `client_fn` used to create `FlowerClient` instances, the number of clients to simulate (`num_clients`), the number of federated learning rounds (`num_rounds`), and the strategy. The strategy encapsulates the federated learning approach/algorithm, for example, *Federated Averaging* (FedAvg).
#
# Flower has a number of built-in strategies, but we can also use our own strategy implementations to customize nearly all aspects of the federated learning approach. For this example, we use the built-in `FedAvg` implementation and customize it using a few basic parameters.
# The last step is the actual call to `start_simulation` which - you guessed it - starts the simulation:

# + id="ELNy0-0nfyI2"
# Create FedAvg strategy.
# NOTE(review): these keyword names match older Flower releases; newer releases
# renamed fraction_eval -> fraction_evaluate and min_eval_clients -> min_evaluate_clients.
strategy = fl.server.strategy.FedAvg(
    fraction_fit=1.0,  # Sample 100% of available clients for training
    fraction_eval=0.5,  # Sample 50% of available clients for evaluation
    min_fit_clients=10,  # Never sample less than 10 clients for training
    min_eval_clients=5,  # Never sample less than 5 clients for evaluation
    min_available_clients=10,  # Wait until all 10 clients are available
)

# Start simulation (runs NUM_CLIENTS virtual clients for 5 federated rounds).
fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=NUM_CLIENTS,
    num_rounds=5,
    strategy=strategy,
)

# + [markdown] id="e_lIXlErb9qN"
# ### Behind the scenes
#
# So how does this work? How does Flower execute this simulation?
#
# When we call `start_simulation`, we tell Flower that there are 10 clients (`num_clients=10`). Flower then goes ahead and asks the `FedAvg` strategy to select clients. `FedAvg` knows that it should select 100% of the available clients (`fraction_fit=1.0`), so it goes ahead and selects 10 random clients (i.e., 100% of 10).
#
# Flower then asks the selected 10 clients to train the model. When the server receives the model parameter updates from the clients, it hands those updates over to the strategy (*FedAvg*) for aggregation. The strategy aggregates those updates and returns the new global model, which then gets used in the next round of federated learning.

# + [markdown] id="umvwX56Of3Cr"
# ## Final remarks
#
# Congratulations, you just trained a convolutional neural network, federated over 10 clients! With that, you understand the basics of federated learning with Flower. The same approach you've seen can be used with other machine learning frameworks (not just PyTorch) and tasks (not just CIFAR-10 image classification), for example NLP with Hugging Face Transformers or speech with SpeechBrain.
#
# In the next notebook, we're going to cover some more advanced concepts.
Want to customize your strategy? Initialize parameters on the server side? Or evaluate the aggregated model on the server side? We'll cover all this and more in the next tutorial.
tutorials/Flower-1-Intro-to-FL-PyTorch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import random
import re

# FIX: random.seed(datetime.datetime.now()) relied on seeding from an arbitrary
# object, which raises TypeError on Python >= 3.11; random.seed() with no
# argument has the intended "fresh seed per run" effect on every version.
random.seed()


def getLinks(articleUrl):
    """Return all article links (/wiki/... without a colon) in the page body."""
    html = urlopen("http://en.wikipedia.org" + articleUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    return bsObj.find("div", {"id": "bodyContent"}).findAll(
        "a", href=re.compile("^(/wiki/)((?!:).)*$"))


# Random walk: repeatedly hop to a random article link of the current page.
links = getLinks("/wiki/Kevin_Bacon")
while len(links) > 0:
    newArticle = links[random.randint(0, len(links) - 1)].attrs["href"]
    print(newArticle)
    links = getLinks(newArticle)
# -

pages = set()


def getLinks(pageUrl):
    """Recursively crawl /wiki/ links, printing title, first paragraph, and edit link."""
    global pages
    html = urlopen("http://en.wikipedia.org" + pageUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    try:
        print(bsObj.h1.get_text())
        print(bsObj.find(id="mw-content-text").findAll("p")[0])
        print(bsObj.find(id="ca-edit").find("span").find("a").attrs['href'])
    except AttributeError:
        # Not every page has all three elements; keep crawling regardless.
        print("This page is missing something! No worries though!")
    for link in bsObj.findAll("a", href=re.compile("^(/wiki/)")):
        if 'href' in link.attrs:
            if link.attrs['href'] not in pages:
                # We have encountered a new page
                newPage = link.attrs['href']
                print("----------------\n" + newPage)
                pages.add(newPage)
                # NOTE(review): unbounded recursion — this will eventually hit
                # Python's recursion limit on a site as large as Wikipedia.
                getLinks(newPage)


getLinks("")

# +
pages = set()
random.seed()


# Retrieves a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    internalLinks = []
    # Finds all links that begin with a "/"
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                internalLinks.append(link.attrs['href'])
    return internalLinks


# Retrieves a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Finds all links that start with "http" or "www" that do
    # not contain the current URL
    for link in bsObj.findAll("a", href=re.compile("^(http|www)((?!" + excludeUrl + ").)*$")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks


def splitAddress(address):
    """Split a URL into its path components, dropping the http:// scheme."""
    addressParts = address.replace("http://", "").split("/")
    return addressParts


def getRandomExternalLink(startingPage):
    """Return a random external link found on startingPage (recursing inward if none)."""
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    externalLinks = getExternalLinks(bsObj, splitAddress(startingPage)[0])
    if len(externalLinks) == 0:
        # FIX: the original called getInternalLinks(startingPage) — passing a URL
        # string where a parsed BeautifulSoup object was expected — and then the
        # undefined getNextExternalLink(), so this branch always crashed.
        internalLinks = getInternalLinks(bsObj, splitAddress(startingPage)[0])
        return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]


def followExternalOnly(startingSite):
    # FIX: the original ignored startingSite and always fetched
    # "http://oreilly.com", so the walk never actually followed the chain.
    externalLink = getRandomExternalLink(startingSite)
    print("Random external link is: " + externalLink)
    followExternalOnly(externalLink)


followExternalOnly("http://oreilly.com")

# +
from urllib.parse import urlparse

pages = set()
random.seed()


# Retrieves a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    # Normalize to scheme://netloc so relative links can be made absolute.
    includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc
    internalLinks = []
    # Finds all links that begin with a "/"
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                if link.attrs['href'].startswith("/"):
                    internalLinks.append(includeUrl + link.attrs['href'])
                else:
                    internalLinks.append(link.attrs['href'])
    return internalLinks


# Retrieves a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Finds all links that start with "http" or "www" that do
    # not contain the current URL
    for link in bsObj.findAll("a", href=re.compile(
            "^(http|www)((?!" + excludeUrl + ").)*$")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks


def getRandomExternalLink(startingPage):
    """Return a random external link; if the page has none, recurse into an internal link."""
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    externalLinks = getExternalLinks(bsObj, urlparse(startingPage).netloc)
    if len(externalLinks) == 0:
        print("No external links, looking around the site for one")
        domain = urlparse(startingPage).scheme + "://" + urlparse(startingPage).netloc
        internalLinks = getInternalLinks(bsObj, domain)
        return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]


def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("Random external link is: " + externalLink)
    followExternalOnly(externalLink)


followExternalOnly("http://oreilly.com")

# +
pages = set()
random.seed()


# Retrieves a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc
    internalLinks = []
    # Finds all links that begin with a "/"
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                if link.attrs['href'].startswith("/"):
                    internalLinks.append(includeUrl + link.attrs['href'])
                else:
                    internalLinks.append(link.attrs['href'])
    return internalLinks


# Retrieves a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Finds all links that start with "http" or "www" that do
    # not contain the current URL
    for link in bsObj.findAll("a", href=re.compile("^(http|www)((?!" + excludeUrl + ").)*$")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks


def getRandomExternalLink(startingPage):
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    externalLinks = getExternalLinks(bsObj, urlparse(startingPage).netloc)
    if len(externalLinks) == 0:
        print("No external links, looking around the site for one")
        domain = urlparse(startingPage).scheme + "://" + urlparse(startingPage).netloc
        internalLinks = getInternalLinks(bsObj, domain)
        return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]


def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("Random external link is: " + externalLink)
    followExternalOnly(externalLink)


# Collects a list of all external URLs found on the site
allExtLinks = set()
allIntLinks = set()


def getAllExternalLinks(siteUrl):
    """Depth-first walk of the whole site, printing every external URL found."""
    html = urlopen(siteUrl)
    domain = urlparse(siteUrl).scheme + "://" + urlparse(siteUrl).netloc
    bsObj = BeautifulSoup(html, "html.parser")
    internalLinks = getInternalLinks(bsObj, domain)
    externalLinks = getExternalLinks(bsObj, domain)
    for link in externalLinks:
        if link not in allExtLinks:
            allExtLinks.add(link)
            print(link)
    for link in internalLinks:
        if link not in allIntLinks:
            allIntLinks.add(link)
            getAllExternalLinks(link)


# NOTE(review): followExternalOnly recurses until an exception is raised, so the
# two getAllExternalLinks lines below are effectively unreachable in a plain run.
followExternalOnly("http://oreilly.com")

allIntLinks.add("http://oreilly.com")
getAllExternalLinks("http://oreilly.com")
# -
ipynb/python-scraping/chapter_03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/awermenlinger/adventofcode2020/blob/main/adventofcode06.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="0UOfQ9JuQLhF"
import pandas as pd
import numpy as np
import regex as re
import math
from itertools import combinations
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# + id="1QOELSVdQQiC"
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# + id="UWgUE4TyQSo5"
# Load the puzzle input from Google Drive.
# FIX: renamed `id` -> `file_id` so the builtin id() is not shadowed.
file_id = "1CXkIoEGHyTooqexvIKyejFwvaAWhDLM3"
downloaded = drive.CreateFile({'id': file_id})
downloaded.GetContentFile('day06.txt')

# + id="9dVPaSovQn_q" colab={"base_uri": "https://localhost:8080/"} outputId="6be1c456-a8a3-4416-d835-f1a972317283"
# Part 1: groups are separated by blank lines; for each group, count the
# union of answers given by its members.
answers = []
str_list = ""
retchar = set("\n")
with open('day06.txt') as f:
    for line in f:
        if line == "\n":
            # End of a group: record the distinct answers, minus the newlines.
            answers.append(set(str_list) - retchar)
            str_list = ""
        else:
            # Accumulate this group's lines.
            str_list = str_list + line
    # The last group is not followed by a blank line.
    answers.append(set(str_list) - retchar)

ans61 = sum(len(answer) for answer in answers)

# + id="jufWerCHc3Rd"
# Part 2: for each group, count the intersection of every member's answers.
# FIX: the file handle was previously left open (`open(...).read()`).
results = []
with open('day06.txt') as data_file:
    data = data_file.read().rstrip().split('\n\n')
for items in data:
    set_common = set("abcdefghijklmnopqrstuvwxyz")
    for line in items.split("\n"):
        set_common = set_common.intersection(set(line))
    results.append(set_common)

ans62 = sum(len(answer) for answer in results)

# + colab={"base_uri": "https://localhost:8080/"} id="8ZhEPuu_ejS0" outputId="6531912d-cf1c-4d70-8cc7-6f52d3933216"
print("6.1: {} and 6.2: {}".format(ans61, ans62))

# + id="JLguk6rHh7au"
adventofcode06.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Titanic passenger data
#
# ### Available at: https://gist.githubusercontent.com/michhar/2dfd2de0d4f8727f873422c5d959fff5/raw/fa71405126017e6a37bea592440b4bee94bf7b9e/titanic.csv
#
# What is the average age of the Titanic passengers?

# +
# PassengerId - passenger id
# Survived - survivor flag
# Pclass - class
# Name - name
# Sex - sex
# Age - age
# SibSp - original note said "whether children were present"
#         (NOTE(review): in the standard dataset SibSp is siblings/spouses aboard)
# Parch -
# Ticket - ticket number
# Fare - ticket price
# Cabin - cabin
# Embarked - port of embarkation
# -

# ## Step 1 - Read the file to make the data available

# +
# Open the data file using open
arqTitanic = open('titanic.csv', 'r')

# +
# Use readlines to read every line separately,
# turning the data into a list
# arqTitanic.seek(0)
linhasTitanic = arqTitanic.readlines()
# -

# The first line of the file is the column description
linhasTitanic[0]

# +
# After reading the data it is important to close the file
arqTitanic.close()
# -

linhasTitanic[1]

# First line split into a list
colunas = linhasTitanic[0].split(',')

colunas

type(colunas)

dados = linhasTitanic[1].split(',')

len(colunas)

len(dados)

dados

# Use a regular expression
import re

match = re.compile(r'"(.*)(,)(\s.*)"')

# +
match.sub(r'"\3 \1"', linhasTitanic[1])
# 1,0,3,"Braund, Mr. <NAME>",male,22,1,0,A/5 21171,7.25,,S
# -

# FIX: the pattern is compiled once at module level; the original re-imported
# `re` and recompiled the same regex on every call to tratarNome.
PADRAO_NOME = re.compile(r'"(.*)(,)(\s.*)"')


def tratarNome(linha):
    """Rewrite the quoted name field from '"Last, Title First"' to '"Title First Last"'."""
    return PADRAO_NOME.sub(r'"\3 \1"', linha)


tratarNome(linhasTitanic[1])

tratarNome(linhasTitanic[2])

tratarNome(linhasTitanic[3])

# +
# Print the list of passengers with their names 'fixed'
for passageiro in linhasTitanic[1:]:
    print(tratarNome(passageiro))

# +
# Create a dictionary for the whole dataset
titanic_dados = {}

for coluna in colunas:
    titanic_dados[coluna] = []

titanic_dados
# -

type(titanic_dados)

linhasTitanic[1]

# Process every line, excluding line 0, which is the column description.
# lists
for dado in linhasTitanic[1:]:  # loop
    # functions
    dado_trato = tratarNome(dado)  # fix the name by applying the regex
    print(dado_trato)

type(dado_trato)

print(dado_trato)

# +
# Process every line, excluding line 0, which is the column description.
for dado in linhasTitanic[1:]:
    dado_trato = tratarNome(dado)
    dado_como_lista = dado_trato.split(',')
    print(dado_como_lista)
# -

dado_como_lista

# +
dados_tratados = []

for dado in linhasTitanic[1:]:
    dado_trato = tratarNome(dado)
    dado_como_lista = dado_trato.split(',')
    dados_tratados.append(dado_como_lista)
# -

dados_tratados[0]

len(dados_tratados)

print(dados_tratados[0][3])

print(colunas[1], dados_tratados[0][1], colunas[3], dados_tratados[0][3])

colunas

dados_tratados

for passageiro in dados_tratados:
    print(colunas[5], passageiro[5])
src/Aula 08 - Organizando dados de um dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Complaint analysis
# We seek to understand which companies are the most hated.
#
# Author: <NAME>

# #### Libraries used

from glob import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os import path

# The data comes from [kaggle](https://www.kaggle.com/gerosa/procon), and a [script](https://gist.github.com/bhering/d21097f5b125d65fd8c422d81695d7f7) was run to sanitize it. The script removes some inconsistencies (such as dashes in the postal-code field) and clears empty fields that had been filled with `NULL` or `Nao se aplica`.

# Every .csv in the folder is read and compiled into a single dataframe `df`...

all_files = glob(path.join('./datasets/costumer_complaints',"*.csv"))
# Concatenate all yearly files; build the full CNPJ (company id) from its two parts.
df = pd.concat((pd.read_csv(f) for f in all_files))
df['CNPJ'] = df['NumeroCNPJ'].map(str) + df['RadicalCNPJ'].map(str)
df[['AnoCalendario', 'DataAbertura', 'strNomeFantasia', 'Atendida', 'DescricaoAssunto', 'DescricaoProblema']][:10]

print('Total de registros:', len(df))
print('Total de empresas:', df['CNPJ'].nunique())

# ### How do we describe how hated a company is?
# We can start simply by looking at the distribution of the companies with the most complaints:

# NOTE(review): `lim` is defined here, but the two slices below hard-code 20
# while the later cell uses `lim` — keep the two in sync.
lim=20
print(df['strRazaoSocial'].value_counts(normalize=True)[:20])
print("Total:", df['strRazaoSocial'].value_counts(normalize=True)[:20].sum())

# Of the 58,254 companies, the 20 with the most complaints account for 13.9% of the 603,036 registered complaints.
#
# The list members are not surprising: all the phone, internet and TV providers, as well as cell-phone manufacturers, some banks, and large retail chains. A bit more surprising are the loan bank (BV) and the insurer (Cardif) that appear among the top 20.

# However, although many complaints may not be good for a company, it is even worse — and easier to classify as hatred — when a company has many complaints that went unaddressed (Atendida == 'N'):

print(df[df.Atendida == 'N']['strRazaoSocial'].value_counts(normalize=True)[:lim])
print("Total:", df[df.Atendida == 'N']['strRazaoSocial'].value_counts(normalize=True)[:lim].sum())

print(df[df.Atendida == 'N']['Atendida'].count())

# "<NAME> S/A" (one of Oi's subdivisions) at the top again! Indisputably, quantitatively the most hated company. The report could end here, but we can get more insights, especially if we break these two measures down by segment and over time.
costumer-complaints.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import diff_classifier.aws as aws import diff_classifier.utils as ut import diff_classifier.msd as msd import diff_classifier.features as ft import os import os.path as op import numpy as np import numpy.ma as ma import pandas as pd remote_folder = "01_18_Experiment/P1" filename = "Traj_P1_S1_L_0000_1_1.csv" to_download = op.join(remote_folder, filename) to_download # local_folder = !pwd local_name = op.join(local_folder[0], filename) local_name aws.download_s3(to_download, local_name) test = ut.csv_to_pd(local_name) msds_a = msd.all_msds(test) import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt # %matplotlib inline # + string = 'elongation' leveler = test_ft[string] t_min = np.min(leveler) t_max = np.max(leveler)*0.6 norm = mpl.colors.Normalize(t_min, t_max, clip=True) mapper = cm.ScalarMappable(norm=norm, cmap=cm.viridis) # + cutoff = 6 plt.figure(figsize=(10, 10)) for i in np.unique(msds_a['Track_ID']): param = test_ft[test_ft['Track_ID']==i][string][i-1] if msds_a[msds_a['Track_ID']==i]['X'].shape[0]> cutoff: if type(param) is np.float64: x = msds_a[msds_a['Track_ID']==i]['X'] y = msds_a[msds_a['Track_ID']==i]['Y'] level = mapper.to_rgba(param) plt.plot(x, y, color=level) mapper.set_array(30) plt.colorbar(mapper) # - xs = test_ft['X'].astype(int) ys = test_ft['Y'].astype(int) zs = test_ft[string] image = np.zeros((512, 512))*np.nan image[xs, ys] = zs plt.scatter(xs, ys, c=zs) plt.matshow(image) test_ft = ft.calculate_features(msds_a) test_ft ma.comp unmask_track(msds_a[msds_a['Track_ID']==50])
notebooks/development/02_13_18_prototype_visualization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Explore the Hierarchical class # This notebook explores the base functionality of Hierarchical as this base functionality is introduced. # For now, Hierarchical is indeed instantiated as a subclass of Sequential, but only inherits the methods that specify how the layer architecture works. If more general architectures are added, the actual implementation will be moved up. Currently, this implementation already involves the general Model API in keras. # ## Methods in Sequential import tensorflow.keras as keras seq = keras.Sequential() seq.add(keras.layers.Dense(10, input_shape=(4, ))) seq.summary() # Essentially, the thing we must add is a possibility to infer arbitrary states; essentially a state layer. Later, this will be ample opportunity to specify different kinds of state layers (think probabilistic, a la Srinivasan, etc.). Later, we may thus have more than two different modes of estimation, but for now, we have state and weight parameter estimation. This means that effectively, we have multiple sequential models -- which also means that we might as well just explicitly have an entirely new class. # The overall UI should enable state-to-state thinking. Conceptually, there are two levels of a predictive coding model: on one level, there are the state inference layers. On the other hand, each of these inferred states is being used to predict the state blow, the lowest state being actually observed. The supernetworks should be specified in a bottom-up fashion, whereas the subnetworks should be specified in a top-down fashion. When Hierarchical is initialized, it should thus be initialized in a top-down fashion. 
# The following syntax thus seems sensible: # + active="" # hpc = pc.Hierarchical() # hpc.add( # keras.Sequential( # [pc.layers.State(input_shape=(4, )), # keras.layers.Dense(10), # pc.layers.StateEstimation()] # ) # ) # print(hpc) # - # Or, as a more complex example: # + active="" # nl_hpc = pc.Hierarchical() # nl_hpc.add(pc.layers.State(input_shape=(4, ))) # nl_hpc.add( # keras.Sequential( # keras.layers.activation('relu', input_shape=(4, )), # keras.layers.Dense(10), # pc.layers.StateEstimation()] # ) # ) # nl_hpc.add(pc.layers.StateEstimation()) # # Current state estimation loop closed. Moving up one tier. # nl_hpc.add(pc.layers.State(input_shape=(4, ))) # # nl_hpc.add( # keras.Sequential( # keras.layers.Dense(10), # keras.layers.activation('relu'), # keras.layers.Dense(4)] # ) # ) # print(nl_hpc) # - # Pro: Hierarchical models can be built in a bottom up fashion. # Con: There's a somewhat unnecessary doubling of state estimation and state -- or at least the input shape there. # However, I can see certain advantages with that kind of verbosity, as well. # As a first step, I will implement the pure interface plus printing. # # Revamp: There are essentially three different modi: adding the state tiers, adding the tier models, and adding the connections between the tiers. seq = keras.Sequential() seq.add(keras.layers.Dense(10, input_shape=(4, ))) seq.add(keras.layers.Activation('relu')) seq.add(keras.layers.Dense(4)) seq.summary() # + active="" # nl_hpc = pc.Hierarchical() # nl_hpc.add_tier(shape=(10, )) # # Adding 'Tier 0'. # nl_hpc.add_tier(shape=(4, )) # # Adding 'Tier 1'. # nl_hpc.add_tier(shape=(2, ), name='Final Tier') # # Adding 'Final Tier' (Tier 2). # nl_hpc.summary() # # ------------------------------------------------------------------- # # Layer (type) Output Shape Param # # # =================================================================== # # TIER_2 # # ------------------------------------------------------------------- # # (Missing Model.) 
# # ------------------------------------------------------------------- # # (Missing State Prediction.) # # ------------------------------------------------------------------- # # TIER_1 # # ------------------------------------------------------------------- # # (Missing Model.) # # ------------------------------------------------------------------- # # (Missing State Prediction.) # # ------------------------------------------------------------------- # # TIER_0 # # =================================================================== # - # Importantly, we separate the user interface from the implementation via the appropriate class methods. Though I have not looked at that yet, this should make an alternative backend to Tensorflow easier. # This kind of interface has now been achieved: import predicode as pc hpc = pc.Hierarchical() hpc.add_tier(shape=(10, )) hpc.add_tier(shape=(4, )) hpc.predictor = keras.Sequential() hpc.predictor.add(keras.layers.Dense(4, input_shape=(10, ))) hpc.summary() # Now, we tackle the real meat of Hierarchical: the estimation. # ## Example estimation # Our estimation consists of an interplay between state variables and their corresponding minimizers, and models and their corresponding minimizers. 
# ### Example data art = pc.decaying_multi_normal(dimensions=10, size=100) import tensorflow as tf # ### State variables tier_0 = tf.constant(art, name = 'tier_0', dtype=tf.float32) tier_1_initial = pc.init('random', columns=4, rows=100) tier_1 = tf.Variable(tier_1_initial, name = 'hierarchical_1_tier_1', shape = (None, 4), dtype=tf.float32) # ### Predictors predictor_1 = keras.Sequential([keras.layers.Dense(10, input_shape=(4, ), use_bias=False)]) predictor_1 # ### State prediction with tf.GradientTape() as tape: predicted_1 = predictor_1(tier_1) loss = keras.losses.mean_squared_error(tier_0, predicted_1) grad = tape.gradient(loss, (tier_1)) print(grad[0]) predicted_1[0] tier_1[0] # ### State estimation state_optimizer = keras.optimizers.SGD(learning_rate=1) next(zip(loss)) state_step = state_optimizer.apply_gradients(zip([grad], [tier_1])) state_optimizer.iterations tier_1[0] # ### Predictor estimation predictor_optimizer = keras.optimizers.SGD(learning_rate=1) old_predictor = predictor_1.trainable_variables[0].numpy() old_predictor with tf.GradientTape() as tape: predicted_1 = predictor_1(tier_1) loss = keras.losses.mean_squared_error(tier_0, predicted_1) grad = tape.gradient(loss, predictor_1.trainable_variables) weight_step = predictor_optimizer.apply_gradients(zip(grad, predictor_1.trainable_variables)) predictor_1.trainable_variables old_predictor eps_pred = tf.math.reduce_mean(tf.losses.mean_squared_error(predictor_1.trainable_variables, old_predictor)) eps_pred < 1e-5 # This ought to have given us some intuition for how this process should work, so we will now single out the different steps. Clearly, the tiers should simply be Tensorflow variables. 
import sklearn.decomposition as decomp pca = decomp.PCA(10).fit(art) pca_weights = pca.components_[:4] pca.explained_variance_ final_weights[0].shape import scipy angles = scipy.linalg.subspace_angles(final_weights[0].T, pca_weights.T) angles import numpy as np np.rad2deg(angles) # The algorithm indeed seems to find the PCA subspace. I am not sure why the accuracy is so low, though. (I had increased the iterations and nothing changed.) normalized_weights = final_weights[0] / np.linalg.norm(final_weights[0], 2, axis=1, keepdims=True) contribs = np.matmul(np.linalg.inv(pca.components_).T, normalized_weights.T) import lazytools_sflippl as lazytools lazytools.matrix_heatmap(contribs, pole=0) # This speaks a pretty clear language though. np.linalg.norm(contribs, 2, axis=1) # Same precision, however. This implies reasonable confidence that all errors are numerical, and we can look at an optimization of that after the first implementation. # ### First implementation # We first set up this small model: import predicode as pc import tensorflow as tf import tensorflow.keras as keras hpc = pc.Hierarchical() hpc.add_tier(shape=(10, )) hpc.add_tier(shape=(4, ), name='latent_layer') hpc.summary() hpc.predictor = keras.Sequential() hpc.predictor.add( keras.layers.Dense(10, input_shape=(4, ), use_bias=False, dtype=tf.float32) ) hpc.state_prediction = pc.StatePrediction() hpc.summary() hpc._is_ready() dataset = pc.decaying_multi_normal(dimensions = 10, size = 100).astype('float32') state_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.SGD()) predictor_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.SGD(), eps=1e-7) regimen = pc.ExpectationMaximizationRegimen( state_regimen=state_regimen, predictor_regimen=predictor_regimen ) hpc.train(dataset, regimen, batch_size=50) import sklearn.decomposition as decomp pca = decomp.PCA(10).fit(dataset) import scipy angles = scipy.linalg.subspace_angles(hpc.predictor.get_weights()[0].T, pca.components_[:4].T) angles import numpy as np 
normalized_weights = hpc.predictor.get_weights()[0] / np.linalg.norm(hpc.predictor.get_weights()[0], 2, axis=1, keepdims=True) contribs = np.matmul(np.linalg.inv(pca.components_).T, normalized_weights.T) import lazytools_sflippl as lazytools lazytools.matrix_heatmap(contribs, pole=0) hpc.tier(1).shape lazytools.matrix_heatmap(hpc.tier(1).numpy(), pole=0) # (With a full batch, it works even better, but this demonstrates that even distributed estimations are fairly close to the optimal estimation.) # ### Better optimization algorithm hpc = pc.Hierarchical() hpc.add_tier(shape=(10, )) hpc.add_tier(shape=(4, ), name='latent_layer') hpc.predictor = keras.Sequential() hpc.predictor.add( keras.layers.Dense(10, input_shape=(4, ), use_bias=False, dtype=tf.float32) ) hpc.state_prediction = pc.StatePrediction() hpc.summary() state_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam()) predictor_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam(), eps=1e-7) regimen = pc.ExpectationMaximizationRegimen( state_regimen=state_regimen, predictor_regimen=predictor_regimen ) hpc.train(dataset, regimen) import scipy angles = scipy.linalg.subspace_angles(hpc.predictor.get_weights()[0].T, pca.components_[:4].T) angles normalized_weights = hpc.predictor.get_weights()[0] / np.linalg.norm(hpc.predictor.get_weights()[0], 2, axis=1, keepdims=True) contribs = np.matmul(np.linalg.inv(pca.components_).T, normalized_weights.T) lazytools.matrix_heatmap(contribs, pole=0) # ### Nonlinear model nl_hpc = pc.Hierarchical() nl_hpc.add_tier(shape=(10, )) nl_hpc.add_tier(shape=(4, ), name='latent_layer') nl_hpc.predictor = keras.Sequential([ keras.layers.Dense(10, input_shape=(4, )), keras.layers.Activation('relu'), keras.layers.Dense(10) ]) nl_hpc.state_prediction = pc.StatePrediction() nl_hpc.summary() state_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam()) predictor_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam(), eps=1e-3) regimen = 
pc.ExpectationMaximizationRegimen( state_regimen=state_regimen, predictor_regimen=predictor_regimen ) nl_hpc.train(dataset, regimen) regimen.n_steps nl_hpc.predictor.get_weights() # ### Several layers hpc.add_tier(shape=(2, )) hpc.predictor = keras.Sequential([ keras.layers.Dense(4, input_shape=(2, )) ]) hpc.state_prediction = pc.StatePrediction() hpc.summary() state_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam()) predictor_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam(), eps=1e-7) regimen = pc.ExpectationMaximizationRegimen( state_regimen=state_regimen, predictor_regimen=predictor_regimen ) hpc.train(dataset, regimen) print(lazytools.matrix_heatmap(hpc.tier(2).numpy(), pole=0)) print(lazytools.matrix_heatmap(hpc.tier(1).numpy(), pole=0)) print(lazytools.matrix_heatmap(hpc.predictor.get_weights()[0], pole=0)) state_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam()) predictor_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam(), eps=1e-7) regimen = pc.ExpectationMaximizationRegimen( state_regimen=state_regimen, predictor_regimen=predictor_regimen ) hpc.train({'tier_2': np.array([[1, 0]])}, regimen) print(hpc.tier(0)) # ## Adding in metrics # Essentially, there are three levels of detail with which to keep track of metrics: per EM step, for every predictor estimation step, for every state estimation step, and for all steps. By having the regimen keep track of which is which, we shouldn't have to worry about which is which within the regimen. We will first, however, just add in EM step metrics that work somewhat more easily. 
# %load_ext tensorboard import predicode as pc import tensorflow as tf import tensorflow.keras as keras hpc = pc.Hierarchical() hpc.add_tier(shape=(10, )) hpc.add_tier(shape=(4, ), name='latent_layer') hpc.predictor = keras.Sequential() hpc.predictor.add( keras.layers.Dense(10, input_shape=(4, ), use_bias=False, dtype=tf.float32) ) hpc.state_prediction = pc.StatePrediction() metric = keras.metrics.MeanAbsoluteError() state_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam()) predictor_regimen = pc.SimpleOptimizerRegimen(keras.optimizers.Adam(), eps=1e-7) regimen = pc.ExpectationMaximizationRegimen( state_regimen=state_regimen, predictor_regimen=predictor_regimen ) import datetime datetime.datetime.now() logdir = 'log/{}'.format(datetime.datetime.now()) logdir summary_writer = tf.summary.create_file_writer(logdir) dataset = pc.decaying_multi_normal(dimensions = 10, size = 100).astype('float32') with summary_writer.as_default(): hpc.train(dataset, regimen, metrics = [metric]) # %tensorboard --logdir log # It works!
explore/.ipynb_checkpoints/04-hierarchical-1-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + [markdown] colab_type="text" id="mobuI3G45ErZ"
# # Practice with `numpy`!

# + [markdown] colab_type="text" id="Rpm_nWJS5Era"
# **Remember:**
# * Numpy provides a bunch of useful tools for performing calculations
# * You access numpy functions by calling `np.function_name`

# + [markdown] colab_type="text" id="sBUu7s_y5Erb"
# First, __import numpy__. Remember to use the nickname!

# + colab={} colab_type="code" id="IGoKRnre5Erc"
# load numpy

# + [markdown] colab_type="text" id="8uWX2a2V5Erh"
# Use numpy to __create a list of numbers from 0 through 99.__ We'll use a function called __`arange`.__

# + colab={} colab_type="code" id="G_MXNx5p5Eri"
# Use np.arange to generate an array of numbers and assign it to a variable called numbers

# print the array to see what it looks like

# + [markdown] colab_type="text" id="nH-CXAFQ5Erq"
# Now use numpy's function called __`zeros`__ to __create another empty array__ of the same size:

# + colab={} colab_type="code" id="ncgGZbUF5Err"
# Create an empty array with np.zeros and assign it to a variable

# print it

# + [markdown] colab_type="text" id="xuyYmpD85Erw"
# Now try manipulating the arrays using basic math:

# + colab={} colab_type="code" id="ULqMaAjJ5Erx"
# Add the numbers array to itself

# + colab={} colab_type="code" id="IwDpDsPF5Er1"
# Multiply the numbers array by itself

# + [markdown] colab_type="text" id="_DeFP4ZI5Er4"
# Note that when you perform a math operation on an array, __it will often perform that operation on each item in that array.__ It's convenient that __we don't have to loop through all the values__ to apply the math operation to every item in the array.
# + [markdown] colab_type="text" id="8Yj_UpHS5Er5" # __You can find information about the size of an array by using `.shape`.__ Note that `.shape` is an *attribute* of array -- a special variable that belongs to every *array object*. Try it out: # # *HINT*: Because `.shape` is not a function you don't need to use parentheses. # + colab={} colab_type="code" id="ZngPKKEF5Er6" # Use shape to view the size of your array # + [markdown] colab_type="text" id="Xw5LqGwy5Er_" # `.shape` gave us just one number, because our array has only 1 dimension. Later we'll see what it looks like for arrays with more than 1 dimension. # # Numpy also allows you to create 2D arrays, like with lists. We can __use the `method` called `reshape` to change an 1-dimensional `array` into a 2-dimensional `array`.__ `reshape` takes two arguments: the number of rows and the number of columns. Try turning one of your arrays into a 2D array using `reshape`. # + colab={} colab_type="code" id="0ERWCQTF5EsA" # Reshape one of your arrays into a 2D array # print out the array # + [markdown] colab_type="text" id="lI5QaVc55EsF" # Now the `.shape` of your array should be changed, __try printing it out below:__ # + colab={} colab_type="code" id="fQkocNpj5EsG" # Print the shape of your new array # + [markdown] colab_type="text" id="EvaJPpWi5EsL" # Now we will try a couple of numpy's math functions! # + colab={} colab_type="code" id="zjvGvWo85EsM" # try using np.sum to add the items in a list together # + colab={} colab_type="code" id="OF-6as2_8TtN" # try squaring the value of each item in your array # + [markdown] colab_type="text" id="TkxL0-u09yCZ" # __Try converting the `numbers` array to an array of floats__ using the method called `astype`: # + colab={} colab_type="code" id="b3IcIXzQ8UB8" # Convert the array into an array of floats # + [markdown] colab_type="text" id="Ac9Wobn95EsQ" # Nice job! You just practiced: # # * Using `numpy` to perform `array` operations. # * Performing math with `numpy`.
Practices/Practice20_Numpy_Intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Séance 2 # + [markdown] tags=[] # ## Objectifs de la séance # # - Etude de système d’ordre 2 # - Analyse de la réponse indicielle # - Influence de zeta sur les caractéristiques temporelles : dépassement, temps de réponse, … # - Lien entre ces caractéristiques et la position des pôles # + tags=["remove-cell"] from IPython.display import Image, display, Markdown from control import matlab as ml # Python Control Systems Toolbox (compatibility with MATLAB) import numpy as np # Library to manipulate array and matrix import matplotlib.pyplot as plt # Library to create figures and plots import math # Library to be able to do some mathematical operations import ReguLabFct as rlf # Library useful for the laboratory of regulation of HELMo Gramme # - # ## Réponse indicielle du $2^{nd}$ ordre générale paramétrée # # Soit un système du second degré : # $ # G(p)=\frac{K}{(\frac{p}{\omega_n})^2+\frac{2\zeta}{\omega_n}p+1} # $ # (cfr page 3-6) # # Analysez pour les valeurs caractéristiques de zeta : 0.1 0.2 0.3 0.42 0.5 0.6 0.7 0.8 1 1.41 2 6 10 les réponses typiques cfr la page 3-9. # Créez un script qui permette de tracer de manière itérative les différentes fonctions dont les différents zeta seront encodés dans une liste. 
# + tags=["hide-cell"] K=1 wn=1 # Définition des coefficients d'amortissement zeta_values =[0.1, 0.2, 0.3, 0.4, 0.42, 0.5, 0.6, 0.7, 0.8, 1, 1.41, 2, 6, 10] # Création de la fenêtre à une taille donnée fig = plt.figure("Steps",figsize=(20,10)) ax = fig.subplots() # Réponse indicielle # Calcule les différentes fonctions de transfert ainsi que la réponse indicielle for zeta in zeta_values: G = ml.tf(K, [(1/wn)**2, 2*zeta/wn, 1]) # Calcul de la fonction de transfert rlf.step_(G, NameOfFigure='Steps', sysName=zeta); # Traçage de la réponse indicielle # - # ### Commentaires sur les courbes # # ..................................................... # # ..................................................... # # :::{toggle} # - **Si $\zeta < 1$ :** Il y a un dépassement et celui-ci est d'autant plus grand que $\zeta$ est faible. # # - **Si $\zeta \geqslant 1$ :** Il n'y a pas de dépassement (système suramorti). # # *(cf. page 3-9)* # ::: # ### Dépassement # # Visualisez la valeur du dépassement pour les différentes valeurs de zeta et regardez l’influence de zeta sur la valeur du dépassement sur l’abaque de la page 3-11 : D ....... si zeta ...... # # :::{toggle} # D $\searrow$ si $\zeta \nearrow$ # ::: # # Observez que les échelles de cet abaque sont logarithmiques. Par exemple, observez la valeur du dépassement lorsque zeta=0.5, sur la figure et indiquez clairement la position de ce point sur l’abaque. # # ![Abaque_Depassement](../images/Abaque_Depassement.png) # # Vérifiez par calcul : # # $ # D_p=100*e^{-\frac{k\pi\zeta}{\sqrt{1-\zeta^2}}} # $ # # ::::{toggle} # # :::{image} ../images/2_Depassement_exemple.png # :alt: Depassement_exemple # :align: center # ::: # # &nbsp; # # Par calcul: $D_p=16.3\%$ # :::: # ### Pseudo pulsation # # Observez l’influence du coefficient d'amortissement sur la pulsation d’oscillation $\omega_d$ : $\omega_d$ ... si $\zeta$ ... 
# # :::{toggle} # $\omega_d \nearrow$ si $\zeta \searrow$ # # - **Si $\zeta < 1$ :** Il y a des oscillations et celles-ci sont d'autant plus grandes que $\zeta$ est faible. # # - **Si $\zeta \geqslant 1$ :** Il n'y a pas d'oscillations. # # *(cf. page 3-6 à 3-7)* # ::: # ### Temps de réponse à 5% # # Visualisez la valeur du temps de réponse à 5 % pour les différentes valeurs de $\zeta$ et regardez l’influence de $\zeta$ sur l’abaque de la page 3-12. # # ![Abaque_tr5](../images/Abaque_tr5.png) # # Expliquez l’allure particulière de cette courbe : # - si $\zeta$ > 0.7: ... # - en $\zeta$ = 0.7: ... # - si $\zeta$ < 0.7: « escaliers » dans la partie gauche car ... # # ::::{toggle} # - si $\zeta$ > 0.7: comportement d'un système d'ordre 1. # - en $\zeta$ = 0.7: le système possède le $t_{r_{5\%}}$ le plus faible possible => **système le plus rapide** à se stabiliser possible. # - si $\zeta$ < 0.7: « escaliers » dans la partie gauche car il y a des oscillations qui font sortir le système de la plage des 5% de tolérance autour de la valeur atteinte en régime établi. # # :::{note} # Le nombre de "marches" équivaut au nombre de dépassements des valeurs limites 0.95 et 1.05. # ::: # :::: # # Pourquoi le $t_{r_{5\%}}$ est-il "identique" pour un $\zeta$ de 0,6 ou 0,5 ? # # :::{toggle} # Le $t_{r_{5\%}}$ est "identique" pour un $\zeta$ de 0,6 ou 0,5 car ils se trouvent sur la même "marche". 
# ::: # + tags=["hide-cell"] K=1 wn=1 # Définition des coefficients d'amortissement zeta_values =[0.1, 0.2, 0.3, 0.4, 0.42, 0.5, 0.6, 0.7, 0.8, 1, 1.41, 2, 6, 10] # Création de la fenêtre à une taille donnée fig = plt.figure("Steps",figsize=(20,10)) ax = fig.subplots() # Réponse indicielle # Calcule les différentes fonctions de transfert ainsi que la réponse indicielle for zeta in zeta_values: G = ml.tf(K, [(1/wn)**2, 2*zeta/wn, 1]) # Calcul de la fonction de transfert rlf.stepWithInfo(G, NameOfFigure='Steps', sysName=zeta, plot_rt=False, plot_overshoot=False, plot_DCGain=False); # Traçage de la réponse indicielle avec juste le point du tr5% # - # ### Position des pôles # # Vous pouvez faire le lien entre l’allure de la réponse indicielle et la position des pôles dans le plan complexe tracé par la fonction `pzmap(h)`. # + # Création de la fenêtre à une taille donnée fig = plt.figure("Pole Zero Map",figsize=(20,10)) ax = fig.subplots() # Pour pouvoir boucler sur lnombrees couleurs standards afin de directement jouer avec les couleurs des graphes from itertools import cycle prop_cycle = plt.rcParams['axes.prop_cycle'] colors = cycle(prop_cycle.by_key()['color']) # Trace les poles et zéros pour chacune des fonctions de transfert stockées dans 'g' for zeta in zeta_values: G = ml.tf(K, [(1/wn)**2, 2*zeta/wn, 1]) # Calcul de la fonction de transfert poles, zeros = rlf.pzmap(G, NameOfFigure="Pole Zero Map", sysName=zeta, color=next(colors)); plt.plot([poles.real[0], 0], [0, 0], 'k:'); # Ligne horizontale passant par 0 pour marquer l'axe des imaginaires # - # Pour chaque valeur de $\zeta$, la fonction `pzmap` vous trace 2 croix pour indiquer les 2 pôles du système dans le plan complexe : # - Pour $\zeta=10$, les pôles sont en : ......... et ......... # # C’est le pôle en ......... qui domine dans le tracé de la réponse indicielle car ......... # # - Si $\zeta$ $\searrow$ jusque $\zeta=1$, les pôles se déplacent ......... # - Si $\zeta<1$, les pôles deviennent ......... 
# - Si $\zeta$ $\searrow$ encore, les pôles se déplacent ......... # # :::{toggle} # - Pour $\zeta=10$, les pôles sont en : -19.9 et -0.05 # # C’est le pôle en -0.05 qui domine dans le tracé de la réponse indicielle car $\tau=\frac{-1}{p}$. La constante de temps est donc plus grande. # # - Si $\zeta$ $\searrow$ jusque $\zeta=1$, les pôles se déplacent sur l'axe des réels (vers la gauche pour les pôles dominants, vers la droite pour les autres). # # - Si $\zeta<1$, les pôles deviennent complexes conjugués. # # - Si $\zeta$ $\searrow$ encore, les pôles se déplacent sur l'axe des imaginaires et l'axe des réels. La valeur absolue de la partie imaginaire (*oscillations*) $\nearrow$, et la valeur absolue de la partie réelle (*amortissement*) $\searrow$. # ::: # # Observez l’influence des pôles réels par rapport aux pôles complexes : ... # # :::{toggle} # Si les pôles du système sont réels alors le système se comporte comme un système du $1^{er}$ ordre $\Rightarrow$ Pas d'oscillations. # # Si par contre, ses pôles sont complexes, le système oscille. # ::: # # &nbsp; # # et si $\zeta<0$ : ... # # :::{toggle} # Si $\zeta<0$, le système est instable! # ::: # # &nbsp; # ### Exercice 1 # # Soit un système asservi à retour unitaire décrit par la fonction de transfert : # # $$ # H_{BF}(s) = \frac{8}{s^2+s+10} # $$ # # #### Etude de la réponse indicielle num = 8 den = [1, 1, 10] H_BF = ml.tf(num, den) rlf.step_(H_BF); # La fonction présente 2 pôles complexes conjugués et les constantes associées à sa réponse sont: w, zetas, poles = ml.damp(H_BF); # Vous pouvez le vérifier en identifiant à la représentation canonique (p. 3-6) : ... 
# # :::{toggle} # 1°) Mise sour forme canonique: # # $$ # H_{BF}(s) = \frac{8}{s^2+s+10} = \frac{0.8}{\frac{s^2}{10}+\frac{s}{10}+\mathbf{1}} # $$ # # 2°) Identification: # # $$ # \begin{alignat*}{2} # \left\{ \begin{aligned} # \begin{array}{ll} # \frac{2\zeta}{\omega_n} = \frac{1}{10} \\ # \frac{1}{\omega_n^2} = \frac{1}{10} # \end{array} # \end{aligned}\right. # \Rightarrow # \left\{ \begin{aligned} # \begin{array}{ll} # \zeta = \frac{\sqrt{10}}{20}=0.16 \\ # \omega_n = \sqrt{10} = 3.16 # \end{array} # \end{aligned}\right. # \end{alignat*}\ # $$ # ::: # # &nbsp; # # &nbsp; # Déterminez les caractéristiques de la réponse par les abaques : # - le dépassement ($D_\%$) = ............... # - le temps de réponse à 5% ($t_{r_{5\%}}$) = ............... # # :::{toggle} # - le dépassement ($D_\%$) $\approx$ 60% # - le temps de réponse à 5% ($t_{r_{5\%}}$) $\approx \frac{16}{3.16} = 5$ s # ::: # et comparez avec les caractéristiques fournies par `stepWithInfo` : # - la valeur atteinte en régime établi (DCGain) = ............... # - l’erreur statique ($\varepsilon_0$) = ............... # - le temps de réponse à 5% ($t_{r_{5\%}}$) = ............... # - le temps de montée ($t_m$) = ............... # - le dépassement ($D_\%$) = ............... # - l’instant du premier pic ($t_{peak}$) = ............... # + tags=["hide-cell"] info = rlf.stepWithInfo(H_BF) rlf.printInfo(info) print("Erreur statique :", (1-info.DCGain)*100, "%") # + [markdown] tags=[] # ### Exercice 2 # # L’application 2 d’asservissement de position est décrite par le schéma bloc : # # :::{image} ../images/2_Appli2.png # :alt: Appli2 # :align: center # ::: # # où A représente un correcteur proportionnel. 
# # # La fonction de transfert en BF de l’application d’asservissement de vitesse est : # # $$ # H_{BF}(p) = \frac{\frac{A}{A+1}}{1+\frac{10,875*10^{-3}}{A+1}p+\frac{1,36*10^{-6}}{A+1}p²} # $$ # # Utilisez les abaques ou équations pour : # # - Prédire l’allure de la réponse indicielle du système si A=99 : # # &nbsp; # # &nbsp; # # &nbsp; # # - dépassement : ............... # # - temps de réponse : ............... # # :::{toggle} # $$ # \begin{alignat*}{2} # \left\{ \begin{aligned} # \begin{array}{ll} # \frac{2\zeta}{\omega_n} = \frac{10.875*10^{-3}}{A+1} \\ # \frac{1}{\omega_n^2} = \frac{1.36*10^{-6}}{A+1} # \end{array} # \end{aligned}\right. # \Rightarrow # \left\{ \begin{aligned} # \begin{array}{ll} # \zeta = \frac{10.875*10^{-3}}{100}*\frac{8574.93}{2} = 0.466 \\ # \omega_n = \sqrt{\frac{100}{1.36*10^{-6}}} = 8574.93 rad/s # \end{array} # \end{aligned}\right. # \end{alignat*}\ # $$ # # - dépassement : # $ # D_p=100*e^{-\frac{\pi*0.466}{\sqrt{1-0.466^2}}} = 19.09\% # $ # - temps de réponse à 5%: # $ # \frac{5.3}{8574.93} = 618 µs # $ # ::: # # Vérifiez en traçant les réponses via python. # + tags=["hide-cell"] A = 99 num = A/(A+1) den = [1.36e-6/(A+1), 10.875e-3/(A+1), 1] H_BF = ml.tf(num, den) info = rlf.stepWithInfo(H_BF) print("Dépassement :", info.Overshoot, "%") print("Temps de réponse à 5% :", info.SettlingTime, "s") # - # - Déterminer le correcteur A si on veut un dépassement de 40% : # # :::{toggle} # $$ # D_p=100*e^{-\frac{k\pi\zeta}{\sqrt{1-\zeta^2}}} \Rightarrow 40 = 100*e^{-\frac{k\pi\zeta}{\sqrt{1-\zeta^2}}} \Rightarrow \zeta=0.28 # $$ # # $$ # \begin{alignat*}{2} # \left\{ \begin{aligned} # \begin{array}{ll} # \frac{2*0.28}{\omega_n} = \frac{10.875*10^{-3}}{A+1} \\ # \frac{1}{\omega_n^2} = \frac{1.36*10^{-6}}{A+1} # \end{array} # \end{aligned}\right. # \Rightarrow # \left\{ \begin{aligned} # \begin{array}{ll} # A = 276 \\ # \omega_n = 14279 rad/s # \end{array} # \end{aligned}\right. 
# \end{alignat*}\ # $$ # ::: # # # &nbsp; # # &nbsp; # # &nbsp; # # &nbsp; # # Vérifiez en traçant les réponses via python. # + tags=["hide-cell"] A = 276 num = A/(A+1) den = [1.36e-6/(A+1), 10.875e-3/(A+1), 1] H_BF = ml.tf(num, den) info = rlf.stepWithInfo(H_BF) print("Dépassement :", info.Overshoot, "%")
_build/jupyter_execute/regu/LaboSeance2.ipynb
# !pip install matplotlib
import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim


# +
class Args:
    """Hyper-parameters for the neural-ODE demo (stand-in for argparse flags)."""
    method = 'dopri5'  # choices=['dopri5', 'adams']
    data_size = 1000   # number of time points in the ground-truth trajectory
    batch_time = 10    # length (in time steps) of each training sub-trajectory
    batch_size = 20    # number of sub-trajectories per batch
    niters = 2000
    test_freq = 20     # evaluate/visualize every `test_freq` iterations
    viz = True
    gpu = True         # whether to use CUDA device 0 when available
    adjoint = False    # use the memory-efficient adjoint ODE solver

args = Args()
# -

if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint

# BUG FIX: the original built 'cuda:' + str(args.gpu) which, with gpu=True,
# produces the invalid device string 'cuda:True' and raises a RuntimeError on
# any CUDA-enabled machine. Use ordinal 0 and honor the gpu flag instead.
# NOTE(review): `device` is defined but no tensor below is moved onto it —
# the demo effectively runs on CPU; confirm whether .to(device) calls were intended.
device = torch.device('cuda:0' if torch.cuda.is_available() and args.gpu else 'cpu')

# Ground-truth system: y' = (y^3) @ true_A, integrated from y(0) = (2, 0).
true_y0 = torch.tensor([[2., 0.]])
t = torch.linspace(0., 25., args.data_size)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]])


class Lambda(nn.Module):
    """The true dynamics used to generate the reference trajectory."""

    def forward(self, t, y):
        return torch.mm(y**3, true_A)


with torch.no_grad():
    true_y = odeint(Lambda(), true_y0, t, method='dopri5')


def get_batch():
    """Sample a batch of short sub-trajectories from the true trajectory.

    Returns:
        batch_y0: (M, D) initial states, M = args.batch_size
        batch_t:  (T,) shared time grid, T = args.batch_time
        batch_y:  (T, M, D) target states along each sub-trajectory
    """
    s = torch.from_numpy(np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
    batch_y0 = true_y[s]  # (M, D)
    batch_t = t[:args.batch_time]  # (T)
    batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0)  # (T, M, D)
    return batch_y0, batch_t, batch_y


def makedirs(dirname):
    """Create `dirname` if it does not already exist."""
    # exist_ok avoids the check-then-create race of the original version.
    os.makedirs(dirname, exist_ok=True)


def visualize(true_y, pred_y, odefunc, itr):
    """Plot trajectories, phase portrait and the learned vector field.

    Saves the figure to png/<itr>.png when args.viz is set.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(12, 4), facecolor='white')
    ax_traj = fig.add_subplot(131, frameon=False)
    ax_phase = fig.add_subplot(132, frameon=False)
    ax_vecfield = fig.add_subplot(133, frameon=False)
    makedirs('png')
    if args.viz:
        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x,y')
        ax_traj.plot(t.numpy(), true_y.numpy()[:, 0, 0], t.numpy(), true_y.numpy()[:, 0, 1], 'g-')
        ax_traj.plot(t.numpy(), pred_y.numpy()[:, 0, 0], '--', t.numpy(), pred_y.numpy()[:, 0, 1], 'b--')
        ax_traj.set_xlim(t.min(), t.max())
        ax_traj.set_ylim(-2, 2)
        ax_traj.legend()

        ax_phase.cla()
        ax_phase.set_title('Phase Portrait')
        ax_phase.set_xlabel('x')
        ax_phase.set_ylabel('y')
        ax_phase.plot(true_y.numpy()[:, 0, 0], true_y.numpy()[:, 0, 1], 'g-')
        ax_phase.plot(pred_y.numpy()[:, 0, 0], pred_y.numpy()[:, 0, 1], 'b--')
        ax_phase.set_xlim(-2, 2)
        ax_phase.set_ylim(-2, 2)

        ax_vecfield.cla()
        ax_vecfield.set_title('Learned Vector Field')
        ax_vecfield.set_xlabel('x')
        ax_vecfield.set_ylabel('y')
        # Evaluate the learned dynamics on a 21x21 grid and normalize for a
        # direction-only stream plot.
        y, x = np.mgrid[-2:2:21j, -2:2:21j]
        dydt = odefunc(0, torch.Tensor(np.stack([x, y], -1).reshape(21 * 21, 2))).cpu().detach().numpy()
        mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
        dydt = (dydt / mag)
        dydt = dydt.reshape(21, 21, 2)
        ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
        ax_vecfield.set_xlim(-2, 2)
        ax_vecfield.set_ylim(-2, 2)

        fig.tight_layout()
        plt.savefig('png/{:03d}'.format(itr))
        plt.draw()
        plt.pause(0.001);


class ODEFunc(nn.Module):
    """Learned dynamics: a small MLP applied to y**3 (mirrors the true system)."""

    def __init__(self):
        super(ODEFunc, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(2, 50),
            nn.Tanh(),
            nn.Linear(50, 2),
        )
        for m in self.net.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0, std=0.1)
                nn.init.constant_(m.bias, val=0)

    def forward(self, t, y):
        return self.net(y**3)


class RunningAverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        self.val = None
        self.avg = 0

    def update(self, val):
        # First update seeds the average; afterwards use exponential smoothing.
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.avg * self.momentum + val * (1 - self.momentum)
        self.val = val


# +
# %matplotlib inline
ii = 0

func = ODEFunc()
optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
end = time.time()

time_meter = RunningAverageMeter(0.97)
loss_meter = RunningAverageMeter(0.97)

for itr in range(1, args.niters + 1):
    optimizer.zero_grad()
    batch_y0, batch_t, batch_y = get_batch()
    pred_y = odeint(func, batch_y0, batch_t)
    loss = torch.mean(torch.abs(pred_y - batch_y))
    loss.backward()
    optimizer.step()

    time_meter.update(time.time() - end)
    loss_meter.update(loss.item())

    if itr % args.test_freq == 0:
        with torch.no_grad():
            pred_y = odeint(func, true_y0, t)
            loss = torch.mean(torch.abs(pred_y - true_y))
            print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
            visualize(true_y, pred_y, func, ii)
            ii += 1

    # Reset the iteration timer every loop (not only on eval iterations) so
    # time_meter measures per-iteration wall time, as in the upstream example.
    end = time.time()
examples/ode_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/souravgopal25/Data-Structure-Algorithm-Nanodegree/blob/master/DFS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="uhZNxNAK-S_e" colab_type="text"
# # DFS
#
# Depth-first traversals (pre-, post- and in-order) of a binary tree,
# implemented both with an explicit stack and with recursion.

# + id="JMlfRIIDyVRp" colab_type="code" colab={}
# this code makes the tree that we'll traverse
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, value=None):
        self.value = value
        self.left = None
        self.right = None

    def set_value(self, value):
        self.value = value

    def get_value(self):
        return self.value

    def set_left_child(self, left):
        self.left = left

    def set_right_child(self, right):
        self.right = right

    def get_left_child(self):
        return self.left

    def get_right_child(self):
        return self.right

    def has_left_child(self):
        # `is not None` rather than `!= None`: identity, not equality.
        return self.left is not None

    def has_right_child(self):
        return self.right is not None

    # define __repr__ to decide what a print statement displays for a Node object
    def __repr__(self):
        return f"Node({self.get_value()})"

    def __str__(self):
        return f"Node({self.get_value()})"


class Tree():
    """A binary tree identified by its root node."""

    def __init__(self, value=None):
        self.root = Node(value)

    def get_root(self):
        return self.root


# + id="zwgAk_jFybt_" colab_type="code" colab={}
# create a tree and add some nodes
tree = Tree("apple")
tree.get_root().set_left_child(Node("banana"))
tree.get_root().set_right_child(Node("cherry"))
tree.get_root().get_left_child().set_left_child(Node("dates"))


# + id="0KVpV0ELyjuL" colab_type="code" colab={}
# Let's define a stack to help keep track of the tree nodes
class Stack():
    """A minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.list = list()

    def push(self, value):
        self.list.append(value)

    def pop(self):
        return self.list.pop()

    def top(self):
        # Peek at the top element without removing it; None when empty.
        if len(self.list) > 0:
            return self.list[-1]
        else:
            return None

    def is_empty(self):
        return len(self.list) == 0

    def __repr__(self):
        if len(self.list) > 0:
            s = "<top of stack>\n_________________\n"
            s += "\n_________________\n".join([str(item) for item in self.list[::-1]])
            s += "\n_________________\n<bottom of stack>"
            return s
        else:
            return "<stack is empty>"


# + id="_JXlVzxVypLz" colab_type="code"
# check Stack
stack = Stack()
stack.push("apple")
stack.push("banana")
stack.push("cherry")
stack.push("dates")
print(stack.pop())
print("\n")
print(stack)

# + id="MH46DBKtytfj" colab_type="code"
visit_order = list()
stack = Stack()

# start at the root node, visit it and then add it to the stack
node = tree.get_root()
visit_order.append(node.get_value())
stack.push(node)

print(f"""
visit_order {visit_order}
stack:
{stack}
""")


# + id="qFAL8r1T5K-x" colab_type="code" colab={}
class State(object):
    """Traversal bookkeeping for one node: which children were already taken."""

    def __init__(self, node):
        self.node = node
        self.visited_left = False
        self.visited_right = False

    def get_node(self):
        return self.node

    def get_visited_left(self):
        return self.visited_left

    def get_visited_right(self):
        return self.visited_right

    def set_visited_left(self):
        self.visited_left = True

    def set_visited_right(self):
        self.visited_right = True

    def __repr__(self):
        s = f"""{self.node}
visited_left: {self.visited_left}
visited_right: {self.visited_right}
"""
        return s


# + id="sI8kprXx5u3j" colab_type="code" colab={}
def pre_order_with_stack(tree, debug_mode=False):
    """Pre-order traversal using an explicit stack instead of recursion.

    Each node on the stack carries a State recording which of its children
    have already been descended into. Returns the list of visited values.
    """
    visit_order = list()
    stack = Stack()
    node = tree.get_root()
    visit_order.append(node.get_value())
    state = State(node)
    stack.push(state)
    count = 0
    while node:
        if debug_mode:
            print(f"""
loop count: {count}
current node: {node}
stack:
{stack}
""")
        count += 1
        if node.has_left_child() and not state.get_visited_left():
            # descend left: visit the child and remember it on the stack
            state.set_visited_left()
            node = node.get_left_child()
            visit_order.append(node.get_value())
            state = State(node)
            stack.push(state)
        elif node.has_right_child() and not state.get_visited_right():
            # descend right
            state.set_visited_right()
            node = node.get_right_child()
            visit_order.append(node.get_value())
            state = State(node)
            # BUG FIX: the right child's state must be pushed too (mirroring
            # the left branch); otherwise, after descending into the right
            # child's left subtree, popping returns to the grandparent and the
            # right child's own right subtree is silently skipped.
            stack.push(state)
        else:
            # both subtrees done: backtrack to the parent (if any)
            stack.pop()
            if not stack.is_empty():
                state = stack.top()
                node = state.get_node()
            else:
                node = None
    if debug_mode:
        print(f"""
loop count: {count}
current node: {node}
stack:
{stack}
""")
    return visit_order


# + id="_jLfjxGa5zTi" colab_type="code"
# check pre-order traversal
pre_order_with_stack(tree, debug_mode=True)


# + [markdown] id="SfIHt12J9mtf" colab_type="text"
# # USING RECURSION
# pre order

# + id="_6V1ZAvy8iTI" colab_type="code" colab={}
def pre_order(tree):
    """Recursive pre-order traversal: node, left subtree, right subtree."""
    visit_order = list()

    def traverse(node):
        if node:
            # visit the node
            visit_order.append(node.get_value())
            # traverse left subtree
            traverse(node.get_left_child())
            # traverse right subtree
            traverse(node.get_right_child())

    traverse(tree.get_root())
    return visit_order


# + id="qXan3ebN9weW" colab_type="code"
pre_order(tree)


# + [markdown] id="Hrno5Fs-9w6D" colab_type="text"
# # Post order

# + id="r1PKrEDs95dk" colab_type="code" colab={}
def post_order(tree):
    """Recursive post-order traversal: left subtree, right subtree, node."""
    visit_order = list()

    def traverse(node):
        if node:
            # traverse left subtree
            traverse(node.get_left_child())
            # traverse right subtree
            traverse(node.get_right_child())
            # visit node
            visit_order.append(node.get_value())

    traverse(tree.get_root())
    return visit_order


# + id="lVmrSQHJ99a9" colab_type="code"
post_order(tree)


# + [markdown] id="FrShRgMR-Cnj" colab_type="text"
# # IN ORDER

# + id="X-OPkHjY-JFs" colab_type="code" colab={}
def in_order(tree):
    """Recursive in-order traversal: left subtree, node, right subtree."""
    visit_order = list()

    def traverse(node):
        if node:
            # traverse left subtree
            traverse(node.get_left_child())
            # visit node
            visit_order.append(node.get_value())
            # traverse right sub-tree
            traverse(node.get_right_child())

    traverse(tree.get_root())
    return visit_order


# + id="bUd3X-dC-Lcc" colab_type="code"
in_order(tree)
DFS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ``` # The MIT License (MIT) # # Copyright (c) 2020 NVIDIA # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#
# ```

# # Model Conversion to NVIDIA TensorRT & Inference
#
# Walkthrough a generic pipeline for:
# - Converting a Pytorch network to TensorRT (via ONNX)
#     - With and without dynamic batch
# - Steps for Running inference using a TensorRT engine in Python
#
# #### Environment
# All steps executed using **NGC Pytorch Docker (v 20.06)**
# * [GPU Dashboards](https://medium.com/rapids-ai/gpu-dashboards-in-jupyter-lab-757b17aae1d5) installed using
# ```
# pip install jupyterlab-nvdashboard
# jupyter labextension install jupyterlab-nvdashboard
# ```
# * [Netron](https://github.com/lutzroeder/netron) for network visualization

import numpy as np
import torch
import json
from pathlib import Path
from PIL import Image
import os

# #### Dataset
# * [NIH ChestXray 14 dataset](https://www.nih.gov/news-events/news-releases/nih-clinical-center-provides-one-largest-publicly-available-chest-x-ray-datasets-scientific-community)
#     * 112,120 frontal view chest xrays from 30,805 unique subjects
#     * X-ray images are available as bitmaps extracted from the DICOM file

# +
from lib.dataset_utils import *


def get_batched_image_input(bs):
    """Load the CXR14 test split and return TTA-expanded input batches.

    Parameters
    ----------
    bs : int
        Batch size for the test loader.

    Returns
    -------
    (all_inputs, all_targets, labels) where each input batch is flattened
    from (bs, n_crops, c, h, w) to (bs * n_crops, c, h, w) for TTA.
    """
    all_inputs = []
    all_targets = []
    dataset_json = 'CXR14_dataset.json'
    with open(dataset_json) as json_file:
        data = json.load(json_file)
    # Using Test Time Augmentation
    test_loader = get_test_loader_TTA(data, batch_size=bs)
    for i, (input, target) in enumerate(test_loader):
        bs, n_crops, c, h, w = input.size()
        input = input.view(-1, c, h, w)
        all_inputs.append(input)
        all_targets.append(target)
    return all_inputs, all_targets, data['labels'][0]


def process_results_cxr(out):
    """Average TTA crops, softmax, and print the top-5 labels per image.

    NOTE(review): hard-codes 8 images x 10 crops — assumes bs == 8 and the
    TTA loader's crop count; confirm against get_test_loader_TTA.
    """
    if isinstance(out, (np.ndarray, list)):
        out = torch.cat([torch.tensor(np.atleast_2d(im)) for im in out])
    output = torch.nn.functional.softmax(out, dim=1).cpu()
    output = output.view(8, 10, -1).mean(1).numpy()
    top5 = np.argsort(output)[:, -5:][:, ::-1]
    for i in range(8):
        t5 = top5[i]
        op = output[i]
        txt = 'Image {} -- '.format(i+1)
        for c, v in zip(t5, op[t5]):
            txt = txt + f"{labels[str(c)]}: {100*v:.1f}% "
        print(txt)
# -

bs = 8
im_tensor, targets, labels = get_batched_image_input(bs)
show_images(im_tensor[0], targets[0], labels, bs)

# ### Model

# + tags=[]
from torch import nn
import torchvision.models as models

# Using a DenseNet 121 with 14 classes
num_classes = 14
model = models.densenet121(pretrained=True)
num_features = model.classifier.in_features
model.classifier = nn.Sequential(nn.Linear(num_features, num_classes))

# + tags=[]
resume = './BestModel_CXR_DenseNet121_224.pth.tar'
print("=> loading checkpoint '{}'".format(resume), )
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage.cuda(0))

# +
from collections import OrderedDict

state_dict = checkpoint['state_dict']


def adjust_statedict(model_state, multiGPU=False):
    """Rename checkpoint keys so they match the current model definition.

    Strips the DataParallel ``module.`` prefix when *multiGPU* is True and
    maps ``classifier`` to ``classifier.0`` (the Linear inside Sequential).
    """
    new_state_dict = OrderedDict()
    for k, v in model_state.items():
        name = k
        if multiGPU:
            name = k[7:]  # remove `module.`
        if name.find('classifier') != -1:
            name = name.replace('classifier', 'classifier.0')
        new_state_dict[name] = v
    return new_state_dict


model.load_state_dict(adjust_statedict(state_dict, multiGPU=False))

# + tags=["outputPrepend"]
input_names = ['input']
output_names = ['output']
dynamic_axes = {'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}}

# A dummy input is needed to generate the ONNX model
# Using NCHW format
dummy_input = torch.randn(8, 3, 224, 224)

# No dynamic batch dimension
torch.onnx.export(model, dummy_input, "model_weights.onnx", verbose=False,
                  input_names=input_names, output_names=output_names)

# With dynamic batch dimension
torch.onnx.export(model, dummy_input, "model_weights_dynamicbatch.onnx", verbose=False,
                  input_names=input_names, output_names=output_names,
                  dynamic_axes=dynamic_axes)

# +
import onnx

# Load the ONNX Model
model_onnx = onnx.load("model_weights_dynamicbatch.onnx")

# # Check that the IR is well formed
onnx.checker.check_model(model_onnx)

# # Print a human readable representation of the graph
# onnx.helper.printable_graph(model_onnx.graph)
# -

# !ls -lh *.onnx

# **Check the ONNX network in Netron (at http://localhost:8080/)**

# ### Test inference

# ### Pytorch

model.eval()
model.cuda()
with torch.no_grad():
    outputs_pyt = model(im_tensor[0].cuda())
process_results_cxr(outputs_pyt)

# ### ONNX

# +
# Run ONNX inference
import onnxruntime as ort

ort_session = ort.InferenceSession('model_weights_dynamicbatch.onnx')
# 'input' is the layer name specified earlier
outputs_onnx = ort_session.run(None, {'input': im_tensor[0].numpy()})
process_results_cxr(outputs_onnx)
# -

# # TensorRT

# ### Key Concepts

# **Network Definition**: interface provides methods for the application to specify the definition of a network.
#
# **Builder Configuration**: interface specifies details for creating an engine.
#
# **Builder**: allows the creation of an optimized engine from a network definition and a builder configuration.
#
# **Engine**: allows the application to execute inference. It supports synchronous and asynchronous execution, profiling, and enumeration and querying of the bindings for the engine inputs and outputs.
#
# An **Optimization profile** specifies constraints on dynamic dimensions. It describes a range of dimensions for each network input and the dimensions that the auto-tuner should use for optimization. When using runtime dimensions, you must create at least one optimization profile at build time. Two profiles can specify disjoint or overlapping ranges.
#
# For example, one profile might specify a minimum size of [3,100,200], a maximum size of [3,200,300], and optimization dimensions of [3,150,250] while another profile might specify min, max and optimization dimensions of [3,200,100], [3,300,400], and [3,250,250].
#
# > Note, if your TensorRT engine has fixed batch size and input shapes, then you **do not** need to worry about optimization profile(s)

# ### Note
#
# *Implicit batch* networks were previously the standard up until TensorRT 6. They supported variable batch size through the use of the builder.maxBatchSize attribute, but do not support variable shapes for any of the other dimensions.
#
# *Explicit Batch* networks introduced a few changes to the TensorRT API.
# First, inference is instead performed using execute_v2(bindings) and execute_async_v2(bindings, stream) , which no longer require a batch_size argument since it is taken from the context binding dimensions explicitly.

# ## Convert Model to TRT
# Sample commands
#
# Simple network with no dynamic batch dimension
# ```
# trtexec --explicitBatch \
#         --onnx=model_weights.onnx \
#         --saveEngine=trt_export.engine
# ```
#
# With dynamic batch
# ```
# trtexec --explicitBatch \
#         --onnx=model_weights_dynamicbatch.onnx \
#         --minShapes=input:1x3x256x256 \
#         --optShapes=input:4x3x256x256 \
#         --maxShapes=input:8x3x256x256 \
#         --shapes=input:4x3x256x256 \
#         --saveEngine=trt_export_dynamicbatch.engine
# ```
#
# For generating and engine in FP16
# ```
# trtexec --explicitBatch \
#         --onnx=model_weights_dynamicbatch.onnx \
#         --minShapes=input:1x3x256x256 \
#         --optShapes=input:4x3x256x256 \
#         --maxShapes=input:8x3x256x256 \
#         --shapes=input:4x3x256x256 \
#         --saveEngine=trt_export_dynamicbatch_fp16.engine \
#         --fp16
# ```

# + tags=[]
cmd = 'trtexec --explicitBatch --onnx=model_weights_dynamicbatch.onnx --minShapes=input:64x3x224x224 --optShapes=input:80x3x224x224 --maxShapes=input:96x3x224x224 --shapes=input:80x3x224x224 --saveEngine=trt_export_dynamicbatch_bs80.engine'
print(cmd)
os.system(cmd)

# + tags=[]
cmd = 'python onnx_to_tensorrt7.py --model=model_weights.onnx --output=model_trt_weights.engine'
print(cmd)
# os.system(cmd)
# -

# #### Inspect Engine

import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit


# +
def load_engine(filename):
    """Deserialize a TensorRT engine from *filename*."""
    # Load serialized engine file into memory
    with open(filename, 'rb') as f, trt.Runtime(trt.Logger(trt.Logger.WARNING)) as runtime:
        return runtime.deserialize_cuda_engine(f.read())


def inspect_engine(engine):
    """Pretty-print every binding of every optimization profile in *engine*."""
    profile_meta = {}
    num_bindings_per_profile = engine.num_bindings // engine.num_optimization_profiles
    for profile_index in range(engine.num_optimization_profiles):
        start_binding = profile_index * num_bindings_per_profile
        end_binding = start_binding + num_bindings_per_profile
        binding_meta = {}
        for binding_index in range(start_binding, end_binding):
            key = "Binding {}".format(binding_index)
            binding_meta[key] = {
                "profile": profile_index,
                "binding_index": binding_index,
                "binding_shape": engine.get_binding_shape(binding_index),
                "binding_dtype": engine.get_binding_dtype(binding_index),
                "binding_name": engine.get_binding_name(binding_index),
            }
            if engine.binding_is_input(binding_index):
                binding_meta[key]["binding_type"] = "INPUT"
                binding_meta[key]["profile_shape"] = engine.get_profile_shape(profile_index, binding_index)
            else:
                binding_meta[key]["binding_type"] = "OUTPUT"
        profile_meta["Profile {}".format(profile_index)] = binding_meta
    from pprint import pprint
    pprint(profile_meta)


# + tags=[]
engine_path = './trt_export_dynamicbatch_bs80.engine'

# Load a serialized engine into memory
engine = load_engine(engine_path)

# View various attributes of engine
inspect_engine(engine)

# + tags=[]
# Create context, this can be re-used
context = engine.create_execution_context()

# Profile 0 (first profile) is used by default
context.active_optimization_profile = 0
print("Active Optimization Profile: {}".format(context.active_optimization_profile))
# -

# #### Binding Indices and Shape


def get_binding_idxs(engine, profile_index):
    """Return (input_binding_idxs, output_binding_idxs) for one profile."""
    # Calculate start/end binding indices for current context's profile
    num_bindings_per_profile = engine.num_bindings // engine.num_optimization_profiles
    start_binding = profile_index * num_bindings_per_profile
    end_binding = start_binding + num_bindings_per_profile
    print("Engine/Binding Metadata")
    print("\tNumber of optimization profiles: {}".format(engine.num_optimization_profiles))
    print("\tNumber of bindings per profile: {}".format(num_bindings_per_profile))
    print("\tFirst binding for profile {}: {}".format(profile_index, start_binding))
    print("\tLast binding for profile {}: {}".format(profile_index, end_binding-1))

    # Separate input and output binding indices for convenience
    input_binding_idxs = []
    output_binding_idxs = []
    for binding_index in range(start_binding, end_binding):
        if engine.binding_is_input(binding_index):
            input_binding_idxs.append(binding_index)
        else:
            output_binding_idxs.append(binding_index)
    return input_binding_idxs, output_binding_idxs


# + tags=[]
# These binding_idxs can change if either the context or the
# active_optimization_profile are changed
input_binding_idxs, output_binding_idxs = get_binding_idxs(engine, context.active_optimization_profile)
input_names = [engine.get_binding_name(binding_idx) for binding_idx in input_binding_idxs]


# +
def is_dynamic(shape):
    """True when any dimension is unresolved (None or negative)."""
    return any(dim is None or dim < 0 for dim in shape)


def get_image_inputs(engine, context, input_binding_idxs, seed=42):
    """Generate one random host buffer per input binding.

    For dynamic inputs, the profile's kOPT shape is used.
    """
    # Input data for inference
    host_inputs = []
    print("Generating Random Inputs")
    print("\tUsing random seed: {}".format(seed))
    np.random.seed(seed)
    for binding_index in input_binding_idxs:
        # If input shape is fixed, we'll just use it
        input_shape = context.get_binding_shape(binding_index)
        input_name = engine.get_binding_name(binding_index)
        print("\tInput [{}] shape: {}".format(input_name, input_shape))
        # If input shape is dynamic, we'll arbitrarily select one of the
        # the min/opt/max shapes from our optimization profile
        if is_dynamic(input_shape):
            profile_index = context.active_optimization_profile
            profile_shapes = engine.get_profile_shape(profile_index, binding_index)
            print("\tProfile Shapes for [{}]: [kMIN {} | kOPT {} | kMAX {}]".format(input_name, *profile_shapes))
            # 0=min, 1=opt, 2=max, or choose any shape, (min <= shape <= max)
            input_shape = profile_shapes[1]
            print("\tInput [{}] shape was dynamic, setting inference shape to {}".format(input_name, input_shape))
        host_inputs.append(np.random.random(input_shape).astype(np.float32))
    return host_inputs


# + tags=[]
# Generate random inputs based on profile shapes
host_inputs = get_image_inputs(engine, context, input_binding_idxs)

# + tags=[]
# Allocate device memory for inputs. This can be easily re-used if the
# input shapes don't change
device_inputs = [cuda.mem_alloc(h_input.nbytes) for h_input in host_inputs]

# Copy host inputs to device, this needs to be done for each new input
for h_input, d_input in zip(host_inputs, device_inputs):
    cuda.memcpy_htod(d_input, h_input)

print("Input Metadata")
print("\tNumber of Inputs: {}".format(len(input_binding_idxs)))
print("\tInput Bindings for Profile {}: {}".format(context.active_optimization_profile, input_binding_idxs))
print("\tInput names: {}".format(input_names))
print("\tInput shapes: {}".format([inp.shape for inp in host_inputs]))
# -


def setup_binding_shapes(engine, context, host_inputs, input_binding_idxs, output_binding_idxs,
                         has_input_shape_changed=False):
    """Bind input shapes and allocate host/device output buffers."""
    # Explicitly set the dynamic input shapes, so the dynamic output
    # shapes can be computed internally
    for host_input, binding_index in zip(host_inputs, input_binding_idxs):
        context.set_binding_shape(binding_index, host_input.shape)
    assert(context.all_binding_shapes_specified)

    host_outputs = [None] * len(output_binding_idxs)
    device_outputs = [None] * len(output_binding_idxs)
    for i, binding_index in enumerate(output_binding_idxs):
        output_shape = context.get_binding_shape(binding_index)
        # print("output_shape", output_shape)
        # Allocate buffers to hold output results after copying back to host
        host_outputs[i] = np.empty(output_shape, dtype=np.float32)
        # Allocate output buffers on device
        device_outputs[i] = cuda.mem_alloc(host_outputs[i].nbytes)
    return host_outputs, device_outputs


# + tags=[]
# This needs to be called everytime your input shapes change
# If your inputs are always the same shape (same batch size, etc.),
# then you will only need to call this once
host_outputs, device_outputs = setup_binding_shapes(
    engine, context, host_inputs, input_binding_idxs, output_binding_idxs,
)
output_names = [engine.get_binding_name(binding_idx) for binding_idx in output_binding_idxs]

print("Output Metadata")
print("\tNumber of Outputs: {}".format(len(output_binding_idxs)))
print("\tOutput names: {}".format(output_names))
print("\tOutput shapes: {}".format([out.shape for out in host_outputs]))
print("\tOutput Bindings for Profile {}: {}".format(context.active_optimization_profile, output_binding_idxs))
# -

# ### Run TRT inference!

# + tags=[]
# Bindings are a list of device pointers for inputs and outputs
bindings = device_inputs + device_outputs

# Inference
context.execute_v2(bindings)

# Copy outputs back to host to view results
for h_output, d_output in zip(host_outputs, device_outputs):
    cuda.memcpy_dtoh(h_output, d_output)

# View outputs
print("Inference Outputs Shape:", host_outputs[0].shape)


# + tags=[]
def get_trt_inference_outputs(context, host_inputs, host_outputs, device_inputs, device_outputs):
    """Copy *host_inputs* to the device, run the context, and return outputs.

    Accepts either a single numpy array or a list of numpy arrays.
    """
    # BUG FIX: the original condition
    #   if not isinstance(host_inputs, list) and isinstance(host_inputs, np.ndarray)
    # raised for an actual list of arrays, contradicting its own error message.
    if isinstance(host_inputs, np.ndarray):
        host_inputs = [host_inputs]
    elif not isinstance(host_inputs, list):
        raise ValueError('host inputs must be list of numpy-nd arrays')

    for h_input, d_input in zip(host_inputs, device_inputs):
        # Copy new inputs
        cuda.memcpy_htod(d_input, h_input)

    bindings = device_inputs + device_outputs
    context.execute_v2(bindings)

    for h_output, d_output in zip(host_outputs, device_outputs):
        cuda.memcpy_dtoh(h_output, d_output)  # Get outputs from device
    return host_outputs
# -

result_trt_fp32 = get_trt_inference_outputs(context, im_tensor[0].numpy(), host_outputs, device_inputs, device_outputs)
process_results_cxr(result_trt_fp32)

del context
del engine

# ### FP16 inference

cmd = 'trtexec --explicitBatch --onnx=model_weights_dynamicbatch.onnx --minShapes=input:64x3x224x224 --optShapes=input:80x3x224x224 --maxShapes=input:96x3x224x224 --shapes=input:80x3x224x224 --saveEngine=trt_export_dynamicbatch_bs80_fp16.engine --fp16'
# print(cmd)

# !ls -lh *.engine

# +
engine = load_engine('trt_export_dynamicbatch_bs80_fp16.engine')
inspect_engine(engine)
context = engine.create_execution_context()
context.active_optimization_profile = 0
input_binding_idxs, output_binding_idxs = get_binding_idxs(engine, context.active_optimization_profile)
host_inputs = get_image_inputs(engine, context, input_binding_idxs)
device_inputs = [cuda.mem_alloc(h_input.nbytes) for h_input in host_inputs]
for h_input, d_input in zip(host_inputs, device_inputs):
    cuda.memcpy_htod(d_input, h_input)
# Placeholder for output buffers, will resize as necessary
host_outputs, device_outputs = setup_binding_shapes(engine, context, host_inputs, input_binding_idxs, output_binding_idxs)
bindings = device_inputs + device_outputs
# -

result_trt_fp16 = get_trt_inference_outputs(context, im_tensor[0].numpy(), host_outputs, device_inputs, device_outputs)
process_results_cxr(result_trt_fp16)

# ### Profiling
#
# `nsys profile -y 0 -w true -t cudnn,cuda,osrt,nvtx -o Report.qdrep python run_inference.py`
NB1_PyTorch_TRT_ONNX_Inference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
from pandas import Series, DataFrame
import pandas as pd

# A small labelled Series to demonstrate dropping entries.
ser1 = Series(np.arange(3), index=list('abc'))
ser1

# drop() returns a new Series without the requested label;
# the original Series is left untouched.
ser1.drop('b')

# A 3x3 DataFrame with city rows and named columns.
dframe1 = DataFrame(np.arange(9).reshape((3, 3)),
                    index=['SF', 'LA', 'NY'],
                    columns=['pop', 'size', 'year'])
dframe1

# Dropping a row label — again non-destructive.
dframe1.drop(labels='LA')

# Capture the row-dropped frame in a new variable.
dframe2 = dframe1.drop(labels='LA')
dframe2

# Columns are dropped the same way; drop(columns=...) is the
# keyword form of drop(..., axis=1).
dframe1.drop(columns='year')
Lecture 18 - Drop Entry.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Muzzamal-Hameed/Deep-Learning-Models/blob/main/Captcha_Recognizer_using_Deep_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="vayfkF1N73TS"
from google.colab import drive

# + colab={"base_uri": "https://localhost:8080/"} id="z75RMHqo8VhF" outputId="a7cfd5e3-bdf4-480a-b874-9ddbfe4838fc"
drive.mount('/content/gdrive')

# + id="p-7_3EzG8W-y"
# ! mkdir ~/.kaggle

# + id="J0ehLslk8eu4"
# ! cp kaggle.json ~/.kaggle/

# + id="REtEnw1j9ToJ"
# ! chmod 600 ~/.kaggle/kaggle.json

# + colab={"base_uri": "https://localhost:8080/"} id="w1ClwQ_h9W04" outputId="8f7317a5-fc99-4b46-e1bf-d85bdf74e763"
# ! kaggle datasets download -d fournierp/captcha-version-2-images

# + colab={"base_uri": "https://localhost:8080/"} id="JpRiE8C_9Zw3" outputId="db806f0f-22eb-42da-b1cb-20681a5d3ab7"
# ! unzip captcha-version-2-images

# + id="roZgcq8uC3Rl"
import matplotlib.pyplot as plt
import matplotlib.image as img
import os
import numpy as np
import pandas as pd
from tensorflow import keras as k
from keras.utils import np_utils
import cv2
from PIL import Image
from keras.preprocessing.image import img_to_array, ImageDataGenerator
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Activation, MaxPooling2D, Flatten, Conv2D, Dropout, Dense

# + id="xKj06_cBhybz"
# Build the dataset: each 5-character captcha image is cleaned up
# (adaptive threshold -> morphological close -> dilate -> blur) and then
# sliced into 5 fixed-position character crops; the label of each crop is
# the matching character of the filename (e.g. "42nxy.png").
X = []
y = []
for dirname, _, filenames in os.walk('/content/samples'):
    for filename in filenames:
        path = os.path.join(dirname, filename)
        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 145, 0)
        kernel = np.ones((5, 5), np.uint8)
        image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
        kernel = np.ones((2, 2), np.uint8)
        image = cv2.dilate(image, kernel, iterations=1)
        image = cv2.GaussianBlur(image, (5, 5), 0)
        # Fixed column windows for the 5 characters of the captcha.
        x = [image[10:50, 30:50], image[10:50, 50:70],
             image[10:50, 70:90], image[10:50, 90:110], image[10:50, 110:130]]
        for i in range(5):
            X.append(img_to_array(Image.fromarray(x[i])))
            y.append(path[len(path)-9:len(path)-4][i])
X = np.array(X)
y = np.array(y)

# + id="NmIxtnwoiTR6"
# Normalise pixels, one-hot encode the character labels and split.
X = X.astype('float32')
X /= 255
y_le = LabelEncoder().fit_transform(y)
y_ohe = OneHotEncoder(sparse=False).fit_transform(y_le.reshape(len(y_le), 1))
X_train, X_test, y_train, y_test = train_test_split(X, y_ohe, test_size=0.2, random_state=42)
row, col = X.shape[1], X.shape[2]
categories = y_ohe.shape[1]
# Map encoded class index back to the original character.
info = {y_le[i]: y[i] for i in range(len(y))}

# + colab={"base_uri": "https://localhost:8080/"} id="iQG0JJ-uiiwN" outputId="a541c527-0400-4f29-a193-2380776d6e30"
# Four Conv/ReLU/MaxPool stages followed by a dense classifier head.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same', input_shape=(row, col, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(categories))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

# + colab={"base_uri": "https://localhost:8080/"} id="hC9PsRkGisE9" outputId="5bb350ae-0955-4903-9d5f-f54078c1530c"
batch_size = 128
epochs = 100
history = model.fit(X_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(X_test, y_test),
                    shuffle=True)


# + id="cE9ENohUi1Gz"
def pred(img_path):
    """Display a captcha image, predict its 5 characters and print
    the prediction alongside the ground-truth taken from the filename.
    """
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.show()
    # BUG FIX: the original applied cv2.adaptiveThreshold twice here,
    # while the training loop applies it only once — the inference
    # preprocessing must match the training preprocessing exactly.
    image = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 145, 0)
    kernel = np.ones((5, 5), np.uint8)
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((2, 2), np.uint8)
    image = cv2.dilate(image, kernel, iterations=1)
    image = cv2.GaussianBlur(image, (5, 5), 0)
    # Same fixed character windows as used to build the training set.
    x = [image[10:50, 30:50], image[10:50, 50:70],
         image[10:50, 70:90], image[10:50, 90:110], image[10:50, 110:130]]
    X_pred = []
    for i in range(5):
        X_pred.append(img_to_array(Image.fromarray(x[i])))
    X_pred = np.array(X_pred)
    X_pred /= 255.0
    y_pred = model.predict(X_pred)
    y_pred = np.argmax(y_pred, axis=1)
    print('Prediction: ', end='')
    for res in y_pred:
        print(info[res], end='')
    print('\nActual: ', img_path[len(img_path)-9:len(img_path)-4])


# + colab={"base_uri": "https://localhost:8080/", "height": 150} id="sLdrZWGZi_Pz" outputId="9b2c69a5-46ac-4740-bbe6-b8b36dd49a7d"
pred('/content/samples/42nxy.png')

# + id="Fs6s6rJcjGzs"
model.save('captcha_recognizer.h5')

# + id="8IhGuS1UjHbZ"
Captcha_Recognizer_using_Deep_learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Cross Industry Standart Process for Data Mining
#
# In this section we are going to analyse the Boston AIRBNB Data Set. We are looking to help people with cleaning datasets and how to deal with some specific data. In this post we are going to cover all the subjects below:
#
# 1. Business Understanding: Understand the problem
# 2. Data Understanding: Understand the data to solve your problem
# 3. Data Preparation: Organizing it in a way that will allow us to answer our questions of interests.
# 4. Modeling: Building a predictive model
# 5. Evaluation
# 6. Insights

# first we import the libraries which are going to be used
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error

# then we open the suggested datasets
df_calendar = pd.read_csv("calendar.csv")
df_reviews = pd.read_csv("reviews.csv")
df_listings = pd.read_csv("listings.csv")

# # 1. Business Understanding
#
# Understanding the Problem.
#
# We are solving two questions:
# * Price, there are any correlations that we can made, in order to predict it?

# # 2. Data Understanding
#
# Understand the data to solve your problem.
#
# Now that we have stabilished ours goals, we need to undestand our data in order to get there

# ### Let's take a look at the data?

df_calendar.head()  # looking calendar

df_listings.head(2)  # looking listings

df_reviews.head()  # looking reviews

print("We have {} rows(cases) and {} features. Our primary goal is to look into these {} features.".format(df_listings.shape[0], df_listings.shape[1], df_listings.shape[1]))

# As we have a lot of features, let's select some columns that may have correlation with our goal (predict price).

view_df_listings = pd.DataFrame(df_listings.dtypes, columns=["type"])
view_df_listings[:50]  # lets take a look at the first 50 columns, wich ones we are going to pick

view_df_listings[50:]

# For these two ranges of columns, after tooking a look i selected the following columns to be part of our data:
#
# * host_response_time
# * host_response_rate
# * host_acceptance_rate
# * host_is_superhost
# * host_total_listings_count
# * latitude
# * longitude
# * property_type
# * room_type
# * accommodates
# * bathrooms
# * bedrooms
# * beds
# * bed_type
# * amenities
# * square_feet
# * security_deposit
# * cleaning_fee
# * guests_included
# * extra_people
# * review_scores_rating
# * review_scores_accuracy
# * review_scores_cleanliness
# * review_scores_checkin
# * review_scores_communication
# * review_scores_location
# * review_scores_value
# * cancellation_policy

# +
df_base = df_listings[["host_response_time", "host_response_rate", "host_acceptance_rate",
                       "host_is_superhost", "host_total_listings_count", "latitude", "longitude",
                       "property_type", "room_type", "accommodates", "bathrooms", "bedrooms", "beds",
                       "bed_type", "amenities", "square_feet", "security_deposit", "cleaning_fee",
                       "guests_included", "extra_people", "review_scores_rating", "review_scores_accuracy",
                       "review_scores_cleanliness", "review_scores_checkin", "review_scores_communication",
                       "review_scores_location", "review_scores_value", "cancellation_policy", "price"]]

print("Now we have {} features".format(df_base.shape[1]))
# -

no_nulls = set(df_base.columns[df_base.isnull().mean() == 0])  # selecting only columns fully completed
print('''Of all selected features only the following columns are fully completed, without any NANs.
{}
'''.format(no_nulls))

print(df_base.shape)
df_base.isnull().sum()

# ### Data Conclusion
#
# Conclusions:
#
# 1. As we can see **square_feet** is the columns with most NAN values, so we are going to drop it.
# 2. For **bathrooms, bedrooms and beds**, as we understand that they exist, but for some reason didn't show up. We will fill it with the int(mean or average)
# 3. Column **Security_deposit** all the NAN values will be replaced by 0
# 4. All the **review_scores** with NAN values will be dropped, because we can't put a value that it doesn't exist
# 5. Column **cleaning_fee** all the NAN values will be replaced by 0
# 6. All the **host_response_time**,**host_is_superhost** NANs we are going to be dropped because they are categorical features
# 7. All the **host_response_rate**,**host_acceptance_rate** NANs we are going to be filled with their mean/average.
# 8. We will also drop the rows of **property_type** that have NANs values

# # Data Preparation
#
# Organizing it in a way that will allow us to answer our questions of interests

df_base_t1 = df_base.drop(["square_feet"], axis=1)  # 1.Dropping square_feet

# +
# symbols
df_base_t1['host_response_rate'] = df_base_t1['host_response_rate'].str.rstrip('%')  # removing symbol
df_base_t1['host_acceptance_rate'] = df_base_t1['host_acceptance_rate'].str.rstrip('%')  # removing symbol
df_base_t1['security_deposit'] = df_base_t1['security_deposit'].str.lstrip('$')  # removing symbol
df_base_t1['cleaning_fee'] = df_base_t1['cleaning_fee'].str.lstrip('$')  # removing symbol
df_base_t1['extra_people'] = df_base_t1['extra_people'].str.lstrip('$')  # removing symbol
df_base_t1['price'] = df_base_t1['price'].str.lstrip('$')  # removing symbol

# BUG FIX: the original filled bathrooms/bedrooms/beds with
# int(df_base.square_feet.mean()) — the mean of *square_feet* — even though
# the stated intent (conclusion 2 and the inline comments) was each column's
# own mean. square_feet's mean is in the hundreds, which would have produced
# absurd counts like "800 beds".
mean_1 = {"cleaning_fee": 0,  # replacing 0
          "security_deposit": "0.00",  # replacing 0
          "bathrooms": int(df_base["bathrooms"].mean()),  # mean of the column
          "bedrooms": int(df_base["bedrooms"].mean()),  # mean of the column
          "beds": int(df_base["beds"].mean())  # mean of the column
          }  # dict
df_base_t1 = df_base_t1.fillna(value=mean_1)  # 2, 3 an 5.Filling with the mean() of the columns
# -

# 4 an 6. Dropping NANs on the review_scores
df_base_t1.dropna(subset=['review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness',
                          'review_scores_checkin', 'review_scores_communication', 'review_scores_location',
                          'review_scores_value', 'host_response_time', 'host_is_superhost'], inplace=True)

# +
# 7. Replacing categorical cloumns, or simbols
print(df_base_t1.host_response_time.value_counts())

# creating dict for this column
host_response_time = {"within an hour": 1,
                      "within a few hours": 2,
                      "within a day": 3,
                      "a few days or more": 4}
df_base_t1 = df_base_t1.replace({"host_response_time": host_response_time})  # replacing categorical

# +
# types
# BUG FIX (conclusion 7): the documented "fill host_response_rate /
# host_acceptance_rate NANs with their mean" step was never implemented, and
# .astype(int) crashes on any remaining NaN. Fill with the column mean before
# casting, as the Data Conclusion section specifies.
for _rate_col in ['host_response_rate', 'host_acceptance_rate']:
    _rate = pd.to_numeric(df_base_t1[_rate_col])
    df_base_t1[_rate_col] = _rate.fillna(_rate.mean()).astype(int)  # converting type

df_base_t1['cleaning_fee'] = df_base_t1['cleaning_fee'].astype(float)  # converting type
df_base_t1['extra_people'] = df_base_t1['extra_people'].astype(float)  # converting type

# symbols inteference
# converting price type
df_base_t1.price = df_base_t1.price.str.replace(",", "")
df_base_t1['price'] = df_base_t1['price'].astype(float)  # converting type

# converting security_deposit type
df_base_t1.security_deposit = df_base_t1.security_deposit.str.replace(",", "")
df_base_t1['security_deposit'] = df_base_t1['security_deposit'].astype(float)  # converting type

# +
# ## categorical
# creating dict for this column
host_is_superhost = {"f": 0, "t": 1}
df_base_t1 = df_base_t1.replace({"host_is_superhost": host_is_superhost})  # replacing categorical

# creating dict for this column
property_type = {"Apartment": 1,
                 "House": 2,
                 "Condominium": 3,
                 "Townhouse": 4,
                 "Bed & Breakfast": 4,
                 "Loft": 4,
                 "Boat": 4,
                 "Other": 4,
                 "Villa": 4,
                 "Dorm": 4,
                 "Guesthouse": 4,
                 "Entire Floor": 4}
df_base_t1 = df_base_t1.replace({"property_type": property_type})  # replacing categorical

# creating dict for this column
room_type = {"Entire home/apt": 1,
             "Private room": 2,
             "Shared room": 3}
df_base_t1 = df_base_t1.replace({"room_type": room_type})  # replacing categorical

# creating dict for this column
bed_type = {"Real Bed": 1,
            "Futon": 2,
            "Airbed": 3,
            "Pull-out Sofa": 4,
            "Couch": 5}
df_base_t1 = df_base_t1.replace({"bed_type": bed_type})  # replacing categorical

# creating dict for this column
cancellation_policy = {"strict": 1,
                       "moderate": 2,
                       "flexible": 3,
                       "super_strict_30": 4}
df_base_t1 = df_base_t1.replace({"cancellation_policy": cancellation_policy})  # replacing categorical
# -

# ### Finished? Not yet...
#
# All we did until now it was putting in numbers what words were showing us.
#
# **property_type**: As there were a lot of property types, i creted four categories (1=Apartment, 2=House, 3=Condominium, 4=Others) each one specified in the dict above. Even though we had more types of property, i consider only four, because it could increase our dimensionality of data.
#
# I did this to almost every variable, but one is missing, **amenities**, i left this one because there were a lot of categories inside of it. And even if i use get_dummy variables, it could increase the dimensionality of the data, making the predictions get worse in the future. So i thought we could count how many good amenities are in the house, so that now we transform this information in a number.

df_base_t1 = df_base_t1.reset_index(drop=True)  # reseting the index

# colunas 3,7,8,13,14,26
# simbolos 15 = security_deposit
# 16 = cleaning_fee
# 18 = extra_people
# 27 = price

# +
# Count the amenities per listing instead of one-hot encoding them.
amenities = []
for i in df_base_t1.amenities:
    a = i.split(",")
    amenities.append(len(a))
df_base_t1["amenities"] = pd.DataFrame(amenities)
# -

print('''Almost forgot that we still have some values missing...
''', df_base_t1.isnull().sum() > 0, df_base_t1.shape)

# +
df_base_t1 = df_base_t1.dropna()
print('''Let's take a look now...
''', df_base_t1.isnull().sum() > 0, df_base_t1.shape)
# -

# ### Let's take a look in our Database
#
# Let's just take a look and try to see if we can get some insides of the data.

df_base_t1.describe()

df_base_t1.hist(figsize=(15, 15))

# ### And what about the correlations?
#
# There is a good graphic that will help us to see what columns are correlated to each other.

fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(df_base_t1.corr(), annot=True, fmt=".2f", ax=ax)

# # Modeling, Evaluating and Insights

# +
X = df_base_t1[["host_response_time", "host_response_rate", "host_acceptance_rate",
                "host_is_superhost", "host_total_listings_count", "latitude", "longitude",
                "property_type", "room_type", "accommodates", "bathrooms", "bedrooms", "beds",
                "bed_type", "amenities", "security_deposit", "cleaning_fee",
                "guests_included", "extra_people", "review_scores_rating", "review_scores_accuracy",
                "review_scores_cleanliness", "review_scores_checkin", "review_scores_communication",
                "review_scores_location", "review_scores_value", "cancellation_policy"]]
y = df_base_t1[["price"]]

# +
# Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.30, random_state=42)

# NOTE(review): LinearRegression(normalize=True) is deprecated/removed in
# scikit-learn >= 1.2 — kept here for behaviour parity; migrate to a
# StandardScaler pipeline when upgrading sklearn.
lm_model = LinearRegression(normalize=True)  # Instantiate
lm_model.fit(X_train, y_train)  # Fit

# Predict and score the model
y_test_preds = lm_model.predict(X_test)
"The r-squared score for your model was {} on {} values.".format(r2_score(y_test, y_test_preds), len(y_test))
Post.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark (Local) # language: python # name: pyspark_local # --- # # Run a simple Spark job # ## Jupyter notebook # Code to execute in Pyspark(local) and Pyspark(YARN): rdd = sc.textFile("/data/students/bigdata_internet/lab1/lab1_dataset.txt") #load the data from file to rdd fields_rdd = rdd.map(lambda line: line.split(",")) # transform and split each line of the file into a list of string pair value_rdd = fields_rdd.map(lambda l: int(l[1])) #transfrom the age string into a list of int value_sum = value_rdd.reduce(lambda v1, v2: v1+v2)# use reduce to calcute the sum of the age print("The sum is:", value_sum) #print the sum # A1: The printed value is 46, the propose is shown behine the code above # # A2: The inpurt file is on HDFS file system # # A3: It can still print the value of 46, but it takes more time to print after the execution. The Cluster manager interface is a graphcal interface, operations a based on click but not command line. # ## Execute in a pyspark shell # Command to execute in the terminal: # # pyspark --master local --deploy-mode client # # The result is the same as ex1.1 # # A: "--master local" means the spark executors is executed on the local machine(here is the gateway) # # "--deploy-mode client" menans the driver is deployed locally(on the gateway) # # ## Create a Spark script and run it from the command line # A: They are not in the same file system. My script is locate on the gateway file system. The file is located on the HDFS file system. 
# # Play with HDFS # The command to copy the the HDFS file to local file system is: # # s287513@jupyter-s287513:~$ hdfs dfs -copyToLocal /data/students/bigdata_internet/lab1/lab1_dataset.txt # # A1: No, it will not automatically affect the HDFS file.The command to copy a new create file form gateway to HDFS file system is : # # s287513@jupyter-s287513:~$ hdfs dfs -copyFromLocal ex2.txt # # A2: The complete path of my file in HDFS is: hdfs://BigDataHA/user/s287513/ex2.txt # # On the gateway local file system is: /home/students/s287513/ex2.txt # # # Run a Job rdd = sc.textFile("/data/students/bigdata_internet/lab1/lab1_dataset.txt") #load the data from file to rdd age = rdd.map(lambda age_r: (age_r.split(',')[0],int(age_r.split(',')[1]))) # transform and split each line of the file into a list of string and int pair total =age.reduceByKey(lambda v1,v2:v1+v2) # use the reduceByKey command to get the total age of each name total.saveAsTextFile("lab1/ex3.txt") # save the result in the HDFS in a .txt file # A1 : As the code above shows # # A2 : The output folder is a folder with the name we save the result(here is ex3.txt), and containing different parts of the result. The reason why there are multiple parts in the folder is that when spark is running, it split the input data into different partitions, and the result of each partion will be saved as a part of the final result. # # Bonus Task rdd = sc.textFile("/data/students/bigdata_internet/lab1/lab1_dataset.txt") #load the data from file to rdd age = rdd.map(lambda age_r: (age_r.split(',')[0],age_r.split(',')[1])) # transform and split each line of the file into a list of string pair total =age.reduceByKey(lambda v1,v2:v1+":"+v2) # use the reduceByKey command to join the age of each name total.saveAsTextFile("lab1/ex4.txt") # save the result in the HDFS in a .txt file
Solutions/lab1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # Bonus Tutorial 5: Expectation Maximization for spiking neurons # **Week 3, Day 2: Hidden Dynamics** # # **By Neuromatch Academy** # # ### **Note: this material was developed in NMA 2020 and has not been revised according to the standards of the Hidden Dynamics material.** # # __Content creators:__ <NAME> with help from <NAME> # # __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # __Acknowledgement__ # # This tutorial is based on code originally created by <NAME>. # # --- # # Tutorial objectives # # We have learnt how the forward inference works in an HMM in Tutorial 2. In this optional tutorial, you will get a sense of how to perform parameter estimation of an HMM using the EM algorithm. **We encourage you to do these bonus exercises only _after_ you complete the core material in Tutorials 2, and 3.** # # The EM algorithm is a powerful and widely used optimization tool that is much more general than HMMs. Since it is typically taught in the context of Hidden Markov Models, we include it here. 
# # You will implement an HMM of a network of Poisson spiking neurons mentioned in today's intro and: # # * Implement the forward-backward algorithm # * Complete the E-step and M-step # * Learn parameters for the example problem using the EM algorithm # * Get an intuition of how the EM algorithm monotonically increases data likelihood # + cellView="form" #@title Video 1: Introduction # Insert the ID of the corresponding youtube video from IPython.display import YouTubeVideo video = YouTubeVideo(id="ceQXN0OUaFo", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # + cellView="both" import numpy as np from scipy import stats from scipy.optimize import linear_sum_assignment from collections import namedtuple import matplotlib.pyplot as plt from matplotlib import patches GaussianHMM1D = namedtuple('GaussianHMM1D', ['startprob', 'transmat','means','vars','n_components']) # + cellView="form" #@title Figure Settings # import ipywidgets as widgets # interactive display from IPython.html import widgets from ipywidgets import interactive, interact, HBox, Layout,VBox from IPython.display import HTML # %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle") # - # --- # # # Section 1: HMM for Poisson spiking neuronal network # + cellView="form" #@title Video 2: HMM for Poisson spiking neurons case study # Insert the ID of the corresponding youtube video from IPython.display import YouTubeVideo video = YouTubeVideo(id="Wb8mf5chmyI", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # - # Given noisy neural or behavioral measurements, we as neuroscientists often want to infer the unobserved latent variables as they change over time. 
Thalamic relay neurons fire in two distinct modes: a tonic mode where spikes are produced one at a time, and a 'burst mode' where several action potentials are produced in rapid succession. These modes are thought to differentially encode how the neurons relay information from sensory receptors to cortex. A distinct molecular mechanism, T-type calcium channels, switches neurons between modes, but it is very challenging to measure in the brain of a living monkey. However, statistical approaches let us recover the hidden state of those calcium channels purely from their spiking activity, which can be measured in a behaving monkey. # # Here, we're going to tackle a simplified version of that problem. # # # Let's consider the formulation mentioned in the intro lecture. # We have a network of $C$ neurons switching between $K$ states. Neuron $c$ has firing rate $\lambda_i^c$ in state $i$. The transition between states are represented by the $K\times K$ transition matrix $A_{ij}$ and initial probability vector $\psi$ with length $K$ at time $t=1$. # # Let $y_t^c$ be the number of spikes for cell $c$ in time bin $t$. # # In the following exercises (1 and 2) and tutorials, you will # # * Define an instance of such model with $C=5$ and $K=3$ # * Generate a dataset from this model # * (**Exercise 1**) Implement the M-step for this HMM # * Run EM to estimate all parameters $A,\psi,\lambda_i^c$ # * Plot the learning likelihood curve # * Plot expected complete log likelihood versus data log likelihood # * Compare learnt parameters versus true parameters # --- # # # ## Define model and generate data # # Let's first generate a random state sequence from the hidden Markov Chain, and generate `n_frozen_trials` different trials of spike trains for each cell assuming they all use the same underlying sequence we just generated. # # **Suggestions** # # 1. 
Run the following two sections **Model and simulation parameters** and **Initialize true model** to define a true model and parameters that will be used in our following exercises. Please take a look at the parameters and come back to these two cells if you encounter a variable you don't know in the future. # # 2. Run the provided code to convert a given state sequence to corresponding spike rates for all cells at all times, and use provided code to visualize all spike trains. # # # + cellView="form" #@title Helper functions def plot_spike_train(X, Y, dt): """Plots the spike train for cells across trials and overlay the state. Args: X: (2d numpy array of binary values): The state sequence in a one-hot representation. (T, states) Y: (3d numpy array of floats): The spike sequence. (trials, T, C) dt (float): Interval for a bin. """ n_trials, T, C = Y.shape trial_T = T * dt fig = plt.figure(figsize=(.7 * (12.8 + 6.4), .7 * 9.6)) # plot state sequence starts = [0] + list(np.diff(X.nonzero()[1]).nonzero()[0]) stops = list(np.diff(X.nonzero()[1]).nonzero()[0]) + [T] states = [X[i + 1].nonzero()[0][0] for i in starts] for a, b, i in zip(starts, stops, states): rect = patches.Rectangle((a * dt, 0), (b - a) * dt, n_trials * C, facecolor=plt.get_cmap('tab10').colors[i], alpha=0.15) plt.gca().add_patch(rect) # plot rasters for c in range(C): if c > 0: plt.plot([0, trial_T], [c * n_trials, c * n_trials], color=plt.get_cmap('tab10').colors[0]) for r in range(n_trials): tmp = Y[r, :, c].nonzero()[0] if len(tmp) > 0: plt.plot(np.stack((tmp, tmp)) * dt, (c * n_trials + r + 0.1, c * n_trials + r + .9), 'k') ax = plt.gca() plt.yticks(np.arange(0, n_trials * C, n_trials), labels=np.arange(C, dtype=int)) plt.xlabel('time (s)', fontsize=16) plt.ylabel('Cell number', fontsize=16) def run_em(epochs, Y, psi, A, L, dt): """Run EM for the HMM spiking model. 
Args: epochs (int): Number of epochs of EM to run Y (numpy 3d array): Tensor of recordings, has shape (n_trials, T, C) psi (numpy vector): Initial probabilities for each state A (numpy matrix): Transition matrix, A[i,j] represents the prob to switch from j to i. Has shape (K,K) L (numpy matrix): Poisson rate parameter for different cells. Has shape (C,K) dt (float): Duration of a time bin Returns: save_vals (lists of floats): Data for later plotting lls (list of flots): ll Before each EM step psi (numpy vector): Estimated initial probabilities for each state A (numpy matrix): Estimated transition matrix, A[i,j] represents the prob to switch from j to i. Has shape (K,K) L (numpy matrix): Estimated Poisson rate parameter for different cells. Has shape (C,K) """ save_vals = [] lls = [] for e in range(epochs): # Run E-step ll, gamma, xi = e_step(Y, psi, A, L, dt) lls.append(ll) # log the data log likelihood for current cycle if e % print_every == 0: print(f'epoch: {e:3d}, ll = {ll}') # log progress # Run M-step psi_new, A_new, L_new = m_step(gamma, xi, dt) """Booking keeping for later plotting Calculate the difference of parameters for later interpolation/extrapolation """ dp, dA, dL = psi_new - psi, A_new - A, L_new - L # Calculate LLs and ECLLs for later plotting if e in plot_epochs: b_min = -min([np.min(psi[dp > 0] / dp[dp > 0]), np.min(A[dA > 0] / dA[dA > 0]), np.min(L[dL > 0] / dL[dL > 0])]) b_max = -max([np.max(psi[dp < 0] / dp[dp < 0]), np.max(A[dA < 0] / dA[dA < 0]), np.max(L[dL < 0] / dL[dL < 0])]) b_min = np.max([.99 * b_min, b_lims[0]]) b_max = np.min([.99 * b_max, b_lims[1]]) bs = np.linspace(b_min, b_max, num_plot_vals) bs = sorted(list(set(np.hstack((bs, [0, 1]))))) bs = np.array(bs) lls_for_plot = [] eclls_for_plot = [] for i, b in enumerate(bs): ll = e_step(Y, psi + b * dp, A + b * dA, L + b * dL, dt)[0] lls_for_plot.append(ll) rate = (L + b * dL) * dt ecll = ((gamma[:, 0] @ np.log(psi + b * dp) + (xi * np.log(A + b * dA)).sum(axis=(-1, -2, -3)) + 
(gamma * stats.poisson(rate).logpmf(Y[..., np.newaxis]).sum(-2) ).sum(axis=(-1, -2))).mean() / T / dt) eclls_for_plot.append(ecll) if b == 0: diff_ll = ll - ecll lls_for_plot = np.array(lls_for_plot) eclls_for_plot = np.array(eclls_for_plot) + diff_ll save_vals.append((bs, lls_for_plot, eclls_for_plot)) # return new parameter psi, A, L = psi_new, A_new, L_new ll = e_step(Y, psi, A, L, dt)[0] lls.append(ll) print(f'epoch: {epochs:3d}, ll = {ll}') return save_vals, lls, psi, A, L def plot_lls(lls): """Plots log likelihoods at each epoch. Args: lls (list of floats) log likelihoods at each epoch. """ epochs = len(lls) fig, ax = plt.subplots() ax.plot(range(epochs) , lls, linewidth=3) span = max(lls) - min(lls) ax.set_ylim(min(lls) - span * 0.05, max(lls) + span * 0.05) plt.xlabel('iteration') plt.ylabel('log likelihood') plt.show(fig) def plot_lls_eclls(plot_epochs, save_vals): """Plots log likelihoods at each epoch. Args: plot_epochs (list of ints): Which epochs were saved to plot. save_vals (lists of floats): Different likelihoods from EM for plotting. 
""" rows = int(np.ceil(min(len(plot_epochs), len(save_vals)) / 3)) fig, axes = plt.subplots(rows, 3, figsize=(.7 * 6.4 * 3, .7 * 4.8 * rows)) axes = axes.flatten() minll, maxll = np.inf, -np.inf for i, (ax, (bs, lls_for_plot, eclls_for_plot)) in enumerate(zip(axes, save_vals)): ax.set_xlim([-1.15, 2.15]) min_val = np.stack((lls_for_plot, eclls_for_plot)).min() max_val = np.stack((lls_for_plot, eclls_for_plot)).max() ax.plot([0, 0], [min_val, lls_for_plot[bs == 0]], '--b') ax.plot([1, 1], [min_val, lls_for_plot[bs == 1]], '--b') ax.set_xticks([0, 1]) ax.set_xticklabels([f'$\\theta^{plot_epochs[i]}$', f'$\\theta^{plot_epochs[i] + 1}$']) ax.tick_params(axis='y') ax.tick_params(axis='x') ax.plot(bs, lls_for_plot) ax.plot(bs, eclls_for_plot) if min_val < minll: minll = min_val if max_val > maxll: maxll = max_val if i % 3 == 0: ax.set_ylabel('log likelihood') if i == 4: l = ax.legend(ax.lines[-2:], ['LL', 'ECLL'], framealpha=1) plt.show(fig) def plot_learnt_vs_true(L_true, L, A_true, A, dt): """Plot and compare the true and learnt parameters. Args: L_true (numpy array): True L. L (numpy array): Estimated L. A_true (numpy array): True A. A (numpy array): Estimated A. dt (float): Bin length. 
""" C, K = L.shape fig = plt.figure(figsize=(8, 4)) plt.subplot(121) plt.plot([0, L_true.max() * 1.05], [0, L_true.max() * 1.05], '--b') for i in range(K): for c in range(C): plt.plot(L_true[c, i], L[c, i], color='C{}'.format(c), marker=['o', '*', 'd'][i]) # this line will fail for K > 3 ax = plt.gca() ax.axis('equal') plt.xlabel('True firing rate (Hz)') plt.ylabel('Inferred firing rate (Hz)') xlim, ylim = ax.get_xlim(), ax.get_ylim() for c in range(C): plt.plot([-10^6], [-10^6], 'o', color='C{}'.format(c)) for i in range(K): plt.plot([-10^6], [-10^6], '.', marker=['o', '*', 'd'][i], c="black") l = plt.legend(ax.lines[-C - K:], [f'cell {c + 1}' for c in range(C)] + [f'state {i + 1}' for i in range(K)]) ax.set_xlim(xlim), ax.set_ylim(ylim) plt.subplot(122) ymax = np.max(A_true - np.diag(np.diag(A_true))) / dt * 1.05 plt.plot([0, ymax], [0, ymax], '--b') for j in range(K): for i in range(K): if i == j: continue plt.plot(A_true[i, j] / dt, A[i, j] / dt, 'o') ax = plt.gca() ax.axis('equal') plt.xlabel('True transition rate (Hz)') plt.ylabel('Inferred transition rate (Hz)') l = plt.legend(ax.lines[1:], ['state 1 -> 2', 'state 1 -> 3', 'state 2 -> 1', 'state 2 -> 3', 'state 3 -> 1', 'state 3 -> 2' ]) plt.show(fig) # - # #### Model and simulation parameters # + # model and data parameters C = 5 # number of cells K = 3 # number of states dt = 0.002 # seconds trial_T = 2.0 # seconds n_frozen_trials = 20 # used to plot multiple trials with the same state sequence n_trials = 300 # number of trials (each has it's own state sequence) # for random data max_firing_rate = 50 # Hz max_transition_rate = 3 # Hz # needed to plot LL and ECLL for every M-step # **This substantially slows things down!!** num_plot_vals = 10 # resolution of the plot (this is the expensive part) b_lims = (-1, 2) # lower limit on graph (b = 0 is start-of-M-step LL; b = 1 is end-of-M-step LL) plot_epochs = list(range(9)) # list of epochs to plot # - # #### Initialize true model # + np.random.seed(101) T = 
round(trial_T / dt)  # number of time bins per trial
ts = np.arange(T)

# initial state distribution (normalised so the probabilities sum to 1)
psi = np.arange(1, K + 1)
psi = psi / psi.sum()

# off-diagonal transition rates sampled uniformly
A = np.random.rand(K, K) * max_transition_rate * dt
A = (1. - np.eye(K)) * A
A = A + np.diag(1 - A.sum(1))  # diagonal chosen so every row sums to 1

# hand-crafted firing rates make good plots
L = np.array([
    [.02, .8, .37],
    [1., .7, .1],
    [.92, .07, .5],
    [.25, .42, .75],
    [.15, .2, .85]
]) * max_firing_rate  # (C,K)

# Save true parameters for comparison later
psi_true = psi
A_true = A
L_true = L
# -

# #### Generate data with frozen sequence and plot

# Given a state sequence `[0,1,1,3,2,...]`, we'll first convert each state in the sequence into the so-called "one-hot" coding. For example, with 5 total states, the one-hot coding of state `0` is `[1,0,0,0,0]` and the coding for state `3` is `[0,0,0,1,0]`. Suppose we now have a sequence of length `T`; the one-hot coding of this sequence `Xf` will have shape `(T,K)`

# +
np.random.seed(101)

# sample n_frozen_trials state sequences:
# draw the first state from psi, then each next state from the previous state's row of A
Xf = np.zeros(T, dtype=int)
Xf[0] = (psi.cumsum() > np.random.rand()).argmax()
for t in range(1, T):
  Xf[t] = (A[Xf[t - 1],:].cumsum() > np.random.rand()).argmax()

# switch to one-hot encoding of the state
Xf = np.eye(K, dtype=int)[Xf]  # (T,K)

# get the Y values: Poisson spike counts with per-state rates L
Rates = np.squeeze(L @ Xf[..., None]) * dt  # (T,C)
Rates = np.tile(Rates, [n_frozen_trials, 1, 1])  # (n_trials, T, C)
Yf = stats.poisson(Rates).rvs()

with plt.xkcd():
  plot_spike_train(Xf, Yf, dt)
# -

# #### Generate data for EM learning
#
# The previous dataset was generated with the same state sequence for visualization. Now let's generate `n_trials` trials of observations, each one with its own randomly generated sequence

# +
np.random.seed(101)

# sample n_trials state sequences (same scheme as above, vectorised over trials)
X = np.zeros((n_trials, T), dtype=int)
X[:, 0] = (psi_true.cumsum(0)[:, None] > np.random.rand(n_trials)).argmax(0)
for t in range(1, T):
  X[:, t] = (A_true[X[:, t - 1], :].T.cumsum(0) > np.random.rand(n_trials)).argmax(0)

# switch to one-hot encoding of the state
one_hot = np.eye(K)[np.array(X).reshape(-1)]
X = one_hot.reshape(list(X.shape) + [K])

# get the Y values
Y = stats.poisson(np.squeeze(L_true @ X[..., None]) * dt).rvs()  # (n_trials, T, C)

print("Y has shape: (n_trial={},T={},C={})".format(*Y.shape))
# -

# ---
#
# # Section 2: EM algorithm for HMM

# + cellView="form"
#@title Video 3: EM Tutorial
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="umU4wUWlKvg", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# -

# Finding the optimal values of the parameters that maximize the data likelihood is practically infeasible, since we would need to integrate out all latent variables $x_{1:T}$, and the time needed is exponential in $T$. Thus, as an alternative approach, we use the Expectation-Maximization algorithm, which iteratively performs an E-step followed by an M-step, and is guaranteed not to decrease (and usually to increase) the data likelihood after each EM cycle.
#
#
# In this section we will briefly review the EM algorithm for HMM and list
#
# * Recursive equations for forward and backward probabilities $a_i(t)$ and $b_i(t)$
# * Expressions for singleton and pairwise marginal distributions after seeing data: $\gamma_{i}(t):=p_{\theta}\left(x_{t}=i | Y_{1: T}\right)$ and $\xi_{i j}(t) = p_{\theta}(x_t=i,x_{t+1}=j|Y_{1:T})$
# * Closed-form solutions for the updated values of $A,\psi,\lambda$ which increase the data likelihood
#
#
# ---
# #### E-step: Forward-backward algorithm
# In the forward pass, we calculate the **forward probabilities**, i.e. the joint probability of $x_t$ and the current and past data $Y_{1:t}$, $a_i(t):=p(x_t=i,Y_{1:t})$, recursively by
#
# $$a_i(t) = p_{\theta}(y_t|x_t=i)\sum_j A_{ji} a_j(t-1)$$
#
# In contrast to the intro, now $A_{ji}$ means **the transition probability from state $j$ to state $i$.**
#
# The backward pass calculates the **backward probabilities** $b_i(t):=p_{\theta}(Y_{t+1:T}|x_t=i)$, which is the likelihood of observing all future data points given the current state $x_t$. The recursion for $b_i(t)$ is given by
#
# $$ b_i(t) = \sum_j p_{\theta}(y_{t+1}|x_{t+1}=j)b_j(t+1)A_{ij} $$
#
# Combining all past and future information, the **singleton and pairwise marginal distributions** are given by
#
# $$ \gamma_{i}(t):=p_{\theta}\left(x_{t}=i | Y_{1: T}\right)=\frac{a_{i}(t) b_{i}(t)}{p_{\theta}\left(Y_{1: T}\right)} $$
#
# $$ \xi_{i j}(t) = p_{\theta}(x_t=i,x_{t+1}=j|Y_{1:T}) =\frac{b_{j}(t+1)p_{\theta}\left(y_{t+1} | x_{t+1}=j\right) A_{i j} a_{i}(t)}{p_{\theta}\left(Y_{1: T}\right)} $$
#
# where $p_{\theta}(Y_{1:T})=\sum_i a_i(T)$.
#
# ---
# #### M-step
#
# The M-step for HMM has a closed-form solution. First, the new transition matrix is given by
# $$
# A_{ij} =\frac{\sum_{t=1}^{T-1} \xi_{i j}(t)}{\sum_{t=1}^{T-1} \gamma_{i}(t)}
# $$
#
# which is the expected empirical transition probability.
# New initial probabilities and parameters of the emission models are also given by their empirical values given single and pairwise marginal distributions: # # $$ \psi_i = \frac{1}{N}\sum_{trials}\gamma_i(1) $$ # # $$ \lambda_{i}^{c}=\frac{\sum_{t} \gamma_{i}(t) y_{t}^{c}}{\sum_{t} \gamma_{i}(t) d t}$$ # --- # # # ### E-step: forward and backward algorithm # # **(Optional)** # # In this section you will read through the code for the forward-backward algorithm and understand how to implement the computation efficiently in `numpy` by calculating the recursion for all trials at once. # # --- # # Let's re-write the forward and backward recursions in a more compact form: # # $$ a_i^t = \sum_j A_{ji}o_j^t a_j^{t-1} $$ # # # $$b^t_i = \sum_j A_{ij} o_j^{t+1}b_j^{t+1} $$ where $o_j^{t}=p(y_{t}|x_{t}=j)$. # # # Let's take the backward recursion for example. In practice we will handle all trials together since they are independent of each other. After adding a trial index $l$ to the recursion equations, the backward recursion becomes: # # $$b^t_{li} = \sum_j A_{ij} o_{lj}^{t+1}b_{lj}^{t+1} $$ # # What we have in hand are: # * `A`: matrix of size `(K,K)` # * `o^{t+1}`: array of size `(N,K)` is the log data likelihood for all trials at a given time # * `b^{t+1}`: array of size `(N,K)` is the backward probability for all trials at a given time # # where `N` stands for the number of trials. # # The index size and meaning doesn't match for these three arrays: the index is $i$ for $A$ in the first dimension and is $l$ for $o$ and $b$, so we can't just multiply them together. However, we can do this by viewing vectors $o^{t+1}_{l\cdot}$ and $b^{t+1}_{l\cdot}$ as a matrix with 1 row and re-write the backward equation as: # # $$b^t_{li} = \sum_j A_{ij} o_{l1j}^{t+1}b_{l1j}^{t+1} $$ # # Now we can just multiply these three arrays element-wise and sum over the last dimension. 
# # In `numpy`, we can achieve this by indexing the array with `None` at the location we want to insert a dimension. Take `b` with size `(N,T,K)` for example,`b[:,t,:]` will have shape `(N,K)`, `b[:,t,None,:]` will have shape `(N,1,K)` and `b[:,t,:,None]` will have shape `(N,K,1)`. # # So the backward recursion computation can be implemented as # # ```python # b[:,t,:] = (A * o[:,t+1,None,:] * b[:,t+1,None,:]).sum(-1) # ``` # # --- # # In addition to the trick introduced above, in this exercise we will work in the **log scale** for numerical stability. # # # **Suggestions** # # 1. Take a look at the code for the forward recursion and backward recursion. # # # # # def e_step(Y, psi, A, L, dt): """Calculate the E-step for the HMM spiking model. Args: Y (numpy 3d array): tensor of recordings, has shape (n_trials, T, C) psi (numpy vector): initial probabilities for each state A (numpy matrix): transition matrix, A[i,j] represents the prob to switch from i to j. Has shape (K,K) L (numpy matrix): Poisson rate parameter for different cells. Has shape (C,K) dt (float): Bin length Returns: ll (float): data log likelihood gamma (numpy 3d array): singleton marginal distribution. Has shape (n_trials, T, K) xi (numpy 4d array): pairwise marginal distribution for adjacent nodes . 
Has shape (n_trials, T-1, K, K) """ n_trials = Y.shape[0] T = Y.shape[1] K = psi.size log_a = np.zeros((n_trials, T, K)) log_b = np.zeros((n_trials, T, K)) log_A = np.log(A) log_obs = stats.poisson(L * dt).logpmf(Y[..., None]).sum(-2) # n_trials, T, K # forward pass log_a[:, 0] = log_obs[:, 0] + np.log(psi) for t in range(1, T): tmp = log_A + log_a[:, t - 1, : ,None] # (n_trials, K,K) maxtmp = tmp.max(-2) # (n_trials,K) log_a[:, t] = (log_obs[:, t] + maxtmp + np.log(np.exp(tmp - maxtmp[:, None]).sum(-2))) # backward pass for t in range(T - 2, -1, -1): tmp = log_A + log_b[:, t + 1, None] + log_obs[:, t + 1, None] maxtmp = tmp.max(-1) log_b[:, t] = maxtmp + np.log(np.exp(tmp - maxtmp[..., None]).sum(-1)) # data log likelihood maxtmp = log_a[:, -1].max(-1) ll = np.log(np.exp(log_a[:, -1] - maxtmp[:, None]).sum(-1)) + maxtmp # singleton and pairwise marginal distributions gamma = np.exp(log_a + log_b - ll[:, None, None]) xi = np.exp(log_a[:, :-1, :, None] + (log_obs + log_b)[:, 1:, None] + log_A - ll[:, None, None, None]) return ll.mean() / T / dt, gamma, xi # + cellView="form" #@title Video 4: Implement the M-step # Insert the ID of the corresponding youtube video from IPython.display import YouTubeVideo video = YouTubeVideo(id="H4GGTg_9BaE", width=854, height=480, fs=1) print("Video available at https://youtu.be/" + video.id) video # - # #### EXERCISE 1: Implement the M-step # # In this exercise you will complete the M-step for this HMM using closed form solutions mentioned before. # # **Suggestions** # # 1. Calculate new initial probabilities as empirical counts of singleton marginals # # $$ \psi_i = \frac{1}{N}\sum_{trials}\gamma_i(1) $$ # # 2. 
Remember the extra trial dimension and average over all trials
#
#
# **For reference:**
#
# New transition matrix is calculated as empirical counts of transition events from marginals
#
# $$ A_{ij} =\frac{\sum_{t=1}^{T-1} \xi_{i j}(t)}{\sum_{t=1}^{T-1} \gamma_{i}(t)}$$
#
#
# New spiking rates for each cell and each state are given by
#
# $$ \lambda_{i}^{c}=\frac{\sum_{t} \gamma_{i}(t) y_{t}^{c}}{\sum_{t} \gamma_{i}(t) d t} $$
#

def m_step(gamma, xi, dt):
  """Calculate the M-step updates for the HMM spiking model.

  Args:
    gamma (numpy 3d array): Singleton marginals p(x_t=i | Y_{1:T}) from the
      E-step. Has shape (n_trials, T, K).
    xi (numpy 4d array): Pairwise marginals p(x_t=i, x_{t+1}=j | Y_{1:T}) from
      the E-step. Has shape (n_trials, T-1, K, K).
    dt (float): Duration of a time bin

  Returns:
    psi_new (numpy vector): Updated initial probabilities for each state
    A_new (numpy matrix): Updated transition matrix, A[i,j] represents the
      prob. to switch from i to j. Has shape (K,K)
    L_new (numpy matrix): Updated Poisson rate parameter for different cells.
      Has shape (C,K)
  """
  raise NotImplementedError("`m_step` need to be implemented")
  ############################################################################
  # Insert your code here to:
  #    Calculate the new prior probabilities in each state at time 0
  #    Hint: Take the first time step and average over all trials
  ###########################################################################
  psi_new = ...

  # Make sure the probabilities are normalized
  psi_new /= psi_new.sum()

  # Calculate new transition matrix
  A_new = xi.sum(axis=(0, 1)) / gamma[:, :-1].sum(axis=(0, 1))[:, np.newaxis]

  # Calculate new firing rates
  # NOTE(review): Y is read from the notebook's global scope, not passed in.
  L_new = (np.swapaxes(Y, -1, -2) @ gamma).sum(axis=0) / gamma.sum(axis=(0, 1)) / dt

  return psi_new, A_new, L_new

# to_remove solution
def m_step(gamma, xi, dt):
  """Calculate the M-step updates for the HMM spiking model.

  Args:
    gamma (numpy 3d array): Singleton marginals p(x_t=i | Y_{1:T}) from the
      E-step. Has shape (n_trials, T, K).
    xi (numpy 4d array): Pairwise marginals p(x_t=i, x_{t+1}=j | Y_{1:T}) from
      the E-step. Has shape (n_trials, T-1, K, K).
    dt (float): Duration of a time bin

  Returns:
    psi_new (numpy vector): Updated initial probabilities for each state
    A_new (numpy matrix): Updated transition matrix, A[i,j] represents the
      prob. to switch from i to j. Has shape (K,K)
    L_new (numpy matrix): Updated Poisson rate parameter for different cells.
      Has shape (C,K)
  """

  # Calculate and normalize the new initial probabilities, psi_new
  psi_new = gamma[:, 0].mean(axis=0)
  # Make sure the probabilities are normalized
  psi_new /= psi_new.sum()

  # Calculate new transition matrix
  A_new = xi.sum(axis=(0, 1)) / gamma[:, :-1].sum(axis=(0, 1))[:, np.newaxis]

  # Calculate new firing rates
  # NOTE(review): Y is read from the notebook's global scope, not passed in.
  L_new = (np.swapaxes(Y, -1, -2) @ gamma).sum(axis=0) / gamma.sum(axis=(0, 1)) / dt

  return psi_new, A_new, L_new

# + cellView="form"
#@title Video 5: Running and plotting EM
# Insert the ID of the corresponding youtube video
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="6UTsXxE3hG0", width=854, height=480, fs=1)
print("Video available at https://youtu.be/" + video.id)
video
# -

# ---
#
# ### Run EM
#
# #### Initialization for parameters
#

# +
np.random.seed(101)

# number of EM steps
epochs = 9
print_every = 1

# initial state distribution
psi = np.arange(1, K + 1)
psi = psi / psi.sum()

# off-diagonal transition rates sampled uniformly
A = np.ones((K, K)) * max_transition_rate * dt / 2
A = (1 - np.eye(K)) * A
A = A + np.diag(1 - A.sum(1))

# firing rates sampled uniformly
L = np.random.rand(C, K) * max_firing_rate

# +
# LL for true vs.
initial parameters print(f'LL for true 𝜃: {e_step(Y, psi_true, A_true, L_true, dt)[0]}') print(f'LL for initial 𝜃: {e_step(Y, psi, A, L, dt)[0]}\n') # Run EM save_vals, lls, psi, A, L = run_em(epochs, Y, psi, A, L, dt) # + # EM doesn't guarantee the order of learnt latent states are the same as that of true model # so we need to sort learnt parameters # Compare all true and estimated latents across cells cost_mat = np.sum((L_true[..., np.newaxis] - L[:, np.newaxis])**2, axis=0) true_ind, est_ind = linear_sum_assignment(cost_mat) psi = psi[est_ind] A = A[est_ind] A = A[:, est_ind] L = L[:, est_ind] # - # # --- # # ## Plotting the training process and learnt model # ### Plotting progress during EM! # # Now you can # # * Plot the likelihood during training # * Plot the M-step log likelihood versus expected complete log likelihood(ECLL) to get an intuition of how EM works and the convexity of ECLL # * Plot learnt parameters versus true parameters # Plot the log likelihood after each epoch of EM with plt.xkcd(): plot_lls(lls) # For each saved epoch, plot the log likelihood and expected complete log likelihood # for the initial and final parameter values with plt.xkcd(): plot_lls_eclls(plot_epochs, save_vals) # ### Plot learnt parameters vs. true parameters # # Now we will plot the (sorted) learnt parameters with true parameters to see if we successfully recovered all the parameters # # Compare true and learnt parameters with plt.xkcd(): plot_learnt_vs_true(L_true, L, A_true, A, dt)
tutorials/W3D2_HiddenDynamics/W3D2_Tutorial5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:r37] *
#     language: python
#     name: conda-env-r37-py
# ---

# # First, read in seasonal count data

# +
import pandas as pd

# Daily event counts for the control (CTRL) and pseudo-global-warming (PGW)
# simulations, indexed by date.
ctrl_counts = pd.read_csv("../table_data/combined_CTRL_daily_count.csv")
ctrl_counts['date'] = pd.to_datetime(ctrl_counts.date)
ctrl_counts = ctrl_counts.set_index('date')

pgw_counts = pd.read_csv("../table_data/combined_PGW_daily_count.csv")
pgw_counts['date'] = pd.to_datetime(pgw_counts.date)
pgw_counts = pgw_counts.set_index('date')

print('CTRL counts', ctrl_counts.sum(axis=0).values)
print('PGW Counts', pgw_counts.sum(axis=0).values)
# -

# Significance test for seasonal counts

# +
from scipy.stats import ks_2samp

# Two-sample Kolmogorov-Smirnov test: CTRL vs. PGW seasonal count totals
ks_2samp(ctrl_counts.sum(axis=0).values, pgw_counts.sum(axis=0).values)
# -

# Seasonal SWE

# +
# Daily snow-water-equivalent (SWE) totals for both simulations.
ctrl_swe = pd.read_csv("../table_data/combined_CTRL_daily_swe.csv")
ctrl_swe['date'] = pd.to_datetime(ctrl_swe.date)
ctrl_swe = ctrl_swe.set_index('date')

pgw_swe = pd.read_csv("../table_data/combined_PGW_daily_swe.csv")
pgw_swe['date'] = pd.to_datetime(pgw_swe.date)
pgw_swe = pgw_swe.set_index('date')

print('CTRL swe', ctrl_swe.sum(axis=0).values)
print('PGW swe', pgw_swe.sum(axis=0).values)
# -

# BUG FIX: this cell previously repeated the *count* comparison
# (ctrl_counts/pgw_counts); the SWE section should test the SWE totals.
ks_2samp(ctrl_swe.sum(axis=0).values, pgw_swe.sum(axis=0).values)

# +
# Per-swath statistics (duration, SWE, areas) for both simulations.
ctrl_extent = pd.read_csv("../table_data/CTRL_extent.csv")
pgw_extent = pd.read_csv("../table_data/PGW_extent.csv")
ctrl_extent.head()
# -

# Durations significance test
#
# (I'm just showing the seasonal values as an example.. the test is for all swaths)

# +
print('CTRL duration', ctrl_extent.groupby('season')['duration'].sum())
# BUG FIX: label previously said 'PGW swe' for the duration totals.
print('PGW duration', pgw_extent.groupby('season')['duration'].sum())

ks_2samp(ctrl_extent['duration'].values, pgw_extent['duration'].values)
# -

# SWE significance test
#
# (I'm just showing the seasonal values as an example.. the test is for all swaths)

# +
print('CTRL swe', ctrl_extent.groupby('season')['swe'].sum())
print('PGW swe', pgw_extent.groupby('season')['swe'].sum())

ks_2samp(ctrl_extent['swe'].values, pgw_extent['swe'].values)
# -

# Swath area significance test
#
# (I'm just showing the seasonal values as an example.. the test is for all swaths)

# +
# 10**-6 converts the area sums for display (m^2 -> km^2, presumably — verify units)
print('CTRL area', ctrl_extent.groupby('season')['swath_area'].sum() * 10**-6)
print('PGW area', pgw_extent.groupby('season')['swath_area'].sum() * 10**-6)

ks_2samp(ctrl_extent['swath_area'].values, pgw_extent['swath_area'].values)
# -

# 50th percentile swath area sig. test
#
# (I'm just showing the seasonal values as an example.. the test is for all swaths)

# +
print('CTRL area', ctrl_extent.groupby('season')['swath_area_50p'].sum() * 10**-6)
print('PGW area', pgw_extent.groupby('season')['swath_area_50p'].sum() * 10**-6)

ks_2samp(ctrl_extent['swath_area_50p'].values, pgw_extent['swath_area_50p'].values)
# -

# 90th percentile swath area sig. test
#
# (I'm just showing the seasonal values as an example.. the test is for all swaths)

# +
print('CTRL area', ctrl_extent.groupby('season')['swath_area_90p'].sum() * 10**-6)
print('PGW area', pgw_extent.groupby('season')['swath_area_90p'].sum() * 10**-6)

ks_2samp(ctrl_extent['swath_area_90p'].values, pgw_extent['swath_area_90p'].values)
future_snow/notebooks/Significance_Tests.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Derivation of MKS Localization Equation
#
# The goal of this notebook is to derive the Materials Knowledge Systems (MKS) equation from the elastostatic equilibrium equation. Note that the MKS equation can be derived from other partial differential equations.

# ### Definitions
#
# Let $C(x)$ be the local stiffness tensor for a two-phase material with stiffness tensors $C_A$ and $C_B$. The stiffness tensor at location $x$ can be represented as a perturbation from a reference stiffness tensor.
#
# $$C(x) = C^R + C'(x)$$
#
# The strain field at location $(x)$ can also be defined in terms of a similar perturbation.
#
# $$\varepsilon(x) = \bar{\varepsilon} + \varepsilon '(x)$$
#
# where $\bar{\varepsilon}$ is the average strain and $\varepsilon '(x)$ is the local strain perturbation from $\bar{\varepsilon}$.
#
# The constitutive equation is therefore
#
# $$\sigma_{ij}(x) = \big(C^R_{ijlk} + C'_{ijlk}(x) \big ) \big (\bar{\varepsilon}_{lk} + \varepsilon'_{lk}(x) \big )$$

# ### Equilibrium Condition
#
# The equilibrium condition is defined below:
#
# $$\sigma_{ij,j}(x) = \Big [\big(C^R_{ijlk} + C'_{ijlk}(x) \big ) \big (\bar{\varepsilon}_{lk} + \varepsilon'_{lk}(x) \big )\Big ]_{,j} = 0$$
#
# $$\sigma_{ij,j}(x) = C^R_{ijlk}\varepsilon'_{lk,j}(x) + C'_{ijlk,j}(x)\bar{\varepsilon}_{lk} + \Big [C'_{ijlk}(x) \varepsilon'_{lk}(x)\Big ]_{,j} = 0$$
#
# Let
#
# $$F_i(x) = C'_{ijlk,j}(x)\bar{\varepsilon}_{lk} + \Big [C'_{ijlk}(x) \varepsilon'_{lk}(x)\Big ]_{,j} $$
#
# Using the definition of $F(x)$ above, the equilibrium equation above can be rearranged in the form of an inhomogeneous differential equation.
# # $$C^R_{ijlk}\varepsilon'_{lk,j}(x) + F_i(x) = 0$$ # # ### Strain, Displacement, and Green's Functions # # By using the relationship between strain and displacement, the equilibrium equation can be rewritten as follows: # # $$ \varepsilon_{kl}(x) = \frac{\big (u_{k,l}(x) + u_{l,k}(x) \big)}{2} $$ # # $$C^R_{ijkl} \frac{\big (u'_{k,lj}(x) + u'_{l,kj}(x) \big)}{2} + F_i(x) = 0$$ # # The solution to the displacements can be found using Green's functions: # # $$C^R_{ijkl} G_{km,lj}(r) + \delta_{im}\delta(x-r) = 0$$ # # $$u'_k(x) = \int_V G_{ik}(r) F_i (x-r)dr = \int_V G_{ik}(r) \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]_{,j}dr$$ # # and # # $$u'_l(x) = \int_V G_{il}(r) F_i (x - r)dr = \int_V G_{ik}(r) \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]_{,j}dr$$ # # Therefore, the strain can also be found in terms of Green's functions: # # $$\varepsilon'_{kl}(x) = \int_V \frac{\big (G_{ik,l}(r) + G_{il,k}(r) \big)}{2} F_i (x-r)dr = \int_V \frac{\big (G_{ik,l}(r) + G_{il,k}(r) \big)}{2} \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]_{,j}dr$$ # # # Note that the $G(r)$ terms depend on the reference medium $C^R$. # ### Integration by Parts # # The equation above can be recast with all of the derivatives on the Green's functions by integrating by parts. 
# # $$ # \varepsilon'_{kl}(x) = \Bigg [ \int_S \frac{\big (G_{ik,l}(r) + G_{il,k}(r) \big)}{2} \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ] n_j dS\Bigg ]_{r \rightarrow 0}^{r \rightarrow \infty} - $$ # # $$ \int_V \frac{\big (G_{ik,lj}(r) + G_{il,kj}(r) \big)}{2} \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]dr # $$ # ### Principal Value Singularity # # In the equation above, the surface term, tending to zero, is a principal value integral, because of the singularity in the Green's functions at $r = 0$. As a result, the integrand is not differentiable. Torquato shows that, by excluding a sphere at the origin and using integration by parts and the divergence theorem, we can arrive at the following equation [1]. # # # $$\varepsilon'_{kl}(x) = I_{ikjl} - E_{ikjl} + \int_V \Phi_{ikjl}(r) \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]dr $$ # # where # # $$\Phi_{ikjl}(r) = - \frac{\big (G_{ik,lj}(r) + G_{il,kj}(r) \big)}{2} $$ # # is the Green's function terms, and # # $$I_{ikjl}^{\infty} = \lim_{r \rightarrow \infty} \int_S\frac{\big (G_{ik,l}(r) + G_{il,k}(r)\big)}{2} \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]n_l dS $$ # # $$E_{ikjl}(x) = \lim_{r \rightarrow 0} \int_S\frac{\big (G_{ik,l}(r) + G_{il,k}(r)\big)}{2} n_l dS $$ # # are the contribution from the surface integrals at $\infty$ and from the singularity. 
# # Finally, let # # $$\Gamma_{iklj}(r) = I_{ikjl}^{\infty}\delta(r)-E_{ikjl}\delta(r) + \Phi_{ikjl}(r)$$ # # The strain can then be written in the following form: # # $$\varepsilon'_{kl}(x) = \int_V \Gamma_{ikjl}(r) \Big [C'_{ijlk}(x-r)\bar{\varepsilon}_{lk} + \big [C'_{ijlk}(x-r)\varepsilon'_{lk}(x-r)\big ]\Big ]dr $$ # # ### Weak Contrast Expansion # # $$\varepsilon'(x) =\int_V \Gamma(r) C'(x-r) [ \bar{\varepsilon} + \varepsilon'(x-r)]dr $$ # # By recursively inserting $\varepsilon'(x)$ into the RHS of the equation, we get the following series: # # $$ # \varepsilon'(x) =\int_V \Gamma(r) C'(x-r) \bar{\varepsilon} dr +\int_V \int_V \Big[ \Gamma(r) C'(x-r)\bar{\varepsilon}\Big ]\Big [\Gamma(r') C'(x-r') \bar{\varepsilon}\Big] dr'dr + ...$$ # # As long as # # $$\Gamma(r) C'(x)\bar{\varepsilon} << 1$$ # # the series can be truncated after a few terms and still provide resonable accuracy. # ### Materials Knowledge Systems # # Let # # $$ C'(x-r) = \int_H h m(h, x-r) dh$$ # # where $m(h, r)$ is the microstructure function, which is a probablity density that spans both the local state space $h$ and real space $x$. The expectation of local state variable for the microstructure function is the integral over the local state space $H$ and describes the expected local state $h$ which is equal to $C'(r)$. # # Also, let # # $$\alpha(h, r) = \Gamma(r) h \bar{\varepsilon} $$ # $$\alpha(h, h', r, r') = \Gamma(r) h \bar{\varepsilon} \Gamma(r') h' \bar{\varepsilon} $$ # $$ etc... $$ # # where, again, $h$ is the local state variable. 
# # Plugging these definitions into the Weak Contrast Expansion recasts the series in the following form: # $$\varepsilon '(x) =\int_V \int_H \alpha(h, r) m(h, x-r) dr dh + \int_V \int_V \int_H \int_H\alpha_(h, h', r, r') m(h, x-r) m(h', x-r') dr'dr dh dh'+ ...$$ # # The discrete version of this equation is the MKS localization: # $$\varepsilon'[s] = \sum_{l=0}^{L-1} \sum_{r=0}^{S-1} \alpha[l, r] m[l, s-r] +\sum_{l=0}^{L-1} \sum_{l'=0}^{L-1} \sum_{r=0}^{S-1} \sum_{r'=0}^{S-1} \alpha[l, l', r, r'] m[l, s-r] m_[l', s-r'] + ... $$ # # ## References # # [1] <NAME>., 1997. *Effective stiffness tensor of composite media. I. Exact series expansions.* J. Mech. Phys. Solids 45, 1421–1448. # # [2] <NAME>, <NAME>, <NAME>. *Microstructure Sensitive Design for Performance Optimization.* # # [3] <NAME>, <NAME>, <NAME>. *A strong contrast homogenization formulation for multi-phase anisotropic materials.*
notebooks/derivation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Challenge Notebook # ## Problem: Given an array of 32 integers, find an int not in the input. Use a minimal amount of memory. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # * [Solution Notebook](#Solution-Notebook) # ## Constraints # # * Are we working with non-negative ints? # * Yes # * What is the range of the integers? # * Discuss the approach for 4 billion integers # * Implement for 32 integers # * Can we assume the inputs are valid? # * No # ## Test Cases # # * None -> Exception # * [] -> Exception # * General case # * There is an int excluded from the input -> int # * There isn't an int excluded from the input -> None # ## Algorithm # # Refer to the [Solution Notebook](http://nbviewer.jupyter.org/github/donnemartin/interactive-coding-challenges/blob/master/sorting_searching/new_int/new_int_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
# ## Code

# +
try:
    from bitstring import BitArray  # run pip install bitstring (optional for this solution)
except ImportError:
    BitArray = None


class Bits(object):

    def new_int(self, array, max_size):
        """Return the smallest int in [0, max_size) that is not in array.

        A single Python int is used as a bit vector, so only O(max_size)
        bits of working memory are needed regardless of len(array).

        Args:
            array: iterable of non-negative ints, each expected < max_size.
            max_size: number of candidate integers (bit-vector length).

        Returns:
            The first integer in 0..max_size-1 missing from array, or
            None if every candidate is present.

        Raises:
            TypeError: if array is None or empty.
        """
        if array is None or not array:
            raise TypeError('array cannot be None or empty')
        seen = 0
        for item in array:
            seen |= 1 << item
        for candidate in range(max_size):
            if not seen & (1 << candidate):
                return candidate
        return None
# -

# ## Unit Test

# +
# # %load test_new_int.py
# Minimal stand-ins for nose.tools (nose is unmaintained and does not run
# on modern Python); behavior matches the original assertions.
def assert_equal(actual, expected):
    assert actual == expected, f'{actual!r} != {expected!r}'


def assert_raises(exception, func, *args):
    try:
        func(*args)
    except exception:
        return
    raise AssertionError(f'{exception.__name__} was not raised')


class TestBits(object):

    def test_new_int(self):
        bits = Bits()
        max_size = 32
        assert_raises(TypeError, bits.new_int, None, max_size)
        assert_raises(TypeError, bits.new_int, [], max_size)
        data = [item for item in range(30)]
        data.append(31)
        assert_equal(bits.new_int(data, max_size), 30)
        data = [item for item in range(32)]
        assert_equal(bits.new_int(data, max_size), None)
        print('Success: test_find_int_excluded_from_input')


def main():
    test = TestBits()
    test.test_new_int()


if __name__ == '__main__':
    main()
# -

# ## Solution Notebook
#
# Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
sorting_searching/new_int/new_int_challenge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Example Notebook Illustrating `nbpuller` # ![pims-logo](http://media.pims.math.ca/logos/webvertlcaselarge.png) # This notebook demonstrates how to contstruct [nbgitpuller](https://github.com/data-8/nbgitpuller) links and some other bits and pieces. Each nbgitpuller URL consists of a jupyterhub server, a repository and (optionally) some parameters. # # ### Execute All of the following cells in order... import ipywidgets as widgets from IPython.display import display # The hub can be any valid JupyterHub URL as long as nbgitpuller has been installed there. hubs = { 'pims': 'https://pims.syzygy.ca/jupyter', 'cybera' : 'https://cybera.syzygy.ca/jupyter' } hub = widgets.Dropdown(options=hubs, description="Hub") hub # Specify a publically accessible github url, e.g. `https://github.com/pimsmath/public-notebooks` repo = widgets.Text(value='https://github.com/pimsmath/public-notebooks', description='GitHub repo', layout=widgets.Layout(width='80%')) repo # Optionally, you can specify a subPath within the repository (if you don't want to clone the whole repository). For example, this notebook has the filename `nbpuller-example1.ipynb` inside the `pimsmath/public-notebooks` repository on GitHub. subPath = widgets.Text(placeholder='', description='Sub path') subPath branch = widgets.Text(placeholder='master', description='Branch') branch # + def build_nbgitpuller_url(hub, repo, subPath, branch): url = '{}/user-redirect/git-pull?repo={}'.format(hub, repo) if subPath: url += '&subPath={}'.format(subPath) if branch: url += '&branch={}'.format(branch) print(url) out = widgets.interactive_output(build_nbgitpuller_url, {'hub': hub, 'repo': repo, 'subPath': subPath, 'branch': branch }) print("Here is the resulting URL:") display(out)
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd # + # load messages dataset messages = pd.read_csv(r'C:\Users\abello\Documents\Test Classification Project\data\disaster_messages.csv') # load categories dataset categories = pd.read_csv(r'C:\Users\abello\Documents\Test Classification Project\data\disaster_categories.csv') # Merge categories and messages data set on 'id' df = pd.merge(messages,categories, on='id') # create a dataframe of the 36 individual category columns categories = df['categories'].str.split(pat =";",n=36,expand=True) # select the first row of the categories dataframe row = categories.iloc[0] # use this row to extract a list of new column names for categories. # one way is to apply a lambda function that takes everything # up to the second to last character of each string with slicing category_colnames = list(row.str.split(pat = "-",expand =True)[0]) # rename the columns of `categories` categories.columns = category_colnames for column in categories: # set each value to be the last character of the string categories[column] = [x[-1] for x in categories[column]] # convert column from string to numeric categories[column] = pd.to_numeric(categories[column]) # drop the original categories column from `df` df.drop('categories', axis=1, inplace=True) # concatenate the original dataframe with the new `categories` dataframe df = pd.concat([df,categories],axis=1) df.head() #Change values of related to 1 where it's erronously inputed as 2 df['related'] = df['related'].replace(2,1) # - df['related'].unique() df.describe()
models/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:miniconda3-metabolic] # language: python # name: conda-env-miniconda3-metabolic-py # --- # + import os from glob import glob import funnel import yaml # + files = sorted(glob(f'{funnel.config.cache_catalog_dir}/*.yml')) data = {} files_w_missing_assets = [] for f in files: with open(f) as fid: data[f] = yaml.safe_load(fid) asset = data[f]['asset'] if not os.path.exists(asset): print(f'missing {asset}') files_w_missing_assets.append(f) if 'additional_coord_values' in data[f]: print(f) # - files_w_missing_assets for f in files_w_missing_assets: os.remove(f)
notebooks/_maintain-funnel-catalog.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A Python guessing game for jupyter
# Play the existing game with the first cell! The only things here that we did not talk about in Seminar 1 are:
# * isdigit() - a function to see if a string can be transformed into an integer without error
# * randInt from the random module - a function to create a random integer

# +
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# guessingGame
# A python number guessing game
#
# <NAME>
# <EMAIL>
#
# Created for ICS Python Training Series
# Spring 2019
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# This is a number guessing game. Python
# uses a random number generator to get
# a target between minVal and maxVal
# and then the guesser must guess this
# number using greater/less than clues.
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Load the modules we need
# Use random to get the random number
import random as rd

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Functions required
def guessChecker( guessVal ):
    # Function to verify that the guess is an integer.
    # Returns True for a digits-only string; otherwise warns and
    # returns False (note: rejects negative numbers, which is fine
    # here since the range starts at 1).
    if guessVal.isdigit() == True:
        return True
    else:
        print("Bad input, please use an integer")
        return False

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Background info
minVal=1
maxVal=100
goodGuesses=1
ANSWER=rd.randint( minVal, maxVal)
answerRight = 'Nope'

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Play the game
print( "Guess an integer such that", minVal, " <= X <= ", maxVal )
while answerRight == 'Nope':
    # Get the guess
    guess = input("\nEnter an integer: ")
    # Check to make sure the guess is an integer
    goodAns = guessChecker( guess )
    # Yes an integer
    if goodAns == True:
        guessInt = int( guess )
        # Check to see if guess is above the answer
        if guessInt > ANSWER:
            print("Answer is too high, please guess again")
            maxVal = guessInt
            goodGuesses = goodGuesses+1
            print( "Your current inclusive range is ", minVal, " - ", maxVal )
        elif guessInt < ANSWER:
            print("Answer is too low, please guess again")
            minVal = guessInt
            goodGuesses = goodGuesses+1
            print( "Your current inclusive range is ", minVal, " - ", maxVal )
        else:
            print("\nHurray, you guessed ", ANSWER, " in ", goodGuesses, " guesses! " )
            answerRight = 'Yep'
    # Not an integer
    else:
        print( "Please try again")
# -

# Can you add a range checker to the code using a function? Take an initial guess of 100000001: this is an integer and so will pass the current check, but it is out of the existing range. Your updated code should limit your guesses to the current range. (Note the maxVal is set to 10 here to make debugging easier.)

# +
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# guessingGame
# A python number guessing game
#
# Originally written: <NAME>
# <EMAIL>
#
# Modified by: YOUR NAME
# YOUR EMAIL ADDRESS
#
# Created for ICS Python Training Series
# Spring 2019
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# This is a number guessing game. Python
# uses a random number generator to get
# a target between minVal and maxVal
# and then the guesser must guess this
# number using greater/less than clues.
#
# MODIFICATION:
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Load the modules we need
# Use random to get the random number
import random as rd

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Functions required
def guessChecker( guessVal ):
    # Function to verify that the guess is an integer
    # (exercise: extend this, or add a second checker, to also reject
    # guesses outside the current minVal..maxVal range)
    if guessVal.isdigit() == True:
        return True
    else:
        print("Bad input, please use an integer")
        return False

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Background info
minVal=1
maxVal=10
goodGuesses=1
ANSWER=rd.randint( minVal, maxVal)
answerRight = 'Nope'

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Play the game
print( "Guess an integer such that", minVal, " <= X <= ", maxVal )
while answerRight == 'Nope':
    # Get the guess
    guess = input("\nEnter an integer: ")
    # Check to make sure the guess is an integer
    goodAns = guessChecker( guess )
    # Yes an integer
    if goodAns == True:
        guessInt = int( guess )
        # Check to see if guess is above the answer
        if guessInt > ANSWER:
            print("Answer is too high, please guess again")
            maxVal = guessInt
            goodGuesses = goodGuesses+1
            print( "Your current inclusive range is ", minVal, " - ", maxVal )
        elif guessInt < ANSWER:
            print("Answer is too low, please guess again")
            minVal = guessInt
            goodGuesses = goodGuesses+1
            print( "Your current inclusive range is ", minVal, " - ", maxVal )
        else:
            print("\nHurray, you guessed ", ANSWER, " in ", goodGuesses, " guesses! " )
            answerRight = 'Yep'
    # Not an integer
    else:
        print( "Please try again")
_build/jupyter_execute/Guessing Game.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # JSON-LD 1.0 Context issue # # This notebook demonstrates the basic problem with using JSON-LD 1.0 and the curies used in the [prefixcommons](https://github.com/prefixcommons) library. For the purposes of this demo, we will use the [biocontext](https://github.com/prefixcommons/biocontext) [monarch context](https://raw.githubusercontent.com/prefixcommons/biocontext/master/registry/monarch_context.jsonld). This is a serious problem because a not-insignificant portion of the prefixcommons libraries use prefixes that end in something other than "/" or "#". # # This issue exists because of a fix described in https://lists.w3.org/Archives/Public/public-rdf-comments/2018Jan/0002.html . Basically, the solution in JSON-LD 1.0 is, "if it doesn't look like a prefix, it isn't a prefix". # # The `@prefix` tag was added in the [JSON-LD 1.1 specification](https://w3c.github.io/json-ld-syntax/#compact-iris) to allow one to force a any string to be treated as a prefix. This, however, currently has to be done on a per-prefix basis: # ```json # { # "@context" : { # "CHEBI" : { # "@id": "http://purl.obolibrary.org/obo/CHEBI_", # "@prefix": true # } # } # } # ``` # # The problem with this approach is that [prefixcommons](https://github.com/prefixcommons) library users use _both_ the raw json _and_ the rdflib json-ld parser, meaning that, unless the prefixcommons parser is enhanced to recognize the expanded format, the above fix won't work. # # An [issue](https://github.com/w3c/json-ld-syntax/issues/329) has been filed suggesting (now requesting) that `@prefix` be allowed as a default on the entire context: # ```json # { # "@context" : { # "@prefix": true, # "CHEBI" : "http://purl.obolibrary.org/obo/CHEBI_" # ... 
# } # } # ``` # - # !pip install -q --disable-pip-version-check prefixcommons # !pip install -q --disable-pip-version-check rdflib # !pip install -q --disable-pip-version-check rdflib-jsonld # !pip install -q --disable-pip-version-check jsonasobj # + [markdown] pycharm={"name": "#%% md\n"} # ## Utilities # + pycharm={"name": "#%%\n"} from contextlib import closing from typing import Optional, Dict import requests from jsonasobj import loads from prefixcommons import curie_util from rdflib import Graph def fetch_pc_context(name: str) -> Optional[str]: """ Retrive the prefixcommons JSON-LD entry for name :param name: context name :return: String representation of JSON-LD context """ url = f"https://raw.githubusercontent.com/prefixcommons/biocontext/master/registry/{name}.jsonld" with closing(requests.get(url, stream=False)) as resp: if resp.status_code == 200: return resp.text else: print(f"Cannot fetch: {url}") def prefix_for(prefixes: Dict[str, str], prefix: str) -> str: """ Format the prefix entry in prefixes :param prefixes: map from prefix to URI :param prefix: prefix to map :return: result """ if prefix in prefixes: return f'@prefix {prefix}: <{prefixes[prefix]}> .' else: return f'*prefix: {prefix} not mapped' # - # When we load the context as a plain JSON-LD object, both the BIOGRID and CHEBI contexts are are aliases # + pycharm={"name": "#%%\n"} ctxt_str = fetch_pc_context('monarch_context') ctxt = loads(ctxt_str) print(f"Entry for BIOGRID is {ctxt['@context'].BIOGRID}") print(f"Entry for CHEBI is {ctxt['@context'].CHEBI}") # - # The prefix commons utility doesn't use the JSON-LD library, so both of the prefixes are represented # + pycharm={"name": "#%%\n"} curie_map = {k: v for k, v in curie_util.read_biocontext('monarch_context').items()} print(prefix_for(curie_map, 'BIOGRID')) print(prefix_for(curie_map, 'CHEBI')) # - # When we use the JSON-LD library, however, URI's that don't end in "#" or "/" are _not_ treated as prefixes (!) 
# + pycharm={"name": "#%%\n"} g = Graph() g.parse(data=ctxt_str, format="json-ld") prefixes = {k:v for k, v in g.namespaces()} print(prefix_for(prefixes, 'BIOGRID')) print(prefix_for(prefixes, 'CHEBI'))
notebooks/context_issue.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# # Update sklearn to prevent version mismatches
# # !pip install sklearn --upgrade

# +
# # install joblib. This will be used to save your model.
# # Restart your kernel after installing
# # !pip install joblib
# # !pip install yellowbrick
# -

import pandas as pd
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")

# # Read the CSV and Perform Basic Data Cleaning

# Read in csv
df = pd.read_csv("data/exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()

# # Select features (columns)

# Set target, features and feature_names.
target = df["koi_disposition"]
data = df.drop("koi_disposition", axis=1)
feature_names = data.columns
data.head()

# # Create a Train Test Split

# +
# Import Dependencies
# (duplicate train_test_split import removed)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier

X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=42)
# -

X_train.head()

# # Pre-processing

# Scale the data using the MinMaxScaler and perform some feature selection

# +
from sklearn.preprocessing import MinMaxScaler

# Fit the scaler on the training split only, then apply to both splits
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# -

# # Train the Model (K-nearest neighbors)

# +
# Create the KNN Model
# Loop through different k values to see which has the highest accuracy
# Note: We only use odd numbers because we don't want any ties
train_scores = []
test_scores = []
for k in range(1, 60, 2):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train_scaled, y_train)
    train_score = knn.score(X_train_scaled, y_train)
    test_score = knn.score(X_test_scaled, y_test)
    train_scores.append(train_score)
    test_scores.append(test_score)
    print(f"k: {k}, Train/Test Score: {train_score:.3f}/{test_score:.3f}")

plt.plot(range(1, 60, 2), train_scores, marker='o')
plt.plot(range(1, 60, 2), test_scores, marker="x")
plt.xlabel("k neighbors")
plt.ylabel("Testing accuracy Score")
# NOTE(review): filename mentions 'logistic' but this is the KNN k-sweep —
# kept as-is so downstream references to the image still resolve
plt.savefig("image/logistic_featureimportance.png")
plt.show()
# -

# Refit the final model with the chosen k.
# BUG FIX: the original fit/score here used the *unscaled* features even
# though k was selected on scaled data and later predictions use
# X_test_scaled; use the scaled features consistently.
knn = KNeighborsClassifier(n_neighbors=29)
knn.fit(X_train_scaled, y_train)
print('k=29 Test Acc: %.3f' % knn.score(X_test_scaled, y_test))

print(f"Training Data Score: {knn.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {knn.score(X_test_scaled, y_test)}")

knn.predict(X_test_scaled)[:10]

knn.predict_proba(X_test_scaled)[:10]

# # Hyperparameter Tuning
# Use GridSearchCV to tune the model's parameters

k_range = list(range(1, 60, 2))
print(k_range)

param_grid = dict(n_neighbors=k_range,
                  weights=['uniform', 'distance'],
                  metric=['euclidean', 'manhattan'])
print(param_grid)

# Create the GridSearchCV model (10-fold CV over k, weights and metric)
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(knn, param_grid, verbose=1, cv=10, n_jobs=-1)
grid.fit(X_train_scaled, y_train)

# List the best parameters for this dataset
print(f"Best Grid Parameters: {grid.best_params_}")

# List the best score
print(f"Best Grid Score: {grid.best_score_}")

# List the best estimator
print(f"Best Grid Estimator: {grid.best_estimator_}")

print(f"Training Grid Score: {grid.score(X_train_scaled, y_train)}")
print(f"Testing Grid Score: {grid.score(X_test_scaled, y_test)}")

# Make predictions with the hypertuned model
predictions = grid.predict(X_test_scaled)

knn.predict(X_test_scaled)[:10]

# Calculate classification report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))

# # %matplotlib notebook
from yellowbrick.classifier import ClassificationReport
viz = ClassificationReport(KNeighborsClassifier(), cmap="PuBu")
viz.fit(X_train_scaled, y_train)
viz.score(X_test_scaled, y_test)
viz.finalize()
viz.show(outpath="image/Knn_classifier.png")

# # Save the Model

# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'model_sav/ermias_gaga_knn.sav'
joblib.dump(knn, filename)
all_ML_code/.ipynb_checkpoints/KNN_model-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Introduction # Python provided three kinds of number system: # # 1. Decimal (base 10), no prefix # 2. Binary (base 2), prefix: 0b # 3. Octal (base 8), prefix: 0o # 4. Hexadecimal (base 16), 0~9, A, B, C, D, E, F, prefix 0x # + # Decimal print('Decimal:', 60) print('Binary:', 0b111100, type(0b111100)) print('Octmal:', 0o74, type(0o74)) print('Haxadecimal:', 0x3c, type(0x3c)) # - # # Convert # ## Decimal to Binary, Octmal and Haxadecimal # # In this program, we have used built-in functions **bin(), oct() and hex()** to convert the given decimal number into respective number systems. # # These functions take an **integer (in decimal)** and return a **string**. # + # Python program to convert decimal into other number systems dec = 344 print("The decimal value of", dec, "is:") print(bin(dec), "in binary and its type", type(bin(dec))) print(oct(dec), "in octal and its type", type(oct(dec))) print(hex(dec), "in hexadecimal and its type", type(hex(dec))) # - # ## Binary, Octmal and Haxadecimal to Decimal # # 1. We can print a value, and there is no need to convert it to decimal. Since print function prints the value in decimal format. # # 2. 
# 2. We still can use the **int()** method to convert them (string format)
#    into decimal by specifying the base of the number system.

dec = 156

print(f"Binary to Decimal: {bin(dec)} -> {int(bin(dec), 2)}")
print(f"Octal to Decimal: {oct(dec)} -> {int(oct(dec), 8)}")
# BUG FIX: output label typo corrected ("Haxadecimal" -> "Hexadecimal").
print(f"Hexadecimal to Decimal: {hex(dec)} -> {int(hex(dec), 16)}")


def binary_to_decimal(binary_num: str) -> int:
    """Convert a ``0b``/``0B``-prefixed binary string to its decimal value.

    Manual (pedagogical) equivalent of ``int(binary_num, 2)``.
    Returns ``None`` when the string does not start with a binary prefix.
    """
    if binary_num[:2] not in ['0b', '0B']:
        return None
    # Highest digit position = number of digits after the 2-char prefix, minus one.
    index = len(binary_num) - 3
    reg = 0
    for num in binary_num[2:]:
        reg += int(num) * (2 ** index)
        index -= 1
    return reg


binary_to_decimal(bin(dec))


def to_decimal(binary_num: str, base=2) -> int:
    """Convert a prefixed numeral string (``0b...``, ``0o...``, ...) to decimal.

    Generalisation of :func:`binary_to_decimal`; assumes a two-character
    prefix and digits valid for ``base`` (so it covers binary and octal).
    """
    index = len(binary_num) - 3
    reg = 0
    for num in binary_num[2:]:
        reg += int(num) * (base ** index)
        index -= 1
    return reg


to_decimal(bin(dec))
to_decimal(oct(dec), 8)


def hex_to_decimal(hex_num: str) -> int:
    """Convert a ``0x``-prefixed hexadecimal string to its decimal value.

    BUG FIX: the digits are lower-cased before lookup, so upper-case hex
    strings such as ``'0x3C'`` no longer raise ``KeyError``.
    """
    index = len(hex_num) - 3
    nums = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
            '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
            'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}
    reg = 0
    for num in hex_num[2:].lower():
        reg += nums[num] * (16 ** index)
        index -= 1
    return reg


hex_to_decimal(hex(dec))

# # Bitwise Operator
# Bitwise operators are used to compare (binary) numbers:
#
# | Operator | Name | Description|
# | -------- | :--: | :----------|
# | & | AND | Sets each bit to 1 if both bits are 1|
# | \| | OR | Sets each bit to 1 if one of two bits is 1|
# | ^ | XOR | Sets each bit to 1 if only one of two bits is 1 |
# | ~ | NOT | Inverts all the bits |
# | << | Zero fill left shift | Shift left by pushing zeros in from the right and let the leftmost bits fall off |
# | >> | Signed right shift | Shift right by pushing copies of the leftmost bit in from the left, and let the rightmost bits fall off |
Python/Number System.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.2 64-bit (''renv'': conda)' # name: python3 # --- # This notebook has been formatted with # # https://github.com/csurfer/blackcellmagic # %load_ext blackcellmagic # + import pandas as pd from main import R_MarketMatching as rmm # - ip_weather_data_path = '../notebooks/examples/datasets/weather.csv' weather_df = rmm.read_data(ip_weather_data_path) weather_df.head(2) mm_only_cph = rmm.best_matches( data=weather_df, id_variable="Area", date_variable="Date", markets_to_be_matched=["CPH"], matching_variable="Mean_TemperatureF", parallel=False, warping_limit=1, dtw_emphasis=1, matches=5, start_match_period="2014-01-01", end_match_period="2014-10-01", ) results_only_cph = rmm.inference( matched_markets=mm_only_cph, test_market="CPH", end_post_period="2015-10-01" ) mm_only_cph["best_matches"] results_only_cph['all_output_plots'](figsize=(15, 14))
notebooks/examples/r_lib_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="http://dask.readthedocs.io/en/latest/_images/dask_horizontal.svg" # align="right" # width="30%" # alt="Dask logo\"> # # # # Parallelize code with `dask.delayed` # # In this section we parallelize simple for-loop style code with Dask and `dask.delayed`. # # This is a simple way to use `dask` to parallelize existing codebases or build complex systems. This will also help us to develop an understanding for later sections. # ## Basics # # First let's make some toy functions, `inc` and `add`, that sleep for a while to simulate work. We'll then time running these functions normally. # # In the next section we'll parallelize this code. # + from time import sleep def inc(x): sleep(1) return x + 1 def add(x, y): sleep(1) return x + y # + # %%time # This takes three seconds to run because we call each # function sequentially, one after the other x = inc(1) y = inc(2) z = add(x, y) # - # ### Parallelize with `dask.delayed` decorator # # Those two increment calls *could* be called in parallel. # # In this section we call `inc` and `add`, wrapped with `dask.delayed`. This changes those functions so that they don't run immediately, but instead put those functions and arguments into a task graph. Now when we run our code this runs immediately, but all it does it create a graph. We then separately compute the result by calling the `.compute()` method. from dask import delayed # + # %%time # This runs immediately, all it does is build a graph x = delayed(inc)(1) y = delayed(inc)(2) z = delayed(add)(x, y) # + # %%time # This actually runs our computation using a local thread pool z.compute() # - # ## What just happened? # # The `z` object is a lazy `Delayed` object. This object holds everything we need to compute the final result. 
We can compute the result with `.compute()` as above or we can visualize the task graph for this value with `.visualize()`. z # Look at the task graph for `z` z.visualize() # ### Some questions to consider: # # - Why did we go from 3s to 2s? Why weren't we able to parallelize down to 1s? # - What would have happened if the inc and add functions didn't include the `sleep(1)`? Would Dask still be able to speed up this code? # - What if we have multiple outputs or also want to get access to x or y? # ## Exercise: Parallelize a for loop # # For loops are one of the most common things that we want to parallelize. Use `dask.delayed` on `inc` and `sum` to parallelize the computation below: data = [1, 2, 3, 4, 5, 6, 7, 8] # + # %%time # Sequential code results = [] for x in data: y = inc(x) results.append(y) total = sum(results) # - total # %%time # Your parallel code here... results = [] for x in data: # TODO total # <button data-toggle="collapse" data-target="#sol1" class='btn btn-primary'>Solution</button> # <div id="sol1" class="collapse"> # ```python # # %%time # results = [] # for x in data: # y = delayed(inc)(x) # results.append(y) # # total = delayed(sum)(results) # print(total) # Let's see what type of thing total is # total = total.compute() # print(total) # After it is computed... # ``` # ## Exercise: Parallelizing a for-loop code with control flow # # Often we want to delay only *some* functions, running a few of them immediately. This is especially helpful when those functions are fast and help us to determine what other slower functions we should call. This decision, to delay or not to delay, is usually where we need to be thoughtful when using `dask.delayed`. # # In the example below we iterate through a list of inputs. If that input is even then we want to call `inc`. If the input is odd then we want to call `double`. 
This `iseven` decision to call `inc` or `double` has to be made immediately (not lazily) in order for our graph-building Python code to proceed. # + def double(x): sleep(1) return 2 * x def is_even(x): return not x % 2 data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # + # %%time # Sequential code results = [] for x in data: if is_even(x): y = double(x) else: y = inc(x) results.append(y) total = sum(results) print(total) # - # %%time # Your parallel code here... # TODO: parallelize the sequential code above using dask.delayed # You will need to delay some functions, but not all # <button data-toggle="collapse" data-target="#sol2" class='btn btn-primary'>Solution</button> # <div id="sol2" class="collapse"> # ```python # results = [] # for x in data: # if is_even(x): # even # y = delayed(double)(x) # else: # odd # y = delayed(inc)(x) # results.append(y) # # total = delayed(sum)(results) # ``` # %time total.compute() total.visualize() # ### Some questions to consider: # # - What are other examples of control flow where we can't use delayed? # - What would have happened if we had delayed the evaluation of `is_even(x)` in the example above? # - What are your thoughts on delaying `sum`? This function is both computational but also fast to run. # ## Exercise: Parallelizing a Pandas Groupby Reduction # # In this exercise we read several CSV files and perform a groupby operation in parallel. We are given sequential code to do this and parallelize it with `dask.delayed`. # # The computation we will parallelize is to compute the mean departure delay per airport from some historical flight data. We will do this by using `dask.delayed` together with `pandas`. In a future section we will do this same exercise with `dask.dataframe`. # ### Prep data # # First, run this code to prep some data. You don't need to understand this code. # # This extracts some historical flight data for flights out of NYC between 1990 and 2000. 
The data is taken from [here](http://stat-computing.org/dataexpo/2009/the-data.html). This should only take a few seconds to run. # %run prep_data.py # ### Inspect data import os sorted(os.listdir(os.path.join('data', 'nycflights'))) # ### Read one file with `pandas.read_csv` and compute mean departure delay import pandas as pd df = pd.read_csv(os.path.join('data', 'nycflights', '1990.csv')) df.head() # What is the schema? df.dtypes # What originating airports are in the data? df.Origin.unique() # Mean departure delay per-airport for one year df.groupby('Origin').DepDelay.mean() # ### Sequential code: Mean Departure Delay Per Airport # # The above cell computes the mean departure delay per-airport for one year. Here we expand that to all years using a sequential for loop. from glob import glob filenames = sorted(glob(os.path.join('data', 'nycflights', '*.csv'))) # + # %%time sums = [] counts = [] for fn in filenames: # Read in file df = pd.read_csv(fn) # Groupby origin airport by_origin = df.groupby('Origin') # Sum of all departure delays by origin total = by_origin.DepDelay.sum() # Number of flights by origin count = by_origin.DepDelay.count() # Save the intermediates sums.append(total) counts.append(count) # Combine intermediates to get total mean-delay-per-origin total_delays = sum(sums) n_flights = sum(counts) mean = total_delays / n_flights # - mean # ### Parallelize the code above # # Use `dask.delayed` to parallelize the code above. Some extra things you will need to know. # # 1. Methods and attribute access on delayed objects work automatically, so if you have a delayed object you can perform normal arithmetic, slicing, and method calls on it and it will produce the correct delayed calls. # # ```python # x = delayed(np.arange)(10) # y = (x + 1)[::2].sum() # everything here was delayed # ``` # 2. Calling the `.compute()` method works well when you have a single output. 
When you have multiple outputs you might want to use the `dask.compute` function: # # ```python # >>> x = delayed(np.arange)(10) # >>> y = x ** 2 # >>> min, max = compute(y.min(), y.max()) # (0, 81) # ``` # # This way Dask can share the intermediate values (like `y = x**2`) # # So your goal is to parallelize the code above (which has been copied below) using `dask.delayed`. You may also want to visualize a bit of the computation to see if you're doing it correctly. from dask import compute # + # %%time sums = [] counts = [] for fn in filenames: # Read in file df = pd.read_csv(fn) # Groupby origin airport by_origin = df.groupby('Origin') # Sum of all departure delays by origin total = by_origin.DepDelay.sum() # Number of flights by origin count = by_origin.DepDelay.count() # Save the intermediates sums.append(total) counts.append(count) # Combine intermediates to get total mean-delay-per-origin total_delays = sum(sums) n_flights = sum(counts) mean = total_delays / n_flights # - mean # <button data-toggle="collapse" data-target="#sol3" class='btn btn-primary'>Solution</button> # <div id="sol3" class="collapse"> # ```python # # This is just one possible solution, there are # # several ways to do this using `delayed` # # sums = [] # counts = [] # for fn in filenames: # # Read in file # df = delayed(pd.read_csv)(fn) # # # Groupby origin airport # by_origin = df.groupby('Origin') # # # Sum of all departure delays by origin # total = by_origin.DepDelay.sum() # # # Number of flights by origin # count = by_origin.DepDelay.count() # # # Save the intermediates # sums.append(total) # counts.append(count) # # # Compute the intermediates # sums, counts = compute(sums, counts) # # # Combine intermediates to get total mean-delay-per-origin # total_delays = sum(sums) # n_flights = sum(counts) # mean = total_delays / n_flights # ``` # ### Some questions to consider: # # - How much speedup did you get? Is this how much speedup you'd expect? 
# - Experiment with where to call `compute`. What happens when you call it on `sums` and `counts`? What happens if you wait and call it on `mean`? # - Experiment with delaying the call to `sum`. What does the graph look like if `sum` is delayed? What does the graph look like if it isn't? # - Can you think of any reason why you'd want to do the reduction one way over the other?
a03_Dask/Collection/dask-tutorial-pydata-seattle-2017/01-dask.delayed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="5Kd4BuUhsApQ" # # Autocomplete Language Models # # Git: https://github.com/gyan42/autocomplete-ngram-model # + colab={"base_uri": "https://localhost:8080/"} id="Gr1kmLPSJiA7" outputId="f0a46328-75e4-41a5-ab2b-03dff54d8400" import os import math import random import numpy as np import pandas as pd import nltk import json from collections import Counter from collections import defaultdict from tqdm import tqdm from random import sample nltk.download('punkt') from functools import partialmethod # tqdm.__init__ = partialmethod(tqdm.__init__, disable=True) # + [markdown] id="iTrU8cV0m-cN" # ## Dataset # # Huggingface dataset is used. # - https://huggingface.co/datasets # - [Online viewer](https://huggingface.co/datasets/viewer/) # + [markdown] id="W6ufkGBvsZYo" # ## Requiremetns # + colab={"base_uri": "https://localhost:8080/"} id="aeLoUCDwCghi" outputId="4a5d7779-142d-4c51-9783-b3cf5d039f89" # !pip install datasets # + [markdown] id="_p4Is2A2sgK-" # ## Explore Huggingface Dataset # + colab={"base_uri": "https://localhost:8080/", "height": 514, "referenced_widgets": ["cbef3680ecf44e0186b427f840262b25", "b893b79714e04d41967a54bbe190b2a6", "<KEY>", "c1dba1111c0a4e0499e40065786856b9", "cc1271c68c2f43239c91542af2119ece", "<KEY>", "1ce78da3c6c84280a2a886e000bbab10", "6e9b9471604c41ebb39e98a1ab666481", "8da839d1542f4d91b0941bca9c52efeb", "<KEY>", "4b463f8ab2e84e7382eabe1d3ae050eb", "<KEY>", "<KEY>", "<KEY>", "f5a23afd91594068b1dce93ffd647e7e", "95ea4304d8804decaccf5494272e37d6", "e458e2314d48472b8c4c53efb7ac2cb3", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9c49a0c307d941539824b3aaa3e9d2e7", "<KEY>", "1bad2309e66248dbbb47289c03398e9a", "<KEY>", "<KEY>", "2d22d71c90494922aa04df47064ed8f3", "<KEY>", "9395657f9dfd42419c0a39e8d86b93ca", "<KEY>", "<KEY>", 
"5a59d0355dd846c6b4eed4267d747190", "<KEY>", "7de0edeeeea6403790ef80e4bf09ffe4", "<KEY>", "<KEY>", "<KEY>", "db6314c265ed434fa13be8d16009348e", "4d1fe6c588234005a6e5908e84047765", "<KEY>", "<KEY>", "173e6389da6146ca89d122b95320c4fb", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "d3289d5770fb468db29e1a77b0317c15", "c78c7ced2aa64266893310d47e664230", "08c50cd9544b49de8fe7614ca99e36d6", "<KEY>", "387dac2a78ce4b3da735fd856b89ff10", "<KEY>", "803a204383704ee98ebd7e804c4938a1", "<KEY>", "9a50ce3158264661b8e4da1c5d6cffce", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f43f3f74f9c94596b7b623e4eedc9eaf", "06bb6a9f162045f8a49c4b619c011e53", "616d6ef65fc447de886db1491b73c97a", "b430afe4dc3e449dba531aa0f64b98ae", "<KEY>", "beb7afc1f98a4544885fe50c3ad446fa", "b2fd7db9ad654fb7befee52315e2b532", "<KEY>", "<KEY>", "2ed1e374620841a384b14eb29763cfd6", "083ff98ee43d4003a7c417e2b86ad8e2", "<KEY>", "45536150effb4739b9e4a6b1a462fe71", "<KEY>", "b59d47384f254265835e1afacefa04c6", "6735ec5c7e1e4eb1952ed68d5c4e6745", "b20f090a95dd47f1bad9479e461dc206", "<KEY>", "<KEY>", "7f4a94159e024161b95410400e6359cf", "5b39b8740876488da813d874b7872633", "<KEY>", "780f5933f1d848caa2bab3da52125552", "<KEY>", "8229079a91414cd6b52b846b4e679fe1", "<KEY>", "1a3e37b08c7a455a8fa11e460e9ff8ea", "<KEY>", "32459676b30a44ee811d467a3eb050b7", "<KEY>", "e85e5c7b078143af9b161be36bea4344", "a38b12fbe69b4f8da225b42e19eda415", "0add852cc45645aa95c7dbe1272e58b1", "<KEY>", "<KEY>", "<KEY>", "ceae1d040a974970877996a509985e52", "bae977aa015e43f3904ec44e5faad64e"]} id="Llcd5sUwvd5T" outputId="01fa7eaa-75ec-4142-91eb-889b3ca00768" from datasets import load_dataset dataset = load_dataset('google_wellformed_query') dataset # + [markdown] id="vXkirEhesmTm" # ## Dataset Class Wrapper # + colab={"base_uri": "https://localhost:8080/", "height": 136, "referenced_widgets": ["b5fcfdf2b03745558d39b848c7fc1f0f", "41a6b12e717d496582914f57b19a0ea4", "1c59758a76ff49a390a1d3d258d40fb4", "a6971f05b6324b71b52b52ce69ec4ff4", "<KEY>", 
"513da21ab5e04768ae48e6500b572d38", "11a501530bad46daa32f4e16574160c8", "2fa5d8ee8fd941e8974ec24d30644aff", "<KEY>", "dbe73eff82ba4d23b50af03a1c261d80", "fbe467da1da04bfb92b8ee5713468962"]} id="8DTHvs3oEDbn" outputId="c9d66f5d-ceae-4c64-92a5-ceb741c77fbf" class Dataset(object): def __init__(self, sample_ratio=1.0): self.lines = [] # dataset = load_dataset('amazon_reviews_multi', 'en') # self.lines = dataset['train']['review_body'] # dataset_2 = load_dataset('wikitext', 'wikitext-103-raw-v1') # self.lines = self.lines + dataset_2['train']['text'] dataset = load_dataset('google_wellformed_query') self.lines = self.lines + dataset['train']['content'] self.lines = self.lines + dataset['test']['content'] self.lines = self.lines + dataset['validation']['content'] self.lines = list(filter(lambda line: len(line) > 0, self.lines)) print("Total number of examples : ", len(self.lines)) self.lines = sample(self.lines, int(sample_ratio * len(self.lines))) print("Sampled data examples count: ", len(self.lines)) ds = Dataset() # + colab={"base_uri": "https://localhost:8080/"} id="m_taQY66Ey4y" outputId="16489419-b7d2-416a-cc81-020bdecab365" sample(ds.lines, 10) # + [markdown] id="RG3kEDcJssAV" # ## AutoCorrect Model # + [markdown] id="TSKquj1O6jjL" # # + id="I0yVwJOFJoqB" class AutoCorrectModel(object): def __init__(self, unknown_token='<unk>', start_token='<s>', end_token='<e>', k=1): self._unknown_word = '<unk>' self._start_token = start_token self._end_token = end_token self._tokenized_sentences = None self._k = k # smoothing prameter self._word_frequency = Counter() # keys are the closed vocab self._ngram_word_frequency = defaultdict(lambda: 0) self._ngram_plus1_word_frequency = defaultdict(lambda: 0) self._no_match_threshold = 5 def tokenize(self, sentences): # Tokenize the sentences self._tokenized_sentences = [nltk.word_tokenize(sentence.lower()) for sentence in tqdm(sentences, desc="Tokenize")] def train(self, minimum_freq=5, ngram=3): self._minimum_freq = minimum_freq 
self._ngram = ngram # Prepare word vocab through frequency counter self._calculate_word_frequency() self._vocab = list( self._word_frequency.keys()) + [self._unknown_word, "<e>"] #self._filter_low_freq_words() # TODO enable to simulate unknown words # Normalize data i.e replace less frequent words with unknown tag self._tokenized_sentences = self._tokenize_n_normalize(self._tokenized_sentences) # Prepare ngram word frequency self._ngram_word_frequency = self._count_n_grams(self._tokenized_sentences, self._ngram) self._ngram_plus1_word_frequency = self._count_n_grams(self._tokenized_sentences, self._ngram+1) def _calculate_word_frequency(self): ''' Counts word counts ''' for tokenized_sentence in tqdm(self._tokenized_sentences, desc="Word Frequency"): self._word_frequency.update(tokenized_sentence) def _filter_low_freq_words(self): ''' Filter words whose count are less than threshold ''' words = self._word_frequency.keys() words_to_be_deleted = [] for word in words: if self._word_frequency[word] < self._minimum_freq: words_to_be_deleted.append(word) for word in words_to_be_deleted: del self._word_frequency[word] def _tokenize_n_normalize(self, tokenized_sentences): ''' Remove all words which not part of vocab and replace it with unknown tag ''' new_sentences = [] for sentence in tqdm(tokenized_sentences, desc="Normalize"): new_sentence = [] for token in sentence: if self._word_frequency[token] != 0: new_sentence.append(token) else: new_sentence.append(self._unknown_word) new_sentences.append(new_sentence) return new_sentences def _count_n_grams(self, tokenized_sentences, ngram): ''' Creates n-gram from tokenized sentence and counts the same ''' freq = defaultdict(lambda: 0) for sentence in tqdm(tokenized_sentences, desc="NGrams"): sentence = [self._start_token] * ngram + sentence + [self._end_token] m = len(sentence) if ngram == 1 else len(sentence) - 1 for i in range(m): ngram_token = sentence[i:i+ngram] #freq[tuple(ngram_token)] += 1 # tuples can't be used as key 
in JSON freq[" ".join(ngram_token)] += 1 return freq def _estimate_probability(self, word, previous_ngram): vocab_size = len(self._word_frequency) #previous_ngram = tuple(previous_ngram) if type(previous_ngram) != list: previous_ngram = [previous_ngram] previous_ngram = " ".join(previous_ngram) previous_ngram_count = self._ngram_word_frequency.get(previous_ngram, 0) if previous_ngram_count == 0: # print("Warning no match found for entered words!") return 0 denominator = previous_ngram_count + self._k * len(self._vocab) n_plus1_gram = previous_ngram + " " + word n_plus1_gram_count = self._ngram_plus1_word_frequency.get(n_plus1_gram, 0) numerator = n_plus1_gram_count + self._k probability = numerator / denominator return probability def _estimate_probabilities(self, previous_ngram): probabilities = {} # previous_n_gram = tuple(previous_n_gram) if type(previous_ngram) != list: previous_ngram = [previous_ngram] previous_ngram = " ".join(previous_ngram).lower() for word in self._vocab: probabilities[word] = self._estimate_probability(word, previous_ngram) return probabilities def suggestions(self, previous_tokens, num_suggestions=5, start_with=None): """ previous_tokens: N-Gram tuple of previous tokens num_sugestions : start_with: """ previous_ngram = previous_tokens[-self._ngram:] probabilities = self._estimate_probabilities(previous_ngram) probs = probabilities.items() probs = filter(lambda t: t[1]>0, probs) if start_with: probs = filter(lambda t: t[0].startswith(start_with), probs) probs = sorted(probs, key=lambda t: t[1], reverse=True) words = map(lambda t: t[0], probs) words = list(words) return words[:num_suggestions] def save_as_json(self, name): data = {} data["ngram_word_frequency"] = self._ngram_word_frequency #json.dumps(self._ngram_word_frequency, indent = 4) data["ngram_plus1_word_frequency"] = self._ngram_plus1_word_frequency #json.dumps(self._ngram_plus1_word_frequency, indent = 4) data["vocab"] = self._vocab data["ngram"] = self._ngram with open(name, 
"w", encoding='utf-8') as file: json.dump(data, file, ensure_ascii=False, indent=4) def load_from_json(self, file_path): data = json.load(open(file_path)) self._ngram_word_frequency = data["ngram_word_frequency"] self._ngram_plus1_word_frequency =data["ngram_plus1_word_frequency"] self._vocab = data["vocab"] self._ngram = data["ngram"] # + [markdown] id="jKINil_gwCos" # ## Testing the Model # + id="XvX7FABbkmNa" test_lines = ['i like a cat', 'this dog is like a cat'] # + colab={"base_uri": "https://localhost:8080/"} id="5-IlrG45HPW1" outputId="afb17055-3940-4b74-a3ef-af2fde1b3065" model = AutoCorrectModel() model.tokenize(test_lines) model.train(minimum_freq=1, ngram=1) # + id="iHT_etFHD3o3" model.save_as_json("test.json") # + colab={"base_uri": "https://localhost:8080/"} id="o-iyVATBTlM9" outputId="4e894084-61f9-440d-f1fc-28f9f2212b55" model._estimate_probability("cat", "a") # + colab={"base_uri": "https://localhost:8080/"} id="deXzFvRZtbr2" outputId="fd6c6e72-bdb5-49f7-e25a-e6831f2ce220" model._estimate_probability("like", "i") # + colab={"base_uri": "https://localhost:8080/"} id="Owj7p3bMt-0f" outputId="20758839-19b4-4638-c212-29250996fc9d" model._estimate_probabilities("a") # + colab={"base_uri": "https://localhost:8080/"} id="vDhg6nbxufsd" outputId="8caa2342-fb95-4cd2-f72c-cc8420f5c9c6" model.suggestions(["i", "like"]) # + colab={"base_uri": "https://localhost:8080/"} id="S5cu3dbwq_Mk" outputId="06be9746-afe4-470a-98c6-a9cd08acf8e3" model.suggestions(["i", "like"], start_with="c") # + colab={"base_uri": "https://localhost:8080/"} id="L0cWcVymQJK8" outputId="dc06a348-1394-4d48-a4b7-4154e604603d" model.suggestions(["i", "dont"], start_with="c") # + colab={"base_uri": "https://localhost:8080/"} id="YHr1gl30v9tg" outputId="c737b60c-ae2b-4be8-f41d-afd0d0f726de" model = AutoCorrectModel() model.tokenize(test_lines) model.train(minimum_freq=1, ngram=2) model._estimate_probabilities(["<s>", "<s>"]) # + colab={"base_uri": "https://localhost:8080/"} id="eS2KNB-Tw2L0" 
outputId="dba91bcc-acb4-4661-8d53-be8135385d1a" model.suggestions(["i", "like"], start_with="c") # + [markdown] id="VrkYElXr2nQo" # ## google_wellformed_query dataset # + colab={"base_uri": "https://localhost:8080/"} id="G3FHh9wjHmdG" outputId="19c349ee-d275-48e4-dffb-82bb3d5a8e62" model = AutoCorrectModel() model.tokenize(ds.lines) # + [markdown] id="dzufWOWJWN7r" # BiGram Model # + colab={"base_uri": "https://localhost:8080/"} id="_TpuPH7Zymoq" outputId="844c90ac-be45-4755-8a5c-7fe04d95989a" model.train(minimum_freq=0, ngram=2) model.save_as_json("bigram-autocompleter.json") # + colab={"base_uri": "https://localhost:8080/"} id="L2DzymOO-0dF" outputId="63b45e3d-3999-4934-8fc7-9f0f92e89274" # %%time model.suggestions(["what", "is"], start_with="c") # + colab={"base_uri": "https://localhost:8080/"} id="9MM0CFrzAwCU" outputId="b8ce4b2f-d018-41da-e3c5-45447abb1d35" model.suggestions(["What", "is"]) # + colab={"base_uri": "https://localhost:8080/"} id="ZgbxEDzN_In5" outputId="fc7da756-d986-4f47-9088-2b3147b2aa6f" model.suggestions(["how", "to"]) # + colab={"base_uri": "https://localhost:8080/"} id="Rz8DCU5v_OX2" outputId="1f44df1d-2cba-4162-fe35-61221c4240ad" model.suggestions(["i", "like", "a", "great"]) # + colab={"base_uri": "https://localhost:8080/"} id="Y_G1X_lW_WzM" outputId="07230c01-ab75-4be9-c48d-16eac955f1ee" start_tokens = ["what", "is", "crazy"] model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="XJW4_YVpBVZ9" outputId="fb8fd0cd-916e-40e0-8d59-8ff0d0cc8471" start_tokens = ["how", "are"] model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="V290mAGzCKoX" outputId="381e2717-5a3b-4565-f2d3-83661d4f200c" start_tokens = ["what", "is"] model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="463JhBscCUZp" outputId="3f624c4d-1458-4fac-d285-8faf11e705d0" start_tokens = ["where", "is"] model.suggestions(start_tokens) # + [markdown] id="avbY0pXCWodd" # TriGram Model # + 
colab={"base_uri": "https://localhost:8080/"} id="vj-FBM2KHaV0" outputId="c23eb241-acc2-4e5c-8bf2-9ac9eafefaba" model.train(minimum_freq=1, ngram=3) model.save_as_json(name="trigram-autocompleter.json") # + colab={"base_uri": "https://localhost:8080/"} id="YOYejbcUX43C" outputId="8813aa33-5ecf-469f-8160-8eead2b02117" start_tokens = ["<s>", "<s>", "how"] model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="UOTfV17bH1CF" outputId="a1ec6914-88b5-42d7-fe4e-2dda5d6c9d42" start_tokens = ["how", "many", "pairs", "of"] model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="6IkAXxVBC2ow" outputId="4d901331-db05-4146-969f-622703eebc8e" start_tokens = ["how", "did", "they"] model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="mA_X4s8F2lqg" outputId="6c28a6d6-cab3-4259-c2d9-35331f661e93" start_tokens = ["who", "got", "the"] model.suggestions(start_tokens) # + [markdown] id="cXej0hs8ZijS" # # Model with Apache Spark # + [markdown] id="mP0O21K-wpCq" # Requiremetns # + colab={"base_uri": "https://localhost:8080/"} id="oUkuoOM5ZVzM" outputId="9a56e35e-9026-4823-e699-f49fd8fbe64b" # !pip install pyspark # !wget -q https://dlcdn.apache.org/spark/spark-3.2.0/spark-3.2.0-bin-hadoop3.2.tgz # !tar xf spark-3.2.0-bin-hadoop3.2.tgz # !apt-get install openjdk-8-jdk-headless -qq > /dev/null # !pip install -q findspark # + id="yX7rSDZQdHx5" # #!wget http://setup.johnsnowlabs.com/colab.sh -O - | bash # + [markdown] id="pVmLF2KvwukZ" # Spark Environment Setup # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="xUvh0u99kwxs" outputId="da58d8a7-9049-47f3-beb8-4e06a56df328" import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-3.2.0-bin-hadoop3.2" import findspark findspark.init() findspark.find() # + [markdown] id="Yb-o3UcOwkuV" # Spark Imports # + id="AD3oNC-DZXcs" from pyspark.sql import SparkSession from 
pyspark.ml.feature import Tokenizer from pyspark.ml.feature import NGram import pyspark.sql.functions as F # + colab={"base_uri": "https://localhost:8080/", "height": 214} id="-nJlnU0dZ1Qd" outputId="26054b46-5f3f-4a30-88dd-4d3cadb11713" spark = SparkSession.builder.master("local[*]").config('spark.ui.port', '4050').getOrCreate() spark # + [markdown] id="VhHxe_RPpun5" # **Setup Spark UI Tunneling** # + colab={"base_uri": "https://localhost:8080/"} id="LD2NLEVs7AE7" outputId="4c45461e-1399-4193-ccab-cdc46c748559" # !wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip # !unzip ngrok-stable-linux-amd64.zip get_ipython().system_raw('./ngrok http 4050 &') # !curl -s http://localhost:4040/api/tunnels # + id="KidijoPlZ-Es" class SparkAutoCorrectModel(object): def __init__(self, spark, dataset, ngram=2): self._spark = spark self._df = spark.createDataFrame(pd.DataFrame({"text": ds.lines})) self._ngram = ngram self._tokenizer = Tokenizer(inputCol="text", outputCol="words") self._ngram_transformer = NGram(n=ngram, inputCol="words", outputCol="ngrams") self._ngramplus1_transformer = NGram(n=ngram+1, inputCol="words", outputCol="ngram_plus_one") def transform(self): df_tokenized = self._tokenizer.transform(self._df) ngram_df = self._ngram_transformer.transform(df_tokenized) ngram_df = self._ngramplus1.transform(ngram_df) ngram_df.show() self._ngram_df = ngram_df def save_as_json(self, file_path): vocab = self._ngram_df.select(F.explode("words").alias("vocab")).collect() vocab = {row['vocab'] for row in vocab} vocab = list(vocab) # https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.functions.explode.html # Returns a new row for each element in the given array or map. 
ngram = self._ngram_df.select(F.explode("ngrams").alias("ngram")).groupBy("ngram").count().collect() ngram_word_frequency = {row['ngram']: row['count'] for row in ngram} ngram_plus_one = self._ngram_df.select(F.explode("ngram_plus_one").alias("ngram_plus_one_")).groupBy("ngram_plus_one_").count().collect() ngram_plus1_word_frequency = {row['ngram_plus_one_']: row['count'] for row in ngram_plus_one} data = {} data['ngram'] = self._ngram data['vocab'] = vocab data['ngram_word_frequency'] = ngram_word_frequency data['ngram_plus1_word_frequency'] = ngram_plus1_word_frequency with open(file_path, "w", encoding='utf-8') as file: json.dump(data, file, ensure_ascii=False, indent=4) # + id="ps0strtW-Ieg" spark_model = SparkAutoCorrectModel(spark=spark, dataset=ds) # + colab={"base_uri": "https://localhost:8080/"} id="3_TqVhAs83f7" outputId="10d619fd-f672-4280-c3dd-cb1d35276fcf" spark_model.transform() # + id="KTBF_CUc9O4S" spark_model.save_as_json("spark-autocomplete.json") # + [markdown] id="wr7Tbuc1_gjg" # Load the json created with Spark and test the suggestions # + id="usDBWgP_E9sJ" auto_correct_model = AutoCorrectModel() auto_correct_model.load_from_json("spark-autocomplete.json") # + colab={"base_uri": "https://localhost:8080/"} id="DRC5-2BVFnJ3" outputId="662730c2-bcac-48a6-e05a-b72db6297afc" start_tokens = ["how", "did", "they"] auto_correct_model.suggestions(start_tokens) # + colab={"base_uri": "https://localhost:8080/"} id="gAsM66cl4i79" outputId="3c20e914-478a-47eb-9e7d-b9313a86f14c" start_tokens = ["where", "is", "oil"] auto_correct_model.suggestions(start_tokens) # + [markdown] id="lfvw76hz_oDa" # ## Evaluation # + [markdown] id="xHK7x1OD_qQY" # TODO : https://towardsdatascience.com/perplexity-intuition-and-derivation-105dd481c8f3#:~:text=In%20general%2C%20perplexity%20is%20a,way%20to%20evaluate%20language%20models. 
# + [markdown] id="ShqtZWCY7NIF" # ## References # # - https://www.analyticsvidhya.com/blog/2020/11/a-must-read-guide-on-how-to-work-with-pyspark-on-google-colab-for-data-scientists/ # + id="KlSKovw-7PHJ"
AutoCorrectModel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CBS

# all imports
from instaloader import Post
import pandas as pd
import sklearn as sk
import os
import instaloader
import sys
import re
import numpy as np


# +
# functions
def outputCSV(dataset, filename):
    """Write `dataset` to the shared data folder as a ;-separated CSV."""
    dataset.to_csv("../../data/" + filename, sep=';')


def cprint(text):
    """Overwrite the current console line (simple progress output)."""
    sys.stdout.write("\r" + text)
    sys.stdout.flush()


# +
# retrieve data
insta = pd.read_csv('../../data/Coosto_berichten.csv', delimiter=';')

# remove empty columns
insta = insta.drop(['zoekopdracht', 'type', 'titel'], axis=1)

# check with count()
insta.count()


# +
# Download all posts from instagram using an array of urls
def get_posts(urls):
    """Fetch every post; returns {shortcode: Post} for the urls that still exist."""
    posts_dict = {}
    total_length = len(urls)
    # PERF FIX: create the Instaloader session once, not once per URL.
    L = instaloader.Instaloader()
    for index, url in enumerate(urls):
        shortcode = url.split("/")[-2]
        try:
            post = Post.from_shortcode(L.context, shortcode)
            posts_dict[shortcode] = post
        except Exception:
            # Best effort: deleted/private posts are skipped here and later
            # dropped via get_non_exsisting_posts(). (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass
        cprint("Getting posts " + str(round((index / total_length) * 100)) + "% completed")
    return posts_dict


# Get the indexes of the posts which do not exist anymore
def get_non_exsisting_posts(dataset, posts_dict):
    indexes_to_drop = []
    for index, row in dataset.iterrows():
        shortcode = row['url'].split("/")[-2]
        if not shortcode in posts_dict:
            indexes_to_drop.append(index)
    return indexes_to_drop


# Delete posts from the dataset based on an array of indexes
def del_posts(data, indexes_to_drop):
    # Single vectorised drop instead of one DataFrame copy per index.
    return data.drop(index=indexes_to_drop, axis=0)


# Enrich dataset with like count
def add_like_count_to_dataset(dataset, posts_dict):
    for index, row in dataset.iterrows():
        shortcode = row['url'].split("/")[-2]
        if shortcode in posts_dict:
            dataset.at[index, 'likes count'] = posts_dict[shortcode].likes
    return dataset


# Adds utc date to the dataset
def add_date_utc(dataset, posts_dict):
    for index, row in dataset.iterrows():
        shortcode = row['url'].split('/')[-2]
        if shortcode in posts_dict:
            dataset.at[index, 'datum utc'] = posts_dict[shortcode].date_utc
    return dataset


# Refreshes comment count
def refresh_comment_count(dataset, posts_dict):
    for index, row in dataset.iterrows():
        shortcode = row['url'].split('/')[-2]
        if shortcode in posts_dict:
            dataset.at[index, 'discussielengte'] = posts_dict[shortcode].comments
    return dataset


# Refreshes view count
def refresh_views(dataset, posts_dict):
    for index, row in dataset.iterrows():
        shortcode = row['url'].split('/')[-2]
        if shortcode in posts_dict:
            # BUG FIX: the original copied `.likes` into the views column.
            # Instaloader exposes views as `video_view_count`, which is None
            # for non-video posts, so only overwrite when a count exists.
            views = posts_dict[shortcode].video_view_count
            if views is not None:
                dataset.at[index, 'views'] = views
    return dataset


# Cleans invalid urls and enriches with like count, date utc, comment count and view count
def clean_und_enrich(dataset):
    posts_dict = get_posts(dataset['url'])
    indexes_to_drop = get_non_exsisting_posts(dataset, posts_dict)
    dataset = del_posts(dataset, indexes_to_drop)
    dataset = add_like_count_to_dataset(dataset, posts_dict)
    dataset = add_date_utc(dataset, posts_dict)
    dataset = refresh_comment_count(dataset, posts_dict)
    dataset = refresh_views(dataset, posts_dict)
    cprint('\nInvalid urls found: ' + str(len(indexes_to_drop)))
    return dataset


def improve_sentiment(dataset):
    # NOTE(review): NaN sentiment is replaced by the *string* '0', so the
    # column ends up mixing strings and numbers -- verify downstream usage.
    dataset['sentiment'] = dataset['sentiment'].replace(np.nan, '0')
    return dataset


def isolate_hashtag(data):
    """Move all #hashtags out of 'bericht tekst' into a new 'hashtags' column."""
    total_hashtags = []
    # PERF FIX: compile once instead of re-parsing the pattern per row.
    pattern = re.compile(r"#(\w+)")
    for index, row in data.iterrows():
        text = row['bericht tekst']
        # find all hashtags in text and isolate them in new column
        total_hashtags.append(pattern.findall(text))
        # remove hashtags from text
        data.at[index, 'bericht tekst'] = pattern.sub(r'', text)
    data['hashtags'] = total_hashtags
    return data


def remove_emoji(data):
    """Strip emoji from the post text; drop rows that were emoji-only."""
    indexes_to_drop = []
    # TODO: extend with more emoji ranges
    # PERF FIX: compile the pattern once instead of once per row.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags=re.UNICODE)
    for index, row in data.iterrows():
        text = row['bericht tekst']
        new_value = emoji_pattern.sub(r'', text)
        new_value = new_value.replace('🥗', '')  # not covered by the ranges above
        if new_value == '':
            # The post contained nothing but emoji -- drop it entirely.
            indexes_to_drop.append(index)
        else:
            data.at[index, 'bericht tekst'] = new_value
    data = del_posts(data, indexes_to_drop)
    return data


# +
insta = improve_sentiment(insta)
insta = remove_emoji(insta)
insta = clean_und_enrich(insta)
insta = isolate_hashtag(insta)

# Resets index
insta.index = range(len(insta))
# -

insta.head(20)

# output new cleaned dataset
data = insta
outputCSV(data, "cleaned.csv")

# text analyse d.m.v. machine_learning
posts = pd.read_csv("../../data/cleaned.csv")
cleaning/personal/demian.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="q_xK6nyKQmFY"
# [Source code]('https://www.mygreatlearning.com/blog/pos-tagging/')
#
# The content is essentially unchanged from the source; only the ordering and
# some comments were adjusted.

# + [markdown] id="Yl-vUsmLJ4OP"
# # HMM

# + id="gwAF6DOTKC96"
# Importing libraries
import nltk
import numpy as np
import pandas as pd
import random
from sklearn.model_selection import train_test_split
import pprint, time

# + colab={"base_uri": "https://localhost:8080/"} id="Q80QoAwjKKBq" outputId="0bc89086-3ba5-4674-f66f-24d1a4c8263b"
nltk.download('treebank')
nltk.download('universal_tagset')

# + colab={"base_uri": "https://localhost:8080/"} id="YXKbf9rLJwKG" outputId="e9c8cd8d-965c-4966-b0ac-3f7eab96a2ba"
nltk_data = list(nltk.corpus.treebank.tagged_sents(tagset='universal'))
print(nltk_data[:2])

# + id="MMFOyclEJ4az"
# for sent in nltk_data[:2]:
#     for tuple in sent:
#         print(tuple)

# + [markdown] id="HlEB4yefPXW6"
# # train/test

# + id="UnNnfkmNJ4dJ"
train_set, test_set = train_test_split(nltk_data, train_size=0.80, test_size=0.20, random_state=101)

# + colab={"base_uri": "https://localhost:8080/"} id="MTo-0TKsJ4fn" outputId="4aba7d3e-10b7-49ea-95f0-e30fa41ac659"
train_tagged_words = [tup for sent in train_set for tup in sent]
test_tagged_words = [tup for sent in test_set for tup in sent]
print(len(train_tagged_words))
print(len(test_tagged_words))

# + colab={"base_uri": "https://localhost:8080/"} id="4gRsPwb9Ofqf" outputId="8126eefc-443a-4f98-eb23-cc39ae792068"
train_tagged_words[0]

# + [markdown] id="VKQEljT7PLIY"
# # Tags / vocabulary

# + colab={"base_uri": "https://localhost:8080/"} id="Ic0faPn1N7ny" outputId="fa821102-dd2d-46c8-d683-1b27d969c843"
tags = {tag for word, tag in train_tagged_words}    # a set removes duplicates
vocab = {word for word, tag in train_tagged_words}
print(len(tags))
print(tags)

# + id="Xp12SXUITk62"


# + id="TRfwoWQ_OB7r"
# Emission Probability
def word_given_tag(word, tag, train_bag=train_tagged_words):
    """Return (count of `word` carrying `tag`, total count of `tag`)."""
    tag_list = [pair for pair in train_bag if pair[1] == tag]             # pairs with this tag
    w_given_tag_list = [pair[0] for pair in tag_list if pair[0] == word]  # the same word among them
    count_tag = len(tag_list)
    count_w_given_tag = len(w_given_tag_list)
    return (count_w_given_tag, count_tag)


# + id="YWdovIZ_OCFT"
# Transition Probability
def t2_given_t1(t2, t1, train_bag=train_tagged_words):
    """Return (count of tag bigram t1 -> t2, total count of t1)."""
    tags = [pair[1] for pair in train_bag]        # all tags, in corpus order
    count_t1 = len([t for t in tags if t == t1])  # number of tokens tagged t1
    count_t2_t1 = 0
    for index in range(len(tags) - 1):
        if tags[index] == t1 and tags[index + 1] == t2:  # count t1 -> t2 transitions
            count_t2_t1 += 1
    return (count_t2_t1, count_t1)


# + colab={"base_uri": "https://localhost:8080/"} id="tEWhO8oiQ0sr" outputId="81f329c4-efa9-4976-a04d-747d566c4800"
tags_matrix = np.zeros((len(tags), len(tags)), dtype='float32')
for i, t1 in enumerate(list(tags)):
    for j, t2 in enumerate(list(tags)):
        # PERF FIX: call t2_given_t1 once per pair; the original called it
        # twice (once for the numerator, once for the denominator), doubling
        # the full-corpus scans.
        count_t2_t1, count_t1 = t2_given_t1(t2, t1)
        tags_matrix[i, j] = count_t2_t1 / count_t1

print(tags_matrix)

# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="Ow0149DIQ0vJ" outputId="60ff200f-e834-4ff1-8ef2-3408e9b9248f"
# Transition table: rows are the previous state, columns the current state.
tags_df = pd.DataFrame(tags_matrix, columns=list(tags), index=list(tags))
display(tags_df)

# + id="dcIlXnOKcOBe"
# list(set([pair[1] for pair in train_tagged_words]))


# + id="t_8HbA8MQ0xi"
def Viterbi(words, train_bag=train_tagged_words):
    """Greedy HMM decoder: per word, pick the tag maximising transition * emission."""
    state = []
    T = list(set([pair[1] for pair in train_bag]))
    for key, word in enumerate(words):
        p = []
        for tag in T:
            if key == 0:
                # First word: treat the previous state as '.' (sentence boundary).
                transition_p = tags_df.loc['.', tag]
            else:
                transition_p = tags_df.loc[state[-1], tag]
            # HMM score = emission probability * transition probability.
            # PERF FIX: compute word_given_tag once (the original called it
            # twice per (word, tag), doubling the corpus scans).
            count_w_tag, count_tag = word_given_tag(words[key], tag)
            emission_p = count_w_tag / count_tag
            state_probability = emission_p * transition_p
            p.append(state_probability)
        pmax = max(p)
        state_max = T[p.index(pmax)]
        state.append(state_max)
    return list(zip(words, state))


# + id="2r-B6PN4Q00E"
random.seed(1234)
rndom = [random.randint(1, len(test_set)) for x in range(10)]
test_run = [test_set[i] for i in rndom]
test_run_base = [tup for sent in test_run for tup in sent]
test_tagged_words = [tup[0] for sent in test_run for tup in sent]

# + colab={"base_uri": "https://localhost:8080/"} id="1FFXCCzpSb3k" outputId="d9f48e74-192e-4b58-9fba-d254eecdeca7"
len(rndom)

# + colab={"base_uri": "https://localhost:8080/"} id="e52O2L-AQ02V" outputId="e254d3b6-de2f-4a6b-dc4f-d2dcf9bb2c8b"
start = time.time()
tagged_seq = Viterbi(test_tagged_words)
end = time.time()
difference = end - start
print("Time taken in seconds: ", difference)

# accuracy
check = [i for i, j in zip(tagged_seq, test_run_base) if i == j]
accuracy = len(check) / len(tagged_seq)
print('Viterbi Algorithm Accuracy: ', accuracy * 100)

# + [markdown] id="yAVNAIhxSsMA"
# Decoding the full test set takes far too long, so it is skipped here.

# + id="K_2gLLNBSHpZ"
# test_tagged_words = [tup for sent in test_set for tup in sent]
# test_untagged_words = [tup[0] for sent in test_set for tup in sent]
# test_untagged_words
# start = time.time()
# tagged_seq = Viterbi(test_untagged_words)
# end = time.time()
# difference = end-start
# print("Time taken in seconds: ", difference)
# # accuracy
# NOTE(review): this commented-out check zips the wrong pair -- it should
# compare `tagged_seq` against `test_tagged_words`, not the two input lists.
# check = [i for i, j in zip(test_tagged_words, test_untagged_words) if i == j]
# accuracy = len(check)/len(tagged_seq)
# print('Viterbi Algorithm Accuracy: ',accuracy*100)

# + id="7XZMIqUvSHru"
patterns = [
    (r'.*ing$', 'VERB'),               # present participle
    (r'.*ed$', 'VERB'),                # past tense
    (r'.*es$', 'VERB'),                # verbs
    (r'.*\'s$', 'NOUN'),               # possessive
    (r'.*s$', 'NOUN'),                 # plural nouns
    (r'\*T?\*?-[0-9]+$', 'X'),         # X (treebank trace markers)
    (r'^-?[0-9]+(.[0-9]+)?$', 'NUM'),  # cardinal numbers
    (r'.*', 'NOUN')                    # nouns (fallback)
]
rule_based_tagger = nltk.RegexpTagger(patterns)


# + id="dphEsmpASHu-"
def Viterbi_rule_based(words, train_bag=train_tagged_words):
    """Viterbi decoder with a regex fallback for unseen words and trace tokens."""
    state = []
    T = list(set([pair[1] for pair in train_bag]))
    for key, word in enumerate(words):
        p = []
        for tag in T:
            if key == 0:
                transition_p = tags_df.loc['.', tag]
            else:
                transition_p = tags_df.loc[state[-1], tag]
            # PERF FIX: single word_given_tag call (was two).
            count_w_tag, count_tag = word_given_tag(words[key], tag)
            emission_p = count_w_tag / count_tag
            state_probability = emission_p * transition_p
            p.append(state_probability)
        pmax = max(p)
        rule_tag = rule_based_tagger.tag([word])[0][1]
        # Unseen word (all probabilities zero) or a trace token ('X'):
        # trust the rule-based tag; otherwise take the HMM's best tag.
        # (Same decision table as the original, with the duplicated
        # rule_based_tagger call removed.)
        if pmax == 0 or rule_tag == 'X':
            state_max = rule_tag
        else:
            state_max = T[p.index(pmax)]
        state.append(state_max)
    return list(zip(words, state))


# + id="4eG-uCoRSHxK" colab={"base_uri": "https://localhost:8080/"} outputId="a22f1465-4bd5-4dea-e050-754a0b0631fd"
start = time.time()
tagged_seq = Viterbi_rule_based(test_tagged_words)
end = time.time()
difference = end - start
print("Time taken in seconds: ", difference)

# accuracy
check = [i for i, j in zip(tagged_seq, test_run_base) if i == j]
accuracy = len(check) / len(tagged_seq)
print('Viterbi Algorithm Accuracy: ', accuracy * 100)

# + [markdown] id="fSeU6OxPTw_-"
# Compare the two approaches

# + id="1ar9tG-OSHzo" colab={"base_uri": "https://localhost:8080/"} outputId="48ce394d-757d-40fb-91d2-adbc81de46cf"
test_sent = "<NAME> Marry"
pred_tags_rule = Viterbi_rule_based(test_sent.split())
pred_tags_withoutRules = Viterbi(test_sent.split())
print(pred_tags_rule)
print(pred_tags_withoutRules)

# + id="ONxunqhIS67y"
NLP/pos_tagging_HMM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # In-Class Coding Lab: Functions # # The goals of this lab are to help you to understand: # # - How to use Python's built-in functions in the standard library. # - How to write user-defined functions # - The benefits of user-defined functions to code reuse and simplicity. # - How to create a program to use functions to solve a complex idea # # We will demonstrate these through the following example: # # ## The Cat Problem # # **You want to buy 3 cats from a pet store that has 50 cats. In how many ways can you do this?** # # This is a classic application in the area of mathematics known as *combinatorics* which is the study of objects belonging to a finite set in accordance with certain constraints. # # In this example the set is 50 cats, where we select 3 of those 50 cats and the order in which we select them does not matter. We want to know how many different combinations of 3 cats can we get from the 50. # # This problem, written as a program would work like this: # # ``` # How many cats are at the pet store? 50 # How many are you willing to take home? 3 # There are #### different combinations of 3 cats from the 50 you can choose to take home! # ``` # # # Of course `####` gets replaced with the answer, but we don't know how to do that....yet. # # ### Combinatorics 101 # # In *combinatorics*: # # - a **permutation** defined as `P(n,k)` is the number of ordered arrangements of `n` things taken `k` at a time. # - a **combination** defined as `C(n,k)` is the number of un-ordered arrangements of `n` things taken `k` at a time. # # In our cat case we're bringing 3 (`k`) home from the 50 (`n`) and their order doesn't matter, (after all we plan on loving them equally) so we want **combination** instead of **permutation**. 
An example of permutation would be if those same cats were in a beauty contest and the 3 (`k`) were to be placed in 1st, 2nd and 3rd. # # ### Formula for C(n,k) # # The formula for `C(n,k)` is as follows: `n! / ((n-k)! * k!) ` we will eventually write this as a user-defined Python function, but before we do, what exactly is `!` ? # # ### Factorial # # The `!` is not a Python symbol, it is a mathematical symbol. It represents **factorial** defined as `n!` as the the product of the positive integer `n` and all the positive integers less than `n`. Furthermore `0! == 1`. # # Example: `5! == 5*4*3*2*1 == 120` # # ### We are ready to write our program! # # Our cat problem needs the combination formula, the combination formula needs factorial. We now know everything we need to solve the problem. We just have to assemble it all into a working program! # # You could solve this problem by writing a user-defined Python function for factorial, then another function for combination. Instead, we'll take a hybrid approach, using the factorial function from the Python standard library and writing a user-defined combination function. # # ## Built-In Functions # # Let's start by checking out the built-in functions in Python's math library. We use the `dir()` function to list the names of the math library: # # + import math dir(math) # - # If you look through the output, you'll see a `factorial` name. Let's see if it's a function we can use: help(math.factorial) # It says it's a built-in function, and requies an integer value (which it referrs to as x, but that value is arbitrary) as an argument. Let's call the function and see if it works: math.factorial(5) #should be 120 math.factorial(0) # should be 1 # Next we need to write a user-defined function for the **combination** formula. Recall: # # `combination(n,k)` is defined as `n! / ((n-k)! * k!)` use `math.factorial()` in place of `!` in the formula. For example `(n-k)!` would be `math.factorial(n-k)` in Python. 
#TODO: Write code to define the combination(n,k) function here: n=0 k=0 def combination(n,k): combination = (math.factorial(n)) / ((math.factorial(n-k)) * (math.factorial(k))) return int(combination) ## Test your combination function here combination(50,3) # should be 19600 combination(4,1) # should be 4 # ## Now write the entire program # # Sample run # # ``` # How many cats are at the pet store? 50 # How many are you willing to take home? 3 # There are #### different combinations of 3 cats from the 50 you can choose to take home! # ``` # # TO-Do List: # # ``` # # TODO List for program # 1. input how many cats at pet store? save in variable n # 2. input how many you are willing to take home? sabe in variable k # 3. compute combination of n and k # 4. print results # ``` # # TODO: Write entire program n = int(input("How many cats at pet store?: ")) k = int(input("How many are you willing to take home?: ")) num_of_combinations = combination(n,k) print('There are', num_of_combinations, 'different combination of', k, 'cats from the', n, 'you can choose to take home!') # ## The Cat Beauty Contest # # We made mention of a cat beauty pagent, where order does matter, would use the **permutation** formula. Do the following: # # 1. Write a function `permutation(n,k)` in Python to implement the permutation formula # 2. Write a main program similar to the one you wrote above, but instead implements the cat beauty contest. # # # ``` # # TODO List for program # 1. print "welcome to the cat beauty contest" # 2. input how many cat contenstents? save input into variable n # 3. how many places? save input into variable k # 4. compute permutation(n,k) # 5. print number of possible ways the contest can end. # ``` # # + # TODO: Write entire program import math def permutation(n,k): permutations = math.factorial(n) / math.factorial((n-k)) return permutations permutations = int(permutation(n,k)) print("Welcome to the cat beauty contest") n = int(input("How many cat contestents? 
")) k = int(input("How many places? ")) print("Number of possible ways the contest can end: ", permutations) # -
content/lessons/06/Class-Coding-Lab/CCL-Functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="-pxRW-6f_uNq" # # Build a Pipeline # > A tutorial on using Pipelines SDK v2 to orchestrate your ML workflow as a pipeline # # # A Kubeflow pipeline is a portable and scalable definition of a machine learning # (ML) workflow. Each step in your ML workflow, such as preparing data or # training a model, is an instance of a pipeline component. This document # provides an overview of pipeline concepts and best practices, and instructions # describing how to build an ML pipeline. # # **Note:** This guide demonstrates how to build pipelines using the Pipelines SDK v2. # Currently, Kubeflow Pipelines v2 is in development. You can use this guide to start # building and running pipelines that are compatible with the Pipelines SDK v2. # # [Learn more about Pipelines SDK v2][kfpv2]. # # [kfpv2]: https://www.kubeflow.org/docs/components/pipelines/sdk-v2/v2-compatibility # # ## Before you begin # # 1. Run the following command to install the Kubeflow Pipelines SDK v1.6.2 or higher. # If you run this command in a Jupyter notebook, restart the kernel after # installing the SDK. # + id="04mM73j7nWJ-" # !pip install --upgrade kfp # + [markdown] id="8KExWR1i_7Ur" # 2. Import the `kfp` packages. # + id="TLAhMbMG_M3A" import kfp from kfp.v2 import dsl from kfp.v2.dsl import component from kfp.v2.dsl import ( Input, Output, Artifact, Dataset, ) # + [markdown] id="deBrhgzrD3Fr" # ## Understanding pipelines # # A Kubeflow pipeline is a portable and scalable definition of an ML workflow, # based on containers. A pipeline is composed of a set of input parameters and a # list of the steps in this workflow. Each step in a pipeline is an instance of a # component, which is represented as an instance of # [`ContainerOp`][container-op]. 
# # You can use pipelines to: # # * Orchestrate repeatable ML workflows. # * Accelerate experimentation by running a workflow with different sets of # hyperparameters. # # ### Understanding pipeline components # # A pipeline component is a containerized application that performs one step in a # pipeline's workflow. Pipeline components are defined in # [component specifications][component-spec], which define the following: # # * The component's interface, its inputs and outputs. # * The component's implementation, the container image and the command to # execute. # * The component's metadata, such as the name and description of the # component. # # You can build components by [defining a component specification for a # containerized application][component-dev], or you can [use the Kubeflow # Pipelines SDK to generate a component specification for a Python # function][python-function-component]. You can also [reuse prebuilt components # in your pipeline][prebuilt-components]. # # ### Understanding the pipeline graph # # Each step in your pipeline's workflow is an instance of a component. When # you define your pipeline, you specify the source of each step's inputs. Step # inputs can be set from the pipeline's input arguments, constants, or step # inputs can depend on the outputs of other steps in this pipeline. Kubeflow # Pipelines uses these dependencies to define your pipeline's workflow as # a graph. # # For example, consider a pipeline with the following steps: ingest data, # generate statistics, preprocess data, and train a model. The following # describes the data dependencies between each step. # # * **Ingest data**: This step loads data from an external source which is # specified using a pipeline argument, and it outputs a dataset. Since # this step does not depend on the output of any other steps, this step # can run first. # * **Generate statistics**: This step uses the ingested dataset to generate # and output a set of statistics. 
Since this step depends on the dataset # produced by the ingest data step, it must run after the ingest data step. # * **Preprocess data**: This step preprocesses the ingested dataset and # transforms the data into a preprocessed dataset. Since this step depends # on the dataset produced by the ingest data step, it must run after the # ingest data step. # * **Train a model**: This step trains a model using the preprocessed dataset, # the generated statistics, and pipeline parameters, such as the learning # rate. Since this step depends on the preprocessed data and the generated # statistics, it must run after both the preprocess data and generate # statistics steps are complete. # # Since the generate statistics and preprocess data steps both depend on the # ingested data, the generate statistics and preprocess data steps can run in # parallel. All other steps are executed once their data dependencies are # available. # # ## Designing your pipeline # # When designing your pipeline, think about how to split your ML workflow into # pipeline components. The process of splitting an ML workflow into pipeline # components is similar to the process of splitting a monolithic script into # testable functions. The following rules can help you define the components # that you need to build your pipeline. # # * Components should have a single responsibility. Having a single # responsibility makes it easier to test and reuse a component. For example, # if you have a component that loads data you can reuse that for similar # tasks that load data. If you have a component that loads and transforms # a dataset, the component can be less useful since you can use it only when # you need to load and transform that dataset. # # * Reuse components when possible. Kubeflow Pipelines provides [components for # common pipeline tasks and for access to cloud # services][prebuilt-components]. # # Note: Not all prebuilt components are compatible with Pipelines SDK v2. 
# For example, you might need to update the type hints for component inputs # and outputs. # # * Consider what you need to know to debug your pipeline and research the # lineage of the models that your pipeline produces. Kubeflow Pipelines # stores the inputs and outputs of each pipeline step. By interrogating the # artifacts produced by a pipeline run, you can better understand the # variations in model quality between runs or track down bugs in your # workflow. # # In general, you should design your components with composability in mind. # # Pipelines are composed of component instances, also called steps. Steps can # define their inputs as depending on the output of another step. The # dependencies between steps define the pipeline workflow graph. # # ### Building pipeline components # # Kubeflow pipeline components are containerized applications that perform a # step in your ML workflow. Here are the ways that you can define pipeline # components: # # * If you have a containerized application that you want to use as a # pipeline component, create a component specification to define this # container image as a pipeline component. # # This option provides the flexibility to include code written in any # language in your pipeline, so long as you can package the application # as a container image. Learn more about [building pipeline # components][component-dev]. # # * If your component code can be expressed as a Python function, [evaluate if # your component can be built as a Python function-based # component][python-function-component]. The Kubeflow Pipelines SDK makes it # easier to build lightweight Python function-based components by saving you # the effort of creating a component specification. # # Whenever possible, [reuse prebuilt components][prebuilt-components] to save # yourself the effort of building custom components. 
# # The example in this guide demonstrates how to build a pipeline that uses a # Python function-based component and reuses a prebuilt component. # # ### Understanding how data is passed between components # # When Kubeflow Pipelines runs a component, a container image is started in a # Kubernetes Pod and your component’s inputs are passed in as command-line # arguments. When your component has finished, the component's outputs are # returned as files. # # In your component's specification, you define the components inputs and outputs # and how the inputs and output paths are passed to your program as command-line # arguments. # # Component inputs and outputs are classified as either _parameters_ or _artifacts_, # depending on their data type. # # * Parameters typically represent settings that affect the behavior of your pipeline. # Parameters are passed into your component by value, and can be of any of # the following types: `int`, `double`, `float`, or `str`. Since parameters are # passed by value, the quantity of data passed in a parameter must be appropriate # to pass as a command-line argument. # * Artifacts represent large or complex data structures like datasets or models, and # are passed into components as a reference to a file path. # # If you have large amounts of string data to pass to your component, such as a JSON # file, annotate that input or output as a type of [`Artifact`][kfp-artifact], such # as [`Dataset`][kfp-artifact], to let Kubeflow Pipelines know to pass this to # your component as a file. # # In addition to the artifact’s data, you can also read and write the artifact's # metadata. For output artifacts, you can record metadata as key-value pairs, such # as the accuracy of a trained model. For input artifacts, you can read the # artifact's metadata &mdash; for example, you could use metadata to decide if a # model is accurate enough to deploy for predictions. 
# # All outputs are returned as files, using the paths that Kubeflow Pipelines # provides. # # [kfp-artifact]: https://github.com/kubeflow/pipelines/blob/sdk/release-1.8/sdk/python/kfp/dsl/io_types.py # # Python function-based components make it easier to build pipeline components # by building the component specification for you. Python function-based # components also handle the complexity of passing inputs into your component # and passing your function’s outputs back to your pipeline. # # Learn more about how [Python function-based components handle inputs and # outputs][python-function-component-data-passing]. # # ## Getting started building a pipeline # # The following sections demonstrate how to get started building a Kubeflow # pipeline by walking through the process of converting a Python script into # a pipeline. # # ### Design your pipeline # # The following steps walk through some of the design decisions you may face # when designing a pipeline. # # 1. Evaluate the process. In the following example, a Python function downloads # a zipped tar file (`.tar.gz`) that contains several CSV files, from a # public website. The function extracts the CSV files and then merges them # into a single file.
#
# [container-op]: https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.dsl.html#kfp.dsl.ContainerOp
# [component-spec]: https://www.kubeflow.org/docs/components/pipelines/reference/component-spec/
# [python-function-component]: https://www.kubeflow.org/docs/components/pipelines/sdk-v2/python-function-components/
# [component-dev]: https://www.kubeflow.org/docs/components/pipelines/sdk-v2/component-development/
# [python-function-component-data-passing]: https://www.kubeflow.org/docs/components/pipelines/sdk-v2/python-function-components/#understanding-how-data-is-passed-between-components
# [prebuilt-components]: https://www.kubeflow.org/docs/examples/shared-resources/
#

# + id="Vn9MXolH_2BG"
import glob
import pandas as pd
import tarfile
import urllib.request


def download_and_merge_csv(url: str, output_csv: str):
    """Download a .tar.gz archive of CSV files from `url`, extract it into
    ./data, and merge every extracted CSV (headerless) into `output_csv`.
    """
    with urllib.request.urlopen(url) as res:
        # Close the archive deterministically; the original leaked the
        # TarFile handle. NOTE(review): extractall() on an untrusted archive
        # can write outside ./data (path traversal) -- on Python 3.12+ pass
        # filter="data" to restrict member paths.
        with tarfile.open(fileobj=res, mode="r|gz") as archive:
            archive.extractall('data')
    # sorted() makes the row order of the merged file deterministic; bare
    # glob() order is filesystem-dependent.
    df = pd.concat(
        [pd.read_csv(csv_file, header=None)
         for csv_file in sorted(glob.glob('data/*.csv'))])
    df.to_csv(output_csv, index=False, header=False)


# + [markdown] id="cWmF17kyIKGF"
# 2. Run the following Python command to test the function.

# + id="he6MK5x1Fwbk"
# Guarded so importing this module does not trigger a network download; in a
# notebook __name__ is "__main__", so the cell still runs as before.
if __name__ == "__main__":
    download_and_merge_csv(
        url='https://storage.googleapis.com/ml-pipeline-playground/iris-csv-files.tar.gz',
        output_csv='merged_data.csv')
# -

# 3. Run the following to print the first few rows of the
# merged CSV file.

# !head merged_data.csv

# + [markdown] id="yT6Di92BOrNQ"
# 4. Design your pipeline. For example, consider the following pipeline designs.
#
# * Implement the pipeline using a single step. In this case, the pipeline
# contains one component that works similarly to the example function.
# This is a straightforward function, and implementing a single-step
# pipeline is a reasonable approach in this case.
#
# The down side of this approach is that the zipped tar file would not be
# an artifact of your pipeline runs.
Not having this artifact available # could make it harder to debug this component in production. # # * Implement this as a two-step pipeline. The first step downloads a file # from a website. The second step extracts the CSV files from a zipped # tar file and merges them into a single file. # # This approach has a few benefits: # # * You can reuse the [Web Download component][web-download-component] # to implement the first step. # * Each step has a single responsibility, which makes the components # easier to reuse. # * The zipped tar file is an artifact of the first pipeline step. # This means that you can examine this artifact when debugging # pipelines that use this component. # # This example implements a two-step pipeline. # # ### Build your pipeline components # # # Build your pipeline components. This example modifies the initial script to # extract the contents of a zipped tar file, merge the CSV files that were # contained in the zipped tar file, and return the merged CSV file. # # This example builds a Python function-based component. You can also package # your component's code as a Docker container image and define the component # using a ComponentSpec. # # In this case, the following modifications were required to the original # function. # # * The file download logic was removed. The path to the zipped tar file # is passed to this function as the `tar_data` argument. # * The import statements were moved inside of the function. Python # function-based components require standalone Python functions. This # means that any required import statements must be defined within the # function, and any helper functions must be defined within the function. # * The function's arguments are annotated as an [`kfp.dsl.Input`][input] # or [`kfp.dsl.Output`][output] artifact. These annotations let Kubeflow # Pipelines know to provide the path to the zipped tar file and to # create a path where your function stores the merged CSV file. 
# * The function is decorated with the [`kfp.dsl.component`][dsl-component]
#   annotation. You can also use this annotation to define the following:
#
#   * The container image that your function runs in.
#   * Any PyPI packages that this function depends on, that are not already
#     installed on the container image.
#   * The location to save the component specification to. You can use the
#     component specification to share this component with your colleagues.
#
#   This annotation converts your function into a factory function that
#   creates pipeline steps. These pipeline steps execute the function you
#   defined as a part of a pipeline's workflow.
#
# Learn more about [building Python function-based components][python-function-components].
#
# The following example shows the updated `merge_csv` function.
#
# [web-download-component]: https://github.com/kubeflow/pipelines/blob/sdk/release-1.8/components/web/Download/component.yaml
# [python-function-components]: https://www.kubeflow.org/docs/components/pipelines/sdk-v2/python-function-components/
# [input]: https://github.com/kubeflow/pipelines/blob/sdk/release-1.8/sdk/python/kfp/dsl/io_types.py
# [output]: https://github.com/kubeflow/pipelines/blob/sdk/release-1.8/sdk/python/kfp/dsl/io_types.py
# [dsl-component]: https://github.com/kubeflow/pipelines/blob/sdk/release-1.8/sdk/python/kfp/dsl/_component.py

# + id="NB3eNHmNCN2C"
@component(
    packages_to_install=['pandas==1.1.4'],
    output_component_file='component.yaml'
)
def merge_csv(tar_data: Input[Artifact], output_csv: Output[Dataset]):
    """Extract the CSVs inside *tar_data* and write their row-wise
    concatenation (no header) to *output_csv*."""
    # Python function-based components must be standalone, so the imports
    # live inside the function body.
    import glob
    import pandas as pd
    import tarfile

    # The with-block closes the tar handle deterministically even if
    # extraction raises.
    with tarfile.open(name=tar_data.path, mode="r|gz") as tar:
        tar.extractall('data')
    # sorted() gives a deterministic row order in the merged output;
    # glob.glob() alone returns files in arbitrary filesystem order.
    df = pd.concat(
        [pd.read_csv(csv_file, header=None)
         for csv_file in sorted(glob.glob('data/*.csv'))])
    df.to_csv(output_csv.path, index=False, header=False)

# + [markdown] id="j9Axem9HPHP2"
# ### Build your pipeline
#
# 1.
Use [`kfp.components.load_component_from_url`][load_component_from_url] # to load the component specification YAML for any components that you are # reusing in this pipeline. # # [load_component_from_url]: https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.components.html?highlight=load_component_from_url#kfp.components.load_component_from_url # + id="QDzFCaGQa_oR" web_downloader_op = kfp.components.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/contrib/web/Download/component-sdk-v2.yaml') # + [markdown] id="p4bIwiHhbACy" # 2. Define your pipeline as a Python function. # # Your pipeline function's arguments define your pipeline's parameters. Use # pipeline parameters to experiment with different hyperparameters, such as # the learning rate used to train a model, or pass run-level inputs, such as # the path to an input file, into a pipeline run. The data type must be # specified for all pipeline parameters. # # Use the factory functions created by # the `kfp.dsl.component` annotation and the # `kfp.components.load_component_from_url` function to create your pipeline's tasks. # The inputs to the component factory functions can be pipeline parameters, # the outputs of other tasks, or a constant value. In this case, the # `web_downloader_task` task uses the `url` pipeline parameter, and the # `merge_csv_task` uses the `data` output of the `web_downloader_task`. # # The `kfp.dsl.pipeline` annotation lets you specify the following: # # * `name`: The pipeline's name. # * `description`: (Optional.) A description of the pipeline's workflow. # * `pipeline_root`: The default path where your pipeline's artifacts are # stored. This must be a path that your pipeline can read and write to, # such as a Persistent Volume Claim or a cloud service such as Google # Cloud Storage. 
# + id="WsyKJeBOTlkz"
# Define a pipeline and create a task from a component:
@dsl.pipeline(
    name='my-pipeline',
    # You can optionally specify your own pipeline_root
    # pipeline_root='gs://my-pipeline-root/example-pipeline',
)
def my_pipeline(url: str):
    """Two-step pipeline: download a tar archive, then merge its CSV files."""
    web_downloader_task = web_downloader_op(url=url)
    merge_csv_task = merge_csv(tar_data=web_downloader_task.outputs['data'])
    # Downstream steps could consume the merged file through the task's
    # outputs dictionary:
    merge_csv_task.outputs['output_csv']

# + [markdown] id="OT3O_2GgVKoT"
# ### Compile and run your pipeline
#
# After defining the pipeline in Python as described in the preceding section, use one of the following options to compile the pipeline and submit it to the Kubeflow Pipelines service.
#
# #### Option 1: Compile and then upload in UI
#
# 1. Run the following to compile your pipeline and save it as `pipeline.yaml`.

# + id="U0Ll8ve2WNUo"
pipeline_compiler = kfp.compiler.Compiler(
    mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE)
pipeline_compiler.compile(
    pipeline_func=my_pipeline,
    package_path='pipeline.yaml')
# -

# 2. Upload and run your `pipeline.yaml` using the Kubeflow Pipelines user interface.
#    See the guide to [getting started with the UI][quickstart].
#
# [quickstart]: https://www.kubeflow.org/docs/components/pipelines/overview/quickstart

# + [markdown] id="jNLI1-_bfEky"
# #### Option 2: run the pipeline using Kubeflow Pipelines SDK client
#
# 1. Create an instance of the [`kfp.Client` class][kfp-client] following steps in [connecting to Kubeflow Pipelines using the SDK client][connect-api].
#
# [kfp-client]: https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.client.html#kfp.Client
# [connect-api]: https://www.kubeflow.org/docs/components/pipelines/sdk/connect-api
# -

client = kfp.Client()  # adjust the constructor arguments for your deployment

# 2.
Run the pipeline using the `kfp.Client` instance: # + id="jRNHZpfnVJ0h" client.create_run_from_pipeline_func( my_pipeline, mode=kfp.dsl.PipelineExecutionMode.V2_COMPATIBLE, # You can optionally override your pipeline_root when submitting the run too: # pipeline_root='gs://my-pipeline-root/example-pipeline', arguments={ 'url': 'https://storage.googleapis.com/ml-pipeline-playground/iris-csv-files.tar.gz' }) # + [markdown] id="pnhZm12y_wvc" # # ## Next steps # # * Learn about advanced pipeline features, such as [using conditional execution in a # pipeline][conditional]. # # # [conditional]: https://github.com/kubeflow/pipelines/blob/sdk/release-1.8/samples/tutorials/DSL%20-%20Control%20structures/DSL%20-%20Control%20structures.py # [k8s-resources]: https://www.kubeflow.org/docs/components/pipelines/sdk/manipulate-resources/
content/en/docs/components/pipelines/sdk-v2/build-pipeline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # K Index Calculation # + tags=["hide-cell"] import datetime as dt import numpy as np import pandas as pd import matplotlib.pyplot as plt import pooch from viresclient import SwarmRequest import ipywidgets as widgets from warnings import filterwarnings filterwarnings(action="ignore") # Data dependencies (pooch caches this in ~/.cache/pooch/) esk_k_ind_file = pooch.retrieve( "https://raw.githubusercontent.com/MagneticEarth/IAGA_SummerSchool2019/master/data/external/k_inds/esk/2003.esk", known_hash="233246e167a212cd1afa33ff2fe130fbc308cd2ae7971c6c2afcd363c9775c18" ) # - # ## Calculating K-indices for a single observatory # The K-index is a local geomagnetic activity index devised by Julius Bartels in 1938 to give a simple measure of the degree of geomagnetic disturbance during each 3-hour (UT) interval seen at a single magnetic observatory. Data from the observatory magnetometers are used to assign a number in the range 0-9 to each 3-hour interval, with K=0 indicating very little geomagnetic activity and K=9 representing an extreme geomagnetic storm. The K-index was introduced at the time of photographic recording, when magnetograms recorded variations in the horizontal geomagnetic field elements declination (D) and horizontal intensity (H), and in the vertical intensity (Z). # # To derive a K-index an observer would fit, __by eye__, a 'solar regular variation' ($S_R$) curve to the records of D and H and measure the range (maximum-minimum) of the deviation of the recording from the curve. The K-index was then assigned according to a conversion table, with the greatest range in D and H 'winning'. 
The north component (X) may be used instead of H, and the east component (Y) instead of D (X and Y will be used in the examples below and see http://isgi.unistra.fr/what_are_kindices.php for more details on the K-index). The vertical component Z is not used because it is liable to contamination by local induced currents. # # The conversion from range in nanoteslas to index is quasi-logarithmic. The conversion table varies with latitude in an attempt to normalise the K-index distribution for observatories at different latitudes. The table for Eskdalemuir is shown below. # # | K | 0 | 1 |2|3|4|5|6|7|8|9| # | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | # | Lower bound (nT) | 0 | 8 |15|30|60|105|180|300|500|750| # # This means that, for instance, K=2 if the measured range is in the interval \[15, 30\) nT. # # There was a long debate in IAGA Division V about algorithms that could adequately reproduce the K-indices that an experienced observer would assign. The algorithms and code approved by IAGA are available at the International Service for Geomagnetic Indices: http://isgi.unistra.fr/softwares.php. # ### Example # In the following cells, we **illustrate** a possible approach. We assume the so-called regular daily variation $S_R$ is made up of 24h, 12h and 8h signals (and, possibly, higher harmonics). A Fourier analysis can be used to investigate this. The functions in the cell below calculate Fourier coefficients from a data sample of one-minute data values over a 24-hour UT interval, and then synthesise a smooth fit using the Fourier coefficients. # # For some days this simple approach to estimating $S_R$ seems to work well, on others it's obviously wrong. Think about another approach you might take. 
#
# We then attempt to calculate K-indices for the chosen day by computing the Fourier series up to the selected number of harmonics, subtracting the synthetic harmonic signal from the data, then calculating 3-hr ranges and converting these into the corresponding K-index. The functions to do this are also included in the following cell.

# + tags=["hide-input"]
def fourier(v, nhar):
    """Return Fourier series coefficients of *v* up to harmonic *nhar*.

    Parameters
    ----------
    v : array-like
        One UT day of equally spaced samples (e.g. 1440 one-minute values).
    nhar : int
        Highest harmonic of the daily period to fit.

    Returns
    -------
    list of (cosine, sine) coefficient pairs; element 0 holds (mean, 0).
    """
    npts = len(v)
    f = 2.0 / npts
    # Sample phases over one fundamental period, [0, 2*pi).
    t = np.linspace(0, npts, npts, endpoint=False) * 2 * np.pi / npts
    vmn = np.mean(v)
    v = v - vmn
    cofs = [(vmn, 0)] + [
        (np.dot(v, np.cos(i * t)) * f, np.dot(v, np.sin(i * t)) * f)
        for i in range(1, nhar + 1)
    ]
    return cofs


def fourier_synth(cofs, npts):
    """Synthesise *npts* samples from Fourier coefficients *cofs*.

    The mean term (cofs[0]) is deliberately excluded, matching the use of
    mean-removed residuals elsewhere in this notebook.
    """
    t = np.linspace(0, npts, npts, endpoint=False) * 2 * np.pi / npts
    syn = np.zeros(npts)
    for n in range(1, len(cofs)):
        # Vectorised over all npts samples; the previous per-sample inner
        # loop performed npts Python iterations for every harmonic.
        syn += cofs[n][0] * np.cos(n * t) + cofs[n][1] * np.sin(n * t)
    return syn


# Define K-index conversion table for ESK: lower bound (nT) of each K level.
K_conversions = {
    f"K{level}": level_bin
    for level, level_bin in enumerate(
        (0, 8, 15, 30, 60, 105, 180, 300, 500, 750)
    )
}
# Define reverse mapping (lower bound in nT -> K level name)
nT_to_K = {v: k for k, v in K_conversions.items()}


def K_calc(d, synd, Kb=K_conversions):
    """Return the eight 3-hourly K indices for one day of 1-minute data.

    *d* is the measured component, *synd* the fitted regular variation;
    both must contain 1440 samples (8 windows x 180 minutes).
    """
    # Range (max - min) of the residual within each 3-hour window.
    ranges = np.ptp((d - synd).reshape(8, 180), axis=1)
    # np.digitize returns 1-based bin numbers for right=False, hence the -1.
    return list(np.digitize(ranges, bins=list(Kb.values()), right=False) - 1)


def load_official_K(filepath=None):
    """Load the official ESK K-index file into a DataFrame.

    Defaults to the ``esk_k_ind_file`` downloaded at the top of the notebook.
    """
    if filepath is None:
        # Resolved lazily so that merely defining this function does not
        # require the pooch download above to have succeeded.
        filepath = esk_k_ind_file
    df = pd.read_csv(filepath, skiprows=0, header=None,
                     delim_whitespace=True,
                     parse_dates=[[2, 1, 0]], index_col=0)
    # Column 3 is unused metadata in the source file.
    df = df.drop(3, axis=1)
    df.index.name = 'Date'
    # One column per 3-hour UT interval.
    df.columns = ['00', '03', '06', '09', '12', '15', '18', '21']
    return df


def load_ESK_2003():
    """Fetch 2003 Eskdalemuir (X, Y, Z) one-minute data via VirES."""
    request = SwarmRequest()
    request.set_collection("SW_OPER_AUX_OBSM2_:ESK", verbose=False)
    request.set_products(measurements=["B_NEC", "IAGA_code"])
    data = request.get_between(
        dt.datetime(2003, 1, 1),
        dt.datetime(2004, 1, 1),
    )
    df = data.as_dataframe(expand=True).drop(
        columns=["Spacecraft"]
    )
    # Rename B_NEC_N/E/C to the conventional X/Y/Z component names.
    df = df.rename(columns={f"B_NEC_{i}": j for i, j in zip("NEC", "XYZ")})
    return df
# -

# First, load in (X, Y, Z)
one-minute data from Eskdalemuir for 2003 into a pandas dataframe. df_obs = load_ESK_2003() df_obs.head() # Load the official K index data (available from <http://www.geomag.bgs.ac.uk/data_service/data/magnetic_indices/k_indices>) to compare with later. df_K_official = load_official_K() df_K_official.head() # Evaluate K indices for a given day: # - For each of $X$ and $Y$: # - Perform a Fourier analysis on the data to find the regular daily variation, $S_R$ # - Over each 3-hour interval, find the maximum differences from $S_R$ # - Convert from nT to $K$ using the conversion table for ESK # - Pick the greater of $K(X)$ and $K(Y)$ and compare with the official K index # + tags=["hide-input"] def analyse_day(day=dt.date(2003, 1, 1), n_harmonics=3, df=df_obs, df_K_official=df_K_official): """Generate figure illustrating the K index calculation for a given day""" # Select given day _df = df.loc[day.isoformat()] _df_K = df_K_official.loc[day.isoformat()] # Select X & Y data and remove daily mean x = (_df["X"] - _df["X"].mean()).values y = (_df["Y"] - _df["Y"].mean()).values # Perform Fourier analysis of X & Y separately xcofs = fourier(x, n_harmonics) synx = fourier_synth(xcofs, len(x)) ycofs = fourier(y, n_harmonics) syny = fourier_synth(ycofs, len(y)) # Build plot t = np.linspace(0, 1440, 1440, endpoint=False)/60 fig, axes = plt.subplots(2, 1, figsize=(15, 10), sharex=True) # Plot X & Y data with approximated variation axes[0].plot(t, x, color="tab:blue", alpha=0.5) axes[0].plot(t, synx, color="tab:blue", label="X") axes[0].plot(t, y, color="tab:red", alpha=0.5) axes[0].plot(t, syny, color="tab:red", label="Y") # Plot the differences axes[1].plot(t, (x-synx), color="tab:blue") axes[1].plot(t, (y-syny), color="tab:red") # Find and plot min/max bounds over 3-hourly intervals minX = np.min((x-synx).reshape(8, 180), axis=1) maxX = np.max((x-synx).reshape(8, 180), axis=1) minY = np.min((y-syny).reshape(8, 180), axis=1) maxY = np.max((y-syny).reshape(8, 180), axis=1) 
t_3hours = np.linspace(0, 1440, 9, endpoint=True)/60 axes[1].fill_between(t_3hours, list(minX)+[0], list(maxX)+[0], step="post", color="tab:blue", alpha=0.5) axes[1].fill_between(t_3hours, list(minY)+[0], list(maxY)+[0], step="post", color="tab:red", alpha=0.5) # Determine K index from each of X & Y K_X = np.digitize((maxX-minX), bins=list(K_conversions.values()), right=False) - 1 K_Y = np.digitize((maxY-minY), bins=list(K_conversions.values()), right=False) - 1 for i, (K_X_i, K_Y_i) in enumerate(zip(K_X, K_Y)): # Display determined K from X & Y px = i*3 py = axes[1].get_ylim()[1] axes[1].annotate( f"K(X): {K_X_i}", (px, py), xytext=(30, 18), textcoords="offset pixels", color="tab:blue", size=12, ) axes[1].annotate( f"K(Y): {K_Y_i}", (px, py), xytext=(30, 3), textcoords="offset pixels", color="tab:red", size=12, ) # Display comparison with the official K index K_ours = max(K_X_i, K_Y_i) K_official = _df_K[i] axes[1].annotate( f"{K_ours}\n{K_official}", (i*3, axes[1].get_ylim()[0]), xytext=(40, -70), textcoords="offset pixels" ) axes[1].annotate( f"Determined K:\nOfficial K:", (0, axes[1].get_ylim()[0]), xytext=(-80, -70), textcoords="offset pixels" ) # Finalise figure for ax in axes: ax.grid() ax.xaxis.set_ticks(np.arange(0, 27, 3)) axes[1].set_ylabel("Residuals [nT]") axes[1].set_xlabel("UT [hour]") axes[0].set_ylabel("[nT]") axes[0].legend(loc="upper right") fig.suptitle(f"ESK: {day.isoformat()}", y=0.9) return fig, axes def make_widgets_K_index_calc(): day = widgets.SelectionSlider( options=[t.date() for t in pd.date_range(dt.date(2003, 1, 1), dt.date(2003, 12, 31))], description="Select day:", layout=widgets.Layout(width='700px') ) # day = widgets.DatePicker(value=dt.date(2003, 1, 1), description="Select day:") n_harmonics = widgets.SelectionSlider(options=range(1, 11), value=3, description="# harmonics:") return widgets.VBox( [day, n_harmonics, widgets.interactive_output( analyse_day, {"day": day, "n_harmonics": n_harmonics} )], ) make_widgets_K_index_calc() # 
- # ## Statistics of the K index # We will use the official K index from ESK to probe some statistics through the year 2003. # # Histograms of the K indices for each 3-hour period: axes = df_K_official.hist( figsize=(12, 12), bins=range(11), sharey=True, align="left", rwidth=0.8, ) plt.suptitle('ESK 2003: Distribution of K-indices for each 3-hour interval') axes[-1, 0].set_ylabel("Frequency") axes[-1, 0].set_xlabel("K"); # ... plotted side by side: plt.figure(figsize=(7,7)) plt.hist(df_K_official.values, bins=range(11), align='left') plt.legend(df_K_official.columns) plt.ylabel('Number of 3-hour intervals') plt.xlabel('K'); # ... and stacked together: plt.figure(figsize=(7,7)) plt.hist(df_K_official.values, bins=range(11), stacked=True, align='left', rwidth=0.8) plt.legend(df_K_official.columns) plt.ylabel('Number of 3-hour intervals') plt.xlabel('K'); # We also compute a daily sum of the K-indices for the 2003 file, and list days with high and low summed values. Note that this summation is not really appropriate because the K-index is quasi-logarithmic, however, this is a common simple measure of quiet and disturbed days. (These might be interesting days for you to look at.) df_K_official['Ksum'] = df_K_official.sum(axis=1) Ksort = df_K_official.sort_values('Ksum') print('Quiet days: \n\n', Ksort.head(10), '\n\n') print('Disturbed days: \n\n', Ksort.tail(10)) # ## Note on the Fast Fourier Transform # In the examples above we computed Fourier coefficients in the 'traditional' way, so that if $F(t)$ is a Fourier series representation of $f(t)$, then, # # $$ # \begin{align} # F(t) &= A_o+\sum_{n=1}^N A_n \cos\left(\frac{2\pi nt}{T}\right)+B_n \sin\left(\frac{2\pi nt}{T}\right) # \end{align} # $$ # # where $T$ is the fundamental period of $F(t)$. 
The $A_n$ and $B_n$ are estimated by # # $$ # \begin{align} # A_o&=\frac{1}{T}\int_0^T f(t) dt\\ # A_n&=\frac{2}{T}\int_0^T f(t)\cos\left(\frac{2\pi nt}{T}\right) dt\\ # B_n&=\frac{2}{T}\int_0^T f(t)\sin\left(\frac{2\pi nt}{T}\right) dt # \end{align} # $$ # # With $N$ samples of digital data, the integral for $A_n$ may be replaced by the summation # # $$ # \begin{align} # A_n&=\frac{2}{T}\sum_{j=0}^{N-1} f_j\cos\left(\frac{2\pi nj\Delta t}{T}\right) \Delta t\\ # &=\frac{2}{N}\sum_{j=0}^{N-1} f_j\cos\left(\frac{2\pi nj}{N}\right) # \end{align} # $$ # # where the sampling interval $\Delta t$ is given by $T = N \Delta t$ and $f_j = f(j \Delta t)$. A similar expression applies for the $B_n$, and these are the coefficients returned by the function _fourier_ above. # # The fast Fourier transform (FFT) offers a computationally efficient means of finding the Fourier coefficients. The conventions for the FFT and its inverse (IFFT) vary from package to package. In the _scipy.fftpack_ package, the FFT of a sequence $x_n$ of length $N$ is defined as # # $$ # \begin{align} # y_k&=\sum_{n=0}^{N-1} x_n\exp\left(-\frac{2\pi i\thinspace kn}{N}\right)\\ # &=\sum_{n=0}^{N-1} x_n\left(\cos\left(\frac{2\pi \thinspace kn}{N}\right)-i\sin\left(\frac{2\pi \thinspace kn}{N}\right)\right) # \end{align} # $$ # # with the inverse defined as, # # $$ # \begin{align} # x_n&=\frac{1}{N}\sum_{k=0}^{N-1} y_k\exp\left(\frac{2\pi i\thinspace kn}{N}\right)\\ # \end{align} # $$ # # (The _scipy_ documentation is perhaps a little confusing here because it explains the order of the $y_n$ as being $y_1,y_2, \dots y_{N/2-1}$ as corresponding to increasing positive frequency and $y_{N/2}, y_{N/2+1}, \dots y_{N-1}$ as ordered by decreasing negative frequency, for $N$ even. See: https://docs.scipy.org/doc/scipy/reference/tutorial/fft.html.) 
# # The interpretation is that if $y_k=a_k+ib_k$ then will have (for $N$ even), $y_{N-k} = a_k-ib_k$ and so # # $$ # \begin{align} # a_k&=\frac{1}{2}\text{Re}\left(y_k+y_{N-k}\right)\\ # b_k&=\frac{1}{2}\text{Im}\left(y_k-y_{N-k}\right) # \end{align} # $$ # # # and so we expect the relationship to the digitised Fourier series coefficients returned by the function _fourier_ defined above to be, # # $$ # \begin{align} # A_k&=\phantom{-}\frac{1}{N}\text{Re}\left(a_k+a_{N-k}\right)\\ # B_k&=-\frac{1}{N}\text{Im}\left(b_k-b_{N-k}\right) # \end{align} # $$ # # The following shows the equivalence between the conventional Fourier series approach and the FFT. # + from scipy.fftpack import fft # Compute the fourier series as before _df = df_obs.loc["2003-01-01"] x = (_df["X"] - _df["X"].mean()).values xcofs = fourier(x, 3) # Compute using scipy FFT npts = len(x) xfft = fft(x) # Compare results for the 24-hour component k = 1 print('Fourier coefficients: \n', f'A1 = {xcofs[1][0]} \n', f'B1 = {xcofs[1][1]} \n') print('scipy FFT outputs: \n', f'a1 = {np.real(xfft[k]+xfft[npts-k])/npts} \n', \ f'b1 = {-np.imag(xfft[k]-xfft[npts-k])/npts} \n') # - # ## References # <NAME> al. (1995) ‘Computer production of K indices: review and comparison of methods’, Geophysical Journal International. Oxford University Press, 123(3), pp. 866–886. doi: [10.1111/j.1365-246X.1995.tb06895.x](https://doi.org/10.1111/j.1365-246X.1995.tb06895.x).
geomag-obs-models/02a_K-Index-Calculation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Westeros Tutorial Part 1 - Welcome to the MESSAGEix framework & Creating a baseline scenario # # ### *Integrated Assessment Modeling for the 21st Century* # # For information on how to install *MESSAGEix*, please refer to [Installation page](https://docs.messageix.org/en/stable/#getting-started) and for getting *MESSAGEix* tutorials, please follow the steps mentioned in [Tutorials](https://docs.messageix.org/en/stable/tutorials.html). # # Please refer to the [user guidelines](https://github.com/iiasa/message_ix/blob/master/NOTICE.rst) # for additional information on using *MESSAGEix*, including the recommended citation and how to name new models. # # **Structure of these tutorials.** After having run this baseline tutorial, you are able to start with any of the other tutorials, but we recommend to follow the order below for going through the information step-wise: # # 1. Baseline tutorial (``westeros_baseline.ipynb``) # 2. Add extra detail and constraints to the model # 1. Emissions # 1. Introducing emissions (`westeros_emissions_bounds.ipynb`) # 2. Introducing taxes on emissions (`westeros_emissions_taxes.ipynb`) # 2. Add firm capacity (``westeros_firm_capacity.ipynb``) # 3. Add flexible energy generation (``westeros_flexible_generation.ipynb``) # 4. Add seasonality as an example of temporal variability (``westeros_seasonality.ipynb``) # 3. Post-processing: learn how to report calculations _after_ the MESSAGE model has run (``westeros_report.ipynb``) # # **Pre-requisites** # - Have succesfully installed *MESSAGEix*. 
# # _This tutorial is based on a presentation by <NAME> ([@gidden](https://github.com/gidden)) # for a summer school at the the **Centre National de la Recherche Scientifique (CNRS)** # on *Integrated Assessment Modeling* in June 2018._ # + [markdown] slideshow={"slide_type": "slide"} # ## Scope of this tutorial: Building a Simple Energy Model # # The goal of this tutorial is to build a simple energy model using *MESSAGEix* with minimal features that can be expanded in future tutorials. # # We will build the model component by component, focusing on both the **how** (code implementation) and **why** (mathematical formulation). # # *oh yeah let's fly away* # + [markdown] slideshow={"slide_type": "slide"} # ## Online documentation # # The full framework documentation is available at [https://docs.messageix.org](https://docs.messageix.org) # # <img src='_static/doc_page.png'> # + [markdown] slideshow={"slide_type": "slide"} # ## A stylized reference energy system model for Westeros # # This tutorial is based on the country of Westeros from the TV show "Game of Thrones". # # <table align='center'><tr><td><img src='_static/westeros.jpg' width='150'></td><td><img src='_static/base_res.png'></td></tr></table> # + [markdown] slideshow={"slide_type": "slide"} # ## MESSAGEix: the mathematical paradigm # # At its core, *MESSAGEix* is an optimization problem: # # > $\min \quad ~c^T \cdot x$ # > $~s.t. \quad A \cdot x \leq b$ # # More explicitly, the model... # - optimizes an **objective function**, nominally minimizing total **system costs** # - under a system of **constraints** (inequalities or equality conditions) # # The mathematical implementation includes a number of features that make it particularly geared towards the modelling of *energy-water-land systems* in the context of *climate change mitigation and sustainable development*. 
# # Throughout this document, the mathematical formulation follows the convention that # - decision **VARIABLES** ($x$) are capitalized # - input **parameters** ($A$, $b$) are lower case # + [markdown] slideshow={"slide_type": "slide"} # ## MESSAGEix: connected to the *ix modeling platform (ixmp)* # # The *modeling platform for integrated and cross-cutting analysis* (ixmp) provides a powerful framework for working with scenarios, including a database infrastucture for data version control and interfaces to scientific programming languages. # # <img src='_static/message_ixmp.png' width='700'> # + [markdown] slideshow={"slide_type": "slide"} # ## Ready, steady, go! # # First, we import all the packages we need. We import a utility function called *make_df*, which can be used to wrap the input data into dataframes that can be saved in model parameters. # + slideshow={"slide_type": "fragment"} import pandas as pd import ixmp import message_ix from message_ix.utils import make_df # %matplotlib inline # + [markdown] slideshow={"slide_type": "slide"} # The *MESSAGEix* model is built using the *ixmp* `Platform`. The `Platform` is your connection to a database for storing model input data and scenario results. # + slideshow={"slide_type": "fragment"} mp = ixmp.Platform() # + [markdown] slideshow={"slide_type": "slide"} # Once connected, we create a new `Scenario` to build our model. A `Scenario` instance will contain all the model input data and results. # + slideshow={"slide_type": "fragment"} scenario = message_ix.Scenario(mp, model='Westeros Electrified', scenario='baseline', version='new') # + [markdown] slideshow={"slide_type": "slide"} # ## Model Structure # # We start by defining basic characteristics of the model, including time, space, and the energy system structure. # + [markdown] slideshow={"slide_type": "fragment"} # The model horizon will span 3 decades (690-720). 
Let's assume that we're far in the future after the events of A Song of Ice and Fire (which occur ~300 years after Aegon the conqueror). # # | Math Notation | Model Meaning | # |---------------|------------------------------| # | $y \in Y^H$ | time periods in history | # | $y \in Y^M$ | time periods in model horizon| # + slideshow={"slide_type": "fragment"} history = [690] model_horizon = [700, 710, 720] scenario.add_horizon( year=history + model_horizon, firstmodelyear=model_horizon[0] ) # + [markdown] slideshow={"slide_type": "slide"} # Our model will have a single `node`, i.e., its spatial dimension. # # # | Math Notation | Model Meaning| # |---------------|--------------| # | $n \in N$ | node | # + slideshow={"slide_type": "fragment"} country = 'Westeros' scenario.add_spatial_sets({'country': country}) # + [markdown] slideshow={"slide_type": "slide"} # And we fill in the energy system's `commodities`, `levels`, `technologies`, and `modes` (i.e., modes of operation of technologies). This information defines how certain technologies operate. # # # | Math Notation | Model Meaning| # |---------------|--------------| # | $c \in C$ | commodity | # | $l \in L$ | level | # | $t \in T$ | technology | # | $m \in M$ | mode | # + slideshow={"slide_type": "fragment"} scenario.add_set("commodity", ["electricity", "light"]) scenario.add_set("level", ["secondary", "final", "useful"]) scenario.add_set("technology", ['coal_ppl', 'wind_ppl', 'grid', 'bulb']) scenario.add_set("mode", "standard") # + [markdown] slideshow={"slide_type": "slide"} # ## Supply and Demand (or Balancing Commodities) # + [markdown] slideshow={"slide_type": "fragment"} # The fundamental premise of the model is to satisfy demand for energy (services). # To first order, demand for services like electricity track with economic productivity (GDP). 
# We define a GDP profile similar to first-world GDP growth from [1900-1930](https://en.wikipedia.org/wiki/List_of_regions_by_past_GDP): # + slideshow={"slide_type": "fragment"} gdp_profile = pd.Series([1., 1.5, 1.9], index=pd.Index(model_horizon, name='Time')) gdp_profile.plot(title='Demand') # + [markdown] slideshow={"slide_type": "slide"} # The `COMMODITY_BALANCE_GT` and `COMMODITY_BALANCE_LT` equations ensure that `demand` for each `commodity` is met at each `level` in the energy system. # The equation is copied below in this tutorial notebook, but every model equation is available for reference in # the [Mathematical formulation](https://docs.messageix.org/en/stable/model/MESSAGE/model_core.html) section of the *MESSAGEix* documentation. # # $\sum_{\substack{n^L,t,m \\ y^V \leq y}} \text{output}_{n^L,t,y^V,y,m,n,c,l} \cdot \text{ACT}_{n^L,t,y^V,y,m}$ # $- \sum_{\substack{n^L,t,m, \\ y^V \leq y}} \text{input}_{n^L,t,y^V,y,m,n,c,l} \cdot \text{ACT}_{n^L,t,m,y}$ # $\geq \text{demand}_{n,c,l,y} \quad \forall \ l \in L$ # # While `demand` must be met, supply can *exceed* demand allowing the model to plan for meeting demand in future periods by storing storable commodities. # # + [markdown] slideshow={"slide_type": "slide"} # First we establish demand. Let's assume # # - 40 million people in [300 AC](https://atlasoficeandfireblog.wordpress.com/2016/03/06/the-population-of-the-seven-kingdoms/) # - similar population growth to Earth in the same time frame [(~factor of 12)](https://en.wikipedia.org/wiki/World_population_estimates) # - a per capita demand for electricity of 1000 kWh # - and 8760 hours in a year (of course!) 
# # Then we can add the demand parameter # + [markdown] slideshow={"slide_type": "skip"} # Note present day: [~72000 GWh in Austria](https://www.iea.org/statistics/?country=AUSTRIA&year=2016&category=Energy%20consumption&indicator=undefined&mode=chart&dataTable=INDICATORS) with population [~8.7M](http://www.austria.org/population/) which is ~8300 kWh per capita # + slideshow={"slide_type": "fragment"} demand_per_year = 40 * 12 * 1000 / 8760 light_demand = pd.DataFrame({ 'node': country, 'commodity': 'light', 'level': 'useful', 'year': model_horizon, 'time': 'year', 'value': (100 * gdp_profile).round(), 'unit': 'GWa', }) # + [markdown] slideshow={"slide_type": "fragment"} # `light_demand` illustrates the data format for *MESSAGEix* parameters. It is a `pandas.DataFrame` containing three types of information in a specific format: # # - A "value" column containing the numerical values for this parameter. # - A "unit" column. # - Other columns ("node", "commodity", "level", "time") that indicate the key to which each value applies. # + slideshow={"slide_type": "fragment"} light_demand # + slideshow={"slide_type": "fragment"} # We use add_par for adding data to a MESSAGEix parameter scenario.add_par("demand", light_demand) # + [markdown] slideshow={"slide_type": "fragment"} # In order to define the input and output commodites of each technology, we define some common keys. # # - **Input** quantities require `_origin` keys that specify where the inputs are *received from*. # - **Output** quantities require `_dest` keys that specify where the outputs are *transferred to*. 
# + slideshow={"slide_type": "skip"} year_df = scenario.vintage_and_active_years() vintage_years, act_years = year_df['year_vtg'], year_df['year_act'] base = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'mode': 'standard', 'time': 'year', 'unit': '-', } base_input = make_df(base, node_origin=country, time_origin='year') base_output = make_df(base, node_dest=country, time_dest='year') # + [markdown] slideshow={"slide_type": "slide"} # Working backwards along the Reference Energy System, we can add connections for the `bulb`. A light bulb… # # - receives *input* in the form of the "electricity" *commodity* at the "final [energy]" *level*, and # - *outputs* the commodity "light" at the "useful [energy]" level. # # The `value` in the input and output parameter is used to represent the effiecieny of a technology (efficiency = output/input). # For example, input of 1.0 and output of 1.0 for a technology shows that the efficiency of that technology is 100% in converting # the input commodity to the output commodity. # + slideshow={"slide_type": "fragment"} bulb_out = make_df(base_output, technology='bulb', commodity='light', level='useful', value=1.0) scenario.add_par('output', bulb_out) bulb_in = make_df(base_input, technology='bulb', commodity='electricity', level='final', value=1.0) scenario.add_par('input', bulb_in) # + [markdown] slideshow={"slide_type": "slide"} # Next, we parameterize the electrical `grid`, which… # # - receives electricity at the "secondary" energy level. # - also outputs electricity, but at the "final" energy level (to be used by the light bulb). # # Because the grid has transmission losses, only 90% of the input electricity is available as output. 
# + slideshow={"slide_type": "fragment"} grid_efficiency = 0.9 grid_out = make_df(base_output, technology='grid', commodity='electricity', level='final', value=grid_efficiency) scenario.add_par('output', grid_out) grid_in = make_df(base_input, technology='grid', commodity='electricity', level='secondary', value=1.0) scenario.add_par('input', grid_in) # + [markdown] slideshow={"slide_type": "slide"} # And finally, our power plants. The model does not include the fossil resources used as `input` for coal plants; however, costs of coal extraction are included in the parameter $variable\_cost$. # + slideshow={"slide_type": "fragment"} coal_out = make_df(base_output, technology='coal_ppl', commodity='electricity', level='secondary', value=1., unit="GWa") scenario.add_par('output', coal_out) wind_out = make_df(base_output, technology='wind_ppl', commodity='electricity', level='secondary', value=1., unit="GWa") scenario.add_par('output', wind_out) # + [markdown] slideshow={"slide_type": "slide"} # ## Operational Constraints and Parameters # + [markdown] slideshow={"slide_type": "fragment"} # The model has a number of "reality" constraints, which relate built *capacity* (`CAP`) to available power, or the *activity* (`ACT`) of that technology. # # The **capacity constraint** limits the activity of a technology to the installed capacity multiplied by a capacity factor. Capacity factor or is the fraction of installed capacity that can be active in a certain period (here the sub-annual time step *h*). # # $$\sum_{m} \text{ACT}_{n,t,y^V,y,m,h} # \leq \text{duration_time}_{h} \cdot \text{capacity_factor}_{n,t,y^V,y,h} \cdot \text{CAP}_{n,t,y^V,y} # \quad t \ \in \ T^{INV}$$ # # + [markdown] slideshow={"slide_type": "slide"} # This requires us to provide the `capacity_factor` for each technology. 
Here, we call `make_df()` and `add_par()` in a loop to execute similar code for three technologies: # + slideshow={"slide_type": "skip"} base_capacity_factor = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'time': 'year', 'unit': '-', } # + slideshow={"slide_type": "fragment"} capacity_factor = { 'coal_ppl': 1, 'wind_ppl': 0.36, 'bulb': 1, } for tec, val in capacity_factor.items(): df = make_df(base_capacity_factor, technology=tec, value=val) scenario.add_par('capacity_factor', df) # + [markdown] slideshow={"slide_type": "slide"} # The model can further be provided `technical_lifetime`s in order to properly manage deployed capacity and related costs via the **capacity maintenance** constraint: # # $\text{CAP}_{n,t,y^V,y} \leq \text{remaining_capacity}_{n,t,y^V,y} \cdot \text{value} \quad \forall \quad t \in T^{INV}$ # # where `value` can take different forms depending on what time period is considered: # # | Value | Condition | # |-------------------------------------|-----------------------------------------------------| # | $\Delta_y \text{historical_new_capacity}_{n,t,y^V}$ | $y$ is first model period | # | $\Delta_y \text{CAP_NEW}_{n,t,y^V}$ | $y = y^V$ | # | $\text{CAP}_{n,t,y^V,y-1}$ | $0 < y - y^V < \text{technical_lifetime}_{n,t,y^V}$ | # # + slideshow={"slide_type": "skip"} base_technical_lifetime = { 'node_loc': country, 'year_vtg': model_horizon, 'unit': 'y', } # + slideshow={"slide_type": "fragment"} lifetime = { 'coal_ppl': 20, 'wind_ppl': 20, 'bulb': 1, } for tec, val in lifetime.items(): df = make_df(base_technical_lifetime, technology=tec, value=val) scenario.add_par('technical_lifetime', df) # + [markdown] slideshow={"slide_type": "slide"} # ## Technological Diffusion and Contraction # # We know from historical precedent that energy systems can not be transformed instantaneously. Therefore, we use a family of dynamic constraints on activity and capacity. 
These constraints define the upper and lower limit of the domain of activity and capacity over time based on their value in the previous time step, an initial value, and growth/decline rates. # + [markdown] slideshow={"slide_type": "fragment"} # $\sum_{y^V \leq y,m} \text{ACT}_{n,t,y^V,y,m,h} \leq$ # $\text{initial_activity_up}_{n,t,y,h} # \cdot \frac{ \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} - 1 } # { growth\_activity\_up_{n,t,y,h} }+ \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} \cdot \Big( \sum_{y^V \leq y-1,m} ACT_{n,t,y^V,y-1,m,h} + \sum_{m} historical\_activity_{n,t,y-1,m,h}\Big)$ # + [markdown] slideshow={"slide_type": "slide"} # This example limits the ability for technologies to **grow**. To do so, we need to provide `growth_activity_up` values for each technology that we want to model as being diffusion constrained. Here, we set this constraint at 10% per year. # + slideshow={"slide_type": "skip"} base_growth = { 'node_loc': country, 'year_act': model_horizon, 'time': 'year', 'unit': '-', } # + slideshow={"slide_type": "fragment"} growth_technologies = [ "coal_ppl", "wind_ppl", ] for tec in growth_technologies: df = make_df(base_growth, technology=tec, value=0.1) scenario.add_par('growth_activity_up', df) # + [markdown] slideshow={"slide_type": "slide"} # ## Defining an Energy Mix (Model Calibration) # # To model the transition of an energy system, one must start with the existing system which are defined by the parameters `historical_activity` and `historical_new_capacity`. These parameters define the energy mix before the model horizon. 
# # We begin by defining a few key values: # # - how much useful energy was needed # - how much final energy was generated # - and the mix for different technologies # + slideshow={"slide_type": "fragment"} historic_demand = 0.85 * demand_per_year historic_generation = historic_demand / grid_efficiency coal_fraction = 0.6 # + slideshow={"slide_type": "skip"} base_capacity = { 'node_loc': country, 'year_vtg': history, 'unit': 'GWa', } base_activity = { 'node_loc': country, 'year_act': history, 'mode': 'standard', 'time': 'year', 'unit': 'GWa', } # + [markdown] slideshow={"slide_type": "slide"} # Then, we can define the **activity** and **capacity** in the historic period # + slideshow={"slide_type": "fragment"} old_activity = { 'coal_ppl': coal_fraction * historic_generation, 'wind_ppl': (1 - coal_fraction) * historic_generation, } for tec, val in old_activity.items(): df = make_df(base_activity, technology=tec, value=val) scenario.add_par('historical_activity', df) # + slideshow={"slide_type": "fragment"} act_to_cap = { 'coal_ppl': 1 / 10 / capacity_factor['coal_ppl'] / 2, # 20 year lifetime 'wind_ppl': 1 / 10 / capacity_factor['wind_ppl'] / 2, } for tec in act_to_cap: value = old_activity[tec] * act_to_cap[tec] df = make_df(base_capacity, technology=tec, value=value) scenario.add_par('historical_new_capacity', df) # + [markdown] slideshow={"slide_type": "slide"} # ## Objective Function # # The objective function drives the purpose of the optimization. Do we wish to seek maximum utility of the social planner, minimize carbon emissions, or something else? Classical IAMs seek to minimize total discounted system cost over space and time. # # $$\min \sum_{n,y \in Y^{M}} \text{interestrate}_{y} \cdot \text{COST_NODAL}_{n,y}$$ # # + [markdown] slideshow={"slide_type": "fragment"} # First, let's add the interest rate parameter. 
# + slideshow={"slide_type": "fragment"} scenario.add_par("interestrate", model_horizon, value=0.05, unit='-') # + [markdown] slideshow={"slide_type": "fragment"} # `COST_NODAL` is comprised of a variety of costs related to the use of different technologies. # + [markdown] slideshow={"slide_type": "slide"} # ### Investment Costs # # Capital, or investment, costs are invoked whenever a new plant or unit is built # # $$\text{inv_cost}_{n,t,y} \cdot \text{construction_time_factor}_{n,t,y} \cdot \text{CAP_NEW}_{n,t,y}$$ # + slideshow={"slide_type": "skip"} base_inv_cost = { 'node_loc': country, 'year_vtg': model_horizon, 'unit': 'USD/kW', } # Adding a new unit to the library mp.add_unit('USD/kW') # + slideshow={"slide_type": "fragment"} # in $ / kW (specific investment cost) costs = { 'coal_ppl': 500, 'wind_ppl': 1500, 'bulb': 5, } for tec, val in costs.items(): df = make_df(base_inv_cost, technology=tec, value=val) scenario.add_par('inv_cost', df) # + [markdown] slideshow={"slide_type": "slide"} # ### Fixed O&M Costs # # Fixed cost are only relevant as long as the capacity is active. This formulation allows to include the potential cost savings from early retirement of installed capacity. # # $$\sum_{y^V \leq y} \text{fix_cost}_{n,t,y^V,y} \cdot \text{CAP}_{n,t,y^V,y}$$ # + slideshow={"slide_type": "skip"} base_fix_cost = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'unit': 'USD/kWa', } # + slideshow={"slide_type": "fragment"} # in $ / kW / year (every year a fixed quantity is destinated to cover part of the O&M costs # based on the size of the plant, e.g. lightning, labor, scheduled maintenance, etc.) costs = { 'coal_ppl': 30, 'wind_ppl': 10, } for tec, val in costs.items(): df = make_df(base_fix_cost, technology=tec, value=val) scenario.add_par('fix_cost', df) # + [markdown] slideshow={"slide_type": "slide"} # ### Variable O&M Costs # # Variable Operation and Maintence costs are associated with the costs of actively running the plant. 
Thus, they are not applied if a plant is on standby (i.e., constructed, but not currently in use). # # $$\sum_{\substack{y^V \leq y \\ m,h}} \text{var_cost}_{n,t,y^V,y,m,h} \cdot \text{ACT}_{n,t,y^V,y,m,h} $$ # + slideshow={"slide_type": "skip"} base_var_cost = { 'node_loc': country, 'year_vtg': vintage_years, 'year_act': act_years, 'mode': 'standard', 'time': 'year', 'unit': 'USD/kWa', } # + slideshow={"slide_type": "fragment"} # in $ / kWa (costs associatied to the degradation of equipment when the plant is functioning # per unit of energy produced kW·year = 8760 kWh. # Therefore this costs represents USD per 8760 kWh of energy). Do not confuse with fixed O&M units. costs = { 'coal_ppl': 30, 'grid': 50, } for tec, val in costs.items(): df = make_df(base_var_cost, technology=tec, value=val) scenario.add_par('var_cost', df) # + [markdown] slideshow={"slide_type": "slide"} # A full model will also have costs associated with # # - costs associated with technologies (investment, fixed, variable costs) # - resource extraction: $\sum_{c,g} \ resource\_cost_{n,c,g,y} \cdot EXT_{n,c,g,y} $ # - emissions # - land use (emulator): $\sum_{s} land\_cost_{n,s,y} \cdot LAND_{n,s,y}$ # + [markdown] slideshow={"slide_type": "slide"} # ## Time to Solve the Model # # First, we *commit* the model structure and input data (sets and parameters). # In the `ixmp` backend, this creates a new model version in the database, which is assigned a version number automatically: # + slideshow={"slide_type": "fragment"} from message_ix import log log.info('version number prior to commit: {}'.format(scenario.version)) scenario.commit(comment='basic model of Westeros electrification') log.info('version number prior committing to the database: {}'.format(scenario.version)) # + [markdown] slideshow={"slide_type": "fragment"} # An `ixmp` database can contain many scenarios, and possibly multiple versions of the same model and scenario name. # These are distinguished by unique version numbers. 
# # To make it easier to retrieve the "correct" version (e.g., the latest one), you can set a specific scenario as the default version to use if the "Westeros Electrified" model is loaded from the `ixmp` database. # + slideshow={"slide_type": "fragment"} scenario.set_as_default() # + slideshow={"slide_type": "fragment"} scenario.solve() # + jupyter={"name": "solve-objective-value"} slideshow={"slide_type": "fragment"} scenario.var('OBJ')['lvl'] # + [markdown] slideshow={"slide_type": "slide"} # ## Plotting Results # # Analyzing the results of a scenario after it is solved is called **reporting**. # MESSAGE*ix* includes flexible and customizable message_ix/tutorial for reporting, but here we wish to focus on the results of our example scenario. # We use some custom code to set up some simple plots: # + slideshow={"slide_type": "fragment"} # Create a Reporter object to describe and carry out reporting # calculations and operations (like plotting) based on `scenario` from message_ix.reporting import Reporter rep = Reporter.from_scenario(scenario) # Add keys like "plot activity" to describe reporting operations. # See tutorial/utils/plotting.py from message_ix.util.tutorial import prepare_plots prepare_plots(rep) # + [markdown] slideshow={"slide_type": "slide"} # Now the object `rep` is ready to generate several plots. # # (For a detailed introduction to how the `Reporter` works, see `westeros_report.ipynb`.) # # ### Activity # # How much energy is generated in each time period from the different potential sources? # + slideshow={"slide_type": "fragment"} # Only show a subset of technologies in the follow plots; # e.g. exclude "bulb" and "grid" rep.set_filters(t=["coal_ppl", "wind_ppl"]) # Trigger the calculation and plotting rep.get("plot activity") # + [markdown] slideshow={"slide_type": "slide"} # ### Capacity # # How much capacity of each plant is installed in each period? # + slideshow={"slide_type": "fragment"} # Create a different plot. 
The same filters are still active. rep.get("plot capacity") # + [markdown] slideshow={"slide_type": "slide"} # ### Electricity Price # # And how much does the electricity cost? These prices are in fact **shadow prices** taken from the **dual variables** of the model solution. # They reflect the marginal cost of electricity generation (i.e., the additional cost of the system for supplying one more unit of # electricity), which is in fact the marginal cost of the most expensive operating generator. # # Note the price drop when the most expensive technology is no longer in the system. # + slideshow={"slide_type": "fragment"} # Replace the technology filters with a commodity filter; # show only "light" and not e.g. "electricity". rep.set_filters(c=["light"]) # Create a price plot rep.get("plot prices") # - # ## Close the connection to the database # # When working with local HSQLDB database instances, you cannot connect to one database from multipe Jupyter notebooks (or processes) at the same time. # # If you want to easily switch between notebooks with connections to the same `ixmp` database, you need to close the connection in one notebook before initializing the platform using `ixmp.Platform()` in another notebook. # # After having closed the database connection, you can reopen it using # ``` # mp.open_db() # ``` # + slideshow={"slide_type": "skip"} mp.close_db() # + [markdown] slideshow={"slide_type": "slide"} # ## Congratulations! # # You have built and run your very first *MESSAGEix* model. Welcome to the community! # # The next tutorials will introduce you to other features of the framework, including energy system constraints, emissions taxes, and other policy options. # # Check us out on Github https://github.com/iiasa/message_ix # and get in touch with us online https://groups.google.com/forum/message-ix ...
tutorial/westeros/westeros_baseline.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Tuple-mutability experiments: a tuple's slots are immutable, but mutable
# objects held in those slots (lists, here) can still change in place.
tupla = ([1, 2, 3], [2, 3, 4])

# Item assignment on a tuple raises TypeError; catch it so this exploratory
# script can run end-to-end instead of stopping at the demonstration.
try:
    tupla[1] = [2, 4, 5]
except TypeError as exc:
    print("tuple item assignment failed:", exc)

# Mutating the contained list through another reference is allowed.
arr = tupla[1]
arr += [2]
tupla

# `tupla[0] += [4]` extends the list in place and *then* fails to rebind the
# tuple slot — the list is mutated even though a TypeError is raised.
try:
    tupla[0] += [4]
except TypeError as exc:
    print("augmented assignment on tuple slot failed:", exc)
tupla

# A mutating method call never touches the tuple itself, so no error.
tupla[0].append(5)
tupla

tokens1 = [1, 1, 2, 3, 5]
tokens2 = [1, 2, 3, 3, 4]

from collections import Counter
from random import randint

tokens1 = [randint(0, 50) for _ in range(randint(25, 40))]
tokens2 = [randint(0, 50) for _ in range(randint(25, 40))]

# %%timeit
# Baseline similarity via Counter multiset intersection.
# NOTE(review): sum() over a Counter adds its *keys* (distinct shared
# tokens), not the intersection counts — presumably intentional for timing
# only; confirm against the `.values()` form used in the verification loop.
tokens1 = [randint(0, 15) for _ in range(randint(25, 40))]
tokens2 = [randint(0, 15) for _ in range(randint(25, 40))]
tokens1.sort()
tokens2.sort()
sum(Counter(tokens1) & Counter(tokens2))

# +
from typing import List
from dataclasses import dataclass
from math import ceil


@dataclass
class CandidateData:
    """Result of a clone-candidate comparison."""
    is_clone: bool  # True when the similarity reached the threshold
    score: float    # matched tokens / length of the longer token list


def compute_candidate_similarity(tokens1: List[str], tokens2: List[str],
                                 ths: float) -> CandidateData:
    """Score the overlap of two *sorted* token lists against a threshold.

    Walks both lists in lockstep (merge-style), counting equal tokens, and
    bails out early once the tokens remaining in the shorter tail cannot
    possibly reach the required number of matches.

    Parameters
    ----------
    tokens1, tokens2 : sorted lists of mutually comparable tokens
    ths : similarity threshold in [0, 1]

    Returns
    -------
    CandidateData with ``score = matches / max(len(tokens1), len(tokens2))``
    and ``is_clone`` set when the required match count was reached.
    """
    max_len = max(len(tokens1), len(tokens2))
    # Fix: two empty token lists are trivially identical; the original code
    # divided by zero here.
    if max_len == 0:
        return CandidateData(True, 1.0)
    req_matches = ceil(max_len * ths)
    curr_matches = 0
    tok_pos1 = 0
    tok_pos2 = 0
    while tok_pos1 < len(tokens1) and tok_pos2 < len(tokens2):
        # Early exit: even matching every remaining token cannot reach the
        # required count.
        if min(len(tokens1) - tok_pos1,
               len(tokens2) - tok_pos2) + curr_matches < req_matches:
            break
        if tokens1[tok_pos1] == tokens2[tok_pos2]:
            curr_matches += 1
            tok_pos1 += 1
            tok_pos2 += 1
        elif tokens1[tok_pos1] < tokens2[tok_pos2]:
            tok_pos1 += 1
        else:
            tok_pos2 += 1
    return CandidateData(curr_matches >= req_matches, curr_matches / max_len)
# -

# %%timeit
tokens1 = [randint(0, 15) for _ in range(randint(25, 40))]
tokens2 = [randint(0, 15) for _ in range(randint(25, 40))]
tokens1.sort()
tokens2.sort()
compute_candidate_similarity(tokens1, tokens2, 0.8)

compute_candidate_similarity(tokens1, tokens2, 0.8)

tokens1, tokens2

tokens1 = [1, 1, 2, 3, 5, 6]
# Randomised cross-check: the incremental filter in
# compute_candidate_similarity must reproduce the decision of a
# Counter-based reference similarity on 100k random sorted token lists.
tokens2 = [2, 3, 5, 6, 4]
ths = 0.5
for _ in range(100000):
    tokens1 = sorted(randint(0, 10) for _ in range(randint(25, 40)))
    tokens2 = sorted(randint(0, 10) for _ in range(randint(25, 40)))
    overlap = sum((Counter(tokens1) & Counter(tokens2)).values())
    sim = overlap / max(len(tokens1), len(tokens2))
    c = compute_candidate_similarity(tokens1, tokens2, ths)
    # The fast filter must agree exactly with the reference decision.
    assert c.is_clone == (sim >= ths)

# A fixed pair kept from a previous random run, for manual inspection.
tokens1 = [0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9]
tokens2 = [0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10]
compute_candidate_similarity(tokens1, tokens2, 0.8)
experiments/tokens position filtering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> <tr> # <td style="background-color:#ffffff;"> # <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td> # <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;"> # prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>) # </td> # </tr></table> # <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} 
$ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ # <h2> <font color="blue"> Solutions for </font>A Game with two biased coins</h2> # <a id="task2"></a> # <h3> Task 2: Tracing ten biased coin tosses </h3> # # By using python, calculate the probabilities of Asja seeing heads and tails after 10 coin tosses. # # $ # GameCoins = \begin{array}{c|cc} & \mathbf{Head} & \mathbf{Tail} \\ \hline \mathbf{Head} & 0.6 & 0.3\\ \mathbf{Tail} & 0.4 & 0.7 \end{array} = \begin{array}{c|cc} & \mathbf{0} & \mathbf{1} \\ \hline \mathbf{0} & 0.6 & 0.3 \\ \mathbf{1} & 0.4 & 0.7 \end{array} # $ # # Use a loop in your solution. # <h3>Solution</h3> # + # # We copy and paste the previous code # # initial condition: # Asja will start with one euro, # and so, we assume that the probability of having head is 1 at the beginning. 
# Task 2: evolve the head/tail distribution for 10 tosses.  Each toss is a
# matrix-vector product with the bias table:
#   P(head|head)=0.6, P(head|tail)=0.3, P(tail|head)=0.4, P(tail|tail)=0.7.
prob_head = 1
prob_tail = 0

number_of_iterations = 10

for _ in range(number_of_iterations):
    # Simultaneous update: both new values are computed from the old pair.
    prob_head, prob_tail = (prob_head * 0.6 + prob_tail * 0.3,
                            prob_head * 0.4 + prob_tail * 0.7)

print("the probability of getting head after",number_of_iterations,"coin tosses is",prob_head)
print("the probability of getting tail after",number_of_iterations,"coin tosses is",prob_tail)
# -

# <a id="task3"></a>
# <h3> Task 3 </h3>
#
# Repeat Task 2 for 20, 30, and 50 coin tosses.

# <h3>Solution</h3>

# +
# Task 3: same evolution, repeated for several toss counts.
iterations = [20,30,50]

for iteration in iterations:
    # Reset to the deterministic initial condition (certainly head).
    prob_head, prob_tail = 1, 0
    print("the number of iterations is",iteration)
    for _ in range(iteration):
        prob_head, prob_tail = (prob_head * 0.6 + prob_tail * 0.3,
                                prob_head * 0.4 + prob_tail * 0.7)
    print("the probability of getting head after",iteration,"coin tosses is",prob_head)
    print("the probability of getting tail after",iteration,"coin tosses is",prob_tail)
    print()
# -

# <a id="task4"></a>
# <h3> Task 4 </h3>
#
# Repeat Task 2 for 10, 20, and 50 coin tosses by picking different initial conditions, e.g.,
#
#     prob_head = prob_tail = 1/2
# or
#
#     prob_head = 0
#     prob_tail = 1

# <h3>Solution</h3>

# +
iterations = [20,30,50]

# Each pair is an initial [P(head), P(tail)] distribution.
initial_probabilities =[ [1/2,1/2], [0,1] ]

for initial_probability_pair in initial_probabilities:
    print("probability of head is",initial_probability_pair[0])
    print("probability of tail is",initial_probability_pair[1])
    print()
    for iteration in iterations:
        # Unpack the starting distribution for this run.
        prob_head, prob_tail = initial_probability_pair
        print("the number of iterations is",iteration)
        for _ in range(iteration):
            prob_head, prob_tail = (prob_head * 0.6 + prob_tail * 0.3,
                                    prob_head * 0.4 + prob_tail * 0.7)
        print("the probability of getting head after",iteration,"coin tosses is",prob_head)
        print("the probability of getting tail after",iteration,"coin tosses is",prob_tail)
        print()
    print()
bronze/B09_Coin_Flip_Game_Solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ASTRO 533 - Project 8 # # **Created:** Nov. 2020 # **Last Edit:** Nov. 2020 # # **Author:** <NAME> # **Email:** <EMAIL> # ## Load packages and read data # + import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.colors as mcolors from astropy.table import Table as tb import astropy.coordinates as coord import astropy.units as u from astropy.units import cds import copy plt.style.use('bill') my_YlGnBu = copy.copy(mpl.cm.get_cmap('YlGnBu')) # copy the default cmap my_YlGnBu.set_bad('w') my_magma = copy.copy(mpl.cm.get_cmap('magma')) my_magma.set_bad(my_magma(-1)) nyu_cat = tb.read('lowz_catalog.dr4.fits', format='fits') grp_gal = tb.read('SDSS_M_galaxy.dat', format='ascii') grp_grp = tb.read('SDSS_M_group.dat', delimiter='\s',guess=False,format='ascii.no_header') # + c = coord.SkyCoord(ra=nyu_cat['RA'], dec=nyu_cat['DEC'], frame="icrs", unit="deg") grp_c = coord.SkyCoord(ra=grp_gal['col4'],dec=grp_gal['col5'],frame="icrs", unit="deg") # https://docs.astropy.org/en/stable/coordinates/matchsep.html idx, d2d, d3d = coord.match_coordinates_sky(c, grp_c) idx2, d2d2, d3d2 = coord.match_coordinates_sky(grp_c, c) mtchd, = np.where(d2d.arcsec < 1) print(len(grp_gal['col15'][idx[mtchd]])) # make g-r_0.1 and stellar mass variables for later exploration/plotting. gr = np.zeros(len(c))-99.0 smass = np.zeros(len(c))-99.0 gr[mtchd]=grp_gal['col15'][idx[mtchd]] smass[mtchd]=grp_gal['col14'][idx[mtchd]] # - # Now make halo masses and central/satellite tags hmass = np.zeros(len(c))-99.0 grpid = np.zeros(len(c))-99.0 cen = np.full(len(c), False) # Loop :( Couldn't figure out a non-loop way. 
for i,j in enumerate(grp_gal['col3'][idx[mtchd]]): gg, = np.where(grp_grp['col1']==j) hmass[mtchd[i]] = grp_grp['col6'][gg] grpid[mtchd[i]] = grp_grp['col1'][gg] if (grp_gal['col1'][idx[mtchd[i]]]==grp_grp['col2'][gg]): cen[mtchd[i]] = True # ## Phase diagram of the most massive # + id_max = np.argsort(hmass)[-1] grp_id_max = np.where(grp_grp['col1']==grp_gal[idx[id_max]]['col3'])[0][0] print('mass:', grp_grp['col6'][grp_id_max]) cen_id_max = np.where(grp_gal['col1']==grp_grp['col2'][grp_id_max])[0] sat_id_max = np.where(grp_gal['col3']==grp_grp['col1'][grp_id_max])[0] # sat_id_max = np.intersect1d(sat_id_max, idx[mtchd]) ra_cen = grp_gal['col4'][cen_id_max] z_cmb_cen = grp_gal['col8'][cen_id_max] L_cen = grp_gal['col13'][cen_id_max] g_r_cen = grp_gal['col15'][cen_id_max] ra = grp_gal['col4'][sat_id_max] z_cmb = grp_gal['col8'][sat_id_max] L = grp_gal['col13'][sat_id_max] g_r = grp_gal['col15'][sat_id_max] # + fig, ax = plt.subplots(figsize=(6,6)) ax.scatter(ra, z_cmb, s=10**(L-8.8), facecolor='', edgecolor='r', lw=1) ax.scatter(ra_cen, z_cmb_cen, s=10**(L_cen-8.8), facecolor='r', edgecolor='', alpha=0.5) ax.set_xlabel(r'$\rm RA\ (deg)$') ax.set_ylabel(r'$z$') ax.set_xlim(229.6, 231) ax.set_ylim(0.07, 0.086) ax.set_xticks([229.6, 229.8, 230, 230.2, 230.4, 230.6, 230.8, 231]) ax.set_xticklabels([r'$229.6$', r'$229.8$', r'$230$', r'$230.2$', r'$230.4$', r'$230.6$', r'$230.8$', r'$231$']) ax.set_yticks([0.07, 0.072, 0.074, 0.076, 0.078, 0.08, 0.082, 0.084, 0.086]) ax.set_yticklabels([r'$0.07$', r'$0.072$', r'$0.074$', r'$0.076$', r'$0.078$', r'$0.08$', r'$0.082$', r'$0.084$', r'$0.086$']) plt.savefig('./figures/phase.pdf') plt.show() # + H0 = 100*u.km/u.s/u.Mpc d = cds.c*0.078/H0 print('d:', d.to(u.Mpc)) ind_valid = np.where(z_cmb>0.07)[0] x = ra[ind_valid] * (np.pi/180) * d v = (z_cmb[ind_valid]) * cds.c print(np.mean(x.to(u.kpc)), np.std(x.to(u.kpc))) print(np.mean(v.to(u.km/u.s)), np.std(v.to(u.km/u.s))) print( np.log10( 
(5*np.std(x.to(u.kpc))*np.var(v.to(u.km/u.s))/cds.G).to(u.Msun).value ) ) # - # ## CMD # + fig, ax = plt.subplots(figsize=(6,6)) ax.hist2d(grp_gal[idx[mtchd]]['col15'], grp_gal[idx[mtchd]]['col13'], range=[[0.2, 1.2], [8.5, 11.5]], bins = 60, cmap=my_YlGnBu) ax.scatter(g_r, L, s=10, facecolor='', edgecolor='r', lw=1) ax.scatter(g_r_cen, L_cen, s=100, facecolor='r', edgecolor='', marker='*') ax.set_xlabel(r'$(g-r)_{z=0.1}$') ax.set_ylabel(r'$\log_{10}\,(L\,\left/\,L_\odot\right.)+2\log_{10}\,h$') ax.set_xlim(0.2, 1.2) ax.set_ylim(8.5, 11.5) ax.set_xticks([0.2, 0.4, 0.6, 0.8, 1, 1.2]) ax.set_xticklabels([r'$0.2$', r'$0.4$', r'$0.6$', r'$0.8$', r'$1$', r'$1.2$']) ax.set_yticks([8.5, 9, 9.5, 10, 10.5, 11, 11.5]) ax.set_yticklabels([r'$8.5$', r'$9$', r'$9.5$', r'$10$', r'$10.5$', r'$11$', r'$11.5$']) # plt.savefig('./figures/cmd.pdf') plt.show() # - # ## Low mass clusters # + id_min = np.argsort(hmass)[np.random.randint(10000,20000, 100)] cen_id_min = np.array([]) sat_id_min = np.array([]) for i in range(100): grp_id_min = np.where(grp_grp['col1'] == grp_gal[idx[id_min[i]]]['col3'])[0][0] cen_id_min = np.concatenate( (cen_id_min, np.where(grp_gal['col1']==grp_grp['col2'][grp_id_min])[0]) ) sat_id_min = np.concatenate( (sat_id_min, np.where(grp_gal['col3']==grp_grp['col1'][grp_id_min])[0]) ) cen_id_min = np.int64(cen_id_min) sat_id_min = np.int64(sat_id_min) ra2_cen = grp_gal['col4'][cen_id_min] z2_cmb_cen = grp_gal['col8'][cen_id_min] L2_cen = grp_gal['col13'][cen_id_min] g_r2_cen = grp_gal['col15'][cen_id_min] ra2 = grp_gal['col4'][sat_id_min] z2_cmb = grp_gal['col8'][sat_id_min] L2 = grp_gal['col13'][sat_id_min] g_r2 = grp_gal['col15'][sat_id_min] print(len(sat_id_min)) # - print('mass:', np.sort(hmass)[10000]) print(len(cen_id_min)/len(sat_id_min)) # + fig, ax = plt.subplots(figsize=(6,6)) ax.hist2d(grp_gal[idx[mtchd]]['col15'], grp_gal[idx[mtchd]]['col13'], range=[[0.2, 1.2], [8, 11.5]], bins = 60, cmap=my_YlGnBu) ax.scatter(g_r, L, s=10, facecolor='', 
edgecolor='r', lw=1) ax.scatter(g_r_cen, L_cen, s=100, facecolor='r', edgecolor='', marker='*') ax.scatter(g_r2, L2, s=10, facecolor='', edgecolor='k', lw=1) ax.scatter(g_r2_cen, L2_cen, s=50, facecolor='k', edgecolor='', marker='*') ax.set_xlabel(r'$(g-r)_{z=0.1}$') ax.set_ylabel(r'$\log_{10}\,(L\,\left/\,L_\odot\right.)+2\log_{10}\,h$') ax.set_xlim(0.2, 1.2) ax.set_ylim(8, 11.5) ax.set_xticks([0.2, 0.4, 0.6, 0.8, 1, 1.2]) ax.set_xticklabels([r'$0.2$', r'$0.4$', r'$0.6$', r'$0.8$', r'$1$', r'$1.2$']) ax.set_yticks([8, 8.5, 9, 9.5, 10, 10.5, 11, 11.5]) ax.set_yticklabels([r'$8$', r'$8.5$', r'$9$', r'$9.5$', r'$10$', r'$10.5$', r'$11$', r'$11.5$']) plt.savefig('./figures/cmd.pdf') plt.show() # - ind_1 = np.where(nyu_cat['SERSIC_N'][:,2] < 2)[0] ind_2 = np.where(nyu_cat['SERSIC_N'][:,2] > 2)[0] # + fig, [ax, ax2] = plt.subplots(1,2, figsize=(12,6), sharey=True) fig.subplots_adjust(wspace=0) ax.hist2d(nyu_cat['ABSMAG'][ind_1,1]-nyu_cat['ABSMAG'][ind_1,2], nyu_cat['ABSMAG'][ind_1,2]-5*np.log10(1), range=[[0, 1], [-24, -16]], bins = 60, cmap=my_YlGnBu) ax.axvline(0.6, c='gray', ls='-.', alpha=0.6) ax2.hist2d(nyu_cat['ABSMAG'][ind_2,1]-nyu_cat['ABSMAG'][ind_2,2], nyu_cat['ABSMAG'][ind_2,2]-5*np.log10(1), range=[[0, 1], [-24, -16]], bins = 60, cmap=my_YlGnBu) ax2.axvline(0.6, c='gray', ls='-.', alpha=0.6) ax.set_xlabel(r'$g-r$') ax.set_ylabel(r'$M_r-5\,\log_{10}\,h$') ax.set_xlim(0.2, 1) ax.set_ylim(-16, -22) ax.set_xticks([0.2, 0.4, 0.6, 0.8]) ax.set_xticklabels([r'$0.2$', r'$0.4$', r'$0.6$', r'$0.8$']) ax.set_yticks([-16, -18, -20, -22]) ax.set_yticklabels([r'$-16$', r'$-18$', r'$-20$', r'$-22$']) ax2.set_xlabel(r'$g-r$') ax2.set_xlim(0.2, 1) ax2.set_xticks([0.2, 0.4, 0.6, 0.8, 1]) ax2.set_xticklabels([r'$0.2$', r'$0.4$', r'$0.6$', r'$0.8$', r'$1$']) ax.text(0.95, 0.96, r'$n<2$', transform=ax.transAxes, horizontalalignment='right', verticalalignment='top', fontsize=20) ax2.text(0.95, 0.96, r'$n>2$', transform=ax2.transAxes, horizontalalignment='right', 
verticalalignment='top', fontsize=20) plt.savefig('./figures/cmd2.pdf') plt.show()
project_8/project_8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Aim: Integrate gene expression data from three online data portals: Stemformatics, ENCODE and Haemosphere.
#
# In previous notebooks, we have combined the gene expression data of selected samples in each individual data portal. Here, we will integrate the three pre-combined expression datasets together into a larger dataset of mouse blood.
#

import pandas as pd
import atlas
import handler
import numpy as np

# ### Merge expression tables

# Load the pre-combined per-portal expression matrices (genes x samples).
expression_s4m = pd.read_csv('../data/interim/mouse_integrate/expression_s4m.tsv', sep='\t', index_col=0)
expression_encode = pd.read_csv('../data/interim/mouse_integrate/expression_encode.tsv', sep='\t', index_col=0)
expression_haemosphere = pd.read_csv('../data/interim/mouse_integrate/expression_Haemosphere.tsv', sep='\t', index_col=0)
print(expression_s4m.shape, expression_encode.shape, expression_haemosphere.shape)

# +
# Restrict every table to the genes shared by all three portals, merge the
# sample columns, then rank-transform — presumably to make expression values
# comparable across platforms (TODO: confirm against atlas.rankTransform).
dfs = [expression_encode, expression_haemosphere, expression_s4m]
common_genes = handler.find_common_genes(dfs)
mouse_atlas_expression = atlas.rankTransform(handler.merge_columns(dfs, common_genes))
print(mouse_atlas_expression.shape)
mouse_atlas_expression.head()
# -

# ### Standardise sample metadata
#
# We have encountered several difficulties during the integration of the metadata associated with the collected datasets.
#
# 1. Different datasets may record different pieces of information to describe each sample. e.g. 'tissue' attribute is recorded in some sample metadata but not all.
# 2. Same piece of information can be recorded in different ways. e.g. cell type information is recorded in a single 'celltype' column in the haemosphere metadata, whereas in s4m metadata, there are 4 columns having information related to the cell type of samples.
# 3. Inconsistent format of contents e.g. macrophage is represented as 'BM mac', 'BM macrophage', 'BM-derived macrophage day 0', 'Bone marrow derived macrophage' in a same column.
# 4. Different types of information might be stored together under the same attribute. e.g. In 'replicate_group_id' column of the s4m metadata, we might find experiment information, cell type and sort markers information about samples.
#
# To address these issues:
# 1. we will determine an essential list of attributes to describe samples and unify the naming of these attributes. This essential list of attributes is:
#
#    Cell Type; Cell Lineage; Description; Dataset Name; Platform
#
# 2. Reannotate the metadata so that the content of each attribute is consistent.

# Load the reannotated sample metadata.
# For metadata of each data collection: stemformatics, encode and haemosphere, we added two manually annotated columns
# 'cell_lineage_anno' and 'celltype_anno' with consistent content format.
samples_s4m = pd.read_csv('../data/interim/reannotated/samples_s4m_anno.tsv', sep='\t', index_col=0)
samples_encode = pd.read_csv('../data/interim/reannotated/samples_encode_anno.tsv', sep='\t', index_col=0)
samples_haemosphere = pd.read_csv('../data/interim/reannotated/samples_Haemosphere_anno.tsv', sep='\t', index_col=0)

# Slice relevant columns from each metadata table and rename in consistent format.
samples_s4m[:3]

samples_s4m = samples_s4m[['celltype_anno', 'cell_lineage_anno', 'description', 'platform', 'dataset_name']]
samples_s4m.columns = ['Cell Type', 'Cell Lineage', 'Description', 'Platform', 'Dataset Name']

samples_haemosphere[:3]

samples_haemosphere = samples_haemosphere[['celltype_anno', 'cell_lineage_anno', 'description', 'platform', 'dataset_name']]
samples_haemosphere.columns = ['Cell Type', 'Cell Lineage', 'Description', 'Platform', 'Dataset Name']

samples_encode[:3]

# NOTE: the ENCODE table uses different source column names ('Description',
# 'Platform', 'Project') than the other two collections.
samples_encode = samples_encode[['celltype_anno', 'cell_lineage_anno', 'Description', 'Platform', 'Project']]
samples_encode.columns = ['Cell Type', 'Cell Lineage', 'Description', 'Platform', 'Dataset Name']

# ### Merge the three metadata tables

mouse_atlas_samples = pd.concat([samples_encode, samples_haemosphere, samples_s4m])
print(mouse_atlas_samples.shape)
mouse_atlas_samples.head()

# Remove duplicated samples that are included in multiple data collections
# (keeps the first occurrence) and drop rows with missing sample ids;
# mirror the de-duplication on the expression columns.
mouse_atlas_samples = mouse_atlas_samples[~mouse_atlas_samples.index.duplicated() & mouse_atlas_samples.index.notna()]
mouse_atlas_expression = mouse_atlas_expression.loc[:,~mouse_atlas_expression.columns.duplicated()]
print(mouse_atlas_samples.shape, mouse_atlas_expression.shape)

# Save the integrated expression table and sample table.
mouse_atlas_expression.to_csv('../data/interim/mouse_atlas/mouse_atlas_expression.tsv', sep='\t')
mouse_atlas_samples.to_csv('../data/interim/mouse_atlas/mouse_atlas_samples.tsv', sep='\t')
notebooks/2_data_integration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:dl-minicourse] *
#     language: python
#     name: conda-env-dl-minicourse-py
# ---

# Demo: approximate Bayesian uncertainty for a tiny regression net by
# keeping dropout active at prediction time (Monte-Carlo dropout).

import torch
from torch import nn, optim
from matplotlib import pyplot as plt
from res.plot_lib import set_default

# Set style (needs to be in a new cell)
set_default(figsize=(16, 8))

# Training set
m = 20  # nb of training pairs
x = (torch.rand(m) - 0.5) * 12  # inputs, sampled from -5 to +5
y = x * torch.sin(x)  # targets

# View training points
plt.plot(x.numpy(), y.numpy(), 'o')
plt.axis('equal')
plt.ylim([-10, 5])

# +
# Define network architecture (try different non-linearities)
non_linear = nn.Tanh
non_linear = nn.ReLU  # this second assignment overrides Tanh; comment it out to try Tanh

# Two hidden layers of 20 units; dropout layers stay in the model so they
# can be re-enabled at inference time via net.train().
net = nn.Sequential(
    nn.Dropout(p=0.05),
    nn.Linear(1, 20),
    non_linear(),
    nn.Dropout(p=0.05),
    nn.Linear(20, 20),
    non_linear(),
    nn.Linear(20, 1)
)
# -

# Training objective and optimiser
criterion = nn.MSELoss()
optimiser = optim.SGD(net.parameters(), lr=0.01, weight_decay=0.00001)

# Training loop
for epoch in range(1000):
    y_hat = net(x.view(-1, 1))
    loss = criterion(y_hat, y.view(-1, 1))
    optimiser.zero_grad()
    loss.backward()
    optimiser.step()
    # print(loss.item())

# Define a denser input range (wider than the training interval, so the
# right-hand plot also shows extrapolation behaviour)
xx = torch.linspace(-15, 15, 1000)

# +
# Evaluate net over denser input (try both eval() and train() modes)
net.eval()
# net.train()
with torch.no_grad():
    plt.plot(xx.numpy(), net(xx.view(-1, 1)).squeeze().numpy(), 'C1')
plt.plot(x.numpy(), y.numpy(), 'oC0')
plt.axis('equal')
plt.ylim([-10, 5])
# -

# Multiple (100) runs for denser input.
# train() keeps dropout stochastic, so each forward pass samples a
# different sub-network — the spread of the runs estimates uncertainty.
net.train()
y_hat = list()
with torch.no_grad():
    for t in range(100):
        y_hat.append(net(xx.view(-1, 1)).squeeze())

# Evaluate mean and std over denser input
y_hat = torch.stack(y_hat)
mean = y_hat.mean(0)
std = y_hat.std(0)

# Visualise mean and mean ± std -> confidence range
plt.plot(xx.numpy(), mean.numpy(), 'C1')
plt.fill_between(xx.numpy(), (mean + std).numpy(), (mean - std).numpy(), color='C2')
plt.plot(x.numpy(), y.numpy(), 'oC0')
plt.axis('equal')
plt.ylim([-10, 5])
ML_AI/PyTorch/Bayesian_nn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Day 14 - Bit masking operations # # * https://adventofcode.com/2020/day/14 # # All we need to do is generate *two* bitmasks per input mask: # # - A mask to _set_ bits, using bit-wise OR (`number | bitmask`); any `1` in the bitmask sets that bit in the output. # - A mask to _clear_ bits, using bit-wise AND (`number & bitmask`); any `0` in the bitmask clears that bit. # # The masks can be generated trivially; just replace the `"X"`s with `"0"` or `"1"` respectively, then convert the string as a binary value to an integer; the example `XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X` input makes a set mask when transformed to `int("000000000000000000000000000001000000", 2)`, and a clear mask when transformed to `int("111111111111111111111111111111111101", 2)`. # # Memory is just a `defaultdict(int)` object. 
# + import re from collections import defaultdict from dataclasses import dataclass from typing import Any @dataclass class Mask: set_mask: int = 0 clear_mask: int = (2 ** 36) - 1 def __rand__(self, other: Any) -> int: if not isinstance(other, int): return NotImplemented return other & self.clear_mask | self.set_mask @classmethod def from_mask(cls, mask: str) -> "Mask": return cls( int(mask.translate({88: 48}), 2), # X -> 0 int(mask.translate({88: 49}), 2), # X -> 1 ) _instr = re.compile(r""" ^(?: mem\[(?P<addr>\d+)\]\s*=\s*(?P<val>\d+) | mask\s*=\s*(?P<mask>[01X]+) )$ """, flags=re.VERBOSE).search def initialize_program(lines: list[str], _parse=_instr) -> int: mem = defaultdict(int) mask = Mask() for match in map(_parse, lines): if (mval := match["mask"]): mask = Mask.from_mask(mval) else: mem[int(match["addr"])] = int(match["val"]) & mask return sum(mem.values()) assert initialize_program("""\ mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X mem[8] = 11 mem[7] = 101 mem[8] = 0 """.splitlines()) == 165 # - import aocd lines = aocd.get_data(day=14, year=2020).splitlines() print("Part 1:", initialize_program(lines)) # ## Part 2 - generating addresses # # At first I wondered if there might be a method of eliminating addresses from the input, as I was worried about the size of the address space. But looking at my input, the masks never have more than 9 Xs, so up to 512 potential addresses to generate per mask. That's easy enough to brute-force. # # To generate the floating masks, we need to _clear_ the bits at the `X` positions, then provide alternative values for these for all possible bit combinations. We can take the `X` positions to produce a series of `(0, 1 << bitpos)` tuples as input to `product()`, we can then use their combination as an additional OR mask. # # E.g. 
# `000000000000000000000000000000X1001X` becomes a set mask (`000000000000000000000000000000010010`), and the Xs are mapped to `1 << 5` and `1 << 1` and each paired with `0`, so we can generate `(0, 0)`, `(0, 1 << 1)`, `(1 << 5, 0)` and `(1 << 5, 1 << 1)`, generating the 4 masks needed to produce the floating addresses.
#
# I opted to generate `Mask` instances; you can generate all possible addresses by applying the `Mask` instances that iteration over a `FloatingMask` produces:

# +
from collections.abc import Iterable, Iterator
from functools import reduce
from itertools import product
from operator import or_


@dataclass
class FloatingMask:
    """Version-2 mask whose X positions 'float' over every 0/1 combination.

    Iterating yields one concrete ``Mask`` per combination of the floating
    bits; applying each yielded mask to a base address enumerates all
    decoded memory addresses.
    """

    set_mask: int = 0                 # bits forced to 1 by literal 1s in the mask
    clear_mask: int = (2 ** 36) - 1   # clears only the X positions; 0s leave the address untouched
    address_bits: Iterable[int] = ()  # one single-bit value (1 << pos) per X in the mask

    def __rand__(self, other: Any) -> int:
        if not isinstance(other, int):
            return NotImplemented
        return other & self.clear_mask | self.set_mask

    def __iter__(self) -> Iterator[Mask]:
        # Cartesian product over (0, bit) pairs enumerates every floating
        # combination; OR-fold each combination into the base set mask.
        for combo in product(*([0, a] for a in self.address_bits)):
            yield Mask(reduce(or_, combo, self.set_mask), self.clear_mask)

    @classmethod
    def from_mask(cls, mask: str) -> "FloatingMask":
        # Fixed: the return annotation previously said ``-> "Mask"`` although
        # ``cls(...)`` constructs a FloatingMask.
        # str.translate maps by code point: 88 == ord("X"), 48/49 == ord("0")/ord("1").
        return cls(
            int(mask.translate({88: 48}), 2),          # X -> 0: literal 1s become the set mask
            int(mask.translate({48: 49, 88: 48}), 2),  # 0 -> 1, X -> 0: clear only the X bits
            [1 << (35 - i) for i, c in enumerate(mask) if c == "X"],
        )


def apply_memory_address_decoder_program(lines: list[str], _parse=_instr) -> int:
    """Run the part 2 program and return the sum of all values left in memory."""
    mem = defaultdict(int)
    fmask = FloatingMask()
    for match in map(_parse, lines):
        if (mval := match["mask"]):
            fmask = FloatingMask.from_mask(mval)
        else:
            addr, val = map(int, (match["addr"], match["val"]))
            # Write ``val`` to every floating address (in-place dict merge, 3.9+).
            mem |= {addr & mask: val for mask in fmask}
    return sum(mem.values())


# Inline self-test against the worked example from the puzzle text.
assert apply_memory_address_decoder_program("""\
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
""".splitlines()) == 208
# -

print("Part 2:", apply_memory_address_decoder_program(lines))
2020/Day 14.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ 
\left\lVert #1 \right\rVert } $ # $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ # $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ # $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ # $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ # $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ # $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ # <font style="font-size:28px;" align="left"><b>Two Qubits</b></font> # <br> # _prepared by <NAME>_ # <br><br> # [<img src="../qworld/images/watch_lecture.jpg" align="left">](https://youtu.be/qvQrDjRRj28) # <br><br><br> # Remember that when we have a quantum system with two qubits, then we can represent its states as $ \ket{00}, \ket{01}, \ket{10}, \ket{11} $. # # The state $ \ket{ab} $ means that # <ul> # <li>the first qubit is in state $ \ket{a} $ and </li> # <li> the second qubit is in state $ \ket{b} $, </li> # </ul> # where $ a,b \in \{0,1\} $. # # $ \ket{ab} = \ket{a} \otimes \ket{b} $ (or shortly $\ket{a}\ket{b}$. # <h3> Task 1 (vector representation)</h3> # # Verify the vector representations of $ \ket{00}, \ket{01}, \ket{10}, \ket{11} $: # # $$ # \ket{00} = \myvector{1 \\ 0 \\ 0 \\ 0}, # ~~~~~~ # \ket{01} = \myvector{0 \\ 1 \\ 0 \\ 0}, # ~~~~~~ # \ket{10} = \myvector{0 \\ 0 \\ 1 \\ 0}, # ~~~ \mbox{ and } ~~~ # \ket{11} = \myvector{0 \\ 0 \\ 0 \\ 1}. # $$ # <h3> Task 2 (generalization)</h3> # # Suppose that we have $ k>1 $ qubits (or bits). # # Then, any deterministic (basis) state can be represented by $ k $ bits: $ \ket{b_1b_2\cdots b_k} $, where any $ b_j \in \{0,1\} $ for $ 1 \leq j \leq k $. # - What is the size of the vector representing the states of $k$ qubits? # - If the decimal value of $ \ket{b_1 b_2 \cdots b_k} $ is $ b $, then which entry has the value of 1? # <h3>Operators on two qubits</h3> # # We define a quantum circuit with two qubits and apply the Hadamard operator to each of them. 
# + from qiskit import QuantumCircuit # remark the coincise representation of a quantum circuit qc = QuantumCircuit(2) qc.h(0) qc.h(1) qc.draw(output='mpl',reverse_bits=True) # - # These two Hadamards can also be represented as a single quantum operator on two qubits: $ H \otimes H $. # # $$ # H^{\otimes 2} = H \otimes H = \hadamard \otimes \hadamard = \Htwo . # $$ # <h3> Unitary backend</h3> # # Unitary_simulator gives a single matrix representation of all gates in the circuit until that point. # # job = execute(circuit, Aer.get_backend('unitary_simulator'),optimization_level=0) # current_unitary = job.result().get_unitary(circuit, decimals=3) # print(current_unitary) # + from qiskit import execute, Aer job = execute(qc, Aer.get_backend('unitary_simulator'),shots=1,optimization_level=0) current_unitary = job.result().get_unitary(qc, decimals=3) for row in current_unitary: column = "" for entry in row: column = column + str(entry.real) + " " print(column) # - # <h3> Task 3 </h3> # # We define a quantum circuit with two qubits: $ q_0 $ and $ q_1 $. They are tensored as $ q_1 \otimes q_0 $ in Qiskit. # # We apply the Hadamard operator to $q_1$. from qiskit import QuantumCircuit qc = QuantumCircuit(2) qc.h(1) qc.draw(output='mpl',reverse_bits=True) # Then, the quantum operator applied to both qubits will be $ H \otimes I $. # # Read the quantum operator of the above circuit by using 'unitary_simulator' and then verify that it is $ H \otimes I $. 
# + from qiskit import execute, Aer # # your code is here # job = execute(qc, Aer.get_backend('unitary_simulator'),shots=1,optimization_level=0) current_unitary = job.result().get_unitary(qc, decimals=3) for row in current_unitary: column = "" for entry in row: column = column + str(round(entry.real,3)) + " " print(column) # - # [click for our solution](Q60_Two_Qubits_Solutions.ipynb#task3) # <h3>Applying Hadamards to both qubits</h3> # # Applying a h-gate to the first and second qubits is the same as applying the following single operator on both qubits: # # $$ # H^{\otimes 2} = H \otimes H = \hadamard \otimes \hadamard = \Htwo . # $$ # <h4> Case 1: Let's find $ H^{\otimes 2} \ket{00} $ (in three different ways) </h4> # <ul> # <li> Direct matrix-vector multiplication: # $$ # H^{\otimes 2} \ket{00} # = \Htwo \myvector{1 \\ 0 \\ 0 \\ 0} # = \myvector{ \frac{1}{2} \\ \frac{1}{2} \\ \frac{1}{2} \\ \frac{1}{2} } . # $$ </li> # <li> We calculate the quantum state of each state, and then we find the quantum state of the composite system. # $$ # H\ket{0} \otimes H \ket{0} # = \stateplus \otimes \stateplus # = \myvector{ \frac{1}{2} \\ \frac{1}{2} \\ \frac{1}{2} \\ \frac{1}{2} }. # $$ </li> # <li> We make calculations with $ \ket{0} $ and $ \ket{1} $. # $$ # H \ket{0} \otimes H \ket{0} # = \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } # \otimes \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } # = \frac{1}{2} \ket{00} + \frac{1}{2} \ket{01} + \frac{1}{2} \ket{10} + \frac{1}{2} \ket{11} # = \myvector{ \frac{1}{2} \\ \frac{1}{2} \\ \frac{1}{2} \\ \frac{1}{2} }. # $$ # </ul> # <hr> # <h3> Task 4 </h3> # # Pick one of the following cases (2, 3, or 4), and verify the correctness of all three different ways for this selected case. 
# <hr> # <h4> Case 2: Let's find $ H^{\otimes 2} \ket{01} $ (in three different ways) </h4> # <ul> # <li> Direct matrix-vector multiplication: # $$ # H^{\otimes 2} \ket{01} # = \Htwo \myvector{0 \\ 1 \\ 0 \\ 0} # = \myrvector{ \frac{1}{2} \\ - \frac{1}{2} \\ \frac{1}{2} \\ - \frac{1}{2} } . # $$ </li> # <li> We calculate the quantum state of each state, and then we find the quantum state of the composite system. # $$ # H\ket{0} \otimes H \ket{1} # = \stateplus \otimes \stateminus # = \myrvector{ \frac{1}{2} \\ - \frac{1}{2} \\ \frac{1}{2} \\ - \frac{1}{2} }. # $$ </li> # <li> We make calculations with $ \ket{0} $ and $ \ket{1} $. # $$ # H \ket{0} \otimes H \ket{1} # = \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } # \otimes \mypar{ \frac{1}{\sqrt{2}} \ket{0} - \frac{1}{\sqrt{2}} \ket{1} } # = \frac{1}{2} \ket{00} - \frac{1}{2} \ket{01} + \frac{1}{2} \ket{10} - \frac{1}{2} \ket{11} # = \myrvector{ \frac{1}{2} \\ - \frac{1}{2} \\ \frac{1}{2} \\ - \frac{1}{2} }. # $$ # </ul> # <h4> Case 3: Let's find $ H^{\otimes 2} \ket{10} $ (in three different ways) </h4> # <ul> # <li> Direct matrix-vector multiplication: # $$ # H^{\otimes 2} \ket{10} # = \Htwo \myvector{0 \\ 0 \\ 1 \\ 0} # = \myrvector{ \frac{1}{2} \\ \frac{1}{2} \\ - \frac{1}{2} \\ - \frac{1}{2} } . # $$ </li> # <li> We calculate the quantum state of each state, and then we find the quantum state of the composite system. # $$ # H\ket{1} \otimes H \ket{0} # = \stateminus \otimes \stateplus # = \myrvector{ \frac{1}{2} \\ \frac{1}{2} \\ - \frac{1}{2} \\ - \frac{1}{2} }. # $$ </li> # <li> We make calculations with $ \ket{0} $ and $ \ket{1} $. # $$ # H \ket{1} \otimes H \ket{0} # = \mypar{ \frac{1}{\sqrt{2}} \ket{0} - \frac{1}{\sqrt{2}} \ket{1} } # \otimes \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } # = \frac{1}{2} \ket{00} + \frac{1}{2} \ket{01} - \frac{1}{2} \ket{10} - \frac{1}{2} \ket{11} # = \myrvector{ \frac{1}{2} \\ \frac{1}{2} \\ - \frac{1}{2} \\ -\frac{1}{2} }. 
# $$ # </ul> # <h4> Case 4: Let's find $ H^{\otimes 2} \ket{11} $ (in three different ways) </h4> # <ul> # <li> Direct matrix-vector multiplication: # $$ # H^{\otimes 2} \ket{11} # = \Htwo \myvector{0 \\ 0 \\ 0 \\ 1} # = \myrvector{ \frac{1}{2} \\ - \frac{1}{2} \\ - \frac{1}{2} \\ \frac{1}{2} } . # $$ </li> # <li> We calculate the quantum state of each state,and then we find the quantum state of the composite system. # $$ # H\ket{1} \otimes H \ket{1} # = \stateminus \otimes \stateminus # = \myrvector{ \frac{1}{2} \\ - \frac{1}{2} \\ - \frac{1}{2} \\ \frac{1}{2} }. # $$ </li> # <li> We make calculations with $ \ket{0} $ and $ \ket{1} $. # $$ # H \ket{1} \otimes H \ket{1} # = \mypar{ \frac{1}{\sqrt{2}} \ket{0} - \frac{1}{\sqrt{2}} \ket{1} } # \otimes \mypar{ \frac{1}{\sqrt{2}} \ket{0} - \frac{1}{\sqrt{2}} \ket{1} } # = \frac{1}{2} \ket{00} - \frac{1}{2} \ket{01} - \frac{1}{2} \ket{10} + \frac{1}{2} \ket{11} # = \myrvector{ \frac{1}{2} \\ - \frac{1}{2} \\ - \frac{1}{2} \\ \frac{1}{2} }. # $$ # </ul> # <hr> # <h3> CNOT operator </h3> # CNOT is an operator defined on two qubits: # # $$ # CNOT = \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} . # $$ # # Its effect is very simple: if the state of the first qubit is one, then the state of the second qubit is flipped. # # If the state of the first qubit is zero, then the state of the second qubit remains the same. # # In summary: # <ul> # <li>$ CNOT \ket{00} = \ket{00} $, </li> # <li>$ CNOT \ket{01} = \ket{01} $, </li> # <li>$ CNOT \ket{10} = \ket{11} $, and, </li> # <li>$ CNOT \ket{11} = \ket{10} $. </li> # </ul> # # CNOT refers to as Controlled-NOT: NOT operator is applied in a controlled way. # <h3> cx-gate </h3> # # In Qiskit, CNOT operator is represented as cx-gate. # # It takes two arguments: controller-qubit and target-qubit. 
# # Its implementation is as follows: # # <i> <b>x-gate</b> (NOT operator) is applied to <u>the target qubit</u> that is <b>CONTROLLED</b> by <u>the controller qubit</u>.</i> # # We apply CNOT operator to the states $ \ket{00}, \ket{01}, \ket{10}, \ket{11} $ and then measure each. # + pairs = ['00','01','10','11'] for pair in pairs: from qiskit import QuantumCircuit, execute, Aer qc = QuantumCircuit(2,2) # initialize the pair # we follow the reading order in Qiskit # q1-tensor-q0 if pair[1] == '1': qc.x(0) if pair[0] =='1': qc.x(1) qc.cx(1,0) qc.measure(0,0) qc.measure(1,1) display(qc.draw(output='mpl',reverse_bits=True)) job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1024) counts = job.result().get_counts(qc) print(pair,"--CNOT->",counts) # - # <h3> Task 5 </h3> # # Create a quantum circuit with $ n=5 $ qubits. # # Set each qubit to $ \ket{1} $. # # Repeat 4 times: # <ul> # <li>Randomly pick a pair of qubits, and apply cx-gate (CNOT operator) on the pair.</li> # </ul> # # Draw your circuit, and execute your program 100 times. # # Verify your measurement results by checking the diagram of the circuit. 
# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

# import randrange for random choices
from random import randrange

#
# your code is here
#
n = 5  # number of qubits
m = 4  # number of random CNOT gates to apply

states_of_qubits = []  # we trace the state of each qubit also by ourselves

q = QuantumRegister(n,"q")  # quantum register with n qubits
c = ClassicalRegister(n,"c")  # classical register with n bits
qc = QuantumCircuit(q,c)  # quantum circuit with quantum and classical registers

# set each qubit to |1>
for i in range(n):
    qc.x(q[i])  # apply x-gate (NOT operator)
    states_of_qubits.append(1)  # the state of each qubit is set to 1

# randomly pick m pairs of qubits
for i in range(m):
    controller_qubit = randrange(n)
    target_qubit = randrange(n)
    # controller and target qubits should be different
    while controller_qubit == target_qubit:  # if they are the same, we pick the target_qubit again
        target_qubit = randrange(n)
    # print our picked qubits
    print("the indices of the controller and target qubits are",controller_qubit,target_qubit)
    # apply cx-gate (CNOT operator)
    qc.cx(q[controller_qubit],q[target_qubit])
    # we also trace the results classically
    if states_of_qubits[controller_qubit] == 1:  # if the value of the controller qubit is 1,
        states_of_qubits[target_qubit] = 1 - states_of_qubits[target_qubit]  # then flip the value of the target qubit
        # remark that 1-x gives the negation of x

# measure the quantum register
qc.barrier()
qc.measure(q,c)

# draw the circuit in reading order
display(qc.draw(output='mpl',reverse_bits=True))

# execute the circuit 100 times in the local simulator
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
counts = job.result().get_counts(qc)
print("the measurument result is",counts)

# build our classically-traced result string in Qiskit's reading order
# (qubit n-1 leftmost), to compare against the measured counts
our_result=""
for state in states_of_qubits:
    our_result = str(state) + our_result
print("our result is",our_result)
# -

# [click for our solution](Q60_Two_Qubits_Solutions.ipynb#task5)

# <h3>Task 6</h3>
#
# Our task is to learn the behavior of the following quantum circuit by doing experiments.
#
# Our circuit has two qubits: $ q_0 $ and $ q_1 $. They are tensored as $ q_1 \otimes q_0 $ in Qiskit.
# <ul>
#     <li> Apply Hadamard to the both qubits.
#     <li> Apply CNOT($q_1$,$q_0$).
#     <li> Apply Hadamard to the both qubits.
#     <li> Measure the circuit.
# </ul>
#
# Iteratively initialize the qubits to $ \ket{00} $, $ \ket{01} $, $ \ket{10} $, and $ \ket{11} $.
#
# Execute your program 100 times for each iteration, and then check the outcomes for each iteration.
#
# Observe that the overall circuit implements CNOT($q_0$,$q_1$).

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

#
# your code is here
#
all_inputs=['00','01','10','11']

# Loop variable renamed from `input` (which shadowed the builtin) to `pair`,
# matching the earlier CNOT demo cell in this notebook.
for pair in all_inputs:
    q = QuantumRegister(2,"q")  # quantum register with 2 qubits
    c = ClassicalRegister(2,"c")  # classical register with 2 bits
    qc = QuantumCircuit(q,c)  # quantum circuit with quantum and classical registers

    # initialize the inputs w.r.t the reading order of Qiskit
    if pair[0]=='1':
        qc.x(q[1])  # set the state of the up qubit to |1>
    if pair[1]=='1':
        qc.x(q[0])  # set the state of the down qubit to |1>

    # apply h-gate to both qubits
    qc.h(q[0])
    qc.h(q[1])

    # apply cx(up-qubit,down-qubit)
    qc.cx(q[1],q[0])

    # apply h-gate to both qubits
    qc.h(q[0])
    qc.h(q[1])

    # measure both qubits
    qc.barrier()
    qc.measure(q,c)

    # draw the circuit w.r.t the reading order of Qiskit
    display(qc.draw(output='mpl',reverse_bits=True))

    # execute the circuit 100 times in the local simulator
    job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
    counts = job.result().get_counts(qc)
    print(pair,"is mapped to",counts)
# -

# [click for our solution](Q60_Two_Qubits_Solutions.ipynb#task6)

# <h3>Task 7</h3>
#
# Our task is to learn the behavior of the following quantum circuit by doing experiments.
#
# Our circuit has two qubits: $ q_0 $ and $ q_1 $. They are tensored as $ q_1 \otimes q_0 $ in Qiskit.
# <ul>
#     <li> Apply CNOT($q_1$,$q_0$).
#     <li> Apply CNOT($q_0$,$q_1$).
#     <li> Apply CNOT($q_1$,$q_0$).
# </ul>
#
# Iteratively initialize the qubits to $ \ket{00} $, $ \ket{01} $, $ \ket{10} $, and $ \ket{11} $.
#
# Execute your program 100 times for each iteration, and then check the outcomes for each iteration.
#
# Observe that the overall circuit swaps the values of the first and second qubits:
# <ul>
#     <li> $\ket{00} \rightarrow \ket{00} $ </li>
#     <li> $\ket{01} \rightarrow \ket{10} $ </li>
#     <li> $\ket{10} \rightarrow \ket{01} $ </li>
#     <li> $\ket{11} \rightarrow \ket{11} $ </li>
# </ul>

# +
# import all necessary objects and methods for quantum circuits
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer

#
# your code is here
#
all_inputs=['00','01','10','11']

# Loop variable renamed from `input` (builtin shadowing) to `pair`, as above.
for pair in all_inputs:
    q = QuantumRegister(2,"q")  # quantum register with 2 qubits
    c = ClassicalRegister(2,"c")  # classical register with 2 bits
    qc = QuantumCircuit(q,c)  # quantum circuit with quantum and classical registers

    # initialize the inputs w.r.t the reading order of Qiskit
    if pair[0]=='1':
        qc.x(q[1])  # set the state of the up qubit to |1>
    if pair[1]=='1':
        qc.x(q[0])  # set the state of the down qubit to |1>

    # apply cx(up-qubit,down-qubit)
    qc.cx(q[1],q[0])

    # apply cx(down-qubit,up-qubit)
    qc.cx(q[0],q[1])

    # apply cx(up-qubit,down-qubit)
    qc.cx(q[1],q[0])

    # measure both qubits
    qc.barrier()
    qc.measure(q,c)

    # draw the circuit w.r.t the reading order of Qiskit
    display(qc.draw(output='mpl',reverse_bits=True))

    # execute the circuit 100 times in the local simulator
    job = execute(qc,Aer.get_backend('qasm_simulator'),shots=100)
    counts = job.result().get_counts(qc)
    print(pair,"is mapped to",counts)
# -

# [click for our solution](Q60_Two_Qubits_Solutions.ipynb#task7)
quantum-with-qiskit/Q60_Two_Qubits.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/XinyiYS/FairAndPrivateFederatedLearning/blob/master/pytorch_MLP_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="uqMjPW4vrdmE" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torchsummary import summary
from torch.utils import data

import numpy as np

# + id="5RNXDpxWMPQR" colab_type="code" colab={}
# Hyper-parameters and runtime configuration.
# BUG FIX: this cell must run before the data-loading cell below — the
# original notebook created the DataLoaders with **params before `params`
# was defined, raising NameError when the cells are run top to bottom.
params = {'batch_size': 10,
          'shuffle': True,
          'num_workers': 6}
max_epochs = 5
lr = 1e-2

device = torch.device('cpu')
if torch.cuda.is_available():
    device = torch.device('cuda')

# + id="JjZS4EcEA7UV" colab_type="code" colab={}
import os

root = './mnist'
if not os.path.exists(root):
    os.mkdir(root)

# Pad 28x28 MNIST digits to 32x32, convert to tensors, and normalise with
# the standard MNIST mean/std.
trans = transforms.Compose([transforms.Pad((2,2,2,2)),
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307,), (0.3081,))])

# if not exist, download mnist dataset
train_set = datasets.MNIST(root=root, train=True, transform=trans, download=True)
test_set = datasets.MNIST(root=root, train=False, transform=trans, download=True)

# Moved below the `params` cell so `params` exists when this runs (bug fix).
training_generator = data.DataLoader(train_set, **params)
validation_generator = data.DataLoader(test_set, **params)


# + id="LrFEcKwCGIWb" colab_type="code" colab={}
def test(model, device, test_loader, verbose=True):
    """Evaluate `model` on `test_loader`; return accuracy as a fraction in [0, 1].

    Also prints average NLL loss and accuracy when `verbose` is True.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data.float())
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    if verbose:
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    test_acc = 1. * correct / len(test_loader.dataset)
    return test_acc


def train(model, device, data_loader, optimizer, epoch):
    """Run ONE pass over `data_loader`, updating `model` in place.

    NOTE(review): the `epoch` argument is unused and callers below pass
    `max_epochs` for it — as written this trains for a single epoch only.
    Confirm whether a `for epoch in range(max_epochs)` loop was intended.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(data_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()


# + id="uwVrSop5u1el" colab_type="code" outputId="9b85c447-acf2-40f2-d64f-0cdd771a6227" colab={"base_uri": "https://localhost:8080/", "height": 357}
class MLP_Net(nn.Module):
    """3-layer MLP over flattened 32x32 inputs, log-softmax class outputs."""

    def __init__(self):
        super(MLP_Net, self).__init__()
        self.fc1 = nn.Linear(1024, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        x = x.view(-1, 1024)  # flatten (N, 1, 32, 32) -> (N, 1024)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)


MLP = MLP_Net().to(device)
optimizer = optim.SGD(MLP.parameters(), lr=lr)  # TODO momentum is not supported at the moment

summary(MLP, (1, 32, 32))
train(MLP, device, training_generator, optimizer, max_epochs)
test(MLP, device, validation_generator)


# + id="qq8YuTycxFdM" colab_type="code" outputId="1ec3ceed-a05e-484d-bac6-76f7b5119742" colab={"base_uri": "https://localhost:8080/", "height": 428}
class CNN_Net(nn.Module):
    """LeNet-style CNN for 32x32 single-channel inputs, log-softmax outputs."""

    def __init__(self):
        super(CNN_Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, 3, 1)
        self.conv2 = nn.Conv2d(64, 16, 7, 1)
        self.fc1 = nn.Linear(4*4*16, 200)
        self.fc2 = nn.Linear(200, 10)

    def forward(self, x):
        x = x.view(-1, 1, 32, 32)
        # Fixed: F.tanh is deprecated (and removed in recent PyTorch);
        # use torch.tanh instead.
        x = torch.tanh(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = torch.tanh(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*16)  # flatten the 4x4x16 feature maps
        x = torch.tanh(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


CNN = CNN_Net().to(device)
summary(CNN, (1, 32, 32))

optimizer = optim.SGD(CNN.parameters(), lr=lr)
train(CNN, device, training_generator, optimizer, max_epochs)
test(CNN, device, validation_generator)

# + id="3ZKneEf6FsBz" colab_type="code" colab={}
archive/pytorch_MLP_CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import datetime

import geopandas as gpd

from src.constants import PAISAGENSLIDAR_PATH

# Target CRS for the output geometries; the per-row source CRS lives in
# the `crs_orig` column.
# BUG FIX: WGS84 was referenced but never defined in the original notebook.
WGS84 = "EPSG:4326"

biomasses = gpd.read_file("biomass_EFC.zip")
biomasses.head()

# Re-project each footprint from its original CRS to WGS84.  Rows whose CRS is
# missing or invalid are kept with a null geometry instead of aborting the run.
# (The original wrapped this loop in tqdm without importing it -- a NameError.)
geometry_wgs84 = []
for idx, row in biomasses.iterrows():
    try:
        geometry_wgs84.append(
            gpd.GeoSeries(row["geometry"], crs=row["crs_orig"]).to_crs(WGS84).iloc[0]
        )
    except Exception as e:
        geometry_wgs84.append(None)
        print(idx, e)

biomasses["geometry_orig"] = biomasses["geometry"]
biomasses["geometry"] = geometry_wgs84
# Area in hectares, measured in each footprint's original (projected) CRS.
biomasses["Area [ha]"] = round(biomasses["geometry_orig"].area / 1e4, 3)

# BUG FIX: the original call had an unterminated f-string (missing closing
# quote and parenthesis), which is a syntax error.
biomasses.to_feather(
    PAISAGENSLIDAR_PATH / f"paisagenslidar_biomasses_v{datetime.date.today()}.feather"
)
biomasses.drop("geometry_orig", axis=1).to_file(
    PAISAGENSLIDAR_PATH / f"paisagenslidar_biomasses_v{datetime.date.today()}.gpkg",
    driver="GPKG",
)
notebooks/22-Ground_biomass_paisagenslidar.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Notebook by [<NAME>](http://www.pythonicfool.com/)
# ### Logistic Regression model with a One-Layer-NN (from scratch) and KNN on Framingham Dataset
# ##### [Pythonicfool GitHub Repository](https://github.com/volkansonmez/Exploratory_Data_Analysis_and_ML_Projects)
#
#
# ## Table of contents
#
# 1. [Introduction](#Introduction)
#
# 2. [EDA and Building ML Model](#EDA_and_Building_ML_Model)
#
# 3. [Conclusion](#Conclusion)

# ## Introduction
#
# [[ go back to the top ]](#Table-of-contents)
#
# The raw framingham.csv dataset was downloaded from Kaggle.
#
# Neural networks and K-nearest neighbors are coded from scratch and then trained with this dataset for logistic regression analysis.
#
# > The raw dataset can also be obtained here: https://github.com/volkansonmez/Exploratory_Data_Analysis_and_ML_Projects/blob/master/Framingham%20Dataset/framingham.csv

# ## EDA_and_Building_ML_Model
#
# [[ go back to the top ]](#Table-of-contents)
#
# This notebook uses several Python packages that come standard with the Anaconda Python distribution.
# The primary libraries you need to run this notebook are:
#
# * **NumPy**: Provides a fast numerical array structure and helper functions.
# * **pandas**: Provides a DataFrame structure to store data in memory and work with it easily and efficiently.
# * **scikit-learn**: The essential Machine Learning package in Python.
# * **matplotlib**: Basic plotting library in Python; most other Python plotting libraries are built on top of it.
# * **Seaborn**: Advanced statistical plotting library.
# # To make sure you have all of the packages you need, install them with `conda`: # # conda install numpy pandas matplotlib torch # # # #### How to build models for logistic regression training & analysis on this dataset: # # After cleaning the dataset, drop the NA values, check the feature scores with chi2 test. # # Create a one-layer neural network from scratch (not with PyTorch but with NumPy only) and train it. Do a grid search for optimum iterations, learning rate, and learning rate decay for obtaining the best test accuracy. # # Write KNN from scratch to see if it is possible to beat the deep learning method in test accuracy. Tweak the KNN just for this problem to adapt it to the data since it is mostly composed of "0" values in the target column (this is not recommended outside of this particular problem). Compare your results of sklearn's logistic regression algorithm. Show your results in the confusion matrix. # # BONUS: Create a more complex NN model with PyTorch sequential (a model with few layers) to see if increasing the model's complexity will help to reach better accuracy. # # This data is specifically selected for showing students that sometimes the data is not sufficient to reach a good accuracy regardless of any machine learning algorithm used. Increasing complexity sometimes is not necessary. # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import confusion_matrix # %matplotlib inline # Import the dataset framingham_data = pd.read_csv("framingham.csv") print(len(framingham_data)) print(framingham_data[:5]) # visualize the first 5 instances of the dataset df = framingham_data # make a copy of the data to play with it df.isna().sum() # Unmark the below lines to check for duplicates if needed # duplicate_df = df[df.duplicated()] # print(duplicate_df) # - # Drop the NaN values df = df.dropna() print(df.shape) # Check if there are any NaN values left in the dataset. 
df.isna().sum()

# View the number of 1's and 0's in the dataset
df.TenYearCHD.value_counts()

# +
# Check the feature scores
print(df.shape)  # the total number of attributes including the label

from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

X = df.iloc[:, :-1]  # data
y = df.iloc[:, -1]   # labels

# check the scores
bestfeatures = SelectKBest(score_func=chi2, k="all")
fit = bestfeatures.fit(X, y)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
# concat two dataframes for better visualization
featureScores = pd.concat([dfcolumns, dfscores], axis=1, sort=False)
featureScores.columns = ['Specs', 'Score']
print(featureScores)
print(df.shape)
# -

# Drop the unnecessary columns.
# BUG FIX: the original did `df = df.drop(..., inplace=True)`; with
# inplace=True .drop() returns None, so df became None and every later
# cell failed.  Reassign the (non-inplace) result instead.
df = df.drop(['currentSmoker', 'education', 'heartRate', 'prevalentStroke'], axis=1)

# +
X = df.iloc[:, :-1]  # data
y = df.iloc[:, -1]   # labels

# normalize the dataset
print(df.shape)
print(X.shape, y.shape)

# Rescale the values in the attributes between 0 and 1
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
X_scaled = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
# view the data description
X_scaled.describe()

# +
# Turn the dataframe into a numpy array, shuffle the data, do the split for
# testing data and train data
dataset = np.array(X_scaled)
y = np.array(y)

# shuffle the data, separate data to test and train sets
np.random.seed(0)
shuffled_indices = np.random.permutation(len(y))
shuffled_dataset = dataset[shuffled_indices]

# assign the test and train data manually
train_set = shuffled_dataset[:3100]
test_set = shuffled_dataset[3100:]
print(train_set[0])
print(test_set[0])

# assign the test and train labels
y = y[:, np.newaxis]
shuffled_labels = y[shuffled_indices]
train_label = shuffled_labels[:3100]
test_label = shuffled_labels[3100:]
print('labels:', train_label[:5], test_label[:5])  # visualize the labels
test_label.shape

# +
# Before moving on with a classic ML algorithm to do logistic regression,
# revisit the label score rates
target_count = df.TenYearCHD.value_counts()
print(target_count)

# if accuracy was going to be evaluated based on sensitivity and specificity
# these are the tp and tn amounts below:
tp_rate = 557/len(df)
tn_rate = 3099/len(df)
print(tp_rate, tn_rate)

# +
# Write and test the sigmoid function
def sigmoid(x):
    return 1/(1+np.exp(-x))

def sigmoid_p(x):
    # derivative of the sigmoid
    return sigmoid(x) * (1-sigmoid(x))

# Visualize the sigmoid function and its derivative
X = np.linspace(-5, 5, 100)
plt.plot(X, sigmoid(X), c="b")    # sigmoid in blue
plt.plot(X, sigmoid_p(X), c="r")  # sigmoid_p in red

# +
# Write a one layer NN and train the network, test it and analyze the result
# with the confusion matrix.  Analyze the accuracy with a grid search on all
# hyperparameters.
import numpy as np

class Neural_Network():
    """One-layer logistic-regression network, trained sample-by-sample (SGD)."""

    def __init__(self):
        # set the network parameters
        self.inputSize = 11   # there are 11 attributes for each input.
        self.outputSize = 1   # the output will be a single value
        # set the weights
        self.W = 0.01 * np.random.randn(self.inputSize, self.outputSize)
        self.b = np.zeros([1, self.outputSize])
        self.lr = 0.01

    def forward(self, input_data):
        # forward propagation
        self.z = np.dot(input_data, self.W) + self.b  # logit
        self.o = self.sigmoid(self.z)                 # activation
        return self.o  # return scores

    def sigmoid(self, s):
        # activation function
        return 1/(1+np.exp(-s))

    def dsigmoid(self, s):
        # derivative of sigmoid
        return s * (1 - s)

    def backward(self, input_data, label):
        # backward propagation (gradient of the log loss)
        self.dz = self.o - label
        self.db = self.dz
        self.dW = np.dot(input_data.T, self.dz)
        # vanilla update of the weights and biases
        self.b -= self.lr * self.db
        self.W -= self.lr * self.dW

    # all parameters here are LISTs of numbers to do grid search to optimize the results
    def train(self, train_set, test_set, test_label, train_label, iteration_i, lr, decay):
        # BUG FIX: the method previously wrote through the global `NN`
        # instance (`NN.lr`, `NN.forward`); it now uses `self`.
        self.lr = lr
        # train the network
        for i in range(iteration_i):
            ri = np.random.randint(len(train_set))  # random index
            rd = train_set[ri]                      # instance at the random index picked
            rd = rd[np.newaxis, :]                  # reshape the instance to 2D
            y = train_label[ri]                     # label of the random instance picked
            self.forward(rd)
            self.backward(rd, y)
            # Decay the learning rate every 300 iterations.
            # BUG FIX: the original guard was `(i % 100) == 299`, which can
            # never be true, so the decay hyper-parameter had no effect.
            if (i % 300) == 299:
                self.lr *= decay

        # done with the training, test the test set data on the trained network
        pred_list = []  # store the predictions in this list
        for j in range(len(test_set)):
            # for each item in the test set, make predictions and check the accuracy
            fwd_prop = self.forward(test_set[j])
            prediction = 0 if float(fwd_prop) < 0.5 else 1
            pred_list.append(prediction)

        # pass the predictions to the confusion matrix
        tn, fp, fn, tp = confusion_matrix(test_label, pred_list).ravel()
        # weighted average of specificity (0.85) and sensitivity (0.15)
        specificity_and_sensitivity_average = (0.85 * tn/(tn+fp) + 0.15 * tp/(tp+fn))
        return specificity_and_sensitivity_average, pred_list

    # def saveWeights(self):
    #     # this saves a text file in the same working folder when the best training is found
    #     np.savetxt("w.txt", self.W)
    #     np.savetxt("b.txt", self.b)


# Grid search for the number of iterations, learning rate and learning rate
# decay for the best accuracy
test_label = test_label.reshape(len(test_label))  # reshape the test label

iterations = [300, 500, 1000, 2000, 4000, 5000]
lr = [1e-5, 5*1e-4, 1e-4, 5*1e-4, 5*1e-3, 1e-3, 5*1e-3, 1e-2, 5*1e-2, 1e-1]
lr_decay = [0.99, 0.98, 0.97, 0.96, 0.95, 0.90]

best_specificity_and_sensitivity = 0
iteration_accuracy_at = 0
pred_list = []
for i in iterations:
    for k in lr:
        for n in lr_decay:
            NN = Neural_Network()  # fresh weights for every configuration
            # ('training with:', i, 'iterations', k, 'learning rate', n, 'learning rate decay')
            specificity_and_sensitivity_average, pred_list = NN.train(
                train_set, test_set, test_label, train_label, i, k, n)
            if specificity_and_sensitivity_average > best_specificity_and_sensitivity:
                best_specificity_and_sensitivity = specificity_and_sensitivity_average
                mark_i = i
                mark_k = k
                mark_n = n
                best_pred_list = pred_list

print('best hyper-parameters:', mark_i, mark_k, mark_n)
print(np.array(best_pred_list[:25]), test_label[:25])
accuracy = np.mean(best_pred_list == test_label)
print('accuracy', accuracy)
tn, fp, fn, tp = confusion_matrix(test_label, best_pred_list).ravel()
print(tn, fp, fn, tp)
weighted_accuracy = (0.85 * tn/(tn+fp) + 0.15 * tp/(tp+fn))
print(weighted_accuracy)

# +
# Write a KNN to analyze the data. Then, compare it with the one layer NN above.
# view the data type and shape one more time using numpy arrays.
# print(type(test_set), test_set.shape)
# print(type(train_set), train_set.shape)
# print(type(test_label), test_label.shape)
# print(type(train_label), train_label.shape)

def K_Nearest_Neighbor(test_set, test_label, train_set, train_label, n):
    """Majority-vote KNN (n neighbours, Euclidean distance), from scratch."""
    results = []
    for i in range(len(test_set)):
        indices_visited = []  # training indices already used as neighbours
        votes = np.zeros(2)
        for j in range(n):
            min = np.inf
            for k in range(len(train_set)):
                difference = np.linalg.norm(train_set[k] - test_set[i])
                if difference < min and k not in indices_visited:
                    min = difference
                    index_at_min_distance = k
                    the_label_found_at_min_dist = int(train_label[k])
            indices_visited.append(index_at_min_distance)
            votes[the_label_found_at_min_dist] += 1
        label_of_max_votes = np.argmax(votes)
        results.append(label_of_max_votes)

    # check the accuracy by comparing predictions with the actual test labels
    tn, fp, fn, tp = confusion_matrix(test_label, results).ravel()
    print('for i =:', n, 'tn, fp, fn, tp values are: ', tn, fp, fn, tp)
    best_score = (0.85 * tn/(tn+fp) + 0.15 * tp/(tp+fn))
    accuracy = np.sum(results == test_label) / len(test_label)
    return best_score, np.around(100*accuracy, decimals=2)


all_results = []
for i in range(1, 9):
    best_scores, accuracy = K_Nearest_Neighbor(test_set, test_label, train_set, train_label, i)
    all_results.append(accuracy)
print(all_results)
plt.plot(np.arange(1, 9), np.array(all_results))

# +
# See if giving more weight to finding the TPs would yield a better accuracy.
# (Don't try this at work! You might be fired even if you are right.
# Remember, this dataset has 85% negatives)
def Tweaked_K_Nearest_Neighbor(test_set, test_label, train_set, train_label, n):
    """KNN biased towards predicting positives: 1 wins with only n//2 votes."""
    results = []
    for i in range(len(test_set)):
        indices_visited = []  # training indices already used as neighbours
        votes = np.zeros(2)
        for j in range(n):
            min = np.inf
            for k in range(len(train_set)):
                difference = np.linalg.norm(train_set[k] - test_set[i])
                if difference < min and k not in indices_visited:
                    min = difference
                    index_at_min_distance = k
                    the_label_found_at_min_dist = int(train_label[k])
            indices_visited.append(index_at_min_distance)
            votes[the_label_found_at_min_dist] += 1
        # a positive needs only n//2 of the votes (biased towards class 1)
        weighted_votes = 1 if votes[1] >= n//2 else 0
        results.append(weighted_votes)

    tn, fp, fn, tp = confusion_matrix(test_label, results).ravel()
    print('for i =:', n, 'tn, fp, fn, tp values are: ', tn, fp, fn, tp)
    best_score = (0.85 * tn/(tn+fp) + 0.15 * tp/(tp+fn))
    accuracy = np.sum(results == test_label) / len(test_label)
    return best_score, np.around(100*accuracy, decimals=2)


# view some of the results to judge the accuracy of this model:
# BUG FIX: the original loop called K_Nearest_Neighbor again, so the tweaked
# version defined above was never actually evaluated.
all_results = []
for i in range(1, 9):
    best_scores, accuracy = Tweaked_K_Nearest_Neighbor(test_set, test_label, train_set, train_label, i)
    all_results.append(accuracy)
print(all_results)
plt.plot(np.arange(1, 9), np.array(all_results))

# +
# Check the sklearn linear model's logistic regression classifier and compare
# it with the above algorithms
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# NOTE(review): train_label is (n, 1); sklearn expects a 1-D y and will warn.
clf = LogisticRegression().fit(train_set, train_label)
pred = clf.predict(test_set)

# check accuracy: Accuracy = (True Pos + True Negative)
accuracy = accuracy_score(test_label, pred)
print(accuracy)

cnf_matrix_log = confusion_matrix(test_label, pred)
print(cnf_matrix_log)
sns.heatmap(pd.DataFrame(cnf_matrix_log), annot=True, cmap="Reds", fmt='g')

# conclusion: it is not a good system, sensitivity rate is very low, false negatives are high
tn, fp, fn, tp = confusion_matrix(test_label, pred).ravel()
weighted_accuracy = (0.85 * tn/(tn+fp) + 0.15 * tp/(tp+fn))
print(weighted_accuracy)

# +
# BONUS: Do a simple 3 layer pytorch sequential model, train it, and test the data
import numpy as np
import random
import torch
from torch import nn, optim
import torch.functional as F
from IPython import display

# prepare the data for torch tensors; keep the (N, 1) label shape so it
# matches the network's single-unit output for BCELoss.
# (A dead `y = y.reshape(-1)` that was immediately overwritten was removed.)
x = torch.from_numpy(train_set).float()
y = torch.from_numpy(train_label).float()

D_in, H1, H2, D_out = 11, 50, 10, 1
learning_rate = 1e-3

model = nn.Sequential(
    nn.Linear(D_in, H1),
    nn.ReLU(),
    nn.Linear(H1, H2),
    nn.ReLU(),
    nn.Linear(H2, D_out),
    nn.Sigmoid(),
)

criterion = torch.nn.BCELoss()

# Use the optim package to apply Adam for parameter updates
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training
for t in range(100):
    # Feed forward to get the probabilities
    y_pred = model(x)

    # Compute the loss and accuracy
    loss = criterion(y_pred, y)
    # BUG FIX: the original used torch.max(y_pred, 1) -- on a single-column
    # output the argmax is always 0, so the reported accuracy was meaningless.
    # It also reshaped y inside the loop, breaking BCELoss's shape match on
    # the next iteration.  Threshold at 0.5 and keep shapes aligned instead.
    predicted = (y_pred > 0.5).float()
    acc = (predicted == y).float().mean().item()
    print("[EPOCH]: %i, [LOSS]: %.6f, [ACCURACY]: %.3f" % (t, loss.item(), acc))
    display.clear_output(wait=True)

    # zero the gradients before running the backward pass.
    optimizer.zero_grad()
    # Backward pass to compute the gradient of loss w.r.t our learnable params.
    loss.backward()
    # Update params
    optimizer.step()
# -

# ## Conclusion
#
# [[ go back to the top ]](#Table-of-contents)
#
#
# KNN, sklearn's logistic regression, the one-layer NN, and the three-layer NN
# models all fail to get a good accuracy (more than 85%) since the dataset is
# too noisy. With the optimum hyperparameters the accuracy reaches about 87%;
# however, there are too many false positives and false negatives in the
# confusion matrix. The models would likely reach a better accuracy if there
# were more (and cleaner) data.
Framingham Dataset/EDA_and_ML_Application_on_Framingham_Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [py27]
#     language: python
#     name: Python [py27]
# ---

# question 1: truncate a float to an integer
float_number = 7.5687
integer_number = int(float_number)
print(integer_number)

# +
# question 2
amount = [19, 27.57]
for i in amount:
    print("Variable_type", type(i), i)  # checking variable type before manipulations
    rounding_off = round(i*10)          # Rounding after multiplying by 10
    print("rounding off", type(rounding_off), rounding_off)  # type before division
    output_number = round(i*10)/10.0
    print("Final_output", type(output_number), output_number)  # Final Values

# +
# question 3
# This is a generator. We will get to it later. Its an advanced concept
def fib(x1, x2):
    # infinite Fibonacci generator starting from (x1, x2)
    while True:  # `while True is True` simplified
        x2, x1 = x1 + x2, x2
        yield x2

for i in fib(0, 1):
    if i < 30:
        print(i)
    else:
        break

# +
# question 4
def fizzbuzz(i, num1, num2):
    # Tells you a lot about python control flow.
    if i % num1 == 0 and i % num2 == 0:
        return (i, 'Fizzbuzz')
    elif i % num1 == 0:
        return (i, 'fizz')
    elif i % num2 == 0:
        return (i, 'buzz')
    else:
        return (i)

for i in range(0, 100):
    print(fizzbuzz(i, 3, 5))

# +
# question 5: 3-digit Armstrong numbers (sum of cubes of digits equals the number)
def armstrong_number(n, last):
    while True:
        cubic_sum = sum(int(a)**3 for a in str(n))
        if n == cubic_sum:
            yield n
        n = n + 1
        if n >= last:
            break

for i in armstrong_number(100, 999):
    print(i)

# +
# question 6: Collatz sequence generator
import random
import sys

def collatz(a=None):
    # BUG FIX: the original default `a=random.randint(0, sys.maxsize)` was
    # evaluated once at definition time, so every call started from the SAME
    # "random" number.  Use a None sentinel and draw a fresh one per call.
    if a is None:
        a = random.randint(0, sys.maxsize)  # or take user input
    while a > 1:
        last_digit = int(str(a)[-1])
        if last_digit % 2 == 0:
            new_number = a/2
        else:
            new_number = (a*3)+1
        yield(new_number)
        a = new_number

output_list = []
for i in collatz():
    output_list.append(i)
print(output_list)  # 'L' in output just shows the type of variable (Python 2 long)
# -

import time  # Ignore this code, I was testing some performance differences.
import sys
a = sys.maxsize
before = time.time()
b = a/2
after = time.time()

# +
# question 7: iterate Kaprekar's routine until reaching 6174
import random

def check_input_number(n):
    # True iff n has at least two distinct digits
    digits = []
    for i in str(n):
        if i not in digits:
            digits.append(i)
    return len(digits) > 1

def input_generator():
    number = random.randint(1000, 9999)
    if check_input_number(number) == False:
        # BUG FIX: the recursive result was not returned, so this branch
        # silently returned None.
        return input_generator()
    else:
        return number

def input_method():
    input_query = raw_input("Enter '0' for the program to autorun or,\nType a 4 digit number with at least two digits different")
    if input_query == '0':
        return input_generator()
    else:
        start_number = int(input_query)
        if check_input_number(start_number) == False:
            # BUG FIX: was missing `return` (returned None on invalid input)
            return input_method()
        else:
            return start_number

def manipulation(n):
    # one Kaprekar step: descending digits minus ascending digits
    asc = int("".join(sorted(str(n))))
    dsc = int("".join(sorted(str(n), reverse=True)))
    x = dsc - asc
    yield(n)
    if n == x:
        yield("Kaprekar’s constant")
    else:
        for i in manipulation(x):
            yield i

number = input_method()
for i in manipulation(number):
    if i == "Kaprekar’s constant":
        break
    print(i)
# -
asignments/.ipynb_checkpoints/sol_assignment1-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
import nltk

# +
import seaborn as sns
import numpy as np

def get_length_kde(book_id):
    """Print the sentence-length IQR and plot its KDE for a Gutenberg book."""
    book = load_etext(book_id)
    book = strip_headers(book)
    sents = [nltk.word_tokenize(str.lower(s)) for s in nltk.sent_tokenize(book)]
    lengths = [len(s) for s in sents]
    print(np.quantile(lengths, [0.25, 0.75]))
    sns.kdeplot(data=lengths)

def get_sentence_lengths(book_id):
    """Token counts per sentence for the Gutenberg book with this id."""
    book = load_etext(book_id)
    book = strip_headers(book)
    return [len(nltk.word_tokenize(str.lower(s))) for s in nltk.sent_tokenize(book)]

# +
from collections import Counter
from functools import reduce
import operator

from scipy.spatial import distance

from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers

def load_and_strip(book_id):
    return strip_headers(load_etext(book_id))

def get_probability_vec(lengths, max_sent_length):
    """Empirical probability of each sentence length; index == length."""
    result = np.zeros(max_sent_length + 1)
    for i, count in Counter(lengths).items():
        result[i] = count
    return result / result.sum()

def get_sentence_lengths_num_words(text):
    # BUG FIX: the original built this list but never returned it
    # (the function implicitly returned None).
    return [len(nltk.word_tokenize(s)) for s in nltk.sent_tokenize(text)]

def sentence_length_question(q):
    books = [load_and_strip(id) for id in q["answers"]]
    # BUG FIX: the original comprehension iterated `id` but passed the
    # undefined name `text` to the tokenizer.
    book_sent_lengths = [get_sentence_lengths_num_words(text) for text in books]

    # fill zeros to max length where index is the sentence length
    max_sent_length = max([l for sent_lengths in book_sent_lengths for l in sent_lengths])
    # get probabilities where index is the sentence length
    probability_vecs = [get_probability_vec(lengths, max_sent_length)
                        for lengths in book_sent_lengths]
    avg_probability_vec = reduce(operator.add, probability_vecs) / len(probability_vecs)
    # see who is the most different from the average
    _, index_of_most_different = max([
        (distance.jensenshannon(v, avg_probability_vec), i)
        for i, v in enumerate(probability_vecs)
    ])
    # TODO: return answer + probability distribution for sentence lengths for
    # average vs other (combinations of N-1 vs left out)
# -

# unpacking a dict yields its keys; the original unpacked 3 keys into 2 names (ValueError)
[a, b, c] = {"a": 1, "b": 4, "c": 6}

[(k, v) for k, v in Counter([1, 1, 2, 3]).items()]

from collections import Counter
c = Counter([1, 2, 3, 1, 2])
c.most_common(2)

# +
from nltk import FreqDist

# JJ adjective 'big'
# JJR adjective, comparative 'bigger'
# JJS adjective, superlative 'biggest'
# NOTE(review): `filtered_tagged_words` is not defined anywhere in this
# notebook -- presumably produced by a deleted cell; confirm before running.
adjectives = [x[0] for x in filtered_tagged_words if x[1] in {"JJ", "JJR", "JJS"}]
freq_dist = FreqDist(adjectives)
# -

# BUG FIX: was `.argmax(axis=)` -- a syntax error
np.array([np.array([1, 2]), np.array([5, 5])]).sum(axis=1).argmax()

arr = np.array([np.array([1, 2]), np.array([5, 5])])

print(np.array([np.array(l) for l in [[1, 2, 3], [2, 3, 4]]]))

from sklearn.neighbors import KernelDensity
import numpy as np

X = np.array([1, 1, 2, 2, 3, 3, 4, 5, 6, 8, 10])
# BUG FIX: KernelDensity expects a 2-D (n_samples, n_features) array;
# fitting the raw 1-D array raises a ValueError.
kde = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(X[:, None])
kde.score_samples(X[:, None])

# +
def make_data(N, f=0.3, rseed=1):
    """Bimodal sample: N standard normals with the last f*N shifted by +5."""
    rand = np.random.RandomState(rseed)
    x = rand.randn(N)
    x[int(f * N):] += 5
    return x

x = make_data(1000)
# -

x

kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
kde.fit(x[:, None])

X_plot = np.linspace(-6, 6, 1000)[:, None]
X_plot

np.array([1, 2, 3]).reshape(3, 1).shape
exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example - Merge

# +
import rioxarray  # for the extension to load
import xarray
from rioxarray.merge import merge_arrays
# Note: You can merge datasets with the merge_datasets method
# %matplotlib inline
# -

# ## Load in xarray dataset
#
# API reference:
#
# - [rioxarray.open_rasterio](../rioxarray.rst#rioxarray-open-rasterio)
# - [rioxarray.merge.merge_arrays](../rioxarray.rst#rioxarray.merge.merge_arrays)
# - [rioxarray.merge.merge_datasets](../rioxarray.rst#rioxarray.merge.merge_datasets)

dem_test = "../../test/test_data/input/MODIS_ARRAY.nc"
with rioxarray.open_rasterio(dem_test) as rds:
    # Carve the raster into its four 100x100 quadrants, then stitch them back
    # together -- the merged result should reproduce the original array.
    lo, hi = slice(100), slice(100, 200)
    tiles = [rds.isel(x=xs, y=ys)
             for xs, ys in ((lo, lo), (hi, hi), (lo, hi), (hi, lo))]
    merged = merge_arrays(tiles)

# Plot the original and the re-merged raster, masking the nodata value.
rds.where(rds != rds.nodata).plot();

merged.where(merged != merged.nodata).plot()
docs/examples/merge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
from functools import partial

import numpy as np
import pandas as pd
import re

import constants

from locale import *
setlocale(LC_NUMERIC, 'en_US.UTF-8')

pd.set_option('display.max_columns', None)

def resilient_atof(x):
    """Parse a locale-formatted number; blanks and garbage become 0.0."""
    try:
        return atof(x) if x else 0.0
    except Exception:  # was a bare `except:` -- don't swallow SystemExit/KeyboardInterrupt
        return 0.0
# -

raw_file = '../data/raw.csv'
results_file = '../data/results.csv'

df = pd.read_csv(raw_file)
df = df.fillna('')
df.columns = constants.columns

# +
def gender_map(row):
    """Lower-case the gender field; 'prefer not ...' becomes 'undisclosed'."""
    new_gender = row['gender'].lower()
    if (new_gender.startswith('prefer not')):
        new_gender = 'undisclosed'
    return new_gender

city_regex = re.compile('(.*),.*')
def get_city(row):
    """Text before the last comma of `location`, or '?' if no comma."""
    result = city_regex.match(row['location'])
    if result:
        return result.group(1).strip()
    return "?"

country_regex = re.compile('.*,(.*)')
def get_country(row):
    """Text after the last comma of `location`, or '?' if no comma."""
    result = country_regex.match(row['location'])
    if result:
        return result.group(1).strip()
    return "?"

df['gender'] = df.apply(gender_map, axis=1)
df['city_of_origin'] = df.apply(get_city, axis=1)
df['country_of_origin'] = df.apply(get_country, axis=1)

# +
def get_valid_coop_salaries(row):
    """Parse the six co-op salary fields, clamping each to [0, 100000]."""
    raw_salaries = row[['coop_salary_1', 'coop_salary_2', 'coop_salary_3',
                        'coop_salary_4', 'coop_salary_5', 'coop_salary_6']]
    salaries = np.array(map(resilient_atof, raw_salaries), dtype=np.float32)
    return [max(0, min(100000, x)) for x in salaries]

def compute_coop_avg(row):
    return np.mean(get_valid_coop_salaries(row))

def compute_coop_median(row):
    return np.median(get_valid_coop_salaries(row))

df['coop_salary_avg'] = df.apply(compute_coop_avg, axis=1)
df['coop_salary_median'] = df.apply(compute_coop_median, axis=1)

# +
def get_valid_term_grade_avgs(row):
    """Parse the seven per-term grade averages, clamping each to [0, 100]."""
    raw_avgs = row[['term_avg_1a', 'term_avg_1b', 'term_avg_2a', 'term_avg_2b',
                    'term_avg_3a', 'term_avg_3b', 'term_avg_4a']]
    avgs = np.array(map(resilient_atof, raw_avgs), dtype=np.float32)
    return [max(0, min(100, x)) for x in avgs]

def compute_culm_grade_avg(row):
    return np.mean(get_valid_term_grade_avgs(row))

def compute_culm_grade_median(row):
    return np.median(get_valid_term_grade_avgs(row))

df['culm_grade_avg'] = df.apply(compute_culm_grade_avg, axis=1)
df['culm_grade_median'] = df.apply(compute_culm_grade_median, axis=1)

# +
def round_admission_avg(x):
    # clamp to [70, 100], then round to the nearest integer
    x = min(100, max(70, x))
    return int(round(x))

df['rounded_admission_avg'] = df['admission_avg'].apply(round_admission_avg)

# +
"""
Processing comma-separated lists, e.g. hs_extras, uni_extras.
"""

def to_lower(row, colname):
    return row[colname].lower()

def replace_strs(row, colname, transforms=()):
    """Return row[colname] with each (src, dst) replacement applied in order.

    row: a row of a DataFrame provided when run within DataFrame.apply with axis=1
    colname (str): the column to read from
    transforms: sequence of (src_str, dst_str) replacements, applied in order

    BUG FIX: the original used a mutable default argument and assigned the
    replaced value back into the row (mutating the Series handed over by
    DataFrame.apply) before returning it; it now builds the value locally.
    """
    value = row[colname]
    for src_str, dst_str in transforms:
        value = value.replace(src_str, dst_str)
    return value

def split_and_count(df, colname, delimiter=","):
    """Split a delimited column and count occurrences of each trimmed token."""
    return pd.DataFrame([
        x.strip()
        for x in df[colname].str.split(delimiter, expand=True).values.flatten()
        if x is not None
    ]).groupby(0).size()

extra_transforms = [
    ('hack the north', 'hackathon'),
    ('team', '')
]

df['uni_extras_normed'] = df.apply(partial(to_lower, colname='uni_extras'), axis=1)
df['uni_extras_normed'] = df.apply(partial(replace_strs, colname='uni_extras_normed',
                                           transforms=extra_transforms), axis=1)
# -

split_and_count(df, 'uni_extras', ',')

split_and_count(df, 'uni_extras_normed', ',')

df.to_csv(results_file)
utils/3-Data-Preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="9BF12JPET1En"
# This is a notebook that shows an example of reading a file from a Google drive.

# + id="X8RvvGIWTuFq"
###### Set Up #####
# verify our folder with the data and module assets is installed
# if it is installed make sure it is the latest
# !test -e ds-assets && cd ds-assets && git pull -q && cd ..
# if it is not installed clone it
# !test ! -e ds-assets && git clone -q https://github.com/lutzhamel/ds-assets.git

# point to the folder with the assets
home = "ds-assets/assets/"

# add home folder to module search path
import sys
sys.path.append(home)

# + id="a1b8jOJUUQs6"
import pandas

# convert a shareable link to a download link
from google_drive import downloadlink

# + id="zrJu5J6VUJ5t"
# the shareable link to a file on a Google drive
file_url = "https://drive.google.com/file/d/1Bz777RtW_b1qaesa4G81O8fPLlWWtVph/view?usp=sharing"

df = pandas.read_csv(downloadlink(file_url))
df
notes/read_drive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Part of this file can't be rendered in GitHub. Refer to the following link for a properly rendered version of this file: https://nbviewer.jupyter.org/github/sfu-db/dataprep/blob/develop/examples/DataConnector_Yelp.ipynb # # Connector for Yelp # # In this example, we will be going over how to use Connector with Yelp. # ## Prerequisites # # Connector is a component in the DataPrep library that aims to simplify data access by providing a standard API set. The goal is to help users skip the complex API configuration. In this tutorial, we demonstrate how to use the connector component with Yelp. # # If you haven't installed DataPrep, run command `!pip install dataprep` or execute the following cell. # Run me if you'd like to install # !pip install dataprep # # Download and store the configuration files in DataPrep. # # The configuration files are used to configure the parameters and initial setup for the API. The available configuration files can be manually downloaded here: [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs) or automatically downloaded at usage. # # Store the configuration file in the dataprep folder. # # Obtaining an access token from Yelp # # To connect to Yelp, you need to generate a token. This token is a unique identifier of an application requesting access to the Yelp's API. Once an application creates the token, it will act as your credential when making an API request. # # To receive an access token, the user needs to create a server-side application from Yelp. You can get a token by following the [Yelp document](https://www.yelp.com/developers/documentation/v3/authentication). # Simply create an application and generate a key. 
# Store the token or API Key in a secure location as it will provide you access to Yelp's data. # # Initialize connector # # Copy and paste your API key as the value for the **auth_token** variable. Ensure the **API name** argument is correct. This establishes a connection with Yelp and returns an object. Once you run the code you can use the built-in functions available from connector. # + from dataprep.connector import connect, info auth_token = '<insert API key>' dc = connect('yelp', _auth={'access_token':auth_token}) dc # - # # Functionalities # # Connector has several functions you can perform to gain insight on the data downloaded from Yelp. # ### Connector.info # The info method gives information and guidelines on using the connector. There are 4 sections in the response and they are table, parameters, example and schema. # # >1. Table - The table(s) being accessed. # >2. Parameters - Identifies which parameters can be used to call the method. # >3. Examples - Shows how you can call the methods in the Connector class. # >4. Schema - Names and data types of attributes in the response. info('yelp') # ### Connector.query # The query method downloads the website data and displays it in a Dataframe. The parameters must meet the requirements as indicated in connector.info for the operation to run. # # When the data is received from the server, it will either be in a JSON or XML format. The connector reformats the data in pandas Dataframe for the convenience of downstream operations. # # As an example, let's try to get data from the "business" table, providing the term "city" and location "seattle". df = await dc.query("businesses", _q="city", location="seattle") df # From query results, you can see how easy it is to download the restaurant data from Yelp into a pandas Dataframe. # # Now that you have an understanding of how connector operates, you can easily accomplish the task with two lines of code. # # >1. dc = Connector(...) # >2. dc.query(...) 
# # That's all for now. # If you are interested in writing your own configuration file or modifying an existing one, refer to the [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs).
examples/DataConnector_Yelp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import numpy as np
from torch.utils.data import Dataset
import torchvision
import os
import h5py
import pickle  # TODO or use h5py instead?
import trimesh

import config as cfg
import dataset.augmentation as Transforms
# -


class DatasetModelnet40(Dataset):
    """ModelNet40 point-cloud dataset for registration experiments.

    Loads the HDF5 shards listed in ``{split}_files.txt`` under
    ``cfg.M40_PATH`` and applies an augmentation pipeline matching the
    requested noise model.

    ModelNet40 has no validation set, so ``split="val"`` is mapped to the
    *test* files restricted to categories 0-19, while ``split="test"``
    uses categories 20-39 (category-disjoint evaluation).
    """

    def __init__(self, split, noise_type):
        """
        Args:
            split: one of "train", "val", "test".
            noise_type: "clean" (exact 1-1 correspondences) or "jitter"
                (resampled points + Gaussian position noise).
        """
        dataset_path = cfg.M40_PATH
        # Categories 0-19 for train/val, 20-39 for test.
        categories = np.arange(20) if split in ["train", "val"] else np.arange(20, 40)
        # ModelNet40 has no validation set - use cat 0-19 with test set.
        split = "test" if split == "val" else split
        self.samples, self.labels = self.get_samples(dataset_path, split, categories)
        self.transforms = self.get_transforms(split, noise_type)

    def __len__(self):
        return self.samples.shape[0]

    def __getitem__(self, item):
        # 'points' carries xyz + normals (6 channels); 'idx' identifies the
        # sample so deterministic transforms can seed per-item RNG.
        sample = {'points': self.samples[item, :, :],
                  'label': self.labels[item],
                  'idx': np.array(item, dtype=np.int32)}
        if self.transforms:
            sample = self.transforms(sample)
        return sample

    def get_transforms(self, split, noise_type):
        """Build the augmentation pipeline for *split* / *noise_type*.

        Returns:
            A ``torchvision.transforms.Compose`` over the sample dict.

        Raises:
            ValueError: if *noise_type* is not "clean" or "jitter".
        """
        if noise_type == "clean":
            # 1-1 correspondence for each point (resample first before
            # splitting), no noise.
            if split == "train":
                transforms = [Transforms.Resampler(1024),
                              Transforms.SplitSourceRef(),
                              Transforms.Scale(), Transforms.Shear(), Transforms.Mirror(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.ShufflePoints()]
            else:
                transforms = [Transforms.SetDeterministic(),
                              Transforms.FixedResampler(1024),
                              Transforms.SplitSourceRef(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.ShufflePoints()]
        elif noise_type == "jitter":
            # Points randomly sampled (might not have perfect correspondence),
            # Gaussian noise added to positions.
            if split == "train":
                transforms = [Transforms.SplitSourceRef(),
                              Transforms.Scale(), Transforms.Shear(), Transforms.Mirror(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.Resampler(1024),
                              Transforms.RandomJitter(),
                              Transforms.ShufflePoints()]
            else:
                transforms = [Transforms.SetDeterministic(),
                              Transforms.SplitSourceRef(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.Resampler(1024),
                              Transforms.RandomJitter(),
                              Transforms.ShufflePoints()]
        else:
            raise ValueError(f"Noise type {noise_type} not supported for ModelNet40.")
        return torchvision.transforms.Compose(transforms)

    def get_samples(self, dataset_path, split, categories):
        """Load and concatenate all HDF5 shards for *split*.

        Args:
            dataset_path: directory holding the ``.h5`` shards and file lists.
            split: "train" or "test" (selects ``{split}_files.txt``).
            categories: iterable of label ids to keep, or None for all.

        Returns:
            Tuple ``(data, labels)``: data is (N, P, 6) xyz+normal points,
            labels is (N,) int64 category ids.
        """
        list_path = os.path.join(dataset_path, f'{split}_files.txt')
        # Context manager closes the file-list handle (was previously leaked).
        with open(list_path) as file_list:
            filelist = [os.path.join(dataset_path, line.strip().split("/")[-1])
                        for line in file_list]

        all_data = []
        all_labels = []
        for fname in filelist:
            # Context manager closes each HDF5 shard (handles were leaked before).
            with h5py.File(fname, mode='r') as f:
                data = np.concatenate([f['data'][:], f['normal'][:]], axis=-1)
                labels = f['label'][:].flatten().astype(np.int64)
            if categories is not None:
                # Filter out unwanted categories.
                mask = np.isin(labels, categories).flatten()
                data = data[mask, ...]
                labels = labels[mask, ...]
            all_data.append(data)
            all_labels.append(labels)
        all_data = np.concatenate(all_data, axis=0)
        all_labels = np.concatenate(all_labels, axis=0)
        return all_data, all_labels


if __name__ == '__main__':
    dataset = DatasetModelnet40(split="train", noise_type="clean")
    # print(len(dataset))
    print(dataset[0])
datatset_testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib notebook import torch import numpy as np import matplotlib.pyplot as plt # # Preparación de datos # # - Datos de ejemplo: Problema no linealmente separable # - DataSet y DataLoaders # + import sklearn.datasets from sklearn.model_selection import StratifiedShuffleSplit #data, labels = sklearn.datasets.make_circles(n_samples=1000, noise=0.2, factor=0.25) data, labels = sklearn.datasets.make_blobs(n_samples=[300]*3, n_features=2, cluster_std=0.5, centers=np.array([[-1, 1], [1, 1], [-1, -1]])) labels[labels==2] = 1 n_input = data.shape[1] # Dimensionalidad de la entrada n_classes = len(np.unique(labels)) # Número de clases symbols = ['x', 'o', 'd', '+'] fig, ax = plt.subplots(figsize=(6, 4), tight_layout=True) for k, marker in enumerate(symbols[:n_classes]): ax.scatter(data[labels==k, 0], data[labels==k, 1], c='k', s=20, marker=marker, alpha=0.75) # Para las gráficas x_min, x_max = data[:, 0].min() - 0.5, data[:, 0].max() + 0.5 y_min, y_max = data[:, 1].min() - 0.5, data[:, 1].max() + 0.5 xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02)) import sklearn.model_selection # Separamos el data set en entrenamiento y validación train_idx, valid_idx = next(StratifiedShuffleSplit(train_size=0.75).split(data, labels)) # Crear conjuntos de entrenamiento y prueba from torch.utils.data import DataLoader, TensorDataset, Subset # Creamos un conjunto de datos en formato tensor torch_set = TensorDataset(torch.from_numpy(data.astype('float32')), torch.from_numpy(labels)) # Data loader de entrenamiento torch_train_loader = DataLoader(Subset(torch_set, train_idx), shuffle=True, batch_size=32) # Data loader de validación torch_valid_loader = DataLoader(Subset(torch_set, valid_idx), shuffle=False, batch_size=256) # - # # 
Perceptrón multicapa # # Implementemos un perceptrón multicapa class MLP_onehiddenlayer(torch.nn.Module): def __init__(self, n_input, n_hidden, n_output): super(type(self), self).__init__() self.hidden = torch.nn.Linear(n_input, n_hidden) self.output = torch.nn.Linear(n_hidden, n_output) self.activation = torch.nn.Sigmoid() def forward(self, x): x = self.activation(self.hidden(x)) return self.output(x) # ¿Y si queremos más de una capa oculta? # # - Podemos añadir explicitamente otra capa # - Podemos usar [`torch.nn.ModuleList`](https://pytorch.org/docs/stable/generated/torch.nn.ModuleList.html) para crear capas ocultas programaticamente. `nn.ModuleList` funciona como una lista de capas que luego podemos iterar class MLP(torch.nn.Module): def __init__(self, neurons=[2, 1]): super(type(self), self).__init__() assert len(neurons) >= 2, "Se necesita al menos capa de entrada y capa de salida" self.hidden = torch.nn.ModuleList() for k in range(len(neurons)-2): self.hidden.append(torch.nn.Linear(neurons[k], neurons[k+1])) self.output = torch.nn.Linear(neurons[-2], neurons[-1]) self.activation = torch.nn.Sigmoid() def forward(self, x): # ModuleList es un objeto iterable for k, layer in enumerate(self.hidden): x = self.activation(layer(x)) return self.output(x) # A continuación definimos algunas funciones utilitarias # # Detalles # # - Algunas capas de PyTorch tales como *Dropout* y *Batch Normalization*, tienen comportamiento distinto en entrenamiento y evaluación # - Podemos cambiar el "modo" de nuestra red neuronal llamando a las funciones internas `train()` y `eval()` respectivamente # - Todo modelo que herede de `nn.Module` tendrá definidas dichas funciones # - El contexto `torch.no_grad()` evita que se construya el grafo, lo cual aumenta la velocidad de la evaluación # + def train_one_step(batch): model.train() optimizer.zero_grad() x, y = batch yhat = model.forward(x) loss = criterion(yhat, y) loss.backward() optimizer.step() return loss.item() def 
evaluate_one_step(batch): model.eval() with torch.no_grad(): x, y = batch yhat = model.forward(x) loss = criterion(yhat, y) return yhat.argmax(dim=1), y, loss.item() def update_plot(epoch): XY = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()].astype('float32')) Z = torch.nn.Softmax(dim=1)(model.forward(XY)).detach().numpy()[:, 0].reshape(xx.shape) [ax_.cla() for ax_ in ax] ax[0].contourf(xx, yy, Z, cmap=plt.cm.RdBu_r, alpha=1., vmin=0, vmax=1) for i, marker in enumerate(['o', 'x', 'd']): ax[0].scatter(data[labels==i, 0], data[labels==i, 1], color='k', s=10, marker=marker, alpha=0.5) for i, name in enumerate(['Train', 'Validation']): ax[1].plot(np.arange(0, epoch+1, step=1), running_loss[:epoch+1, i], '-', label=name+" cost") plt.legend(); ax[1].grid() fig.canvas.draw() # - # ### Entrenamiento usando Pytorch # # Estudiemos # # - ¿Cómo cambia el resultado según la cantidad de capas y neuronas ocultas? # # + torch.manual_seed(12345) # Inicialización neurons = [n_input, 2, n_classes] # Arquitectura model = MLP(neurons) optimizer = torch.optim.Adam(model.parameters(), lr=1e-2) criterion = torch.nn.CrossEntropyLoss(reduction='sum') max_epochs = 100 running_loss = np.zeros(shape=(max_epochs, 2)) best_valid_loss = np.inf fig, ax = plt.subplots(1, 2, figsize=(8, 3.5), tight_layout=True) for epoch in range(max_epochs): # Loop de entrenamiento train_loss, valid_loss = 0.0, 0.0 for batch in torch_train_loader: train_loss += train_one_step(batch) running_loss[epoch, 0] = train_loss/torch_train_loader.dataset.__len__() # Loop de validación for batch in torch_valid_loader: valid_loss += evaluate_one_step(batch)[-1] running_loss[epoch, 1] = valid_loss/torch_valid_loader.dataset.__len__() # Checkpointing if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save({'current_epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'current_valid_loss': valid_loss }, 'best_model.pt') # Actualizar gráficos update_plot(epoch) # - 
# ## Inspeccionando la solución # # - Cada neurona es un hiperplano # - La primera capa son hiperplanos en el espacio de los datos # - La segunda capa es un hiperplano en la salida de la primera capa # - La segunda capa no es un hiperplano en el espacio de los datos, sino una combinación no-lineal de hiperplanos # + assert neurons[1] == 2, "Esta gráfica no funciona con más de 2 neuronas en capa oculta" model = MLP(neurons) model.load_state_dict(torch.load('best_model.pt')['model_state_dict']) XY = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()].astype('float32')) Z = model.activation(model.hidden[0](XY)).detach().numpy() fig, ax = plt.subplots(1+n_classes-1, 2, figsize=(8, 3*(n_classes)), tight_layout=True) for k in range(2): ax[0, k].set_title(f"Salida neurona {k+1}") cf = ax[0, k].contourf(xx, yy, Z[:, k].reshape(xx.shape), cmap=plt.cm.BrBG_r, alpha=1., vmin=0, vmax=1) fig.colorbar(cf, ax=ax[0, k]) for i, marker in enumerate(['o', 'x', 'd']): ax[0, k].scatter(data[labels==i, 0], data[labels==i, 1], color='k', s=10, marker=marker, alpha=0.5) for k in range(n_classes-1): Z = torch.nn.Softmax(dim=1)(model.forward(XY))[:,k].detach().numpy() ax[k+1, 1].contourf(xx, yy, Z.reshape(xx.shape), cmap=plt.cm.RdBu_r, alpha=1.) for i, marker in enumerate(symbols[:n_classes]): ax[k+1, 1].scatter(data[labels==i, 0], data[labels==i, 1], color='k', s=10, marker=marker, alpha=0.5) Z = model.activation(model.output(XY))[:,k].detach().numpy() ax[k+1, 0].contourf(xx, yy, Z.reshape(xx.shape), cmap=plt.cm.RdBu_r, alpha=1.) ax[k+1, 0].set_xlim([0, 1]); ax[k+1, 0].set_ylim([0, 1]); ax[k+1, 0].set_xlabel('Salida Neurona 1'); ax[k+1, 0].set_ylabel('Salida neurona 2'); # - # ### Entrenamiento usando Ignite # # Ignite es una librería de alto nivel # # Provee engines, eventos, manejadores y métricas # # - Los engines se encargan de entrenar y evaluar la red. 
Se ponen en marcha usando el atributo `run` # - Una métrica es un valor con el que evaluamos nuestra red (Loss, accuracy, f1-score) # - Los manejadores nos permiten realizar acciones cuando se cumple un evento, por ejemplo # - Imprimir los resultados # - Guardar el mejor modelo # # + from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator from ignite.metrics import Loss, Accuracy from ignite.handlers import ModelCheckpoint torch.manual_seed(12345) # Inicialización neurons = [2, 2, n_classes] model = MLP(neurons) optimizer = torch.optim.Adam(model.parameters(), lr=1e-2) criterion = torch.nn.CrossEntropyLoss(reduction='sum') max_epochs = 100 trainer = create_supervised_trainer(model, optimizer, criterion) # Creo un engine para entrenar metrics = {'Loss': Loss(criterion), 'Acc': Accuracy()} evaluator = create_supervised_evaluator(model, metrics=metrics) # Creo un engine para validar @trainer.on(Events.EPOCH_COMPLETED(every=10)) # Cada 10 epocas def log_results(engine): evaluator.run(torch_valid_loader) # Evaluo el conjunto de validación loss = evaluator.state.metrics['Loss'] acc = evaluator.state.metrics['Acc'] print(f"Epoca: {engine.state.epoch} \t Loss: {loss:.2f} \t Accuracy: {acc:.2f}") best_model_handler = ModelCheckpoint(dirname='.', require_empty=False, filename_prefix="best", n_saved=1, score_function=lambda engine: -engine.state.metrics['Loss'], score_name="val_loss") # Lo siguiente se ejecuta cada ves que termine el loop de validación evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {'mymodel': model}) trainer.run(torch_train_loader, max_epochs=max_epochs) # + model = MLP(neurons) #model.load_state_dict(torch.load('best_mymodel_val_loss=-33.8630.pt')) fig, ax = plt.subplots(1, n_classes, figsize=(3*n_classes, 3), tight_layout=True) XY = torch.from_numpy(np.c_[xx.ravel(), yy.ravel()].astype('float32')) Z = torch.nn.Softmax(dim=1)(model.forward(XY)).detach().numpy() for j in range(n_classes): ax[j].contourf(xx, 
yy, Z[:, j].reshape(xx.shape), cmap=plt.cm.RdBu_r, alpha=1.) for i, marker in enumerate(symbols[:n_classes]): ax[j].scatter(data[labels==i, 0], data[labels==i, 1], color='k', s=10, marker=marker, alpha=0.5) # - # Si los *engine* por defecto no cumplen con lo que necesitamos, podemos crear un engine en base a una función como sigue # + from ignite.engine import Engine # Esto es lo que hace el engine de entrenamiento def train_one_step(engine, batch): optimizer.zero_grad() x, y = batch yhat = model.forward(x) loss = criterion(yhat, y.unsqueeze(1)) loss.backward() optimizer.step() return loss.item() # Este output puede llamar luego como trainer.state.output # Esto es lo que hace el engine de evaluación def evaluate_one_step(engine, batch): with torch.no_grad(): x, y = batch yhat = model.forward(x) return yhat, y trainer = Engine(train_one_step) evaluator = Engine(evaluate_one_step) for name, metric in metrics.items(): metric.attach(evaluator, name) # - # # Pytorch, Ignite y Tensorboard # # Podemos usar la herramienta [tensorboard](https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html) para visualizar el entrenamiento de la red en vivo y/o comparar distintos entrenamientos # # - Instalar tensorboard versión 1.15 o mayor con conda # # - Escribir en un terminal # # tensorboard --logdir=/tmp/tensorboard/ # # - Apuntar el navegador a # # https://localhost:6006 # + from torch.utils.tensorboard import SummaryWriter import time torch.manual_seed(12345) # Inicialización neurons = [2, 2, n_classes] model = MLP(neurons) optimizer = torch.optim.Adam(model.parameters(), lr=1e-2) criterion = torch.nn.CrossEntropyLoss(reduction='sum') max_epochs = 100 # Creación de engines y asignación de métricas trainer = create_supervised_trainer(model, optimizer, criterion) metrics = {'Loss': Loss(criterion), 'Acc': Accuracy()} evaluator = create_supervised_evaluator(model, metrics=metrics) # Contexto de escritura de datos para tensorboard with 
SummaryWriter(log_dir=f'/tmp/tensorboard/experimento_interesante_{time.time_ns()}') as writer: @trainer.on(Events.EPOCH_COMPLETED(every=1)) # Cada 1 epocas def log_results(engine): evaluator.run(torch_train_loader) # Evaluo el conjunto de entrenamiento writer.add_scalar("train/loss", evaluator.state.metrics['Loss'], engine.state.epoch) writer.add_scalar("train/accy", evaluator.state.metrics['Acc'], engine.state.epoch) evaluator.run(torch_valid_loader) # Evaluo el conjunto de validación writer.add_scalar("valid/loss", evaluator.state.metrics['Loss'], engine.state.epoch) writer.add_scalar("valid/accy", evaluator.state.metrics['Acc'], engine.state.epoch) best_model_handler = ModelCheckpoint(dirname='.', require_empty=False, filename_prefix="best", n_saved=1, score_function=lambda engine: -engine.state.metrics['Loss'], score_name="val_loss") # Lo siguiente se ejecuta cada ves que termine el loop de validación evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {'mymodel': model}) trainer.run(torch_train_loader, max_epochs=max_epochs) # -
notebooks/clases/2_red_multicapa_ignite.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Estimating Distributions # > A tutorial introduction to density estimation methods with deep learning. # # - toc: true # - badges: true # - comments: true # - categories: [Reinforcement learning] # # # Introduction # # Here, I will cover methods that leverage deep learning to estimate distributions. Ideally, they should also allow us to sample new data-points aka generative models. Supervised learning has already shown great success. But, when labels are not available we still want to be able to use raw perceptual data (videos,images,raw text etc) to capture rich patterns in the data. Broadly, I will cover the basics of the following approaches : # # - Autoregressive Models # - Flow Based Models # - Latent Variable Models # - Generative Adversarial Networks (GAN's) # Consider a distribution X, of dog images (24\*24 images). We want a model that can give # - a. high probability values for any dog image in the training data # - b. high probability for any other similar dog image (Generalization) # - c. Can generate novel dogs (Generative model). # # The recipe for satisfying conditions a.,b. seems straight forward - train on the training data maximizing log-likelihood of each image. An architecture like CNN would encode good prior(translational invariance and parameter sharing) to enable generalization. To be clear, what i'm suggesting is for each image, we will have a CNN processing followed by the output of size 256\*24\*24. ( For each pixel,we will have 256 possible values and the label comes from the actual value of that pixel in that image.). So,we are essentially maximizing the log probability of each pixel corresponding to all the images in the training data. # But, Once we have trained on our data what we end up is 24\*24 histograms. 
What exactly will be $P(x)$? Can we say that $\sum_x P(x) = 1$ (the definition of a probability distribution)? Let's say we define $P(x)$ as the product of all the per-pixel outputs. Here, all that we are ensuring is that each histogram corresponding to each pixel sums to 1 (via a softmax over the outputs). So, each of the following methods tries to approach this problem in a different way, with some tradeoff between ease of sampling new values and ease of training.
_notebooks/2020-11-10-Estimating-Distributions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + tags=[]
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, Bidirectional, GRU, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re

# + tags=[]
# Load the tweets and keep only the text and sentiment-label columns.
data = pd.read_csv("../input/twitter-and-reddit-sentimental-analysis-dataset/Twitter_Data.csv")
data.clean_text = data.clean_text.astype(str)
data = data[['clean_text','category']]

# + tags=[]
# Binary setup: drop the neutral class (0), keep positive (1) and negative (-1).
data = data[data.category != 0]

# Normalise the text: lower-case, then strip non-alphanumeric characters.
# BUGFIX: the original pattern '[^a-zA-z0-9\s]' used the range 'A-z', which
# also matches the ASCII characters between 'Z' and 'a' ([ \ ] ^ _ `), so
# those survived the cleanup; 'A-Z' is the intended range.
data['clean_text'] = data['clean_text'].apply(lambda x: x.lower())
data['clean_text'] = data['clean_text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))

# Remove retweet markers.
# NOTE(review): this replaces the substring 'rt' anywhere in a tweet (also
# inside words such as 'start'); confirm that is intended before tightening.
for idx, row in data.iterrows():
    row[0] = row[0].replace('rt', ' ')

# Tokenise to integer sequences over the 2000 most frequent words and pad
# every sequence to the corpus maximum length.
max_features = 2000
tokenizer = Tokenizer(num_words=max_features, split=' ')
tokenizer.fit_on_texts(data['clean_text'].values)
X = tokenizer.texts_to_sequences(data['clean_text'].values)
X = pad_sequences(X)
print(data[data['category'] == 1].size)
print(data[data['category'] == -1].size)

# + tags=[]
# Bidirectional-GRU classifier over the padded token sequences.
embed_dim = 128
gru_out = 196
model = Sequential()
model.add(Embedding(max_features, embed_dim, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.4))
model.add(Bidirectional(GRU(gru_out, dropout=0.2, recurrent_dropout=0.2)))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())

# + tags=[]
# One-hot encode the labels and make a 70/30 train/test split.
Y = pd.get_dummies(data['category']).values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# + tags=[]
batch_size = 32
model.fit(X_train, Y_train, epochs=2, batch_size=batch_size, verbose=2)

# + tags=[]
# Hold out the last 1500 test rows as a validation slice; score on the rest.
validation_size = 1500
X_validate = X_test[-validation_size:]
Y_validate = Y_test[-validation_size:]
X_test = X_test[:-validation_size]
Y_test = Y_test[:-validation_size]
score, acc = model.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)
print("score: %.2f" % (score))
print("acc: %.2f" % (acc))

# + tags=[]
# Classify a single example sentence.
text = ['Aditya is a very good boy']
text = tokenizer.texts_to_sequences(text)
# NOTE(review): maxlen=28 is hard-coded; presumably it matches X.shape[1]
# from the training run above — verify, otherwise inference inputs are
# padded to a different length than the model expects.
text = pad_sequences(text, maxlen=28, dtype='int32', value=0)
print(text)
sentiment = model.predict(text, batch_size=1, verbose=2)[0]
if np.argmax(sentiment) == 0:
    print("negative")
elif np.argmax(sentiment) == 1:
    print("positive")

# + tags=[]
sentimentanalysis-twitter-reddit (1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="LfqITQgUOw6h" # #Step 0: Authenticate & Fetch Code # + cellView="both" colab={} colab_type="code" id="SgmYlNsg5QoY" GCP_PROJECT_ID = "" #@param {type:"string"} # + colab={} colab_type="code" id="0Y2VDsp3u7D7" # !gcloud auth login # !gcloud config set project $GCP_PROJECT_ID # + colab={} colab_type="code" id="qrQe_muGLDZI" # !rm -rf modem && git clone https://github.com/google/modem.git # + [markdown] colab_type="text" id="F68GjTCF6_eO" # # Step 1: Copy the service key file text into the cell below. # # Replace the existing content expect the %%writefile header (line 1). # + colab={} colab_type="code" id="4z-t093VbYi5" # %%writefile modem/bqml/pipeline/svc_key.json { "TODO": "Replace file." } # + [markdown] colab_type="text" id="ysn9j8Lg8DF-" # # Step 2: Fill out the parameters # # GA_ACCOUNT_ID, GA_PROPERTY_ID, GA_DATASET_ID, BQML_PREDICT_QUERY (required) <br> # **Note:** Please don't remove the string quotes. # + colab={} colab_type="code" id="eHTElGAqcq-F" # %%writefile modem/bqml/pipeline/params.py # -------------------------MANDATORY SECTION------------------------ # GA Details GA_ACCOUNT_ID = "" GA_PROPERTY_ID = "" GA_DATASET_ID = "" GA_IMPORT_METHOD = "di" # "di" - Data Import or "mp" - Measurement Protocol # BQML Details - # Ensure that the BQ result headers resemble the data import schema # E.g. If data import schema looks like - ga:clientId, ga:dimension1, etc. # BQ result headers should like ga_clientId, ga_dimension1, etc. BQML_PREDICT_QUERY = """ """ # Options for logging & error monitoring # LOGGING: Create BQ Table for logs with schema as follows - # time TIMESTAMP, status STRING, error ERROR ENABLE_BQ_LOGGING = False # ERROR MONITORING: Sign up for the free Sendgrid API. 
ENABLE_SENDGRID_EMAIL_REPORTING = False # --------------------------OPTIONAL SECTION------------------------- # (OPTIONAL) Workflow Logging - BQ details, if enabled GCP_PROJECT_ID = "" BQ_DATASET_NAME = "" BQ_TABLE_NAME = "" # (OPTIONAL) Email Reporting - Sendgrid details, if enabled SENDGRID_API_KEY = "" TO_EMAIL = "" # (OPTIONAL) Email Reporting - Additional Parameters FROM_EMAIL = "<EMAIL>" SUBJECT = "FAILED: Audience Upload to GA" HTML_CONTENT = """ <p> Hi WorkflowUser, <br> Your BQML Custom Audience Upload has failed- <br> Time: {0} UTC <br> Reason: {1} </p> """ # + [markdown] colab_type="text" id="eMV_48Yw8YXa" # # Step 3: Deploy the cloud function # Run the cell below. Takes 2 - 3 mins. # Asks for the following - # * GCP PROJECT ID # * FUNCTION NAME (any name you like) # * Allow unauthenticated invocations of new function (y/N)? -> N # + colab={} colab_type="code" id="kUftHrVuuJLR" # %%shell # cd modem/bqml/pipeline sh deploy.sh > upload.txt # cat upload.txt # + [markdown] colab_type="text" id="TZo-FEr99ShX" # # Step 4: Test the function # Run the cell below. # + colab={} colab_type="code" id="ZLL5MZXLcihL" import re functions_ui_url = "https://console.cloud.google.com/functions/list?project="+GCP_PROJECT_ID print("Cloud Functions UI: ", functions_ui_url) FUNCTION_URL = re.findall(r'https://.*', open("modem/bqml/pipeline/upload.txt").read())[0] print("Testing: ",FUNCTION_URL) # !curl $FUNCTION_URL -H "Authorization: Bearer $(gcloud auth print-identity-token)" # + [markdown] colab_type="text" id="ev4xYin4MDxO" # # Step 5: Schedule the function using Cloud Scheduler # # Specify the params and run the cell below - # * **JOBNAME:** Any name you like, e.g. "schedule_model_upload" # * **SCHEDULE:** Specify the schedule in a cron-tab format e.g. "45 23 * * *" to run job every day at 11:45 pm # * **TIMEZONE:** Specify timezone e.g. "EST", "PST", "CST" etc. 
for US time zones # + cellView="form" colab={} colab_type="code" id="yXKYmWOVEwa2" JOB_NAME="" #@param {type:"string", description:"ad"} SCHEDULE="" #@param {type:"string"} TIMEZONE="EST" #@param {type:"string"} SERVICE_ACCOUNT_EMAIL=GCP_PROJECT_ID+"<EMAIL>" # !gcloud scheduler jobs create http $JOB_NAME --schedule="$SCHEDULE" --uri="$FUNCTION_URL" --time-zone=$TIMEZONE --oidc-service-account-email=$SERVICE_ACCOUNT_EMAIL --attempt-deadline="540s" scheduler_url = "https://console.cloud.google.com/cloudscheduler?project="+GCP_PROJECT_ID print("Job scheduled. Check in the Cloud Scheduler UI: ", scheduler_url)
bqml/utils/BQML_Deployment_Template_Cloud_Function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Horizon (in months) of the churn label under inspection.
churn = 3

df = pd.read_feather(f"../../../data/processed/Churn_{churn}_train_set.feather")

df["1__SITUACION_CT"].unique()

# Cross-tabulate each month's SITUACION_CT against the churn flag.
pd.crosstab(df["1__SITUACION_CT"], df[f"BAJA_{churn}m"])

pd.crosstab(df["2__SITUACION_CT"], df[f"BAJA_{churn}m"])

# Month-over-month change in SITUACION_CT.
delta = df["2__SITUACION_CT"] - df["1__SITUACION_CT"]

delta.value_counts()

pd.crosstab(delta, df[f"BAJA_{churn}m"])


def _sign(change):
    """Collapse a numeric change into -1 / 0 / +1."""
    if change > 0:
        return 1
    if change < 0:
        return -1
    return 0


# Same crosstab, but keeping only the direction of the change.
pd.crosstab(delta.map(_sign), df[f"BAJA_{churn}m"])
notebooks/3. Model/Hipotesis 2/4. Checking SITUACION_CT.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tuning a scikit-learn estimator with `skopt`
#
# <NAME>, July 2016 <br />
# <NAME>, August 2016
#
# If you are looking for a `GridSearchCV` replacement check out [the `BayesSearchCV` example](sklearn-gridsearchcv-replacement.ipynb) instead.

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# ## Problem statement
#
# Tuning the hyper-parameters of a machine learning model is often carried out using an exhaustive exploration of (a subset of) the space of all hyper-parameter configurations (e.g., using `sklearn.model_selection.GridSearchCV`), which often results in a very time consuming operation.
#
# In this notebook, we illustrate how to couple `gp_minimize` with sklearn's estimators to tune hyper-parameters using sequential model-based optimisation, hopefully resulting in equivalent or better solutions, but within fewer evaluations.
#
# Note: scikit-optimize provides a dedicated interface for estimator tuning via `BayesSearchCV` class which has a similar interface to those of `GridSearchCV`. This class uses functions of skopt to perform hyperparameter search efficiently. For example usage of this class, see [the `BayesSearchCV` example](sklearn-gridsearchcv-replacement.ipynb) example notebook.

# ## Objective
# To tune the hyper-parameters of our model we need to define a model, decide which parameters to optimize, and define the objective function we want to minimize.
# + from sklearn.datasets import load_boston from sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import cross_val_score boston = load_boston() X, y = boston.data, boston.target n_features = X.shape[1] # gradient boosted trees tend to do well on problems like this reg = GradientBoostingRegressor(n_estimators=50, random_state=0) # - # Next, we need to define the bounds of the dimensions of the search space we want to explore and pick the objective. In this case the cross-validation mean absolute error of a gradient boosting regressor over the Boston dataset, as a function of its hyper-parameters. # + from skopt.space import Real, Integer from skopt.utils import use_named_args # The list of hyper-parameters we want to optimize. For each one we define the bounds, # the corresponding scikit-learn parameter name, as well as how to sample values # from that dimension (`'log-uniform'` for the learning rate) space = [Integer(1, 5, name='max_depth'), Real(10**-5, 10**0, "log-uniform", name='learning_rate'), Integer(1, n_features, name='max_features'), Integer(2, 100, name='min_samples_split'), Integer(1, 100, name='min_samples_leaf')] # this decorator allows your objective function to receive a the parameters as # keyword arguments. This is particularly convenient when you want to set scikit-learn # estimator parameters @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=5, n_jobs=-1, scoring="neg_mean_absolute_error")) # - # ## Optimize all the things! # With these two pieces, we are now ready for sequential model-based optimisation. Here we use gaussian process-based optimisation. 
# + from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=50, random_state=0) "Best score=%.4f" % res_gp.fun # - print("""Best parameters: - max_depth=%d - learning_rate=%.6f - max_features=%d - min_samples_split=%d - min_samples_leaf=%d""" % (res_gp.x[0], res_gp.x[1], res_gp.x[2], res_gp.x[3], res_gp.x[4])) # ## Convergence plot # + from skopt.plots import plot_convergence plot_convergence(res_gp)
examples/hyperparameter-optimization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Convert pretrained PyTorch RGB Model to Single Channel Grey
#
# ## The Problem
#
# Challenge participants will be using models pretrained on RGB data where the first convolution takes in 3 channels.
# The competition data is fundamentally single channel. The images are RGB JPEGs, the R, G, and B values are always the same.
#
# This is inefficient because:
# - Data augmentation transforms will be performed on 3x more data than necessary
# - Data transfer to the GPU is 3x larger than necessary
# - Flops of the first convolution are approximately 3x more than necessary
#
# ## The Solution
#
# Convert the first convolution to single channel by summing over the channels.
#
# Below I demonstrate PyTorch surgery on a pre-trained network to address this. Outputs are not exactly the same due to the complexity of whitening with different means and variances for R, G, and B. The final plot should demonstrate that the outputs are approximately the same, and certainly close enough for fine tuning in a dramatically different domain.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" import os import matplotlib.pyplot as plt import numpy as np import pandas as pd from PIL import Image import torchvision import torch.nn # - # ## Demonstration Image pil_im = Image.open(os.path.join('../input/test_images/b51cdf84f.jpg')) pil_im # + model_rgb = torchvision.models.resnet18(pretrained=True) model_rgb.eval() transforms_rgb = torchvision.transforms.Compose([ torchvision.transforms.Resize(224), torchvision.transforms.CenterCrop(224), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) # + model_grey = torchvision.models.resnet18(pretrained=True) weight_rgb = model_grey.conv1.weight # Sum over the weights to convert the kernel weight_grey = weight_rgb.sum(dim=1, keepdim=True) bias = model_grey.conv1.bias # Instantiate a new convolution module and set weights model_grey.conv1 = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) model_grey.conv1.weight = torch.nn.Parameter(weight_grey) model_grey.conv1.bias = bias model_grey.eval() transforms_grey = torchvision.transforms.Compose([ # Reduce to a single channel. We could use x.convert('L') here, but this is probably fewer operations and the result is the same. 
torchvision.transforms.Lambda(lambda x: Image.fromarray(np.asarray(pil_im)[:, :, 0], mode='L')), # These image transforms are now on a single channel image instead of 3 torchvision.transforms.Resize(224), torchvision.transforms.CenterCrop(224), torchvision.transforms.ToTensor(), # Normalization means and stds can be taken as the average of the rgb values torchvision.transforms.Normalize(mean=[np.mean([0.485, 0.456, 0.406])], std=[np.mean([0.229, 0.224, 0.225])]) ]) # + tensor_rgb = transforms_rgb(pil_im) batch_tensor_rgb = tensor_rgb.unsqueeze(0) with torch.no_grad(): classes_rgb = model_rgb(batch_tensor_rgb) classes_rgb = classes_rgb.squeeze().numpy() # + tensor_grey = transforms_grey(pil_im) batch_tensor_grey = tensor_grey.unsqueeze(0) with torch.no_grad(): classes_grey = model_grey(batch_tensor_grey) classes_grey = classes_grey.squeeze().numpy() # - plt.figure(figsize=(8, 8)) plt.scatter(classes_rgb, classes_grey) plt.xlabel('RGB Model') plt.ylabel('Grey Model') plt.title('Converted Model ImageNet Class Logit Comparison') plt.show() # # Classification # + import urllib.request import json response = urllib.request.urlopen('https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt') data = response.read() label_mapping = eval(data) label_mapping[np.argmax(classes_grey)] # - # For the curious... it's a fountain.
notebooks/convert-pretrained-pytorch-rgb-to-single-channel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Packaging # ## Packaging # # Once we've made a working program, we'd like to be able to share it with others. # # A good cross-platform build tool is the most important thing: you can always # have collaborators build from source. # # ## Distribution tools # Distribution tools allow one to obtain a working copy of someone else's package. # # Language-specific tools: PyPI, Ruby Gems, CPAN, CRAN # Platform specific packagers e.g. brew, apt/yum # # Until recently windows didn't have anything like `brew install` or `apt-get` # You had to build an 'installer', but now there is https://chocolatey.org # ## Laying out a project # # When planning to package a project for distribution, defining a suitable # project layout is essential. # # # # + language="bash" # # #%%cmd (windows) # tree --charset ascii greetings -I "doc|build|Greetings.egg-info|dist|*.pyc" # - # We can start by making our directory structure # + language="bash" # mkdir -p greetings/greetings/test/fixtures # - # ## Using setuptools # # To make python code into a package, we have to write a `setupfile`: # # # # # # # + # %%writefile greetings/setup.py from setuptools import setup, find_packages setup( name="Greetings", version="0.1.0", packages=find_packages(exclude=["*test"]), entry_points={"console_scripts": ["greet = greetings.command:process"]}, ) # - # # # # We can now install this code with # # ``` # # cd greetings # pip install . # ``` # # And the package will be then available to use everywhere on the system. 
#

# +
from greetings.greeter import greet

greet("James", "Hetherington")
# -

from greetings.greeter import *

#
# And the scripts are now available as command line commands:
#
#

# + language="bash"
# greet --help

# + language="bash"
# greet <NAME>
# greet --polite <NAME>
# greet <NAME> --title Dr
# -

# ## Installing from GitHub
#
# We could now submit "greeter" to PyPI for approval, so everyone could `pip install` it.
#
# However, when using git, we don't even need to do that: we can install directly from any git URL:
#
# ```
# pip install git+git://github.com/jamespjh/greeter
# ```

# + language="bash"
# greet <NAME> --title Sir
# -

# ## Convert the script to a module
#
# Of course, there's more to do when taking code from a quick script and turning it into a proper module:
#
#
#
#
#
# %%writefile greetings/greetings/greeter.py
def greet(personal, family, title="", polite=False):
    """Generate a greeting string for a person.

    Parameters
    ----------
    personal: str
        A given name, such as Will or Jean-Luc

    family: str
        A family name, such as Riker or Picard

    title: str
        An optional title, such as Captain or Reverend

    polite: bool
        True for a formal greeting, False for informal.

    Returns
    -------
    string
        An appropriate greeting
    """
    # Pick the opening phrase by formality, then assemble the pieces.
    if polite:
        opening = "How do you do, "
    else:
        opening = "Hey, "

    parts = [opening]
    if title:
        parts.append(title + " ")
    parts.append(personal + " " + family + ".")
    return "".join(parts)


# +
import greetings

help(greetings.greeter.greet)
# -

#
#
#
# The documentation string explains how to use the function; don't worry about this for now, we'll consider
# this next time.
# # ## Write an executable script # + # %%writefile greetings/greetings/command.py from argparse import ArgumentParser from .greeter import greet # Note python 3 relative import def process(): parser = ArgumentParser(description="Generate appropriate greetings") parser.add_argument("--title", "-t") parser.add_argument("--polite", "-p", action="store_true") parser.add_argument("personal") parser.add_argument("family") arguments = parser.parse_args() print( greet(arguments.personal, arguments.family, arguments.title, arguments.polite) ) if __name__ == "__main__": process() # - # ## Specify dependencies # We use the setup.py file to specify the packages we depend on: # ```python # setup( # name = "Greetings", # version = "0.1.0", # packages = find_packages(exclude=['*test']), # install_requires = ['argparse'] # ) # ``` # ## Specify entry point # + # %%writefile greetings/setup.py from setuptools import setup, find_packages setup( name="Greetings", version="0.1.0", packages=find_packages(exclude=["*test"]), install_requires=["argparse"], entry_points={"console_scripts": ["greet = greetings.command:process"]}, ) # - # ## Write a readme file # e.g.: # + # %%writefile greetings/README.md Greetings! ========== This is a very simple example package used as part of the Turing [Research Software Engineering with Python](https://alan-turing-institute.github.io/rsd-engineeringcourse) course. Usage: Invoke the tool with greet <FirstName> <Secondname> # - # ## Write a license file # e.g.: # + # %%writefile greetings/LICENSE.md (C) The Alan Turing Institute 2021 This "greetings" example package is granted into the public domain. 
# - # ## Write a citation file # e.g.: # + # %%writefile greetings/CITATION.md If you wish to refer to this course, please cite the URL https://alan-turing-institute.github.io/rsd-engineeringcourse Portions of the material are taken from Software Carpentry http://swcarpentry.org # - # You may well want to formalise this using the [codemeta.json](https://codemeta.github.io/) standard - this doesn't have wide adoption yet, but we recommend it. # ## Define packages and executables # + language="bash" # touch greetings/greetings/test/__init__.py # touch greetings/greetings/__init__.py # - # ## Write some unit tests # # Separating the script from the logical module made this possible: # # # # # # # + # %%writefile greetings/greetings/test/test_greeter.py import yaml import os from ..greeter import greet def test_greeter(): with open( os.path.join(os.path.dirname(__file__), "fixtures", "samples.yaml") ) as fixtures_file: fixtures = yaml.safe_load(fixtures_file) for fixture in fixtures: answer = fixture.pop("answer") assert greet(**fixture) == answer # - # # # # Add a fixtures file: # # # # # # # %%writefile greetings/greetings/test/fixtures/samples.yaml - personal: James family: Hetherington answer: "Hey, <NAME>." - personal: James family: Hetherington polite: True answer: "How do you do, <NAME>." - personal: James family: Hetherington title: Dr answer: "Hey, Dr <NAME>." # + language="bash" # py.test # - # ## Developer Install # # If you modify your source files, you would now find it appeared as if the program doesn't change. # # That's because pip install **copies** the file. # # (On my system to /Library/Python/2.7/site-packages/: this is operating # system dependent.) # # If you want to install a package, but keep working on it, you can do # # ``` # # cd greetings # pip install -e . # ``` # ## Distributing compiled code # # If you're working in C++ or Fortran, there is no language specific repository. 
# You'll need to write platform installers for as many platforms as you want to # support. # # Typically: # # * `dpkg` for `apt-get` on Ubuntu and Debian # * `rpm` for `yum` on Redhat and Fedora # * `homebrew` on OSX (Possibly `macports` as well) # * An executable `msi` installer for Windows. # # ## Homebrew # # Homebrew: A ruby DSL, you host off your own webpage # # See my [installer for the cppcourse example](http://github.com/jamespjh/homebrew-reactor) # # If you're on OSX, do: # # ``` # brew tap jamespjh/homebrew-reactor # brew install reactor # ```
module06_software_projects/06_04_packaging.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Predicting the Goldstein scale rating of events from their actors and coverage # This example workflow demonstrates the use of cuDF, cuML, and dask_cudf for ETL, model training, and evaluation # # Acquiring and Loading the Data # The [GDELT](https://www.gdeltproject.org/) project maintains a publically available 50TB database of world events. This workflow uses a sample of the data obtained through [Google BigQuery](https://console.cloud.google.com/marketplace/details/the-gdelt-project/gdelt-2-events) # # We work with a ~5.7GB sample of the data stored locally. With appropriate Google Cloud credentials, the [gcs python api](https://cloud.google.com/bigquery/docs/quickstarts/quickstart-client-libraries) can be used to query the data directly without using a local copy. The GDELT project provides [a codebook](http://data.gdeltproject.org/documentation/GDELT-Event_Codebook-V2.0.pdf) to help us understand the dataset's schema # + import dask_cudf import cudf from os.path import join # The location of our local copy DATA_DIR = "/datasets/gdelt/events/" # Define the schema for the dataset. Normally dask_cudf can infer these types, however # it does this on a per-file basis. Some of our features are so sparse that all entries # within a file are null, making it impossible to infer the data type dtypes = ( ["int64"] * 4 + ["float64"] + ["str"] * 20 + ["int64"] + ["str"] * 3 + ["int64"] + ["float64"] + ["int64"] * 3 + ["float64"] + ["int64"] + ["str"] * 4 + ["float64"] * 2 + ["str"] + ["int64"] + ["str"] * 4 + ["float64"] * 2 + ["str"] + ["int64"] + ["str"] * 4 + ["float64"] * 2 + ["str"] + ["int64"] + ["str"] ) # The dataset was partitioned into ~2000 csv files. 
Let's load a subset of those df = dask_cudf.read_csv(join(DATA_DIR, "00000000189*"), dtype=dtypes).compute() # - # What did that give us? print(f"Found {len(df)} rows") print(f"Memory footprint: {df.__sizeof__() / (1024 * 1024):.2f} MB") print(df.columns) # # Preprocessing # We don't need all or even most of these columns # Rather than listing the columns we don't want, let's list the ones we do keep_cols = ( "EventCode", "QuadClass", "GoldsteinScale", "NumMentions", "NumSources", "NumArticles", "AvgTone", "Actor1Type1Code", "Actor1Type2Code", "Actor1Type3Code", "Actor2Type1Code", "Actor2Type2Code", "Actor2Type3Code", "ActionGeo_CountryCode", "Actor1Geo_CountryCode", "Actor2Geo_CountryCode", ) # And drop everything else df = df.drop(col for col in df.columns if col not in keep_cols) # ## Actor metadata # These codes tell us who, what, and where the relevant actors in the event are. # # If we look at the codebook, we see that a lot of these features are categorical string labels. CuML can transform these into numerical values for us # + from cuml.preprocessing import LabelEncoder le = LabelEncoder() for col in ( "Actor1Type1Code", "Actor1Type2Code", "Actor1Type3Code", "Actor2Type1Code", "Actor2Type2Code", "Actor2Type3Code", "ActionGeo_CountryCode", "Actor1Geo_CountryCode", "Actor2Geo_CountryCode", ): df[col] = le.fit_transform(df[col]) # - print(df["Actor1Type1Code"].value_counts()) # ## QuadClass # An event's `QuadClass` categorizes it as relating to either **verbal cooperation**, **material cooperation**, **verbal conflict**, or **material conflict** # # The data is still a bit messy. Despite what the codebook says, `QuadClass` has a huge outlier we need to clean up. 
Let's also one-hot encode it to preserve the orthogonality of the categories # # *Incidentally, it looks like verbal cooperation was the most common category, which is always nice to see* print(df["QuadClass"].value_counts()) # + from cudf import get_dummies # We can fix that by clipping the data to our known range df["QuadClass"] = df["QuadClass"].applymap(lambda i: min(i, 4)) df["QuadClass"] = df["QuadClass"].applymap(lambda i: max(i, 1)) # Now we one hot encode and drop the old column df = get_dummies(df, columns=["QuadClass"]).drop("QuadClass") # - print(df.columns) # ## Normalization # - `NumArticles`, `NumSources`, and `NumMentions` tell us the number of source documents, information sources, and mentions across sources that an event recieved within 15 minutes of being discovered by the database. We can use this as a proxy for the importance or impact of an event # - `AvgTone` describes the tone of the above documents towards the event, on a scale from -100 (extremely negative) to 100 (extremely positive) # - `GoldsteinScale` is our target variable. It serves as a way of quantifying the intensity of positive and negative events. # # It looks like these variables are filled with `null` values for col in ("NumArticles", "NumSources", "NumMentions", "AvgTone", "GoldsteinScale"): print(f"{col}: {df[col].isna().value_counts()[True]}") # Proportionally there aren't that many `null`s, so we can reasonably replace them with the average feature values. 
While we're here, let's also remove any outliers from our target variable # + df["AvgTone"] = df["AvgTone"].fillna(df["AvgTone"].mean()) df["GoldsteinScale"] = df["GoldsteinScale"].applymap(lambda i: min(i, 10)) df["GoldsteinScale"] = df["GoldsteinScale"].applymap(lambda i: max(i, -10)) df["GoldsteinScale"] = df["GoldsteinScale"].fillna(df["GoldsteinScale"].mean()) df["NumMentions"] = ( df["NumMentions"].astype("float64").fillna(df["NumMentions"].mean()) ) df["NumSources"] = ( df["NumSources"].astype("float64").fillna(df["NumSources"].mean()) ) df["NumArticles"] = ( df["NumArticles"].astype("float64").fillna(df["NumArticles"].mean()) ) # - # We'll also want to min/max scale `NumSources`, `NumArticles`, and `NumMentions` to maintain consistency with our other features for col in ( "NumMentions", "NumSources", "NumArticles", ): col_max = df[col].max() col_min = df[col].min() if col_max != col_min: df[col] = (df[col] - df[col].mean()) / (col_max - col_min) # Before we give cuML our data, we need to ensure all columns have the same dtype for col in filter(lambda col: df[col].dtype != "float64", df): df[col] = df[col].astype("float64") # # Model Training and Evaluation # Now we can run and score our regression. 
To do this we'll have to
#
# - define our performance metrics
# - split our data into train and test sets
# - train and test our model

# +
# As our performance metrics we'll use a basic mean squared error and coefficient of determination implementation
def mse(y_test, y_pred):
    """Mean squared error between the true and predicted series."""
    return ((y_test - y_pred) ** 2).mean()


def cod(y_test, y_pred):
    """Coefficient of determination (R^2): 1 - residual SS / total SS.

    NOTE(review): if `y_test` is constant, `total` is zero and this divides by
    zero -- confirm upstream data always has variance in the target.
    """
    y_bar = y_test.mean()
    total = ((y_test - y_bar) ** 2).sum()
    residuals = ((y_test - y_pred) ** 2).sum()
    return 1 - (residuals / total)


# -

# With the help of cuML we can abstract away most of the train/test process

# +
from cuml.preprocessing.model_selection import train_test_split
from cuml.linear_model.ridge import Ridge


def train_and_score(data, clf, train_frac=0.8, n_runs=20):
    """Repeatedly split `data`, fit `clf`, and score it.

    Performs `n_runs` random train/test splits (fraction `train_frac` for
    training) against the "GoldsteinScale" target, refitting `clf` on each
    split, and returns two lists: the per-run MSE and per-run COD scores.
    """
    mse_scores, cod_scores = [], []
    for _ in range(n_runs):
        X_train, X_test, y_train, y_test = train_test_split(
            data, y="GoldsteinScale", train_size=train_frac
        )
        # Note: `clf` is refit from scratch on every split.
        y_pred = clf.fit(X_train, y_train).predict(X_test)
        mse_scores.append(mse(y_test, y_pred))
        cod_scores.append(cod(y_test, y_pred))
    return mse_scores, cod_scores


# -

# # Results
# Let's see how our model performs

import numpy as np

n_runs = 20
clf = Ridge()
mse_scores, cod_scores = train_and_score(df, clf, n_runs=n_runs)
print(f"median MSE ({n_runs} runs): {np.median(mse_scores)}")
print(f"median COD ({n_runs} runs): {np.median(cod_scores)}")

# Reasonably good correlation, though the MSE leaves a bit to be desired. Predicting a value on the Goldstein scale within ~2.24 points is nice, until you remember our values are in \[-10, 10\]. Let's try some more intelligent analysis. For instance, what does the learning curve for this model look like?
clf = Ridge()
n_runs = 10
train_sizes = np.arange(0.0001, 0.91, 0.05)

# Learning curve: median MSE (the first element of train_and_score's
# (mse_scores, cod_scores) return value) at each training-set fraction.
scores = []
for frac in train_sizes:
    run_mse, _ = train_and_score(df, clf, train_frac=frac, n_runs=n_runs)
    scores.append(np.median(run_mse))

print(f"Variance: {np.var(scores)}")

import matplotlib.pyplot as plt

plt.plot(train_sizes, scores)
plt.show()

# Interestingly, our loss changes very little when given 10% of the data or 90% of the data. Were we to deploy this model, satisfied with its performance, we could comfortably train it on only ~890,000 instances (for an even greater speedup) with minimal performance loss.

# # Performance
# On our 5.7GB sample of the dataset which consisted of ~8.9 million rows, this notebook takes 2.5 minutes to run from start to finish. Recall that this includes
# - ETL
# - Preprocessing and feature engineering, with feature encoding and rescaling
# - Training and inference for a total of 190 ridge regressors
#
# In the same amount of time, the equivalent pandas/scikit-learn implementation barely finishes its etl phase.
#
#
# # The key takeaways
# With cuDF, cuML, and dask_cudf, we're able to
# - explore large datasets
# - quickly analyze and transform our data
# - train and evaluate models
#
# with minimal changes to our existing pandas, scikit-learn, and dask powered workflows, and see drastic improvements in performance, making it easier to build scalable data science pipelines without sacrificing performance
intermediate_notebooks/E2E/gdelt/Ridge_regression_with_feature_encoding.ipynb