code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import random import numpy a = random.sample(range(-50,50), 100) len(a) # %time for i, j in zip(range(0, len(a)), range(5, len(a))): _ = a[i] < a[j] # %time i = 0 for i in range(0, len(a)-5): _ = a[i] < a[i+5]
jupyterFile/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="Sm6WDU403MIX" # ## News Named Entity Extraction (NER) and Sentiment Analysis # + [markdown] colab_type="text" id="Q7JrDKGB3MIY" # ###### Load dependency libraries # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="4mbIoRAy3MIZ" outputId="d9569336-a829-4b00-9ddd-ccb708cc37ba" import pandas as pd import numpy as np import threading from textblob import TextBlob import psycopg2 import requests import re import json import sqlalchemy as db from bs4 import BeautifulSoup # + [markdown] colab_type="text" id="atxlDpT63MIc" # **News / NLP Signal Pipeline** # # 1. **Fetch news** - read in news source via an RSS feed (Feedparser) (**news_download**) # 2. **Extract entitities** - perform named entity recognition (NER) on the unstructured text (Open Calais) (**entity_extraction**) # 3. **Sentiment analysis** - extract sentiment on the news item (Texblob) (**sentiment_assignment**) # 4. **Historical EOD prices** - fetch historical prices (Eodhistoricaldata.com) (**price_download**) # 5. **Find signal** - correlate sentiment with price movement (**signal_analysis**) # 6. 
**Backtesting** - back test for PnL performance (Pyfinance) (**sentiment_backtest**) # # https://www.altsignals.ai # # # + [markdown] colab_type="text" id="1KYr2kn2WJfF" # **Get News from Database** # + [markdown] colab_type="text" id="4DSGfeh-eVXL" # # - DB_URL = 'postgres://altsignals:altdata2$2@172.16.31.10:5432/altsignals-beta' engine = db.create_engine(DB_URL) conn = engine.connect() metadata = db.MetaData() news_item = db.Table('news_item', metadata, autoload=True, autoload_with=engine) print(news_item.columns.keys()) # Get only English news for now get_news_query = db.select([news_item.columns.news_item_id, news_item.columns.summary]).where(news_item.columns.language == 'en') news_result = conn.execute(get_news_query).fetchall() for r in news_result: print('id:', r['news_item_id'], 'summary:', r['summary']) # + [markdown] colab_type="text" id="8bNJz6tuXcv5" # **2. Named Entity Extraction (NER) - make an API calls to Thomson Reutersr Intelligent Tagging (TRIT) with news headline content** # - entity = db.Table('entity', metadata, autoload=True, autoload_with=engine) print(entity.columns.keys()) # + [markdown] colab_type="text" id="9CsJn6LuXzln" # **2.1 Query TRIT / OpenCalais JSON API** # + colab={} colab_type="code" id="hsV7cfPv3MIn" def get_trit(content): headType = "text/raw" token = '<KEY>' url = "https://api-eit.refinitiv.com/permid/calais" payload = content.encode('utf8') headers = { 'Content-Type': headType, 'X-AG-Access-Token': token, 'outputformat': "application/json" } TRITResponse = requests.request("POST", url, data=payload, headers=headers) # Load content into JSON object JSONResponse = json.loads(TRITResponse.text) return JSONResponse # + [markdown] colab_type="text" id="0jeEgr3jVpcq" # **2.2 Get entities in news** # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="V6hlzkMrRBQm" outputId="f6db0f8e-5eed-4545-b5e3-ac64af7772a3" def add_entity(news_id, summary): JSONResponse = get_trit(summary) for key in 
JSONResponse: if ('_typeGroup' in JSONResponse[key]): if JSONResponse[key]['_typeGroup'] == 'entities': print(JSONResponse[key]['_type'] + ", " + JSONResponse[key]['name']) entity_ins = entity.insert().values(type = JSONResponse[key]['_type'], name = JSONResponse[key]['name'], news_item_id = news_id) conn.execute(entity_ins) # - for r in news_result: add_entity(r['news_item_id'], r['summary']) # + [markdown] colab_type="text" id="Xg4sJ-I5V1Uf" # **2.3 Get RIC code for entity** # + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="8_AWYjwDRxgp" outputId="5264a469-8347-4ee9-daa2-58c4bccc2a0c" #Get RIC code print('====RIC====') print('RIC') for entity in JSONResponse: for info in JSONResponse[entity]: if (info =='resolutions'): for companyinfo in (JSONResponse[entity][info]): if 'primaryric' in companyinfo: symbol = companyinfo['primaryric'] print(symbol) # + [markdown] colab_type="text" id="EVsAt1HeWALK" # **2.4 Get topics for the news item** # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="jj4knjVNagMI" outputId="d1d5ed9e-3f76-46a9-ce93-57b7c0a4e437" #Print Header print(symbol) print('====Topics====') print('Topics, Score') for key in JSONResponse: if ('_typeGroup' in JSONResponse[key]): if JSONResponse[key]['_typeGroup'] == 'topics': print(JSONResponse[key]['name'] + ", " + str(JSONResponse[key]['score'])) # + [markdown] colab_type="text" id="SUfjk4NAYNPw" # **4. 
Sentiment Analysis** # + colab={} colab_type="code" id="-1KUfYDi3MIq" # Define function to be used for text senitments analysis def get_sentiment(txt): ''' Utility function to clean text by removing links, special characters using simple regex statements and to classify sentiment of passed tweet using textblob's sentiment method ''' #clean text clean_txt = ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", txt).split()) # create TextBlob object of passed tweet text analysis = TextBlob(clean_txt) # set sentiment if analysis.sentiment.polarity > 0: return 'positive' elif analysis.sentiment.polarity == 0: return 'neutral' else: return 'negative' # + colab={"base_uri": "https://localhost:8080/", "height": 105} colab_type="code" id="whiyVb30R4Ys" outputId="eb14ac4b-4b0a-44d5-bbe7-a33691f04c59" print('headline: ', allheadlines[1]) print('headline sentiment: ', get_sentiment(allheadlines[1])) print('summary: ', summaries[1]) print('summary sentiment: ', get_sentiment(summaries[1])) # + [markdown] colab_type="text" id="_JzOdMSyev-g" # **5. Get historical EOD price data** # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="ygRBdEJwe0Sc" outputId="c9658660-daf9-4561-a6ce-ab07f0398e71" eod_api_token = '5<PASSWORD>' eod_symbol = symbol.replace('N', 'US') eod_price_url = 'https://eodhistoricaldata.com/api/eod/' + eod_symbol + '?api_token=' + eod_api_token price_df = pd.read_csv(eod_price_url) price_df.sort_values(by=['Date'], inplace=True, ascending=False) price_df.head() # + colab={} colab_type="code" id="HUKKSaMgmvyK" # + colab={} colab_type="code" id="fNIpTEpb3MLJ"
data_collection/altdata_service/news/notebooks/entity_extraction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/taureandyernv/colabs/blob/master/rapids_colab_0_8_base.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="CtNdk7PSafKP" colab_type="text" # #Setup: # # 1. Use pynvml to confirm Colab allocated you a Tesla T4 GPU. # 2. Install most recent Miniconda release compatible with Google Colab's Python install (3.6.7) # 3. Install RAPIDS libraries # 4. Copy RAPIDS .so files into current working directory, a workaround for conda/colab interactions # 5. Add the ngrok binary to expose Dask's status dashboard # 6. Update env variables so Python can find and use RAPIDS artifacts # # All of the above steps are automated in the next cell. # # You should re-run this cell any time your instance re-starts. 
# + id="JMklOdH-i424" colab_type="code" outputId="ed1eef86-d20f-45cf-ddc6-984dc01e559e" colab={"base_uri": "https://localhost:8080/", "height": 493} # !wget https://github.com/randerzander/notebooks-extended/raw/master/utils/rapids-colab.sh # !chmod +x rapids-colab.sh # !./rapids-colab.sh import sys, os sys.path.append('/usr/local/lib/python3.6/site-packages/') os.environ['NUMBAPRO_NVVM'] = '/usr/local/cuda/nvvm/lib64/libnvvm.so' os.environ['NUMBAPRO_LIBDEVICE'] = '/usr/local/cuda/nvvm/libdevice/' import nvstrings, nvcategory, cudf, cuml, xgboost import dask_cudf, dask_cuml, dask_xgboost from dask.distributed import Client, LocalCluster, wait, progress # we have one GPU, so limit Dask's workers and threads to exactly 1 cluster = LocalCluster(processes=False, threads_per_worker=1, n_workers=1) client = Client(cluster) client # + [markdown] id="ZGiYPeJvHHuL" colab_type="text" # Dask has a helpful [web interface](http://distributed.dask.org/en/latest/web.html) for monitoring the status of long running tasks. It's started automatically when you create a LocalCluster. # # However, when you run things like Dask's web interface in the background on Colab, their ports are not publicly exposed. Services like ngrok provide an easy way to access them anyway. # # If you want to expose your Dask dashboard as a public URL, run the cell below and open the resulting link in a new tab. When you run computations below, you can switch to it and observe as the task DAG progresses. 
# + id="kiY9evYWiubK" colab_type="code" outputId="d3025425-b681-4c74-c49e-e419ec1a367a" colab={"base_uri": "https://localhost:8080/", "height": 34} get_ipython().system_raw('ngrok http 8787 &') # !curl -s http://localhost:4040/api/tunnels | python3 -c \ # "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])" # + colab_type="code" id="zaTeeNfZzuS8" outputId="d498d4ad-4c42-4ae6-9b33-02c609a78611" colab={"base_uri": "https://localhost:8080/", "height": 215} # the datafile has no header, supply columnnames cols = [ "Year", "Month", "DayofMonth", "DayofWeek", "CRSDepTime", "CRSArrTime", "UniqueCarrier", "FlightNum", "ActualElapsedTime", "Origin", "Dest", "Distance", "Diverted", "ArrDelay" ] #from http://kt.ijs.si/elena_ikonomovska/data.html df = dask_cudf.read_csv('gs://rapidsai/airline/data.csv', names=cols) df.head().to_pandas() # + id="HgvYZg_nUJSd" colab_type="code" outputId="e173d910-c670-4a4f-f14e-b2090860dc7a" colab={"base_uri": "https://localhost:8080/", "height": 105} # %%time cat_cols = ['Origin', 'Dest', 'UniqueCarrier'] uniques = {} for col in cat_cols: print('Finding uniques for ' + col) uniques[col] = list(df[col].unique().compute()) # + id="fywOE4Oh4Hrq" colab_type="code" outputId="2bb3e88c-95db-4238-ccb0-77b74995ab21" colab={"base_uri": "https://localhost:8080/", "height": 52} # %%time import nvcategory from librmm_cffi import librmm import numpy as np # There's a WIP PR that will make this much cleaner.. 
keep an eye on # https://github.com/rapidsai/cuml/pull/631 def categorize(df_part, uniques): for col in uniques.keys(): keys = nvstrings.to_device(uniques[col]) cat = nvcategory.from_strings(df_part[col].data).set_keys(keys) device_array = librmm.device_array(df_part[col].data.size(), dtype=np.int32) cat.values(devptr=device_array.device_ctypes_pointer.value) df_part[col] = cudf.Series(device_array) return df_part df = df.map_partitions(categorize, uniques) # Turn into binary classification problem df["ArrDelayBinary"] = df["ArrDelay"] > 0 df.head().to_pandas() # + id="XnTnL2FRLGn5" colab_type="code" colab={} # About 25% of data is year >= 2014 X_train = df.query('Year < 2004') y_train = X_train[["ArrDelayBinary"]] X_train = X_train[X_train.columns.difference(["ArrDelay", "ArrDelayBinary"])] X_test = df.query('Year >= 2004') y_test = X_test[["ArrDelayBinary"]] X_test = X_test[X_test.columns.difference(["ArrDelay", "ArrDelayBinary"])] # + id="8NJyJxf-ZF7z" colab_type="code" outputId="0a352c96-5b06-4dac-929e-3f1adf31ffe4" colab={"base_uri": "https://localhost:8080/", "height": 440} # %%time X_train = X_train.compute() y_train = y_train.compute() y_train['ArrDelayBinary'] = y_train['ArrDelayBinary'].astype('int32') #X_train = X_train.persist() #y_train = y_train.persist() #res = wait([X_train, y_train]) # + id="b6CIS-B1FXe9" colab_type="code" colab={} # %%time dtrain = xgboost.DMatrix(X_train, y_train) # + id="SDme2AT1LTvG" colab_type="code" outputId="11078d66-d247-420b-92e4-2eb28c5b30c5" colab={"base_uri": "https://localhost:8080/", "height": 405} # %%time params = {"max_depth": 8, "learning_rate": 0.1, "min_child_weight": 1, "max_leaves": 256, "tree_method": "gpu_hist", "ngpus": -1, "reg_lambda": 1, "objective": "binary:logistic", "scale_pos_weight": len(y_train) / len(y_train.query('ArrDelayBinary > 0')) } #train the model model = dask_xgboost.train(client, params, X_train, y_train) # + [markdown] id="9oOCJ4NYMjY7" colab_type="text" # # cuDF and cuML Examples # # # 
Now you can run code! # # What follows are basic examples where all processing takes place on the GPU. # + [markdown] id="V38dg-oUJtEO" colab_type="text" # #[cuDF](https://github.com/rapidsai/cudf)# # # Load a dataset into a GPU memory resident DataFrame and perform a basic calculation. # # Everything from CSV parsing to calculating tip percentage and computing a grouped average is done on the GPU. # # _Note_: You must import nvstrings and nvcategory before cudf, else you'll get errors. # + colab_type="code" id="gnu9UeXkZPOo" outputId="002aae86-58a4-48c4-c62c-6e33fadefe8e" colab={"base_uri": "https://localhost:8080/", "height": 158} import nvstrings, nvcategory, cudf import io, requests # download CSV file from GitHub url="https://github.com/plotly/datasets/raw/master/tips.csv" content = requests.get(url).content.decode('utf-8') # read CSV from memory tips_df = cudf.read_csv(io.StringIO(content)) tips_df['tip_percentage'] = tips_df['tip']/tips_df['total_bill']*100 # display average tip by dining party size print(tips_df.groupby('size').tip_percentage.mean()) # + [markdown] id="Ul3UZJdUJqlT" colab_type="text" # #[cuML](https://github.com/rapidsai/cuml)# # # This snippet loads a # # As above, all calculations are performed on the GPU. # + id="dCE8WhO3HpL_" colab_type="code" outputId="efb3c0e0-6ca0-435e-c939-dd0913ab45ba" colab={"base_uri": "https://localhost:8080/", "height": 87} import cuml # Create and populate a GPU DataFrame df_float = cudf.DataFrame() df_float['0'] = [1.0, 2.0, 5.0] df_float['1'] = [4.0, 2.0, 1.0] df_float['2'] = [4.0, 2.0, 1.0] # Setup and fit clusters dbscan_float = cuml.DBSCAN(eps=1.0, min_samples=1) dbscan_float.fit(df_float) print(dbscan_float.labels_) # + [markdown] id="Dlsyk9m9NN2K" colab_type="text" # # Next Steps # # # For an overview of how you can access and work with your own datasets in Colab, check out [this guide](https://towardsdatascience.com/3-ways-to-load-csv-files-into-colab-7c14fcbdcb92). 
# # For more RAPIDS examples, check out our RAPIDS notebooks repos: # 1. https://github.com/rapidsai/notebooks # 2. https://github.com/rapidsai/notebooks-extended
rapids_colab_0_8_base.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="cZTYjhNXWAP0" # # "Pandas 정복기 [1]" # > "데이터 핸들링을 위한 필수 라이브러리 Pandas" # # - toc: true # - badges: true # - comments: true # - categories: [Data analysis] # - image: images/pandas/1.png # - search_exclude: true # # + id="RWpNZrMEVqz-" #hide from sklearn.datasets import load_iris import pandas as pd import numpy as np import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/"} id="fh-9QBTGemCy" outputId="576b4299-789e-4638-efa2-a8a3f1e5ced5" #hide from google.colab import drive drive.mount('/content/drive') # + id="OcCkLkkY_eWI" #hide import os os.chdir('/content/drive/MyDrive/Kaggle') # + colab={"base_uri": "https://localhost:8080/"} id="pyAa0od3_j6w" outputId="219bf3cc-38ec-40e1-fc1a-fa395549d0a9" #hide # !mkdir -p ~/.kaggle # !cp kaggle.json ~/.kaggle/ # Permission Warning 이 일어나지 않도록 # !chmod 600 ~/.kaggle/kaggle.json # 본인이 참가한 모든 대회 보기 # !kaggle competitions list # + colab={"base_uri": "https://localhost:8080/"} id="hgqMGL3U_f7g" outputId="63dbcf76-39a2-4e11-f729-eb9df63789ec" #hide # !kaggle competitions download -c titanic # + id="tV-YvkeHewQx" # hide df=pd.read_csv("train.csv") test= pd.read_csv('test.csv') IDtest= test["PassengerId"] # + [markdown] id="_ZUP541PWVns" # # # 1. DataFrames # 2. Aggregating Data # 3. Slicing and Indexing Data # 4. Creating and Visualizing Data # + [markdown] id="8fIra0jhWgeO" # 새로운 데이터를 받아서 분석하기 위해서 Pandas를 자주 사용합니다. 하지만 가끔씩 제대로 기억이 나지 않는 부분들이 있어 한꺼번에 복습 겸 정리를 해둘려고 이 포스트를 만들어봤습니다. # # DataSet은 Kaggle의 Titanic 데이터를 사용했습니다. # 기본적인 Pandas 사용방법을 정리하기 때문에 `train.csv`를 `df`로 저장했습니다 # # # # 1. DataFrames # 사실 `pandas`는 `numpy`와 `matplotlib` 위에 빌드됩니다. # numpy는 다차원 배열 객체를 제공하고 matplotlib는 시각화를 담당합니다. 
# # + [markdown] id="tCbcCm-tY0Vc" # - df.head() # 데이터 구조가 어떻게 생겼나 보기 # + colab={"base_uri": "https://localhost:8080/", "height": 335} id="ZJMQENDXYBYb" outputId="38d5bcef-7db3-484c-bf5a-27f6c70d1acb" df .head() # + [markdown] id="lwbKw1oQY5lP" # - df.info() # 각 colum별 데이터 타입과 결측값이 있는지 확인 할 수 있음 # + colab={"base_uri": "https://localhost:8080/"} id="XFyBsqJYYsHW" outputId="0c208e86-7c04-4468-e207-944d1a654414" df .info() # + [markdown] id="tl1kzTLEZE5T" # - df.shpae # 데이터 형태 보기 150행 5열(튜플 형태) # + colab={"base_uri": "https://localhost:8080/"} id="8rgSi8bjYvg9" outputId="c6875f28-2b5e-4260-8ea2-e91b83a99d01" df .shape # + [markdown] id="O0RfeHAcZNZI" # - df.describe() # 데이터 셋의 요약 통계를 볼 수 있음 # + colab={"base_uri": "https://localhost:8080/", "height": 294} id="2QKdcGloYyUH" outputId="8357f591-cb4a-4325-cc41-0a3d6b6044c0" df .describe() # + [markdown] id="QxwIAB19ZgE9" # - df.values # 2차원 array 형태로 값들을 볼 수 있음 # + colab={"base_uri": "https://localhost:8080/"} id="29YuZ_EFZRQE" outputId="78972cfa-acb5-4356-865a-e3368f5c8d53" df .values # + [markdown] id="MzkU8wVyZ2ar" # - df.columns # 열 이름 확인할 수 있음 # + colab={"base_uri": "https://localhost:8080/"} id="zGS229ctZlYo" outputId="6a90ed7a-693e-4c2f-8c83-a52241ffe2aa" df .columns # + [markdown] id="W-BlBi8vaBXQ" # - df.index # 행 번호 또는 행 이름이 포함됩니다. 
# + colab={"base_uri": "https://localhost:8080/"} id="wdpw2lDXaBfp" outputId="e57c8ffd-9792-47be-9668-dbc9ffbb692e" df .index # + [markdown] id="1NtUXT0efS61" # - df.sort_values(*컬럼명, ascending=True or False) # 해당 컬럼에 대해 오름차순(default)으로 정렬 # 내림차순 정렬시 `ascending=False` # + colab={"base_uri": "https://localhost:8080/", "height": 581} id="75PyQXn-aFUg" outputId="0f0b1c38-d2df-4f79-fba7-5ba9dab56aee" df .sort_values("Age") # + colab={"base_uri": "https://localhost:8080/", "height": 631} id="ukumagHWfROB" outputId="01f5ccb7-a9d2-4495-b5de-22402d4d17ea" df.sort_values(["Pclass", 'Age']) # + colab={"base_uri": "https://localhost:8080/", "height": 614} id="asQCqLTnfugS" outputId="189edd76-1565-4acc-a3b1-ffffa607cd12" # Pclass = 오름차순 Age = 내림차순 df.sort_values(["Pclass", 'Age'], ascending=[True, False]) # + [markdown] id="Px1MOUMggAwc" # - df['컬럼명'] # 두 개 이상의 컬럼을 볼 때는 괄호를 2번 사용해야 합니다. 바깥쪽은 DataFrame의 부분 집합을 담당하고 안쪽은 하위 집합의 열 이름의 목록을 담습니다. # + colab={"base_uri": "https://localhost:8080/"} id="EJc22zv0fzK6" outputId="5be22468-01f6-450a-8167-0c00c4d8ecb5" df['Embarked'] # + colab={"base_uri": "https://localhost:8080/", "height": 414} id="0nVRbgmpf4cU" outputId="b546b578-6880-43d7-dbbd-0e2c3273835a" df[['Pclass', 'Embarked']] # + [markdown] id="NL-9jLD4hVhL" # - 논리 연산자 `>`, `<`, `==` # 대괄호를 이용해 500명 이상인 행의 하위 집합을 만들 수도 있습니다. 뿐만아니라 텍스트, 날짜 데이터도 가능합니다. 
# + colab={"base_uri": "https://localhost:8080/"} id="y2vvExFIf_qD" outputId="e75aca5b-9ab4-47d5-c80e-8478edd750af" df['Age'] > 30 # + colab={"base_uri": "https://localhost:8080/", "height": 664} id="MSjCpefnggqj" outputId="fb26b977-fad1-433f-8c7f-04232e30f56e" df[df['Age'] > 18] # + colab={"base_uri": "https://localhost:8080/", "height": 731} id="eXPfS6I6htz6" outputId="0358b2e7-f115-42e8-f97a-7396b9eec773" df[df['Embarked'] == 'S'] # + colab={"base_uri": "https://localhost:8080/", "height": 505} id="Q7ask4HhinDt" outputId="f8b4f750-fbb9-4879-8cbe-7b2f2839d910" is_male= df['Sex'] =='male' is_adult= df['Age'] > 19 # State = Ak and Count=0 인 행 df[is_male & is_adult][:10] # + colab={"base_uri": "https://localhost:8080/", "height": 505} id="_d7hJQPijLkJ" outputId="2216fce8-b5b9-4b24-b799-312a302824c9" # 위 와 동일함 각 조건에 ()추가 df[(df['Sex']== 'male') & (df['Age']>19)][:10] # + [markdown] id="LDmZWdFgj2xY" # - df['컬럼명'].isin() # 범주형 여러 값을 필터링 하는 경우에 사용 # 마찬가지로 여러개를 넣기 위해서는 대괄호 사용 # + colab={"base_uri": "https://localhost:8080/", "height": 335} id="66413uvnjf6o" outputId="4e79c1cb-2fdf-42bf-9d3a-3327e8254534" is_C_or_S = df['Embarked'].isin(['C', 'S']) df[is_C_or_S].head() # + [markdown] id="IfSw56qiCMEK" # - 컬럼 추가하기 # df['컬럼명']= _______ # 예를 들어서 SibSp와 Parch라는 Feature를 더해 Family_member라는 변수를 만들어 보겠습니다. # + colab={"base_uri": "https://localhost:8080/", "height": 488} id="9CROZpKkB1TI" outputId="c5c5a5bb-fb31-43e6-828f-30db2e146739" # 형제 자매 수 + 부모님 수 + 본인(1) df['Family_member']=df['SibSp']+df['Parch']+1 df.head()
_notebooks/2021-09-16-Pandas_tutorial_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- myset = set() myset myset.add(1) myset myset.add(2) myset myset.add(2) myset mylist = [1,1,1,1,2,2,2,2,3,3,3,3] set(mylist)
Sets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns #master___2018.07.24-2* df = pd.read_csv("../classifier_compression/logs/master___2018.07.24-235532/arch_space.csv") df df2 = pd.read_csv("../classifier_compression/logs/master___2018.07.24-232342/arch_space.csv") df2 df3 = pd.read_csv("../classifier_compression/logs/master___2018.07.24-225916/arch_space.csv") df3 df4 = pd.read_csv("../classifier_compression/logs/master___2018.07.25-205658/arch_space.csv") # + def add_results(df, plt, cmap): # create data x = df['MACs'].tolist() y = df['Top1'].tolist() z = df['NNZ'].tolist() z = [n/30000 for n in z] plt.scatter(x, y, s=z, c=x, cmap=cmap, alpha=0.4, edgecolors="black", linewidth=2) # Change color with c and alpha. I map the color to the X axis value. plt.figure(figsize=(20,10)) add_results(df, plt, cmap="Blues") add_results(df2, plt, cmap="Reds") add_results(df3, plt, cmap="Greens") # Add titles (main and on axis) plt.xlabel("Compute (MACs)") plt.ylabel("Accuracy (Top1)") plt.title("Network Space") plt.show() # + # Change color with c and alpha. I map the color to the X axis value. 
plt.figure(figsize=(20,10)) add_results(df4, plt, cmap="Oranges") # Add titles (main and on axis) plt.xlabel("Compute (MACs)") plt.ylabel("Accuracy (Top1)") plt.title("Network Space") plt.show() # - df4 # Relative import of code from distiller, w/o installing the package import os import sys import torch module_path = os.path.abspath(os.path.join('..', '..')) if module_path not in sys.path: sys.path.append(module_path) import distiller import models import apputils # + dataset = 'cifar10' dummy_input = torch.randn(1, 3, 32, 32) arch = 'vgg16_cifar' checkpoint_file = "../classifier_compression/logs/master___2018.07.25-205658/BEST_adc_episode_044_checkpoint.pth.tar" model = models.create_model(pretrained=False, dataset=dataset, arch=arch) apputils.load_checkpoint(model, checkpoint_file); # - t = distiller.weights_sparsity_summary(model) t # + checkpoint_file = "../classifier_compression/logs/master___2018.07.25-205658/BEST_adc_episode_008_checkpoint.pth.tar" model = models.create_model(pretrained=False, dataset=dataset, arch=arch) apputils.load_checkpoint(model, checkpoint_file); distiller.weights_sparsity_summary(model) # -
examples/automated_deep_compression/comparing_model_spaces.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np # ## Exercise 1 # # #### Write code to create a 4x3 matrix with values ranging from 2 to 13. # ## Exercise 2 # # #### Write code to replace the odd numbers by -1 in the following array. ar = np.arange(10) ar # ## Exercise 3 # # #### Perform the following operations on an array of mobile phones prices 6999, 7500, 11999, 27899, 14999, 9999. # # ##### 1. Create a 1d-array of mobile phones prices # #### 2. Convert this array to float type # #### 3. Append a new mobile having price of 13999 Rs. to this array # #### 4. Reverse this array of mobile phones prices # #### 5. Apply GST of 18% on mobile phones prices and update this array. # #### 6. Sort the array in descending order of price # #### 7. What is the average mobile phone price # #### 8. What is the difference b/w maximum and minimum price
04 Numpy and Pandas/NumPy/Exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.8 64-bit (''base'': conda)' # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # + x = np.array((2,5,7,2,3,5,1,7,8,4)) y = np.random.randn(10) func_fit = np.polyfit(x, y, deg=1) # - plt.plot(x, y, '.') plt.plot(x, np.polyval(func_fit, x), 'r') plt.show() from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn import metrics import pandas as pd # + person = { 'finance': (1,4,3,5,1,6,1,7,7,2,4,7,3,9,9), 'management': (6,8,2,2,6,1,2,1,1,8,9,2,2,4,1), 'logistic': (1,4,1,1,5,7,3,9,9,8,5,3,3,1,6), 'get_work': (1,4,2,6,6,7,3,3,2,7,9,1,2,2,7)} df = pd.DataFrame(person, columns=['finance', 'management', 'logistic', 'get_work']) df # - X = df[['finance','management','logistic']] y = df['get_work'] y X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42) # + from sklearn import preprocessing scaler = preprocessing.StandardScaler().fit(X_train) X_train = scaler.transform(X_train) X_train # - X_test = np.array(X_test) X_test y_train = np.array(y_train) y_train # + lr = LogisticRegression() lr.fit(X_train, y_train) y_prediction = lr.predict(X_test) y_prediction # + import pandas as pd import seaborn as sn conf_mat = pd.crosstab(y_test, y_prediction, rownames=['True'], colnames=['False']) sn.heatmap(conf_mat, annot=True) plt.show() # - y_test = np.array(y_test) y_test metrics.accuracy_score(y_test, y_prediction)
Courses/Introduction to Data Science A-Z/08_ml.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.display import clear_output from six.moves import urllib import tensorflow.compat.v2.feature_column as fc import tensorflow as tf # - dftrain = pd.read_csv('http://localhost:8889/edit/Downloads/dataexport_20201208T122342.csv') dfeveal = pd.read_csv('http://localhost:8889/edit/Downloads/dataexport_20201208T122136.csv') print(dftrain.head()) y_train = dftrain.pop('predicted') y_eval - dfeval.pop('predicted')
Untitled45.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: widgets-tutorial # language: python # name: widgets-tutorial # --- # # Input Widgets and Geospatial Data Analysis # # In addition to ipywidgets 7, and pythreejs, this example requires GDAL # # **GDAL installation:** # # ```bash # conda install gdal # conda install krb5 # ``` from __future__ import print_function from ipywidgets import Controller, FloatText, HTML, VBox from pythreejs.install import install from traitlets.traitlets import link, dlink # # Terrain modeling? Let us check out the Grand Canyon from pythreejs import * import numpy as np import gdal as gd # + gc_ds = gd.Open('gc_dem.tif') dem = gc_ds.ReadAsArray()[::20, ::20] gt = gc_ds.GetGeoTransform() z = (dem - np.mean(dem)) / 1000 nx, ny = z.shape surf_g = SurfaceGeometry(z=list(z.flat), height_segments=nx - 1, width_segments=ny - 1) surf = Mesh(geometry=surf_g, material=LambertMaterial(map=height_texture(z[::-1], colormap='terrain')), scale=(10, 10, 1)) scene = Scene(children=[AmbientLight(color='#777777'), surf, DirectionalLight(color='white', position=[3, 5, 1], intensity=0.5)]) # - c = PerspectiveCamera(position=[0, 10, 10], up=[0, 0, 1], children=[DirectionalLight(color='white', position=[3, 5, 1], intensity=0.5)], aspect=2) width = 950 height = 950 / c.aspect c.look_at(c.position, (1, 0, 0)) fly_controls = FlyControls(controlling=c) renderer = Renderer(camera=c, scene=scene, width=str(width), height=str(height), controls=[fly_controls]) c.position = [0, 10, 10] c.look_at(c.position, (1, 0, 0)) renderer # ## Adjusting the camera position? c.position = [10, 15, 10] c.look_at(c.position, (1, 0, 0)) # <img src="Flow.svg"></img> # # Sliders? 
# + from ipywidgets import FloatSlider, HBox, VBox x_slider, y_slider, z_slider = (FloatSlider(description='x', min=10.0, max=20.0, orientation='vertical'), FloatSlider(description='y', min=10.0, max=20.0, orientation='vertical'), FloatSlider(description='z', min=10.0, max=20.0, orientation='vertical')) # + def update(change): c.position = [x_slider.value, y_slider.value, z_slider.value] x_slider.observe(update, names=['value']) y_slider.observe(update, names=['value']) z_slider.observe(update, names=['value']) # - HBox([x_slider, y_slider, z_slider, renderer]) pad = Controller() pad pad.axes[1].value # + factor = 10 def affine(constant, factor): return lambda x: constant + factor * x pad.links = [] def setup(): if pad.connected: pad.links.append(dlink((pad.axes[1], 'value'), (fly_controls, 'pitch'), affine(0.0, factor))) pad.links.append(dlink((pad.axes[0], 'value'), (fly_controls, 'roll'), affine(0.0, -factor))) pad.links.append(dlink((pad.axes[3], 'value'), (fly_controls, 'forward_speed'), affine(0.0, 2 * factor))) pad.links.append(dlink((pad.axes[2], 'value'), (fly_controls, 'yaw'), affine(0.0, factor))) if not pad.connected: for l in pad.links: l.unlink() pad.links = [] pad.observe(setup, names=['connected']) setup() # -
notebooks/10.09-flight-sim.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Daftar method untuk manipulasi list # append() untuk menambahkan item di paling akhir # clear() menghapus semua item di list # copy() menduplikasi item # count() menghitung total item berdasrkan spesific value # extends() memasukkan list atau tupple ke list lainnya # index() show position index of value # insert() add value in specified position # pop() remove element by specified position # remove() remove elemen by spesified value # reverse() changing the order list to descending # sort() # - # append function _currencies = ["IDR","USD","JPY"] print("Currencies before appending item : ") print(_currencies) _currencies.append("GBP") print("Currencies after appending item : ") print(_currencies) # clear function _var = [1,2,3,4,5] print("Before clear : ") print(_var) print("After clear : ") print(_var.clear()) # # copy of list _copy = _currencies.copy() print(_currencies) # Sample of count _count = _currencies.count("USD") print(_count) # Sample extended list _currencies.extend(_copy) print(_currencies) # Sample of index print(_currencies.index("JPY")) # Sample of insert(index_number, value) _currencies.insert(5,"TEST") print(_currencies) # Sample of pop(index) _currencies.pop(1) # remove index 1 _currencies.pop(5) print(_currencies) # Sample of remove(value) _currencies.remove("GBP") print(_currencies) # Reverse of list (desc is default) _currencies.reverse() print(_currencies) # Sample of sort() default is asc _currencies.sort() print(_currencies) # Looping in list for x in _currencies : print(x.lower()) newlist = [x for x in _currencies if "GBP" in x] print(newlist)
#01 Python Fundamental/#08_list_method.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import os import time import numpy as np import matplotlib.pyplot as plt #import imageio import sys # %matplotlib notebook # # Goal # Here we want to demonstrate how our ASD-AFM aproach could be applied for experimental AFM data processing. Automated Structure Discovery means: # - identification of structure and chemical content for unknown molecule # - identification of orientations for known molecule. # # First task can be solved for more or less 2D molecules. Nevertheless it starts to feel troubles for 3D molecules. Why? It is partially connected to the nature of AFM method. Terminated by probe particle tip doesn't penetrate inside molecule. In case of bulky molecules AFM data contains only information only about some top layers of atoms. Therefore unknown molecule full chemical content identification could be problematic in some cases. # # Second task is easier for any molecules with known chemical content. As a benchmark for our first iteration of the method, we apply our pretrained CNN model to resolve several distinct adsorption configurations of 1S-camphor on Cu(111) based on low-temperature AFM measurements. # # The 3D experimental image stack is fed into the CNN model and a 2D image descriptor (van der Waals Spheres) is predicted based on this data. This descriptor is then compared via cross-correlation to a set of descriptors calculated directly from atomic coordinates taken from a set of uniformly distributed 500 rotations of 1S-Camphor molecule in vacuum. The best fit gives us a prediction of the molecular configuration corresponding to the original vdW-Spheres descriptor from experimental data. 
# # Experimental AFM data, simulated AFM data for 500 uniformly distributed rotations of 1S-Camphor in vacuum and also predicted from CNN vdW-Spheres image descriptors we take as an input here. Download these from here: https://www.dropbox.com/s/jq68p0u1b9hx1qu/matching_data.tar.gz?dl=0. Extract the archive to repo directory. It will create: # - simulated_configs_data.npz - simulated AFM data and predicted vdW-Spheres descriptor for 500 1S-Camphor's configurations # - simulated_configs_geom - folder with geometries for simulated configurations # - experimental_configs_data - experimental AFM data and predicted vdW-Spheres descriptors # # Experimental 1S-Camphor AFM data # Let's take a look on experimental data # + # plot input AFM data from preloaded to folders .npz files def plot_experiment_AFM(list_of_exp): cols = 10 rows = len(list_of_exp) fig=plt.figure(figsize=(1.8*cols, 1.8*rows)) ax = [] for i,experiment in enumerate(list_of_exp): filename_exp = 'experimental_configs_data/'+str(experiment)+'orient_exp.npz' data= np.load(filename_exp) X_exp=data['X'] print '#'+str(experiment)+' 1S-Champhor experiment '+ 'X.shape:', X_exp.shape for j in range(10): ax.append(fig.add_subplot(rows,cols,i*cols+j+1)) xj = X_exp[0,:,:,j] vmax = xj.max() vmin = xj.min() plt.imshow(xj, cmap='afmhot', origin="lower",vmin=vmin-0.1*(vmax-vmin),vmax=vmax+0.1*(vmax-vmin)) plt.xticks([]) plt.yticks([]) if j == 0: ax[-1].set_ylabel('AFM experiment '+ str(experiment)) plt.show() experiments = [1,3,4,6,7] plot_experiment_AFM(experiments) # - # Even highly trained experts were not able to decipher the molecular structure from these images. 
# +
def plot_experiment_preds(list_of_exp):
    """Show the CNN-predicted vdW-Spheres descriptor for each experiment.

    One column per experiment; reads the 'Y' array from
    'experimental_configs_data/<n>orient_exp.npz' and displays Y_exp[1][0]
    (which appears to be the vdW-Spheres map -- TODO confirm what the other
    indices of 'Y' hold).
    """
    cols = len(list_of_exp)
    rows = 1
    fig=plt.figure(figsize=(2*cols, 2*rows))
    ax = []
    for i,experiment in enumerate(list_of_exp):
        filename_exp = 'experimental_configs_data/'+str(experiment)+'orient_exp.npz'
        data= np.load(filename_exp)
        Y_exp=data['Y']
        ax.append(fig.add_subplot(rows,cols,i+1))
        plt.imshow(Y_exp[1][0], origin="lower")
        ax[-1].set_ylabel('vdW-Spheres')
        ax[-1].set_xlabel('AFM experiment '+ str(experiment))
        plt.xticks([])
        plt.yticks([])
    plt.show()

experiments = [1,3,4,6,7]
plot_experiment_preds(experiments)
# -

# # Matching simulated configurations to experimental configurations
# The best fit gives us a prediction of the molecular configuration corresponding to the original descriptor from experimental data

# +
from utils_matching import *

orientations = [1,3, 4 ,6 ,7]
filename_model = './simulated_configs_data.npz'
for i in range(np.size(orientations)):
    orient = orientations[i]
    filename_exp = 'experimental_configs_data/'+str(orient)+'orient_exp.npz'
    # check if correlation values have been already calculated
    if os.path.isfile('experimental_configs_data/'+str(orient)+'orient_exp_sim_cor_values.csv'):
        print ('Experiment '+str(orient)+ ': correlations file exist')
    else:
        # create_df_model_coef_correls comes from utils_matching (star import
        # above); presumably it writes the *_cor_values.csv file checked for
        # above -- confirm in utils_matching.
        create_df_model_coef_correls(filename_model,filename_exp,orient)
# -

# Let's plot for each 1S-Camphor AFM experiment few simulated configurations with best correlation coefficients:

orientations = [1,3,4,6,7]
num_best = 3 # how many best simulated configurations to plot
filename_model = './simulated_configs_data.npz'
dir_exp = 'experimental_configs_data/' # here we load csv and X Y experiment orientations data
dir_sim_geom = './simulated_configs_geom/'
# Best matches for experiment 1 (orientations[0]) ...
plot_best_match_configs_one(filename_model,dir_sim_geom,dir_exp,orientations[0],num_best)

# ... and for experiment 4 (orientations[2]).
plot_best_match_configs_one(filename_model,dir_sim_geom,dir_exp,orientations[2],num_best)

# Let's plot all experiments matched in one big image:

orientations = [1,3,4,6,7]
num_best = 3 # how many best simulated configurations to plot
filename_model = './simulated_configs_data.npz'
dir_exp = 'experimental_configs_data/' # here we load csv and X Y experiment orientations data
dir_sim_geom = './simulated_configs_geom/'
plot_best_match_configs_all(filename_model,dir_sim_geom,dir_exp,orientations)
experiment_1S-Camphor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Diagnostics for multi-plane with the measured PSF model # ### Configure environment import os os.chdir("/home/hbabcock/Data/storm_analysis/sa_diagnostics/mp_psf_fft") print(os.getcwd()) import storm_analysis.diagnostics.multiplane.settings as settings import storm_analysis.diagnostics.multiplane.configure as configure import storm_analysis.diagnostics.multiplane.make_data as makeData import storm_analysis.diagnostics.multiplane.analyze_data as analyzeData import storm_analysis.diagnostics.multiplane.collate as collate # + settings.photons = [[10, 500], [10, 1000]] print(settings.photons) settings.iterations = 20 settings.n_frames = 10 settings.peak_locations = None settings.psf_size = 30 # - # ### Configure configure.configure("psf_fft", False) # ### Make Data makeData.makeData() # ### Analyze data # %time analyzeData.analyzeData() # ### Collate data collate.collate() # ### Reference results # + active="" # 2018-10-01 # commit 2dabb41ff5a5c1a9b1f0a575c039cf19d2c8e1d8 # # Added 1260 # Added 1260 # Processing test_01 # Using max_distance 200.0 nm for error calcuations. # Processing test_02 # Using max_distance 200.0 nm for error calcuations. # # Analysis Summary: # Processed 2504 localizations in 22.69 seconds, 110.33/sec # Recall 0.58254 # Noise 0.41746 # XYZ Error Standard Deviation (nm): # test_01 31.67 30.13 62.24 # test_02 16.44 16.67 38.23 # # XYZ RMSE Accuracy (nm): # test_01 31.67 30.15 62.34 # test_02 16.44 16.67 38.49
storm_analysis/diagnostics/jpy_notebooks/mp_psf_fft.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Kalman Filter Algorithm # # The Kalman Filter is an algorithm that estimates the state of a stochastic dynamical system at a given time $x_t$ by using noisy measurements $z_t$ of the system. In particular, the Kalman Filter is used when the dynamics of the system can be represented by a linear finite-difference equation of the form # # $$ # x_t = F_t x_{t-1} + B_t u_t + w_t # $$ # # where $F_t$ is the state transition matrix, $u_t$ is a control input vector, $B_t$ is the matrix which controls how the input changes the state, and $w_t$ is process noise. # # Measurements $z_t$ are assumed to be linearly related to the state # # $$ # z_t = H_t x_t + v_t # $$ # # where $H_t$ is the transformation matrix from state to measurement and $v_t$ is the measurement noise. # # The Kalman Filter method assumes that $w_t$ and $v_t$ are Gaussian distributed random variables with zero mean and covariance matrices $Q_t$ and $R_t$ respectively. # # The Kalman Filter algorithm repeatedly estimates the current state based on previous state estimates and current measurements. In particular, the algorithm models the current state's probability distribution as a Gaussian with mean $\hat{x}_{t|t}$ and covariance matrix $\hat{P}_{t|t}$. # # This procedure can be though of in two steps: # # ### Prediction # # First, we would like to estimate the current state $x_t$ based on past predictions, $\hat{x}_{t-1|t-1}, \hat{P}_{t-1|t-1}$. This estimate, which is based purely on the dynamics, we call $\hat{x}_{t|t-1}$. And the estimate for the covariance matrix associated with the state's fluctuations is likewise called $\hat{P}_{t|t-1}$. 
# # Inserting the dynamical equation for $x_t$ into the definitions of mean and covariance, we obtain equations for these two estimates:
#
# $$
# \hat{x}_{t|t-1} = F_t \hat{x}_{t-1|t-1} + B_t u_t \\
# \hat{P}_{t|t-1} = F_t \hat{P}_{t-1|t-1}F_t^T + Q_t.
# $$
#
#
# ### Measurement
#
# Next, we want to incorporate the measurements $z_t$ taken at time $t$ into our estimate for $x_t$ and its covariance matrix. We invert the measurement equation for $z_t$ to solve for $x_t$. This defines a Gaussian probability distribution for $x_t$ based on the measurement at time $t$. By multiplying this by the Gaussian distribution determined from the prediction step, we get a new combined Gaussian with mean and covariance specified by the current measurement $z_t$ (and their covariance $R_t$) and the past predictions $\hat{x}_{t|t-1}$ and $\hat{P}_{t|t-1}$.
#
# This gives us, after some algebraic manipulations, the measurement equations:
#
# $$
# \hat{x}_{t|t} = \hat{x}_{t|t-1} + K_t(z_t - H_t \hat{x}_{t|t-1}) \\
# \hat{P}_{t|t} = \hat{P}_{t|t-1} - K_t H_t \hat{P}_{t|t-1}
# $$
#
# where $K_t$ is the so-called Kalman gain, which has the form
#
# $$
# K_t = \hat{P}_{t|t-1} H_t^T (H_t P_{t|t-1} H_t^T + R_t)^{-1}.
# $$

# ## Kalman Filter code
#
# Here are a few lines of code in python that implement an iteration of the Kalman Filter algorithm.

# +
import numpy as np
from numpy.linalg import inv

def kalman(x_prev, P_prev, z, F, Q, H, R):
    """One Kalman Filter iteration.

    Arguments:
        x_prev, P_prev -- previous state estimate and its covariance
        z              -- measurement at the current time step
        F, Q           -- state transition matrix, process noise covariance
        H, R           -- measurement matrix, measurement noise covariance

    Returns the updated (state estimate, covariance) pair.
    """
    # Prediction step (the x_{t|t-1}, P_{t|t-1} equations above).
    x_pred = np.dot(F, x_prev)
    P_pred = np.dot(F, np.dot(P_prev, np.transpose(F))) + Q

    # Measurement update.  BUG FIX: the gain and the innovation must be
    # computed from the *predicted* quantities x_pred / P_pred, exactly as in
    # the measurement equations derived above; an earlier revision used the
    # previous-step x_prev / P_prev here, which biases the estimate and makes
    # the filter diverge.
    S = np.dot(H, np.dot(P_pred, np.transpose(H))) + R   # innovation covariance
    K = np.dot(P_pred, np.dot(np.transpose(H), inv(S)))  # Kalman gain
    x_new = x_pred + np.dot(K, z - np.dot(H, x_pred))
    P_new = P_pred - np.dot(K, np.dot(H, P_pred))

    return (x_new, P_new)
# -

# ### Example 1: Simple Harmonic Oscillator
#
# Suppose we have a state that is oscillating as an SHO subject to random noise.
# # The SHO differential equation # # $$ # \ddot{x} = -\omega_0^2 x # $$ # # can be written as a set of coupled ODEs # # $$ # \dot{x_1} = x_2 \\ # \dot{x_2} = -\omega_0^2 x_1 # $$ # where $x_1 \equiv x$ and $x_2 \equiv \dot{x}.$ The discretized finite-difference version of the coupled SHO equations is # # $$ # \begin{pmatrix} x_1 \\ x_2 \end{pmatrix}_t = \begin{pmatrix} 1 & \Delta t \\ -\omega_0^2 \Delta t & 1 \end{pmatrix} \begin{pmatrix} x_1 \\ x_2 \end{pmatrix}_{t-1} # $$ # where $\Delta t$ is the time between times steps $t-1$ and $t$. # # Note that any linear ODE can be written as a set of coupled first-order ODEs and discretized. The Kalman Filter can be used to estimate the state of all such systems. # + import matplotlib import matplotlib.pyplot as plt # number of time steps Nt = 1000 # oscillator frequency w0 = 1.0 # discretization time step dt = 0.01 # total time elapsed T = Nt * dt time = np.linspace(0.0, T, Nt) # The initial state x0 = np.array([[0.0],[1.0]]) # The transition matrix F = np.array([[1.0, dt],[-w0*w0*dt, 1.0]]) # The (constant) process noise Qvar = 0.0005 Q = np.zeros((2,2)) Q[0,0] = Qvar Q[1,1] = Qvar # The measurement noise Rvar = 4.0 R = np.zeros((2,2)) R[0,0] = Rvar R[1,1] = Rvar # The actual state x = np.zeros((2,1,Nt)) # The measurements z = np.zeros((2,1,Nt)) # The predictions x_pred = np.zeros((2,1,Nt)) P_pred = np.zeros((2,2,Nt)) # Simulating the dynamics and running the filter x[:,:,0] = x0 x_pred[:,:,0] = x0 P_pred[:,:,0] = R for t in range(1,Nt): x[:,:,t] = np.dot(F, x[:,:,t-1]) + np.sqrt(Qvar) * np.random.randn(2,1) z[:,:,t] = x[:,:,t-1] + np.sqrt(Rvar) * np.random.randn(2,1) (x_pred[:,:,t], P_pred[:,:,t]) = kalman(x_pred[:,:,t-1], P_pred[:,:,t-1], z[:,:,t], F, Q, 1, R) matplotlib.rcParams.update({'font.size': 34}) # Plotting the actual dynamics versus the Kalman Filter predictions (shown with error bars) plt.plot(time, x[0,0,:], 'k', label='actual dynamics', linewidth=5.0) plt.plot(time, z[0,0,:], 'rD', label='measurements', 
markersize=6) plt.errorbar(time, x_pred[0,0,:], yerr=np.sqrt(P_pred[0,0,:]), color='g',ecolor='g', label='predicted dynamics', linewidth=2.5) plt.legend() plt.title('Kalman Filter for Simple Harmonic Oscillator') plt.xlabel('time') plt.ylabel('position') plt.show() # - # ### Example 2: Control of orbital trajectories # # One of the most successful applications of Kalman Filters has been in the control of trajectories for rockets in space flight missions. # # Note that the Kalman Filter itself does not inherently have to do with control theory. As we have described, it simply provides an estimate for a linear dynamic process based on measurements. However, these estimates can be combined with a feedback control system to improve the accuracy and stability of a desired trajectory. # # #### PID control # # For this example, we will use a very simple type of control system that implements feedback. It is called proportional–integral–derivative (PID) control and is frequently implemented by engineers through software and hardware to control various devices. # # The idea is very simple. Suppose we have a dynamic process, whose state takes on a value $x(t)$ at a given time. We want to control the system so that the state takes on the value $y(t)$. We do this by trying to minimize the error $e(t) = x(t)-y(t)$. We want to push $x(t)$ towards $y(t)$ as a function of the error. We achieve this through the control (driving force) $u(t)$ which we apply to the system. # # The three simplest ways to update $x(t)$ is through: # 1. proportional control, which updates the signal due to the error at the current time # $$ # u_P(t) = -K_P e(t) # $$ # 2. integral control, which updates the signal based on the history of previous errors # $$ # u_I(t) = -K_I \int_0^t e(t')dt' # $$ # 3. derivative control, which updates the signal based on the expected increase in the error # $$ # u_D(t) = -K_D \frac{de(t)}{dt} # $$ # where $K_P,K_I,K_D$ are positive coefficients. 
# # Altogether, in discrete time, the PID control equation looks like # $$ # u_t = -K_P (x_t-y_t) - K_I \sum_{t'=0}^t (x_{t'}-y_{t'}) \Delta t - K_D \frac{(x_t-y_t) - (x_{t-1} - y_{t-1})}{\Delta t}. # $$ # # This $u_t$ is what we use in our equation of motion for the system state # $$ # x_t = F_t x_{t-1} + B_t u_t + w_t. # $$ # # #### Orbital dynamics # # Suppose we are a telecommunications company that wants to launch a satellite into geosynchronous orbit. Our satellite is launched by a rocket, which detaches shortly after launch. The satellite has thrusters that propel it in the radial and transverse directions. We also have a measuring device that tells us the satellites position and velocity with some noise. # # Our engineers want to use PID control to guide the satellite into the orbit. The satellite's equations of motion in polar coordinates look like: # # $$ # \ddot{r} + r\dot{\phi}^2 - \frac{u_r}{m} + \frac{GM}{r^2} = 0 \\ # 2\dot{r}\dot{\phi} + r \ddot{\phi} - \frac{u_\phi}{m} = 0. # $$ # # This is a non-linear second-order ODE. # # We can break this up into a system of non-linear first-order ODEs. Then, the equations can be linearized by computing the Jacobian at each time step. Finally, the derivatives can be approximated by finite-differences and we can obtain a finite-difference equation for the satellite's position $(r_t,\phi_t)$ over time. 
# # The resulting linearized finite-difference equation (that I computed) was # # $$ # x_t = F_t x_{t-1} + B_t u_t + w_t # $$ # # where # # $$ # x_t = \begin{pmatrix} # x_1 \\ x_2 \\ x_3 \\ x_4 # \end{pmatrix}_t = \begin{pmatrix} # r \\ \dot{r} \\ \phi \\ \dot{\phi} # \end{pmatrix}_t # $$ # # is the state vector, # # $$ # u_t = \begin{pmatrix} # u_1 \\ u_2 # \end{pmatrix}_t = \begin{pmatrix} # u_r \\ u_\phi # \end{pmatrix}_t # $$ # # is the driving force vector (there are radial and tangential thrusters on the satellite), # # $$ # F_t = # \begin{pmatrix} # 1 & \Delta t & 0 & 0 \\ # \left(-x_4^2 + \frac{2GM}{x_1^3}\right)\Delta t & 1 & 0 & -2x_1 x_4 \Delta t \\ # 0 & 0 & 1 & \Delta t \\ # \left(\frac{2x_2x_4}{x_1^2} - \frac{u_2}{mx_1^2}\right)\Delta t & -\frac{2x_4}{x_1}\Delta t & 0 & 1-\frac{2x_2}{x_1}\Delta t # \end{pmatrix}_{t-1} # $$ # # is the linearized state transition matrix (i.e., the $x_t$-Jacobian), and # # $$ # B_t = # \frac{\Delta t}{m}\begin{pmatrix} # 0 & 0 \\ # 1 & 0 \\ # 0 & 0 \\ # 0 & \frac{1}{x_1} # \end{pmatrix}_t # $$ # # is the linearized control to state matrix (i.e., the $u_t$-Jacobian). # # The filter that uses Jacobian's to linearize non-linear finite-difference equations for the Kalman Filter is called the Extended Kalman Filter. Another non-linear filter is the Unscented Kalman Filter, which does something else, that attempts to preserve the Gaussian properties of the noise. # #### Code # # Below is code I wrote to implement this example. I could not get it to work properly, but I include it for completeness. It could be that I derived the equations of motion incorrectly, or that I picked bad parameter values, or that there's a bug in the code somewhere. 
# + import numpy as np import matplotlib import matplotlib.pyplot as plt # number of time steps Nt = 1000 # Newton's constant G = 6.674e-11 # Mass of satellite m = 2200000.0 # Mass of Earth M = 6.0e24 # Radius of Earth R0 = 6400000.0 # Period of Earth rotation T0 = 24*60*60 # Angular velocity of Earth omega = 2.0*np.pi/T0 # Initial vertical velocity of satellite v0 = 1.0e-2*11000.0 # discretization time step (in seconds) dt = 1.0 # total time elapsed T = Nt * dt # time vector time = np.linspace(dt, dt*Nt, Nt) # number of variables (r,\phi,\dot{r},\dot{\phi}) Nvars = 4 # The initial state vector x0 = np.array([[R0], [v0], [0.0], [omega]]) # The (constant) process noise Q = np.zeros((Nvars,Nvars)) Q[0,0] = 1.0 Q[1,1] = 0.1 Q[2,2] = 0.001 Q[3,3] = 0.001 Q = 1.0e-9*Q # The measurement noise R = 10.0*Q # The state x = np.zeros((Nvars,1,Nt)) # The measurements z = np.zeros((Nvars,1,Nt)) # The controls u = np.zeros((Nvars,1,Nt)) yt = np.array([0.0, 0.0, 0.0, omega]) kP = 0.1 kI = 0.0 #1.0e-9 kD = 0.0 #1.0e-8 integral = 0.0 timeDelay = 0.5*Nt # The predictions x_pred = np.zeros((Nvars,1,Nt)) P_pred = np.zeros((Nvars,Nvars,Nt)) # Simulating the dynamics and running the filter x[:,:,0] = x0 z[:,:,0] = x0 x_pred[:,:,0] = x0 P_pred[:,:,0] = R prev_error = 0.0 for t in range(0,Nt-1): xt = x[:,0,t] x_predt = x_pred[:,0,t] # PID control error = yt - x_predt # P uP = -kP * error # I if t > timeDelay: integral += error * dt uI = -kI * integral else: uI = np.zeros(Nvars) # D if t > 0: uD = -kD * (error - prev_error)/dt prev_error = error else: uD = np.zeros(Nvars) u[:,0,t+1] = uP + uI + uD # We only have two control variables, the others are not used u_r = u[1,0,t+1] u_phi = u[3,0,t+1] # The state Jacobian (linearizes the eqns. 
of motion at time t+1) F = np.array([[1.0, dt, 0.0, 0.0], [-dt*xt[3]*xt[3] + dt*2.0*G*M/(xt[0]*xt[0]*xt[0]), 1.0, 0.0, -dt*2.0*xt[0]*xt[3]], [0.0, 0.0, 1.0, dt], [dt*2.0*xt[1]*xt[3]/(xt[0]*xt[0]) - dt*u_phi/(m*xt[0]*xt[0]), -dt*2.0*xt[3]/xt[0], 0.0, 1.0-dt*2.0*xt[1]/xt[0]]]) # The control Jacobian B = np.zeros((Nvars,Nvars)) B[1,1] = dt/m * u_r B[3,3] = dt/m * u_phi / xt[0] # State and measurement updates x[:,:,t+1] = np.dot(F, x[:,:,t]) + np.dot(B, u[:,:,t+1]) + np.dot(np.sqrt(Q), np.random.randn(Nvars,1)) z[:,:,t+1] = x[:,:,t+1] + np.dot(np.sqrt(R), np.random.randn(Nvars,1)) # Kalman filter state estimation (x_pred[:,:,t+1], P_pred[:,:,t+1]) = kalman(x_pred[:,:,t], P_pred[:,:,t], z[:,:,t+1], F, Q, 1, R) #print error matplotlib.rcParams.update({'font.size': 34}) # Plotting the actual dynamics versus the Kalman Filter predictions (shown with error bars) plt.figure(0) plt.plot(time, (x[0,0,:])/R0, 'k', label='actual dynamics', linewidth=5.0) plt.plot(time, (z[0,0,:])/R0, 'rD', label='measurements', markersize=6) plt.errorbar(time, (x_pred[0,0,:])/R0, yerr=np.sqrt(P_pred[0,0,:])/R0, color='g',ecolor='g', label='predicted dynamics', linewidth=2.5) plt.legend() plt.title('Kalman Filter for Orbital Trajectory') plt.xlabel('time') plt.ylabel('$r/R_E$') # Plotting the actual dynamics versus the Kalman Filter predictions (shown with error bars) plt.figure(1) plt.plot(time, x[1,0,:], 'k', label='actual dynamics', linewidth=5.0) plt.plot(time, z[1,0,:], 'rD', label='measurements', markersize=6) plt.errorbar(time, x_pred[1,0,:], yerr=np.sqrt(P_pred[1,0,:]), color='g',ecolor='g', label='predicted dynamics', linewidth=2.5) plt.legend() plt.title('Kalman Filter for Orbital Trajectory') plt.xlabel('time') plt.ylabel('$v_r$') plt.show() # - # ## Drawbacks to the Kalman Filter # # * You need to have knowledge (or good estimates) for the covariance matrices $R_t$ and $Q_t$. 
While it may be possible to quantify the noise in your measurement devices, it might be quite difficult to determine the process noise. For physical processes, it could for example be noise due to Brownian motion, which depends on temperature. # * The Kalman Filter assumes Gaussian distributed noise. This assumption is what determined the measurement equations, and the form of the Kalman gain. Strongly non-Gaussian noise would likely cause the filter to perform very poorly. #
assets/notebooks/KalmanFilter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Vignette for WHO_GHO_API_client package # # ### by <NAME> # + # ==============================================================+ # Author : <NAME> | # Class : MODERN DATA STRUCTURES (G5072) | # Assignment: Final Project | # Date : 12/10/2021 - 18/01/2022 | # ==============================================================+ # - # ## API client for WHO GHO OData API # ***** # ## Importing packages and dependencies import os import requests import json import pandas as pd import re import numpy as np # ## Introduction # #### Data description # The World Health Organization Global Health Observatory hosts a database containing global health indicator measurements and statistics content, and the GHO OData API provides a query interface. # API documentation: [https://www.who.int/data/gho/info/gho-odata-api](https://www.who.int/data/gho/info/gho-odata-api) # Base URL: [https://ghoapi.azureedge.net/api/](https://ghoapi.azureedge.net/api/) # ## Overview of the WHO_GHO_API_client package functions: # # The database contains measurement for indicators of health (eg. immunization coverage, air pollution attributable deaths), as well as a plethora of dimensions in which these indicators are measured (eg. age, sex, country/region, income). # To make it easier to obtain data without directly interacting with the API, this package aims to provide a API wrapper/client that allows the user to view and search indicators and dimensions available in the database, select the dimensions and indicators they are interested in, and easily download the data they need in form of a filtered pandas dataframe, which can be exported as csv files if needed. 
# The package will be broken down into the following 5 main and accessory functions, and executed accordingly: # # * `get_indicators()`: returns all possible indicators of global health in the database, with the option of implementing a filtered search on the indicators names retrieved. # # * `get_records()`: retrieves indicator records based on selected dimension filters, which depends on # - `query_parser()`: Allows user to specify dimensions and indicators they would like to retrieve from the database in the arguments, and parse them into the format that could be submitted via a get request. # # * `get_dimensions()`: returns all possible dimensions of measurement in the database, with the option of implementing a regex search on the dimensions retrieved by Function 3. # # * `get_dimension_values()`: implement a regex search on the values of a specific dimension. # # ## Package functions # #### `get_indicators()`: Retrieve dataframe of indicators codes and description, and search through indicator names. def get_indicators(IndicatorName = 'all'): """ Retrieves by default a dataframe of indicators codes and description available on the GHO database, and provides a simple functionality to filter for indicator names of interest. The search function is case-insensitive. Returns a dash symbol when no records are found. Parameters ---------- IndicatorName : str A search word which we want to retrieve relevant indicators for. Returns ------- pandas.core.frame.DataFrame A dataframe containing the requested indicators recorded in the GHO database. Consists of indicator codes and descriptions. Examples -------- >>> get_indicators(IndicatorName = 'female') returns a dataframe of all indicators containing "female" in the description. 
""" if IndicatorName == 'all': url = "https://ghoapi.azureedge.net/api/Indicator" elif IndicatorName != 'all': url = "https://ghoapi.azureedge.net/api/Indicator?$filter=contains(IndicatorName," + "'{}'".format(IndicatorName) + ")" r = requests.get(url) indicators = r.json() return pd.DataFrame(indicators['value']) # ### Demonstration of `get_indicators()` function # Getting all indicators in the database. get_indicators() # Search for indicators containing "FEMALE" in its description. get_indicators(IndicatorName = 'female') # If the search does not yield any valid indicators within the database, a dash will be returned. get_indicators(IndicatorName = 'jibberish') # #### `get_records()`: Return dataframe of records for indicators, with filters implemented and summary of recorded dimensions (with query parsing by Function 4): # # * includes optional argument whether to output final dataframe as csv. # * generates a summary of geographical, temporal information plus dimensions recorded for this indicator. def get_records(indicator_code = "AIR_11", spatial_dimension = '', country = '', temporal_dimension = '', year = '', filter_1 = '', filter_1_value = '', filter_2 = '', filter_2_value = '', filter_3 = '', filter_3_value = '', summary = True, to_csv = True, csv_name = "output.csv"): """ Returns a dataframe of records for an indicator of choice, optionally with geographical, temporal and demographical filters implemented. Allows for the option to generate a summary of recorded dimensions for the indicator. The pulled dataframe can also be saved to csv. Parameters ---------- indicator_code : str The GHO database unique identifier of the indicator we want to obtain records for. Run get_indicators() function and refer to the IndicatorCode column. spatial_dimension : str (Optional) If the chosen indicator has records for more than one type of spatial dimension (eg. records for a single country and continent), this argument can be used to filter records for the desired one. 
country : str (Optional) If the chosen indicators has records on a country level, this argument can be used to filter records for a country of interest. temporal_dimension : str (Optional) If the chosen indicator has records for more than one type of temporal dimension (eg. month of the year vs. entire year), this argument can be used to filter records for the desired one. year : str (Optional) If the chosen indicators has records on a yearly basis, this argument can be used to filter for a year of interest. filter_1/filter_2/filter_3 : str (Optional) Additional demographical dimensions to filter indicator records by. Refer to full indicator dataframe or summary for filtering options. filter_1/filter_2/filter_3 value : str (Optional) Dimension values to filter indicator records by. Refer to full indicator dataframe or summary for filtering options. summary : bool If true, generates summary dictionary of the recorded dimensions for the indicator of choice. to_csv : bool If true, saves the resultant indicator entries dataframe to a local csv file. csv_name : str Name for output csv file. Returns ------- pandas.core.frame.DataFrame Containing the requested indicator entries. dict Summarizing the values in each column of the output dataframe. Provides a overview of what dimensions are measured for a certain indicator, to use as guidance for further filtering. csv Output dataframe saved locally. Examples -------- >>> get_records(indicator_code = 'AIR_11', to_csv = True, csv_name = 'Household air pollution attributable deaths.csv', country = "USA") Returns a dataframe and csv file with entries for the AIR_11 indicator, filtered for records about the USA only. 
""" # Calling the query_parser function to format a query url using submitted arguments try: url = query_parser(IndicatorCode = indicator_code, SpatialDimType = spatial_dimension, SpatialDim = country, TimeDimType = temporal_dimension, TimeDim = year, Dim1Type = filter_1, Dim1 = filter_1_value, Dim2Type = filter_2, Dim2 = filter_2_value, Dim3Type = filter_3, Dim3 = filter_3_value) records = requests.get(url) r_df = pd.DataFrame(records.json()['value']) except ValueError: print("Invalid search criteria provided, please check arguments and try again.") return # generate a dictionary summarizing the recorded dimensions for this indicator for user consideration if summary == True and r_df.shape != (0,0): desired_columns = ['IndicatorCode', 'SpatialDimType', 'SpatialDim', 'TimeDimType', 'TimeDim', 'Dim1Type', 'Dim1', 'Dim2Type', 'Dim2', 'Dim3Type', 'Dim3'] objects = r_df[desired_columns] summary_dict = {} for (colname, data) in objects.iteritems(): summary_dict[colname] = data.unique() elif summary == True and r_df.shape == (0,0): summary_dict = {} raise Exception("0 entries matching search criteria, please adjust and try again.") return # generate output file if to_csv == True and r_df.shape != (0,0): r_df.to_csv(csv_name) return r_df, summary_dict # ### Demonstration of the `get_records()` function # Here we get all records for the indicator AIR_11, which generates a data container containing a summary of the dimensions recorded for this indicator, as well as returns a dataframe of all entries. data_container = get_records(indicator_code = 'AIR_11', to_csv = True, csv_name = 'Household air pollution attributable deaths.csv') data_container[1] data_container[0] # Using the information in the previous summary object, we can refine our filtering for AIR11 for the US only. 
data_container2 = get_records(indicator_code = 'AIR_11', to_csv = True, csv_name = 'Household air pollution attributable deaths.csv', country = "USA")

data_container2[1]

data_container2[0]

# If no entries match the given search criteria for the indicator, an Exception is raised.

data_container3 = get_records(indicator_code = 'AIR_11', to_csv = True, csv_name = 'Household air pollution attributable deaths.csv', year = 2013)

# When invalid arguments are submitted to the function and the request does not return a valid JSON response, we use a try-except clause to catch this error and output a helper message.

wronginfo = get_records(indicator_code = 'jibberish', to_csv = True, csv_name = 'Household air pollution attributable deaths.csv', year = 2013, country = "CHL")

# #### `query_parser()`: Return full dataframe for indicator, with filters implemented
#
# The function parses filtering criteria into the format recognized by the API, and then is submitted as the get request. This could have been done with two options:
#
# - Option 1: filter using built-in parsing of API (example below) - saves space locally
# - Option 2: filter locally after Function 2 retrieves dataframe for indicator - allows preliminary inspection before filtering
#
# We chose Option 1 for its notable benefit of saving space locally. If the user already knows exactly what data points they require, they are able to just pull exactly what they need without redundancy. We can also cover functionality of Option 2 by just requesting for the entire dataframe for an indicator without filtering, if preliminary inspection is needed.


def query_parser(IndicatorCode = 'WHOSIS_000001', SpatialDimType = '', SpatialDim = '',
                 TimeDimType = '', TimeDim = '', Dim1Type = '', Dim1 = '',
                 Dim2Type = '', Dim2 = '', Dim3Type = '', Dim3 = ''):
    """
    Accessory function of get_records(). With user-defined geographical, temporal
    and demographical filters, a request query is parsed for the desired indicator.

    All arguments are taken from those submitted to get_records().

    Parameters
    ----------
    IndicatorCode : str
        The GHO database unique identifier for which we want to query.
    SpatialDimType : str
        If the chosen indicator has records for more than one type of spatial
        dimension (eg. records for a single country and continent), this argument
        can be used to filter records for the desired one.
    SpatialDim : str
        If the chosen spatial dimension has more than one unique value, this
        argument can be used to filter records for the desired one.
    TimeDimType : str
        If the chosen indicator has records for more than one type of temporal
        dimension (eg. month of the year vs. entire year), this argument can be
        used to filter records for the desired one.
    TimeDim : str
        If the chosen temporal dimension has more than one unique value, this
        argument can be used to filter records for the desired one.
    Dim1/Dim2/Dim3Type : str
        Additional demographical dimensions to filter indicator records by.
        Refer to full indicator dataframe or summary for filtering options.
    Dim1/Dim2/Dim3 : str
        Dimension values to filter indicator records by. Refer to full indicator
        dataframe or summary for filtering options.

    Returns
    -------
    str
        A parsed url used to query the database and obtain the data entries of
        interest from the GHO database.

    Examples
    --------
    >>> query_parser(IndicatorCode = 'WHOSIS_000001', SpatialDimType = 'Region', TimeDimType = 'year', Dim1 = 'WQ1')
    "https://ghoapi.azureedge.net/api/WHOSIS_000001?$filter=SpatialDimType eq 'Region' and TimeDimType eq 'year' and Dim1 eq 'WQ1'"
    """
    # Capture the parameters FIRST, before any locals are created, so the
    # dict holds only the caller-supplied filter arguments.
    d = locals()
    # Keep only filters the caller actually set (empty strings are dropped).
    given_filters = {key: value for key, value in d.items() if value}

    # Base endpoint for the indicator; built unconditionally so the function
    # never returns an unbound name (the old version raised NameError when
    # IndicatorCode was empty).
    parsed_request_url = "https://ghoapi.azureedge.net/api/" + IndicatorCode

    # IndicatorCode lives in the path, not in the $filter clause.
    given_filters.pop("IndicatorCode", None)

    if given_filters:
        clauses = []
        for key, value in given_filters.items():
            if key == "TimeDim":
                # TimeDim is numeric in the OData schema: no quoting.
                clauses.append("{} eq {}".format(key, value))
            else:
                clauses.append("{} eq '{}'".format(key, value))
        # Join with the OData 'and' separator. NOTE: the previous
        # implementation used filters.rstrip(' and '), which strips the
        # CHARACTER SET {' ', 'a', 'n', 'd'} and could eat the tail of the
        # last filter value; str.join cannot corrupt the clause.
        parsed_request_url += '?$filter=' + ' and '.join(clauses)

    return parsed_request_url

# ### Demonstration of the `query_parser()` function
#
# Returns a properly formatted search query that can be interpreted by the GHO OData API server. When the parsed url is submitted we can see the desired datapoints are pulled to local.

query_parser(IndicatorCode = 'WHOSIS_000001', SpatialDimType = 'Region', TimeDimType = 'year', Dim1 = 'WQ1')

test_url = query_parser(IndicatorCode = 'vdpt', SpatialDimType = 'country', TimeDimType = 'year', Dim1 = 'WQ1')
test1 = requests.get(test_url)
pd.DataFrame(test1.json()['value'])

test2_url = query_parser(IndicatorCode = 'DEVICES23', SpatialDimType = 'country', TimeDimType = 'year', TimeDim = "2010")
test2 = requests.get(test2_url)
pd.DataFrame(test2.json()['value'])

# When there are no records that match the queried criteria, a dash is returned.

test3_url = query_parser(IndicatorCode = 'DEVICES23', SpatialDimType = 'country', TimeDimType = 'year', Dim1 = 'wq1')
test3 = requests.get(test3_url)
print(test3.status_code)
pd.DataFrame(test3.json()['value'])

# #### `search_dimensions()`: Retrieving and searching types of dimensions
#
# Each indicator has a series of dimensions that tells us where, when and from whom the data entries were collected from.
# `search_dimensions()` allows us to see and search through dimensions available in the dataset. This is helpful for a preliminary understanding of what data entries are present in the database.
#
# It pulls a dataframe of all dimensions recorded in the database. A local search is implemented on the pulled dataframe, since the GHO API does not have this as a built-in functionality.


def search_dimensions(search_for = 'all'):
    """
    Retrieves a dataframe of all dimensions recorded in the GHO database.
    A local, case-insensitive keyword search can be optionally implemented,
    to locate dimensions of interest.

    Parameters
    ----------
    search_for : str
        A searchword specifying dimensions of interest. Used to filter for
        dimension codes containing the searchword.

    Returns
    -------
    pandas.core.frame.DataFrame
        Containing all dimension codes and descriptions recorded in the
        database, or alternatively fitting the search criteria.

    Raises
    ------
    Exception
        If no dimension code contains the searchword.

    Examples
    --------
    >>> search_dimensions()
    Retrieves all dimensions recorded in the database.
    >>> search_dimensions(search_for = "type")
    Retrieves dimensions in the database that contain the word "type" in their code.
    """
    r = requests.get('https://ghoapi.azureedge.net/api/Dimension')
    dimensions = pd.DataFrame(r.json()['value'])
    if search_for == 'all':
        return dimensions
    # Dimension codes are upper-case in the GHO schema, so the searchword is
    # upper-cased to make the match effectively case-insensitive.
    dimensions = dimensions[dimensions.Code.str.contains(search_for.upper())]
    # .empty is robust to the number of columns the API returns; the previous
    # check (shape == (0, 2)) silently stopped working if a column was added.
    if dimensions.empty:
        raise Exception('No matching dimensions found, please adjust your search.')
    return dimensions

# ### Demonstration of `search_dimensions()` function
#
# A simple call with no arguments gives all dimensions recorded in the database.

search_dimensions()

# By entering the keyword "type", we retrieve dimensions that contain this word in their code. We can see that there are probably records about substance and alcohol abuse, as well as possibly records about driving and motor road usage.
search_dimensions(search_for = 'type')

# If there are no dimensions matching the search, an Exception is raised.

search_dimensions(search_for = 'jibberish')

# #### `get_dimension_values()`: Retrieve a specific dimension and search for values within

# Allows user to inspect and search through values of a specific dimension.


def get_dimension_values(dimension_code = 'YEAR', search_for = ''):
    """
    Allows user to inspect, and run a case-insensitive search through, the
    values of a specific dimension.

    Parameters
    ----------
    dimension_code : str
        The GHO database unique identifier for each dimension.
    search_for : str
        A searchword specifying dimension values of interest. This search
        applies to all elements in the dimension values dataframe.

    Returns
    -------
    pandas.core.frame.DataFrame
        Containing all existing values and descriptions for a particular
        dimension, or alternatively values that fit the search criteria.

    Raises
    ------
    Exception
        If a searchword is given and no row matches it.

    Examples
    --------
    >>> get_dimension_values(dimension_code = "country")
    Retrieves all countries with records in the GHO database.
    >>> get_dimension_values(dimension_code = "country", search_for = "africa")
    Retrieves all African countries with records in the GHO database.
    """
    # Dimension codes are upper-case in the endpoint path.
    url = "https://ghoapi.azureedge.net/api/DIMENSION/" + dimension_code.upper() + "/DimensionValues"
    dimension_values = requests.get(url)
    dimension_values_df = pd.DataFrame(dimension_values.json()['value'])
    if search_for != '':
        # Row-wise, case-insensitive substring match across every column
        # (values are cast to str so numeric columns are searchable too).
        result = dimension_values_df.apply(
            lambda row: row.astype(str).str.contains(search_for, na = False, flags=re.IGNORECASE).any(),
            axis=1)
        dimension_values_df = dimension_values_df.loc[result]
        # .empty is robust to the number of columns; the previous check
        # (shape == (0, 6)) broke as soon as the API changed its schema.
        if dimension_values_df.empty:
            raise Exception('No matching dimension values found, please adjust your search.')
    return dimension_values_df

# ### Demonstration of `get_dimension_values()` function
#
# Here we get all values in the year dimension. Then we use the `search_for` argument to search for dimensions containing "2020".
# All values recorded for the YEAR dimension.
get_dimension_values(dimension_code = 'year')

# Narrow the YEAR dimension down to values containing "2020".
get_dimension_values(dimension_code = 'year', search_for = '2020')

# Similarly, here we see all options of the "country" and "region" dimensions.
#
# The dataframe information then allows us to conduct finer searches on those dimensions: eg. for a particular country, countries within a geographical region (in this case here African countries), as well as European subregions that are recorded in the database.

get_dimension_values(dimension_code = 'country')

get_dimension_values(dimension_code = "country", search_for = "africa")

get_dimension_values(dimension_code = "country", search_for = "Togo")

get_dimension_values(dimension_code = 'region')

get_dimension_values(dimension_code = 'region', search_for = 'europe')

# If invalid arguments are submitted: the function raises an Exception if the search value is not found. If an invalid dimension code is submitted, the API returns a dash.

# Expected to raise: no dimension value matches "jibberish".
get_dimension_values(dimension_code = "country", search_for = "jibberish")

# Invalid dimension code: the API itself answers with a placeholder ("-") payload.
get_dimension_values(dimension_code = "jibberish", search_for = "2020")
Vignette_WHO_GHO_API_client.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Normal lunges V/S Abnormal lungs

# +
# Display an illustrative healthy-vs-unhealthy lungs diagram fetched from the web.
library(EBImage)
img = readImage("https://previews.123rf.com/images/blueringmedia/blueringmedia1802/blueringmedia180200008/96116752-diagram-showing-healthy-and-unhealthy-lungs-illustration.jpg")
display(img, method = "raster")

# +
library(tidyverse)
file_path <- fs::dir_ls("../input/respiratory-sound-database/Respiratory_Sound_Database/Respiratory_Sound_Database/audio_and_txt_files/")
# extracting txt files only to list
#list_of_files
#store the reading data
Data_store <-list()
list_of_files <- list.files(path = "../input/respiratory-sound-database/Respiratory_Sound_Database/Respiratory_Sound_Database/audio_and_txt_files/",
                            recursive = TRUE,
                            pattern = "\\.txt$",
                            full.names = TRUE)
# Read every annotation .txt file (one per recording) into Data_store.
for(i in seq_along(list_of_files)){
  X <- list_of_files[i]
  Data_store[[i]] <- read.table(file = X)
}
# -

# ## Reading the data into one sheet

# ### Reading the txt files

# Stack all per-recording annotation tables into one data frame.
Single_data_store <- do.call("rbind", lapply(Data_store, as.data.frame))

# Extract the leading patient id number from each file name.
res <- list()
for(i in seq_along(list_of_files)){
  res[i] <- as.numeric(gsub(".*?([0-9]+).*", "\\1",list_of_files[i]))
}

# Row count of each per-recording table, used to repeat per-file values.
# NOTE(review): 920 is hard-coded — presumably length(Data_store); confirm.
number_of_rows <- list()
z = 0
for(z in 1:920){
  number_of_rows[z]<-NROW(Data_store[[z]])
}

# + _kg_hide-output=false
# Write the patient id once per annotation row so it can be column-bound later.
# NOTE(review): the file() handle is reopened/closed on every row — slow but functional.
file.remove("./PatientID.txt")
for(i in seq_along(list_of_files)){
  Z <- res[[i]]
  num <- number_of_rows[[i]]
  for(j in seq(from=1, to=num, by=1)) {
    textfile=file.path("./PatientID.txt");
    printer = file(textfile,"a+");
    write(c(Z),textfile,append = TRUE);
    close(printer)
  }
}
# -

PID_store <- read.table(file = './PatientID.txt')
NROW(PID_store)

Single_data_store

# +
# Normal - 0 or Abnormal -1
# Label each breathing cycle: abnormal (1) if crackles (V3) OR wheezes (V4) present.
# NOTE(review): 6898 is hard-coded — presumably nrow(Single_data_store); confirm.
Single_data_store <- Single_data_store%>%
  select(V1,V2,V3,V4)%>%
  mutate(V2-V1)
for(i in 1:6898){
  if(Single_data_store$V3[[i]] == 1){
    Single_data_store$V6[[i]] = 1
  } else
  if(Single_data_store$V4[[i]] == 1){
    Single_data_store$V6[[i]] = 1
  } else{
    Single_data_store$V6[[i]] = 0
  }
}
Single_data_store
# -

Single_data_store <- setNames(Single_data_store, c("Start_rec","End_rec","crackles","wheezes","Start_End","Normal_VS_Abnormal"))

# Attach the patient id column to the annotation rows.
New_1 <- cbind(x = Single_data_store, y = PID_store)

# ### Reading the WAV files

file.remove("./Freq.txt")
# NOTE(review): fileConn is opened here but never used or closed below.
fileConn <- file("./Freq.txt")

# +
library(seewave)
library(tuneR)
library(reshape2)
library(dplyr)
list_of_files_WAV <- list.files(path = "../input/respiratory-sound-database/Respiratory_Sound_Database/Respiratory_Sound_Database/audio_and_txt_files/",
                                recursive = TRUE,
                                pattern = "\\.wav$",
                                full.names = TRUE)
freq <- list()
# For each recording, compute the spectrogram and record the maximum frequency,
# repeated once per annotation row of that recording.
for(i in seq_along(list_of_files_WAV)){
  Y <- list_of_files_WAV[i]
  wav = readWave(Y)
  wave = spectro(wav, plot = F)
  freq = wave$freq
  max = max(freq)
  num <- number_of_rows[[i]]
  for(j in seq(from=1, to=num, by=1)) {
    textfile=file.path("./Freq.txt");
    printer = file(textfile,"a+");
    write(c(max),textfile,append = TRUE);
    close(printer)
  }
}
# -

Freq_store <- read.table(file = './Freq.txt')
NROW(Freq_store)

New_1 <- cbind(x = New_1, y = Freq_store)
New_1<-setNames(New_1 , c("Start_rec","End_rec","crackles","wheezes","Start_End","Normal_VS_Abnormal","ID","Freq"))
New_1

countainer <- read.table('../input/respiratory-sound-database/demographic_info.txt')

# +
# Join age/sex demographics onto each row by patient id.
# Patient 223 has missing demographics and is patched by hand.
Countainer <- countainer%>%
  group_by(V3)%>%
  select(V1,V2,V3)
Countainer_one <-setNames(Countainer, c("ID", "Age","Sex"))
for(i in 1:nrow(New_1)){
  for(j in 1:nrow(Countainer_one)){
    if(New_1$ID[[i]] == Countainer_one$ID[j]){
      if(Countainer_one$ID[j] == 223){
        New_1$Sex[[i]]= 'M'
        New_1$Age[[i]]= 50
      } else{
        New_1$Sex[[i]]= Countainer_one$Sex[j]
        New_1$Age[[i]]= Countainer_one$Age[j]
      }
    } else{
      # NOTE(review): bare `return` at top level is a no-op here (it just
      # evaluates the function object) — likely `next` was intended; confirm.
      return
    }
  }
}
# -

New_1 <- New_1 %>% relocate(Normal_VS_Abnormal, .after = Age)

# Keep the model features: Start_End, ID, Freq, Sex, Age, Normal_VS_Abnormal.
X_df <- New_1[5:10]

# Encode sex as 1/0.
X_df$Sex [X_df$Sex == "M"] <- 1
X_df$Sex [X_df$Sex == "F"] <- 0
X_df$Sex <- as.integer(X_df$Sex)
X_df$Start_End<-as.numeric(X_df$Start_End)
# Coerce the remaining features to numeric and make the label a 0/1 factor.
X_df$Sex<-as.numeric(X_df$Sex)
X_df$Age<-as.numeric(X_df$Age)
X_df$ID<-as.numeric(X_df$ID)
X_df$Freq<-as.numeric(X_df$Freq)
X_df$Normal_VS_Abnormal = factor(X_df$Normal_VS_Abnormal, levels = c(0, 1))

# Stratified 75/25 train/test split (seeded for reproducibility).
library(caTools)
set.seed(123)
split = sample.split(X_df$Normal_VS_Abnormal, SplitRatio = 0.75)
training_set = subset(X_df, split == TRUE)
test_set = subset(X_df, split == FALSE)

# Standardize all feature columns (column 6 is the label Normal_VS_Abnormal).
# NOTE(review): the test set is scaled with its own mean/sd rather than the
# training set's — consider reusing the training parameters; confirm intent.
training_set[-6] = scale(training_set[-6])
test_set[-6] = scale(test_set[-6])

# RBF-kernel support vector classifier.
library(e1071)
classifier_svm = svm(formula = Normal_VS_Abnormal ~ .,
                     data = training_set,
                     type = 'C-classification',
                     kernel = 'radial')

# Random forest with 10 trees.
library(randomForest)
classifier_randomForest = randomForest(x = training_set[-6],
                                       y = training_set$Normal_VS_Abnormal,
                                       ntree = 10)

# Predict on the held-out set with both models.
y_pred_svm = predict(classifier_svm, newdata = test_set[-6])
y_pred_randomForest = predict(classifier_randomForest, newdata = test_set[-6])

# Confusion matrices (actual label vs prediction).
cm_rf = table(test_set[, 6], y_pred_randomForest)
cm_svm = table(test_set[, 6], y_pred_svm)
cm_svm
cm_rf
normal-v-s-abnormal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Two-section compressor performance check: reads design (FD) data and actual
# test (AT) data from 'Modelo_2sections.xlsx' via xlwings, computes similarity
# parameters and converted performance with ccp, and writes results back.
import xlwings as xw
import os
try:
    aux=os.environ['RPprefix']
except:
    # Default REFPROP location when the environment variable is not set.
    os.environ['RPprefix']='C:\\Users\\Public\\REFPROP'
import ccp
from ccp import State, Q_
import numpy as np

wb = xw.Book('Modelo_2sections.xlsx')  # connect to an existing file in the current working directory

FD_sheet=wb.sheets['DataSheet']
AT_sheet=wb.sheets['Actual Test Data']

### Reading and writing SECTION 1 from the FD (design) sheet

Ps_FD = Q_(FD_sheet['T23'].value,'bar')
Ts_FD = Q_(FD_sheet.range('T24').value,'degC')
Pd_FD = Q_(FD_sheet.range('T31').value,'bar')
Td_FD = Q_(FD_sheet.range('T32').value,'degC')

# A blank mass-flow cell means the design point is given as volume flow.
if FD_sheet.range('T21').value==None:
    V_test=True
    flow_v_FD = Q_(FD_sheet.range('T29').value,'m³/h')
else:
    V_test=False
    flow_m_FD = Q_(FD_sheet.range('T21').value,'kg/h')

#flow_m_FD = Q_(FD_sheet.range('T21').value,'kg/h')
#flow_v_FD = Q_(FD_sheet.range('T29').value,'m**3/h')
speed_FD = Q_(FD_sheet.range('T38').value,'rpm')
brake_pow1_FD = Q_(FD_sheet.range('T36').value,'kW')

# Impeller diameter and tip width for section 1.
D = Q_(FD_sheet.range('AB132').value,'mm')
b = Q_(FD_sheet.range('AQ132').value,'mm')

GasesFD = FD_sheet.range('B69:B85').value
mol_fracFD = FD_sheet.range('K69:K85').value

fluid_FD={GasesFD[i] : mol_fracFD[i] for i in range(len(GasesFD))}

sucFD=State.define(fluid=fluid_FD , p=Ps_FD , T=Ts_FD)

# Derive the missing flow quantity from suction density and echo it back.
if V_test:
    flow_m_FD=flow_v_FD*sucFD.rho()
    FD_sheet['AS34'].value=flow_m_FD.to('kg/h').magnitude
    FD_sheet['AQ34'].value='Mass Flow'
    FD_sheet['AU34'].value='kg/h'
else:
    flow_v_FD=flow_m_FD/sucFD.rho()
    FD_sheet['AS34'].value=flow_v_FD.to('m³/h').magnitude
    FD_sheet['AQ34'].value='Inlet Volume Flow'
    FD_sheet['AU34'].value='m³/h'

dischFD=State.define(fluid=fluid_FD , p=Pd_FD , T=Td_FD)

P_FD=ccp.Point(speed=speed_FD,flow_m=flow_m_FD,suc=sucFD,disch=dischFD)
# A second (tiny-flow) point is required so an Impeller can be built.
P_FD_=ccp.Point(speed=speed_FD,flow_m=flow_m_FD*0.001,suc=sucFD,disch=dischFD)

Imp_FD = ccp.Impeller([P_FD,P_FD_],b=b,D=D)

# Section 1 design similarity parameters and performance.
FD_sheet['AS25'].value=Imp_FD._mach(P_FD).magnitude
FD_sheet['AS26'].value=Imp_FD._reynolds(P_FD).magnitude
FD_sheet['AS27'].value=1/P_FD._volume_ratio().magnitude
FD_sheet['AS28'].value=Imp_FD._phi(P_FD).magnitude
FD_sheet['AS29'].value=Imp_FD._psi(P_FD).magnitude
FD_sheet['AS30'].value=Imp_FD._work_input_factor(P_FD).magnitude
FD_sheet['AS32'].value=P_FD._eff_pol_schultz().magnitude
FD_sheet['AS33'].value=P_FD._power_calc().to('kW').magnitude

# Process the side-stream design data.
SS_config = FD_sheet.range('W18').value
Ps2_FD = Pd_FD*0.995  # section-2 suction: section-1 discharge minus 0.5% loss
if SS_config=='IN':
    TSS_FD = Q_(FD_sheet.range('W24').value,'degC')
else:
    TSS_FD = Td_FD
Pd2_FD = Q_(FD_sheet.range('Z31').value,'bar')
Td2_FD = Q_(FD_sheet.range('Z32').value,'degC')

if FD_sheet.range('W21').value==None:
    V_test=True
    flowSS_v_FD = Q_(FD_sheet.range('W29').value,'m³/h')
else:
    V_test=False
    flowSS_m_FD = Q_(FD_sheet.range('W21').value,'kg/h')

brake_pow2_FD = Q_(FD_sheet.range('Z36').value,'kW')
D2 = Q_(FD_sheet.range('AB133').value,'mm')
b2 = Q_(FD_sheet.range('AQ133').value,'mm')

if SS_config=='IN':
    GasesFD = FD_sheet.range('B69:B85').value
    mol_fracSS_FD = FD_sheet.range('N69:N85').value
    fluidSS_FD={GasesFD[i] : mol_fracSS_FD[i] for i in range(len(GasesFD))}
else:
    fluidSS_FD=fluid_FD

SS_FD = State.define(fluid=fluidSS_FD , p=Ps2_FD , T=TSS_FD)

if V_test:
    flowSS_m_FD=flowSS_v_FD*SS_FD.rho()
    FD_sheet['AS36'].value=flowSS_m_FD.to('kg/h').magnitude
    FD_sheet['AQ36'].value='SS Mass Flow'
    FD_sheet['AU36'].value='kg/h'
else:
    flowSS_v_FD=flowSS_m_FD/SS_FD.rho()
    FD_sheet['AS36'].value=flowSS_v_FD.to('m³/h').magnitude
    FD_sheet['AQ36'].value='SS Volume Flow'
    FD_sheet['AU36'].value='m³/h'

# Section-2 suction state: mix the side stream in ('IN') or extract it.
if SS_config=='IN':
    flow2_m_FD=flow_m_FD+flowSS_m_FD
    RSS=flowSS_m_FD/flow2_m_FD
    R1=flow_m_FD/flow2_m_FD
    fluid2_FD={GasesFD[i] : mol_fracSS_FD[i]*RSS+mol_fracFD[i]*R1 for i in range(len(GasesFD))}
    h2_FD=dischFD.h()*R1+SS_FD.h()*RSS  # enthalpy balance of the mixing
    suc2FD=State.define(fluid=fluid2_FD , p=Ps2_FD , h=h2_FD)
    disch2FD=State.define(fluid=fluid2_FD , p=Pd2_FD , T=Td2_FD)
    FD_sheet['AT35'].value=suc2FD.T().to('degC').magnitude
else:
    fluid2_FD=fluid_FD
    flow2_m_FD=flow_m_FD-flowSS_m_FD
    suc2FD=State.define(fluid=fluid2_FD , p=Ps2_FD , T=Td_FD)
    disch2FD=State.define(fluid=fluid2_FD , p=Pd2_FD , T=Td2_FD)
    FD_sheet['AT35'].value=suc2FD.T().to('degC').magnitude

P2_FD=ccp.Point(speed=speed_FD,flow_m=flow2_m_FD,suc=suc2FD,disch=disch2FD)
P2_FD_=ccp.Point(speed=speed_FD,flow_m=flow2_m_FD*0.001,suc=suc2FD,disch=disch2FD)

if V_test:
    FD_sheet['AT34'].value=P2_FD.flow_m.to('kg/h').magnitude
else:
    FD_sheet['AT34'].value=P2_FD.flow_v.to('m³/h').magnitude

Imp2_FD = ccp.Impeller([P2_FD,P2_FD_],b=b2,D=D2)

# Side-stream-to-discharge volume flow ratio at design.
Q1d_FD=flow_m_FD/dischFD.rho()
FD_sheet['AS37'].value=flowSS_v_FD.to('m³/h').magnitude/Q1d_FD.to('m³/h').magnitude

# Section 2 design similarity parameters and performance.
FD_sheet['AT25'].value=Imp2_FD._mach(P2_FD).magnitude
FD_sheet['AT26'].value=Imp2_FD._reynolds(P2_FD).magnitude
FD_sheet['AT27'].value=1/P2_FD._volume_ratio().magnitude
FD_sheet['AT28'].value=Imp2_FD._phi(P2_FD).magnitude
FD_sheet['AT29'].value=Imp2_FD._psi(P2_FD).magnitude
FD_sheet['AT30'].value=Imp2_FD._work_input_factor(P2_FD).magnitude
FD_sheet['AT32'].value=P2_FD._eff_pol_schultz().magnitude
FD_sheet['AT33'].value=P2_FD._power_calc().to('kW').magnitude

FD_sheet['K90'].value=sucFD.molar_mass().to('g/mol').magnitude
FD_sheet['N90'].value=SS_FD.molar_mass().to('g/mol').magnitude

# Design curve ranges for both sections (flow, head, -, efficiency).
Curva=FD_sheet['AP42:AS49']
Curva2=FD_sheet['AP53:AS60']

# Count filled curve rows (first blank flow cell marks the end).
for i in range(10):
    if Curva[i,0].value==None:
        Nc=i
        break

for i in range(10):
    if Curva2[i,0].value==None:
        Nc2=i
        break

Curva=Curva[0:Nc+1,:]
Curva2=Curva2[0:Nc2+1,:]

# Organize the section-1 curve points: append the design point unless its
# flow already exists on the curve, then sort rows by flow.
# NOTE(review): with Nc == 1 the range .value is a scalar and min(abs(...))
# would raise on the 0-d array — presumably curves always have >= 2 rows; confirm.
QFD=np.array(Curva[0:Nc,0].value)

if (Nc>0 and min(abs(QFD-flow_v_FD.to('m³/h').magnitude))==0):
    Gar=[None,None,None,None]
    Curva[Nc,:].value=Gar
else:
    Gar=[flow_v_FD.to('m³/h').magnitude,P_FD._head_pol_schultz().to('kJ/kg').magnitude,
         None,P_FD._eff_pol_schultz().magnitude]
    Curva[Nc,:].value=Gar
    Nc=Nc+1

QFD=np.array(Curva[0:Nc,0].value)
Id=list(np.argsort(QFD))

if len(Id)>1:
    Caux=Curva.value
    for i in range(Nc):
        Curva[i,:].value=Caux[Id[i]][:]

# Organize the section-2 curve points the same way.
QFD2=np.array(Curva2[0:Nc2,0].value)

if (Nc2>0 and min(abs(QFD2-P2_FD.flow_v.to('m³/h').magnitude))==0):
    Gar=[None,None,None,None]
    Curva2[Nc2,:].value=Gar
else:
    Gar=[P2_FD.flow_v.to('m³/h').magnitude,P2_FD._head_pol_schultz().to('kJ/kg').magnitude,
         None,P2_FD._eff_pol_schultz().magnitude]
    Curva2[Nc2,:].value=Gar
    Nc2=Nc2+1

QFD2=np.array(Curva2[0:Nc2,0].value)
Id2=list(np.argsort(QFD2))

if len(Id2)>1:
    Caux2=Curva2.value
    for i in range(Nc2):
        Curva2[i,:].value=Caux2[Id2[i]][:]

### Reading and writing in the Actual Test Data sheet

Dados_AT=AT_sheet['G7:S16']

# Count filled test rows (first blank discharge-temperature cell marks the end).
for i in range(10):
    if Dados_AT[i,5].value==None:
        N=i
        break

Dados_AT=Dados_AT[0:N,:]

speed_AT = Q_(AT_sheet.range('H4').value,AT_sheet.range('I4').value)
N_ratio=speed_FD/speed_AT  # design/test speed ratio for fan-law conversion

GasesT = AT_sheet.range('B4:B20').value
mol_fracT = AT_sheet.range('D4:D20').value

P_AT=[]
P2_AT=[]
fluid_AT={}
for i in range(len(GasesT)):
    if mol_fracT[i]>0:
        fluid_AT.update({GasesT[i]:mol_fracT[i]})

for i in range(N):
    Ps_AT = Q_(Dados_AT[i,2].value,AT_sheet.range('I6').value)
    Ts_AT = Q_(Dados_AT[i,3].value,AT_sheet.range('J6').value)
    Pd_AT = Q_(Dados_AT[i,4].value,AT_sheet.range('K6').value)
    Td_AT = Q_(Dados_AT[i,5].value,AT_sheet.range('L6').value)

    if Dados_AT[i,1].value!=None:
        V_test=True
        flow_v_AT = Q_(Dados_AT[i,1].value,AT_sheet.range('H6').value)
    else:
        V_test=False
        flow_m_AT = Q_(Dados_AT[i,0].value,AT_sheet.range('G6').value)

    sucAT=State.define(fluid=fluid_AT , p=Ps_AT , T=Ts_AT)
    dischAT=State.define(fluid=fluid_AT , p=Pd_AT , T=Td_AT)

    if V_test:
        flow_m_AT=flow_v_AT*sucAT.rho()
        Dados_AT[i,0].value=flow_m_AT.to(AT_sheet['G6'].value).magnitude
    else:
        flow_v_AT=flow_m_AT/sucAT.rho()
        Dados_AT[i,1].value=flow_v_AT.to(AT_sheet['H6'].value).magnitude

    P_AT.append(ccp.Point(speed=speed_AT,flow_m=flow_m_AT,suc=sucAT,disch=dischAT))
    if N==1:
        # Duplicate a tiny-flow point so the Impeller below can be built.
        P_AT.append(ccp.Point(speed=speed_AT,flow_m=flow_m_AT*0.001,suc=sucAT,disch=dischAT))

    # Load second-section test data.
    Ps2_AT = Pd_AT*0.995
    if SS_config=='IN':
        TSS_AT = Q_(Dados_AT[i,8].value,AT_sheet.range('O6').value)
    else:
        TSS_AT = Td_AT
    Pd2_AT = Q_(Dados_AT[i,9].value,AT_sheet.range('P6').value)
    Td2_AT = Q_(Dados_AT[i,10].value,AT_sheet.range('Q6').value)

    if Dados_AT[i,7].value!=None:
        V_test=True
        flowSS_v_AT = Q_(Dados_AT[i,7].value,AT_sheet.range('N6').value)
    else:
        V_test=False
        flowSS_m_AT = Q_(Dados_AT[i,6].value,AT_sheet.range('M6').value)

    fluidSS_AT=fluid_AT
    SS_AT = State.define(fluid=fluidSS_AT , p=Ps2_AT , T=TSS_AT)

    if V_test:
        flowSS_m_AT=flowSS_v_AT*SS_AT.rho()
        Dados_AT[i,6].value=flowSS_m_AT.to(AT_sheet.range('M6').value).magnitude
    else:
        flowSS_v_AT=flowSS_m_AT/SS_AT.rho()
        Dados_AT[i,7].value=flowSS_v_AT.to(AT_sheet.range('N6').value).magnitude

    if SS_config=='IN':
        flow2_m_AT=flow_m_AT+flowSS_m_AT
        RSS=flowSS_m_AT/flow2_m_AT
        R1=flow_m_AT/flow2_m_AT
        fluid2_AT=fluid_AT
        h2_AT=dischAT.h()*R1+SS_AT.h()*RSS
        suc2AT=State.define(fluid=fluid2_AT , p=Ps2_AT , h=h2_AT)
        disch2AT=State.define(fluid=fluid2_AT , p=Pd2_AT , T=Td2_AT)
    else:
        fluid2_AT=fluid_AT
        flow2_m_AT=flow_m_AT-flowSS_m_AT
        suc2AT=State.define(fluid=fluid2_AT , p=Ps2_AT , T=Td_AT)
        # BUGFIX: was `disch2FD=State.define(...)`, which left disch2AT
        # undefined (NameError) or stale when SS_config != 'IN'.
        disch2AT=State.define(fluid=fluid2_AT , p=Pd2_AT , T=Td2_AT)

    Dados_AT[i,11].value=suc2AT.T().to(AT_sheet.range('R6').value).magnitude

    # Side-stream flow ratio normalized by its design value.
    Q1d_AT=flow_m_AT/dischAT.rho()
    Dados_AT[i,12].value=flowSS_v_AT.to('m³/h').magnitude/Q1d_AT.to('m³/h').magnitude/(flowSS_v_FD.to('m³/h').magnitude/Q1d_FD.to('m³/h').magnitude)

    P2_AT.append(ccp.Point(speed=speed_AT,flow_m=flow2_m_AT,suc=suc2AT,disch=disch2AT))
    if N==1:
        P2_AT.append(ccp.Point(speed=speed_AT,flow_m=flow2_m_AT*0.001,suc=suc2AT,disch=disch2AT))

Imp_AT = ccp.Impeller([P_AT[i] for i in range(len(P_AT))],b=b,D=D)
Imp2_AT = ccp.Impeller([P2_AT[i] for i in range(len(P2_AT))],b=b2,D=D2)

speed_AT  # notebook cell echo

# Sort the test rows (and point lists) by the chosen flow column.
if AT_sheet['U3'].value=='Vazão Seção 1':
    QQ=np.array(Dados_AT[:,1].value)
else:
    QQ=[]
    for i in range(N):
        QQ.append(P2_AT[i].flow_v.magnitude)

Id=list(np.argsort(QQ))

if len(Id)>1:
    Daux=Dados_AT.value
    Paux=[P for P in P_AT]
    P2aux=[P for P in P2_AT]
    for i in range(N):
        Dados_AT[i,:].value=Daux[Id[i]][:]
        P_AT[i]=Paux[Id[i]]
        P2_AT[i]=P2aux[Id[i]]

P_ATconv=[]
P2_ATconv=[]

# Clear, then trim, the result areas for both sections.
Results_AT=AT_sheet['G22:AB31']
Results_AT.value=[[None]*len(Results_AT[0,:].value)]*10
Results_AT=Results_AT[0:N,:]

Results2_AT=AT_sheet['G37:AB46']
Results2_AT.value=[[None]*len(Results2_AT[0,:].value)]*10
Results2_AT=Results2_AT[0:N,:]

for i in range(N):
    if AT_sheet['C23'].value=='Yes':
        # Reynolds-number (surface roughness) efficiency correction,
        # applied only near the design flow coefficient (within 4%).
        rug=AT_sheet['D24'].value

        ReAT=Imp_AT._reynolds(P_AT[i])
        Re2AT=Imp2_AT._reynolds(P2_AT[i])
        ReFD=Imp_FD._reynolds(P_FD)
        Re2FD=Imp2_FD._reynolds(P2_FD)

        RCAT=0.988/ReAT**0.243
        RC2AT=0.988/Re2AT**0.243
        RCFD=0.988/ReFD**0.243
        RC2FD=0.988/Re2FD**0.243

        RBAT=np.log(0.000125+13.67/ReAT)/np.log(rug+13.67/ReAT)
        RB2AT=np.log(0.000125+13.67/Re2AT)/np.log(rug+13.67/Re2AT)
        RBFD=np.log(0.000125+13.67/ReFD)/np.log(rug+13.67/ReFD)
        RB2FD=np.log(0.000125+13.67/Re2FD)/np.log(rug+13.67/Re2FD)

        RAAT=0.066+0.934*(4.8e6*b.to('ft').magnitude/ReAT)**RCAT
        RA2AT=0.066+0.934*(4.8e6*b2.to('ft').magnitude/Re2AT)**RC2AT
        RAFD=0.066+0.934*(4.8e6*b.to('ft').magnitude/ReFD)**RCFD
        RA2FD=0.066+0.934*(4.8e6*b2.to('ft').magnitude/Re2FD)**RC2FD

        corr=RAFD/RAAT*RBFD/RBAT
        corr2=RA2FD/RA2AT*RB2FD/RB2AT

        if abs(1-Imp_AT._phi(P_AT[i]).magnitude/Imp_FD._phi(P_FD).magnitude)<0.04:
            eff=1-(1-P_AT[i]._eff_pol_schultz())*corr
        else:
            eff=P_AT[i]._eff_pol_schultz()
        if abs(1-Imp2_AT._phi(P2_AT[i]).magnitude/Imp2_FD._phi(P2_FD).magnitude)<0.04:
            eff2=1-(1-P2_AT[i]._eff_pol_schultz())*corr2
        else:
            eff2=P2_AT[i]._eff_pol_schultz()

        Results_AT[i,21].value=eff.magnitude
        Results2_AT[i,21].value=eff2.magnitude

        P_ATconv.append(ccp.Point(suc=P_FD.suc, eff=eff,
                                  speed=speed_FD,flow_v=P_AT[i].flow_v*N_ratio,
                                  head=P_AT[i]._head_pol_schultz()*N_ratio**2))
        P2_ATconv.append(ccp.Point(suc=P2_FD.suc, eff=eff2,
                                   speed=speed_FD,flow_v=P2_AT[i].flow_v*N_ratio,
                                   head=P2_AT[i]._head_pol_schultz()*N_ratio**2))
    else:
        # No roughness correction: convert with the raw test efficiency.
        P_ATconv.append(ccp.Point(suc=P_FD.suc, eff=P_AT[i]._eff_pol_schultz(),
                                  speed=speed_FD,flow_v=P_AT[i].flow_v*N_ratio,
                                  head=P_AT[i]._head_pol_schultz()*N_ratio**2))
        P2_ATconv.append(ccp.Point(suc=P2_FD.suc, eff=P2_AT[i]._eff_pol_schultz(),
                                   speed=speed_FD,flow_v=P2_AT[i].flow_v*N_ratio,
                                   head=P2_AT[i]._head_pol_schultz()*N_ratio**2))
        # BUGFIX: write the bare number, not the pint Quantity, matching the
        # 'Yes' branch above (was missing .magnitude).
        Results_AT[i,21].value=P_AT[i]._eff_pol_schultz().magnitude
        Results2_AT[i,21].value=P2_AT[i]._eff_pol_schultz().magnitude

    # Write results for Section 1.
    Results_AT[i,0].value=1/P_AT[i]._volume_ratio().magnitude
    Results_AT[i,1].value=1/(P_AT[i]._volume_ratio().magnitude/P_FD._volume_ratio().magnitude)
    Results_AT[i,2].value=Imp_AT._mach(P_AT[i]).magnitude
    Results_AT[i,3].value=Imp_AT._mach(P_AT[i]).magnitude-Imp_FD._mach(P_FD).magnitude
    Results_AT[i,4].value=Imp_AT._reynolds(P_AT[i]).magnitude
    Results_AT[i,5].value=Imp_AT._reynolds(P_AT[i]).magnitude/Imp_FD._reynolds(P_FD).magnitude
    Results_AT[i,6].value=Imp_AT._phi(P_AT[i]).magnitude
    Results_AT[i,7].value=Imp_AT._phi(P_AT[i]).magnitude/Imp_FD._phi(P_FD).magnitude
    Results_AT[i,8].value=Imp_AT._psi(P_AT[i]).magnitude
    Results_AT[i,9].value=Imp_AT._psi(P_AT[i]).magnitude/Imp_FD._psi(P_FD).magnitude
    Results_AT[i,10].value=P_AT[i]._head_pol_schultz().to('kJ/kg').magnitude
    Results_AT[i,11].value=P_AT[i]._head_pol_schultz().to('kJ/kg').magnitude/P_FD._head_pol_schultz().to('kJ/kg').magnitude
    Results_AT[i,12].value=P_ATconv[i]._head_pol_schultz().to('kJ/kg').magnitude
    Results_AT[i,13].value=P_ATconv[i]._head_pol_schultz().to('kJ/kg').magnitude/P_FD._head_pol_schultz().to('kJ/kg').magnitude
    Results_AT[i,14].value=P_ATconv[i].flow_v.to('m³/h').magnitude
    Results_AT[i,15].value=P_ATconv[i].flow_v.to('m³/h').magnitude/P_FD.flow_v.to('m³/h').magnitude
    Results_AT[i,16].value=P_AT[i]._power_calc().to('kW').magnitude
    Results_AT[i,17].value=P_AT[i]._power_calc().to('kW').magnitude/P_FD._power_calc().to('kW').magnitude

    if AT_sheet['C25'].value=='Yes':
        # Heat-loss compensation of the converted power (casing area in D26).
        HL_FD=Q_(((sucFD.T()+dischFD.T()).to('degC').magnitude*0.8/2-25)*1.166*AT_sheet['D26'].value,'W')
        HL_AT=Q_(((P_AT[i].suc.T()+P_AT[i].disch.T()).to('degC').magnitude*0.8/2-25)*1.166*AT_sheet['D26'].value,'W')
        Results_AT[i,18].value=(P_ATconv[i]._power_calc()-HL_AT+HL_FD).to('kW').magnitude
        Results_AT[i,19].value=(P_ATconv[i]._power_calc()-HL_AT+HL_FD).to('kW').magnitude/(P_FD._power_calc()).to('kW').magnitude
    else:
        Results_AT[i,18].value=P_ATconv[i]._power_calc().to('kW').magnitude
        Results_AT[i,19].value=P_ATconv[i]._power_calc().to('kW').magnitude/P_FD._power_calc().to('kW').magnitude

    Results_AT[i,20].value=P_AT[i]._eff_pol_schultz().magnitude

    # Write results for Section 2.
    Results2_AT[i,0].value=1/P2_AT[i]._volume_ratio().magnitude
    Results2_AT[i,1].value=1/(P2_AT[i]._volume_ratio().magnitude/P2_FD._volume_ratio().magnitude)
    Results2_AT[i,2].value=Imp2_AT._mach(P2_AT[i]).magnitude
    Results2_AT[i,3].value=Imp2_AT._mach(P2_AT[i]).magnitude-Imp2_FD._mach(P2_FD).magnitude
    Results2_AT[i,4].value=Imp2_AT._reynolds(P2_AT[i]).magnitude
    Results2_AT[i,5].value=Imp2_AT._reynolds(P2_AT[i]).magnitude/Imp2_FD._reynolds(P2_FD).magnitude
    Results2_AT[i,6].value=Imp2_AT._phi(P2_AT[i]).magnitude
    Results2_AT[i,7].value=Imp2_AT._phi(P2_AT[i]).magnitude/Imp2_FD._phi(P2_FD).magnitude
    Results2_AT[i,8].value=Imp2_AT._psi(P2_AT[i]).magnitude
    Results2_AT[i,9].value=Imp2_AT._psi(P2_AT[i]).magnitude/Imp2_FD._psi(P2_FD).magnitude
    Results2_AT[i,10].value=P2_AT[i]._head_pol_schultz().to('kJ/kg').magnitude
    Results2_AT[i,11].value=P2_AT[i]._head_pol_schultz().to('kJ/kg').magnitude/P2_FD._head_pol_schultz().to('kJ/kg').magnitude
    Results2_AT[i,12].value=P2_ATconv[i]._head_pol_schultz().to('kJ/kg').magnitude
    Results2_AT[i,13].value=P2_ATconv[i]._head_pol_schultz().to('kJ/kg').magnitude/P2_FD._head_pol_schultz().to('kJ/kg').magnitude
    Results2_AT[i,14].value=P2_ATconv[i].flow_v.to('m³/h').magnitude
    Results2_AT[i,15].value=P2_ATconv[i].flow_v.to('m³/h').magnitude/P2_FD.flow_v.to('m³/h').magnitude
    Results2_AT[i,16].value=P2_AT[i]._power_calc().to('kW').magnitude
    Results2_AT[i,17].value=P2_AT[i]._power_calc().to('kW').magnitude/P2_FD._power_calc().to('kW').magnitude

    if AT_sheet['C25'].value=='Yes':
        HL2_FD=Q_(((suc2FD.T()+disch2FD.T()).to('degC').magnitude*0.8/2-25)*1.166*AT_sheet['D28'].value,'W')
        HL2_AT=Q_(((P2_AT[i].suc.T()+P2_AT[i].disch.T()).to('degC').magnitude*0.8/2-25)*1.166*AT_sheet['D28'].value,'W')
        Results2_AT[i,18].value=(P2_ATconv[i]._power_calc()-HL2_AT+HL2_FD).to('kW').magnitude
        Results2_AT[i,19].value=(P2_ATconv[i]._power_calc()-HL2_AT+HL2_FD).to('kW').magnitude/(P2_FD._power_calc()).to('kW').magnitude
    else:
        Results2_AT[i,18].value=P2_ATconv[i]._power_calc().to('kW').magnitude
        Results2_AT[i,19].value=P2_ATconv[i]._power_calc().to('kW').magnitude/P2_FD._power_calc().to('kW').magnitude

    Results2_AT[i,20].value=P2_AT[i]._eff_pol_schultz().magnitude

# Pick the guarantee point(s): rows whose flow coefficient is within 4% of
# design; interpolate between the two closest rows when more than one matches.
Phi=np.abs(1-np.array(Results_AT[0:N,7].value))
Phi2=np.abs(1-np.array(Results2_AT[0:N,7].value))

IdG=[]
IdG2=[]

# With N == 1, Phi/Phi2 are 0-d and indexing raises: the except branch
# handles the scalar case.
for i in range(N):
    try:
        if Phi[i]<0.04:
            IdG.append(i)
    except:
        if Phi<0.04:
            IdG.append(i)
    try:
        if Phi2[i]<0.04:
            IdG2.append(i)
    except:
        if Phi2<0.04:
            IdG2.append(i)

if len(IdG)==1:
    AT_sheet['G32:AB32'].value=Results_AT[IdG[0],:].value
elif len(IdG)>1:
    IdG=[int(k) for k in np.argsort(Phi)[0:2]]
    IdG=sorted(IdG)
    aux1=np.array(Results_AT[IdG[0],:].value)
    aux2=np.array(Results_AT[IdG[1],:].value)
    f=(1-aux1[7])/(aux2[7]-aux1[7])  # linear interpolation on phi ratio
    aux=aux1+f*(aux2-aux1)
    AT_sheet['G32:AB32'].value=aux
else:
    AT_sheet['G32:AB32'].value=[None]*len(Results_AT[0,:].value)

if len(IdG2)==1:
    AT_sheet['G47:AB47'].value=Results2_AT[IdG2[0],:].value
elif len(IdG2)>1:
    IdG2=[int(k) for k in np.argsort(Phi2)[0:2]]
    IdG2=sorted(IdG2)
    aux1=np.array(Results2_AT[IdG2[0],:].value)
    aux2=np.array(Results2_AT[IdG2[1],:].value)
    f=(1-aux1[7])/(aux2[7]-aux1[7])
    aux=aux1+f*(aux2-aux1)
    AT_sheet['G47:AB47'].value=aux
else:
    AT_sheet['G47:AB47'].value=[None]*len(Results2_AT[0,:].value)
# -

# NOTE(review): handle is never closed and shadows the builtin `open` result
# name `file`; purpose unclear (possibly a file lock) — confirm before removing.
file=open('Modelo_2sections.xlsx','r+')
scripts/check_test_2sec.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # K-means clustering

# Iris data : [UCI](http://archive.ics.uci.edu/ml/datasets/Iris)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist, pdist
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

# Load the iris dataset (expects iris.csv in the working directory).
iris = pd.read_csv("iris.csv")
print(iris.head())

# +
# Separate the features from the class label.
x_iris = iris.drop(['class'], axis =1)
y_iris = iris["class"]

# With more data it becomes hard to confirm the right number of clusters by eye.
k_means_fit = KMeans(n_clusters = 3, max_iter=300)
k_means_fit.fit(x_iris)

# Inspect the clustering against the true classes.
print("<K-Means Clustering - Confusion Matrix>\n", pd.crosstab(y_iris, k_means_fit.labels_, rownames = ["Actuall"], colnames = ["Predicted"]))
# -

# Sensitivity analysis: find a suitable number of clusters via silhouette score.
for k in range(2,10) :
    k_means_fitk = KMeans(n_clusters=k, max_iter=300)
    k_means_fitk.fit(x_iris)
    print("for K value", k, ", Silhouette-score : %0.3f" % silhouette_score(x_iris, k_means_fitk.labels_,metric = 'euclidean'))

# **Average within-cluster variance**

# +
# Average within-cluster sum of squares for k = 1..9.
K = range(1,10)
KM = [KMeans(n_clusters=k).fit(x_iris) for k in K]
centroids = [k.cluster_centers_ for k in KM]

# Distance of every sample to every centroid, per k.
D_k = [cdist(x_iris, centrds, 'euclidean') for centrds in centroids]
# Index of the nearest centroid and distance to it, per sample.
cIdx = [np.argmin(D, axis = 1) for D in D_k]
dist = [np.min(D, axis = 1) for D in D_k]
avgWithinSS = [sum(d)/x_iris.shape[0] for d in dist]
# -

# Total within-cluster sum of squares; between-cluster SS = total SS - WCSS.
wcss = [sum(d**2) for d in dist]
tss = sum(pdist(x_iris)**2)/x_iris.shape[0]
bss = tss-wcss

# Elbow curve (average within-cluster sum of squares).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(K, avgWithinSS, 'b*-')
plt.grid(True)
plt.xlabel('Number of clusters')
plt.ylabel('Average within-cluster sum of squares')
plt.title('Elbow for KMeans clustering')

# Elbow curve (percentage of variance explained; look for > 80%).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(K, bss/tss*100, 'b*-')
plt.grid(True)
plt.xlabel('Number of clusters') # 3 is ideal
plt.ylabel('Percentage of variance explained')
plt.title('Elbow for KMeans clustering')
Unsupervised Learning/K-means clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np class Melanie_Diamond(): def loves(person): if person=='Gabe': return True class Optimizer(Melanie_Diamond): #Fuck <NAME> def __init__(self,num_neurons=50,alpha=3): self.P_nis = 1/alpha * np.eye(num_neurons) def update(self,weights,state,target): self.P_nis -= ( (self.P_nis @ state) @ (state.T @ self.P_nis) / 1 + state.T @ self.P_nis @ state) weights -= (weights.T @ state - target) * (self.P_nis @ state) def Target(x): return np.sin(x) # + N=50 optim = Optimizer(num_neurons=N,alpha=3) for i in range(1000): optim.update(np.random.rand(N),np.random.rand(N),1) # - np.ones((5,5)) @ np.ones((5,5)) # # %matplotlib notebook import matplotlib.pyplot as plt from matplotlib.pyplot import figure as ff import numpy as np from IPython.core.debugger import set_trace as bp # + import numpy as np def Target(x): #The function we're trying to fit scale = 2*np.pi/10 return np.sin(scale*x) class Optimizer(): def __init__(self,num_neurons=50,alpha=3,dt=0.01): self.P = 1/alpha * np.eye(num_neurons) self.alpha = alpha self.eta = 2e-3 self.gamma = 2 self.dt = dt def update(self,weights,state,target): # error = (weights.T @ state - target) # weights -= error * (self.P @ state) / (1 + state.T @ self.P @ state) # self.P -= ( (self.P @ state) @ (state.T @ self.P) # / (1 + state.T @ self.P @ state) ) error = (weights.T @ state - target) weights -= self.eta * error * state self.eta += self.dt * self.eta * ( -self.eta + np.abs(error)**self.gamma ) return weights, error class Model(): def __init__(self, n, tau = 1000000, t_unroll = 1, t_epoch = 5, init_scale = None, alpha=3, nl = 'tanh',bias=False): self.t = 0 self.dt = 0.01 self.epoch_steps = 2 self.tau = tau self.t_unroll = t_unroll self.t_epoch = t_epoch self.n = n; self.nl = nl if init_scale is None: 
self.init_scale = 1.5 / np.sqrt(n) else: self.init_scale = init_scale; p_sparse = 0.1 self.w = np.zeros([self.n,self.n]); self.w = np.random.randn(len(self.w),len(self.w)) * self.init_scale; # self.w *= (np.random.rand(self.w.shape[0],self.w.shape[1]) < p_sparse).astype(float) self.w_out = np.zeros(n) self.w_rec = np.random.randn(n) if nl == 'tanh': self.x = np.tanh(np.random.randn(len(self.w))) self.r = np.tanh(self.x) if not bias: self.b = np.zeros(len(self.w)); else: self.b = np.random.randn(len(self.w)) * self.init_scale; self.optim = Optimizer(n,alpha=alpha,dt=self.dt) self.z = 0 def single_step(self): self.x += self.dt * ( -self.x + self.w @ np.tanh(self.x) + self.w_rec * self.z ) self.r = np.tanh(self.x) self.z = self.w_out @ self.r self.t += self.dt def train_step(self,p=False): xs = [] self.z = 0 for i in range(self.epoch_steps): self.single_step() # self.x += self.dt * ( -self.x + self.w @ np.tanh(self.x) + self.w_rec * self.z ) # r = np.tanh(self.x) # self.z = self.w_out @ r # self.t += self.dt xs.append(np.array(r)) # bp() target = Target(self.t) # print('TARGET: ',target) self.w_out, error = self.optim.update(self.w_out,self.r,target) # bp() if p: print('Target: {} \t Pred: {} \t Error: {}'.format(target,self.w_out @ self.r,error)) return xs model = Model(n=1000,alpha=100) xs = [] zs = [] w_dots = [] num_steps = 300000 w_last = np.zeros(1000) for i in range(num_steps): # xs += model.train_step(p=False) model.train_step(p=False) zs.append(model.w_out @ model.r) delta = ((w_last - model.w_out)**2).sum() w_last = np.array(model.w_out) w_dots.append(delta) # print('W_max: ',model.w_out.max()) # print('P_max: ',model.optim.P.max()) # print(model.optim.P_nis.max()) # print(model.optim.P_nis.max()) # xs = np.concatenate(xs,0) # xs = xs.reshape(200,-1) # Xs = np.zeros((100,6000)) # for n in range(100): # for t in range(6000): # Xs[n,t] = xs[t][n] # - ff() ts = model.epoch_steps*np.arange(num_steps)*model.dt plt.plot(ts,zs) plt.plot(ts,Target(ts)) ff() 
plt.plot(w_dots) num_steps = int(1e5) Xs = np.zeros((1000,num_steps)) for i in range(num_steps): model.single_step() Xs[:,i] = np.array(model.r) ff() plt.plot(model.w_out @ Xs[:,-10000:]) model.w_out ff() plt.plot(model.w_out @ np.tanh(Xs)) ff() plt.plot(Xs[:10,:].T); ff() plt.plot(Xs[:10,::100].T); ff() # plt.plot(xs[0,:100]); plt.plot(xs[:,:10]); # plt.plot(xs[-1]); xs[t].shape model.w[:5,:5] xs.shape xs.shape ff() plt.plot(xs) xs.shape xs[-1] model.optim.P_nis ff() plt.plot(xs[:,2]) # + vs = np.random.randn(100) W = np.random.randn(100,100) / 2 dt = 0.001 Vs = [] for i in range(10000): vs += dt * (-vs + W @ np.tanh(vs)) Vs.append(np.array(vs)) Vs = np.concatenate(Vs,0) Vs = Vs.reshape(100,-1) # Vs = np.array(Vs) # - ff() plt.plot(Vs[:,:10:]); ff() plt.plot(Vs[0]); plt.plot(Vs[-1]); Vs.shape ff() plt.plot(Vs[0]); ff() plt.plot(Vs[:,6]) vs np.concatenate([[1,2],[3,4]],0) dt * (-vs + W @ np.tanh(vs)) # + P = np.eye(100)/5 L = 1 for i in range(10): r = np.random.randn(100) P = P/L - (P@r) @ (r@P) / (L + r@P@r) / L # - P # + n = 100 np.linalg.inv(n * np.ones((100,100))) # -
old/notebooks/first_attempt.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="I-bHS6nJdz50" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="e66526f7-c751-4d84-b1e4-59ae9f53e951" # !pip install transformers # + id="1RZJEVb6dbUv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5155387f-7963-424d-a64b-1d211775c006" import torch import json import csv import os import time import numpy as np import pandas as pd import matplotlib.pyplot as plt from transformers import GPT2Tokenizer, GPT2LMHeadModel from transformers import AdamW, get_linear_schedule_with_warmup device = 'cuda' if torch.cuda.is_available() else 'cpu' print(f'Using device: {device}') # + [markdown] id="JXikLtVVeiuu" colab_type="text" # # Load GPT-2 Medium # + id="lwJpuU_9dvna" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 267, "referenced_widgets": ["997667c44ee3463cb0e546415aca2c22", "9a5c4df9eeda46a4bb656e3722a006d2", "9d044a3cfa3f431b8f96ad4ea2ee7aa1", "e87a1bde58a0490f9e93657456cafc9f", "c7fe4c3191234e1699136ea0af8f9234", "<KEY>", "3d858a2cfd0d4e3992c2e5fb48238827", "<KEY>", "<KEY>", "5ab16509f54e420c9387ee6d08101d67", "<KEY>", "<KEY>", "bc910b206b6e4fee9a5ea6712144d61e", "c4e948d210f443f4b06090cc8e344349", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "afad3a41a3fc4eb1ba76e26d75b5dd1c", "cc71985b8e454049a69c6a65778f03a9", "8fd8807519654be1a60e1bd950a49dba", "277e3995e4cc4577b1a7b58e14560d5b", "7380edb75e804c0f8923537cfc6cedf9", "fe3fd92268014d2f8b5326ace5a6c636", "9eafee42cdf544528b9ba55aed1ae83d", "<KEY>", "<KEY>", "ee7f8b23ce4047c9a1d97363d34f9789", "25fe51e9d95c40739b2abd21d551b661", "<KEY>", "<KEY>"]} outputId="0566ffc8-fb56-4f9c-ea04-ed26e84818a7" tokenizer = GPT2Tokenizer.from_pretrained('gpt2', model_max_length=1024) model = 
GPT2LMHeadModel.from_pretrained('gpt2') model = model.to(device) # + [markdown] id="YYjQE8lPppK5" colab_type="text" # # Helper functions # + id="X0yxogS8d90v" colab_type="code" colab={} class ModelWrapper: def __init__(self, model, tokenizer): self.model = model self.tokenizer = tokenizer self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.set_generation_settings() def set_generation_settings(self, params={}): if params: for key in params: self.gen_params[key] = params[key] else: self.gen_params = { 'max_length': 512, 'top_p': 0.92, 'top_k': 0, 'temperature': 0.7 } def generate(self, prompt='', keywords=[], category=''): input = '<|startoftext|>' if category: assert category in ['positive', 'negative'] input += f'~`{category}' if keywords: keywords = [ k.replace(' ', '-') for k in keywords ] input += f"~^{' '.join(keywords)}" input += f"~@{prompt if prompt else ''}" input_encoded = self.tokenizer.encode(input, return_tensors='pt').to(self.device) outputs = self.model.generate( input_encoded, do_sample=True, **self.gen_params ) # TODO: force outputs with keywords (?) 
outputs_decoded = [ self.tokenizer.decode(out, skip_special_tokens=True).split('~@')[-1] for out in outputs ] return outputs_decoded # + [markdown] id="ld2jV9AJjk7H" colab_type="text" # # Reviews dataset # + id="y9Qw6cZr4KA_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="7b29a9b4-d477-4b05-a413-69a682b7eae9" from google.colab import drive drive.mount("/content/drive", force_remount=True) # + id="7o6HT6QX4by3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="438b3070-3c5d-4d2b-82fd-2e1ae1a12f5f" DATA_PATH = '/content/drive/My Drive/Data Science/Datasets' # !ls '$DATA_PATH' # + id="rjVKNlOlg79K" colab_type="code" colab={} from torch.utils.data import Dataset, DataLoader MAX_SEQ_LEN = 1000 class ReviewsDataset(Dataset): def __init__(self, filename): super().__init__() with open(filename) as data_file: self.reviews = data_file.readlines() self.join_sequences() def join_sequences(self): joined = [] temp_reviews_tens = None for review in self.reviews: # fit as many review sequences into MAX_SEQ_LEN sequence as possible review_tens = torch.tensor(tokenizer.encode(review, max_length=MAX_SEQ_LEN, truncation=True)).unsqueeze(0) # the first review sequence in the sequence if not torch.is_tensor(temp_reviews_tens): temp_reviews_tens = review_tens continue else: # the next review does not fit in so we process the sequence and leave the last review # as the start for next sequence if temp_reviews_tens.size()[1] + review_tens.size()[1] < MAX_SEQ_LEN: # add the review to sequence, continue and try to add more temp_reviews_tens = torch.cat([temp_reviews_tens, review_tens[:, 1:]], dim=1) continue else: work_reviews_tens, temp_reviews_tens = temp_reviews_tens, review_tens joined.append(work_reviews_tens) self.encoded_joined_seq = joined def __len__(self): return len(self.encoded_joined_seq) def __getitem__(self, idx): return self.encoded_joined_seq[idx] review_dataset = 
ReviewsDataset(f'{DATA_PATH}/reviews_nlp_encoded.txt') review_loader = DataLoader(review_dataset, batch_size=1, shuffle=True) # + [markdown] id="-WePSCj4DTvm" colab_type="text" # # Hyperparameters # + id="dLfOsi8BDETo" colab_type="code" colab={} BATCH_SIZE = 5 EPOCHS = 8 LEARNING_RATE = 1e-4 WARMUP_FRAC = 0.25 TRAINING_STEPS = len(review_loader)*EPOCHS//BATCH_SIZE # + id="zChsgrqpwsiB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fefc64d1-0b19-4f7f-af68-3016c782b890" print(f'There are {TRAINING_STEPS} training steps ({TRAINING_STEPS/EPOCHS} per epoch).') # + [markdown] id="IpL9nymxDyAO" colab_type="text" # # Training # + id="x3-izXatDz1O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} outputId="2a33861c-09fa-4dc7-b059-85f7237007b2" model = model.to(device) model.train() optimizer = AdamW(model.parameters(), lr=LEARNING_RATE) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(WARMUP_FRAC*TRAINING_STEPS), num_training_steps=TRAINING_STEPS) loss_history = [] batch_count = 0 reviews_count = 0 print_loss_every = int(TRAINING_STEPS/EPOCHS/3) temp_reviews_tens = None models_dir = "models" if not os.path.exists(models_dir): os.mkdir(models_dir) for epoch in range(EPOCHS): print(f"EPOCH {epoch+1}") start = time.perf_counter() epoch_loss_history = [] for review_tens in review_loader: review_tens = review_tens.to(device) # sequence ready, process it trough the model optimizer.zero_grad() outputs = model(review_tens, labels=review_tens) loss, logits = outputs[:2] loss.backward() epoch_loss_history.append(loss.detach().item()) reviews_count += 1 if reviews_count == BATCH_SIZE: reviews_count = 0 batch_count += 1 optimizer.step() scheduler.step() if batch_count == print_loss_every: batch_count = 0 avg_loss = np.array(epoch_loss_history)[-print_loss_every*BATCH_SIZE+1:].mean() print(f'Avg. 
loss: {avg_loss:.4f}') loss_history.append(torch.tensor(epoch_loss_history)) end = time.perf_counter() print(f'The epoch has been trained in {end-start:.2f} seconds.\n') # store the model after each epoch to compare the performance of them torch.save(model.state_dict(), os.path.join(models_dir, f"review_model_e{epoch+1}.pt")) # + [markdown] id="N81rgHj-nkIf" colab_type="text" # # Evaluate model # + id="NuNWJIUZYSWJ" colab_type="code" colab={} def load_model(filename): model = GPT2LMHeadModel.from_pretrained('gpt2') model.to(device) model.load_state_dict(torch.load(filename)) model.eval() return model # + id="oREjFnwxJKLy" colab_type="code" colab={} def create_evaluation_report(model, tokenizer, gen_params_list, meta_info): wrapped = ModelWrapper(model, tokenizer) report = 'Book Review Generation Model\n' def add_reviews(report, reviews): for idx, review in enumerate(reviews): report += f'\tReview {idx+1}:\n\t{review}\n' return report+'\n' for key in meta_info: if meta_info[key]: report += f'{key}: {meta_info[key]}\n' report += '\n' for idx, params in enumerate(gen_params_list): wrapped.set_generation_settings(params) report += '-'*20+'\n' report += f'GENERATION PARAMS V{idx+1}\n' report += ' | '.join([ f'{key}: {params[key]}' for key in params ]) report += '\n\n' # TEST 1 report += 'TEST 1: Positive vs Negative Reviews\n\n' report += 'Category: POSITIVE\n' report = add_reviews(report, wrapped.generate(category='positive')) report += 'Category: NEGATIVE\n' report = add_reviews(report, wrapped.generate(category='negative')) # TEST 2 report += 'TEST 2: Keywords\n\n' report += 'Keywords: BORING\n' report = add_reviews(report, wrapped.generate(keywords=['boring'])) report += 'Keywords: TWILIGHT\n' report = add_reviews(report, wrapped.generate(keywords=['twilight'])) report += 'Keywords: SEX\n' report = add_reviews(report, wrapped.generate(keywords=['sex'])) report += 'Keywords: TREE, DOG\n' report = add_reviews(report, wrapped.generate(keywords=['tree', 'dog'])) return 
report # + id="6AVEvf1s44eX" colab_type="code" colab={} meta_info = { 'epoch': 8, 'learning_rate': LEARNING_RATE, 'batch_size': BATCH_SIZE, 'scheduler': 'linear warmup 0.25' } # + id="I1FnAnKhGlz0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="6de16c55-5b00-4322-f654-fabb5cf8e6bc" model = load_model(f'models/review_model_e{meta_info["epoch"]}.pt') tokenizer = GPT2Tokenizer.from_pretrained('gpt2', model_max_length=1024) # + id="vIiwDEkvoCLE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="eec315da-3f6b-438c-a601-0288a2c049ea" report = create_evaluation_report(model, tokenizer, [ { 'top_p': 0.92, 'top_k': 0, 'temperature': 0.7, 'num_return_sequences': 2 } ], meta_info) print(report) # + id="dIH1j6JioHAb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="6a825261-3bfd-416c-faa2-3653b0f12b74" wrapped = ModelWrapper(model, tokenizer) wrapped.set_generation_settings({ 'top_p': 0.92, 'top_k': 0, 'temperature': 0.7, 'num_return_sequences': 5 }) reviews = wrapped.generate(keywords=['harry']) for review in reviews: print(review+'\n') # + id="PYbaMOwnEzxT" colab_type="code" colab={}
modelling/nlp/NLP_book_review_generator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import numpy as np import joblib import tensorflow as tf import matplotlib.pyplot as plt from keras.models import model_from_json from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix # + jupyter={"source_hidden": true} tags=[] def stacked_dataset(members, inputX): stackX = None for model in members: # make prediction yhat = model.predict(inputX, verbose=0) # stack predictions into [rows, members, probabilities] if stackX is None: stackX = yhat else: stackX = np.dstack((stackX, yhat)) # flatten predictions to [rows, members x probabilities] stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) return stackX def Lr(members, inputX, inputy): # create dataset using ensemble stackedX = stacked_dataset(members, inputX) # fit standalone model model = LogisticRegression() model.fit(stackedX, inputy) return model # make a prediction with the stacked model def stacked_prediction(members, model, inputX): # create dataset using ensemble stackedX = stacked_dataset(members, inputX) # make a prediction yhat = model.predict(stackedX) return yhat # - save_dir_mfcc = 'C:/Users/NITM/home_automation/ensemble_learning/' x1 = joblib.load(save_dir_mfcc + 'mfcc_xtest.joblib') y1 = joblib.load(save_dir_mfcc + 'mfcc_ytest.joblib') save_dir_emd = 'C:/Users/NITM/home_automation/ensemble_learning/' x2 = joblib.load(save_dir_mfcc + 'emd_xtest.joblib') y2 = joblib.load(save_dir_mfcc + 'emd_ytest.joblib') y1_ohe = tf.keras.utils.to_categorical(y1) y2_ohe = tf.keras.utils.to_categorical(y2) model_list1=[] model_list2=[] model_path1='C:/Users/NITM/home_automation/ensemble_learning/mfcc_weights' model_li1=os.listdir(model_path1) model_li1 
model_path2='C:/Users/NITM/home_automation/ensemble_learning/emd_weights' model_li2=os.listdir(model_path2) model_li2 for fol in model_li1: path=os.path.join(model_path1,fol) li=os.listdir(path) j=[_ for _ in li if _.endswith(".json")] w=[_ for _ in li if _.endswith(".h5")] json_file = open(path+"/"+j[0], 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights(path+"/"+w[0]) model_list1.append(loaded_model) for fol in model_li2: path=os.path.join(model_path2,fol) li=os.listdir(path) j=[_ for _ in li if _.endswith(".json")] w=[_ for _ in li if _.endswith(".h5")] json_file = open(path+"/"+j[0], 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights(path+"/"+w[0]) model_list2.append(loaded_model) for i in range(0,len(model_list1)): model_list1[i].compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model_list2[i].compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) his0=model_list1[0].evaluate(x1,y1_ohe) his1=model_list1[1].evaluate(x1,y1_ohe) his2=model_list1[2].evaluate(x1,y1_ohe) his3=model_list1[3].evaluate(x1,y1_ohe) his4=model_list1[4].evaluate(x1,y1_ohe) his0=model_list2[0].evaluate(x2,y2_ohe) his1=model_list2[1].evaluate(x2,y2_ohe) his2=model_list2[2].evaluate(x2,y2_ohe) his3=model_list2[3].evaluate(x2,y2_ohe) his4=model_list2[4].evaluate(x2,y2_ohe) model2=Lr(model_list2,x2,y2) # evaluate model on test set yhat2 = stacked_prediction(model_list2, model2, x2) acc2 = accuracy_score(y2, yhat2) print('Stacked Test Accuracy: %.3f' % acc2) # + cm2=confusion_matrix(y2,yhat2) cm2=(cm2/cm2.astype(np.float64).sum(axis=1))*100 from sklearn.metrics import ConfusionMatrixDisplay labels = ["Blender","Glass","Washing","Vacuum"] disp = ConfusionMatrixDisplay(confusion_matrix=cm2, display_labels=labels) 
disp.plot(cmap=plt.cm.Blues) plt.show() # - model1=Lr(model_list1,x1,y1) # evaluate model on test set yhat1 = stacked_prediction(model_list1, model1, x1) acc1 = accuracy_score(y1, yhat1) print('Stacked Test Accuracy: %.3f' % acc1) # + cm1=confusion_matrix(y1,yhat1) cm1=(cm1/cm1.astype(np.float64).sum(axis=1))*100 from sklearn.metrics import ConfusionMatrixDisplay labels = ["Blender","Glass","Washing","Vacuum"] disp = ConfusionMatrixDisplay(confusion_matrix=cm1, display_labels=labels) disp.plot(cmap=plt.cm.Blues) plt.show() # - model_list3=[] model_list3.append(model_list2[1]) model_list3.append(model_list1[1]) yhat3_1=model_list3[0].predict(x2)#emd .815 yhat3_2=model_list3[1].predict(x1)#mfcc .905 stackX = np.dstack((yhat3_1, yhat3_2)) stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) stackX = np.dstack((yhat3_1, yhat3_2)) stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) model_x = LogisticRegression() model_x.fit(stackX,y1) res=model_x.predict(stackX) acc3 = accuracy_score(y2, res) print('Stacked Test Accuracy: %.3f' % acc3) # + model_list3=[] model_list3.append(model_list2[1]) model_list3.append(model_list1[1]) model_list3.append(model_list2[3]) yhat3_1=model_list3[0].predict(x2)#emd .815 yhat3_2=model_list3[1].predict(x1)#mfcc .905 yhat3_3=model_list3[2].predict(x2)#emd .806 stackX = np.dstack((yhat3_1, yhat3_2)) stackX = np.dstack((stackX, yhat3_3)) stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) model_x = LogisticRegression() model_x.fit(stackX,y1) res=model_x.predict(stackX) acc3 = accuracy_score(y2, res) print('Stacked Test Accuracy: %.3f' % acc3) # + model_list3=[] model_list3.append(model_list2[1]) model_list3.append(model_list1[1]) model_list3.append(model_list2[3]) model_list3.append(model_list2[4]) yhat3_1=model_list3[0].predict(x2)#emd .815 yhat3_2=model_list3[1].predict(x1)#mfcc .905 yhat3_3=model_list3[2].predict(x2)#emd .806 yhat3_4=model_list3[2].predict(x2)#emd .79 
stackX = np.dstack((yhat3_1, yhat3_2)) stackX = np.dstack((stackX, yhat3_3)) stackX = np.dstack((stackX, yhat3_4)) stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) model_x = LogisticRegression() model_x.fit(stackX,y1) res=model_x.predict(stackX) acc3 = accuracy_score(y2, res) print('Stacked Test Accuracy: %.3f' % acc3) # + model_list3=[] model_list3.append(model_list2[1]) model_list3.append(model_list1[1]) model_list3.append(model_list2[3]) model_list3.append(model_list2[4]) model_list3.append(model_list1[0]) yhat3_1=model_list3[0].predict(x2)#emd .815 yhat3_2=model_list3[1].predict(x1)#mfcc .905 yhat3_3=model_list3[2].predict(x2)#emd .806 yhat3_4=model_list3[3].predict(x2)#emd .79 yhat3_5=model_list3[4].predict(x1)#mfcc .87 stackX = np.dstack((yhat3_1, yhat3_2)) stackX = np.dstack((stackX, yhat3_3)) stackX = np.dstack((stackX, yhat3_4)) stackX = np.dstack((stackX, yhat3_5)) stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2])) model_x = LogisticRegression() model_x.fit(stackX,y1) res=model_x.predict(stackX) acc3 = accuracy_score(y2, res) print('Stacked Test Accuracy: %.3f' % acc3) # -
ensemble_mfcc-emd.ipynb
# Start a browser session on a remote Selenium server and open a Google search.
from selenium import webdriver

# Plain Chrome options; add flags here if the remote node needs any.
chrome_options = webdriver.ChromeOptions()

# Attach to the Selenium hub listening on the LAN host.
driver = webdriver.Remote(
    command_executor="http://192.168.0.2:4444/wd/hub",
    options=chrome_options,
)

driver.get("https://www.google.com/search?q=qiita")
Simple_Start.ipynb
# Exploratory look at the weight-for-age table (wtageinf.csv):
# load it, inspect it, and plot the 3rd percentile curve for Sex == 1.
# %matplotlib inline
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np

import sys

# Resolve the data location: raw GitHub URL when running on Colab,
# the local checkout otherwise.
in_colab = 'google.colab' in sys.modules
BASE_DIR = (
    "https://github.com/DiploDatos/AnalisisYCuracion/raw/master/"
    if in_colab
    else ".."
)

tabla_1 = pd.read_csv(BASE_DIR + "/input/wtageinf.csv")

# Quick inspection of the loaded frame (notebook display expressions).
tabla_1.head(10)
tabla_1.dtypes
tabla_1.columns

# Keep only the Sex == 1 rows and plot P3 against age in months.
filtrado = tabla_1[tabla_1.Sex == 1]
filtrado.plot(x='Agemos', y='P3');
AnalisisYCuracion/notebooks/Ejercicio-tablas.ipynb
# Run one liars-dice match: a naive-intelligence bot (player 0) against
# three zero-intelligence bots on the shared game platform.
import numpy as np
from common import PlatForm as PF
from strategies.epsilon_intelligence.epsilon_intelligence import EpsilonIntelligence as EI
from strategies.zero_intelligence.zero_intelligence import ZeroIntelligence as ZI
from strategies.human_player.player import HumanPlayer as HP
from strategies.naive_intelligence.naive_intelligence import NaiveIntelligence as NI

# Instantiated as in the original session (not seated in this game).
player = HP()

# Seat order: player 0 is the naive bot, players 1-3 are zero-intelligence.
naive_bot = NI(
    simulation_time_limit=0.2,
    aggresive=0.8,
    simple_minded=False,
    response_principle=0,
)
L = [naive_bot, ZI(0.5, 0.5), ZI(0.5, 0.5), ZI(0.5, 0.5)]

Game = PF(3, L, call_level=0.3, bluff=0.05, trainning=False)
Game.play()
liars_dice AIs.ipynb
# Tutorial: *args
#
# - * in a function definition collects extra positional arguments into a tuple
# - it is customary to name it *args
# - no positional parameters can be added after *args
#
# FIX: the two deliberately failing demonstration calls are wrapped in
# try/except so that, run as a script, the file no longer aborts at the
# first TypeError and every example below actually executes.


def func1(a, b, *c):
    """Print the two required arguments and the tuple of extras."""
    print(a, b, c)


func1(10, 20)            # c is the empty tuple
func1(10, 20, 1, 2, 3)   # c == (1, 2, 3)


def avg(*args):
    print(args)


avg()
avg(10, 20)


def avg(*args):
    """Average of args; returns 0 for an empty call."""
    count = len(args)
    total = sum(args)
    # short circuit to avoid division by 0
    return count and total / count


avg(2, 2, 4, 4)
avg()  # returns 0 thanks to the short circuit

# Enforcing at least one argument


def avg(a, *args):
    """Average of one required value plus any number of extras."""
    count = len(args) + 1
    total = sum(args) + a
    # short circuit to avoid division by 0
    return count and total / count


avg(2, 2, 4, 4)

# TypeError because it is missing a positional argument
# (wrapped so the demonstration does not abort the rest of the script).
try:
    avg()
except TypeError as err:
    print('TypeError:', err)


def funct1(a, b, c):
    print(a, b, c)


l = [10, 20, 30]

# without unpacking, TypeError (the list is passed as one single argument)
try:
    funct1(l)
except TypeError as err:
    print('TypeError:', err)

funct1(*l)  # unpacked: a=10, b=20, c=30


def funct1(a, b, c, *args):
    print(a, b, c, args)


l = [1, 2, 3, 4, 5, 6, 7]
funct1(*l)
python-deepdive/deepdive1/section05/section_05_args.ipynb
# Lab 1: "Apples and Bananas" — a 2-D linear program solved with
# scipy.optimize.linprog and visualised with a small plotting routine.
#
# FIX: all `== None` / `!= None` comparisons replaced with the idiomatic
# `is None` / `is not None` (PEP 8); behavior is otherwise unchanged.

# Import required library
import matplotlib.pyplot as plt
import scipy.optimize as opt


# ------------------------------------------------------------ Plotting Routine
def line(A, b, i, x):
    """y-coordinate of constraint i (A[i] . (x, y) = b[i]) at abscissa x."""
    return (b[i] - A[i][0] * x) / A[i][1]


def grad(c, g, x):
    """y-coordinate of the iso-cost line c . (x, y) = g at abscissa x."""
    return (g - c[0] * x) / c[1]


def MIN(a, b):
    """min() that treats None as 'unbounded' (the other value wins)."""
    return b if a is None else a if b is None else min(a, b)


def MAX(a, b):
    """max() that treats None as 'unbounded' (the other value wins)."""
    return b if a is None else a if b is None else max(a, b)


def plot_linopt(A, b, c, bounds, res, borders=None, dx=5, dy=5, title=None,
                labels=None, solution=None, legend=None, output=False):
    """Draw the constraints, bounds box, optimal iso-cost line and optimum
    of a 2-variable LP whose solution is in `res` (a linprog result)."""
    ax = plt.axes()
    ax.grid(True)
    if borders is None:
        # default drawing region: a (2*dx) x (2*dy) window around the optimum
        borders = [(res.x[0] - dx, res.x[0] + dx),
                   (res.x[1] - dy, res.x[1] + dy)]
    # set drawing region (xmin, xmax) (ymin, ymax)
    xmin = borders[0][0]
    xmax = borders[0][1]
    ymin = borders[1][0]
    ymax = borders[1][1]
    ax.set_xlim((xmin, xmax))
    ax.set_ylim((ymin, ymax))
    if labels is not None:
        plt.xlabel(labels[0])
        plt.ylabel(labels[1])
    if legend is None:
        legend = []
        for i in range(0, len(A)):
            legend += ['Constraint ' + str(i)]
    if solution is None:
        solution = 'Solution'
    # compute visual bounds (drawing limits if there is no bound)
    xleft = MAX(bounds[0][0], borders[0][0])
    xright = MIN(bounds[0][1], borders[0][1])
    ybottom = MAX(bounds[1][0], borders[1][0])
    ytop = MIN(bounds[1][1], borders[1][1])
    # plot constraints
    x = [xmin, xmax]
    lines = []
    for i in range(0, len(A)):
        y = [line(A, b, i, xmin), line(A, b, i, xmax)]
        l = plt.plot(x, y, label=legend[i])
        # shade the infeasible side of each constraint
        plt.fill_between(x, y, ymin if A[i][1] > 0 else ymax, alpha=0.3)
        lines = lines + [l[0]]
    # plot bounding box
    rangex = [xleft, xright, xright, xleft, xleft]
    rangey = [ybottom, ybottom, ytop, ytop, ybottom]
    l = plt.plot(rangex, rangey, label='Bounds')
    plt.fill_between(rangex, rangey, alpha=0.3)
    lines += [l[0]]
    # plot optimal cost function
    x = [xmin, xmax]
    lopt = plt.plot(x, [grad(c, res.fun, xmin), grad(c, res.fun, xmax)],
                    color='red', label=solution)
    # plot optimal solution
    plt.plot(res.x[0], res.x[1], 'ro')
    if legend is not None:
        plt.legend(handles=lines + [lopt[0]])
    if title is not None:
        plt.suptitle(title)
    if output:
        print(solution, '=', res.fun)
        for i in range(0, len(c)):
            print(labels[i], '=', res.x[i])


# ------------------------------------------------ Apples and Bananas: The Puzzle
# Input parameters and bounds
A = [[-2, 1], [0.5, -1]]
b = [0, 0]
c = [-1, -1]
bounds = [(0, 9), (0, 4)]  # no more than 9 apples and 4 bananas

# Run the model
# NOTE(review): method='simplex' is deprecated in recent SciPy releases —
# kept for behavior compatibility; confirm the installed SciPy still has it.
res = opt.linprog(c, A, b, bounds=bounds, method='simplex')
[apples, bananas] = res.x
apples
bananas
res.fun

# Drawing
plot_linopt(A, b, c, bounds, res, borders=[(-2, 10), (-2, 10)],
            title='Apples and Bananas', labels=['Apples', 'Bananas'],
            legend=['Upper Bound Bananas', 'Lower Bound Bananas'])

# ------------------------------------------- Variation 1: drop positivity
bounds = [(None, 9), (None, 4)]  # no more than 9 apples and 4 bananas
res = opt.linprog(c, A, b, bounds=bounds, method='simplex')
plot_linopt(A, b, c, bounds, res, borders=[(-2, 10), (-2, 10)],
            title='Apples and Bananas', labels=['Apples', 'Bananas'],
            legend=['Upper Bound Bananas', 'Lower Bound Bananas'])

# ------------- Variation 2: automatic drawing region around the solution
bounds = [(0, 9), (0, 4)]  # no more than 9 apples and 4 bananas
res = opt.linprog(c, A, b, bounds=bounds, method='simplex')
plot_linopt(A, b, c, bounds, res,
            title='Apples and Bananas', labels=['Apples', 'Bananas'],
            legend=['Upper Bound Bananas', 'Lower Bound Bananas'])
.ipynb_checkpoints/Lab 1 Apples and Bananas-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Python for Finance — Analyze Big Financial Data
# O'Reilly (2014), Yves Hilpisch.
#
# Book resources: http://oreilly.quant-platform.com | http://tpq.io

# ## Introduction

from pylab import plt
plt.style.use('ggplot')
import matplotlib as mpl
mpl.rcParams['font.family'] = 'serif'

# ## Finance and Python Syntax

# Monte Carlo valuation of a European call option under the
# Black-Scholes-Merton model. Model and option parameters:

# +
S0 = 100.    # initial index level
K = 105.     # strike price
T = 1.0      # time-to-maturity in years
r = 0.05     # riskless short rate
sigma = 0.2  # volatility
# -

# +
import numpy as np

I = 100000                 # number of simulated paths
np.random.seed(1000)       # fix the seed for reproducible draws
z = np.random.standard_normal(I)
# Terminal index level under the risk-neutral measure.
# FIX: the original used exp(r * T + sigma * sqrt(T) * z), omitting the
# -0.5 * sigma ** 2 drift correction of geometric Brownian motion, which
# biases the simulated prices (and hence the option value) upward.
ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T + sigma * np.sqrt(T) * z)
hT = np.maximum(ST - K, 0)             # payoff at maturity
C0 = np.exp(-r * T) * np.sum(hT) / I   # discounted Monte Carlo estimator
# -

# +
print("Value of the European Call Option %5.3f" % C0)
# -

# ## Time-to-Results

# +
import pandas as pd
from pandas_datareader import data as web
# -

# +
# NOTE(review): the free Yahoo endpoint behind pandas-datareader has been
# unreliable/retired; this call may fail without a maintained data source.
goog = web.DataReader('GOOG', data_source='yahoo',
                      start='3/14/2009', end='4/14/2014')
goog.index.name = u'Date'
goog.tail()
# -

# +
# Daily log returns and annualized rolling (252-day) volatility.
goog['Log_Ret'] = np.log(goog['Close'] / goog['Close'].shift(1))
goog['Volatility'] = goog['Log_Ret'].rolling(window=252).std() * np.sqrt(252)
# -

# +
# %matplotlib inline
goog[['Close', 'Volatility']].plot(subplots=True, color='blue',
                                   figsize=(8, 6), grid=True);
# tag: goog_vola
# title: Google closing prices and yearly volatility
# -

# ## Paradigm

# +
# Pure-Python baseline: evaluate f element by element in a list comprehension.
loops = 25000000
from math import log, cos   # explicit names instead of `from math import *`

a = range(1, loops)

def f(x):
    """Scalar benchmark function: 3*log(x) + cos(x)**2."""
    return 3 * log(x) + cos(x) ** 2

# %timeit r = [f(x) for x in a]
# -

# +
# NumPy: same computation, vectorized.
a = np.arange(1, loops)
# %timeit r = 3 * np.log(a) + np.cos(a) ** 2
# -

# +
# numexpr: compiled expression evaluation, single-threaded first.
import numexpr as ne
ne.set_num_threads(1)
# Renamed from `f` so the benchmark function above is not shadowed.
expr = '3 * log(a) + cos(a) ** 2'
# %timeit r = ne.evaluate(expr)
# -

# +
# numexpr with four threads.
ne.set_num_threads(4)
# %timeit r = ne.evaluate(expr)
# -

# **The Python Quants GmbH** | http://tpq.io | @dyjh
# Quant Platform: http://quant-platform.com
# Python for Finance @ O'Reilly | Derivatives Analytics @ Wiley Finance
# Python Training: http://training.tpq.io
ipython3/01_Why_Python_ipynb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.8 64-bit ('.env')
#     metadata:
#       interpreter:
#         hash: 6b3d8fded84b82a84dd06aec3772984b6fd7683755c4932dae62599619bfeba9
#     name: python3
# ---

# # Customizing Ticks
#
# Matplotlib's default tick locators and formatters are designed to be
# generally sufficient in many common situations, but are in no way optimal
# for every plot. This section gives several examples of adjusting the tick
# locations and formatting for the particular plot type you're interested in.
#
# Matplotlib aims to have a Python object representing everything that
# appears on the plot: recall that the `figure` is the bounding box within
# which plot elements appear. Each `figure` can contain one or more `axes`
# objects, each of which in turn contains other objects representing plot
# contents.
#
# The tick marks are no exception. Each `axes` has attributes `xaxis` and
# `yaxis`, which in turn have attributes that contain all the properties of
# the lines, ticks, and labels that make up the axes.

# ## Major and Minor Ticks
#
# Within each axis there is the concept of a *major* tick mark and a *minor*
# tick mark. As the names imply, major ticks are usually bigger or more
# pronounced, while minor ticks are usually smaller. By default, Matplotlib
# rarely makes use of minor ticks, but one place you can see them is within
# logarithmic plots:

import matplotlib.pyplot as plt
plt.style.use('classic')
# %matplotlib inline
import numpy as np

ax = plt.axes(xscale='log', yscale='log')
ax.grid();

# Each major tick shows a large tickmark and a label, while each minor tick
# shows a smaller tickmark with no label.
#
# This can be customized by setting the `formatter` and `locator` objects of
# each axis.

print(ax.xaxis.get_major_locator())
print(ax.xaxis.get_minor_locator())
print(ax.xaxis.get_major_formatter())
print(ax.xaxis.get_minor_formatter())

# ## Hiding Ticks or Labels
# Perhaps the most common tick/label formatting operation is hiding ticks or
# labels. This can be done using `plt.NullLocator()` and
# `plt.NullFormatter()`, as shown here:

# +
ax = plt.axes()
ax.plot(np.random.rand(50))

# Remove the ticks from the y axis and the labels from the x axis.
ax.yaxis.set_major_locator(plt.NullLocator())
ax.xaxis.set_major_formatter(plt.NullFormatter())
# -

# Having no ticks at all can be useful in many situations — for example, when
# you want to show a grid of images.

# +
fig, ax = plt.subplots(5, 5, figsize=(5, 5))
fig.subplots_adjust(hspace=0, wspace=0)

# Get some face data from scikit-learn
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces().images

for i in range(5):
    for j in range(5):
        ax[i, j].xaxis.set_major_locator(plt.NullLocator())
        ax[i, j].yaxis.set_major_locator(plt.NullLocator())
        ax[i, j].imshow(faces[10 * i + j], cmap="bone")
# -

# Each image has its own axes, and we've set the locators to null because the
# tick values (pixel number in this case) do not convey relevant information
# for this particular visualization.

# ## Reducing or Increasing the Number of Ticks
#
# One common problem with the default settings is that smaller subplots can
# end up with crowded labels. We can see this in the plot grid shown here:

fig, ax = plt.subplots(4, 4, sharex=True, sharey=True)

# Particularly for the x ticks, the numbers nearly overlap and are quite
# difficult to decipher. We can fix this with `plt.MaxNLocator()`, which
# allows us to specify the maximum number of ticks that will be displayed.
# Given this maximum number, Matplotlib will use internal logic to choose the
# particular tick locations:

# +
# for every axis, set the x and y major locator
for axi in ax.flat:
    axi.xaxis.set_major_locator(plt.MaxNLocator(3))
    axi.yaxis.set_major_locator(plt.MaxNLocator(3))
fig
# -

# This makes things much cleaner. If you want even more control over the
# locations of regularly spaced ticks, you might also use
# `plt.MultipleLocator`, which we'll discuss in the following section.

# ## Fancy Tick Formats
#
# Matplotlib's default tick formatting can leave a lot to be desired: it
# works well as a broad default, but sometimes you'd like to do something
# more. Consider this plot of a sine and a cosine:

# +
# Plot a sine and cosine curve
fig, ax = plt.subplots()
x = np.linspace(0, 3 * np.pi, 1000)
ax.plot(x, np.sin(x), lw=3, label='Sine')
ax.plot(x, np.cos(x), lw=3, label='Cosine')

# Set up grid, legend, and limits
ax.grid(True)
ax.legend(frameon=False)
ax.axis('equal')
ax.set_xlim(0, 3 * np.pi);
# -

# It's more natural for this data to space the ticks and grid lines in
# multiples of $\pi$. We can do this by setting a `MultipleLocator`, which
# locates ticks at a multiple of the number you provide. For good measure,
# we'll add both major and minor ticks in multiples of $\frac{\pi}{4}$:

ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi / 2))
ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 4))
fig

# But now these tick labels look a little bit silly: we can see that they are
# multiples of $\pi$, but the decimal representation does not immediately
# convey this. To fix this, we can change the tick formatter. There's no
# built-in formatter for what we want to do, so we'll instead use
# `plt.FuncFormatter`, which accepts a user-defined function giving
# fine-grained control over the tick outputs:

# +
def format_func(value, tick_number):
    """Render a tick value as a LaTeX multiple of pi/2 (e.g. "$\\pi/2$")."""
    # find number of multiples of pi/2
    N = int(np.round(2 * value / np.pi))
    if N == 0:
        return "0"
    elif N == 1:
        # FIX: the original returned r"$r\pi/2$" — the stray 'r' inside the
        # string rendered as a literal "r" before the pi symbol.
        return r"$\pi/2$"
    elif N == 2:
        # FIX: same stray 'r' removed here (was r"$r\pi$").
        return r"$\pi$"
    elif N % 2 > 0:
        return r"${0}\pi/2$".format(N)
    else:
        return r"${0}\pi$".format(N // 2)

ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
fig
# -

# We've made use of Matplotlib's LaTeX support, specified by enclosing the
# string within dollar signs. This is very convenient for display of
# mathematical symbols and formulae.
#
# The `plt.FuncFormatter()` offers extremely fine-grained control over the
# appearance of your plot ticks, and comes in very handy when preparing plots
# for presentation or publication.

# ## Summary of Formatters and Locators
#
# We've mentioned a couple of the available formatters and locators. We'll
# conclude this section by briefly listing all the built-in locator and
# formatter options. For more information on any of these, refer to the
# docstring or to the Matplotlib online documentation. Each of the following
# is available in the `plt` namespace:
# ```
# Locator class        Description
# NullLocator          No ticks
# FixedLocator         Tick locations are fixed
# IndexLocator         Locator for index plots (e.g., where x = range(len(y)))
# LinearLocator        Evenly spaced ticks from min to max
# LogLocator           Logarithmically spaced ticks from min to max
# MultipleLocator      Ticks and range are a multiple of base
# MaxNLocator          Finds up to a max number of ticks at nice locations
# AutoLocator          (Default.) MaxNLocator with simple defaults.
# AutoMinorLocator     Locator for minor ticks
#
# Formatter Class      Description
# NullFormatter        No labels on the ticks
# IndexFormatter       Set the strings from a list of labels
# FixedFormatter       Set the strings manually for the labels
# FuncFormatter        User-defined function sets the labels
# FormatStrFormatter   Use a format string for each value
# ScalarFormatter      (Default.) Formatter for scalar values
# LogFormatter         Default formatter for log axes
# ```
matplotlib/customizing_ticks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:fastai] *
#     language: python
#     name: conda-env-fastai-py
# ---

# # "Push large files to github: git-lfs"
# > "How to deal with files that exceed GitHub's file size limit of 100.00 MB"
# - show_tags: true
# - toc: true
# - branch: master
# - badges: false
# - comments: true
# - categories: [git]
# - image: images/git.png

# # Where is the problem
# I am currently following the deep learning specialization from <NAME> on coursera.
#
# In course 4, about CNNs, there are some pre-trained yolo models that we use to do object detection. And these models come as large `.h5` files.
# Because I run all programming assignments locally and keep everything (lectures + code) in my local repo, when I pushed to github I got this error:
#
# ``` bash
# (base) explore@explore-ThinkPad-P53:~/git/guillaume/deeplearning_specialization$ git push
# Enumerating objects: 247, done.
# Counting objects: 100% (247/247), done.
# Delta compression using up to 12 threads
# Compressing objects: 100% (239/239), done.
# Writing objects: 100% (242/242), 707.06 MiB | 4.74 MiB/s, done.
# Total 242 (delta 6), reused 0 (delta 0)
# remote: Resolving deltas: 100% (6/6), completed with 3 local objects.
# remote: warning: File notebooks/C4W3/nb_images/pred_video.mp4 is 85.44 MB; this is larger than GitHub's recommended maximum file size of 50.00 MB
# remote: warning: File notebooks/C4W3/nb_images/road_video.mp4 is 81.71 MB; this is larger than GitHub's recommended maximum file size of 50.00 MB
# remote: error: GH001: Large files detected. You may want to try Git Large File Storage - https://git-lfs.github.com.
# remote: error: Trace: 2d1944991c30279b831124b51e4aac57a17a860f2ef789b4e32801fb65282244
# remote: error: See http://git.io/iEPt8g for more information.
# remote: error: File notebooks/C4W2/ResNet50.h5 is 270.32 MB; this exceeds GitHub's file size limit of 100.00 MB # remote: error: File notebooks/C4W3/model_data/yolo.h5 is 194.69 MB; this exceeds GitHub's file size limit of 100.00 MB # To github.com:castorfou/deeplearning_specialization.git # ! [remote rejected] master -> master (pre-receive hook declined) # ``` # # Solution: git-lfs # As explained in https://github.com/git-lfs/git-lfs/wiki/Tutorial, there is (always) a way to do it properly. # First it is a matter of installing git-lfs: # # `sudo apt-get install git-lfs` # # Then to setup git lfs # # `git lfs install` # # And then to "migrate" big files to lfs: # # `git lfs migrate import --include="*.mp4"` # # `git lfs migrate import --include="*.h5"` # # And now to `git push` # ``` bash # (base) explore@explore-ThinkPad-P53:~/git/guillaume/deeplearning_specialization$ git push # Uploading LFS objects: 100% (25/25), 954 MB | 37 MB/s, done. # Enumerating objects: 311, done. # Counting objects: 100% (311/311), done. # Delta compression using up to 12 threads # Compressing objects: 100% (273/273), done. # Writing objects: 100% (276/276), 60.49 MiB | 5.59 MiB/s, done. # Total 276 (delta 18), reused 0 (delta 0) # remote: Resolving deltas: 100% (18/18), completed with 16 local objects. # To github.com:castorfou/deeplearning_specialization.git # d0d2dc2..004fa09 master -> master # ```
_notebooks/2020-12-02-push-big-files-to-github.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import cv2
import glob
import pickle

# Prepare object points for a 9x6 chessboard: (0,0,0), (1,0,0) ... (8,5,0)
objp = np.zeros((9*6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

# Store 3-D object points and 2-D image points for every detected board.
objpoints = []
imgpoints = []

# Get all the calibration images.
images = glob.glob('./camera_cal/calibration*.jpg')
imageCount = len(images)
print("Calibration image count:", imageCount)

# +
# Detect the chessboard corners in every calibration image; save an
# annotated copy of each image in which the board was found.
for idx, fname in enumerate(images):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    if ret == True:
        print('working on', fname)
        objpoints.append(objp)
        imgpoints.append(corners)
        cv2.drawChessboardCorners(img, (9, 6), corners, ret)
        write_name = './output_images/corners_found'+str(idx)+'.jpg'
        cv2.imwrite(write_name, img)

# +
# Calibrate the camera from the collected point correspondences and persist
# the camera matrix / distortion coefficients for later runs.
img = cv2.imread('./camera_cal/calibration1.jpg')
img_size = (img.shape[1], img.shape[0])

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
# FIX: the original called pickle.dump(..., open(..., "wb")) and never closed
# the handle; a context manager guarantees the file is flushed and closed.
with open("./calibration_pickle.p", "wb") as pf:
    pickle.dump(dist_pickle, pf)

# +
# Load in the camera calibration results.
# FIX: same file-handle leak as above — use a context manager.
with open("./calibration_pickle.p", "rb") as pf:
    dist_pickle = pickle.load(pf)
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
print("camera calibration values loaded")

# +
# Functions producing the binary pixel-of-interest images for the lane
# tracking algorithm.
# NOTE(review): these helpers convert with COLOR_RGB2* although the driver
# loop below feeds them images from cv2.imread, which are BGR — confirm the
# intended channel order (the video pipeline presumably supplies RGB frames).

def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of pixels whose absolute x- or y-Sobel gradient falls in `thresh`.

    NOTE(review): `sobel_kernel` is accepted but not forwarded to cv2.Sobel,
    so the default 3x3 kernel is always used — kept as-is for compatibility.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # Create a copy and apply the threshold (inclusive on both ends)
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return binary_output


def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose gradient magnitude falls in `mag_thresh`.

    NOTE(review): the parameter `mag_thresh` shadows the function name; the
    keyword is kept unchanged for caller compatibility.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Take both Sobel x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Calculate the gradient magnitude and rescale to 8 bit
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    # Create a binary image of ones where threshold is met, zeros otherwise
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    return binary_output


def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction (radians) falls in `thresh`."""
    # Grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Calculate the x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Take the absolute value of the gradient direction,
    # apply a threshold, and create a binary image result
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    return binary_output


def color_threshold(img, s_thresh=(0, 255), v_thresh=(0, 255)):
    """Binary mask where BOTH the HLS saturation and HSV value channels pass."""
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    v_channel = hsv[:, :, 2]
    v_binary = np.zeros_like(v_channel)
    v_binary[(v_channel >= v_thresh[0]) & (v_channel <= v_thresh[1])] = 1

    output = np.zeros_like(s_channel)
    output[(s_binary == 1) & (v_binary == 1)] = 1
    return output


def hls_select(img, thresh=(0, 255)):
    """Binary mask on the HLS saturation channel alone (lower bound exclusive)."""
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1
    return binary_output

# +
# Apply undistortion + thresholding + perspective warp to the test images and
# write the bird's-eye binary result for each.
images = glob.glob('./test_images/test*.jpg')
for idx, fname in enumerate(images):
    img = cv2.imread(fname)
    img = cv2.undistort(img, mtx, dist, None, mtx)

    # Combine gradient and color thresholds into one binary image.
    preprocessImage = np.zeros_like(img[:, :, 0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))
    c_binary = color_threshold(img, s_thresh=(100, 255), v_thresh=(50, 255))
    preprocessImage[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255

    img_size = (img.shape[1], img.shape[0])
    print(img.shape[1], img.shape[0])

    # Trapezoid (fractions of image size) mapped to a rectangle for the
    # bird's-eye view.
    bot_width = 0.76     # bottom trapezoid width
    mid_width = 0.08     # top trapezoid width
    height_pct = 0.62    # trapezoid height as fraction of image height
    bottom_trim = 0.935  # cut off the car hood at the bottom
    src = np.float32([[img.shape[1]*(0.5-mid_width/2), img.shape[0]*height_pct],
                      [img.shape[1]*(0.5+mid_width/2), img.shape[0]*height_pct],
                      [img.shape[1]*(0.5+bot_width/2), img.shape[0]*bottom_trim],
                      [img.shape[1]*(0.5-bot_width/2), img.shape[0]*bottom_trim]])
    offset = img_size[0] * .25
    dst = np.float32([[offset, 0], [img_size[0]-offset, 0],
                      [img_size[0]-offset, img_size[1]], [offset, img_size[1]]])

    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)  # kept for later unwarping
    warped = cv2.warpPerspective(preprocessImage, M, img_size, flags=cv2.INTER_LINEAR)

    result = warped
    write_name = './test_images/tracked'+str(idx)+'.jpg'
    cv2.imwrite(write_name, result)
    print('input: ', fname, "output:", write_name)
# -
Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={}
# # Project Euler: Problem 4

# + [markdown] nbgrader={}
# https://projecteuler.net/problem=4
#
# A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
#
# Find the largest palindrome made from the product of two 3-digit numbers.

# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# Collect every palindromic product of two 3-digit numbers.
# A number is a palindrome when its decimal string equals its own reverse;
# the reversal is done with the slicing idiom s[::-1]
# (from http://stackoverflow.com/questions/19024371/generalized-project-euler-4).
# This single comprehension replaces the original two-pass version, which
# first materialised all 810,000 products in an intermediate list
# (`all_possible_multiples`) and then filtered it — same values, same order,
# but without the throwaway list.
list_of_palindromes = [x * y
                       for x in range(100, 1000)
                       for y in range(100, 1000)
                       if str(x * y) == str(x * y)[::-1]]
print(list_of_palindromes)  # sanity check: confirmed the filter works

# The largest palindrome is the answer: 906609.
answer = max(list_of_palindromes)
print(answer)
# -

# + deletable=false nbgrader={"checksum": "aff23ddd30b5712efa3907cd1608bcf5", "grade": true, "grade_id": "projecteuler4", "points": 10}
# This cell will be used for grading, leave it at the end of the notebook.
assignments/assignment02/ProjectEuler4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

# Trigger a published Azure ML pipeline once from code, then register a
# daily schedule that re-runs it for AutoML retraining.

from azureml.core import Workspace, Datastore
from azureml.pipeline.core import Pipeline, PublishedPipeline
from azureml.core.experiment import Experiment
from azureml.pipeline.core.schedule import ScheduleRecurrence, Schedule

# FIX: `ws` was used below without ever being defined (NameError).
# Workspace.from_config() reads the local config.json created by the SDK.
ws = Workspace.from_config()

experiment = Experiment(ws, 'AutoML-Retraining-Code-Trigger')

# FIX: the id argument used a typographic (curly) opening quote, which is a
# SyntaxError in Python. Replace the placeholder with your real pipeline id.
published_pipeline = PublishedPipeline.get(workspace=ws, id='your-published-pipeline-id')

# Kick off one run of the published pipeline immediately.
pipeline_run = experiment.submit(published_pipeline)

# Re-run the pipeline once per day.
recurrence = ScheduleRecurrence(frequency="Day", interval=1)
schedule = Schedule.create(ws,
                           name="IrisTrainingSchedule",
                           description="AutoML Training",
                           # FIX: closing curly quote replaced with a straight quote.
                           pipeline_id='your-pipeline-id',
                           experiment_name='Iris-Retraining',
                           recurrence=recurrence)
Chapter09/ML-Pipeline-Scheduling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment of peer feedback
#
# This notebook contains the code used to determine which two other teams
# each team must give feedback to in the data project.

# +
# #!pip install PyGithub
# -

from github import Github
import sys
import datetime
import pandas as pd
import numpy as np
import random
from collections import Counter

# Access github through access token. This one has been destroyed by Github
# for safety reasons.
gh = Github('<KEY>')
org = gh.get_organization('NumEconCopenhagen')
all_repos = org.get_repos()

active_repos = {}
date0 = datetime.datetime(2021, 3, 20)        # earliest commit date considered
handin_date = datetime.datetime(2021, 5, 13)  # hand-in deadline
disregard_repos = ['Projects_workfolder', 'Suggested-Solutions-2021', 'The-Danish-German-Alliance', 'cahm-old-', 'Group-Sara']


def check_activity(repo, project, active_repos, start_date, handin_date):
    """Record `repo` in `active_repos` if any commit in the window touched a
    file whose path contains `project`; return the (possibly updated) dict."""
    try:
        commits = repo.get_commits(since=start_date, until=handin_date)
        for commit in commits:
            for cf in commit.files:
                if project in cf.filename:
                    active_repos[repo.name] = repo
                    return active_repos
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Best-effort: unreadable repos count as inactive.
        pass
    return active_repos


# Add active repos to dictionary
for i, repo in enumerate(all_repos):
    if repo.name in disregard_repos:
        continue
    print('processing repo', str(i), 'out of', str(all_repos.totalCount), end='\r')
    # Run function to test presence of an active data project
    active_repos = check_activity(repo, 'dataproject', active_repos, date0, handin_date)

# Get team names and write out all active repos
teams = []
for k in active_repos.keys():
    print(k)
    teams.append(k.replace('projects-2021-', ''))
teams

# + tags=[]
# Create data frame for storing the assignment
random.seed(1)
teams_df = pd.DataFrame(data=teams, columns=['team'])
irrelevant = teams_df.team.isin(disregard_repos)
# FIX: idiomatic boolean negation `~` instead of np.invert(...) — identical
# result for a boolean Series.
teams_df = teams_df.loc[~irrelevant, :]
teams_df.sort_values(by='team', inplace=True)
teams_df.reset_index(drop=True, inplace=True)
teams_df['peer_group_1'] = np.nan
teams_df['peer_group_2'] = np.nan
display(teams_df)

# +
# Algorithm for assigning peers to teams: each team draws 2 distinct peers
# (never itself); a team that has been drawn twice is removed from the pool
# so everyone ends up reviewed by exactly 2 teams.
N = len(teams_df.team.values)
counter = [0 for _ in range(N)]  # how many times each team has been drawn
assigned = []                    # teams already drawn twice

for i, _ in enumerate(teams_df.team):
    # FIX: logical `and` instead of bitwise `&` between booleans.
    pop = [t for t in range(N) if (t != i) and (t not in assigned)]
    peers = random.sample(pop, 2)
    for c in [0, 1]:
        teams_df.iloc[i, 1 + c] = teams_df.team[peers[c]]
        counter[peers[c]] += 1
        if counter[peers[c]] == 2:
            assigned.append(peers[c])

display(teams_df)
# -

# Test that everyone is assigned to exactly 2 groups
peergroups = teams_df.peer_group_1.values.tolist() + teams_df.peer_group_2.values.tolist()
counts = Counter(peergroups)
print('Everyone is assigned to exactly 2 groups?: ', set(counts.values()) == {2})

# Store the resulting list in an excel file available on MS Teams
teams_df.to_excel('Model Project - peer assignment.xlsx', sheet_name='Data project', index=False)
projects/PeerFeedbackAssignments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 64-bit
#     name: python3
# ---

# # Package installation (for JupyterHub)
# !apt install -qq enchant
# !pip install pyenchant
# !pip install sacrebleu

# # Imports
import requests
import json
import enchant
import csv
import re
import time
import itertools
from sacrebleu import sentence_bleu
import multiprocessing as mp
import os
import pandas as pd

# # Helper functions

# +
def jsonToDict(route) -> dict:
    """Open the JSON file at `route` and return its content as a dict."""
    with open(route, encoding="utf-8") as f:
        return json.load(f)


def queryJSON(queryURL, json):
    """Send the question inside the `json` record to `queryURL` and return
    the service's JSON response.

    NOTE(review): the parameter name `json` shadows the json module inside
    this function; kept unchanged for caller compatibility.
    """
    question = json['question']
    files = {
        'question': (None, question),
    }
    # To request the verbalized (long) answer instead, perform the request
    # with params=payload where payload = { ('text', 'true') }.
    response = requests.get(queryURL, files=files)
    # Return the response parsed as a JSON object.
    return response.json()


def exactMatchScore(string1, string2):
    """Token-level exact-match (EM) ratio between two token sequences."""
    matches = 0
    total = 0
    for (x, y) in itertools.zip_longest(string1, string2):
        if (x == y):
            matches += 1
        total += 1
    # FIX: the original divided by zero when both sequences were empty.
    # Two empty sequences match exactly, so score 1.0.
    if total == 0:
        return 1.0
    return matches / total


def writeResults(csvRoute, rows, counter, question, modelAnswerLong, obtainedAnswer, queryTime, textLen):
    """Extract the reference answer, compute the metrics, buffer one result
    row, and flush the buffer to `csvRoute` every 24 rows.

    Row contents: question, model answer, obtained answer, Levenshtein
    distance, BLEU score, EM score, query time, source-text length, and
    whether the question has a model answer at all.
    """
    # The expected answer is the text between square brackets in the
    # verbalized answer.
    modelAnswerLongGroups = re.search(r"\[([^\)]+)\]", modelAnswerLong)
    if modelAnswerLongGroups is None:
        # FIX: the original left `modelAnswer` unbound when the pattern did
        # not match (UnboundLocalError). No reference answer -> nothing to score.
        return
    modelAnswer = modelAnswerLongGroups.group(1)

    isAnswered = "YES"
    if modelAnswer == "answer":
        isAnswered = "NO"

    distance = "None"
    if obtainedAnswer is not None:
        distance = enchant.utils.levenshtein(modelAnswer, obtainedAnswer)

    reference = modelAnswer.split()
    candidate = obtainedAnswer.split()
    rows.append(
        [question, modelAnswer, obtainedAnswer, distance,
         sentence_bleu(obtainedAnswer, [modelAnswer]).score,
         exactMatchScore(reference, candidate),
         queryTime, textLen, isAnswered]
    )
    # NOTE(review): Manager proxies make `counter.value += 1` a non-atomic
    # read-modify-write across worker processes — confirm whether occasional
    # lost updates (and hence delayed flushes) are acceptable here.
    counter.value += 1
    # Flush every 24 buffered rows.
    if (counter.value != 0 and counter.value % 24 == 0):
        with open(csvRoute, 'a', newline='', encoding="utf-8") as f:
            (pd.DataFrame.from_records(rows, columns=header)).to_csv(f, header=False, index=False, sep=';', quoting=csv.QUOTE_ALL)
        rows[:] = []  # clear the shared buffer in place


def evaluateQuestion(csvRoute, i, rows, counter, queryURL):
    """Worker task: run one query (timing it) and record the results.

    Used to parallelise query execution and result writing.
    """
    # Measure how long the query takes.
    queryStartTime = time.time()
    jsonResponse = queryJSON(queryURL, i)
    queryTime = round((time.time() - queryStartTime), 2)
    # Lower-case both answers before scoring.
    writeResults(csvRoute, rows, counter, i['question'], i['verbalized_answer'].lower(), jsonResponse['answer'].lower(), queryTime, jsonResponse['textLen'])
# -

# # Main function

def EQAKGMetrics(pool, rows, counter, JSONroute, queryURL, csvRoute):
    """Given a JSON file of questions/answers (questions under the key
    'question', answers under 'verbalized_answer'), a query URL and an output
    CSV path:

    - ask every question in the JSON file,
    - compare each response against the expected answer, computing several
      metrics (Levenshtein distance, BLEU, EM, ...),
    - write the question, expected answer, obtained answer and metrics to CSV.
    """
    VQuandaData = jsonToDict(JSONroute)

    # Write the CSV header first.
    with open(csvRoute, 'w', newline='', encoding="utf-8") as f:
        csvwriter = csv.writer(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
        csvwriter.writerow(header)

    # Parallelise with asynchronous tasks.
    for i in VQuandaData:
        pool.apply_async(evaluateQuestion, (csvRoute, i, rows, counter, queryURL))
    pool.close()
    pool.join()

    # Flush whatever rows remain in the buffer.
    with open(csvRoute, 'a', newline='', encoding="utf-8") as f:
        (pd.DataFrame.from_records(rows, columns=header)).to_csv(f, header=False, index=False, sep=';', quoting=csv.QUOTE_ALL)

# # Script execution

# +
# Shared state: the row buffer, the row counter and the CSV header are
# module-level so worker processes can access them.
rows = None
counter = None
header = ["Question", "Answer", "Response", "Levenshtein Distance", "BLEU Score", "EM Score", "Query Time", "Text Length", "Is Answered"]

if __name__ == '__main__':
    with mp.Manager() as manager:
        rows = manager.list([])
        counter = manager.Value('i', 0)
        # FIX: the original passed initargs=(counter, rows) without an
        # initializer, which multiprocessing silently ignores; the workers
        # receive the Manager proxies as task arguments instead.
        pool = mp.Pool(processes=6)
        queryUrl = "http://localhost:5000/eqakg/dbpedia/en?text=false"
        #queryUrl = "https://librairy.linkeddata.es/eqakg/dbpedia/en?text=false"
        EQAKGMetrics(pool, rows, counter, "test.json", queryUrl, "results/VQuanda.csv")
# -
test/datasets/VQuAnDa/EQAKGMetrics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + jupyter={"outputs_hidden": false}
# %matplotlib inline
# -

# # Balance model complexity and cross-validated score
#
# This example balances model complexity and cross-validated score by
# finding a decent accuracy within 1 standard deviation of the best accuracy
# score while minimising the number of PCA components [1].
#
# The figure shows the trade-off between cross-validated score and the number
# of PCA components. The balanced case is when n_components=6 and accuracy=0.80,
# which falls into the range within 1 standard deviation of the best accuracy
# score.
#
# [1] Hastie, T., Tibshirani, R., Friedman, J. (2001). Model Assessment and
# Selection. The Elements of Statistical Learning (pp. 219-260). New York,
# NY, USA: Springer New York Inc..
#
# --
#
# **NOTE:** This is sourced from the ```scikit-learn``` learning module found here:
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_refit_callable.html#sphx-glr-auto-examples-model-selection-plot-grid-search-refit-callable-py
#
# --

# +
# Suppress FutureWarnings globally for this flat example script.
from warnings import simplefilter

simplefilter(action='ignore', category=FutureWarning)

# + jupyter={"outputs_hidden": false}
# Author: Wenhao Zhang
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC


def lower_bound(cv_results):
    """
    Calculate the lower bound within 1 standard deviation
    of the best `mean_test_scores`.

    Parameters
    ----------
    cv_results : dict of numpy(masked) ndarrays
        See attribute cv_results_ of `GridSearchCV`

    Returns
    -------
    float
        Lower bound within 1 standard deviation of the
        best `mean_test_score`.
    """
    means = cv_results['mean_test_score']
    winner = np.argmax(means)
    return means[winner] - cv_results['std_test_score'][winner]


def best_low_complexity(cv_results):
    """
    Balance model complexity with cross-validated score.

    Parameters
    ----------
    cv_results : dict of numpy(masked) ndarrays
        See attribute cv_results_ of `GridSearchCV`.

    Return
    ------
    int
        Index of a model that has the fewest PCA components
        while its test score stays within 1 standard deviation
        of the best `mean_test_score`.
    """
    cutoff = lower_bound(cv_results)
    # Indices of all candidates whose mean score clears the cutoff.
    candidates = np.flatnonzero(cv_results['mean_test_score'] >= cutoff)
    # Among those, pick the one with the fewest PCA components.
    comp_counts = cv_results['param_reduce_dim__n_components'][candidates]
    return candidates[comp_counts.argmin()]


# PCA followed by a linear SVM, searched over the PCA dimensionality.
pipe = Pipeline([
        ('reduce_dim', PCA(random_state=42)),
        ('classify', LinearSVC(max_iter=1000, random_state=42)),
])

param_grid = {
    'reduce_dim__n_components': [2, 4, 6, 8]
}

# `refit=best_low_complexity` makes the final refit use the simplest model
# within one standard deviation of the best score.
grid = GridSearchCV(pipe, cv=10, n_jobs=1, param_grid=param_grid,
                    scoring='accuracy', refit=best_low_complexity)

digits = load_digits()
grid.fit(digits.data, digits.target)

n_components = grid.cv_results_['param_reduce_dim__n_components']
test_scores = grid.cv_results_['mean_test_score']

# Bar chart of accuracy per number of components, with the best score and
# the one-standard-deviation cutoff drawn as horizontal lines.
plt.figure()
plt.bar(n_components, test_scores, width=1.3, color='b')

lower = lower_bound(grid.cv_results_)
plt.axhline(np.max(test_scores), linestyle='--', color='y',
            label='Best score')
plt.axhline(lower, linestyle='--', color='.5', label='Best score - 1 std')

plt.title("Balance model complexity and cross-validated score")
plt.xlabel('Number of PCA components used')
plt.ylabel('Digit classification accuracy')
plt.xticks(n_components.tolist())
plt.ylim((0, 1.0))
plt.legend(loc='upper left')

best_index_ = grid.best_index_
print("The best_index_ is %d" % best_index_)
print("The n_components selected is %d" % n_components[best_index_])
print("The corresponding accuracy score is %.2f"
      % grid.cv_results_['mean_test_score'][best_index_])
plt.show()
# -
further_resources_learn/nn_gridsearchcv/nn_gridsearch__balmodcompl_cvscore__doc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Blakester609/final-project-basketball-IS-115/blob/main/final_project_IS_115_basketball.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="6xxdvJ9H_vHN" colab={"base_uri": "https://localhost:8080/"} outputId="9b5aca03-a6a6-4d04-cb41-1c8517cd965b" # !pip install cookiecutter # + colab={"base_uri": "https://localhost:8080/"} id="FrXSM-JGDIDe" outputId="be0148a0-a6ba-4442-999e-814fc4041080" from google.colab import drive drive.mount('/content/drive') # + id="N-IuJdY__4zJ" # + id="8773U7wc_0Xu" colab={"base_uri": "https://localhost:8080/"} outputId="318861ba-ae58-4376-bd33-2c4269c3ba42" # !cookiecutter -c v1 https://github.com/drivendata/cookiecutter-data-science # + id="0QMQj6CfAQHI" colab={"base_uri": "https://localhost:8080/"} outputId="5b75b62d-0eeb-41c9-9574-b56e77424893" # !ls # + id="n_l5pFH_Ag56" # !cd final_project/ # + id="8tHMpZCUAkHK"
final_project_IS_115_basketball.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.5
#     language: python
#     name: python3
# ---

# Assignment TEMPLATE: several cells below are intentionally left blank for
# students to complete; they are documented but not filled in.

# + [markdown] id="_th7e2nu0At7" colab_type="text"
# # SIT742: Modern Data Science
# **(Assessment Task 02: Bank Marketing Data Analytics)**
#
# ---
# - Materials in this module include resources collected from various open-source online repositories.
# - You are free to use, change and distribute this package.
#
# Prepared by **SIT742 Teaching Team**
#
#
# ---
#
# **Project Group Information:**
#
# - Names:
# - Student IDs:
# - Emails:
#
# ---

# + [markdown] colab_type="text" id="eASNvREtBU9G"
# # 1.Import Spark

# + colab_type="code" id="duCJfMrnGu00" cellView="code" colab={}
# Install Java 8 + Spark 2.4.0 into the Colab VM, then point the env vars at them.
# !pip install wget
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
# !wget -q https://archive.apache.org/dist/spark/spark-2.4.0/spark-2.4.0-bin-hadoop2.7.tgz
# !tar xf spark-2.4.0-bin-hadoop2.7.tgz
# !pip install -q findspark
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.4.0-bin-hadoop2.7"

# + colab_type="code" id="aGOo805LA-fM" colab={}
# findspark makes the pip-less Spark install importable as `pyspark`.
import findspark
findspark.init()
from pyspark.sql import SparkSession

# + [markdown] colab_type="text" id="K7bEtO_fBZmE"
# # 2.Read and check data

# + id="EADPspOv0Auh" colab_type="code" colab={}
# Download the bank-marketing CSV into the working directory.
import wget

link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Assessment/2019/data/bank.csv'
DataSet = wget.download(link_to_data)

# + id="tiM-PiiR0Aup" colab_type="code" colab={}
# !ls

# + colab_type="code" id="FQ8Ts9eZBA-M" colab={}
# Import the 'bank.csv' as a Spark dataframe and name it as df
spark = SparkSession.builder.appName('ml-bank').getOrCreate()
df = spark.read.csv('bank.csv', header = True, inferSchema = True)
df.printSchema()
df.show(5)

# + colab_type="code" id="XJGWVc0yB0UA" colab={}
# Check data distribution
# You may use printSchema()
#

# + [markdown] colab_type="text" id="wKQMhrtFChHa"
# # 3.Select features

# + colab_type="code" id="VU5xMqN_RyM2" colab={}
#Select features ('age', 'job', 'marital', 'education', 'default', 'balance', 'housing', 'loan', 'campaign', 'pdays', 'previous', 'poutcome', 'deposit') as df2
# NOTE: incomplete by design — to be completed by students (the bare
# assignment below is a SyntaxError until a right-hand side is added).
df2=

# + colab_type="code" id="vwF5sqRYa_eI" colab={}
#Remove invalid rows/records using spark.sql

# + colab_type="code" id="A_T8qvxzR-oI" colab={}
#Covert categorical features to metric features using One hot encoding

# + [markdown] colab_type="text" id="pOqHERYJCQuC"
# ## 3.1 normalisation

# + colab_type="code" id="cfL6_sca5VwI" colab={}
#Apply Min-Max normalisation on each attribute using MinMaxScaler
from pyspark.ml.feature import MinMaxScaler

# + [markdown] colab_type="text" id="9trUY7ZgCzhW"
# # 4.Unsupervised learning

# + [markdown] colab_type="text" id="KTQfUch2Cmmi"
# ## 4.1 K-means

# + colab_type="code" id="dGGZI70Ohqgg" colab={}
# Perform unsupervised learning on df2 with k-means
# You can use whole df2 as both training and testing data,
# Evaluate the clustering result using Accuracy.
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import ClusteringEvaluator
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.evaluation import BinaryClassificationEvaluator

# + [markdown] colab_type="text" id="FHom8o2KCt36"
# ## 4.2 PCA

# + colab_type="code" id="wT4cx5uGTjmj" colab={}
#Generate a scatter plot using the first two PCA components to investigate the data distribution.
from pyspark.ml.feature import PCA
from pyspark.ml.linalg import Vectors

# + [markdown] colab_type="text" id="ulp_uILXCv4Z"
# # 5.Supervised learning

# + colab_type="code" id="0O7tszcPfnHN" colab={}
# 70/30 train/test split with a fixed seed for reproducibility.
train, test = df2.randomSplit([0.7, 0.3], seed = 742)
print("Training Dataset Count: " + str(train.count()))
print("Test Dataset Count: " + str(test.count()))

# + [markdown] colab_type="text" id="2SsHdh7YC-eN"
# ## 5.1 LogisticRegression

# + colab_type="code" id="Vqo_ywFQYxSj" colab={}
# Logistic Regression
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.evaluation import BinaryClassificationEvaluator

# + colab_type="code" id="hANwFUzhgG83" colab={}
#Exam the coefficients

# + [markdown] colab_type="text" id="evM5eiJoDHw2"
# ## 5.2 Decision tree

# + colab_type="code" id="He4mlHb7hBoY" colab={}
#Decision tree
from pyspark.ml.classification import DecisionTreeClassifier

# + [markdown] colab_type="text" id="CaE-Z_IlDKXF"
# ## 5.3 NaiveBayes

# + colab_type="code" id="v2XL6I0t7irt" colab={}
#NaiveBayes
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
Assessment/2019/SIT742Task2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import diff_classifier.aws as aws
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import numpy.ma as ma

# %matplotlib inline


def plot_ft_distribution(prefix, bins, labels, feature, folder="01_18_Experiment", show=True):
    """Plot a cumulative stacked-bar breakdown of ``feature`` per brain region.

    Downloads the 15 per-video feature CSVs for ``prefix`` from S3 (videos
    0-4 are cortex, 5-9 hippocampus, 10-14 thalamus), histograms ``feature``
    into ``bins`` and draws one cumulative stacked bar per region.

    Parameters
    ----------
    prefix : str
        File prefix, e.g. 'P3_S1_R'; the part before the first '_' is the
        sample (pup) name used as the remote subfolder.
    bins : sequence of numbers
        Histogram bin edges.
    labels : sequence
        Unused; retained for interface compatibility.
    feature : str
        Column of the features CSV to histogram.
    folder : str
        Remote experiment folder on S3.
    show : bool
        Whether to call ``plt.show()`` before returning.

    Returns
    -------
    numpy.ndarray
        Array of shape (len(bins)-1, 3): cumulative fractions per bin for
        (cortex, hippocampus, thalamus).
    """

    def cumulative(ls):
        # Running sum of ls, rounded to 4 decimals at each step.
        out = [0] * len(ls)
        out[0] = np.round(ls[0], 4)
        for i in range(1, len(ls), 1):
            out[i] = np.round(out[i - 1] + ls[i], 4)
        return out

    def rearrange(*args):
        # Stack equal-length 1D sequences as the columns of a 2D array.
        counter = 0
        old_size = 0
        for ls in args:
            counter = counter + 1
            old_size = len(ls)
        new_lists = np.zeros((old_size, counter))
        ncounter = 0
        for ls in args:
            for i in range(0, old_size):
                new_lists[i, ncounter] = ls[i]
            ncounter = ncounter + 1
        return new_lists

    def generate_legend(bins, ends=True):
        # Human-readable range labels; first/last bins are open-ended
        # ('< x', '> y') when ends is True.
        nbins = len(bins)
        lbins = [0] * (nbins - 1)
        for i in range(0, nbins - 1, 1):
            if i == 0 and ends:
                lbins[i] = '< {}'.format(bins[i + 1])
            elif i == nbins - 2 and ends:
                lbins[nbins - 2] = '> {}'.format(bins[i])
            else:
                lbins[i] = '{} -- {}'.format(bins[i], bins[i + 1])
        return lbins

    vids = 15
    to_analyze = []
    puppies = []
    hist = [0] * vids
    for vid in range(0, vids):
        pref = "{}_{}".format(prefix, "%04d" % vid)
        pup = prefix.split('_')[0]
        puppies.append(pup)
        fname = 'features_{}.csv'.format(pref)
        to_analyze.append(fname)
        # NOTE(review): unlike the calls further below, no bucket_name is
        # passed here — confirm the default bucket matches 'ccurtis.data'.
        aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname)
        features = pd.read_csv(to_analyze[vid])
        # Mask NaN/inf entries before histogramming.
        mfeatures = ma.masked_invalid(features[feature])
        mmask = mfeatures.mask
        mfeatures = features[feature][~mmask]
        hist[vid] = np.histogram(mfeatures, bins=bins)[0]

    # Sum the 5 videos of each region, normalise, and accumulate.
    cortex = [sum(x) for x in zip(hist[0], hist[1], hist[2], hist[3], hist[4])]
    cortex = cumulative(cortex / sum(cortex))
    hipp = [sum(x) for x in zip(hist[5], hist[6], hist[7], hist[8], hist[9])]
    hipp = cumulative(hipp / sum(hipp))
    thal = [sum(x) for x in zip(hist[10], hist[11], hist[12], hist[13], hist[14])]
    thal = cumulative(thal / sum(thal))
    data = rearrange(cortex, hipp, thal)

    plt.figure(figsize=(6, 6))
    N = 3
    width = 0.35
    ind = np.arange(N)
    cmap = matplotlib.cm.get_cmap('viridis')
    ssize = data.shape[0]
    legend = list(reversed(generate_legend(bins)))
    ax = {}
    axes = [0] * ssize
    for num in range(0, ssize, 1):
        rgba = cmap((num) * (1.01 / (ssize)))
        # Draw the largest cumulative bar first so smaller ones overlay it.
        ax[num] = plt.bar(ind, data[ssize - num - 1, :], width, color=rgba)
        axes[num] = ax[num]
    plt.title('{} breakdown'.format(feature))
    plt.xticks(ind, ('cortex', 'hippocampus', 'thalamus'))
    plt.legend(axes, legend, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    #plt.subplots_adjust(left=0, bottom=0, right=3, top=1, wspace=0, hspace=0)
    if show:
        plt.show()
    return data


features = pd.read_csv('features_P3_S1_R_0000.csv')

feature = 'boundedness'
to_graph = [features[feature].tolist()]
pos = [1]

len(to_graph)

plt.violinplot(to_graph, pos, points=20, widths=0.3,
               showmeans=True, showextrema=False)
# FIX: the original cell ended with a bare dangling `plt.` (a SyntaxError);
# completed as plt.show() to display the figure.
plt.show()

# +
# NOTE(review): `prefix` is only assigned near the bottom of this notebook;
# these cells depend on out-of-order execution (run the bottom cell first).
#cortex
cortexnames = []
puppies = []
cortex = []
folder = "01_18_Experiment"
for vid in range(0, 5):
    pref = "{}_{}".format(prefix, "%04d" % vid)
    pup = prefix.split('_')[0]
    puppies.append(pup)
    fname = 'features_{}.csv'.format(pref)
    cortexnames.append(fname)
    aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname,
                    bucket_name='ccurtis.data')
    features = pd.read_csv(cortexnames[vid])
    cortex.extend(features[feature].tolist())

#hippocampus
hippnames = []
puppies = []
hipp = []
add = 5
for vid in range(0, 5):
    pref = "{}_{}".format(prefix, "%04d" % int(vid + add))
    pup = prefix.split('_')[0]
    puppies.append(pup)
    fname = 'features_{}.csv'.format(pref)
    hippnames.append(fname)
    aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname,
                    bucket_name='ccurtis.data')
    features = pd.read_csv(hippnames[vid])
    hipp.extend(features[feature].tolist())

#thalamus
thalnames = []
puppies = []
thal = []
add = 10
for vid in range(0, 5):
    pref = "{}_{}".format(prefix, "%04d" % int(vid + add))
    pup = prefix.split('_')[0]
    puppies.append(pup)
    fname = 'features_{}.csv'.format(pref)
    thalnames.append(fname)
    aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname,
                    bucket_name='ccurtis.data')
    features = pd.read_csv(thalnames[vid])
    thal.extend(features[feature].tolist())
# -


def set_axis_style(ax, labels):
    """Put category `labels` at ticks 1..len(labels) on a violin-plot x-axis."""
    ax.get_xaxis().set_tick_params(direction='out')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xticks(np.arange(1, len(labels) + 1))
    ax.set_xticklabels(labels)
    ax.set_xlim(0.25, len(labels) + 0.75)


# +
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
to_graph = [cortex, hipp, thal]
pos = [1, 2, 3]
axes.violinplot(to_graph, pos, points=20, widths=0.9,
                showmeans=True, showextrema=False)
labels = ['cortex', 'hipp', 'thal']
set_axis_style(axes, labels)
axes.tick_params(axis='both', which='major', labelsize=16)
# -


def feature_violin(prefix, feature='boundedness', folder="01_18_Experiment",
                   videos=5, labels=None, points=40, ylim=None,
                   majorticks=None):
    """Violin plots of ``feature`` for cortex / hippocampus / thalamus videos.

    Downloads ``3 * videos`` feature CSVs for ``prefix`` from S3 (first
    ``videos`` files are cortex, next hippocampus, last thalamus), pools the
    non-NaN values of ``feature`` per region, and draws three violins.

    Parameters
    ----------
    prefix : str
        File prefix, e.g. 'P3_S1_R'.
    feature : str
        Column of the features CSV to plot.
    folder : str
        Remote experiment folder on S3.
    videos : int
        Number of videos per region.
    labels : list of str, optional
        X-axis labels; defaults to ['cortex', 'hipp', 'thal'].
    points : int
        Number of points used to evaluate each violin's KDE.
    ylim : list of two numbers, optional
        Y-axis limits; defaults to [0, 1].
    majorticks : array-like, optional
        Y-axis tick positions; defaults to np.linspace(0, 1, 11).
    """
    # FIX: mutable/array default arguments replaced by None-sentinels; the
    # effective defaults below are identical to the original signature.
    if labels is None:
        labels = ['cortex', 'hipp', 'thal']
    if ylim is None:
        ylim = [0, 1]
    if majorticks is None:
        majorticks = np.linspace(0, 1, 11)

    #cortex
    cortexnames = []
    puppies = []
    cortex = []
    for vid in range(0, videos):
        pref = "{}_{}".format(prefix, "%04d" % vid)
        pup = prefix.split('_')[0]
        puppies.append(pup)
        fname = 'features_{}.csv'.format(pref)
        cortexnames.append(fname)
        aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname,
                        bucket_name='ccurtis.data')
        features = pd.read_csv(cortexnames[vid])
        cortex.extend(features[feature].dropna().tolist())

    #hippocampus
    hippnames = []
    puppies = []
    hipp = []
    add = videos
    for vid in range(0, videos):
        pref = "{}_{}".format(prefix, "%04d" % int(vid + add))
        pup = prefix.split('_')[0]
        puppies.append(pup)
        fname = 'features_{}.csv'.format(pref)
        hippnames.append(fname)
        aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname,
                        bucket_name='ccurtis.data')
        features = pd.read_csv(hippnames[vid])
        hipp.extend(features[feature].dropna().tolist())

    #thalamus
    thalnames = []
    puppies = []
    thal = []
    add = 2 * videos
    for vid in range(0, videos):
        pref = "{}_{}".format(prefix, "%04d" % int(vid + add))
        pup = prefix.split('_')[0]
        puppies.append(pup)
        fname = 'features_{}.csv'.format(pref)
        thalnames.append(fname)
        aws.download_s3('{}/{}/{}'.format(folder, pup, fname), fname,
                        bucket_name='ccurtis.data')
        features = pd.read_csv(thalnames[vid])
        thal.extend(features[feature].dropna().tolist())

    def set_axis_style(ax, labels):
        # Local copy of the module-level helper (kept for self-containment).
        ax.get_xaxis().set_tick_params(direction='out')
        ax.xaxis.set_ticks_position('bottom')
        ax.set_xticks(np.arange(1, len(labels) + 1))
        ax.set_xticklabels(labels)
        ax.set_xlim(0.25, len(labels) + 0.75)

    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 6))
    to_graph = [cortex, hipp, thal]
    pos = [1, 2, 3]
    axes.violinplot(to_graph, pos, points=points, widths=0.9,
                    showmeans=True, showextrema=False)
    set_axis_style(axes, labels)
    axes.tick_params(axis='both', which='major', labelsize=16)
    axes.set_ylim(ylim)
    axes.set_yticks(majorticks)
    plt.show()


features.head()

#COOH
feature = 'frames'
prefix = 'P3_S1_R'
ylim = [0, 600]
majorticks = np.linspace(0, 600, 11)
points = 1000
feature_violin(prefix, feature=feature, points=points, ylim=ylim,
               majorticks=majorticks)

#PEG
prefix = 'P3_S1_L'
feature_violin(prefix, feature=feature, points=points, ylim=ylim,
               majorticks=majorticks)
notebooks/development/05_16_18_quant_verification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/helenpiepie/helenpiepie.github.io/blob/master/Digital_Music_Recommendation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="QVeI1j4ccCRj" colab_type="text"
# # Collaborative Filtering Recommendation: Amazon Digital Music

# + [markdown] id="aoqKxhN4b_Ac" colab_type="text"
# ## Ingest

# + id="gORP0Aw_g6k5" colab_type="code" outputId="b2f836f8-ec91-4d2a-dbf7-dd4aa3b1eaf8" colab={"base_uri": "https://localhost:8080/", "height": 51}
# !pip install -q scikit-surprise

# + id="aKkyy1_QGn7a" colab_type="code" colab={}
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import show
import seaborn as sns
import io
from surprise import SVD, KNNBasic
from surprise import Dataset
# NOTE(review): `evaluate` was removed from scikit-surprise 1.1+, so this
# import fails on current versions — use
# surprise.model_selection.cross_validate (imported below) instead.
from surprise import evaluate, Reader
from surprise.model_selection import cross_validate

# + [markdown] id="ROMcKn9gFWAT" colab_type="text"
# Data Source: http://jmcauley.ucsd.edu/data/amazon/links.html

# + id="tmm7479hcIyr" colab_type="code" colab={}
# Ratings table: one row per (reviewID, productID, rating) triple.
df = pd.read_csv("https://raw.githubusercontent.com/helenpiepie/practice1/master/amazon_product.csv?token=<PASSWORD>%3D%3D")

# + id="6m63YrCMdUXW" colab_type="code" outputId="9d1cb89c-c967-45cb-d5fc-691e81aab245" colab={"base_uri": "https://localhost:8080/", "height": 204}
df.head()

# + id="gSLoMaLUdajF" colab_type="code" outputId="7df699aa-d355-478e-f9f7-7d4755ea56fd" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Drop the epoch timestamp and the leftover CSV index column.
df.drop(["unixReviewTime", "Unnamed: 0"], axis=1, inplace = True)
df.head()

# + [markdown] id="2ic3cOjbdpVI" colab_type="text"
# ## EDA

# + id="p8IT1lQ7drpU" colab_type="code" outputId="0314cf30-b507-4fad-8a9f-106a13b9bc0d" colab={"base_uri": "https://localhost:8080/", "height": 153}
df.info()

# + [markdown] id="5D-d44Es-hST" colab_type="text"
# ### Rating Distribution

# + id="s_qJ5uO9gCXd" colab_type="code" outputId="bac4d8a3-758f-4529-e20d-21c0f4c8aecc" colab={"base_uri": "https://localhost:8080/", "height": 415}
# Bar chart of rating counts, annotated with each bar's fraction of the total.
sns.set(style="darkgrid")
total = float(len(df)) # one person per row
ax = sns.countplot(x="rating", data=df)
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x()+p.get_width()/2., height + 3, '{:1.2f}'.format(height/total), ha="center")
show()

# + [markdown] id="QDT_YR1--pTh" colab_type="text"
# ### Rating by Product

# + id="PapaNSZjinRl" colab_type="code" outputId="31dd9cac-8cae-4585-9ed6-9921f98a31c7" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Ten most-rated products.
df[['productID', 'rating']].groupby(["productID"])['rating'].size().nlargest(10)

# + id="HPXcNvl2cX9G" colab_type="code" outputId="f780d60d-33c7-40a4-d5d3-3cb598185008" colab={"base_uri": "https://localhost:8080/", "height": 874}
# Ratings-per-product distribution, clipped at 300 to keep the tail readable.
product_df = df[['productID', 'rating']].groupby(["productID"])['rating'].size().clip(upper = 300).reset_index(name="counts")
plt.hist(product_df["counts"], bins = 100)

# + [markdown] id="R-Aillv5-52s" colab_type="text"
# ### Rating by User

# + id="c1PWjzRHinWZ" colab_type="code" outputId="ad4c5e6f-c001-41a0-b5e2-f43cd6c3a0c8" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Ten most-active reviewers.
df[['reviewID', 'rating']].groupby(["reviewID"])['rating'].size().nlargest(10)

# + id="Ts3TkwA0g6iU" colab_type="code" outputId="cb0bb529-7ff3-4f80-cb58-6c5cde112699" colab={"base_uri": "https://localhost:8080/", "height": 483}
# Ratings-per-user distribution, clipped at 15.
review_df = df[['reviewID', 'rating']].groupby(["reviewID"])['rating'].size().clip(upper = 15).reset_index(name="counts")
plt.hist(review_df["counts"], bins = 15)

# + [markdown] id="JWJ5JE6thOdT" colab_type="text"
# ## Modeling

# + [markdown] id="loKqp4yT_O7a" colab_type="text"
# ### Load Dataset to Surprise

# +
# + id="vZlv8BGGmqrE" colab_type="code" colab={}
# Build a Surprise Dataset from the (user, item, rating) columns.
reader = Reader(rating_scale=(0, 5))
data = Dataset.load_from_df(df[['reviewID', 'productID', 'rating']], reader)

# + id="3CjbOh-worIE" colab_type="code" outputId="2e7dc8c8-55c1-4c15-b55b-568b0cf52aa9" colab={"base_uri": "https://localhost:8080/", "height": 34}
data

# + id="Yh8g1dIClaem" colab_type="code" colab={}
# Use every rating for training (no hold-out split here).
trainset = data.build_full_trainset()

# + [markdown] id="eoCe2tbl_az-" colab_type="text"
# ### Train SVD Model - Predict User's Rating of Product

# + id="I2RDUXujg6ok" colab_type="code" outputId="5a81809d-43c3-4271-d6ce-d9d6b9eaad9b" colab={"base_uri": "https://localhost:8080/", "height": 768}
algo = SVD()
# FIX: surprise.evaluate() was removed in scikit-surprise 1.1; use
# model_selection.cross_validate (already imported above) for the same
# RMSE/MAE cross-validation report.
cross_validate(algo, data, measures=['RMSE', 'MAE'], verbose=True)

# + id="gNy1a9Oj37c_" colab_type="code" outputId="b8d8c295-b06d-44d5-8f3f-029e70e3d04b" colab={"base_uri": "https://localhost:8080/", "height": 88}
# FIX: AlgoBase.train() was removed in scikit-surprise 1.1; fit() is the
# current API for training on a full trainset.
algo.fit(trainset)

# + [markdown] id="0ry3IaEltE96" colab_type="text"
# Afterwards to predict a rating we give the reviewID, productID and the actual rating as follows

# + id="SH8PfuNhxGa_" colab_type="code" outputId="3274cd6b-caf0-4844-c96d-537f3d4d3fa6" colab={"base_uri": "https://localhost:8080/", "height": 54}
# we can now query for specific predicions
uid = "A13HBXGVUGUT2R"  # raw user id
pid = "5555991584"  # raw product id

# get a prediction for specific users and items.
pred = algo.predict(uid, pid, r_ui=5, verbose=True)

# + [markdown] id="B0KlJHnR5SJ5" colab_type="text"
# ### Train KNN Basic Model - Recommend Similar Product

# + id="9FvmceG4zw_p" colab_type="code" colab={}
sim_options = {'name': 'cosine',
               'user_based': False  # compute similarities between items
               }
sim_algo = KNNBasic(sim_options=sim_options)

# + id="nmOXLmTGxGiU" colab_type="code" outputId="2b7ed85f-267c-4986-d669-ce0696d09119" colab={"base_uri": "https://localhost:8080/", "height": 68}
sim_algo.fit(trainset)

# + [markdown] id="qsH2iTncAG18" colab_type="text"
# Recommend 10 similar products for product "5555991584"

# + id="oEwFVHzyg6nY" colab_type="code" outputId="62d1a219-3d29-43c3-d07d-1ba8030be14a" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Map the raw product id to Surprise's inner id, fetch the 10 nearest
# neighbours by item-item cosine similarity, and map them back.
pid = "5555991584"
inner_id = sim_algo.trainset.to_inner_iid(pid)
product_neighbor = sim_algo.get_neighbors(inner_id, k=10)
product_neighbor = (sim_algo.trainset.to_raw_iid(inner)
                    for inner in product_neighbor)
for product in product_neighbor:
    print(product)

# + [markdown] id="sIfsIkbp5W5p" colab_type="text"
# ## Conclusion

# + [markdown] id="TNdzTG5nASkS" colab_type="text"
#
#
#
#
# *   The idea behind collaborative filtering is to recommend new items based on the similarity of users. Surprise is a handy and powerful Python library for simple recommendation systems to perform item-item collaborative filtering. Besides cosine similarity, we can also use Pearson or MSD as similarity measures.
# *   I would recommend using collaborative filtering to identify similar users and learn about their favorites. Then we can provide a recommendation list of new digital music to users. By tracking whether users accept the recommendation and listen to the music, we have more information of users’ likes and dislikes. Understanding users and making right recommendations are important to maintain customers.
#
#
Digital_Music_Recommendation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fuzzy logic and control
# =======================

# +
import numpy as np
import matplotlib.pyplot as plt
import skfuzzy as fuzz
# %matplotlib inline

# Generate trapezoidal membership function on range [0, 1]
x = np.arange(0, 5.05, 0.1)
mfx = fuzz.trapmf(x, [2.0, 2.5, 3, 4.5])
print(' x = ', x)

# Defuzzify this membership function five ways
defuzz_centroid = fuzz.defuzz(x, mfx, 'centroid')  # Same as skfuzzy.centroid
defuzz_bisector = fuzz.defuzz(x, mfx, 'bisector')
defuzz_mom = fuzz.defuzz(x, mfx, 'mom')
defuzz_som = fuzz.defuzz(x, mfx, 'som')
defuzz_lom = fuzz.defuzz(x, mfx, 'lom')

# Collect info for vertical lines
labels = ['centroid', 'bisector', 'mean of maximum', 'min of maximum',
          'max of maximum']
xvals = [defuzz_centroid, defuzz_bisector, defuzz_mom, defuzz_som, defuzz_lom]
colors = ['r', 'b', 'g', 'c', 'm']
# Membership value at each defuzzified x, so the marker lines end on the MF.
ymax = [fuzz.interp_membership(x, mfx, i) for i in xvals]

# Display and compare defuzzification results against membership function
plt.figure(figsize=(8, 5))
plt.plot(x, mfx, 'k')
for xv, y, label, color in zip(xvals, ymax, labels, colors):
    plt.vlines(xv, 0, y, label=label, color=color)
plt.ylabel('Fuzzy membership')
plt.xlabel('Universe variable (arb)')
plt.ylim(-0.1, 1.1)
plt.legend(loc=2)
plt.show()

# +
# Generate universe variables
#   * Quality and service on subjective ranges [0, 10]
#   * Tip has a range of [0, 25] in units of percentage points
x_qual = np.arange(0, 11, 1)
x_serv = np.arange(0, 11, 1)
x_tip = np.arange(0, 26, 1)

# Generate fuzzy membership functions.
# FIX: the hi/lo names were inverted relative to their triangle shapes
# (e.g. `qual_hi` was trimf([0, 0, 5]), which peaks at quality 0, so 'Bad'
# was plotted on the high-quality curve and rule 1 fired on *good* inputs).
# Names now match shapes, as in the scikit-fuzzy tipping tutorial:
# `_lo` peaks at the low end of the universe, `_hi` at the high end.
qual_lo = fuzz.trimf(x_qual, [0, 0, 5])
qual_md = fuzz.trimf(x_qual, [2, 5, 8])
qual_hi = fuzz.trimf(x_qual, [5, 10, 10])
serv_lo = fuzz.trimf(x_serv, [0, 0, 5])
serv_md = fuzz.trimf(x_serv, [0, 5, 10])
serv_hi = fuzz.trimf(x_serv, [5, 10, 10])
tip_lo = fuzz.trimf(x_tip, [0, 0, 13])
tip_md = fuzz.trimf(x_tip, [0, 13, 25])
tip_hi = fuzz.trimf(x_tip, [13, 25, 25])

# Visualize these universes and membership functions
fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, figsize=(8, 9))

ax0.plot(x_qual, qual_lo, 'b', linewidth=1.5, label='Bad')
ax0.plot(x_qual, qual_md, 'g', linewidth=1.5, label='Decent')
ax0.plot(x_qual, qual_hi, 'r', linewidth=1.5, label='Great')
ax0.set_title('Food quality')
ax0.legend()

ax1.plot(x_serv, serv_lo, 'b', linewidth=1.5, label='Poor')
ax1.plot(x_serv, serv_md, 'g', linewidth=1.5, label='Acceptable')
ax1.plot(x_serv, serv_hi, 'r', linewidth=1.5, label='Amazing')
ax1.set_title('Service quality')
ax1.legend()

ax2.plot(x_tip, tip_lo, 'b', linewidth=1.5, label='Low')
ax2.plot(x_tip, tip_md, 'g', linewidth=1.5, label='Medium')
ax2.plot(x_tip, tip_hi, 'r', linewidth=1.5, label='High')
ax2.set_title('Tip amount')
ax2.legend()

# Turn off top/right axes
for ax in (ax0, ax1, ax2):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

plt.tight_layout()

# +
# We need the activation of our fuzzy membership functions at these values.
# The exact values 6.5 and 9.8 do not exist on our universes...
# This is what fuzz.interp_membership exists for!
qual_level_lo = fuzz.interp_membership(x_qual, qual_lo, 6.5)
qual_level_md = fuzz.interp_membership(x_qual, qual_md, 6.5)
qual_level_hi = fuzz.interp_membership(x_qual, qual_hi, 6.5)

serv_level_lo = fuzz.interp_membership(x_serv, serv_lo, 9.8)
serv_level_md = fuzz.interp_membership(x_serv, serv_md, 9.8)
serv_level_hi = fuzz.interp_membership(x_serv, serv_hi, 9.8)

# Now we take our rules and apply them. Rule 1 concerns bad food OR service.
# The OR operator means we take the maximum of these two.
active_rule1 = np.fmax(qual_level_lo, serv_level_lo)

# Now we apply this by clipping the top off the corresponding output
# membership function with `np.fmin`
tip_activation_lo = np.fmin(active_rule1, tip_lo)  # removed entirely to 0

# For rule 2 we connect acceptable service to medium tipping
tip_activation_md = np.fmin(serv_level_md, tip_md)

# For rule 3 we connect high service OR high food with high tipping
active_rule3 = np.fmax(qual_level_hi, serv_level_hi)
tip_activation_hi = np.fmin(active_rule3, tip_hi)
tip0 = np.zeros_like(x_tip)

# Visualize this
fig, ax0 = plt.subplots(figsize=(8, 3))

ax0.fill_between(x_tip, tip0, tip_activation_lo, facecolor='b', alpha=0.7)
ax0.plot(x_tip, tip_lo, 'b', linewidth=0.5, linestyle='--', )
ax0.fill_between(x_tip, tip0, tip_activation_md, facecolor='g', alpha=0.7)
ax0.plot(x_tip, tip_md, 'g', linewidth=0.5, linestyle='--')
ax0.fill_between(x_tip, tip0, tip_activation_hi, facecolor='r', alpha=0.7)
ax0.plot(x_tip, tip_hi, 'r', linewidth=0.5, linestyle='--')
ax0.set_title('Output membership activity')

# Turn off top/right axes
for ax in (ax0,):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

plt.tight_layout()
Notebooks/Fuzzy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/projects/neurons/load_steinmetz_extra.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> &nbsp; <a href="https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/projects/neurons/load_steinmetz_extra.ipynb" target="_parent"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open in Kaggle"/></a>
# -

# ## Loading of Steinmetz data
#
# includes some visualizations

import numpy as np
import matplotlib.pyplot as plt

# + cellView="form"
# @title Import matplotlib and set defaults
from matplotlib import rcParams

rcParams['figure.figsize'] = [20, 4]
rcParams['font.size'] = 15
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
rcParams['figure.autolayout'] = True

# + cellView="form"
# @title Data retrieval
# Download the three supplemental .npz archives from OSF, skipping any file
# that is already present locally; failures are reported but not raised.
import os, requests

fname = ['steinmetz_st.npz']
fname.append('steinmetz_wav.npz')
fname.append('steinmetz_lfp.npz')
url = ["https://osf.io/4bjns/download"]
url.append("https://osf.io/ugm9v/download")
url.append("https://osf.io/kx3v9/download")

for j in range(len(url)):
  if not os.path.isfile(fname[j]):
    try:
      r = requests.get(url[j])
    except requests.ConnectionError:
      print("!!! Failed to download data !!!")
    else:
      # Non-200 responses are treated the same as connection failures.
      if r.status_code != requests.codes.ok:
        print("!!! Failed to download data !!!")
      else:
        with open(fname[j], "wb") as fid:
          fid.write(r.content)

# + cellView="form"
# @title Data loading
# Each archive stores a 'dat' object array with one entry per session.
dat_LFP = np.load('steinmetz_lfp.npz', allow_pickle=True)['dat']
dat_WAV = np.load('steinmetz_wav.npz', allow_pickle=True)['dat']
dat_ST = np.load('steinmetz_st.npz', allow_pickle=True)['dat']

# select just one of the recordings here. 11 is nice because it has some neurons in vis ctx.
dat = dat_LFP[11]
print(dat.keys())
dat = dat_WAV[11]
print(dat.keys())
dat = dat_ST[11]
print(dat.keys())
# -

# `dat_LFP`, `dat_WAV`, `dat_ST` contain 39 sessions from 10 mice, data from Steinmetz et al, 2019, supplemental to the main data provided for NMA. Time bins for all measurements are 10ms, starting 500ms before stimulus onset (same as the main data). The following fields are available across the three supplemental files.
#
# * `dat['lfp']`: recording of the local field potential in each brain area from this experiment, binned at `10ms`.
# * `dat['brain_area_lfp']`: brain area names for the LFP channels.
# * `dat['trough_to_peak']`: measures the width of the action potential waveform for each neuron. Widths `<=10` samples are "putative fast spiking neurons".
# * `dat['waveform_w']`: temporal components of spike waveforms. `w@u` reconstructs the time by channels action potential shape.
# * `dat['waveform_u]`: spatial components of spike waveforms.
# * `dat['ss']`: neurons by trials. Exact spikes times for each neuron and each trial, reference to the stimulus onset. A (neuron,trial) entry can be an empty list if that neuron did not fire at all on that trial.
# * `dat['%X%_passive']`: same as above for `X` = {`lfp`, `ss`} but for passive trials at the end of the recording when the mouse was no longer engaged and stopped making responses.
#
#
#
#

# Groupings of Allen-atlas brain-area acronyms into coarse regions.
# `regions[i]` names the group whose acronyms are `brain_groups[i]`;
# the final "other" region has no acronym list (areas not matched by any
# group fall through to it).
regions = ["vis ctx", "thal", "hipp", "other ctx", "midbrain",
           "basal ganglia", "cortical subplate", "other"]
brain_groups = [["VISa", "VISam", "VISl", "VISp", "VISpm", "VISrl"],  # visual cortex
                ["CL", "LD", "LGd", "LH", "LP", "MD", "MG", "PO", "POL",
                 "PT", "RT", "SPF", "TH", "VAL", "VPL", "VPM"],  # thalamus
                ["CA", "CA1", "CA2", "CA3", "DG", "SUB", "POST"],  # hippocampal
                # FIX: the last entry was " TT" (stray leading space), which
                # could never match the real acronym "TT" in the data.
                ["ACA", "AUD", "COA", "DP", "ILA", "MOp", "MOs", "OLF", "ORB",
                 "ORBm", "PIR", "PL", "SSp", "SSs", "RSP", "TT"],  # non-visual cortex
                ["APN", "IC", "MB", "MRN", "NB", "PAG", "RN", "SCs", "SCm",
                 "SCig", "SCsg", "ZI"],  # midbrain
                ["ACB", "CP", "GPe", "LS", "LSc", "LSr", "MS", "OT", "SNr",
                 "SI"],  # basal ganglia
                ["BLA", "BMA", "EP", "EPd", "MEA"]  # cortical subplate
                ]
projects/neurons/load_steinmetz_extra.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# 10-fold CV training of CatBoost and NGBoost regressors; per-fold test
# predictions are averaged and written out as two submission files.

# +
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# -

import os
print(os.getcwd())

train = pd.read_csv('../data/prep/train3_train.csv')
test = pd.read_csv('../data/prep/test3_valid.csv')
submission = pd.read_csv('../data/prep/submission_valid.csv')

# Drop the leading (saved index) column of each frame.
train = train.iloc[:, 1:]
test = test.iloc[:, 1:]

X = train.drop('target', axis = 1)
#y = np.log1p(train.target)
y = train.target
# NOTE(review): y is the RAW target (the log1p line above is commented out),
# yet validation labels and predictions below go through np.expm1 as if the
# model were trained on log1p(target) — confirm which transform is intended.
target = test[X.columns]

from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from catboost import CatBoostRegressor, Pool
from ngboost import NGBRegressor
from sklearn.metrics import make_scorer
from sklearn.model_selection import KFold

def NMAE(true, pred) -> float:
    # Normalized MAE: mean absolute error scaled by the mean |true| value.
    mae = np.mean(np.abs(true - pred))
    score = mae / np.mean(np.abs(true))
    return score

nmae_score = make_scorer(NMAE, greater_is_better=False)

kf = KFold(n_splits = 10, random_state = 42, shuffle = True)

# CatBoost: accumulate the fold-averaged test prediction in cb_pred.
cb_pred = np.zeros(target.shape[0])
cb_val = []
for n, (tr_idx, val_idx) in enumerate(kf.split(X, y)) :
    print(f'{n + 1} FOLD Training.....')
    tr_x, tr_y = X.iloc[tr_idx], y.iloc[tr_idx]
    # NOTE(review): val_y is expm1-transformed while tr_y is not (see above).
    val_x, val_y = X.iloc[val_idx], np.expm1(y.iloc[val_idx])
    tr_data = Pool(data = tr_x, label = tr_y)
    val_data = Pool(data = val_x, label = val_y)
    cb = CatBoostRegressor(depth = 4, random_state = 42, loss_function = 'MAE', n_estimators = 3000, learning_rate = 0.03, verbose = 0)
    cb.fit(tr_data, eval_set = val_data, early_stopping_rounds = 750, verbose = 1000)
    val_pred = np.expm1(cb.predict(val_x))
    val_nmae = NMAE(val_y, val_pred)
    cb_val.append(val_nmae)
    print(f'{n + 1} FOLD NMAE = {val_nmae}\n')
    target_data = Pool(data = target, label = None)  # built but never used
    # NOTE(review): test predictions are fold-averaged (divide by n_splits)
    # but NOT expm1-transformed, unlike val_pred above — confirm intended.
    fold_pred = cb.predict(target) / 10
    cb_pred += fold_pred
print(f'10FOLD Mean of NMAE = {np.mean(cb_val)} & std = {np.std(cb_val)}')

# ngboost
# Same CV scheme as above with NGBoost (same expm1 caveats apply).
ngb_pred = np.zeros(target.shape[0])
ngb_val = []
for n, (tr_idx, val_idx) in enumerate(kf.split(X, y)) :
    print(f'{n + 1} FOLD Training.....')
    tr_x, tr_y = X.iloc[tr_idx], y.iloc[tr_idx]
    val_x, val_y = X.iloc[val_idx], np.expm1(y.iloc[val_idx])
    ngb = NGBRegressor(random_state = 42, n_estimators = 1000, verbose = 0, learning_rate = 0.03)
    ngb.fit(tr_x, tr_y, val_x, val_y, early_stopping_rounds = 300)
    val_pred = np.expm1(ngb.predict(val_x))
    val_nmae = NMAE(val_y, val_pred)
    ngb_val.append(val_nmae)
    print(f'{n + 1} FOLD NMAE = {val_nmae}\n')
    target_data = Pool(data = target, label = None)  # built but never used
    fold_pred = ngb.predict(target) / 10
    ngb_pred += fold_pred
print(f'10FOLD Mean of NMAE = {np.mean(ngb_val)} & std = {np.std(ngb_val)}')

submission['target'] = ngb_pred
submission.to_csv("../out/ngb/ngb1_valid.csv", header = True, index = False)

submission['target'] = cb_pred
submission.to_csv("../out/cb/cb1_valid.csv", header = True, index = False)
src/05_modelingPyValid.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example of the processing of a unique image
# This example shows the image processing of the confocal image of the top
# surface of a raw silicon wafer from diamond wire cutting. The aim is to
# extract and characterize in terms of size and location the topography
# "features" induced by diamonds indentation or fragile fracture present on the
# wafer surface and superimposed with saw marks.
#

# +
# Standard library imports
import os
from pathlib import Path

# Path of 'site-packages' where useful packages are stored on MAC-OS
mac_packages = "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages"

# Root directory for all files (Windows path first, MAC-OS fallback)
root = Path("C:/Users/franc/OneDrive/Bureau/confocal/fichiers")
if not os.path.isdir(root) :
    root = Path('/Users/amal/Gwyddion Images /Carton-Louise')
store_file = root / Path('synthesis.xlsx')  # NOTE(review): defined but never used in this notebook

# Parameters of image reference
cut_number = '119'
wafer_number = '5'
image_ref = 'd'

# Specification of the directory of the experimental files and useful file names
root_input = root / Path('cut ' + cut_number + '/wafer '+ wafer_number + '/save_plu')
root_output = root / Path('cut ' + cut_number + '/wafer '+ wafer_number + '/results')
if not os.path.isdir(root_output) :
    os.mkdir(root_output)

# image_name with two possible spellings (with or without a blank after 'wafer')
image_name = 'wafer' + wafer_number + '-' + image_ref
# input file
input_file_name = root_input / Path(image_name + '.plu')
if not os.path.isfile(input_file_name) :
    # deal with additional blank
    blk = ' '
    image_name = 'wafer' + blk + wafer_number + '-' + image_ref
    # alternative input file
    input_file_name = root_input / Path(image_name + '.plu')

file_xlsx_res = root_output / Path(image_name + '_res.xlsx') # Excel file for the features morphological analysis
# -

# +
''' Generated image processing:
    - saw marks removal and background correction using a top hat filter with a pattern length
      larger than the largest feature to extract
    - image binarization using a threshold on feature depth larger than the residual image background
    - features morphological analysis (using the scipy label function). Generate a dataframe
        index|'x'|'long_x'| 'y'|'long_y'| 'size'|'depth'
      where
       - index: feature index
       - x: gravity center x position of the feature
       - long_x: maximum feature width
       - y: gravity center y position of the feature
       - long_y: maximum feature height
       - size: pixels number of the feature
       - depth : feature depth
    Plot of the results (images, histogram, box plot)
    Store the results in an excel file
'''

# 3rd party imports
import numpy as np
import matplotlib.colors as colx
import matplotlib.cm as cmx
import matplotlib.pyplot as plt

# Add package image_features_extract
try: # standard storage path of 'site-packages' on WIN
    import image_features_extract as ife
except: # NOTE(review): bare except -- on MAC-OS, add the site-packages path and retry the import
    import sys
    sys.path.append(mac_packages)
    import image_features_extract as ife

# %matplotlib inline

store_flag = True # if store_flag = True we store the results in an excel file
unmeasured_point_color = 'blue' # color of dots representing the unmeasured points

# parameters initialization
Top_hat_length = 50 # should be larger than the largest feature to extract
threshold = -0.3 # should be larger than the residual image background

# read .plu image (N rows by M columns)
N, M, confocal_img, _ = ife.read_plu_topography(input_file_name)

# substitute unmeasured values by interpolated ones
confocal_img1 = ife.fill_gap(confocal_img)

im_corr = ife.top_hat_flattening(confocal_img1, Top_hat_length, top_hat_sens=-1 )# background removal
im_bin = np.where(im_corr < threshold, 1, 0) # binarization
df = ife.analyse_morphologique(im_bin,im_corr) # morphological analysis stored in df dataframe
# NOTE(review): the cell description above documents a 'depth' column, but the
# plotting code below reads df['height'] -- confirm the column names actually
# produced by ife.analyse_morphologique.

fig = plt.figure(figsize=(15,15))

# stacked plot of the flattened image with dots at the position of the unmeasured points
plt.subplot(3,2,1)
plt.imshow(confocal_img1, cmap='gray')
plt.colorbar()
mask = np.where(confocal_img == 1000001)  # presumably 1000001 is the .plu sentinel for unmeasured points -- TODO confirm
plt.scatter(mask[1],mask[0],c=unmeasured_point_color,s =1,alpha=0.1 )
plt.title(f'Confocal image and unmeasured points ({unmeasured_point_color})')

# top-hat filtered image plot
plt.subplot(3,2,2)
plt.imshow(im_corr, cmap='gray')
plt.colorbar()
plt.title(f'Top hat length: {Top_hat_length}')

# binarized image plot
plt.subplot(3,2,3)
plt.imshow(im_bin, cmap='gray')
plt.colorbar()
plt.title(f'Threshold: {threshold} µm')

# reconstructed image plot: features colored by height, dot size ~ pixel count
ax1 = plt.subplot(3,2,4)
colorsMap = plt.get_cmap("plasma") #("RdYlGn")
g = list(zip(np.array(df['x']), np.array(df['y']), np.array(df['height']), np.array(df['size'])))
x,y,c,s=zip(*sorted(g, key=lambda tup: tup[2],reverse=False))  # sort by the height value so higher features are drawn last
cm = plt.get_cmap(colorsMap)
cNorm = colx.Normalize(vmin=min(c), vmax=max(c))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
conv_inch_pixel = 5 # sloppy determination, must be improved
plt.scatter(np.array(x) ,N-np.array(y) ,s=np.array(s)/conv_inch_pixel, c=scalarMap.to_rgba(c))
plt.xlim(0,M)
plt.ylim(0,N)
trash = plt.yticks(ticks=[N,N-100, N-200, N-300, N-400, N-500],labels=[0,100,200,300,400,500], rotation=0)
scalarMap.set_array(c)
plt.colorbar(scalarMap)
plt.title("Size and height")

# features size histogram plot
plt.subplot(3,2,5)
h = plt.hist(df['size'], bins=100)
plt.title(f'Size histogram')
plt.ylabel('size (pix\u00B2)')

# features size box plot
plt.subplot(3,2,6)
h = plt.boxplot(df['size'])
plt.title(f'Size bbox')
_ = plt.ylabel('size (pix\u00B2)')

# conditional storage of morphological analysis result
if store_flag:
    df.to_excel(file_xlsx_res)
# -

# +
'''
# Plot of selected horizontal and vertical profiles from the top-hat filtered image
'''

# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt

# %matplotlib inline

index =[ 50] # list of the row or column index to be plotted
fig = plt.figure(figsize=(15,5))

plt.subplot(1,2,1)
color =['k','r','b','y']
for c,index_ in zip(color,index):
    plt.plot(confocal_img1.T[index_],'-'+c, label=str(index_))
    plt.plot(im_corr.T[index_],'--'+c, label='cor '+str(index_))
plt.ylim(np.min(confocal_img1),np.max(confocal_img1))
plt.legend()
plt.ylabel('Height (µm)')
plt.title("Vertical profiles")

plt.subplot(1,2,2)
for c,index_ in zip(color,index):
    plt.plot(confocal_img1[index_],'-'+c,label=str(index_))
    plt.plot(im_corr[index_],'--'+c, label='cor '+str(index_))
plt.ylim(np.min(confocal_img1),np.max(confocal_img1))
plt.legend()
plt.ylabel('Height (µm)')
_ = plt.title("Horizontal profiles")
# -
ife_dwsi_single.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# String basics tutorial (karakter dizileri = "strings").
#
# FIX: as originally written the script could not run at all -- it contained a
# bare `'samet'in dersi var'` line (SyntaxError) and an uncaught `s[3] = 't'`
# (TypeError).  Both deliberate failure demos are now shown safely so the
# whole script executes end to end; no other behavior is changed.

# Single, double and triple quotes all create str objects.
'samet'
"samet"
"""samet"""

# A single-quoted string cannot contain a bare apostrophe:
#   'samet'in dersi var'   -> SyntaxError (kept as a comment so the file parses)
# Either use double quotes, or escape the apostrophe:
"samet'in dersi var"
'samet\'in dersi var'

a = "merhaba"
a
# Indexing: 0-based from the front, negative indices count from the back.
a[3]
a[0]
a[-1]
a[-2]
a[-4]

a = "Python Programlama Dili"
# Slicing: a[start:stop:step]; stop is exclusive.
a[4:10]
a[:10]
a[4:]
a[:]
a[:-1]
a[::2]
a[4:12:2]
a[::]
a[::-1]  # reversed copy

s = "Merhaba"
len(s)
s[3]
# Strings are immutable: item assignment raises TypeError.
try:
    s[3] = 't'
except TypeError:
    pass  # expected -- demonstrates immutability

# Concatenation and repetition.
a = "Python "
b = "Programlama "
c = "Dili "
a + b + c
"python" * 3

a = "Samet "
a = a + "Çelikbıçak"
a
karakter dizileri.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Simple linear regression of Sales on TV advertising spend: exploratory
# plots of the advertising dataset, an OLS fit with statsmodels, residual
# inspection, and test-set evaluation (RMSE, R^2).

# +
import warnings
warnings.filterwarnings('ignore')

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# -

advertising = pd.DataFrame(pd.read_csv("/advertising.csv"))

# Displaying the first five rows
advertising.head()

# Displaying the shape (200 rows, 4 columns)
advertising.shape

advertising.info()

advertising.describe()

# Checking null values (as a percentage of rows)
advertising.isnull().sum()*100/advertising.shape[0]

# Outlier analysis: box plot of each advertising channel
fig, axs = plt.subplots(3, figsize = (5,5))
plt1 = sns.boxplot(advertising['TV'], ax = axs[0])
plt2 = sns.boxplot(advertising['Newspaper'], ax = axs[1])
plt3 = sns.boxplot(advertising['Radio'], ax = axs[2])
plt.tight_layout()

sns.boxplot(advertising['Sales'])
plt.show()

# How Sales relate to the other variables (scatter plots)
sns.pairplot(advertising, x_vars=['TV', 'Newspaper', 'Radio'], y_vars='Sales', height=4, aspect=1, kind='scatter')
plt.show()

# Correlation between the variables
sns.heatmap(advertising.corr(), cmap="YlGnBu", annot = True)
plt.show()

# Assign the predictor TV to X and the response Sales to y
X = advertising['TV']
y = advertising['Sales']

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.7, test_size = 0.3, random_state = 100)

X_train.head()

y_train.head()

# Perform the linear regression with statsmodels
import statsmodels.api as sm

# Add a constant to get an intercept
X_train_sm = sm.add_constant(X_train)

# Fit the regression line using OLS
lr = sm.OLS(y_train, X_train_sm).fit()

# Print the parameters
lr.params

# Summary of the fitted regression
print(lr.summary())

# NOTE(review): the 6.948 intercept and 0.054 slope below are hard-coded copies
# of lr.params; if the data or split changes, these lines silently plot the
# wrong fit line.  Prefer using lr.params directly.
plt.scatter(X_train, y_train)
plt.plot(X_train, 6.948 + 0.054*X_train, 'r')
plt.show()

# Residuals on the training set
y_train_pred = lr.predict(X_train_sm)
res = (y_train - y_train_pred)

fig = plt.figure()
sns.distplot(res, bins = 15)
fig.suptitle('Error Terms', fontsize = 15) # Plot heading
plt.xlabel('y_train - y_train_pred', fontsize = 15) # X-label
plt.show()

# Looking for patterns in the residuals
plt.scatter(X_train,res)
plt.show()

# Add a constant to X_test and predict the y values corresponding to X_test_sm
X_test_sm = sm.add_constant(X_test)
y_pred = lr.predict(X_test_sm)

y_pred.head()

from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score

# Root mean squared error on the test set
np.sqrt(mean_squared_error(y_test, y_pred))

# R-squared on the test set
r_squared = r2_score(y_test, y_pred)
r_squared

# Visualizing the fit on the test set (same hard-coded coefficients -- see NOTE above)
plt.scatter(X_test, y_test)
plt.plot(X_test, 6.948 + 0.054 * X_test, 'r')
plt.show()
SALESPREDICTION(20191CSE0187).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (herschelhelp_internal)
#     language: python
#     name: helpint
# ---

# # Lockman-SWIRE Selection Functions
# ## Depth maps and selection functions for Lockman-SWIRE
#
# The simplest selection function available is the field MOC which specifies the area for which
# there is Herschel data. Each pristine catalogue also has a MOC defining the area for which that
# data is available.
#
# The next stage is to provide mean flux standard deviations which act as a proxy for the
# catalogue's 5$\sigma$ depth

# +
# %matplotlib inline
# #%config InlineBackend.figure_format = 'svg'

import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))

import os
import time

from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table, join
import numpy as np
from pymoc import MOC
import healpy as hp
#import pandas as pd #Astropy has group_by function so pandas isn't required.
import seaborn as sns

import warnings
#We ignore warnings - this is a little dangerous but a huge number of warnings are generated by empty cells later
warnings.filterwarnings('ignore')

from herschelhelp_internal.utils import inMoc, coords_to_hpidx
from herschelhelp_internal.masterlist import find_last_ml_suffix, nb_ccplots
# -

# +
#TMP_DIR = os.environ.get('TMP_DIR', "./data_tmp")
#OUT_DIR = os.environ.get('OUT_DIR', "./data")
#SUFFIX = find_last_ml_suffix()
#SUFFIX = "20171016"

master_catalogue_filename = "../dmu1/dmu1_ml_Lockman-swire/data_tmp/SWIRE.fits"
master_catalogue = Table.read(master_catalogue_filename)

print("Diagnostics done using: {}".format(master_catalogue_filename))

ORDER = 10 #TODO write code to decide on appropriate order

field_moc = MOC(filename="../dmu2/dmu2_field_coverages/Lockman-SWIRE_MOC.fits")
# -

master_catalogue.colnames

# Plotting a histogram of magnitudes gives a quick idea of depth.
# NOTE(review): the three distplot cells below all plot the same aperture-flux
# column f_ap_swire_irac1 (with different cuts) -- none actually plots a
# magnitude or a flux *error*, despite the surrounding text; check whether
# e.g. ferr_ap_swire_irac1 was intended for the "flux errors" cell.

sns.distplot(master_catalogue['f_ap_swire_irac1'][(master_catalogue['f_ap_swire_irac1'] > 0) & (master_catalogue['f_ap_swire_irac1'] < 90.)])

# and for flux
sns.distplot(master_catalogue['f_ap_swire_irac1'][(master_catalogue['f_ap_swire_irac1'] < 10.) & (master_catalogue['f_ap_swire_irac1'] > 0)] )

# Distribution of flux errors:
sns.distplot(master_catalogue['f_ap_swire_irac1'][(master_catalogue['f_ap_swire_irac1'] > 0.) &(master_catalogue['f_ap_swire_irac1'] < 3.)])

# Magnitude error as function of magnitude
ax = sns.jointplot((np.log10(master_catalogue['f_ap_swire_irac1'])), (master_catalogue['ferr_ap_swire_irac1'] ))
#ax.set(xticklabels=)

# ## I - Group masterlist objects by healpix cell and calculate depths
# We add a column to the masterlist catalogue for the target order healpix cell <i>per object</i>.

#Add a column to the catalogue with the order=ORDER hp_idx
master_catalogue.add_column(Column(data=coords_to_hpidx(master_catalogue['swire_ra'],
                                                        master_catalogue['swire_dec'],
                                                        ORDER),
                                   name="hp_idx_O_{}".format(str(ORDER))
                                   )
                            )

# +
# Group the catalogue by the order=ORDER pixel (astropy group_by; no pandas needed)
group = master_catalogue.group_by(["hp_idx_O_{}".format(str(ORDER))])
# -

# NOTE(review): despite the _p90 name, this aggregates the 10th percentile
# (np.nanpercentile(x, 10.)) while the per-band loop at the end of the
# notebook uses 90. -- confirm which percentile is intended here.
f_wfc_g_p90 = group["hp_idx_O_{}".format(str(ORDER)), 'f_swire_irac1'].groups.aggregate(lambda x: np.nanpercentile(x, 10.))
f_wfc_g_p90['f_swire_irac1'].name = 'f_swire_irac1_p90'

f_wfc_g_p90[:10].show_in_notebook()

# Mean flux error per order=ORDER cell
ferr_wfc_g_mean = group["hp_idx_O_{}".format(str(ORDER)), 'ferr_swire_irac1'].groups.aggregate(np.nanmean)
ferr_wfc_g_mean['ferr_swire_irac1'].name = 'ferr_swire_irac1_mean'

ferr_wfc_g_mean[:10].show_in_notebook()

# +
#Downgrade the groups from order=ORDER to order=13 and then fill out the appropriate cells
#hp.pixelfunc.ud_grade([2599293, 2599294], nside_out=hp.order2nside(13))
# -

# ## II Create a table of all Order=13 healpix cells in the field and populate it
# We create a table with every order=13 healpix cell in the field MOC. We then calculate the
# healpix cell at lower order that the order=13 cell is in. We then fill in the depth at every
# order=13 cell as calculated for the lower order cell that the order=13 cell is inside.

depths = Table()
depths['hp_idx_O_13'] = list(field_moc.flattened(13))

depths[:10].show_in_notebook()

# For each order=13 cell: convert its index to sky coordinates and back to the
# enclosing order=ORDER pixel index (nested scheme throughout).
depths.add_column(Column(hp.pixelfunc.ang2pix(2**ORDER,
                                              hp.pixelfunc.pix2ang(2**13, depths['hp_idx_O_13'], nest=True)[0],
                                              hp.pixelfunc.pix2ang(2**13, depths['hp_idx_O_13'], nest=True)[1],
                                              nest = True),
                         name="hp_idx_O_{}".format(str(ORDER))
                         )
                  )

depths[:10].show_in_notebook()

join(depths, ferr_wfc_g_mean)[:10].show_in_notebook()

# +
# For every flux column: join in the per-cell mean error and 90th-percentile flux
for col in master_catalogue.colnames:
    if col.startswith("f_"):
        errcol = "ferr{}".format(col[1:])
        depths = join(depths,
                      group["hp_idx_O_{}".format(str(ORDER)), errcol].groups.aggregate(np.nanmean),
                      join_type='left')
        depths[errcol].name = errcol + "_mean"
        depths = join(depths,
                      group["hp_idx_O_{}".format(str(ORDER)), col].groups.aggregate(lambda x: np.nanpercentile(x, 90.)),
                      join_type='left')
        depths[col].name = col + "_p90"

depths[:10].show_in_notebook()
# -

# ## III - Save the table of depth maps

depths.write("depths_lockman-swire_SWIRE.fits")
dmu8/4_Selection_function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Query NASA's Near Earth Object Web Service (NeoWs) for asteroids approaching
# Earth in a given week, then inspect one asteroid's properties.
#
# FIXES: `pprint` was used before it was imported (NameError), and the
# misspelled, undefined name `closes_aproch_date` was printed (NameError).

from pprint import pprint  # moved to the top: it is used throughout

import requests

api_key = "<KEY>"

# +
# NeoWs "feed" endpoint: all close approaches within the date range (max 7 days).
start_date = '2022-03-21'
end_date = '2022-03-27'
url = f'https://api.nasa.gov/neo/rest/v1/feed?start_date={start_date}&end_date={end_date}&api_key={api_key}'
url
# -

r = requests.get(url)
data = r.json()
data

data['element_count']

# Extract only the near-earth-objects payload (a dict keyed by date string).
NEO = data['near_earth_objects']
NEO

# Access a particular asteroid; other asteroids can be reached the same way.
first = NEO['2022-03-24'][0]
pprint(first)

# Finding information of an asteroid by name.
# NOTE(review): if '(2022 EE4)' is not present, the loop falls through and
# `asteroid` is simply the last item (or undefined for an empty list) --
# the cells below would then describe the wrong object.
all_asteroids = NEO[start_date]
for asteroid in all_asteroids:
    if asteroid['name'] == '(2022 EE4)':
        pprint(asteroid)
        break

asteroid['name']

asteroid['id']

asteroid['nasa_jpl_url']

asteroid['absolute_magnitude_h']

# NOTE(review): despite the name, this is the *estimated diameter* payload,
# not a distance (see av_distance below, built from estimated_diameter_min/max).
distance = asteroid['estimated_diameter']
distance

import astropy.units as u

# Mean of the min/max diameter estimates, as an astropy Quantity in metres.
av_distance = (distance['meters']['estimated_diameter_min']+distance['meters']['estimated_diameter_max'])/2 * u.meter
av_distance

asteroid['is_potentially_hazardous_asteroid']

# Close-approach details: miss distance and relative velocity of the first pass.
closest_aproch_date = asteroid['close_approach_data']
pprint(closest_aproch_date)  # fixed: previously printed the undefined `closes_aproch_date`

miss_distance = closest_aproch_date[0]['miss_distance']
pprint(miss_distance)

relative_velocity = closest_aproch_date[0]['relative_velocity']
pprint(relative_velocity)

asteroid['is_sentry_object']

# Do we have to destroy it to save us
Asteroid_with_NASA_API in Python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exceptions and Testing
# Things go wrong when programming all the time. Some of these "problems" are errors that stop
# the program from making sense. Others are problems that stop the program from working in
# specific, special cases. These "problems" may be real, or we may want to treat them as special
# cases that don't stop the program from running.
#
# These special cases can be dealt with using *exceptions*.
#
# # Exceptions
#
# Let's define a function that divides two numbers.

from __future__ import division


def divide(numerator, denominator):
    """
    Divide two numbers.

    Parameters
    ----------

    numerator: float
        numerator
    denominator: float
        denominator

    Returns
    -------

    fraction: float
        numerator / denominator
    """
    return numerator / denominator


print(divide(4.0, 5.0))

# But what happens if we try something *really stupid*?
# NOTE: this is a tutorial notebook -- several cells below deliberately raise
# uncaught exceptions; run as a plain script, execution stops at such a cell.

print(divide(4.0, 0.0))

# So, the code works fine until we pass in input that we shouldn't. When we do, this causes the
# code to stop. To show how this can be a problem, consider the loop:

denominators = [1.0, 0.0, 3.0, 5.0]
for denominator in denominators:
    print(divide(4.0, denominator))

# There are three sensible results, but we only get the first.
# There are many more complex, real cases where it's not obvious that we're doing something
# wrong ahead of time. In this case, we want to be able to *try* running the code and *catch*
# errors without stopping the code. This can be done in Python:

try:
    print(divide(4.0, 0.0))
except ZeroDivisionError:
    print("Dividing by zero is a silly thing to do!")

denominators = [1.0, 0.0, 3.0, 5.0]
for denominator in denominators:
    try:
        print(divide(4.0, denominator))
    except ZeroDivisionError:
        print("Dividing by zero is a silly thing to do!")

# The idea here is given by the names. Python will *try* to execute the code inside the `try`
# block. This is just like an `if` or a `for` block: each command that is indented in that block
# will be executed in order.
#
# If, and only if, an error arises then the `except` block will be checked. If the error that is
# produced matches the one listed then instead of stopping, the code inside the `except` block
# will be run instead.
#
# To show how this works with different errors, consider a different silly error:

try:
    print(divide(4.0, "zero"))
except ZeroDivisionError:
    print("Dividing by zero is a silly thing to do!")

# We see that, as it makes no sense to divide by a string, we get a `TypeError` instead of a
# `ZeroDivisionError`. We could catch both errors:

try:
    print(divide(4.0, "zero"))
except ZeroDivisionError:
    print("Dividing by zero is a silly thing to do!")
except TypeError:
    print("Dividing by a string is a silly thing to do!")

# We could catch *any* error:

try:
    print(divide(4.0, "zero"))
except:
    print("Some error occured")

# This doesn't give us much information, and may lose information that we need in order to
# handle the error. We can capture the exception to a variable, and then use that variable:

try:
    print(divide(4.0, "zero"))
except (ZeroDivisionError, TypeError) as exception:
    print("Some error occured: {}".format(exception))

# Here we have caught two possible types of error within the tuple (which *must*, in this case,
# have parantheses) and captured the specific error in the variable `exception`. This variable
# can then be used: here we just print it out.
#
# Normally best practise is to be as specific as possible on the error you are trying to catch.
#
# ## Extending the logic
#
# Sometimes you may want to perform an action *only* if an error did not occur. For example,
# let's suppose we wanted to store the result of dividing 4 by a divisor, and also store the
# divisor, but *only* if the divisor is valid.
#
# One way of doing this would be the following:

denominators = [1.0, 0.0, 3.0, "zero", 5.0]
results = []
divisors = []
for denominator in denominators:
    try:
        result = divide(4.0, denominator)
    except (ZeroDivisionError, TypeError) as exception:
        print("Error of type {} for denominator {}".format(exception, denominator))
    else:
        results.append(result)
        divisors.append(denominator)
print(results)
print(divisors)

# The statements in the `else` block are only run if the `try` block succeeds. If it doesn't -
# if the statements in the `try` block raise an exception - then the statements in the `else`
# block are not run.
#
# ## Exceptions in your own code
#
# Sometimes you don't want to wait for the code to break at a low level, but instead stop when
# you know things are going to go wrong. This is usually because you can be more informative
# about what's going wrong. Here's a slightly artificial example:


def divide_sum(numerator, denominator1, denominator2):
    """
    Divide a number by a sum.

    Parameters
    ----------

    numerator: float
        numerator
    denominator1: float
        Part of the denominator
    denominator2: float
        Part of the denominator

    Returns
    -------

    fraction: float
        numerator / (denominator1 + denominator2)
    """
    return numerator / (denominator1 + denominator2)


divide_sum(1, 1, -1)

# It should be obvious to the code that this is going to go wrong. Rather than letting the code
# hit the `ZeroDivisionError` exception automatically, we can *raise* it ourselves, with a more
# meaningful error message:


def divide_sum(numerator, denominator1, denominator2):
    """
    Divide a number by a sum.

    Parameters
    ----------

    numerator: float
        numerator
    denominator1: float
        Part of the denominator
    denominator2: float
        Part of the denominator

    Returns
    -------

    fraction: float
        numerator / (denominator1 + denominator2)
    """
    if (denominator1 + denominator2) == 0:
        raise ZeroDivisionError("The sum of denominator1 and denominator2 is zero!")
    return numerator / (denominator1 + denominator2)


divide_sum(1, 1, -1)

# There are [a large number of standard exceptions](https://docs.python.org/2/library/exceptions.html)
# in Python, and most of the time you should use one of those, combined with a meaningful error
# message. One is particularly useful: `NotImplementedError`.
#
# This exception is used when the behaviour the code is about to attempt makes no sense, is not
# defined, or similar. For example, consider computing the roots of the quadratic equation, but
# restricting to only real solutions. Using the standard formula
#
# $$ x_{\pm} = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a} $$
#
# we know that this only makes sense if $b^2 \ge 4ac$. We put this in code as:

# +
from math import sqrt


def real_quadratic_roots(a, b, c):
    """
    Find the real roots of the quadratic equation a x^2 + b x + c = 0, if they exist.

    Parameters
    ----------

    a : float
        Coefficient of x^2
    b : float
        Coefficient of x^1
    c : float
        Coefficient of x^0

    Returns
    -------

    roots : tuple
        The roots

    Raises
    ------

    NotImplementedError
        If the roots are not real.
    """

    discriminant = b**2 - 4.0*a*c

    if discriminant < 0.0:
        raise NotImplementedError("The discriminant is {} < 0. "
                                  "No real roots exist.".format(discriminant))

    x_plus = (-b + sqrt(discriminant)) / (2.0*a)
    x_minus = (-b - sqrt(discriminant)) / (2.0*a)

    return x_plus, x_minus
# -

print(real_quadratic_roots(1.0, 5.0, 6.0))

real_quadratic_roots(1.0, 1.0, 5.0)

# # Testing
#
# How do we know if our code is working correctly? It is not when the code runs and returns some
# value: as seen above, there may be times where it makes sense to stop the code even when it is
# correct, as it is being used incorrectly. We need to test the code to check that it works.
#
# *Unit testing* is the idea of writing many small tests that check if simple cases are behaving
# correctly. Rather than trying to *prove* that the code is correct in all cases (which could be
# very hard), we check that it is correct in a number of tightly controlled cases (which should
# be more straightforward). If we later find a problem with the code, we add a test to cover
# that case.
#
# Consider a function solving for the real roots of the quadratic equation again. This time, if
# there are no real roots we shall return `None` (to say there are no roots) instead of raising
# an exception.

# +
from math import sqrt


def real_quadratic_roots(a, b, c):
    """
    Find the real roots of the quadratic equation a x^2 + b x + c = 0, if they exist.

    Parameters
    ----------

    a : float
        Coefficient of x^2
    b : float
        Coefficient of x^1
    c : float
        Coefficient of x^0

    Returns
    -------

    roots : tuple or None
        The roots
    """

    discriminant = b**2 - 4.0*a*c

    if discriminant < 0.0:
        return None

    x_plus = (-b + sqrt(discriminant)) / (2.0*a)
    # NOTE: deliberate tutorial bug -- this line was copy-pasted from the
    # x_plus line above (wrong sign); the tests below expose it and the next
    # version of the function fixes it.
    x_minus = (-b + sqrt(discriminant)) / (2.0*a)

    return x_plus, x_minus
# -

# First we check what happens if there are imaginary roots, using $x^2 + 1 = 0$:

print(real_quadratic_roots(1, 0, 1))

# As we wanted, it has returned `None`. We also check what happens if the roots are zero, using
# $x^2 = 0$:

print(real_quadratic_roots(1, 0, 0))

# We get the expected behaviour. We also check what happens if the roots are real, using
# $x^2 - 1 = 0$ which has roots $\pm 1$:

print(real_quadratic_roots(1, 0, -1))

# Something has gone wrong. Looking at the code, we see that the `x_minus` line has been copied
# and pasted from the `x_plus` line, without changing the sign correctly.
# So we fix that error:

# +
from math import sqrt


def real_quadratic_roots(a, b, c):
    """
    Find the real roots of the quadratic equation a x^2 + b x + c = 0, if they exist.

    Parameters
    ----------

    a : float
        Coefficient of x^2
    b : float
        Coefficient of x^1
    c : float
        Coefficient of x^0

    Returns
    -------

    roots : tuple or None
        The roots
    """

    discriminant = b**2 - 4.0*a*c

    if discriminant < 0.0:
        return None

    x_plus = (-b + sqrt(discriminant)) / (2.0*a)
    x_minus = (-b - sqrt(discriminant)) / (2.0*a)

    return x_plus, x_minus
# -

# We have changed the code, so now have to re-run *all* our tests, in case our change broke
# something else:

print(real_quadratic_roots(1, 0, 1))

print(real_quadratic_roots(1, 0, 0))

print(real_quadratic_roots(1, 0, -1))

# As a final test, we check what happens if the equation degenerates to a linear equation where
# $a=0$, using $x + 1 = 0$ with solution $-1$:

print(real_quadratic_roots(0, 1, 1))

# In this case we get an exception, which we don't want. We fix this problem:

# +
from math import sqrt


def real_quadratic_roots(a, b, c):
    """
    Find the real roots of the quadratic equation a x^2 + b x + c = 0, if they exist.

    Parameters
    ----------

    a : float
        Coefficient of x^2
    b : float
        Coefficient of x^1
    c : float
        Coefficient of x^0

    Returns
    -------

    roots : tuple or float or None
        The root(s) (two if a genuine quadratic, one if linear, None otherwise)

    Raises
    ------

    NotImplementedError
        If the equation has trivial a and b coefficients, so isn't solvable.
    """

    discriminant = b**2 - 4.0*a*c

    if discriminant < 0.0:
        return None

    if a == 0:
        if b == 0:
            raise NotImplementedError("Cannot solve quadratic with both a"
                                      " and b coefficients equal to 0.")
        else:
            return -c / b

    x_plus = (-b + sqrt(discriminant)) / (2.0*a)
    x_minus = (-b - sqrt(discriminant)) / (2.0*a)

    return x_plus, x_minus
# -

# And we now must re-run all our tests again, as the code has changed once more:

print(real_quadratic_roots(1, 0, 1))

print(real_quadratic_roots(1, 0, 0))

print(real_quadratic_roots(1, 0, -1))

print(real_quadratic_roots(0, 1, 1))

# ## Formalizing tests
#
# This small set of tests covers most of the cases we are concerned with. However, by this point
# it's getting hard to remember
#
# 1. what each line is actually testing, and
# 2. what the correct value is meant to be.
#
# To formalize this, we write each test as a small function that contains this information for
# us. Let's start with the $x^2 - 1 = 0$ case where the roots are $\pm 1$:

# +
from numpy.testing import assert_equal, assert_allclose


def test_real_distinct():
    """
    Test that the roots of x^2 - 1 = 0 are \pm 1.
    """
    roots = (1.0, -1.0)
    assert_equal(real_quadratic_roots(1, 0, -1), roots,
                 err_msg="Testing x^2-1=0; roots should be 1 and -1.")
# -

test_real_distinct()

# What this function does is checks that the results of the function call match the expected
# value, here stored in `roots`. If it didn't match the expected value, it would raise an
# exception:

# +
def test_should_fail():
    """
    Comparing the roots of x^2 - 1 = 0 to (1, 1), which should fail.
    """
    roots = (1.0, 1.0)
    assert_equal(real_quadratic_roots(1, 0, -1), roots,
                 err_msg="Testing x^2-1=0; roots should be 1 and 1."
                 " So this test should fail")


test_should_fail()
# -

# Testing that one floating point number equals another can be dangerous.
Consider $x^2 - 2 x + (1 - 10^{-10}) = 0$ with roots $1 \pm 10^{-5}$:
""" roots = (1 + 1e-5, 1 - 1e-5) assert_allclose(real_quadratic_roots(1, -2.0, 1.0 - 1e-10), roots, err_msg="Testing x^2-2x+(1-1e-10)=0; roots should be 1 +- 1e-5.") def test_real_linear_degeneracy(): """ Test that the root of x + 1 = 0 is -1. """ root = -1.0 assert_equal(real_quadratic_roots(0, 1, 1), root, err_msg="Testing x+1=0; root should be -1.") # - test_no_roots() test_zero_roots() test_real_distinct() test_real_distinct_irrational() test_real_linear_degeneracy() # ## Nose # # We now have a set of tests - a *testsuite*, as it is sometimes called - encoded in functions, with meaningful names, which give useful error messages if the test fails. Every time the code is changed, we want to re-run all the tests to ensure that our change has not broken the code. This can be tedious. A better way would be to run a single command that runs all tests. `nosetests` is that command. # # The easiest way to use it is to put all tests in the same file as the function being tested. So, create a file `quadratic.py` containing # # ```python # from math import sqrt # from numpy.testing import assert_equal, assert_allclose # # def real_quadratic_roots(a, b, c): # """ # Find the real roots of the quadratic equation a x^2 + b x + c = 0, if they exist. # # Parameters # ---------- # # a : float # Coefficient of x^2 # b : float # Coefficient of x^1 # c : float # Coefficient of x^0 # # Returns # ------- # # roots : tuple or float or None # The root(s) (two if a genuine quadratic, one if linear, None otherwise) # # Raises # ------ # # NotImplementedError # If the equation has trivial a and b coefficients, so isn't solvable. 
# """ # # discriminant = b**2 - 4.0*a*c # if discriminant < 0.0: # return None # # if a == 0: # if b == 0: # raise NotImplementedError("Cannot solve quadratic with both a" # " and b coefficients equal to 0.") # else: # return -c / b # # x_plus = (-b + sqrt(discriminant)) / (2.0*a) # x_minus = (-b - sqrt(discriminant)) / (2.0*a) # # return x_plus, x_minus # # def test_no_roots(): # """ # Test that the roots of x^2 + 1 = 0 are not real. # """ # # roots = None # assert_equal(real_quadratic_roots(1, 0, 1), roots, # err_msg="Testing x^2+1=0; no real roots.") # # def test_zero_roots(): # """ # Test that the roots of x^2 = 0 are both zero. # """ # # roots = (0, 0) # assert_equal(real_quadratic_roots(1, 0, 0), roots, # err_msg="Testing x^2=0; should both be zero.") # # def test_real_distinct(): # """ # Test that the roots of x^2 - 1 = 0 are \pm 1. # """ # # roots = (1.0, -1.0) # assert_equal(real_quadratic_roots(1, 0, -1), roots, # err_msg="Testing x^2-1=0; roots should be 1 and -1.") # # def test_real_distinct_irrational(): # """ # Test that the roots of x^2 - 2 x + (1 - 10**(-10)) = 0 are 1 \pm 1e-5. # """ # # roots = (1 + 1e-5, 1 - 1e-5) # assert_allclose(real_quadratic_roots(1, -2.0, 1.0 - 1e-10), roots, # err_msg="Testing x^2-2x+(1-1e-10)=0; roots should be 1 +- 1e-5.") # # def test_real_linear_degeneracy(): # """ # Test that the root of x + 1 = 0 is -1. # """ # # root = -1.0 # assert_equal(real_quadratic_roots(0, 1, 1), root, # err_msg="Testing x+1=0; root should be -1.") # ``` # Then, in a terminal or command window, switch to the directory containing this file. Then run # # ``` # nosetests quadratic.py # ``` # # You should see output similar to # # ``` # nosetests quadratic.py # ..... # ---------------------------------------------------------------------- # Ran 5 tests in 0.006s # # OK # ``` # # Each dot corresponds to a test. If a test fails, `nose` will report the error and move on to the next test. 
`nose` automatically runs every function that starts with `test`, or every file in a module starting with `test`, or more. [The documentation](https://nose.readthedocs.org/en/latest/testing.html) gives more details about using `nose` in more complex cases. # # To summarize: when trying to get code working, tests are essential. Tests should be simple and cover as many of the easy cases and as much of the code as possible. By writing tests as functions that raise exceptions, and using a testing framework such as `nose`, all tests can be run rapidly, saving time. # # ## Test Driven Development # # There are many ways of writing code to solve problems. Most involve planning in advance how the code should be written. An alternative is to say in advance what tests the code should pass. This *Test Driven Development* (TDD) has advantages (the code always has a detailed set of tests, features in the code are always relevant to some test, it's easy to start writing code) and some disadvantages (it can be overkill for small projects, it can lead down blind alleys). A [detailed discussion is given by Beck's book](http://www.amazon.co.uk/Driven-Development-Addison-Wesley-Signature-Series/dp/0321146530), and a [more recent discussion in this series of conversations](http://martinfowler.com/articles/is-tdd-dead/). # # Even if TDD does not work for you, testing itself is extremely important.
content/notebooks/09-exceptions-testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # -----Bearing Fault Diagnosis----- -----Feature Extraction----- -----Student: <NAME> ---- -----Student ID: 196325 ---- ---- July 2020 ---- # # Step 1: import necessary libraries # + from __future__ import print_function, division import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import numpy as np import torchvision from torchvision import datasets, models, transforms import matplotlib.pyplot as plt import time import os import copy from livelossplot import PlotLosses from models import * plt.ion() # interactive mode from efficientnet_pytorch import EfficientNet import sls # - # Step 2: Data preparing # # #1: Calculating mean and standard deviation values for Normalization # + mean = 0.0 meansq = 0.0 count = 0 data_transforms = { 'train': transforms.Compose([ transforms.Resize((224,224)), #transforms.RandomHorizontalFlip(), transforms.ToTensor(), #transforms.Normalize([0.3837, 0.7844, 0.4731], [0.6414, 0.0938, 0.4934]) ]), 'val': transforms.Compose([ transforms.Resize((224,224)), #transforms.CenterCrop(224), #transforms.RandomHorizontalFlip(), transforms.ToTensor(), #transforms.Normalize([0.3852, 0.7849, 0.4712], [0.1462, 0.0209, 0.1106]) ]), } data_dir = './' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=32, shuffle=True, num_workers=4) for x in ['train', 'val']} mean = 0. std = 0. for images, _ in dataloaders['train']: batch_samples = images.size(0) # batch size (the last batch can have smaller size!) 
images = images.view(batch_samples, images.size(1), -1) mean += images.mean(2).sum(0) std += images.std(2).sum(0) mean /= len(dataloaders['train'].dataset) std /= len(dataloaders['train'].dataset) print(mean) print(std) # + # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ transforms.Resize((224,224)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.7681, 0.8457, 0.5798], [0.2784, 0.1300, 0.3544]) ]), 'val': transforms.Compose([ transforms.Resize((224,224)), #transforms.CenterCrop(224), #transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.7681, 0.8457, 0.5798], [0.2784, 0.1300, 0.3544]) ]), } data_dir = './' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} batch_size = 4 dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=16, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # - # #Show images before and after applied Argumentation # + def imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) #mean = np.array([0.485, 0.456, 0.406]) #std = np.array([0.229, 0.224, 0.225]) #inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated def imshow2(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.7681, 0.8457, 0.5798]) std = np.array([0.2784, 0.1300, 0.3544]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data inputs, 
classes = next(iter(dataloaders['train'])) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out, title=[class_names[x] for x in classes]) imshow2(out, title=[class_names[x] for x in classes]) # - # Stage 3: Import VGG 16 model and define Loss function, optimizer and initial plot import torch.nn as nn import torch.nn.functional as F net = vgg16_bn()# VGG-net 16 net = net.to(device) #Net().to(device) # + import torch.optim as optim criterion = nn.CrossEntropyLoss() #optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) liveloss = PlotLosses() #optimizer = torch.optim.Adam(net.parameters()) optimizer = sls.Sls(net.parameters()) # - # Stage 3: Training the model def train_model(model, criterion, optimizer, num_epochs=20): liveloss = PlotLosses() model = model.to(device) optimizer.zero_grad() for epoch in range(num_epochs): logs = {} for phase in ['train', 'val']: if phase == 'train': model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) loss = criterion(outputs, labels) if phase == 'train': optimizer.zero_grad() def closure(): probs = F.log_softmax(net(inputs), dim=1) loss = F.nll_loss(probs, labels, reduction="sum") return loss loss.backward() optimizer.step(closure) _, preds = torch.max(outputs, 1) running_loss += loss.detach() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / len(dataloaders[phase].dataset) epoch_acc = running_corrects.float() / len(dataloaders[phase].dataset) prefix = '' if phase == 'val': prefix = 'val_' logs[prefix + 'log loss'] = epoch_loss.item() logs[prefix + 'accuracy'] = epoch_acc.item() print(epoch_loss.item()) #plotter.plot('loss', 'train', 'Class Loss', epoch, epoch_loss) liveloss.update(logs) liveloss.draw() PATH = './../kqua_CWRU_vgg_4class_stft_no_noise.pth' torch.save(net.state_dict(), PATH) train_model(net, 
criterion, optimizer, num_epochs=10) # Stage 5: Testing and Evaluate the model dataiter = iter(dataloaders['val']) images, labels = dataiter.next() PATH = './../kqua_CWRU_vgg_4class_stft_no_noise.pth' # + # print images imshow(torchvision.utils.make_grid(images)) print('Goundtruth: ', ' '.join('%5s' % labels[j] for j in range(4))) # - net = vgg16_bn()#EfficientNet.from_name(model_name) net = net.to(device) #Net().to(device) net.load_state_dict(torch.load(PATH)) images = images.to(device) labels = labels.to(device) outputs = net(images).to(device) # + _, predicted = torch.max(outputs, 1) print('Predicted: ', ' '.join('%5s' % predicted[j] for j in range(4))) # + correct = 0 total = 0 with torch.no_grad(): for inputs, labels in dataloaders['val']: inputs = inputs.to(device) labels = labels.to(device) outputs = net(inputs).to(device) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %0.2f%%' % ( 100 * correct / total)) # - class_correct = list(0. for i in range(4)) class_total = list(0. for i in range(4)) with torch.no_grad(): for inputs, labels in dataloaders['val']: inputs = inputs.to(device) labels = labels.to(device) outputs = net(inputs).to(device) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(5): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(4): print('Accuracy of %5s : %0.2f %%' % ( image_datasets['val'].classes[i], 100 * class_correct[i] / class_total[i]))
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python for Machine Learning II # ## Copyright notice # # This version (c) 2019 <NAME>, [MIT License](LICENSE). # ## Imports import numpy as np import os from PIL import Image import numpy as np import sys sys.path.append('lib/') from esu_util import show_img, np_PIL, PIL_np from scipy.ndimage import convolve # ## Dictionaries # Dictionaries extend lists in a useful way: they introduce key value pairs which allow indexing an item by name, rather than by position. Dictionaries in Python use curly brackets, item names are strings, item values follow the name, separated by a colon: empty_dict = {} fruit = {'cars':3, 'dragons': 2} # We can get an item from the second dictionary like this: number_of_apples = fruit['apples'] print(number_of_apples) # To loop over a dictionary, Python 3 offers the `items()`function. For instance: for k, v in fruit.items(): print(k, v) # will print the contents of the dictionary. Keys and values can be accesses separately. Values of keys can be simply overwritten: fruit['apples'] = 15 # To check whether a single key is in the dictionary, use the in keyword: # if 'apples' in fruit: print('We have', fruit['apples'], 'apples.') # ## Classes # Python is a multi-paradigm programming language. It is not strictly [imperative](https://en.wikipedia.org/wiki/Imperative_programming) or [declarative](https://en.wikipedia.org/wiki/Declarative_programming) but combines feature from both [paradigms](https://en.wikipedia.org/wiki/Programming_paradigm). The building-block like structure of neural networks (or rather the useful abstraction of a building-block like structure), however, lends itself to an object-oriented approach. 
From Wikipedia: # # > Object-oriented programming (OOP) is a programming paradigm based on the concept of "objects", which may contain data, in the form of fields, often known as attributes; and code, in the form of procedures, often known as methods. A feature of objects is that an object's procedures can access and often modify the data fields of the object with which they are associated (objects have a notion of "this" or "self"). In OOP, computer programs are designed by making them out of objects that interact with one another. There is significant diversity of OOP languages, but the most popular ones are class-based, meaning that objects are instances of classes, which typically also determine their type. # # Objects are thus arbitrary structures consisting of methods and attributes. We can regard classes as recipes for building objects, or, conversely, we can regard them as abstraction of (future) objects. Classes define which initial attributes and which methods an object will have at its disposal. Objects are *instantiated* from a class. Classes have several predefines methods starting and ending with double underscores. The most commonly used is `__init__`, which is called once when an object is created. Classes - and thus objects - define their own scope, of course. Hence, all class *methods* must take the `self` argument to pass down a reference to the calling object. An example class `Apple` that makes use of all these techniques could be defined like this: class Apple(): color = 'red' diameter = 4 price = '0.0' def __init__(self): self.price = '1.99' def eat(self): self.diameter-=1 if (self.diameter <=0): self.diameter = 0 # We can now construct an object from the `Apple` class and access its attributes and methods: a = Apple() print(a.color) print(a.price) print(a.diameter) a.eat() print(a.diameter) # One important technique in OOP is *inheritance*. Inheritance means that we can create new classes that extend existing classes. 
For instance, climbing further down the ladder of abstraction, we could define a subclass `FujiApple`, which will have all the properties of a regular `Apple`but be more expensive. The base class for a class inheriting properties is defined in parenthesis behind the class name. The following class will have the same attributes and methods as the base class: class FujiApple(Apple): def __init__(self): self.price = 2.99 b = FujiApple() print(b.color) print(b.price) print(b.diameter) b.eat() print(b.diameter) # ## Numpy # # [NumPy](http://www.numpy.org/), an extension of the Python programming language for high-performance numerical computation, is an important part of all major machine learning frameworks (TensorFlow and PyTorch). Importantly, NumPy re-introduces multidimensional array with predefined *types* to Python, which helps particularly with machine learning tasks. To quote the NumPy docs: # # > NumPy’s main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In NumPy dimensions are called axes. # # To create a new 10x10 *NumPy 64 bit floating point array* initialized with zero, we can write: a = np.zeros([10,10], dtype=np.float64) # The most important property of a NumPy array, next to its content, is its *shape*. In machine learning, we operate on large, high-dimensional arrays of values that constantly change their shape. Thus, it is important to occasionally check what values, shape, and type an array contains: print(a) print(a.shape) print(a.dtype) # NumPy re-implements many Python functions in a more efficient way. For instance, the generation of random numbers in NumPy is provided by the `np.random.random()` function and its cousins. 
Moreover, most NumPy functions allow passing a `size` parameter (sometimes implicitly, sometimes explicitly) to create matrices directly from functions: # + for x in range(10): print(np.random.random(), np.random.randint(10)) r = np.random.random(size=(3,3)) print(r) # - # The more complex multidimensional arrays are, the more important does slicing become. For instance, to return the n-th z-axis plane of our three, dimensional array `a` we can write: n=1 print(a[:,:,n]) # A colon indicates that the respective dimension (axis) is returned completely, you can think of it as an "empty" slicing operator. NumPy is an incredibly powerful package. Please read through the [official tutorial](https://docs.scipy.org/doc/numpy/user/quickstart.html) to get to know some of the functions its provides. # ## <font color='red'>Exercises</font> # # 1. Define a class `vehicle`, a class `car`, and a class `mercedes`. A Mercedes is a car which is a vehicle. Invent some placeholder attributes and methods (`drive()`, `price`, etc.), and build a Mercedes object. # 2. `np.ones` creates NumPy arrays full of ones. Define a three-dimensional array (4x4x4) full of ones and set the value in the first row, second column, third z-plane to 0. Print the array to check if your implementation is correct. # ## Walking the file system # # Almost every image dataset comes in the form of files, often hundreds and thousands of files. Hence, it is important to be able to move through the file system efficiently. The `os` library makes this really easy. Let's see where we are first. The below command will output our *absolute* path, i.e. the path of the folder we are in right now in relation to the root of our file system (`/`on Unix-like systems). # Where are we? os.getcwd() # We can easily create a new folder, too. Note that this time the path of the folder we are creating is stated as a *relative* path, i.e. inr elation to our current working directory. 
You will see the new folder pop up in the list on the left. if not os.path.exists('saves'): os.makedirs('saves') # "Walking through" a folder and its subfolders is also easy. `os.walk()` will give us all the subfolders and files of a specified directory. folder = 'img' found_files = [] for root, dirs, files in os.walk(folder): for file in files: found_files.append(f'{root}/{file}') print(f'{len(found_files)} files found') print(found_files) # `.endswith()` will check for us which of the files are JPEG files. # Filter only JPEG images for file in found_files: if file.endswith('.jpg'): print(file) # ## Showing an image # # We will have to deal with two different image formats in this workshop. A `PIL` image and a `NumPy`image. Showing an image within Jupyter Lab requires some "magic" that we imported from the custom `esu_util` library at the top of this notebook. img_PIL = Image.open('img/person.jpg') show_img(img_PIL) img_PIL.save('test.jpg') # PIL infers the file format from the extension # ## Converting between PIL and NumPy formats # # We can only load and save `PIL`-type images. We will only operate on `NumPy`-type images. Thus, we have to convert often. This is fortunately easily done in one line. Our `show_img()` function can handle both types. img_np = PIL_np(img_PIL) show_img(img_np) img_PIL = np_PIL(img_np) show_img(img_PIL) # ## What is an image? # # In Python (for deep learning), images are NumPy arrays, i.e. multi-dimensional matrices. Color images have three channels. For PIL, images are something else: we will only need that to actually look at them in the notebook or write them to disk print(img_PIL) a = np.ones((200,200,3), dtype=np.uint8)*200 print(a.shape) # Show the "shape" of a matrix show_img(a) # ## Manipulating pixels b = a b[:,:,1:3] = 0 # Leave only the red channel at 255 show_img(b) img_np = PIL_np(img_PIL) img_np[0:100,0:100,0] = 0 show_img(img_np) # ## <font color='red'>Exercises</font> # # 1. 
Walk your home or documents folder and collect all Word document files, then display only the first three files and the last three files. # 2. Create a grey 300x300 pixel image and display it. # 3. Re-color 1/3 of the image red, 1/3 green, 1/3 blue and display it. # ## Convolution # # [Convolution](https://en.wikipedia.org/wiki/Convolution) ist arguably the most important concept in image-based machine learning. In short, convolution is a method to emphasize features in images by running a kernel - a small matrix - over an image - a large matrix - line by line and changing each pixel. # # ![](img/conv.gif) # # ![](img/conv2.gif) # # ![](img/conv3.gif) # # We can use the `convolve` function supplied by the scipy package to apply convolution to an image. All we need is a NumPy array that defines a kernel, for instance an approximate [Gaussian kernel](https://en.wikipedia.org/wiki/Kernel_(image_processing)) that blurs an image. # + # Open image, convert to grayscale img_PIL = Image.open('img/person.jpg').convert('L') show_img(img_PIL) # Convert image to NumPy array img = PIL_np(img_PIL) # Define Gaussian blur kernel kernel_gaussian = 1/16 * np.array([[1,2,1],[2,4,2],[1,2,1]]) # Run the kernel over the image with the help of scipy_convolve show_img(np_PIL(convolve(img, kernel_gaussian))) # - # We can also use convolution to implement other kinds of filters, for instance a sharpen filter that reverses the Gaussian blur filter. # Define sharpen kernel kernel_sharpen = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]]) show_img(np_PIL(convolve(convolve(img, kernel_gaussian), kernel_sharpen))) # ## <font color='red'>Exercises</font> # # 1. To implement convolution from scratch, we need a function that loops over every single pixel in an image and changes its value. Write a function that takes a numpy array, loops over every pixel in that object and changes it, for instance by adding or subtracting a number. Play with the effects of the changes on the image. 
Advanced: implement the actual convolution function as `custom_convolve`. # 2. Convolution runs into problems at the border of an image: what are potential solutions? (No implementation, just think about it).
A5HO_Python_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # (INTROOPTCODIGO)= # # 5.1 Introducción a optimización de código # ```{admonition} Notas para contenedor de docker: # # Comando de docker para ejecución de la nota de forma local: # # nota: cambiar `<ruta a mi directorio>` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker. # # `docker run --rm -v <ruta a mi directorio>:/datos --name jupyterlab_optimizacion_2 -p 8888:8888 -p 8787:8787 -d palmoreck/jupyterlab_optimizacion_2:3.0.0` # # password para jupyterlab: `<PASSWORD>` # # Detener el contenedor de docker: # # `docker stop jupyterlab_optimizacion_2` # # Documentación de la imagen de docker `palmoreck/jupyterlab_optimizacion_2:3.0.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/optimizacion_2). # # ``` # --- # Nota generada a partir de la [liga](https://www.dropbox.com/s/z465znq3wwao9ad/2.1.Un_poco_de_historia_y_generalidades.pdf?dl=0) # ```{admonition} Al final de esta nota el y la lectora: # :class: tip # # * Conocerá razones del por qué algunas implementaciones de algoritmos son ineficientes a diferentes niveles. Uno es al nivel de lenguajes utilizados. Otro es al nivel de componentes de un sistema computacional. # # * Conocerá herramientas para analizar y escribir programas para un alto rendimiento. # # * Conocerá rediseños que se han hecho a las unidades computacionales de un sistema computacional para resolver *bottlenecks*. # # * Conocerá la jerarquía de almacenamiento en el sistema de memoria de un sistema computacional. # # * Conocerá la diferencia entre programas secuenciales y en paralelo. # # * Aprenderá una metodología y enfoques utilizados para escribir programas con cómputo en paralelo. 
# # ``` # La implementación de los métodos o algoritmos en el contexto de grandes cantidades de datos o *big data* es crítica al ir a la práctica pues de esto depende que nuestra(s) máquina(s) tarde meses, semanas, días u horas para resolver problemas que se presentan en este contexto. En este contexto la [optimización de código o de software](https://en.wikipedia.org/wiki/Program_optimization) nos ayuda a la eficiencia. # # ## Temas a considerar para escribir un programa de máquina de alto rendimiento # Para tener un alto *performance* en un programa de máquina, deben considerarse las siguientes preguntas: # # * ¿Qué tanto aprovecha mi programa aspectos como ***data reuse*** y ***data locality***? # # La respuesta nos lleva a pensar en el número de instrucciones por ciclo y el número de ciclos que realiza el procesador. Entiéndase un ciclo por los pasos de leer una instrucción, determinar acciones a realizar por tal instrucción y ejecutar las acciones, ver [liga](https://en.wikipedia.org/wiki/Instruction_cycle). # # * ¿Cómo es mi ***data layout*** en el almacenamiento? (forma en la que están almacenados o dispuestos los datos) # # Dependiendo de la respuesta podemos elegir una arquitectura de computadoras u otra y así también un algoritmo u otro. # # * ¿Cuánto ***data movement*** o ***data motion*** realiza mi programa? (flujo de datos entre los distintos niveles de jerarquía de almacenamiento o entre las máquinas en un clúster de máquinas) # # La respuesta implica analizar el tráfico de datos entre las **jerarquías de almacenamiento** (o máquinas si estamos en un clúster de máquinas) y potenciales ***bottlenecks***. # ## Herramientas que tenemos a nuestra disposición para analizar y escribir programas de máquina para un alto rendimiento # * Vectorización que pueden realizar los procesadores. # # * Perfilamiento de código para lograr la eficiencia deseada. 
# # * Programación en lenguajes compilados en lugar de intérpretes (o combinando intérpretes con lenguajes compilados) # # * Conocimiento de los propósitos con los que fueron diseñados los procesadores para explotar su capacidad. Aquí decidimos si usamos **código secuencial** o **código en paralelo**. # # ...además necesitamos conocer las diferentes **arquitecturas** que pueden utilizarse para cómputo en paralelo. # # * ¿Alguien ya resolvió mi *bottleneck*? # # * Experiencia en el lenguaje de programación seleccionado. # # ## Vectorización, [BLAS: Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) y el uso del caché eficientemente. # Para comprender lo que la vectorización y finalmente las operaciones de la especificación BLAS en sus diferentes niveles tienen por objetivo resolver, revisemos de forma general el sistema de una computadora. # ### Un poco de historia y generalidades del sistema en una computadora # Las componentes fundamentales de un sistema en una computadora pueden simplificarse en: # # * Unidades computacionales. En éstas unidades nos interesa la pregunta ¿cuántos cálculos pueden realizar por segundo? # # * Unidades de memoria. En éstas unidades nos interesa la pregunta ¿cuántos datos pueden alojar y qué tan rápido puede leerse desde y escribirse hacia las distintas jerarquías? # # * Conexiones entre las unidades anteriores. Nos interesa ¿qué tan rápido pueden moverse datos de un lugar a otro? # ```{margin} # # Ver [liga](https://es.wikipedia.org/wiki/Caché) para información sobre cachés tipo L. # # ``` # Con esta simplificación tenemos como ejemplo a la CPU como unidad de cómputo conectada tanto a la RAM y a un disco duro como dos unidades de memoria y un *bus* que provee las conexiones entre estas partes. Otro ejemplo es considerar que la CPU tiene diferentes unidades de memoria en ella: los cachés tipo L1, L2, L3 y L4 conectadas a la CPU a través de otro *bus*. 
También la GPU es ejemplo de una unidad de cómputo conectada a una unidades de memoria como RAM y cachés. # Un dibujo simplificado y basado en una arquitectura de computadoras con nombre [Von Neumann](https://en.wikipedia.org/wiki/Von_Neumann_architecture) que nos ayuda a visualizar lo anterior en la **CPU** es el siguiente: # <img src="https://dl.dropboxusercontent.com/s/txsj5mzxyajbypa/von_Neumann.png?dl=0" heigth="500" width="500"> # * La **memoria principal** es una colección de ubicaciones que almacenan datos e instrucciones. Cada ubicación consiste de una dirección (*address*) que se utiliza para accesar a la ubicación y a sus contenidos. # # * La CPU está dividida en la **unidad de control y la unidad aritmética y lógica**. Aquí encontramos *registers* que son áreas o ubicaciones de almacenamiento (de datos, direcciones de memoria e información del estado de ejecución de un programa) de rápido acceso. # # * La **interconexión** o *bus* ayuda a la transferencia de datos e instrucciones entre la CPU y la memoria. # ```{admonition} Comentarios # # * En el dibujo no se está presentando los dispositivos de *input* y *output* pero sí aparecen en la arquitectura de Von Neumann. # # * También no se presentan en el dibujo unidades de almacenamiento como los discos duros pero también aparecen en la arquitectura de Von Neumann. Los discos duros se consideran dentro de las unidades de memoria y la CPU se conecta a ellos mediante un *bus*. # # * Si los datos se transfieren de la memoria a la CPU se dice que los datos o instrucciones son leídas y si van de la CPU a la memoria decimos que son escritos a memoria. # # * La separación entre la memoria y la CPU genera lo que se conoce como **Von Neumann *bottleneck*** y tiene que ver con la lectura/escritura y almacenamiento de datos e instrucciones. La interconexión determina la tasa a la cual se accede a éstos. # # ``` # ### Unidades de memoria # Su objetivo es el almacenamiento de bits de información. 
Como ejemplos tenemos la memoria RAM, discos duros o el caché. La principal diferencia entre cada tipo de unidad de memoria es la velocidad a la que pueden leer/escribir datos. Ésta velocidad depende enormemente de la forma en que se leen los datos. Por ejemplo, la mayoría de las unidades de memoria tienen un mejor *performance* al leer un gran pedazo de información que al leer muchos pedacitos. # ```{admonition} Comentario # # Desde el punto de vista de los lenguajes de programación como Python o R, un resultado del manejo automático de memoria en estos lenguajes, es la fragmentación de datos o ***data fragmentation*** que surge al no tener bloques contiguos de memoria. Esto causa que en lugar de mover todo un bloque contiguo de datos en una sola transferencia a través del *bus* se requieran mover pedazos de memoria de forma individual lo que causa un mayor tiempo de lectura. # # ``` # Las unidades de memoria tienen latencia que típicamente cambia dependiendo de una jerarquía de almacenamiento mostrada en el dibujo siguiente: # ```{admonition} Comentario # # Entiéndase por latencia el tiempo que le toma a la unidad o dispositivo para encontrar los datos que serán usados por el proceso. # # ``` # ### Jerarquías de almacenamiento # <img src="https://dl.dropboxusercontent.com/s/ahxsnpgp4rjdvw3/jerarquias_de_almacenamiento.png?dl=0" heigth="500" width="500"> # # En el dibujo anterior se representan típicos dispositivos de almacenamiento en una máquina. La capacidad de almacenamiento **disminuye** conforme nos movemos hacia arriba en el dibujo: mientras que en disco podemos almacenar terabytes de información, en los *registers* sólo podemos almacenar bytes o kilobytes. Por el contrario, la velocidad de lectura/escritura **disminuye** conforme nos movemos hacia abajo: la lectura y escritura en disco es órdenes de veces más tardado que en los *registers* (que físicamente están en el procesador). 
# ### Caché # # Entre las técnicas que tenemos a nuestro alcance para que un algoritmo pueda aprovechar el ***data layout*** de la información se encuentra el *caching*: el eficiente uso del caché. # # El caché es una memoria que está físicamente localizada más cercana a los registers del procesador para almacenar datos e instrucciones por lo que pueden ser accesados en menor tiempo que en otras unidades de memoria (como la RAM). # # El caché se diseñó para resolver el Von Neumann *bottleneck* al existir un límite de tasa de transferencia entre la memoria RAM y el procesador (CPU o GPU). Si pudiéramos mover datos de una forma infinitamente rápida, no necesitaríamos al caché pues el procesador podría obtener los datos en cualquier instante, en esta situación no existiría tal bottleneck. # # Aunque no tenemos en nuestros lenguajes de programación instrucciones del tipo "carga los datos en el caché" podemos usar los principios de **localidad** y **temporalidad** para mejorar la eficiencia de nuestros algoritmos. Los principios de localidad y temporalidad consisten en que el sistema de memoria tiende a usar los datos e instrucciones que físicamente son cercanos (localidad) y los datos e instrucciones que recientemente fueron usados (temporalidad). # # ### ¿Cómo funciona el acceso a la memoria en un sistema de computadora? # Si el procesador requiere un conjunto de datos para ejecutar instrucciones, el sistema de memoria carga un bloque de datos (aunque pueden ser también instrucciones), conocido como ***cache blocks*** o ***cache lines*** para que el procesador opere en ellos. # ### Ejemplo # En C al declarar un arreglo con la línea: `float z[20];` se le solicita al sistema de memoria que se alojen $20$ bloques contiguos de ubicaciones de memoria en la RAM, esto es: la ubicación para el almacenamiento de `z[1]` está inmediatamente después de la de `z[0]`. Si además tenemos un programa como el siguiente: # ```C # ... 
# float z[20]; # float sum=0; # int i=0; # for(i=0;i<20;i++) # sum+=z[i]; # ... # ``` # y nuestro cache line permite almacenar $16$ floats, entonces el sistema de memoria leerá los datos `z[0],...,z[15]` de la RAM al caché y el procesador realizará la suma. # # Posteriormente el procesador (que en este caso es la CPU) al accesar a los datos checa primero el caché, si las encontró se le conoce como ***cache hit***, si no lo encontró sería un ***cache miss*** y el sistema de memoria tendría que leer nuevamente desde la RAM. # ### Ejemplo # C almacena los arreglos de dos dimensiones en una forma ***row major***, esto es, aunque en papel representamos tales arreglos como un bloque rectangular, en la implementación en este lenguaje se están almacenando como un arreglo de una dimensión: el renglón $0$ es almacenado primero, a continuación el renglón $1$ y así sucesivamente. # # Observemos los siguientes códigos que realizan una multiplicación **secuencial** entre una matriz `A` y un vector `x` para obtener al vector `y`: # **Algoritmo: multiplicación matriz vector secuencial** # # # ```C # double A[MAX][MAX], x[MAX], y [MAX]; //MAX es un valor constante definido previamente # ... # # //bloque de código para inicializar A,x # //bloque de código para inicializar y con ceros # # //Algoritmo 1: # # for(i=0;i<MAX;i++) # for(j=0;j<MAX;j++) # y[i]+=A[i][j]*x[j]; # # //volver a asignar a y con ceros # # //Algoritmo 2: # # for(j=0;j<MAX;j++) # for(i=0;i<MAX;i++) # y[i]+=A[i][j]*x[j]; # # ``` # Supongamos que `MAX=4` y los elementos de `A` están almacenados en memoria como sigue: # # <img src="https://dl.dropboxusercontent.com/s/rgjh4pv0o1kbe24/ejemplo_cache.png?dl=0" heigth="500" width="500"> # # para simplificar el siguiente análisis supóngase que: # # * Ninguno de los elementos de `A` están en el caché al iniciar el par de loops. # # * Un *cache line* consiste de $4$ elementos de `A` y `A[0][0]` es el primer elemento del *cache line*. 
# # * A cada *cache line* le corresponde una única ubicación en el caché al que será asignado. # # * El caché sólo puede almacenar $4$ elementos de `A` o un *cache line*. # # Entonces para el algoritmo $1$ se tiene que la CPU y el sistema de memoria: # # 1) La CPU requiere `A[0][0]` que no se encuentra en el caché por lo que tenemos un *cache miss* y el sistema de memoria lee de RAM el *cache line*: `A[0][0] A[0][1] A[0][2] A[0][3]` y al estar en caché la CPU puede operar con `A[0][0]`. # # 2) La CPU requiere `A[0][1]` que sí se encuentra en el caché por lo que tenemos un *cache hit* y la CPU opera con éste dato. Posteriormente la CPU requiere `A[0][2]` y `A[0][3]` que se encuentran en caché y se tienen otros dos *cache hits*. # # 3) La CPU requiere `A[1][0]` que resulta en un *cache miss* y el sistema de memoria lee de RAM el *cache line*: `A[1][0] A[1][1] A[1][2] A[1][3]` y al estar en caché la CPU puede operar con `A[1][0]`. # # ... # # Finalmente para el algoritmo $1$ tenemos $4$ cache misses, uno por cada renglón. # ```{admonition} Observación # :class: tip # # ¿Cuál es el número de cache misses para el algoritmo 2? # # ``` # Mayor número de *cache misses* incrementa el tiempo de ejecución de las instrucciones por el procesador pues no solamente el procesador espera mientras se transfieren los datos de la RAM hacia el caché sino también se interrumpe el flujo de la ejecución del *pipeline*. Por ello es importante que los algoritmos trabajen con un buen ***data layout*** de la información en memoria y utilicen el ***data reuse*** y ***data locality***. # ```{admonition} Comentarios # # * El *pipelining* es un mecanismo utilizado por un procesador para la ejecución de instrucciones. Obstáculos al *pipelining* se encuentran los *cache misses* o el llamado *mispredicted branching*.
El *branch prediction*, relacionado con el *branching* en un código (para el *branching* piénsese por ejemplo en una línea de código del tipo `if...then`), es otro mecanismo del procesador para tratar de predecir la siguiente instrucción a ejecutar y cargar las porciones relevantes de memoria en el caché mientras se trabaja en la instrucción actual. El procesador al toparse con un *branching* en el código trata de hacer una suposición de qué dirección se tomará en el *branching* y precargar las instrucciones relevantes, si falla tiene ***branch-misses***. Aunque también son importantes estos aspectos al considerar la implementación de un algoritmo, la herramienta más rápida que tenemos para resolver los *bottlenecks* en este contexto, son trabajar en la localidad y temporalidad de los datos. # # * Un factor que incrementa el número de *cache-misses* es la fragmentación de datos, ver [Fragmentation](https://en.wikipedia.org/wiki/Fragmentation_(computing)). La fragmentación incrementa el número de transferencias de memoria hacia la CPU y además imposibilita la vectorización porque el caché no está lleno. El caché puede llenarse sólo al tener bloques contiguos de memoria alojados para los datos (la interconexión o *bus* sólo puede mover *chunks* de memoria contigua). Python en su implementación común [CPython](https://github.com/python/cpython), tiene ***data fragmentation*** por ejemplo al usar listas. Las listas de Python alojan locaciones donde se pueden encontrar los datos y no los datos en sí. Al utilizar listas para operaciones matriciales el *runtime* de Python tiene *overhead* en la transferencia de datos pues debe realizar *lookups* por índices, al no encontrarse de forma contigua los datos se tendrán un mayor número de *cache-misses* y también los *cores* deberán esperar hasta que los datos estén disponibles en el caché. 
Por lo anterior las listas no se recomiendan para operaciones vectoriales o matriciales pero sí se utilizarían en casos como almacenar diferentes tipos de valores. # # * En Python se tiene el paquete de [NumPy](https://numpy.org/) para almacenamiento de bloques de memoria contiguos de arreglos y uso de la capacidad de la CPU para operaciones en un modo vectorizado. Sin extensiones a Python con paquetes como `numpy` no sería posible en este lenguaje aprovechar la vectorización (capaz de realizar un procesador actual) ni alojar bloques de memoria contiguos. El no soporte para operaciones vectorizadas tiene que ver con que el [bytecode](https://docs.python.org/3.9/glossary.html#term-bytecode) de Python no está optimizado para vectorización. En el caso del alojamiento de bloques de memoria contiguos, Python es un *garbage-collected language* que permite que la memoria sea automáticamente alojada y liberada. No obstante tal característica causa problemas del tipo ***memory fragmentation*** que afecta la transferencia al caché de datos **usables** o **relevantes** para las operaciones (ver ejemplo del algoritmo 2 anterior). # # ``` # ```{admonition} Observación # :class: tip # # Los archivos con extensión `.pyc` en *Python* contienen el *bytecode*. Ver [if-python-is-interpreted-what-are-pyc-files](https://stackoverflow.com/questions/2998215/if-python-is-interpreted-what-are-pyc-files/2998544). Ver [bytecode](https://en.wikipedia.org/wiki/Bytecode) para una explicación más general. # # ``` # ### Interconexión, *bus* o capas de comunicación y transferencia de datos # Hay distintos *bus* que permiten la comunicación entre las unidades computacionales y las de memoria: # # * El *backside bus* que permite la conexión entre el caché y el procesador. Tiene la tasa de transferencia de datos más alta. # # * El *frontside bus* permite la conexión entre la RAM y los L's cachés. 
Mueve los datos para ser procesados por el procesador (CPU o GPU) y mueve los datos de salida (resultados) entre estas regiones de memoria. # # * El *external bus* cuya acción se realiza en los dispositivos de hardware como los discos duros y tarjetas de memoria hacia la CPU y el sistema de memoria. Este *bus* típicamente es más lento que el *frontside bus*. # # Entonces los datos se mueven del disco hacia la RAM con el *external bus*, luego de la RAM hacia el caché vía el *frontside bus* y finalmente del caché hacia la CPU con el *backside bus*. # ```{admonition} Comentarios # # * La GPU está conectada en un dispositivo periférico y se comunica através del *PCI bus*, (ver [PCI](https://en.wikipedia.org/wiki/Conventional_PCI)) el cual es más lento que el *frontside bus*. Como resultado, la transferencia de datos hacia la GPU y fuera de ésta es una operación costosa. Un diseño que atiende este problema es tener tanto a la CPU como a la GPU en el *frontside bus* para reducción de costos de transferencia hacia la GPU para grandes cantidades de información. # # * Otra capa de comunicación en un sistema de computadora es la red o *network*. Un dispositivo de red puede conectarse a una unidad de memoria u a otra unidad de computación. Típicamente la comunicación por red es mucho más lenta que las comunicaciones con el *backside, frontside, external bus* descritos antes. # # * La propiedad principal de un *bus* es su velocidad: ¿cuántos datos pueden moverse en un periodo de tiempo? Esta propiedad se obtiene combinando el *bus width* entendido como ¿cuántos datos se pueden mover en una transferencia? (físicamente se puede observar por el número de cables del *bus*) y el *bus frequency* ¿cuántas transferencias se pueden hacer por segundo? (físicamente se ve en la longitud de los cables que unen a las unidades de cómputo o memoria). 
# # * Es importante notar que el movimiento de los datos en una transferencia siempre es secuencial: un pedazo de datos es leído de memoria y es movido a otro lugar: no es posible leer un pedazo de datos y moverlo a distintos lugares o leer divisiones del pedazo de datos que estén en distintos lugares. # # * Un *bus width* grande ayuda a la vectorización pues en una sola transferencia se mueven los datos importantes. Un *bus frequency* alto puede ayudar al código a realizar una gran cantidad de lecturas de diferentes lugares de la memoria. Depende qué operación es la que se desea realizar lo que en un caso u otro convendrá. Por ejemplo, si el problema a resolver se relaciona con la cantidad de lecturas que se deben hacer, podríamos tener un *bus frequency* alto y un *bus width* pequeño para resolver un *bottleneck* de lecturas. # ``` # ### Unidades computacionales # ```{margin} # # Para referencias de instrucciones por ciclo (IPC) ver [liga](https://en.wikipedia.org/wiki/Instructions_per_cycle) y número de ciclos por segundo ver [liga](https://en.wikipedia.org/wiki/Clock_rate). # # ``` # Sea una CPU o una GPU, las unidades computacionales toman como input un conjunto de bits (que representan números por ejemplo) y producen otro conjunto de bits (por ejemplo la suma de los números). El performance de éstas unidades se mide en **instrucciones por ciclo** y en **ciclos por segundo**, nombrado *clock rate* o *clock speed*. La IPC puede incrementarse vía la **vectorización**. # Entre los rediseños que se han hecho para las unidades computacionales de un modelo clásico de Von Neumann con el objetivo de resolver los *bottlenecks*, mejorar la velocidad y el *performance* se encuentran: # ### Múltiples CPU's o cores # Incrementar el *clock speed* en una unidad computacional hace más rápido a un programa y también es importante la medida de IPC.
La IPC se puede incrementar vía la **vectorización** y es típico en procesadores que soportan las instrucciones llamadas ***Single Instruction Multiple Data*** (SIMD). La vectorización consiste en que dados múltiples datos, el procesador puede trabajar sobre ellos en un instante o tiempo (ejecución en **paralelo**): # <img src="https://dl.dropboxusercontent.com/s/mpfk9xmtq9fm7vm/SIMD.png?dl=0" heigth="450" width="450"> # ```{admonition} Observación # :class: tip # # El término *core* hoy en día lo usamos como sinónimo de procesador. # # ``` # ```{admonition} Comentarios # # * Este tipo de procesadores reemplazaron al modelo de Von Neumann clásico ***Single Instruction Single Data*** (SISD): # # <img src="https://dl.dropboxusercontent.com/s/bdx27axhnl3ug5n/SISD.png?dl=0" heigth="300" width="300"> # # en el que un conjunto de datos se procesaban en un tiempo determinado y no de forma simultánea o en **paralelo**. Así, se transitó de un diseño de hardware secuencial hacia un hardware paralelo. # # * Un ejemplo de procesadores SIMD son los procesadores vectoriales o en arreglo, ver [liga](https://en.wikipedia.org/wiki/Vector_processor). # # * En la práctica se ha visto que simplemente añadir más CPU's o cores al sistema **no siempre** aumenta la velocidad de ejecución en un programa. Existe una ley que explica lo anterior, **la ley de Amdahl**, la cual indica que si un programa está diseñado para ejecutarse en múltiples cores y tiene algunas secciones de su código que pueden sólo ejecutarse en un core, entonces éste será el *bottleneck* del programa. Por ejemplo, si tuviéramos que realizar una encuesta que tarda $1$ min a $100$ personas y tenemos una sola persona, entonces nos tardaríamos $100$ minutos (proceso serial). Si tenemos a $100$ personas entonces nos tardaríamos $1$ minuto en completar todas las encuestas (proceso en paralelo). 
Pero si tenemos más de $100$ personas, entonces no nos tardaremos menos de $1$ minuto pues las personas "extras" no podrán participar en realizar la encuesta. En este punto la única forma de reducir el tiempo es reducir el tiempo que le toma a una persona encuestar a otra (esta es la parte secuencial del programa). # # * Los sistemas SISD, SIMD y ***Multiple Instruction Multiple Data*** (MIMD), presentado a continuación: # # # <img src="https://dl.dropboxusercontent.com/s/ddze2xuzwn9bh6h/MIMD.png?dl=0" heigth="500" width="500"> # # son parte de la [taxonomía de Flynn](https://en.wikipedia.org/wiki/Flynn%27s_taxonomy) que clasifica a los sistemas dependiendo del número de stream de datos e instrucciones que puede procesar. Ejemplos de sistemas MIMD son máquinas multicore y clústers de máquinas. # # * En la taxonomía de Flynn hay una división más, la del sistema ***Simple Program Multiple Data*** (SPMD) en el que un mismo programa se ejecuta en múltiples datos, éste sistema lo encontramos en la GPU por ejemplo. # # * Hoy en día la industria continúa desarrollando y creando procesadores capaces de ejecutar instrucciones basadas en operaciones vectorizadas. Ver por ejemplo [Streaming SIMD Extensions 2: SSE2](https://en.wikipedia.org/wiki/SSE2) y [Advanced Vector Extensions: AVX](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions). # # * El incremento en IPC vía la vectorización se enfoca a que las instrucciones tiendan a completar trabajo útil por ciclo pues podría darse la situación en la que se tiene: a *high rate of instructions, but a low rate of actual work completed*. # # # ``` # (THREADINGHYPER)= # ### *Threading* o *Hyperthreading* # Otra funcionalidad que se les añadió a los procesadores para incrementar la velocidad de ejecución de un **proceso** y resolver el *bottleneck* de Von Neumann fue la capacidad de crear hilos, ***threads***, de ejecución en un programa contenidos en un proceso. 
Esto es nombrado *threading* o *hyperthreading* en una CPU o en un *core*. Básicamente el *threading* permite la ejecución de más de una instrucción en un mismo *core* "virtualizando" un procesador adicional (el sistema operativo "cree" que en lugar de haber un *core* hay dos). # ```{admonition} Definiciones # # Un proceso es una instancia de un programa que se ejecuta en el procesador y está compuesto por elementos como por ejemplo los bloques de memoria que puede utilizar (los llamados *stack* y *heap*) e información de su estado, entre otros. # # Un *thread*, al igual que un proceso, es una instancia de un programa, se ejecuta en el procesador pero está contenido en el proceso del que salió. Al estar contenido en el proceso, comparte elementos de éste, tienen distinto *stack* de memoria (variables locales creadas en funciones) y en sistemas *multicore* es posible definir variables que sean accesadas por todos los *threads* (variables compartidas). # # ``` # La creación de threads a partir de un proceso se le nombra *fork* y su unión al proceso se le nombra *join*: # <img src="https://dl.dropboxusercontent.com/s/0vnjfdk7fo62m8h/threading.png?dl=0" heigth="400" width="400"> # Ambas acciones constituyen al *threading* que realiza un *core*, ver [multithreading](https://en.wikipedia.org/wiki/Multithreading_(computer_architecture)), [thread](https://en.wikipedia.org/wiki/Thread_(computing)). # ### Vectorización y niveles de BLAS # En términos simples vectorizar una operación significa realizar la operación de forma independiente al mismo tiempo sobre diferentes pedazos de datos. La vectorización **sólo puede realizarse** si se llena el caché con los datos usables o relevantes para la operación. Para lograr esto el *bus* moverá pedazos de memoria contiguos lo cual será posible sólo si los datos están almacenados secuencialmente en la memoria. Si se tiene *data fragmentation* causará *memory fragmentation* pues se tendrán los datos esparcidos en la memoria. 
Aún si se llenara la capacidad del *bus width* si no se tienen los datos usables o relevantes para la operación se tendrán *cache misses*. # # Los múltiples *cores* junto con la funcionalidad del *threading* permiten a la CPU o GPU la vectorización. Análogamente la vectorización puede realizarse en un clúster de máquinas con el cómputo distribuido: cada máquina procesa un pedazo de los datos de un arreglo de una o más dimensiones. # # # ```{margin} # # En *Python* se tiene *data fragmentation* al usar listas por lo que no se recomienda su uso para realizar operaciones vectoriales o matriciales. Un ejemplo de un paquete que permite realizar operaciones de forma vectorizada es [numpy](https://numpy.org/). Tales operaciones se clasifican de acuerdo a los niveles de BLAS que utilizan. # # ``` # Para lograr la vectorización los paquetes de *software* utilizan el hecho que el cómputo matricial está construído sobre una jerarquía de operaciones del álgebra lineal: # # * Productos punto involucran operaciones escalares de suma y multiplicación (nivel BLAS 1). # # * La multiplicación matriz-vector está hecha de productos punto (nivel BLAS 2). # # * La multiplicación matriz-matriz utiliza colecciones de productos matriz-vector (nivel BLAS 3). # # Las operaciones anteriores se describen en el álgebra lineal con la teoría de espacios vectoriales pero también es posible describirlas en una forma algorítmica. Ambas descripciones se complementan una a la otra. # ```{admonition} Comentario # # El acrónimo BLAS es el nombre de la especificación que prescribe el conjunto de rutinas para realizar las operaciones del álgebra lineal. Hay diferentes implementaciones de tal especificación como se verá más adelante. Ver [Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). # # ``` # ### Ejemplo de operación nivel BLAS 1: producto interno estándar o producto punto # Consideramos $x,y \in \mathbb{R}^n$. 
# El producto punto entre $x$ y $y$ es $c = x^Ty = \displaystyle \sum_{i=1}^n x_iy_i$.

# +
# Dot product computed element by element: a BLAS level-1 style operation.
n = 5
x = [-1] * n
y = [1.5] * n
c = 0
for x_i, y_i in zip(x, y):
    c += x_i * y_i
# -

print(c)

# ### Implementaciones de la API standard de BLAS y LAPACK

# ```{margin}
#
# Ver [Linear Algebra Package: LAPACK](http://www.netlib.org/lapack/explore-html/dir_fa94b7b114d387a7a8beb2e3e22bf78d.html) para nombres que se utilizan para operaciones escalares, vectores o matrices y [Reference-LAPACK / lapack](https://github.com/Reference-LAPACK/lapack) para el repositorio.
#
# ```

# ```{margin}
#
# Ver [Application Programming Interface: API](https://en.wikipedia.org/wiki/Application_programming_interface) para una explicación de lo que es una API.
#
# ```

# En [Handle different versions of BLAS and LAPACK](https://wiki.debian.org/DebianScience/LinearAlgebraLibraries) se explica que [BLAS: Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) y [Linear Algebra Package: LAPACK](http://www.netlib.org/lapack/explore-html/dir_fa94b7b114d387a7a8beb2e3e22bf78d.html) además de ser implementaciones, también son API *standard* para operaciones básicas del álgebra lineal. Muchas implementaciones de la API existen. Un ejemplo de implementaciones son las incluidas al instalar R o Python. Otras son las que se pueden instalar vía línea de comando:

# ```{margin}
#
# Ver [libblas3](https://packages.debian.org/libblas3) [libblas-dev](https://packages.debian.org/libblas-dev) [liblapack3](https://packages.debian.org/liblapack3) [liblapack-dev](https://packages.debian.org/liblapack-dev).
#
# ```

# ```bash
# sudo apt-get install -y libblas3 libblas-dev liblapack3 liblapack-dev
# ```
#
# en un sistema operativo Ubuntu por ejemplo.
# Sin embargo existen otras implementaciones de la API que están optimizadas para la arquitectura de nuestras máquinas, por ejemplo: # # * [OpenBLAS](https://github.com/xianyi/OpenBLAS) # # * [Atlas](http://math-atlas.sourceforge.net), [Building a full LAPACK library using ATLAS and netlib's LAPACK](http://math-atlas.sourceforge.net/atlas_install/node8.html), [ATLAS FAQ](http://math-atlas.sourceforge.net/faq.html) # # ```{admonition} Comentario # # Ver [Building from source-Prerequisites](https://numpy.org/devdocs/user/building.html#prerequisites) para información sobre diferentes librerías de álgebra lineal que se pueden utilizar para *NumPy* al instalarlas en nuestras máquinas. # # ``` # ## ¿Qué es el perfilamiento y por qué es necesario? # ```{epigraph} # # Programmers waste enormous amounts of time thinking about, or worrying about, the speed of noncritical parts of their programs, and these attempts at efficiency actually have a strong negative impact when debugging and maintenance are considered # # -- Donald Knuth # ``` # ```{epigraph} # # We should forget about small efficiencies, say about 97% of the time: premature optimization is the root of all evil. Yet we should not pass up our opportunities in that critical 3%. A good programmer ... will be wise to look carefully at the critical code; but only after that code has been identified # # -- Donald Knuth # ``` # El perfilamiento de código nos ayuda a encontrar ***bottlenecks*** de nuestro código ya sea en el uso de CPU, RAM, *network bandwidth* u operaciones hacia el disco de *Input*/*Output* (I/O). # ```{margin} # # Aún teniendo experiencia en programación identificar algunos *bottlenecks* en los códigos es difícil...
# # ``` # Una mala práctica que es común al iniciar en la programación es intentar optimizar el código (y por optimización piénsese en algún caso, por ejemplo mejorar el tiempo de ejecución de un bloque de código) **a ciegas**, intentando cambiar líneas de código **por intuición y no por evidencias o mediciones**. Esto aunque puede funcionar en algunas ocasiones no conduce la mayoría de las veces a corregir los problemas de los *bottlenecks* del código (o bien en programas o ¡sistemas enteros!). La optimización guiada por la intuición conduce a un mayor tiempo en el desarrollo para un pequeño incremento en el *performance*. # Si bien es importante que el código resuelva un problema definido, también es importante perfilarlo. Considérese el caso en el que un código resuelve bien un problema en un día completo, es importante entonces perfilarlo para realizar mediciones (CPU, memoria p.ej.) de los lugares en los que el código gasta la mayor parte de tiempo (en general recursos). # # A largo plazo el perfilamiento de código te dará las decisiones más pragmáticas posibles con el menor esfuerzo total. # # Al perfilar tu código no olvides lo siguiente: # # ```{margin} # # *Overhead* en este caso se refiere a todo lo extra que se añade al perfilar tu código y que no se encuentra en tu código original. Por ejemplo el trabajo extra que realiza tu máquina para analizar tu código (con un paquete por ejemplo) no estaba presente desde un inicio en tu código. # # ``` # * Medir el tiempo total de tus códigos para decidir si se requiere optimizarlos. # # * Perfilar tus códigos para decidir en dónde se iniciará con la optimización. También ayuda definir hipótesis para decidir en qué bloques perfilar primero. # # * Escribir *tests* para asegurarse que se resuelve el problema de forma correcta al igual que antes de perfilarlo y optimizarlo. # # * Cualquier recurso medible puede ser perfilado (no sólo el uso de CPU p.ej.). 
# # * Perfilar típicamente añade *overhead* en la ejecución del código (aumento de tiempo de 10x o 100x es común). # También considera el tiempo que inviertes para optimizar tu código y si vale la pena la inversión de tiempo que realizas en esto pues hay códigos que casi no son utilizados y otros que sí. No pierdas de vista: # <img src="https://imgs.xkcd.com/comics/is_it_worth_the_time.png" heigth="400" width="400"> # Principalmente porque es tentativo caer en la mala práctica de tratar de remover todos los *bottlenecks*. Sé una persona práctica, define un tiempo objetivo para tu código y optimiza sólo para llegar a ese objetivo. # Algunas ***strategies to profile your code successfully*** extraídas de [high performance python](https://www.oreilly.com/library/view/high-performance-python/9781492055013/) para un perfilamiento de código exitoso: # # * *Disable TurboBoost in the BIOS (a cool CPU may run the same block of code faster than a hot CPU)*. # # * *Disable the operating system's ability to override the SpeedStep (you will find this in your BIOS if you're allowed to control it)*. # # * *Only use mains power (never battery power) <- a laptop on battery power is likely to more agressively control CPU speed than a laptop on mains power*. # # * *Disable background tools like backups and Dropbox while running experiments*. # # * *Run the experiments many times to obtain a stable measurement*. # # * *Possibly drop to run level 1 (Unix) so that no other tasks are running*. # # * *Reboot and rerun the experiments to double-confirm the results*. # # # Otras estrategias las encuentran en [Reproducible benchmarking in Linux-based environments](https://github.com/JuliaCI/BenchmarkTools.jl/blob/master/doc/linuxtips.md). # ### *Unit testing* # Además del perfilamiento del código, el *unit testing* ayuda a **validar** que cada unidad del *software* trabaje y se desempeñe como fue diseñada (una unidad puede ser una función, programa, procedimiento, método). 
El *unit testing* es importante pues se debe cuidar que el código genere resultados correctos. Puede realizarse independientemente del perfilamiento y si se ha hecho perfilamiento es muy indispensable que se haga un unit testing. # # * ***Unit testing during optimization to maintain correctness:*** ... *after spending a day optimizing her code, having disabled unit tests because they were inconvenient, only to discover that her significant speedup result was due to breaking a part of the algorithm she was improving...* # # * *...If you try to performance test code deep inside a larger project without separating it from the larger project, you are likely to witness side effects that will sidetrack your efforts. It is likely to be harder to unit test a larger project when you're making fine-grained changes, and this may further hamper your efforts. Side effects could include other threads and processes impacting CPU and memory usage and network and disk activity, which will skew your results.* # ## ¿Por qué compilar a código de máquina? # ```{margin} # # Ver [liga](https://en.wikipedia.org/wiki/Interpreter_(computing)) para revisar lo que es un lenguaje tipo intérprete. # # ``` # De las opciones que tenemos a nuestra disposición para resolver *bottlenecks* en nuestro programa es hacer que nuestro código haga menos trabajo. Compilando nuestro código a código de máquina para que el código en los lenguajes tipo intérpretes ejecuten menos instrucciones es una opción a seguir. # ### ¿Por qué puede ser lenta la ejecución de un bloque de código en Python (o en algún otro lenguaje tipo intérprete)? # ```{margin} # # *Overhead* en este caso se refiere a todo lo extra que debe realizar la máquina en un lenguaje intérprete y que no está presente en un lenguaje compilado. Por ejemplo, un objeto tipo `int` en Python tiene asociado un objeto de alto nivel con el que interactuamos. 
Tal objeto tiene asociados métodos, funciones y atributos por lo que en el código al querer utilizarlos disminuye eficiencia de ejecución pues deben encontrarse, cargarse, verificar tipo de valores, entre otras operaciones. # # ``` # * Verificación de tipo de valores: si son `int`, `double` o `string` u otros. # # * Los objetos temporales que se crean por tipo de dato causa *overhead* . # # * Las llamadas a funciones de alto nivel. Por ejemplo las que ayudan a almacenar al objeto en memoria. # # son tres de las fuentes que hacen a un lenguaje tipo intérprete como `Python` `R` o `Matlab` lento. También otras fuentes son: # # * Desde el punto de vista de la memoria de la máquina, el número de referencias a un objeto y las copias entre objetos. # # * No es posible vectorizar un cálculo sin el uso de extensiones (por ejemplo paquetes como `numpy`). # ```{admonition} Comentarios # # * Hay paquetes que permiten la compilación hacia lenguajes más eficientes como Fortran o C (lenguajes que deben realizar compilación), por ejemplo [cython](https://cython.org/) o [rcpp](http://www.rcpp.org/). # # * Escribir directamente en el lenguaje C en un equipo para un proyecto de desarrollo de *software* indudablemente cambiará la velocidad de su trabajo si no conocen tal lenguaje. En este caso se recomienda ver ganancias y pérdidas para los tiempos de entrega del proyecto. # # * En la práctica si se tiene un *bottleneck* que no ha podido resolverse con herramientas como el cómputo en paralelo o vectorización, se recomienda utilizar paquetes para compilación hacia lenguajes más eficientes en regiones pequeñas del código y así resolver el *bottleneck* del programa. En ocasiones también puede ser la opción más viable a seguir si escribir un programa con cómputo en paralelo no es una opción. # # ``` # ## Sobre los términos concurrencia, paralelo y distribuido # La distinción entre los términos de paralelo y distribuido es borrosa y en ocasiones es díficil de distinguir. 
# ### Paralelo # El término **paralelo** típicamente se relaciona con programas cuya ejecución involucra *cores* o nodos que físicamente son cercanos y comparten memoria o están conectados por una red (*network*) para ejecución de instrucciones en un mismo tiempo o instante. # ### Distribuido # Los programas **distribuidos** son ejecutados por nodos o máquinas separadas a distancia y una de sus características es que no necesariamente fueron iniciados por un nodo central o *scheduler* por lo que su ejecución es independiente de los demás nodos. Así como con el término paralelo, existirán instrucciones en los programas que se ejecutarán en un mismo tiempo o instante. # ### Concurrencia # El término de **concurrencia** se refiere a que las múltiples tareas que debe realizar un programa pueden estar en progreso en cualquier instante. # ```{margin} # # Ver [kernel operating system](https://en.wikipedia.org/wiki/Kernel_(operating_system)) para definición del kernel de una máquina. # # ``` # Por ejemplo: cada vez que tu código lee un archivo o escribe a un dispositivo (memoria, disco, *network*), debe pausar su ejecución para contactar al kernel del sistema operativo, solicitar que se ejecute tal operación, y esperar a que se complete, por ejemplo: alojamiento de memoria. Estas operaciones son órdenes de magnitud más lentas que las instrucciones u operaciones ejecutadas en la CPU y el tiempo que el programa espera a que se completen tales operaciones se le nombra *I/O wait*. La concurrencia nos ayuda a utilizar este tiempo perdido al permitir ejecutar operaciones mientras que una operación I/O se complete. # ```{margin} # # Ejemplos de paquetes de *Python* para ejecución del código de forma asíncrona son: [asyncio](https://docs.python.org/3/library/asyncio.html), [tornado](https://www.tornadoweb.org/en/stable/). También *Dask* permite tal tipo de ejecución, ver [dask-asynchronous](https://distributed.dask.org/en/latest/asynchronous.html). 
# # ``` # Un programa concurrente en lugar de ejecutarse de forma secuencial, esto es, pasar de una línea a otra línea, tiene código escrito para ejecutar distintas líneas conforme sucedan "eventos". Aquí se involucra una forma de programación llamada **asíncrona**, por ejemplo si una operación tipo I/O es solicitada, el programa ejecuta otras funciones mientras espera que se complete la operación I/O. # ```{admonition} Comentario # # Cambiar de función a función en un programa asíncrono también genera costo pues el kernel debe invertir tiempo en hacer todo el *set up* para ejecutar a la función. La concurrencia funciona bien en programas con alto I/O wait. # # ``` # ### ¿Por qué el cómputo en paralelo? # La industria entre los años $2003-2005$ en lugar de fabricar procesadores monolíticos (clásico [Von Neumann](https://en.wikipedia.org/wiki/Von_Neumann_architecture)) que fueran más rápidos y complejos, decidió fabricar múltiples, simples procesadores, *cores*, en un sólo chip para incrementar el poder de procesamiento, disminuir el Von Neumann *bottleneck* y aumentar el *clock speed*. # # Esto fue motivado pues desde el año $2002$ el incremento del *performance* de los procesadores con un sólo CPU fue de un $20\%$ por año vs un $50\%$ por año entre $1986$ y $2002$. Lo anterior se debió a los problemas de la construcción de procesadores monolíticos o de un *core* relacionados con la disipación del calor por un mayor consumo de energía al hacer más pequeños los transistores. # Hoy en día podemos encontrar en nuestros celulares, laptops, computadoras de escritorio y servidores arquitecturas que cuentan con múltiples *cores* o núcleos para procesamiento. Por lo anterior es **indispensable** explotar tal tecnología para tener programas de máquina más eficientes. Muchos de los dispositivos anteriores además tienen un(os) procesador(es) gráficos. 
# # <img src="https://dl.dropboxusercontent.com/s/k11qub01w4nvksi/CPU_multicore.png?dl=0" heigth="500" width="500"> # # # <img src="https://dl.dropboxusercontent.com/s/lw9kia12qhwp95r/GPU.png?dl=0" heigth="500" width="500"> # # En una buena cantidad de aplicaciones tenemos que implementar algoritmos considerando tal disponibilidad de *cores* para reducir el tiempo de procesamiento. Esto conduce a reimplementar o en otros casos a repensar al algoritmo en sí. # # # Y otro aspecto a tomar en cuenta en esta implementación es la transferencia de datos que existe en la jerarquía de memoria de una máquina. # ```{admonition} Definición # # Los algoritmos que utilizan un sólo *core* para procesamiento les nombramos **secuenciales**, los que utilizan múltiples *cores* son **paralelos**. # # ``` # ### Programas cuya ejecución es en paralelo # La decisión de reescribir tu programa secuencial en un programa en paralelo depende mucho de considerar cuatro situaciones: # # * Tener el hardware para ejecución en paralelo del programa. # # * Comunicarle al programa que está en un hardware para ejecución en paralelo de instrucciones. # # * Tener un método que aproveche el hardware paralelo de forma eficiente. # # * Tiempo invertido en la reescritura de un código secuencial a uno en paralelo vs ganancias en tiempo de ejecución. # Lo primero es fácilmente alcanzable pues hoy en día tenemos celulares con múltiples *cores* o procesadores. Lo segundo es más dependiente del lenguaje e implementación de éste lenguaje en el que se esté programando. El tercer punto es quizás el más complicado de lograr pues en ocasiones implica repensar el método, disminuir la comunicación lo más posible entre los procesadores, el balanceo de carga o ***load balancing*** debe evaluarse y el perfilamiento o el *debugging* es más difícil en la programación en paralelo que en la forma secuencial. El cuarto punto es esencial para la decisión. 
# ### ¿Cuándo es recomendable pensar en ejecutar en paralelo tu programa? # Si tus instrucciones a realizar pueden ser divididas en múltiples *cores* o nodos sin tanto esfuerzo de ingeniería (levantar un clúster de cero es difícil...) o no te lleva mucho tiempo el rediseño de tus métodos para decisiones prácticas (paralelizar el método de descomposición en valores singulares, SVD, es difícil de realizar...) entonces es una opción a considerar. Se recomienda mantener el nivel de paralelización lo más simple posible (aunque no se esté utilizando el 100\% de todos tus *cores*) de modo que el desarrollo de *software* sea rápido. # ### Si tengo n procesadores ¿espero un *speedup* de $nx$? # Normalmente **no** se tiene un mejoramiento en la velocidad de $n$ veces ($nx$) (por ejemplo, si tienes una máquina de $8$ *cores* es poco probable que observes un $8x$ *speedup*). # # Las razones de esto tienen que ver con que al paralelizar instrucciones típicamente se tiene *overhead* por la comunicación entre los procesos o *threads* y decrece la memoria RAM disponible que puede ser usada por subprocesos o *threads*. # También dentro de las razones se encuentra la [ley de Amdahl](https://en.wikipedia.org/wiki/Amdahl%27s_law) que nos dice que si sólo una parte del código puede ser paralelizado, no importa cuántos cores tengas, en términos totales el código no se ejecutará más rápido en presencia de secciones secuenciales que dominarán el tiempo de ejecución. # ### ¿A qué nos referimos al escribir *overhead* en un programa cuya ejecución es en paralelo? # A todo lo que implica ejecutar el programa en paralelo que no está presente en la ejecución en una forma secuencial. Por ejemplo, iniciar procesos implica comunicación con el kernel del sistema operativo y por tanto, tiempo. # ### ¿Cuáles enfoques puedo utilizar para escribir programas en paralelo? # Hay $2$ enfoques muy utilizados para escribir programas en paralelo: # # * Paralelizar las tareas entre los cores.
Su característica principal es la ejecución de instrucciones distintas en los cores. Por ejemplo: al llegar la persona invitada a casa, María le ofrecerá de tomar y Luis le abrirá la puerta. # # * Paralelización de los datos entre los cores. Su característica principal es la ejecución de mismas instrucciones en datos que fueron divididos por alguna metodología previa. Por ejemplo: tú repartes la mitad del pastel a las mesas 1,2 y 3, y yo la otra mitad a las mesas 4,5 y 6. # # ```{admonition} Comentarios # # * No son enfoques excluyentes, esto es, podemos encontrar ambos en un mismo programa. La elección de alguno de éstos enfoques depende del problema y del software que será usado para tal enfoque. # # * Obsérvese que en el ejemplo de María y Luis necesitan **coordinarse**, **comunicarse** y **sincronizarse** para tener éxito en recibir a la invitada. # # * Obsérvese que en el ejemplo de repartir el pastel se requiere un buen ***load balancing*** pues no queremos que yo le reparta a $5$ mesas y tú le repartas a ¡sólo una!. # # ``` # ### ¿A qué nos referimos con el término *embarrassingly parallel problem*? # # # A los problemas en los que la comunicación entre procesos o threads es cero. Por ejemplo sumar un array `a` con un array `b`. # # Y en general si evitamos compartir el estado (pensando a la palabra "estado" como un término más general que sólo comunicación) en un sistema paralelo nos hará la vida más fácil (el *speedup* será bueno, el *debugging* será sencillo, el perfilamiento será más fácil de realizar...). # ### ¿Cómo inicio en la programación en paralelo de mi código? # <NAME> en su libro *Designing and Building Parallel Programs* da una serie de pasos que ayudan a la programación en paralelo: # * *Partitioning. Divide the computation to be performed and the data operated on by the computation into small tasks. The focus here should be on identifying tasks that can be executed in parallel.* # # * *Communication. 
Determine what communication needs to be carried out among the tasks identified in the previous step.* # # * *Agglomeration or aggregation. Combine tasks and communications identified in the first step into larger tasks. For example, if task A must be executed before task B can be executed, it may make sense to aggregate them into a single composite task.* # # * *Mapping. Assign the composite tasks identified in the previous step to processes/threads. This should be done so that communication is minimized, and each process/thread gets roughly the same amount of work.* # # # ```{admonition} Comentario # # En ocasiones es mejor dejar las tareas simples y redundantes que complicarlas y no redundantes. Por ejemplo, si en el programa en paralelo varios procesadores o *threads* hacen tarea redundante pero me evitan la comunicación, prefiero este programa a uno en el que haga trabajo no redundante y muy específico a cada procesador o *thread* pero que la comunicación a realizar sea muy complicada o compleja. # # ``` # ### Ejemplo en la regla del rectángulo compuesta en una máquina *multicore* # 1.*Partitioning*: la tarea a realizar es el cálculo de un área de un rectángulo para un subintervalo. # <img src="https://dl.dropboxusercontent.com/s/5nqciu6ca5xzdh9/parallel_processing_Rcf_1.png?dl=0" height="300" width="400"> # 2.*Communication* y *mapping*: los subintervalos deben repartirse entre los *cores* y se debe comunicar esta repartición por algún medio (por ejemplo con variables en memoria). # 3.*Aggregation*: un *core* puede calcular más de un área de un rectángulo si recibe más de un subintervalo. # <img src="https://dl.dropboxusercontent.com/s/lpcwd9mejb90rq3/parallel_processing_Rcf_2.png?dl=0" height="200" width="300"> # # 4.*Communication* y *mapping*: el área de los rectángulos calculados por cada procesador debe sumarse para calcular la aproximación a la integral.
# <img src="https://dl.dropboxusercontent.com/s/mfo5rfzjnonn8lq/parallel_processing_Rcf_3.png?dl=0" heigth="400" width="500"> # # ### ¿*Software*? # # **Nota: las listas de herramientas de *software* que se presentan son no exhaustivas**. # Depende del procesador y arquitectura a utilizar. Si lo que deseamos usar son *cores* de una CPU en una máquina tenemos: # # * [Dask](https://docs.dask.org/en/latest/) # # * [multiprocessing](https://docs.python.org/3.1/library/multiprocessing.html) # # * [Cython](https://github.com/cython/cython/) (que además provee compilación a *C*) # # * [Numba](https://github.com/numba/numba) (que además provee compilación a *C*) # # * [joblib](https://joblib.readthedocs.io/en/latest/) # # * [parallel](https://www.rdocumentation.org/packages/parallel/versions/3.6.2) # # * [foreach](https://www.rdocumentation.org/packages/foreach/versions/1.4.7/topics/foreach) # # * [RcppParallel](https://github.com/RcppCore/RcppParallel) (que además provee compilación a *C++*) # # * [openMP](http://www.openmp.org/) # # * [Pthreads](https://computing.llnl.gov/tutorials/pthreads/) # # * [Thrust](https://thrust.github.io/) # # * [PLASMA](https://www.lrz.de/services/software/mathematik/plasma/) # # * [oneTBB](https://github.com/oneapi-src/oneTBB) # # # Si deseamos usar *cores* en una GPU y cómputo multi-GPU tenemos: # # * [CUDA C](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html), [CUBLAS](https://docs.nvidia.com/cuda/cublas/index.html), [MAGMA](https://icl.cs.utk.edu/magma/), [CULA](http://www.culatools.com/cula_dense_programmers_guide/), [CUSOLVER](https://docs.nvidia.com/cuda/cusolver/index.html), [NVBLAS](https://docs.nvidia.com/cuda/nvblas/) # # * [CuPy](https://docs.cupy.dev/en/stable/) # # * [PyCUDA](https://documen.tician.de/pycuda/) # # * [dask-cuda](https://github.com/rapidsai/dask-cuda), [Rapids](https://rapids.ai/) # # * [Numba](https://github.com/numba/numba) (que además provee compilación a *C*) # # * 
[gputools](https://www.rdocumentation.org/packages/gputools/versions/1.1) # # * [gpuR](https://github.com/cdeterman/gpuR) # # * [Rth-org/Rth](https://github.com/Rth-org/Rth) y más reciente [matloff/Rth](https://github.com/matloff/Rth). Ver también [rdrr.io matloff/Rth](https://rdrr.io/github/matloff/Rth/f/README.md). # # * [Thrust](https://thrust.github.io/) # # # Para cómputo distribuido se encuentran: # # * [OpenMPI](https://www.open-mpi.org/) # # * [Slurm](https://www.schedmd.com/), [Slurm github](https://github.com/SchedMD/slurm) # # * [ScaLAPACK](http://www.netlib.org/scalapack/) # # Para áreas como *deep learning* en el uso de la GPU y que también permiten cómputo distribuido y multigpu se tienen: # # * [tensorflow](https://github.com/tensorflow/tensorflow), [keras](https://github.com/keras-team/keras) # # * [caffe](https://github.com/BVLC/caffe) # # * [mxnet](https://github.com/apache/incubator-mxnet) # # * [pytorch](https://github.com/pytorch/pytorch) # # * [Theano](https://github.com/Theano/Theano), [pymc-devs/aesara](https://github.com/pymc-devs/aesara) # # Varios de los paquetes anteriores están habilitadas con [oneDNN](https://github.com/oneapi-src/oneDNN) para un buen *performance* en el uso de la CPU/GPU. 
# # Y un estándar para cómputo en sistemas heterogéneos: [OpenCL](https://www.khronos.org/opencl/), [OpenCL-c-programming-language](https://www.khronos.org/registry/OpenCL/specs/3.0-unified/html/OpenCL_C.html#the-opencl-c-programming-language), [OpenCL-Wikipedia](https://en.wikipedia.org/wiki/OpenCL) # ## Referencias de interés # * [MAGMA by example](https://developer.nvidia.com/sites/default/files/akamai/cuda/files/Misc/mygpu.pdf) # # * [Numerical Linear Algebra on Emerging Architectures: the PLASMA and MAGMA Projects](https://www.icl.utk.edu/files/publications/2009/icl-utk-388-2009.pdf) # # * [pymc3](https://github.com/pymc-devs/pymc3) # # * Para monitoreo de número de instrucciones por ciclo, número de ciclos por segundo, *branch misses, cache references* (o *hits*) y *misses* se recomiendan las herramientas de [perf](https://en.wikipedia.org/wiki/Perf), [valgrind](https://valgrind.org/), ver también: [Valgrind](https://en.wikipedia.org/wiki/Valgrind). # # * Algunas ligas que explican lo que es el *garbage collector* en Python son: [python-garbage-collector](https://rushter.com/blog/python-garbage-collector/), [Basics of Memory Management in Python](https://stackabuse.com/basics-of-memory-management-in-python/), [python-garbage-collection](https://stackify.com/python-garbage-collection/) y [python-garbage-collector-documentation](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation). # # * Ver [python-vs-cpython](https://stackoverflow.com/questions/17130975/python-vs-cpython) para una breve explicación de implementaciones de Python. # **Preguntas de comprehensión:** # # 1)¿Qué factores han influido en que desde el 2002-2003 a la fecha, el *performance* de los procesadores se esté incrementando en un 20% por año vs el 50% de incremento por año que se tenía entre 1986 y 2002? # # 2)Menciona los componentes y realiza un esquema de una arquitectura von Neumann y descríbelas. # # 3)Menciona la ley de Amdahl. 
# # 4)¿Qué es un proceso y de qué consta? # # 5)¿Qué es un *thread*? # # 6)¿Qué es el *threading*? ¿qué ventajas nos da para la programación en un sistema de memoria compartida? # # 7)¿Qué es el caché? # # 8)Nosotros como programadores o programadoras, ¿cómo podemos obtener ventajas del caché? # # 9)¿Qué es un *cache hit*? ¿un *cache miss*? # # 10)De acuerdo a la taxonomía de Flynn, ¿qué tipos de arquitecturas existen? Menciona sus características, ventajas /desventajas y ejemplos. # # 11)Menciona algunos ejemplos de: # # a.Sistemas de memoria distribuida. # # b.Sistemas de memoria compartida. # # 12)¿Qué es el *pipelining* y el *branch prediction*? # # 13)Menciona los distintos *bus* o interconexiones en un sistema de computadora y su propiedad principal o lo que nos interesa medir en un *bus*. # # 14)¿Qué significan los términos concurrencia, paralelo, distribuido? # # 15)¿Cuáles son los enfoques que se utilizan para escribir programas en paralelo? # # 16)Define a cuál enfoque corresponde (de acuerdo a la pregunta 13) cada uno de los siguientes incisos: # # a)Supón que tienes 2 cores y un arreglo de tamaño 100 # # if(rango_core módulo 2 == 0 ) # operar en los elementos 50 a 99 # else # operar en los elementos 0 a 49 # # donde módulo es una operación que nos devuelve el residuo al dividir un número entre otro. # # b)Tenemos tres trabajadores: Aurora, Pedro, Daniel # # if(mi_nombre es Pedro) # lavo el baño # else # voy de compras # # 17)En el cómputo en paralelo debemos realizar coordinación entre procesos o *threads* y considerar el *load balancing*. Menciona tipos de coordinación que existen y ¿a qué se refiere el *load balancing*? # # 18)¿Cuáles son los pasos a seguir, que de acuerdo a <NAME>, se puede seguir para el diseño de programas en paralelo? # # **Referencias:** # # 1. <NAME>, <NAME>, High Performance Python, O'Reilly Media, 2014. # # 2. 
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME> and <NAME>, LAPACK Users Guide, Society for Industrial and Applied Mathematics, Philadelphia, PA, third ed., 1999. # # 3. <NAME>, An Introduction to Parallel Programming, <NAME>, 2011. # # 4. https://xkcd.com/ #
libro_optimizacion/temas/V.optimizacion_de_codigo/5.1/introduccion_optimizacion_de_codigo.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # PDF versions of the plots included in this file can get very large, use PNG to save space options(jupyter.plot_mimetypes = 'image/png') # # MDI++ Demo in iPython/Jupyter # # # This document demonstrates `mdi++` in an Jupyter notebook, it is therefore a "live" document with the intention that you can download and locally execute the included code. This document uses [R inside](https://github.com/IRkernel/IRkernel) so you can download and modify this document locally or copy and paste commands outside to your R editor of choice. # # ## Generating Demo Datasets ## # # We start by loading in some code that will allow us to regenerate our "demonstration" datasets: source("../demo/datagen.R") # next we sample our cluster allocations (for 100 items) from a Dirichlet Process and generate one dataset for each datatype supported by MDI. The default output format used by R is compatible with MDI++ so we just write them directly out. # + (base <- rdirichletprocess(100)) write.csv(create.normgam(base, 10), "../demo/normgam.csv") write.csv(create.gaussianprocess(base,20), "../demo/gaussianprocess.csv") write.csv(create.multinom(base, rep(3,10)), "../demo/multinom.csv") write.csv(create.bagofwords(base,100, 10), "../demo/bagwords.csv") # - # The second parameter in the `create.*` scripts above is generally the number of features, but for the Multinomial and Bag-of-Words datatypes things are more complicated—have a look inside `datagen.R` if you wish to see the details! # # ## Running MDI++ ## # # Now that we have generated some datasets we can run `mdi++` on them, we `cd` into the relevant subdirectory and execute `mdi++` that you have hopefully just built. 
Each dataset is preceded by a flag specifying its datatype, `N` for Independent Normal/Gaussian features, `GP` for Gaussian Process, `M` for Multinomial and `BW` for Bag-of-Words. We use a standard shell redirection (i.e. the `> mcmc.csv`) to save results to disk. system(paste("cd ../demo", "../mdi++ N normgam.csv GP gaussianprocess.csv M multinom.csv BW bagwords.csv > mcmc.csv", sep=";")) # All Monte-Carlo output will now be saved in the CSV file "`demo/mcmc.csv`" and is formatted such that it can be easily loaded using the standard R functions. I have a variety of functions in `scripts/analysis.R` intended for interpreting this output. # # There is a significant amount of code within the analysis script for convergence diagnostics, mostly this will apply when you have a large number of features (more than about 15 to 50 depending on the datatype) as the data will become too informative. For the analysis of this example data convergence will occur with probability approaching 1. # # ## Analysing MDI++ Output ## # # We start by pulling the analysis code into our workspace, installing the `mcclust` package if necessary. # + if(!require("mcclust")) install.packages("mcclust") source("../scripts/analysis.R") # - # We can now load the data that was generated at the start into R enabling us to analyse the MCMC output. I have assumed that you will not be generating the datasets within R, therefore the datasets generated above were deliberately not retained so that this functionality can be demonstrated. # + yg <- loadDataGauss("../demo/normgam.csv") ygp <- loadDataGP("../demo/gaussianprocess.csv") ymn <- loadDataMultinom("../demo/multinom.csv") ybw <- loadDataBagOfWords("../demo/bagwords.csv") # Put the resulting dataframes into a list for easy access datafiles <- list(yg,ygp,ymn,ybw) # - # We can start by ensuring that the data looks appropriate, thus demonstrating that `plot` has been overloaded.
par(mfrow=c(2,2),mar=c(2,2,0.5,0.5)) for (y in datafiles) plot(y) # Next we load MDI's MCMC output into R: mcmc <- readMdiMcmcOutput("../demo/mcmc.csv") # Note: I would tend to put this command into its own cell as loading a large file can take a significant amount of time. # # Once it has loaded we can plot a sample from the posterior. This figure has one row of plots per dataset, with each column of plots representing a single cluster. Individual plots will contain the items selected for that cluster and dataset, and will be empty when there are no items in that dataset's clustering. # # If you're working on your own analysis I would recommend plotting a variety of samples to ensure that "similar" items end up in appropriate clusters and that these associations are shared across datasets. plotSingleMcmcSample(mcmc[5000,],datafiles) # Another plot that has been useful is the agreement in allocations between datasets—termed "fusion". Plotting high dimensional data is difficult, so all plots for all pairwise combinations are generated. Each individual plot describes the cumulative average agreement over the second half of the MCMC samples (i.e. first half is considered burn-in) after thinning to 2000 samples. plotAllocationAgreement(tail(mcmc,2000)) # In this case the chain appears to have converged, one is looking for MC uncertainty in early samples and no systematic drift in later samples. Then it can be helpful to simplify to marginal histograms. # # In the plot below we show the posterior mass associated with each number of allocation agreements across every pairwise combination of datasets, with the mean agreement indicated by the dotted vertical line. In this case we know that every dataset has exactly the same cluster partition and so we see all 100 items basically agreeing and only limited uncertainty. In real datasets we would expect much less consistency between datasets.
plotAllocationAgreementHistogram(tail(mcmc,2000),datafiles) # In a number of publications it has been found that a Posterior Similarity Matrix is a good way to describe the output of MDI, and other similar algorithms, that generate many posterior samples. Code included in MDI makes it easy to generate a PSM and then plot the PSM with items ordered by a hierarchical clustering of the matrix. # # We start by calling code to generate PSMs across each dataset independently and them sum to get an average/consensus PSM. cpsm <- generateConsensusPSM(tail(mcmc)) # In order to generate a plot, we need to take a guess at the number of clusters at which to "cut" the hierarchical clustering—here we use the median across all datasets. # # We can then plot the PSMs for each dataset independently along with the consensus. # + nclust <- apply(getClustersOccupied(tail(mcmc,101)),2,median) par(mar=c(1,2,1,1)); plotConsensusPSM(cpsm, datafiles, median(nclust), ann = TRUE) # - # Darker colors indicate greater agreement and the black ticks on the axes display the inferred cluster partition across the consensus PSM. Each dataset is presented on its own on the right, with the larger image representing the consensus, i.e. the mean of each individual PSM. As can be seen there is little uncertainty in the cluster allocations given this data. # # Further analysis/plotting tends to be application specific and depends on the degree of fusion between datasets. For example each datasets may utilise a different lab method and one may be interested highlighting the pairwise similarity between datasets, at which point the histogram of pairwise agreements may be enough. # # Otherwise extracting the cluster allocation inferred for each data file may serve as useful input for further work. head(cp <- extractPSMClustPartition(cpsm, nclust, datafiles)) write.csv(cp, "../demo/cluster-partition.csv")
docs/mdi demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Qudits
#
# Most of the time in quantum computation, we work with qubits, which are 2-level quantum systems. A qu-*d*-it is a generalization of a qubit to a d-level or d-dimension system.
#
# Qudits with known values for d have specific names. A qubit has dimension 2, a qutrit has dimension 3, a ququart has dimension 4, and so on.

# In Cirq, qudits work exactly like qubits except they have a `dimension` attribute other than 2, and they can only be used with gates specific to that dimension.
#
# Both qubits and qudits are represented by a `Qid` object.
#
# To apply a gate to some qudits, the dimensions of the qudits must match the dimensions it works on. For example, if a gate represents a unitary evolution on three qudits, a qubit, a qutrit, and another qutrit, the gate's "qid shape" is `(2, 3, 3)` and its `on` method will accept exactly 3 `Qid`s with dimension 2, 3, and 3.
#
# This is an example single qutrit gate used in a circuit:
#

# +
import cirq
import numpy as np  # fix: `_unitary_` below uses `np.array`, so numpy must be imported


class QutritPlusGate(cirq.SingleQubitGate):
    """A single-qutrit gate that adds 1 (mod 3) to the qutrit's basis state."""

    def _qid_shape_(self):
        # Acts on exactly one qudit of dimension 3 (a qutrit).
        return (3,)

    def _unitary_(self):
        # Cyclic permutation of the three basis states:
        # |0> -> |1>, |1> -> |2>, |2> -> |0>.
        return np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])

    def _circuit_diagram_info_(self, args):
        # Wire symbol shown when the circuit is printed.
        return '[+1]'


q0 = cirq.LineQid(0, dimension=3)
circuit = cirq.Circuit(
    QutritPlusGate().on(q0)
)
print(circuit)
# -

# ### Qids
#
# `Qid` is the type that represents both qubits and qudits. By default, a qid like `cirq.NamedQubit('a')` is a qubit.
#
# While Cirq has the built-in qubit types, it also provides the corresponding Qid types:
#
# - `cirq.NamedQid`: To create a qutrit named 'a', specify the dimension with `cirq.NamedQid('a', dimension=3)`.
# - `cirq.GridQid`: To create a 2x2 grid of ququarts, use `cirq.GridQid.rect(2, 2, dimension=4)`.
# - `cirq.LineQid`: In addition, the `LineQid` constructor also supports a dimension argument directly `cirq.LineQid(0, dimension=4)`. # # ### `cirq.qid_shape` and `def _qid_shape_` # # Quantum gates, operations, and other types that act on a sequence of qudits can specify the dimension of each qudit they act on by implementing the `_qid_shape_` magic method. This method returns a tuple of integers corresponding to the required dimension of each qudit it operates on, e.g. `(2, 3, 3)` means an object that acts on a qubit, a qutrit, and another qutrit. # # When `Qid`s are used with `Gate`s, `Operation`s, and `Circuit`s, the dimension of each qid must match the corresponding entry in the qid shape. An error is raised otherwise. # # Callers can query the qid shape of an object or a list of `Qid`s by calling `cirq.qid_shape` on it. # By default, `cirq.qid_shape` will return the equivalent qid shape for qubits if `_qid_shape_` is not defined. # # For a qubit-only gate the qid shape is a tuple of 2s containing one 2 for each qubit e.g. `(2,) * cirq.num_qubits(gate)`. # # ### Unitaries, Mixtures, and Channels # # The magic methods `_unitary_`, `_apply_unitary_`, `_mixture_`, and `_channel_` used to define unitary operations, mixtures, and channels can be used with qudits with one caveat. # # # The matrix dimensions for qudits will be larger than for qubits based on the values of the qudit dimensions (the object's qid shape). The size of the matrix is determined from the product of the qudit dimensions. # # For example, a single qubit unitary is a 2x2 matrix, whereas a single qutrit unitary is a 3x3 matrix. A two qutrit unitary is a 9x9 matrix (3 * 3 = 9) and a qubit-ququart unitary is an 8x8 matrix (2 * 4 = 8). The size of the matrices for mixtures and channels follow the same rule. 
# # ### Simulators and Samplers # # Simulators like `cirq.Simulator` and `cirq.DensityMatrixSimulator` will return simulation results with larger matrices than the same size qubit circuit when simulating qudit circuits. # # The size of the matrix is determined by the product of the dimensions of the qudits being simulated. The state vector output of `cirq.Simulator` after simulating a circuit on a qubit, a qutrit, and a qutrit will have 2 * 3 * 3 = 18 elements. # # Call `cirq.qid_shape(simulation_result)` to check the qudit dimensions. # # Measurement results from running a qudit circuit are integers in the range `0` to `qid.dimension-1`.
docs/qudits.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.3 # language: julia # name: julia-1.0 # --- # # Question 1 # # # $f(x) = 2x$ if $2x < 3 -x$ # # As a result, $f(x) = 2x$ when $x<1$ # # Similarly, $f(x) = 3-x$ when $x \geq 1$ # # As a result, get that # # $f(x) = ReLU([1-x]) +RelU([2x-2]) + 2$ # # If $x<1$ then the $ReLU()$ function maps $f(x)$ to $f(x) = 2x$ since $1-x <0$. Similarly, if $x\geq1$ then $ReLU()$ function maps $f(x)$ to $3-x$ # # Question 2 # # We can use an artificual neural network. Each node in the nueral network computes a ReLU funciton. Each node has takes the nodes in the previous layer as inputs using a linear map. We can estimate these paramters using stochastic gradient descent. # # Question 3 # # * To compute the formula, first we find the Legendre polynomial $P_n(x)$. In this case, # # $$P_2(x) = 1/2 (3x^2 -1) $$ # # * Next, we find the roots of $P_n(x)$ in (−1,1); these will be our nodes $x1,...,xn$. In this case the roots are # # $x_1 = 1/\sqrt{3}$ and $x_2 = 1/\sqrt{3}$ # # * Third, we find the Lagrange polynomial that interpolates the integrand $f(x)$ at $x_1,...,x_n$. In this case, we get that # # In this case that polynomial is # # $$f(x_1)(x-x_2)/(x_1-x_2) + f(x_2)(x-x_1)/(x_2-x_1) $$ # # # # * Finally, we integrate the Lagrange polynomial to determine the weights $w1,...$ and we get # # $$ \int_{-1}^{1} f(x_1)(x-x_2)/(x_1-x_2) + f(x_2)(x-x_1)/(x_2-x_1) = $$ # # $$f(x_1)(x^2/2 - x_2x)/(x_1-x_2) + f(x_2)(x^2/2-x_1x)/(x_2-x_1) |_{-1}^{1} = $$ # # $$f(x_1)(1/2 - x_2)/(x_1-x_2) + f(x_2)(1/2 -x_1)/(x_2-x_1) - [f(x_1)(1/2 + x_2)/(x_1-x_2) + f(x_2)(1/2 +x_1x)/(x_2-x_1)] = $$ # # # $$f(- 1/\sqrt{3} )+ f( 1/\sqrt{3})$$ # # Question 4 # # The formula for a Chebychev polynomial is given by # # # $$T_n(x) = \cos\big(k \arccos x \big)$$ # # Where $|x| \leq 1 $. 
Since $cos(x)$ bounded between 1 and -1, we can see from the formula that $T_n(x)$ must be bounded between -1 and 1 on the interval $[-1,1]$ # # We can see that it achieves these bounds by solving for the minimum and maximum. # # Taking the derivative we get # # $$\dfrac{\partial T_n(x)}{\partial x} = -sin(k arcos(x)) \dfrac{-k}{\sqrt{1-x^2}} $$ # # So the extremum occur when $-sin(k arcos(x)) =0$. This occurs when $x= cos(n\pi/k)$. We can see this evaluates to 1 and -1 depending on whether $n$ is odd or even. # # Question 5 # # The Haar wavelet's mother wavelet function is given by # # $$\psi_1(t) = \begin{cases} # 1 & 0 \leq t < \frac{1}{2},\\ # -1 & \frac{1}{2} \leq t < 1,\\ # 0 &\text{otherwise.} \end{cases}$$ # # We wavelets with support larger than or equal to 1/4 together with one shape function with # support of length 2. In this case, our shape function has support of 2 # # Here $\text{1}$ is the indicator function. We compute the basis below. We can solve for the coefficients using the following formula # # $$F_w(a,b)= {|a|^{1/2}} \int_{-\infty}^{\infty} f(t)\overline\psi\left( {at} - b\right)\, dt$$ # # $$F_w(a,b) = {|a|^{1/2}} \left( \int_{-1}^{0} (1+t) \psi(at-b)dt + \int_{0}^{1} (1-t) \psi(ax-b)dt \right) $$ # # $$F_w(a,b) = {|a|^{1/2}} \left(\int_{-1}^{0} (1+t) \psi(at-b)dx + \int_{0}^{1} (1-t) \psi(ax-b)dt \right) $$ # # For, $\psi_1(t)$ We can see that $F_w(1,0) = 0$ from the formula above # # If, $\psi_2(t) = \sqrt(2) \psi_1(2t-1)$ then # # $$F_w(2,-1) = {|2|^{1/2}} \left( \int_{-1}^{0} (1+t) \psi(2t-1)dt \right) = {|2|^{1/2}} \left( \int_{-1}^{-1/2} (1+t) \psi(2t-1)dt + \int_{-1/2}^{0} -(1+t) dt \right) = -.25 $$ # # Similarly,, $\psi_3(t) = \sqrt(2) \psi_1(2t+1)$ then $F_w(2,1) = .25$ # # # $\psi_5(t) = 2 \psi_1(4t+1)$ then $F_w(4,1) = -.0625$ # # $\psi_6(t) = 2 \psi_1(4t+2)$ then $F(4,2) = -.0625$ # # $\psi_7(t) = 2 \psi_1(4t-1)$ then $F(4,-1) = .0625$ # # $\psi_8(t) = 2 \psi_1(4t-2)$ then $F(4,-2) = .0625$ # # # using Pkg using FFTW using 
PyPlot

# +
# Heat-equation demo: start from a triangular ("tent") temperature profile on
# [-1, 1] and damp its Fourier modes, for N = 4 and N = 16 sample points.
for N =[4,16]
    T = 1  # total evolution time
    #setup u0: initial condition u0(x) = 1 - |x| on N equispaced points
    u0=zeros(N,1)
    xs = zeros(N,1)
    for i=1:N
        xs[i] = -1+2*(i-1)/(N-1)  # grid points spanning [-1, 1]
        u0[i]= 1 - abs( xs[i] )
    end
    figure(N)
    plot(xs,u0,label="initial temp $(N) modes")
    # Replicate u0 into N identical columns, then FFT (column-wise).
    A=u0*ones(1,N);
    A=fft(A)
    # t[i]: time grid from 0 to T.
    t = zeros(N,1)
    for i=1:N
        t[i] = (i-1)*T/(N-1)
    end
    k= collect(0:N-1)
    k2= k.*k
    # Damping factors exp(-k^2 t) applied to the Fourier coefficients.
    # NOTE(review): `k2.*t` is an elementwise N-by-1 product, so mode i is
    # damped by exp(-k[i]^2 * t[i]) — each mode paired with a *different*
    # time — and that same column of factors is broadcast across every column
    # of A, making all columns of u identical. If the intent was u(x, t_j)
    # per column j, this should be the outer product exp.(-k2*t'). Confirm
    # against the assignment.
    # NOTE(review): k = 0:N-1 does not follow fft's frequency ordering
    # (0..N/2, then negative frequencies) — presumably acceptable for this
    # homework, but verify.
    W= k2.*t
    W= exp.(-W)
    A= A.*W
    u = real(ifft(A))
    #plot(t,u[:,1])
    #plot(t,u[:,2])
    #plot(t,u[:,3])
    plot(xs,u[:,N] ,label="final temp $(N) modes")
    legend()
end
hw6/homework6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <img src="./imgs/5_GenomicRangeQuery.png" width="100%">

# O(N*M) 1st try: min() rescans each slice. This is correct only because the
# nucleotides' alphabetical order (A < C < G < T) matches their impact
# factors (1 < 2 < 3 < 4), so the lexicographically smallest character in a
# slice is also the one with the minimal impact factor.
def solution(S, P, Q):
    d = len(P)
    b = []
    for i in range(d):
        b.append(min(S[P[i]:Q[i]+1]))
    a = {"A": "1", "C": "2", "G": "3", "T": "4"}
    return list(map(lambda x: int(a[x]), b))

solution("CAGCCTA", [2, 5, 0], [4, 5, 6])

# O(N*M) 2nd try: same idea, but map the minimal character straight to its
# integer factor instead of going through strings.
def solution(S, P, Q):
    d = len(P)
    b = []
    a = {"A": 1, "C": 2, "G": 3, "T": 4}
    for i in range(d):
        b.append(a[min(S[P[i]:Q[i]+1])])
    return b

solution("CAGCCTA", [2, 5, 0], [4, 5, 6])

# 3rd try — a genuine O(N+M) solution using prefix sums.
# FIX: the previous "O(N+M)" attempt still evaluated `"A" in S[P[i]:Q[i]+1]`
# for every query, and each slice + membership test is O(N), so the whole
# thing was O(N*M). Prefix occurrence counts make every query O(1).
def solution(S, P, Q):
    """Return the minimal impact factor of S[P[i]..Q[i]] for each query i.

    S is a DNA string over {A, C, G, T} with impact factors 1, 2, 3, 4.
    P and Q are equal-length lists with 0 <= P[i] <= Q[i] < len(S).

    Prefix occurrence counts are built once in O(N); each of the M queries is
    then answered in O(1), for O(N+M) total time and O(N) extra space.
    """
    n = len(S)
    # prefix[c][i] == number of occurrences of nucleotide c in S[:i].
    # 'T' needs no table: it is the answer whenever A, C and G are absent.
    prefix = {c: [0] * (n + 1) for c in "ACG"}
    for i, ch in enumerate(S):
        for c in "ACG":
            prefix[c][i + 1] = prefix[c][i] + (ch == c)
    impact = {"A": 1, "C": 2, "G": 3}
    result = []
    for lo, hi in zip(P, Q):
        for c in "ACG":  # checked in increasing impact order
            if prefix[c][hi + 1] - prefix[c][lo] > 0:
                result.append(impact[c])
                break
        else:
            result.append(4)  # no A/C/G in the range -> only T remains
    return result

solution("CAGCCTA", [2, 5, 0], [4, 5, 6])
fun_coding/codility/5_GenomicRangeQuery(Prefix Sums).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- # + colab_type="code" id="1xJZ5glPPCRz" colab={"height": 259} outputId="27e9141e-270b-4f91-ee06-c534f26241d5" executionInfo={"status": "ok", "timestamp": 1550095507708, "user_tz": 480, "elapsed": 15633, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-RcxktLY-TBk/AAAAAAAAAAI/AAAAAAAAABY/b4V4dTIqmPI/s64/photo.jpg", "userId": "06401446828348966425"}} import os from tensorflow.keras import layers from tensorflow.keras import Model # !wget --no-check-certificate \ # https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \ # -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 from tensorflow.keras.applications.inception_v3 import InceptionV3 local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5' pre_trained_model = InceptionV3(input_shape = (150, 150, 3), include_top = False, weights = None) pre_trained_model.load_weights(local_weights_file) for layer in pre_trained_model.layers: layer.trainable = False # pre_trained_model.summary() last_layer = pre_trained_model.get_layer('mixed7') print('last layer output shape: ', last_layer.output_shape) last_output = last_layer.output # + colab_type="code" id="BMXb913pbvFg" colab={} from tensorflow.keras.optimizers import RMSprop # Flatten the output layer to 1 dimension x = layers.Flatten()(last_output) # Add a fully connected layer with 1,024 hidden units and ReLU activation x = layers.Dense(1024, activation='relu')(x) # Add a dropout rate of 0.2 x = layers.Dropout(0.2)(x) # Add a final sigmoid layer for classification x = layers.Dense (1, activation='sigmoid')(x) model = Model( pre_trained_model.input, x) model.compile(optimizer = RMSprop(lr=0.0001), loss = 'binary_crossentropy', metrics = ['acc']) # + 
colab_type="code" id="O4s8HckqGlnb" outputId="76893e93-b44b-41f2-a4f2-428ad05c537d" executionInfo={"status": "ok", "timestamp": 1550095528147, "user_tz": 480, "elapsed": 5048, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-RcxktLY-TBk/AAAAAAAAAAI/AAAAAAAAABY/b4V4dTIqmPI/s64/photo.jpg", "userId": "06401446828348966425"}} colab={"height": 276} # !wget --no-check-certificate \ # https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \ # -O /tmp/cats_and_dogs_filtered.zip from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import zipfile local_zip = '//tmp/cats_and_dogs_filtered.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() # Define our example directories and files base_dir = '/tmp/cats_and_dogs_filtered' train_dir = os.path.join( base_dir, 'train') validation_dir = os.path.join( base_dir, 'validation') train_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs')# Directory with our validation dog pictures train_cat_fnames = os.listdir(train_cats_dir) train_dog_fnames = os.listdir(train_dogs_dir) # Add our data-augmentation parameters to ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255., rotation_range = 40, width_shift_range = 0.2, height_shift_range = 0.2, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) # Note that the validation data should not be augmented! test_datagen = ImageDataGenerator( rescale = 1.0/255. 
) # Flow training images in batches of 20 using train_datagen generator train_generator = train_datagen.flow_from_directory(train_dir, batch_size = 20, class_mode = 'binary', target_size = (150, 150)) # Flow validation images in batches of 20 using test_datagen generator validation_generator = test_datagen.flow_from_directory( validation_dir, batch_size = 20, class_mode = 'binary', target_size = (150, 150)) # + colab_type="code" id="Blhq2MAUeyGA" outputId="a1659af1-32c1-4503-8972-d47b77b5035d" executionInfo={"status": "ok", "timestamp": 1550095833960, "user_tz": 480, "elapsed": 298966, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-RcxktLY-TBk/AAAAAAAAAAI/AAAAAAAAABY/b4V4dTIqmPI/s64/photo.jpg", "userId": "06401446828348966425"}} colab={"height": 708} history = model.fit_generator( train_generator, validation_data = validation_generator, steps_per_epoch = 100, epochs = 20, validation_steps = 50, verbose = 2) # + id="C2Fp6Se9rKuL" colab_type="code" colab={"height": 306} outputId="498822a1-4a8d-4840-cdb9-26366f754c41" executionInfo={"status": "ok", "timestamp": 1550095870701, "user_tz": 480, "elapsed": 593, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-RcxktLY-TBk/AAAAAAAAAAI/AAAAAAAAABY/b4V4dTIqmPI/s64/photo.jpg", "userId": "06401446828348966425"}} import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show()
dlaicourse/Course 2 - Part 6 - Lesson 3 - Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TechBytes: Using Python with Teradata Vantage # ## Part 2: Data Exploration and Transformations - Building an ADS # # The contents of this file are Teradata Public Content and have been released to the Public Domain. # Please see _license.txt_ file in the package for more information. # # <NAME> and <NAME> - May 2021 - v.2.0 \ # Copyright (c) 2021 by Teradata \ # Licensed under BSD # # This TechByte demonstrates how to # * explore your data in teradataml via functions for summary statistics, feature characteristics, data type reporting, and creating plots from table data. # * use options to display the actual SQL query submitted by teradataml to the Database. # * transform your data features by using teradataml DataFrame methods, Vantage Analytics Library (VAL) functions, and SQLAlchemy expressions. # * persist a teradataml DataFrame as a table in the Database. # # Contributions by: # - <NAME>, Sr Staff Software Architect, Teradata Product Engineering / Vantage Cloud and Applications. # - <NAME>, Principal Software Architect, Teradata Product Management / Advanced Analytics. # ### Initial Steps: Load libraries and create a Vantage connection # + # Load teradataml and dependency packages. # import os import getpass as gp from teradataml import create_context, remove_context, get_context from teradataml import DataFrame, copy_to_sql, in_schema from teradataml.options.display import display # Import "valib" object from teradataml to execute Vantage Analytic Library functions. # Set 'configure.val_install_location' to the database name where Vantage analytic library functions are installed. 
from teradataml.analytics.valib import * from teradataml.options.configure import configure from teradataml.dataframe.sql_functions import case configure.val_install_location = "TRNG_XSP" from sqlalchemy.sql.expression import select, or_, extract, text, join, case as case_when from sqlalchemy import func import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib import cm import plotly.express as px import plotly.graph_objects as go # - # Specify a Teradata Vantage server to connect to. In the following statement, # replace the following argument values with strings as follows: # <HOST> : Specify your target Vantage system hostname (or IP address). # <UID> : Specify your Database username. # <PWD> : Specify your password. You can also use encrypted passwords via # the Stored Password Protection feature. #con = create_context(host = <HOST>, username = <UID>, password = <<PASSWORD>>, # database = <DB_Name>, "temp_database_name" = <Temp_DB_Name>) # con = create_context(host = "<Host_Name>", username = "<Username>", password = <PASSWORD>(prompt='Password:'), logmech = "LDAP", database = "TRNG_TECHBYTES", temp_database_name = "<Database_Name>") # ### 1. Database tables in teradataml # The following data have been used for the present TechBytes series on Analytics with R and Python. They reside in the TRNG_XSP database on the Transcend server. It is a simplistic fictitious dataset of banking Customers (10K-ish rows), Accounts (20K-ish rows) and Transactions (1M-ish rows). The 3 subsets are related to each other in the following ways: # # ![TWMDemoDataModel](Inputs/Plots/DemoData.png) # Create DataFrames for the Customer, Accounts and Transactions tables in the # Vantage Advanced SQL Engine. Using to_pandas() for a cleaner display format. # Note: to_pandas() converts a teradataml DataFrame to a pandas DataFrame. To # this, it transfers all data of a table to the client; use with caution. 
# Note: in_schema() argument can read data from nondefault databases, as long # as you have read permissions for the specified database. # tdCustomer = DataFrame(in_schema("TRNG_XSP", "Customer")) tdCustomer.to_pandas().head(5) tdAccounts = DataFrame(in_schema("TRNG_XSP", "Accounts")) tdAccounts.to_pandas().head(5) tdTransactions = DataFrame(in_schema("TRNG_XSP", "Transactions")) tdTransactions.to_pandas().head(5) # ### 2. Data Exploration # #### 2.1. Table info and summary statistics # Inquire the data types of a table behind a teradataml DataFrame. # print(tdCustomer.tdtypes) # Use the Values function from VAL to inspect feature characteristics in the # tdCustomer teradataml dataset. # tdCustomer_values = valib.Values(data = tdCustomer, columns=["all"]) tdCustomer_values.result.to_pandas() # Use the Statistics function from VAL to compute summary statistics about the # tdCustomer teradataml dataset. tdCustomer_stats = valib.Statistics(data=tdCustomer, columns="allnumericanddate") tdCustomer_stats.result.to_pandas() # + # Use the teradataml option to print the SQL code of calls to Advanced SQL # or ML Engines analytic functions. # display.print_sqlmr_query = True # Use the show_query() method in analytic functions to display the SQL code # that teradataml pushes to the Database for execution. For example: # valib.Values(data = tdCustomer, columns=["all"]).show_query() # - # #### 2.2. 
Plots from table data # Histogram of income # tdCustomer_hist_pd = tdCustomer[tdCustomer.income != None].to_pandas() counts, bins = np.histogram(tdCustomer_hist_pd.income, bins = range(0, int(round(tdCustomer_hist_pd.income.max())), 10000)) bins = 0.5 * (bins[:-1] + bins[1:]) fig = px.bar(x = bins, y = counts) fig.update_layout(height = 400, title = "Histogram of Income Distribution") fig.update_xaxes(tickangle = 0, title = "Income") fig.update_yaxes(title = "Count") fig.show() # + # Bar plot: Number of customers state-wise # tdCustomer_bar_pd = tdCustomer.groupby("state_code").count().select(['state_code','count_cust_id']).sort(['state_code']).to_pandas() index = np.arange(tdCustomer_bar_pd.shape[0]) fig = px.bar(x = index, y = tdCustomer_bar_pd['count_cust_id']) fig.update_layout(height = 400, title = "Bar plot of Customers per State") fig.update_xaxes(tickangle = 0, title = "State ID") fig.update_yaxes(title = "Count") fig.show() #plt.bar(index, tdCustomer_bar_pd['count_cust_id']) #plt.xlabel("State ID") #plt.ylabel("No of customers") #plt.xticks(index, tdCustomer_bar_pd['state_code'], rotation=30) #plt.title("State-wise customers") #plt.show() # - # ### 2.3. Data Transformation # First, grab the customer demographic variables and create indicator variables # for gender, marital_status and state_code (we consider both a classification # into the top 6 states and jointly the rest as "OTHER", as well as creating # indicator columns for each of the top 6 states). # # For this task, we illustrate data manipulation with the use of SQLAlchemy. # Initially we assemble the list of columns to be projected in SQL. We build it # by using the SQLAlchemy function case_when() and objects 'Column' and 'Case'. 
# cust_select_query_column_projection = [ tdCustomer.cust_id.expression, tdCustomer.income.expression, tdCustomer.age.expression, tdCustomer.years_with_bank.expression.label("tot_cust_years"), tdCustomer.nbr_children.expression.label("tot_children"), case_when([(tdCustomer.gender.expression == "M", 1)], else_=0).expression.label("male_ind"), case_when([(tdCustomer.gender.expression == "F", 1)], else_=0).expression.label("female_ind"), case_when([(tdCustomer.marital_status.expression == "1", 1)], else_=0).expression.label("single_ind"), case_when([(tdCustomer.marital_status.expression == "2", 1)], else_=0).expression.label("married_ind"), case_when([(tdCustomer.marital_status.expression == "3", 1)], else_=0).expression.label("separated_ind"), case_when([(tdCustomer.state_code.expression == "CA", "CA"), (tdCustomer.state_code.expression == "NY", "NY"), (tdCustomer.state_code.expression == "TX", "TX"), (tdCustomer.state_code.expression == "IL", "IL"), (tdCustomer.state_code.expression == "AZ", "AZ"), (tdCustomer.state_code.expression == "OH", "OH") ], else_="OTHER").expression.label("state_code"), case_when([(tdCustomer.state_code.expression == "CA", 1)], else_=0).expression.label("ca_resident_ind"), case_when([(tdCustomer.state_code.expression == "NY", 1)], else_=0).expression.label("ny_resident_ind"), case_when([(tdCustomer.state_code.expression == "TX", 1)], else_=0).expression.label("tx_resident_ind"), case_when([(tdCustomer.state_code.expression == "IL", 1)], else_=0).expression.label("il_resident_ind"), case_when([(tdCustomer.state_code.expression == "AZ", 1)], else_=0).expression.label("az_resident_ind"), case_when([(tdCustomer.state_code.expression == "OH", 1)], else_=0).expression.label("oh_resident_ind") ] # # Build and run the SQL from the list, then use it to create a DataFrame. 
# cust = DataFrame.from_query(str(select(cust_select_query_column_projection).compile(compile_kwargs={"literal_binds": True}))) cust.to_pandas().head(5) # + # Next, get the accounts information. Create account indicators and calculate # account balances for each acct_type. Assume the median value for NaN balance # entries. # # For this task, we illustrate data manipulation with the use of VAL functions. # fn = FillNa(style = "literal", value = 0) account_type_t = OneHotEncoder(values = {"CC":"cc_acct_ind", "CK":"ck_acct_ind", "SV":"sv_acct_ind"}, columns = "acct_type", fillna = fn) fillna_t = FillNa(style = "median", columns = ["cust_id", "starting_balance", "ending_balance"]) acctObj = valib.Transform(data = tdAccounts, one_hot_encode = [account_type_t], fillna = fillna_t, key_columns = "cust_id") acct_bal = acctObj.result.starting_balance + acctObj.result.ending_balance # # Build a DataFrame upon conclusion of the transformations. # acct = acctObj.result.assign( cc_bal = case_when( [(acctObj.result.cc_acct_ind.expression == 1, acct_bal.expression) ], else_ = 0 ) ).assign( ck_bal = case_when( [(acctObj.result.ck_acct_ind.expression == 1, acct_bal.expression) ], else_ = 0 ) ).assign( sv_bal = case_when( [(acctObj.result.sv_acct_ind.expression == 1, acct_bal.expression) ], else_ = 0 ) ) acct.to_pandas().head(5) # + # Next, work on the transactions information. Add columns that indicate the # quarter a transaction was made. 
# acct_mon = extract('month', tdTransactions.tran_date.expression).expression trans = tdTransactions.assign( q1_trans = case( [(acct_mon == "1", 1), (acct_mon == "2", 1), (acct_mon == "3", 1)], else_ = 0 ), q2_trans = case( [(acct_mon == "4", 1), (acct_mon == "5", 1), (acct_mon == "6", 1)], else_ = 0 ), q3_trans = case( [(acct_mon == "7", 1), (acct_mon == "8", 1), (acct_mon == "9", 1)], else_ = 0 ), q4_trans = case( [(acct_mon == "10", 1), (acct_mon == "11", 1), (acct_mon == "12", 1)], else_ = 0 ), ) trans.to_pandas().head(5) # - # Finally, we want to bring together the 3 transformed tables. # # Left-join the transformed Customer table to the transformed Account table. # cust_acct = cust.join(other = acct, how = "left", on = ["cust_id"], lsuffix = "cust", rsuffix = "acct") cust_acct.to_pandas().head(5) # + # Then, left-join the previous result to the transformed Transaction table. # acct_tran_amt = trans.principal_amt + trans.interest_amt cust_acct_tran = cust_acct.join(other = trans, how = "left", on = ["acct_nbr"], lsuffix = "cu_ac", rsuffix = "trans" ).assign( cc_tran_amt = case_when( [(cust_acct.cc_acct_ind.expression == 1, acct_tran_amt.expression) ], else_=0 ) ).assign( ck_tran_amt = case_when( [(cust_acct.ck_acct_ind.expression == 1, acct_tran_amt.expression) ], else_=0 ) ).assign( sv_tran_amt = case_when( [(cust_acct.ck_acct_ind.expression == 1, acct_tran_amt.expression) ], else_=0 ) ) cust_acct_tran.to_pandas().head(5) # - # ### 2.4. Building an Analytic Data Set (ADS) # + # Finally, aggregate and roll up by 'cust_id' all variables to produce the # Analytic Data Set (ADS) we will be using for our analyses. 
# ADS_Py = cust_acct_tran.groupby("cust_cust_id").agg( { "income" : "max", "age" : "max", "tot_cust_years" : "max", "tot_children" : "max", "female_ind" : "max", "single_ind" : "max", "married_ind" : "max", "separated_ind" : "max", "state_code" : "max", "ca_resident_ind" : "max", "ny_resident_ind" : "max", "tx_resident_ind" : "max", "il_resident_ind" : "max", "az_resident_ind" : "max", "oh_resident_ind" : "max", "ck_acct_ind" : "max", "sv_acct_ind" : "max", "cc_acct_ind" : "max", "ck_bal" : "mean", "sv_bal" : "mean", "cc_bal" : "mean", "ck_tran_amt" : "mean", "sv_tran_amt" : "mean", "cc_tran_amt" : "mean", "q1_trans" : "sum", "q2_trans" : "sum", "q3_trans" : "sum", "q4_trans" : "sum" } ) # Rename columns columns = ['cust_id','income','age','tot_cust_years','tot_children', 'female_ind', 'single_ind', 'married_ind', 'separated_ind', 'state_code', 'ca_resident_ind', 'ny_resident_ind', 'tx_resident_ind','il_resident_ind', 'az_resident_ind', 'oh_resident_ind', 'ck_acct_ind', 'sv_acct_ind', 'cc_acct_ind', 'ck_avg_bal', 'sv_avg_bal', 'cc_avg_bal', 'ck_avg_tran_amt', 'sv_avg_tran_amt', 'cc_avg_tran_amt', 'q1_trans_cnt', 'q2_trans_cnt', 'q3_trans_cnt','q4_trans_cnt'] ADS_Py = ADS_Py.assign(drop_columns = True, cust_id = ADS_Py.cust_cust_id, income = ADS_Py.max_income, age = ADS_Py.max_age, tot_cust_years = ADS_Py.max_tot_cust_years, tot_children = ADS_Py.max_tot_children, female_ind = ADS_Py.max_female_ind, single_ind = ADS_Py.max_single_ind, married_ind = ADS_Py.max_married_ind, separated_ind = ADS_Py.max_separated_ind, state_code = ADS_Py.max_state_code, ca_resident_ind = ADS_Py.max_ca_resident_ind, ny_resident_ind = ADS_Py.max_ny_resident_ind, tx_resident_ind = ADS_Py.max_tx_resident_ind, il_resident_ind = ADS_Py.max_il_resident_ind, az_resident_ind = ADS_Py.max_az_resident_ind, oh_resident_ind = ADS_Py.max_oh_resident_ind, ck_acct_ind = ADS_Py.max_ck_acct_ind, sv_acct_ind = ADS_Py.max_sv_acct_ind, cc_acct_ind = ADS_Py.max_cc_acct_ind, ck_avg_bal = ADS_Py.mean_ck_bal, 
sv_avg_bal = ADS_Py.mean_sv_bal, cc_avg_bal = ADS_Py.mean_cc_bal, ck_avg_tran_amt = ADS_Py.mean_ck_tran_amt, sv_avg_tran_amt = ADS_Py.mean_sv_tran_amt, cc_avg_tran_amt = ADS_Py.mean_cc_tran_amt, q1_trans_cnt = ADS_Py.sum_q1_trans, q2_trans_cnt = ADS_Py.sum_q2_trans, q3_trans_cnt = ADS_Py.sum_q3_trans, q4_trans_cnt = ADS_Py.sum_q4_trans ).select(columns) ADS_Py = ADS_Py.dropna() # teradataml DataFrame creates views at the backend which are temporary. # At the end of the context removal, all temporary table/views perish. # For this reason, persist the output of ADS_Py as a table in the SQLE. copy_to_sql(ADS_Py, table_name="ak_TBv2_ADS_Py", if_exists="replace") # - # Create a teradataml DataFrame and take a glimpse at it. # td_ADS_Py = DataFrame("ak_TBv2_ADS_Py") td_ADS_Py.to_pandas().head(5) # + # Split the ADS into 2 samples, each with 60% and 40% of total rows. # Use the 60% sample to train, and the 40% sample to test/score. # Persist the samples as tables in the Database, and create DataFrames. # td_Train_Test_ADS = td_ADS_Py.sample(frac = [0.6, 0.4]) Train_ADS = td_Train_Test_ADS[td_Train_Test_ADS.sampleid == "1"] copy_to_sql(Train_ADS, table_name="ak_TBv2_Train_ADS_Py", if_exists="replace") td_Train_ADS = DataFrame("ak_TBv2_Train_ADS_Py") Test_ADS = td_Train_Test_ADS[td_Train_Test_ADS.sampleid == "2"] copy_to_sql(Test_ADS, table_name="ak_TBv2_Test_ADS_Py", if_exists="replace") td_Test_ADS = DataFrame("ak_TBv2_Test_ADS_Py") # - # ### End of session # Remove the context of present teradataml session and terminate the Python # session. It is recommended to call the remove_context() function for session # cleanup. Temporary objects are removed at the end of the session. # remove_context()
TBv2_Py-2-Explore-Transform-ADS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="SkNRkmTCJFJ5" import pandas as pd import numpy as np import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from sklearn.metrics import roc_auc_score from sklearn.model_selection import GridSearchCV import warnings warnings.filterwarnings('ignore') # + colab={} colab_type="code" id="9UCwm8R9JQJe" df_train=pd.read_csv('train.csv') df_test=pd.read_csv('test.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" id="jl2DIvnaKcrA" outputId="dae3debb-14e3-4360-86f8-6e004483bbef" df_train.head() # + colab={"base_uri": "https://localhost:8080/", "height": 108} colab_type="code" id="dxPK9UJpKf1F" outputId="6f60a9ce-6ee2-40c4-ff87-c7b23ff8f39f" df_train['Segmentation'].value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 345} colab_type="code" id="W3gM5u6CKwJZ" outputId="7a7789ae-8794-48cf-a456-7fb674177bf8" df_train.info() # + colab={} colab_type="code" id="f-mYPbLpLTz0" x=df_train[df_train['Work_Experience']==0] # + colab={"base_uri": "https://localhost:8080/", "height": 427} colab_type="code" id="o2daRflmMdC8" outputId="b4b65e14-4797-413c-93b9-7fefde584530" x[x['Profession']=='Artist'] # + colab={} colab_type="code" id="DTnfb3i8N8es" map_gender={"Male" : 1, "Female": 2} df_train['Gender'] = df_train['Gender'].map(map_gender) df_test['Gender'] = df_test['Gender'].map(map_gender) # + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" id="dpgWnkXlljLk" outputId="02cda8cc-9d29-43a8-8764-37b9d70be7bd" df_train.head() # + colab={} colab_type="code" 
# id="oKADtPgHl3fN" (cell header continued from the previous chunk)
#
# Customer-segmentation notebook: encodes the categorical columns of
# `df_train` / `df_test` (loaded earlier in the notebook) as small integers,
# imputes missing values, engineers interaction features, then fits a
# RandomForest and a KNN classifier and writes the KNN predictions to CSV.

# Sort by Age so the forward-fills below impute from the nearest-aged row
# rather than from arbitrary file order.
df_train.sort_values(["Age"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["Age"], axis=0, ascending=[True], inplace=True)

# Ever_Married -> {No: 1, Yes: 2}; unmapped/missing values become NaN and
# are forward-filled.  (.fillna(method='ffill') is deprecated in modern
# pandas; .ffill() is the behaviour-identical replacement.)
map_married = {"No": 1, "Yes": 2}
df_train['Ever_Married'] = df_train['Ever_Married'].map(map_married)
df_test['Ever_Married'] = df_test['Ever_Married'].map(map_married)
df_train["Ever_Married"] = df_train["Ever_Married"].ffill()
df_test["Ever_Married"] = df_test["Ever_Married"].ffill()

df_train.head()
df_train.info()
df_train['Spending_Score'].value_counts()

# Ordinal encoding: Low < Average < High.
map_score = {"Low": 1, "Average": 2, "High": 3}
df_train['Spending_Score'] = df_train['Spending_Score'].map(map_score)
df_test['Spending_Score'] = df_test['Spending_Score'].map(map_score)

df_train.sort_values(["Spending_Score"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["Spending_Score"], axis=0, ascending=[True], inplace=True)

df_train

map_graduated = {"No": 1, "Yes": 2}
df_train['Graduated'] = df_train['Graduated'].map(map_graduated)
df_test['Graduated'] = df_test['Graduated'].map(map_graduated)

df_train['Profession'].value_counts()

# BUG FIX: the original dict listed "Healthcare" twice
# ({"Healthcare": 1, ..., "Healthcare": 4, ...}); Python keeps the last
# duplicate, so the effective mapping was Healthcare -> 4 and code 1 was
# never produced.  The duplicate is removed while preserving the effective
# behaviour.
# NOTE(review): professions not listed here (e.g. Artist, Entertainment,
# Lawyer) map to NaN and are mode-imputed below — confirm that is intended.
map_profession = {"Healthcare": 4, "Engineer": 2, "Doctor": 3, "Executive": 5}
df_train['Profession'] = df_train['Profession'].map(map_profession)
df_test['Profession'] = df_test['Profession'].map(map_profession)

df_train["Var_1"].value_counts()

map_var = {"Cat_1": 1, "Cat_2": 2, "Cat_3": 3, "Cat_4": 4,
           "Cat_5": 5, "Cat_6": 6, "Cat_7": 7}
df_train['Var_1'] = df_train['Var_1'].map(map_var)
df_test['Var_1'] = df_test['Var_1'].map(map_var)

df_train.head()
df_train.corr()

df_train.sort_values(["Age"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["Age"], axis=0, ascending=[True], inplace=True)

df_train["Graduated"] = df_train["Graduated"].ffill()
df_test["Graduated"] = df_test["Graduated"].ffill()

df_train.sort_values(["Gender"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["Gender"], axis=0, ascending=[True], inplace=True)

# Mode imputation for the remaining sparse columns.
df_train["Profession"].fillna(df_train['Profession'].mode()[0], inplace=True)
df_test["Profession"].fillna(df_test['Profession'].mode()[0], inplace=True)

df_train.sort_values(["Age"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["Age"], axis=0, ascending=[True], inplace=True)

df_train["Work_Experience"].fillna(df_train['Work_Experience'].mode()[0], inplace=True)
df_test["Work_Experience"].fillna(df_test['Work_Experience'].mode()[0], inplace=True)

df_train["Family_Size"].fillna(df_train['Family_Size'].mode()[0], inplace=True)
df_test["Family_Size"].fillna(df_test['Family_Size'].mode()[0], inplace=True)

df_train["Var_1"].fillna(df_train['Var_1'].mode()[0], inplace=True)
df_test["Var_1"].fillna(df_test['Var_1'].mode()[0], inplace=True)

df_train.info()
df_test.info()
df_train.head()

# Restore a deterministic row order before splitting features/labels.
df_train.sort_values(["ID"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["ID"], axis=0, ascending=[True], inplace=True)

y_train = df_train['Segmentation']
x_train = df_train.drop(['Segmentation'], axis=1)
# df_test = df_test.drop(['ID'], axis=1)

y_train

# Renamed from `map` to avoid shadowing the builtin.
map_segmentation = {"A": 1, "B": 2, "C": 3, "D": 4}
y_train1 = y_train.map(map_segmentation)

# x_train['Age'] = (x_train['Age'] - x_train['Age'].mean()) / x_train['Age'].std()
# BUG FIX: a stray bare `x` expression here raised NameError; removed.

# from xgboost import XGBClassifier
# XGBClassifier = XGBClassifier()
# XGBClassifier.fit(x_train, y_train)
# y_pred = XGBClassifier.predict(df_test)

from sklearn.ensemble import RandomForestClassifier

# NOTE(review): `classifier` is configured but never fitted; the model used
# below has default hyper-parameters (preserved from the original).
classifier = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
# BUG FIX: the original rebound the name `RandomForestClassifier` to an
# instance, clobbering the class; a distinct variable is used instead.
rf_model = RandomForestClassifier()
rf_model.fit(x_train, y_train)
y_pred = rf_model.predict(df_test)
y_pred

from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=1, leaf_size=10, weights='distance', p=1, n_jobs=-1)

x_train.head()

# Interaction features.
# NOTE(review): Gender is never numerically encoded above, so these products
# assume it is already numeric — verify the upstream preprocessing.
x_train['Famil/spending'] = x_train['Spending_Score'] * x_train['Family_Size'] * x_train['Gender']
# BUG FIX: the test-set feature originally multiplied by x_train['Gender']
# (training rows!) instead of df_test['Gender'], silently misaligning rows.
df_test['Famil/spending'] = df_test['Spending_Score'] * df_test['Family_Size'] * df_test['Gender']

x_train['gender/marr'] = x_train['Gender'] * x_train['Ever_Married']
df_test['gender/marr'] = df_test['Gender'] * df_test['Ever_Married']

x_train = x_train.drop(['Work_Experience'], axis=1)
df_test = df_test.drop(['Work_Experience'], axis=1)

x_train['var1/id'] = x_train['Var_1'] * x_train['ID']
df_test['var1/id'] = df_test['Var_1'] * df_test['ID']

df_train['target'] = y_train1
df_train.corr()

neigh.fit(x_train, y_train)
y_pred = neigh.predict(df_test)

# Persist the KNN predictions (segment letters), one per row.
np.savetxt("data009.csv", y_pred, delimiter=',', fmt=['%s'])

# + colab={} colab_type="code" id="n6BWNiPeu3mX"
consumer_segmentation (1).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Environment (conda_pytorch_latest_p37)
#     language: python
#     name: conda_pytorch_latest_p37
# ---

# Train the standalone OriVAEDecoder: sample a random latent per object,
# decode boxes/angles conditioned on the scene graph, and minimise the
# box/angle reconstruction losses.  Logs to TensorBoard.

from options.options import Options
import os
import torch
from build_dataset_model import build_loaders, build_model
from utils import get_model_attr, calculate_model_losses, tensor_aug
from collections import defaultdict
import math
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm

args = Options().parse()
if (args.output_dir is not None) and (not os.path.isdir(args.output_dir)):
    os.mkdir(args.output_dir)
if (args.test_dir is not None) and (not os.path.isdir(args.test_dir)):
    os.mkdir(args.test_dir)
args.use_AE = True

# tensorboard
writer = SummaryWriter()

# load data
vocab, train_loader, val_loader = build_loaders(args)
dt = train_loader.dataset

# (duplicate `from utils import calculate_model_losses` removed — it is
# already imported above)
from new.CustomVAE import *

# load decoder
ovaed = OriVAEDecoder(dt.vocab, embedding_dim=64)
optimizer = torch.optim.Adam(ovaed.parameters(), lr=args.learning_rate)
ovaed = ovaed.cuda()

t = 0  # total training steps

# training
ovaed.train()
for batch in tqdm(train_loader):
    t += 1
    ids, objs, boxes, triples, angles, attributes, obj_to_img, triple_to_img = tensor_aug(batch)
    # One random latent per object; decoder reconstructs layout from it.
    z = torch.randn(objs.size(0), 64).to(objs.device)
    boxes_pred, angles_pred = ovaed.decoder(z, objs, triples, attributes)
    total_loss, losses = calculate_model_losses(args, ovaed, boxes_pred, boxes, angles, angles_pred)
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    if t % args.print_every == 0:
        print("On batch {} out of {}".format(t, args.num_iterations))
        for name, val in losses.items():
            print(' [%s]: %.4f' % (name, val))
            writer.add_scalar('Loss/' + name, val, t)

valid_t = 0  # validation steps

# validation
ovaed.eval()
# BUG FIX: gradients are not needed for validation; without no_grad() every
# forward pass builds an autograd graph and leaks memory.
with torch.no_grad():
    for batch in tqdm(val_loader):
        valid_t += 1
        ids, objs, boxes, triples, angles, attributes, obj_to_img, triple_to_img = tensor_aug(batch)
        z = torch.randn(objs.size(0), 64).to(objs.device)
        boxes_pred, angles_pred = ovaed.decoder(z, objs, triples, attributes)
        total_loss, losses = calculate_model_losses(args, ovaed, boxes_pred, boxes, angles, angles_pred)
        # BUG FIX: the original gated on `t` (frozen at the final training
        # step), so validation logging fired either every batch or never;
        # use the validation counter.
        if valid_t % args.print_every == 0:
            print("On batch {} out of {}".format(valid_t, args.num_iterations))
            for name, val in losses.items():
                print(' [%s]: %.4f' % (name, val))
                writer.add_scalar('Loss/Validation/' + name, val, valid_t)
Decoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from pyvista import set_plot_theme set_plot_theme('document') # Plot Open Street Map Data {#open_street_map_example} # ========================= # # This was originally posted to # [pyvista/pyvista-support\#486](https://github.com/pyvista/pyvista-support/issues/486). # # Be sure to check out [osmnx](https://github.com/gboeing/osmnx) # # Start by generating a graph from an address. # # + import numpy as np import osmnx as ox import pyvista as pv # Alternatively, use the pickeled graph included in our examples. from pyvista import examples # - # Read in the graph directly from the Open Street Map server. # # + # address = 'Holzgerlingen DE' # graph = ox.graph_from_address(address, dist=500, network_type='drive') # pickle.dump(graph, open('/tmp/tmp.p', 'wb')) graph = examples.download_osmnx_graph() # - # Next, convert the edges into pyvista lines using # `pyvista.lines_from_points`{.interpreted-text role="func"}. # # + nodes, edges = ox.graph_to_gdfs(graph) lines = [] # convert each edge into a line for idx, row in edges.iterrows(): x_pts = row['geometry'].xy[0] y_pts = row['geometry'].xy[1] z_pts = np.zeros(len(x_pts)) pts = np.column_stack((x_pts, y_pts, z_pts)) line = pv.lines_from_points(pts) lines.append(line) # - # Finally, merge the lines and plot # combined_lines = lines[0].merge(lines[1:]) combined_lines.plot(line_width=3, cpos='xy')
osmnx-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from glob import glob files = glob('news/*.json') files = [f.replace('news/','') for f in files] files.sort() len(files) # - for no, news in enumerate(files): news = news.replace('.json','') print(f'{no + 1}. {news}')
misc/crawl/sort.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Kaggle-in-Colab bootstrap: upload a kaggle.json API key, download the
# Bengali.AI competition data plus a Bangla font, and unpack everything
# under /content.

# Colab library to upload files to notebook
from google.colab import files

# Install Kaggle library
# !pip install -q kaggle

# Upload kaggle API key file
uploaded = files.upload()

# !ls -l ~/.kaggle
# !rm -rf ~/.kaggle
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 ~/.kaggle/kaggle.json
# !ls ~/.kaggle
# !kaggle competitions download -c bengaliai-cv19
# !kaggle datasets download -d kaushal2896/kalpurush-fonts
# !unzip kalpurush-fonts.zip
# !unzip /content/test_image_data_0.parquet.zip
# !unzip /content/test_image_data_1.parquet.zip
# !unzip /content/test_image_data_2.parquet.zip
# !unzip /content/test_image_data_3.parquet.zip
# !unzip /content/train_image_data_0.parquet.zip
# !unzip /content/train_image_data_1.parquet.zip
# !unzip /content/train_image_data_2.parquet.zip
# !unzip /content/train_image_data_3.parquet.zip
# !unzip train.csv.zip
# !ls

import os
os.getcwd()

# Analytics / deep-learning stack used throughout the notebook.
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from glob import glob
import time, gc
import cv2
from tensorflow import keras
import matplotlib.image as mpimg
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.models import clone_model
from keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout, BatchNormalization, Input
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import PIL.Image as Image, PIL.ImageDraw as ImageDraw, PIL.ImageFont as ImageFont
from matplotlib import pyplot as plt
import seaborn as sns

# List every file the downloads produced.
import os
for root, _, names in os.walk('/content'):
    for name in names:
        print(os.path.join(root, name))

# Label tables shipped with the competition.
train_df_ = pd.read_csv('/content/train.csv')
test_df_ = pd.read_csv('/content/test.csv')
class_map_df = pd.read_csv('/content/class_map.csv')
sample_sub_df = pd.read_csv('/content/sample_submission.csv')

train_df_.head()
test_df_.head()
sample_sub_df.head()
class_map_df.head()

print(f'Size of training data: {train_df_.shape}')
print(f'Size of test data: {test_df_.shape}')
print(f'Size of class map: {class_map_df.shape}')

# ## Exploratory Data Analysis
# Exploratory data analysis (EDA) is an approach to analyzing data sets to
# summarize their main characteristics, often with visual methods.
# + id="T3JhgqYIbOn3" HEIGHT = 236 WIDTH = 236 def get_n(df, field, n, top=True): top_graphemes = df.groupby([field]).size().reset_index(name='counts')['counts'].sort_values(ascending=not top)[:n] top_grapheme_roots = top_graphemes.index top_grapheme_counts = top_graphemes.values top_graphemes = class_map_df[class_map_df['component_type'] == field].reset_index().iloc[top_grapheme_roots] top_graphemes.drop(['component_type', 'label'], axis=1, inplace=True) top_graphemes.loc[:, 'count'] = top_grapheme_counts return top_graphemes def image_from_char(char): image = Image.new('RGB', (WIDTH, HEIGHT)) draw = ImageDraw.Draw(image) myfont = ImageFont.truetype('/content/kalpurush-2.ttf', 120) w, h = draw.textsize(char, font=myfont) draw.text(((WIDTH - w) / 2,(HEIGHT - h) / 3), char, font=myfont) return image # + [markdown] id="mCrvTJJUbOn-" # ### Number of unique values # + id="xv8GctmWbOn_" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="50ffc72c-2da3-4109-d3cd-2f0a552358a3" print(f'Number of unique grapheme roots: {train_df_["grapheme_root"].nunique()}') print(f'Number of unique vowel diacritic: {train_df_["vowel_diacritic"].nunique()}') print(f'Number of unique consonant diacritic: {train_df_["consonant_diacritic"].nunique()}') # + [markdown] id="1rNfFVzubOoL" # ### Most used top 10 Grapheme Roots in training set # + id="ma5YbkPnbOoN" colab={"base_uri": "https://localhost:8080/", "height": 359} outputId="b0461d44-4998-486f-ddb0-edb9399bf2e7" top_10_roots = get_n(train_df_, 'grapheme_root', 10) top_10_roots # + id="TnbUmQ77bOoT" colab={"base_uri": "https://localhost:8080/", "height": 443} outputId="a67c2ddd-bb6a-4fdd-a9e5-c130caa4a14c" f, ax = plt.subplots(2, 5, figsize=(16, 8)) ax = ax.flatten() for i in range(10): ax[i].imshow(image_from_char(top_10_roots['component'].iloc[i]), cmap='Greys') # + [markdown] id="k0MaAiE7bOob" # ### Least used 10 Grapheme Roots in training set # + id="oxQdrsgybOod" colab={"base_uri": "https://localhost:8080/", 
"height": 359} outputId="f01d74ee-65c4-422c-d7e2-3c851f7c08c2" bottom_10_roots = get_n(train_df_, 'grapheme_root', 10, False) bottom_10_roots # + id="F_5jghY6bOok" colab={"base_uri": "https://localhost:8080/", "height": 443} outputId="652dd70e-f7d1-4b0b-841d-761c21444ed6" f, ax = plt.subplots(2, 5, figsize=(16, 8)) ax = ax.flatten() for i in range(10): ax[i].imshow(image_from_char(bottom_10_roots['component'].iloc[i]), cmap='Greys') # + [markdown] id="JJESOyLAbOos" # ### Top 5 Vowel Diacritic in taining data # + id="KjFo8o-xbOot" colab={"base_uri": "https://localhost:8080/"} outputId="c6ab4256-2d74-4e70-edf9-9ab00a07071f" top_5_vowels = get_n(train_df_, 'vowel_diacritic', 5) top_5_vowels # + id="TJXhZ_pGbOo1" colab={"base_uri": "https://localhost:8080/"} outputId="65dcf071-e880-42ed-b917-d055ed00b7d5" f, ax = plt.subplots(1, 5, figsize=(16, 8)) ax = ax.flatten() for i in range(5): ax[i].imshow(image_from_char(top_5_vowels['component'].iloc[i]), cmap='Greys') # + [markdown] id="omXsbXqNbOo7" # ### Top 5 Consonant Diacritic in training data # + id="IklVUW0cbOo9" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="54e485d5-ea48-474a-be6a-7624fbe45317" top_5_consonants = get_n(train_df_, 'consonant_diacritic', 5) top_5_consonants # + id="txTlvhdobOpC" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="a7780ee1-1eee-40d3-f8e9-bd80165a644b" f, ax = plt.subplots(1, 5, figsize=(16, 8)) ax = ax.flatten() for i in range(5): ax[i].imshow(image_from_char(top_5_consonants['component'].iloc[i]), cmap='Greys') # + id="Kd4lL9ePbOpH" train_df_ = train_df_.drop(['grapheme'], axis=1, inplace=False) # + id="Y4uqkYB2bOpM" train_df_[['grapheme_root', 'vowel_diacritic', 'consonant_diacritic']] = train_df_[['grapheme_root', 'vowel_diacritic', 'consonant_diacritic']].astype('uint8') # + id="-UIq3emYbOpS" IMG_SIZE=64 N_CHANNELS=1 # + [markdown] id="zk4707TzbOpZ" # Let's apply some image processing (credits: [this 
# kernel](https://www.kaggle.com/shawon10/bangla-graphemes-image-processing-deep-cnn))
# while resizing the images: centre-crop the region of interest (the ink)
# from each raw 137x236 image before resizing.

def _crop_resize_one(flat_values, out_size):
    """Crop the bounding box around all ink in one flattened 137x236 image
    and resize the crop to (out_size, out_size), returned flattened."""
    image = flat_values.reshape(137, 236)
    # Inverted Otsu threshold so contours trace the pen strokes.
    _, thresh = cv2.threshold(image, 30, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # [-2:] keeps this compatible with both OpenCV 3 (3 return values) and
    # OpenCV 4 (2 return values).
    contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    ls_xmin = []
    ls_ymin = []
    ls_xmax = []
    ls_ymax = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        ls_xmin.append(x)
        ls_ymin.append(y)
        ls_xmax.append(x + w)
        ls_ymax.append(y + h)
    # Union of all contour boxes.
    xmin = min(ls_xmin)
    ymin = min(ls_ymin)
    xmax = max(ls_xmax)
    ymax = max(ls_ymax)
    roi = image[ymin:ymax, xmin:xmax]
    resized_roi = cv2.resize(roi, (out_size, out_size), interpolation=cv2.INTER_AREA)
    return resized_roi.reshape(-1)


def resize(df, size=64, need_progress_bar=True):
    """Crop-and-resize every row of `df` (flattened 137x236 images).

    Returns a DataFrame with one flattened size*size image per original row,
    preserving the index.

    BUG FIX: the original ignored the `size` parameter (hard-coded 64) and
    duplicated the entire loop body across the progress-bar branches; the
    loop is now shared and `size` is honoured (default behaviour unchanged).
    """
    resized = {}
    indices = tqdm(df.index) if need_progress_bar else df.index
    for idx in indices:
        resized[idx] = _crop_resize_one(df.loc[idx].values, size)
    return pd.DataFrame(resized).T


def get_dummies(df):
    """One-hot encode every column of `df` (values cast to str) side by side."""
    return pd.concat([pd.get_dummies(df[col].astype(str)) for col in df], axis=1)


# ## Basic Model
# VGG-style convolutional trunk with three softmax heads (root / vowel /
# consonant diacritic).

inputs = Input(shape=(IMG_SIZE, IMG_SIZE, 1))

model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1))(inputs)
model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=32, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = MaxPool2D(pool_size=(2, 2))(model)
model = Conv2D(filters=32, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
model = Dropout(rate=0.3)(model)

model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=64, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = MaxPool2D(pool_size=(2, 2))(model)
model = Conv2D(filters=64, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = Dropout(rate=0.3)(model)

model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=128, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = MaxPool2D(pool_size=(2, 2))(model)
model = Conv2D(filters=128, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = Dropout(rate=0.3)(model)

model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = Conv2D(filters=256, kernel_size=(3, 3), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = MaxPool2D(pool_size=(2, 2))(model)
model = Conv2D(filters=256, kernel_size=(5, 5), padding='SAME', activation='relu')(model)
model = BatchNormalization(momentum=0.15)(model)
model = Dropout(rate=0.3)(model)

model = Flatten()(model)
model = Dense(1024, activation="relu")(model)
model = Dropout(rate=0.3)(model)
dense = Dense(512, activation="relu")(model)

# Output heads.  The explicit names reproduce the auto-generated ones
# (dense_2/dense_3/dense_4) that the training targets and history plots
# elsewhere in the notebook refer to, so the wiring stays correct even if
# this cell is re-run in a session where Keras' auto-name counter has moved.
head_root = Dense(168, activation='softmax', name='dense_2')(dense)
head_vowel = Dense(11, activation='softmax', name='dense_3')(dense)
head_consonant = Dense(7, activation='softmax', name='dense_4')(dense)

model = Model(inputs=inputs, outputs=[head_root, head_vowel, head_consonant])

model.summary()

# Visualize the 3-tailed (3-output) CNN by plotting it.
from keras.utils import plot_model
plot_model(model, to_file='model.png')

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Set a learning rate annealer.
# Learning rate will be halved after 3 epochs if the monitored accuracy has
# not increased.
# BUG FIX: the model's three heads are named dense_2 (root), dense_3 (vowel)
# and dense_4 (consonant) — exactly the keys used in the fit() target dict
# and the history plots below.  The original monitored dense_3/dense_4/
# dense_5; 'dense_5_accuracy' does not exist, so the consonant callback
# could never fire and the other two watched the wrong heads.
learning_rate_reduction_root = ReduceLROnPlateau(monitor='dense_2_accuracy',
                                                 patience=3, verbose=1,
                                                 factor=0.5, min_lr=0.00001)
learning_rate_reduction_vowel = ReduceLROnPlateau(monitor='dense_3_accuracy',
                                                  patience=3, verbose=1,
                                                  factor=0.5, min_lr=0.00001)
learning_rate_reduction_consonant = ReduceLROnPlateau(monitor='dense_4_accuracy',
                                                      patience=3, verbose=1,
                                                      factor=0.5, min_lr=0.00001)

batch_size = 256
epochs = 30


class MultiOutputDataGenerator(keras.preprocessing.image.ImageDataGenerator):
    """ImageDataGenerator for a multi-output model.

    Accepts `y` as a dict of {output_name: one-hot targets}, concatenates
    the targets column-wise so the stock generator can shuffle/augment them
    together with the images, then slices them back apart per output when
    yielding batches.
    """

    def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None,
             seed=None, save_to_dir=None, save_prefix='', save_format='png',
             subset=None):
        # Record each output's column width and insertion order so the
        # concatenated target matrix can be split back per head.
        targets = None
        target_lengths = {}
        ordered_outputs = []
        for output, target in y.items():
            if targets is None:
                targets = target
            else:
                targets = np.concatenate((targets, target), axis=1)
            target_lengths[output] = target.shape[1]
            ordered_outputs.append(output)

        for flowx, flowy in super().flow(x, targets, batch_size=batch_size,
                                         shuffle=shuffle):
            target_dict = {}
            i = 0
            for output in ordered_outputs:
                target_length = target_lengths[output]
                target_dict[output] = flowy[:, i: i + target_length]
                i += target_length
            yield flowx, target_dict


# Raw image geometry of the competition parquet files.
HEIGHT = 137
WIDTH = 236

# ### Training loop
import tensorflow as tf
tf.config.run_functions_eagerly(True)

# don't run this cell during prediction
histories = []
for i in range(1):  # only the first of the four training parquets
    train_df = pd.merge(pd.read_parquet(f'/content/train_image_data_{i}.parquet'),
                        train_df_, on='image_id').drop(['image_id'], axis=1)

    # Visualize a few samples of the current training dataset.
    fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(16, 8))
    count = 0
    for row in ax:
        for col in row:
            col.imshow(resize(train_df.drop(['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'], axis=1).iloc[[count]], need_progress_bar=False).values.reshape(-1).reshape(IMG_SIZE, IMG_SIZE).astype(np.float64))
            count += 1
    plt.show()

    X_train = train_df.drop(['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'], axis=1)
    X_train = resize(X_train) / 255
    # The CNN takes images shaped (batch_size, h, w, channels).
    X_train = X_train.values.reshape(-1, IMG_SIZE, IMG_SIZE, N_CHANNELS)

    Y_train_root = pd.get_dummies(train_df['grapheme_root']).values
    Y_train_vowel = pd.get_dummies(train_df['vowel_diacritic']).values
    Y_train_consonant = pd.get_dummies(train_df['consonant_diacritic']).values

    print(f'Training images: {X_train.shape}')
    print(f'Training labels root: {Y_train_root.shape}')
    print(f'Training labels vowel: {Y_train_vowel.shape}')
    print(f'Training labels consonants: {Y_train_consonant.shape}')

    # Divide the data into training and validation sets.
    x_train, x_test, y_train_root, y_test_root, y_train_vowel, y_test_vowel, y_train_consonant, y_test_consonant = train_test_split(X_train, Y_train_root, Y_train_vowel, Y_train_consonant, test_size=0.08, random_state=666)

    # Free the un-split copies before augmentation allocates more.
    del train_df
    del X_train
    del Y_train_root, Y_train_vowel, Y_train_consonant

    # Data augmentation for creating more training data.
    datagen = MultiOutputDataGenerator(
        featurewise_center=False,            # set input mean to 0 over the dataset
        samplewise_center=False,             # set each sample mean to 0
        featurewise_std_normalization=False, # divide inputs by dataset std
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,                 # apply ZCA whitening
        rotation_range=8,                    # random rotation (degrees)
        zoom_range=0.15,                     # random zoom
        width_shift_range=0.15,              # random horizontal shift (fraction)
        height_shift_range=0.15,             # random vertical shift (fraction)
        horizontal_flip=False,
        vertical_flip=False)
    # Only computes normalisation statistics; performs no augmentation yet.
    datagen.fit(x_train)

    # NOTE(review): fit_generator is deprecated in TF >= 2.1; model.fit
    # accepts generators directly if the TF dependency is upgraded.
    history = model.fit_generator(
        datagen.flow(x_train, {'dense_2': y_train_root, 'dense_3': y_train_vowel, 'dense_4': y_train_consonant}, batch_size=batch_size),
        epochs=epochs,
        validation_data=(x_test, [y_test_root, y_test_vowel, y_test_consonant]),
        steps_per_epoch=x_train.shape[0] // batch_size,
        callbacks=[learning_rate_reduction_root, learning_rate_reduction_vowel, learning_rate_reduction_consonant])
    histories.append(history)

    model.save('bengali_model.h5')
    # BUG FIX: np.save() on the History *object* pickles the attached model
    # (huge and frequently unpicklable); persist the plain metrics dict.
    np.save('history.npy', history.history)

    # Delete to reduce memory usage before the next parquet.
    del x_train
    del x_test
    del y_train_root
    del y_test_root
    del y_train_vowel
    del y_test_vowel
    del y_train_consonant
    del y_test_consonant
    gc.collect()

# don't run this cell during prediction
from google.colab import files
files.download('bengali_model.h5')
files.download('history.npy')

# %matplotlib inline
def plot_loss(his, epoch, title):
    """Plot total and per-head train/validation loss curves for one run."""
    plt.style.use('ggplot')
    plt.figure()
    plt.plot(np.arange(0, epoch), his.history['loss'], label='train_loss')
    plt.plot(np.arange(0, epoch), his.history['dense_2_loss'], label='train_root_loss')
    plt.plot(np.arange(0, epoch), his.history['dense_3_loss'], label='train_vowel_loss')
    plt.plot(np.arange(0, epoch), his.history['dense_4_loss'], label='train_consonant_loss')
    plt.plot(np.arange(0, epoch), his.history['val_dense_2_loss'], label='val_train_root_loss')
    plt.plot(np.arange(0, epoch), his.history['val_dense_3_loss'], label='val_train_vowel_loss')
    plt.plot(np.arange(0, epoch), his.history['val_dense_4_loss'], label='val_train_consonant_loss')
    plt.title(title)
    plt.xlabel('Epoch #')
    plt.ylabel('Loss')
    plt.legend(loc='upper right')
    plt.show()


def plot_acc(his, epoch, title):
    """Plot per-head train/validation accuracy curves for one run."""
    plt.style.use('ggplot')
    plt.figure()
    plt.plot(np.arange(0, epoch), his.history['dense_2_accuracy'], label='train_root_acc')
    plt.plot(np.arange(0, epoch), his.history['dense_3_accuracy'], label='train_vowel_accuracy')
    plt.plot(np.arange(0, epoch), his.history['dense_4_accuracy'], label='train_consonant_accuracy')
    plt.plot(np.arange(0, epoch), his.history['val_dense_2_accuracy'], label='val_root_acc')
    plt.plot(np.arange(0, epoch), his.history['val_dense_3_accuracy'], label='val_vowel_accuracy')
    plt.plot(np.arange(0, epoch), his.history['val_dense_4_accuracy'], label='val_consonant_accuracy')
    plt.title(title)
    plt.xlabel('Epoch #')
    plt.ylabel('Accuracy')
    plt.legend(loc='upper right')
    plt.show()


for dataset in range(1):
    plot_loss(histories[dataset], epochs, f'Training Dataset: {dataset}')
    plot_acc(histories[dataset], epochs, f'Training Dataset: {dataset}')

# ## Prediction
# Re-upload / reload the trained model when running inference only.
from google.colab import files
from tensorflow import keras
import cv2

uploaded = files.upload()
for fn in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format(
        name=fn, length=len(uploaded[fn])))

model = keras.models.load_model("bengali_model.h5")

# + id="yOLxlqsNay69"
[["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 72} outputId="6df47298-4520-49f5-b5ad-2fd58e60f592" #upload image upload = files.upload() for fn in upload.keys(): image_name = fn # + id="F9U0ms6ya6Pz" IMG_SIZE = 64 N_CHANNELS = 1 img = cv2.imread(image_name,0) img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)) img = img.reshape(1, IMG_SIZE, IMG_SIZE, N_CHANNELS) result = model.predict(img) # + id="HCmkcj8hboFL" colab={"base_uri": "https://localhost:8080/", "height": 782} outputId="ec1fc43e-1802-4e4c-a6ad-dc647e9a375e" print('Result:',result) # + id="xti-b2ZQrsgd" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="dbc60294-803c-4a73-b592-dff0ad1cdaed" import numpy as np print('Root:',np.argmax(result[0])) print('Vowel:',np.argmax(result[1])) print('Consonants:',np.argmax(result[2]))
bengali_graphemes_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import the packages for experiment import warnings warnings.simplefilter("ignore") import pandas as pd import numpy as np import random from tensorflow import keras from itertools import product from joblib import Parallel, delayed # - # # Random Classification Experiment # # This experiment will use images from the **CIFAR 100** database (https://www.cs.toronto.edu/~kriz/cifar.html) and showcase the classification efficiency of the omnidirectional algorithms in the **ProgLearn** project (https://github.com/neurodata/ProgLearn). # ## Omnidirectional Learning # # The **ProgLearn** project aims to improve program performance on sequentially learned tasks, proposing a lifelong learning approach. # # It contains two different algorithms: **Omnidirectional Forests** (**Odif**) and **Omnidirectional Network** (**Odin**). **Odif** uses Uncertainy Forest as transformers, while **Odin** uses deep networks. These two algorithms achieve both forward knowledge transfer and backward knowledge transfer, and this experiment is designed to cover the **Odif** model. # ## Choosing hyperparameters # # The hyperparameters here are used for determining how the experiment will run. ### MAIN HYPERPARAMS ### num_points_per_task = 500 shift_num = 6 task_num = 20 tree_num = 10 ######################## # ## Loading datasets # # The CIFAR 100 database contains 100 classes of 600 images, each separating into 500 training images and 100 testing images. 
# + # load image datasets from the CIFAR-100 database (X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data() # modify data shapes for specific model data_x = np.concatenate([X_train, X_test]) data_x = data_x.reshape( (data_x.shape[0], data_x.shape[1] * data_x.shape[2] * data_x.shape[3]) ) data_y = np.concatenate([y_train, y_test]) data_y = data_y[:, 0] # - # ## Running experiment # # The following codes will run multiple experiments in parallel. For each experiment, we have task_num number of tasks. For each task, we randomly select 10 classes of the classes to train on. As we will observe below, each task increases Backwards Transfer Efficiency (BTE) with respect to Task 1 (Task 1 being the first task corresponding to 10 randomly selected classes). # + from functions.random_class_functions import Odif_experiment slot_num = int(5000 / num_points_per_task) slot_fold = range(slot_num) shift_fold = range(1, shift_num + 1, 1) # run the Odif model n_trees = [tree_num] iterable = product(n_trees, shift_fold, slot_fold) df_results = Parallel(n_jobs=-1, verbose=0)( delayed(Odif_experiment)( data_x, data_y, ntree, shift, slot, num_points_per_task, acorn=12345 ) for ntree, shift, slot in iterable ) # - # ## Plotting backward transfer efficiency # # Backward transfer efficiency (BTE) measures the relative effect of future task data on the performance on a certain task. # # $$BTE^t (f_n) := \mathbb{E} [R^t (f_n^{<t} )/R^t (f_n)]$$ # # It is the expected ratio of two risk functions of the learned hypothesis, one with access to the data up to and including the last observation from task t, and the other with access to the entire data sequence. The codes below uses the experiment results to calculate the average BTE numbers and display their changes over tasks learned. 
# + from functions.random_class_functions import calculate_results # obtain bte results btes = calculate_results(df_results, slot_num, shift_num) # calculate the average numbers bte = np.mean(btes, axis=0) # setting plot parameters fontsize = 22 ticksize = 20 # + from functions.random_class_functions import plot_bte plot_bte(bte, fontsize, ticksize)
docs/experiments/random_class_exp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Visualization
#
# One of the best ways to communicate what is happening in your dataset is through visualizations. Visualizations make it easy for a human to understand millions of rows of data at a glance. Today we will go through basics of the [Seaborn library](https://seaborn.pydata.org/).
#
# > Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
#
# To install seaborn we need to run either:
#
# ```
# pip install seaborn
# ```
#
# or:
#
# ```
# conda install seaborn
# ```
#
# Let's start by importing packages:

# Imports
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Let's read our PoKeMoN dataset. Below you can see the column descriptions:
#
# | Column | Description |
# | :--- | :--- |
# | `Name` | The name of the pokemon |
# | `Type 1` | The type of the pokemon we will use |
# | `Type 2` | Later generations were using dual types, we won't be using this here |
# | `Total` | The sum of all stat columns |
# | `HP`, `Attack`, `Defense`, `Sp. Atk`, `Sp. Def`, `Speed` | Pokemon stats |
# | `Generation` | When was this pokemon introduced |
# | `Legendary` | Is the pokemon a Legendary pokemon |

pokemon = pd.read_csv(
    "datasets_121_280_Pokemon.csv",
    index_col="#",
)
pokemon.sample(5)

# We will setup the standard Seaborn theme by running the following command:

sns.set()

# ## Line plots
#
# One of the basic types of plots are line plots. Those are handled in seaborn by the `lineplot` function. By default, the plot aggregates over multiple y values at each value of x and shows an estimate of the central tendency and a confidence interval for that estimate.
#
# Full documentation of `lineplot` is available [here](https://seaborn.pydata.org/generated/seaborn.lineplot.html#seaborn.lineplot).
#
# Let's see how the Total stats changed over the generations.

sns.lineplot(
    x="Generation",
    y="Total",
    data=pokemon,
)

# We can have multiple values on a line plot as well. A useful technique to visualize multiple columns is melting the dataframe. Then we can just use `value` for the actual value of the column, and `variable` for the label as `hue`. The `ci` parameter hides the estimator shade you could see above.

# +
# We melt the values, so that it is useful for data viz
generation_values = pd.melt(
    pokemon,
    id_vars=["Generation"],
    value_vars=["Speed", "Defense", "Sp. Def", "Sp. Atk", "HP", "Attack"],
)

fig, ax = plt.subplots()
sns.lineplot(
    x="Generation",
    y="value",
    hue="variable",
    ci=None,
    data=generation_values,
)
fig.set_size_inches(12, 8)
# -

# # Bar plots
#
# Another often used type of a plot is the Bar plot. You might remember the use of it in pandas:

# Bar plot using pandas
pokemon["Generation"].value_counts().plot.bar()

# We will use Seaborn `barplot` function to create bar plots. Here you can see an example of an attack `Total` bar plot depending on the `Generation`. The whiskers (vertical black lines), show the estimation error. Ideally there would be no estimation error, but we work with what we got.
#
# Full documentation of `barplot` is available [here](https://seaborn.pydata.org/generated/seaborn.barplot.html#seaborn.barplot).

sns.barplot(
    x="Generation",
    y="Total",
    data=pokemon,
)

# A nice alternative to this, is the `countplot`, which automatically counts things in the bucket. Countplot documentation is available [here](https://seaborn.pydata.org/generated/seaborn.countplot.html#seaborn.countplot).

sns.countplot(
    "Generation",
    data=pokemon,
)

# A horizontal count plot, ordered by frequency of each type
sns.countplot(
    y="Type 1",
    order=pokemon["Type 1"].value_counts().index,
    data=pokemon,
)

# ## Categorical plot
#
# A nice way to create a plot is to use the `catplot` function. This function provides access to several axes-level functions that show the relationship between a numerical and one or more categorical variables using one of several visual representations. The kind parameter selects the underlying axes-level function to use like stripplots, swarms, boxplots, barplots and more. We will use a barplot here, to show the `Total` means over `Generation` depending whether the pokemon was `Legendary` or not.
#
# Full documentation on `catplot` is available [here](https://seaborn.pydata.org/generated/seaborn.catplot.html#seaborn.catplot).

sns.catplot(
    x="Generation",
    y="Total",
    hue="Legendary",
    kind="bar",
    data=pokemon,
)

# ## Scatter plot
#
# Scatter plot is a relational plot, that shows the relation based on the `x` and `y` value. The relationship between x and y can be shown for different subsets of the data using the hue, size, and style parameters. These parameters control what visual semantics are used to identify the different subsets. It is possible to show up to three dimensions independently by using all three semantic types, but this style of plot can be hard to interpret and is often ineffective. Using redundant semantics (i.e. both hue and style for the same variable) can be helpful for making graphics more accessible.
#
# Full documentation on `scatterplot` is available [here](https://seaborn.pydata.org/generated/seaborn.scatterplot.html#seaborn.scatterplot).

sns.scatterplot(
    x="Attack",
    y="Defense",
    hue="Legendary",
    data=pokemon,
)

# ## Swarm plot
#
# A similar plot to scatter is the swarm plot. It shows you the count distribution over a numerical range. Here we categorize the swarmplot by `Generation` to see what was the distribution of starter types (water, fire, grass) pokemon over the generations. We are also adding a `palette` to colour them accordingly.
#
# The full documentation for `swarmplot` is available [here](https://seaborn.pydata.org/generated/seaborn.swarmplot.html#seaborn.swarmplot).

# +
starter_types = ["Water", "Fire", "Grass"]
# Map each starter type to its traditional colour
palette = {
    "Water": "#6890F0",
    "Fire": "#F08030",
    "Grass": "#78C850",
}

g = sns.swarmplot(
    x="Generation",
    y="Attack",
    hue="Type 1",
    palette=palette,
    data=pokemon[pokemon["Type 1"].isin(starter_types)],
)
g.legend_.remove()
# -

# ## Distribution plots
#
# A good way to show what is happening with our data is to show the distribution. The basic method to do that is the `distplot`. Below we can see the distribution of values for the `Total` column. We can see that most pokemon have around 500 and 300 `Total` power. The line you can see is the KDE = Kernel Density Estimation, a non-parametric way to estimate the probability density function of a random variable. Kernel density estimation is a fundamental data smoothing problem where inferences about the population are made, based on a finite data sample.
#
# Full documentation for the `distplot` is available [here](https://seaborn.pydata.org/generated/seaborn.distplot.html).

sns.distplot(pokemon["Total"])

# To see how our data distribution is happening through the dataset, we can also use the `boxplot`. This is a fun little graph that shows us many parameters of the selected column. Here we selected the `Total` values and grouped them by `Generation`. What elements we have here:
#
# - The box is where most of the values land.
# - The line in the middle of the box is the median value.
# - The lower end of the box is the 1st quartile.
# - The upper end of the box is the 3rd quartile.
# - Whiskers show minimum and maximum values.
# - The little diamond is an outlier value.
#
# The full documentation on the `boxplot` is available [here](https://seaborn.pydata.org/generated/seaborn.boxplot.html).

sns.boxplot(
    x="Generation",
    y="Total",
    data=pokemon,
)

# ## Cluster maps
#
# Cluster maps are a good way to show correlation between variables in the dataset. Here I selected all of the Pokemon stats and created a correlation matrix of them. You can easily see, which ones correlate with each other and in what way due to the clustering (above and left). Clustermaps are there to show you a hierarchical overview of the data.
#
# The full documentation for `clustermap` is available [here](https://seaborn.pydata.org/generated/seaborn.clustermap.html).

stats = ["Speed", "Defense", "Sp. Def", "Sp. Atk", "HP", "Attack"]
sns.clustermap(
    pokemon[stats].corr(),
    cmap="mako",
    linewidths=.75,
)

# # Exercises
#
# 1. Read in the `avocado.csv` dataset
#    - Set the index properly
# 2. Create a line plot showing the average price of avocado over months
# 3. Create a horizontal bar plot showing 10 highest mean prices depending on region
# 4. Create a count plot for the year of the avocado
# 5. Create a scatter plot of average price vs `Total Volume` for year 2018, when the `Total Volume` is lower than `1e6`
# 6. Show the `AveragePrice` distribution.
# 7. Create a clustermap of avocado correlations.
# 8. Show a boxplot of average price per year.
# Exercise solutions for the avocado dataset.

#1 -- load the dataset; the first (unnamed) CSV column is the row index
avocados = pd.read_csv(
    "avocado.csv",
    index_col=0,
)
avocados.sample(5)

#2 -- average price of avocado over months
# `Date` comes out of read_csv as plain strings, so it must be parsed with
# pd.to_datetime before the `.dt` accessor can be used (calling `.dt` on an
# object-dtype column raises AttributeError).
avocados["Month"] = pd.to_datetime(avocados['Date']).dt.strftime('%B')
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
          'August', 'September', 'October', 'November', 'December']
# Make Month an ordered categorical so the x-axis follows calendar order,
# not alphabetical order (this also makes a separate sort_values call unnecessary).
avocados['Month'] = pd.Categorical(avocados['Month'], categories=months, ordered=True)
sns.lineplot(
    x="Month",
    y="AveragePrice",
    data=avocados
)

#3 -- horizontal bar plot of the 10 highest mean prices by region
top_regions = avocados.groupby("region")["AveragePrice"].mean().nlargest(10)
sns.barplot(
    x=top_regions.values,
    y=top_regions.index,
)

#4 -- count plot for the year of the avocado
sns.countplot(
    "year",
    data=avocados,
)

# Extra: total volume per year (sum instead of the default mean estimator)
sns.barplot(x="year", y="Total Volume", data=avocados, estimator=sum)

# +
#5 -- average price vs Total Volume for 2018, Total Volume below 1e6
yearCondition = avocados['year'] == 2018
volumeCondition = avocados['Total Volume'] < 1e6

sns.scatterplot(
    x="AveragePrice",
    y="Total Volume",
    data=avocados[yearCondition & volumeCondition],
)
# -

#6 -- distribution of the average price
sns.distplot(avocados["AveragePrice"])

#7 -- clustermap of avocado correlations
stats = ["AveragePrice", "Total Volume", "Total Bags", "Small Bags", "XLarge Bags"]
sns.clustermap(
    avocados[stats].corr(),
    linewidths=.75,
)

#7 (variant) -- annotated clustermap of the volume columns only
stats = ["Total Volume", "Total Bags", "Small Bags", "XLarge Bags"]
sns.clustermap(
    avocados[stats].corr(),
    linewidths=.75,
    linecolor="k",
    annot = True,
)

#8 -- boxplot of average price per year
sns.boxplot(
    x="year",
    y="AveragePrice",
    data=avocados,
)
05 Data Visualization/notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Lung Segmentation - Montgomery Dataset

# %reload_ext autoreload
# %autoreload 2

import os
import tempfile

import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2
from tensorflow.keras.models import load_model

import fastestimator as fe
from fastestimator.architecture import UNet
from fastestimator.dataset import montgomery
from fastestimator.trace import Dice, ModelSaver
from fastestimator.op.numpyop import ImageReader, Reshape, Resize
from fastestimator.op.tensorop import Augmentation2D, BinaryCrossentropy, Minmax, ModelOp
from fastestimator.op import NumpyOp
from fastestimator.util import RecordWriter

# ## Download and prepare the montgomery dataset
#
# The montgomery dataset API will generate a summary CSV file for the data. The path of the csv file is returned as csv_path. The dataset path is returned as path. Inside the CSV file, the file paths are all relative to path.

train_csv_path, path = montgomery.load_data()
df = pd.read_csv(train_csv_path)
df.head()

# ## Create TF Records from input image and mask
# RecordWriter api shall convert the training images and masks into 'TF Records'. Writing of training dataset into TF-Records is preceded by the following processing of the features 'image', 'mask_left' and 'mask_right'. <br>
#
# Processing( transformation ) are carried out using 'Operations'.<br>
# Operation can be seen as a processing step over feature(s). Operation can generate newer feature(s) or can update existing feature(s).
#
#
# Preprocessing step for feature 'image' : <br>
# 'image' = ImageReader('image') <br>
# 'image' = Resize('image') <br>
# 'image' = Reshape('image') <br>
#
# Preprocessing step for feature 'mask_left' and 'mask_right': <br>
# 'mask' = ImageReader('mask_left') + ImageReader('mask_right') <br>
# 'CombinedLeftRightMask' does the addition between left mask and right mask <br>
# 'mask' = Resize('mask') <br>
# 'mask' = Reshape('mask') <br>
#
#

class CombineLeftRightMask(NumpyOp):
    """Merge the left-lung and right-lung masks into a single lung mask."""
    def forward(self, data, state):
        mask_left, mask_right = data
        # Pixel-wise sum; the two lung masks cover disjoint regions
        data = mask_left + mask_right
        return data

writer = RecordWriter(
    save_dir=os.path.join(path, "tfrecords"),
    train_data=train_csv_path,
    validation_data=0.2,  # hold out 20% of records for validation
    ops=[
        ImageReader(grey_scale=True, inputs="image", parent_path=path, outputs="image"),
        ImageReader(grey_scale=True, inputs="mask_left", parent_path=path, outputs="mask_left"),
        ImageReader(grey_scale=True, inputs="mask_right", parent_path=path, outputs="mask_right"),
        CombineLeftRightMask(inputs=("mask_left", "mask_right")),
        Resize(target_size=(512, 512)),
        Reshape(shape=(512, 512, 1), outputs="mask"),
        Resize(inputs="image", target_size=(512, 512)),
        Reshape(shape=(512, 512, 1), outputs="image"),
    ],
    write_feature=["image", "mask"])

# ## Create data pipeline
#
# Pipeline api generates sample batches one at a time, from TF Records. Preprocessing steps on feature 'image' and 'mask' are as follows:<br>
# 'image' = Augmentation('image') - rotation and flip being the type of augmentation <br>
# 'mask' = Augmentation('mask') - rotation and flip <br>
# 'image' = Minmax('image') <br>
# 'mask' = Minmax('mask') <br>
#
#
#

# + tags=["parameters"]
batch_size=4
epochs = 25
steps_per_epoch = None
validation_steps = None
# -

pipeline = fe.Pipeline(
    batch_size=batch_size,
    data=writer,
    ops=[
        # Apply identical rotation to image and mask so they stay aligned
        Augmentation2D(inputs=["image", "mask"], outputs=["image", "mask"], mode="train", rotation_range=10, flip_left_right=False),
        Minmax(inputs="image", outputs="image"),
        Minmax(inputs="mask", outputs="mask")
    ])

# ## Check the results of preprocessing

# Show a randomly chosen (image, mask) pair from one augmented training batch
idx = np.random.randint(low=0,high=batch_size)
fig, ax = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True, figsize = (6,6), squeeze=True)
sample_data = pipeline.show_results(mode='train')
sample_img = sample_data[0]['image'][idx]
sample_mask = sample_data[0]['mask'][idx]
ax[0].imshow(np.squeeze(sample_img), cmap='gray')
ax[1].imshow(np.squeeze(sample_mask), cmap='gray')
plt.tight_layout()
plt.show()

# ## Creation of the network
# FEModel api enables to create a model for training. It accepts a function as argument which returns a tf.keras.model,
# it also accepts an optimizer type to perform optimization on network weights.

# +
model_dir=tempfile.mkdtemp()
model = fe.build(model_def=lambda: UNet(input_size=(512, 512, 1)),
                 model_name="lung_segmentation",
                 optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                 loss_name="loss")
# -

# Network api enables user to constitute a network as a sequence of operations.
#
# The model defined in the previous step is seen as an operation.
#
# Operation can be seen as processing step over feature(s). Operation can generate newer feature(s) or can update existing feature(s).
#
# Along with model defined in previous step, BinaryCrossEntropy is another operation added to the network. By default this operation generates a 'loss' and in this example, from feature 'pred_segment' which is prediction result and 'mask' which is ground truth.

network = fe.Network(ops=[
    ModelOp(inputs="image", model=model, outputs="pred_segment"),
    BinaryCrossentropy(y_true="mask", y_pred="pred_segment", outputs="loss")
])

# Traces defined here are 'Dice' and 'ModelSaver'. Trace can be seen as tf.keras callbacks.

traces = [
    Dice(true_key="mask", pred_key="pred_segment"),
    # save_best=True keeps only the checkpoint with the best validation loss
    ModelSaver(model_name="lung_segmentation", save_dir=model_dir, save_best=True)
]

# Estimator api accepts network object, pipeline object, number of epochs and trace object and returns an estimator object which can orchestrate the training loop on the training data along with evaluation on validation data.

estimator = fe.Estimator(network=network,
                         pipeline=pipeline,
                         epochs=epochs,
                         log_steps=20,
                         traces=traces,
                         steps_per_epoch=steps_per_epoch,
                         validation_steps=validation_steps)

estimator.fit()

# ## Let's do Inference
# Inference is done here on a validation batch set.

# +
# Rebuild the pipeline to draw fresh evaluation batches
pipeline = fe.Pipeline(
    batch_size=batch_size,
    data=writer,
    ops=[
        Augmentation2D(inputs=["image", "mask"], outputs=["image", "mask"], mode="train", rotation_range=10, flip_left_right=False),
        Minmax(inputs="image", outputs="image"),
        Minmax(inputs="mask", outputs="mask")
    ])

sample_data = pipeline.show_results(mode='eval')
# -

# Load the best checkpoint written by ModelSaver during training
predict_model = load_model(os.path.join(model_dir, 'lung_segmentation_best_loss.h5'), compile=False)
predict_batch = predict_model.predict(sample_data[0]['image'])

# Display an image, predicted mask and overlay version. Image sample is randomly selected from the validation batch.

# +
idx = np.random.randint(low=0,high=batch_size)

# Original x-ray converted to RGB uint8 in [0, 255]
img = sample_data[0]['image'][idx]
img = img.numpy()
img = img.reshape(512,512)
img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img_rgb = (img_rgb*255).astype(np.uint8)

# Ground-truth mask as RGB
# NOTE(review): mask_gt_rgb is computed but never used below -- the
# "mask gt" panel actually blends the predicted mask with itself; it
# presumably should use mask_gt_rgb. Confirm intent before changing.
mask_gt = sample_data[0]['mask'][idx]
mask_gt = mask_gt.numpy()
mask_gt = mask_gt.reshape(512, 512)
mask_gt_rgb = cv2.cvtColor(mask_gt, cv2.COLOR_GRAY2RGB)
mask_gt_rgb = (mask_gt_rgb).astype(np.uint8)

# Predicted mask as RGB uint8
mask = predict_batch[idx]
mask = mask.reshape(512,512)
mask_rgb = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
mask_rgb = (mask_rgb*255).astype(np.uint8)

# Binarize the prediction at 0.5 and paint the predicted lung region red
ret, mask_thres = cv2.threshold(mask, 0.5,1, cv2.THRESH_BINARY)
mask_overlay = mask_rgb * np.expand_dims(mask_thres,axis=-1)
mask_overlay = np.where( mask_overlay != [0,0,0], [255,0,0] ,[0,0,0])
mask_overlay = mask_overlay.astype(np.uint8)

# Blend the red overlay with the input image and with the mask panel
img_with_mask = cv2.addWeighted(img_rgb, 0.7, mask_overlay, 0.3,0 )
maskgt_with_maskpred = cv2.addWeighted(mask_rgb, 0.7, mask_overlay, 0.3, 0)

fig, ax = plt.subplots(nrows=1, ncols=3,figsize=(18,8))
ax[0].imshow(img_rgb)
ax[0].set_title('original lung')
ax[1].imshow(maskgt_with_maskpred)
ax[1].set_title('mask gt - predict mask')
ax[2].imshow(img_with_mask)
ax[2].set_title('img - predict mask ')
plt.show()
# -
apphub/image_segmentation/unet_montgomery/unet_montgomery.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import tensorflow as tf
import pandas as pd
import numpy as np
import time
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Load iris, drop the last row, and one-hot encode the class labels
iris = load_iris()
iris_X, iris_y = iris.data[:-1,:], iris.target[:-1]
iris_y= pd.get_dummies(iris_y).values
trainX, testX, trainY, testY = train_test_split(iris_X, iris_y, test_size=0.33, random_state=42)

# Now we define x and y. These variables will hold our iris data (both the features and label matrices) We also need to give them shapes which correspond to the shape of our data.
#

# +
# numFeatures is the number of features in our input data.
# In the iris dataset, this number is '4'.
numFeatures = trainX.shape[1]
print('numFeatures is : ', numFeatures )

# numLabels is the number of classes our data points can be in.
# In the iris dataset, this number is '3'.
numLabels = trainY.shape[1]
print('numLabels is : ', numLabels )

#X = tf.Variable( np.identity(numFeatures), tf.TensorShape(numFeatures),dtype='float32') # Iris has 4 features, so X is a tensor to hold our data.
#yGold = tf.Variable(np.array([1,1,1]),shape=tf.TensorShape(numLabels),dtype='float32') # This will be our correct answers matrix for 3 classes.
# -

# Convert the numpy arrays into float32 TensorFlow constants
trainX = tf.constant(trainX, dtype='float32')
trainY = tf.constant(trainY, dtype='float32')
testX = tf.constant(testX, dtype='float32')
testY = tf.constant(testY, dtype='float32')

# <h3>Set model weights and bias</h3>
#
# Much like Linear Regression, we need a shared variable weight matrix for Logistic Regression. We initialize both <code>W</code> and <code>b</code> as tensors full of zeros. Since we are going to learn <code>W</code> and <code>b</code>, their initial value does not matter too much. These variables are the objects which define the structure of our regression model, and we can save them after they have been trained so we can reuse them later.
#
# We define two TensorFlow variables as our parameters. These variables will hold the weights and biases of our logistic regression and they will be continually updated during training.
#
# Notice that <code>W</code> has a shape of \[4, 3] because we want to multiply the 4-dimensional input vectors by it to produce 3-dimensional vectors of evidence for the different classes. <code>b</code> has a shape of \[3] so we can add it to the output. TensorFlow variables need to be initialized with values, e.g. with zeros.

W = tf.Variable(tf.zeros([4, 3]))  # 4-dimensional input and 3 classes
b = tf.Variable(tf.zeros([3]))  # 3-dimensional output [0,0,1],[0,1,0],[1,0,0]

# +
#Randomly sample from a normal distribution with standard deviation .01
weights = tf.Variable(tf.random.normal([numFeatures,numLabels],
                                       mean=0.,
                                       stddev=0.01,
                                       name="weights"),dtype='float32')

bias = tf.Variable(tf.random.normal([1,numLabels],
                                    mean=0.,
                                    stddev=0.01,
                                    name="bias"))
# -

# <h3>Logistic Regression model</h3>
#
# We now define our operations in order to properly run the Logistic Regression. Logistic regression is typically thought of as a single equation:
#
# $$
# ŷ =sigmoid(WX+b)
# $$
#
# However, for the sake of clarity, we can have it broken into its three main components:
#
# * a weight times features matrix multiplication operation,
# * a summation of the weighted features and a bias term,
# * and finally the application of a sigmoid function.
#
# As such, you will find these components defined as three separate operations below.
#

# Three-component breakdown of the Logistic Regression equation.
# Note that these feed into each other.
def logistic_regression(x):
    """Apply the model: sigmoid(x @ weights + bias), elementwise over the batch."""
    apply_weights_OP = tf.matmul(x, weights, name="apply_weights")
    add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
    activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
    return activation_OP

# <a id="ref3"></a>
#
# <h2>Training</h2>
#
# The learning algorithm is how we search for the best weight vector ($\mathbf{w}$). This search is an optimization problem looking for the hypothesis that optimizes an error/cost measure.
#
# <b>What tells us that our model is bad?</b>\
# The Cost or Loss of the model, so what we want is to minimize that.
#
# <h3>Cost function</h3>
# Before defining our cost function, we need to define how long we are going to train and how should we define the learning rate.

# +
# Number of Epochs in our training
numEpochs = 700

# Defining our learning rate iterations (decay)
learningRate = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=0.0008,
                                                              decay_steps=trainX.shape[0],
                                                              decay_rate= 0.95,
                                                              staircase=True)
# -

# <b>What is the cost function in our model?</b>\
# The cost function we are going to utilize is the Squared Mean Error loss function.
#
# <b>How to minimize the cost function?</b>\
# We can't use <b>least-squares linear regression</b> here, so we will use <a href="http://en.wikipedia.org/wiki/Gradient_descent?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01">gradient descent</a> instead. Specifically, we will use batch gradient descent which calculates the gradient from all data points in the data set.
#

#Defining our cost function - Squared Mean Error
loss_object = tf.keras.losses.MeanSquaredLogarithmicError()
optimizer = tf.keras.optimizers.SGD(learningRate)

# We also want some additional operations to keep track of our model's efficiency over time. We can do this like so:
#

# Accuracy metric.
def accuracy(y_pred, y_true):
    """Fraction of rows whose arg-max class in `y_pred` matches `y_true`.

    Both arguments are (batch, numLabels) tensors; `y_true` is assumed
    one-hot (argmax over axis 1 recovers the class index) -- TODO confirm.
    """
    # Predicted class is the index of the highest score in prediction vector (i.e. argmax).
    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# We first wrap computation inside a GradientTape for automatic
# differentiation. Then we compute gradients and update W and b.

# +
# Optimization process.
def run_optimization(x, y):
    """One full-batch gradient-descent step on (x, y).

    Records the forward pass on a GradientTape, then applies the resulting
    gradients to the module-level `weights` and `bias` via `optimizer`.
    """
    with tf.GradientTape() as g:
        pred = logistic_regression(x)
        loss = loss_object(pred, y)
    gradients = g.gradient(loss, [weights, bias])
    optimizer.apply_gradients(zip(gradients, [weights, bias]))
# -

# Now we move on to actually running our operations. We will start with the
# operations involved in the prediction phase (i.e. the logistic regression
# itself).
#
# Now we can define and run the actual training loop, like this:

# +
# Initialize reporting variables
display_step = 10   # evaluate and report every 10 epochs
epoch_values = []
accuracy_values = []
loss_values = []
loss = 0            # previously reported loss, for the convergence check
diff = 1            # change in reported loss since last report

# Training epochs
for i in range(numEpochs):
    # Early stop once the reported loss changes by less than 1e-4.
    if i > 1 and diff < .0001:
        print("change in loss %g; convergence."%diff)
        break
    else:
        # Run training step
        run_optimization(trainX, trainY)
        # Report occasional stats
        if i % display_step == 0:
            # Add epoch to epoch_values
            epoch_values.append(i)
            # NOTE(review): loss/accuracy below are computed on the *test*
            # split even though the print labels it "training accuracy".
            pred = logistic_regression(testX)
            newLoss = loss_object(pred, testY)
            # Add loss to live graphing variable
            loss_values.append(newLoss)
            # Generate accuracy stats on test data
            acc = accuracy(pred, testY)
            accuracy_values.append(acc)
            # Re-assign values for variables
            diff = abs(newLoss - loss)
            loss = newLoss
            # generate print statements
            print("step %d, training accuracy %g, loss %g, change in loss %g"%(i, acc, newLoss, diff))

# How well do we perform on held-out test data?
print("final accuracy on test set: %s" %acc.numpy())
# -

# Why don't we plot the loss to see how it behaves?
# # %matplotlib inline import numpy as np import matplotlib.pyplot as plt plt.xlabel("Epoch") plt.ylabel("Loss") plt.plot(loss_values) plt.show() # Try changing the parameters such as the length of training, and maybe some operations to see how the model behaves. Does it take much longer? How is the performance? #
Logistical regresion/Logical regresion Manual.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # VirES - access to INTERMAGNET ground observatory data
#
# This notebook demonstrates access to AUX_OBS observatory data via VirES for Swarm API.
#
# # Available collections
#
# | Collection Name | Description |
# |---|---|
# | `SW_OPER_AUX_OBSM2_` | 1 minute data from all observatories |
# | `SW_OPER_AUX_OBSM2_:<code>` | 1 minute data from a single selected observatory |
# | `SW_OPER_AUX_OBSS2_` | 1 second data from all observatories |
# | `SW_OPER_AUX_OBSS2_:<code>` | 1 second data from a single selected observatory |
# | `SW_OPER_AUX_OBSH2_` | 1 hour data from all observatories |
# | `SW_OPER_AUX_OBSH2_:<code>` | 1 hour data from a single selected observatory |
#
# The `<code>` is a 3-letter [IAGA observatory code](http://www.wdc.bgs.ac.uk/catalog/obs_code.html).
#
#
# # Available variables (same for all collections)
#
# | Variable | Unit | Dimension | Description |
# |---|---|---|---|
# | `IAGA_code` | $-$ | char [3] | [IAGA observatory code](http://www.wdc.bgs.ac.uk/catalog/obs_code.html) |
# | `Timestamp` | $-$ | scalar | UTC time of observation |
# | `Latitude` | $\text{deg}$ | scalar | ITRF geocentric latitude |
# | `Longitude` | $\text{deg}$ | scalar | ITRF geocentric longitude |
# | `Radius` | $\text{m}$ | scalar | ITRF geocentric radius |
# | `B_NEC` | $\text{nT}$ | vector [3] | Measured magnetic field vector in ITRF NEC frame.
| # | `F` | $\text{nT}$ | scalar | Magnetic field intensity calculated from `B_NEC` | # | `Quality` | $-$ | char [1] |Data quality: `D` for definitive and `Q` for quasi-definitive | # # + from matplotlib.pyplot import figure, subplot, show from viresclient import SwarmRequest server_url = None # default VirES server # - # ## 1 second observatory data - all # + request = SwarmRequest(server_url) request.set_collection('SW_OPER_AUX_OBSS2_') request.set_products( measurements=['B_NEC', 'F', 'IAGA_code', 'Quality'], auxiliaries=['QDLat', 'QDLon', 'MLT'], ) observatories = request.available_observatories('SW_OPER_AUX_OBSS2_', details=True) print("Avaiable observatories and temporal extent of their data:") # 'IAGACode' field name replaced by 'site' in newer version key = 'IAGACode' if 'IAGACode' in observatories.keys() else 'site' print('IAGACode\tstartTime\t\t\tendTime') for item in zip(observatories[key], observatories['startTime'], observatories['endTime']): print("%s\t%s\t%s" % item) data = request.get_between( start_time='2016-01-01T10:00:00Z', end_time='2016-01-01T14:00:00Z', asynchronous=False, ).as_xarray() # --------- print(data) # %matplotlib inline fig = figure(figsize=(18, 9), dpi=100) ax = subplot(1, 1, 1) times = data['Timestamp'].values idx = (times == times.max()).nonzero()[0] ax.plot(data['MLT'].values, data['QDLat'].values, '.', ms=2) ax.set_ylim([-90, 90]) for qdlat, mlt, code in zip(data['QDLat'].values[idx], data['MLT'].values[idx], data['IAGA_code'].values[idx]): ax.text(mlt, qdlat, code) ax.set_yticks([-90, -45, 0, +45, 90]) ax.set_ylim([-90, 90]) ax.set_ylabel('QD latitide / deg') ax.set_xticks([0, 6, 12, 18, 24]) ax.set_xlim([0, 24]) ax.set_xlabel('MLT / hour') ax.grid() show() # - # ## 1 second observatory data - single sensor # + request = SwarmRequest(server_url) request.set_collection('SW_OPER_AUX_OBSS2_:CLF') request.set_products( measurements=['B_NEC', 'F', 'IAGA_code', 'Quality'], auxiliaries=['QDLat', 'QDLon', 'MLT'], ) data = 
request.get_between( start_time='2016-01-01T10:00:00Z', end_time='2016-01-01T14:00:00Z', asynchronous=False, ).as_xarray() # --------- print(data) # %matplotlib inline fig = figure(figsize=(18, 18), dpi=100) ax = subplot(4, 1, 1) ax.plot(data['Timestamp'].values, data['F'].values, '-') ax.set_ylabel('F / nT') ax.grid() ax = subplot(4, 1, 2) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 0], '-') ax.set_ylabel('B_N / nT') ax.grid() ax = subplot(4, 1, 3) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 1], '-') ax.set_ylabel('B_E / nT') ax.grid() ax = subplot(4, 1, 4) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 2], '-') ax.set_ylabel('B_C / nT') ax.grid() show() # - # ## 1 minute observatory data - all # + request = SwarmRequest(server_url) request.set_collection('SW_OPER_AUX_OBSM2_') request.set_products( measurements=['B_NEC', 'F', 'IAGA_code', 'Quality'], auxiliaries=['QDLat', 'QDLon', 'MLT'], ) observatories = request.available_observatories('SW_OPER_AUX_OBSM2_', details=True) # 'IAGACode' field name replaced by 'site' in newer version key = 'IAGACode' if 'IAGACode' in observatories.keys() else 'site' print("Avaiable observatories and temporal extent of their data:") print('IAGACode\tstartTime\t\t\tendTime') for item in zip(observatories[key], observatories['startTime'], observatories['endTime']): print("%s\t%s\t%s" % item) data = request.get_between( start_time='2016-01-01T10:00:00Z', end_time='2016-01-01T14:00:00Z', asynchronous=False, ).as_xarray() # --------- print(data) # %matplotlib inline fig = figure(figsize=(18, 9), dpi=100) ax = subplot(1, 1, 1) times = data['Timestamp'].values idx = (times == times.max()).nonzero()[0] ax.plot(data['MLT'].values, data['QDLat'].values, '.', ms=2) ax.set_ylim([-90, 90]) for qdlat, mlt, code in zip(data['QDLat'].values[idx], data['MLT'].values[idx], data['IAGA_code'].values[idx]): ax.text(mlt, qdlat, code) ax.set_yticks([-90, -45, 0, +45, 90]) ax.set_ylim([-90, 90]) ax.set_ylabel('QD 
latitide / deg') ax.set_xticks([0, 6, 12, 18, 24]) ax.set_xlim([0, 24]) ax.set_xlabel('MLT / hour') ax.grid() show() # - # ## 1 minute observatory data - single sensor # + request = SwarmRequest(server_url) request.set_collection('SW_OPER_AUX_OBSM2_:CLF') request.set_products( measurements=['B_NEC', 'F', 'IAGA_code', 'Quality'], auxiliaries=['QDLat', 'QDLon', 'MLT'], ) data = request.get_between( start_time='2016-01-01T10:00:00Z', end_time='2016-01-01T14:00:00Z', asynchronous=False, ).as_xarray() # --------- print(data) # %matplotlib inline fig = figure(figsize=(18, 18), dpi=100) ax = subplot(4, 1, 1) ax.plot(data['Timestamp'].values, data['F'].values, '-') ax.set_ylabel('F / nT') ax.grid() ax = subplot(4, 1, 2) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 0], '-') ax.set_ylabel('B_N / nT') ax.grid() ax = subplot(4, 1, 3) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 1], '-') ax.set_ylabel('B_E / nT') ax.grid() ax = subplot(4, 1, 4) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 2], '-') ax.set_ylabel('B_C / nT') ax.grid() show() # - # ## 1 hour observatory data - all # + request = SwarmRequest(server_url) request.set_collection('SW_OPER_AUX_OBSH2_') request.set_products( measurements=['B_NEC', 'F', 'IAGA_code', 'ObsIndex', 'Quality'], auxiliaries=['QDLat', 'QDLon', 'MLT'], ) observatories = request.available_observatories('SW_OPER_AUX_OBSH2_', details=True) print("Avaiable observatories and temporal extent of their data:") print('IAGACode\tstartTime\t\t\tendTime') for item in zip(observatories['site'], observatories['startTime'], observatories['endTime']): print("%s\t%s\t%s" % item) data = request.get_between( start_time='2016-01-01T10:00:00Z', end_time='2016-01-01T14:00:00Z', asynchronous=False, ).as_xarray() # --------- print(data) # %matplotlib inline fig = figure(figsize=(18, 9), dpi=100) ax = subplot(1, 1, 1) times = data['Timestamp'].values idx = (times == times.max()).nonzero()[0] ax.plot(data['QDLon'].values, 
data['QDLat'].values, '.', ms=4) ax.set_ylim([-90, 90]) for qdlat, mlt, code in zip(data['QDLat'].values[idx], data['QDLon'].values[idx], data['IAGA_code'].values[idx]): ax.text(mlt, qdlat, code) ax.set_yticks([-90, -45, 0, +45, +90]) ax.set_ylim([-90, 90]) ax.set_ylabel('QD latitide / deg') ax.set_xticks([-180, -135, -90, -45, 0, +45, +90, +135, +180]) ax.set_xlim([-180, 180]) ax.set_xlabel('QD longitude / deg') ax.grid() show() # - # ## 1 hour observatory data - single sensor # + request = SwarmRequest(server_url) request.set_collection('SW_OPER_AUX_OBSH2_:CLF') request.set_products( measurements=['B_NEC', 'F', 'IAGA_code', 'ObsIndex', 'Quality'], auxiliaries=['QDLat', 'QDLon', 'MLT'], ) data = request.get_between( start_time='2016-01-01T00:00:00Z', end_time='2016-01-02T00:00:00Z', asynchronous=False, ).as_xarray() # --------- print(data) # %matplotlib inline fig = figure(figsize=(18, 18), dpi=100) ax = subplot(4, 1, 1) ax.plot(data['Timestamp'].values, data['F'].values, '-') ax.set_ylabel('F / nT') ax.grid() ax = subplot(4, 1, 2) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 0], '-') ax.set_ylabel('B_N / nT') ax.grid() ax = subplot(4, 1, 3) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 1], '-') ax.set_ylabel('B_E / nT') ax.grid() ax = subplot(4, 1, 4) ax.plot(data['Timestamp'].values, data['B_NEC'].values[:, 2], '-') ax.set_ylabel('B_C / nT') ax.grid() show()
OBS/OBS_data_access.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Parameter identification example # # Here is a simple toy model that we use to demonstrate the working of the inference package # # $\emptyset \xrightarrow[]{k_1(I)} X \; \; \; \; X \xrightarrow[]{d_1} \emptyset$ # # # # $ k_1(I) = \frac{k_1 I^2}{K_R^2 + I^2}$ # # + # %matplotlib inline # %config InlineBackend.figure_format = "retina" from matplotlib import rcParams rcParams["savefig.dpi"] = 100 rcParams["figure.dpi"] = 100 rcParams["font.size"] = 20 # + # %matplotlib inline import bioscrape as bs from bioscrape.types import Model from bioscrape.simulator import py_simulate_model import numpy as np import pylab as plt import pandas as pd species = ['I','X'] reactions = [(['X'], [], 'massaction', {'k':'d1'}), ([], ['X'], 'hillpositive', {'s1':'I', 'k':'k1', 'K':'KR', 'n':2})] k1 = 50.0 d1 = 0.5 params = [('k1', k1), ('d1', d1), ('KR', 20)] initial_condition = {'X':0, 'I':0} M = Model(species = species, reactions = reactions, parameters = params, initial_condition_dict = initial_condition) # - # # Generate experimental data for multiple initial conditions # 1. Simulate bioscrape model # 2. Add Gaussian noise of non-zero mean and non-zero variance to the simulation # 3. Create appropriate Pandas dataframes # 4. 
Write the data to a CSV file num_trajectories = 4 # each with different initial condition initial_condition_list = [{'I':5},{'I':10},{'I':15},{'I':20}] timepoints = np.linspace(0,5,100) result_list = [] for init_cond in initial_condition_list: M.set_species(init_cond) result = py_simulate_model(timepoints, Model = M)['X'] result_list.append(result) plt.plot(timepoints, result, label = 'I =' + str(list(init_cond.values())[0])) plt.xlabel('Time') plt.ylabel('[X]') plt.legend() plt.show() # + exp_data = pd.DataFrame() exp_data['timepoints'] = timepoints for i in range(num_trajectories): exp_data['X' + str(i)] = result_list[i] + np.random.normal(5, 1, size = np.shape(result)) plt.plot(timepoints, exp_data['X' + str(i)], 'r', alpha = 0.3) plt.plot(timepoints, result_list[i], 'k', linewidth = 3) plt.xlabel('Time') plt.ylabel('[X]') plt.show() # - # ## CSV looks like: # exp_data.to_csv('birth_death_data_multiple_conditions.csv') exp_data # # Run the bioscrape MCMC algorithm to identify parameters from the experimental data # + from bioscrape.inference import py_inference # Import data from CSV # Import a CSV file for each experiment run exp_data = [] for i in range(num_trajectories): df = pd.read_csv('birth_death_data_multiple_conditions.csv', usecols = ['timepoints', 'X'+str(i)]) df.columns = ['timepoints', 'X'] exp_data.append(df) prior = {'k1' : ['uniform', 0, 100]} sampler, pid = py_inference(Model = M, exp_data = exp_data, measurements = ['X'], time_column = ['timepoints'], nwalkers = 15, init_seed = 0.15, nsteps = 5000, sim_type = 'stochastic', params_to_estimate = ['k1'], prior = prior, plot_show = False, convergence_check = False) # - pid.plot_mcmc_results(sampler, convergence_check = False); # ### Check mcmc_results.csv for the results of the MCMC procedure and perform your own analysis. 
# # # OR # # ### You can also plot the results as follows M_fit = M timepoints = pid.timepoints[0] flat_samples = sampler.get_chain(discard=200, thin=15, flat=True) inds = np.random.randint(len(flat_samples), size=200) for init_cond in initial_condition_list: for ind in inds: sample = flat_samples[ind] for pi, pi_val in zip(pid.params_to_estimate, sample): M_fit.set_parameter(pi, pi_val) M_fit.set_species(init_cond) plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.6) # plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0) for i in range(num_trajectories): plt.plot(timepoints, list(pid.exp_data[i]['X']), 'b', alpha = 0.1) plt.plot(timepoints, result, "k", label="original model") plt.legend(fontsize=14) plt.xlabel("Time") plt.ylabel("[X]"); # ## Let us now try to fit all three parameters to see if results improve: # + # prior = {'d1' : ['gaussian', 0, 10, 1e-3], 'k1' : ['gaussian', 0, 50, 1e-4]} prior = {'d1' : ['uniform', 0.1, 10],'k1' : ['uniform',0,100],'KR' : ['uniform',0,100]} sampler, pid = py_inference(Model = M, exp_data = exp_data, measurements = ['X'], time_column = ['timepoints'], nwalkers = 15, init_seed = 0.15, nsteps = 10000, sim_type = 'stochastic', params_to_estimate = ['d1','k1','KR'], prior = prior, plot_show = True, convergence_check = False) # - M_fit = M timepoints = pid.timepoints[0] flat_samples = sampler.get_chain(discard=200, thin=15, flat=True) inds = np.random.randint(len(flat_samples), size=200) for init_cond in initial_condition_list: for ind in inds: sample = flat_samples[ind] for pi, pi_val in zip(pid.params_to_estimate, sample): M_fit.set_parameter(pi, pi_val) M_fit.set_species(init_cond) plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.6) # plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0) for i in range(num_trajectories): plt.plot(timepoints, list(pid.exp_data[i]['X']), 'b', alpha = 0.2) plt.plot(timepoints, result_list[i], "k") # plt.legend(fontsize=14) 
# Axis labels for the fit-vs-data plot assembled in the previous cell.
plt.xlabel("Time")
plt.ylabel("[X]");

# ## All methods above have other advanced options that you can use. Refer to
# Parameter Identification Tools and Advanced Examples notebook for more
# details. There are many other tools available such as for multiple initial
# conditions and timepoints for each trajectory, options for the estimator
# etc.
inference examples/Stochastic Inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/drc10723/GAN_design/blob/master/GAN_implementations/Basic_GAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="L4E109s868zM" colab_type="text" # #Generative Adversarial Networks # # Aim of this notebook is to implement [Generative Adversarial Nets](https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf) paper by <NAME>. # # GAN models are trained to generate new images/content. Two models are trained parallel using adversarial training process. A generative model G, which try to generate new images and a discriminator model D, which estimates probability of sample coming from training data rather than G. # # # ### Let's start by imports # # + id="7yab643D6XxP" colab_type="code" colab={} import numpy as np from PIL import Image import matplotlib.pyplot as plt # %matplotlib inline # + id="REVYXxAndCZ-" colab_type="code" colab={} import torch import torchvision import torch.nn as nn import torch.nn.functional as F from torchvision import datasets, transforms # + id="Y3hzCeX4fC1l" colab_type="code" outputId="cc169559-4fda-401d-89c0-af882f836776" colab={"base_uri": "https://localhost:8080/", "height": 34} # use cuda if available DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(f"Using {DEVICE} backend") # batch size for training models # Change multiple of 16 only, else modify below code BATCH_SIZE = 128 #@param {type:"integer"} # + [markdown] id="npKgap6npJaT" colab_type="text" # ## Data Loading # # We will use MNIST dataset available as part of torchvision module. 
# + id="d4EyOEgUlZE3" colab_type="code" colab={} # convert images to tensors and normalize # after normalization images will be in range [-1, 1] with mean zero. transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5]) ]) # download and load training set of MNIST mnist = datasets.MNIST("./mnist", train=True, download=True, transform = transform) # create generator data loader with given batch size dataloader = torch.utils.data.DataLoader(mnist, batch_size=BATCH_SIZE, shuffle=True, num_workers=2, drop_last=True) # + id="10Vdj6cU52zN" colab_type="code" colab={} # for checking data, lets create iterator # We won't use this iterator for training purpose dataiter = iter(dataloader) images, labels = next(dataiter) # Create images grid of 16 rows, batch size need to be multiple of 16 # for batch size of 128, 128 images will be arranged in 8*16 grid torchvision.utils.save_image(images, "MNIST_input_grid.jpg", nrow=16, padding=0, normalize=False) # + id="vLRSL_OeskXU" colab_type="code" outputId="3c92a356-81b2-4f65-f867-f749e3aa4d8f" colab={"base_uri": "https://localhost:8080/", "height": 499} # Load saved training images grid and visualize using matplotlib # Figure of size 16*(Batch_size/16) plt.figure(figsize=(16, BATCH_SIZE/16)) plt.axis("off") plt.title("Training Images") # using _ for avoiding extra outputs _ = plt.imshow(Image.open("MNIST_input_grid.jpg")) # + [markdown] id="IW4E-IV4y3Qn" colab_type="text" # ## Discriminator Model ( D ) # # Discriminator is a binary classification model, which predicts if given image is generated one or taken from training data. # # Original paper implementation uses maxout and dropout for training discriminator. We will use Linear layers with Leaky ReLU and dropout only. 
# + id="UAAU5B0HfVzs" colab_type="code" colab={} class Discriminator(nn.Module): """ D(x) """ def __init__(self): # initalize super module super(Discriminator, self).__init__() # create a sequential layer with Linear layers # we are not using CNN layers here for keeping it simple # as mentioned in paper a fully connected with enough parameters should be sufficient # layer will have input of size : (batch_size, 784) self.layer = nn.Sequential(nn.Linear(784, 512), # size : (batch_size, 512) nn.LeakyReLU(0.2), # Dropout layer nn.Dropout(0.4), # size : (batch_size, 512) nn.Linear(512, 256), # size : (batch_size, 256) nn.LeakyReLU(0.2), # Dropout layer nn.Dropout(0.4), # size : (batch_size, 256) nn.Linear(256, 1), # size : (batch_size, 1) # sigmoid layer to convert in [0,1] range nn.Sigmoid() ) def forward(self, x): # size of x : (batch_size, 1, 28, 28) x = x.view(x.shape[0], -1) # size of x : (batch_size, 784) x = self.layer(x) # size of x : (batch_size, 1) return x # + id="4SVZbnxzpLL0" colab_type="code" outputId="382652fc-1351-4b1a-d221-318ee1bf8f70" colab={"base_uri": "https://localhost:8080/", "height": 221} # Create the Discriminator D = Discriminator().to(DEVICE) print(D) # + [markdown] id="1Zm6hsLk4kHv" colab_type="text" # ## Generator Model ( G ) # # Aim of the generator is to fool the discriminator model. 
# # # For stable trainining of Generator model # # * Using batchnorm in the generator # # * Using LeakyReLU activation in the discriminator # * Last layer of the generator is tanh # # [DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf) provides some guidelines on stabilizing GAN models # # # # # # + id="yp6MPki8jy8z" colab_type="code" colab={} class Generator(nn.Module): """ G(z) """ def __init__(self, input_size=100): # initalize super module super(Generator, self).__init__() # layer will have input of size : (batch_size, 100) self.layer = nn.Sequential(nn.Linear(input_size, 128), # size : (batch_size, 128) nn.BatchNorm1d(128), # size : (batch_size, 128) nn.ReLU(), # size : (batch_size, 128) nn.Linear(128, 256), # size : (batch_size, 256) nn.BatchNorm1d(256), # size : (batch_size, 256) nn.ReLU(), # size : (batch_size, 256) nn.Linear(256, 512), # size : (batch_size, 512) nn.BatchNorm1d(512), # size : (batch_size, 512) nn.ReLU(), # size : (batch_size, 512) nn.Linear(512, 1024), # size : (batch_size, 1024) nn.BatchNorm1d(1024), # size : (batch_size, 1024) nn.ReLU(), # size : (batch_size, 1024) nn.Linear(1024, 784), # size : (batch_size, 784) nn.Tanh()) # size : (batch_size, 784) def forward(self, x): # size : (batch_size, 100) x = self.layer(x) # size : (batch_size, 784) x = x.view(x.shape[0], 1, 28, 28) # size : (batch_size, 1, 28, 28) return x # + id="FLNGGbjrnq5u" colab_type="code" outputId="d1d47f2c-78fe-47de-cf0c-33f6e63d253f" colab={"base_uri": "https://localhost:8080/", "height": 323} # Create the Generator G = Generator().to(DEVICE) print(G) # + [markdown] id="h_fmu5lBUdPv" colab_type="text" # ## Model Training # # Value of beta1 in Adam optimizer has huge impact on stability of generator and [DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf) recommend 0.5 value. # # We can get far better results by using Ksteps more than 1, but you will need to train for more epochs. 
# + id="e7ppD6ZOoQP-" colab_type="code" colab={}
# Training hyperparameters.
# number of training epochs
NUM_EPOCH = 50 #@param {type:"integer"}
# size of latent vector z
size_z = 100
# number of discriminator steps for each generator step
Ksteps = 2 #@param {type:"integer"}
# learning rate of adam
# DCGAN recommend 0.0002 lr
Adam_lr = 0.0002 #@param {type:"number"}
# DCGAN recommend 0.5
Adam_beta1 = 0.5 #@param {type:"number"}

# + id="r4I_fj3knUrW" colab_type="code" colab={}
# We calculate Binary cross entropy loss
criterion = nn.BCELoss()
# Adam optimizer for generator
optimizerG = torch.optim.Adam(G.parameters(), lr=Adam_lr, betas=(Adam_beta1, 0.999))
# Adam optimizer for discriminator
optimizerD = torch.optim.Adam(D.parameters(), lr=Adam_lr, betas=(Adam_beta1, 0.999))

# + id="AyhZNliUrgCD" colab_type="code" colab={}
# labels for training images x for Discriminator training
labels_real = torch.ones((BATCH_SIZE, 1)).to(DEVICE)
# labels for generated images G(z) for Discriminator training
labels_fake = torch.zeros((BATCH_SIZE, 1)).to(DEVICE)

# + id="IllIZmPCpU30" colab_type="code" colab={}
# Adversarial training loop: alternates Ksteps discriminator updates with
# one generator update, and records per-epoch averages for plotting.
# List of values, which will be used for plotting purpose
D_losses = []
G_losses = []
Dx_values = []
DGz_values = []

# number of training steps done on discriminator
step = 0

for epoch in range(NUM_EPOCH):
    epoch_D_losses = []
    epoch_G_losses = []
    epoch_Dx = []
    epoch_DGz = []
    # iterate through data loader generator object
    # we don't need real class labels for training, so will be ignored
    for images, _ in dataloader:
        step += 1
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # images will be send to gpu, if cuda available
        x = images.to(DEVICE)
        # forward pass D(x)
        x_preds = D(x)
        # calculate loss log(D(x))
        D_x_loss = criterion(x_preds, labels_real)

        # create latent vector z from normal distribution
        z = torch.randn(BATCH_SIZE, size_z).to(DEVICE)
        # generate image
        fake_image = G(z)
        # calculate D(G(z)), fake or not
        z_preds = D(fake_image.detach())
        # loss log(1 - D(G(z)))
        D_z_loss = criterion(z_preds, labels_fake)

        # total loss = log(D(x)) + log(1 - D(G(z)))
        D_loss = D_x_loss + D_z_loss

        # save values for plots
        epoch_D_losses.append(D_loss.item())
        epoch_Dx.append(x_preds.mean().item())

        # zero accumulated grads
        D.zero_grad()
        # do backward pass
        D_loss.backward()
        # update discriminator model
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        # if Ksteps of Discriminator training are done, update generator
        if step % Ksteps == 0:
            # As we done one step of discriminator, again calculate D(G(z))
            z_out = D(fake_image)
            # loss log(D(G(z)))
            G_loss = criterion(z_out, labels_real)
            # save values for plots
            epoch_DGz.append(z_out.mean().item())
            # FIX: store the Python float, not the tensor. Appending the raw
            # G_loss tensor kept its whole autograd graph alive for the rest
            # of training (memory leak) and left grad-requiring tensors in
            # G_losses, which breaks the later plt.plot(G_losses) cell.
            epoch_G_losses.append(G_loss.item())

            # zero accumulated grads
            G.zero_grad()
            # do backward pass
            G_loss.backward()
            # update generator model
            optimizerG.step()
    else:
        # for/else: this branch runs after the dataloader loop completes
        # each epoch (there is no `break` above, so it always runs).
        # calculate average value for one epoch
        D_losses.append(sum(epoch_D_losses)/len(epoch_D_losses))
        G_losses.append(sum(epoch_G_losses)/len(epoch_G_losses))
        Dx_values.append(sum(epoch_Dx)/len(epoch_Dx))
        DGz_values.append(sum(epoch_DGz)/len(epoch_DGz))
        print(f" Epoch {epoch+1}/{NUM_EPOCH} Discriminator Loss {D_losses[-1]:.3f} Generator Loss {G_losses[-1]:.3f}"
              + f" D(x) {Dx_values[-1]:.3f} D(G(x)) {DGz_values[-1]:.3f}")

    # Generating images after each epoch and saving
    # set generator to evaluation mode
    G.eval()
    with torch.no_grad():
        # generate z
        z_test = torch.randn(BATCH_SIZE, size_z).to(DEVICE)
        # forward pass of G and generated image
        fake_test = G(z_test).cpu()
        # save images in grid of 16 * batch_size/16
        torchvision.utils.save_image(fake_test, f"MNIST_epoch_{epoch+1}.jpg", nrow=16, padding=0, normalize=False)
    # set generator to training mode
    G.train()

# + [markdown] id="wD0jTPLKZzTu" colab_type="text"
# ## Results
#
# After 50 epoch training, we are able to generate good quality images. We
# can improve more by using DCGAN and hyperparameter tuning.
# + id="fEbWD88gZ2Rw" colab_type="code" outputId="02e87cc5-385f-4bd9-bd78-fe1f75865925" colab={"base_uri": "https://localhost:8080/", "height": 499} # Load saved generated images grid and visualize using matplotlib # Figure of size 16*(Batch_size/16) plt.figure(figsize=(16, BATCH_SIZE/16)) plt.axis("off") plt.title("Generated Images") # using _ for avoiding extra outputs _ = plt.imshow(Image.open("MNIST_epoch_50.jpg")) # + [markdown] id="dARIjCogc55s" colab_type="text" # #### Plot for D(x) and D(G(z)) over the epochs # Ideally after long training, discriminator accuracy should go down to 0.5. Lets observe D(x) and D(G(z)) for all epochs. # + id="JeQdEvag4uKk" colab_type="code" outputId="510d2c26-a375-496f-cb3d-d513080a0c72" colab={"base_uri": "https://localhost:8080/", "height": 351} plt.figure(figsize=(10,5)) plt.title("D(x) and D(G(z)) during Training") plt.plot(DGz_values,label="D(G(z))") plt.plot(Dx_values,label="D(x)") ax = plt.gca() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.xlabel("num_epochs") plt.legend() plt.show() # + [markdown] id="urOunOeud4a5" colab_type="text" # As we can see values of both D(x) and D(G(x)) look like converging close to 0.5 # # #### Plot for Discriminator and Generator loss over the epochs # + id="HCMwKVoT4x5A" colab_type="code" outputId="4fa76ccc-00cd-4962-8c80-186ff2a14306" colab={"base_uri": "https://localhost:8080/", "height": 351} plt.figure(figsize=(10,5)) plt.title("Discrminator and Generator loss during Training") plt.plot(D_losses,label="D Loss") plt.plot(G_losses,label="G Loss") ax = plt.gca() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.xlabel("num_epochs") plt.legend() plt.show()
Dharmendra_Choudhary/GAN_design/GAN_implementations/Basic_GAN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Decision Tree & Grid Search Example # This notebook shows how to tune a simple classifier like a decision tree via GridSearch. # %load_ext watermark # %watermark -p scikit-learn,mlxtend,xgboost # ## Dataset # + from sklearn import model_selection from sklearn.model_selection import train_test_split from sklearn import datasets data = datasets.load_breast_cancer() X, y = data.data, data.target X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.3, random_state=1, stratify=y) X_train_sub, X_valid, y_train_sub, y_valid = \ train_test_split(X_train, y_train, test_size=0.2, random_state=1, stratify=y_train) print('Train/Valid/Test sizes:', y_train.shape[0], y_valid.shape[0], y_test.shape[0]) # - # ## Grid Search # + import numpy as np from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier clf = DecisionTreeClassifier(random_state=123) params = { 'min_samples_split': [2, 3, 4], 'max_depth': [6, 16, None] } grid = GridSearchCV(estimator=clf, param_grid=params, cv=10, n_jobs=1, verbose=2) grid.fit(X_train, y_train) grid.best_score_ # - grid.best_params_ print(f"Training Accuracy: {grid.best_estimator_.score(X_train, y_train):0.2f}") #print(f"Validation Accuracy: {grid.best_estimator_.score(X_valid, y_valid):0.2f}") print(f"Test Accuracy: {grid.best_estimator_.score(X_test, y_test):0.2f}")
hyperparameter-tuning-examples/01.1-gridsearch-decisiontree-example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare the sets of top-predicted PPIs produced by different link-prediction
# methods on the human interactome datasets, and print LaTeX table rows of the
# pairwise overlap percentages grouped by method "principle" (CN vs L3 vs rand).

# +
import sys
sys.path.append('..')

import warnings
warnings.filterwarnings("ignore")

# standard
import json, pickle
from collections import defaultdict
import pandas as pd
import numpy as np
import seaborn as sns
from statistics import median
from matplotlib import pyplot as plt
from sklearn import metrics
import matplotlib.ticker as ticker
from itertools import combinations
from scipy import stats
import math
from matplotlib.ticker import FormatStrFormatter

# my lib
import PPILinkPred as pred
import genData_helper as helper
import traversalHelper as tr

# +
# Plot colors and display-name mappings for each prediction method.
colors = {
    "L3E1_f1": "tab:blue", "L3E1_f2": "tab:olive", "L3E4_f1": "tab:cyan", "L3E4_f2": "tab:pink",
    "L3": "tab:orange", "CN": "tab:green", "CRA": "tab:red", "CH2": "tab:brown",
    "Sim": "tab:purple", "rand": "tab:grey"}
methods = ["commonNeighbor", "L3Normalizing", "CRA", "Sim", "CH2_L3", "random"]+["L3E1_{}".format(i) for i in ['f1', 'f2']]
methods_map = ["CN", "L3", "CRA", "Sim", "CH2", "rand"]+["L3E1_{}".format(i) for i in ['f1', 'f2']]
abbrev_map = ["CN", "L3", "CRA", "Sim", "CH2", "rand"]+["L3E\n($f_{"+str(i)+"}$)" for i in range(1,3)]
label_map = ["CN", "L3", "CRA", "Sim", "CH2", "rand"]+["L3E($f_{"+str(i)+"}$)" for i in range(1,3)]
methods_names = dict(zip(methods, methods_map))
abbrevs = dict(zip(methods_map, abbrev_map))
labels = dict(zip(methods_map, label_map))

# +
# Load the top-ranked predicted PPIs for every (method, dataset, sample size)
# combination.  Each PPI is canonicalized as a sorted, tab-joined protein pair
# so identical interactions compare equal across methods and trials.
methods = ["commonNeighbor", "L3Normalizing", "CRA", "Sim", "CH2_L3", "L3E1_f1", "L3E1_f2", "random"]
ds_names = ['bioGRID_human', 'STRING_human', 'MINT_human', 'HuRI']
parseTopPPIs = {}
for randSz in range(50, 100, 10):
    parseTopPPIs[randSz] = {}
    for ds in ds_names:
        parseTopPPIs[randSz][ds] = {}
        for method in methods:
            methodName = methods_names[method]
            parseTopPPIs[randSz][ds][methodName] = set()
            # Files for the default 50% sample size carry no randSz suffix.
            if randSz == 50:
                filename = "./linkPred_out_reduced/{}_{}_topPPI.json".format(method, ds)
            else:
                filename = "./linkPred_out_reduced/{}_{}_randSz{}_topPPI.json".format(method, ds, randSz)
            with open(filename, "r") as f:
                tmpPPIs = json.loads(f.read())
            for trial in range(len(tmpPPIs)):
                parseTopPPIs[randSz][ds][methodName].update(set(["\t".join(sorted(ppi)) for ppi in tmpPPIs[trial]]))
#print(parseTopPPIs[50]['bioGRID']['CN'])

# + hide_input=false
# calculate overlap class-based: group the pairwise overlap ratio of every
# unordered method pair under the key "<principleA>_<principleB>".
myMethods = ["CN", "CRA", "L3", "CH2", "Sim", "L3E1_f1", "L3E1_f2", "rand"]
principles = ["CN", "CN", "L3", "L3", "L3", "L3", "L3", "rand"]
ds_names = ['bioGRID_human', 'STRING_human', 'MINT_human', 'HuRI']
overlaps_mats_grouped = {}
for randSz in range(50, 100, 10):
    overlaps_mats_grouped[randSz] = {}
    for ds in ds_names:
        overlaps_mats_grouped[randSz][ds] = defaultdict(list)
        for A_i in range(len(myMethods)):
            for B_i in range(len(myMethods)):
                # Visit each unordered pair exactly once.
                if B_i < A_i or A_i == B_i: continue
                methodA, methodB = myMethods[A_i], myMethods[B_i]
                # Overlap = |A & B| / |A|, rounded to 4 decimals.
                overlaps_mats_grouped[randSz][ds]["{}_{}".format(principles[A_i], principles[B_i])].append(
                    np.around(len(parseTopPPIs[randSz][ds][methodA]&parseTopPPIs[randSz][ds][methodB]
                    )/len(parseTopPPIs[randSz][ds][methodA]), 4))
        # FIX: dropped a no-op str.format() call that was applied to the literal key.
        overlaps_mats_grouped[randSz][ds]["CRA_L3E"].append(
            np.around(len(parseTopPPIs[randSz][ds]["CRA"]&parseTopPPIs[randSz][ds]["L3E1_f1"]
            )/len(parseTopPPIs[randSz][ds]["CRA"]), 4))

# convert grouped to mean & std
overlaps_mats_mean, overlaps_mats_std = {}, {}
for randSz in range(50, 100, 10):
    overlaps_mats_mean[randSz], overlaps_mats_std[randSz] = {}, {}
    for ds in ds_names:
        overlaps_mats_mean[randSz][ds], overlaps_mats_std[randSz][ds] = {}, {}
        for pair in overlaps_mats_grouped[randSz][ds]:
            overlaps_mats_mean[randSz][ds][pair] = np.around(np.mean(overlaps_mats_grouped[randSz][ds][pair]), 2)
            overlaps_mats_std[randSz][ds][pair] = np.around(np.std(overlaps_mats_grouped[randSz][ds][pair]), 2)
print(overlaps_mats_mean)
print(overlaps_mats_std)

# +
# overlaps_mats_mean, overlaps_mats_std
# y: dataset w sample size, x: principle pair
# Emit one LaTeX table row per dataset at the 50% sample size only.
principlePairs = ['CN_CN', 'L3_L3', 'CN_L3', 'CRA_L3E']
colors = ["blue", "blue", "red", "red"]
colorsMap = dict(zip(principlePairs, colors))
overlapMeanTB_str = []
for ds in ['bioGRID_human', 'STRING_human', 'MINT_human', 'HuRI']:
    for randSz in range(50, 51, 10):
        # FIX: "\\cellcolor" (was "\cellcolor") — same rendered backslash, but
        # "\c" is an invalid escape sequence and inconsistent with the cell below.
        buildStr = "\\cellcolor{gray!15} "+ds
        for pair in principlePairs:
            if 'rand' in pair: continue
            if 'CN_CN' == pair or "CRA" in pair:
                # FIX: round before truncating to int — int(0.57*100) == 56
                # because 0.57*100 is 56.999... in binary floating point; the
                # second table already used int(np.around(...)).
                buildStr += " & \\textcolor{"+colorsMap[pair]+"}{"+(str(int(np.around(overlaps_mats_mean[randSz][ds][pair]*100, 2)))+"\\%}")
            else:
                buildStr += " & \\textcolor{"+colorsMap[pair]+"}{"+(str(int(np.around(overlaps_mats_mean[randSz][ds][pair]*100, 2)))+" $\\pm$ "+str(
                    int(np.around(overlaps_mats_std[randSz][ds][pair]*100, 2)))+" \\%}")
        print(buildStr+" \\\\ \\hline")

# +
# overlaps_mats_mean, overlaps_mats_std
# y: dataset w sample size, x: principle pair
# Emit a LaTeX sub-table per dataset with one row per sample size (50-90%).
principlePairs = ['CN_CN', 'L3_L3', 'CN_L3', 'CRA_L3E']
colors = ["blue", "blue", "red", "red"]
colorsMap = dict(zip(principlePairs, colors))
overlapMeanTB_str = []
for ds in ['bioGRID_human', 'STRING_human', 'MINT_human', 'HuRI']:
    # FIX: these are human datasets; the header previously appended " Yeast"
    # (copy-paste from the yeast version of this notebook).
    print("\\multicolumn{5}{|l|}{\\textbf{"+ds+"}} \\\\ \\hline")
    for randSz in range(50, 100, 10):
        buildStr = "\\cellcolor{gray!15} "+str(randSz)+"\\% sample size"
        for pair in principlePairs:
            if 'rand' in pair: continue
            if 'CN_CN' == pair or "CRA" in pair:
                buildStr += " & \\textcolor{"+colorsMap[pair]+"}{"+(str(int(np.around(overlaps_mats_mean[randSz][ds][pair]*100, 2)))+"\\%}")
            else:
                buildStr += " & \\textcolor{"+colorsMap[pair]+"}{"+(str(int(np.around(overlaps_mats_mean[randSz][ds][pair]*100, 2)))+" $\\pm$ "+str(
                    int(np.around(overlaps_mats_std[randSz][ds][pair]*100, 2)))+" \\%}")
        print(buildStr+" \\\\ \\hline")
# -
src/notebook/predicted PPIs overlaps human.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# # [Module 0.0] Install additional Python packages and set up the environment
# ---

# ## Install the latest SageMaker SDK
# !pip install --upgrade sagemaker

# ## Install SageMaker Experiments
# !pip install --upgrade sagemaker-experiments
phase01/0.0.Setup-Environment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introducing pwangbot # # I was chatting with @dff of @tidelift at the NumFocus summit last year, and he suggested classifying @pwang's tweets. For those who don't know, @pwang tweets alot, and the content is quite good - but it spans a variety of subjects. If you want to hear @pwang tweet about tech, but not about politics, it can be a problem. So we're going to collect @pwang tweets, do some topic modeling, and then make some bots! # ## Topic Modeling of Tweets # # # # %matplotlib inline import s3fs import json import logging import tweepy logging.basicConfig(level=logging.INFO) from string import punctuation from nltk.stem import WordNetLemmatizer, SnowballStemmer from nltk.tokenize import casual_tokenize from nltk.stem import PorterStemmer from nltk.stem.wordnet import WordNetLemmatizer import nltk nltk.download('wordnet') nltk.download('punkt') nltk.download('stopwords') nltk.download('wordnet') en_stop = set(nltk.corpus.stopwords.words('english')) lemmatizer = WordNetLemmatizer() stoplist = en_stop.union(set(punctuation)) stemmer = SnowballStemmer('english') fs = s3fs.S3FileSystem() root = 'saturn-cloud-data/hugo/pwang-bot-data' paths = fs.ls(root) data = [] for p in paths: with fs.open(p) as f: data.append(json.load(f)) # ## Tokenization # # We do the following - convert everything to lower case words, Throwing out "stop" words, optionally throwing out usernames, urls, and then stemming and lemmatizing. 
# +
# Extra stop words: URL artifacts ('http', 'https', 'co'), retweet marker
# ('rt'), and filler words that would otherwise dominate the topics.
en_stop.add('http')
en_stop.add('https')
en_stop.add('co')
en_stop.add('rt')
en_stop.add('like')
en_stop.add('first')
en_stop.add('time')
en_stop.add('next')

def tokenize(x: str, with_usernames: bool = True):
    """Yield cleaned tokens of tweet text ``x``.

    Each casual token is truncated at the first apostrophe, lowercased and
    stemmed; stop words and tokens of length <= 3 are dropped.  When
    ``with_usernames`` is False, tokens containing '@' (mentions) or ':'
    (mostly URL remnants) are dropped too.  Surviving tokens are lemmatized.
    """
    for token in casual_tokenize(x):
        # "don't" -> "don": keep only the part before an apostrophe.
        if "'" in token:
            token = token.split("'")[0]
        token = token.lower()
        token = stemmer.stem(token)
        # Note: the stop-word test runs on the *stemmed* form.
        if token in en_stop:
            continue
        if not with_usernames:
            if "@" in token:
                continue
            if ":" in token:
                continue
        if len(token) > 3:
            yield lemmatizer.lemmatize(token)

# Two parallel corpora: with and without @-mentions/URL remnants.
texts = [" ".join(list(tokenize(x['text']))) for x in data]
texts_no_username = [" ".join(list(tokenize(x['text'], with_usernames=False))) for x in data]

# +
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer

documents = texts_no_username
no_features = 1000

# LDA can only use raw term counts for LDA because it is a probabilistic graphical model
# NOTE(review): newer scikit-learn expects stop_words to be a list, not a set
# — verify against the installed sklearn version.
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=no_features, stop_words=en_stop)
tf = tf_vectorizer.fit_transform(documents)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favour
# of get_feature_names_out() — confirm the pinned sklearn version.
tf_feature_names = tf_vectorizer.get_feature_names()
# -

# Run LDA
from sklearn.decomposition import LatentDirichletAllocation
no_topics = 2
# NOTE(review): the n_topics parameter was renamed to n_components in
# scikit-learn 0.19 and removed in 0.21; this line only runs on old sklearn.
lda = LatentDirichletAllocation(n_topics=no_topics, max_iter=50, learning_method='online', learning_offset=50.,random_state=0).fit(tf)

import numpy as np
# results = np.argmax(nmf.transform(tfidf_vectorizer.transform(texts)), axis=1)
# Assign each tweet (with usernames kept) to its most probable topic.
results = np.argmax(lda.transform(tf_vectorizer.transform(texts)), axis=1)

# Bucket raw and tokenized tweet text by assigned topic id.
categorized = {'raw': {}, 'token_no_username': {}, 'token': {}}
for idx, result in enumerate(results):
    categorized['token_no_username'].setdefault(result, []).append(texts_no_username[idx])
    categorized['token'].setdefault(result, []).append(texts[idx])
    categorized['raw'].setdefault(result, []).append(data[idx]['text'])

# Per-topic tweet counts (set of sizes, shown as cell output).
{len(categorized['raw'][x]) for x in range(no_topics)}

# +
import os
from os import path
from wordcloud import WordCloud

# Generate a word cloud image
for c in range(no_topics):
    print(len(categorized['token_no_username'][c]))
    text = " ".join(categorized['token_no_username'][c])
    import matplotlib.pyplot as plt
    # lower max_font_size
    wordcloud = WordCloud(background_color="white", max_font_size=40).generate(text)
    plt.figure(figsize=(15, 15))
    plt.imshow(wordcloud, interpolation="bilinear")
    plt.axis("off")
    plt.show()

# The pil way (if you don't have matplotlib)
# image = wordcloud.to_image()
# image.show()
# -

# ## Running the bot
#
# So again - you can't run this without your own twitter credentials. Essentially, every loop we sleep 10 minutes, and then,
# we load the 20 most recent tweets. If there are new tweets, we tokenize them, classify them according to our model,
# and tweet them out of the appropriate account.
#
# pwangbot_tech is defunct right now - I already have 3 twitter accounts associated with my phone number,
# twitter won't let me add another. But I get the feeling pwangbot_tech is the more popular option.

# One bot account per topic id; index in this list == LDA topic id.
accounts = ['pwangbot_tech', "pwangbot_politics"]
with open("/home/jovyan/twitter-creds.json") as f:
    creds = json.load(f)
# Build one tweepy API client per account; None when no credentials exist.
tapis = []
for a in accounts:
    cred = creds.get(a, None)
    if cred is None:
        tapis.append(None)
        continue
    auth = tweepy.OAuthHandler(cred['consumer_key'], cred['consumer_secret'])
    auth.set_access_token(cred['access_token'], cred['access_token_secret'])
    api = tweepy.API(auth, wait_on_rate_limit=True, timeout=120)
    tapis.append(api)

max_id = 0
import time
# Poll loop: fetch @pwang's timeline, classify unseen tweets with the LDA
# model, and retweet each from the account matching its topic.
while True:
    new_tweets = tapis[0].user_timeline('pwang')
    # Keep only tweets newer than the last batch processed.
    new_tweets = [x._json for x in new_tweets if x.id > max_id]
    print ('%s new tweets' % len(new_tweets))
    if len(new_tweets) == 0:
        time.sleep(600)
        continue
    new_tweets = sorted(new_tweets, key=lambda x: x['id'])
    # new_tweets = data
    texts = []
    for tweet in new_tweets:
        text_no_username = " ".join(list(tokenize(tweet['text'], with_usernames=False)))
        texts.append(text_no_username)
    results = np.argmax(lda.transform(tf_vectorizer.transform(texts)), axis=1)
    for r, tweet in zip(results, new_tweets):
        account = accounts[r]
        api = tapis[r]
        print('tweeting %s to %s' % (tweet['text'], account))
        if api is not None:
            try:
                api.retweet(tweet['id'])
            # NOTE(review): tweepy v4 renamed TweepError to TweepyException /
            # errors.* — this handler only matches on tweepy v3.
            except tweepy.TweepError as e:
                # 327 == "already retweeted"; safe to ignore.
                if e.api_code == 327:
                    print('duplicate retweet')
    max_id = max([x['id'] for x in new_tweets])
Topic Modeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Deployment notebook: ask the user for an accident location and time, plot the
# location on a map of Brazil, engineer the cyclic time-of-day feature, and run
# the pickled model on the resulting feature row.

import pickle

import numpy as np
import pandas as pd


def hora_para_sin(horario):
    """Convert an 'HH:MM' time string to sin(2*pi*hour/24).

    The sine transform makes the hour feature cyclic, so times just before
    and just after midnight map to nearby values for the model.

    Args:
        horario: time of day as a string, e.g. '10:20'.

    Returns:
        The sine-encoded time of day as a float.
    """
    horas, minutos = horario.split(':')
    hor_flt = float(horas) + float(minutos) / 60  # '10:20' -> 10.333...
    return float(np.sin(2. * np.pi * hor_flt / 24.))


def main():
    # Heavy/optional dependencies are imported here so that importing this
    # module (e.g. to reuse hora_para_sin) only needs numpy/pandas.
    import matplotlib.pyplot as plt
    import seaborn as sns  # noqa: F401  (kept from the original notebook)
    import streamlit as st  # noqa: F401  (kept from the original notebook)
    from geopy.geocoders import Nominatim

    user_input = input("Localização do acidente")
    geolocator = Nominatim(user_agent="Acidente")
    location = geolocator.geocode(user_input)
    print(location.latitude, location.longitude)

    # BUG FIX: the original bounding box had unbalanced parentheses, referenced
    # an undefined `df`, and swapped the axes (latitudes were used as x-limits
    # while the scatter plots longitude on x).  Brazil spans roughly
    # lon -74.09..-34.89 and lat -33.94..4.83, so BBox is
    # (lon_min, lon_max, lat_min, lat_max), matching set_xlim/set_ylim and
    # imshow's extent=(left, right, bottom, top).
    BBox = (-74.092, -34.893, -33.943, 4.828)

    ruh_m = plt.imread('Brasil.png')
    fig, ax = plt.subplots(figsize=(12.5, 12.5))
    ax.scatter(location.longitude, location.latitude, zorder=1, alpha=0.2, c='b', s=10)
    ax.set_title('Localização Acidente')
    ax.set_xlim(BBox[0], BBox[1])
    ax.set_ylim(BBox[2], BBox[3])
    ax.imshow(ruh_m, zorder=0, extent=BBox, aspect='equal')

    # BUG FIX: the original split `horario` (a variable defined only later)
    # instead of the time string the user just typed, raising NameError.
    user_input_2 = input('Escreva aqui qual hora')
    horario_sin = hora_para_sin(user_input_2)
    print(horario_sin)

    # Feature row expected by the model: cyclic hour plus coordinates.
    df_testando = pd.DataFrame({'horario_sin': [horario_sin],
                                'latitude': [location.latitude],
                                'longitude': [location.longitude]})

    # BUG FIX: the original called predict(df) on an undefined `df`; feed the
    # model the feature row built above instead.
    loaded_model = pickle.load(open('finalized_model.sav', 'rb'))
    result = loaded_model.predict(df_testando)
    print(result)


if __name__ == "__main__":
    main()
04_Deploy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="R3D25d45DFHY"
# ## Download the data

# + colab={"base_uri": "https://localhost:8080/"} id="pUM4TIExj7EA" outputId="d9569336-a829-4b00-9ddd-ccb708cc37ba"
# Fetch the 2015 NYC yellow-taxi dataset (HDF5, float32 columns) from the Vaex
# S3 bucket; -c resumes a partial download.
# ! wget --header="Host: vaex.s3.us-east-2.amazonaws.com" --header="User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" --header="Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9" --header="Accept-Language: en-US,en;q=0.9" --header="Referer: https://vaex.io/docs/datasets.html" "https://vaex.s3.us-east-2.amazonaws.com/taxi/yellow_taxi_2015_f32s.hdf5" -c -O 'yellow_taxi_2015_f32s.hdf5'

# + colab={"base_uri": "https://localhost:8080/"} id="gyckjIhckBeP" outputId="f7e41136-be5d-4000-b79c-d89025b91f6a"
# ! ls

# + id="PMU8d-guhHoS"
import vaex
# from vaex.ui.colormaps import cm_plusmin
import numpy as np
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")

# + id="2wTPlDq7WZT8" colab={"base_uri": "https://localhost:8080/"} outputId="2daf5107-8248-4282-9773-498ff74ae8ca"
# %%time
import vaex
# vaex.open memory-maps the HDF5 file, so this is fast regardless of size.
df = vaex.open('yellow_taxi_2015_f32s.hdf5')

# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="jc3kDAh1ZKue" outputId="bfbe2bed-35c8-46a9-ab0a-56986ba0343f"
df

# + colab={"base_uri": "https://localhost:8080/"} id="VRwiYCHnaimb" outputId="bc6c3db7-3992-407d-af69-ad7fcf51d8b4"
df.columns.keys()

# + id="sGqFDo2mlb-o"
# ! pip install vaex

# + id="R6XsApd5mM62" colab={"base_uri": "https://localhost:8080/"} outputId="81849dde-29aa-4421-dc00-5409b82b72c0"
# NOTE(review): the commented pip/ipython commands below are leftovers from
# debugging the Colab environment and are kept for reference only.
# # !pip install --upgrade python
# # !pip uninstall ipython

# + id="XZ4_wFLLZE2u"
import vaex

# + colab={"base_uri": "https://localhost:8080/"} id="mVt8QRt7lp6w" outputId="2ff85099-9f51-4841-a183-13aff8c1f3c5"
# # !pip install --upgrade python
# # ! pip install --upgrade vaex
# !pip install --upgrade ipython
# # !pip install vaex
# # !pip install --upgrade ipython

# + colab={"base_uri": "https://localhost:8080/"} id="3cj_g2rumhCx" outputId="15862883-a289-46dd-f327-050c803e0cc6"
# ! ls

# + [markdown] id="uLor5M2hDIOr"
# ## Various visualization with Vaex on huge data

# + id="07MczVv7l1rj"
import vaex

# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="H_docR8jbazw" outputId="f888d640-e026-47ff-eff0-f6a532fdaf2a"
# Histogram of fares, clipped to the central 99.7% to hide extreme outliers.
# NOTE(review): df.plot1d/df.plot are the legacy Vaex plotting API — newer
# releases expose these under df.viz; confirm the pinned vaex version.
df.plot1d(df.fare_amount, limits='99.7%');

# + id="PPMYvI4rhOog"
import pylab as plt

# + id="47udjy1BhPKA"
# Globally bump matplotlib font sizes for the figures below.
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16

plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title

# + colab={"base_uri": "https://localhost:8080/", "height": 326} id="MjzCTJxKhR3Q" outputId="a650e4b8-04ee-46d1-80f3-b4906649c089"
# Plot the distribution of distances.
plt.figure(figsize=(8, 4))
df.plot1d('trip_distance', limits=[0, 500], f='log10', shape=128, lw=3, progress=True)
plt.xlabel('Trip distance [miles]')
plt.ylabel('Number of trips [dex]')
plt.show()

# + id="IastXrg6izoc"
# Define the boundaries by interactively choosing the area of interest!
# (Roughly the NYC metropolitan area.)
long_min = -74.05
long_max = -73.75
lat_min = 40.58
lat_max = 40.90

# Make a selection based on the boundaries: keep trips whose pickup AND
# dropoff both fall inside the bounding box.
df_filtered = df[(df.pickup_longitude > long_min) & (df.pickup_longitude < long_max) & \
                 (df.pickup_latitude > lat_min) & (df.pickup_latitude < lat_max) & \
                 (df.dropoff_longitude > long_min) & (df.dropoff_longitude < long_max) & \
                 (df.dropoff_latitude > lat_min) & (df.dropoff_latitude < lat_max)]

# + colab={"base_uri": "https://localhost:8080/", "height": 382} id="30Qj_1bYi2Ts" outputId="efcbc016-4abd-4b71-e631-f833573eb92b"
# 2D heatmap of the mean fare by pickup location (log1p color scale).
plt.figure(figsize=(7, 5))
df_filtered.plot('pickup_longitude', 'pickup_latitude', what='mean(fare_amount)',
                 colormap='plasma', f='log1p', shape=512, colorbar=True,
                 colorbar_label='mean fare amount [$]', vmin=1, vmax=4.5)

# + colab={"base_uri": "https://localhost:8080/", "height": 380} id="BReORi1EXcrv" outputId="25adc385-4e3b-4daf-856e-78b20b51f46b"
# %%time
# Self-contained version of the pipeline above: open, report shape, and plot
# the pickup-density heatmap in one timed cell.
import vaex
import warnings; warnings.filterwarnings("ignore")
df = vaex.open('yellow_taxi_2015_f32s.hdf5')
print(f'number of rows: {df.shape[0]:,}')
print(f'number of columns: {df.shape[1]}')
long_min = -74.05
long_max = -73.75
lat_min = 40.58
lat_max = 40.90
df.plot(df.pickup_longitude, df.pickup_latitude, f="log1p", limits=[[-74.05, -73.75], [40.58, 40.90]], show=True);

# + id="zCfHNlvxbec-"
Chapter 07/Chapter_7_More_Visualization_with_Vaex.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Airbnb Lisbon Data Analysis
# This notebook will analyse public data from airbnb, obtained from http://insideairbnb.com/get-the-data.html. We will look at data from Lisbon, the capital city of Portugal, to answer the following questions:
#
# ###### 1 - Which neighbourhoods have the most listings, are more expensive and have the most positive reviews?
#
# ###### 2 - Do super hosts have more positive reviews than regular hosts?
#
# ###### 3 - What are the factors that most contribute to more positive reviews?
#
#
# ## Table of Contents:
# * [Data Sneak Peek](#first-bullet)
# * [Neighbourhoods Analysis (Question 1)](#second-bullet)
#     * [Number of Listings](#second-1-bullet)
#     * [Review Score per Neighbourhood](#second-2-bullet)
#     * [Price per Neighbourhood](#second-3-bullet)
# * [Super Host Analysis (Question 2)](#third-bullet)
# * [Review Score Prediction (Question 3)](#fourth-bullet)
#     * [NaN values analysis](#fifth-bullet)
#     * [Encoding](#sixth-bullet)
#     * [Scaling](#seventh-bullet)
#     * [Random Forest Grid Search](#eighth-bullet)
#     * [Learning Curve](#nineth-bullet)
#     * [Feature Importance Graph](#tenth-bullet)
# * [Conclusion](#conclusion)

# ### Data Sneak Peek <a class="anchor" id="first-bullet"></a>

# +
# Library imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import permutation_test_score
# FIX: GridSearchCV lives in sklearn.model_selection; the sklearn.grid_search
# module was deprecated in 0.18 and removed in 0.20, so the old import breaks
# on any modern scikit-learn (the rest of this notebook already uses
# model_selection).
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor

# magic word for producing visualizations in notebook
# %matplotlib inline

# +
# Functions
def parse_into_float(price_string):
    '''
    Args:
        price_string: The price string which will be parsed into a float. The expected format is '$(d)+'.

    Parses string containing a price into a float variable.

    Returns:
        A float variable containing the parsed argument. The first character is trimmed
        because it is expected to contain the currency symbol.
    '''
    return float(price_string[1:].replace(',', ''))

def get_true_false_dummy(true_false_string):
    '''
    Args:
        true_false_string: A string which is expected to be either 't' or 'f'.

    Parses string into a boolean value.

    Returns:
        A boolean value of the parsed argument.
    '''
    if true_false_string == 't':
        return 1
    elif true_false_string == 'f':
        return 0
    else:
        # Anything other than 't'/'f' is treated as missing.
        return np.nan

def parse_lists(list_string):
    '''
    Args:
        list_string: A string is expected to contain a list.

    Parses string into a list of strings. The first and last character are expected to be
    '[' and ']' and will therefore be excluded. The reamining characters will be split by
    comma into a list of values.

    Returns:
        A list of string values parsed from the argument.
    '''
    list_string = list_string[1:-1]
    list_string = list_string.replace('"', '')
    list_string = list_string.replace(' ', '')
    list_string = list_string.replace('\'', '')
    return list_string.split(',')

def parse_lists_into_count(list_string):
    '''
    Args:
        list_string: A string is expected to contain a list.

    Parses string into a list of strings. Applies the len function to return the length of the list.

    Returns:
        An integer value containing the length of the argument list.
    '''
    return len(parse_lists(list_string))

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=None,
                        train_sizes=np.linspace(.1, 1.0, 10)):
    """
    Generate a simple plot of the test and training learning curve.
    Taken from the example page in scikit-learn documentation.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.

    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # Shade +/- one standard deviation around each mean score curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score")

    plt.legend(loc="best")
    return plt
# -

# Get the raw data from the csv file and take a peek
air_bnb_listings_raw = pd.read_csv('listings.csv')
air_bnb_listings_raw.head(2)

# +
# Learn the shape of the dataset
print(f'This dataset has {air_bnb_listings_raw.shape[0]} rows and {air_bnb_listings_raw.shape[1]} columns')

# Get some basic information about each of the columns
air_bnb_listings_raw.describe()
# -

# ## Neighbourhoods Analysis <a class="anchor" id="second-bullet"></a>

# ### Number Of Listings Per Neighbourhood <a class="anchor" id="second-1-bullet"></a>

# By analysing the graph below, we can see that the 10 neighbourhoods with the most listings are all inside the center of Lisbon,
# which is not surprising since the center is by far the most tourism prone area.
# Count listings per neighbourhood and show the ten busiest.
nr_listings_per_neighbourhood = air_bnb_listings_raw.groupby(['neighbourhood'])['id'].count()
nr_listings_per_neighbourhood.sort_values(ascending=False).head(10)

# Horizontal bar chart restricted to neighbourhoods with more than 400 listings.
mask0 = nr_listings_per_neighbourhood > 400
nr_listings_per_neighbourhood_sorted = nr_listings_per_neighbourhood.sort_values()
plt.barh(nr_listings_per_neighbourhood_sorted[mask0].index, nr_listings_per_neighbourhood_sorted[mask0], color = 'blue')
plt.xlabel('Number of listings')
plt.ylabel('Neighbourhood')
plt.title('Number Of Listings Per Neighbourhood')
plt.show()
plt.close()

# ### Review Score Per Neighbourhood <a class="anchor" id="second-2-bullet"></a>

# By analysing the graph below, we can see that there is not much of a difference, between the review scores for each of the neighbourhoods.
# Therefore, we can conclude that all neighbourhood are well liked by the guests.

# Mean review score per neighbourhood, top ten shown as cell output.
review_score_per_neighbourhood = air_bnb_listings_raw.groupby(['neighbourhood'])['review_scores_rating'].mean()
review_score_per_neighbourhood.sort_values(ascending=False).head(10)

# mask0 > 0 keeps every neighbourhood; y tick labels are hidden because all
# bars are nearly equal anyway.
mask0 = review_score_per_neighbourhood > 0
review_score_per_neighbourhood_sorted = review_score_per_neighbourhood.sort_values()
plt.barh(review_score_per_neighbourhood_sorted[mask0].index,review_score_per_neighbourhood_sorted[mask0], color = 'blue')
plt.xlabel('Review Score')
plt.ylabel('Neighbourhood')
plt.title('Review Score Per neighbourhood')
plt.yticks([])
plt.show()
plt.close()

# ### Price Per Neighbourhood <a class="anchor" id="second-3-bullet"></a>

# Convert the '$1,234.56'-style price strings into a numeric column.
air_bnb_listings_raw['float_price'] = air_bnb_listings_raw['price'].apply(parse_into_float)

# By analysing the graph below, we notice that "Bairro Alto" has by far the most expensive price. The neighbourhoods "Benfica", "São Domingos de Benfica" and "Carnide" also have higher prices than average. This might be explained to the fact that "Estádio da Luz", a football stadium that receives a great number of high profile internacional and nacional football games is in their vicinity.

mean_prices_per_neighbourhood = air_bnb_listings_raw.groupby(['neighbourhood'])['float_price'].mean()
mean_prices_per_neighbourhood.sort_values(ascending=False).head(10)

# Only plot neighbourhoods whose average nightly price exceeds 90.
mean_prices_per_neighbourhood_sorted = mean_prices_per_neighbourhood.sort_values()
mask0 = mean_prices_per_neighbourhood_sorted > 90
plt.barh(mean_prices_per_neighbourhood_sorted[mask0].index,mean_prices_per_neighbourhood_sorted[mask0], color = 'blue')
plt.xlabel('Average Price')
plt.ylabel('Neighbourhood')
plt.title('Average Price Per Neighbourhood')
plt.show()
plt.close()

# ### Super Host Analysis <a class="anchor" id="third-bullet"></a>

# As we can see below, super hosts have on average a score of 96, while regular hosts only have an average score of 90. Therefore, it is safe to say that guests are satisfied with super hosts overall.

# +
# Encode host_is_superhost ('t'/'f') as 1/0 (NaN for anything else).
air_bnb_listings_raw['host_is_superhost_encoded'] = air_bnb_listings_raw['host_is_superhost'].apply(get_true_false_dummy)
print(air_bnb_listings_raw['host_is_superhost_encoded'].unique())
# -

# Compare mean review score for super hosts (1) vs regular hosts (0).
review_score_per_super_user = air_bnb_listings_raw.groupby(['host_is_superhost_encoded'])['review_scores_rating'].mean()
review_score_per_super_user.sort_values(ascending=False).head(10)

# ### Review Score Prediction <a class="anchor" id="fourth-bullet"></a>

# In the next section we will use forest regression methods in order to try predict the guests' review score based on the other features of the dataset. Afterwards, feature importance will be analysed based on the results of these regression methods.

# ### Nan values analysis <a class="anchor" id="fifth-bullet"></a>

# The dataset has a total of 29819 NaN values. We will classify any column with more than 60% NaN values an outlier column. By analysing the bar plot below, we can confirm that 60% appears to be the ideal threshold value. The columns with less than 5% NaN values were excluded from the bar plot.
#
# The outlier columns discovered in this step will be dropped from the dataset.
# Report the overall NaN count and the per-column NaN ratio (columns > 60%).
print(f'The total number of nan values in this dataset is {air_bnb_listings_raw.isnull().sum().sum()}')
null_values_per_column = air_bnb_listings_raw.isnull().sum()/air_bnb_listings_raw.shape[0]
print(null_values_per_column.sort_values(ascending=False)[null_values_per_column > .6])

# +
# Bar plot of NaN ratios: columns above the 60% threshold in red (to be
# dropped), the rest in blue; columns under 5% NaN are excluded entirely.
mask0 = null_values_per_column > 0.05
mask1 = null_values_per_column >= 0.6
mask2 = null_values_per_column < 0.6
plt.bar(null_values_per_column[mask0][mask1].index.values, null_values_per_column[mask0][mask1].values, color = 'red')
plt.xticks([])
plt.bar(null_values_per_column[mask0][mask2].index.values, null_values_per_column[mask0][mask2].values, color = 'blue')
plt.xlabel('Columns')
plt.ylabel('Ratio of NaN')
plt.title('NaN ratio per column')
plt.show()
plt.close()
# -

# The previously identified outlier columns will be dropped now.

# +
# Drop the outlier columns (those with >= 60% NaN values, identified above).
outlier_columns = ['xl_picture_url', 'thumbnail_url', 'medium_url', 'host_acceptance_rate', \
                   'square_feet', 'monthly_price', 'weekly_price']
air_bnb_listings = air_bnb_listings_raw.drop(outlier_columns, axis=1)

# Also drop the original price and is_super_host columns, since now we have the dummies
to_drop = ['price', 'host_is_superhost']
air_bnb_listings = air_bnb_listings.drop(to_drop, axis=1)

# Next we will drop columns which are not relevant to the analysis, like names, ids,
# free text fields and image urls. We will also drop very similar/repeated columns
to_drop = ['id', 'listing_url', 'scrape_id', 'name', 'summary', 'space', 'description', 'neighborhood_overview', \
           'notes', 'transit', 'access', 'interaction', 'house_rules', 'picture_url', \
           'host_id', 'host_url', 'host_name', 'host_since', 'host_location', 'host_about', 'host_thumbnail_url', \
           'host_picture_url', 'host_total_listings_count', 'street', 'state', 'zipcode', 'market', 'smart_location', \
           'country_code', 'country', 'latitude', 'longitude', 'calendar_updated', 'first_review', 'last_review', \
           'license', 'jurisdiction_names', 'calendar_last_scraped', 'last_scraped', 'host_neighbourhood', \
           'neighbourhood_cleansed', 'neighbourhood_group_cleansed', 'city', 'calculated_host_listings_count']
air_bnb_listings = air_bnb_listings.drop(to_drop, axis=1)
# -

# Print the shape of the dataset after dropping the columns
air_bnb_listings.shape

# +
# Drop all rows with na values in the price columns
air_bnb_listings = air_bnb_listings.dropna(subset=['float_price'])
print(f'shape after dropping rows with nan in price: {air_bnb_listings.shape}')

# Drop all rows with number of na values above threshold
thresh_ratio = 0.2
missing_values_in_rows = air_bnb_listings.isnull().sum(axis=1)
indexes_above_threshold = missing_values_in_rows[missing_values_in_rows>=thresh_ratio*air_bnb_listings.shape[1]]
# NOTE(review): indexes_above_threshold.index.values holds row *labels*, but
# they are used here as positional indexers into air_bnb_listings.index; after
# the dropna above the index has gaps, so labels and positions may no longer
# coincide — verify the intended rows are the ones being dropped.
air_bnb_listings_no_na = air_bnb_listings.drop(air_bnb_listings.index[indexes_above_threshold.index.values])
# air_bnb_listings_no_na = air_bnb_listings.dropna(axis = 'rows', thresh = 0.95*air_bnb_listings.shape[0])
print(f'shape after dropping rows with nan above threshold: {air_bnb_listings_no_na.shape}')

# +
# Impute the missing values that still exist with the mean or the mode:
# numeric columns get the column mean, everything else the column mode.
cols1 = air_bnb_listings_no_na.select_dtypes([np.number]).columns
cols2 = air_bnb_listings_no_na.select_dtypes(exclude = [np.number]).columns

if len(cols1) > 0:
    air_bnb_listings_no_na[cols1] = air_bnb_listings_no_na[cols1].fillna(air_bnb_listings_no_na[cols1].mean())
if len(cols2) > 0:
    air_bnb_listings_no_na[cols2] = air_bnb_listings_no_na[cols2].fillna(air_bnb_listings_no_na[cols2].mode().iloc[0])
# -

# Check if there's any nan values left
air_bnb_listings_no_na.isnull().sum().sum()

# Drop these columns because all the rows have the same value
to_drop = ['is_business_travel_ready', 'experiences_offered', 'has_availability']
air_bnb_listings_no_na = air_bnb_listings_no_na.drop(to_drop, axis=1)

# ### Encoding <a class="anchor" id="sixth-bullet"></a>

# The remaining mixed type columns will be reengineered and the categorical columns will be encoded.

# Figure out which are the categorical columns and the mixed type columns so that they can be handled appropriately
cat_air_bnb_listings_no_na = air_bnb_listings_no_na.select_dtypes(include=['object'])
print(cat_air_bnb_listings_no_na.columns)

# +
# Encode true/false columns ('t'/'f' strings -> 1/0).
columns_to_encode = ['host_has_profile_pic', 'host_identity_verified', 'is_location_exact', 'instant_bookable', \
                     'require_guest_profile_picture', 'require_guest_phone_verification', 'requires_license']
for column_to_encode in columns_to_encode:
    air_bnb_listings_no_na[column_to_encode] = air_bnb_listings_no_na[column_to_encode].apply(get_true_false_dummy)

# +
# Encode price columns ('$...' strings -> floats).
columns_to_encode = ['security_deposit', 'cleaning_fee', 'extra_people' ]
for column_to_encode in columns_to_encode:
    air_bnb_listings_no_na[column_to_encode] = air_bnb_listings_no_na[column_to_encode].apply(parse_into_float)
# -

# The Amenities column has incredibly dirty data because the same amenity can have many different names. Therefore, instead of trying to create dummy columns for each amenity, we will count how many amenities each listing has and encode it into a new column.

# Generate new column for the number of amenities per listing. Drop the old amenities column
air_bnb_listings_no_na['amenities_count'] = air_bnb_listings_no_na['amenities'].apply(parse_lists_into_count)
air_bnb_listings_no_na = air_bnb_listings_no_na.drop(['amenities'], axis = 1)

# +
# Generate a new column for each type of host verification using the MultiLabelBinarizer
air_bnb_listings_no_na['host_verifications_enc'] = air_bnb_listings_no_na['host_verifications'].apply(parse_lists)
mlb = MultiLabelBinarizer()
print('Shape of the dataframe before encoding host_verifications:')
print(air_bnb_listings_no_na.shape)
# pop() removes the temporary list column while feeding it to the binarizer;
# the resulting one-hot frame is joined back with a 'host_verification_' prefix.
air_bnb_listings_no_na = air_bnb_listings_no_na.join(pd.DataFrame(mlb.fit_transform(air_bnb_listings_no_na.pop('host_verifications_enc')),
                          columns=mlb.classes_,
                          index=air_bnb_listings_no_na.index).add_prefix('host_verification_'))
air_bnb_listings_no_na = air_bnb_listings_no_na.drop(['host_verifications'], axis=1)
print('Shape of the dataframe after encoding host_verifications:')
print(air_bnb_listings_no_na.shape)
# -

# Let's check what columns we have on the dataset now:
air_bnb_listings_no_na.columns

# Are there any categorical columns left to handle?
cat_air_bnb_listings_no_na = air_bnb_listings_no_na.select_dtypes(include=['object'])
print(cat_air_bnb_listings_no_na.columns)

# Inspect the shape of the dataset before encoding
air_bnb_listings_no_na.shape

# Encode the remaining categorical columns
air_bnb_listings_no_na = pd.get_dummies(columns = cat_air_bnb_listings_no_na.columns.values, data = air_bnb_listings_no_na)

# Inspect the shape of the dataset after encoding
air_bnb_listings_no_na.shape

# Are there any categorical columns left to handle?
cat_air_bnb_listings_no_na = air_bnb_listings_no_na.select_dtypes(include=['object'])
print(cat_air_bnb_listings_no_na.columns)

# ### Scaling <a class="anchor" id="seventh-bullet"></a>

# Since there are no more categorical columns to handle, we will apply standard scaling to the data next.
# Apply Scaling scaler = StandardScaler(with_mean=True, with_std=True) air_bnb_listings_no_na[air_bnb_listings_no_na.columns] = scaler.fit_transform(air_bnb_listings_no_na[air_bnb_listings_no_na.columns]) # Check if the data looks good air_bnb_listings_no_na.head(2) y_true = air_bnb_listings_no_na['review_scores_rating'] features = air_bnb_listings_no_na.drop(['review_scores_rating', 'review_scores_value', \ 'review_scores_accuracy', 'review_scores_cleanliness', \ 'review_scores_communication', 'review_scores_checkin', \ 'review_scores_location'], axis = 1) # ### Random Forest Grid Search <a class="anchor" id="eighth-bullet"></a> # Split the 'features' and 'income' data into training and testing sets X_train, X_test, y_train, y_test = train_test_split(features, y_true, test_size = 0.1, random_state = 5) # Show the results of the split print("Training set has {} samples.".format(X_train.shape[0])) print("Testing set has {} samples.".format(X_test.shape[0])) # + clf = RandomForestRegressor(random_state=5) parameters = { 'criterion' : ['mse', 'mae'], \ 'max_depth': [5, 10, 25], \ 'n_estimators': [75, 100, 150, 250]} scorer = make_scorer(r2_score) grid_obj = GridSearchCV(clf, param_grid = parameters, scoring=scorer, n_jobs=3) grid_fit = grid_obj.fit(X_train, y_train) best_clf = grid_fit.best_estimator_ clf = clf.fit(X_train, y_train) y_pred = clf.predict(X_test) best_predictions = best_clf.predict(X_test) print(f'(Unoptimized) r_2 score is: {r2_score(y_test, y_pred)}') print(f'(Optimized) r_2 score is: {r2_score(y_test, best_predictions)}') # - # ### Cross Validation Learning Curve <a class="anchor" id="nineth-bullet"></a> # + title = "Learning Curves" # Cross validation with 100 iterations to get smoother mean test and train # score curves, each time with 20% data randomly selected as a validation set. 
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0) estimator = best_clf plot_learning_curve(estimator, title, features, y_true, ylim=(0.0, 1.01), cv=cv, n_jobs=3) # - # ### Feature Importance Graph <a class="anchor" id="thenth-bullet"></a> # + features_for_graph = features.columns[:features.shape[1]] importances = grid_fit.best_estimator_.feature_importances_ indices = np.argsort(importances) plt.title('Feature Importances') plt.barh(range(len(indices[-15:])), importances[indices[-15:]], color='b', align='center') plt.yticks(range(len(indices[-15:])), features_for_graph[indices[-15:]]) plt.xlabel('Relative Importance'); # - # ## Conclusion <a class="anchor" id="conclusion"></a> # ### Response to Question 1: # # Given the results of the analysis, the top 3 neightbourhoods per number of listings, review score and price were: # # **Number of Listings** # 1. Alfama (1376) # 2. Baixa (941) # 3. <NAME> (815) # # **Review Score** # 1. Charneca (98.25) # 2. <NAME> (94.17) # 3. <NAME> (94.14) # # **Price in American Dollars** # 1. Bairro Alto (300.00) # 2. Benfica (210.59) # 3. <NAME> (197.35) # # ### Response to Question 2: # # The response is yes! Super hosts do have more positive reviews on average than regular hosts. # # **Average Review Score** # 1. Super Hosts - 96.862366 # 2. Regular Hosts - 90.778280 # # ### Response to Question 3: # # By looking at the feature importance graph above, is it safe to say that super hosts do have the highest review scores. Price, the number of amenities and availability seem to the following most important factors. It is interesting to notice that the cleaning fee and whether extra people are allowed or not seems to have an impact as well.
Airbnb-Lisbon-Data-Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ca48dee6-4eec-4600-9d0f-b37babbff76b", "showTitle": false, "title": ""}
# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "90dfbf3d-c220-4962-a4c5-1129f24ec086", "showTitle": false, "title": ""}
# # 20.SentenceDetectorDL for Healthcare v2.7.0

# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6894bf5e-d81b-4c16-bd41-e4866701c893", "showTitle": false, "title": ""}
# `SentenceDetectorDL` (SDDL) is based on a general-purpose neural network model for sentence boundary detection. The task of sentence boundary detection is to identify sentences within a text. Many natural language processing tasks take a sentence as an input unit, such as part-of-speech tagging, dependency parsing, named entity recognition or machine translation.
#
# In this model, we treated the sentence boundary detection task as a classification problem using a DL CNN architecture. We also modified the original implementation a little bit to cover broken sentences and some impossible end-of-line characters.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "97295fc9-9b98-4494-89dc-c9bf4ae043fb", "showTitle": false, "title": ""} import os import json import sys import string import numpy as np import pandas as pd import sparknlp import sparknlp_jsl from sparknlp.base import * from sparknlp.util import * from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.pretrained import ResourceDownloader from pyspark.sql import functions as F from pyspark.ml import Pipeline, PipelineModel pd.set_option('display.max_columns', 100) pd.set_option('display.width', 1000) pd.set_option('max_colwidth', 800) pd.set_option('display.expand_frame_repr', False) print('sparknlp_jsl.version : ',sparknlp_jsl.version()) spark # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4e3b6ac8-7f41-4b31-a94b-c712cfafcf0f", "showTitle": false, "title": ""} documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencerDL_hc = SentenceDetectorDLModel.pretrained("sentence_detector_dl_healthcare","en","clinical/models") \ .setInputCols(["document"]) \ .setOutputCol("sentences") sd_pipeline_hc = PipelineModel(stages=[documenter, sentencerDL_hc]) sd_model_hc = LightPipeline(sd_pipeline_hc) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3793b90d-4ac9-460d-86ef-532490aaae2c", "showTitle": false, "title": ""} # ## SetenceDetectorDL_HC Performance on different Clinical Texts # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c5cf907e-45d7-4273-95b3-8f61c498f272", "showTitle": false, "title": ""} text_1 = '''He was given boluses of MS04 with some effect, he has since been placed on a PCA - he take 80mg of oxycontin at home, his PCA dose is ~ 2 the morphine dose of the oxycontin, he has also received ativan for anxiety.Repleted with 20 meq kcl po, 30 mmol K-phos iv and 2 gms mag so4 iv. 
LASIX CHANGED TO 40 PO BID WHICH IS SAME AS HE TAKES AT HOME - RECEIVED 40 PO IN AM - 700CC U/O TOTAL FOR FLUID NEGATIVE ~ 600 THUS FAR TODAY, ~ 600 NEG LOS. pt initially hypertensive 160s-180s gave prn doses if IV hydralazine without effect, labetalol increased from 100 mg to 200 mg PO TID which kept SBP > 160 for rest of night.Transferred to the EW found to be hypertensive BP 253/167 HR 132 ST treated with IV NTG gtt 20-120mcg/ and captopril 12.5mg sl x3. During the day pt's resp status has been very tenuous, responded to lasix in the am but then became hypotensive around 1800 tx with 500cc NS bolus and a unit of RBC did improve her BP to 90-100/60 but also became increasingly more tachypneic, RR 30-40's crackles went from bases bilaterally to [**2-14**] way up bilaterally.Lasix given 10mg x 2 during the evening without much change in her respiratory status.10units iv insulin given, 1 amps of 50%dextrose, 1amp of sodium bicard, 2gm calc gluconate.LOPRESSOR 7MG IV Q6H, ENALAPRIL 0.625MG IV Q6H TOLERATED. ID: Continues to receive flagyl, linazolid, pipercillin, ambisome, gent, and acyclovir, the acyclovir was changed from PO to IV-to be given after dialysis.Meds- Lipitor, procardia, synthroid, lisinopril, pepcid, actonel, calcium, MVI Social- Denies tobacco and drugs.''' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "dbf9bf09-2026-4b06-830d-797583643653", "showTitle": false, "title": ""} text_2 = '''ST 109-120 ST. Pt had two 10 beat runs and one 9 beat run of SVT s/p PICC line placement. Stable BP, VT nonsustained. Pt denies CP/SOB. EKG and echo obtained. Cyclying enzymes first CK 69. Cardiology consulted, awaiting echo report. Pt to be started on beta blocker for treatment of NSVT. ? secondary to severe illness.K+ 3.4 IV. IVF with 20meq KCL at 200cc/hr. S/p NSVT pt rec'd 40meq po and 40 meq IV KCL. K+ 3.9 repeat K+ at 8pm. Mg and Ca repleted. Please follow electrolyte SS. 
''' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fabee76f-322b-453d-a092-221663a328b9", "showTitle": false, "title": ""} text_3 = '''PT. IS A 56 Y/O FEMALE S/P CRANIOTOMY ON 7/16 FOR REMOVAL OF BENIGN CYSTIC LESION. SURGERY PERFORMED AT BIDMC.STARTED ON DILANTIN POST-OP FOR SEIZURE PROPHYLAXIS. 2 DAYS PRIOR TO ADMISSION PT DEVELOPED BILAT. EYE DISCHARGE-- SEEN BY EYE MD AND TREATED WITH SULFATE OPTHALMIC DROPS.ALSO DEVELOPED ORAL SORES AND RASH ON CHEST AND RAPIDLY SPREAD TO TRUNK, ARMS, THIGHS, BUTTOCKS, AND FACE WITHIN 24 HRS.UNABLE TO EAT DUE TO MOUTH PAIN. + FEVER, + DIARRHEA, WEAKNESS. PRESENTED TO EW ON 8/4 WITH TEMP 104.3 SBP 90'S.GIVEN NS FLUID BOLUS, TYLENOL FOR TEMP. SHE PUSTULAR RED RASH ON FACE, RED RASH NOTED ON TRUNK, UPPER EXTREMITIES AND THIGHS. ALSO BOTH EYES DRAINING GREENISH-YELLOW DRAINAGE. ADMITTED TO CCU ( MICU BORDER) FOR CLOSE OBSERVATION. ''' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "93aa9af6-538a-43f1-81de-e6f56aaea51f", "showTitle": false, "title": ""} text_4 = ''' Tylenol 650mg po q6h CVS: Aspirin 121.5mg po daily for graft patency, npn 7p-7a: ccu nsg progress note: s/o: does understand and speak some eng, family visiting this eve and states that pt is oriented and appropriate for them resp--ls w/crackles approx 1/2 up, rr 20's, appeared sl sob, on 4l sat when sitting straight up 95-99%, when lying flat or turning s-s sat does drop to 88-93%, pt does not c/o feeling sob, sat does come back up when sitting up, did rec 40mg iv lasix cardiac hr 90's sr w/occ pvc's, bp 95-106/50's, did not c/o any cp during the noc, conts on hep at 600u/hr, ptt during noc 79, am labs pnd, remains off pressors, at this time no further plans to swan pt or for her to go to cath lab, gi--abd soft, non tender to palpation, (+)bs, passing sm amt of brown soft stool, tol po's w/out diff renal--u/o cont'd low during the eve, team decided to give lasix 40mg in setting of crackles, decreased u/o and sob, did diuresis 
well to lasix, pt approx 700cc neg today access--pt has 3 peripheral iv's in place, all working, unable to draw bloods from pt d/t poor veins, pt is going to need access to draw bloods, central line or picc line social--son & dtr in visiting w/their famlies tonight, pt awake and conversing w/them a/p: cont to monitor/asses cvs follow resp status, additional lasix f/u w/team re: plan of care for her will need iv access for blood draws keep family & pt updated w/plan, Neuro:On propofol gtt dose increased from 20 to 40mcg/kg/min,moves all extrimities to pain,awaken to stimuly easily,purposeful movements.PERL,had an episode of seizure at 1815 <1min when neuro team in for exam ,responded to 2mg ativan on keprra Iv BID.2 grams mag sulfate given, IVF bolus 250 cc started, approx 50 cc in then dc'd d/t PAD's,2.5 mg IV lopressor given x 2 without effect. CCU NURSING 4P-7P S DENIES CP/SOB O. SEE CAREVUE FLOWSHEET FOR COMPLETE VS 1600 O2 SAT 91% ON 5L N/C, 1 U PRBC'S INFUSING, LUNGS CRACKLES BILATERALLY, LASIX 40MG IV ORDERED, 1200 CC U/O W/IMPROVED O2 SATS ON 4L N/C, IABP AT 1:1 BP 81-107/90-117/48-57, HR 70'S SR, GROIN SITES D+I, HEPARIN REMAINS AT 950 U/HR INTEGRELIN AT 2 MCGS/KG A: IMPROVED U/O AFTER LASIX, AWAITING CARDIAC SURGERY P: CONT SUPPORTIVE CARE, REPEAT HCT POST- TRANSFUSION, CHECK LYTES POST-DIURESIS AND REPLACE AS NEEDED, AWAITING CABG-DATE.Given 50mg IV benadryl and 2mg morphine as well as one aspirin PO and inch of NT paste. When pt remained tachycardic w/ frequent ectopy s/p KCL and tylenol for temp - Orders given by Dr. [**Last Name (STitle) 2025**] to increase diltiazem to 120mg PO QID and NS 250ml given X1 w/ moderate effect.Per team, the pts IV sedation was weaned over the course of the day and now infusing @ 110mcg/hr IV Fentanyl & 9mg/hr IV Verced c pt able to open eyes to verbal stimuli and nod head appropriately to simple commands (are you in pain?). 
A/P: 73 y/o male remains intubated, IVF boluses x2 for CVP <12, pt continues in NSR on PO amio 400 TID, dopamine gtt weaned down for MAPs>65 but the team felt he is overall receiving about the same amt fentanyl now as he has been in past few days, as the fentanyl patch 100 mcg was added a 48 hrs ago to replace the decrease in the IV fentanyl gtt today (fent patch takes at least 24 hrs to kick in).Started valium 10mg po q6hrs at 1300 with prn IV valium as needed.''' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "7e0a1ecf-2758-4764-bd3f-2882dec0771f", "showTitle": false, "title": ""} # !pip install Jinja2 # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "782248b2-2bf0-4a50-9506-8fc59af53e7f", "showTitle": false, "title": ""} # Select one of the medical texts given above for the prediction def get_sentences_sddl(text): data = [] for anno in sd_model_hc.fullAnnotate(text)[0]["sentences"]: data.append((anno.metadata["sentence"], anno.begin, anno.end, anno.result)) sentences_df = pd.DataFrame(data, columns=["sentence","begin", "end", "result" ]) return sentences_df.style.set_properties(**{'text-align': 'left', 'border-color':'Black','border-width':'thin','border-style':'solid'}) get_sentences_sddl(text_4) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "faf1c618-7deb-4737-ac9c-b56c692c5122", "showTitle": false, "title": ""} get_sentences_sddl(text_1) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2ff8c8d8-531c-4020-923f-0cca6a3c0f65", "showTitle": false, "title": ""} get_sentences_sddl(text_2) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1bbe2cac-223d-42ff-b6d2-9009a128906c", "showTitle": false, "title": ""} get_sentences_sddl(text_3) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "359db277-1b09-440b-928a-ad66fad28d35", "showTitle": false, "title": ""} # ## Compare with Spacy Sentence Splitter # + 
application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "cb1f5560-fb25-4a73-a813-e9ceca59e453", "showTitle": false, "title": ""} # !pip install spacy # !python3 -m spacy download en_core_web_sm # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "aca691a6-61e3-4dcf-b5f2-bf88c0b1541c", "showTitle": false, "title": ""} import sys sys.path.append('/databricks/driver/') sys.path.append('/databricks/driver/spacy.py') sys.path.append('/databricks/driver/en_core_web_sm.py') import spacy import en_core_web_sm nlp = en_core_web_sm.load() # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "55a0488f-34a8-4fed-9088-b5bb3aa98a1c", "showTitle": false, "title": ""} def get_sentences_spacy(text): data = [] for i,sent in enumerate(nlp(text).sents): data.append((i, sent)) sentences_df = pd.DataFrame(data, columns=["sentence","result" ]) return sentences_df.style.set_properties(**{'text-align': 'left', 'border-color':'Black','border-width':'thin','border-style':'solid'}) get_sentences_spacy(text_4) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d535b31a-18cd-43c1-bcaa-9d55bf8871b4", "showTitle": false, "title": ""} get_sentences_spacy(text_1) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "cbe79718-bb10-426f-bb3d-66f9cd0f7c79", "showTitle": false, "title": ""} get_sentences_spacy(text_2) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f12f6208-ab4a-46c5-9e91-ded43903327b", "showTitle": false, "title": ""} get_sentences_spacy(text_3) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "422ed5a6-5c3f-4895-8690-4ee6343ed985", "showTitle": false, "title": ""} # ## Test and Compare with Spacy on Broken Sentences # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c63196f2-8abb-46aa-a23b-caf023226c94", "showTitle": false, "title": ""} random_broken_text_1 = '''He was given boluses of MS04 with some effect, he has since been placed on a PCA - 
he take 80mg of oxycontin at home, his PCA dose is ~ 2 the morphine dose of the oxycontin, he has also received ativan for anxiety.Repleted with 20 meq kcl po, 30 m mol K-phos iv and 2 gms mag so4 iv. LASIX CHANGED TO 40 PO BID WHICH IS SAME AS HE TAKES AT HOME - RECEIVED 40 PO IN AM - 700CC U/O TOTAL FOR FLUID NEGATIVE ~ 600 THUS FAR TODAY, ~ 600 NEG LOS pt initially hypertensive 160s-180s gave prn doses if IV hydralazine without effect, labetalol increased from 100 mg to 200 mg PO TID which kept SBP > 160 for rest of night.Transferred to the EW found to be hypertensive BP 253/167 HR 132 ST treated with IV NTG gtt 20-120mcg/ and captopril 12.5mg sl x3. During the day pt's resp status has been very tenuous, responded to lasix in the am but then became hypotensive around 1800 tx with 500cc NS bolus and a unit of RBC did improve her BP to 90-100/60 but also became increasingly more tachypneic, RR 30-40's crackles went from bases bilaterally to [**2-14**] way up bilaterally.Lasix given 10 mg x 2 during the evening without much change in her respiratory status.10units iv insulin given, 1 amps of 50%dextrose, 1amp of sodium bicard, 2gm calc gluconate.LOPRESSOR 7MG IV Q6H, ENALAPRIL 0.625MG IV Q6H TOLERATED. 
ID: Continues to receive flagyl, linazolid, pipercillin, ambisome, gent, and acyclovir, the acyclovir was changed from PO to IV-to be given after dialysis.Meds- Lipitor, procardia, synthroid, lisinopril, pepcid, actonel, calcium, MVI Social- Denies tobacco and drugs.''' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5ef7ffac-5380-4a6d-88ec-dca8ffbebc35", "showTitle": false, "title": ""} random_broken_text_2 = '''A 28-year- old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one- week history of polyuria , polydipsia , poor appetite , and vomiting.Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection.She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . 
Pertinent laboratory findings on admission were : serum glucose 111 mg /dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg /dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 .Serum lipase was normal at 43U/L .Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia.The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission.However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L.The β-hydroxybutyrate level was obtained and found to be elevated at 5. 29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again.The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/ dL , within 24 hours.Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . 
The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.It was determined that all SGLT2 inhibitors should be discontinued indefinitely .''' # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2b1d0950-474e-4984-8bee-07d78616d2a4", "showTitle": false, "title": ""} random_broken_text_3 = ''' Tylenol 650mg po q6h CVS: Aspirin 121.5mg po daily for graft patency.npn 7p-7a: ccu nsg progress note: s/o: does understand and speak some eng, family visiting this eve and states that pt is oriented and appropriate for them resp--ls w/crackles approx 1/2 up, rr 20's, appeared sl sob, on 4l sat when sitting straight up 95-99%, when lying flat or turning s-s sat does drop to 88-93%, pt does not c/o feeling sob, sat does come back up when sitting up, did rec 40 mg iv lasix cardiac--hr 90's sr w/occ pvc's, bp 95- 106/50's, did not c/o any cp during the noc, conts on hep at 600u/ hr, ptt during noc 79, am labs pnd, remains off pressors, at this time no further plans to swan pt or for her to go to cath lab, gi--abd soft, non tender to palpation, (+)bs, passing sm amt of brown soft stool, tol po's w/out diff renal--u/o cont'd low during the eve, team decided to give lasix 40mg in setting of crackles, decreased u/o and sob, did diuresis well to lasix, pt approx 700cc neg today access--pt has 3 peripheral iv's in place, all working, unable to draw bloods from pt d/t poor veins, pt is going to need access to draw bloods, ?central line or picc line social--son & dtr in visiting w/their famlies tonight, pt awake and conversing w/them a/p: cont to monitor/asses cvs follow resp status, additional lasix f/u w/team re: plan of care for her will need iv access for blood draws keep family & pt updated w/plan, Neuro:On propofol gtt dose increased from 20 to 40mcg/kg/min,moves all extrimities to pain,awaken to stimuly easily,purposeful 
movements.PERL,had an episode of seizure at 1815 <1min when neuro team in for exam ,responded to 2mg ativan on keprra Iv BID.2 grams mag sulfate given, IVF bolus 250 cc started, approx 50 cc in then dc'd d/t PAD's, 2.5 mg IV lopressor given x 2 without effect. CCU NURSING 4P-7P S DENIES CP/SOB O. SEE CAREVUE FLOWSHEET FOR COMPLETE VS 1600 O2 SAT 91% ON 5L N/C, 1 U PRBC'S INFUSING, LUNGS CRACKLES BILATERALLY, LASIX 40MG IV ORDERED, 1200 CC U/O W/IMPROVED O2 SATS ON 4L N/C, IABP AT 1:1 BP 81-107/90-117/48-57, HR 70'S SR, GROIN SITES D+I, HEPARIN REMAINS AT 950 U/HR INTEGRELIN AT 2 MCGS/KG A: IMPROVED U/O AFTER LASIX, AWAITING CARDIAC SURGERY P: CONT SUPPORTIVE CARE, REPEAT HCT POST-TRANSFUSION, CHECK LYTES POST-DIURESIS AND REPLACE AS NEEDED, AWAITING CABG -DATE.Given 50mg IV benadryl and 2mg morphine as well as one aspirin PO and inch of NT paste. When pt remained tachycardic w/ frequent ectopy s/p KCL and tylenol for temp - Orders given by Dr. [**Last Name (STitle) 2025**] to increase diltiazem to 120mg PO QID and NS 250ml given X1 w/ moderate effect.Per team, the pts IV sedation was weaned over the course of the day and now infusing @ 110mcg/hr IV Fentanyl & 9mg/hr IV Verced c pt able to open eyes to verbal stimuli and nod head appropriately to simple commands (are you in pain?) . 
A/P: 73 y/o male remains intubated, IVF boluses x2 for CVP <12, pt continues in NSR on PO amio 400 TID, dopamine gtt weaned down for MAPs>65 but the team felt he is overall receiving about the same amt fentanyl now as he has been in past few days, as the fentanyl patch 100 mcg was added a 48 hrs ago to replace the decrease in the IV fentanyl gtt today (fent patch takes at least 24 hrs to kick in).Started valium 10mg po q6hrs at 1300 with prn IV valium as needed.''' # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d6ed75b7-fa8c-4f38-8d96-31842325a5a2", "showTitle": false, "title": ""} # ### Spark NLP SentenceDetectorDL (Broken Text-Clinical Notes) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "dae7d7b6-9575-4564-84e3-bec6ac7239f8", "showTitle": false, "title": ""} get_sentences_sddl(random_broken_text_1) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "70a4dc8b-40a1-43cb-9412-e05a114f0a90", "showTitle": false, "title": ""} get_sentences_sddl(random_broken_text_2) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bd9047e4-0552-4119-b806-c906a58ec91e", "showTitle": false, "title": ""} get_sentences_sddl(random_broken_text_3) # + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "382a2166-c1f6-4000-8252-b51133127e3b", "showTitle": false, "title": ""} # ### Spacy Sentence Detection (Broken Sentences-Clinical Notes) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "96893309-deac-44d0-8f4f-cbb445ed1bfc", "showTitle": false, "title": ""} get_sentences_spacy(random_broken_text_1) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "7998da7f-f568-4241-b61b-425166607956", "showTitle": false, "title": ""} get_sentences_spacy(random_broken_text_2) # + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "fcc81b57-d6a5-479e-b70b-36187c85031a", "showTitle": false, "title": ""} get_sentences_spacy(random_broken_text_3) # 
+ [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "32dab596-d8e4-42a2-b1d2-3f8540491eb8", "showTitle": false, "title": ""} # End of Notebook # 20
tutorials/Certification_Trainings/Healthcare/databricks_notebooks/2.7/20.SentenceDetectorDL_HC_v2.7.0.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Deuterium-labelled cuticular-hydrocarbon (CHC) transfer analysis:
# GC/MS peak areas are normalized against a C18 internal standard and
# visualized per treatment group as box-and-whisker plots with overlaid
# per-well data points (bokeh).

# +
import numpy as np
import pandas as pd
import bokeh
import bokeh.plotting
from bokeh.plotting import ColumnDataSource
from bokeh.models import LabelSet
from bokeh.models import FuncTickFormatter
import bokeh.io
bokeh.io.output_notebook()
# -

# Lines starting with '#' in the CSV are treated as comments and skipped.
df = pd.read_csv('191025_Deuterium_Transfer_Peak_Areas.csv', comment='#')
df.tail()


# function to normalize the peak area of each unique peak using the area of the C18 peak for each sample
def normalize_area(data):
    """Add 'Normalized Area' and 'Hydrocarbon amount' (ng) columns to *data*.

    Each peak's area is divided by the C18 internal-standard area of its
    sample; 25 ng of C18 was injected per sample, so normalized area * 25
    gives the absolute amount in ng.  Mutates and returns *data*; a second
    call is a no-op because the columns already exist.
    """
    # check to make sure that the dataframe hasn't already been normalized
    if not {'Normalized Area', 'Hydrocarbon amount'}.issubset(data.columns):
        # Create array of c18 values repeated in groups of three, matching the
        # number of unique peaks for each sample.
        # NOTE(review): assumes exactly 3 non-C18 peaks per sample and that rows
        # are ordered sample-by-sample -- TODO confirm against the CSV layout.
        c18_Areas = np.repeat(data['Area'].loc[data['Peak ID'] == 'C18'],3).reset_index().drop("index", axis=1)
        # Divide the area of each peak in the dataframe by the corresponding c18 peak area
        data['Normalized Area'] = np.divide(data['Area'],c18_Areas['Area'])
        # Calculate the hydrocarbon amount, in ng, by multiplying the normalized
        # peak area by 25.  25 ng of c18 was injected in each sample (1 microliter
        # of a 25 ng/microliter solution of c18 in hexane).
        data['Hydrocarbon amount'] = data['Normalized Area']*25
    return data


df = normalize_area(df)


# function to create an identifier for each sample in the order
# (Liometopum or not : Beetle : Type)
def add_identifier(data):
    """Add an 'Identifier' column, e.g. 'LSU' = Liometopum + Sceptobius + U.

    Idempotent: does nothing if the column already exists.  Returns a new
    (merged) frame rather than mutating in place.
    """
    # check if identifier column already exists
    if not {'Identifier'}.issubset(data.columns):
        # create identifier column, populating it with the beetle species used in each well
        data = data.merge(data[['WellNumber','Species']].loc[data['Species'] != 'L'].rename(columns={"Species": "Identifier"}).drop_duplicates())
        # modify the identifier column, adding a leading 'L' for Liometopum runs
        # and adding a suffix from the 'Type' column
        data['Identifier'] = ((data['Species']=='L')*pd.Series('L',index=data.index)) + data['Identifier'] + data['Type']
    return data


df = add_identifier(df)


def box_and_whisker(data,plt,x_vals,y_vals):
    """Draw a box-and-whisker layer for *y_vals* grouped by *x_vals*.

    Parameters:
        data   -- DataFrame containing the x_vals and y_vals columns
        plt    -- bokeh figure to draw on (returned for chaining)
        x_vals -- name of the categorical grouping column
        y_vals -- name of the numeric value column
    """
    groups = data.groupby(x_vals)
    q1 = groups.quantile(q=0.25)
    q2 = groups.quantile(q=0.5)
    q3 = groups.quantile(q=0.75)
    iqr = q3 - q1
    # Tukey fences: 1.5 * IQR beyond the quartiles
    upper = q3 + 1.5*iqr
    lower = q1 - 1.5*iqr
    # find the outliers for each category
    def outliers(group):
        cat = group.name
        return group[(group[y_vals] > upper.loc[cat][y_vals]) | (group[y_vals] < lower.loc[cat][y_vals])][y_vals]
    out = groups.apply(outliers).dropna()
    # prepare outlier data for plotting, we need coordinates for every outlier.
    # NOTE(review): outx/outy are collected but never drawn below -- presumably
    # outlier glyphs were removed at some point; verify whether this branch is
    # still needed.
    if not out.empty:
        outx = []
        outy = []
        for keys in out.index:
            outx.append(keys[0])
            outy.append(out.loc[keys[0]].loc[keys[1]])
    # if no outliers, shrink lengths of stems to be no longer than the minimums or maximums
    qmin = groups.quantile(q=0.00)
    qmax = groups.quantile(q=1.00)
    upper[y_vals] = [min([x,y]) for (x,y) in zip(list(qmax.loc[:,y_vals]),upper[y_vals])]
    lower[y_vals] = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,y_vals]),lower[y_vals])]
    # stems
    plt.segment(q3.reset_index()[x_vals], upper[y_vals], q3.reset_index()[x_vals], q3[y_vals], line_color="black")
    plt.segment(q3.reset_index()[x_vals], lower[y_vals], q3.reset_index()[x_vals], q1[y_vals], line_color="black")
    # boxes
    plt.vbar(q3.reset_index()[x_vals], 0.7, q2[y_vals], q3[y_vals], fill_color=None, line_color="black")
    plt.vbar(q3.reset_index()[x_vals], 0.7, q1[y_vals], q2[y_vals], fill_color=None, line_color="black")
    # whiskers (almost-0 height rects simpler than segments)
    plt.vbar(q3.reset_index()[x_vals], top=lower[y_vals],bottom=lower[y_vals], width=0.2, line_color="black")
    plt.vbar(q3.reset_index()[x_vals], top=upper[y_vals],bottom=upper[y_vals], width=0.2, line_color="black")
    return plt


# +
# D50C24 transfer plot: box-and-whisker per treatment group plus one
# jittered line/point series per well, with special colors for wells whose
# 'Outlier' flag records an experimental problem.
plotting_df=df.loc[df['Peak ID']=='D50C24']
#Identifiers = ['LSU','SU','LSL','SL','LPU','PU','LPL','PL','LDU','DU','LDL','DL',]
Identifiers = ['LSU','SU','LSL','SL','LPU','PU','LPL','PL']
# Blank labels hide the Liometopum-side tick so each pair reads as one group.
Labels={'LSU': '','SU': 'Sceptobius Control','LSL': '','SL': 'Sceptobius Treated', 'LPU': '','PU': 'Platyusa Control','LPL': '','PL': 'Platyusa Treated', 'LDU': '','DU': 'Dalotia Control','LDL': '','DL': 'Dalotia Treated'}
# Fixed seed so the per-well jitter (offsetVal below) is reproducible.
np.random.seed(666)
p = bokeh.plotting.figure(plot_width=800, plot_height=600, title='C24', x_range=Identifiers, y_axis_label='ng D50C24', y_axis_type='log')
colors=['#494949','#5F56FF','#494949','#5F56FF','#494949','#C42F2F','#494949','#C42F2F','#494949','#832161','#494949','#832161']
p = box_and_whisker(plotting_df,p,'Identifier','Hydrocarbon amount')
for _,i in enumerate(np.unique(plotting_df['WellNumber'])):
    data = {'identifier': plotting_df['Identifier'].loc[(plotting_df['WellNumber']==i)].values, 'hydrocarbon amount': plotting_df['Hydrocarbon amount'].loc[(plotting_df['WellNumber']==i)].values}
    source = ColumnDataSource(data=data)
    outlier=np.unique(plotting_df['Outlier'].loc[(plotting_df['WellNumber']==i)])
    # random horizontal jitter so points from the same group don't overlap
    offsetVal=(np.random.rand(1)[0]-0.5)*0.5
    p.line(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color='black', alpha=0.3)
    p.circle(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color=bokeh.transform.factor_cmap('identifier',colors,Identifiers), alpha=0.6, size=7)
    # re-draw flagged wells in a distinct color with a legend entry
    if outlier=='AI': p.circle(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color='lime', alpha=1, size=7, legend='Ant Injured')
    if outlier=='A': p.circle(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color='purple', alpha=1, size=7, legend='Ant Chomped')
    if outlier=='BI': p.circle(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color='orange', alpha=1, size=7, legend='Beetle Injured')
    if outlier=='Y': p.circle(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color='cyan', alpha=1, size=7, legend='Ant Crushed or Not Labeled')
p.xgrid.visible = False
p.ygrid.visible = False
# Add custom axis
p.xaxis.formatter = FuncTickFormatter(code="""
var labels = %s;
return labels[tick];
""" %Labels)
bokeh.io.show(p)
# -

# +
# Same plot for an earlier dataset, without the outlier highlighting.
# NOTE(review): df_old is not defined anywhere in this notebook -- it was
# presumably created in an earlier session or a deleted cell; this cell
# raises NameError as written.  TODO: restore the df_old loading cell.
plotting_df=df_old.loc[df_old['Peak ID']=='D50C24']
#Identifiers = ['LSU','SU','LSL','SL','LPU','PU','LPL','PL','LDU','DU','LDL','DL',]
Identifiers = ['LSU','SU','LSL','SL','LPU','PU','LPL','PL']
Labels={'LSU': '','SU': 'Sceptobius Control','LSL': '','SL': 'Sceptobius Treated', 'LPU': '','PU': 'Platyusa Control','LPL': '','PL': 'Platyusa Treated', 'LDU': '','DU': 'Dalotia Control','LDL': '','DL': 'Dalotia Treated'}
np.random.seed(666)
p = bokeh.plotting.figure(plot_width=800, plot_height=600, title='C24', x_range=Identifiers, y_axis_label='ng D50C24', y_axis_type='log')
colors=['#494949','#5F56FF','#494949','#5F56FF','#494949','#C42F2F','#494949','#C42F2F','#494949','#832161','#494949','#832161']
p = box_and_whisker(plotting_df,p,'Identifier','Hydrocarbon amount')
for _,i in enumerate(np.unique(plotting_df['WellNumber'])):
    data = {'identifier': plotting_df['Identifier'].loc[(plotting_df['WellNumber']==i)].values, 'hydrocarbon amount': plotting_df['Hydrocarbon amount'].loc[(plotting_df['WellNumber']==i)].values}
    source = ColumnDataSource(data=data)
    offsetVal=(np.random.rand(1)[0]-0.5)*0.5
    p.line(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color='black', alpha=0.3)
    p.circle(bokeh.transform.dodge('identifier', offsetVal, range=p.x_range), 'hydrocarbon amount', source=source, color=bokeh.transform.factor_cmap('identifier',colors,Identifiers), alpha=0.6, size=7)
p.xgrid.visible = False
p.ygrid.visible = False
# Add custom axis
p.xaxis.formatter = FuncTickFormatter(code="""
var labels = %s;
return labels[tick];
""" %Labels)
bokeh.io.show(p)
# -
191025_DCHC_transfer_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# ASR-transcript feature extraction for video event detection: read every
# ASR .txt transcript into one string per video, then build bag-of-words
# features (TF-IDF, then L1-normalized counts) over the corpus.

import glob
import pandas as pd
import numpy as np
# import scipy.sparse
from sklearn.preprocessing import normalize
from sklearn.svm import SVC
from sklearn.metrics.pairwise import chi2_kernel
from sklearn.cluster import KMeans
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import average_precision_score
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from xgboost import XGBClassifier
import time

# +
# Collect every ASR transcript path.
# path = './hw1_git/11775-hws/videos/*.mp4'
path = './hw1_git/11775-hws/asrs/*.txt'
filelist = glob.glob(path)
# -


def concatenate_list_data(list):
    """Concatenate the elements of *list* into a single string.

    Elements are passed through str() first.  The parameter name shadows
    the ``list`` builtin; it is kept for backward compatibility with any
    keyword callers.
    """
    # str.join is linear in the total length; the previous += loop was
    # quadratic for long transcripts.
    return ''.join(str(element) for element in list)


# Read every transcript into memory as one string per video.
# NOTE: ``text = []`` rebinds the name imported above from
# sklearn.feature_extraction; the module object is not used afterwards.
text = []
for i, filename in enumerate(filelist):
    # for i in range(10):
    with open(filename, "r") as myfile:
        data = myfile.readlines()
        data = concatenate_list_data(data)
        text.append(data)
    if i % 1000 == 0:
        # progress indicator -- the corpus contains thousands of files
        print('{}th txt file reading...'.format(i))

# #### Vectorizer TDIF Vectorizer

# ## TF-IDF Vectorization
# sublinear_tf applies 1 + log(tf) so raw term frequency cannot grow unbounded.
vectorize = TfidfVectorizer(stop_words="english", min_df=2, sublinear_tf=True)  # , norm='l1'
norm_bow = vectorize.fit_transform(text).toarray()
norm_data = pd.DataFrame(norm_bow)
norm_data.head(3)

# Plain token counts, then L1-normalize each row so documents of different
# lengths are comparable.  NOTE: this overwrites the TF-IDF ``norm_bow`` /
# ``norm_data`` computed above -- only the count-based features survive
# into the modeling cells below.
# bow = vectorize.fit_transform(text)#.toarray()
vect = CountVectorizer(stop_words="english")
# vect = CountVectorizer()
bow = vect.fit_transform(text).toarray()
norm_bow = normalize(bow, norm='l1', axis=1)
norm_data = pd.DataFrame(norm_bow)
norm_data.head(3)

# #### ASRS Modeling...
# Making Video name: strip the 'asrs/' directory prefix and the '.txt'
# suffix from each transcript path to recover the video id.
import re  # hoisted: was re-imported on every loop iteration

video_name_ind = []
for i in range(len(filelist)):
    match_front = re.search('asrs/', filelist[i])
    # escape the dot so only a literal ".txt" matches (previously '.' matched
    # any character, which could truncate ids containing "txt")
    match_end = re.search(r'\.txt', filelist[i])
    video_name_ind.append(filelist[i][match_front.end():match_end.start()])
video_name = pd.DataFrame({'video': video_name_ind})
# print(i, filelist[i][match_front.end():match_end.start()])

# +
# Making features columns: 'video' followed by feature_0..feature_{k-1}
k = norm_data.shape[1]
column_name = ['video']
for i in range(k):
    column_name.append('feature_{}'.format(i))

total_data = pd.concat([video_name, norm_data], axis=1)
total_data.columns = column_name
# -

total_data.head(3)

# +
# Train / validation / test split lists (video id + event label per row);
# test labels are unknown, so they are filled with 'UNK'.
train_ind = pd.read_csv('./hw1_git/11775-hws/hw1_code/list/train', sep=' ', header=None)
valid_ind = pd.read_csv('./hw1_git/11775-hws/hw1_code/list/val', sep=' ', header=None)
test_ind = pd.read_csv('./hw1_git/11775-hws/hw1_code/list/test.video', sep=' ', header=None)

train_ind['Data'] = 'TRAIN'
valid_ind['Data'] = 'VALID'
test_ind[1] = 'UNK'
test_ind['Data'] = 'TEST'

train_ind.columns = ['video', 'target', 'Data']
valid_ind.columns = ['video', 'target', 'Data']
test_ind.columns = ['video', 'target', 'Data']

# +
# One-vs-rest targets for events P001-P003, both as string labels
# (P00x / 'Other') and as 0/1 indicator columns.
data_lable = pd.concat([train_ind, valid_ind, test_ind], axis=0).reset_index().drop('index', axis=1)

# data_lable['target_p001'] =
data_lable['target_p001'] = data_lable['target']
data_lable['target_p002'] = data_lable['target']
data_lable['target_p003'] = data_lable['target']
data_lable['target_p001_10'] = 1
data_lable['target_p002_10'] = 1
data_lable['target_p003_10'] = 1

# Use .loc(mask, column) assignment: the previous chained
# data_lable[col][mask] = ... form raises SettingWithCopyWarning and is not
# guaranteed to write back into the frame.
data_lable.loc[data_lable['target'] != 'P001', 'target_p001'] = 'Other'
data_lable.loc[data_lable['target'] != 'P002', 'target_p002'] = 'Other'
data_lable.loc[data_lable['target'] != 'P003', 'target_p003'] = 'Other'
data_lable.loc[data_lable['target'] != 'P001', 'target_p001_10'] = 0
data_lable.loc[data_lable['target'] != 'P002', 'target_p002_10'] = 0
data_lable.loc[data_lable['target'] != 'P003', 'target_p003_10'] = 0
# -

# Attach the split flag and all target columns to the feature matrix.
total_mart = total_data.merge(data_lable, how='left', on='video')
# Split the feature mart by the Data flag assigned above.
train_mart = total_mart[total_mart['Data'] == 'TRAIN']
valid_mart = total_mart[total_mart['Data'] == 'VALID']
test_mart = total_mart[total_mart['Data'] == 'TEST']
print(train_mart.shape, valid_mart.shape, test_mart.shape)

# #### Modeling....

train_mart.head(3)


# +
def _modeling_ap(model, model_label, k, train_data, valid_data, target):
    """Fit *model* on the first k feature columns and report validation AP.

    Columns 1..k of the marts are the features (column 0 is the video id);
    *target* names the 0/1 one-vs-rest label column.  Prints the elapsed
    fitting time and the average-precision score, then returns the score.
    Shared by the four public modeling_ap_* wrappers below, which were
    previously four near-identical copies of this code.
    """
    start_time = time.time()
    X_train = train_data.iloc[:, 1:k + 1]
    y_train = train_data[target]
    X_valid = valid_data.iloc[:, 1:k + 1]
    y_valid = valid_data[target]
    model.fit(X_train, y_train)
    # probability of the positive class drives the ranking metric
    # (the original code also ran model.predict(), but never used it)
    y_probs = model.predict_proba(X_valid)
    results = average_precision_score(y_true=y_valid.values, y_score=y_probs[:, 1])
    print("===== The time consuming of {} : {} seconds =====".format(model_label, (time.time() - start_time)))
    print(results)
    return results


def modeling_ap_SVM(k, train_data, valid_data, target='target_p001_10'):
    """Chi-squared-kernel SVM; returns validation average precision."""
    # NOTE(review): chi2_kernel assumes non-negative features -- holds for
    # the L1-normalized counts built above.
    return _modeling_ap(SVC(kernel=chi2_kernel, probability=True),
                        'SVM Modeling', k, train_data, valid_data, target)


def modeling_ap_AdaB(k, train_data, valid_data, target='target_p001_10'):
    """AdaBoost (200 estimators); returns validation average precision."""
    return _modeling_ap(AdaBoostClassifier(n_estimators=200, random_state=0),
                        'AdaBoosting Modeling', k, train_data, valid_data, target)


def modeling_ap_Boost(k, train_data, valid_data, target='target_p001_10'):
    """Gradient boosting (200 trees); returns validation average precision."""
    return _modeling_ap(GradientBoostingClassifier(n_estimators=200, random_state=0),
                        'Boosting Modeling', k, train_data, valid_data, target)


def modeling_ap_xgb(k, train_data, valid_data, target='target_p001_10'):
    """XGBoost with default parameters; returns validation average precision."""
    return _modeling_ap(XGBClassifier(),
                        'XgBoosting Modeling', k, train_data, valid_data, target)
# -

# One-vs-rest evaluation of every model on events P001-P003 using the
# first 100 bag-of-words features.
SVM_results_p001 = modeling_ap_SVM(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p001_10')
SVM_results_p002 = modeling_ap_SVM(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p002_10')
SVM_results_p003 = modeling_ap_SVM(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p003_10')

AdaB_results_p001 = modeling_ap_AdaB(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p001_10')
AdaB_results_p002 = modeling_ap_AdaB(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p002_10')
AdaB_results_p003 = modeling_ap_AdaB(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p003_10')

Boost_results_p001 = modeling_ap_Boost(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p001_10')
Boost_results_p002 = modeling_ap_Boost(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p002_10')
Boost_results_p003 = modeling_ap_Boost(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p003_10')

Xgb_results_p001 = modeling_ap_xgb(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p001_10')
Xgb_results_p002 = modeling_ap_xgb(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p002_10')
Xgb_results_p003 = modeling_ap_xgb(k=100, train_data=train_mart, valid_data=valid_mart, target='target_p003_10')

# Sample output pasted from a previous run, kept for reference.  These
# lines were previously bare (uncommented) text, which made the whole
# script a SyntaxError; they are now comments.
# ===== The time consuming of Modeling : 0.502795934677 seconds =====
# 0.02681992337164751
# ===== The time consuming of Modeling : 0.543972969055 seconds =====
# 0.03501945525291829
# ===== The time consuming of Modeling : 0.651535987854 seconds =====
# 0.08946728520577304
# ===== The time consuming of Modeling : 0.188441038132 seconds =====
# 0.051043338683788124
# ===== The time consuming of Modeling : 0.183010101318 seconds =====
# 0.03501945525291829
# ===== The time consuming of Modeling : 0.183886051178 seconds =====
# 0.09318639879554727
# ===== The time consuming of Modeling : 0.162078142166 seconds =====
# 0.0219435736677116
# ===== The time consuming of Modeling : 0.0856039524078 seconds =====
# 0.0313588850174216
# ===== The time consuming of Modeling : 0.160410165787 seconds =====
# 0.1558041282822041
hw2_code/99.working_file_ipnb/00.ASRS_modeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Pandas practice - analysing worldwide COVID-19 data
#
# * Dataset: [Microsoft Bing COVID-19 Dataset](https://azure.microsoft.com/ko-kr/services/open-datasets/catalog/bing-covid-19-data/)
#   * The real dataset behind the "[Bing COVID-19 tracker](https://bing.com/covid)" served by Microsoft's Bing search engine
#   * Includes confirmed, death and recovered cases for every region worldwide; updated daily
#   * As of the 3 Dec 2020 CSV: 1,108,106 rows, 124 MB total
# * Goal: build your own COVID-19 dashboard

# ### Import the required packages
# pandas + numpy + matplotlib.  "%matplotlib inline" is a Jupyter-notebook
# magic that tells matplotlib to render plots directly inside the notebook.
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt

# ### Create a DataFrame from the CSV
df = pd.read_csv("https://pandemicdatalake.blob.core.windows.net/public/curated/covid-19/bing_covid-19_data/latest/bing_covid-19_data.csv")
df.head(10)

# ### Visualize worldwide daily confirmed cases / deaths
# From the base DataFrame (df), extract only the rows whose country_region
# is 'Worldwide' into a new DataFrame df_Worldwide
df_Worldwide=df[df['country_region']=='Worldwide']

# Visualization: cumulative confirmed / cumulative deaths / daily confirmed /
# daily deaths, each plotted against the update date
df_Worldwide.plot(kind='line',x='updated',y="confirmed",grid=True, title="Worldwide confirmed cases(count)", figsize=(8, 4))
df_Worldwide.plot(kind='line',x='updated',y="deaths",grid=True, title="Worldwide deaths(count)", figsize=(8, 4))
df_Worldwide.plot(kind='line',x='updated',y="confirmed_change",grid=True, title="Worldwide daily cofirmed cases(count)" , figsize=(8, 4))
df_Worldwide.plot(kind='line',x='updated',y="deaths_change",grid=True, title="Worldwide daily deaths(count)" , figsize=(8, 4))

# ### Cumulative confirmed cases / deaths by country
# Country-level rows carry no admin_region_1 value; the 'Worldwide'
# aggregate row is excluded.  max() of the cumulative columns gives the
# most recent totals per country.
df_country = df[(df['country_region']!='Worldwide') & (df.admin_region_1.isnull())]
df_country_group = df_country[["country_region", "confirmed", "deaths"]].groupby("country_region").max().sort_values("confirmed", ascending=False)
df_country_group.head(30)

# ### Confirmed cases / deaths per South Korean province
# Keep rows whose country name (country_region) is "South Korea" AND whose
# province name (admin_region_1) is not null, so that only per-province data
# (not the nationwide total) is selected into df_korea
df_korea = df[(df['country_region']=='South Korea') & (~df.admin_region_1.isnull())]
df_korea.head(5)

# Group by province name (admin_region_1) via groupby and take the maximum of
# confirmed/deaths (= the latest cumulative confirmed/death counts).
# .sort_values("name of the column to sort by", ascending=True/False [ascending/descending])
df_province = df_korea[["admin_region_1", "confirmed", "deaths"]].groupby("admin_region_1").max().sort_values("confirmed", ascending=False)
df_province.head(20)

df_province.plot(kind="barh", figsize=(12, 4), title="Korea confirmed cases/deaths by province")

# ### Visualize daily confirmed-case changes per Korean province
# .pivot reshapes the rows/columns of the given DataFrame, much like the
# pivot-table feature in Excel
data_pivot_by_date = df_korea.pivot(index='updated', columns='admin_region_1', values='confirmed_change')
data_pivot_by_date.head(10)

# Draw the chart
data_pivot_by_date.plot(figsize=(16, 10))
assets/pdf/covid-19-data-analysis_2020_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: openvino_env
#     language: python
#     name: openvino_env
# ---

# # OpenVINO API Tutorial
# This notebook explains the basics of the OpenVINO Inference Engine API. It covers:
#
# - [Load Inference Engine and Show Info](#Load-Inference-Engine-and-Show-Info)
# - [Loading a Model](#Loading-a-Model)
#   - [IR Model](#IR-Model)
#   - [ONNX Model](#ONNX-Model)
# - [Getting Information about a Model](#Getting-Information-about-a-Model)
#   - [Model Inputs](#Model-Inputs)
#   - [Model Outputs](#Model-Outputs)
# - [Doing Inference on a Model](#Doing-Inference-on-a-Model)
# - [Reshaping and Resizing](#Reshaping-and-Resizing)
#   - [Change Image Size](#Change-Image-Size)
#   - [Change Batch Size](#Change-Batch-Size)
# - [Caching a Model](#Caching-a-Model)
#
# The notebook is divided into sections with headers. Each section is standalone
# and does not depend on previous sections (which is why most cells below
# re-create their own IECore and re-read the model). A segmentation and
# classification IR model and a segmentation ONNX model are provided as
# examples. You can replace these model files with your own models. The exact
# outputs will be different, but the process is the same.

# ## Load Inference Engine and Show Info
#
# Initialize Inference Engine with IECore()

# +
from openvino.inference_engine import IECore

ie = IECore()
# -

# Inference Engine can load a network on a device. A device in this context
# means a CPU, an Intel GPU, a Neural Compute Stick 2, etc. The
# `available_devices` property shows the devices that are available on your
# system. The "FULL_DEVICE_NAME" option to `ie.get_metric()` shows the name
# of the device.
#
# In this notebook the CPU device is used. To use an integrated GPU, use
# `device_name="GPU"` instead. Note that loading a network on GPU will be
# slower than loading a network on CPU, but inference will likely be faster.

# Enumerate accelerators and print each one's full (marketing) name.
devices = ie.available_devices
for device in devices:
    device_name = ie.get_metric(device_name=device, metric_name="FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")

# ## Loading a Model
#
# After initializing Inference Engine, first read the model file with
# `read_network()`, then load it to the specified device with `load_network()`.
#
# ### IR Model
#
# An IR (Intermediate Representation) model consists of an .xml file,
# containing information about network topology, and a .bin file, containing
# the weights and biases binary data. `read_network()` expects the weights
# file to be located in the same directory as the xml file, with the same
# filename, and the extension .bin:
# `model_weights_file == Path(model_xml).with_suffix(".bin")`. If this is the
# case, specifying the weights file is optional. If the weights file has a
# different filename, it can be specified with the `weights` parameter to
# `read_network()`.
#
# See the [tensorflow-to-openvino](../101-tensorflow-to-openvino/101-tensorflow-to-openvino.ipynb)
# and [pytorch-onnx-to-openvino](../102-pytorch-onnx-to-openvino/102-pytorch-onnx-to-openvino.ipynb)
# notebooks for information on how to convert your existing TensorFlow,
# PyTorch or ONNX model to OpenVINO's IR format with OpenVINO's Model
# Optimizer. For exporting ONNX models to IR with default settings, the
# `.serialize()` method can also be used.

# +
from openvino.inference_engine import IECore

ie = IECore()
classification_model_xml = "model/classification.xml"
net = ie.read_network(model=classification_model_xml)
exec_net = ie.load_network(network=net, device_name="CPU")
# -

# ### ONNX Model
#
# An ONNX model is a single file. Reading and loading an ONNX model works the
# same way as reading and loading an IR model. The `model` argument points to
# the ONNX filename.

# +
from openvino.inference_engine import IECore

ie = IECore()
onnx_model = "model/segmentation.onnx"
net_onnx = ie.read_network(model=onnx_model)
exec_net_onnx = ie.load_network(network=net_onnx, device_name="CPU")
# -

# The ONNX model can be exported to IR with `.serialize()`:

net_onnx.serialize("model/exported_onnx_model.xml")

# ## Getting Information about a Model
#
# The OpenVINO IENetwork instance stores information about the model.
# Information about the inputs and outputs of the model are in
# `net.input_info` and `net.outputs`. These are also properties of the
# ExecutableNetwork instance. Where we use `net.input_info` and `net.outputs`
# in the cells below, you can also use `exec_net.input_info` and
# `exec_net.outputs`.

# ### Model Inputs

# +
from openvino.inference_engine import IECore

ie = IECore()
classification_model_xml = "model/classification.xml"
net = ie.read_network(model=classification_model_xml)
net.input_info
# -

# The cell above shows that the model loaded expects one input, with the name
# _input_. If you loaded a different model, you may see a different input
# layer name, and you may see more inputs.
#
# It is often useful to have a reference to the name of the first input layer.
# For a model with one input, `next(iter(net.input_info))` gets this name.

input_layer = next(iter(net.input_info))
input_layer

# Information for this input layer is stored in `input_info`. The next cell
# prints the input layout, precision and shape.

print(f"input layout: {net.input_info[input_layer].layout}")
print(f"input precision: {net.input_info[input_layer].precision}")
print(f"input shape: {net.input_info[input_layer].tensor_desc.dims}")

# This cell output tells us that the model expects inputs with a shape of
# [1,3,224,224], and that this is in NCHW layout. This means that the model
# expects input data with a batch size (N) of 1, 3 channels (C), and images
# of a height (H) and width (W) of 224. The input data is expected to be of
# FP32 (floating point) precision.

# ### Model Outputs

# +
from openvino.inference_engine import IECore

ie = IECore()
classification_model_xml = "model/classification.xml"
net = ie.read_network(model=classification_model_xml)
net.outputs
# -

# Model output info is stored in `net.outputs`. The cell above shows that the
# model returns one output, with the name _MobilenetV3/Predictions/Softmax_.
# If you loaded a different model, you will probably see a different output
# layer name, and you may see more outputs.
#
# Since this model has one output, follow the same method as for the input
# layer to get its name.

output_layer = next(iter(net.outputs))
output_layer

# Getting the output layout, precision and shape is similar to getting the
# input layout, precision and shape.

print(f"output layout: {net.outputs[output_layer].layout}")
print(f"output precision: {net.outputs[output_layer].precision}")
print(f"output shape: {net.outputs[output_layer].shape}")

# This cell output shows that the model returns outputs with a shape of
# [1, 1001], where 1 is the batch size (N) and 1001 the number of classes (C).
# The output is returned as 32-bit floating point.

# ## Doing Inference on a Model
#
# To do inference on a model, call the `infer()` method of the
# _ExecutableNetwork_, the `exec_net` that we loaded with `load_network()`.
# `infer()` expects one argument: _inputs_. This is a dictionary, mapping
# input layer names to input data.

# **Preparation: load network**

# +
from openvino.inference_engine import IECore

ie = IECore()
classification_model_xml = "model/classification.xml"
net = ie.read_network(model=classification_model_xml)
exec_net = ie.load_network(network=net, device_name="CPU")
input_layer = next(iter(net.input_info))
output_layer = next(iter(net.outputs))
# -

# **Preparation: load image and convert to input shape**
#
# To propagate an image through the network, it needs to be loaded into an
# array, resized to the shape that the network expects, and converted to the
# network's input layout.

# +
import cv2

image_filename = "data/coco_hollywood.jpg"
image = cv2.imread(image_filename)
image.shape
# -

# The image has a shape of (663,994,3). It is 663 pixels in height, 994
# pixels in width, and has 3 color channels. We get a reference to the height
# and width that the network expects and resize the image to that size.

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = net.input_info[input_layer].tensor_desc.dims
# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(src=image, dsize=(W, H))
resized_image.shape

# Now the image has the width and height that the network expects. It is
# still in H,C,W format. We change it to N,C,H,W format (where N=1) by first
# calling `np.transpose()` to change to C,H,W and then adding the N dimension
# by calling `np.expand_dims()`. Convert the data to FP32 with `np.astype()`.

# +
import numpy as np

input_data = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0).astype(np.float32)
input_data.shape
# -

# **Do inference**
#
# Now that the input data is in the right shape, doing inference is one
# simple command:

result = exec_net.infer({input_layer: input_data})
result

# `.infer()` returns a dictionary, mapping output layers to data. Since we
# know this network returns one output, and we stored the reference to the
# output layer in the `output_layer` variable, we can get the data with
# `result[output_layer]`

output = result[output_layer]
output.shape

# The output shape is (1,1001), which we saw is the expected shape of the
# output. This output shape indicates that the network returns probabilities
# for 1001 classes. To transform this into meaningful information, check out
# the [hello world notebook](../001-hello-world/001-hello-world.ipynb).

# ## Reshaping and Resizing
#
# ### Change Image Size
# Instead of reshaping the image to fit the model, you can also reshape the
# model to fit the image. Note that not all models support reshaping, and
# models that do may not support all input shapes. The model accuracy may
# also suffer if you reshape the model input shape.
#
# We first check the input shape of the model, and then reshape to the new
# input shape.

# +
from openvino.inference_engine import IECore

ie = IECore()
segmentation_model_xml = "model/segmentation.xml"
segmentation_net = ie.read_network(model=segmentation_model_xml)
segmentation_input_layer = next(iter(segmentation_net.input_info))
segmentation_output_layer = next(iter(segmentation_net.outputs))

print("~~~~ ORIGINAL MODEL ~~~~")
print(f"input layout: {segmentation_net.input_info[segmentation_input_layer].layout}")
print(f"input shape: {segmentation_net.input_info[segmentation_input_layer].tensor_desc.dims}")
print(f"output shape: {segmentation_net.outputs[segmentation_output_layer].shape}")

new_shape = (1, 3, 544, 544)
segmentation_net.reshape({segmentation_input_layer: new_shape})
# reloading after reshape is required: exec nets are built for a fixed shape
segmentation_exec_net = ie.load_network(network=segmentation_net, device_name="CPU")

print("~~~~ RESHAPED MODEL ~~~~")
print(f"net input shape: {segmentation_net.input_info[segmentation_input_layer].tensor_desc.dims}")
print(
    f"exec_net input shape: "
    f"{segmentation_exec_net.input_info[segmentation_input_layer].tensor_desc.dims}"
)
print(f"output shape: {segmentation_net.outputs[segmentation_output_layer].shape}")
# -

# The input shape for the segmentation network is [1,3,512,512], with an NCHW
# layout: the network expects 3-channel images with a width and height of 512
# and a batch size of 1. We reshape the network to make it accept input
# images with a width and height of 544 with the `.reshape()` method of
# `IENetwork`. This segmentation network always returns arrays with the same
# width and height as the input width and height, so setting the input
# dimensions to 544x544 also modifies the output dimensions. After reshaping,
# load the network to the device again.

# ### Change Batch Size
# We can also use `.reshape()` to set the batch size, by increasing the first
# element of _new_shape_. For example, to set a batch size of two, set
# `new_shape = (2,3,544,544)` in the cell above. If you only want to change
# the batch size, you can also set the `batch_size` property directly.

# +
from openvino.inference_engine import IECore

ie = IECore()
segmentation_model_xml = "model/segmentation.xml"
segmentation_net = ie.read_network(model=segmentation_model_xml)
segmentation_input_layer = next(iter(segmentation_net.input_info))
segmentation_output_layer = next(iter(segmentation_net.outputs))
segmentation_net.batch_size = 2
segmentation_exec_net = ie.load_network(network=segmentation_net, device_name="CPU")

print(f"input layout: {segmentation_net.input_info[segmentation_input_layer].layout}")
print(f"input shape: {segmentation_net.input_info[segmentation_input_layer].tensor_desc.dims}")
print(f"output shape: {segmentation_net.outputs[segmentation_output_layer].shape}")
# -

# The output shows that by setting the batch size to 2, the first element (N)
# of the input and output shape now has a value of 2. Let's see what happens
# if we propagate our input image through the network:

# +
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
segmentation_model_xml = "model/segmentation.xml"
segmentation_net = ie.read_network(model=segmentation_model_xml)
segmentation_input_layer = next(iter(segmentation_net.input_info))
segmentation_output_layer = next(iter(segmentation_net.outputs))
# NOTE: input_data is created from the pre-reshape dims (batch size 1);
# batch_size is set afterwards, so the single image is fed to a batch-2 net.
input_data = np.random.rand(*segmentation_net.input_info[segmentation_input_layer].tensor_desc.dims)
segmentation_net.batch_size = 2
segmentation_exec_net = ie.load_network(network=segmentation_net, device_name="CPU")

result_batch = segmentation_exec_net.infer({segmentation_input_layer: input_data})

print(f"input data shape: {input_data.shape}")
print(f"result data data shape: {result_batch[segmentation_output_layer].shape}")
# -

# The output shows that if `batch_size` is set to 2, the network output will
# have a batch size of 2, even if only one image was propagated through the
# network. Regardless of batch size, you can always do inference on one
# image. In that case, only the first network output contains meaningful
# information.
#
# Verify that inference on two images works by creating random data with a
# batch size of 2:

# +
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
segmentation_model_xml = "model/segmentation.xml"
segmentation_net = ie.read_network(model=segmentation_model_xml)
segmentation_input_layer = next(iter(segmentation_net.input_info))
segmentation_output_layer = next(iter(segmentation_net.outputs))
# batch_size is set BEFORE reading tensor_desc.dims here, so input_data
# really has a leading dimension of 2 (contrast with the previous cell).
segmentation_net.batch_size = 2
input_data = np.random.rand(*segmentation_net.input_info[segmentation_input_layer].tensor_desc.dims)
segmentation_exec_net = ie.load_network(network=segmentation_net, device_name="CPU")

result_batch = segmentation_exec_net.infer({segmentation_input_layer: input_data})

print(f"input data shape: {input_data.shape}")
print(f"result data shape: {result_batch[segmentation_output_layer].shape}")
# -

# ## Caching a Model
#
# For some devices, like GPU, loading a model can take some time. Model
# Caching solves this issue by caching the model in a cache directory. If
# `ie.set_config({"CACHE_DIR": cache_dir}, device_name=device_name)` is set,
# caching will be used. This option checks if a model exists in the cache.
# If so, it loads it from the cache. If not, it loads the model regularly,
# and stores it in the cache, so that the next time the model is loaded when
# this option is set, the model will be loaded from the cache.
#
# In the cell below, we create a *model_cache* directory as a subdirectory of
# *model*, where the model will be cached for the specified device. The model
# will be loaded to the GPU. After running this cell once, the model will be
# cached, so subsequent runs of this cell will load the model from the cache.
# Note: Model Caching is not available on CPU devices

# +
import time
from pathlib import Path

from openvino.inference_engine import IECore

ie = IECore()

device_name = "GPU"  # Model Caching is not available for CPU

if device_name in ie.available_devices and device_name != "CPU":
    cache_path = Path("model/model_cache")
    cache_path.mkdir(exist_ok=True)
    # Enable caching for Inference Engine. Comment out this line to disable caching
    ie.set_config({"CACHE_DIR": str(cache_path)}, device_name=device_name)
    classification_model_xml = "model/classification.xml"
    net = ie.read_network(model=classification_model_xml)
    start_time = time.perf_counter()
    exec_net = ie.load_network(network=net, device_name=device_name)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
else:
    print("Model caching is not available on CPU devices.")
# -

# After running the previous cell, we know the model exists in the cache
# directory. We delete exec net and load it again, and measure the time it
# takes now.

if device_name in ie.available_devices and device_name != "CPU":
    del exec_net
    start_time = time.perf_counter()
    exec_net = ie.load_network(network=net, device_name=device_name)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
notebooks/002-openvino-api/002-openvino-api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # SpotiScry
# Date: January 2021
#
# > *What makes a song reach the top of the charts while others flop? Using data from Spotify, our team will determine what features influence song popularity - such as the danceability or song length. We will then predict a song’s popularity using regression and achieve an RMSE lower than the baseline model.*
#
# ### Project Goals:
# - Build a dataset of songs using Spotify's API
# - Identify the drivers of song popularity
# - Create a regression model to predict how popular a song will be that has an RMSE lower than the baseline
#
# ### Creators:
# **The SpotiScyers:** [<NAME>](https://github.com/Brandon-Martinez27), <NAME>, [<NAME>](https://github.com/KwameTaylor), <NAME>
#
# ---
#
# ## Table of Contents
# - [Acquire](#acquire)
# - [Prepare](#prepare)
# - [Explore](#explore)
# - [Model](#model)
# - [Conclusions](#conclusions)

# ---

# # Set up Environment

# standard imports for creating dataframes and manipulating data
import pandas as pd
import numpy as np

# viz imports
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import matplotlib as mpl
import seaborn as sns
from cycler import cycler

# default viz size settings
sns.set(rc={'figure.figsize':(14, 10)})
sns.set_context("talk", rc={"font.size":14,"axes.titlesize":18,"axes.labelsize":14})
plt.rc('figure', figsize=(12, 10))
plt.rc('font', size=14)
mpl.rcParams['font.size'] = 14
mpl.rcParams['figure.figsize'] = 14, 10
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['lines.linestyle'] = '--'
mpl.rcParams['axes.prop_cycle'] = cycler(color=['deepskyblue', 'firebrick', 'darkseagreen', 'violet'])

# accessing functions from py scripts (project-local helper modules)
from acquire import create_spotipy_client, get_capstone_playlist
from prepare import handle_nulls
from explore import corr_heatmap, explicit_viz, explicit_ttest, danceability_viz, release_dates_viz
from preprocessing import spotify_split, scale_data

# ---

# # Acquire <a class="anchor" id="acquire"></a>

# Create the spotipy client
sp = create_spotipy_client()

# Get the dataset using the get_capstone_playlist function
df = get_capstone_playlist(sp)

# Here's how the data looks
df.head(3)

# <div class="alert alert-block alert-info">
# While we ran these two functions at first we then wrote our datasets to csvs and included in our functions the ability to check to see if we had the data folder that contains the dataframe csvs already. If we have it, it will just read the csv files and if not it will contact the api and generate the csv files for us.
#
# This may take some time.
#
# Another way to access the data as long as we had the csv already made was to simply use pd.read_csv()
# </div>

# import the data from a csv (cached copy of the playlist pulled above)
df = pd.read_csv('full-playlist.csv', index_col=0)

# what does the data look like?
df.head(3)

# ---

# ### Data Summary

# rows X columns
df.shape

# columns, data types, and null counts
df.info()

# ## Takeaways
# <div class="alert alert-block alert-info">
# <li>5733 total rows</li>
# <li>30 columns of features including the target variable</li>
# <li>Data is mostly clean from prepare functions</li>
# </div>

# ---

# # Prepare <a class="anchor" id="prepare"></a>

# +
# handle null values (replace with the updated prepare functions)
df = handle_nulls(df)

# split the data
X_train, y_train, X_validate, y_validate, X_test, y_test, train, validate, test = spotify_split(df, 'popularity')

train.head(3)
# -

# `spotify_split`
# - This function takes in a dataframe and the string name of the target variable.
# - Splits it into test (15%), validate (15%), and train (70%).
# - It also splits test, validate, and train into X and y dataframes.
# - Returns X_train, y_train, X_validate, y_validate, X_test, y_test, train, validate, test.
# # ## Takeaways # <div class="alert alert-block alert-info"> # <b>Data Split</b>: # <li>Train: 4012 ~ 70%</li> # <li>Validate: 861 ~ 15%</li> # <li>Test: 860 ~ 15%</li> # Data is stratified by binned target variable # </div> # --- # # Explore <a class="anchor" id="explore"></a> # ## What are the distributions of the variables? df.hist(xlabelsize=1, ylabelsize=1) plt.tight_layout() # ## What are the drivers of popularity? corr_heatmap(train.drop(columns=['artist', 'album', 'track_name', 'release_date', 'album_popularity', 'duration_ms', 'duration_minutes', 'release_year', 'speechiness', 'instrumentalness', 'release_month', 'release_day'])) # ## Does a track being explicit or not correlate with its popularity? explicit_viz(train) # --- # ### Independent T-Test # # **Set hypothesis and alpha:** # # $H_{0}$: Mean of song popularity of explicit tracks = Mean of song popularity of non-explicit tracks # # $H_{a}$: Mean of song popularity of explicit tracks > Mean of song popularity of non-explicit tracks # # $\alpha$: .05 # # The features are independent because there is no overlap between observations in explicit and observations in not explicit. explicit_ttest(train) # ## Is the mean popularity of songs with time signature 4 significantly greater than the mean popularity of all songs(within the train data)? sns.boxplot(x='time_signature', y='popularity', data=train) # ## Is there a difference in mean popularity across dancebility bins? danceability_viz(train) # ## How many minutes long are songs that are popular? Does having a featured artist(s) on a track make people more willing to listen to longer songs? sns.lineplot(data=train, x="duration_minutes", y="popularity", hue="is_featured_artist", err_style="bars") plt.title(label="How many minutes long are songs that are popular, grouped by songs with featured artists and without?", size=20) # ## Does a track's release year, release month, or release day have an impact on its popularity? 
release_dates_viz(train) # ## Takeaways # <div class="alert alert-block alert-info"> # # * Key drivers of popularity include danceability, whether a track is explicit, loudness, track number, and whether a track has featured artist(s) or not. # # * The most popular songs were 2 minutes long, and longer tracks having a featured artist(s) were more popular than their non-featured artist counterparts. # # * The mean popularity of songs with time signature 4 was significantly greater than the mean popularity of all songs. # # </div> # --- # # Model <a class="anchor" id="model"></a> # ### Prepare Data for Modeling import numpy as np import pandas as pd import warnings warnings.filterwarnings("ignore") from preprocessing import spotify_split, scale_data from preprocessing import modeling_prep # features are selected, encoded, engineered, and preppared df = modeling_prep() # `modeling_prep` # - read the data from a csv into a dataframe # - drop any observations that contain nulls # - used the helper function to encode categorical features # - creates the top_ten_label feature # - creates dummy variables for the album type (encode) # - drop any columns that won't contribute to modeling # split the data for modeling X_train, y_train, X_validate, y_validate, X_test, y_test, train, validate, test = spotify_split(df, 'popularity') X_train.head(3) # scales the data: min-max scaler X_train_mm, X_validate_mm, X_test_mm = scale_data(train, validate, test, 'popularity', 'MinMax') X_train_mm.head(3) # `scale_data` # - Scales a DataFrame based on scaler chosen: 'MinMax', 'Standard', or 'Robust'. # - Needs three DataFrames: train, validate, and test. # - Fits the scaler object to train only, transforms on all 3. # - Returns the three dfs scaled. # - Takes in the the target variable name as an arguement. 
# <div class="alert alert-block alert-info">
# <b>Takeaways</b>:
# <li>Useable features are added to the data</li>
# <li>Data is split into train, validate, test</li>
# <li>Data is scaled by minimum and max values (0-1)</li>
# </div>

# ---

# ## Feature Selection Tools

from preprocessing import rfe, select_kbest

# Select K Best Algorithm
skb_features = select_kbest(X_train_mm, y_train, 5)
skb_features

# Recursive Feature Elimination Algorithm
rfe_features = rfe(X_train_mm, y_train, 5)
rfe_features

# <div class="alert alert-block alert-info">
# <b>Takeaways</b>:
# <li>Used tools that select important features</li>
# <li><i>Select K Best</i>: selects features according to the k highest scores</li>
# <li><i>Recursive Feature Elimination</i>: features that perform well on a simple linear regression model</li>
# </div>

# ---

# ## Feature Groups

# Select K Best: Top 5 Features DF
X_tr_skb = X_train_mm[skb_features]
X_v_skb = X_validate_mm[skb_features]
X_te_skb = X_test_mm[skb_features]

# Recursive Feature Elimination: Top 5 Features DF
X_tr_rfe = X_train_mm[rfe_features]
X_v_rfe = X_validate_mm[rfe_features]
X_te_rfe = X_test_mm[rfe_features]

# Combo 3: top 7 features drawn from both SKB and RFE
top_feats = ['danceability', 'speechiness', 'explicit', 'is_featured_artist', 'track_number', 'energy', 'single']
X_tr_top = X_train_mm[top_feats]
X_v_top = X_validate_mm[top_feats]
X_te_top = X_test_mm[top_feats]

# <div class="alert alert-block alert-info">
# <b>Takeaways</b>:
# <li>Set top 5 features from <i>Select K Best</i> to a dataframe for modeling</li>
# <li>Set top 5 features from <i>Recursive Feature Elimination</i> to a dataframe for modeling</li>
# <li>Set top 7 features from both <i>RFE and SKB</i> to a dataframe for modeling</li>
# </div>

# ---

# ## Baseline Model

from model import get_baseline_metrics, linear_regression_model, lasso_lars
from model import polynomial_regression, svr_model, glm_model, evaluate_df

#baseline is the mean popularity of the training dataset
bl, bl_train_rmse = get_baseline_metrics(y_train)

# `get_baseline_metrics`
# - Creates the variables for the mean for popularity
# - and the rmse of the train data
# - Prints out the rmse

# ---

# ## Models using SKB Features

# `linear_regression_model`, `lasso_lars`, `polynomial_regression`
# - These functions run the respective regression models
# - They take in the X (features) and y (target) train, validate and/or test data
# - Some include the option to include key word arguments
# - The train RMSE is printed

# +
# OLS Model
lm_rmse, lm_rmse_v, lm_rmse_t = linear_regression_model(
    X_tr_skb, y_train, X_v_skb, y_validate, X_te_skb, y_test)

# LASSO + LARS Model
lars_rmse, lars_rmse_v, lars_rmse_t = lasso_lars(
    X_tr_skb, y_train, X_v_skb, y_validate, X_te_skb, y_test)

# Polynomial Features (squared, deg=2) with Linear Regression
lm_sq_rmse, lm_sq_rmse_v, lm_sq_rmse_t, lm_sq_pred_t = polynomial_regression(
    X_tr_skb, y_train, X_v_skb, y_validate, X_te_skb, y_test, 'Squared', degree=2)

# Support Vector Regression with RBF Kernel
svr_rmse, svr_rmse_v, svr_rmse_t = svr_model(
    X_tr_skb, y_train, X_v_skb, y_validate, X_te_skb, y_test, 'RBF')

# General Linearized Model with Normal Distribution
glm_rmse, glm_rmse_v, glm_rmse_t, glm_pred_t = glm_model(
    X_tr_skb, y_train, X_v_skb, y_validate, X_te_skb, y_test, 'Normal', alpha=0, power=1)
# -

# ---

# ### Evaluate SKB Models

# `evaluate_df`
# - Creates a dataframe with rmse as the evaluating metric
# - Columns are the data split assessed for each model
# - Index is the name of each model
# - Data is the RMSE value for each model's performance on the respective datasets

# +
# Dataframe Evaluation for Models with SKB features
columns = ['train_rmse', 'validate_rmse', 'test_rmse']
index = ['baseline', 'ols', 'lassolars', 'pf2_lr', 'SVM', 'GLM']
data = [[bl_train_rmse, '', ''],
        [lm_rmse, lm_rmse_v, ''],
        [lars_rmse, lars_rmse_v, ''],
        [lm_sq_rmse, lm_sq_rmse_v, ''],
        [svr_rmse, svr_rmse_v, ''],
        [glm_rmse, glm_rmse_v, '']]

print('SKB FEATURES')
print(f'Model beat baseline by {abs((lm_sq_rmse_t - bl_train_rmse)/bl_train_rmse)*100:.2f}%')
print(skb_features)
pd.DataFrame(columns=columns, data=data, index=index).sort_values(by='train_rmse')
# -

# ---

# ## Models using RFE Features

# +
# OLS Model
lm_rmse, lm_rmse_v, lm_rmse_t = linear_regression_model(
    X_tr_rfe, y_train, X_v_rfe, y_validate, X_te_rfe, y_test)

# LASSO + LARS Model
lars_rmse, lars_rmse_v, lars_rmse_t = lasso_lars(
    X_tr_rfe, y_train, X_v_rfe, y_validate, X_te_rfe, y_test)

# Polynomial Features (squared, deg=2) with Linear Regression
lm_sq_rmse, lm_sq_rmse_v, lm_sq_rmse_t, lm_sq_pred_t = polynomial_regression(
    X_tr_rfe, y_train, X_v_rfe, y_validate, X_te_rfe, y_test, 'Squared', degree=2)

# Support Vector Regression with RBF Kernel
svr_rmse, svr_rmse_v, svr_rmse_t = svr_model(
    X_tr_rfe, y_train, X_v_rfe, y_validate, X_te_rfe, y_test, 'RBF')

# General Linearized Model with Normal Distribution
glm_rmse, glm_rmse_v, glm_rmse_t, glm_pred_t = glm_model(
    X_tr_rfe, y_train, X_v_rfe, y_validate, X_te_rfe, y_test, 'Normal', alpha=0, power=1)
# -

# ---

# ### Evaluate RFE Models

# +
# Dataframe Evaluation for Models with RFE features
columns = ['train_rmse', 'validate_rmse', 'test_rmse']
index = ['baseline', 'ols', 'lassolars', 'pf2_lr', 'SVM', 'GLM']
data = [[bl_train_rmse, '', ''],
        [lm_rmse, lm_rmse_v, ''],
        [lars_rmse, lars_rmse_v, ''],
        [lm_sq_rmse, lm_sq_rmse_v, ''],
        [svr_rmse, svr_rmse_v, ''],
        [glm_rmse, glm_rmse_v, '']]

print('RFE FEATURES')
print(f'Model beat baseline by {abs((lm_sq_rmse_t - bl_train_rmse)/bl_train_rmse)*100:.2f}%')
print(rfe_features)
pd.DataFrame(columns=columns, data=data, index=index).sort_values(by='train_rmse')
# -

# ---

# ## Models using COMBO: 7 Features

# +
# OLS Model
lm_rmse, lm_rmse_v, lm_rmse_t = linear_regression_model(
    X_tr_top, y_train, X_v_top, y_validate, X_te_top, y_test)

# LASSO + LARS Model
lars_rmse, lars_rmse_v, lars_rmse_t = lasso_lars(
    X_tr_top, y_train, X_v_top, y_validate, X_te_top, y_test)

# Polynomial Features (squared, deg=2) with Linear Regression
lm_sq_rmse, lm_sq_rmse_v, lm_sq_rmse_t, lm_sq_pred_t = polynomial_regression(
    X_tr_top, y_train, X_v_top, y_validate, X_te_top, y_test, 'Squared', degree=2)

# Support Vector Regression with RBF Kernel
svr_rmse, svr_rmse_v, svr_rmse_t = svr_model(
    X_tr_top, y_train, X_v_top, y_validate, X_te_top, y_test, 'RBF')

# General Linearized Model with Normal Distribution
glm_rmse, glm_rmse_v, glm_rmse_t, glm_pred_t = glm_model(
    X_tr_top, y_train, X_v_top, y_validate, X_te_top, y_test, 'Normal', alpha=0, power=1)
# -

# ---

# ### Evaluate "Combo" Models

# +
# Dataframe Evaluation for Models with COMBO features
# Only this table reports a test RMSE (for the chosen polynomial model).
columns = ['train_rmse', 'validate_rmse', 'test_rmse']
iindex = ['Baseline', 'Linear Regression', 'lassolars', 'Polynomial 2nd Degree', 'SVM', 'GLM']
data = [[bl_train_rmse, '', ''],
        [lm_rmse, lm_rmse_v, ''],
        [lars_rmse, lars_rmse_v, ''],
        [lm_sq_rmse, lm_sq_rmse_v, lm_sq_rmse_t],
        [svr_rmse, svr_rmse_v, ''],
        [glm_rmse, glm_rmse_v, '']]

print('COMBO TOP FEATURES')
print(f'Model beat baseline by {abs((lm_sq_rmse_t - bl_train_rmse)/bl_train_rmse)*100:.2f}%')
print(top_feats)
mod_perf = pd.DataFrame(columns=columns, data=data, index=iindex).sort_values(by='train_rmse')
mod_perf
# -

# <div class="alert alert-block alert-info">
# <b>Takeaways</b>:
# <li>COMBO features are the optimum combination for best model performance. These feature combinations are significant</li>
# <li>Second Degree Polynomial Features with Linear Regression is best performing model among all feature sets</li>
# <li>Final model predicts 6% better than baseline with RMSE of 21.52 on test data(unseen).</li>
# </div>

# ---

# ## Model Performance Plot

from model import polyreg_predictions, plot_polyreg

# Returns the Polynomial Regression Model Object, the predictions,
# and the Polynomial Features Object
lm_sq, y_pred_test, pf = polyreg_predictions(X_tr_top, X_te_top, y_train)

# Plots the baseline, Polynomial Regression Model Predictions,
# and the line of best fit
plot_polyreg(y_test, lm_sq_pred_t, y_pred_test, bl)

# <div class="alert alert-block alert-info">
# <b>Takeaways</b>:
# Model is plotted against the baseline with predictions
# </div>

# ---

# ## Top Drivers of the 2nd Degree PR Model

from model import get_important_feats, plot_top_feats

# extracts the most influential features by
# creating a dataframe of the most important features/combination,
# with their rank
feature_importances = get_important_feats(lm_sq, pf, X_tr_top)

# top 5 positive drivers of popularity
feature_importances.head()

# Bottom 5 negative drivers of popularity
feature_importances.tail()

# plots the dataframes above
plot_top_feats(feature_importances)

# <div class="alert alert-block alert-info">
# <b>Takeaways</b>:
# <li><i>Polynomial Features</i> creates new features: combinations of each input variable, and each variable squared. This allows non-linear relationships to add value.</li>
# <li> <b>+ Positive</b> drivers on the left</li>
# <li> <b>- Negative</b> drivers on the right</li>
# <li>Some of the feature combinations are shown that may have a more than linear relationship with song popularity</li>
# </div>

# # Conclusions <a class="anchor" id="conclusions"></a>
# <div class="alert alert-block alert-info">
#
# * Key drivers of popularity include **danceability with speechiness**, whether a track is **explicit**, **energy**, **track number**, and whether a track has **featured artist(s)** or not.
#
# * The best performing model is **Second Degree Polynomial** with an **RMSE of 21.5236** on the test dataset.
#
# * The most popular songs were **2 minutes long**.
#
# </div>
SpotiScry_Final_Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare long-term clustering methods (medoids vs. centroids) across error
# metrics, and plot how each metric changes with the number of clusters.

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt


def rename_metrics(df):
    """Replace raw metric names in df['metric'] with their LaTeX display labels.

    Uses ``Series.replace`` rather than ``Series.map`` so that any metric
    name not covered by the lookup is left unchanged instead of being
    silently turned into NaN (``map`` sends missing keys to NaN).
    Mutates ``df`` in place and returns it for chaining.
    """
    df['metric'] = df['metric'].replace({"correlation": "$CE_{av}$",
                                         "nrmse dc": "$NRMSE_{av}$",
                                         "rae dc": "$REE_{av}$"})
    return df


# Long-term comparison results: one row per (year, method, n_clusters, metric).
results = pd.read_csv("/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/temporal_granularity/reports/data/long_term_method_comparison/long_term_method_comparison.csv")
results = rename_metrics(results)
results

# One facet per year for the medoids method, one line per metric.
g = sns.FacetGrid(data=results[results.method=="medoids"], col="year", hue="metric", col_wrap=3, margin_titles=True, legend_out=True)
g.map(sns.lineplot, "n_clusters", 'value')
g.add_legend()
# g = g.map(sns.lineplot, x="n_clusters", y="value", hue="metric", marker=".")

results.head()

# Average each metric over all years, per method and cluster count.
mean_error_results = results.groupby(['metric','n_clusters','method']).value.mean()
mean_error_results = pd.DataFrame(mean_error_results)
mean_error_results.reset_index(inplace=True)
mean_error_results

sns.lineplot(data=mean_error_results, x="n_clusters", y="value", hue='metric')

# Same per-year facets for the centroids method.
g = sns.FacetGrid(data=results[results.method=="centroids"], col="year", hue="metric", col_wrap=3, margin_titles=True, legend_out=True)
g.map(sns.lineplot, "n_clusters", 'value')
g.add_legend()

# Tag 2006 rows as the "_original" reference series.
original_year = results[results.year==2006]
original_year['method'] = original_year['method']+"_original"
original_year.head()
# mean_error_results.head()

# DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent.
mean_error_results = pd.concat([mean_error_results, original_year.drop(columns="year")])
mean_error_results = mean_error_results.rename(columns={"metric":"Metric", "method":"Method"})
# NOTE(review): Series.map sends the freshly appended "*_original" methods to
# NaN because they are missing from this lookup, so those rows drop out of the
# plot's hue. Kept as-is to preserve the existing figure -- confirm whether the
# original-year series was meant to appear before changing this to ``replace``.
mean_error_results['Method'] = mean_error_results['Method'].map({'centroids':'Centroids', 'medoids':"Medoids"})

sns.set(font_scale=1.5)
sns.set_style("whitegrid")
g = sns.catplot(data=mean_error_results, kind="point", x="n_clusters", y="value", hue="Method", col="Metric", sharey=False)
g.set(ylim=(0))
g.set(xlabel='Number of clusters', ylabel='Value')
g.savefig('/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/temporal_granularity/reports/figures/clusters_compared.pdf')

# Repeat the comparison with runs that used only half of each year.
results_half = pd.read_csv('/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/temporal_granularity/reports/data/long_term_method_comparison/long_term_method_comparison_half_years.csv')
results_half = rename_metrics(results_half)
results_half['method'] = results_half['method'] + "_half"
mean_results_with_half = pd.concat([mean_error_results, results_half])
mean_results_with_half

# NOTE(review): hue/col reference the lowercase "method"/"metric" columns,
# which at this point only the "_half" rows still carry (the other rows were
# renamed to Method/Metric above) -- verify the intended columns for this plot.
g = sns.catplot(data=mean_results_with_half, kind="point", x="n_clusters", y="value", hue="method", col="metric", sharey=False)
g.set(ylim=(0))
g.set(xlabel='Number of clusters', ylabel='Value')
notebooks/compare-methods/.ipynb_checkpoints/10.0-ajmk-compare-long-term-metrics-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# PUBG match data: clean the raw training set, engineer team-level features,
# and fit an XGBoost regressor to predict `winPlacePerc`.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle

# Silence noisy warnings for this exploratory notebook.
pd.options.mode.chained_assignment = None
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

root = '/Users/13579/Desktop/Git/TIL/Data Science/data/pubg/'
wrangled = pd.read_csv(root + 'train.csv')

# ### Drop row with NaN value for winPlacePerc
wrangled.drop(2744604, inplace=True)

# ### Anomaly
# Players who registered kills without moving at all (likely cheaters/bots).
wrangled['killsWithoutMoving'] = (wrangled['kills'] > 0) & (wrangled['walkDistance'] == 0)
wrangled = wrangled.drop(wrangled[wrangled['killsWithoutMoving']==True].index)
wrangled = wrangled.drop(columns=['killsWithoutMoving'])

# Drop rows with 'flare', 'crash' or 'normal' (custom/event) match types.
for _match_type in ('flarefpp', 'flaretpp', 'crashfpp', 'crashtpp',
                    'normal-squad-fpp', 'normal-duo-fpp', 'normal-solo-fpp',
                    'normal-squad', 'normal-duo', 'normal-solo'):
    wrangled = wrangled[wrangled.matchType != _match_type]


def _add_group_agg(df, col, agg, name):
    """Merge a per-team (`groupId`) aggregate of `col` onto every row as `name`."""
    per_group = df.groupby(['groupId'])[col].agg(agg).to_frame(name)
    return df.merge(per_group, how='left', on=['groupId'])


# ### Group
# Team size (number of player rows sharing a groupId).
wrangled = wrangled.merge(wrangled.groupby(['groupId']).size().to_frame('players_in_team'), how='left', on=['groupId'])

# Team totals / maxima / means for the core combat, movement and support stats.
for _col, _agg, _name in [
    ('kills', 'sum', 'total_kills'),
    ('kills', 'max', 'max_kills'),
    ('DBNOs', 'sum', 'total_DBNOs'),
    ('DBNOs', 'max', 'max_DBNOs'),
    ('damageDealt', 'sum', 'total_damageDealt'),
    ('damageDealt', 'max', 'max_damageDealt'),
    ('assists', 'sum', 'total_assists'),
    ('assists', 'mean', 'avg_assists'),
    ('assists', 'max', 'max_assists'),
    ('headshotKills', 'max', 'max_headshotKills'),
    ('killPlace', 'max', 'max_killPlace'),
    ('heals', 'max', 'max_heals'),
    ('boosts', 'max', 'max_boosts'),
    ('walkDistance', 'max', 'max_walk'),
    ('rideDistance', 'max', 'max_ride'),
    ('swimDistance', 'max', 'max_swim'),
    ('weaponsAcquired', 'max', 'max_weapons'),
    ('revives', 'mean', 'avg_revives'),
    ('teamKills', 'mean', 'avg_teamKills'),
]:
    wrangled = _add_group_agg(wrangled, _col, _agg, _name)

# ### Create features
# Under-manned duos/squads (e.g. a solo player queued into a squad match).
wrangled['less'] = 0
wrangled.loc[((wrangled['matchType'] == 'duo') | (wrangled['matchType'] == 'duo-fpp')) & (wrangled['players_in_team'] < 2), 'less'] = 1
wrangled.loc[((wrangled['matchType'] == 'squad') | (wrangled['matchType'] == 'squad-fpp')) & (wrangled['players_in_team'] < 4), 'less'] = 1

# matchType one hot encoding.
# NOTE: the concat below duplicates the dummy columns; the duplicates are
# dropped again in the "Drop Features" section, so the net result is one copy.
wrangled = pd.get_dummies(wrangled, columns=['matchType'])
matchType_encoding = wrangled.filter(regex='matchType')
wrangled = pd.concat([wrangled, matchType_encoding], axis=1)

# totalDistance (all movement modes combined), plus the team maximum.
wrangled['totalDistance'] = wrangled['walkDistance'] + wrangled['swimDistance'] + wrangled['rideDistance']
wrangled = _add_group_agg(wrangled, 'totalDistance', 'max', 'max_distance')


def _add_ratio(df, name, numerator, denominator):
    """Add `df[numerator] / df[denominator]` as `name`, mapping NaN and inf to 0."""
    df[name] = df[numerator] / df[denominator]
    df[name].fillna(0, inplace=True)           # replace NaN value (0/0)
    df[name].replace(np.inf, 0, inplace=True)  # replace infinite value (x/0)


_add_ratio(wrangled, 'headshotKills_over_kills', 'max_headshotKills', 'max_kills')
_add_ratio(wrangled, 'killPlace_over_maxPlace', 'max_killPlace', 'maxPlace')
_add_ratio(wrangled, 'totalDistance_over_heals', 'max_distance', 'max_heals')
_add_ratio(wrangled, 'totalDistance_over_boosts', 'max_distance', 'max_boosts')
_add_ratio(wrangled, 'totalDistance_over_kills', 'max_distance', 'max_kills')
_add_ratio(wrangled, 'totalDistance_over_damage', 'max_distance', 'max_damageDealt')
_add_ratio(wrangled, 'totalDistance_over_DBNOs', 'max_distance', 'max_DBNOs')
_add_ratio(wrangled, 'totalDistance_over_weapons', 'max_distance', 'max_weapons')

# teamplay
wrangled['teamwork'] = wrangled['assists'] + wrangled['revives'] - wrangled['teamKills']

# ### Drop Features
# Identifiers carry no predictive signal.
wrangled.drop(columns = ['Id'], inplace=True)
wrangled.drop(columns = ['groupId'], inplace=True)
wrangled.drop(columns = ['matchId'], inplace=True)

# Points columns are mostly zero, so they are dropped.
wrangled.drop(columns = ['killPoints'], inplace=True)
wrangled.drop(columns = ['rankPoints'], inplace=True)
wrangled.drop(columns = ['winPoints'], inplace=True)

# Drop the duplicated dummy columns created above and re-number the rows.
wrangled = wrangled.loc[:,~wrangled.columns.duplicated()]
wrangled = wrangled.reset_index()
wrangled.drop(columns = ['index'], inplace=True)

# ## Model
import sklearn
from sklearn import metrics

# +
# Metric used for the PUBG competition (Mean Absolute Error (MAE))
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression

# Function to print the MAE (Mean Absolute Error) score
# This is the metric used by Kaggle in this competition
def print_score(m : LinearRegression):
    """Print train/validation MAE for fitted model `m` (reads module-level splits)."""
    res = ['mae train: ', mean_absolute_error(m.predict(X_train), y_train),
           'mae val: ', mean_absolute_error(m.predict(X_valid), y_valid)]
    if hasattr(m, 'oob_score_'):
        res.append(m.oob_score_)
    print(res)
# -

import xgboost

# Work on a 100k-row sample to keep fitting fast.
sample_size = 100000
sample = wrangled.iloc[0:sample_size]
original = sample.drop(columns=['winPlacePerc'])
target = sample['winPlacePerc']

# +
# Function for splitting training and validation data
def split_vals(a, n : int):
    """Split `a` into (first n rows, remaining rows), both copied."""
    return a[:n].copy(), a[n:].copy()

val_perc = 0.1  # % to use for validation set
n_valid = int(val_perc * sample_size)
n_trn = len(original)-n_valid

# Split data
raw_train, raw_valid = split_vals(sample, n_trn)
X_train, X_valid = split_vals(original, n_trn)
y_train, y_valid = split_vals(target, n_trn)
# -

model = xgboost.XGBRegressor(random_state=42, n_estimators=400, subsample = 0.8, colsample_bytree=1,max_depth=7, learning_rate=0.08)
model.fit(X_train, y_train)
print_score(model)

# ## Feature Importance
def xgb_feat_importance(m, df):
    """Return `df`'s columns ranked by `m.feature_importances_`, descending."""
    return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}).sort_values('imp', ascending=False)

# +
# What are the most predictive features according to our basic random forest model
fi = xgb_feat_importance(model, original)

# Plot a feature importance graph for the 20 most important features
fi.plot('cols', 'imp', figsize=(14,15), legend=False, kind = 'barh', title='Important features')
# -
.ipynb_checkpoints/learn_by_group_all-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.2 64-bit
#     language: python
#     name: python38264bita9fd037794cf47ee89e35e0adf3d53bd
# ---

# +
# importing required libraries
import calendar
import json
import os
from datetime import date

import pandas as pd
import plotly.express as px
import tweepy
from dotenv import load_dotenv

# +
# Accessing Twitter API - Ask Team Lead for API Keys
load_dotenv()

TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")

auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)

api = tweepy.API(auth, wait_on_rate_limit=True)
# -

# Using Chris Albon's account data for example
chrisalbon_followers = api.followers_ids('chrisalbon')


class data_wrangling:
    """Find when and with what content the followers of a given Twitter
    handle (``user_id``) engage.

    Used in the flask_app to give a response of the optimal time for the
    given Twitter user to post to optimize engagement.
    """

    # Defining api as a class variable
    # (shared among all instances of the class)
    api = tweepy.API(auth, wait_on_rate_limit=True)

    def __init__(self, user_id, follower_count=10):
        # instance variables (unique for each instance of the class)
        self.user_id = user_id                # handle whose followers we analyse
        self.follower_count = follower_count  # how many followers to sample

    @staticmethod
    def min_bin(mins):
        """Bucket minute values into half-hour bins: '00' for 0-29, '30' for 30-59.

        Marked ``@staticmethod`` (it takes no ``self``); the original bare
        function only worked because it was always called via the class.
        """
        binned = []
        for _min in mins:
            if _min < 30:
                binned.append('00')
            else:
                binned.append('30')
        return binned

    def followers_ids(self):
        """Return the ids of the user's followers (uses module-level ``api``)."""
        followers_ids = api.followers_ids(self.user_id)
        return followers_ids

    def get_follower_data(self, followers_ids):
        """Collect (time, text) of tweets recently liked by the first
        ``follower_count`` followers; return a DataFrame with half-hour bins.
        """
        times = []
        text = []
        # Loop through the follower_count (int) defined in instance
        for followers in followers_ids[:self.follower_count]:
            # Try/except to bypass the error raised when we request a
            # protected user's information
            try:
                favorited_tweets = api.favorites(id=f'{followers}')
                # For each tweet that the follower liked, record it
                for tweet in range(len(favorited_tweets)):
                    status = favorited_tweets[tweet]
                    # Convert to string
                    json_str = json.dumps(status._json)
                    # Deserialise string into python object
                    parsed = json.loads(json_str)
                    # Gets the created_at (time)
                    # and the text from the tweets the followers liked
                    times.append(parsed.get('created_at'))
                    text.append(parsed.get('text'))
            except tweepy.TweepError:
                pass
        # Separates hours, mins into lists to be put into a df
        # (leave hours as str for put request to backend); created_at is
        # assumed to look like 'Wed Oct 10 20:19:24 +0000 2018' — TODO confirm
        hours, mins = [i[11:13] for i in times], [int(i[14:16]) for i in times]
        _min_bin = data_wrangling.min_bin(mins)
        # creates df with times and text
        df = pd.DataFrame(data={'hours': hours, 'mins': mins,
                                'min_bin': _min_bin, 'text': text})
        df['time'] = df['hours'] + ':' + df['min_bin'].astype(str)
        return df

    def optimal_time(self, df):
        """Find the optimal time in 30 min bins and return it in UTC format,
        e.g. 'Tue Apr 21 2020 20:00:00 UTC+0000'.
        """
        # Today's date
        my_date = date.today()
        # Today's date reformatted for UTC
        day = my_date.strftime("%b %d %Y")
        # Day of week + day
        day_and_date = calendar.day_name[my_date.weekday()][:3] + ' ' + day
        # Optimal time recommendation (most frequent half-hour bin)
        optimal_time = df['time'].value_counts().idxmax()
        return day_and_date + ' ' + optimal_time + ':00 UTC+0000'


# Check to make sure the function works well
dw = data_wrangling('LambdaSchool', 5)
fi = dw.followers_ids()
get_follower_data = dw.get_follower_data(fi)
dw.optimal_time(get_follower_data)

# Check function's documentation
data_wrangling.__doc__

# Checking optimal time function
dw.optimal_time(get_follower_data)

# BUG FIX: the original notebook called an undefined helper
# ``military_time(get_follower_data['time'])`` here, which raised a
# NameError at runtime. The binned times are already available directly:
get_follower_data['time']

# Checking shape of the dataset
get_follower_data.shape

# Convert the dataset to csv
get_follower_data.to_csv("get_follower_data.csv")

# Checking dataset's format
get_follower_data

# Visualizing tweet hours
fig = px.bar(get_follower_data, x='hours')
fig.show()
python_notebooks/data_wrangling_time_and_content.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Seaborn Visualization Library

# ## Introduction and Installation

# Mandatory dependencies:
# Numpy, Scipy, Pandas, Matplotlib
# Statsmodels (recommended, not mandatory)

# ### Installation and Setup
# Seaborn can be installed into either a pip or a conda environment:
#
# pip install seaborn
#
# conda install -c conda-forge seaborn
#
# Alternatively, install the whole Anaconda distribution
# [here](https://www.anaconda.com/products/individual)

# Bring the library into the session under its conventional alias
import seaborn as sns

# Report which seaborn release is installed
sns.__version__
Seaborn Visualization Library/1) IntroducingSeaborn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # CAUSAL GRAPHS IN ACTION

from dag import *

# This notebook imports our coded dag module and lets the user test its
# functionality by inputting different variations of a DAGs

# We implemented an algorithm that, given a DAG, an exposure variable, an
# outcome variable, and a proposed set of adjustment variables, returns true
# if the set is a valid adjustment set and false otherwise. The user first
# needs to instantiate a `Dag` class, which takes the following inputs:
#
# scm: Structural causal model - dictionary, where key: cause (str),
# values (list of str): effected variables
# exposure: string, e.g. "D"
# outcome: string, e.g. "Y"
#
# For the `scm` *all* nodes need to be supplied as keys to the dictionary.
# If they have no descendants, they should be assigned an empty list.
# To test for valid adjustment set the user just needs to supply the
# proposed set:
#
# proposed_set: string or list of strings, e.g. "C" or ["C", "X"] or "".
#
# Path finding has been implemented from scratch. In case of potential
# backdoor paths the user can also check if/how their proposed set affects
# those paths `dag.final_backdoor_paths` and whether paths have been closed
# or not `dag.back_door_closed`.

# ## Inputs for the function
#
# Below we provide a simple example:

# Structural causal model: each key is a node, each value lists the nodes it
# points to (its direct effects); childless nodes map to an empty list.
scm = {
    "D": ["Y", "X"],
    "Y": ["F"],
    "F": [],
    "X": [],
    "G": ["D", "Y", "I"],
    "H": ["F"],
    "I": []
}

outcome = "Y"    # variable whose causal response we want to estimate
exposure = "D"   # treatment/exposure variable

# instantiate the class:
dag = Dag(scm, outcome, exposure)

# ## Visualizing the graph
#
# The `Dag` class has a `plot` method to draw a chart of the DAG. This uses
# helper functions from the `networks` library.

dag.plot()

# ## Check if valid adjustment set
#
# Now let us check a few examples of proposed adjustment sets for this
# particular graph. The path that we need to worry about is the backdoor
# path leading through node *G*.

# Propose a VAS:
proposed_set = ["G"]

dag.is_valid_adjustment_set(proposed_set)

# *G* is a VAS since it closes the backdoor path (indicated through a value
# of `1` for *G*). A backdoor path is closed if the sum of all node values
# is at least `1`.

print(dag.final_backdoor_paths)
print(dag.back_door_closed)

# You will note that `dag` also stores `{'G': inf, 'I': inf}`. This is a
# path leading through a parent of the exposure variable but never ends in
# the outcome variable. Adjusting for nodes on this path is always valid,
# hence their corresponding values default to `inf`. It is not strictly
# necessary to store these paths but turned out to be a convenient way of
# accounting for these valid adjustment sets.

# Now let's try the empty set:

proposed_set = []

dag.is_valid_adjustment_set(proposed_set)

# As expected this is not a valid adjustment set, since it leaves the
# backdoor path through *G* open:

print(dag.final_backdoor_paths)
print(dag.back_door_closed)
causal_graphs_in_action.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Init Spark

# +
import findspark
findspark.init()

from pyspark.sql import SparkSession

# Local Spark session for the regression walkthrough.
spark = SparkSession \
    .builder \
    .appName("Regression") \
    .getOrCreate()
# -

# # Get Data
# https://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set

# !wget https://raw.githubusercontent.com/subashgandyer/datasets/main/Real%20estate.csv -O real_estate.csv

# # Path Setup

import os
path = os.getcwd()
print(path)

# # Read Data
# Inspired from [anujsyal.com](https://anujsyal.com/introduction-to-pyspark-ml-lib-build-your-first-linear-regression-model)

real_estate = spark.read.option("inferSchema", "true").csv(
    f"file://{path}/real_estate.csv", header=True)

# # Check Data

real_estate.show(3)

# Attribute Information:
#
# The inputs are as follows
# - X1 =the transaction date (for example, 2013.250=2013 March, 2013.500=2013 June, etc.)
# - X2 =the house age (unit: year)
# - X3 =the distance to the nearest MRT station (unit: meter)
# - X4 =the number of convenience stores in the living circle on foot (integer)
# - X5 =the geographic coordinate, latitude. (unit: degree)
# - X6 =the geographic coordinate, longitude. (unit: degree)
#
# The output is as follow
# - Y = house price of unit area (10000 New Taiwan Dollar/Ping, where Ping is a local unit, 1 Ping = 3.3 meter squared)

real_estate.printSchema()

# # Features

# +
from pyspark.ml.feature import VectorAssembler

# Assemble the predictor columns into a single 'features' vector column.
assembler = VectorAssembler(inputCols=[
    'X1 transaction date',
    'X2 house age',
    'X3 distance to the nearest MRT station',
    'X4 number of convenience stores'],
    outputCol='features')

data_set = assembler.transform(real_estate)
# -

data_set.select(['features', 'Y house price of unit area']).show(2, False)

# 70/30 train/test split
(train_data, test_data) = data_set.randomSplit([0.7, 0.3])

train_data.count()

test_data.count()

# # Linear Regression

# +
from pyspark.ml.regression import LinearRegression

lr = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.5,
                      labelCol='Y house price of unit area')
# -

print(lr.explainParams())

lr_model = lr.fit(train_data)

lr_model.coefficients

# Evaluate on the held-out split.
test_stats = lr_model.evaluate(test_data)
print(f"RMSE: {test_stats.rootMeanSquaredError}")
print(f"MSE: {test_stats.meanSquaredError}")

# BUG FIX: a stray bare expression `test` stood here in the original cell
# and raised NameError when the notebook was run; removed.

lr_predictions = lr_model.transform(test_data)
lr_predictions.select("prediction", "Y house price of unit area").show(5)

# # Stop Spark

spark.stop()
V9/3_Linear_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from deepar.dataset.time_series import TimeSeries from deepar.model.lstm import DeepARLearner import pandas as pd sunspots_df = pd.read_csv('../datasets/seed_datasets_current/56_sunspots/TRAIN/dataset_TRAIN/tables/learningData.csv') sunspots_df['year'] = pd.to_timedelta(pd.to_datetime(sunspots_df['year'], format='%Y')).dt.total_seconds() sunspots_ds_one = TimeSeries(sunspots_df, target_idx = 4, timestamp_idx = 1, index_col=0) # + active="" # # + from deepar.dataset.time_series import TimeSeries, TimeSeriesTest import pandas as pd import numpy as np from deepar.model.learner import DeepARLearner sunspots_df = pd.read_csv('../datasets/seed_datasets_current/56_sunspots/TRAIN/dataset_TRAIN/tables/learningData.csv') # sunspots_df['sunspots'][5] = np.nan # sunspots_df['sunspots'][10] = np.nan # sunspots_df['sunspots'][15] = np.nan # sunspots_df['sunspots'][20] = np.nan # sunspots_df['sunspots'][25] = np.nan sunspots_df['year'] = pd.to_timedelta(pd.to_datetime(sunspots_df['year'], format='%Y')).dt.total_seconds() sunspots_ds_one = TimeSeries(sunspots_df, target_idx = 4, timestamp_idx = 1, index_col=0) learner = DeepARLearner(sunspots_ds_one, verbose=1) # !rm -r ./tb_test learner.fit(epochs = 1, batches = 10, early_stopping = False, checkpoint_dir = "./tb_test") test_df = pd.read_csv('../datasets/seed_datasets_current/56_sunspots/TEST/dataset_TEST/tables/learningData.csv') test_df['year'] = pd.to_timedelta(pd.to_datetime(test_df['year'], format='%Y')).dt.total_seconds() test_ds = TimeSeriesTest(test_df, sunspots_ds_one, target_idx = 4, timestamp_idx = 1, index_col=0) preds = learner.predict(test_ds, horizon = None, samples = 1, include_all_training = True).reshape(-1) scores = 
pd.read_csv('../datasets/seed_datasets_current/56_sunspots/SCORE/dataset_SCORE/tables/learningData.csv') # evaluate compared to D3M test set from sklearn.metrics import mean_squared_error from math import sqrt rms = sqrt(mean_squared_error(scores['sunspots'], preds)) print(f'rms: {rms}') # + from deepar.dataset.time_series import TimeSeries, TimeSeriesTest import pandas as pd import numpy as np from deepar.model.learner import DeepARLearner pop_df = pd.read_csv('../datasets/seed_datasets_current/LL1_736_population_spawn/TRAIN/dataset_TRAIN/tables/learningData.csv') pop_df = pop_df.set_index(['species', 'sector']) pop_df['group'] = pop_df.index pop_df['group'] = pop_df['group'].apply(lambda x: " ".join(x)) pop_df = pop_df.reset_index(drop=True) pop_df['day'] = pd.to_timedelta(pd.to_datetime(pop_df['day'], unit='D')).dt.total_seconds() pop_ds = TimeSeries(pop_df, target_idx = 2, timestamp_idx = 1, index_col=0, grouping_idx = 3, count_data = True) learner = DeepARLearner(pop_ds, verbose = 1) learner.fit(epochs = 1, batches = 10, early_stopping = False) test_df = pd.read_csv('../datasets/seed_datasets_current/LL1_736_population_spawn/TEST/dataset_TEST/tables/learningData.csv') test_df = test_df.set_index(['species', 'sector']) test_df['group'] = test_df.index test_df['group'] = test_df['group'].apply(lambda x: " ".join(x)) test_df = test_df.reset_index(drop=True) test_df['day'] = pd.to_timedelta(pd.to_datetime(test_df['day'], unit='D')).dt.total_seconds() test_ds = TimeSeriesTest(test_df, pop_ds, target_idx = 2, timestamp_idx = 1, index_col=0, grouping_idx = 3) preds = learner.predict(test_ds, horizon = None, samples = 1, include_all_training = True) # evaluate compared to D3M test set scores = pd.read_csv('../datasets/seed_datasets_current/LL1_736_population_spawn/SCORE/dataset_SCORE/tables/learningData.csv') from sklearn.metrics import mean_squared_error from math import sqrt print(scores['count'].shape) # rms = sqrt(mean_squared_error(scores['count'], preds)) # 
print(f'rms: {rms}') # + # %matplotlib inline from numpy.random import normal import pandas as pd from matplotlib import pyplot as plt import numpy as np window = 20 features = 9 def get_sample_prediction(sample, fn): sample = np.array(sample).reshape(1, window, features) output = fn([sample]) samples = [] for mu,sigma in zip(output[0].reshape(window), output[1].reshape(window)): samples.append(normal(loc=mu, scale=np.sqrt(sigma), size=1)[0]) return np.array(samples) batch = sunspots_ds_one.next_batch(1, window) ress = [] for i in range(300): ress.append(get_sample_prediction(batch[0], dp_model.predict_theta_from_input)) res_df = pd.DataFrame(ress).T tot_res = res_df plt.plot(batch[1].reshape(window), linewidth=6) tot_res['mu'] = tot_res.apply(lambda x: np.mean(x), axis=1) tot_res['upper'] = tot_res.apply(lambda x: np.mean(x) + np.std(x), axis=1) tot_res['lower'] = tot_res.apply(lambda x: np.mean(x) - np.std(x), axis=1) tot_res['two_upper'] = tot_res.apply(lambda x: np.mean(x) + 2*np.std(x), axis=1) tot_res['two_lower'] = tot_res.apply(lambda x: np.mean(x) - 2*np.std(x), axis=1) plt.plot(tot_res.mu, 'bo') plt.plot(tot_res.mu, linewidth=2) plt.fill_between(x = tot_res.index, y1=tot_res.lower, y2=tot_res.upper, alpha=0.5) plt.fill_between(x = tot_res.index, y1=tot_res.two_lower, y2=tot_res.two_upper, alpha=0.5) plt.title('Prediction uncertainty') plt.show() # + from deepar.model.loss import negative_binomial_likelihood from deepar.model.loss import gaussian_likelihood import tensorflow as tf sigma = 1.0 sigma1 = 2.0 sigma2 = 4.0 g = gaussian_likelihood([sigma] * 3) b = negative_binomial_likelihood([sigma] * 3) b1 = negative_binomial_likelihood([sigma1] * 3) b2 = negative_binomial_likelihood([sigma2] * 3) with tf.Session() as sess: y_true = tf.convert_to_tensor([1,1,1], dtype=tf.float32) y_pred0 = tf.convert_to_tensor([1,1,1], dtype=tf.float32) y_pred1 = tf.convert_to_tensor([2,2,2], dtype=tf.float32) y_pred2 = tf.convert_to_tensor([4,4,4], dtype=tf.float32) y_pred3 = 
tf.convert_to_tensor([8,8,8], dtype=tf.float32) print(b(y_true, y_pred0).eval()) print(b(y_true, y_pred1).eval()) print(b(y_true, y_pred2).eval()) print(b(y_true, y_pred3).eval()) print(b1(y_true, y_pred0).eval()) print(b2(y_true, y_pred0).eval()) # - x = pd.to_datetime("21/11/06 16:30", format="%d/%m/%y %H:%M") x.timetuple().tm_yday df.groupby('cat').apply(lambda x: x.count())['cat'] df.groupby('cat').sum().reset_index() for cat, c_d in df.groupby('cat'): c_d['age'] = c_d.index print(c_d) df = df.drop('age', axis=1) df['age'] = df.groupby('cat').cumcount() df['age1'] = range(df.shape[0]) df from sklearn.preprocessing import StandardScaler df[[0,1]] = StandardScaler().fit_transform(df[[0,1]]) 1 + df.groupby('cat')[0].agg('mean')
Experimentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kap2fox/Intro-to-Robotics/blob/main/Robot_Navigation_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="7He0UkcuWQhD"
# # Simulating motion towards a target point
# Import the required libraries.

# + id="cv4xXXicWAe2"
import numpy as np                  # multi-dimensional array support
import matplotlib.pyplot as plt     # data visualisation
from scipy.integrate import odeint  # ODE integration
import math as m                    # scalar math functions

# + [markdown] id="JFnCLTtMWQ5A"
# Define the parameters and the time grid.

# + id="MVb6DnmtWFdd"
T_m = 0.08              # electromechanical motor time constant
k_e = 0.5               # motor design (back-EMF) constant
n = 15001               # number of simulation steps
t = np.linspace(0,15,n) # time grid: n samples from 0 to 15 seconds
B = 0.15                # distance between the wheels
r = 0.026               # wheel radius

# + [markdown] id="1dA-7WCQXlHZ"
# Describe the motor model.

# + id="VRgszDKdWICi"
def motor(state,t,u):
    # state = [angular speed, shaft angle]; u is the applied voltage
    speed, angle = state
    # first-order DC motor dynamics: [d(speed)/dt, d(angle)/dt]
    state_dt = [-speed/T_m + u/ (T_m * k_e), speed ]
    return state_dt

# + [markdown] id="2W1ylLTmXqTi"
# Set the initial values of the variables.

# + id="XH5c13QxWK1s"
state_l = [0 , 0]            # left motor state variable [speed, angle]
state_r = [0 , 0]            # right motor state variable [speed, angle]
control_l = np.zeros(n)      # left motor control-voltage history
control_r = np.zeros(n)      # right motor control-voltage history
theta_l = np.zeros(n)        # left motor shaft angles
theta_r = np.zeros(n)        # right motor shaft angles
x = np.zeros(n)              # robot X coordinates
y = np.zeros(n)              # robot Y coordinates
path = np.zeros(n)           # travelled path length
course = np.zeros(n)         # robot heading (course)
distance = np.zeros(n)       # distance to the target
bearing = np.zeros(n)        # bearing angles to the target
courseAngle = np.zeros(n)    # course angles (bearing minus heading)
X_ref = -1                   # target point X coordinate
Y_ref = -1                   # target point Y coordinate
distance [0] = m.sqrt((X_ref)**2+(Y_ref)**2)  # initial distance to the target
bearing [0] = m.atan2(Y_ref,X_ref)            # initial bearing to the target

# + [markdown] id="0_MbMXB7XRUK"
# Overall this program repeats the previous variant of computing the robot's
# position, but here we add a control action for driving to a given point.
# There are many possible solutions, from a simple P-controller to a
# nonlinear control law. The task can also be solved algorithmically — first
# turn towards the target, then drive forward — it all depends on the
# context and the surrounding obstacles.

# + id="kMREmxsKkhy6" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="819c590a-a8a0-43ee-923e-4501de6c9fc8"
for i in range(1,n):
    path[i] = (theta_r[i-1] + theta_l[i-1]) * r / 2          # travelled path
    course[i] = (theta_r[i-1] - theta_l[i-1]) * r / B        # robot heading
    x[i] = x[i-1] + (path[i]-path[i-1]) * m.cos(course[i])   # X coordinate
    y[i] = y[i-1] + (path[i]-path[i-1]) * m.sin(course[i])   # Y coordinate
    distance [i] = m.sqrt((X_ref - x[i])**2+(Y_ref - y[i])**2)  # distance to the target
    bearing [i] = m.atan2(Y_ref - y[i],X_ref - x[i])         # bearing to the target
    courseAngle [i] = bearing [i] - course [i]               # course angle
    if courseAngle[i] > np.pi:
        courseAngle[i] = courseAngle[i] - 2 * np.pi
    if courseAngle[i] < -np.pi:
        courseAngle[i] = courseAngle[i] + 2 * np.pi
    # wrap the course angle into the range [-pi, pi]
    #control_l [i] = 6 * np.cos (courseAngle [i]) * np.tanh(distance [i]) - 2 * (courseAngle [i])
    #control_r [i] = 6 * np.cos (courseAngle [i]) * np.tanh(distance [i]) + 2 * (courseAngle [i])
    control_l [i] = 4 * distance [i] - 4 * courseAngle [i]   # left motor control voltage
    control_r [i] = 4 * distance [i] + 4 * courseAngle [i]   # right motor control voltage
    if control_l[i] > 8:
        control_l[i] = 8
    if control_l[i] < -8:
        control_l[i] = -8
    if control_r[i] > 8:
        control_r[i] = 8
    if control_r[i] < -8:
        control_r[i] = -8
    # clamp the control voltage to the supply range
    motor_l = odeint(motor, state_l,[t[i-1],t[i]],args=(control_l [i],))  # integrate the left motor over one step
    state_l = motor_l[1,:]       # remember the left motor state
    theta_l[i] = motor_l[1,1]    # store the left motor shaft angle
    motor_r = odeint(motor, state_r,[t[i-1],t[i]],args=(control_r [i],))  # integrate the right motor over one step
    state_r = motor_r[1,:]       # remember the right motor state
    theta_r[i] = motor_r[1,1]    # store the right motor shaft angle

plt.plot(x, y)
Robot_Navigation_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib widget import ipywidgets from ipywidgets import widgets from ipywidgets.widgets import VBox,HBox from IPython.display import display # import numpy as np import pandas as pd from matplotlib import pyplot as plt import nmrExpt # import nmrglue as ng import tempfile import io # + # # %matplotlib inline # - # ### Global lists and dictionaries # + nmrExpts = {} upload_file_names = ["proton1D", "carbon1D", "cosy2D", "hsqc2D", "hmbc2D", "peakTable"] upload_file_widgets = {} upload_button_status = [""] # - # ### Define All Widgets # + upload_1D_proton = ipywidgets.widgets.FileUpload(multiple=True, description="1D Proton", description_tooltip="Upload a 1D proton NMR fid\nif varian, choose procar and fid simutaneously") upload_1D_carbon = ipywidgets.widgets.FileUpload(multiple=True, description="1D Carbon", description_tooltip="Upload a 1D carbon NMR fid\nif varian, choose procar and fid simutaneously") upload_2D_cosy = ipywidgets.widgets.FileUpload(multiple=True, description="2D Cosy", description_tooltip="Upload a 2D COSY NMR fid\nif varian, choose procar and fid simutaneously") upload_2D_hsqc = ipywidgets.widgets.FileUpload(multiple=True, description="2D HSQC", description_tooltip="Upload a 2D HSQC NMR fid\nif varian, choose procar and fid simutaneously") upload_2D_hmbc = ipywidgets.widgets.FileUpload(multiple=True, description="2D HMBC", description_tooltip="Upload a 2D HMBC NMR spectrum\n if varian, choose procar and fid simutaneously") upload_peak_table = ipywidgets.widgets.FileUpload(multiple=True, description="Peak Tables", description_tooltip="Upload MNOVA generated excel peak table ") varianBrukerRadioButtons = widgets.RadioButtons( options=['varian', 'bruker'], value='varian', layout={'width': '150px'}, # If the items' names are 
long description='Input Source', disabled=False ) output2 = ipywidgets.widgets.Output() upload_file_widgets["proton1D"] = { "nmrExptKey" : "proton1D", "uploadButton" : upload_1D_proton } upload_file_widgets["carbon1D"] = { "nmrExptKey" : "carbon1D", "uploadButton" : upload_1D_carbon } upload_file_widgets["cosy2D"] = { "nmrExptKey" : "cosy2D", "uploadButton" : upload_2D_cosy } upload_file_widgets["hsqc2D"] = { "nmrExptKey" : "hsqc2D", "uploadButton" : upload_2D_hsqc } upload_file_widgets["hmbc2D"] = { "nmrExptKey" : "hmbc2D", "uploadButton" : upload_2D_hmbc } upload_file_widgets["peakTable"] = { "nmrExptKey" : "peakTable", "uploadButton" : upload_peak_table } upload_buttons = [ widgets.Button(description=b) for b in upload_file_names ] btn_output = widgets.Output() mpl_output = widgets.Output(height="600px",width="600px") phase_output = widgets.Output() slider_width = '350px' phse_ftext_width = '75px' rp0_slider = widgets.FloatSlider(description='rp0', value=0, min=-360.0, max=360.0, step=0.1, continuous_update=False, layout={'width': slider_width}) rp0_ftext = widgets.FloatText( value=0, step=0.1, layout={'width': phse_ftext_width}) lp0_slider = widgets.FloatSlider(description='lp0', value=0, min=-360.0, max=360.0, step=0.1, continuous_update=False, layout={'width': slider_width}) lp0_ftext = widgets.FloatText( value=0, step=0.1, layout={'width': phse_ftext_width}) rp1_slider = widgets.FloatSlider(description='rp1', value=0, min=-360.0, max=360.0, step=0.1, continuous_update=False, layout={'width': slider_width}) rp1_ftext = widgets.FloatText( value=0, step=0.1, layout={'width': phse_ftext_width}) lp1_slider = widgets.FloatSlider(description='lp1', value=0, min=-360.0, max=360.0, step=0.1, continuous_update=False, layout={'width': slider_width}) lp1_ftext = widgets.FloatText( value=0, step=0.1, layout={'width': phse_ftext_width}) # - # ### Create Buttons to Display Spectra to phase correct # + # upload_buttons = [ widgets.Button(description=b) for b in 
upload_file_names ] def on_button_clicked(clicked): #set all buttons background to light gray for b in upload_buttons: b.style.button_color = 'lightgray' i = upload_file_names.index(clicked.description) upload_button_status[0]=clicked.description upload_buttons[i].style.button_color = 'lightgreen' if clicked.description in ['proton1D', 'carbon1D']: nmrExpts[clicked.description].plot_1D(fig=nmrfig,ax=ax1D1H) # set phase slider values from current spectra for 1D data rp0_slider.value = nmrExpts[clicked.description].udic[0]['rp'] lp0_slider.value = nmrExpts[clicked.description].udic[0]['lp'] rp0_ftext.value = nmrExpts[clicked.description].udic[0]['rp'] lp0_ftext.value = nmrExpts[clicked.description].udic[0]['lp'] elif clicked.description in ['cosy2D', 'hsqc2D', 'hmbc2D']: nmrExpts[clicked.description].plot_2D(fig=nmrfig,ax=ax1D1H) # set phase slider values from current spectra for 2D data rp0_slider.value = nmrExpts[clicked.description].udic[1]['rp'] lp0_slider.value = nmrExpts[clicked.description].udic[1]['lp'] rp0_ftext.value = nmrExpts[clicked.description].udic[1]['rp'] lp0_ftext.value = nmrExpts[clicked.description].udic[1]['lp'] rp1_slider.value = nmrExpts[clicked.description].udic[0]['rp'] lp1_slider.value = nmrExpts[clicked.description].udic[0]['lp'] rp1_ftext.value = nmrExpts[clicked.description].udic[0]['rp'] lp1_ftext.value = nmrExpts[clicked.description].udic[0]['lp'] # with btn_output: # print("description", type(clicked),clicked.description, upload_file_names.index(clicked.description)) for b in upload_buttons: b.on_click( lambda clicked: on_button_clicked(clicked)) b.style.button_color = 'lightgray' # - # ### create buttons to upload NMR data # + def on_value_change(change, nmrExpts, nmrExptKey): # print("nmrExptKey", nmrExptKey) # upload_file_widgets[nmrExptKey]["completedTick"].value = False upload_file_widgets[nmrExptKey]["uploadButton"].style.button_color = 'lightgray' if nmrExptKey == "peakTable": try: for k in change['new']: nmrExpts[nmrExptKey] = 
pd.read_excel(io.BytesIO(change['new'][k]["content"]), sheet_name=None, index_col=0) # with output2: # print(change['new'][k].keys()) # if nmrExptKey == "peakTable": # upload_file_widgets[nmrExptKey]["completedTick"].value = True upload_file_widgets[nmrExptKey]["uploadButton"].style.button_color = 'lightgreen' on_button_clicked(upload_buttons[upload_file_names.index(nmrExptKey)]) except: # upload_file_widgets[nmrExptKey]["completedTick"].value = False upload_file_widgets[nmrExptKey]["uploadButton"].style.button_color = 'lightgray' else: #if 'procpar' in change['new'] and 'fid' in change['new']: try: # print("read in nmr file") procpar_file = change['new']['procpar'] fid_file = change['new']['fid'] f, fprocpar = tempfile.mkstemp( text=True) with open(fprocpar, "wb" ) as f: f.write(procpar_file["content"]) f, ffid = tempfile.mkstemp( text=True) with open(ffid, "wb" ) as f: f.write(fid_file["content"]) nmrExpts[nmrExptKey] = nmrExpt.NMRexpt.from_varian(fid_file=ffid, procpar_file=fprocpar) if nmrExpts[nmrExptKey].udic['ndim'] == 1: nmrExpts[nmrExptKey].produce_1D(lb = [0.001]) else: nmrExpts[nmrExptKey].produce_2D() # upload_file_widgets[nmrExptKey]["completedTick"].value = True upload_file_widgets[nmrExptKey]["uploadButton"].style.button_color = 'lightgreen' on_button_clicked(upload_buttons[upload_file_names.index(nmrExptKey)]) # with output2: # print("read in nmr file", change['owner'].description, nmrExpts[nmrExptKey].data.shape, change['new'].keys()) except: # upload_file_widgets[nmrExptKey]["completedTick"].value = False upload_file_widgets[nmrExptKey]["uploadButton"].style.button_color = 'lightgray' # with output2: # print("Exception Occured: Wrong File Type") upload_1D_proton.observe( lambda change: on_value_change(change, nmrExpts, 'proton1D'), names='value') upload_1D_carbon.observe( lambda change: on_value_change(change, nmrExpts, 'carbon1D'), names='value') upload_2D_cosy.observe( lambda change: on_value_change(change, nmrExpts, 'cosy2D'), names='value') 
# Wire each remaining upload widget to the loader callback with its matching
# experiment key.
# BUG FIX: the original registered upload_2D_hsqc under the misspelled key
# 'hsqcq2D' (so the HSQC spectrum was stored where on_button_clicked could
# never find it), registered upload_2D_cosy TWICE more with key 'hmbc2D'
# (so a COSY upload silently overwrote the HMBC slot), and never attached an
# observer to upload_2D_hmbc at all.
upload_2D_hsqc.observe( lambda change: on_value_change(change, nmrExpts, 'hsqc2D'), names='value')
upload_2D_hmbc.observe( lambda change: on_value_change(change, nmrExpts, 'hmbc2D'), names='value')
upload_peak_table.observe( lambda change: on_value_change(change, nmrExpts, 'peakTable'), names='value')
# -

# ### Create NMR plot window

# +
with mpl_output:
    nmrfig = plt.figure(figsize=(8,6))
    ax1D1H = plt.axes([0.1,0.1,0.8,0.8])
    # lineplot, = ax1D1H.plot([1,2,3],[1,2,3])
# -

# ### Phase sliders and values

# +
# Phase sliders and direct input
# phase_vbox = widgets.VBox([ widgets.HBox([rp0_slider,rp0_ftext]),
#                             widgets.HBox([lp0_slider,lp0_ftext]),
#                             widgets.HBox([rp1_slider,rp1_ftext]),
#                             widgets.HBox([lp1_slider,lp1_ftext])])

phase_hbox = HBox([
    VBox([
        HBox([rp0_slider, rp0_ftext]),
        HBox([lp0_slider, lp0_ftext])
    ]),
    VBox([
        HBox([rp1_slider, rp1_ftext]),
        HBox([lp1_slider, lp1_ftext])
    ])
])

### Link phase sliders to float input boxes
link_rp0 = widgets.jslink((rp0_slider, 'value'), (rp0_ftext, 'value'))
link_lp0 = widgets.jslink((lp0_slider, 'value'), (lp0_ftext, 'value'))
link_rp1 = widgets.jslink((rp1_slider, 'value'), (rp1_ftext, 'value'))
link_lp1 = widgets.jslink((lp1_slider, 'value'), (lp1_ftext, 'value'))

### Create a callback to update screen
def on_phase_changed(change, widgetname):
    """Re-process the currently selected spectrum with the phase values
    shown on the sliders, then redraw it via on_button_clicked."""
    # collect values
    rp0 = rp0_slider.value
    lp0 = lp0_slider.value
    rp1 = rp1_slider.value
    lp1 = lp1_slider.value
    if upload_button_status[0] in ['proton1D', 'carbon1D']:
        nmrExpts[upload_button_status[0]].produce_1D( rp = [rp0,], lp=[lp0])
    elif upload_button_status[0] in ['hsqc2D', 'hmbc2D']:
        nmrExpts[upload_button_status[0]].produce_2D( rp = [rp0, rp1], lp=[lp0, lp1])
    on_button_clicked(upload_buttons[upload_file_names.index(upload_button_status[0])])
    # with phase_output:
    #     print("phase changed", widgetname, upload_button_status[0], type(nmrExpts[upload_button_status[0]]) )

### Attach callback
# Wire every phase slider to the shared phase callback, tagging each event
# with the name of the slider that produced it.  The default argument binds
# the tag at definition time (avoids the late-binding closure pitfall).
for _slider, _tag in ((rp0_slider, 'rp0'), (lp0_slider, 'lp0'),
                      (rp1_slider, 'rp1'), (lp1_slider, 'lp1')):
    _slider.observe(lambda change, _tag=_tag: on_phase_changed(change, _tag),
                    names='value')
# -

# ### Create ipywidgets display

# +
# Top-level layout; the bare expression renders it in the notebook.
# From top to bottom: data-format radio buttons, the six upload controls,
# the processing buttons, the phase-control grid, and the plot output area.
VBox([
    varianBrukerRadioButtons,
    HBox([upload_1D_proton, upload_1D_carbon, upload_2D_cosy,
          upload_2D_hsqc, upload_2D_hmbc, upload_peak_table]),
    HBox(upload_buttons),
    phase_hbox,
    mpl_output,
])
NMRdiscover.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import json
import pandas as pd
import numpy as np
import ast
import math
from nltk.parse.generate import generate, demo_grammar
from nltk import CFG


def remove_second_pair(list_orders):
    """Return only the word of every (word, pos) pair in *list_orders*."""
    return [item[0] for item in list_orders]


def add_words_lists(words, list_orders):
    """Move each word to the front of its companion list.

    *words* and *list_orders* are equal-length pandas Series.  For every row
    with a non-null word and a non-empty list, the word is removed from the
    list (if present) and re-inserted at position 0, so each list starts
    with its own word.  Lists are mutated in place; the list column is
    returned.
    """
    table = pd.concat([words, list_orders], axis=1)
    for row in range(len(table)):
        word = table.iloc[row, 0]
        orders = table.iloc[row, 1]
        # Bug fix: the original tested `word != np.NaN`, which is always True
        # (NaN compares unequal to everything, itself included); use
        # pd.notna to actually skip missing words.
        if pd.notna(word) and orders != []:
            if word in orders:
                orders.remove(word)
            orders.insert(0, word)
    return table.iloc[:, 1]


def convert_list_2_cfg(ordinary_list):
    """Render *ordinary_list* as a CFG alternation suffix: ' | "w1" | "w2" ...'."""
    return "".join(' | "' + w + '"' for w in ordinary_list)


def generate_cfg(wpos, w1, w2, w3, s1, s2, s3, trigger_rule, group):
    """Build the CFG grammar string for one expression.

    Parameters
    ----------
    wpos : list of [word, pos] pairs
        Tokenization with POS tags; one nonterminal is derived per token.
    w1, w2, w3 : str or NaN
        The up-to-three key words of the expression.
    s1, s2, s3 : list of str
        Synonym lists for w1/w2/w3; element 0 is the word itself, so only
        the tail (``s[1:]``) is appended as extra alternatives.
    trigger_rule : iterable of str
        Trigger-rule identifiers embedded in the Trigger_Rule terminals.
    group : int
        Group number embedded in the trigger terminals.

    Returns
    -------
    str
        Newline-separated grammar: an ``S`` start rule, one terminal rule
        per token (augmented with synonyms for the key words), and a
        ``Trigger_Rule`` production.
    """
    # Bug fix: work on a copy -- the original rewrote the caller's wpos
    # sub-lists in place.  Nonterminal name: POS with spaces underscored,
    # capitalized, suffixed with its 1-based position (e.g. Verb_phrase2).
    wpos = [[w, pos.replace(" ", "_").capitalize() + str(idx + 1)]
            for idx, (w, pos) in enumerate(wpos)]

    # Start rule: S -> <every token nonterminal> Trigger_Rule
    # (the double space after "S ->" matches the original output exactly).
    cfg_rules = ["S -> " + "".join(" " + nt for _, nt in wpos) + " Trigger_Rule"]
    # One terminal rule per token: Nonterminal -> "word"
    cfg_rules += [nt + ' -> "' + w + '"' for w, nt in wpos]

    # Normalise missing key words (NaN out of pandas) to the empty string.
    if not isinstance(w1, str) and np.isnan(w1):
        w1 = ""
    if not isinstance(w2, str) and np.isnan(w2):
        w2 = ""
    if not isinstance(w3, str) and np.isnan(w3):
        w3 = ""

    # Append the synonym alternatives to every rule containing a key word.
    # Bug fix: ``rule.find("") >= 0`` is always True, so an empty w1 used to
    # capture every rule and shadow the w2/w3 branches; empty key words are
    # now skipped.
    cfg_rules_augmented = []
    for rule in cfg_rules:
        if w1 and rule.find(w1) >= 0:
            cfg_rules_augmented.append(rule + convert_list_2_cfg(s1[1:]))
        elif w2 and rule.find(w2) >= 0:
            cfg_rules_augmented.append(rule + convert_list_2_cfg(s2[1:]))
        elif w3 and rule.find(w3) >= 0:
            cfg_rules_augmented.append(rule + convert_list_2_cfg(s3[1:]))
        else:
            cfg_rules_augmented.append(rule)

    # Trigger production: "|"-separated quoted terminals of the form
    # "|<rule>|Group<N>|PRE-VALIDATION".
    cfg_grammar = "\n".join(cfg_rules_augmented) + "\n"
    cfg_grammar += "Trigger_Rule -> "
    cfg_grammar += "|".join('"|' + tr + "|Group" + str(group) + '|PRE-VALIDATION"'
                            for tr in trigger_rule)
    cfg_grammar += "\n"
    return cfg_grammar


# +
# Load the hand-built translation table.  List-valued columns are stored as
# their Python repr in the spreadsheet and need ast.literal_eval to come
# back as real lists.
words_table = pd.read_excel("./excels/4_Phase3_TranslationTable.xlsx")
words_table = words_table.reset_index()
words_table = words_table.fillna("")
exp_fr = words_table["French Expression"]
exp_en = words_table["English Expression(s)"].apply(lambda s: ast.literal_eval(s))
indexes = words_table["Occured Index(es)"].apply(lambda s: ast.literal_eval(s))
groups = words_table["Occured Groups"].apply(lambda s: ast.literal_eval(s))
rules = words_table["Rule"].apply(lambda s: ast.literal_eval(s))
word_pos = words_table["Tokenization with POS"].apply(lambda s: ast.literal_eval(s))
word1 = words_table["word1"]
syn1_list = words_table["syn_1"].apply(lambda s: ast.literal_eval(s))
word2 = words_table["word2"]
syn2_list = words_table["syn_2"].apply(lambda s: ast.literal_eval(s))
word3 = words_table["word3"]
syn3_list = words_table["syn_3"].apply(lambda s: ast.literal_eval(s))

# Skeleton metadata shared by every generated validation notebook.
# NOTE(review): nbformat normally nests file_extension/mimetype/name/
# nbconvert_exporter/pygments_lexer/version under metadata["language_info"],
# not directly under metadata -- confirm the generated notebooks open
# cleanly before changing this.
json_notebook = {}
json_notebook["metadata"] = {}
json_notebook["metadata"]["kernelspec"] = {}
json_notebook["metadata"]["kernelspec"]["display_name"] = "Python 3"
json_notebook["metadata"]["kernelspec"]["language"] = "python"
json_notebook["metadata"]["kernelspec"]["name"] = "python3"
json_notebook["metadata"]["language_info"] = {}
json_notebook["metadata"]["language_info"]["codemirror_mode"] = {}
json_notebook["metadata"]["language_info"]["codemirror_mode"]["name"] = "ipython"
json_notebook["metadata"]["language_info"]["codemirror_mode"]["version"] = 3
json_notebook["metadata"]["file_extension"] = ".py"
json_notebook["metadata"]["mimetype"] = "text/x-python"
json_notebook["metadata"]["name"] = "python"
json_notebook["metadata"]["nbconvert_exporter"] = "python"
json_notebook["metadata"]["pygments_lexer"] = "ipython3"
json_notebook["metadata"]["version"] = "3.7.2"
json_notebook["nbformat"] = 4
json_notebook["nbformat_minor"] = 2
# The 737 expressions are processed in slices of ~100; one validation
# notebook is written per slice so reviewers get manageable files.
# ("seperation" [sic] kept as-is.)
seperation_margin=[[0,100],[101,200],[201,300],[301,400],[401,500],[501,600],[601,737]]
for sm in seperation_margin:
    # Fresh notebook for this slice: reuse the shared metadata skeleton
    # built above, reset the cell list.
    json_notebook["cells"]=[]
    index_start=sm[0]
    index_end=sm[1]
    # Output file-name suffix, e.g. "_0_100".
    strlist="_"+str(index_start)+"_"+str(index_end)
    # At most 15 synonyms kept per key word (element 0 is the word itself).
    threshold=15
    for i in range(index_start,index_end+1):
        print(i)
        # Rows 245 and 661 are excluded -- presumably malformed entries;
        # TODO confirm why these two indices are skipped.
        if (i!=661 and i!=245):
            expfr=exp_fr[i]
            expen=exp_en[i]
            wpos=word_pos[i]
            w1=word1[i]
            w2=word2[i]
            w3=word3[i]
            s1=syn1_list[i][0:threshold]
            s2=syn2_list[i][0:threshold]
            s3=syn3_list[i][0:threshold]
            index_r=indexes[i]
            group=groups[i]
            rule=rules[i]
            # Build the grammar for this expression and echo it for review.
            cfg_grammar=generate_cfg(wpos,w1,w2,w3,s1,s2,s3,rule,group)
            print(cfg_grammar)
            # One code cell per expression: nltk imports, the grammar as a
            # triple-quoted string, and a loop printing generated sentences.
            json_cell={}
            json_cell["cell_type"] = "code"
            json_cell["metadata"] = {}
            json_cell["outputs"] = []
            # NOTE(review): nbformat expects execution_count to be JSON null
            # (Python None), not the string "null" -- confirm the generated
            # notebooks load cleanly.
            json_cell["execution_count"] = "null"
            json_source1=["from nltk.parse.generate import generate, demo_grammar\n",
                          "from nltk import CFG\n",
                          "\n",
#                           "# Item Number : "+str(i)+"\n",
#                           "# French Term : "+str(expfr)+"\n",
#                           "# English Term(s) : "+str(expen)+"\n",
#                           "# Index(es) : "+str(index_r)+"\n",
#                           "# Grouping(s) : "+str(group)+"\n\n",
#                           "\n",
                          "cfg_grammar= \"\"\"\n"]
            json_source2=[cfg_grammar]
            json_source3=["\"\"\"\n",
                          "\n",
                          "for sentence in generate(CFG.fromstring(cfg_grammar), n=1000):\n",
                          " print(' '.join(sentence))"]
            json_cell["source"]=json_source1+json_source2+json_source3
            json_notebook["cells"].append(json_cell)
    # One notebook file per slice.
    with open("./notebooks/Validation_Notebook"+strlist+".ipynb", 'w') as outfile:
        json.dump(json_notebook, outfile)
print("Finished...")
# -
5_CFG_Maker.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import umap
import umap.plot

# Load the per-replay summary features produced by the processing pipeline.
df = pd.read_hdf("../data/processed/summaries_Terran.hdf", "summaries")

# Embed the summaries in 2-D with UMAP (default parameters) and scatter-plot
# the result for a first visual look at the data.
mapper = umap.UMAP().fit(df)
umap.plot.points(mapper)
notebooks/Explore summaries.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Philippe-AD/essai2019/blob/master/natural_language_processing_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="8CbkQnvBVXPv" colab_type="text" # #Natural Language Processing (NLP) with Python - Tutorial # # https://towardsai.net/nlp-tutorial-with-python # + id="dzRE-7BVREOJ" colab_type="code" colab={} #To get the text file used : # !wget https://raw.githubusercontent.com/towardsai/tutorials/master/natural_language_processing/Natural_Language_Processing_Text.txt #To get the image file used : # !wget https://raw.githubusercontent.com/towardsai/tutorials/master/natural_language_processing/circle.png # + id="__TNs3DOBiC0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 222} outputId="461208b2-9e8f-48c9-c7d4-f0435962a649" #Open the text file : text_file = open("Natural_Language_Processing_Text.txt") #Read the data : text = text_file.read() #Datatype of the data read : print (type(text)) print("\n") #Print the text : print(text) print("\n") #Length of the text : print (len(text)) # + id="kk-wDQ4zdAvF" colab_type="code" colab={} #Import required libraries : import nltk from nltk import sent_tokenize from nltk import word_tokenize import nltk nltk.download("popular") # + id="nR2CzbHWdKrH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="60a09545-e9f4-4966-d24f-72dca1dbc1e0" #Tokenize the text by sentences : sentences = sent_tokenize(text) #How many sentences are there? 
: print (len(sentences)) #Print the sentences : #print(sentences) sentences # + id="pUPacHqcdqc1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="46f8da82-3e79-4442-c5b7-c7a7e5182572" #Tokenize the text with words : words = word_tokenize(text) #How many words are there? : print (len(words)) print("\n") #Print words : print (words) # + id="JsBVhkCFdsuU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="09d4de0c-baeb-4e7e-cd8c-a19694fec9dc" #Import required libraries : from nltk.probability import FreqDist #Find the frequency : fdist = FreqDist(words) #Print 10 most common words : fdist.most_common(10) # + id="LtNIRQvTduKt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="3b770b40-2ae8-4ab1-fbea-733020b0b8e6" #Plot the graph for fdist : import matplotlib.pyplot as plt fdist.plot(10) # + id="tUPyjVHrdveU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="11060870-662a-432f-e5b9-00564d41f992" #Empty list to store words: words_no_punc = [] #Removing punctuation marks : for w in words: if w.isalpha(): words_no_punc.append(w.lower()) #Print the words without punctution marks : print (words_no_punc) print ("\n") #Length : print (len(words_no_punc)) # + id="b3SqTkNedxYM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="080909e0-8f2e-48b9-e11d-968990d1c720" #Frequency distribution : fdist = FreqDist(words_no_punc) fdist.most_common(10) # + id="kotl9nuTdyge" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="cd99fde8-e460-4d59-fd40-ddd22d49358b" #Plot the most common words on grpah: fdist.plot(10) # + id="443sa0zKdzpd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="08418e68-7eec-4850-a68c-823958795af4" from nltk.corpus import stopwords #List of stopwords stopwords = stopwords.words("english") 
print(stopwords) # + id="JaRf2eeYd1Zt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="64f087e1-59b8-463c-e6d6-8312c04787fc" #Empty list to store clean words : clean_words = [] for w in words_no_punc: if w not in stopwords: clean_words.append(w) print(clean_words) print("\n") print(len(clean_words)) # + id="GX7Z3fmEd2hF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="c3705384-7e50-4190-8d02-b04fe3fa8dd1" #Frequency distribution : fdist = FreqDist(clean_words) fdist.most_common(10) # + id="_9bes0f_d3dv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 307} outputId="efe05e95-44b7-42b0-fa94-ad439a630481" #Plot the most common words on grpah: fdist.plot(10) # + id="WGptyQEDd44c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 366} outputId="4833d1f7-2d22-489b-aaef-d6b9df67a6cd" #Library to form wordcloud : from wordcloud import WordCloud #Library to plot the wordcloud : import matplotlib.pyplot as plt #Generating the wordcloud : wordcloud = WordCloud().generate(text) #Plot the wordcloud : plt.figure(figsize = (12, 12)) plt.imshow(wordcloud) #To remove the axis value : plt.axis("off") plt.show() # + id="z56ilmqod6TU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 416} outputId="533e99ea-3fd0-4ff2-b8ef-63567b9ae5db" #Import required libraries : import numpy as np from PIL import Image from wordcloud import WordCloud #Here we are going to use a circle image as mask : char_mask = np.array(Image.open("circle.png")) #Generating wordcloud : wordcloud = WordCloud(background_color="black",mask=char_mask).generate(text) #Plot the wordcloud : plt.figure(figsize = (8,8)) plt.imshow(wordcloud) #To remove the axis value : plt.axis("off") plt.show() # + id="0EXIjA7vd8EE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="dc86952d-5de0-44ec-86d1-922fdca960bf" #Stemming Example : #Import 
stemming library : from nltk.stem import PorterStemmer porter = PorterStemmer() #Word-list for stemming : word_list = ["Study","Studying","Studies","Studied"] for w in word_list: print(porter.stem(w)) # + id="rdMUAzybd9eW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="8ee14bf8-afb5-4f96-a305-e8aa4b14e28c" #Stemming Example : #Import stemming library : from nltk.stem import PorterStemmer porter = PorterStemmer() #Word-list for stemming : word_list = ["studies","leaves","decreases","plays"] for w in word_list: print(porter.stem(w)) # + id="YUDA2PJ5d_T8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="27cd2225-b6c0-4e50-f17d-5a64e012c9c3" #Stemming Example : #Import stemming library : from nltk.stem import SnowballStemmer snowball = SnowballStemmer("english") #Word-list for stemming : word_list = ["Study","Studying","Studies","Studied"] for w in word_list: print(snowball.stem(w)) # + id="CSPFh_QGeAxQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="c600933d-5539-4508-df4f-2c416c7cd23a" #Stemming Example : #Import stemming library : from nltk.stem import SnowballStemmer #Print languages supported : SnowballStemmer.languages # + id="mwnV6hy3eCKs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="fd901be2-cca6-4351-892d-38ba789ceae5" from nltk import WordNetLemmatizer lemma = WordNetLemmatizer() word_list = ["Study","Studying","Studies","Studied"] for w in word_list: print(lemma.lemmatize(w ,pos="v")) # + id="NteiC9AleDSb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="9f7de2f9-9c32-475a-a865-efdd32e13012" from nltk import WordNetLemmatizer lemma = WordNetLemmatizer() word_list = ["am","is","are","was","were"] for w in word_list: print(lemma.lemmatize(w ,pos="v")) # + id="cKpErUbHeEhX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} 
outputId="17a894ca-a4f4-446d-f432-0b2117f94dde" from nltk.stem import PorterStemmer stemmer = PorterStemmer() print(stemmer.stem('studies')) # + id="T-qdgl4JeFpO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b41952ae-e430-4d1c-e8ba-e18a0c732f21" from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() print(lemmatizer.lemmatize('studies')) # + id="XDBbV6k_eHRl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="aab0c2b0-33b9-4a9e-fc59-d71b73b071de" from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() print(lemmatizer.lemmatize('studies')) # + id="AVZ8q1a2eIm8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="ee53d263-0512-44e7-e453-6cb2ab7316af" from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() print(lemmatizer.lemmatize('studying', pos="v")) print(lemmatizer.lemmatize('studying', pos="n")) print(lemmatizer.lemmatize('studying', pos="a")) print(lemmatizer.lemmatize('studying', pos="r")) # + id="TLBjhAnReJwJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="5e18ada5-a836-4f42-f5e9-82ca8cf0d048" from nltk import WordNetLemmatizer lemma = WordNetLemmatizer() word_list = ["studies","leaves","decreases","plays"] for w in word_list: print(lemma.lemmatize(w)) # + id="4AaooJeeeLQJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c2260df4-011a-4337-8a96-0c8b455ca053" #PoS tagging : tag = nltk.pos_tag(["Studying","Study"]) print (tag) # + id="i3DEiFVPeMu0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="1f7fc8f1-ac8b-4235-8648-7c09ca214321" #PoS tagging example : sentence = "A very beautiful young lady is walking on the beach" #Tokenizing words : tokenized_words = word_tokenize(sentence) for words in tokenized_words: tagged_words = nltk.pos_tag(tokenized_words) tagged_words # + 
id="Ed4f86LjeQU2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="3683b5e3-1037-414d-ddde-30b0d95253d6" #Extracting Noun Phrase from text : # ? - optional character # * - 0 or more repetations grammar = "NP : {<DT>?<JJ>*<NN>} " import matplotlib.pyplot as plt #Creating a parser : parser = nltk.RegexpParser(grammar) #Parsing text : output = parser.parse(tagged_words) print (output) #To visualize : #output.draw() # + id="_TUHFtD0eRru" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="2c74d556-d156-4bb9-f15c-5fadc64775d3" #Chinking example : # * - 0 or more repetations # # + - 1 or more repetations #Here we are taking the whole string and then #excluding adjectives from that chunk. grammar = r""" NP: {<.*>+} }<JJ>+{""" #Creating parser : parser = nltk.RegexpParser(grammar) #parsing string : output = parser.parse(tagged_words) print(output) #To visualize : #output.draw() # + id="BYrmBSHieSRe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="a6f7f32d-eae2-4225-ce2c-447d95b7373f" #Sentence for NER : sentence = "Mr. Smith made a deal on a beach of Switzerland near WHO." #Tokenizing words : tokenized_words = word_tokenize(sentence) #PoS tagging : for w in tokenized_words: tagged_words = nltk.pos_tag(tokenized_words) #print (tagged_words) #Named Entity Recognition : N_E_R = nltk.ne_chunk(tagged_words,binary=False) print(N_E_R) #To visualize : #N_E_R.draw() # + id="F5pzo1JKeT48" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="fda5f32d-9518-4be4-e325-5ec6b6e69845" #Sentence for NER : sentence = "Mr. Smith made a deal on a beach of Switzerland near WHO." 
#Tokenizing words : tokenized_words = word_tokenize(sentence) #PoS tagging : for w in tokenized_words: tagged_words = nltk.pos_tag(tokenized_words) #print (tagged_words) #Named Entity Recognition : N_E_R = nltk.ne_chunk(tagged_words,binary=True) print(N_E_R) #To visualize : #N_E_R.draw() # + id="g1S1GB4SeVed" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="ee412002-8052-41bf-92e5-3637f3aa6671" #Import wordnet : from nltk.corpus import wordnet for words in wordnet.synsets("Fun"): print(words) # + id="-Hl5yXEqeXOc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 504} outputId="7ab14166-eef9-4733-9f32-b25a0c87baff" #Word meaning with definitions : for words in wordnet.synsets("Fun"): print(words.name()) print(words.definition()) print(words.examples()) for lemma in words.lemmas(): print(lemma) print("\n") # + id="_iuOvWeteYoN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="9c48856e-d250-4830-c2cc-7855b6671de6" #How many differnt meanings : for words in wordnet.synsets("Fun"): for lemma in words.lemmas(): print(lemma) print("\n") # + id="JQUAaYDbeaQM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="1f58da7c-0bba-4cc7-b185-9fee4db42df2" word = wordnet.synsets("Play")[0] #Checking name : print(word.name()) #Checking definition : print(word.definition()) #Checking examples: print(word.examples()) # + id="kkmDDmInebb6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="59e21eb5-d58a-4f93-86ed-91a1b719c309" word = wordnet.synsets("Play")[0] #Find more abstract term : print(word.hypernyms()) # + id="rpFeTM-Bec8V" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="78063b72-a65f-4815-99ea-5243a3eb3135" word = wordnet.synsets("Play")[0] #Find more specific term : word.hyponyms() # + id="-co9eRcRefoK" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 35} outputId="aa55f4af-8194-4c9a-d9f5-ebe674529b62" word = wordnet.synsets("Play")[0] #Get only name : print(word.lemmas()[0].name()) # + id="ZbzP3baNegP3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="8b5ea6c7-90c8-483c-9257-4f1a0f4cd47d" #Finding synonyms : #Empty list to store synonyms : synonyms = [] for words in wordnet.synsets('Fun'): for lemma in words.lemmas(): synonyms.append(lemma.name()) synonyms # + id="Qf7KzP2cehvs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5ce1f3e0-8e83-4c6f-d934-3b8cf0100214" #Finding antonyms : #Empty list to store antonyms : antonyms = [] for words in wordnet.synsets('Natural'): for lemma in words.lemmas(): if lemma.antonyms(): antonyms.append(lemma.antonyms()[0].name()) #Print antonyms : antonyms # + id="ivQlf_e7ejNX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="35829ebf-da8f-49c6-8f9a-85ff30e870fe" #Finding synonyms and antonyms : #Empty lists to store synonyms/antonynms : synonyms = [] antonyms = [] for words in wordnet.synsets('New'): for lemma in words.lemmas(): synonyms.append(lemma.name()) if lemma.antonyms(): antonyms.append(lemma.antonyms()[0].name()) #Print lists : print(synonyms) print("\n") print(antonyms) # + id="AdXqU1BCek2W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8f663766-5f80-4159-cb81-ae14c47868b3" #Similarity in words : word1 = wordnet.synsets("ship","n")[0] word2 = wordnet.synsets("boat","n")[0] #Check similarity : print(word1.wup_similarity(word2)) # + id="Kd_lUIn1emHm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="696ee006-0aa6-449e-c141-8289fcd3c11f" #Similarity in words : word1 = wordnet.synsets("ship","n")[0] word2 = wordnet.synsets("bike","n")[0] #Check similarity : print(word1.wup_similarity(word2)) # + id="mJFlVmGhenvd" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 193} outputId="972eaeb6-4e48-405e-914f-666da3df6cad" #Import required libraries : from sklearn.feature_extraction.text import CountVectorizer #Text for analysis : sentences = ["Jim and Pam travelled by the bus:", "The train was late", "The flight was full.Travelling by flight is expensive"] #Create an object : cv = CountVectorizer() #Generating output for Bag of Words : B_O_W = cv.fit_transform(sentences).toarray() #Total words with their index in model : print(cv.vocabulary_) print("\n") #Features : print(cv.get_feature_names()) print("\n") #Show the output : print(B_O_W) # + id="IanI1jqLetx8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="34e61f4f-fec5-42ef-fe52-ee4dc33afc74" #Import required libraries : from sklearn.feature_extraction.text import TfidfVectorizer #Sentences for analysis : sentences = ['This is the first document','This document is the second document'] #Create an object : vectorizer = TfidfVectorizer(norm = None) #Generating output for TF_IDF : X = vectorizer.fit_transform(sentences).toarray() #Total words with their index in model : print(vectorizer.vocabulary_) print("\n") #Features : print(vectorizer.get_feature_names()) print("\n") #Show the output : print(X)
natural_language_processing_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.0
#     language: julia
#     name: julia-1.6
# ---

include("StackyFan.jl")

# +
# Look up the fan-ray row indices for the rays of a single cone.
#
# `coneRays` and `rayMatrix` hold one ray per row.  Each row is turned into a
# canonical string via `encode` (defined in StackyFan.jl), so the lookup is by
# ray value rather than by object identity.  Returns the 1-based row indices
# of `rayMatrix` corresponding to the rows of `coneRays`, in the order they
# appear in `coneRays`.  Assumes every row of `coneRays` occurs in
# `rayMatrix` -- otherwise the Dict lookup throws a KeyError.
function getConeIndices(coneRays, rayMatrix)
    # Build the lookup: encoded ray -> its row index in rayMatrix.
    rayDict = Dict{String, Int64}()
    for i in 1:size(rayMatrix, 1)
        E=encode(rayMatrix[i,:])
        rayDict[E] = i
    end
    # Translate each cone ray into its index via the lookup.
    cone = []
    for i in 1:size(coneRays, 1)
        E = encode(coneRays[i,:])
        push!(cone, rayDict[E])
    end
    return cone
end
if S==[] break end #A2 - find extremal cones Smax=extremalCones(S,rayMatrix,D) if verbose==true Smaxcount=size(Smax,1) println("Number of extremal cones: $Smaxcount") testCone=Smax[1] c1=convertToIncidence(testCone,numRays) nonDist=size(testCone,1)-dot(c1,D) mult=coneMultiplicity(coneConvert(testCone,rayMatrix)) println("Maximal non-distinguished rays and multiplicity: $nonDist, $mult") end intList = [] for cone in Smax #A2 - find interior points in Smax intPoints=[] C=coneConvert(cone,rayMatrix) coneIntPoints=interiorPoints(C) if coneIntPoints==nothing # does the program ever reach this line if it breaks at S empty? return C end for point in coneIntPoints push!(intPoints,point) end coneRays = rowMinors(rayMatrix,cone) push!(intList, (intPoints, coneRays)) end for (intPoints, coneRays) in intList rayMatrix=convert(Array{Int64,2},Array(Polymake.common.primitive(X.fan.RAYS))) # Get cone indices from the rays cone = getConeIndices(coneRays, rayMatrix) #A2 - find stacky points (in terms of coefficients) derived from interior points P=Array{Int64,1}[] for point in intPoints stackyPoint=coneRayDecomposition(cone,rayMatrix,point,stackyWeights(X)) push!(P,stackyPoint) end #A2 - find smallest element of P with respect to lex ordering. 
psi=minimalByLex(P) #A3 - perform root construction X=rootConstructionDistinguishedIndices(X,D,psi) #A3 - modify psi with respect to root construction for i in 1:length(psi) if D[i]==1 && psi[i]>0 psi[i]=1 end end #A5 - repeat star subdivision while(count(x->x>0,psi)>1) #A4 - perform stacky star subdivision # Get the indices of the non-zero coefficients in psi supportCone=findall(x->x!=0,psi) #if verbose==true #sW=stackyWeights(X) #println("Modified stacky weights: $sW") #end exceptional=findStackyBarycenter(supportCone,X) #if verbose==true #println("Blowing up at $exceptional") #end # Convert arrays to dictionaries as constructing fan objects through subdivision # may reorder rays code_rays = mapslices(encode, Polymake.common.primitive(X.fan.RAYS), dims=2) # Track the indices of distinguished rays D_pairs = map((x,y) -> (x,y), code_rays, D) D_Dict = Dict(D_pairs) # Track psi as a linear combination of the generators psiPairs = map((x,y) -> (x,y), code_rays,psi) psiDict = Dict(psiPairs) trackerPairs = map((x,y) -> (x,y), code_rays, stackTracker) trackerDict= Dict(trackerPairs) X=stackyBlowup(X,[x-1 for x in supportCone],exceptional) G=gcd(exceptional) primExcep=Polymake.common.primitive(exceptional) # Update the dictionaries storing fan information D_Dict[encode(primExcep)]=1 psiDict[encode(primExcep)]=1 trackerDict[encode(primExcep)]=G newRays=slicematrix(convert(Array{Int64,2},Array(Polymake.common.primitive(X.fan.RAYS)))) newD=Int64[] newpsi=Int64[] newTracker=Int64[] for ray in newRays E=encode(ray) excepCode=encode(primExcep) push!(newD,D_Dict[E]) #A4 - modify psi if E==excepCode push!(newpsi,1) elseif psiDict[E]>1 push!(newpsi,psiDict[E]-1) else push!(newpsi,0) end push!(newTracker,trackerDict[E]) end psi=newpsi D=newD stackTracker=newTracker #A4 - modify psi end end end println("^Mainloop time") if verbose==true println("=======") end end return X, stackTracker end # - X=Polymake.fulton.NormalToricVariety(INPUT_RAYS=[1 0; 1 5],INPUT_CONES=[[0,1]]) 
F=addStackStructure(X,[1,1]) @time C=BerghA(F,[0,1])
BerghAlgorithms/src/OldFiles/BerghAReducedFilter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Import library # + import pandas as pd import numpy as np import os, random, warnings, gc, psutil, datetime from tqdm import tqdm_notebook, tqdm from multiprocessing import Pool from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import GroupKFold, StratifiedKFold, KFold from sklearn.metrics import mean_squared_error from math import sqrt import lightgbm as lgbm from glob import glob from IPython.display import display import seaborn as sns import matplotlib.pyplot as plt from sklearn.cluster import MiniBatchKMeans, KMeans # Set options pd.set_option('max_columns',500) pd.set_option('max_rows',500) pd.options.display.max_colwidth = 300 warnings.filterwarnings('ignore') # %matplotlib inline sns.set_palette('bright') # - path = '../raw_dataset/' # ## Load Dataset df_bus = pd.read_csv(path + 'bus_bts.csv') df_bus['geton_date'] = pd.to_datetime(df_bus['geton_date']) # + # 주중에 정기적으로 타는 사람들 df_bus['weekday'] = df_bus['geton_date'].dt.weekday df_weekday = df_bus[df_bus['weekday']<5] df_commuter = df_weekday.groupby(['user_card_id','geton_station_code']).size().reset_index() df_commuter.columns = ['user_card_id','geton_station_code','num_usage'] df_commuter = df_commuter[df_commuter['num_usage']>=10].reset_index(drop=True) df_commuter = df_commuter.groupby('geton_station_code')['user_card_id'].count() df_commuter = df_commuter.reset_index() df_commuter.columns = ['station_code','regular_commuter_count'] # - # 해당 정류장에 12시 이후에 몇명이 내렸는지 df_afternoon = df_bus[df_bus['getoff_time']>='12:00:00'][['bus_route_id','getoff_date','getoff_station_code','getoff_time','user_category','user_count']] df_afternoon_getoff_amount = df_afternoon.groupby(['bus_route_id','getoff_date','getoff_station_code'])['user_count'].sum().reset_index() 
# Give the sparse afternoon alighting totals a clearly named column.
df_afternoon_getoff_amount = df_afternoon_getoff_amount.rename(columns = {'user_count' : 'afternoon_takeoff'})

# +
# Headway feature: for each (date, route, vehicle, station) take the first
# tag-on time, measure the gap in seconds to the previous bus of the same
# route at the same station, then average per (date, route).
first_passenger_tagtime = df_bus.groupby(['geton_date', 'bus_route_id', 'vhc_id','geton_station_code'])['geton_time'].min().reset_index()
first_passenger_tagtime = first_passenger_tagtime.sort_values(by=['geton_date','bus_route_id','geton_station_code','geton_time']).reset_index(drop=True)

# 'HH:MM:SS' -> seconds since midnight.
first_passenger_tagtime['geton_time_second'] = first_passenger_tagtime['geton_time'].apply(
    lambda x: 60*60*int(x.split(':')[0]) + 60*int(x.split(':')[1]) + int(x.split(':')[2]))
first_passenger_tagtime['next_bus_time_diff'] = first_passenger_tagtime.groupby(
    ['geton_date','bus_route_id','geton_station_code'])['geton_time_second'].diff()

date_route_stataion_waittime = first_passenger_tagtime.groupby(['geton_date','bus_route_id','geton_station_code'])['next_bus_time_diff'].mean().reset_index()
date_route_stataion_waittime = date_route_stataion_waittime.groupby(['geton_date','bus_route_id'])['next_bus_time_diff'].mean()
date_route_stataion_waittime = date_route_stataion_waittime.reset_index()

# +
# How many passengers of each user category boarded per station during the
# morning window (boarding hour <= 9).
bus_sample = df_bus[['geton_date','geton_station_code','geton_time','user_category','user_count']].copy()
bus_sample['geton_morning'] = bus_sample['geton_time'].apply(lambda x: int(x.split(':')[0]) <= 9)

bus_passender_cluster_count = bus_sample.groupby(['geton_date','geton_station_code','geton_morning','user_category'])['user_count'].sum().reset_index()
bus_passender_cluster_count_morning = bus_passender_cluster_count[bus_passender_cluster_count['geton_morning']==True]

geton_bus_passender_cluster_count_morning = pd.pivot_table(
    bus_passender_cluster_count_morning,
    index = ['geton_date', 'geton_station_code'],
    columns=['user_category'],
    values = ['user_count'],
    aggfunc='sum').reset_index()
# Keep four named category columns; the remaining categories (renamed d1..d4)
# are dropped.
geton_bus_passender_cluster_count_morning.columns = ['geton_date', 'geton_station_code'] + \
    ['getin_user_count1_morning','getin_user_count2_morning','getin_user_count4_morning','getin_user_count6_morning','d1','d2','d3','d4']
geton_bus_passender_cluster_count_morning = geton_bus_passender_cluster_count_morning.drop(['d1','d2','d3','d4'], axis=1)
# -

def calculate_getoff_time(val):
    """Bucket an alighting hour into 0 (<= 9h), 1 (10-12h) or 2 (afternoon)."""
    if val <= 9:
        return 0
    elif val <= 12:
        return 1
    else:
        return 2

# +
# How many passengers of each user category alighted per station during the
# late-morning (9-12) window.
bus_sample = df_bus[['geton_date','getoff_station_code','getoff_time','user_category','user_count']].copy()
bus_sample = bus_sample[bus_sample['getoff_time'].notnull()]
bus_sample['getoff_hour'] = bus_sample['getoff_time'].apply(lambda x: int(x.split(':')[0]))
bus_sample['getoff_hour'] = bus_sample['getoff_hour'].apply(calculate_getoff_time)

bus_passender_cluster_count = bus_sample.groupby(['geton_date','getoff_station_code','getoff_hour','user_category'])['user_count'].sum().reset_index()
takeoff_bus_passender_cluster_count_noon = bus_passender_cluster_count[bus_passender_cluster_count['getoff_hour']==1]

takeoff_bus_passender_cluster_count_noon = pd.pivot_table(
    takeoff_bus_passender_cluster_count_noon,
    index = ['geton_date', 'getoff_station_code'],
    columns=['user_category'],
    values = ['user_count'],
    aggfunc='sum').reset_index()
takeoff_bus_passender_cluster_count_noon.columns = ['geton_date', 'getoff_station_code'] + \
    ['takeoff_user_count1_noon','takeoff_user_count2_noon','takeoff_user_count4_noon','takeoff_user_count6_noon','d1','d2','d3','d4']
takeoff_bus_passender_cluster_count_noon = takeoff_bus_passender_cluster_count_noon.drop(['d1','d2','d3','d4'], axis=1)
# -

# ## Main Dataset

train = pd.read_csv(path+'train.csv', parse_dates =['date'])
test = pd.read_csv(path+'test.csv', parse_dates =['date'])
n_trn = len(train)
target_col = '18~20_ride'

# Make a whole dataset (DataFrame.append is deprecated; concat is equivalent here).
combined = pd.concat([train, test], ignore_index=True)
combined.head()

# Sequence of the station within its (date, route) trip, forward and reverse.
combined['station_sequence'] = 1
combined['station_reverse_sequence'] = combined[::-1].groupby(['date','bus_route_id'])['station_sequence'].cumsum()[::-1]
combined['station_sequence'] = combined.groupby(['date','bus_route_id'])['station_sequence'].cumsum()

# Day of week as a compact int8.
combined['weekday'] = combined['date'].dt.weekday.astype(np.int8)

# Holidays
# NOTE(review): combined['date'] holds Timestamps while the list holds
# datetime.date objects; verify this membership test actually produces 1s.
national_holidays = [datetime.date(2019, 9,12),datetime.date(2019, 9,13), datetime.date(2019, 9,14),
                     datetime.date(2019, 10,3), datetime.date(2019, 10,9)]
combined['is_national_holiday'] = combined['date'].apply(lambda x: x in national_holidays).astype(np.int8)

# +
# Sum-up the number of passengers for two intervals (6-9 and 9-12).
morining_getin_cols = ['6~7_ride', '7~8_ride', '8~9_ride']
noon_getin_cols = ['9~10_ride', '10~11_ride', '11~12_ride']
morning_takeoff_cols = ['6~7_takeoff', '7~8_takeoff','8~9_takeoff']
noon_takeoff_cols = ['9~10_takeoff', '10~11_takeoff', '11~12_takeoff']

# Morning getin/takeoff & Noon getin/takeoff
combined['morning_getin'] = combined[morining_getin_cols].sum(axis=1)
combined['morning_takeoff'] = combined[morning_takeoff_cols].sum(axis=1)
combined['noon_getin'] = combined[noon_getin_cols].sum(axis=1)
combined['noon_takeoff'] = combined[noon_takeoff_cols].sum(axis=1)

combined = combined.drop(morining_getin_cols + noon_getin_cols + morning_takeoff_cols + noon_takeoff_cols, axis=1)

# +
# STATION_CODE: total morning boardings/alightings per (date, station).
station_morning_getin_sum = combined.groupby(['date','station_code'])['morning_getin'].sum().reset_index()
station_morning_getin_sum = station_morning_getin_sum.rename(columns = {'morning_getin': 'station_morning_getin_sum'})

station_morning_takeoff_sum = combined.groupby(['date','station_code'])['morning_takeoff'].sum().reset_index()
station_morning_takeoff_sum = station_morning_takeoff_sum.rename(columns = {'morning_takeoff': 'station_morning_takeoff_sum'})

# Merge
combined = pd.merge(combined, station_morning_getin_sum , on =['date','station_code'], how='left')
combined = pd.merge(combined, station_morning_takeoff_sum , on =['date','station_code'], how='left')

# +
# BUS_ROUTE: total morning boardings/alightings per (date, route).
bus_route_getin_sum = combined.groupby(['date','bus_route_id'])['morning_getin'].sum().reset_index()
bus_route_getin_sum = bus_route_getin_sum.rename(columns = {'morning_getin': 'bus_route_getin_sum'})

bus_route_takeoff_sum = combined.groupby(['date','bus_route_id'])['morning_takeoff'].sum().reset_index()
bus_route_takeoff_sum = bus_route_takeoff_sum.rename(columns = {'morning_takeoff': 'bus_route_takeoff_sum'})

# Merge
combined = pd.merge(combined, bus_route_getin_sum , on =['date','bus_route_id'], how='left')
combined = pd.merge(combined, bus_route_takeoff_sum , on =['date','bus_route_id'], how='left')

# +
# STATION_CODE: mean morning boardings/alightings per (date, station).
station_morning_getin_mean = combined.groupby(['date','station_code'])['morning_getin'].mean().reset_index()
station_morning_getin_mean = station_morning_getin_mean.rename(columns = {'morning_getin': 'station_morning_getin_mean'})

station_morning_takeoff_mean = combined.groupby(['date','station_code'])['morning_takeoff'].mean().reset_index()
station_morning_takeoff_mean = station_morning_takeoff_mean.rename(columns = {'morning_takeoff': 'station_morning_takeoff_mean'})

# Merge
combined = pd.merge(combined, station_morning_getin_mean , on =['date','station_code'], how='left')
combined = pd.merge(combined, station_morning_takeoff_mean , on =['date','station_code'], how='left')

# +
# BUS_ROUTE: mean morning boardings/alightings per (date, route).
bus_route_getin_mean = combined.groupby(['date','bus_route_id'])['morning_getin'].mean().reset_index()
bus_route_getin_mean = bus_route_getin_mean.rename(columns = {'morning_getin': 'bus_route_getin_mean'})

bus_route_takeoff_mean = combined.groupby(['date','bus_route_id'])['morning_takeoff'].mean().reset_index()
bus_route_takeoff_mean = bus_route_takeoff_mean.rename(columns = {'morning_takeoff': 'bus_route_takeoff_mean'})

# Merge
combined = pd.merge(combined, bus_route_getin_mean , on =['date','bus_route_id'], how='left')
combined = pd.merge(combined, bus_route_takeoff_mean , on =['date','bus_route_id'], how='left')

# +
# Kmeans key: one id per (route, station) pair.  np.str was removed in
# NumPy 1.24; the builtin str is the documented drop-in replacement.
combined['bus_route_station'] = combined['bus_route_id'].astype(str)+'_'+combined['station_code'].astype(str)

# +
# Kmeans1: cluster (route, station) pairs on their per-date morning boarding profile.
df_cluster = combined[['date','bus_route_id','station_code','morning_getin']].copy()
df_cluster['bus_route_station'] = df_cluster['bus_route_id'].astype(str)+'_'+df_cluster['station_code'].astype(str)
df_cluster_pivot = pd.pivot_table(data = df_cluster, index='bus_route_station', columns='date', values='morning_getin', aggfunc='sum').fillna(0)

kmeans = MiniBatchKMeans(n_clusters=200, random_state=1993)
# %time kmeans.fit(df_cluster_pivot)
df_cluster_pivot['kmeans1'] = kmeans.predict(df_cluster_pivot)
combined = pd.merge(combined, df_cluster_pivot[['kmeans1']], left_on = 'bus_route_station', right_index=True, how='left')

# +
# Kmeans2: same clustering on the noon boarding profile.
df_cluster = combined[['date','bus_route_id','station_code','noon_getin']].copy()
df_cluster['bus_route_station'] = df_cluster['bus_route_id'].astype(str)+'_'+df_cluster['station_code'].astype(str)
df_cluster_pivot = pd.pivot_table(data = df_cluster, index='bus_route_station', columns='date', values='noon_getin', aggfunc='sum').fillna(0)

kmeans = MiniBatchKMeans(n_clusters=200, random_state=1993)
# %time kmeans.fit(df_cluster_pivot)
df_cluster_pivot['kmeans2'] = kmeans.predict(df_cluster_pivot)
combined = pd.merge(combined, df_cluster_pivot[['kmeans2']], left_on = 'bus_route_station', right_index=True, how='left')
# -

# Merge the regular-commuter counts built from the card log.
combined = pd.merge(combined, df_commuter, on = 'station_code',how='left')

# +
# Afternoon-Getoff-Amount
df_afternoon_getoff_amount['getoff_date'] = pd.to_datetime(df_afternoon_getoff_amount['getoff_date'])
combined = pd.merge(combined, df_afternoon_getoff_amount,
                    left_on = ['bus_route_id','date','station_code'],
                    right_on = ['bus_route_id','getoff_date','getoff_station_code'], how='left')
combined = combined.drop(['getoff_date','getoff_station_code'], axis=1)
combined['afternoon_takeoff'] = combined['afternoon_takeoff'].fillna(0)
# -

# Average headway (seconds) to the previous bus on the same route that day.
date_route_stataion_waittime['geton_date'] = pd.to_datetime(date_route_stataion_waittime['geton_date'])
combined = pd.merge(combined, date_route_stataion_waittime, left_on =['date','bus_route_id'], right_on =['geton_date','bus_route_id'], how='left')
combined = combined.drop(['geton_date'], axis=1)

# +
# Per-category boarding counts (6-9h window) per station.
geton_bus_passender_cluster_count_morning['geton_date'] = pd.to_datetime(geton_bus_passender_cluster_count_morning['geton_date'])
combined = pd.merge( combined, geton_bus_passender_cluster_count_morning ,
                     left_on = ['date','station_code'], right_on = ['geton_date', 'geton_station_code'], how = 'left')
combined = combined.drop(['geton_station_code','geton_date'], axis=1)

# +
# Per-category alighting counts (9-12h window) per station.
takeoff_bus_passender_cluster_count_noon['geton_date'] = pd.to_datetime(takeoff_bus_passender_cluster_count_noon['geton_date'])
combined = pd.merge( combined, takeoff_bus_passender_cluster_count_noon ,
                     left_on = ['date','station_code'], right_on = ['geton_date', 'getoff_station_code'], how = 'left')
combined = combined.drop(['getoff_station_code','geton_date'], axis=1)

# +
# Weather data -- rainfall collected before 12:00.
df_rain = pd.read_csv('../preprocessed_external_dataset/hourly_rain.csv')
df_rain['date'] = pd.to_datetime(df_rain['date'])
combined = pd.merge(combined, df_rain, on='date', how='left')

# Weather data -- previous day's rainfall.
df_daily_rain = pd.read_csv('../preprocessed_external_dataset/daily_rain.csv')
df_daily_rain['date'] = pd.to_datetime(df_daily_rain['date'])
df_daily_rain.columns = ['prev_date','prev_daily_rain']

combined['prev_date'] = pd.to_datetime(combined['date']) - pd.Timedelta('1 day')
combined = pd.merge(combined, df_daily_rain, on='prev_date', how='left')

# Weather data -- cloud cover collected before 12:00.
df_cloud = pd.read_csv('../preprocessed_external_dataset/hourly_cloud.csv')
# BUGFIX: the original parsed df_rain['date'] here (copy-paste slip), silently
# stamping the rain table's dates onto the cloud table; parse df_cloud's own column.
df_cloud['date'] = pd.to_datetime(df_cloud['date'])
combined = pd.merge(combined, df_cloud, on='date', how='left')
# -

# Station address info via Google Maps.
# Pickle mapping "lat_long" -> crawled address string for each coordinate.
geo_df2 = pd.read_pickle('../preprocessed_external_dataset/second_whole_dict.pickle')
combined['latlong_second'] = combined['latitude'].astype(str) +'_'+ combined['longitude'].astype(str)
combined['latlong_second'] = combined['latlong_second'].apply(lambda x: geo_df2.get(x))

# Number of residents per district.
df_pop = pd.read_csv('../preprocessed_external_dataset/제주도_거주자수.csv')
# The district is the second whitespace token of the address; the original's
# extra .split(' ')[0] on that single token was a no-op and has been removed.
combined['district'] = combined['latlong_second'].apply(lambda x: x.split(' ')[1])
combined = pd.merge(combined, df_pop, on='district', how='left')

# Drop unnecessary columns
drop_cols = ['id','date', 'station_name','bus_route_station',] + \
    ['getin_user_count4_morning', 'takeoff_user_count4_noon',
     'getin_user_count6_morning', 'takeoff_user_count6_noon',
     'prev_date','district', ]
train, test = combined[:n_trn].drop(drop_cols, axis=1) , combined[n_trn:].drop(drop_cols, axis=1)

train.to_pickle('preprocessed_train.pickle')
test.to_pickle('preprocessed_test.pickle')

# +
# Label Encoding (fit on train+test together so both share one label space).
cat_cols = ['bus_route_id','station_code','in_out','latlong_second' ]
for col in tqdm_notebook(cat_cols):
    lbl = LabelEncoder()
    lbl.fit( train[col].tolist() + test[col].tolist() )
    train[col] = lbl.transform( train[[col]] )
    test[col] = lbl.transform( test[[col]] )

# +
# Before modeling
train_set = train.drop([target_col]+[], axis=1)
test_set = test.drop([target_col]+[], axis=1)

train_label = train[target_col]
test_label = test[target_col]  # NOTE(review): NaN for test rows; never used below.
# -

# Basic LGBM Model
n_splits= 5
NUM_BOOST_ROUND = 100000
SEED = 1993
lgbm_param = {'objective':'rmse',
              'boosting_type': 'gbdt',
              'random_state':1993,
              'learning_rate':0.01,
              'subsample':0.7,
              'tree_learner': 'serial',
              'colsample_bytree':0.78,
              'early_stopping_rounds':50,
              'subsample_freq': 1,
              'reg_lambda':7,
              'reg_alpha': 5,
              'num_leaves': 96,
              'seed' : SEED
              }

# +
# Seed-averaged 5-fold CV, stratified on bus_route_id so every route is
# represented in each fold.
seeds = [1993]
outer_oof_train = np.zeros( train.shape[0] )
outer_oof_test = np.zeros( test.shape[0] )

for seed in tqdm_notebook(seeds):
    cv_list = []
    oof_train = np.zeros( train.shape[0] )
    final_test = np.zeros( test.shape[0] )
    kfolds = StratifiedKFold(n_splits = n_splits, shuffle=True, random_state=seed )
    for ind, (trn_ind, val_ind) in tqdm_notebook( enumerate(kfolds.split(train_set, train_set['bus_route_id'])) ):
        X_train, y_train = train_set.iloc[trn_ind], train_label[trn_ind]
        X_valid, y_valid = train_set.iloc[val_ind], train_label[val_ind]

        dtrain = lgbm.Dataset( X_train, y_train )
        dvalid = lgbm.Dataset( X_valid, y_valid ,reference=dtrain)

        lgbm_param['seed'] = seed
        model = lgbm.train(lgbm_param , dtrain, NUM_BOOST_ROUND,
                           valid_sets=(dtrain, dvalid),
                           valid_names=('train','valid'),
                           categorical_feature=['bus_route_id','station_code','weekday',
                                                'kmeans1','kmeans2',
                                                # 'latlong_second'
                                                # 'route_kmeans_cluster',
                                                ] ,
                           verbose_eval= 100)

        valid_pred = model.predict(X_valid)
        test_pred = model.predict(test_set)

        oof_train[val_ind] += valid_pred  # each row sits in exactly one validation fold
        final_test += test_pred
        cv_list.append( sqrt(mean_squared_error(y_valid, valid_pred)) )
        print('='*80)

    final_test /= n_splits
    print(f"Average CV : {np.mean(cv_list)}")
    print(f"RMSE for OOF: {sqrt(mean_squared_error(train_label, oof_train))}")

    outer_oof_train += oof_train
    outer_oof_test += final_test

outer_oof_train /= len(seeds)
outer_oof_test /= len(seeds)
print(f"Overall for OOF: {sqrt(mean_squared_error(train_label, outer_oof_train))}")
# -

# Feature importances of the last trained fold model.
df_imp = pd.DataFrame(data = {'col': model.feature_name(), 'imp': model.feature_importance() })
df_imp = df_imp.sort_values(by='imp', ascending=False)
df_imp

# Ride counts cannot be negative: clip predictions at zero.
oof_train = [x if x>0 else 0 for x in oof_train]
final_test = [x if x>0 else 0 for x in final_test]
print(f"RMSE for OOF: {sqrt(mean_squared_error(train_label, oof_train))}")
# Compare the log1p-scaled distributions of the target, the out-of-fold
# predictions, and the test predictions on one axis.
for values in (train_label, oof_train, final_test):
    sns.distplot(np.log1p(values))

# +
# Fill the sample submission with the fold-averaged test predictions and save it.
df_sub = pd.read_csv(path + 'submission_sample.csv')
df_sub['18~20_ride'] = final_test
df_sub.to_csv('ggg.csv', index=False)
# -
Prediction Jeju Bus Passengers at Time off work/code/FEATURE/Make a feature-set.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # The curious case of `__mro__` # ## Method resolution order in multiple inheritance using C3 linearization # <img src="figures/twix.png" style="display:block;margin:auto;width:40%;"/> # + [markdown] slideshow={"slide_type": "slide"} # # Inheritance in Python # # * Python supports **multiple inheritance**, i.e. a class can be derived from more than one base classes # * In multiple inheritance, the features of all the base classes are inherited into the derived class # * The syntax for multiple inheritance is similar to single inheritance # + slideshow={"slide_type": "fragment"} class A: pass class B: pass class C(A,B): pass print(C.__bases__) # + [markdown] slideshow={"slide_type": "slide"} # # What is MRO? # * MRO stands for Method Resolution Order # * MRO is the order in which base classes in the inheritance hierarchy are searched when looking for a method # * All Python versions after 2.3 use the *C3 linearization* algorithm to determine this order # * Not all classes admit a linearization. There are cases, in complicated hierarchies, where it is not possible to derive a class such that its linearization respects all the desired properties. # # Check [this article](https://www.python.org/download/releases/2.3/mro/) on python.org for more info. Part of this presentation also based on [this tutorial](https://www.youtube.com/watch?v=YLgf8wGCX2w). # + [markdown] slideshow={"slide_type": "slide"} # # Why is this useful? # # * Given a class `C`, in the case of single inheritance hierarchy, if `C` is a subclass of `C1`, and `C1` is a subclass of `C2`, then the linearization of `C` is simply the list `[C,C1,C2]`. 
# * However, in a complicated multiple inheritance hierarchy, it is a non-trivial task to specify the order in which methods are overridden, i.e. to specify the order of the ancestors of `C`. # * We need to be able discover **deterministically** the order of classes for method calls in the inheritance chain. # * The list of the ancestors of a class `C`, including the class itself, ordered from the nearest ancestor to the furthest, is called the *class precedence list* or the *linearization* of `C`. # * The Method Resolution Order (MRO) is the set of rules that construct the linearization. In the Python literature, the idiom the *MRO* of the class `C` is also used as a synonymous for the *linearization* of `C`. # + [markdown] slideshow={"slide_type": "slide"} # # The diamond problem # <img src="figures/diamond_problem.png" style="display:block;margin:auto;width:40%;"/> # * In the above example the `Button` class inherits two differerent implementations of `equals()` # * It has no implementation of the operation of its own # * When `button.equals()` is called, it is unknown which implemetation - from `Rectangle`, `Clickable` or `object` will be used # + [markdown] slideshow={"slide_type": "slide"} # # C3 Linearization # * First introduced in the Dylan language # * Algorithm based on 3 important properties (this is how the name *C3* is derived) # 1. Consistent extended precedence graph (MRO is determined based on structure of the inheritance graph) # 2. Preserving local precedence ordering (no class will appear before any of its subclasses) # 3. Monotonicity # + [markdown] slideshow={"slide_type": "slide"} # # Monotonicity # * An MRO is monotonic when the following is true: # - If `C1` precedes `C2` in the linearization of `C` then `C1` precedes `C2` in the linearization of any subclass of `C`. 
# * Consider: `class X(O), class Y(O), class A(X,Y), class B(Y,X), class C(B,A)` # - Based on monotonicity it is **not** possible to derive a new class `C` from `A` and `B` since `X` precedes `Y` in `A`, but `Y` precedes `X` in `B`, therefore the method resolution order would be ambiguous in `C` (`XY` breaks monotonicity with `B`, `YX` breaks monotonicity with `A`). # <img src="figures/monotonicity.png" style="display:block;margin:auto;width:35%;"/> # + [markdown] slideshow={"slide_type": "slide"} # # Definition and notation # * Notation # - `C1 C2 ... CN` indicates the list of classes `[C1,C2,...,CN]` # - The *head* of the list is its first element: head = `C1` # - The *tail* is the rest of the list: tail = `C2 ... CN` # - The sum of the lists `[C] + [C1,C2,...,CN] = C + (C1 C2 ... CN) = C C1 C2 ... CN` # * Consider a class `C` in a multiple inheritance hierarchy, with `C` inheriting from the base classes `B1, B2, ..., BN`: # - The linearization of `C` is the sum of `C` plus the merge of linearizations of the parents and the list of the parents # - `L[C(B1 ... BN)] = C + merge(L[B1],...,L[BN], B1 ... BN)` # * Example: `L[Y(X1 X2 X3)] = Y + merge(L[X1],L[X2],L[X3], X1 X2 X3)` # # + [markdown] slideshow={"slide_type": "slide"} # # Computing merge # Consider a simple merge example: `merge(DO,EO,DE) = DEO` # 1. Select the first head of the lists which does not appear in the tail of any of the other lists. # - A good head may appear as the first element in multiple lists at the same time, but it is forbidden to appear anywhere else. # 2. Remove the selected element from all the lists where it appears as a head and append to the output list. # 3. Repeat the operation of selecting and removing a good head to extend the output list until all remaining lists are exhausted. # 4. 
If at some point no good head can be selected, because the heads of all remaining lists appear in any one tail of the lists, then the merge is impossible to compute due to cyclic dependencies in the inheritance hierarchy and no linearization of the original class exists. # # + [markdown] slideshow={"slide_type": "slide"} # # Properties of merge # * Three important considerations when computing merge: # 1. The merge of several sequences is a sequence that contains **each** of the the elements of the input sequence # - All elements within the input lists *DO*, *EO* and *DE* are present in the merged result *DEO*. # 2. An element that appears in more than once of the input sequences appears **only once** in the output sequence # - *D*, *E* and *O* appear in more than on input sequence, but the result has only one instance of each. # 3. If two elements appear in the same input sequence, their order in the output sequence is the same as their order in the input sequence. # - In the input sequence, D precedes both O and E; E precedes O. The same ordering is maintained in the merged output. # + [markdown] slideshow={"slide_type": "slide"} # # Compute the linearization: # `class A(B,C), class B(D,E) class C(D,F), class D(O), class E(O), class F(O), class O` # # <img src="figures/c3_example.png" style="display:block;margin:auto;width:40%;"/> # + [markdown] slideshow={"slide_type": "slide"} # # C3 computing example # **`L[C(B1 ... BN)] = C + merge(L[B1],...,L[BN], B1 ... 
BN)`**
# ```
# L[O] = O
# L[D] = D + merge(L[O],O) = D + merge(O,O) = DO
# L[E] = EO, L[F] = FO
# L[B] = B + merge(L[D],L[E],DE)
#      = B + merge(DO,EO,DE)
#      = B + D + merge(O,EO,E)
#      = B + D + E + merge(O,O)
#      = BDEO
# L[C] = C + merge(L[D],L[F],DF)
#      = C + merge(DO,FO,DF)
#      = CDFO
# L[A] = A + merge(L[B],L[C],BC)
#      = A + merge(BDEO,CDFO,BC)
#      = A + B + merge(DEO,CDFO,C)
#      = A + B + C + merge(DEO,DFO)
#      = A + B + C + D + merge(EO,FO)
#      = A + B + C + D + E + merge(O,FO)
#      = A + B + C + D + E + F + merge(O,O)
#      = ABCDEFO
# ```

# + slideshow={"slide_type": "slide"}
# The hierarchy from the worked example above: both inspect.getmro() and the
# __mro__ attribute report the computed linearization A B C D E F O (+ object).
class F: pass
class E: pass
class D: pass
class C(D,F): pass
class B(D,E): pass
class A(B,C): pass

from inspect import getmro
print(getmro(A))
print(A.__mro__)

# + slideshow={"slide_type": "slide"}
# Deliberately inconsistent hierarchy: C requires A before B while D requires
# B before A, so no monotonic linearization of E exists.  Creating class E
# raises "TypeError: Cannot create a consistent method resolution order", so
# the print() below is never reached -- this cell demonstrates the failure.
class A: pass
class B: pass
class C(A,B): pass
class D(B,A): pass
class E(C,D): pass

print(E.__mro__)

# + [markdown] slideshow={"slide_type": "slide"}
# <img src="figures/python_wonka.jpg" style="display:block;margin:auto;width:60%;"/>
014 - Lorde/tao_mro/case_mro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + import panel as pn from bokeh.sampledata.autompg import autompg css = ['https://cdn.datatables.net/1.10.24/css/jquery.dataTables.min.css', # Below: Needed for export buttons 'https://cdn.datatables.net/buttons/1.7.0/css/buttons.dataTables.min.css' ] js = { '$': 'https://code.jquery.com/jquery-3.5.1.js', 'DataTable': 'https://cdn.datatables.net/1.10.24/js/jquery.dataTables.min.js', # Below: Needed for export buttons 'buttons': 'https://cdn.datatables.net/buttons/1.7.0/js/dataTables.buttons.min.js', 'jszip': 'https://cdnjs.cloudflare.com/ajax/libs/jszip/3.1.3/jszip.min.js', 'pdfmake': 'https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.53/pdfmake.min.js', 'vfsfonts': 'https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.53/vfs_fonts.js', 'html5buttons': 'https://cdn.datatables.net/buttons/1.7.0/js/buttons.html5.min.js', } pn.extension(css_files=css, js_files=js) # - # ## Panel - Datatable # # This example demonstrates how to load the [jQuery DataTable extension](https://datatables.net/) and use it to render a pandas DataFrame. 
# + script = """ <script> if (document.readyState === "complete") { $('.example').DataTable(); } else { $(document).ready(function () { $('.example').DataTable(); }) } </script> """ html = autompg.to_html(classes=['example', 'panel-df']) pn.pane.HTML(html+script, sizing_mode='stretch_width') # - # ## Panel - Datatable with Export Buttons # # This example demonstrates how to add [export buttons](https://datatables.net/extensions/buttons/examples/html5/simple.html) as requested in [Discourse 2079](https://discourse.holoviz.org/t/adding-buttons-to-panel-datatable/2079) # + script = """ <script> function renderTable(){ $('.example2').DataTable( { dom: 'Bfrtip', buttons: [ 'copyHtml5', 'excelHtml5', 'csvHtml5', 'pdfHtml5' ] } ); } if (document.readyState === "complete") { renderTable() } else { $(document).ready(renderTable); } </script> """ html = autompg.to_html(classes=['example2', 'panel-df']) table_with_export_buttons = pn.pane.HTML(html+script, sizing_mode='stretch_width', margin=(10,5,35,5)) table_with_export_buttons # - # ## App # # Let's wrap the above into a nice app that can be run via `panel serve DataTable.ipynb` pn.template.FastListTemplate(site="Panel Gallery", title="DataTable", main=["This example demonstrates **how to use the [jQuery DataTable extension](https://datatables.net/) with Panel** and use it to render a pandas DataFrame.", table_with_export_buttons]).servable();
examples/gallery/external/DataTable.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Tutorial-IllinoisGRMHD: `convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C` # # ## Authors: <NAME> & <NAME> # # <font color='red'>**This module is currently under development**</font> # # ## In this tutorial module we explain the algorithm used to get the BSSN variables from the ADM variables. We will also explain how to impose the constraint that the conformal metric has unit determinant. # # ### Required and recommended citations: # # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. L. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)). # * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)). # * **(Recommended)** <NAME>., <NAME>., <NAME>. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)). # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This module is organized as follows # # 0. [Step 0](#src_dir): **Source directory creation** # 1. [Step 1](#introduction): **Introduction** # 1. 
[Step 2](#convert_adm_to_bssn__det_gammabar_eq_1): **`convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C`** # 1. [Step 2.a](#physical_metric_and_its_determinant): *Loading the physical metric $\gamma_{ij}$ and computing $\gamma = \det\left(\gamma_{ij}\right)$* # 1. [Step 2.b](#phi_and_psi): *Computing $\phi$ and $\psi$* # 1. [Step 2.c](#enforce_det_gammabar_eq_1): *Enforcing the constraint $\bar\gamma = 1$* # 1. [Step 2.d](#update_gfs): *Updating the gridfunctions* # 1. [Step 3](#code_validation): **Code validation** # 1. [Step 4](#latex_pdf_output): **Output this module to $\LaTeX$-formatted PDF file** # <a id='src_dir'></a> # # # Step 0: Source directory creation \[Back to [top](#toc)\] # $$\label{src_dir}$$ # # We will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet. # + # Step 0: Creation of the IllinoisGRMHD source directory # Step 0a: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) # Step 0b: Load up cmdline_helper and create the directory import cmdline_helper as cmd IGM_src_dir_path = os.path.join("..","src") cmd.mkdir(IGM_src_dir_path) # Step 0c: Create the output file path outfile_path__ADM_to_BSSN__det_gammabar_eq_1__C = os.path.join(IGM_src_dir_path,"convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C") # - # <a id='introduction'></a> # # # Step 1: Introduction \[Back to [top](#toc)\] # $$\label{introduction}$$ # # In this module we will explain the procedure used within `IllinoisGRMHD` to compute the conformal metric, $\bar\gamma_{ij}$, and its inverse, $\bar\gamma^{ij}$, from the physical metric $\gamma_{ij}$. We will also describe how to compute $\phi$, the conformal factor, and $\psi\equiv e^{\phi}$. 
Finally, we also explain the procedure used to enforce the constraint $\bar\gamma = \det\left(\bar\gamma_{ij}\right) = 1$. # # **A note on notation**: The notation used throuhgout the NRPy tutorial notebooks for the conformal metric is $\bar\gamma_{ij}$. However, in the literature the notation $\tilde\gamma_{ij}$ is also used to represent the conformal metric. It is important to note that in `IllinoisGRMHD` we refer to the conformal metric using the *latter* notation, i.e. ${\rm gtij} := \tilde\gamma_{ij}$ and ${\rm gtupij} := \tilde\gamma^{ij}$. To keep the discussion consistent with the other notebooks, however, we will still present the discussion using the notation $\tilde\gamma_{ij} \to \bar\gamma_{ij}$. Bottom line, here $\tilde\gamma_{ij}$ and $\bar\gamma_{ij}$ represent exactly the same quantity, the conformal metric. # <a id='convert_adm_to_bssn__det_gammabar_eq_1'></a> # # # Step 2: `convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C` \[Back to [top](#toc)\] # $$\label{convert_adm_to_bssn__det_gammabar_eq_1}$$ # # <a id='physical_metric_and_its_determinant'></a> # # ## Step 2.a: Loading the physical metric $\gamma_{ij}$ and computing $\gamma = \det\left(\gamma_{ij}\right)$ \[Back to [top](#toc)\] # $$\label{physical_metric_and_its_determinant}$$ # # We start by reading in the physical metric ${\rm gij_physL} := \gamma_{ij}$ and computing its determinant # # $$ # \boxed{ # \begin{align} # \gamma = \det\left(\gamma_{ij}\right) # &= \gamma_{xx}\gamma_{yy}\gamma_{zz} + \gamma_{xy}\gamma_{yz}\gamma_{xz} + \gamma_{xz}\gamma_{xy}\gamma_{yz}\\ # &- \gamma_{xz}\gamma_{yy}\gamma_{xz} - \gamma_{xy}\gamma_{xy}\gamma_{zz} - \gamma_{xx}\gamma_{yz}\gamma_{yz} # \end{align} # }\ . # $$ # # Notice that we have used the fact that $\gamma_{ij}$ is symmetric above, i.e. $\gamma_{ij}=\gamma_{ji}$. 
# + # %%writefile $outfile_path__ADM_to_BSSN__det_gammabar_eq_1__C #include "cctk.h" #include <cmath> #include <cstdio> #include <cstdlib> #include "cctk_Parameters.h" void IllinoisGRMHD_convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij (const cGH *cctkGH,const int *cctk_lsh, CCTK_REAL *gxx,CCTK_REAL *gxy,CCTK_REAL *gxz,CCTK_REAL *gyy,CCTK_REAL *gyz,CCTK_REAL *gzz,CCTK_REAL *alp, CCTK_REAL *gtxx,CCTK_REAL *gtxy,CCTK_REAL *gtxz,CCTK_REAL *gtyy,CCTK_REAL *gtyz,CCTK_REAL *gtzz, CCTK_REAL *gtupxx,CCTK_REAL *gtupxy,CCTK_REAL *gtupxz,CCTK_REAL *gtupyy,CCTK_REAL *gtupyz,CCTK_REAL *gtupzz, CCTK_REAL *phi,CCTK_REAL *psi,CCTK_REAL *lapm1) { DECLARE_CCTK_PARAMETERS; #pragma omp parallel for for(int k=0;k<cctk_lsh[2];k++) for(int j=0;j<cctk_lsh[1];j++) for(int i=0;i<cctk_lsh[0];i++) { int index=CCTK_GFINDEX3D(cctkGH,i,j,k); CCTK_REAL gxx_physL=gxx[index]; CCTK_REAL gxy_physL=gxy[index]; CCTK_REAL gxz_physL=gxz[index]; CCTK_REAL gyy_physL=gyy[index]; CCTK_REAL gyz_physL=gyz[index]; CCTK_REAL gzz_physL=gzz[index]; /********************************************************************** * Compute \tilde{\gamma_{ij}}, phi, and psi (BSSN) from g_{ij} (ADM) * **********************************************************************/ CCTK_REAL gijdet = gxx_physL * gyy_physL * gzz_physL + gxy_physL * gyz_physL * gxz_physL + gxz_physL * gxy_physL * gyz_physL - gxz_physL * gyy_physL * gxz_physL - gxy_physL * gxy_physL * gzz_physL - gxx_physL * gyz_physL * gyz_physL; gijdet = fabs(gijdet); # - # <a id='phi_and_psi'></a> # # ## Step 2.b: Computing $\phi$ and $\psi$ \[Back to [top](#toc)\] # $$\label{phi_and_psi}$$ # # The BSSN formalism relates the physical metric, $\gamma_{ij}$, to a conformal metric, $\bar\gamma{ij}$, via the relation # # $$ # \boxed{\bar\gamma_{ij} = e^{-4\phi}\gamma_{ij}}\ , # $$ # # with the constraint that $\bar\gamma \equiv \det\left(\bar\gamma_{ij}\right) = 1$. 
This immediately implies that # # $$ # e^{-12\phi} \gamma = \bar\gamma = 1 \implies -12\phi = \log\left(\frac{1}{\gamma}\right) = - \log\gamma # \implies \boxed{\phi = \frac{1}{12}\log\gamma}\ . # $$ # # Useful quantities to compute are # # $$ # \boxed{ # \begin{align} # \psi &\equiv e^{\phi}\\ # \psi^{-4} &\equiv e^{-4\phi} # \end{align} # }\ . # $$ # # All the boxed quantities above are evaluated below. # %%writefile -a $outfile_path__ADM_to_BSSN__det_gammabar_eq_1__C CCTK_REAL phiL = (1.0/12.0) * log(gijdet); CCTK_REAL psiL = exp(phiL); CCTK_REAL Psim4 = 1.0/(psiL*psiL*psiL*psiL); CCTK_REAL gtxxL = gxx_physL*Psim4; CCTK_REAL gtxyL = gxy_physL*Psim4; CCTK_REAL gtxzL = gxz_physL*Psim4; CCTK_REAL gtyyL = gyy_physL*Psim4; CCTK_REAL gtyzL = gyz_physL*Psim4; CCTK_REAL gtzzL = gzz_physL*Psim4; # <a id='enforce_det_gammabar_eq_1'></a> # # ## Step 2.c: Enforcing the constraint $\bar\gamma = 1$ \[Back to [top](#toc)\] # $$\label{enforce_det_gammabar_eq_1}$$ # # The BSSN formalism evolves the determinant of the conformal metric through $\partial_{t}\bar\gamma = 0$. Since initially $\bar\gamma=1$, one would expect that $\bar\gamma=1$ at all times. However, numerical errors can cause the value of the determinant of the conformal metric to drift away from unity. To remedy this, we enforce the constraint $\bar\gamma=1$ by transforming the conformal metric according to the algebraic condition (cf. eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/pdf/1712.07658.pdf) with $\hat\gamma = 1$). # # $$ # \boxed{\bar{\gamma}_{ij} \to \left(\frac{1}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{ij}}\ . 
# $$ # # We then start by evaluating # # $$ # \boxed{ # \begin{align} # \bar\gamma = \det\left(\bar\gamma_{ij}\right) # &= \bar\gamma_{xx}\bar\gamma_{yy}\bar\gamma_{zz} + \bar\gamma_{xy}\bar\gamma_{yz}\bar\gamma_{xz} + \bar\gamma_{xz}\bar\gamma_{xy}\bar\gamma_{yz}\\ # &- \bar\gamma_{xz}\bar\gamma_{yy}\bar\gamma_{xz} - \bar\gamma_{xy}\bar\gamma_{xy}\bar\gamma_{zz} - \bar\gamma_{xx}\bar\gamma_{yz}\bar\gamma_{yz} # \end{align} # }\ . # $$ # # In the code below we also define # # $$ # \boxed{{\rm gtijdet\_Fm1o3} = \left(\frac{1}{\bar{\gamma}}\right)^{1/3}}\ . # $$ # %%writefile -a $outfile_path__ADM_to_BSSN__det_gammabar_eq_1__C /********************************* * Apply det gtij = 1 constraint * *********************************/ CCTK_REAL gtijdet = gtxxL * gtyyL * gtzzL + gtxyL * gtyzL * gtxzL + gtxzL * gtxyL * gtyzL - gtxzL * gtyyL * gtxzL - gtxyL * gtxyL * gtzzL - gtxxL * gtyzL * gtyzL; CCTK_REAL gtijdet_Fm1o3 = fabs(1.0/cbrt(gtijdet)); gtxxL = gtxxL * gtijdet_Fm1o3; gtxyL = gtxyL * gtijdet_Fm1o3; gtxzL = gtxzL * gtijdet_Fm1o3; gtyyL = gtyyL * gtijdet_Fm1o3; gtyzL = gtyzL * gtijdet_Fm1o3; gtzzL = gtzzL * gtijdet_Fm1o3; if(gtijdet<0.0) { CCTK_VWarn(CCTK_WARN_ALERT,__LINE__, __FILE__, CCTK_THORNSTRING, "WARNING: det[3-metric]<0.0 at point %d %d %d | cctk_lsh: %d %d %d. Hopefully this is occurring in gz's! gtij_phys = %.2e %.2e %.2e %.2e %.2e %.2e gtij_new = %.2e %.2e %.2e %.2e %.2e %.2e | gijdet = %.2e | gtijdet = %.2e", i,j,k,cctk_lsh[0],cctk_lsh[1],cctk_lsh[2],gxx_physL,gxy_physL,gxz_physL,gyy_physL,gyz_physL,gzz_physL,gtxxL,gtxyL,gtxzL,gtyyL,gtyzL,gtzzL,-gijdet,gtijdet); } # <a id='update_gfs'></a> # # ## Step 2.d: Updating the gridfunctions \[Back to [top](#toc)\] # $$\label{update_gfs}$$ # # Finally, we update the gridfunctions for $\phi$, $\psi$, $\bar\gamma_{ij}$, $\gamma_{ij}$, and $\bar\gamma^{ij}$, respectively, in the code below. 
# # If $\mathcal{M}$ is a $3\times3$ symmetric matrix and $\det(\mathcal{M})\neq0$, [recall that we have](https://en.wikipedia.org/wiki/Invertible_matrix#Inversion_of_3_%C3%97_3_matrices) # # $$ # \mathcal{M}^{-1} = # \begin{pmatrix} # a & b & c\\ # b & d & e\\ # c & e & f # \end{pmatrix}^{-1} # = # \frac{1}{\det(\mathcal{M})} # \begin{pmatrix} # A & B & C\\ # B & D & E\\ # C & E & F # \end{pmatrix}\ , # $$ # # where # # $$ # \boxed{ # \begin{align} # A &= \ \ \left(df - ee\right)\\ # B &= -\left(bf - ce\right)\\ # C &= \ \ \left(be - cd\right)\\ # D &= \ \ \left(af - cc\right)\\ # E &= -\left(ae - bc\right)\\ # F &= \ \ \left(ad - bb\right) # \end{align} # }\ . # $$ # # Notice that if we replace $\mathcal{M}\to\bar\gamma_{ij}$, then we have also $\det(\mathcal{M})\to1$. # + # %%writefile -a $outfile_path__ADM_to_BSSN__det_gammabar_eq_1__C CCTK_REAL Psi4 = psiL*psiL*psiL*psiL; /***************************************** * Set all the needed BSSN gridfunctions * *****************************************/ phi[index] = phiL; psi[index] = psiL; lapm1[index] = alp[index] - 1.0; gtxx[index] = gtxxL; gtxy[index] = gtxyL; gtxz[index] = gtxzL; gtyy[index] = gtyyL; gtyz[index] = gtyzL; gtzz[index] = gtzzL; gxx[index] = gtxxL*Psi4; gxy[index] = gtxyL*Psi4; gxz[index] = gtxzL*Psi4; gyy[index] = gtyyL*Psi4; gyz[index] = gtyzL*Psi4; gzz[index] = gtzzL*Psi4; gtupxx[index] = ( gtyyL * gtzzL - gtyzL * gtyzL ); gtupxy[index] = - ( gtxyL * gtzzL - gtyzL * gtxzL ); gtupxz[index] = ( gtxyL * gtyzL - gtyyL * gtxzL ); gtupyy[index] = ( gtxxL * gtzzL - gtxzL * gtxzL ); gtupyz[index] = - ( gtxxL * gtyzL - gtxyL * gtxzL ); gtupzz[index] = ( gtxxL * gtyyL - gtxyL * gtxyL ); } } # - # <a id='code_validation'></a> # # # Step 3: Code validation \[Back to [top](#toc)\] # $$\label{code_validation}$$ # # First we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook. 
# + # Verify if the code generated by this tutorial module # matches the original IllinoisGRMHD source code # First download the original IllinoisGRMHD source code import urllib from os import path original_IGM_file_url = "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C" original_IGM_file_name = "convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij-original.C" original_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name) # Then download the original IllinoisGRMHD source code # We try it here in a couple of ways in an attempt to keep # the code more portable try: original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read() # Write down the file the original IllinoisGRMHD source code with open(original_IGM_file_path,"w") as file: file.write(original_IGM_file_code) except: try: original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read() # Write down the file the original IllinoisGRMHD source code with open(original_IGM_file_path,"w") as file: file.write(original_IGM_file_code) except: # If all else fails, hope wget does the job # !wget -O $original_IGM_file_path $original_IGM_file_url # Perform validation # Validation__ADM_to_BSSN__det_gammabar_eq_1__C = !diff $original_IGM_file_path $outfile_path__ADM_to_BSSN__det_gammabar_eq_1__C if Validation__ADM_to_BSSN__det_gammabar_eq_1__C == []: # If the validation passes, we do not need to store the original IGM source code file # !rm $original_IGM_file_path print("Validation test for convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C: PASSED!") else: # If the validation fails, we keep the original IGM source code file print("Validation test for convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.C: FAILED!") # We also print out the difference between the code generated # in this tutorial module and the original IGM source code 
print("Diff:") for diff_line in Validation__ADM_to_BSSN__det_gammabar_eq_1__C: print(diff_line) # - # <a id='latex_pdf_output'></a> # # # Step 4: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.pdf](Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means). latex_nrpy_style_path = os.path.join(nrpy_dir_path,"latex_nrpy_style.tplx") # #!jupyter nbconvert --to latex --template $latex_nrpy_style_path Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.ipynb # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.tex # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.tex # #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.tex # !rm -f Tut*.out Tut*.aux Tut*.log
IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__convert_ADM_to_BSSN__enforce_detgtij_eq_1__and_compute_gtupij.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import libraries

import sklearn
import csv
import numpy as np
from numpy import genfromtxt
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import pandas
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")

# # Load the input data

data = pandas.read_csv('data/bank.csv', delimiter=',', header=0)
data

# # Encode categorical features (one-hot encoding)

# ## Categorical features

categorial = pandas.DataFrame()
for c in ['job', 'marital', 'education', 'contact', 'month', 'poutcome']:
    categorial = pandas.concat([categorial, pandas.get_dummies(data[c], prefix=c)], axis=1)
categorial

# ## Binary features

binary = pandas.DataFrame()
for c in ['default', 'housing', 'loan', 'y']:
    binary = pandas.concat([binary, data[c].map({'yes': 1, 'no': 0})], axis=1)
binary

# ## Join all processed features

prepared = data[['age', 'balance', 'day', 'duration', 'campaign', 'pdays', 'previous']].join(binary).join(categorial)
prepared

# ## Split the data into training and test sets

# +
# Use the keyword form: the positional `axis` argument was removed in pandas 2.0.
x = prepared.drop(columns='y')
y = prepared[['y']]

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=1)
# -

# # Logistic regression

# ## Fit the model

# Pass the target as a 1-D array: sklearn expects shape (n_samples,), and a
# one-column DataFrame triggers a DataConversionWarning (currently hidden by
# the blanket warnings filter above).
lr_clf = LogisticRegression(random_state=0).fit(x_train, y_train.values.ravel())

# ## Score on the training set

lr_train_score = lr_clf.score(x_train, y_train)
lr_train_score

# ## Score on the test set

lr_test_score = lr_clf.score(x_test, y_test)
lr_test_score

# ## Compute metrics

y_pred = lr_clf.predict(x_test)
lr_tn, lr_fp, lr_fn, lr_tp = sklearn.metrics.confusion_matrix(y_test, y_pred).ravel()
lr_precision = sklearn.metrics.precision_score(y_test, y_pred)
lr_recall = sklearn.metrics.recall_score(y_test, y_pred)
lr_accuracy = sklearn.metrics.accuracy_score(y_test, y_pred)
print(f'TN: {lr_tn}, FP: {lr_fp}, FN: {lr_fn}, TP: {lr_tp}')
print(f'precision: {lr_precision}, recall: {lr_recall}, accuracy: {lr_accuracy}')

# ## Plot the logistic-regression ROC curve

# NOTE(review): plot_roc_curve was removed in scikit-learn 1.2; on newer
# versions use sklearn.metrics.RocCurveDisplay.from_estimator instead.
lr_roc_display = sklearn.metrics.plot_roc_curve(lr_clf, x_test, y_test)

# # Support vector machine

# ## Fit the model

svm_clf = svm.LinearSVC(random_state=2).fit(x_train, y_train.values.ravel())

# ## Score on the training set

svm_train_score = svm_clf.score(x_train, y_train)
svm_train_score

# ## Score on the test set

svm_test_score = svm_clf.score(x_test, y_test)
svm_test_score

# ## Compute metrics

y_pred = svm_clf.predict(x_test)
svm_tn, svm_fp, svm_fn, svm_tp = sklearn.metrics.confusion_matrix(y_test, y_pred).ravel()
svm_precision = sklearn.metrics.precision_score(y_test, y_pred)
svm_recall = sklearn.metrics.recall_score(y_test, y_pred)
svm_accuracy = sklearn.metrics.accuracy_score(y_test, y_pred)
print(f'TN: {svm_tn}, FP: {svm_fp}, FN: {svm_fn}, TP: {svm_tp}')
print(f'precision: {svm_precision}, recall: {svm_recall}, accuracy: {svm_accuracy}')

# ## Plot the SVM ROC curve

svm_roc_display = sklearn.metrics.plot_roc_curve(svm_clf, x_test, y_test)

# # Comparison

# Overlay both ROC curves on a single pair of axes.
ax = plt.gca()
svm_roc_display.plot(ax=ax)
lr_roc_display.plot(ax=ax)

# Named `metrics_table` (not `metrics`) so it does not shadow the
# `from sklearn import metrics` import above.
metrics_table = {
    'Models': ['Logistic Regression', 'SVM'],
    'TN': [lr_tn, svm_tn], 'FP': [lr_fp, svm_fp],
    'FN': [lr_fn, svm_fn], 'TP': [lr_tp, svm_tp],
    'Precision': [lr_precision, svm_precision],
    'Recall': [lr_recall, svm_recall],
    'Accuracy': [lr_accuracy, svm_accuracy]
}
comparison = pandas.DataFrame.from_dict(metrics_table)
comparison.set_index('Models')
bank.ipynb