code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import botocore import boto3 from boto3.session import Session from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor import threading from concurrent.futures import wait import msgpack # type: ignore from monty.dev import deprecated from monty.msgpack import default as monty_default from monty.msgpack import object_hook as monty_object_hook import threading import multiprocessing from multiprocessing.pool import ThreadPool import numpy as np import uuid from concurrent.futures import ThreadPoolExecutor # - session = Session(profile_name="mp_shreyas") resource = session.resource("s3", endpoint_url="https://minio.materialsproject.org", verify=False) s3_bucket = resource.Bucket("shreyas") # + s3_bucket.objects.all().delete() # - data = np.random.rand(100,100,100) data = msgpack.packb(data, default=monty_default) s3_bucket.put_object(Key="test/1", Body=data) # + class MyPut(threading.Thread): def run(self): # Here we create a new session per thread session = Session(profile_name="mp_shreyas") # Next, we create a resource client using our thread's session object resource = session.resource("s3", endpoint_url="https://minio.materialsproject.org", verify=False) s3_bucket = resource.Bucket("shreyas") for i in range(10): key = <KEY> s3_bucket.put_object(Key=f"test/{key}", Body=data) # + for i in range(4): t = MyPut() t.start() # - def put_data(key): # Here we create a new session per thread session = Session(profile_name="mp_shreyas") # Next, we create a resource client using our thread's session object resource = session.resource("s3", endpoint_url="https://minio.materialsproject.org", verify=False) s3_bucket = resource.Bucket("shreyas") s3_bucket.put_object(Key=f"test/{key}", Body=data) with ThreadPoolExecutor(max_workers=4) as pool: 
fs = { pool.submit(fn=put_data, key=uuid.uuid4()) for i in range(40) } fs, _ = wait(fs) # + thread_local = threading.local() def get_bucket(): if not hasattr(thread_local, "s3_bucket"): print(f'Create_bucket on thread {threading.current_thread()}') session = Session(profile_name="mp_minio_jshen") resource = session.resource("s3", endpoint_url="http://app.mp-minio.prod-cattle.stable.spin.nersc.org:60013") thread_local.s3_bucket = resource.Bucket("jshen") return thread_local.s3_bucket def write_doc_to_s3(doc, data): print(f'runnin on thread {threading.current_thread()}') # s3_bucket = get_bucket() session = Session(profile_name="mp_shreyas") resource = session.resource("s3", endpoint_url="https://minio.materialsproject.org", verify=False) s3_bucket = resource.Bucket("shreyas") s3_bucket.put_object( Key="test/" + str(doc), Body=data ) docs = range(100000) with ThreadPoolExecutor(max_workers=4) as pool: fs = { pool.submit( fn=write_doc_to_s3, doc=itr_doc, data=data ) for itr_doc in docs } fs, _ = wait(fs) # -
Untitled2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.6 64-bit # name: python37664bit8f4e8029c50c4abfa3fe45b7b9893def # --- import time import pandas as pd import numpy as np import math import random def some_math(x): return round(math.sqrt(x)**math.e,3) def some_more_math(x): return math.sqrt(x**3)+math.pi**2 some_more_math(1) a=[10]*1000000 b=[] seed=42 for i in range(1000000): b.append(random.randint(0,1337)) print(a,b) test1=pd.Series(a) test2=pd.Series(b) l=[] wl=[] inx=0 for i in range(100): # lambdas test t1=time.time() test1.apply(lambda x:round(math.sqrt(x)**math.e,3)) test2.apply(lambda x:math.sqrt(x**3)+math.pi**2) t2=time.time() print(f"WITH lambdas it took {t2-t1}/ms to compleate both tests\n") l.append(t2-t1) inx+=1 # without lambdas test t1=time.time() test1.apply(some_math) test2.apply(some_more_math) t2=time.time() wl.append(t2-t1) inx+=1 print(f"with .apply() it took {t2-t1}/ms to compleat both tests\n") data=pd.DataFrame([l,wl]) data.head() data=data.T data.head() l[0] data.columns=['lambda','without_lambda'] data.head() import matplotlib.pyplot as plt plt.rcParams['figure.figsize']=[20,10] fig,ax=plt.subplots() ax.plot(data['lambda'],lw=3,c='blue') ax.plot(data['without_lambda'],lw=3,c='red') plt.title("lambda vs Series.apply()") plt.xlabel("Epochs") plt.ylabel("Time taken to Execute [sec]") plt.show() plt.scatter(data['lambda'],data['without_lambda'],c=data.index) plt.xlabel("time to execute [lambda]") plt.ylabel("time to execute [without_lambda]") plt.xlim(2.35,2.8) plt.ylim(2.35,2.8) plt.show() fig,ax=plt.subplots() pd.Series(sorted(data['lambda'])).plot(ax=ax,c='blue',label='lambda') pd.Series(sorted(data['without_lambda'])).plot(ax=ax,c='red',label='Series.apply()') plt.legend() plt.xlabel("epochs") plt.ylabel("time to execute [sec]") plt.title("Time to execute lambda's vs defined function [sec] 
sorted(low->high)",fontsize=20,fontweight='bold') plt.show()
unit-1-build/notebooks/testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="iLXW02eIYpcB" # clone and cd into repo, nshepperd's fork https://github.com/nshepperd/gpt-2 # - import tensorflow as tf tf.__version__ # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 2753, "status": "ok", "timestamp": 1559179823276, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="ICYu3w9hIJkC" outputId="2087d87c-0b0e-4e06-dd32-2ca79e13330f" # !git clone https://github.com/ncoop57/gpt-2.git ./data/gpt-2 # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 269, "status": "ok", "timestamp": 1559179823520, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="6eEIs3ApZUVO" outputId="3440ede5-4fa8-4d64-c54e-6ccc9446add4" # cd data/gpt-2 # + [markdown] colab_type="text" id="Qtn1qZPgZLb0" # Install requirements # + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" executionInfo={"elapsed": 4257, "status": "ok", "timestamp": 1559084518006, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="434oOx0bZH6J" outputId="978bac50-18f1-4d92-e266-2d02a95e25c9" # !pip install -r requirements.txt # + [markdown] colab_type="text" id="o1hrgeKFYsuE" # Download the model data # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 25190, "status": "ok", "timestamp": 1559179853688, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="5UDpEGjfO8Q2" outputId="73851f02-2db7-48f6-d99e-6ed034c1e0b0" # !python download_model.py 345M # + 
[markdown] colab_type="text" id="Zq-YwRnNOBYO" # encoding # + colab={} colab_type="code" id="7oJPQtdLbbeK" # !export PYTHONIOENCODING=UTF-8 # - # !apt install wget -y # + [markdown] colab_type="text" id="0p--9zwqQRTc" # # Let's get our train on! In this case the file is A Tale of Two Cities (Charles Dickens) from Project Gutenberg. To change the dataset GPT-2 models will fine-tune on, change this URL to another .txt file, and change corresponding part of the next cell. Note that you can use small datasets if you want but you will have to be sure not to run the fine-tuning for too long or you will overfit badly. Roughly, expect interesting results within minutes to hours in the 1-10s of megabyte ballpark, and below this you may want to stop the run early as fine-tuning can be very fast. # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" executionInfo={"elapsed": 172427, "status": "ok", "timestamp": 1559056398480, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="QOCvrs-DHvxa" outputId="4cd80ef2-2ed0-4f55-89e5-0791ab23a726" # !wget http://groups.inf.ed.ac.uk/cup/javaGithub/java_projects.tar.gz # + colab={} colab_type="code" id="jJObRkjwO2fQ" # !tar xzf java_projects.tar.gz # !ls # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 4143, "status": "ok", "timestamp": 1559078862534, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="RfP7MFOxL6Ze" outputId="4bc26adf-75c5-4920-8b9e-7ff7df553dec" # !pip install toposort # + [markdown] colab_type="text" id="yPfJ5b3CQXqr" # # Start training, add --model_name '345M' to use 345 model # + colab={"base_uri": "https://localhost:8080/", "height": 93996} colab_type="code" executionInfo={"elapsed": 1540240, "status": "ok", "timestamp": 1559092407520, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, 
"user_tz": 240} id="pEn_ihcGI00T" outputId="2e663d39-12d0-43c8-a571-52c7c41b76cc" # !PYTHONPATH=src ./train.py --dataset ../java_projects --model_name '345M' # + [markdown] colab_type="text" id="vS1RJJDFOPnb" # Save our checkpoints to start training again later # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 2328, "status": "ok", "timestamp": 1559092458087, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="JretqG1zOXdi" outputId="17ccb4c5-cfa0-4b8d-fa71-439787dc1ba5" # !cp -r /content/gpt-2/checkpoint/ /content/drive/My\ Drive/ # + [markdown] colab_type="text" id="6D-i7vERWbNS" # Load your trained model for use in sampling below (117M or 345M) # + colab={} colab_type="code" id="VeETvWvrbKga" # !cp -r /content/gpt-2/checkpoint/run1/* /content/gpt-2/models/117M/ # + colab={} colab_type="code" id="np0r6qfXBeUX" # !cp -r /content/gpt-2/checkpoint/run1/* /content/gpt-2/models/345M/ # + [markdown] colab_type="text" id="GmnSrXqtfRbq" # Generate conditional samples from the model given a prompt you provide - change top-k hyperparameter if desired (default is 40), if you're using 345M, add "--model-name 345M" # + colab={"base_uri": "https://localhost:8080/", "height": 7500} colab_type="code" executionInfo={"elapsed": 1045982, "status": "ok", "timestamp": 1559093703237, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "15284233239426922637"}, "user_tz": 240} id="utJj-iY4gHwE" outputId="b43f4461-040a-4281-9cce-218663bc6ef4" # !python3 src/interactive_conditional_samples.py --top_k 40 --model_name "345M" # + [markdown] colab_type="text" id="LeDhY97XMDXn" # To check flag descriptions, use: # + colab={} colab_type="code" id="pBaj2L_KMAgb" # !python3 src/interactive_conditional_samples.py -- --help # + [markdown] colab_type="text" id="K8rSqkGxg5OK" # Generate unconditional samples from the model, if you're using 345M, add "--model-name 345M" # + colab={} 
colab_type="code" id="LaQUEnRxWc3c" # !python3 src/generate_unconditional_samples.py --model_name "345M" | tee /tmp/samples # + [markdown] colab_type="text" id="VM1Hag-JL3Bt" # To check flag descriptions, use: # + colab={} colab_type="code" id="Sdxfye-SL66I" # !python3 src/generate_unconditional_samples.py -- --help
main/nbs/poc/gpt-2/fine_tune_gpt-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # importing all libraries from tkinter import * from timeit import default_timer as timer import random # creating window using gui window = Tk() # the size of the window is defined window.geometry("450x200") x = 0 # defining the function for the test def game(): global x # loop for destroying the window # after on test if x == 0: window.destroy() x = x+1 # defining function for results of test def check_result(): if entry.get() == words[word]: # here start time is when the window # is opened and end time is when # window is destroyed end = timer() # we deduct the start time from end # time and calculate results using # timeit function print(end-start) else: print("Wrong Input") words = ['programming', 'coding', 'algorithm', 'systems', 'python', 'software'] # Give random words for testing the speed of user word = random.randint(0, (len(words)-1)) # start timer using timeit function start = timer() windows = Tk() windows.geometry("450x200") # use lable method of tkinter for labling in window x2 = Label(windows, text=words[word], font="times 20") # place of labling in window x2.place(x=150, y=10) x3 = Label(windows, text="Start Typing", font="times 20") x3.place(x=10, y=50) entry = Entry(windows) entry.place(x=280, y=55) # buttons to submit output and check results b2 = Button(windows, text="Done", command=check_result, width=12, bg='grey') b2.place(x=150, y=100) b3 = Button(windows, text="Try Again", command=game, width=12, bg='grey') b3.place(x=250, y=100) windows.mainloop() x1 = Label(window, text="Lets start playing..", font="times 20") x1.place(x=10, y=50) b1 = Button(window, text="Go", command=game, width=12, bg='grey') b1.place(x=150, y=100) # calling window window.mainloop() # -
TypingSpeed/TypingSpeedTest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # HOUSE ROCKET - EMPRESA # ### Questões de negócio: # # * Encontrar as melhores oportunidades de compra de imóveis da House Rocket # # ### Qual a problema, a dor, a necessidade do time de negócios? # # * O time do negócio não consegue tomar boas decisões de compra sem analisar os dados # # ### Quais hipóteses temos que resolver? # # 1. A maioria das casas com 3 quartos são acima da mediana do preço. # 2. A maioria das casas com 3 quartos e 2 banheiros são acima da mediana do preço. # 3. Casas com com vista para água são 100% mais caras que a mediana do preço. # 4. A maioria das casas com porão e 2 andares são mais caras que que a mediana do preço. # 5. A maioria das casas com datas acima de 1990 que tiveram reformas são mais caras que a mediana. # 6. A maioria das casas antigas são mais caras que a mediana de mercado # 7. A maioria das casas novas são mais caros que a mediana de mercado # # # ### Quais perguntas do CEO temos que resolver? # # 1. Quais são os imóveis que a House Rocket deveria comprar e por qual preço ? # 2. A House Rocket deveria fazer uma reforma para aumentar o preço da venda ? # 3. Qual o incremento no preço dado por cada opção de reforma ? # 4. <NAME> vendas seguindo a análise ? 
# # ## 0.0 Import import pandas as pd import numpy as np import plotly.express as px import seaborn as sns import matplotlib.pyplot as plt # ### 0.1 Functions # + def boxplot1(x1, x2, x3, x4, y, df): plt.subplot(2, 2, 1) sns.boxplot(x=x1, y=y, data=df) plt.subplot(2, 2, 2) sns.boxplot(x=x2, y=y, data=df) plt.subplot(2, 2, 3) sns.boxplot(x=x3, y=y, data=df) plt.subplot(2, 2, 4) sns.boxplot(x=x4, y=y, data=df); return None def grafdisplot(dt): sns.set(rc={'figure.figsize':(20, 10)}) sns.distplot(dt, kde=True); return None # - # ## 1.0 Loading Dataseet data = pd.read_csv("datas\kc_house_data.csv") data.head(5) # ## 2.0 Data Description print(f"Number of rows {data.shape[0]}") print(f"Number of columns {data.shape[1]}") # ### 2.1 Data Types data.dtypes # ### 2.2 Check NA data.isna().sum() # ### 2.3 Data Trasformer # # There was an error on data. Before of the transformer it was the object. Afeter the transformer is date data["date"] = pd.to_datetime( data["date"]) # ### 2.4 Data Descriptive # + #descriptive statistics num_attibutes = data.select_dtypes( include=["int64", "float64"]) # quero somente essas colunas #central tendency - media, mediana, std, min, max import numpy as np pd.set_option("display.float.format", lambda x:"%.3f" % x) # arruma para sair de notação científica media = pd.DataFrame( num_attibutes.apply(np.mean, axis = 0 ) ) mediana = pd.DataFrame( num_attibutes.apply(np.median, axis = 0) ) # dispersion - std, min, max std = pd.DataFrame( num_attibutes.apply( np.std, axis = 0) ) min_ = pd.DataFrame( num_attibutes.apply( np.min, axis = 0) ) max_ = pd.DataFrame( num_attibutes.apply( np.max, axis = 0) ) df2 = pd.concat([media, mediana, std, min_, max_], axis=1) df2.columns = ["Média", "Mediana", "Desvio Padrão", "Mínimo", "Máximo"] df2 # - # ### 2.5 Columns Description # id :uma notação para uma casa # # date: a data foi vendida # # price: o preço é a meta de previsão # # bedrooms:Número de quartos / casa # # bathrooms: Número de banheiros / quartos # # 
sqft_living: metragem quadrada da casa # # sqft_lot: metragem quadrada do lote # # floors :Total de pisos (níveis) na casa # # waterfront :casa com vista para a beira-mar # # view: foi visualizado # # condition :quão boa é a condição geral # # grade: nota geral dada à unidade habitacional, com base no sistema de classificação de King County # # sqft_above :metragem quadrada da casa além do porão # # sqft_basement: metragem quadrada do porão # # yr_built :Ano de construção # # yr_renovated :ano em que a casa foi reformada # # zipcode:código postal # # lat: coordenada de latitude # # long: coordenada de longitude # # sqft_living15 :Área da sala de estar em 2015 (implica-- algumas renovações) Isso pode ou não ter afetado o tamanho do lote # # sqft_lot15 :rea lotSize em 2015 (implica-- algumas renovações) # ## 3.0 New Considerable Variables # ### 3.1 Price_Median on reference the ZIP CODE ("Código Postal") # We need of price_median. To be that we need to create the new column to named "price_median". 
df = pd.DataFrame(data[["zipcode", "price"]].groupby("zipcode").median().reset_index()) df = df.rename( columns = {"price": "price_median"}) df2 = pd.merge(data, df, on="zipcode", how = "inner") # + df2["status"] = "NA" for i in range(len(df2)): if (df2.loc[i, "price"] < df2.loc[i, "price_median"]) & (df2.loc[i,"condition"] >=2): df2.loc[i, "status"] = "compra" else: df2.loc[i, "status"] = "não compra" # - # ### 3.2 Real State Level df2["levels"] = df2["price"].apply(lambda x: "Level 0" if (x >= 0) & (x < 321950) else "Level 1" if (x>=321950) & (x < 450000) else "Level 2" if (x>=450000) & (x < 654000) else "Level 3" ) # ### 3.3 Dormitory type # + # Studio == bedrooms igual ou menor que 1 # Apart_or_House == bedrooms entre 2 e 4 # house == bedrooms maior que 4 df2['dormitory_type'] = df2['bedrooms'].apply(lambda x: 'studio' if x <=1 else 'apart_or_house' if (x >=2) & (x <= 3) else 'house') # - # ### 3.3 Condition Type # + # Bad == Se a condition for menor ou igual a 2 # Regular == Se a condition for igual a 3 ou 4 # Good == Se a condition for igual 5 df2['condition_type'] = df2['condition'].apply(lambda x: 'bad' if x <= 2 else 'regular' if (x == 3) | (x == 4) else 'good') # - # ### 3.4 Year of Built df2["date-age"] = df2["yr_built"].apply(lambda x: "new_house" if x > 2014 else "old_house") # ### 3.5 DIF - "PRICE" / "PRICE_MEDIAN" # + df2["dif"] = "NA" for i in range(len(df2)): df2.loc[i,"dif"] = round((((1-(df2.loc[i,"price"]) / df2.loc[i,"price_median"])) * 100),2) # - df2 # ## 4.0 Exploratory Analysis df3 = df2.copy() grafdisplot(df3["price"]) df3 # ### H1. A maioria das casas com 3 quartos são acima da mediana do preço. 
# # **FALSA** A maioria das casas estão abaixo da mediana de mercado df4 = df2.copy() house_bed3 = len(df4[df4['bedrooms'] == 3]) h_upmedian = len(df4[(df4['bedrooms'] == 3) & (df4['price'] > df4['price'].median())]) h_belowmedian = len(df4[(df4['bedrooms'] == 3) & (df4['price'] <= df4['price'].median())]) print('Quantidade de casas com 3 quartos: {} uninidades'.format(house_bed3)) print('Quantidade de casas acima do valor da mediana dos preços: {} uninidades'.format(h_upmedian)) print('Quantidade de casas abaixo do valor da mediana dos preços: {} unidades'.format(h_belowmedian)) # ### H2. A maioria das casas com 3 quartos e 2 banheiros são acima da mediana do preço. # **FALSA** # + house_bed3 = len(df4[(df4['bedrooms'] == 3) & (df4["bathrooms"] == 2)]) h_upmedian = len(df4[(df4['bedrooms'] == 3) & (df4["bathrooms"] == 2) & (df4['price'] > df4['price'].median())]) h_belowmedian = len(df4[(df4['bedrooms'] == 3) & (df4["bathrooms"] == 2) & (df4['price'] <= df4['price'].median())]) print('Quantidade de casas com 3 quartos: {} uninidades'.format(house_bed3)) print('Quantidade de casas acima do valor da mediana dos preços: {} uninidades'.format(h_upmedian)) print('Quantidade de casas abaixo do valor da mediana dos preços: {} unidades'.format(h_belowmedian)) plt.title('Casas agrupadas por quartos X banheiros') sns.stripplot(y = df4['bathrooms'], x = df4['bedrooms']); # - # ### H3. Casas com com vista para água são 100% mais caras que a mediana do preço. 
# # **VERDADEIRO** # + high = len(df4[(df4["waterfront"]==1) & (df4["price"] > df4["price_median"])]) low = len(df4[(df4["waterfront"]==1) & (df4["price"] < df4["price_median"])]) print(f"Quantidade de casas mais do que a mediana do preço {high}") print(f"Quantidade de casas abaixo da mediana do preço {low}") print("") waterh_1 = len(df4[(df4["waterfront"]==1) & (df4["price"] > df4["price_median"]) & (df4["dif"] > -100)]) waterh_0 = len(df4[(df4["waterfront"]==1) & (df4["price"] > df4["price_median"]) & (df4["dif"] <= -100)]) print(f"Quantidade de casas com mediana maior que ou igual a 100% da mediana do mercado: {waterh_0}") print(f"Quantidade de casas com mediana menor que 100% da mediana do mercado: {waterh_1}") waterh_0 = df4[(df4["waterfront"]==1) & (df4["price"] > df4["price_median"]) & (df4["dif"] < 100)] sns.countplot(x=df4['waterfront']); # - # ### H4. A maioria das casas com porão e 2 andares são mais caras que que a mediana do preço. # # **VERDADEIRO** # * sqft_basement - porão # * floors - andares (níveis) # + qtd = len(df4[(df4["sqft_basement"]>0) & (df4["floors"] > 1)]) high = len(df4[(df4["sqft_basement"]>0) & (df4["floors"] > 1) & (df4["price"]> df4["price_median"])]) low = len(df4[(df4["sqft_basement"]>0) & (df4["floors"] > 1) & (df4["price"]<= df4["price_median"])]) print(f"Quantidade de casas que tem porão e 2 andares {qtd}") print(f"Quantidade de casas que tem porão e 2 andares mais cara do que a mediana de mercado: {high}") print(f"Quantidade de casas que tem porão e 2 andares mais barata do que a mediana de mercado: {low}") plt.title('Casas agrupadas por andares x metro quadrado do porão') sns.stripplot(y = df4['sqft_basement'], x = df4['floors']); # - # ### H5. A maioria das casas com datas acima de 1990 que tiveram reformas são mais caras que a mediana. 
# # **VERDADEIRO** # + total = len(df4[(df4["yr_renovated"] != 0) & (df4["yr_built"] >= 1990)]) total_h = len(df4[(df4["yr_renovated"] != 0) & (df4["yr_built"] >= 1990) & (df4["price"] > df4["price_median"])]) total_l = len(df4[(df4["yr_renovated"] != 0) & (df4["yr_built"] >= 1990) & (df4["price"] < df4["price_median"])]) print(f"Quantidade de casas construídas em 1990 e que tiveram reformas {total}") print(f"Quantidade de casas que tem o preço acima da mediana de preço {total_h}") print(f"Quantidade de casas que tem o preço abaixo da mediana de preço {total_l}") # - # ### H6. A maioria das casas antigas são mais caras que a mediana de mercado # **VERDADEIRO** # + old_houses = len(df4[df4["date-age"] == "old_house"]) old_houses_h = len(df4[(df4["date-age"] == "old_house") & (df4["price"] >= df4["price_median"])]) old_houses_l = len(df4[(df4["date-age"] == "old_house") & (df4["price"] < df4["price_median"])]) print(f"Quantidade de casas antigas {old_houses}") print(f"Quantidade de casas antigas com preço acima da mediana de mercado {old_houses_h}") print(f"Quantidade de casas antigas com preço abaixo da mediana de mercado {old_houses_l}") sns.countplot(x=df4['date-age']); # - # ### H7. 
A maioria das casas novas são mais caras que a mediana de mercado # # **VERDADEIRO** # + new_house = len(df4[df4["date-age"] == "new_house"]) newh = len(df4[(df4["date-age"] == "new_house") & (df4["price"] >= df4["price_median"])]) newl = len(df4[(df4["date-age"] == "new_house") & (df4["price"] < df4["price_median"])]) print(f"Quantidade de casas novas {new_house}") print(f"Quantidade de casas novas com preço acima da mediana de mercado {newh}") print(f"Quantidade de casas novas com preço abaixo da mediana de mercado {newl}") sns.countplot(x=df4['date-age']); # - # ### 4.1 Tabela de Hipótese # + data = {"HIPÓTESE": ["H1", "H2", "H3", "H4", "H5", "H6" , "H7"], "RESULTADO":["FALSA","FALSA","VERDADEIRO","VERDADEIRO","VERDADEIRO", "VERDADEIRO","VERDADEIRO"], "RELEVÂNCIA":[ "BAIXA","BAIXA","ALTA","BAIXA","MEDIA","ALTA","BAIXA"]} hip = pd.DataFrame(data) hip # - # ## 5.0 Respondendo as perguntas do CEO # ### 5.1 Quais casas a House Rocket deveria comprar e por qual preço de compra? # # + df5 = df4.copy() total = len(df5[df5["status"] == "compra"]) print(f"Temos ao todo {total} casas abaixo da mediana de mercado") # + bom = len(df5[(df5["status"] == "compra") & (df5["condition_type"] == "good")]) print(f"Ao todo temos {bom} casas abaixo da mediana de mercado e com condição boa ") # - # **Agora iremos salvar as casas com "condições boas" e "preço" abaixo do "preço de mediana"** # + # salvar o dataseet de recomendações de compra bomsalvar = df5[(df5["status"] == "compra") & (df5["condition_type"] == "good")] bomsalvar.to_csv('data_upmedian.csv', encoding='utf-8', index=False) # - # **Vamos salvar em outro datasset os empreendimentos da Hipótese 3: Casas com com vista para água são 100% mais caras que a mediana do preço.** # + # salvar o dataseet de recomendações de compra waterh_0 = df4[(df4["waterfront"]==1) & (df4["price"] > df4["price_median"]) & (df4["dif"] > -100)] waterh_0.to_csv('house_waterfront_upmedian.csv', encoding='utf-8', index=False) # - # ### 5.2 A House 
Rocket deveria fazer uma reforma para aumentar o preço da venda? # # + ## filtrando as casas com price < price.median houses_buy = df5[df5["price"] <= df5["price_median"]] # - # * As casas com condições bad deveriam passar por uma reforma para se tornarem mais atraentes. Já as casas em condições regulares precisariam ser selecionadas caso a caso # + h_b_r = len(houses_buy[(houses_buy["condition_type"]=="regular") | (houses_buy["condition_type"]=="bad")]) h_b = houses_buy[houses_buy["condition_type"] == "bad"] h_r = houses_buy[houses_buy["condition_type"] == "regular"] print(f"São {h_b_r} casas que estão com preço menor que a mediana de mercado, estando em condição má ou regular ") print(f"São {len(h_b)} casas que estão com preço menor que a mediana de mercado, estando em condição má") print(f"São {len(h_r)} casas que estão com preço menor que a mediana de mercado, estando em condição regular ") # + # casas com vista para o mar acima da medianan e menor que o dobro da mediana no preço hwf_count = waterh_0[['id', 'condition_type']].groupby('condition_type').count().reset_index() print('São casas que esteja acima da mediana e que não custe o do dobro do preço com vista para a água que devem ter reformas') print(hwf_count) print('As casas que tem a condição bad deve passar por reformas, as regulares devem ser avaliadas caso a caso.') # - # ### 5.3 Qual o incremento no preço dado por cada opção de reforma? # * Casas com condições bad devem ter um custo de até 30% no valor na reforma. # * Casas com condições regular devem ter um custo de até 10% no valor na reforma. 
# * As casas podem ter um acrécimo ou diminuição nessa porcentagem de acordo com o grau de qualidade do imóvel # concat house to buy houses_buy_01 = pd.concat([houses_buy, waterh_0], axis=0) # * Tabela de opções de preço para cada condição de reforma: # * Casas com condições bad: 30% do valor da casa # * Casa com condições regular: 10% do valor da casa # #### 5.3.1 Tabela com preço com os acréscimos da reforma # reset o index houses_buy_01 = houses_buy_01.reset_index().drop(columns='index') # + # adcionar o custo da reform ano preço houses_buy_01['price+renovation'] = houses_buy_01[['condition_type', 'price']].apply(lambda x: x['price'] * 1.30 if x['condition_type'] == 'bad' else x['price'] * 1.10 if x['condition_type'] == 'regular' else x['price'], axis=1) # criar coluna so com os preços estimado da reforam houses_buy_01['renovation_cost'] = houses_buy_01[['condition_type', 'price', 'price+renovation']].apply(lambda x: x['price+renovation'] - x['price'] if x['condition_type'] == 'bad' else x['price+renovation'] - x['price'] if x['condition_type'] == 'regular' else 0.0, axis=1) # - h1 = houses_buy_01.copy() h1 = h1[h1["price_median"] > h1["price+renovation"]] print(f" Temos ao todo {len(h1)} empreendimentos que vale a pena a compra. Os quais teremos um maior retorno, mesmo após a reforma") # + ## export dos dados h1.to_csv('houses_buy.csv', encoding='utf-8', index=False) # - # ### 5.4 Resultado de Venda df10 = houses_buy_01.copy() h_total_buy = df10['price'].sum() print('O total das somas dos preços das casas para compra é ${}.'.format(h_total_buy)) # #### 5.4.2 Soma das casas pela condição e total da soma do custo da reforma. 
# # + h_bad = df10[df10['condition_type'] == 'bad'] h_badp = h_bad['price'].sum() h_badrc = h_bad['renovation_cost'].sum() print('Somatoria das casa com codições bad ${}, e somatoria custo reforma ${}.'.format(h_badp, h_badrc)) h_bad1 = df10[df10['condition_type'] == 'good'] h_badp1 = h_bad1['price'].sum() h_badrc1 = h_bad1['renovation_cost'].sum() print('Somatoria das casa com codições good ${}, e somatoria custo reforma ${}.'.format(h_badp1, h_badrc1)) h_bad2 = df10[df10['condition_type'] == 'regular'] h_badp2 = h_bad2['price'].sum() h_badrc2 = h_bad2['renovation_cost'].sum() print('Somatoria das casa com codições regular ${}, e somatoria custo reforma ${}.'.format(h_badp2, h_badrc2)) df10[['price', 'condition_type', 'renovation_cost', 'price+renovation']].groupby('condition_type').sum() # - # #### 5.4.3 Resultado de Vendas # * Casas com condições bad vai ter 10% adcionado ao final para a venda. # * Casas com condições regular vai ter 15% adcionado ao final para a venda. # * Casas com condições good vai ter 20% adcionado ao final para a venda. # adcionar lucro df10['price_sales'] = df10[['condition_type', 'price+renovation']].apply(lambda x: x['price+renovation'] * 1.10 if x['condition_type'] == 'bad' else x['price+renovation'] * 1.20 if x['condition_type'] == 'good' else x['price+renovation'] * 1.15, axis=1) # lucro lucro = df10['price_sales'] - df10['price+renovation'] print('O lucro da vendas das {} casas é de ${}. '.format(len(df10['id']), round(lucro.sum(), 2))) # **A coluna abaixo compara os valores de price_median e o preço da venda. Caso a price_median esteja maior que o preço da venda, isso quer dizer que a nossa venda está menor do que o preço da mediana. 
Consequentemente é um preço mais atrativo, essa venda tem uma alta probabilidade de ser uma venda mais rápida** df10["velocidade_venda"] = df10[["price_sales", "price_median"]].apply(lambda x: "sim" if x["price_sales"] <= x["price_median"] else "nao", axis=1) ##Export arquivo compra final df10.to_csv('houses_buy_final.csv', encoding='utf-8', index=False) df10[['price', 'condition_type', 'renovation_cost', 'price+renovation', "price_sales"]].groupby('condition_type').sum()
HOUSE ROCKET - PROJECT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import imageio from skimage import io import numpy as np # + filename = 'VJ_OFCVTA_7_260_D6_mc' folder = 'C:/2pData/Vijay data/VJ_OFCVTA_7_D8_trained/' datafile = os.path.join(folder, '%s.tif'%filename) datafile # - im = io.imread(datafile) im_int8 = np.squeeze(im).astype(np.uint8) dims = im_int8.shape dims im_final = np.empty( [len(range(3,dims[0]-3,3)),dims[1],dims[2]], dtype='uint8') # + frame_count = 0 for iframe in range(3,dims[0]-3,3): im_final[frame_count,:,:] = np.mean( im_int8[ iframe-2:iframe+2 ,:,:], 0 ) frame_count += 1 # - im_final.shape imageio.mimwrite(folder + 'temp.mp4', im_final , fps = 15.0)
.ipynb_checkpoints/makeAvgMovieFromTiff-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# (lambdas)=
# # Lambdas
# ``` {index} Lambdas
# ```
# Lambdas are a way of defining small anonymous functions in Python. The name stems from functional programming. Anonymous means that these functions do not have a name. These functions can be treated as variables in a sense.

# f will now refer to a function
f = lambda q: q * 10

print(f(10))

# applied multiple times
print(f(f(f(10))))

# Lambdas can have multiple arguments which also can be lambdas:

# +
g = lambda a, d: d(a) + 10

g(10,f)
# -

# We can use lambdas in the `sort` method for arrays. This is especially useful if we want to sort by some different property of the elements. Imagine you have an array of tuples and you want to sort by their sum. If some elements are equal in this property, the order in which they appeared in the initial array will be preserved:

tuples = [(2,1), (1,2), (3,9), (2,5), (12,7)]
tuples.sort(key = lambda a: a[0]+a[1])
print(tuples)

# Lambdas are usually small functions. Longer lambdas can make code unreadable.

# (lambdas_exercises)=
# ## Exercises

# * **Define** the following lambdas:
#
# 1) \\(f(x) = x + 7\\)
# 2) \\(f(x,y) = x * y + 8\\)
# 3) \\(f(x,g) = 4 + g(x)\\)

# ````{admonition} Answer
# :class: dropdown
#
# ```python
# # 1)
# f = lambda x : x + 7
# # 2)
# f = lambda x,y: x * y + 8
# # 3)
# # (fixed: the second parameter is the function g, not y)
# f = lambda x, g: 4 + g(x)
# ```
#
# ````

# ________________

# * **Sort** a list of tuples by the second element in non-increasing order.

# ````{admonition} Answer
# :class: dropdown
#
# ```python
# tuples = [(1,2),(3,4),(5,3), (0,4), (2,7)]
# tuples.sort(key = lambda a : -a[1])
# ```
#
# ````
notebooks/b_coding/Intro to Python/20_Lambdas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import librosa as lb import pandas as pd import scipy import numpy as np import sklearn from sklearn.model_selection import train_test_split import os from pylab import plot, show, figure, imshow, xlim, ylim, title import matplotlib.pyplot as plt import keras from keras.utils import np_utils from keras import layers from keras import models # + #CONSTANTS DATA_DIR = "data/" CATEGORY_COUNT = 8 # + class Instrument: def __init__(self, name, audio, mfcc, category): self.name = name self.audio = audio self.mfcc = mfcc self.category = category instruments = [] i = 0 for folder in os.listdir(DATA_DIR): if "." not in folder: new_instrument = Instrument(folder, [], [], i) print(new_instrument.name, 'started') for file_path in os.listdir(DATA_DIR + folder): if file_path.endswith(".mp3"): try: audio, sr = lb.load(DATA_DIR + folder + "/" + file_path) #audio, sr = lb.core.load(lb.util.example_audio_file()) new_instrument.audio.append(audio) except: print('Failed to add',folder + "/" + file_path + ' (' + str(len(new_instrument.audio)) + '. 
' + new_instrument.name + ').') instruments.append(new_instrument) print(new_instrument.name, 'added') i+=1 print('FINISHED') # - for instrument in instruments: instrument.mfcc = [] for audio in instrument.audio: mfccs = lb.feature.mfcc(y=audio,n_mfcc=13) mfccs = lb.util.normalize(mfccs) scipy.linalg.norm(mfccs) # mfccs = mfccs / 500 instrument.mfcc.append(mfccs.mean(axis=1)) imshow(mfccs, aspect = 'auto', origin='lower', interpolation='none') plt.colorbar() plt.title('MFCC') plt.tight_layout() plt.show() '''w = Windowing(type = 'hann') spectrum = Spectrum() # FFT() would return the complex FFT, here we just want the magnitude spectrum mfcc = MFCC() for instrument in instruments: instrument.mfcc = [] for audio in instrument.audio: mfccs = [] melbands = [] melbands_log = [] for frame in FrameGenerator(audio, frameSize=1024, hopSize=512, startFromZero=True): mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame))) mfccs.append(mfcc_coeffs) melbands.append(mfcc_bands) mfccs = essentia.array(mfccs).mean(axis=0) mfccs = mfccs / 500 instrument.mfcc.append(mfccs) # melbands_log.append(logNorm(mfcc_bands)) # transpose to have it in a better shape # we need to convert the list to an essentia.array first (== numpy.array of floats) melbands = essentia.array(melbands).T melbands_log = essentia.array(melbands_log).T # and plot imshow(melbands[:,:], aspect = 'auto', origin='lower', interpolation='none') plt.title("Mel band spectral energies in frames") show() #imshow(melbands_log[:,:], aspect = 'auto', origin='lower', interpolation='none') #plt.title("Log-normalized mel band spectral energies in frames") #show() #imshow(mfccs[1:,:], aspect='auto', origin='lower', interpolation='none') #plt.title("MFCCs in frames") #show()''' mfccs.shape # + for instrument in instruments: print(len(instrument.mfcc)) # + fig = plt.figure(figsize=(20,10)) i = 0 for instrument in instruments: i += 1 fig.add_subplot(4,2,i) plot(instrument.mfcc[3]) #ylim(-1,1) show() # + X = [] y = [] for instrument in 
instruments: i = 0 for mfcc in instrument.mfcc: i += 1 X.append(mfcc) y.append(instrument.category) X = np.array(X) print(X.shape) X = X.astype('float32') y = np.array(y) print(len(X)) print(len(X[0])) print(X.shape) # Test Train Split X_train, X_test, y_train, y_test = train_test_split(X, y) # + print(len(X_train)) print(len(y_train)) print(y_train[1]) y_train = np_utils.to_categorical(y_train,CATEGORY_COUNT) y_test = np_utils.to_categorical(y_test,CATEGORY_COUNT) print(len(X_train)) print(X_train.shape) # + model = models.Sequential() #model.add(layers.Conv2D(filters=8,kernel_size=(3,3), input_shape=(3411,13,1),activation='relu')) #model.add(layers.AveragePooling2D()) #model.add(layers.Conv2D(filters=16,kernel_size=(3,3), activation='relu')) #model.add(layers.AveragePooling2D()) #model.add(layers.Flatten()) model.add(layers.Dense(units=512, activation='relu', input_shape=(X_train.shape[1],))) model.add(layers.Dense(units=256, activation='relu')) model.add(layers.Dense(units=100, activation='relu')) model.add(layers.Dense(units=CATEGORY_COUNT, activation='sigmoid')) model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['acc']) # - y_train.dtype # + history = model.fit(X_train, y_train, epochs=70, batch_size=128) # - model.summary() loss, acc = model.evaluate(X_test, y_test) print('Test loss: {}'.format(loss)) print('Test accuracy: {:.2%}'.format(acc)) new = model.predict_classes(X) for i in range(len(X)): print("Predicted=%s" % ( new[i])) # + print(X_train.shape[1]) model2 = models.Sequential() #model.add(layers.Conv2D(filters=8,kernel_size=(3,3), input_shape=(3411,13,1),activation='relu')) #model.add(layers.AveragePooling2D()) #model.add(layers.Conv2D(filters=16,kernel_size=(3,3), activation='relu')) #model.add(layers.AveragePooling2D()) #model.add(layers.Flatten()) model2.add(layers.Dense(units=512, activation='relu', input_shape=(X_train.shape[1],))) model2.add(layers.Dense(units=256, activation='relu')) model2.add(layers.Dense(units=100, 
activation='relu')) model2.add(layers.Dense(units=CATEGORY_COUNT, activation='sigmoid')) model2.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['acc']) # - history = model2.fit(X_train, y_train, epochs=15, batch_size=128) pred = model2.predict(np.array(X_test))[5] sum = 0 for i, prob in enumerate(pred): print (i) print (prob) sum += prob print(sum)
philharmonia-DL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exercise
#
# - Detect blue cars on `traffic.mp4`

import cv2  # was missing: cv2 is used below and would raise NameError
import numpy as np

# Lower/upper bounds for the colour mask.
# NOTE(review): values look like HSV (H, S, V) thresholds rather than
# BGR "red and blue" — confirm against the masking code that consumes them.
lower = np.array([100,100,100])
upper = np.array([180,255,255])

### Color to HSV
# Convert a single BGR pixel to full-range HSV to find the hue of "blue".
blue = np.uint8([[[133,55,40]]])
blue_hsv = cv2.cvtColor(blue, cv2.COLOR_BGR2HSV_FULL)
print(blue_hsv)
Day 2 - Color detector.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyThaiNLP # Cr. https://github.com/PyThaiNLP/pythainlp # https://thainlp.org/pythainlp/docs/2.0/notes/pythainlp-1_7-2_0.html#soundex # # สำหรับ note nlp อันนี้ใช้เวปไซต์นี้ เป็นหลัก <br> # https://github.com/PyThaiNLP/pythainlp/blob/dev/notebooks/pythainlp-get-started.ipynb # # โน๊ตนี้เป็น โน๊ตจดสรุปlibrary pythainlp ตามความเข้าใจ # ### PACKAGE # 1. pythainlp.util # 2. pythainlp.tokenize # -------------------------------------------------- # librery are included Thai consonants(Charactors), vowels, tonemarks, symbols # # 1. pythainlp.thai_characters --> ตัวไทยทั้งหมด # 2. pythainlp.thai_consonants --> อักษรไทย 44 ตัว # 3. pythainlp.thai_vowels # 4. pythainlp.thai_symbols --> '฿' # 5. pythainlp.thai_digits --> ตัวเลขอักษรไทย (Arabic) <br> # ex. <br> # "๕" in pythainlp.thai_digits --> True # # -------------------------------------------------- # ## 1. pythainlp.util # text conversation and formatting # ### isthai # สำหรับการ ตรวจคำว่า ใช่ภาษาไทยหรือไม่ <br> # แต่ไม่ใช่สำหรับเชคว่า เป็นคำไทยที่มีความหมายหรือป่าว <br> # สามารถใช้ในกรณี เชคว่ามีภาษาอื่นปนมาหรือไม่ import pythainlp.util pythainlp.util.isthai("พี่ชาย") pythainlp.util.isthai("ก.พ.") pythainlp.util.isthai("(ก.ค.)",ignore_chars=".())") #can't have () # ### Collation # [เรียงลำดับข้อมูลภาษาไทยใน List] - รับ list คืนค่า list ตามตัวอักษรไทย # + from pythainlp.util import collate thai_words = ["อัยย์", "พี่เจ", "นุ่นนิ่น","เน็ท", "อุ้ม", "เข็ม"] print(collate(thai_words)) print(collate(thai_words, reverse= True)) # - # ### rank # สำหรับนับfrequency ของlistภาษาไทย from pythainlp.util import rank data = ['วันนี้','ร้อน','วันนี้','หนาว','วันนี้','ฝนตก'] rank(data) # -------------------------------------------------- # # 2. Tokenization # [ตัดคำไทย] # 1. icu # 2. dict # 3. mm # 4. newmm # 5. 
pylexto # 6. deepcut # 7. attacut # 8. longest # 9. ulmfit -> PyThaiNLP version 2.1 # # คืนค่าเป็น ''list'' # + from pythainlp.tokenize import word_tokenize text='เค้าหิวข้าวอยากกินขนม อยากกินราเมง อยากกินผัดไทย' a=word_tokenize(text,engine='deepcut') b=word_tokenize(text,engine='newmm') print(a) print(b) # + from pythainlp import sent_tokenize, word_tokenize text = "อยากกินสปาเกตตี้ อยากกินไก่ย่าง " print("sent_tokenize:", sent_tokenize(text)) print("word_tokenize:", word_tokenize(text)) print("word_tokenize, without whitespace:", word_tokenize(text, keep_whitespace=False)) # + from pythainlp import word_tokenize, Tokenizer text = "เค้าหิวข้าวแล้ว" print("newmm:", word_tokenize(text)) # default engine is "newmm" print("longest:", word_tokenize(text, engine="longest")) words = ["หิว", "ข้าว"] custom_tokenizer = Tokenizer(words) print("custom:", custom_tokenizer.word_tokenize(text)) # + from pythainlp.corpus.common import thai_words from pythainlp import word_tokenize, Tokenizer text = "อัยย์ ณัฏฐวิตรา" print("newmm:", word_tokenize(text)) words = set(thai_words()) # thai_words() returns frozenset words.add("ณัฏฐวิตรา") custom_tokenizer = Tokenizer(words) print("custom:", custom_tokenizer.word_tokenize(text)) # + # Get all possible segmentations from pythainlp.tokenize.multi_cut import find_all_segment, mmcut, segment find_all_segment("มีความเป็นไปได้อย่างไรบ้าง") # - # 2. Subword and Thai Character Cluster (TCC) # # According to Character Cluster Based Thai Information Retrieval (Theeramunkong et al. 2004). 
from pythainlp import subword_tokenize subword_tokenize("ประเทศไทย") # Low-level TCC operations # + from pythainlp.tokenize import tcc tcc.segment("ประเทศไทย") # - tcc.tcc_pos("ประเทศไทย") # return positions for ch in tcc.tcc("ประเทศไทย"): # generator print(ch, end='-') # # Postaggers ภาษาไทย # from pythainlp.tag import pos_tag <br> # pos_tag(list,engine='old') # # list คือ list ที่เก็บข้อความหลังผ่านการตัดคำแล้ว <br> # engine คือ ชุดเครื่องมือในการ postaggers มี 2 ตัว # 1. old เป็น UnigramTagger (ค่าเริ่มต้น) # 2. artagger เป็น RDR POS Tagger # # แปลงข้อความเป็น Latin # from pythainlp.romanization import romanization <br> # romanization(str,engine='pyicu') # # มี 2 engine ดังนี้ # 1. pyicu ส่งค่า Latin # 2. royin ใช้หลักเกณฑ์การถอดอักษรไทยเป็นอักษรโรมัน ฉบับราชบัณฑิตยสถาน (หากมีข้อผิดพลาด ให้ใช้คำอ่าน เนื่องจากตัว royin ไม่มีตัวแปลงคำเป็นคำอ่าน) # + # from pythainlp.romanization import romanization # romanization("แมว") # 'mæw' # - # # Transliteration # + from pythainlp.transliterate import romanize print(romanize("แมว")) print(romanize("หมา")) # - # # Normalization # + from pythainlp.util import normalize normalize("เเปลก") == "แปลก" # เ เ ป ล ก vs แปลก # - # # เช็คคำผิด # install hunspell and hunspell-th from pythainlp.spell import * a=spell("สี่เหลียม") print(a) # ['สี่เหลี่ยม', 'เสียเหลี่ยม', 'เหลี่ยม'] # # pythainlp.number # from pythainlp.number import * # nttn(str) - เป็นการแปลงเลขไทยสู่เลข <br> # nttt(str) - เลขไทยสู่ข้อความ <br> # ntnt(str) - เลขสู่เลขไทย <br> # ntt(str) - เลขสู่ข้อความ <br> # ttn(str) - ข้อความสู่เลข <br> # numtowords(float) - อ่านจำนวนตัวเลขภาษาไทย (บาท) รับค่าเป็น ''float'' คืนค่าเป็น 'str' # # WordNet ภาษาไทย # from pythainlp.corpus import wordnet # # การใช้งาน : API เหมือนกับ NLTK โดยรองรับ API ดังนี้ # # wordnet.synsets(word) <br> # wordnet.synset(name_synsets) <br> # wordnet.all_lemma_names(pos=None, lang="tha") <br> # wordnet.all_synsets(pos=None) <br> # wordnet.langs() <br> # wordnet.lemmas(word,pos=None,lang="tha") <br> # 
wordnet.lemma(name_synsets) <br> # wordnet.lemma_from_key(key) <br> # wordnet.path_similarity(synsets1,synsets2) <br> # wordnet.lch_similarity(synsets1,synsets2) <br> # wordnet.wup_similarity(synsets1,synsets2) <br> # wordnet.morphy(form, pos=None) <br> # wordnet.custom_lemmas(tab_file, lang) <br> from pythainlp.corpus import wordnet print(wordnet.synsets('หนึ่ง')) print(wordnet.synsets('หนึ่ง')[0].lemma_names('tha')) print(wordnet.synset('one.s.05')) print(wordnet.synset('spy.n.01').lemmas()) print(wordnet.synset('spy.n.01').lemma_names('tha')) # # หาคำที่มีจำนวนการใช้งานมากที่สุด # from pythainlp.rank import rank <br> # rank(list) <br> # คืนค่าเป็น dict # + # from pythainlp.rank import rank # rank(['แมง','แมง','คน']) from pythainlp.util import rank rank(['แมง','แมง','คน']) # - # # แก้ไขปัญหาการพิมพ์ลืมเปลี่ยนภาษา # from pythainlp.change import * <br> # มีคำสั่งดังนี้ <br> # # texttothai(str) แปลงแป้นตัวอักษรภาษาอังกฤษเป็นภาษาไทย <br> # texttoeng(str) แปลงแป้นตัวอักษรภาษาไทยเป็นภาษาอังกฤษ <br> # คืนค่าออกมาเป็น str # + # from pythainlp.change import * # print(texttothai("/o9dsoyd,kd")) # from pythainlp.change.texttothai to from pythainlp.util import thai_to_eng # from pythainlp.change.texttoeng to from pythainlp.util import eng_to_thai # - # # Thai Character Clusters (TCC) from pythainlp.tokenize import tcc tcc.tcc('ประเทศไทย') # 'ป/ระ/เท/ศ/ไท/ย' # # Enhanced Thai Character Cluster (ETCC) from pythainlp.tokenize import etcc # # Thai Soundex ภาษาไทย # กฎที่รองรับในเวชั่น 1.4 # # 1. กฎการเข้ารหัสซาวน์เด็กซ์ของ วิชิตหล่อจีระชุณห์กุล และ เจริญ คุวินทร์พันธุ์ - LK82 # 2. กฎการเข้ารหัสซาวน์เด็กซ์ของ วรรณี อุดมพาณิชย์ - Udom83 (DEFAULT) # 3. 
metasound # + # from pythainlp.soundex import LK82 # print(LK82('รถ')) # # ร3000 # print(LK82('รด')) # # ร3000 # print(LK82('จัน')) # # จ4000 # print(LK82('จันทร์')) # # จ4000 # print(Udom83('รถ')) # # ร800000 # from pythainlp.metasound import metasound # metasound("ลัก")from pythainlp.soundex.LK82 to pythainlp.soundex.lk82 # + # from pythainlp.soundex.LK82 to from pythainlp.soundex import lk82 print(lk82('รถ')) # from pythainlp.soundex.Udom83 to from pythainlp.soundex import udom83 print(udom83('รถ')) # - # # MetaSound # + # from pythainlp.MetaSound.MetaSound(name) to # from pythainlp.soundex import metasound(name) # - # # Sentiment analysis ภาษาไทย # Thai text of positive and negative. <br> # รับค่า str ส่งออกเป็น pos , neg หรือ neutral # + # from pythainlp.sentiment import sentiment # sentiment(str) # - # # ngrams # # สำหรับสร้าง ngrams <br> # ngrams(token,num) <br> # token คือ list <br> # num คือ จำนวน ngrams <br> # # Corpus # 1. stopword ภาษาไทย # + # from pythainlp.corpus import stopwords # stopwords = stopwords.words('thai') from pythainlp.corpus.common import thai_stopwords print(thai_stopwords()) # + # from pythainlp.corpus import country # country.get_data() from pythainlp.corpus import countries countries() # - # # corpus pythainlp.corpus.common.thai_stopwords() # Return a frozenset of Thai stopwords pythainlp.corpus.common.thai_words() pythainlp.corpus.common.thai_syllables() # พยางค์ pythainlp.corpus.common.thai_negations() pythainlp.corpus.common.countries() pythainlp.corpus.common.provinces() # + # pythainlp.corpus.conceptnet.edges(word: str, lang: str = 'th') # -
learn_pythainlp/note_pythainlp_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys from glob import glob files = glob('../data_intensity/pkl/id_*.measurements.covd.pkl.intensityANDmorphology.csv.gz') file = ('../data_intensity/pkl/id_13.measurements.covd.pkl.intensityANDmorphology.csv.gz') import pandas as pd # + import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline sns.set(style="whitegrid") # - for f in files[:]: outfile = f+'.summary_stat.absolute.png' df = pd.read_csv(f) df = df.rename(columns={"cluster_intensity": "clusterID"}) f, axes = plt.subplots(2, 3, figsize=(15, 7)) sns.boxplot(x='clusterID', y='area', data=df, orient="v", ax=axes[0, 0]) sns.boxplot(x='clusterID', y='perimeter', data=df, orient="v", ax=axes[0, 1]) sns.boxplot(x='clusterID', y='solidity', data=df, orient="v", ax=axes[0, 2]) sns.boxplot(x='clusterID', y='eccentricity', data=df, orient="v", ax=axes[1, 0]) sns.boxplot(x='clusterID', y='mean_intensity', data=df, orient="v", ax=axes[1, 1]) axes[-1, -1].axis('off') plt.tight_layout() plt.savefig(str(outfile)) for f in files[:]: outfile = f+'.summary_stat.percentile.png' df = pd.read_csv(f) df = df.rename(columns={"cluster_intensity": "clusterID"}) for c in df.columns[7:12]: df["q_"+c] = pd.qcut(df[c],100,labels=False, duplicates='drop') f, axes = plt.subplots(2, 3, figsize=(15, 7)) sns.boxplot(x='clusterID', y='q_area', data=df, orient="v", ax=axes[0, 0]) sns.boxplot(x='clusterID', y='q_perimeter', data=df, orient="v", ax=axes[0, 1]) sns.boxplot(x='clusterID', y='q_solidity', data=df, orient="v", ax=axes[0, 2]) sns.boxplot(x='clusterID', y='q_eccentricity', data=df, orient="v", ax=axes[1, 0]) sns.boxplot(x='clusterID', y='q_mean_intensity', data=df, orient="v", ax=axes[1, 1]) axes[-1, -1].axis('off') plt.tight_layout() plt.savefig(str(outfile)) outfile = 
file+'.summary_stat.absolute.png' df = pd.read_csv(file) for c in ['area', 'perimeter', 'solidity', 'eccentricity', 'circularity', 'mean_intensity', 'std_intensity', 'cov_intensity']: df["q_"+c] = pd.qcut(df[c],100,labels=False, duplicates='drop') f, axes = plt.subplots(2, 4, figsize=(15, 7)) sns.boxplot(x='clusterID1', y='area', data=df, orient="v", ax=axes[0, 0]) sns.boxplot(x='clusterID1', y='perimeter', data=df, orient="v", ax=axes[0, 1]) sns.boxplot(x='clusterID1', y='solidity', data=df, orient="v", ax=axes[0, 2]) sns.boxplot(x='clusterID1', y='eccentricity', data=df, orient="v", ax=axes[0, 3]) sns.boxplot(x='clusterID1', y='circularity', data=df, orient="v", ax=axes[1, 0]) sns.boxplot(x='clusterID1', y='mean_intensity', data=df, orient="v", ax=axes[1, 1]) sns.boxplot(x='clusterID1', y='cov_intensity', data=df, orient="v", ax=axes[1, 2]) axes[-1, -1].axis('off') plt.tight_layout() plt.show() #plt.savefig(str(outfile)) def plot_boxplot_profiles(filename): df = pd.read_csv(filename) dfmelted = pd.DataFrame() f, axes = plt.subplots(1, 4, figsize=(25, 10)) idx=0 for c in set(df["clusterID3"]): dfc = df[df["clusterID3"] == c][["area", "circularity", "eccentricity", "mean_intensity", "cov_intensity"]] data = pd.melt(dfc) dfmelted = dfmelted.append(data) chart = sns.boxplot(x='variable', y='value', data=data, meanline=True, orient="v", ax=axes[idx]).set_title('Cluster ID:'+str(c+1)) axes[idx].set_xticklabels(axes[idx].get_xticklabels(), rotation=45) idx += 1 plt.savefig(filename+'.boxplot.profile.png') plt.show() return for f in files[:]: plot_boxplot_profiles(f) for f in files[:]: print(f) df = pd.read_csv(f) g = sns.pairplot(df[["area","circularity","eccentricity","mean_intensity","cov_intensity"]].sample(n=10000), markers='.') g.savefig(f+'.scatter-pairplot.png') for f in files[:]: print(f) for c in set(df["clusterID3"]): dfc = df[df["clusterID3"] == c][["area", "circularity", "eccentricity", "mean_intensity", "cov_intensity"]] g = 
sns.pairplot(dfc.sample(n=10000),markers='.') g.fig.suptitle('Cluster ID: '+str(c+1), fontsize=16,y=1) g.savefig(f+'.scatter-pairplot.'+'clusterID_'+str(c+1)+'.png') plt.close()
SG/pipeline/ipynb/statSummary.morphologyBYclusters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Generate a toy feature table: 10**5 rows of N+1 uniform [0, 1) features,
# saved to ./input.pkl for downstream notebooks.

# +
import pandas as pd
import numpy as np
import random  # kept for compatibility with the original notebook; unused after vectorization
# -

# +
N = 5

# Vectorized: a single NumPy RNG call instead of filling 6 * 10**5 cells
# one-by-one in a Python double loop (same uniform [0, 1) distribution,
# orders of magnitude faster).
data = np.random.random((10**5, N + 1))

# Columns are named a0 .. aN.
data_frame = pd.DataFrame(data, columns=('a%d' % (i) for i in range(N+1)))
# -

data_frame.head()

# Persist for the downstream notebooks.
data_frame.to_pickle('./input.pkl')
ColabNotebooks/features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! # #### Version Check # Note: Animations are available in version 1.12.10+ # Run `pip install plotly --upgrade` to update your Plotly version. import plotly plotly.__version__ # #### Import Data # This tutorial walks you through how to make an example using the [Gapminder dataset](http://www.gapminder.org/world/) to present the GDP per Capita vs Life Expectancy across the years 1952 to 2007 in an animated Bubble Chart, in which the bubbles represent countries and their sizes represent the population. 
# # First import the Gapminder data that we will be using for the example and store in a dataframe: # + import plotly.plotly as py import plotly.figure_factory as ff from plotly.grid_objs import Grid, Column import pandas as pd import time url = 'https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv' dataset = pd.read_csv(url) table = ff.create_table(dataset.head(10)) py.iplot(table, filename='animations-gapminder-data-preview') # - # #### Make the Grid # Since we are using the v2 api for animations in Plotly, we need to first make a `grid`. You can learn more in the [introduction to animation doc](https://plotly.com/python/animations/). # # We will first define a list of _string_ years which will represent the values that our `slider` will take on. Going through the dataset, we will take out all the unique continents from the column `continent` and store them as well. Finally, we make a grid with each column representing a slice of the dataframe by _year_, _continent_ and _column name_, making sure to name each column uniquly by these variables: # + years_from_col = set(dataset['year']) years_ints = sorted(list(years_from_col)) years = [str(year) for year in years_ints] years.remove('1957') # make list of continents continents = [] for continent in dataset['continent']: if continent not in continents: continents.append(continent) columns = [] # make grid for year in years: for continent in continents: dataset_by_year = dataset[dataset['year'] == int(year)] dataset_by_year_and_cont = dataset_by_year[dataset_by_year['continent'] == continent] for col_name in dataset_by_year_and_cont: # each column name is unique column_name = '{year}_{continent}_{header}_gapminder_grid'.format( year=year, continent=continent, header=col_name ) a_column = Column(list(dataset_by_year_and_cont[col_name]), column_name) columns.append(a_column) # upload grid grid = Grid(columns) url = py.grid_ops.upload(grid, 'gapminder_grid'+str(time.time()), 
auto_open=False) url # - # #### Make the Figure # + figure = { 'data': [], 'layout': {}, 'frames': [], 'config': {'scrollzoom': True} } # fill in most of layout figure['layout']['xaxis'] = {'range': [30, 85], 'title': 'Life Expectancy', 'gridcolor': '#FFFFFF'} figure['layout']['yaxis'] = {'title': 'GDP per Capita', 'type': 'log', 'gridcolor': '#FFFFFF'} figure['layout']['hovermode'] = 'closest' figure['layout']['plot_bgcolor'] = 'rgb(223, 232, 243)' # - # #### Add Slider # For the slider to appear, we need to adda `sliders` dictionary to `layout`. The `sliders` dictionary is set in the following way: # # ``` # figure['layout']['sliders] = { # 'active': 0, # 'yanchor': 'top', # 'xanchor': 'left', # 'currentvalue': { # 'font': {'size': 20}, # 'prefix': 'text-before-value-on-display', # 'visible': True, # 'xanchor': 'right' # }, # 'transition': {'duration': 300, 'easing': 'cubic-in-out'}, # 'pad': {'b': 10, 't': 50}, # 'len': 0.9, # 'x': 0.1, # 'y': 0, # 'steps': [...] # } # ``` # # - `yanchor` determines whether the slider is on the _top_ or _bottom_ of the chart page # - `xanchor` is similar, only with _left_ and _right_ as possible values # - `currentvalue` sets the display of the current value that the slider is hovering on. It contains args such as `prefix`, which sets the text that appears before the value. # - `steps` is a list of dictionaries each of which corresponds to a frame in the figure. They should be ordered in the sequence in which the frames occur in the animation. # # Each dictionary in `steps` has the following form: # # ``` # { # 'method': 'animate', # 'label': 'label-for-frame', # 'value': 'value-for-frame(defaults to label)', # 'args': [{'frame': {'duration': 300, 'redraw': False}, # 'mode': 'immediate'} # ], # } # ``` # - the first item in the list `args` is a list containing the slider-value of that frame # - `label` is the text that appears next to the `prefix` arg mentioned in the `slider` section (eg. 
_Year: 1952_) # # For more information, check out the [documentation](https://plotly.com/python/reference/#layout-sliders). sliders_dict = { 'active': 0, 'yanchor': 'top', 'xanchor': 'left', 'currentvalue': { 'font': {'size': 20}, 'prefix': 'Year:', 'visible': True, 'xanchor': 'right' }, 'transition': {'duration': 300, 'easing': 'cubic-in-out'}, 'pad': {'b': 10, 't': 50}, 'len': 0.9, 'x': 0.1, 'y': 0, 'steps': [] } # ### Add Play and Pause Buttons # + figure['layout']['updatemenus'] = [ { 'buttons': [ { 'args': [None, {'frame': {'duration': 500, 'redraw': False}, 'fromcurrent': True, 'transition': {'duration': 300, 'easing': 'quadratic-in-out'}}], 'label': 'Play', 'method': 'animate' }, { 'args': [[None], {'frame': {'duration': 0, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 0}}], 'label': 'Pause', 'method': 'animate' } ], 'direction': 'left', 'pad': {'r': 10, 't': 87}, 'showactive': False, 'type': 'buttons', 'x': 0.1, 'xanchor': 'right', 'y': 0, 'yanchor': 'top' } ] custom_colors = { 'Asia': 'rgb(171, 99, 250)', 'Europe': 'rgb(230, 99, 250)', 'Africa': 'rgb(99, 110, 250)', 'Americas': 'rgb(25, 211, 243)', 'Oceania': 'rgb(50, 170, 255)' } # - # #### Fill in Figure with Data and Frames # Now we can put the data from our grid into the figure. Since we are using referenced data from the grid, we use the `.get_column_reference()` method on the grid and supply the name of the column we want via a looping through all the years and continents. First we # # Note: If you are using referenced data for a particular parameter, you `MUST` change the parameter name from `name` to `namesrc` to indicate that you are using referenced data from a grid. For instance, `x` becomes `xsrc`, `text` becomes `textsrc`, etc. 
# NOTE(review): this cell uses the legacy Plotly v2 "grid reference" API
# (columns uploaded earlier via py.grid_ops, referenced here through the
# *src parameters). That API was removed from the open-source plotly
# package — confirm this notebook is pinned to the old plotly /
# chart-studio version before modifying.

# Template matching the unique column names uploaded to the grid:
# '<year>_<continent>_<header>_gapminder_grid'.
col_name_template = '{year}_{continent}_{header}_gapminder_grid'

# Seed the figure with the first year's data, one trace per continent.
year = 1952
for continent in continents:
    data_dict = {
        'xsrc': grid.get_column_reference(col_name_template.format(
            year=year, continent=continent, header='lifeExp'
        )),
        'ysrc': grid.get_column_reference(col_name_template.format(
            year=year, continent=continent, header='gdpPercap'
        )),
        'mode': 'markers',
        'textsrc': grid.get_column_reference(col_name_template.format(
            year=year, continent=continent, header='country'
        )),
        'marker': {
            'sizemode': 'area',
            'sizeref': 200000,
            'sizesrc': grid.get_column_reference(col_name_template.format(
                year=year, continent=continent, header='pop'
            )),
            'color': custom_colors[continent]
        },
        'name': continent
    }
    figure['data'].append(data_dict)

# ### Create Frames
# Finally we make our `frames`. Here we are running again through the years and continents, but for each combination we instantiate a frame dictionary of the form:
#
# ```
# frame = {'data': [], 'name': value-name}
# ```
# We add a dictionary of data to this list and at the end of each loop, we ensure to add the `steps` dictionary to the steps list. At the end, we attach the `sliders` dictionary to the figure via:
#
# ```
# figure['layout']['sliders'] = [sliders_dict]
# ```
#
# and then we plot! Enjoy the Gapminder example!
# + for year in years: frame = {'data': [], 'name': str(year)} for continent in continents: data_dict = { 'xsrc': grid.get_column_reference(col_name_template.format( year=year, continent=continent, header='lifeExp' )), 'ysrc': grid.get_column_reference(col_name_template.format( year=year, continent=continent, header='gdpPercap' )), 'mode': 'markers', 'textsrc': grid.get_column_reference(col_name_template.format( year=year, continent=continent, header='country' )), 'marker': { 'sizemode': 'area', 'sizeref': 200000, 'sizesrc': grid.get_column_reference(col_name_template.format( year=year, continent=continent, header='pop' )), 'color': custom_colors[continent] }, 'name': continent } frame['data'].append(data_dict) figure['frames'].append(frame) slider_step = {'args': [ [year], {'frame': {'duration': 300, 'redraw': False}, 'mode': 'immediate', 'transition': {'duration': 300}} ], 'label': year, 'method': 'animate'} sliders_dict['steps'].append(slider_step) figure['layout']['sliders'] = [sliders_dict] # - # ### Plot Animation py.icreate_animations(figure, 'gapminder_example'+str(time.time())) # #### Reference # For additional information and attributes for creating bubble charts in Plotly see: https://plotly.com/python/bubble-charts/. # For more documentation on creating animations with Plotly, see https://plotly.com/python/#animations. 
# +
from IPython.display import display, HTML

# Inject the fonts and stylesheet used by the Plotly documentation notebooks.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

# Install the in-house publisher tool and push this notebook to the docs site.
# !pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher

publisher.publish(
    'gapminder-example.ipynb',
    'python/gapminder-example/',
    'Adding Sliders to Animations | plotly',
    'How to make the classic Gapminder Animation using sliders and buttons in Python.',
    title='Adding Sliders to Animations | plotly',
    name='Adding Sliders to Animations',
    language='python',
    page_type='example_index',
    has_thumbnail='true',
    thumbnail='thumbnail/gapminder_animation.gif',
    display_as='animations',
    ipynb='~notebook_demo/129',
    order=2,
)
# -
_posts/python-v3/animations/gapminder-example/gapminder-example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from fastai.vision.all import *
from fastai.vision.widgets import *

# # The Dosa Classifier
#
# The app will classify between:
#
# - Masala dosa
# - Onion dosa
# - Set dosa
#
# Try your image by clicking the Upload button below!

# Load the exported learner from the app directory; cpu=True so inference works
# without a GPU (e.g. on a Binder/Voila host).
path = Path()
learn_inf = load_learner(path/'export.pkl', cpu=True)

# UI widgets: an upload button, an output area for the thumbnail, and a label
# for the prediction text.
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()

def on_data_change(change):
    """Classify the most recently uploaded image; show a thumbnail and the result.

    Triggered whenever ``btn_upload``'s ``data`` trait changes; ``change`` (the
    traitlets change dict) is unused because the image bytes are read directly
    from ``btn_upload.data[-1]`` — the last file uploaded.
    """
    lbl_pred.value = ''  # clear any stale prediction while we work
    img = PILImage.create(btn_upload.data[-1])
    out_pl.clear_output()
    with out_pl:
        display(img.to_thumb(128, 128))
    pred, pred_idx, probs = learn_inf.predict(img)
    # Fix: user-facing label previously read "Predication" (typo).
    lbl_pred.value = "Prediction: {} (Probability: {:.2f})".format(
        pred.upper() + ' DOSA', probs[pred_idx]
    )

btn_upload.observe(on_data_change, names=['data'])

display(VBox([widgets.Label('Select your image!'), btn_upload, out_pl, lbl_pred]))
dosa_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Energy dispersion check
#
# This notebook checks the self consistency of the energy dispersion during event
# sampling. For this we compute an energy dispersion map. Then we choose one value
# of true energy and sample the reconstructed energy. Finally we compare against
# resulting distribution against the PSF contained in the energy migration matrix.

# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from gammapy.datasets import MapDataset
from gammapy.makers import MapDatasetMaker
from gammapy.maps import MapCoord, MapAxis, WcsGeom
from gammapy.irf import EnergyDispersion2D, load_cta_irfs, EDispMap
from astropy.coordinates import SkyCoord
from astropy import units as u
from gammapy.data import Observation

# +
IRF_FILE = "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"

E_TRUE_AXIS = MapAxis.from_energy_bounds("0.03 TeV", "300 TeV", nbin=40, name="energy_true")
E_RECO_AXIS = MapAxis.from_energy_bounds("0.1 TeV", "100 TeV", nbin=30)

# This is the default migra binning used in Gammapy right now.
MIGRA_AXIS = MapAxis.from_bounds(0.2, 5, nbin=150, node_type="edges", name="migra")

N_EVENTS = 100_000

# idx=10 is ~1 TeV
E_TRUE = E_TRUE_AXIS.center[15]
# -

# Here we define a little helper function to compute an `EDispMap`:

def make_edisp_map(edisp):
    """Build an `EDispMap` from a 1 h simulated observation using *edisp*.

    The bundled CTA IRFs are loaded and their energy dispersion is replaced by
    the given one before reducing the observation to a `MapDataset`.
    """
    pointing = SkyCoord(1, 0, frame="galactic", unit="deg")

    irfs = load_cta_irfs(IRF_FILE)
    irfs["edisp"] = edisp
    obs = Observation.create(pointing=pointing, livetime="1 h", irfs=irfs)

    geom = WcsGeom.create(
        skydir=pointing, width=(1, 1), binsz=0.02, frame="galactic", axes=[E_RECO_AXIS]
    )
    empty = MapDataset.create(
        geom=geom, energy_axis_true=E_TRUE_AXIS, migra_axis=MIGRA_AXIS
    )
    maker = MapDatasetMaker(selection=["edisp"])
    return maker.run(empty, obs).edisp


def plot_edisp_comparison(e_reco_events, edisp_kernel):
    """Overlay the kernel PDF at E_TRUE with a histogram of sampled energies."""
    plt.figure(figsize=(12, 8))

    # PDF row of the kernel at the chosen true energy, normalised to unit integral.
    idx = edisp_kernel.e_true.coord_to_idx(E_TRUE)
    values = edisp_kernel.pdf_matrix[int(idx)]
    norm = 1 / (edisp_kernel.e_reco.bin_width * values).sum()
    plt.step(edisp_kernel.e_reco.edges[:-1].value, norm * values, label="Edisp kernel", where="post")
    plt.scatter(edisp_kernel.e_reco.center.value, norm * values, zorder=10)

    # Histogram of the sampled reconstructed energies, normalised the same way.
    # NOTE: `norm` is deliberately re-bound here, as in the original; the
    # subsequent vlines call therefore uses the *sampled* normalisation.
    data, _ = np.histogram(e_reco_events.value, bins=edisp_kernel.e_reco.edges.value)
    norm = 1 / (edisp_kernel.e_reco.bin_width.value * data).sum()
    plt.step(edisp_kernel.e_reco.edges[:-1].value, data * norm, label="Sampled edisp", where="post")

    plt.vlines(E_TRUE.value, 0, (norm * values).max())

    mean = e_reco_events.mean()
    mean_pdf = (edisp_kernel.e_reco.center * values).sum()
    print(f"E_true : {E_TRUE:.4f}")
    print(f"Mean samples: {mean:.4f}")
    print(f"Mean PDF : {mean_pdf:.4f}")

    plt.semilogx()
    plt.xlim(0.4, 3)
    plt.xlabel("$E_{Reco}$ [TeV]")
    plt.legend()


def sample_events_e_reco(edisp_map):
    """Sample N_EVENTS reconstructed energies at true energy E_TRUE from *edisp_map*."""
    lon, lat = np.zeros(N_EVENTS), np.zeros(N_EVENTS)
    skycoord = SkyCoord(lon, lat, unit="deg", frame="galactic")
    energy = E_TRUE * np.ones(len(skycoord))
    coords = MapCoord.create({"skycoord": skycoord, "energy_true": energy})
    return edisp_map.sample_coord(coords)["energy"]


# # Diagonal energy dispersion

edisp_map_diag = EDispMap.from_diagonal_response(energy_axis_true=E_TRUE_AXIS, migra_axis=MIGRA_AXIS)

e_reco_events_diag = sample_events_e_reco(edisp_map=edisp_map_diag)

e_reco = MapAxis.from_energy_bounds("0.4 TeV", "2 TeV", nbin=10, per_decade=True).edges

edisp_kernel_diag = edisp_map_diag.get_edisp_kernel(
    e_reco=e_reco, position=SkyCoord(0, 0, unit="deg", frame="galactic")
)

plot_edisp_comparison(e_reco_events_diag, edisp_kernel_diag)

# ## Gaussian energy dispersion
#
# Define a Gaussian edisp which is independent of offset:

edisp_gauss = EnergyDispersion2D.from_gauss(
    e_true=E_TRUE_AXIS.edges,
    migra=MIGRA_AXIS.edges,
    sigma=0.1,
    bias=0,
    offset=[0, 2, 4, 6, 8] * u.deg,
)

edisp_map_gauss = make_edisp_map(edisp=edisp_gauss)

# Now we sample the reconstructed energies, assuming a true energy of `E_TRUE`:

e_reco_events_gauss = sample_events_e_reco(edisp_map=edisp_map_gauss)

# Now we compute the edisp kernel and extract the PDF at the given true energy:

e_reco = MapAxis.from_energy_bounds("0.4 TeV", "2 TeV", nbin=41, per_decade=True).edges

edisp_kernel_gauss = edisp_map_gauss.get_edisp_kernel(
    e_reco=e_reco, position=SkyCoord(0, 0, unit="deg", frame="galactic")
)

# Finally we plot the comparison:

# +
plot_edisp_comparison(e_reco_events_gauss, edisp_kernel_gauss)

response = edisp_gauss.get_response(offset=0 * u.deg, e_true=E_TRUE, e_reco=e_reco)
norm = 1 / (edisp_kernel_gauss.e_reco.bin_width * response).sum()
plt.step(edisp_kernel_gauss.e_reco.edges[:-1].value, norm * response, label="Edisp response", where="post")
# -

# A few things are immediately clear:
# - We should use oversampling in migra, when drawing events or distribute the
#   events linearly (not uniformly) in the migra bin
# - The sampled distribution is shifted by 0.5 migra bin to the left
# - The integrated PDF is also shifted (probably by 0.5 and upsampled migra bin?) to the left

# ## CTA energy dispersion

edisp_cta = load_cta_irfs(IRF_FILE)["edisp"]

edisp_map_cta = make_edisp_map(edisp=edisp_cta)

e_reco_events_cta = sample_events_e_reco(edisp_map=edisp_map_cta)

e_reco = MapAxis.from_energy_bounds("0.4 TeV", "2 TeV", nbin=40, per_decade=True).edges

edisp_kernel_cta = edisp_map_cta.get_edisp_kernel(
    e_reco=e_reco, position=SkyCoord(0, 0, unit="deg", frame="galactic")
)

plot_edisp_comparison(e_reco_events_cta, edisp_kernel_cta)

# Conclusions for the CTA edisp:
# - The default migra binning we have in Gammapy is too coarse
# - What about the outlier on the left???

# # HESS Energy Dispersion

from gammapy.data import DataStore

datastore = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1")
observations = datastore.get_observations([23592, 23559])
edisp_hess = observations[0].edisp

edisp_map_hess = make_edisp_map(edisp=edisp_hess)

e_reco_events_hess = sample_events_e_reco(edisp_map=edisp_map_hess)

e_reco = MapAxis.from_energy_bounds("0.1 TeV", "10 TeV", nbin=50, per_decade=True).edges

edisp_kernel_hess = edisp_map_hess.get_edisp_kernel(
    e_reco=e_reco, position=SkyCoord(0, 0, unit="deg", frame="galactic")
)

plot_edisp_comparison(e_reco_events_hess, edisp_kernel_hess)
validation/event-sampling/checks/edisp_check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import csv
import os
import glob
import re
from pandas import DataFrame, Series
from openslide import open_slide
from PIL import Image
import timeit
import time
import math

# +
# Folders of whole-slide images (.svs) to tile. The full train/valid class list
# is kept here, commented out, for reference.
#train_paths = ["/scratch/kk4ze/data_svs/valid/Celiac", "/scratch/kk4ze/data_svs/valid/EE", "/scratch/kk4ze/data_svs/valid/Normal",
#               "/scratch/kk4ze/data_svs/train/Celiac", "/scratch/kk4ze/data_svs/train/EE", "/scratch/kk4ze/data_svs/train/Normal"]
train_paths = ["/scratch/kk4ze/data_svs/train/EE"]

# images: file stem -> full path; images_by_folder: folder -> list of basenames.
images = {}
images_by_folder = {}
for train_path in train_paths:
    images_by_folder[str(train_path)] = []
    files = glob.glob(os.path.join(train_path, '*.svs'))
    for fl in files:
        flbase = os.path.basename(fl)
        flbase_noext = os.path.splitext(flbase)[0]
        images[flbase_noext] = fl
        images_by_folder[str(train_path)].append(flbase)
# -

# +
# Map each input folder to the matching low-resolution output folder.
path_change_map = {}
for key in list(images_by_folder.keys()):
    path_change_map[key] = key.replace('data_svs', 'data_lowres')
# -

# Output tile size after downsampling.
width_height = (2200, 2200)


def _tile_offsets(long_dim, short_dim):
    """Yield (count, offset) pairs of square-tile origins along the long axis.

    Mirrors the original tiling scheme: the first tile starts at 0; tile k
    (k >= 1) starts at long_dim - k * short_dim, i.e. the remaining tiles are
    anchored to the far edge and walk back towards the origin (so tiles overlap
    when long_dim is not an exact multiple of short_dim).
    """
    for count in range(math.ceil(long_dim / short_dim)):
        yield count, 0 if count == 0 else long_dim - count * short_dim


def _save_tile(region, out_path):
    """Downsample a square RGBA region to `width_height` and save it as JPEG."""
    # Image.LANCZOS is the same filter the former Image.ANTIALIAS alias pointed
    # to; ANTIALIAS was removed in Pillow 10, so LANCZOS is the compatible name.
    small = region.resize(width_height, Image.LANCZOS)
    small.convert('RGB').save(out_path)


# Cut each slide into overlapping square tiles along its long axis and save the
# downsampled tiles into the corresponding data_lowres folder.
for key in images_by_folder.keys():
    for value in images_by_folder[key]:
        img_path = str(key) + '/' + str(value)
        op_slide_img = open_slide(img_path)
        w, h = op_slide_img.dimensions
        # Skip very large slides to bound memory use.
        if w > 18000 or h > 18000:
            continue
        value_strip = value[:-4]  # drop the '.svs' extension
        # NOTE(review): exactly square slides (w == h) fall through both
        # branches and are silently skipped — behavior preserved from original.
        if h > w:
            # Portrait slide: cut w x w tiles stacked vertically.
            for count, y in _tile_offsets(h, w):
                region = op_slide_img.read_region((0, y), 0, (w, w))
                _save_tile(region, path_change_map[key] + '/' + str(value_strip) + '_' + str(w) + '_' + str(h) + '_vert_' + '_' + str(count) + '.jpg')
        elif w > h:
            # Landscape slide: cut h x h tiles laid out horizontally.
            for count, x in _tile_offsets(w, h):
                region = op_slide_img.read_region((x, 0), 0, (h, h))
                _save_tile(region, path_change_map[key] + '/' + str(value_strip) + '_' + str(w) + '_' + str(h) + '_horiz_' + '_' + str(count) + '.jpg')
    # NOTE(review): collapsed source leaves the indentation of this print
    # ambiguous; emitted once per folder here, matching its message.
    print(key + ' Saved!\n')
image_segmention/lowres/seg_image_low_res_2200x2200.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from io import BytesIO
import itertools

import pandas as pd
import numpy as np

from diligent import diligent, registry
# -

# One mostly-increasing column with two leading nulls and two repeats at the end.
test_df = pd.DataFrame({'a': [None, None] + list(range(1000)) + [999] + [998]})
diligent(test_df, include='basic', interactive=False)

# Interactive is set to False here to allow rendering on GitHub

# +
# Two columns with nulls at opposite ends plus repeated runs of 999 and 123.
test_df = pd.DataFrame({
    'a': [None] + list(range(1000)) + ([999] * 3) + ([123] * 5) + [None],
    'b': [None, None] + list(range(1000, 0, -1)) + ([999] * 3) + ([123] * 5),
})
diligent(test_df, interactive=False)
# -

# Alternating, linear and random columns, all 1009 rows long.
test_df = pd.DataFrame({
    'alternate': [1, 1, 1, 1]
    + list(itertools.chain.from_iterable(zip(range(0, 1000, 2), range(1, 1000))))
    + [1, 2, 3, 4, 5],
    'linear': range(1009),
    'random': np.random.rand(1009),
})
diligent(test_df, interactive=False)
diligent_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
# Mount Google Drive and install the extra dependencies used by the project.
from google.colab import drive
drive.mount('/content/drive')
# !pip install fuel
# !pip install picklable_itertools

# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import numpy as np

PROJECT_DIR = "/content/drive/My Drive/2018/Colab_Deep_Learning/one_class_neural_networks/"

import sys, os

sys.path.append(PROJECT_DIR)


# +
def run_soft_bound_deep_svdd():
    """Train and evaluate soft-boundary Deep SVDD on CIFAR-10 over ten seeds.

    The ten original notebook cells (Aeroplane/Horse/Ship/... "vs All") were
    byte-identical; the target "normal" class is configured inside the project
    code, so the shared logic is factored into this single function. Prints
    per-seed progress and the mean +/- std AUROC, and returns the per-seed
    AUROC scores.
    """
    # Imported lazily so the Drive mount / sys.path setup above runs first.
    from src.models.OneClass_SVDD import OneClass_SVDD
    from src.models.config import Configuration as Cfg

    DATASET = "cifar10"
    IMG_DIM = 3072
    IMG_HGT = 32
    IMG_WDT = 32
    IMG_CHANNEL = 3
    HIDDEN_LAYER_SIZE = 128
    MODEL_SAVE_PATH = PROJECT_DIR + "/models/CIFAR10/OC_NN/"
    REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/CIFAR10/OC_NN/"
    PRETRAINED_WT_PATH = ""
    LOSS_FUNCTION = "SOFT_BOUND_DEEP_SVDD"
    # Other loss functions supported by the project:
    # LOSS_FUNCTION = "ONE_CLASS_DEEP_SVDD"
    # LOSS_FUNCTION = "ONE_CLASS_NEURAL_NETWORK"

    os.chdir(PROJECT_DIR)

    RANDOM_SEED = [42, 56, 81, 67, 33, 25, 90, 77, 15, 11]
    AUC = []
    for seed in RANDOM_SEED:
        Cfg.seed = seed
        ocnn = OneClass_SVDD(DATASET, LOSS_FUNCTION, IMG_DIM, HIDDEN_LAYER_SIZE,
                             IMG_HGT, IMG_WDT, IMG_CHANNEL,
                             MODEL_SAVE_PATH, REPORT_SAVE_PATH,
                             PRETRAINED_WT_PATH, seed)
        print("[INFO:] Testing with ALL other Image Classes as anomalies")
        ocnn.fit()
        print("==============PREDICTING THE LABELS ==============================")
        AUC.append(ocnn.predict())

    # NOTE(review): the collapsed original leaves it ambiguous whether the
    # summary prints sat inside the seed loop; they are emitted once after it.
    print("===========TRAINING AND PREDICTING WITH OCSVDD============================")
    print("AUROC computed ", AUC)
    auc_roc_mean = np.mean(np.asarray(AUC))
    auc_roc_std = np.std(np.asarray(AUC))
    print("AUROC =====", auc_roc_mean, "+/-", auc_roc_std)
    print("========================================================================")
    return AUC


# + [markdown]
# ## Soft Bound Deep SVDD — Aeroplane vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Horse vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Soft Bound Deep SVDD — Ship vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Truck vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Automobile vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Bird vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Cat vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Deer vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Dog vs All
# +
run_soft_bound_deep_svdd()

# + [markdown]
# ## Frog vs All
# +
run_soft_bound_deep_svdd()
notebooks/OC-NN/CIFAR/Soft_Boundary_Deep_SVDD_Experiments.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# ## pQTL
#
# When trying to understand the mechanisms underlying the association of genetic variants with phenotypes, it can be useful to see whether the variants are also associated with the abundance of a transcript or of a protein. Loci associated with transcript and protein abundances are called expression quantitative trait loci (eQTL) and protein quantitative trait loci (pQTL), respectively.
#
# ##### _❔ How can a non-coding variant influence the abundance of a protein?_

# ## Genome-proteome association
#
# To find pQTLs, one runs the association of protein levels against the number of alleles of genetic variants.
#
# ##### _❔ How many comparisons will this represent? What challenges does this pose to the analysis?_
#
# ##### 👨‍💻 Download the file available at this [link](https://drive.google.com/file/d/157GqxsL8DpkjAvMHkLtKgMF44Fdt-6Ig/view?usp=sharing).

# ### Libraries
#
# We will need the following libraries, please make sure that they are installed.

# +
library(conflicted) # A very convenient package to avoid and resolve namespace conflicts
library(janitor) # A package to clean tables
library(tidyr) # A reference package to manipulate data
library(dplyr) # A reference package to manipulate data
library(ggplot2) # A reference package to plot data
library(glue) # A package to put variables in strings.

conflict_prefer("filter", "dplyr") # Conflict resolution for the 'filter' function

theme_set(theme_bw(base_size = 13)) # Theme to use for all plots
# -

# ### Association between genotype and protein level
#
# ##### 👨‍💻 Set the path to the file you downloaded and load the data in R as in the code below.
# +
# Path to the downloaded pQTL table; read.table transparently decompresses .gz.
myFile <- "resources/pqtl.gz"

qtlDF <- read.table(
    file = myFile,
    header = TRUE,
    stringsAsFactors = FALSE,
    sep = "\t"
)
# -

head(qtlDF)

# ##### ❔ _What do the columns represent?_

# We will now focus on the variant at position `37754982`.
#
# ##### 👨‍💻 Create a data frame containing the values for the given variant.

snpDF <- qtlDF %>%
    filter(position == 37754982)

table(snpDF$genotype)

# ##### ❔ _What is the minor allele frequency for this variant in this population?_
#
# ##### 👨‍💻 Compute the correlation between protein A and the number of alternative alleles.

# +
# Linear model of protein A abundance on allele dosage (additive genotype coding).
snpLmResults <- lm(formula = protein_A ~ genotype, data = snpDF)

summary(snpLmResults)
# -

# ##### ❔ _Is the association significant? What about the other proteins?_
#
# ##### 👨‍💻 Plot the abundance of proteins relative to the genotype.

# +
# One panel per protein: raw points plus a per-protein regression line.
plotDF <- snpDF %>%
    pivot_longer(cols = c("protein_A", "protein_B", "protein_C", "protein_D"))

ggplot(data = plotDF) +
    geom_hline(yintercept = 0) +
    geom_point(
        mapping = aes(x = genotype, y = value, col = name),
        alpha = 0.05
    ) +
    geom_smooth(
        mapping = aes(x = genotype, y = value, col = name),
        method = "lm"
    ) +
    facet_grid(name ~ .) +
    theme(legend.position = "none")
# -

# ### Association across many variants
#
# ##### 👨‍💻 Run the association for all variants.
# +
# Run one linear model per variant and per protein, collecting the genotype
# p-value into one column per protein (p_A ... p_D).
#
# Fixes versus the original cell:
#   * `unique(qtlDF$pos)` silently relied on R's partial matching of the
#     `position` column — the full column name is now used;
#   * the identical sapply() block was copy-pasted four times — it is now a
#     single loop over the protein letters, producing the same columns and
#     the same "Processing protein X" console output.
lmResults <- data.frame(
    position = unique(qtlDF$position),
    stringsAsFactors = F
)

for (protein in c("A", "B", "C", "D")) {

    print(paste0("Processing protein ", protein))

    # Build e.g. protein_A ~ genotype for the current protein.
    lmFormula <- as.formula(paste0("protein_", protein, " ~ genotype"))

    lmResults[[paste0("p_", protein)]] <- sapply(
        X = lmResults$position,
        FUN = function (position) summary(
            lm(
                formula = lmFormula,
                data = qtlDF[qtlDF$position == position, ]
            )
        )$coef["genotype", "Pr(>|t|)"]
    )
}
# -

# ##### 👨‍💻 Plot the p-value obtained for every variant and protein against the genomic position.

# +
plotDF <- lmResults %>%
    pivot_longer(
        cols = c("p_A", "p_B", "p_C", "p_D"),
        names_prefix = "p_"
    )

ggplot(data = plotDF) +
    geom_hline(yintercept = 0) +
    geom_point(
        mapping = aes(x = position, y = -log10(value), col = name)
    ) +
    scale_y_continuous(name = "p-value [-log10]") +
    facet_grid(name ~ .) +
    theme(legend.position = "none")
# -

# ##### ❔ _What do you think about the association with protein A at position `37754982`?_

# ##### 👨‍💻 Plot the obtained p-value against what one would expect by chance.
# +
# Beta-distribution quantiles of the order statistics of n uniform p-values.
# getCiUp returns the LOWER beta quantile (small p-values), which after the
# -log10 transform below becomes the UPPER edge of the QQ-plot band, and
# vice versa for getCiDown.
getCiUp <- function(n, confidence) {
    qbeta(p = (1 - confidence) / 2, shape1 = 1:n, shape2 = n - 1:n + 1)
}

getCiDown <- function(n, confidence) {
    qbeta(p = 1 - (1 - confidence) / 2, shape1 = 1:n, shape2 = n - 1:n + 1)
}

confidence <- 0.95

# Per protein: sort the observed p-values, pair them with the expected
# uniform quantiles, and attach the confidence band on the -log10 scale.
qqPlotDF <- plotDF %>%
    group_by(name) %>%
    arrange(value) %>%
    mutate(
        logP = -log10(value),
        expectedLogP = -log10(sort(ppoints(n = n()))),
        ciUp = getCiUp(n = n(), confidence = confidence),
        ciDown = getCiDown(n = n(), confidence = confidence),
        ciUpLog = -log10(ciDown),
        ciDownLog = -log10(ciUp)
    )

ggplot(data = qqPlotDF) +
    geom_ribbon(
        data = qqPlotDF,
        mapping = aes(x = expectedLogP, ymin = ciDownLog, ymax = ciUpLog),
        fill = "black",
        alpha = 0.2
    ) +
    geom_abline(linetype = "dotted") +
    geom_point(
        data = qqPlotDF,
        mapping = aes(x = expectedLogP, y = logP, col = name)
    ) +
    scale_x_continuous(name = "Expected p-value [-log10]") +
    scale_y_continuous(name = "Observed p-value [-log10]") +
    theme(legend.position = "top")

# +
##### ❔ _Where would you set the significance threshold? Do we need to correct for the number of proteins tested?_
pages/proteogenomics/pQTL/.ipynb_checkpoints/pQTL-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %load_ext noworkflow # nip = %now_ip w = 'w' trial18 = nip.Trial(18) trial19 = nip.Trial(19) open('.18', w).write(str(trial18.script_content)) open('.19', w).write(str(trial19.script_content)) # !diff .18 .19 | colordiff # %%now_run --interactive import numpy as np import matplotlib.pyplot as plt from precipitation import read, prepare _.modules(find='precipitation') # %%now_sql SELECT name, content_hash_before FROM file_access WHERE trial_id = 2 AND name IN ("p13.dat", "p14.dat") trial = nip.Trial(2) trial # %%now_prolog {trial.id} indirect_activation({trial.id}, bar_graph, X), duration({trial.id}, X, Y) print( nip.persistence.get('9138b1e2c0f6b80ab7ac902835e7bc88ea585c7a') )
tests/tapp/Presentation/Provenance Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Stochastic Calculus Problem Set 2 Question 2

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_style('whitegrid')

# ## Pre-Question
#
# _How does the SDE tell you that the Hull-White interest rate is not a martingale?_
#
# Because it has a $dt$ term.

# ## Part (a)
#
# ### Hull-White
#
# Here, we have $$ \beta(u, x) = a_t - b_t R_t^{HW} $$ and $$ \gamma(u, x) = \sigma_t $$
#
# Thus, $$ g_t(t, x) + (a_t - b_t R_t^{HW}) g_x(t, x) + \frac{1}{2} \sigma_t^2 g_{xx}(t, x) = 0 $$
#
# ### Cox-Ingersol-Ross
#
# Here, we have $$ \beta(u, x) = a_t + b_t R_t^{CIR} $$ and $$ \gamma(u, x) = \sigma \sqrt{R_t^{CIR}} $$
#
# Thus, $$ g_t(t, x) + (a_t - b_t R_t^{CIR}) g_x(t, x) + \frac{1}{2} \sigma^2 R_t^{CIR} g_{xx}(t, x) = 0 $$

# ## Part (b)

# +
epsilon = 0.01  # Keep interest rate bounded away from 0
delta = 0.01    # Time step
T = 500         # Roughly 2 years

K_a = 0.01
K_b = 0.005
K_sigma = 0.05

t = np.arange(0, T)
b = K_b * (1.1 + np.sin(np.pi*t / T))
sigma = K_sigma * (1.1 + np.cos(4*np.pi*t / T))
a = 0.5 * sigma**2 + K_a * (1.1 + np.cos(np.pi*t / T))
# -

fig, ax = plt.subplots(figsize=[8, 6])
ax.plot(a, label='$a_t$')
ax.plot(b, label='$b_t$')
ax.plot(sigma, label='$\sigma_t$')
ax.set_title('Values of $a_t$, $b_t$ and $\sigma_t$ over time')
ax.set_xlabel('Time')
ax.set_ylabel('Value')
ax.legend();


def simulate_rate(n_paths, vol):
    """Euler–Maruyama simulation of dR = (a_t - b_t R_t) dt + sigma_t vol(R_t) dW_t.

    Parameters
    ----------
    n_paths : int
        Number of independent sample paths (columns of the result).
    vol : callable
        State-dependent diffusion factor: ``lambda r: 1.0`` for Hull-White,
        ``np.sqrt`` for Cox-Ingersoll-Ross.

    Returns
    -------
    np.ndarray of shape (T, n_paths), one time step per row, R_0 = 1.

    Fixes versus the original copy-pasted loops:
      * the drift is scaled by ``delta`` and the shock by ``sqrt(delta)``
        (``delta`` was defined but never used);
      * the update starts from the CURRENT value ``R[idx]`` — the original
        added the increment to the stale all-ones next row, so every step
        restarted from 1;
      * each path receives an independent Gaussian shock — the original drew
        one scalar per step, making all paths identical.
    """
    R = np.ones(shape=[T, n_paths])
    sqrt_delta = np.sqrt(delta)
    for idx in range(T - 1):
        # Keep the rate bounded away from 0 before stepping (message kept
        # identical to the original notebook, see Part (d)).
        low = R[idx] < epsilon
        if low.any():
            print('Exception occured at iteration {}'.format(idx))
            R[idx][low] = epsilon
        dR = (a[idx] - b[idx] * R[idx]) * delta \
            + sigma[idx] * vol(R[idx]) * sqrt_delta * np.random.randn(n_paths)
        R[idx + 1] = R[idx] + dR
    return R


# ### Sub-Part 1

# +
# Hull-White
R_HW = simulate_rate(10, vol=lambda r: 1.0)
# -

fig, ax = plt.subplots(figsize=[12, 8])
ax.plot(R_HW)
ax.set_title('Hull-White Interest Rate')
ax.set_xlabel('Time step')
ax.set_ylabel('Interest Rate');

# +
# Cox-Ingersoll-Ross
R_CIR = simulate_rate(10, vol=np.sqrt)
# -

fig, ax = plt.subplots(figsize=[12, 8])
ax.plot(R_CIR)
ax.set_title('Cox-Ingersoll-Ross Interest Rate')
ax.set_xlabel('Time step')
ax.set_ylabel('Interest Rate');

# ### Sub-Part 2

# Using WolframAlpha, we have that
#
# $$ c(t) = \int_0^t K_b (1.1 + \sin(\frac{\pi t}{T})) du = K_b (1.1 t - \frac{T}{\pi} cos(\frac{\pi t}{T}))$$
#
# Thus,
#
# $$ \int_0^T \exp[-a(u) (c(T) - c(0))] du = \int_0^T \exp[- 1.1 K_b T (\frac{1}{2} (K_\sigma (1.1 + \cos(4\pi t / T)))^2 + K_a(1.1 + \cos(\frac{\pi t}{T}))) ] du $$
#
# This integral does not appear to have a closed-form solution. Let us call it $I(T)$.
#
# Finally, the integrand of the last integral is
#
# $$ \exp[-2(c(T) - c(u))] \sigma(u) = \exp[-2(K_b (1.1 T + \frac{T}{\pi} - 1.1 u + \frac{T}{\pi} cos(\frac{\pi u}{T})))] (K_\sigma (1.1 + \cos(4 \pi t / T))) $$
#
# $$ = \exp[-2 (1.41831 T - 1.1 u + (T cos((\pi u)/T))/\pi) K_b] (1.1 + cos((4 \pi t)/T)) K_\sigma $$
#
# So, all told, the Hull-White interest rate is
#
# $$ R(T) = r \exp[- 1.1 K_b T] + I(T) +\int_0^T \exp[-2 (1.41831 T - 1.1 u + (T cos((\pi u)/T))/\pi) K_b] (1.1 + cos((4 \pi t)/T)) K_\sigma du $$

# ## Part (c)

# +
# Hull-White, 1000 paths for the Monte-Carlo moments
R_HW = simulate_rate(1000, vol=lambda r: 1.0)
# -

# +
# Cox-Ingersoll-Ross, 1000 paths
R_CIR = simulate_rate(1000, vol=np.sqrt)
# -

R_HW_mean = np.mean(R_HW, axis=1)
R_CIR_mean = np.mean(R_CIR, axis=1)

R_HW_var = np.var(R_HW, axis=1)
R_CIR_var = np.var(R_CIR, axis=1)

fig, ax = plt.subplots(figsize=[12, 8])
ax.plot(R_HW_mean, label='HW mean')
ax.plot(R_CIR_mean, label='CIR mean')
ax.set_title('Mean of Interest Rate')
ax.set_xlabel('Time step')
ax.set_ylabel('Mean of Interest Rate')
ax.legend();

fig, ax = plt.subplots(figsize=[12, 8])
ax.plot(R_HW_var, label='HW variance')
ax.plot(R_CIR_var, label='CIR variance')
ax.set_title('Variance of Interest Rate')
ax.set_xlabel('Time step')
ax.set_ylabel('Variance of Interest Rate')
ax.legend();

# ## Part (d)
#
# The exception does not occur at all.
quantfin/financial-signal-processing/stochastic-calculus/pset2/stoch_calc_pset_q2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="3O6z2z41gZPi" colab_type="text" # # Burritos example # + id="EmbSulX3aTuD" colab_type="code" outputId="c06d3787-a812-4b06-be56-e52631875755" executionInfo={"status": "ok", "timestamp": 1573497692884, "user_tz": 360, "elapsed": 721, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 168} import pandas as pd url = ('https://raw.githubusercontent.com/' 'LambdaSchool/DS-Unit-2-Linear-Models' '/master/data/burritos/burritos.csv') df = pd.read_csv(url) df['overall'].describe() # + [markdown] id="fxAF4cIPgfYh" colab_type="text" # # Choose your target. # # Which column in your tabular dataset will you predict? # + id="73R3lQzPc7Jm" colab_type="code" outputId="52ba4904-40c8-40b0-cd36-930e9b4c0d49" executionInfo={"status": "ok", "timestamp": 1573497693899, "user_tz": 360, "elapsed": 1076, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 101} # Derive binary classification target: # We define a 'Great' burrito as having an # overall rating of 4 or higher, on a 5 point scale. # Drop unrated burritos. df = df.dropna(subset=['overall']) df['Great'] = df['overall'] >= 4 df['Great'].describe() # + [markdown] id="overTirKgpIX" colab_type="text" # I've derived my own target to redefine the target (previously 1-5 stars) to a binary classification problem: Predict if a burrito is "Great" or not. # + [markdown] id="8OMBoEX-3IUp" colab_type="text" # # + [markdown] id="FHcess9Ug7gX" colab_type="text" # # How is your target distributed? # # Classification: How many classes? Are the classes imbalanced? 
# + id="MghM94kUgob_" colab_type="code" colab={} y = df['Great'] # + id="bKey2KkwhAUF" colab_type="code" outputId="9c34b311-1983-4cfa-9002-65264a48d877" executionInfo={"status": "ok", "timestamp": 1573497696749, "user_tz": 360, "elapsed": 483, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 34} y.nunique() # + id="hRo-RnyshBfs" colab_type="code" outputId="f49cf1e5-4c2d-4bdb-d0e5-88a5393d08e2" executionInfo={"status": "ok", "timestamp": 1573497697721, "user_tz": 360, "elapsed": 1194, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 34} y.value_counts(normalize=True).max() # + id="0SlUMEcFhH6C" colab_type="code" outputId="d2a750be-1b9e-4da4-cd9c-c3b25781620b" executionInfo={"status": "ok", "timestamp": 1573497697722, "user_tz": 360, "elapsed": 854, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 67} y.value_counts(normalize=True) # + [markdown] id="izWgEPARptnY" colab_type="text" # There are 2 classes, this is a binary classification problem. The majority class occurs with 57% frequency, so this is not too imbalanced. I could just use accuracy score as my evaluation metric. # # Precision when predicting great burritos may be the most important because I'm only going to eat one, so I want to make sure it's good. # # (On the other hand, is a "bad" burrito really that bad? Not to me, not from a taste perspective.) # # Recall when predicting great burritos could mean trying more things to make sure you don't miss some new, different, great burrito that you otherwise wouldn't have tried. # # Which burito place would you take your first date to? Precision. 
# + [markdown] id="vaOZNuktxY44" colab_type="text" # # Begin to clean and explore your data # + id="aa84-5osxYgg" colab_type="code" outputId="b0f2d3b3-4a44-4a20-933d-f51ad28274c8" executionInfo={"status": "ok", "timestamp": 1573497782732, "user_tz": 360, "elapsed": 861, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 218} df['Burrito'].value_counts() # + id="f-MPaLTVxz82" colab_type="code" outputId="1a3d1736-b3ad-43c6-db4f-9514a60df41c" executionInfo={"status": "ok", "timestamp": 1573497825042, "user_tz": 360, "elapsed": 900, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 655} df['Burrito'].unique() # + id="1QMLd742yEfq" colab_type="code" colab={} # Clean/combine the Burrito categories df['Burrito'] = df['Burrito'].str.lower() # + id="7bXdtDHNyefk" colab_type="code" outputId="1e2d943f-97e9-4b20-9360-cd6db6d2492a" executionInfo={"status": "ok", "timestamp": 1573497995282, "user_tz": 360, "elapsed": 843, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 34} df['Burrito'].nunique() # + id="bH5RCIRQyLnY" colab_type="code" colab={} california = df['Burrito'].str.contains('california') asada = df['Burrito'].str.contains('asada') surf = df['Burrito'].str.contains('surf') carnitas = df['Burrito'].str.contains('carnitas') # + id="SxsaFdfNyNn9" colab_type="code" outputId="ce855488-ddd8-46d4-d097-fd3ed2fe208a" executionInfo={"status": "ok", "timestamp": 1573498012406, "user_tz": 360, "elapsed": 849, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 67} california.value_counts() # + id="R9SoagIZytNW" colab_type="code" colab={} df.loc[california, 'Burrito'] = 'California' df.loc[asada, 'Burrito'] = 'Asada' 
df.loc[surf, 'Burrito'] = 'Surf & Turf' df.loc[carnitas, 'Burrito'] = 'Carnitas' df.loc[~california & ~asada & ~surf & ~carnitas, 'Burrito'] = 'Other' # + id="QEF2GhACytwF" colab_type="code" outputId="85ff2336-a789-4d5b-e3c8-e5dd45032747" executionInfo={"status": "ok", "timestamp": 1573498058914, "user_tz": 360, "elapsed": 362, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 118} df['Burrito'].value_counts() # + id="sTZIAi50y54k" colab_type="code" colab={} # Drop some high cardinality categoricals df = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL', 'Neighborhood']) # + [markdown] id="5NWkwHeIzD5e" colab_type="text" # # What happens if we *DON'T* drop features with leakage? # + id="j4eVG0BGy-BB" colab_type="code" outputId="7b39052d-34aa-4ccd-9c6f-da8f0c457d42" executionInfo={"status": "ok", "timestamp": 1573498502646, "user_tz": 360, "elapsed": 1469, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 34} import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.model_selection import cross_val_score, train_test_split from sklearn.pipeline import make_pipeline from sklearn.tree import DecisionTreeClassifier target = 'Great' features = df.columns.drop(target) X = df[features] y = df[target] X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=42 ) pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(), DecisionTreeClassifier(max_depth=3) ) cross_val_score(pipeline, X_train, y_train, scoring='accuracy', cv=5) # + id="uJaqLU34zwi8" colab_type="code" outputId="72a1b109-9080-4317-d3b6-7ef906f0810c" executionInfo={"status": "error", "timestamp": 1573498508512, "user_tz": 360, "elapsed": 546, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", 
"height": 330} pipeline.fit(X_train, y_train) tree = pipeline.named_steps['decisiontreeclassifier'] encoder = pipeline.named_steps['ordinalencoder'] X_train_encoded = encoder.transform(X_train) importances = pd.Series(tree.feature_importances_, X_train_encoded.columns) importances.sort_values().plot.barh(); # + [markdown] id="e4S_UPUI0hKb" colab_type="text" # # Titanic # + id="td9LOHV00dy8" colab_type="code" outputId="1fbcaa65-a05a-4135-ce3f-d3caab557557" executionInfo={"status": "ok", "timestamp": 1573498554614, "user_tz": 360, "elapsed": 614, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 34} df = sns.load_dataset('titanic') target = 'survived' features = df.columns.drop(target) X = df[features] y = df[target] X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=42) pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(), DecisionTreeClassifier(max_depth=3) ) cross_val_score(pipeline, X_train, y_train, scoring='accuracy', cv=5) # + id="CW7EP1Ww0oZP" colab_type="code" outputId="2676ad97-75a1-46c6-b60c-a9ec0da05640" executionInfo={"status": "ok", "timestamp": 1573498577775, "user_tz": 360, "elapsed": 1098, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 265} pipeline.fit(X_train, y_train) tree = pipeline.named_steps['decisiontreeclassifier'] importances = pd.Series(tree.feature_importances_, X_train.columns) importances.sort_values().plot.barh(); # + id="uL9YbuIB0t7e" colab_type="code" outputId="1ffc8021-b532-49cb-fac5-c673ec453cfc" executionInfo={"status": "ok", "timestamp": 1573498600407, "user_tz": 360, "elapsed": 496, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 195} df.head() # + id="R-qD7i6y1DuY" colab_type="code" 
outputId="361c6c36-ff18-46d7-dbc3-6ff0ef538e82" executionInfo={"status": "ok", "timestamp": 1573498669691, "user_tz": 360, "elapsed": 580, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 241} import graphviz from sklearn.tree import export_graphviz pipeline.fit(X_train, y_train) tree = pipeline.named_steps['decisiontreeclassifier'] dot_data = export_graphviz( tree, out_file=None, feature_names=X.columns, class_names=y.unique().astype(str), filled=True, impurity=False, proportion=True) graphviz.Source(dot_data) # + [markdown] id="2JjgVxD8sIXy" colab_type="text" # # Regression: NYC apartment data # + id="AmqLUcirhKG3" colab_type="code" colab={} # %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/' # !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' # + id="aWU1rEsLsNX-" colab_type="code" colab={} import numpy as np import pandas as pd # Read New York City apartment rental listing data df = pd.read_csv(DATA_PATH+'apartments/renthop-nyc.csv') assert df.shape == (49352, 34) # + id="vLko0fOTsQij" colab_type="code" colab={} y = df['price'] # + id="xlsfr_2GsWRp" colab_type="code" outputId="c2b93558-7fe7-44e8-ef1c-d761ba0b6c16" executionInfo={"status": "ok", "timestamp": 1573496400161, "user_tz": 360, "elapsed": 1104, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 279} # %matplotlib inline import seaborn as sns sns.distplot(y); # + id="cvSCFJqssaST" colab_type="code" outputId="6b16d79a-06ab-4c8a-d2ae-4d246e23fc35" executionInfo={"status": "ok", "timestamp": 1573496419771, "user_tz": 360, "elapsed": 417, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": 
"https://localhost:8080/", "height": 168} y.describe() # + id="B01wj-zSsfPF" colab_type="code" outputId="5210b37e-b6f8-4deb-a829-ac69ab22acac" executionInfo={"status": "ok", "timestamp": 1573496488345, "user_tz": 360, "elapsed": 382, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 67} (y < 100).value_counts() # + [markdown] id="dpauHixJtDxI" colab_type="text" # # Are some observations outliers? # # Will you exclude # # them? # + id="C2-coskUtDFA" colab_type="code" colab={} # Yes! There are outliers # Some prices that are so high or low it doesn't really make sense. # SOme locations that aren't even in New York City. # + id="OOAYP-Gqsskw" colab_type="code" colab={} # Remove the most extreme 1% prices, # the most extreme .1% latitudes, & # the most extreme .1% longitudes df = df[(df['price'] >= np.percentile(df['price'], 0.5)) & (df['price'] <= np.percentile(df['price'], 99.5)) & (df['latitude'] >= np.percentile(df['latitude'], 0.05)) & (df['latitude'] < np.percentile(df['latitude'], 99.95)) & (df['longitude'] >= np.percentile(df['longitude'], 0.05)) & (df['longitude'] <= np.percentile(df['longitude'], 99.95))] # + id="quX_J2RxtTrX" colab_type="code" outputId="29b6f1ac-98ab-41c0-afd1-bc07c4377b1c" executionInfo={"status": "ok", "timestamp": 1573496649771, "user_tz": 360, "elapsed": 580, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 279} y = df['price'] sns.distplot(y); # + id="eY_0wLPVtWr6" colab_type="code" outputId="61cbdf0b-12e2-4e23-d3bd-f54912fba3ee" executionInfo={"status": "ok", "timestamp": 1573496753679, "user_tz": 360, "elapsed": 403, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 168} y.describe() # + id="1vqIJQHpupxD" colab_type="code" colab={} import numpy as np y_log = 
np.log1p(y) # + id="hvmnJYf2uyA0" colab_type="code" outputId="6393f09f-a058-46ce-a65b-bbf1c24e39ab" executionInfo={"status": "ok", "timestamp": 1573497125031, "user_tz": 360, "elapsed": 573, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 295} import matplotlib.pyplot as plt sns.distplot(y) plt.title('Original target, in the unit of US Dollars'); # + id="eeF7qH2FtwwZ" colab_type="code" outputId="be4248c5-9a68-41c8-bef4-6d201853e55b" executionInfo={"status": "ok", "timestamp": 1573497140130, "user_tz": 360, "elapsed": 641, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 295} sns.distplot(y_log) plt.title('Log-transformed target, in log-dollars'); # + id="bamZT9ORuwH3" colab_type="code" outputId="daae07a5-3f80-4c03-81d3-4328c4b01eeb" executionInfo={"status": "ok", "timestamp": 1573497387787, "user_tz": 360, "elapsed": 561, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18193624300732209966"}} colab={"base_uri": "https://localhost:8080/", "height": 279} y_untransformed = np.expm1(y_log) sns.distplot(y_untransformed); # + id="ri-oh-t-un6U" colab_type="code" colab={} y_train_log = np.log1p(y_train) y_val_log = np.log1p(y_val) model.fit(X_train, y_train_log) y_pred_log = model.predict(X_val) y_pred = np.expm1(y_pred_log) mean_absolute_error(y_val, y_pred)
module1/D1_Lesson_Applied_Modeling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pandas # erlaubt schnelle Analysen u. Datenbereinigung import numpy as np import pandas as pd # ## Series/Serien # * ähnlich numpy array, jedoch mit Achsenbeschriftung mögl. # * Zugriff über Zahlen und Strings mögl. # * nicht nur Zahlen sondern auch Strings im Array mögl. achsenbeschriftung = ["a","b","c"] daten = [10,20,30] numpy_array = np.array(daten) dictonary = {"a":10,"b":20,"c":30} pd.Series(data=daten) #pandas Liste mit automatischem Key pd.Series(data=daten, index=achsenbeschriftung) pd.Series(daten, achsenbeschriftung) #Reihenfolge beachten pd.Series(dictonary) ser1 = pd.Series([1,2,3,4], index=["USA","Deutschland","Polen","Japan"]) ser2 = pd.Series([5,6,7,8], index=["USA","Deutschland","Italien","Japan"]) ser1["USA"] #schnelles suche eines El. / ähnlich Hashtabelle ser1+ser2 # ## Dataframes # * vielzahl von Series die zusammengefügt wurden um Index/Spalte zu teilen from numpy.random import randn #import von rand, schnellerer Zugriff anstatt ständig zu zugreifen #randn --> Standartnormalverteilte Zufallszahlen np.random.seed(101) #für Pseudozufallszahlen brauch man immer einen Startwert """ +------------+---------+--------+ | | A | B | +------------+---------+--------- | 0 | 0.626386| 1.52325|<----axis=1----- +------------+---------+--------+ | | | axis=0 | ↓ ↓ """ # ### Data Frame erstellen df = pd.DataFrame(randn(5,4), index='A B C D E'.split(), columns =' W X Y Z'.split()) #5 Zeilen, 4 Spalten, split = String nach Leerzeichen trennen in Array (ähnlich JS) df df.info() #Informationen über das DF df.shape #5Zeilen und 4 Spalten df["W"] #komplette Spalte df[["Z","W","Y"]] # ### Spalte/Index anfügen df["neu"] = df["Y"] + df["Z"] #einfügen einer neuen Spalte/ mit dem Index/Spaltenname neu und weisen der die addierten Werte von Y und Z 
zu df # ### Spalte/Index entfernen df.drop('neu', axis=1) #Spalte entfernen (axis = 1), Zeile -> axis = 0 df.drop('neu', axis=1, inplace=True) #Spalte für immer entfernen # ### Zeilen selektieren df.loc['A'] #Zeilen ausgeben df.iloc[2] #Zeilen ausgeben mit Index der Zeile (Zeile C) df.loc['B','Y'] #einen genauen Wert ausgeben df.loc[['A','B'],['W','Y']] #Teilwerte ausgeben # ### Selektion df[['W','X']]>0 df[df>0] df[df['W']>0] #Zeile C wird nicht mit ausgegeben, weil der Wert < 0 ist df[df['W']>0][['Y','Z']] #Ausgabe der Spalten Y und Z (['Y'] -> für nur einen Wert) ohne Zeile C df[ (df['W'] > 0) & (df['X'] > 1 ) ] #2 Bedingungen mit & -> UND verknüpft df[ (df['W'] > 0) | (df['X'] > 1 ) ] # mit | -> ODER verknüpft df.reset_index() #Index wird zurück gesetzt als Spalte u. Standartindex wird gesetzt neu_index = 'DD H GC MW Z'.split() df["Kennzeichen"] = neu_index #die Spalte mit dem index Kennzeichen wird hinzugefügt df.set_index('Kennzeichen',inplace = True) # der Index wird neu gesetzt/ überschrieben, inplace = wirklich überschreiben df # ### hierarchischer Index aussen_index = ['G1', 'G1', 'G1', 'G2', 'G2', 'G2'] innen_index = [1,2,3,1,2,3] hierarchischer_index = list(zip(aussen_index, innen_index)) #zip fügt 2 Listen zusammen hierarchischer_index hierarchischer_index = pd.MultiIndex.from_tuples(hierarchischer_index) hierarchischer_index df = pd.DataFrame(np.random.randn(6,2), index = hierarchischer_index, columns = ['A','B']) df df.loc['G1'].loc[2] df['A'].loc['G1'].loc[1] #einzelnen Wert ausgeben df.index.names = ['Gruppe','Num'] #dem Index einen Namen zuweisen df df.xs('G1') #Zugriff auf Reihen oder Spalten -> ähnlich loc df.xs(['G1',1])['B'] df.xs(1, level='Num') #Zeile mit dem Index 1 # ## Missing Data und Imputation # * fehlende Daten sollen umgangen werden # ### Eleminierungsverfahren / Complete-Case Analysis # * Missing-Data Technik # * Datensätze bei dennen eine oder mehrere Erhebungsmerkmale fehlende Werte aufweisen, werden aus der Matrix gestrichen # # 
Nachteile: # * zur Folge Informationsverlust # * Verfälschung der Stichprobe mögl. df = pd.DataFrame({'A':[1, 2, np.nan],'B':[5, np.nan, np.nan], 'C':[1,2,3]}) df df.dropna() #erhalte nur Zeilen ohne NULL Werte df.dropna(axis=1) #man kann mit axis angeben ob x oder y Achse df.dropna(thresh=2) #alle Zeilen filtern die weniger als 2 Null-Werte haben # ### Implementation # fehlende Werte auffüllen df.fillna(value="Füllwert") #alle Nullwerte auffüllen df['A'].fillna(value=df['A'].mean()) # der Null-Wert wird durch den MW der Spalte ersetzt
Pandas/Pandas_Teil1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Most people are using distance features, but not using **ANGLE** features. My teacher 'Google' taught me that the angles among atoms are important to estimate molecular properties. Let me show some examples in this kernel.

# ## Import & Load

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings("ignore")
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
warnings.filterwarnings(action="ignore", category=FutureWarning)
# -

# +
df_train = pd.read_csv('../input/train.csv')
#df_test=pd.read_csv('../input/test.csv')
df_struct = pd.read_csv('../input/structures.csv')
# -

# I use this great kernel to get x,y,z position. https://www.kaggle.com/seriousran/just-speed-up-calculate-distance-from-benchmark


def map_atom_info(df_1, df_2, atom_idx):
    """Left-join per-atom structure info for one end of the coupling pair.

    Merges ``df_2`` onto ``df_1`` by molecule name and the atom index held in
    ``atom_index_{atom_idx}``, then drops the redundant ``atom_index`` join key.
    """
    merged = pd.merge(
        df_1,
        df_2,
        how='left',
        left_on=['molecule_name', f'atom_index_{atom_idx}'],
        right_on=['molecule_name', 'atom_index'],
    )
    return merged.drop('atom_index', axis=1)


# Attach the atom symbol and coordinates for both ends of every pair,
# suffixing the merged columns with the pair slot (0 or 1).
for atom_idx in (0, 1):
    df_train = map_atom_info(df_train, df_struct, atom_idx)
    suffix_map = {col: f'{col}_{atom_idx}' for col in ('atom', 'x', 'y', 'z')}
    df_train = df_train.rename(columns=suffix_map)

# ## Create Features

# Let's get the distance between atoms first.


def make_features(df):
    """Add displacement components (dx, dy, dz) and the Euclidean distance."""
    for axis in ('x', 'y', 'z'):
        df[f'd{axis}'] = df[f'{axis}_1'] - df[f'{axis}_0']
    df['distance'] = (df['dx'] ** 2 + df['dy'] ** 2 + df['dz'] ** 2) ** (1 / 2)
    return df


df_train = make_features(df_train)

# Next, find the coupled atom of atom_1 and atom_2.
# I'd like to use the distance to get the coupled atom(closest atom). You can use 'type' feature instead.

# +
#I apologize for my poor coding skill. Please make the better one.
print(df_train.shape)

# Work on a slim copy with only the ids, the pair distance and both coordinates.
df_temp=df_train.loc[:,["molecule_name","atom_index_0","atom_index_1","distance","x_0","y_0","z_0","x_1","y_1","z_1"]].copy()

# Duplicate every pair with the two atoms swapped so that each atom of the
# pair appears once in the atom_index_0 role; a single groupby can then find
# the nearest neighbour of every atom.
df_temp_=df_temp.copy()
df_temp_= df_temp_.rename(columns={'atom_index_0': 'atom_index_1',
                                   'atom_index_1': 'atom_index_0',
                                   'x_0': 'x_1',
                                   'y_0': 'y_1',
                                   'z_0': 'z_1',
                                   'x_1': 'x_0',
                                   'y_1': 'y_0',
                                   'z_1': 'z_0'})
df_temp=pd.concat((df_temp,df_temp_),axis=0)

# Nearest neighbour = the partner at the minimum distance per (molecule, atom).
df_temp["min_distance"]=df_temp.groupby(['molecule_name', 'atom_index_0'])['distance'].transform('min')
df_temp= df_temp[df_temp["min_distance"]==df_temp["distance"]]

df_temp=df_temp.drop(['x_0','y_0','z_0','min_distance'], axis=1)
df_temp= df_temp.rename(columns={'atom_index_0': 'atom_index',
                                 'atom_index_1': 'atom_index_closest',
                                 'distance': 'distance_closest',
                                 'x_1': 'x_closest',
                                 'y_1': 'y_closest',
                                 'z_1': 'z_closest'})
print(df_temp.duplicated(subset=['molecule_name', 'atom_index']).value_counts())

#delete duplicated rows (some atom pairs have perfectly same distance)
#This code is added based on <NAME>'s comment.
df_temp=df_temp.drop_duplicates(subset=['molecule_name', 'atom_index'])

# Attach the closest-atom columns for both ends of every pair.
for atom_idx in [0,1]:
    df_train = map_atom_info(df_train,df_temp, atom_idx)
    df_train = df_train.rename(columns={'atom_index_closest': f'atom_index_closest_{atom_idx}',
                                        'distance_closest': f'distance_closest_{atom_idx}',
                                        'x_closest': f'x_closest_{atom_idx}',
                                        'y_closest': f'y_closest_{atom_idx}',
                                        'z_closest': f'z_closest_{atom_idx}'})
print(df_train.shape)
# -

# Now, I get xyz positions of 4 atoms.
# 1. atom_0
# 2. atom_1
# 3. closest one to atom_0
# 4. closest one to atom_1
# If atom_1 is C or N, it has some connections. It's not considered here.

df_train.head()

# Let's get **cosine angles** by calculating dot product of vectors.
# + def add_cos_features(df): df["distance_0"]=((df['x_0']-df['x_closest_0'])**2+(df['y_0']-df['y_closest_0'])**2+(df['z_0']-df['z_closest_0'])**2)**(1/2) df["distance_1"]=((df['x_1']-df['x_closest_1'])**2+(df['y_1']-df['y_closest_1'])**2+(df['z_1']-df['z_closest_1'])**2)**(1/2) df["vec_0_x"]=(df['x_0']-df['x_closest_0'])/df["distance_0"] df["vec_0_y"]=(df['y_0']-df['y_closest_0'])/df["distance_0"] df["vec_0_z"]=(df['z_0']-df['z_closest_0'])/df["distance_0"] df["vec_1_x"]=(df['x_1']-df['x_closest_1'])/df["distance_1"] df["vec_1_y"]=(df['y_1']-df['y_closest_1'])/df["distance_1"] df["vec_1_z"]=(df['z_1']-df['z_closest_1'])/df["distance_1"] df["vec_x"]=(df['x_1']-df['x_0'])/df["distance"] df["vec_y"]=(df['y_1']-df['y_0'])/df["distance"] df["vec_z"]=(df['z_1']-df['z_0'])/df["distance"] df["cos_0_1"]=df["vec_0_x"]*df["vec_1_x"]+df["vec_0_y"]*df["vec_1_y"]+df["vec_0_z"]*df["vec_1_z"] df["cos_0"]=df["vec_0_x"]*df["vec_x"]+df["vec_0_y"]*df["vec_y"]+df["vec_0_z"]*df["vec_z"] df["cos_1"]=df["vec_1_x"]*df["vec_x"]+df["vec_1_y"]*df["vec_y"]+df["vec_1_z"]*df["vec_z"] df=df.drop(['vec_0_x','vec_0_y','vec_0_z','vec_1_x','vec_1_y','vec_1_z','vec_x','vec_y','vec_z'], axis=1) return df df_train=add_cos_features(df_train) # - # I'd like to show some graph. You can see the obvious relationship between 'angle' and 'scalar_coupling_constant'. 
# +
# The same scatter diagnostic is drawn for each cosine feature: one panel
# per coupling type, scalar_coupling_constant on the y-axis. Only the first
# 100k rows are plotted to keep rendering fast.
def _plot_scc_against(feature):
    """Scatter 'scalar_coupling_constant' vs *feature*, one axis per coupling type."""
    coupling_types = df_train["type"].unique()
    sample = df_train.iloc[:100000, :].copy()
    fig, axes = plt.subplots(8, 1, figsize=(8, 32))
    for i, coupling in enumerate(coupling_types):
        by_type = sample.loc[sample['type'] == coupling]
        axes[i].scatter(by_type[feature],
                        by_type['scalar_coupling_constant'],
                        s=10, alpha=0.3)
        axes[i].set_title(str(coupling))


for _cos_col in ("cos_0_1", "cos_0", "cos_1"):
    _plot_scc_against(_cos_col)
download_code/effective-feature.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Quality Preparation # In this section I will be cleaning and preparing the vehicles and trip tables for the prediction model. It will be divided into two parts as they will be cleaned and prepared seperately. # # #### Introduction: # # ### Trip Dataset # Each row represents one trip(route) # #### Understanding the features: # * DATASOURCE: Unique Bus Operator Code # * DAYOFSERVICE: Day of service. One day of service could last more than 24 hours # * TRIPID: Unique Trip code # * LINEID: Unique Line code # * ROUTEID: Unique route code # * DIRECTION: Route direction: (2)IB = inbound / going / northbound / eastbound, (1)OB = outbound / back / southbound / westbound # * PLANNEDTIME_ARR: Planned arrival time of the trip, in seconds # * PLANNEDTIME_DEP: Planned departure time of the trip, in seconds # * ACTUALTIME_ARR: Actual arrival time of the trip, in seconds # * ACTUALTIME_DEP: Actual departure time of the trip, in seconds # * BASIN: basin code # * TENDERLOT: tender lot # * SUPPRESSED: The whole trip has been supressed (0 = achieved, 1 = suppressed) # * JUSTIFICATIONID: Fault code # * LASTUPDATE: Time of the last record update # * NOTE: Free note # # ### Vehicles Dataset # Each record of this table represents the service of one vehicle for one day of service and tells about # the overall distance and time worked by the associated vehicle in that specific day. 
# # #### Understanding the features: # * DATASOURCE: Unique Bus Operator Code # * DAYOFSERVICE: Day of service # * VEHICLEID: Unique vehicle code arriving at this stop point # * DISTANCE: Distance travelled by the vehicle in the corresponding day # * MINUTES: Time worked by the veihcle in the corresponding day # * LASTUPDATE: Time of the last record update # * NOTE: Free note # + # Importing the modules that might be used import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches import dask.dataframe as dd import sqlite3 from sqlalchemy import create_engine from pprint import pprint # ignore warnings import warnings warnings.filterwarnings('ignore') # - # leave_times = pd.read_feather('/home/faye/data/leavetimes_cleaned_1.feather') # Loading in the data trips = pd.read_csv('/home/faye/data/rt_trips_DB_2018.txt', sep=';',error_bad_lines=False) vehicles = pd.read_csv('/home/faye/data/rt_vehicles_DB_2018.txt', sep=';',error_bad_lines=False) leave_times_db = create_engine('sqlite:///rt_leavetimes_DB_2018.db') con = sqlite3.connect('/home/faye/notebooks/rt_leavetimes_DB_2018.db') leavetimes = pd.read_sql("select * from chunk_sql group by TRIPID order by max(PROGRNUMBER);", con) # ## Trips # *Exploring the data* # <br><br> trips.head(50) trips.tail(50) # Columns and rows trips.shape trips.dtypes trips.isnull().sum() trips.nunique() # + # trips_columns = list(trips.columns.values) # - # Seperating features categorical_columns = ['DATASOURCE','TRIPID','LINEID','ROUTEID','DIRECTION','BASIN','TENDERLOT','JUSTIFICATIONID','NOTE','SUPPRESSED'] time_columns = ['PLANNEDTIME_ARR','PLANNEDTIME_DEP','ACTUALTIME_ARR','ACTUALTIME_DEP'] datetime = ['DAYOFSERVICE','LASTUPDATE'] # # Converting seconds into times and into datetime series # import datetime # for row in trips['PLANNEDTIME_ARR']: # trips['PLANNEDTIME_ARR'] = trips['PLANNEDTIME_ARR'].replace(row, str(datetime.timedelta(seconds=row))) # # for row in trips['PLANNEDTIME_DEP']: # 
trips['PLANNEDTIME_DEP'] = trips['PLANNEDTIME_DEP'].replace(row, str(datetime.timedelta(seconds=row))) # # for row in trips['ACTUALTIME_ARR']: # trips['ACTUALTIME_ARR'] = trips['ACTUALTIME_ARR'].replace(row, str(datetime.timedelta(seconds=row))) # # for row in trips['ACTUALTIME_DEP']: # trips['ACTUALTIME_DEP'] = trips['ACTUALTIME_DEP'].replace(row, str(datetime.timedelta(seconds=row))) # + # Converting objects into categorical types for column in categorical_columns: trips[column] = trips[column].astype('category') # Converting objects into datetime series for column2 in datetime: trips[column2] = pd.to_datetime(trips[column2]) # - trips.dtypes # Converting the time columns seconds into times. The days part should be ignored. for column3 in time_columns: trips[column3] = trips[column3].apply(pd.to_timedelta, unit='s') trips[column3] = trips[column3]- pd.to_timedelta(trips[column3].dt.days, unit='d') trips.head(10) trips_sorted = trips.sort_values(by='TRIPID') trips_sorted.head(50) trip77a = trips_sorted.loc[trips_sorted['LINEID']=='77A'] trip77a = trip77a.loc[trips_sorted['DAYOFSERVICE']=='2018-01-03'] trip77a = trip77a.sort_values(by='PLANNEDTIME_ARR') trip77a.head(50) # ## Note about this specific route # In this route, we took the route 77a from a date (January 3, 2018) and sorted it by PLANNEDTIME_ARR. It has a total of 101 rows. The first few values of PLANNEDTIME_ARR, these may be the last few trips that ran that day and it was just sorted as the top? In any case, We see that they provide data for each route for one day with all of the times. It is a matter of matching it with the leavetimes dataset then. This will be explored at the very end. # ## Duplicates # Exploring if the trips dataset has any duplicates. There shouldn't be duplicates for any reason. The trips only siginify one route. # # We see there aren't any duplicates so this is a good sign for the data. 
# keep=False flags every member of a duplicated group, not just the repeats,
# so the count below is the total number of rows involved in duplication.
dup_mask = trips.duplicated(keep=False)
duplicates = trips[dup_mask]
print("Number of rows that are duplicates: ", duplicates.shape[0])

# ## Checking descriptive columns
trips[datetime].describe(datetime_is_numeric=True).T
trips[time_columns].describe().T
trips[categorical_columns].describe().T

# <br><br>
# We see there a couple of features that have <= 1 cardinalities:
# * DATASOURCE has 1 unique value.
# * TRIPID has more than 600k unique values.
# * LINEID has 130 unique values.
# * ROUTEID has 588 unique values.
# * DIRECTION has 2 unique values.
# * BASIN has 1 unique values.
# * TENDERLOT has no unique values meaning that it is just null.
# * JUSTIFICATIONID has 4330 unique values.
# * NOTE has more than 46k unique values.
# * SUPPRESSED has 4333 unique values.
#
# **RESULT :** As we can see, DATASOURCE, BASIN, TENDERLOT and SUPPRESSED has 1 or 0 unique values. This means the same value is consistent throughout the dataset. Therefore, these will be dropped.
# <br><br>

# ## Logical Integrity Tests
# List of integrity tests to be performed:
# * Make sure LASTUPDATE dates doesn't occur before DAYSERVICE

# #### TEST 1: Ensure LASTUPDATE doesn't occur before DAYOFSERVICE
# A record whose last update predates its own day of service would be
# contradictory, so such rows are counted here.
stale_update_mask = trips['DAYOFSERVICE'].gt(trips['LASTUPDATE'])
test1 = trips[stale_update_mask]
print("The number of rows that have dates that occur before DAYOFSERVICE from LASTUPDATES is: ", test1.shape[0])

# #### TEST 2: Ensure timetables match the specific route with PLANNEDTIME_DEP
# This is to check that the timetables match with all of the routes. We take a look at PLANNEDTIME_DEP to check this.
# + # Need to get a list of all of the routes and their timetables, then compare it with the routes # and their planned departure times # Make a dataframe with just LINEID and PLANNEDTIME_DEP and make a list of all of the routes, order them data = {'LINEID': trips['LINEID'], 'PLANNEDTIME_DEP': trips['PLANNEDTIME_DEP']} line_dep = pd.DataFrame(data) list_routes = list(line_dep.LINEID.unique()) list_routes = sorted(list_routes) # - trips_gtfs = pd.read_csv('/home/faye/Data-Analytics-CityRoute/Dublin_Bus_GTFS/17-07-2021/stop_times.txt', sep=',',error_bad_lines=False) trips_gtfs.head(20) # <br><br> # ### Process to get the times: # * Got all of the stop_sequence rows # * Reseted the index (did not need to because my previous plan was scraped) # * Made three lists: service, route, and direction. Service was to indicate if it's weekday,weekend (see calendar.txt for more info), route indicates which route number, direction is for inbound and outbound (I/O) # * looped through the trips_id # * split the column value by . # * split the 2nd value by - # * added first value to service # * added first value of the 2nd value split to route # * added fourth value to direction # # **Extra info:** # * I parsed this from the 2021 dataset. Now, some route_id won't match with the 2018 data because there are routes that are called H3 that used to be old routes. They just changed the name. I was told it was changed because the new company that owns Dublin Bus now wants to change the system again but it was halted for reasons. So for example H3 used to be 29A (I think). 
# <br><br> # Store the stop_sequence = 1 first trips_1 = trips_gtfs.loc[trips_gtfs['stop_sequence']==1] # Resetting the index for this new dataframe trips_1 = trips_1.reset_index(drop=True) trips_1.head(5) # + # Making a list for calendar, route number, inbound/outbound serviceid = [] routeid = [] direction = [] for value in trips_1.trip_id: id_ = value.split(".") id_2 = id_[2].split("-") serviceid += [id_[1]] routeid += [id_2[1]] direction += [id_[4]] # - trips_1['service_id'] = np.array(serviceid) trips_1['route_id'] = np.array(routeid) trips_1['direction'] = np.array(direction) trips_2 = trips_1 # trips_duplicate = trips_2[trips_2.duplicated(keep=False)] trips_2 = pd.DataFrame({'departure_time': trips_2['departure_time'], 'service_id': trips_2['service_id'], 'route_id': trips_2['route_id'], 'stop_headsign': trips_2['stop_headsign'], 'direction': trips_2['direction']}) trips_2[trips_2.duplicated(keep=False)] trips_2 = trips_2.drop_duplicates() trips_2 trips_2.loc[trips_2['route_id']=='H1'].head(50) # <br><br> # I decided against going through this local integrity test. Because there are new replacements for some of the routes, we won't be able to match them side by side. After looking at this website: https://www.transportforireland.ie/h-spine/ , it's become apparent that the departure times will be different. I will, however replace H1 > 29A, H2> 32. H3 > 31/31A. They changed the timetables so it wouldn't be possible to perform this test. It also looks like H9 is a completely new route. # # I will save the new dataset for anyone who wants to use it. This will be done in the data quality plan. 
# <br><br> # ## Vehicles # *Exploring the data* # <br><br> vehicles.head(10) vehicles.tail(10) vehicles.shape vehicles.dtypes vehicles.isnull().sum() vehicles.nunique() # Creating columns for different datatypes categorical_column = ['DATASOURCE', 'VEHICLEID'] time_columns = ['MINUTES'] numerical_columns = ['DISTANCE'] datetime_columns = ['DAYOFSERVICE', 'LASTUPDATE'] continuous_columns = ['MINUTES', 'DISTANCE', 'DAYOFSERVICE', 'LASTUPDATE'] # + # Converting objects into categorical types for column in categorical_column: vehicles[column] = vehicles[column].astype('category') # Converting objects into datetime series for column2 in datetime_columns: vehicles[column2] = pd.to_datetime(vehicles[column2]) # Converting the time columns seconds into times. The days part should be ignored. for column3 in time_columns: vehicles[column3] = vehicles[column3].apply(pd.to_timedelta, unit='s') vehicles[column3] = vehicles[column3]- pd.to_timedelta(vehicles[column3].dt.days, unit='d') # - vehicles.head(10) vehicles.dtypes # ## Duplicates # Exploring if the trips dataset has any duplicates. There shouldn't be duplicates for any reason. The trips only siginify one route. # # Again, there are no duplicates in this vehicles dataset. duplicates = vehicles[vehicles.duplicated(keep=False)] print("Number of rows that are duplicates: ", duplicates.shape[0]) # ## Checking descriptive columns vehicles[datetime].describe(datetime_is_numeric=True).T vehicles[categorical_column].describe().T vehicles[time_columns].describe().T vehicles[numerical_columns].describe().T # We see there a couple of features that have <= 1 cardinalities: # * DATASOURCE has 1 unique value. # * NOTE has 0 unique values and we see that 100% of the feature's values is missing. # # **RESULT :** These two features will be investigated further. However, they will be dropped as constant features do no good for the targeted feature. NOTE has no unique values and has 100% missing values so this will most likely be dropped. 
# ## Logical Integrity Tests # # List of integrity tests to be performed: # * Make sure LASTUPDATE dates doesn't occur before DAYSERVICE # * Ensure that the dates match with the missing dates from the trips table # **Test 1: Ensuring LASTUPDATE dates doesn't occur before DAYOFSERVICE.** # # As we can see, there are no dates that occur before DAYOFSERVICE meaning it is a good indication and there are no alterations to be done. test1 = vehicles[vehicles['DAYOFSERVICE'] > vehicles['LASTUPDATE']] print("The number of rows that have dates that occur before DAYOFSERVICE from LASTUPDATES is: ", test1.shape[0]) # **Test 2: Ensure that the dates match with the missing dates from the trips table.** # # As we can see, the dates that are missing from the vehicles table match with the missing dates from the trips table. This is a good sign. Therefore, no further actions are to be done. # daysofservice = trips['DAYOFSERVICE'] daysofservice = daysofservice.sort_values() daysofservice_vec = vehicles['DAYOFSERVICE'] daysofservice_vec = daysofservice_vec.sort_values() pd.date_range(start = '2018-01-01', end = '2018-12-31' ).difference(daysofservice_vec) pd.date_range(start = '2018-01-01', end = '2018-12-31' ).difference(daysofservice) vehicles['DISTANCE'].sum() # Interesting observation here. I'm not sure if talked about before but it's intersting to see the total distance is so high. The documentation states that it either represents km/miles but the number is too high when added together. But if you treat the total sum as metres then convert it to km then it will be around 48 million km which is close to the 2019 data Brian mentioned in the presentation. Worth discussing over I feel. 
# <br><br>
# # Data Visualizations
#
# ## Trips

# +
# Plot histograms for continuous features
# trips[datetime].bar(layout=(2, 2), figsize=(30,30), bins=12)
# plt.xlabel("Time (seconds)")
# plt.show()
# plt.plot_date(trips[datetime], trips[datetime])
# -

# ## Vehicles

# Plot histograms for continuous features
vehicles[continuous_columns].hist(layout=(2, 2), figsize=(30,30), bins=12)
plt.xlabel("Time (seconds)")
plt.show()

# # Data Quality Report
#
# ## Trips
# **Feature - &emsp;Issue &emsp; - Solution**
# <br>
# * DATASOURCE - constant column - remove feature
# <br>
# * DAYOFSERVICE - 5 days from 365 days - investigate if the missing 5 days are holidays
# <br>
# * TRIPID - None detected - no action needed
# <br>
# * LINEID - None detected - no action needed
# <br>
# * ROUTEID - None detected - no action needed
# <br>
# * DIRECTION - None detected - no action needed
# <br>
# * PLANNEDTIME_DEP - None detected - no action needed
# <br>
# * PLANNEDTIME_ARR - None detected - no action needed
# <br>
# * ACTUALTIME_DEP - 6% missing values - investigate and find average and impute
# <br>
# * ACTUALTIME_ARR - 7% missing values - investigate and find average and impute
# <br>
# * BASIN - constant column - remove feature
# <br>
# * TENDERLOT - 100% missing values - investigate and remove feature
# <br>
# * SUPPRESSED - 99% missing values - investigate and remove feature
# <br>
# * JUSTIFICATIONID - 99% missing values - investigate and remove feature
# <br>
# * LASTUPDATE - None detected - no action needed
# <br>
# * NOTE - None detected - investigate
# <br>
# * Trips from GTFS feed - nonexisting routes on old timetable - insert new features and finalize dataset
# <br><br>
# ## Vehicles
# **Feature - &emsp;Issue &emsp; - Solution**
# <br>
# * DATASOURCE - constant column - remove feature
# <br>
# * DAYOFSERVICE - None detected - no action needed
# <br>
# * VEHICLEID - None detected - no action needed
# <br>
# * DISTANCE - Discuss with the team about metrics to be used - no action needed
# <br>
# * MINUTES - None detected - no action needed
# <br>
# * LASTUPDATE - None detected - no action needed
# <br>
# * NOTE - Missing values - investigate then remove feature
# <br><br>

# # Data Quality Plan
#
# In this section I will be investigating the issues that were found in each feature (see data quality report) and the appropriate actions to be taken.
#
# ## Trips
# **DATASOURCE:** The feature only has one unique value which is DB. Since we know that the source is constant throughout, we will remove this feature from the dataframe.
trips.pop('DATASOURCE')
trips.head(5)

# **DAYOFSERVICE:** We see that there are 5 days that aren't accounted for. Let us see what dates that weren't included.
#
# The dates that were not included:
# * March 1 2018
# * March 2 2018
# * December 9 2018
# * December 10 2018
# * December 25 2018
#
# This just means that there were no trips planned for this day. Will it affect the prediction model? Probably not.
daysofservice = trips['DAYOFSERVICE']
daysofservice = daysofservice.sort_values()

# +
#daysofservice.unique()
# -

pd.date_range(start = '2018-01-01', end = '2018-12-31' ).difference(daysofservice)

# <br><br>
# **ACTUALTIME_DEP:** We will investigate the 6% values missing. First, we will make a dataframe that only have the rows that have ACTUALTIME_DEP missing.
#
# It is important that these missing values are dealt with. In order to do that, leavetimes table will be loaded. I'm going to try and load an SQL query. The query is: "For every TRIPID, load the maximum PROGRNUMBER and return the rows"
#
# We see here at the first 60 rows it is unusual that we only see PROGRNUMBER. It looks as if might be a mistake. However after investigating there are some trips that only had one stop and there were no further records.
#
# Then, we will make a seperate dataframe that will only have rows of the tripid's from leavetimes table. Then, replace trips' actualtime_dep with leavetimes' actualtime_dep.
# The dataframe should match
# <br><br>

# Rows still missing an actual departure time, plus parallel lists of their
# (day of service, trip id) pairs used to match against leavetimes.
actual_dep = trips[trips['ACTUALTIME_DEP'].isna()]
tripday = [[day for day in actual_dep.DAYOFSERVICE], [id_ for id_ in actual_dep.TRIPID]]

# Earlier (abandoned) attempts at the imputation, kept for reference:
# #Converting to timedelta
# leave_times['PLANNEDTIME_ARR'] = leave_times['PLANNEDTIME_ARR'].apply(pd.to_timedelta, unit='s')
# leave_times['PLANNEDTIME_DEP'] = leave_times['PLANNEDTIME_DEP'].apply(pd.to_timedelta, unit='s')
# leave_times['ACTUALTIME_DEP'] = leave_times['ACTUALTIME_DEP'].apply(pd.to_timedelta, unit='s')
# leave_times.shape
# #Making an empty dataframe
# leavetrips = pd.DataFrame()
# #df.append(leavetimes[leavetimes['TRIPID'].isin(tripday[1])])
# leave_times['DAYOFSERVICE'] = pd.to_datetime(leave_times['DAYOFSERVICE'])
# leavetimes = leave_times[(leave_times['TRIPID'].isin(tripday[1]) & (leave_times['DAYOFSERVICE'].isin(tripday[0]))].sort_values(by='PROGRNUMBER')
# #leavetimes = leave_times.sort_values(by='PROGRNUMBER')
# leave = leave_times.groupby(['TRIPID'])
# leave = leave.tail(1)
# leavetrips = leavetrips.append(leave)
# leave = leavetimes[(leavetimes['TRIPID'].isin(tripday[1])) & (leavetimes['DAYOFSERVICE'].isin(tripday[0]))].sort_values(by='PROGRNUMBER')
# leavetrips = leavetrips.append(leave)
# leavetrips.shape
# actual_dep.shape
# actual_dep = actual_dep.sort_values(by='TRIPID')
# leavetrips = leavetrips.sort_values(by='TRIPID')
# actual_dep.head(10)
# leaveday = [[day for day in lmeavetrips.DAYOFSERVICE], [id_ for id_ in leavetrips.TRIPID]]
# x = pd.DataFrame()
# leavetrips.head(10)
# for val in range(len(leaveday[0])):
#     row = actual_dep.loc[(actual_dep['DAYOFSERVICE']==tripday[0][val]) & (actual_dep['TRIPID']==tripday[1][val])]
#     x = x.append(row)
# nonmissing_dep = [value for value in leavetrips.ACTUALTIME_DEP]
# x['ACTUALTIME_DEP'] = nonmissing_dep
# for val in range(len(leaveday[0])):
#     row = actual_dep.loc[(actual_dep['DAYOFSERVICE']==tripday[0][val]) & (actual_dep['TRIPID']==tripday[1][val])]
#     row['ACTUALTIME_DEP'] =

# Accumulator for the leavetimes rows matching the missing-departure trips.
leavetrips = pd.DataFrame()
# Align leavetimes' types with trips before matching.
leavetimes['DAYOFSERVICE'] = pd.to_datetime(leavetimes['DAYOFSERVICE'])
leavetimes['ACTUALTIME_DEP'] = leavetimes['ACTUALTIME_DEP'].apply(pd.to_timedelta, unit='s')

# Leavetimes rows belonging to the (day, trip) pairs with a missing departure.
leave = leavetimes[(leavetimes['TRIPID'].isin(tripday[1])) & (leavetimes['DAYOFSERVICE'].isin(tripday[0]))]
leavetrips = leavetrips.append(leave)

# leavetrips['DAYOFSERVICE'] = pd.to_datetime(leavetrips['DAYOFSERVICE'])
# leavetrips['ACTUALTIME_DEP'] = leavetrips['ACTUALTIME_DEP'].apply(pd.to_timedelta, unit='s')
leavetrips

# Parallel lists: day of service, trip id and the replacement departure time.
leaveday = [[day for day in leavetrips.DAYOFSERVICE], [id_ for id_ in leavetrips.TRIPID], [time for time in leavetrips.ACTUALTIME_DEP]]
leavetrips = leavetrips.sort_values(by='TRIPID')

# Impute each missing ACTUALTIME_DEP from the matching leavetimes row.
# NOTE(review): trips['ACTUALTIME_DEP'][missing] = ... is chained assignment
# and may trigger pandas' SettingWithCopy behaviour -- consider
# trips.loc[missing, 'ACTUALTIME_DEP'] = ... instead.
for n in range(len(leaveday[0])):
    missing = trips['ACTUALTIME_DEP'].loc[(trips['DAYOFSERVICE']==leaveday[0][n]) & (trips['TRIPID']==leaveday[1][n])].index.item()
    trips['ACTUALTIME_DEP'][missing] = leaveday[2][n]

trips.loc[(trips['DAYOFSERVICE']==leaveday[0][0]) & (trips['TRIPID']==leaveday[1][0])].index.item()
leavetrips.dtypes

# for val in range(len(tripday[0])):
#     row = leavetimes.loc[(leavetimes['DAYOFSERVICE']==tripday[0][val]) & (leavetimes['TRIPID']==tripday[1][val])].sort_values(by='PROGRNUMBER').iloc[[-1]]
#     df = df.append(row)

# **ACTUALTIME_ARR - TO DO**
# Same imputation approach, this time for the missing actual arrival times.
actual_arr = trips[trips['ACTUALTIME_ARR'].isna()]
arr_day = [[day for day in actual_arr.DAYOFSERVICE], [id_ for id_ in actual_arr.TRIPID]]
trips['ACTUALTIME_ARR'] = trips['ACTUALTIME_ARR'].apply(pd.to_timedelta, unit='s')
arr_trip_leave = pd.DataFrame()
leave = leavetimes[(leavetimes['TRIPID'].isin(arr_day[1])) & (leavetimes['DAYOFSERVICE'].isin(arr_day[0]))].sort_values(by='PROGRNUMBER')
arr_trip_leave = arr_trip_leave.append(leave)
arr_trip_leave['DAYOFSERVICE'] = pd.to_datetime(arr_trip_leave['DAYOFSERVICE'])
arr_trip_leave['ACTUALTIME_DEP'] = arr_trip_leave['ACTUALTIME_DEP'].apply(pd.to_timedelta, unit='s')
leave_arr = [[day for day in arr_trip_leave.DAYOFSERVICE], [id_ for id_ in arr_trip_leave.TRIPID], [time for time in arr_trip_leave.ACTUALTIME_ARR]]
arr_trip_leave = arr_trip_leave.sort_values(by='TRIPID')

# NOTE(review): unlike the loop above, this uses Series.replace, which
# replaces EVERY occurrence of the looked-up value (e.g. every NaT), not just
# the single missing row -- confirm this is intended.
for n in range(len(leave_arr[0])):
    missing = trips['ACTUALTIME_ARR'].loc[(trips['DAYOFSERVICE']==leave_arr[0][n]) & (trips['TRIPID']==leave_arr[1][n])].index.item()
    trips['ACTUALTIME_ARR'] = trips['ACTUALTIME_ARR'].replace(trips['ACTUALTIME_ARR'][missing],leave_arr[2][n])

# <br><br>**BASIN:** This was considered a constant column and so this feature will be dropped. After investigating it from data understanding, having a constant column would not affect the prediction model as it only has one value throughout. Therefore, it will be dropped.<br><br>
trips.pop('BASIN')
trips.head(1)

# <br><br>**TENDERLOT:** Since 100% of TENDERLOT values are missing it will most likely be dropped. From the documentation, trying to relate tender lot to business terms. This may refer to whether or not the trip is paid for a service. Makes sense that it's hidden for privacy if this is the case. It doesn't relate much to the project.<br><br>
trips.pop('TENDERLOT')
trips.head(1)

# <br><br>**SUPPRESSED:** 99% of SUPPRESSED values are missing. It doesn't look like it gives any meaningful data. Not sure why it has to be suppressed. From the documentation, it looks like the NaN values just means it's partially suppressed which may explain TENDERLOT feature being 100% missing because of personal information. Like TENDERLOT, it doesn't relate much to the project and so, it will be dropped.<br><br>
trips.pop('SUPPRESSED')
trips.head(1)

# <br><br>**JUSTIFICATIONID:** From the documentation, it looks as if the JUSTIFICATIONID is a primary key that is used to link to another table that we are not given, that represents some kind of fault code. Since the majority of the value is missing, the relevancy of the feature is probably not needed for the model. <br><br>
trips.pop('JUSTIFICATIONID')
trips.head(1)

# <br><br>**NOTE:** I will investigate the meaning of the numbers.
# # It seems like the documentation states that it means free note. Meaning that the numbers could be entirely unrelated to our project. For this reason, <br><br> trips.dtypes # Convert NOTE to type int64 trips['NOTE'].nunique() trips.pop('NOTE') trips.head(1) # <br><br> # ## Vehicles # <br><br> # **DATASOURCE:** This feature will be removed as it is a constant column. # <br><br> vehicles.pop('DATASOURCE') vehicles.head(1) # <br><br> # **NOTE:** As seen while investigating the null values that 100% NOTE was missing. This will be re confirmed. # # As we can see, 100% of the values is missing. As the documentation states that it is "Free note" I do not see the significance of keeping the feature. It doesn't look like it will be particularly useful for predicting the journey time. # <br><br> vehicles['NOTE'].isnull().sum() vehicles.shape[0] vehicles.pop('NOTE') vehicles.head(1) # ## Last checks then save into # ### Vehicles vehicles.shape vehicles.isnull().sum() vehicles.nunique() vehicles.dtypes vehicles_2018_cleaned_dataset = vehicles.to_csv("/home/faye/data/vehicles_2018_cleaned_dataset", index=False) # ### Trips trips.shape trips.isnull().sum() trips.nunique() trips.dtypes trips_2018_cleaned_dataset = trips.to_csv("/home/faye/data/trips_2018_cleaned_dataset", index=False)
Preparation/Faye-Data_Quality_Plan_&_Report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

#
# (c) <NAME> <br />
# Team Neighborhood Change <br />
# Last Updated 9/7/2016 <br />
#
# # NOTEBOOK: FEATURE SELECTION & MODEL SELECTION-IMPUTER METHOD-MEAN
# * This is to chose the best feature selecton method and model selection method
#
# ## DEPENDENCIES
# indicators.csv -- which has a list of all features from the PostgreSQL database

# +
import pandas as pd
import os
import csv
import xlrd
import numpy as np
import matplotlib
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
from __future__ import print_function
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.preprocessing import Imputer
from sklearn import preprocessing
matplotlib.style.use('ggplot')
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
## http://stats.stackexchange.com/questions/89809/is-it-important-to-scale-data-before-clustering
# above says that scaling makes for better
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# %matplotlib inline
from sklearn.cluster import AgglomerativeClustering
import warnings
# -

# ## Part 1:
# * read indicators csv into pandas dataframe
# * remove unnecessary columns
# * convert Nans into blank space
# * imputed missing data using imputer using means from scikitlearn
#
df = pd.read_csv('indicators.csv')

# NOTE(review): convert_objects and Imputer are from the Python 2 / early
# scikit-learn era; in modern versions these are pd.to_numeric and
# SimpleImputer respectively.
indicators = df[['cbsa', 'msa_name', 'violent_crime_rate', 'murder_manslaughter',
                 'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary',
                 'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent',
                 'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014',
                 'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income',
                 'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women',
                 'population_percent_of_single_men', 'population_percent_of_single_women', 'population',
                 'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10',
                 'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt',
                 'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate',
                 'employment', 'laborforce']]

## all Nans to white space
indicators = indicators.replace(np.nan,' ', regex=True)
##convert to all floats
indicators = indicators.convert_objects(convert_numeric=True)
indicators.columns = ['cbsa', 'msa_name', 'violent_crime_rate', 'murder_manslaughter',
                      'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary',
                      'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent',
                      'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014',
                      'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income',
                      'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women',
                      'population_percent_of_single_men', 'population_percent_of_single_women', 'population',
                      'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10',
                      'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt',
                      'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate',
                      'employment', 'laborforce']

# +
# NOTE(review): featuresx is selected from the raw df, not from the cleaned
# `indicators` frame built above -- confirm that is intended.
featuresx = df[['violent_crime_rate', 'murder_manslaughter',
                'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary',
                'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent',
                'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014',
                'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income',
                'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women',
                'population_percent_of_single_men', 'population_percent_of_single_women', 'population',
                'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10',
                'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt',
                'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate',
                'employment', 'laborforce']]
labelsx = df["cbsa"]
# -

## all Nans to white space
featuresx = featuresx.replace(np.nan,' ', regex=True)
featuresx = featuresx.replace(np.nan,'NaN', regex=True)
##convert to all floats
featuresx = featuresx.convert_objects(convert_numeric=True)
##imputer: fill remaining NaNs with the column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
featimpx = imp.fit_transform(featuresx)
# fit_transform returns a bare ndarray, so the column names are reattached.
featuresx = pd.DataFrame(featimpx)
featuresx.columns = ['violent_crime_rate', 'murder_manslaughter',
                     'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary',
                     'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent',
                     'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014',
                     'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income',
                     'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women',
                     'population_percent_of_single_men', 'population_percent_of_single_women', 'population',
                     'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10',
                     'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt',
                     'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate',
                     'employment', 'laborforce']
featuresx.head()
len(featuresx.columns)

# # Part 2:
# * Unscaled data
# * Loop through K-Means Clustering, K-Means MiniBatch, and then Aggomerative Clustering with linkage default ward.
# * Features used were subset through Lasso, Ridge and ElasticNet

# +
print ('UNSCALED DATA')
range_n_clusters = range(4,10)
feat_models = [Lasso(), Ridge(), ElasticNet()]
# For each linear model, keep the features SelectFromModel retains, then
# benchmark clustering quality metrics over a range of cluster counts.
for feat_model in feat_models:
    print(20 * '*')
    print ('Modeling used for feature selection : %9s' % (feat_model))
    print(20 * '*')
    model = feat_model
    sfm = SelectFromModel(model)
    sfm.fit(featuresx, labelsx)
    feat_names = (list(featuresx[sfm.get_support(indices=True)]))
    print (list(featuresx[sfm.get_support(indices=True)]))
    features = featuresx[feat_names]
    features = features.values
    for n_clusters in range_n_clusters:
        print ('number of clusters: %d' % (n_clusters))
        print(69 * '_')
        print('% 9s' % 'name' ' time inertia homo compl v-meas ARI AMI silhouette')
        def bench_clustering(estimator, name, data):
            # Fit one clustering estimator and print timing plus the usual
            # external/internal validation scores; estimators without an
            # inertia_ attribute take the AttributeError branch.
            t0 = time()
            estimator.fit(data)
            try:
                print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
                      % (name, (time() - t0), estimator.inertia_,
                         metrics.homogeneity_score(labelsx, estimator.labels_),
                         metrics.completeness_score(labelsx, estimator.labels_),
                         metrics.v_measure_score(labelsx, estimator.labels_),
                         metrics.adjusted_rand_score(labelsx, estimator.labels_),
                         metrics.adjusted_mutual_info_score(labelsx, estimator.labels_),
                         metrics.silhouette_score(data, estimator.labels_)))
            except AttributeError:
                print('% 9s %.2fs %9s %.3f %.3f %.3f %.3f %.3f %.3f'
                      % (name, (time() - t0), "Na",
                         metrics.homogeneity_score(labelsx, estimator.labels_),
                         metrics.completeness_score(labelsx, estimator.labels_),
                         metrics.v_measure_score(labelsx, estimator.labels_),
                         metrics.adjusted_rand_score(labelsx, estimator.labels_),
                         metrics.adjusted_mutual_info_score(labelsx, estimator.labels_),
                         metrics.silhouette_score(data, estimator.labels_)))
bench_clustering(KMeans(init='k-means++', n_clusters=n_clusters, n_init=12), name="k-means", data=features) with warnings.catch_warnings(): warnings.simplefilter("ignore") bench_clustering(MiniBatchKMeans(init='k-means++', n_clusters=n_clusters, n_init=12,max_no_improvement=10, verbose=0, random_state=0), name="minibatchk-means", data=features) bench_clustering(AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'), name="Ward", data=features) print(69 * '_') # - # # Part 2: # * Features ran through Standard Scaler # * Loop through K-Means Clustering, K-Means MiniBatch, and then Aggomerative Clustering with linkage default ward. # * Features used were subset through Lasso, Ridge and ElasticNet # + print (20 * '*') print (20 * '*') print ('DATA RAN THROUGH STANDARD SCALER') print (20 * '*') print (20 * '*') ##################### ## STANDARD SCALE ## ################### standard_scaler = preprocessing.StandardScaler() stdscaledfeat= standard_scaler.fit_transform(featuresx) ## turns into scaled array stdscaledfeatures = pd.DataFrame(stdscaledfeat) ## make array pd again stdscaledfeatures.columns = ['violent_crime_rate', 'murder_manslaughter', 'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary', 'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent', 'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014', 'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income', 'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women', 'population_percent_of_single_men', 'population_percent_of_single_women', 'population', 'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10', 'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt', 'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate', 'employment', 'laborforce'] 
################################################### # standard scaled features variable is stdscaledfatures ##################################################### range_n_clusters = range(4,10) feat_models = [Lasso(), Ridge(), ElasticNet()] for feat_model in feat_models: print(20 * '*') print ('Modeling used for feature selection : %9s' % (feat_model)) print(20 * '*') model = feat_model sfm = SelectFromModel(model) sfm.fit(featuresx, labelsx) feat_names = (list(featuresx[sfm.get_support(indices=True)])) print (list(featuresx[sfm.get_support(indices=True)])) features = stdscaledfeatures[feat_names] features = features.values for n_clusters in range_n_clusters: print ('number of clusters: %d' % (n_clusters)) print(69 * '_') print('% 9s' % 'name' ' time inertia homo compl v-meas ARI AMI silhouette') def bench_clustering(estimator, name, data): t0 = time() estimator.fit(data) try: print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) except AttributeError: print('% 9s %.2fs %9s %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), "Na", metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) bench_clustering(KMeans(init='k-means++', n_clusters=n_clusters, n_init=12), name="k-means", data=features) with warnings.catch_warnings(): warnings.simplefilter("ignore") bench_clustering(MiniBatchKMeans(init='k-means++', 
n_clusters=n_clusters, n_init=12,max_no_improvement=10, verbose=0, random_state=0), name="minibatchk-means", data=features) bench_clustering(AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'), name="Ward", data=features) print(69 * '_') # - # # Part 3: # * Features ran through MinMax Scaler # * Loop through K-Means Clustering, K-Means MiniBatch, and then Aggomerative Clustering with linkage default ward. # * Features used were subset through Lasso, Ridge and ElasticNet # + print (20 * '*') print (20 * '*') print ('DATA RAN THROUGH MINMAX SCALE') print (20 * '*') print (20 * '*') ##################### ## STANDARD SCALE ## ################### minmaxscaler = preprocessing.MinMaxScaler() featz= minmaxscaler.fit_transform(featuresx) ## turns into scaled array mmscaledfeatures = pd.DataFrame(featz) ## make array pd again mmscaledfeatures.columns = ['violent_crime_rate', 'murder_manslaughter', 'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary', 'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent', 'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014', 'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income', 'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women', 'population_percent_of_single_men', 'population_percent_of_single_women', 'population', 'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10', 'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt', 'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate', 'employment', 'laborforce'] ################################################### # standard scaled features variable is stdscaledfatures ##################################################### range_n_clusters = [4,5,6,7,8,9] feat_models = [Lasso(), Ridge(), ElasticNet()] for feat_model in feat_models: print(20 * '*') 
print ('Modeling used for feature selection : %9s' % (feat_model)) print(20 * '*') model = feat_model sfm = SelectFromModel(model) sfm.fit(featuresx, labelsx) feat_names = (list(featuresx[sfm.get_support(indices=True)])) print (list(featuresx[sfm.get_support(indices=True)])) features = mmscaledfeatures[feat_names] features = features.values for n_clusters in range_n_clusters: print ('number of clusters: %d' % (n_clusters)) print(69 * '_') print('% 9s' % 'name' ' time inertia homo compl v-meas ARI AMI silhouette') def bench_clustering(estimator, name, data): t0 = time() estimator.fit(data) try: print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) except AttributeError: print('% 9s %.2fs %9s %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), "Na", metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) bench_clustering(KMeans(init='k-means++', n_clusters=n_clusters, n_init=12), name="k-means", data=features) with warnings.catch_warnings(): warnings.simplefilter("ignore") bench_clustering(MiniBatchKMeans(init='k-means++', n_clusters=n_clusters, n_init=12,max_no_improvement=10, verbose=0, random_state=0), name="minibatchk-means", data=features) bench_clustering(AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'), name="Ward", data=features) print(69 * '_') # - featuresx.head() # # Part 4: # * Features ran 
through PCA (Principal Component Analysis) # * Used Unscaled Data, Data ran through Standard Scaler and Min Max Scaler # * Loop through K-Means Clustering, K-Means MiniBatch, and then Aggomerative Clustering with linkage default ward. # * Features used were subset through Lasso, Ridge and ElasticNet # + from sklearn.decomposition import PCA from sklearn.cluster import AgglomerativeClustering print (20 * '*') print (20 * '*') print ('DATA RAN THROUGH PCA') print (20 * '*') print (20 * '*') ############### ## PCA SCALE## ############### pca = PCA(n_components = 21) ## half of the features + 1 this is the standard pca_features = pca.fit(featuresx).transform(featuresx) #pca_features = pd.DataFrame(pca_feat) #pca.columns = ['violent_crime_rate', 'murder_manslaughter', 'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary', 'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent', 'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014', 'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income', 'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women', 'population_percent_of_single_men', 'population_percent_of_single_women', 'population', 'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10', 'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt', 'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate', 'employment', 'laborforce'] ################ # pca_features ################ print (20 * '*') print ('pca unscaled data') print (20 * '*') range_n_clusters = [4,5,6,7,8,9] features = pca_features for n_clusters in range_n_clusters: print ('number of clusters: %d' % (n_clusters)) print(69 * '_') print('% 9s' % 'name' ' time inertia homo compl v-meas ARI AMI silhouette') def bench_clustering(estimator, name, data): t0 = time() estimator.fit(data) try: 
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) except AttributeError: print('% 9s %.2fs %9s %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), "Na", metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) bench_clustering(KMeans(init='k-means++', n_clusters=n_clusters, n_init=12), name="k-means", data=features) with warnings.catch_warnings(): warnings.simplefilter("ignore") bench_clustering(MiniBatchKMeans(init='k-means++', n_clusters=n_clusters, n_init=12,max_no_improvement=10, verbose=0, random_state=0), name="minibatchk-means", data=features) bench_clustering(AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'), name="Ward", data=features) print(69 * '_') feat_scales = [StandardScaler(), MinMaxScaler()] for feat_scale in feat_scales: print (20 * '*') print ('Scaled data for PCA : yes : %s' % (feat_scale)) print (20 * '*') ## still use featurex is because we are scaling data for feature modeling only stdfeat = feat_scale.fit_transform(featuresx) ## turns into scaled array stdfeatures = pd.DataFrame(stdfeat) ## make array pd again pca = PCA(n_components = 2) pca_stdfeatures = pca.fit(stdfeatures).transform(stdfeatures) #pca_features = pd.DataFrame(pca_feat) #pca.columns = ['violent_crime_rate', 'murder_manslaughter', 'rape', 'robbery', 'aggravated_assault', 'property_crime_rate', 'burglary', 
'larceny_theft', 'motor_vehicle_theft', 'total_crime_rate', 'median_gross_rent', 'median_monthly_mortgage', 'rent_burden', 'mortgage_burden', 'income_change_2012_to_2014', 'median_age_of_men', 'median_age_of_women', 'median_age', 'median_household_income', 'single_men_population', 'single_women_population', 'ratio_of_single_men_to_single_women', 'population_percent_of_single_men', 'population_percent_of_single_women', 'population', 'edu_average_scale_score', 'pct_laccess_pop10', 'pct_laccess_lowi10', 'pct_laccess_child10', 'pct_laccess_seniors10', 'pct_laccess_hhnv10', 'event_mpmt', 'fatalities_mpmt', 'injuries_mpmt', 'walk_score', 'transit_score', 'bike_score', 'unemploymentrate', 'employment', 'laborforce'] features = pca_stdfeatures for n_clusters in range_n_clusters: print ('number of clusters: %d' % (n_clusters)) print(69 * '_') print('% 9s' % 'name' ' time inertia homo compl v-meas ARI AMI silhouette') def bench_clustering(estimator, name, data): t0 = time() estimator.fit(data) try: print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) except AttributeError: print('% 9s %.2fs %9s %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), "Na", metrics.homogeneity_score(labelsx, estimator.labels_), metrics.completeness_score(labelsx, estimator.labels_), metrics.v_measure_score(labelsx, estimator.labels_), metrics.adjusted_rand_score(labelsx, estimator.labels_), metrics.adjusted_mutual_info_score(labelsx, estimator.labels_), metrics.silhouette_score(data, estimator.labels_))) bench_clustering(KMeans(init='k-means++', n_clusters=n_clusters, n_init=12), name="k-means", data=features) with 
warnings.catch_warnings(): warnings.simplefilter("ignore") bench_clustering(MiniBatchKMeans(init='k-means++', n_clusters=n_clusters, n_init=12,max_no_improvement=10, verbose=0, random_state=0), name="minibatchk-means", data=features) bench_clustering(AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'), name="Ward", data=features) print(69 * '_') # - #
ml-application/ml-unsupervised-part2b-feat-model-sel-imputermean.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.5.4
#     language: julia
#     name: julia-1.5
# ---

# ## Construct grids
#
# Build a staggered (MAC-style) grid on [0,Lx] x [0,Ly]: pressure values at
# cell centres, u on the interior vertical faces, v on the interior
# horizontal faces, then pack everything into flat index/value vectors.

using LinearAlgebra
using SparseArrays
using Plots
pyplot()

# +
nx = 100           # number of cells in x
ny = 100           # number of cells in y
Lx = 1             # domain length in x
Ly = 1             # domain length in y
dx = Lx/nx         # cell width
dy = Ly/ny         # cell height
cuti = (nx-1)*ny   # number of u unknowns = offset where the v block starts in the packed vector
# -

# Pressure nodes: cell centres.
px = range(0+0.5Lx/nx,Lx-0.5Lx/nx,length=nx)'   # note ': this is a row vector
py = range(0+0.5Ly/ny,Ly-0.5Ly/ny,length=ny)
pvalue = @. sin(2*pi*px) * cos(2*pi*py);

# +
# u nodes: interior vertical faces -> (ny rows) x (nx-1 columns) after broadcast.
ux = range(0+Lx/nx,Lx-Lx/nx,length=nx-1)'
uy = range(0+0.5Ly/ny,Ly-0.5Ly/ny,length=ny)
uvalue = @. sin(2pi*ux) * sin(2pi*uy);
# uvalue = @. sin(2pi*ux) + 0*sin(2pi*uy);
# -

# v nodes: interior horizontal faces -> (ny-1 rows) x (nx columns).
vx = range(0+0.5Lx/nx,Lx-0.5Lx/nx,length=nx)'   # note ': this is a row vector
vy = range(0+Ly/ny,Ly-Ly/ny,length=ny-1)
vvalue= @. sin(2*pi*vx) * sin(2*pi*vy);
# vvalue= @. 0*vx + sin(2*pi*vy)

# Global index maps: entry (j,i) of p/u/v holds that node's position in the
# packed unknown vector (u indices first, then v indices continue after them).
p = reshape([i for i = 1:length(pvalue[:])],ny,nx)
u = reshape([i for i = 1:length(uvalue[:])],ny,nx-1)
v = reshape([i for i = length(uvalue[:])+1:length(uvalue[:])+length(vvalue[:])],ny-1,nx);

# +
# Flatten the field values column-major so they line up with the index maps.
uvec = vcat(uvalue...)
vvec = vcat(vvalue...)
velovec = [uvec;vvec]   # packed velocity vector: [all u; all v]
pvec = vcat(pvalue...)
vec = [velovec;pvec];
# -

# Hand-worked sample of the x-momentum advection stencil at (j=5, i=1) on the
# left edge, used as a sanity check against the adv() result below.
i=1
j=5
u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
u_left = 0.5*(velovec[u[j,end]] + velovec[u[j,i]])
u_up = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
qq = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy

# Conservative advection terms on the staggered grid with second-order
# centred face averages:
#   x-momentum: d(uu)/dx + d(uv)/dy   written into q at the u indices
#   y-momentum: d(uv)/dx + d(vv)/dy   written into q at the v indices
# Interior nodes use neighbours directly; the edge/corner branches substitute
# either a wrapped index (u[j,end], u[1,i], v[j,end], v[j,1]) or a literal 0
# at a wall.  NOTE(review): this mix of periodic-looking wraps and zero
# values is preserved exactly as written -- confirm the intended boundary
# conditions before reuse.
function adv(velovec,ux,uy,vx,vy,u,v,nx,ny,dx,dy)
    q = deepcopy(0*velovec)   # result vector, same packed layout as velovec
    # --- x-momentum: one pass over every u node ---
    for i = 1:nx-1
        for j = 1:ny
            # interior u node
            if i != 1 && j != 1 && i != nx-1 && j != ny
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            # left edge: no u neighbour to the left (0 at the wall)
            if i == 1 && j != 1 && j != ny
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
                u_left = 0.5*(0 + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            # right edge: no u neighbour to the right
            if i == nx-1 && j != 1 && j != ny
                u_right = 0.5*(velovec[u[j,i]] + 0)
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            # bottom edge: u wraps to the last row, v_bottom is 0
            if j == 1 && i != 1 && i != nx-1
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_bottom = 0.5*(velovec[u[end,i]] + velovec[u[j,i]])
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_bottom = 0
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            # top edge: u wraps to the first row, v_up is 0
            if j == ny && i != 1 && i != nx-1
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[1,i]])
                u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
                v_up = 0
                v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            # four corners
            if j == 1 && i == 1
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
                u_left = 0.5*(0 + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[1,i]])
                u_bottom = 0.5*(velovec[u[end,i]] + velovec[u[j,i]])
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_bottom = 0
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            if j == 1 && i == nx-1
                u_right = 0.5*(velovec[u[j,i]] + 0)
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[1,i]])
                u_bottom = 0.5*(velovec[u[end,i]] + velovec[u[j,i]])
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_bottom = 0
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            if j == ny && i == 1
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j,i+1]])
                u_left = 0.5*(0 + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[1,i]])
                u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
                v_up = 0
                v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
            if j == ny && i == nx-1
                u_right = 0.5*(velovec[u[j,i]] + 0)
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j,i]])
                u_up = 0.5*(velovec[u[j,i]] + velovec[u[1,i]])
                u_bottom = 0.5*(velovec[u[j-1,i]] + velovec[u[j,i]])
                v_up = 0
                v_bottom = 0.5*(velovec[v[j-1,i]] + velovec[v[j-1,i+1]])
                q[u[j,i]] = (u_right*u_right - u_left*u_left)/dx + (u_up*v_up - u_bottom*v_bottom)/dy
            end
        end
    end
    # --- y-momentum: one pass over every v node ---
    for i = 1:nx
        for j = 1:ny-1
            # interior v node
            if i != 1 && j != 1 && i != nx && j != ny-1
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j+1,i]])
                v_bottom = 0.5*(velovec[v[j,i]] + velovec[v[j-1,i]])
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,i-1]])
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j+1,i-1]])
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            # left edge: v wraps to the last column, u_left is 0
            if i == 1 && j != 1 && j != ny-1
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j+1,i]])
                v_bottom = 0.5*(velovec[v[j,i]] + velovec[v[j-1,i]])
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,end]])
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_left = 0
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            # right edge: v wraps to the first column, u_right is 0
            if i == nx && j != 1 && j != ny-1
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j+1,i]])
                v_bottom = 0.5*(velovec[v[j,i]] + velovec[v[j-1,i]])
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,i-1]])
                u_right = 0
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j+1,i-1]])
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            # bottom edge: no v neighbour below (0 at the wall)
            if j == 1 && i != 1 && i != nx
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j+1,i]])
                v_bottom = 0.5*(velovec[v[j,i]] + 0)
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,i-1]])
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j+1,i-1]])
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            # top edge: no v neighbour above
            if j == ny-1 && i != 1 && i != nx
                v_up = 0.5*(velovec[v[j,i]] + 0)
                v_bottom = 0.5*(velovec[v[j,i]] + velovec[v[j-1,i]])
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,i-1]])
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j+1,i-1]])
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            # four corners
            if i == 1 && j == 1
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j+1,i]])
                v_bottom = 0.5*(velovec[v[j,i]] + 0)
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,end]])
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_left = 0
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            if i == 1 && j == ny-1
                v_up = 0.5*(velovec[v[j,i]] + 0)
                v_bottom = 0.5*(velovec[v[j,i]] + velovec[v[j-1,i]])
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,i+1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,end]])
                u_right = 0.5*(velovec[u[j,i]] + velovec[u[j+1,i]])
                u_left = 0
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            if i == nx && j == 1
                v_up = 0.5*(velovec[v[j,i]] + velovec[v[j+1,i]])
                v_bottom = 0.5*(velovec[v[j,i]] + 0)
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,i-1]])
                u_right = 0
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j+1,i-1]])
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
            if i == nx && j == ny-1
                v_up = 0.5*(velovec[v[j,i]] + 0)
                v_bottom = 0.5*(velovec[v[j,i]] + velovec[v[j-1,i]])
                v_right = 0.5*(velovec[v[j,i]] + velovec[v[j,1]])
                v_left = 0.5*(velovec[v[j,i]] + velovec[v[j,i-1]])
                u_right = 0
                u_left = 0.5*(velovec[u[j,i-1]] + velovec[u[j+1,i-1]])
                q[v[j,i]] = (u_right*v_right - u_left*v_left)/dx + (v_up*v_up - v_bottom*v_bottom)/dy
            end
        end
    end
    return q
end

# +
# Evaluate the advection terms and unpack them back onto the 2-D grids.
qq = adv(velovec,ux,uy,vx,vy,u,v,nx,ny,dx,dy)
advu = qq[1:cuti]
advv = qq[cuti+1:end]
advu = reshape(advu,ny,nx-1)
advv = reshape(advv,ny-1,nx)
advu
# -

# Analytic advection of the manufactured solution, for verification.
advuref = @. sin(2pi*uy)*sin(2pi*uy) * (2*pi)*sin(4pi*ux) + sin(2pi*ux)*sin(2pi*ux) * (2*pi)*sin(4pi*uy);
advvref = @. sin(2pi*vy)*sin(2pi*vy) * (2*pi)*sin(4pi*vx) + sin(2pi*vx)*sin(2pi*vx) * (2*pi)*sin(4pi*vy);

# Overlay numerical (solid) vs analytic (dotted) contours for the x component.
contour(ux,uy,advu, label="numerical",c = :bluesreds,line_z=true)
contour!(ux,uy,advuref, line = :dot, c = :bluesreds, title="x-component advection verification", linewidth = 2, aspect_ratio=:equal, legend=:outerbottomleft, xlim=(0,1), ylim=(0,1))
savefig("advu.pdf")

# Same comparison for the y component.
contour(advv, label="numerical",c = :bluesreds,line_z=true)
contour!(advvref, line = :dot, c = :bluesreds, linewidth = 2, label="aa", legend=:outerbottomleft)

# Infinity-norm errors of each component against the analytic reference.
u1 = norm(advu-advuref,Inf)
norm(advv-advvref,Inf)

# Convergence plot against a second-order reference slope.
# NOTE(review): u2, u3, u4 are not defined anywhere in this file -- they are
# presumably u1 values recorded from earlier runs at nx = 20, 40, 80; this
# cell fails unless those are set by hand.  Confirm before running.
plot([1/20,1/40,1/80,1/100],[u1,u2,u3,u4],marker = 8, label = "x-advection infinite norm error")
plot!([1/20, 1/100],[1e-2,1e-2/25],scale = :log10, xlabel="Δx",ylabel="infinite norm error", line=:dash, title="x-advection error evaluation",label = "2nd-order error reference" )
savefig("adverr.pdf")
checkpoint2/Checkpoint2-1-Advection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Residual analysis to determine the optimal cutoff frequency
#
# <NAME>

# A common problem in signal processing is to automatically determine the optimal cutoff frequency that should be employed in a low-pass filter to attenuate as much as possible the noise without compromising the signal content of the data.
#
# Before we continue, see [this notebook](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DataFiltering.ipynb) for an overview about data filtering if needed.
#
# Unfortunately, there is no definite solution for this problem, but there are some techniques, with different degrees of success, to try to determine the optimal cutoff frequency.
#
# <NAME>, in his classic book *Biomechanics and motor control of human movement*, proposed a method to find the optimal cutoff frequency based on residual analysis of the difference between filtered and unfiltered signals over a range of cutoff frequencies. The optimal cutoff frequency is the one where the residual starts to change very little because it is considered that from this point, it's being filtered mostly noise and minimally signal, ideally. This concept is straightforward to implement.
#
# The function `residual_analysis.py` (its code is shown at the end of this text) is an implementation of this method and it is divided in three parts (after the help section): first, the residuals over a range of cutoff frequencies are calculated; second, an algorithm tries to find the noisy region (supposedly linear) of the residuals versus cutoff frequency plot and finds the optimal cutoff frequency; and third, the results are plotted. The code is lengthy relatively to the simplicity of the idea because of the long help section, the implementation of the automatic search and a rich plot.
#
# Let's test this code with benchmark data.
#
# In 1977, Pezzack, <NAME> Winter published a paper where they investigated the effects of differentiation and filtering processes on experimental data (the angle of a bar manipulated in space). Since then, these data have became a benchmark to test new algorithms. Let's work with these data (available at [http://isbweb.org/data/pezzack/index.html](http://isbweb.org/data/pezzack/index.html)). The data have the angular displacement measured by video and the angular acceleration directly measured by an accelerometer, which we will consider as the true acceleration.
# Part of these data are showing next:

# Import the necessary libraries
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import sys
sys.path.insert(1, r'./../functions')  # add dir to pythonpath

# load data file: columns are time, video displacement, a second displacement
# trace, and the accelerometer (reference) angular acceleration
time, disp, disp2, aacc = np.loadtxt('./../data/Pezzack.txt', skiprows=6, unpack=True)
dt = np.mean(np.diff(time))  # mean sampling interval

# plot data: displacement (left) and reference acceleration (right)
fig, (ax1,ax2) = plt.subplots(1, 2, sharex = True, figsize=(11, 4))
plt.suptitle("Pezzack's benchmark data", fontsize=20)
ax1.plot(time, disp, 'b')
ax1.set_xlabel('Time [s]'); ax1.set_ylabel('Angular displacement [rad]')
ax2.plot(time, aacc, 'g')
ax2.set_xlabel('Time [s]'); ax2.set_ylabel('Angular acceleration [rad/s$^2$]')
plt.subplots_adjust(wspace=0.3)

# And using the residual analysis code:

# +
from residual_analysis import residual_analysis

freq = np.mean(1/np.diff(time))  # mean sampling frequency [Hz]
fc_opt = residual_analysis(disp, freq=freq, show=True)
# -

# The optimal cutoff frequency found is 5.6 Hz. Note that the filtering process is relevant only for the derivative of the data; we cannot distinguish the filtered and unfiltered displacements (see that the RMSE residual is very small).
# Let's employ this filter, differentiate the data twice and compare with the true acceleration as we did before:

from scipy.signal import butter, filtfilt  # Butterworth filter

# Correct the cutoff frequency for the number of passes in the filter:
# filtfilt runs the filter forward and backward (dual pass), which lowers the
# effective cutoff, hence the correction factor.
C = 0.802  # for dual pass; C = (2**(1/npasses) - 1)**0.25
b, a = butter(2, (fc_opt/C)/(freq/2))
dispf = filtfilt(b, a, disp)
# second finite difference -> angular acceleration (loses one sample each end)
aaccBW = np.diff(dispf, 2)*freq*freq
# RMSE against the accelerometer reference (trimmed to match np.diff output):
rmseBW = np.sqrt(np.mean((aaccBW-aacc[1:-1])**2))

# plot data
fig, ax1 = plt.subplots(1, 1, figsize=(11, 4))
plt.suptitle("Pezzack's benchmark data", fontsize=20)
ax1.plot(time[1:-1], aacc[1:-1], 'g', label='Analog acceleration: (True value)')
ax1.plot(time[1:-1], aaccBW, 'r', label='Butterworth %.3g Hz: RMSE = %0.2f' %(fc_opt,rmseBW))
ax1.set_xlabel('Time [s]'); ax1.set_ylabel('Angular acceleration [rad/s$^2$]');
plt.legend(frameon=False, fontsize=12, loc='upper left');

# The performance seems satisfactory (see [this notebook](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DataFiltering.ipynb) for a comparison using other filters), but it is known that this residual analysis algorithm results in oversmoothing the kinematic data (see [http://www.clinicalgaitanalysis.com/faq/cutoff.html](http://www.clinicalgaitanalysis.com/faq/cutoff.html)).
# To read more about the determination of the optimal cutoff frequency, see the following papers:
#
# - [Pezzack, Norman, & Winter (1977). An assessment of derivative determining techniques used for motion analysis. Journal of Biomechanics, 10, 377-382](http://www.health.uottawa.ca/biomech/courses/apa7305/JB-Pezzack-Norman-Winter-1977.pdf).
# - [Giakas & Baltizopoulos (1997) A comparison of automatic filtering techniques applied to biomechanical walking data. J. Biomech. 30, 847-850](http://www.pe.uth.gr/sk_cms/scriptlib/getblob.php?redir=../sk_cms/images/notfound.htm&table=pepublications&field=doc&id=30).
# - [Alonso, Salgado, Cuadrado & Pintado (2009) Automatic smoothing of raw kinematic signals using SSA and cluster analysis. 7th EUROMECH Solid Mechanics Conference](http://lim.ii.udc.es/docs/proceedings/2009_09_EUROMECH_Automatic.pdf). # - [Kristianslund, Krosshaug & Bogert (2012) Effect of low pass filtering on joint moments from inverse dynamics: Implications for injury prevention. J. Biomech. 45, 666-671](http://www.klokavskade.no/upload/Publication/Kristianslund_2012_J%20Biomechan_Effect%20of%20low-pass%20filtering%20on%20joint%20moments%20from%20inverse%20dynamics.pdf). # ## References # # - <NAME>, <NAME>, & <NAME> (1977). [An assessment of derivative determining techniques used for motion analysis](http://www.health.uottawa.ca/biomech/courses/apa7305/JB-Pezzack-Norman-Winter-1977.pdf). Journal of Biomechanics, 10, 377-382. [PubMed](http://www.ncbi.nlm.nih.gov/pubmed/893476). # - Winter DA (2009) [Biomechanics and motor control of human movement](http://books.google.com.br/books?id=_bFHL08IWfwC&printsec=frontcover&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false). 4 ed. Hoboken, EUA: Wiley. # ## Function `residual_analysis.py` # %loadpy ./../functions/residual_analysis # + # #!/usr/bin/env python """Automatic search of filter cutoff frequency based on residual analysis.""" from __future__ import division, print_function import numpy as np from scipy.signal import butter, filtfilt __author__ = '<NAME>, https://github.com/demotu/BMC' __version__ = 'residual_analysis.py v.3 2014/06/13' def residual_analysis(y, freq=1, fclim=[], show=False, ax=None): """ Automatic search of filter cutoff frequency based on residual analysis. This method was proposed by Winter in his book [1]_. 
The 'optimal' cutoff frequency (in the sense that a filter with such cutoff frequency removes as much noise as possible without considerably affecting the signal) is found by performing a residual analysis of the difference between filtered and unfiltered signals over a range of cutoff frequencies. The optimal cutoff frequency is the one where the residual starts to change very little because it is considered that from this point, it's being filtered mostly noise and minimally signal, ideally. Parameters ---------- y : 1D array_like Data freq : float, optional (default = 1) sampling frequency of the signal y fclim : list with 2 numbers, optional (default = []) limit frequencies of the noisy part or the residuals curve show : bool, optional (default = False) True (1) plots data in a matplotlib figure False (0) to not plot ax : a matplotlib.axes.Axes instance, optional (default = None). Returns ------- fc_opt : float optimal cutoff frequency (None if not found) Notes ----- A second-order zero-phase digital Butterworth low-pass filter is used. # The cutoff frequency is correctyed for the number of passes: # C = 0.802 # for dual pass; C = (2**(1/npasses) - 1)**0.25 The matplotlib figure with the results will show a plot of the residual analysis with the optimal cutoff frequency, a plot with the unfiltered and filtered signals at this optimal cutoff frequency (with the RMSE of the difference between these two signals), and a plot with the respective second derivatives of these signals which should be useful to evaluate the quality of the optimal cutoff frequency found. Winter should not be blamed for the automatic search algorithm used here. The algorithm implemented is just to follow as close as possible Winter's suggestion of fitting a regression line to the noisy part of the residuals. This function performs well with data where the signal has frequencies considerably bellow the Niquist frequency and the noise is predominantly white in the higher frequency region. 
If the automatic search fails, the lower and upper frequencies of the noisy part of the residuals curve cam be inputed as a parameter (fclim). These frequencies can be chosen by viewing the plot of the residuals (enter show=True as input parameter when calling this function). It is known that this residual analysis algorithm results in oversmoothing kinematic data [2]_. Use it with moderation. This code is described elsewhere [3]_. References ---------- .. [1] <NAME> (2009) Biomechanics and motor control of human movement. .. [2] http://www.clinicalgaitanalysis.com/faq/cutoff.html .. [3] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ResidualAnalysis.ipynb Examples -------- >>> import numpy as np >>> from residual_analysis import residual_analysis >>> y = np.cumsum(np.random.randn(1000)) >>> # optimal cutoff frequency based on residual analysis and plot: >>> fc_opt = residual_analysis(y, freq=1000, show=True) >>> # sane analysis but specifying the frequency limits and plot: >>> residual_analysis(y, freq=1000, fclim=[200,400], show=True) >>> # Not always it's possible to find an optimal cutoff frequency >>> # or the one found can be wrong (run this example many times): >>> y = np.random.randn(100) >>> residual_analysis(y, freq=100, show=True) """ from scipy.interpolate import UnivariateSpline # Correct the cutoff frequency for the number of passes in the filter C = 0.802 # for dual pass; C = (2**(1/npasses) - 1)**0.25 # signal filtering freqs = np.linspace((freq/2) / 101, (freq/2)*C, 101) res = [] for fc in freqs: b, a = butter(2, (fc/C) / (freq / 2)) yf = filtfilt(b, a, y) # residual between filtered and unfiltered signals res = np.hstack((res, np.sqrt(np.mean((yf - y) ** 2)))) # find the optimal cutoff frequency by fitting an exponential curve # y = A*exp(B*x)+C to the residual data and consider that the tail part # of the exponential (which should be the noisy part of the residuals) # decay starts after 3 lifetimes (exp(-3), 95% drop) if not 
len(fclim) or np.any(fclim < 0) or np.any(fclim > freq / 2): fc1 = 0 fc2 = 0.95*(len(freqs)-1) # log of exponential turns the problem to first order polynomial fit # make the data always greater than zero before taking the logarithm reslog = np.log(np.abs(res[fc1:fc2 + 1] - res[fc2]) + 10 * np.finfo(np.float).eps) Blog, Alog = np.polyfit(freqs[fc1:fc2 + 1], reslog, 1) fcini = np.nonzero(freqs >= -3 / Blog) # 3 lifetimes fclim = [fcini[0][0], fc2] if np.size(fcini) else [] else: fclim = [np.nonzero(freqs >= fclim[0])[0][0], np.nonzero(freqs >= fclim[1])[0][0]] # find fc_opt with linear fit y=A+Bx of the noisy part of the residuals if len(fclim) and fclim[0] < fclim[1]: B, A = np.polyfit(freqs[fclim[0]:fclim[1]], res[fclim[0]:fclim[1]], 1) # optimal cutoff frequency is the frequency where y[fc_opt] = A roots = UnivariateSpline(freqs, res - A, s=0).roots() fc_opt = roots[0] if len(roots) else None else: fc_opt = None if show: _plot(y, freq, freqs, res, fclim, fc_opt, B, A, ax) return fc_opt def _plot(y, freq, freqs, res, fclim, fc_opt, B, A, ax): """Plot results of the residual_analysis function, see its help.""" try: import matplotlib.pyplot as plt except ImportError: print('matplotlib is not available.') else: if ax is None: plt.figure(num=None, figsize=(10, 5)) ax = np.array([plt.subplot(121), plt.subplot(222), plt.subplot(224)]) plt.rc('axes', labelsize=12, titlesize=12) plt.rc('xtick', labelsize=12) plt.rc('ytick', labelsize=12) ax[0].plot(freqs, res, 'b.', markersize=9) time = np.linspace(0, len(y) / freq, len(y)) ax[1].plot(time, y, 'g', linewidth=1, label='Unfiltered') ydd = np.diff(y, n=2) * freq ** 2 ax[2].plot(time[:-2], ydd, 'g', linewidth=1, label='Unfiltered') if fc_opt: ylin = np.poly1d([B, A])(freqs) ax[0].plot(freqs, ylin, 'r--', linewidth=2) ax[0].plot(freqs[fclim[0]], res[fclim[0]], 'r>', freqs[fclim[1]], res[fclim[1]], 'r<', ms=9) ax[0].set_ylim(ymin=0, ymax=4 * A) ax[0].plot([0, freqs[-1]], [A, A], 'r-', linewidth=2) ax[0].plot([fc_opt, fc_opt], 
[0, A], 'r-', linewidth=2) ax[0].plot(fc_opt, 0, 'ro', markersize=7, clip_on=False, zorder=9, label='$Fc_{opt}$ = %.1f Hz' % fc_opt) ax[0].legend(fontsize=12, loc='best', numpoints=1, framealpha=.5) # Correct the cutoff frequency for the number of passes C = 0.802 # for dual pass; C = (2**(1/npasses) - 1)**0.25 b, a = butter(2, (fc_opt/C) / (freq / 2)) yf = filtfilt(b, a, y) ax[1].plot(time, yf, color=[1, 0, 0, .5], linewidth=2, label='Opt. filtered') ax[1].legend(fontsize=12, loc='best', framealpha=.5) ax[1].set_title('Signals (RMSE = %.3g)' % A) yfdd = np.diff(yf, n=2) * freq ** 2 ax[2].plot(time[:-2], yfdd, color=[1, 0, 0, .5], linewidth=2, label='Opt. filtered') ax[2].legend(fontsize=12, loc='best', framealpha=.5) resdd = np.sqrt(np.mean((yfdd - ydd) ** 2)) ax[2].set_title('Second derivatives (RMSE = %.3g)' % resdd) else: ax[0].text(.5, .5, 'Unable to find optimal cutoff frequency', horizontalalignment='center', color='r', zorder=9, transform=ax[0].transAxes, fontsize=12) ax[1].set_title('Signal') ax[2].set_title('Second derivative') ax[0].set_xlabel('Cutoff frequency [Hz]') ax[0].set_ylabel('Residual RMSE') ax[0].set_title('Residual analysis') ax[0].grid() # ax2.set_xlabel('Time [s]') ax[1].set_xlim(0, time[-1]) ax[1].grid() ax[2].set_xlabel('Time [s]') ax[2].set_xlim(0, time[-1]) ax[2].grid() plt.tight_layout() plt.show()
notebooks/ResidualAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# config
import fiftyone as fo
import os

file_path = "/tf/testing"  # NOTE(review): unused in this notebook — kept as config placeholder
#print(planes)
batch_size = 1000  # NOTE(review): unused in this notebook — kept as config placeholder
print(fo.list_datasets())
dataset = fo.load_dataset("planeBox")
# -

session = fo.launch_app(dataset, auto=False)

# Samples with a "planebox" field, skipping the first 12000, not already tagged for relabeling
view = dataset.exists("planebox").skip(12000).limit(1000).match({"tags": {"$nin": ["relabel"]}})#.match({"relabel": {"$exists": False, "$eq": None}})
print(view)
session.view = view

# Create a view containing only the selected samples
selected_view = dataset.select(session.selected)
print(selected_view)

# Tag every manually selected sample for relabeling
for sample in selected_view:
    sample.tags.append("relabel")
    sample.save()

# #### Find the number of photos targeted for relabeling
relabel_view = dataset.match_tags("relabel")
print(relabel_view)

# ### Find the number of images with good bounding boxes
# Add a tag that bounding boxes are good

# +
# Everything with a planebox that was NOT flagged for relabeling is considered good
good_view = dataset.exists("planebox").match({"tags": {"$nin": ["relabel"]}})
print(good_view)
#session.view = good_view

for sample in good_view:
    sample.tags.append("good_box")
    sample.save()
# -

good_box_view = dataset.match_tags("good_box")
print(good_box_view)

# ### Create Bounding Boxes with the Model
# For the images where the bounding boxes have been confirmed, we will now create bounding boxes with the normalized model. It will print out the Sample ID for samples that either do not have a normalized model or do not have a plane detected

good_box_view = dataset.match_tags("good_box")
bad_detections = []
bad_models = []
for sample in good_box_view:
    #print(sample)
    planeDetections = sample["planebox"].detections
    # FIX: idiomatic emptiness check (was: len(planeDetections)==0)
    if not planeDetections:
        print(sample["id"])
        bad_detections.append(sample["id"])
    else:
        bbox = planeDetections[0]["bounding_box"]
        # FIX: identity check for None (was: == None)
        if sample["norm_model"] is None:
            print(sample["id"])
            bad_models.append(sample["id"])
        else:
            # Pair the model's label with the confirmed ground-truth box
            label = sample["norm_model"].label
            detections = [fo.Detection(label=label, bounding_box=bbox)]
            sample["modelbox"] = fo.Detections(detections=detections)
            sample.save()

# We are going to find that some samples do not have a plane detected or no model. When there is no model found, remove the `good_box` tag. When there is no detection, remove `good_box` tag and add a `relabel` tag.

# +
for sample in dataset.select(bad_detections):
    sample.tags.remove("good_box")
    sample.tags.append("relabel")
    sample.save()

for sample in dataset.select(bad_models):
    sample.tags.remove("good_box")
    sample.save()
# -

good_box_view = dataset.match_tags("good_box")
print(good_box_view)
session.view = good_box_view
ml-model/notebooks/Review Object Detection Samples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.chdir("/gpfs/milgram/project/turk-browne/projects/rtTest/") # !pwd import subprocess # + # call(f"sbatch class.sh {tmpFile}",shell=True) proc = subprocess.Popen(['sbatch class.sh'],shell=True) # proc.wait() # -
archive/Untitled5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %pylab inline
import os, sys, glob
import time
import datetime  # FIX: used by the "Cumulative beer discoveries" cell but was never imported
import numpy as np
import pickle
import json
import matplotlib as mpl
import matplotlib.pyplot as plt

# +
# Load credential data
from untappd_credentials import *

USER_NAME = 'ovarol'
#CLIENT_ID = "XXXXXXXXXXXXXX"
#CLIENT_SECRET = "<KEY>"
#print 'Id:\t{}\nSecret:\t{}\n'.format(CLIENT_ID, CLIENT_SECRET)

# +
# Using API from https://github.com/marshall91/pythonUntappd
import untappd_api as pythonUntappd

api = pythonUntappd.api(CLIENT_ID,CLIENT_SECRET)
userData = api.user_info(USER_NAME)['response']
#print json.dumps(userData, indent=4)

# +
# Page through the user's distinct beers until the API returns an empty page
uniqueBeers = list()
userData = api.user_info(USER_NAME)['response']
count, keepCollect = 0, True
scanCount = 0
while keepCollect:
    resp = api.user_distinct_beers(USER_NAME, offset=count)['response']
    count += resp['beers']['count']
    uniqueBeers.extend(resp['beers']['items'])
    print('Unique beers collected: {}'.format(len(uniqueBeers)))
    scanCount += 1
    if resp['beers']['count'] != 0:
        time.sleep(1)  # be polite to the API between pages
    else:
        break

with open('docs/data/{}_untappd_data.json'.format(USER_NAME),'w') as fl:
    fl.write(json.dumps({'user_data':userData, 'beer_data':uniqueBeers}))
# -

#print json.dumps(uniqueBeers, indent=4, sort_keys=True)
print(json.dumps(uniqueBeers[-1], indent=4, sort_keys=True))

# +
# Check style counts (full style string, and a simplified style before '-'/'/')
styleCounts = dict()
styleCountSimple = dict()
for b in uniqueBeers:
    if b['beer']['beer_style'] not in styleCounts:
        styleCounts[b['beer']['beer_style']] = 0
    styleCounts[b['beer']['beer_style']] += 1
    sstyle = b['beer']['beer_style'].split('-')[0].split('/')[0]
    if sstyle not in styleCountSimple:
        styleCountSimple[sstyle] = 0
    styleCountSimple[sstyle] += 1

for s in sorted(styleCountSimple, key=styleCountSimple.get, reverse=True):
    print(s, styleCountSimple[s])

# +
# Most preferred brewery
breweryCount = dict()
for b in uniqueBeers:
    if b['brewery']['brewery_name'] not in breweryCount:
        breweryCount[b['brewery']['brewery_name']] = 0
    breweryCount[b['brewery']['brewery_name']] += 1

for s in sorted(breweryCount, key=breweryCount.get, reverse=True):
    print(s, breweryCount[s])
# -

# Top-30 beers by my own rating
for b in sorted(uniqueBeers, key=lambda x: x['rating_score'], reverse=True)[:30]:
    print(b['brewery']['brewery_name'], b['beer']['beer_name'], b['rating_score'])

# +
ibuList, abvList = list(), list()
for b in uniqueBeers:
    ibuList.append(b['beer']['beer_ibu'])
    abvList.append(b['beer']['beer_abv'])
print('Avg. IBU: {}'.format(np.mean(ibuList)))
print('Avg. ABV: {}'.format(np.mean(abvList)))

fig, axarr = plt.subplots(1, 2, figsize=(7,3))
axarr[0].hist(ibuList, bins=20, alpha=0.5, linewidth=0)
axarr[0].set_xlabel('IBU (International Bitterness Units)', fontsize=12)
axarr[0].tick_params(axis='both', which='major', labelsize=12)
axarr[1].hist(abvList, bins=20, alpha=0.5, linewidth=0)
axarr[1].set_xlabel('ABV (Alcohol By Volume)', fontsize=12)
axarr[1].tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()

# +
# Beer ABV vs IBU
beerMeasures = list()
for b in uniqueBeers:
    beerMeasures.append((b['beer']['beer_abv'],b['beer']['beer_ibu']))
beerMeasures = np.array(beerMeasures)
print(beerMeasures.shape)

fig = plt.figure(figsize=(4,4))
plt.scatter(beerMeasures[:,0], beerMeasures[:,1], alpha=0.5)
plt.xlim(xmin=0)
plt.ylim(ymin=0)
plt.xlabel('ABV (Alcohol By Volume)', fontsize=12)
plt.ylabel('IBU (International Bitterness Units)', fontsize=12)
plt.tick_params(axis='both', which='major', labelsize=14)

# +
# Scatter plot of avg. ratings vs. my ratings
scatterData = list()
for b in uniqueBeers:
    if b['beer']['rating_score'] == 0:
        continue
    scatterData.append((b['beer']['rating_score'], b['rating_score']))
scatterData = np.array(scatterData)

fig = plt.figure(figsize=(4,4))
plt.plot([0,5],[0,5],'r--', linewidth=2)  # identity line: my rating == avg rating
plt.scatter(scatterData[:,0], scatterData[:,1], alpha=0.5)
minV, maxV = np.min(scatterData), np.max(scatterData)
print(minV, maxV)
plt.xlim((minV-0.2, maxV+0.2))
plt.ylim((minV-0.2, maxV+0.2))
plt.xlabel('Avg. rating', fontsize=14)
plt.ylabel('My rating', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=12)

# +
# Scatter plot of avg. ratings vs. my ratings (with marginal histograms)
scatterData = list()
for b in uniqueBeers:
    if b['beer']['rating_score'] == 0 or b['rating_score'] == 0:
        continue
    scatterData.append((b['beer']['rating_score'], b['rating_score']))
scatterData = np.array(scatterData)

fig3 = plt.figure(figsize=(4.5,4), constrained_layout=True)
gs = fig3.add_gridspec(3, 3)
f3_ax1 = fig3.add_subplot(gs[0, :-1])
f3_ax2 = fig3.add_subplot(gs[1:, 0:-1])
f3_ax3 = fig3.add_subplot(gs[1:, -1])
minV, maxV = np.min(scatterData)-0.1, np.max(scatterData)+0.1
print(minV, maxV)
f3_ax1.set_xlim(minV, maxV)
f3_ax2.set_xlim(minV, maxV)
f3_ax2.set_ylim(minV, maxV)
f3_ax3.set_ylim(minV, maxV)
f3_ax1.set_xticks([])
f3_ax3.set_yticks([])
f3_ax1.set_yticks([])
f3_ax3.set_xticks([])
f3_ax1.hist(scatterData[:,0], bins=20, color='0.5', alpha=0.5)
f3_ax3.hist(scatterData[:,0], bins=20, color='0.5', alpha=0.5, orientation="horizontal")
f3_ax2.scatter(scatterData[:,0], scatterData[:,1], alpha=0.5, c='0.5')
f3_ax2.plot([0,5],[0,5],'r--', linewidth=2)
f3_ax2.set_xlabel('Avg. rating', fontsize=14)
f3_ax2.set_ylabel('My rating', fontsize=14)
fig3.tight_layout()

# +
# What kind of beers I like more than avg. people
scatterData = list()
for b in uniqueBeers:
    if b['beer']['rating_score'] == 0:
        continue
    scatterData.append((b['rating_score'] - b['beer']['rating_score'],
                        b['beer']['beer_abv'], b['beer']['beer_ibu']))
scatterData = np.array(scatterData)
print(scatterData.shape)

fig, axarr = plt.subplots(1, 2, figsize=(7,3), sharey=True)
axarr[0].scatter(scatterData[:,2], scatterData[:,0], alpha=0.5)
axarr[0].axhline(y=0, c='k', linewidth=2, linestyle='--')
fit = np.polyfit(scatterData[:,2], scatterData[:,0], 1)
fit_fn = np.poly1d(fit)
axarr[0].plot(scatterData[:,2], fit_fn(scatterData[:,2]), 'r--', linewidth=2)
axarr[0].set_xlim(xmin=-5, xmax=105)
axarr[0].set_xlabel('IBU (International Bitterness Units)', fontsize=12)
axarr[0].set_ylabel('Rating difference', fontsize=12)
axarr[0].tick_params(axis='both', which='major', labelsize=12)
axarr[1].scatter(scatterData[:,1], scatterData[:,0], alpha=0.5)
axarr[1].axhline(y=0, c='k', linewidth=2, linestyle='--')
fit = np.polyfit(scatterData[:,1], scatterData[:,0], 1)
fit_fn = np.poly1d(fit)
axarr[1].plot(scatterData[:,1], fit_fn(scatterData[:,1]), 'r--', linewidth=2)
axarr[1].set_xlim(xmin=-1, xmax=max(scatterData[:,1])+1)
axarr[1].set_xlabel('ABV (Alcohol By Volume)', fontsize=12)
axarr[1].tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()

# +
# Cumulative beer discoveries
dTimes = list()
for b in uniqueBeers:
    # strip the UTC offset (' -...') before parsing; format per Untappd API
    dTimes.append(datetime.datetime.strptime(b['first_created_at'].split(' -')[0], '%a, %d %b %Y %H:%M:%S'))
dTimes.sort()

fig = plt.figure(figsize=(7,3))
plt.plot(dTimes, range(len(dTimes)), linewidth=2)
plt.ylabel('Unique beer count', fontsize=14)
plt.xticks(rotation=30)
plt.tick_params(axis='both', which='major', labelsize=12)
# -
untappd_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''graphs'': conda)'
#     name: python_defaultSpec_1595069844827
# ---

# +
import random
import itertools
import networkx as nx
from time import time
from os import listdir
from os.path import isfile, join
from networkx.algorithms.community.quality import modularity

# +
# Collect every file in the input directory as a candidate edge list
dir_path = "input_files"
input_files = [join(dir_path, f) for f in listdir(dir_path) if isfile(join(dir_path, f))]
input_files
# -

file_index = 1
filename = input_files[file_index]
filename

G = nx.read_edgelist(filename)
# relabel nodes to 0..n-1 so node ids can index the `labels` list below
G = nx.convert_node_labels_to_integers(G)


# + tags=[]
def label_partition(G, k=8):
    # Randomized label propagation: seed k random nodes with distinct labels,
    # then repeatedly spread each labeled node's label to one random
    # unlabeled neighbor until every node is labeled.
    # NOTE(review): assumes nodes are integers 0..n-1 (labels is indexed by
    # node id) and that G is connected — on a disconnected graph the while
    # loop never terminates. Confirm inputs before reuse.
    unlabeled_nodes = set(G.nodes())
    labeled_nodes = set()
    labels = [0] * len(unlabeled_nodes)  # labels[node] = community index
    random_k_nodes = random.sample(unlabeled_nodes, k)
    for i, r in enumerate(random_k_nodes):
        labels[r] = i
        unlabeled_nodes.discard(r)
        labeled_nodes.add(r)
    while unlabeled_nodes:
        # iterate over a copy: labeled_nodes grows during the loop
        for n in labeled_nodes.copy():
            neighbors = set(nx.neighbors(G, n)) - labeled_nodes
            if neighbors:
                neighbor = random.sample(neighbors, 1)[0]
                labels[neighbor] = labels[n]
                unlabeled_nodes.discard(neighbor)
                labeled_nodes.add(neighbor)
    # group node ids by their assigned label -> list of k communities
    partitions = [[] for i in range(k)]
    nodes = list(G.nodes())
    for i, l in enumerate(labels):
        partitions[l].append(nodes[i])
    return partitions


# + tags=[]
def graph_partition(G, k=4, c=10):
    # Run the randomized label partition c times and keep the partition
    # with the highest modularity.
    best_partition = list()
    best_modularity = float("-inf")
    for i in range(c):
        partitions = label_partition(G, k)
        partition_modularity = modularity(G, partitions)
        print(partition_modularity)
        if (best_modularity < partition_modularity):
            best_partition = partitions
            best_modularity = partition_modularity
    return best_partition, best_modularity


partitions, mod = graph_partition(G)

# + tags=[]
print("Modularity of the partitions: " + str(mod))

# + tags=[]
# Partition every input graph and record (modularity, partition) per file
results = dict()
for f in input_files:
    print("Processing " + f)
    G = nx.read_edgelist(f)
    G = nx.convert_node_labels_to_integers(G)
    partitions, mod = graph_partition(G)
    results[f] = (mod, partitions)
results
# -

results[input_files[0]]
LabelPartition.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This is a simlpe cuGraph example of calculating "Page Rank" for NCAAW basketball teams based on the wins and losses in a single season. We treat two teams as having a "link" between them if they've played a game against each other, with the "link" proceeding from the losing team to the winning team. # # I've used cuGraph to calculate Page Rank not becasue I need a lot of speed - we are dealing here with a very small dataset. But primarily becasue I am familiar with it, and it's part of Rapids library, whcih I am all too eager to promote. ;) Unfortunately, cuGraph is still not available in Kaggle Docker, so I have to install it from skrach here from <NAME>'s dataset. # !nvidia-smi # !nvcc --version # نصب کوداگراف حدود ۱۵ دقیقه طول می کشد # !conda install -y -c nvidia -c rapidsai -c numba -c conda-forge -c defaults cugraph cudatoolkit=11.0 # + # # %%time # import networkx as nx # import cugraph # G=nx.erdos_renyi_graph(5000,0.5) # # b=nx.betweenness_centrality(G) # bc = cugraph.betweenness_centrality(G) # bc # - import nltk nltk.download('stopwords') nltk.download('punkt') # + # # %cd .. # # !rm -r TC-with-GCN # - # !git clone https://github.com/mamintoosi/TC-with-GCN.git # %cd TC-with-GCN # %%time # %run data_processor.py # %%time # %run build_graph.py from google.colab import drive drive.mount('/content/gdrive') # %cd .. 
# !zip -r -q TCGCN_graphs.zip tmp/ # !ls from IPython.display import FileLink FileLink(r'TCGCN_graphs.zip') # لینک بالا رو در کولب دبلیوگت کردم # + # # !cp TCGCN_graphs.zip '/content/gdrive/My Drive/models/' # + # from IPython.display import FileLink # graph_path = "../tmp/TCGCN/graph" # # dataset = "mr" # # dataset = "ohsumed" # # dataset = "R52" # dataset = "R8" # file_name = f"{graph_path}/{dataset}_g_info.pkl" # FileLink(file_name) # + # # !pip show fastai # + # # !conda install -y fastai # # !pip install -U fastai # + # # %%time # # %run trainer.py # - # !ls # + # # %cd .. # # !git clone https://github.com/mamintoosi/temp_rep.git # # %cd temp_rep # # !cp -r ../tmp ./ # + # # !ls tmp # + # # !git add * # # !git config user.email "<EMAIL>" # # !git commit -m "Add TCGCN graphs"
tmp/cugraph-bc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from utils import NerDataset dev_dataset = NerDataset('./processed/processed_dev_bio.txt') # - len(dev_dataset.sents) train_dataset = NerDataset('./processed/processed_training_bio.txt') len(train_dataset) test_dataset[0] train_dataset[1] # ### 分段处理,使长度低于256 MAX_LEN = 256 - 2 sents, tags_li = [], [] # list of lists for entry in entries: words = [line.split()[0] for line in entry.splitlines()] tags = ([line.split()[-1] for line in entry.splitlines()]) # sents.append(["[CLS]"] + words + ["[SEP]"]) # 每个句子前后加['CLS']和['SEP'] # tags_li.append(["<PAD>"] + tags + ["<PAD>"]) if len(words) > MAX_LEN: # 先对句号分段 word, tag = [], [] for char, t in zip(words, tags): if char != '。': word.append(char) tag.append(t) else: # if len(word) > MAX_LEN: # word = word[:MAX_LEN] # tag = tag[:MAX_LEN] sents.append(["[CLS]"] + word[:MAX_LEN] + ["[SEP]"]) tags_li.append(["PAD"] + tag[:MAX_LEN] + ["PAD"]) word, tag = [], [] # 最后的 if len(word): sents.append(["[CLS]"] + word[:MAX_LEN] + ["[SEP]"]) tags_li.append(["PAD"] + tag[:MAX_LEN] + ["PAD"]) word, tag = [], [] else: sents.append(["[CLS]"] + word[:MAX_LEN] + ["[SEP]"]) tags_li.append(["PAD"] + tag[:MAX_LEN] + ["PAD"]) len(sents) len(tags_li) sents[0], tags_li[0] from pytorch_pretrained_bert import BertModel bert = BertModel.from_pretrained('bert-base-chinese') x = dev_dataset[1] import torch encode_layers, _ = bert(torch.tensor(x[1]).unsqueeze(0)) enc = encode_layers[-1] enc enc.size() len(x[1]) enc = enc.view(len(x[1]), 1, -1) enc.size() from utils import NerDataset dev_dataset = NerDataset('./processed/processed_dev_bio.txt') dev_dataset.sents[0] dev_dataset.tags_li[0] for sent, tag in zip(dev_dataset.sents, dev_dataset.tags_li): assert len(sent) == len(tag), f"{len(sent)}, {len(tag)}" from pytorch_pretrained_bert import 
BertTokenizer tokenizer = BertTokenizer.from_pretrained('/root/workspace/qa_project/chinese_L-12_H-768_A-12/') VOCAB = ('<PAD>', 'O', 'B-INF', 'I-INF', 'B-PAT', 'I-PAT', 'B-OPS', 'I-OPS', 'B-DSE', 'I-DSE', 'B-DRG', 'I-DRG', 'B-LAB', 'I-LAB') tag2idx = {tag: idx for idx, tag in enumerate(VOCAB)} idx = 0 for words, tags in zip(dev_dataset.sents, dev_dataset.tags_li): x, y = [], [] is_heads = [] for w, t in zip(words, tags): tokens = tokenizer.tokenize(w) if w not in ("[CLS]", "[SEP]") else [w] xx = tokenizer.convert_tokens_to_ids(tokens) is_head = [1] + [0]*(len(tokens) - 1) t = [t] + ["<PAD>"] * (len(tokens) - 1) yy = [tag2idx[each] for each in t] # (T,) x.extend(xx) is_heads.extend(is_head) y.extend(yy) assert len(x)==len(y)==len(is_heads), f"{idx}" idx += 1 dev_dataset.sents[366] # ### CRF import torch import torch.nn as nn import torch.optim as optim torch.manual_seed(1) def argmax(vec): _, idx = torch.max(vec, 1) return idx.item() def prepare_sequence(seq, to_ix): idxs = [to_ix[w] for w in seq] return torch.tensor(idxs, dtype=torch.long) def log_sum_exp(vec): max_score = vec[0, argmax(vec)] max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1]) return max_score + \ torch.log(torch.sum(torch.exp(vec - max_score_broadcast))) START_TAG = "<START>" STOP_TAG = "<STOP>" EMBEDDING_DIM = 5 HIDDEN_DIM = 4 training_data = [( "the wall street journal reported today that apple corporation made money".split(), "B I I I O O O B I O O".split() ), ( "georgia tech is a university in georgia".split(), "B I O O O O B".split() )] word_to_ix = {} for sentence, tags in training_data: for word in sentence: if word not in word_to_ix: word_to_ix[word] = len(word_to_ix) word_to_ix tag_to_ix = {"B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4} precheck_sent = prepare_sequence(training_data[1][0], word_to_ix) precheck_sent precheck_tags = torch.tensor([tag_to_ix[t] for t in training_data[0][1]], dtype=torch.long) precheck_tags model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, 
EMBEDDING_DIM, HIDDEN_DIM) lstm_feates = model._get_lstm_features(precheck_sent) lstm_feates lstm_feates.size() # 5是标签长度, 7是文本长度 ### 前向算法 init_alphas = torch.full((1, model.tagset_size), -10000.) init_alphas init_alphas[0][model.tag_to_ix[START_TAG]] = 0. init_alphas forward_var = init_alphas for feat in lstm_feates: alphas_t = [] for next_tag in range(model.tagset_size): emit_score = feat[next_tag].view(1, -1).expand(1, model.tagset_size) #print(emit_score) #break trans_score = model.transitions[next_tag].view(1, -1) # print(trans_score) # break next_tag_var = forward_var + trans_score + emit_score alphas_t.append(log_sum_exp(next_tag_var).view(1)) # 改变 forward_var = torch.cat(alphas_t).view(1, -1) alphas_t terminal_var = torch.cat(alphas_t).view(1, -1) model.transitions model.word_embeds embeds = model.word_embeds(precheck_sent).view(len(precheck_sent), 1, -1) embeds.size() sentence hidden = model.init_hidden() hidden lstm_output, hidden = model.lstm(embeds, hidden) lstm_output.size() lstm_out = lstm_output.view(len(precheck_sent), model.hidden_dim) lstm_out.size() lstm_feates = model.hidden2tag(lstm_out) lstm_feates.size() from pytorch_pretrained_bert import BertModel bert = BertModel.from_pretrained('bert-base-chinese') precheck_sent from utils import NerDataset training_data = NerDataset('./processed/processed_training_bio.txt') training_data[0] for batch in train_iter: data = batch break data words, x, is_heads, tags, y, seqlens = data x.size() encoded_layers, _ = bert(x) enc = encoded_layers[-1] enc.size() fc = nn.Linear(768, 16) lstm_feats = fc(enc) lstm_feats.size() lstm_out forward_score = model._score_sentence(lstm_feates, ) precheck_tags gold_score = model._score_sentence(lstm_feates, precheck_tags) gold_score score = torch.zeros(1) score tags = torch.cat([torch.tensor([model.tag_to_ix[START_TAG]], dtype=torch.long), precheck_tags]) tags for i, feat in enumerate(lstm_feates): score = score + model.transitions[tags[i+1], tags[i]] + feat[tags[i+1]] score 
score = score + model.transitions[model.tag_to_ix[STOP_TAG], tags[-1]] score from crf import Bert_BiLSTM_CRF, tag2idx, log_sum_exp bert_crf = Bert_BiLSTM_CRF(tag2idx) from utils import NerDataset train_dataset = NerDataset('./processed/processed_training_bio.txt') train_dataset[0] from torch.utils.data import DataLoader from utils import pad train_iter = DataLoader(dataset=train_dataset, batch_size=8, shuffle=True, collate_fn=pad) for batch in train_iter: print(batch) break words, x, is_heads, tags, y, seqlens = batch x y score = bert_crf.neg_log_likelihood(x, y) score bert_crf(x) from crf import Bert_BiLSTM_CRF from utils import tag2idx, NerDataset from torch.utils.data import DataLoader train_dataset = NerDataset('./processed/processed_training_bio.txt') from utils import pad train_iter = DataLoader(dataset=train_dataset, batch_size=8, shuffle=True, collate_fn=pad) model = Bert_BiLSTM_CRF(tag2idx).cuda() import torch import torch.nn as nn optimizer = torch.optim.Adam(model.parameters(), lr = 1e-4) criterion = nn.CrossEntropyLoss(ignore_index=0) model.train() device = torch.device('cuda') for i, batch in enumerate(train_iter): words, x, is_heads, tags, y, seqlens = batch #print(x, y) x = x.to(device) y = y.to(device) _y = y loss = model.neg_log_likelihood(x, y) print(loss) break x x.size() _, y_hat = model(x) y_hat.size() x.size() y_hat feats = model._get_lstm_features(x) feats.size() feats forward_score = model._forward_alg(feats) forward_score T = feats.shape[1] # 48 batch_size = feats.shape[0] # 8 log_alpha = torch.Tensor(batch_size, 1, model.tagset_size).fill_(-10000.).to(device) log_alpha log_alpha.size() log_alpha[:, 0, model.start_label_id] = 0 # start log_alpha from crf import log_sum_exp_batch for t in range(1, T): log_alpha = (log_sum_exp_batch(model.transitions + log_alpha, axis=-1) + \ feats[:, t]).unsqueeze(1) log_alpha log_prob_all_barX = log_sum_exp_batch(log_alpha) log_prob_all_barX log_tensor = model.transitions + log_alpha log_tensor 
torch.max(log_tensor, -1)[0] log_alpha.size() enc = model._bert_enc(x) enc.size() enc[:, 1:-1, :].size() enc, _ = model.lstm(enc[:, 1:-1, :]) feats = model.fc(enc) feats.size() score, tag_seq = model._viterbi_decode(feats) score tag_seq
data_process.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Classic comparison sorts, each demonstrated on the same shuffled list of
# 0..29.  All sorts below work in place and return the (mutated) list,
# except merge_sort, which returns a new list; each demo therefore runs on
# a fresh copy of `s`.
import random
from copy import copy


def swap(s, i, j):
    """Swap the elements at positions i and j of list s, in place."""
    if i != j:
        s[i], s[j] = s[j], s[i]


s = list(range(30))
random.shuffle(s)
print(s)
# -

# ## Selection Sort

# +
def selection_sort(s):
    """Sort s in place by repeatedly moving the minimum of the unsorted
    suffix to the front; O(n^2) comparisons, at most n swaps. Returns s."""
    for i in range(len(s)):
        minidx = i
        for j in range(i + 1, len(s)):
            if s[j] < s[minidx]:
                minidx = j
        swap(s, i, minidx)
    return s


print(selection_sort(copy(s)))
# -

# ## Insertion Sort

# +
def insertion_sort(s):
    """Sort s in place by sinking each element into the sorted prefix;
    O(n^2) worst case, O(n) on nearly sorted input. Returns s."""
    for i in range(1, len(s)):
        cur = i
        while cur > 0 and s[cur - 1] > s[cur]:
            swap(s, cur - 1, cur)
            cur = cur - 1
    return s


print(insertion_sort(copy(s)))
# -

# ## Quick Sort

# +
def partition(s, l, h):
    """Lomuto partition of s[l..h] (inclusive) around pivot s[h].

    Elements smaller than the pivot end up to its left; returns the
    pivot's final index.
    """
    pivot = h
    first = l
    for i in range(l, h):
        if s[i] < s[pivot]:
            swap(s, i, first)
            first += 1
    swap(s, first, pivot)
    return first


def quick_sort(s, l, h):
    """Sort s[l..h] (inclusive bounds) in place; O(n log n) average.
    Returns s."""
    if h > l:
        pivot = partition(s, l, h)
        quick_sort(s, l, pivot - 1)
        quick_sort(s, pivot + 1, h)
    return s


print(quick_sort(copy(s), 0, len(s) - 1))
# -

# ## Bubble Sort

# +
def bubble_sort(seq):
    """Sort seq in place by bubbling the smallest remaining element down
    to position i on each pass; O(n^2). Returns seq."""
    for i in range(0, len(seq)):
        for j in range(len(seq) - 1, i, -1):
            if seq[j] < seq[j - 1]:
                swap(seq, j, j - 1)
    return seq


print(s)
print(bubble_sort(copy(s)))
# -

# ## Merge Sort

# +
def merge(l, r):
    """Merge two sorted lists into a new sorted list.

    Bug fix vs. the original sentinel-based merge: this version no longer
    appends sys.maxsize to its arguments, so the caller's lists are left
    untouched, and it works for any mutually comparable element type
    (strings, floats, ...), not just ints below sys.maxsize.
    """
    ret = []
    curl = 0
    curr = 0
    while curl < len(l) and curr < len(r):
        if l[curl] <= r[curr]:  # <= keeps the merge stable
            ret.append(l[curl])
            curl += 1
        else:
            ret.append(r[curr])
            curr += 1
    # Exactly one of these tails is non-empty; both are already sorted.
    ret.extend(l[curl:])
    ret.extend(r[curr:])
    return ret


def merge_sort(seq):
    """Return a new sorted list of seq's elements; O(n log n), stable."""
    if len(seq) > 1:
        q = len(seq) // 2
        return merge(merge_sort(seq[:q]), merge_sort(seq[q:]))
    return seq


print(s)
print(merge_sort(copy(s)))
# -

# ## Heap Sort

# +
def max_heapify(seq, size, i):
    """Restore the max-heap property for the subtree rooted at index i,
    assuming both children are already heaps.

    Only the first `size` elements of seq are treated as the heap, which
    lets heap_sort shrink the heap while the sorted tail grows.
    """
    left = 2 * i + 1
    right = 2 * i + 2
    largest = i
    if left < size and seq[left] > seq[i]:
        largest = left
    if right < size and seq[right] > seq[largest]:
        largest = right
    if largest != i:
        swap(seq, i, largest)
        max_heapify(seq, size, largest)


def build_max_heap(seq):
    """Turn seq into a max-heap in place, heapifying bottom-up from the
    last internal node."""
    for i in range(len(seq) // 2 - 1, -1, -1):
        max_heapify(seq, len(seq), i)


def heap_sort(seq):
    """Sort seq in place: build a max-heap, then repeatedly swap the root
    (current maximum) to the end of the shrinking heap. Returns seq.

    Bug fix: removed a leftover debug print of the intermediate heap.
    """
    build_max_heap(seq)
    for i in range(len(seq) - 1):
        last = (len(seq) - 1) - i
        swap(seq, 0, last)
        max_heapify(seq, len(seq) - i - 1, 0)
    return seq


print(s)
print(heap_sort([55, 7, 78, 12, 42]))
print(heap_sort(copy(s)))
# -
Issues/algorithms/Sorting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Backtest of an "open == low" intraday signal on NIFTY-50 stocks.
# For each stock: on any day where the open equals the day's low (the stock
# never traded below its opening price), enter at that day's close and exit
# at the next day's close; the per-trade return is then aggregated by date.
# Runs only inside Google Colab (Drive auth) with a valid Quandl API key.

# + [markdown] id="view-in-github" colab_type="text"
# [View in Colaboratory](https://colab.research.google.com/github/sonukick/backtests/blob/master/Open_n_close.ipynb)

# + id="vNFrvx8HCMVR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1105} outputId="b1b35057-1278-43cd-98a7-59c642ea985e"
# !pip install openpyxl
# !pip install pydrive
# !pip install quandl
# !pip install pyfolio
import quandl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import io
import seaborn as sns
import matplotlib.pyplot as plt
import pyfolio as pf

# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# List the files in the hard-coded Drive folder so the CSV file id below
# can be verified by eye against the printed titles.
file_list = drive.ListFile({'q': "'1P1c2VCcaHquJC0Fp5yfeiZjgqwNZlBEb' in parents and trashed=false"}).GetList()
for file1 in file_list:
    print('title: %s, id: %s' % (file1['title'], file1['id']))

#data = drive.CreateFile({'id': '1O-6oCbmJ3vRKGbh7mL7GLWPtN6mNogkL'})
#data.GetContentFile('f_o.csv')

# Download the NIFTY-50 constituents list to the local Colab filesystem.
data = drive.CreateFile({'id': '1cqSAgTq8Q7a-YLDoEwW312aAxZHY3MuG'})
data.GetContentFile('nifty50.csv')

api_key = '<KEY>'  # NOTE(review): redacted; supply a real Quandl key
quandl.ApiConfig.api_key = api_key

#from google.colab import files
#uploaded = files.upload()
#symbols = pd.read_csv(io.StringIO(uploaded['ind_nifty50list.csv'].decode('utf-8')))

# + id="V0ZrZtBpIOqD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 867} outputId="2ede9784-10e0-4a33-fef8-ddd63c215e0c"
symbols = pd.read_csv('nifty50.csv')
symbols = symbols['Symbol']  # presumably NSE ticker symbols; verify against the CSV
symbols[0]

# Collected trades; the first tuple acts as a header row and is promoted to
# column names after the DataFrame is built below.
close_list = []
close_list.append(('Date','symbol','entry','exit','profit'))
entry = 0
exit = 0  # NOTE(review): shadows the builtin exit(); harmless here but worth renaming
returns = 0
for j in symbols:
    print(j)
    try:
        # Daily OHLC history for one NSE symbol over the backtest window.
        data = quandl.get('NSE/'+str(j), start_date='2012-01-1', end_date='2018-08-20')
    except:
        # NOTE(review): bare except silently skips any symbol whose download
        # fails for ANY reason (bad ticker, rate limit, network) — consider
        # catching the specific quandl error and logging it.
        continue
    for i in range(len(data.Close)-1):
        # Signal: the day's open equals its low, i.e. price never dipped
        # below the open. Enter at this close, exit at the next close.
        if data.Open[i] == data.Low[i]:
            entry = data.Close[i]
            exit = data.Close[i+1]
            returns = (exit/entry)-1  # simple (not log) one-day return
            close_list.append((data.index[i],j,entry,exit,returns))

# + [markdown] id="8zvudwgyhL20" colab_type="text"
#

# + id="y3XmIxGHgKDZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1969} outputId="dfcae52a-4e1b-4386-bc98-f53295196990"
# Promote the header tuple (row 0) to column names, then drop it.
df = pd.DataFrame(close_list)
df.columns = df.iloc[0]
df.drop([0],inplace = True)
df

# + id="v_9dQ4UvFT_r" colab_type="code" colab={}
# Average the per-trade return across all symbols that fired on each date,
# expressed in percent.
dates = list(set(df.Date))
returns_by_dates = []
for i in dates:
    returns_by_date = np.mean(df[df['Date']==i].profit)
    returns_by_dates.append((i,returns_by_date*100))

# + id="K-RsvaxLb4La" colab_type="code" colab={}
df_returns = pd.DataFrame(returns_by_dates)

# + id="KaRcD4IkJI4C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2000} outputId="014c63d8-4097-47be-ce5f-6bac7293ca3e"
# Sort chronologically and move the date column into the index; column 1
# (the mean daily return, in %) is the only remaining column.
df_returns = df_returns.sort_values(0)
df_returns.index = df_returns[0]
df_returns.drop(columns = [0],inplace=True)
(df_returns)

# + id="PvYLqWtbJSgI" colab_type="code" colab={}
cum_ret = np.cumsum(df_returns)  # cumulative sum of daily % returns

# + id="g6d2dbaEdNmI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="2da99b3e-d6e2-4719-a802-551cc1be7f1c"
# Annual totals of the daily mean returns.
df_returns.groupby(df_returns.index.year).sum()

# + id="pS_2FVDLem18" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 365} outputId="a98f56e7-7db0-4121-d514-85b85cb3467f"
# Equity-curve style plot for 2017 only.
np.cumsum(df_returns[df_returns.index.year==2017]).plot()

# + id="nl8DIg9VgMM4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 452} outputId="4abcbb05-ab4e-44da-b3fb-033c41d42d7b"
# Monthly totals within 2017.
df_returns[df_returns.index.year==2017].groupby(df_returns[df_returns.index.year==2017].index.month).sum()

# + id="IhC3zu3fgmco" colab_type="code" colab={}
stock_rets = df_returns[1]  # the returns column as a Series (unused below)

# + id="RjENiK7vjqFt" colab_type="code" colab={}
Open_n_close.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# =============================================================
# Copyright © 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
# =============================================================
# -

# # Modin Getting Started Example for Distributed Pandas

# ## Importing and Organizing Data

# In this example we will be generating a **synthetic dataset** and **demonstrating stock Pandas operations running with Modin**.
#
# Let's start by **importing** all the necessary packages and modules

import numpy as np
import matplotlib.pyplot as plt
import time

# ## How to Use Modin

# We will also be importing **stock Pandas as pd** and **Modin as mpd to show differentiation**. You can see importing Modin is simple and **does not require any additional steps.**

# Convention for the rest of the notebook: `pandas` = stock pandas,
# `pd` = Modin's drop-in replacement.
import pandas
import modin.pandas as pd

# We will now **generate a synthetic dataset** using NumPy to use with Modin and save it to a CSV.

# 2**18 rows x 2**8 columns of random ints — large enough for the parallel
# CSV read to show a measurable speedup.
array=np.random.randint(low=100,high=10000,size=(2**18,2**8))
#array
np.savetxt("foo.csv", array, delimiter=",") #how to generate array

# Now we will convert the ndarray into a Pandas dataframe and display the first five rows.

# For **stock pandas, the dataframe is being stored as `pandas_df`** and for **Modin, the same dataframe is being stored as `modin_df`**.

# Let's try running the following cell with Pandas first.

# %%time
pandas_df = pandas.read_csv("foo.csv", names=["col{}".format(i) for i in range(256)])
pandas_df.head()

# Now let's run the same code, but use **Modin instead of stock Pandas.**
#
# **Note the speedup!**

# %%time
modin_df=pd.read_csv("foo.csv", names=["col{}".format(i) for i in range(256)])
modin_df.head()

# Let's now **visualize** this speedup from Modin with a plot!

def plotter(outputdict):
    """Bar-chart the {package name: runtime seconds} mapping in outputdict."""
    fig = plt.figure(figsize = (10, 5))  # NOTE(review): fig handle is unused
    plt.bar(outputdict.keys(),outputdict.values(),color='blue',width=0.4)
    plt.xlabel("Python Package")
    plt.ylabel("Runtime(seconds)")
    plt.show()

# +
# Time the same CSV read under both engines and plot the comparison.
t0 = time.time()
pandas_df = pandas.read_csv("foo.csv", names=["col{}".format(i) for i in range(256)])
pandas_time = time.time()- t0

t1 = time.time()
modin_df = pd.read_csv("foo.csv", names=["col{}".format(i) for i in range(256)])
modin_time = time.time() - t1

print("Pandas Time(seconds):",pandas_time,"\nModin Time(seconds):",modin_time)
outputDict={"Pandas":pandas_time,"Modin":modin_time}
plotter(outputDict)
# -

# ## Other DataFrame Function Performance Example

# We will now show the speedup in performance from Modin compared to stock Pandas with a few common functions.
#
# Like before, **`pandas_df` is for stock Pandas**, **`modin_df` is for Modin**.

# ### `df.mean()`

# Mean
t2 = time.time()
pandas_df.mean(axis=0)
pandas_time=time.time()- t2
print(" stock Pandas wall time for completion in seconds:",pandas_time)

# Mean
t3 = time.time()
modin_df.mean(axis=0)
modin_time=time.time()- t3
print("Modin wall time for completion in seconds:",modin_time)

print("Modin was {}X faster than stock Pandas!".format(round(pandas_time/modin_time, 2)))

# ### `df.applymap`

# Long apply function
t6 = time.time()
print(pandas_df.applymap(lambda x: x + 1))
pandas_time = time.time() - t6
print(" stock Pandas wall time for completion in seconds:",pandas_time)

# Long apply function
t7 = time.time()
print(modin_df.applymap(lambda x: x + 1))
modin_time = time.time() - t7
print("Modin wall time for completion in seconds:",modin_time)

print("Modin was {}X faster than stock Pandas!".format(round(pandas_time/modin_time, 2)))

# ### `pd.concat([df, df])`

# Concat
t8 = time.time()
print(pandas.concat([pandas_df, pandas_df], axis=0))
pandas_time = time.time() - t8
print("stock Pandas wall time for completion in seconds:",pandas_time)

# Concat
t9 = time.time()
print(pd.concat([modin_df, modin_df], axis=0))
modin_time = time.time() - t9
print("Modin wall time for completion in seconds:",modin_time)

print("Modin was {}X faster than stock Pandas!".format(round(pandas_time/modin_time, 2)))

# ## Modin Coverage Examples

# The Modin package supports a large variety of Pandas functions.

# Here are some examples:

# ### Count
modin_df.count()

# ### Filter
# Keep only columns whose name ends in "0".
modin_df.filter(regex='0$', axis=1)

# ### iloc
modin_df.iloc[0]
modin_df.iloc[-1]
modin_df.iloc[:,0]
modin_df.iloc[:,-1]

# ## Series
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])

# ### DataFrame to NumPy Array
modin_df.to_numpy()

# ### Series to NumPy Array
ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
ser.to_numpy(dtype=object)
ser.to_numpy(dtype="datetime64[ns]")

# ### Set Options
pd.set_option('compute.use_bottleneck', False)
pd.set_option('compute.use_numexpr', False)

# ### Unique Function for Series
pd.unique(pd.Series([2, 1, 3, 3]))

# Sentinel line checked by the sample's test harness — do not change.
print("[CODE_SAMPLE_COMPLETED_SUCCESFULLY]")
AI-and-Analytics/Getting-Started-Samples/IntelModin_GettingStarted/IntelModin_GettingStarted.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Teaching notebook: pandas fundamentals (reading, sorting, subsetting,
# grouping, indexing, slicing, pivoting) on three local CSVs.  Several
# cells deliberately leave the exercise code commented out for students
# to complete; those cells will raise NameError if run as-is (flagged
# with NOTE(review) below).

import pandas as pd
import numpy as np

path=""  # directory prefix for the CSV files; "" = current directory

# ## Pandas
# Pandas will be a major tool of interest throughout the course. It contains data structures and data manipulation tools designed to make data cleaning and analysis fast and easy in Python. Pandas is often used in tandem with numerical computing tools like NumPy and SciPy, analytical libraries like statsmodels and scikit-learn, and data visualization libraries like matplotlib.
#
# ## <font style="color:rgb(34,169,34)"> File Reading </font>
#
# A DataFrame represents a rectangular table of data and contains an ordered collection of columns, each of which can be a different value type (numeric, string, boolean, etc.). The DataFrame has both a row and column index.
#
#

homelessness = pd.read_csv(path+"homelessness.csv")
homelessness

# ## <font style="color:rgb(34,169,34)">head() method is used to return top n (5 by default) rows of a data frame or series.
# </font>

homelessness.head()

# Print the shape of homelessness
print(homelessness.shape)

# Print a description of homelessness
print(homelessness.describe())

# Print the column index of homelessness
print(homelessness.columns)

# Print the row index of homelessness
print(homelessness.index)

# # Sorting and subsetting

# +
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sort_values.html
# -

# Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")# show documentation & why we provide a list
# as the parameter & need to store in a variable

# Print the top few rows
homelessness_ind.head()

# +
# Sort homelessness by region, then descending family members
homelessness_reg_fam = homelessness.sort_values(["region","family_members"],ascending=[True,False])

# Print the top few rows
homelessness_reg_fam.head()
# -

# ### Subsetting columns

# +
# Select the individuals column (single label -> Series)
individuals = homelessness["individuals"]

# Print the head of the result
individuals.head()

# +
# Select the state and family_members columns (list of labels -> DataFrame)
state_fam = homelessness[["state","family_members"]] # Providing list

# Print the head of the result
state_fam.head()

# +
# Filter for rows where individuals is greater than 10000
# (exercise left for the student)

# See the result
#ind_gt_10k

# +
# Filter for rows where family_members is less than 1000
# and region is Pacific
# homelessness["family_members"]<1000
# homelessness["region"]=="Pacific"

# See the result
#fam_lt_1k_pac
# -

# # Adding new columns

# +
# Add total col as sum of individuals and family_members
homelessness["total"] = homelessness["individuals"] + homelessness["family_members"]
#homelessness.head()

# Add p_individuals col as proportion of individuals
homelessness["p_individuals"] = homelessness["individuals"] / homelessness["total"]
#homelessness.head()

# See the result
# homelessness.head()
# -

# # Summary statistics

# this data set contains weekly sales in US dollars in various stores
# each store has id number and specific store type
# unemp is national unemployment rate that week

sales = pd.read_csv(path+"sales_subset.csv")
sales

# Print the mean of weekly_sales
print(sales["weekly_sales"].mean())

# Print the median of weekly_sales
print(sales["weekly_sales"].median())

# +
# Print the maximum of the date column
print(sales['date'].max())

# Print the minimum of the date column
print(sales['date'].min())

# +
# A custom IQR function
def iqr(column):
    # Interquartile range: Q3 - Q1 of a Series.
    return column.quantile(0.75) - column.quantile(0.25)

# Print IQR of the temperature_c column
# The agg() method allows you to apply a function or a list of function names to be executed along
# one of the axis of the DataFrame, default 0, which is the index (row) axis.
# Note: the agg() method is an alias of the aggregate() method.
print(sales["temperature_c"].agg(iqr))

# +
# A custom IQR function (re-defined identically for the next exercise cell)
def iqr(column):
    return column.quantile(0.75) - column.quantile(0.25)

# Update to print IQR of temperature_c, fuel_price_usd_per_l, & unemployment
# (exercise left for the student)

# +
# Update to print IQR and median of temperature_c, fuel_price_usd_per_l, & unemployment

# See the documentation as to what agg accepts for the function
# -

# # Dropping duplicates

# +
# Drop duplicate type combinations
# sales['date']= pd.to_datetime(sales['date'])
# sales.head()
# store_types = sales.drop_duplicates(subset = ['type'])
#store_types.head()
# -

# # Grouping

# +
# Group by type; calc total weekly sales
# sales_by_type = sales.groupby("type")
# sales_by_type = sales.groupby("type")["weekly_sales"].sum()
# sales_by_type

# +
# sales_by_type_is_holiday = sales.groupby(["type", "is_holiday"])
# sales_by_type_is_holiday = sales.groupby(["type", "is_holiday"])["weekly_sales"].sum()

# NOTE(review): the assignments above are commented out, so the next line
# raises NameError unless a previous run defined sales_by_type_is_holiday.
sales_by_type_is_holiday.head()
# -

sales.head()

# +
# Import numpy with the alias np
import numpy as np

# For each store type, aggregate weekly_sales: get min, max, mean, and median
sales_stats = sales.groupby("type")["weekly_sales"].agg([np.min, np.max, np.mean, np.median])

# Print sales_stats
sales_stats.head()
# -

# # Indexes

temperatures = pd.read_csv(path+"temperatures.csv")
temperatures.head()

temperatures_ind = temperatures.set_index("city")
temperatures_ind.head()

# +
# Index temperatures by country & city
# temperatures_ind = temperatures.set_index(["country","city"])
# temperatures_ind

# List of tuples: Brazil, Rio De Janeiro & Pakistan, Lahore
# rows_to_keep = [("Brazil","Rio De Janeiro"),("Pakistan","Lahore")]

# Subset for rows to keep
# NOTE(review): rows_to_keep is only defined in the commented lines above,
# so this raises NameError as-is; it also needs the MultiIndex version of
# temperatures_ind from the commented set_index call.
temperatures_ind.loc[rows_to_keep]
# -

# # Slicing

# Sort the index of temperatures_ind (lexsorted index is required for
# label slicing below)
temperatures_srt = temperatures_ind.sort_index()
temperatures_srt.head()

# Subset rows from Pakistan to Russia
temperatures_srt.loc["Pakistan":"Russia"]

# Try to subset rows from Lahore to Moscow
temperatures_srt.loc["Lahore":"Moscow"]

# Subset rows from Pakistan, Lahore to Russia, Moscow
temperatures_srt.loc[("Pakistan", "Lahore"):("Russia", "Moscow")]

temperatures_srt.loc[("India","Hyderabad"):("Iraq","Baghdad"),"date":"avg_temp_c"]

temperatures['date']= pd.to_datetime(temperatures['date'])
temperatures.head()

# Use Boolean conditions to subset temperatures for rows in 2010 and 2011
temperatures_bool = temperatures[(temperatures["date"] >= "2010-01-01") & (temperatures["date"] <= "2011-12-31")]
temperatures_bool.head()

# +
# Set date as the index and sort the index
temperatures_ind = temperatures.set_index("date").sort_index()

# Use .loc[] to subset temperatures_ind for rows in 2010 and 2011
# (partial-string datetime indexing)
temperatures_ind.loc["2010":"2011"]
# -

# Use .loc[] to subset temperatures_ind for rows from Aug 2010 to Feb 2011
temperatures_ind.loc["2010-08":"2011-02"]

# Get 23rd row, 2nd column (index 22, 1)
print(temperatures.iloc[22, 1])

# Use slicing to get the first 5 rows
temperatures.iloc[:5]

# Use slicing to get columns 3 to 4
temperatures.iloc[:, 2:4]

# Use slicing in both directions at once
temperatures.iloc[:5, 2:4]

# +
# Add a year column to temperatures
temperatures["year"] = temperatures["date"].dt.year

# Pivot avg_temp_c by country and city vs year
temp_by_country_city_vs_year = temperatures.pivot_table("avg_temp_c", index = ["country", "city"], columns = "year")

# See the result
temp_by_country_city_vs_year.head()
# -

# Subset for Egypt to India
temp_by_country_city_vs_year.loc["Egypt":"India"]

# Subset for Egypt, Cairo to India, Delhi
temp_by_country_city_vs_year.loc[("Egypt", "Cairo"):("India", "Delhi")]

# Subset in both directions at once
temp_by_country_city_vs_year.loc[("Egypt", "Cairo"):("India", "Delhi"), "2005":"2010"]

temp_by_country_city_vs_year.head()

# Get the worldwide mean temp by year
mean_temp_by_year = temp_by_country_city_vs_year.mean()
mean_temp_by_year.head()

# Filter for the year that had the highest mean temp
mean_temp_by_year[mean_temp_by_year == mean_temp_by_year.max()]

# Get the mean temp by city
mean_temp_by_city = temp_by_country_city_vs_year.mean(axis="columns")
mean_temp_by_city.head()

# Filter for the city that had the lowest mean temp
mean_temp_by_city[mean_temp_by_city == mean_temp_by_city.min()]
Session 6 - Data Analysis with Pandas (Python)/Data Analysis with Pandas _ Prepared by Zohaib Azam.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory data analysis of the Titanic dataset: environment report,
# plotting defaults, then scatter / box / strip / histogram views of Age,
# Fare, Pclass and Survived.  Reads the CSVs from hard-coded local paths.

from sklearn.metrics import make_scorer, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from pandas import get_dummies
import matplotlib as mpl
#import xgboost as xgb
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
import sklearn
import scipy
import numpy
import json
import sys
import csv
import os

# Report library versions for reproducibility of the notebook's output.
print('matplotlib: {}'.format(matplotlib.__version__))
print('sklearn: {}'.format(sklearn.__version__))
print('scipy: {}'.format(scipy.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
print('Python: {}'.format(sys.version))

# Global plotting defaults for every figure below.
sns.set(style='white', context='notebook', palette='deep')
pylab.rcParams['figure.figsize'] = 12,8
warnings.filterwarnings('ignore')  # NOTE(review): silences ALL warnings globally
mpl.style.use('ggplot')
sns.set_style('white')
# %matplotlib inline

cwd = os.getcwd()
cwd

# import train and test to play with it
# NOTE(review): absolute user-specific paths — adjust before running elsewhere.
df_train = pd.read_csv('/home/kadas/Desktop/data science/10-steps-to-become-a-data-scientist/1-python/input/train.csv')
df_test = pd.read_csv('/home/kadas/Desktop/data science/10-steps-to-become-a-data-scientist/1-python/input/test.csv')

# Fare vs Age scattered per passenger class, colored by survival.
g = sns.FacetGrid(df_train, hue="Survived", col="Pclass", margin_titles=True,
                  palette={1:"seagreen", 0:"gray"})
g=g.map(plt.scatter, "Fare", "Age",edgecolor="w").add_legend();

df_train.shape[0]
df_train.shape[1]
range(df_train.shape[0])

# Sorted-Age scatter to eyeball the age distribution and missing tail.
plt.figure(figsize=(8,6))
plt.scatter(range(df_train.shape[0]), np.sort(df_train['Age'].values))
plt.xlabel('index')
plt.ylabel('Survived')  # NOTE(review): label is misleading — the y values are Age
plt.title('Explore: Age')
plt.show()

# Age distribution per passenger class: box plot with individual points overlaid.
ax= sns.boxplot(x="Pclass", y="Age", data=df_train)
ax= sns.stripplot(x="Pclass", y="Age", data=df_train, jitter=True, edgecolor="gray")
plt.show()

# Histograms of every numeric column, then Age alone.
df_train.hist(figsize=(15,20));
plt.figure();
df_train["Age"].hist();
# has Gaussian Distribution
9-A Comprehensive Machine Learning Workflow with Python/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/dksifoua/Neural-Machine-Translation/blob/master/3%20-%20SeqToSeq%20Model%20with%20Luong%20Attention.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="KPSVRxBWaojK" outputId="a1fdef35-8086-4820-e8e8-174827183e55" colab={"base_uri": "https://localhost:8080/", "height": 357} # !nvidia-smi # + [markdown] id="1THDppI0aywR" # ## Load dependencies # + id="4a27rVu-atum" # !pip install tqdm --upgrade >> /dev/null 2>&1 # !pip install torchtext --upgrade >> /dev/null 2>&1 # !pip install spacy --upgrade >> /dev/null 2>&1 # !python -m spacy download de >> /dev/null 2>&1 # !python -m spacy download en >> /dev/null 2>&1 # + id="pybKH9lLa1YJ" import tqdm import spacy import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline from IPython.core.display import display, HTML import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchtext.data import Dataset, Example, Field from torchtext.data.iterator import BucketIterator from torchtext.data.metrics import bleu_score from torchtext.datasets import Multi30k # + id="reLYawYsa3H2" outputId="a93e4da5-6632-4c47-ba21-f7139fbcaa2b" colab={"base_uri": "https://localhost:8080/", "height": 34} warnings.simplefilter(action='ignore', category=UserWarning) warnings.simplefilter(action='ignore', category=FutureWarning) warnings.simplefilter(action='ignore', category=DeprecationWarning) SEED = 546 np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 
print(f'Device: {DEVICE}') # + [markdown] id="jEGjbsyJa7ut" # ## Load data # + id="deq5UW3Ha4_m" outputId="2caedccc-a1ed-4b8d-c4ec-ef31cdef9dc5" colab={"base_uri": "https://localhost:8080/", "height": 139} # %%time DE = Field(lower=True, tokenize='spacy', tokenizer_language='de', include_lengths=True) EN = Field(init_token='<sos>', eos_token='<eos>', lower=True, tokenize='spacy', tokenizer_language='en', include_lengths=True) train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN)) print(f'train set size: {len(train_data.examples):,}') print(f'valid set size: {len(valid_data.examples):,}') print(f'test set size: {len(test_data.examples):,}') print(vars(train_data.examples[0])) # + [markdown] id="iFLBuKIfa_ts" # ## Build vocabularies # + id="K-LDzjN7a-Nn" outputId="62fdc172-0310-4130-db7a-c088befc3318" colab={"base_uri": "https://localhost:8080/", "height": 85} # %%time MIN_COUNT = 2 DE.build_vocab(train_data, min_freq=MIN_COUNT, specials=['<unk>', '<pad>']) EN.build_vocab(train_data, min_freq=MIN_COUNT, specials=['<sos>', '<eos>', '<unk>', '<pad>']) print(f'Length of DE vocabulary: {len(DE.vocab):,}') print(f'Length of EN vocabulary: {len(EN.vocab):,}') # + [markdown] id="_guegzlSbDw4" # ## Modeling # # ***Encoder layer*** # + id="3_v2sKZ3bDIo" class EncoderLayer(nn.Module): def __init__(self, vocab_size, embedding_size, hidden_size, n_layers, embedding_dropout, recurrent_dropout): super(EncoderLayer, self).__init__() self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.n_layers = n_layers self.embedding_dropout = embedding_dropout self.recurrent_dropout = recurrent_dropout if n_layers > 1 else 0 self.embedding = nn.Embedding(vocab_size, embedding_size) self.gru = nn.GRU(embedding_size, hidden_size, num_layers=n_layers, dropout=self.recurrent_dropout, bidirectional=True) def forward(self, input_sequences, sequence_lengths): """ :param Tensor[seq_len, batch_size] input_sequences :param 
Tensor[batch_size,] sequence_lengths :return Tensor[seq_len, batch_size, hidden_size * 2] outputs :return Tensor[n_layers * 2, batch_size, hidden_size] h_state """ embedded = self.embedding(input_sequences) embedded = F.dropout(embedded, p=self.embedding_dropout) packed = nn.utils.rnn.pack_padded_sequence(embedded, sequence_lengths) outputs, h_state = self.gru(packed) outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs) return outputs, h_state # + [markdown] id="ccN8h3C1bXpt" # ***Attention layer*** # + id="vb2FgJLVbXFE" class LuongAttnLayer(nn.Module): def __init__(self, hidden_size): super(LuongAttnLayer, self).__init__() self.hidden_size = hidden_size self.W = nn.Linear(hidden_size * 2, hidden_size) def forward(self, h_state, enc_outputs, mask): """ :params Tensor[1, batch_size, hidden_size] h_state :params Tensor[seq_len, batch_size, hidden_size * 2] enc_outputs :params Tensor[seq_len, batch_size] mask :return Tensor[seq_len, batch_size, 1] """ enc_outputs = self.W(enc_outputs) # [seq_len, batch_size, hidden_size] scores = torch.sum(enc_outputs * h_state, dim=-1).unsqueeze(-1) # [seq_len, batch_size, 1] mask = mask.unsqueeze(2) # [seq_len, batch_size, 1] Apply mask to ignore <pad> tokens scores = scores.masked_fill(mask == 0, 1e-18) attn_weights = F.softmax(scores, dim=0) # [seq_len, batch_size, 1] return attn_weights # + [markdown] id="xAgeH0MMc8Zp" # ***Decoder layer*** # + id="fmdFxAMfc7gk" class DecoderLayer(nn.Module): def __init__(self, vocab_size, embedding_size, hidden_size, n_layers, embedding_dropout, recurrent_dropout, attention_layer): super(DecoderLayer, self).__init__() self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.n_layers = n_layers self.embedding_dropout = embedding_dropout self.recurrent_dropout = recurrent_dropout if n_layers > 1 else 0 self.attention_layer = attention_layer self.embedding = nn.Embedding(vocab_size, embedding_size) self.gru = nn.GRU(embedding_size, hidden_size, 
num_layers=n_layers, dropout=self.recurrent_dropout) self.fc = nn.Linear(hidden_size * 3, vocab_size) def forward(self, input_word_index, h_state_prev, enc_outputs, mask): """ :param Tensor[batch_size,] input_word_index :param Tensor[n_layers, batch_size, hidden_size] h_state_prev :param Tensor[seq_len, batch_size, hidden_size * 2] enc_outputs :param Tensor[seq_len, batch_size] mask :return Tensor[batch_size, vocab_size] logit :return Tensor[n_layers, batch_size, hidden_size] h_state :return Tensor[batch_size, seq_len] attn_weights """ embedded = self.embedding(input_word_index.unsqueeze(0)) embedded = F.dropout(embedded, p=self.embedding_dropout) outputs, h_state = self.gru(embedded, h_state_prev) attn_weights = self.attention_layer(h_state=outputs, enc_outputs=enc_outputs, mask=mask) # [1, batch_size, 1] context_vector = torch.sum(enc_outputs * attn_weights, dim=0).unsqueeze(0) # [1, batch_size, hidden_size * 2] new_input = torch.cat((outputs, context_vector), dim=2) # [1, batch_size, hidden_size * 3] New input logit = self.fc(new_input.squeeze(0)) # [batch_size, vocab_size] return logit, h_state, attn_weights.squeeze(2) # + [markdown] id="SafL7TCtyQZf" # ***Sequence-to-sequence model*** # + id="tt6_x90JyLA7" class SeqToSeqNet(nn.Module): def __init__(self, encoder, decoder, device, pad_index): assert encoder.n_layers == decoder.n_layers, 'Encoder and Decoder must have the same number of reccurent layers' assert encoder.hidden_size == decoder.hidden_size, 'Encoder and Decoder must have the same number of reccurrent hidden units' super(SeqToSeqNet, self).__init__() self.encoder = encoder self.decoder = decoder self.device = device self.pad_index = pad_index self.init_h0 = nn.Linear(encoder.n_layers * 2, decoder.n_layers) def create_mask(self, src_sequences): return src_sequences != self.pad_index def encode(self, input_sequences, sequence_lengths): enc_outputs, h_state = self.encoder(input_sequences, sequence_lengths) h_state = 
torch.tanh(self.init_h0(h_state.permute(1, 2, 0))) # [batch_size, hidden_size, n_layers] h_state = h_state.permute(2, 0, 1) # [n_layers, batch_size, hidden_size] mask = self.create_mask(input_sequences) # [seq_len, batch_size] return enc_outputs, h_state, mask def sort_batches(self, dest_sequences, dest_lengths, h_state, enc_outputs, mask): sorted_dest_lengths, sorted_indices = torch.sort(dest_lengths, dim=0, descending=True) sorted_dest_sequences = dest_sequences[:, sorted_indices] h_state = h_state[:, sorted_indices, :] enc_outputs = enc_outputs[:, sorted_indices, :] mask = mask[:, sorted_indices] # We won't decode at the <eos> position, since we've finished generating as soon as we generate <eos>. # So, decoding lengths are actual lengths - 1 sorted_decode_lengths = (sorted_dest_lengths - 1).tolist() return sorted_dest_sequences, sorted_decode_lengths, h_state, enc_outputs, mask def decode(self, h_state, enc_outputs, mask, sorted_dest_sequences, sorted_decode_lengths, tf_ratio): batch_size, last = sorted_dest_sequences.size(1), None logits = torch.zeros(max(sorted_decode_lengths), batch_size, self.decoder.vocab_size).to(self.device) for t in range(max(sorted_decode_lengths)): batch_size_t = sum([l > t for l in sorted_decode_lengths]) if last is not None: if np.random.rand() < tf_ratio: input_word_index = last[:batch_size_t] # in_ [batch_size,] else: input_word_index = sorted_dest_sequences[t, :batch_size_t] # in_ [batch_size,] else: input_word_index = sorted_dest_sequences[t, :batch_size_t] # in_ [batch_size,] logit, h_state, _ = self.decoder(input_word_index=input_word_index, h_state_prev=h_state[:, :batch_size_t, :].contiguous(), enc_outputs=enc_outputs[:, :batch_size_t, :], mask=mask[:, :batch_size_t]) # logit: [batch_size, vocab_size] - h_state: [n_layers, batch_size, hidden_size] logits[t, :batch_size_t, :] = logit last = torch.argmax(F.softmax(logit, dim=1), dim=1) # [batch_size,] return logits def forward(self, src_sequences, src_lengths, dest_sequences, 
dest_lengths, tf_ratio): """ :param Tensor[seq_len, batch_size] src_sequences :param Tensor[batch_size,] src_lengths :param Tensor[seq_len, batch_size] dest_sequences :param Tensor[batch_size,] dest_lengths :param float tf_ratio :return Tensor[max(decode_lengths), batch_size, vocab_size] logits :return Tensor[seq_len, batch_size] sorted_dest_sequences :return list[max(decode_lengths) - 1] sorted_decode_lengths """ enc_outputs, h_state, mask = self.encode(src_sequences, src_lengths) sorted_dest_sequences, sorted_decode_lengths, h_state, enc_outputs, mask = self.sort_batches(dest_sequences, dest_lengths, h_state, enc_outputs, mask) logits = self.decode(h_state, enc_outputs, mask, sorted_dest_sequences, sorted_decode_lengths, tf_ratio) return logits, sorted_dest_sequences, sorted_decode_lengths # + [markdown] id="n2nLnkL1yjfk" # ***Training routines*** # + id="qmr7cTr_yhvi" class AverageMeter: def __init__(self): self.value = 0. self.sum = 0. self.count = 0 self.average = 0. def reset(self): self.value = 0. self.sum = 0. self.count = 0 self.average = 0. 
def update(self, value, n=1): self.value = value self.sum += value * n self.count += n self.average = self.sum / self.count # + id="HsTGn5d1ymvz" def accuracy(outputs, target_sequences, k=5): batch_size = outputs.size(1) _, indices = outputs.topk(k, dim=1, largest=True, sorted=True) correct = indices.eq(target_sequences.view(-1, 1).expand_as(indices)) correct_total = correct.view(-1).float().sum() # 0D tensor return correct_total.item() * (100.0 / batch_size) # + id="8hlw5aLny4Kb" class Trainer: def __init__(self, model, optimizer, criterion, train_iterator, valid_iterator): self.model = model self.optimizer = optimizer self.criterion = criterion self.train_iterator = train_iterator self.valid_iterator = valid_iterator def clip_gradients(self, grad_clip): if grad_clip is not None: for group in self.optimizer.param_groups: for param in group['params']: if param.grad is not None: param.grad.data.clamp_(-grad_clip, grad_clip) def adjust_lr(self, shrink_factor=0.9, verbose=True): if verbose: print("\nDecaying learning rate.") for param_group in self.optimizer.param_groups: param_group['lr'] = param_group['lr'] * shrink_factor if verbose: print("The new learning rate is %f\n" % (self.optimizer.param_groups[0]['lr'],)) def adjust_tf(self, tf_ratio, shrink_factor=0.9, verbose=False): tf_ratio = tf_ratio * shrink_factor if verbose: print("The teacher forcing rate is %f\n" % (tf_ratio,)) return tf_ratio def train_step(self, epoch, grad_clip, tf_ratio): loss_tracker, acc_tracker = AverageMeter(), AverageMeter() self.model.train() progress_bar = tqdm.tqdm(enumerate(self.train_iterator), total=len(self.train_iterator)) for i, data in progress_bar: logits, sorted_dest_sequences, sorted_decode_lengths = self.model(*data.src, *data.trg, tf_ratio=tf_ratio) sorted_dest_sequences = sorted_dest_sequences[1:, :] # Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos> logits = nn.utils.rnn.pack_padded_sequence(logits, sorted_decode_lengths).data # 
Remove paddings sorted_dest_sequences = nn.utils.rnn.pack_padded_sequence(sorted_dest_sequences, sorted_decode_lengths).data # Remove paddings loss = criterion(logits, sorted_dest_sequences) optimizer.zero_grad() loss.backward() self.clip_gradients(grad_clip) optimizer.step() loss_tracker.update(loss.item(), sum(sorted_decode_lengths)) acc_tracker.update(accuracy(logits, sorted_dest_sequences), sum(sorted_decode_lengths)) loss_, ppl_, acc_ = loss_tracker.average, np.exp(loss_tracker.average), acc_tracker.average progress_bar.set_description(f'Epoch: {epoch+1:02d} - loss: {loss_:.3f} - ppl: {ppl_:.3f} - acc: {acc_:.3f}%') return loss_tracker.average, np.exp(loss_tracker.average), acc_tracker.average def validate(self, epoch): loss_tracker, acc_tracker = AverageMeter(), AverageMeter() self.model.eval() with torch.no_grad(): progress_bar = tqdm.tqdm(enumerate(self.valid_iterator), total=len(self.valid_iterator)) for i, data in progress_bar: logits, sorted_dest_sequences, sorted_decode_lengths = self.model(*data.src, *data.trg, tf_ratio=0.) 
sorted_dest_sequences = sorted_dest_sequences[1:, :] logits = nn.utils.rnn.pack_padded_sequence(logits, sorted_decode_lengths).data sorted_dest_sequences = nn.utils.rnn.pack_padded_sequence(sorted_dest_sequences, sorted_decode_lengths).data loss = criterion(logits, sorted_dest_sequences) loss_tracker.update(loss.item(), sum(sorted_decode_lengths)) acc_tracker.update(accuracy(logits, sorted_dest_sequences), sum(sorted_decode_lengths)) loss_, ppl_, acc_ = loss_tracker.average, np.exp(loss_tracker.average), acc_tracker.average progress_bar.set_description(f'Epoch: {epoch+1:02d} - val_loss: {loss_:.3f} - val_ppl: {ppl_:.3f} - val_acc: {acc_:.3f}%') return loss_tracker.average, np.exp(loss_tracker.average), acc_tracker.average def train(self, n_epochs, grad_clip, tf_ratio): history = {'acc': [], 'loss': [], 'ppl': [], 'val_ppl': [], 'val_acc': [], 'val_loss': []} best_loss, last_improv = np.inf, 0 for epoch in range(n_epochs): if last_improv == 4: print('Training Finished - The model has stopped improving since last 4 epochs') break if last_improv > 0: self.adjust_lr() loss, ppl, acc = self.train_step(epoch, grad_clip, tf_ratio) val_loss, val_ppl, val_acc = self.validate(epoch) tf_ratio = self.adjust_tf(tf_ratio) if best_loss > val_loss: best_loss, last_improv = val_loss, 0 torch.save(self.model.state_dict(), 'seq2seq-luong-attn.pth') else: last_improv += 1 print(f'\nLast improvement since epoch {epoch - last_improv + 1}') history['acc'].append(acc) history['ppl'].append(ppl) history['loss'].append(loss) history['val_acc'].append(val_acc) history['val_ppl'].append(val_ppl) history['val_loss'].append(val_loss) return history # + [markdown] id="QyhnOqMMy9HH" # ***Train the model*** # + id="bx7nmXCpy7ue" N_LAYERS = 2 HIDDEN_SIZE = 256 EMBED_SIZE = 300 EMBED_DROPOUT = 0.25 REC_DROPOUT = 0.25 N_EPOCHS = 10 BATCH_SIZE = 64 LR = 1e-3 GRAD_CLIP = 1.0 TF_RATIO = 1.0 # + id="4bhn-IkhzAeQ" outputId="508943c9-22cf-42d5-9ad8-47a5479de466" colab={"base_uri": 
"https://localhost:8080/", "height": 289} encoder = EncoderLayer(vocab_size=len(DE.vocab), embedding_size=EMBED_SIZE, hidden_size=HIDDEN_SIZE, n_layers=N_LAYERS, embedding_dropout=EMBED_DROPOUT, recurrent_dropout=REC_DROPOUT) attention = LuongAttnLayer(hidden_size=HIDDEN_SIZE) decoder = DecoderLayer(vocab_size=len(EN.vocab), embedding_size=EMBED_SIZE, hidden_size=HIDDEN_SIZE, n_layers=N_LAYERS, embedding_dropout=EMBED_DROPOUT, recurrent_dropout=REC_DROPOUT, attention_layer=attention) seq2seq = SeqToSeqNet(encoder=encoder, decoder=decoder, device=DEVICE, pad_index=EN.vocab.stoi[EN.pad_token]).to(DEVICE) optimizer = optim.RMSprop(params=seq2seq.parameters(), lr=LR) criterion = nn.CrossEntropyLoss() print(f'Number of parameters of the model: {sum(p.numel() for p in seq2seq.parameters() if p.requires_grad):,}') print(seq2seq) train_iterator, valid_iterator, test_iterator = BucketIterator.splits((train_data, valid_data, test_data), batch_size=BATCH_SIZE, sort_key=lambda x: len(x.src), sort_within_batch=True, device=DEVICE) trainer = Trainer(model=seq2seq, optimizer=optimizer, criterion=criterion, train_iterator=train_iterator, valid_iterator=valid_iterator) # + id="-1o0XCCkzQiS" outputId="2744dfb5-9ce2-460d-cfeb-40fff77f43f8" colab={"base_uri": "https://localhost:8080/", "height": 357} history = trainer.train(n_epochs=N_EPOCHS, grad_clip=GRAD_CLIP, tf_ratio=TF_RATIO) # + id="TT4fw9imzlfx" outputId="97f2b210-34ec-491a-bfd4-8bdcc941c4db" colab={"base_uri": "https://localhost:8080/", "height": 350} _, axes = plt.subplots(1, 3, figsize=(15, 5)) axes[0].plot(history['loss'], label='train') axes[0].plot(history['val_loss'], label='valid') axes[0].set_title('Loss history') axes[0].set_xlabel('Epoch') axes[0].set_ylabel('Loss') axes[0].grid(True) axes[0].legend() axes[1].plot(history['ppl'], label='train') axes[1].plot(history['val_ppl'], label='valid') axes[1].set_title('Perplexity history') axes[1].set_xlabel('Epoch') axes[1].set_ylabel('Perplexity') axes[1].grid(True) 
axes[1].legend()

# Third panel: training vs. validation top-5 accuracy per epoch.
axes[2].plot(history['acc'], label='train')
axes[2].plot(history['val_acc'], label='valid')
axes[2].set_title('Top-5 Accuracy & BLEU-4 history')
axes[2].set_xlabel('Epoch')
axes[2].set_ylabel('Accuracy & BLEU-4 (%)')
axes[2].grid(True)
axes[2].legend()

plt.show()

# + [markdown] id="nRud6vwD2WS3"
# ## Evaluation - Beam search & BLEU score

# + id="HHka4YRJ2UjK"
from beam_utils import Node, find_best_path


def evaluate(model, data, beam_size, src_field, dest_field, max_len, device):
    """Translate every example of `data` with beam search and score the corpus with BLEU-4.

    :param model: trained seq2seq model exposing ``encode(...)`` and ``decoder(...)``
    :param data: torchtext dataset whose examples carry ``.src`` / ``.trg`` token lists
    :param int beam_size: number of hypotheses kept alive at each decoding step
    :param src_field: torchtext Field of the source language (numericalisation)
    :param dest_field: torchtext Field of the target language (vocab / special tokens)
    :param int max_len: hard cap on the number of decoding steps per sentence
    :param device: torch device tensors are moved to
    :return: (hypotheses, references, sources, bleu4) where bleu4 is a float in [0, 1]
    """
    src_sentences = [*map(lambda example: example.src, data.examples)]
    dest_sentences = [*map(lambda example: example.trg, data.examples)]
    # Numericalise each sentence individually (effective batch size 1 per example).
    data = [*zip([*map(lambda word_list: src_field.process([word_list]), src_sentences)],
                 [*map(lambda word_list: dest_field.process([word_list]), dest_sentences)])]
    references, hypotheses, sources = [], [], []
    model.eval()
    with torch.no_grad():
        for i, ((src_sequence, src_length), (dest_sequence, dest_length)) in tqdm.tqdm(enumerate(data), total=len(data)):
            src_sequence, src_length = src_sequence.to(device), src_length.to(device)
            dest_sequence, dest_length = dest_sequence.to(device), dest_length.to(device)
            enc_outputs, h_state, mask = model.encode(src_sequence, src_length)  # Encoding
            # Decoding: `tree` is a list of beam "levels"; each level holds the surviving nodes.
            # The root is a single node carrying the <sos> token and the encoder state.
            tree = [[Node(token=torch.LongTensor([dest_field.vocab.stoi[dest_field.init_token]]).to(device), states=(h_state,))]]
            for _ in range(max_len):
                next_nodes = []
                for node in tree[-1]:
                    if node.eos:  # Skip eos token
                        continue
                    logit, h_state, _ = model.decoder(input_word_index=node.token, h_state_prev=node.states[0].contiguous(), enc_outputs=enc_outputs, mask=mask)  # Decode
                    logp = F.log_softmax(logit, dim=1).squeeze(dim=0)  # [vocab_size] Get scores
                    topk_logps, topk_tokens = torch.topk(logp, beam_size)  # Get top k tokens & logps
                    for k in range(beam_size):
                        next_nodes.append(Node(token=topk_tokens[k, None], states=(h_state,), logp=topk_logps[k, None].cpu().item(), parent=node, eos=topk_tokens[k].cpu().item() == dest_field.vocab[dest_field.eos_token]))
                if len(next_nodes) == 0:
                    break  # every surviving hypothesis has emitted <eos>
                next_nodes = sorted(next_nodes, key=lambda node: node.logps, reverse=True)  # Sort next_nodes to get the best
                tree.append(next_nodes[:beam_size])  # Update the tree
            best_path = find_best_path(tree)  # Find the best path of the tree
            # Get the translation (best_path runs leaf-to-root, hence the reversal below)
            pred_translated = [*map(lambda node: dest_field.vocab.itos[node.token], best_path)]
            pred_translated = [*filter(lambda word: word not in [dest_field.init_token, dest_field.eos_token], pred_translated[::-1])]
            hypotheses.append(pred_translated)  # Update hypotheses
            # Update references (strip <sos>/<eos>/<pad> before scoring)
            references.append([[dest_field.vocab.itos[indice] for indice in dest_sequence if indice not in (
                dest_field.vocab.stoi[dest_field.init_token],
                dest_field.vocab.stoi[dest_field.eos_token],
                dest_field.vocab.stoi[dest_field.pad_token]
            )]])
            # Update sources
            sources.append([src_field.vocab.itos[indice] for indice in src_sequence if indice not in (
                src_field.vocab.stoi[src_field.init_token],
                src_field.vocab.stoi[src_field.eos_token],
                src_field.vocab.stoi[src_field.pad_token]
            )])
    assert len(hypotheses) == len(references) == len(sources)
    bleu4 = bleu_score(hypotheses, references, max_n=4, weights=[0.25, 0.25, 0.25, 0.25])  # Calculate BLEU-4 score
    return hypotheses, references, sources, bleu4


# + id="zp0_iB6b2obt" outputId="49090116-f7e3-48f1-e2a6-97ea5e1c0c66" colab={"base_uri": "https://localhost:8080/", "height": 272}
# Reload the best checkpoint saved by the Trainer before evaluating.
seq2seq.load_state_dict(torch.load('seq2seq-luong-attn.pth'))
seq2seq.to(DEVICE)

# + id="Zcz-VwqT2tT2" outputId="ee1b2b04-ebde-4124-e547-858bf7f9f991" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Report BLEU-4 for several beam widths on both validation and test splits.
bleu_scores = []
for beam_size in [1, 3, 5]:
    for name, data in [('validation', valid_data), ('test', test_data)]:
        _, _, _, bleu4 = evaluate(model=seq2seq, data=data, beam_size=beam_size, src_field=DE, dest_field=EN, max_len=50, device=DEVICE)
        bleu_scores.append((beam_size, name, bleu4))
for score in bleu_scores:
    print(f'BLEU-4: {score[2]*100:.3f}% with beam_size={score[0]} on {score[1]} data')

# + [markdown] id="bk_zUSvZLfjS"
# ## Inference

# + id="77jVLqmX2vN8"
def translate(sentences, model, beam_size, src_field, dest_field, max_len, device):
    """Beam-search translate raw sentences (list of str) or a torchtext Dataset.

    Unlike :func:`evaluate`, this also collects per-step attention weights and the
    total log-probability of each predicted translation.

    :param sentences: list of raw source strings, or a Dataset with .src/.trg examples
    :param model: trained seq2seq model exposing ``encode(...)`` and ``decoder(...)``
    :param int beam_size: beam width
    :param src_field: source-language torchtext Field
    :param dest_field: target-language torchtext Field
    :param int max_len: maximum decoding steps
    :param device: torch device
    :return: (sentences, translated_sentences, targets, attention_weights, pred_logps);
             `targets` is None when raw strings were given instead of a Dataset
    """
    if isinstance(sentences, list):
        sentences = [*map(src_field.preprocess, sentences)]
    targets = None
    if isinstance(sentences, Dataset):
        targets = [*map(lambda example: ' '.join(example.trg), sentences.examples)]
        sentences = [*map(lambda example: example.src, sentences.examples)]
    data = [*map(lambda word_list: src_field.process([word_list]), sentences)]
    translated_sentences, attention_weights, pred_logps = [], [], []
    model.eval()
    with torch.no_grad():
        for i, (src_sequence, src_length) in tqdm.tqdm(enumerate(data), total=len(data)):
            src_sequence, src_length = src_sequence.to(device), src_length.to(device)
            enc_outputs, h_state, mask = model.encode(src_sequence, src_length)
            # Beam-search tree rooted at <sos>; nodes also keep attention weights in states[1].
            tree = [[Node(token=torch.LongTensor([dest_field.vocab.stoi[dest_field.init_token]]).to(device), states=(h_state,))]]
            for _ in range(max_len):
                next_nodes = []
                for node in tree[-1]:
                    if node.eos:  # Skip eos token
                        continue
                    logit, h_state, attn_weights = model.decoder(input_word_index=node.token, h_state_prev=node.states[0].contiguous(), enc_outputs=enc_outputs, mask=mask)
                    logp = F.log_softmax(logit, dim=1).squeeze(dim=0)
                    topk_logps, topk_tokens = torch.topk(logp, beam_size)
                    for k in range(beam_size):
                        next_nodes.append(Node(token=topk_tokens[k, None], states=(h_state, attn_weights), logp=topk_logps[k, None].cpu().item(), parent=node, eos=topk_tokens[k].cpu().item() == dest_field.vocab[dest_field.eos_token]))
                if len(next_nodes) == 0:
                    break
                next_nodes = sorted(next_nodes, key=lambda node: node.logps, reverse=True)
                tree.append(next_nodes[:beam_size])
            best_path = find_best_path(tree)[::-1]
            # Get the translation
            pred_translated = [*map(lambda node: dest_field.vocab.itos[node.token], best_path)]
            pred_translated = [*filter(lambda word: word not in [
                dest_field.init_token, dest_field.eos_token
            ], pred_translated)]
            translated_sentences.append(' '.join(pred_translated))
            # Get probabilities
            pred_logps.append(sum([*map(lambda node: node.logps, best_path)]))
            # Get attention weights (skip the <sos> root node, which has none)
            attention_weights.append(torch.cat([*map(lambda node: node.states[1], best_path[1:])], dim=-1).cpu().numpy())
    sentences = [*map(lambda sentence: ' '.join(sentence), sentences)]
    return sentences, translated_sentences, targets, attention_weights, pred_logps


# + id="e_bgKvkzRRfy"
def plot_attention(src, trg, scores):
    """Draw a heatmap of attention `scores` with source tokens on the y-axis
    and target tokens on the x-axis (tokens obtained by whitespace split)."""
    fig, ax = plt.subplots()
    heatmap = ax.pcolor(scores, cmap='viridis')
    ax.set_xticklabels(trg.split(), minor=False, rotation='vertical')
    ax.set_yticklabels(src.split(), minor=False)
    # put the major ticks at the middle of each cell
    # and the x-ticks on top
    ax.xaxis.tick_top()
    ax.set_xticks(np.arange(scores.shape[1]) + 0.5, minor=False)
    ax.set_yticks(np.arange(scores.shape[0]) + 0.5, minor=False)
    ax.invert_yaxis()
    plt.colorbar(heatmap)
    plt.show()


# + id="Lw1UzfzFMmsO" outputId="1d8c2d21-31e3-4aff-abc0-8b45e02ca907" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Show 10 random test translations side by side with their attention maps.
sentences, translated_sentences, dest_sentences, attention_weights, pred_logps = translate(sentences=test_data, model=seq2seq, beam_size=5, src_field=DE, dest_field=EN, max_len=50, device=DEVICE)
indexes = np.random.choice(len(test_data.examples), size=10, replace=False)
print(indexes)
print()
for i in indexes:
    html = f'<p><span style="color:blue"><b>Source:</b> {sentences[i]}</span><br />'
    html += f'<span style="color:green"><b>Ground truth translation:</b> {dest_sentences[i]}</span><br />'
    html += f'<span style="color:red"><b>Predicted translation:</b> {translated_sentences[i]}</span></p>'
    display(HTML(html))
    plot_attention(sentences[i], translated_sentences[i], attention_weights[i])
    print('='*100)

# + [markdown] id="uGexDVoIOF2e"
# ## Error Analysis

# + id="mjn5gGytOIlh"
def get_dest_logp(model, data, src_field, dest_field, device):
    """Teacher-forced total log-probability of each ground-truth translation.

    For every example, the decoder is fed the ground-truth target tokens one by
    one and the log-probability the model assigns to each of them is summed.

    :param model: trained seq2seq model exposing ``encode(...)`` and ``decoder(...)``
    :param data: torchtext dataset with .src/.trg examples
    :param src_field: source-language torchtext Field
    :param dest_field: target-language torchtext Field
    :param device: torch device
    :return: list of float log-probability sums, one per example
    """
    dest_logps = []
    model.eval()
    with torch.no_grad():
        # NOTE(review): the loop variable `data` shadows the `data` parameter;
        # harmless because the parameter is not used again after enumerate().
        for i, data in tqdm.tqdm(enumerate(data), total=len(data)):
            src_sequence, src_length = src_field.process([data.src])
            dest_sequence, dest_length = dest_field.process([data.trg])
            src_sequence, src_length = src_sequence.to(device), src_length.to(device)
            dest_sequence, dest_length = dest_sequence.to(device), dest_length.to(device)
            enc_outputs, h_state, mask = model.encode(src_sequence, src_length)
            logps = []
            input_word_index = torch.LongTensor([dest_field.vocab.stoi[dest_field.init_token]]).to(device)
            for idx in range(dest_sequence.shape[0]):
                logit, h_state, _ = model.decoder(input_word_index=input_word_index, h_state_prev=h_state.contiguous(), enc_outputs=enc_outputs, mask=mask)
                logp = F.log_softmax(logit, dim=1).squeeze(dim=0)
                input_word_index = dest_sequence[idx]  # teacher forcing: feed the gold token next
                logps.append(logp[input_word_index].cpu().item())  # Get logp of ground truth ouput
            dest_logps.append(sum(logps))
    return dest_logps


# + id="HM-Os1lCSjJw" outputId="48854f5f-8d43-4570-b518-077fd166deb1" colab={"base_uri": "https://localhost:8080/", "height": 34}
dest_logps = get_dest_logp(model=seq2seq, data=test_data, src_field=DE, dest_field=EN, device=DEVICE)
assert len(pred_logps) == len(dest_logps) == len(test_data), f'{len(pred_logps)}, {len(dest_logps)}, {len(test_data)}'

# + id="CJOxhz0uSjit" outputId="ef4f9a50-f499-40d8-adb5-5f2e286cd424" colab={"base_uri": "https://localhost:8080/", "height": 51}
# A "beam search fault": the model scores the reference higher than its own
# prediction, i.e. the search (not the model) missed the better hypothesis.
beam_search_faults = np.array(dest_logps) > np.array(pred_logps)
beam_search_fault_rate = beam_search_faults.sum() / beam_search_faults.size
print(f'Beam search fault rate: {beam_search_fault_rate * 100:.3f}%')
print(f'Model fault rate: {(1 - beam_search_fault_rate) * 100:.3f}%')

# + id="J10JvYO3Slpm"
beam_search_fault_indexes = np.where(beam_search_faults == True)[0]
model_fault_indexes = np.where(beam_search_faults == False)[0]

# + id="-6Y8SjOLSqOK" outputId="1c8774d4-2bc6-4a66-d4dc-497e71a58b97" colab={"base_uri": "https://localhost:8080/", "height": 417}
# Display a random sample of beam-search faults.
for i in np.random.choice(beam_search_fault_indexes, size=5, replace=False):
    html = f'<p><span><b>Source:</b> {sentences[i]}</span><br />'
    html += f'<span style="color:green"><b>Ground truth translation:</b> {dest_sentences[i]}</span><br />'
    html += f'<span style="color:red"><b>Predicted translation (Beam search fault):</b> {translated_sentences[i]}</span><br />'
    display(HTML(html))
    print('='*100)

# + id="hqsK_U1ASugO" outputId="99abc148-3423-4e94-c521-15ec530ca637" colab={"base_uri": "https://localhost:8080/", "height": 417}
# Display a random sample of model faults.
for i in np.random.choice(model_fault_indexes, size=5, replace=False):
    html = f'<p><span><b>Source:</b> {sentences[i]}</span><br />'
    html += f'<span style="color:green"><b>Ground truth translation:</b> {dest_sentences[i]}</span><br />'
    html += f'<span style="color:blue"><b>Predicted translation (Model fault):</b> {translated_sentences[i]}</span><br />'
    display(HTML(html))
    print('='*100)

# + id="AUQ44Tl8Sw0k" outputId="b61af618-84a2-4b14-b4e3-6302e7dcdbd9" colab={"base_uri": "https://localhost:8080/", "height": 336}
# Compare the target-length distributions of the two fault categories.
lengths = np.array([*map(len, dest_sentences)])
_, axes = plt.subplots(1, 2, figsize=(12, 5))
axes[0].hist(lengths[beam_search_fault_indexes])
axes[0].set_title('Lengths of beam search faults')
axes[0].grid(True)
axes[1].hist(lengths[model_fault_indexes])
axes[1].set_title('Lengths of model faults')
axes[1].grid(True)
plt.show()

# + id="TqFE90BUS5uE"
3 - SeqToSeq Model with Luong Attention.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 3.1 Python als Taschenrechner # # ## 3.1.2 Conversion & Kommentare # # In dieser Übungseinheit lernst du Pythons numerische Datentypen genauer kennen. Du übst, anhand von Beispielen, wie du sie ineinander umwandeln kannst. Das bedeutet Conversion: die Umwandlung von Datentypen. Weiterhin erfährst und übst du, wie du mit Kommentaren Code verständlicher gestaltest. # # Im Anschluss dieser Übungseinheit kannst du ... # + die wichtigsten numerischen Datentypen voneinander unterscheiden # + numerische Datentypen umwandeln # + Kommentare setzen # ## 3.1.2.1 Numerische Datentypen # # <div class="alert alert-block alert-info"> # <font size="3"><b>Wiederholung:</b></font> # <br> # # * Ganzzahlen wie 1, 20 und 300 werden <b>Integers</b> genannt. # <br> # # * Fließkommazahlen wie 1.1, 2.22 und 3.333 (im Englischen und in Python mit einem Punkt statt Komma geschrieben) dagegen Floating Point Numbers, kurz: <b>Floats</b>. # <br> # * <b>Sobald ein Integer mit einem Float verrechnet wird (oder umgekehrt), ist das Ergebnis auch ein Float.</b> # </div> # <br> # # # Python verfügt neben Integer und Float noch über den Datentyp **complex**, für komplexe Zahlen. Dieser findet aber kaum Verwendung. # <br> # <br> # # ### Wenn beim Programmieren Integers den Floats vorgezogen werden, liegt das daran, dass ... # * Python mit Integers schneller rechnen kann # * Integers weniger Speicherplatz beanspruchen # # Letztendlich kommt es aber auf die Aufgabenstellung an, anhand derer du entscheidest, welcher numerische Datentyp für sie angebracht ist bzw. ob die Stellen nach dem Komma wissenswert sind oder nicht. # <br> # # <div class="alert alert-block alert-info"> # <font size="3"><b>Tipp:</b></font> <b>Mit welchem Datentypen arbeite ich eigentlich gerade? 
</b> # <br> # # Falls du dich einmal wundern solltest, weshalb du bei einer Rechnung eine Fehlermeldung erhältst oder sie nicht das richtige Ergebnis liefert, kann das daran liegen, dass du mit dem falschen Datentyp arbeitest. # <br> # # Das Folgende gilt für alle Datentypen: Solltest du dir einmal nicht sicher sein, mit welchem Datentyp du es gerade zu tun hast, kannst du dir den Datentyp jederzeit mit der Funktion <b>type()</b> anzeigen lassen. # </div> # <br> # # **Beispiel zur Ausgabe des Datentypen:** # + x = 3 type(x) # - # <b>x</b> bzw. die 3 ist demzufolge ein Integer. Das kürzt Python mit **int** ab. # # <div class="alert alert-block alert-warning"> # <font size="3"><b>Übung:</b></font> Welche Antwort liefert Python bei Floats? Probier es in der folgenden Code-Zelle aus! Du kannst auch noch weitere Datentypen testen, die du bereits kennst. # </div> # ## 3.1.2.2 Umwandlung (= Conversion) von numerischen Datentypen # # **Conversion** ermöglicht es uns, Datentypen ineinander umzuwandeln: aus Floats Integers zu zaubern und umgekehrt. Das Prinzip ist ähnlich zu dem der Type- sowie Print-Funktion: zuerst schreibst du den Namen der Funktion, also ob du in int (Integer) oder float (Float) umwandeln möchtest, dann folgt die Klammer, in welche du die umzuwandelnde Variable/Zahl schreibst. # <br> # Allgemeine Syntax von Funktionen: <font color = "green">funktionsname(übergabewert)</font> # <br> # **Beispiel zu Conversion:** # + a = 3.0 int(a) # Anwendung der Funktion: int() # - # Aus dem Float-Wert wurde somit ein Integer. # Das Äquivalent zu ``int()`` ist ``float()``. # # <br> # <b> In den folgenden Übungen geht es um die Conversion von numerischen Datentypen. 
Es geht los mit einem Beispiel und seiner Lösung:</b> # <div class="alert alert-block alert-warning"> # <font size="3"><b>Beispielaufgabe:</b></font> # <br> # # 1) Speichere das Ergebnis der folgenden Rechnung in der Variable <b>beispiel</b>: 3 plus 4 # <br> # 2) Wandle anschließend <b>beispiel</b> in Integer bzw. Float um (je nach Ergebnis): # </div> # + # Beispiellösung beispiel = 3 + 4 float(beispiel) # - # <div class="alert alert-block alert-info"> # <font size="3"><b>Tipp:</b></font> Du hast bereits erfahren, welche Rechenart welchen Datentypen ergibt. Bevor du nachrechnest, überleg dir, welchen Datentyps das Ergebnis sein wird. Es ist nicht zwingend notwendig, dir das Ergebnis extra ausgeben zu lassen, wenn du dir über den Datentypen sicher bist. # </div> # <br> # # <div class="alert alert-block alert-warning"> # <font size="3"><b>Übung a) :</b></font> # <br> # 1) Speichere das Ergebnis der folgenden Rechnung in der Variable <b>a</b>: 5 hoch 2 # <br> # # 2) Wandle anschließend <b>a</b> in Integer bzw. Float um (je nach Ergebnis): # </div> # # <div class="alert alert-block alert-warning"> # <font size="3"><b>Übung b):</b></font> # <br> # 1) Speichere das Ergebnis der folgenden Rechnung in der Variable <b>b</b>: der Rest von 44 geteilt durch 2 # <br> # # 2) Überschreibe <b>b</b> mit ihrer Conversion zu Integer bzw. Float um (je nach Ergebnis). # <br> # # 3) Gib anschließend das umgewandelte <b>b</b> mittels <b>print()</b> aus: # </div> # <div class="alert alert-block alert-warning"> # <font size="3"><b>Übung c):</b></font> # <br> # 1) Speichere das Ergebnis der folgenden Fragestellung in der Variable <b>c</b>: Wie viele Male passt die 5 vollständig in die 88? Diese Rechenart wird in Kapitel 3.1.1 behandelt. # <br> # # 2) Wandle anschließend <b>c</b> in Integer bzw. 
Float innerhalb von <b>print()</b> um und gib somit gleichzeitig die umgewandelte Variable aus: # </div> # <div class="alert alert-block alert-success"> # <b>Super!</b> Jetzt kennst du schon Pythons numerische Datentypen und kannst sie ineinander konvertieren, wie du sie gerade benötigst. # <br> # # Nun machen wir einen kleinen Ausflug zu den Kommentierungsmöglichkeiten in Python. Mit ihnen kannst du dir und anderen wichtige Notizen im Code hinterlegen. # ## 3.2.3 Kommentare in Python # # Du kennst nun schon einige wichtige Grundfunktionen von Python und wie du es zum Rechnen verwenden kannst. Bevor es damit noch ein Stück weiter in die Tiefe geht, lernst du zunächst die sehr nützlichen und praktischen Kommentarfunktionen von Python kennen. # # **Sie sind praktisch, denn ...** # * du kannst dir damit hilfreiche **Notizen innerhalb der Code-Zellen** hinterlegen # * durch sie können auch andere, die mit deinem **Code** weiterarbeiten möchten, **besser nachvollziehen**, was du dir bei ihm gedacht hast # # **Es gibt zwei Arten von Kommentaren:** # * einzeilige Kommentare erscheinen in blau und beginnen mit: <font color = green>#</font> Kommentar (... Raute) # * mehrzeilige Kommentare erscheinen in rot und werden geschrieben innerhalb von: <font color = green>"""</font> Kommentar <font color = green>"""</font> (... je 3 Anführungsstrichen) # # <div class="alert alert-block alert-info"> # <font size="3"><b>Hinweise zu einzeiligen Kommentaren:</b></font> # <br> # Du kannst einzeiligen Kommentar sowohl über Code, als auch direkt daneben setzen. # <br> # # Wird er neben Code gesetzt, ist es Python-konform, einen Abstand von 2 Leerzeichen zum Code einzuhalten. Dies macht den Code leserlicher und du hältst dich damit an die professionellen Richtlinien zum Schreiben von Python-Code. # <br> # Python-Konventionen wie diese sind vergleichbar mit "Ausdruck" im Sprachunterricht. 
Grammatikfehler sind dementsprechend solche Fehler im Code, die Fehlermeldungen verursachen und das Programm zum Absturz bringen. Der Ausdruck zeigt darüber hinaus, wie gut du eine Sprache stilistisch anwenden kannst. # <br> # Indem du dich an Python-Konventionen hältst, zeigst du folglich professionellen Stil! # </div> # <br> # # # **Beispiel für einzeilige Kommentare:** # + # Berechnung zu einer Raumgröße in kubikmetern fläche = 5 # Fläche in m² höhe = 3 # höhe in m volumen = fläche * höhe # ergibt m³ volumen # - # Mehrzeiligen Kommentar kannst du **ausschließlich** oberhalb von Code setzen. # # **Beispiel für die unsachgemäße Anwendung eines mehrzeiligen Kommentars:** # + """Das ist die Berechnung zum Volumen eines Raumes. Zuerst wird die Fläche in m² der Variablen "fläche" zugewiesen:""" fläche = 5 höhe = 3 volumen = fläche * höhe print(volumen) """Dann wird die Raumhöhe der Variablen "höhe" zugewiesen. Anschließend wird das Raumvolumen durch Multiplikation von "fläche" und "höhe" berechnet.""" # - # Kommentare erscheinen normalerweise nicht in der Ausgabe. Ihr Sinn ist es, gerade nicht ausgegeben zu werden und nur innerhalb des Programms Abläufe zu erläutern, nach außen hin (für User) nicht sichtbar. # <br> # Weil aber der mehrzeilige Kommentar ans Ende des Codes gesetzt wurde, greift dieser Zweck nicht. # # **Beispiel für einen Fehler durch einen mehrzeiligen Kommentar:** # + """Das ist die Berechnung zum Volumen eines Raumes. Zuerst wird die Fläche in m² der Variablen "fläche" zugewiesen:""" fläche = 5 höhe = 3 """Dann wird die Raumhöhe der Variablen "höhe" zugewiesen. Anschließend wird das Raumvolumen durch Multiplikation von "fläche" und "höhe" berechnet.""" volumen = fläche * höhe print(volumen) # - # Dieser Fehler ist ein Syntaxfehler, verursacht durch falsch gesetzte oder fehlende Zeichen. Möchtest du mehr über Fehlermeldungen lernen, haben wir dazu ein extra Kapitel für dich vorbereitet: "3.2 Fehlermeldungen verstehen und Hilfe zu Python finden". 
# <br>
# Dieser hier wurde ausgelöst, weil der mehrzeilige Kommentar neben Code platziert wurde.
#
# **Beispiel für die korrekte Anwendung des mehrzeiligen Kommentars:**

# +
"""Das ist die Berechnung zum Volumen eines Raumes.
Zuerst wird die Fläche in m² der Variablen "fläche" zugewiesen.
Dann wird die Raumhöhe der Variablen "höhe" zugewiesen.
Anschließend wird das Raumvolumen durch Multiplikation von "fläche" und "höhe" berechnet:"""

fläche = 5
höhe = 3
volumen = fläche * höhe
volumen
# -

# <div class="alert alert-block alert-warning">
# <font size="3"><b>Übung:</b></font> Probier beide Kommentar-Arten in der unteren Code-Zelle aus, indem du kommentierst, welche Rechnung (mit dem Namen der Rechenart) welchen Datentyp ergibt. Damit hast du gleich eine Wiederholung für dich ;-)
# </div>
# <br>
# <div class="alert alert-block alert-info">
# <font size="3"><b>Wiederholung:</b></font> <b>Warum ist es wichtig, die Datentypen der Ergebnisse bereits vorher zu wissen?</b>
# <br>
#
# Besonders bei komplexeren Berechnungen können Fehler entstehen, wenn Float-Werte unabsichtlich auf Integers ab- oder aufgerundet werden. Außerdem kann es zu Fehlermeldungen kommen, wenn du mit zusätzlichen Funktionen aus Modulen arbeitest, die nur mit einem bestimmten Datentyp ausgeführt werden können. Wenn du die Datentypen der Ergebnisse kennst, kannst du solche Fallstricke ganz leicht vermeiden.
# </div>
# <br>
#
# <div class="alert alert-block alert-warning">
# <font size="3"><b>Beispiel:</b></font> Als Beispiel, wie du an diese Aufgabe herangehen könntest, ist die erste Rechenart bereits kommentiert. Solltest du dir unsicher über die Datentypen der Ergebnisse sein, nutze die leeren Codezellen darunter, um sie zu überprüfen. Du kannst dir auch neue Code-Zellen anlegen (mit <b>+</b>).
# </div> """Addition ergibt dann Float, wenn ein Integer mit einem Float addiert wird (oder umgekehrt), sonst Integer.""" a1 = 5 + 2.0 # Datentyp des Ergebnisses: Float a2 = 5 + 2 # Datentyp des Ergebnisses: Integer # + s1 = 2.0 - 5 s2 = 2 - 5 d1 = 24 / 8 d2 = 24 / 8.0 m1 = 5 * 5 m2 = 5 * 5.0 r1 = 8 % 3 r2 = 8.5 % 3 f1 = 8 // 3 f2 = 8.5 // 3 p1 = 2.5**3 p2 = 2**0.3 p3 = 4**2 w1 = 25**(1/5) w2 = 36**0.5 # - # <div class="alert alert-block alert-success"> # <b>Glückwunsch</b> Jetzt weißt du sicher, welche Rechnungen welche numerischen Datentypen ergeben und du kannst deinen Code für dich und andere verständlicher gestalten. # <br> # # Im folgenden Kapitel gehen wir genau darauf ein, in welcher Reihenfolge Python Rechenschritte ausführt. # </div> # <div class="alert alert-block alert-info"> # <h3>Das kannst du dir aus dieser Übung mitnehmen:</h3> # # * **Ganzzahlen können zu Fließkommazahlen konvertiert werden und umgekehrt:** # * Eine Rechnung, in der mindestens ein Float vorkommt, ergibt immer einen Float, sonst Integer # * Division in Python ergibt stets Float, selbst, wenn mit Integers gerechnet wird # * Ganzzahl zu Fließkommazahl: ``float(2)`` => 2.0 # * Fließkommazahl zu Ganzzahl: ``int(2.0)`` => 2 # * kennst du den Datentyp nicht, frage ihn ab mit ``type()``, z.B.: ``type(x)`` # <br> # * **Kommentare** sind in Python ... # * wichtig, um den Code für dich und andere verständlich zu halten # * einzeilig (mit 2 Leerzeichen Abstand zu nebenstehendem Code): `` # Kommentar`` # * mehrzeilig: ``""" Kommentar """``
3 Python Grundlagen/3.1 Python als Taschenrechner/3.1.2 Conversion & Kommentare.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # import statistical tools import numpy as np import pandas as pd import sklearn from statsmodels.formula.api import ols import statsmodels as sm from sklearn.linear_model import LinearRegression from sklearn import datasets from statsmodels.stats.outliers_influence import summary_table # import data visualisation tools import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # import dataset url = "/Users/arpanganguli/Documents/Finance/ISLR/Datasets/Boston.csv" Boston = pd.read_csv(url, index_col = "SlNo") # please use the absolute path in your own computer like I have used in mine # basic exploration of data Boston.head() Boston.corr() # fit model through linear regression Y = Boston['medv'] X = Boston['lstat'] model = ols("Y~X", data = Boston).fit() model.summary() # predict the model dt = summary_table(model, alpha = 0.5)[1] Y_prd = dt[:, 2] Yprd_ci_lower, Yprd_ci_upper = dt[:, 6:8].T pd.DataFrame(np.column_stack([Y_prd, Yprd_ci_lower, Yprd_ci_upper])).head() # + # plot graph with regression line plt.xkcd() plt.figure(figsize = (25, 10)) plt.figure(1).add_subplot(121) print(sns.regplot(X, Y, data = model, color = 'g')) plt.title("Linear Model") plt.figure(figsize = (25, 10)) plt.figure(2).add_subplot(122) print(sns.residplot(X, Y, lowess = True, color = 'r')) plt.title("Non-Linear Model")
Chapter 3/R Lab/3.6.2 Simple Linear Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="gUl_qfOR8JV6" # ##Setup # # You will need to make a copy of this notebook in your Google Drive before you can edit the homework files. You can do so with **File &rarr; Save a copy in Drive**. # + id="iizPcHAp8LnA" outputId="13237eec-7f09-42f3-eb49-838c9cd65452" colab={"base_uri": "https://localhost:8080/"} #@title mount your Google Drive #@markdown Your work will be stored in a folder called `cds_rl_2022` by default to prevent Colab instance timeouts from deleting your edits. import os from google.colab import drive drive.mount('/content/gdrive') # + id="nAb10wnb8N0m" #@title set up mount symlink DRIVE_PATH = '/content/gdrive/My\ Drive/cds_rl_2022' DRIVE_PYTHON_PATH = DRIVE_PATH.replace('\\', '') if not os.path.exists(DRIVE_PYTHON_PATH): # !mkdir $DRIVE_PATH ## the space in `My Drive` causes some issues, ## make a symlink to avoid this SYM_PATH = '/content/cds_rl_2022' if not os.path.exists(SYM_PATH): # !ln -s $DRIVE_PATH $SYM_PATH # + id="gtS9-WsD8QVr" #@title apt install requirements #@markdown Run each section with Shift+Enter #@markdown Double-click on section headers to show code. 
# !apt update # !apt install -y --no-install-recommends \ # build-essential \ # curl \ # git \ # git-lfs \ # gnupg2 \ # make \ # cmake \ # ffmpeg \ # swig \ # libz-dev \ # unzip \ # zlib1g-dev \ # libglfw3 \ # libglfw3-dev \ # libxrandr2 \ # libxinerama-dev \ # libxi6 \ # libxcursor-dev \ # libgl1-mesa-dev \ # libgl1-mesa-glx \ # libglew-dev \ # libosmesa6-dev \ # lsb-release \ # ack-grep \ # patchelf \ # wget \ # xpra \ # xserver-xorg-dev \ # xvfb \ # python-opengl \ # ffmpeg # set up git lfs # !git lfs install # + id="VcKGekJN80NO" #@title install mujoco-py # %pip install free-mujoco-py # Cythonizes pkg on the first run import mujoco_py # + [markdown] id="k1Nj846VSExE" # ### Clone/update repo # # Now we need to clone the HW3 codebase. There are two options: # # 1. Git clone the [repository](https://github.com/pkuderov/mipt-rl-hw-2022.git), install requirements, start coding HW3. This's the only option if you haven't cloned the repo yet for HW1. # If you have the repo already cloned, it's better to follow the 2-nd option. Otherwise, you will need to move the old `rl_hw` folder first. But don't delete it - make sure you've kept the HW1 solution as you will need it for this assignment! # # 2. Use already cloned local repository in `rl_hw`. Save the HW1 solution to the separate branch, then git pull changes from the remote upstream to get HW3 codebase. # + id="-XcwBiBN8-Fg" cellView="form" #@title clone homework repo (option #1) # # %cd $SYM_PATH # # !git clone https://github.com/nortem/mipt-rl-hw-2022.git rl_hw # # %cd rl_hw # + id="X0i9iN1wSN6e" cellView="form" #@title pull updated repo (option #2) # Don't hesitate to update the script for yourself # # %cd $SYM_PATH/rl_hw # # git commit before pulling # # !git checkout -b "hw2" # # !git add . 
# # !git commit -m "HW2 solution" # # !git checkout main # # update # # !git pull # + id="rHZs9_U525Ij" outputId="577ad0c4-1fa1-443d-e057-186269622b01" colab={"base_uri": "https://localhost:8080/"} # %cd $SYM_PATH/rl_hw # + id="RWCz72b2SNsA" #@title install requirements (the same as for previous homeworks + gym[atari] is added) # %cd hw1 # %pip install -r requirements.colab.txt # %pip install -e . # also install hw3 package # %cd ../hw3 # %pip install -e . # + id="edu3tXLiQNuI" #@title set up the Ms. Pacman environment import urllib.request urllib.request.urlretrieve('http://www.atarimania.com/roms/Roms.rar','Roms.rar') ATARI_PATH=f'{SYM_PATH}/Atari' # %pip install unrar # !unrar x Roms.rar # %mkdir $ATARI_PATH # %mv Roms.rar $ATARI_PATH # %mv 'HC ROMS' $ATARI_PATH # %mv 'ROMS' $ATARI_PATH # !python -m atari_py.import_roms $ATARI_PATH # + id="g5xIOIpW8_jC" #@title set up virtual display from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() # For later from hw3.infrastructure.colab_utils import ( wrap_env, show_video ) # + id="2rsWAWaK9BVp" cellView="form" outputId="4698ad5d-6b19-42e8-d277-5985435e02a2" colab={"base_uri": "https://localhost:8080/", "height": 439} #@title test virtual display #@markdown If you see a video of a four-legged ant fumbling about, setup is complete! import gym import matplotlib matplotlib.use('Agg') env = wrap_env(gym.make("Ant-v2")) observation = env.reset() for i in range(10): env.render(mode='rgb_array') obs, rew, term, _ = env.step(env.action_space.sample() ) if term: break; env.close() print('Loading video...') show_video() # + [markdown] id="QizpiHDh9Fwk" # ## Editing Code # # To edit code, click the folder icon on the left menu. Navigate to the corresponding file (`cds_rl_2022/...`). Double click a file to open an editor. There is a timeout of about ~12 hours with Colab while it is active (and less if you close your browser window). 
We sync your edits to Google Drive so that you won't lose your work in the event of an instance timeout, but you will need to re-mount your Google Drive and re-install packages with every new instance. # + [markdown] id="Nii6qk2C9Ipk" # ## Run DQN and Double DQN # + id="<KEY>" #@title imports import os import time from hw3.infrastructure.rl_trainer import RL_Trainer from hw3.agents.dqn_agent import DQNAgent from hw3.infrastructure.dqn_utils import get_env_kwargs # + id="2fXlzARJ9i-t" #@title runtime arguments class Args: def __getitem__(self, key): return getattr(self, key) def __setitem__(self, key, val): setattr(self, key, val) def __contains__(self, key): return hasattr(self, key) env_name = 'MsPacman-v0' #@param ['MsPacman-v0', 'LunarLander-v3', 'PongNoFrameSkip-v4'] exp_name = 'q3_dqn' #@param ## PDF will tell you how to set ep_len ## and discount for each environment ep_len = 200 #@param {type: "integer"} #@markdown batches and steps batch_size = 32 #@param {type: "integer"} eval_batch_size = 1000 #@param {type: "integer"} num_agent_train_steps_per_iter = 1 #@param {type: "integer"} num_critic_updates_per_agent_update = 1 #@param {type: "integer"} #@markdown Q-learning parameters double_q = False #@param {type: "boolean"} #@markdown system save_params = False #@param {type: "boolean"} no_gpu = False #@param {type: "boolean"} which_gpu = 0 #@param {type: "integer"} seed = 1 #@param {type: "integer"} #@markdown logging ## default is to not log video so ## that logs are small enough to be ## uploaded to gradscope video_log_freq = -1 #@param {type: "integer"} scalar_log_freq = 10000#@param {type: "integer"} args = Args() ## ensure compatibility with hw1 code args['train_batch_size'] = args['batch_size'] if args['video_log_freq'] > 0: import warnings warnings.warn( '''\nLogging videos will make eventfiles too''' '''\nlarge for the autograder. 
Set video_log_freq = -1''' '''\nfor the runs you intend to submit.''' ) # + id="T0cJlp6s-ogO" cellView="form" #@title create directories for logging def create_log_dir(args, data_path_sub=''): data_path = '/content/cds_rl_2022/hw3/data'+data_path_sub if not (os.path.exists(data_path)): os.makedirs(data_path) logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S") logdir = os.path.join(data_path, logdir) args['logdir'] = logdir if not(os.path.exists(logdir)): os.makedirs(logdir) print("LOGGING TO: ", logdir) # data_path = '/content/cds_rl_2022/rl_hw/hw3/data' # if not (os.path.exists(data_path)): # os.makedirs(data_path) # logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S") # logdir = os.path.join(data_path, logdir) # args['logdir'] = logdir # if not(os.path.exists(logdir)): # os.makedirs(logdir) # print("LOGGING TO: ", logdir) # + id="4YaionmWNGuO" #@title read_logs from tensorflow.python.summary.summary_iterator import summary_iterator def read_logs(filename, tag): info_tag = [] for e in summary_iterator(filename): for v in e.summary.value: if v.tag == tag: info_tag.append(v.simple_value) return info_tag # + id="I525KFRN-42s" cellView="form" #@title Define Q-function trainer class Q_Trainer(object): def __init__(self, params): self.params = params train_args = { 'num_agent_train_steps_per_iter': params['num_agent_train_steps_per_iter'], 'num_critic_updates_per_agent_update': params['num_critic_updates_per_agent_update'], 'train_batch_size': params['batch_size'], 'double_q': params['double_q'], } env_args = get_env_kwargs(params['env_name']) for k, v in env_args.items(): params[k] = v self.params['agent_class'] = DQNAgent self.params['agent_params'] = params self.params['train_batch_size'] = params['batch_size'] self.params['env_wrappers'] = env_args['env_wrappers'] self.rl_trainer = RL_Trainer(self.params) def run_training_loop(self): self.rl_trainer.run_training_loop( 
self.params['num_timesteps'], collect_policy = self.rl_trainer.agent.actor, eval_policy = self.rl_trainer.agent.actor, ) # + id="hDP6mUDjw-Dy" #@title plot import matplotlib.pyplot as plt # %matplotlib inline def plot_info(y, labels, y_label='', x_label='', koef=1): fig, ax = plt.subplots(figsize=(20, 10)) for i in range(len(y)): ax.plot(list(range(len(y[i])))*koef, y[i], label=labels[i]) ax.legend() ax.grid() ax.set_ylabel(y_label, fontsize=16) ax.set_xlabel(x_label, fontsize=16) plt.show() # + [markdown] id="6RRh2nQMwrdj" # ### Эксперимент # + id="wF4LSRGn-_Cv" args = Args() args.exp_name = 'q1' create_log_dir(args, '/q1_2') trainer = Q_Trainer(args) trainer.run_training_loop() # + [markdown] id="ST1lx9Avwga2" # ``` # ********** Iteration 1610000 ************ # # Training agent... # # Beginning logging procedure... # Timestep 1610001 # mean reward (100 episodes) 1532.900000 # best mean reward 1599.700000 # running time 13487.616885 # Train_EnvstepsSoFar : 1610001 # Train_AverageReturn : 1532.9 # Train_BestReturn : 1599.7 # TimeSinceStart : 13487.616884946823 # Training Loss : 0.25639164447784424 # Done logging... # ``` # + id="2q0Jr67ejHyd" colab={"base_uri": "https://localhost:8080/", "height": 521} outputId="b72a393c-68c2-445f-a067-edf33a5ed628" filename = '/content/cds_rl_2022/hw3/data/q1_2/q1_MsPacman-v0_24-05-2022_07-23-26/events.out.tfevents.1653377006.0d2644e26876' tag1 = 'Train_AverageReturn' X_1 = read_logs(filename, tag1) tag2 = 'Train_BestReturn' X_2 = read_logs(filename, tag2) plot_info([X_1, X_2], ['Av', 'B'], y_label='Train_Return')
hw3/hw3/scripts/answer_run_hw3_dqn (1).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # Assignment 3.2 — Convolutional Neural Networks (CNNs) in PyTorch
#
# This exercise is meant to run in Google Colab (https://colab.research.google.com/),
# which provides a free GPU.  The notebook installs PyTorch itself; Keras is
# not needed.

# +
# Install PyTorch and download the SVHN data.
# !pip3 install torch torchvision
# !wget -c http://ufldl.stanford.edu/housenumbers/train_32x32.mat http://ufldl.stanford.edu/housenumbers/test_32x32.mat

# +
from collections import namedtuple

import matplotlib.pyplot as plt
import numpy as np
import PIL

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms

# +
device = torch.device("cuda:0")  # Let's make sure GPU is available!

# + [markdown]
# # Load the data

# +
# First, lets load the dataset
data_train = dset.SVHN('./',
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize(mean=[0.43,0.44,0.47],
                                                std=[0.20,0.20,0.20])
                       ])
                       )
data_test = dset.SVHN('./', split='test',
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.43,0.44,0.47],
                                               std=[0.20,0.20,0.20])
                      ]))

# + [markdown]
# Split the data into training and validation sets.
# See https://pytorch.org/tutorials/beginner/data_loading_tutorial.html for details.

# +
batch_size = 32

data_size = data_train.data.shape[0]
validation_split = .2
split = int(np.floor(validation_split * data_size))
indices = list(range(data_size))
np.random.shuffle(indices)

train_indices, val_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)

train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,
                                           sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,
                                         sampler=val_sampler)


# +
# We'll use a special helper module to shape it into a flat tensor
class Flattener(nn.Module):
    def forward(self, x):
        batch_size, *_ = x.shape
        return x.view(batch_size, -1)


# + [markdown]
# A minimal network using the new layer types:
# Convolutional — `nn.Conv2d`, MaxPool — `nn.MaxPool2d`.

# +
nn_model = nn.Sequential(
    nn.Conv2d(3, 64, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(4),
    nn.Conv2d(64, 64, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(4),
    Flattener(),
    nn.Linear(64*2*2, 10),
)

nn_model.type(torch.cuda.FloatTensor)
nn_model.to(device)

loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.SGD(nn_model.parameters(), lr=1e-1, weight_decay=1e-4)


# +
def train_model(model, train_loader, val_loader, loss, optimizer, num_epochs,
                sheduler=None):
    """Train `model` for `num_epochs` epochs and track metrics per epoch.

    Args:
        model: nn.Module already moved to `device`.
        train_loader, val_loader: DataLoaders for training/validation batches.
        loss: criterion (e.g. nn.CrossEntropyLoss) on `device`.
        optimizer: torch optimizer over model parameters.
        num_epochs: number of full passes over train_loader.
        sheduler: optional LR scheduler stepped with the validation accuracy.

    Returns:
        (loss_history, train_history, val_history) — per-epoch average loss,
        train accuracy and validation accuracy, all plain Python floats.
    """
    loss_history = []
    train_history = []
    val_history = []
    for epoch in range(num_epochs):
        model.train()  # Enter train mode

        loss_accum = 0
        correct_samples = 0
        total_samples = 0
        for i_step, (x, y) in enumerate(train_loader):
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            prediction = model(x_gpu)
            loss_value = loss(prediction, y_gpu)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()

            _, indices = torch.max(prediction, 1)
            correct_samples += torch.sum(indices == y_gpu)
            total_samples += y.shape[0]

            # FIX: accumulate the detached scalar.  Accumulating the tensor
            # itself (`loss_accum += loss_value`) kept every batch's autograd
            # graph alive for the whole epoch, wasting GPU memory.
            loss_accum += loss_value.item()

        # FIX: i_step ends at (num_batches - 1); dividing by it instead of
        # the batch count overstated the average loss (off-by-one).
        ave_loss = loss_accum / (i_step + 1)
        train_accuracy = float(correct_samples) / total_samples
        val_accuracy = compute_accuracy(model, val_loader)

        if sheduler is not None:
            sheduler.step(val_accuracy)

        loss_history.append(ave_loss)
        train_history.append(train_accuracy)
        val_history.append(val_accuracy)

        print("Epoch: %d, Average loss: %f, Train accuracy: %f, Val accuracy: %f"
              % (epoch, ave_loss, train_accuracy, val_accuracy))

    return loss_history, train_history, val_history


def compute_accuracy(model, loader):
    """
    Computes accuracy on the dataset wrapped in a loader

    Returns: accuracy as a float value between 0 and 1
    """
    model.eval()  # Evaluation mode
    correct_samples = 0
    total_samples = 0
    for x, y in loader:
        x_gpu = x.to(device)
        y_gpu = y.to(device)
        prediction = model(x_gpu)
        _, indices = torch.max(prediction, 1)
        correct_samples += torch.sum(indices == y_gpu)
        total_samples += y.shape[0]
    return float(correct_samples) / total_samples


# +
loss_history, train_history, val_history = train_model(
    nn_model, train_loader, val_loader, loss, optimizer, 5)

# + [markdown]
# # Data augmentation
#
# Generate extra training samples from the originals.  Augmented data should
# look like data that could occur in real life, otherwise it can hurt rather
# than help.  PyTorch ships these as `transforms`:
# https://pytorch.org/tutorials/beginner/data_loading_tutorial.html#transforms
#
# Used below: ColorJitter (random color change), RandomHorizontalFlip,
# RandomVerticalFlip, RandomRotation.

# +
tfs = transforms.Compose([
    transforms.ColorJitter(hue=.50, saturation=.50),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(50, interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.43,0.44,0.47],
                         std=[0.20,0.20,0.20])
])

# Create augmented train dataset
data_aug_train = dset.SVHN('./', transform=tfs)

train_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size,
                                               sampler=train_sampler)

# + [markdown]
# Visualize the augmentation results (always worth looking at generated data).

# +
# Visualize some augmented images (without ToTensor/Normalize so that
# imshow receives PIL images).  Based on these, should we keep all the
# augmentations?
tfs = transforms.Compose([
    transforms.ColorJitter(hue=.20, saturation=.20),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(10, interpolation=transforms.InterpolationMode.BILINEAR),
])

data_aug_vis = dset.SVHN('./', transform=tfs)

plt.figure(figsize=(30, 3))
for i, (x, y) in enumerate(data_aug_vis):
    if i == 10:
        break
    plt.subplot(1, 10, i+1)
    plt.grid(False)
    plt.imshow(x)
    plt.axis('off')

# + [markdown]
# Not all augmentations are equally useful on this dataset — flips turn digits
# into different (or invalid) digits, so keep only the safe ones.

# +
# Keep only augmentations that preserve digit identity: mild color jitter
# and a small rotation (no flips — a flipped digit is a different digit).
tfs = transforms.Compose([
    transforms.ColorJitter(hue=.20, saturation=.20),
    transforms.RandomRotation(15, interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.43,0.44,0.47],
                         std=[0.20,0.20,0.20])
])

data_aug_train = dset.SVHN('./', transform=tfs)

train_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size,
                                               sampler=train_sampler)

# +
# Finally, let's train with augmentations!
# Note we shouldn't use augmentations on validation
loss_history_aug, train_history_aug, val_history_aug = train_model(
    nn_model, train_aug_loader, val_loader, loss, optimizer, 5)

# + [markdown]
# # LeNet
# A classical CNN architecture proposed by Yann LeCun in 1998
# ("Gradient Based Learning Applied to Document Recognition",
# http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf), translated here into
# standard Conv/Pool/Linear layers.  How does it fare on SVHN?

# +
# LeNet-like architecture for the SVHN task
lenet_model = nn.Sequential(
    nn.Conv2d(3, 6, 5),
    nn.LeakyReLU(inplace=True),
    nn.MaxPool2d(2),
    nn.Conv2d(6, 16, 5),
    nn.LeakyReLU(inplace=True),
    nn.MaxPool2d(2),
    nn.Conv2d(16, 120, 5),
    nn.LeakyReLU(inplace=True),
    Flattener(),
    nn.Linear(120, 84),
    nn.LeakyReLU(inplace=True),
    nn.Linear(84, 10)
)

lenet_model.type(torch.cuda.FloatTensor)
lenet_model.to(device)

loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.SGD(lenet_model.parameters(), lr=1e-1, weight_decay=1e-4)

# +
# Let's train it!
loss_history, train_history, val_history = train_model(
    lenet_model, train_aug_loader, val_loader, loss, optimizer, 10)

# + [markdown]
# # Hyperparameter search

# +
# The key hyperparameters we're going to tune are learning speed, annealing
# rate and regularization.  Grid search over logarithmic ranges; every run is
# recorded in run_record keyed by its Hyperparams.
from itertools import product

Hyperparams = namedtuple("Hyperparams", ['learning_rate', 'anneal_epochs', 'reg'])
RunResult = namedtuple("RunResult", ['model', 'train_history', 'val_history', 'final_val_accuracy'])

learning_rates = [1e-1, 1e-2, 1e-3]
anneal_coeff = 0.2
anneal_epochs = [4, 16]
reg = [1e-4, 1e-5]

batch_size = 64
epoch_num = 10

train_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size,
                                               sampler=train_sampler)

# Record all the runs here
# Key should be Hyperparams and values should be RunResult
run_record = {}

num = 1
for lr, an_ep, re in product(learning_rates, anneal_epochs, reg):
    header = f"--- Model: {num}, with params: learning rate = {lr}, anneal epochs = {an_ep}, regularization = {re}"
    print(header)
    params = Hyperparams(lr, an_ep, re)

    # Fresh LeNet-like model per hyperparameter combination.
    model = nn.Sequential(
        nn.Conv2d(3, 6, 5),
        nn.LeakyReLU(inplace=True),
        nn.MaxPool2d(2),
        nn.Conv2d(6, 16, 5),
        nn.LeakyReLU(inplace=True),
        nn.MaxPool2d(2),
        nn.Conv2d(16, 120, 5),
        nn.LeakyReLU(inplace=True),
        Flattener(),
        nn.Linear(120, 84),
        nn.LeakyReLU(inplace=True),
        nn.Linear(84, 10)
    )
    model.type(torch.cuda.FloatTensor)
    model.to(device)

    loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
    optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=re)

    # Multiply the LR by anneal_coeff every an_ep epochs.
    lr_lambda = lambda epoch: anneal_coeff if epoch % an_ep == 0 else 1.0
    sheduler = optim.lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lr_lambda)

    loss_history, train_history, val_history = train_model(
        model, train_aug_loader, val_loader, loss, optimizer, epoch_num, sheduler)
    run_record[params] = RunResult(model, train_history, val_history, val_history[-1])
    num += 1

# +
best_val_accuracy = None
best_hyperparams = None
best_run = None

for hyperparams, run_result in run_record.items():
    if best_val_accuracy is None or best_val_accuracy < run_result.final_val_accuracy:
        best_val_accuracy = run_result.final_val_accuracy
        best_hyperparams = hyperparams
        best_run = run_result

print("Best validation accuracy: %4.2f, best hyperparams: %s" % (best_val_accuracy, best_hyperparams))

# + [markdown]
# # Free-form exercise — beat LeNet!
#
# Things worth trying: BatchNormalization
# (https://pytorch.org/docs/stable/nn.html#batchnorm2d), deeper/wider layers,
# more epochs, other augmentations.

# +
# VGG-style stack: (Conv-Conv-BN, MaxPool) blocks halving the spatial size
# from 32x32 down to 1x1, then a small fully-connected head.
my_model = nn.Sequential(
    # 32x32
    nn.Conv2d(3, 16, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(16, 16, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.BatchNorm2d(16),
    nn.MaxPool2d(2),  # 16x16
    nn.Conv2d(16, 32, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(32, 32, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.BatchNorm2d(32),
    nn.MaxPool2d(2),  # 8x8
    nn.Conv2d(32, 64, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(64, 64, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.BatchNorm2d(64),
    nn.MaxPool2d(2),  # 4x4
    nn.Conv2d(64, 128, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(128, 128, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.BatchNorm2d(128),
    nn.MaxPool2d(2),  # 2x2
    nn.Conv2d(128, 128, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(128, 128, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.BatchNorm2d(128),
    nn.MaxPool2d(2),  # 1x1
    Flattener(),
    nn.Linear(128, 512),
    nn.ReLU(inplace=True),
    nn.BatchNorm1d(512),
    nn.Linear(512, 10)
)

my_model.type(torch.cuda.FloatTensor)
my_model.to(device)

loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.Adadelta(my_model.parameters(), lr=1e-1, weight_decay=1e-2)
sheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.8, patience=3,
                                                threshold=0.01, verbose=True)

# +
loss_history, train_history, val_history = train_model(
    my_model, train_aug_loader, val_loader, loss, optimizer, 15, sheduler)

# + [markdown]
# # Final chord — evaluate the best model on the test set
#
# The target is more than **90%** accuracy on the test set.

# +
# Compute accuracy on the test set.
test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size)
# FIX: was compute_accuracy(my_model_2, ...) — `my_model_2` is never defined
# anywhere in this notebook (NameError); the trained model is `my_model`.
final_test_accuracy = compute_accuracy(my_model, test_loader)
print(f"Final test accuracy {final_test_accuracy: .4f} ")

# +
import matplotlib.pyplot as plt

plt.figure(figsize=(14, 6), dpi=80)
plt.subplot(1, 2, 1)
plt.title("Loss")
plt.plot(loss_history)
plt.grid()
plt.subplot(1,2,2)
plt.title("Accuracy")
plt.plot(train_history, label="train")
plt.plot(val_history, label="validate")
plt.legend()
plt.grid()
plt.tight_layout()
assignments/assignment3/PyTorch_CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# !pwd

# !which python

# # BBCW2MAD SANITY CHECKS
# ---
#
# In this notebook, we follow the new approach proposed in [1] and install the
# beam-beam wire compensators on the LHC, testing the effect on the tunes and orbit.

# +
# %% Import few packages
from cpymad.madx import Madx
#import madxp
from madxp import cpymadTool as mt
from collections import OrderedDict
import numpy as np
import pandas as pd
from matplotlib import pylab as plt
from IPython.display import display
import time
import os
import warnings
warnings.filterwarnings('always')
from cl2pd import importData
#bbcw2mad package
from bbcw2mad.WireObj import *
#phd-tools
from phd_toolbox import constants
from phd_toolbox import wires_tools

# +
# %%
# ============================================================================================================ #
# WORKFLOW FUNCTIONS
# ============================================================================================================ #

def power_wires(mad, wires_lst, table_out):
    '''
    Turn on the wires as done in the LHC Run 3 and re-twiss the machine.

    The beam-beam lenses are switched off (ON_BB_CHARGE=0) before powering
    the wires, so the resulting twiss isolates the effect of the wires alone.

    Args:
        mad: madx instance
        wires_lst: list, contains the wires under the form of WireObj instances
        table_out: str, name of the madx table to be added to "twiss_" and "summary_"
    Return:
        my_output_dict: dict, contains the output info to be added to the
        working flow DF (see get_status), plus 'elapsed_time' in seconds.
    '''
    start_time = time.time()
    # Turn off BB (constant command: no f-string needed here).
    mad.input('''
    ON_BB_CHARGE=0;
    ''')
    for wire in wires_lst:
        wire.switch_wire(on=True)
        wire.print_lense()
    mad.input(f'''
    use, sequence=lhcb1;
    twiss, table=twiss_{table_out};
    exec, clonesumm('summary_{table_out}');
    !ON_BB_CHARGE=1;
    ''')
    my_output_dict = get_status(mad)
    elapsed_time = time.time() - start_time
    my_output_dict['elapsed_time'] = elapsed_time
    return my_output_dict


def get_status(mad):
    '''
    Snapshot the current madx status.

    Args:
        mad: madx instance
    Return:
        dict with the constant/independent/dependent variable dataframes,
        the sequence and beam dataframes, the list of tables, and the time
        spent collecting them ('elapsed_time', seconds).
    '''
    start_time = time.time()
    variables = mt.variables_dict(mad)
    my_output_dict = {'constant_df': variables['constant_df'],
                      'independent_variable_df': variables['independent_variable_df'],
                      'dependent_variable_df': variables['dependent_variable_df'],
                      'sequences_df': mt.sequences_df(mad),
                      'beams_df': mt.beams_df(mad),
                      'tables_list': list(mad.table)}
    elapsed_time = time.time() - start_time
    my_output_dict['elapsed_time'] = elapsed_time
    return my_output_dict
# -

# ## Checks on B1
# ---

# %% Definition of the parameters that are not knobs of the beam sequence (no strings please!)
parameter_dict = {
    # =============================================================================
    # Beam parameters
    # =============================================================================
    ## LHC beam 1 (clockwise), LHC beam 2 (clockwise), LHC beam 2 (counterclockwise)
    'par_mylhcbeam': 1,
    ## beam normalized emittance [m rad]
    'par_beam_norm_emit': 2.5e-6,
    ## [m]
    'par_beam_sigt': 0.075,
    ## [-]
    'par_beam_sige': 1.1e-4,
    ## [-]
    'par_beam_npart': 1.1e11,
    ## [GeV]
    'par_beam_energy_tot': 7000,
    ## [A]
    'par_oct_current': 350,
    ## [-]
    'par_chromaticity': 15,
    ## [MV]
    'par_vrf_total': 16.,
    ## Tunes with fractional part
    'par_qx0': 62.31,
    'par_qy0': 60.32,
    # =============================================================================
    # Beam-Beam configuration
    # =============================================================================
    ## install the BB elements [0,1]
    'par_on_bb_switch': 1,
    ## if 1 lumi leveling in ip8 is applied and q/q' match is done with bb off [0,1]
    'par_on_collision': 1,
    ## bunch separation [ns]
    'par_b_t_dist': 25.,
    ## default value for the number of additionnal parasitic encounters inside D1
    'par_n_inside_D1': 5,
    ## number of slices for head-on in IR1 [between 0 and 201]
    'par_nho_IR1': 11,
    'par_nho_IR2': 11,
    'par_nho_IR5': 11,
    'par_nho_IR8': 11,
    ## flag to install the Crab Cavities [0, 1]
    'par_install_crabcavities': 0,
    # =============================================================================
    # Beam-Beam Wire Compensators
    # =============================================================================
    ## IR1
    'par_s_w_ir1': 19848.217400,  # s-position [m]
    'par_x_w_ir1': 0,       # Horizontal transverse position of the wire, wrt to the beam [m]
    'par_y_w_ir1': 0.0091,  # Vertical transverse position of the wire, wrt to the beam [m]
    'par_I_w_ir1': 350,     # Wire Current [A]
    ## IR5
    'par_s_w_ir5': 6516.623433,  # s-position [m]
    'par_x_w_ir5': 0.01223,      # Horizontal transverse position of the wire, wrt to the beam [m]
    'par_y_w_ir5': 0,            # Vertical transverse position of the wire, wrt to the beam [m]
    'par_I_w_ir5': 350,          # Wire Current [A]
    # =============================================================================
    # Leveling in IP8
    # =============================================================================
    # leveled luminosity in IP8 (considered if par_on_collision=1) [Hz/cm2]
    'par_lumi_ip8': 2e32,
    # These variables define the number of Head-On collisions in the 4 IPs
    'par_nco_IP1': 2544,
    'par_nco_IP2': 2215,
    'par_nco_IP5': 2544,
    'par_nco_IP8': 2332,
    # =============================================================================
    # Errors and corrections
    # =============================================================================
    # Select seed for errors
    'par_myseed': 0,
    # Set this flag to correct the errors of D2 in the NLC
    # (warning: for now only correcting b3 of D2, still in development)
    'par_correct_for_D2': 0,
    # Set this flag to correct the errors of MCBXF in the NLC
    # (warning: this might be less reproducable in reality, use with care)
    'par_correct_for_MCBX': 0,
    'par_off_all_errors': 0,
    'par_on_errors_LHC': 0,
    'par_on_errors_MBH': 0,
    'par_on_errors_Q5': 0,
    'par_on_errors_Q4': 0,
    'par_on_errors_D2': 0,
    'par_on_errors_D1': 0,
    'par_on_errors_IT': 0,
    'par_on_errors_MCBRD': 0,
    'par_on_errors_MCBXF': 0,
    # =============================================================================
    # Additional parameters
    # =============================================================================
    # parameter for having verbose output [0,1]
    'par_verbose': 1,
    # definition of the slicefactor used in the makethin
    'par_slicefactor': 4,
    # number of optics to use
    'par_optics_number': 27,
    # Specify machine version
    'ver_lhc_run': 3,
    'ver_hllhc_optics': 0,
}

mad = Madx()
mad.input('option, -echo,-warn,-info;')
mad.input('call, file=seq_b1.seq;')
mad.call('/afs/cern.ch/user/s/sterbini/public/tracking_tools/tools/macro.madx')
mad.call('tools/optics_indep_macros.madx')

# +
# Definition of the wire (ir1-like wire)
sequence_w = f'lhcb{parameter_dict["par_mylhcbeam"]}'
mad_inst_w = mad
s_w_ir1 = parameter_dict['par_s_w_ir1']
x_w_ir1 = parameter_dict['par_x_w_ir1']
y_w_ir1 = parameter_dict['par_y_w_ir1']
I_w_ir1 = parameter_dict['par_I_w_ir1']
my_bbcw_test = WireObj('bbcw_int_l1b1', I_w_ir1, s_w_ir1, x_w_ir1, y_w_ir1,
                       parameter_dict['par_beam_npart'],
                       mad_inst=mad_inst_w, sequence=sequence_w)
my_bbcw_test.describe()
wires_lst = [my_bbcw_test]
my_workflow_dict = OrderedDict()
# -

mad.input('''
use, sequence=lhcb1;
twiss, table=twiss_after_machine_tuning;
exec, clonesumm('summary_after_machine_tuning');
''')

# +
# Reference optics at the wire location before powering it.
twiss_before = mad.table.twiss_after_machine_tuning.dframe()
x_bef = twiss_before[twiss_before['name']=='bbcw_int_l1b1:1']['x'].values[0]
y_bef = twiss_before[twiss_before['name']=='bbcw_int_l1b1:1']['y'].values[0]
qx_bef = mad.table.summary_after_machine_tuning['q1'][0]
qy_bef = mad.table.summary_after_machine_tuning['q2'][0]
betx_w = twiss_before[twiss_before['name']=='bbcw_int_l1b1:1']['betx'].values[0]
bety_w = twiss_before[twiss_before['name']=='bbcw_int_l1b1:1']['bety'].values[0]

# Update wire position
for wire in wires_lst:
    wire.get_closed_orbit(table_input='twiss_after_machine_tuning')

mad.input('option, bborbit=true;')

my_workflow_dict['turn_on_wires'] = power_wires(mad, wires_lst, table_out='b1_wires')

# Optics at the wire location with the wire powered.
twiss_after = mad.table.twiss_b1_wires.dframe()
x_aft = twiss_after[twiss_after['name']=='bbcw_int_l1b1:1']['x'].values[0]
y_aft = twiss_after[twiss_after['name']=='bbcw_int_l1b1:1']['y'].values[0]
qx_aft = mad.table.summary_b1_wires['q1'][0]
qy_aft = mad.table.summary_b1_wires['q2'][0]

# Measured orbit and tune shifts induced by the wire.
dx = x_aft - x_bef
dy = y_aft - y_bef
dqx = qx_aft - qx_bef
dqy = qy_aft - qy_bef

# +
from cl2pd import particle

# theory
# Compute the magnetic rigidity and each analytic shift only once; the
# original called orbit_shift/tune_shift twice each (and setTotalEnergy_GeV
# four times), discarding half of every result.
Brho = particle.setTotalEnergy_GeV(7000)['magneticRigidity_Tm']
orbit_shift_th = wires_tools.orbit_shift(I_L=my_bbcw_test.current,
                                         r_w=my_bbcw_test.y_pos,
                                         betx=betx_w,
                                         bety=bety_w,
                                         qx=qx_bef,
                                         qy=qy_bef,
                                         phi_w=np.pi/2,
                                         Brho=Brho)
dx_th = orbit_shift_th[0]
dy_th = orbit_shift_th[1]
tune_shift_th = wires_tools.tune_shift(I_L=my_bbcw_test.current,
                                       r_w=my_bbcw_test.y_pos,
                                       betx=betx_w,
                                       bety=bety_w,
                                       phi_w=np.pi/2,
                                       Brho=Brho)
dqx_th = tune_shift_th[0]
dqy_th = tune_shift_th[1]

# +
# results
print('##### Orbit shift #####')
print('+++ From MAD +++')
print(f'dx = {dx*1e3} mm')
print(f'dy = {dy*1e3} mm')
print('+++ From formula +++')
print(f'dx = {dx_th*1e3} mm')
print(f'dy = {dy_th*1e3} mm')
print('+++ Error +++')
print(f'err_dx = {100*(dx-dx_th)/np.abs(dx_th)} %')
print(f'err_dy = {100*(dy-dy_th)/np.abs(dy_th)} %\n')
print('##### Tune shift #####')
print('+++ From MAD +++')
print(f'dqx = {dqx}')
print(f'dqy = {dqy}')
print('+++ From formula +++')
print(f'dqx = {dqx_th}')
print(f'dqy = {dqy_th}')
print('+++ Error +++')
print(f'err_dqx = {100*(dqx-dqx_th)/np.abs(dqx_th)} %')
print(f'err_dqy = {100*(dqy-dqy_th)/np.abs(dqy_th)} %')
examples/sanity_checks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.cm as cm from matplotlib import ticker import math import scipy from scipy import spatial import matplotlib.pyplot as plt import matplotlib import xarray as xr import dask from sklearn.neighbors import KDTree import netCDF4 from metpy import calc from metpy.units import units from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d import axes3d from IPython.display import HTML from matplotlib import animation # + fz = 15*1.5 lw = 4 siz = 100 XNNA = 1.25 # Abscissa where architecture-constrained network will be placed XTEXT = 0.25 # Text placement YTEXT = 0.3 # Text placement plt.rc('text', usetex=False) matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams['font.family'] = 'STIXGeneral' #mpl.rcParams["font.serif"] = "STIX" plt.rc('font', family='serif', size=fz) matplotlib.rcParams['lines.linewidth'] = lw # - others = netCDF4.Dataset("/fast/gmooers/Raw_Data/extras/TimestepOutput_Neuralnet_SPCAM_216.cam.h1.2009-01-01-00000.nc") levs = np.array(others.variables['lev']) lons = np.array(others.variables['lon']) new = np.flip(levs) crms = np.arange(1,129,1) Xs, Zs = np.meshgrid(crms, new) # You will need to change the paths below. 
# # - Z_test_tsne_track will be the 3D Latent Space
# - Test Images and min/max scalar need to be taken from the config file

# +
z_test_tsne_track = np.load("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/model_graphs/latent_space/3D_PCA_Latent_Space__31.npy")
Test_Images = np.load("/fast/gmooers/Preprocessed_Data/W_Variable/Trackable_Space_Time_W_Test.npy")
Max_Scalar = np.load("/fast/gmooers/Preprocessed_Data/Centered_50_50/Space_Time_Max_Scalar.npy")
Min_Scalar = np.load("/fast/gmooers/Preprocessed_Data/Centered_50_50/Space_Time_Min_Scalar.npy")
# Undo the [0, 1] min/max scaling applied during preprocessing.
Test_Images = np.interp(Test_Images, (0, 1), (Min_Scalar, Max_Scalar))
# -

# Mean absolute vertical velocity at the 500 hPa level (index -12) per sample.
W_500_Label_All = np.nanmean(np.abs(np.squeeze(Test_Images[:,-12,:])), axis=1)

# You will need to create both a Png_Storage Directory and within an Intensity Directory

# +
fig = plt.figure(figsize=(16,12))
ax = fig.add_subplot(111, projection='3d')
cp = ax.scatter(xs=z_test_tsne_track[:, 0], ys=z_test_tsne_track[:, 1], zs=z_test_tsne_track[:, 2], c=W_500_Label_All, cmap="Purples", s=10.0)
ax.set_title("Total Absolute Intensity at 500 hPa", fontsize = fz*2.0, y = 1.05)
ax.set_zlim(-25,25)
cbar = fig.colorbar(cp, pad=0.002)
cbar.set_label(label="m/s", rotation="horizontal", fontsize=fz*1.5, labelpad=30, y = 0.55)
# Save one frame per degree of rotation; zero-padded names keep the frames
# in order (replaces the original if/elif padding chains).
for ii in range(360):
    ax.view_init(elev=30, azim=ii)
    plt.savefig("Png_Storage/Intensity_31/{:03d}.png".format(ii))
# -

path = '/DFS-L/DATA/pritchard/gmooers/Workflow/MAPS/SPCAM/100_Days/New_SPCAM5/archive/TimestepOutput_Neuralnet_SPCAM_216/atm/hist/TimestepOutput_Neuralnet_SPCAM_216.cam.h0.2008-11.nc'
next_ds = xr.open_dataset(path)
land_frac = next_ds.LANDFRAC
land_frac = xr.DataArray.squeeze(land_frac).values
trop_land = land_frac[37:59, :]  # tropical latitude band

# Repeat the static land mask once per time step (the original also filled
# the array with NaN first, which was immediately overwritten — removed).
geo_labels = np.zeros(shape=(int(len(z_test_tsne_track)/(22*144)), 22, 144))
for i in range(len(geo_labels)):
    geo_labels[i,:,:] = trop_land[:,:]
geo_label_final = np.reshape(geo_labels, (geo_labels.size))

# You will need to create a Land_Sea Directory (31 refers to my config file so I would change it to the number of your config file to keep track of which VAE it is for)

# +
fig = plt.figure(figsize=(16,12))
ax = fig.add_subplot(111, projection='3d')
cp = ax.scatter(xs=z_test_tsne_track[:, 0], ys=z_test_tsne_track[:, 1], zs=z_test_tsne_track[:, 2], c=geo_label_final, cmap="winter", s=10.0)
ax.set_title("Land Fraction", fontsize = fz*2.0, y = 1.05)
ax.set_zlim(-25,25)
cbar = fig.colorbar(cp, pad=0.002)
cbar.set_label(label="Fraction", rotation="horizontal", fontsize=fz*1.5, labelpad=30, y = 0.55)
for ii in range(360):
    ax.view_init(elev=30, azim=ii)
    plt.savefig("Png_Storage/Land_Sea_31/{:03d}.png".format(ii))
# -

path_to_file = '/DFS-L/DATA/pritchard/gmooers/Workflow/MAPS/SPCAM/100_Days/New_SPCAM5/archive/TimestepOutput_Neuralnet_SPCAM_216/atm/hist/TimestepOutput_Neuralnet_SPCAM_216.cam.h1.2009-01-01-00000.nc'
extra_variables = xr.open_dataset(path_to_file)
latitudes = np.squeeze(extra_variables.LAT_20s_to_20n.values)
longitudes = np.squeeze(extra_variables.LON_0e_to_360e.values)

# +
# Re-index the flat sample dimension into (time, level, crm, lat, lon) and
# build a matching local-solar-time label for each (time, lat, lon) cell.
reshaped_Test_Images = np.empty(shape=(16,30,128,22,144))
lst_times = np.empty(shape=(16,22,144))
count = 0
for i in range(len(reshaped_Test_Images)):
    for j in range(len(reshaped_Test_Images[0][0][0])):
        for k in range(len(reshaped_Test_Images[0][0][0][0])):
            reshaped_Test_Images[i,:,:,j,k] = Test_Images[count,:,:]
            count = count+1

for i in range(16):
    for j in range(144):
        # Local solar time: longitude offset + time-step offset, wrapped to 24h.
        splitter = ((longitudes[j]/360.)*96.)/4.0 +i*0.15 +12.0
        if splitter >= 24.0:
            splitter = splitter - 24.0
        lst_times[i,:,j] = splitter
# -

LTS_Label_final = np.reshape(lst_times, (lst_times.size))

# +
fig = plt.figure(figsize=(16,12))
ax = fig.add_subplot(111, projection='3d')
cp = ax.scatter(xs=z_test_tsne_track[:, 0], ys=z_test_tsne_track[:, 1], zs=z_test_tsne_track[:, 2], c=LTS_Label_final, cmap="hsv", s=10.0)
ax.set_title("Local Solar Time", fontsize = fz*2.0, y = 1.05)
ax.set_zlim(-25,25)
cbar = fig.colorbar(cp, pad=0.002)
cbar.set_label(label="Hour", rotation="horizontal", fontsize=fz*1.5, labelpad=30, y = 0.55)
for ii in range(360):
    ax.view_init(elev=30, azim=ii)
    plt.savefig("Png_Storage/Diurnal_31/{:03d}.png".format(ii))
MAPS/Mooers_Logbook/Fully_Convolutional_W/3D_Analysis/3D_Visuals_For_Harshini_05272021.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.3.0
#     language: julia
#     name: julia-1.3
# ---

# # High Scores
#
# Manage a game player's High Score list.
#
# Your task is to build a high-score component of the classic Frogger game, one of the highest selling and addictive games of all time, and a classic of the arcade era. Your task is to write methods that return the highest score from the list, the last added score and the three highest scores.
#
# ## Source
#
# Tribute to the eighties' arcade game Frogger
#
# ## Version compatibility
# This exercise has been tested on Julia versions >=1.0.
#
# ## Submitting Incomplete Solutions
# It's possible to submit an incomplete solution so you can see how others have completed the exercise.

# ## Your solution

# +
# submit
# Query a high-score list `a` (original stub was unimplemented):
#   - latest=true -> the most recently added score (last element)
#   - top=1       -> the personal best as a scalar (see "Personal best" test)
#   - top=n, n>1  -> the n highest scores, highest first (fewer if the list
#                    is shorter than n)
#   - otherwise   -> the list itself, unchanged.
# The `sorted` keyword is kept for interface compatibility; the test suite
# never exercises it.
function scores(a::AbstractArray; latest = false, sorted = false, top = 0)
    latest && return last(a)
    if top == 1
        return maximum(a)
    elseif top > 1
        return sort(a, rev = true)[1:min(top, length(a))]
    end
    return a
end
# -

# ## Test suite

# +
# canonical data version 4.0.0
using Test

# include("high-scores.jl")

@testset "List of scores" begin
    @test scores([30, 50, 20, 70]) == [30, 50, 20, 70]
end

@testset "Latest score" begin
    @test scores([100, 0, 90, 30], latest=true) == 30
end

@testset "Personal best" begin
    @test scores([40, 100, 70], top=1) == 100
end

@testset "Top 3 scores" begin
    @test scores([10, 30, 90, 30, 100, 20, 10, 0, 30, 40, 40, 70, 70], top=3) == [100, 90, 70]
end

@testset "Personal top highest to lowest" begin
    @test scores([20, 10, 30], top=3) == [30, 20, 10]
end

@testset "Personal top when there is a tie" begin
    @test scores([40, 20, 40, 30], top = 3) == [40, 40, 30]
end

@testset "Personal top when there are less than 3" begin
    @test scores([30, 70], top=3) == [70, 30]
end

@testset "Personal top when there is only one" begin
    @test scores([40], top=3) == [40]
end
# -

# ## Prepare submission
# To submit your exercise, you need to save your solution in a file called `high-scores.jl` before using the CLI.
# You can either create it manually or use the following functions, which will automatically write every notebook cell that starts with `# submit` to the file `high-scores.jl`.
#

# +
# using Pkg; Pkg.add("Exercism")
# using Exercism
# Exercism.create_submission("high-scores")
exercises/high-scores/high-scores.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Ruby 2.5.3
#     language: ruby
#     name: ruby
# ---

# Gallery of Highcharts line-chart examples rendered through daru-view.
require 'daru/view'

Daru::View.plotting_library = :highcharts

# Basic multi-series line chart.
opts = {
  chart: {defaultSeriesType: 'line'},
  title: {
    text: 'Solar Employment Growth by Sector, 2010-2016'
  },
  subtitle: {
    text: 'Source: thesolarfoundation.com'
  },
  yAxis: {
    title: {
      text: 'Number of Employees'
    }
  },
  legend: {
    layout: 'vertical',
    align: 'right',
    verticalAlign: 'middle'
  },
  plotOptions: {
    # this is not working. Find the bug
    # series: {
    #   pointStart: 43934
    # }
  },
}

line_1 = Daru::View::Plot.new ([])
line_1.chart.options = opts;
line_1.chart.series_data = ([{ name: 'Installation', data: [43934, 52503, 57177, 69658, 97031, 119931, 137133, 154175] }, { name: 'Manufacturing', data: [24916, 24064, 29742, 29851, 32490, 30282, 38121, 40434] }, { name: 'Sales & Distribution', data: [11744, 17722, 16005, 19771, 20185, 24377, 32147, 39387] }, { name: 'Project Development', data: [nil, nil, 7988, 12169, 15112, 22452, 34400, 34227] }, { name: 'Other', data: [12908, 5948, 8105, 11248, 8989, 11816, 18274, 18111] }])

line_1.show_in_iruby

line_3 = Daru::View::Plot.new

# +
# line chart : chart with data-labels
opts = {
  chart: { type: 'line' },
  title: { text: 'Monthly Average Temperature' },
  subtitle: { text: 'Source: WorldClimate.com' },
  xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] },
  yAxis: { title: { text: 'Temperature (°C)' } },
  plotOptions: { line: { dataLabels: { enabled: true }, enableMouseTracking: false } },
}
series_dt = ([{ name: 'Tokyo', data: [7.0, 6.9, 9.5, 14.5, 18.4, 21.5, 25.2, 26.5, 23.3, 18.3, 13.9, 9.6] }, { name: 'London', data: [3.9, 4.2, 5.7, 8.5, 11.9, 15.2, 17.0, 16.6, 14.2, 10.3, 6.6, 4.8] }])
line_3.chart.options = opts;
line_3.chart.series_data = series_dt
# -

line_3.show_in_iruby # or line_3.chart (but it will show chart when notebook is saved)

# line chart : inverted axis
opts = {
  chart: { type: 'spline', inverted: true },
  title: { text: 'Atmosphere Temperature by Altitude' },
  subtitle: { text: 'According to the Standard Atmosphere Model' },
  xAxis: {
    reversed: false,
    title: { enabled: true, text: 'Altitude' },
    labels: { formatter: "function () { return this.value + 'km'; }".js_code },
    maxPadding: 0.05,
    showLastLabel: true
  },
  yAxis: {
    title: { text: 'Temperature' },
    labels: { formatter: "function () { return this.value + '°'; }".js_code },
    lineWidth: 2
  },
  legend: { enabled: false },
  tooltip: { headerFormat: '<b>{series.name}</b><br/>', pointFormat: '{point.x} km: {point.y}°C' },
  # NOTE(review): Highcharts expects the key 'enabled'; 'enable' is silently
  # ignored here — confirm and fix upstream.
  plotOptions: { spline: { marker: { enable: false } } },
}
data = [[0, 15], [10, -50], [20, -56.5], [30, -46.5], [40, -22.1], [50, -2.5], [60, -27.7], [70, -55.7], [80, -76.5]]
line_6 = Daru::View::Plot.new(data)
line_6.chart.options = opts;
line_6.show_in_iruby

# when multiple charts in one frame: Just add after above chart
line_6.chart.series({ :data => [[10, 15], [20, -50], [30, -56.5], [40, -46.5], [50, -22.1], [55, -2.5], [65, -27.7], [80, -55.7], [90, -76.5]]})

line_6.show_in_iruby

# line chart : chart with symbols
opts = {
  chart: { type: 'spline' },
  title: { text: 'Monthly Average Temperature' },
  subtitle: { text: 'Source: WorldClimate.com' },
  xAxis: { categories: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] },
  yAxis: {
    title: { text: 'Temperature' },
    labels: { formatter: "function () { return this.value + '°'; }".js_code }
  },
  tooltip: { crosshairs: true, shared: true },
  plotOptions: { spline: { marker: { radius: 4, lineColor: '#666666', lineWidth: 1 } } },
}
series_dt = [{ name: 'Tokyo', marker: { symbol: 'square' }, data: [7.0, 6.9, 9.5, 14.5, 18.2, 21.5, 25.2, { y: 26.5, marker: { symbol: 'url(https://www.highcharts.com/samples/graphics/sun.png)' } }, 23.3, 18.3, 13.9, 9.6] }, { name: 'London', marker: { symbol: 'diamond' }, data: [{ y: 3.9, marker: { symbol: 'url(https://www.highcharts.com/samples/graphics/snow.png)' } }, 4.2, 5.7, 8.5, 11.9, 15.2, 17.0, 16.6, 14.2, 10.3, 6.6, 4.8] }]
# NOTE(review): `data` here is the leftover altitude dataset from the
# inverted-axis example; presumably `[]` (or series_dt) was intended —
# series_data is overwritten on the next lines anyway. Confirm.
line_7 = Daru::View::Plot.new(data)
line_7.chart.options = opts;
line_7.chart.series_data = series_dt
line_7.show_in_iruby

# line chart : spline with wind-speed plot bands
opts = {
  chart: { type: 'spline' },
  title: { text: 'Wind speed during two days' },
  subtitle: { text: 'May 31 and and June 1, 2015 at two locations in Vik i Sogn, Norway' },
  xAxis: { type: 'datetime', labels: { overflow: 'justify' } },
  yAxis: {
    title: { text: 'Wind speed (m/s)' },
    minorGridLineWidth: 0,
    gridLineWidth: 0,
    alternateGridColor: nil,
    plotBands: [{ # Light air
      from: 0.3,
      to: 1.5,
      color: 'rgba(68, 170, 213, 0.1)',
      label: { text: 'Light air', style: { color: '#606060' } }
    }, { # Light breeze
      from: 1.5,
      to: 3.3,
      color: 'rgba(0, 0, 0, 0)',
      label: { text: 'Light breeze', style: { color: '#606060' } }
    }, { # Gentle breeze
      from: 3.3,
      to: 5.5,
      color: 'rgba(68, 170, 213, 0.1)',
      label: { text: 'Gentle breeze', style: { color: '#606060' } }
    }, { # Moderate breeze
      from: 5.5,
      to: 8,
      color: 'rgba(0, 0, 0, 0)',
      label: { text: 'Moderate breeze', style: { color: '#606060' } }
    }, { # Fresh breeze
      from: 8,
      to: 11,
      color: 'rgba(68, 170, 213, 0.1)',
      label: { text: 'Fresh breeze', style: { color: '#606060' } }
    }, { # Strong breeze
      from: 11,
      to: 14,
      color: 'rgba(0, 0, 0, 0)',
      label: { text: 'Strong breeze', style: { color: '#606060' } }
    }, { # High wind
      from: 14,
      to: 15,
      color: 'rgba(68, 170, 213, 0.1)',
      label: { text: 'High wind', style: { color: '#606060' } }
    }]
  },
  tooltip: { valueSuffix: ' m/s' },
  plotOptions: {
    spline: {
      lineWidth: 4,
      states: { hover: { lineWidth: 5 } },
      marker: { enabled: false },
      pointInterval: 3600000, # one hour
      # pointStart: Date.UTC(2015, 4, 31, 0, 0, 0)
    }
  },
  navigation: { menuItemStyle: { fontSize: '10px' } }
}
series_dt = [{ name: 'Hestavollane', data: [0.2, 0.8, 0.8, 0.8, 1, 1.3, 1.5, 2.9, 1.9, 2.6, 1.6, 3, 4, 3.6, 4.5, 4.2, 4.5, 4.5, 4, 3.1, 2.7, 4, 2.7, 2.3, 2.3, 4.1, 7.7, 7.1, 5.6, 6.1, 5.8, 8.6, 7.2, 9, 10.9, 11.5, 11.6, 11.1, 12, 12.3, 10.7, 9.4, 9.8, 9.6, 9.8, 9.5, 8.5, 7.4, 7.6] }, { name: 'Vik', data: [0, 0, 0.6, 0.9, 0.8, 0.2, 0, 0, 0, 0.1, 0.6, 0.7, 0.8, 0.6, 0.2, 0, 0.1, 0.3, 0.3, 0, 0.1, 0, 0, 0, 0.2, 0.1, 0, 0.3, 0, 0.1, 0.2, 0.1, 0.3, 0.3, 0, 3.1, 3.1, 2.5, 1.5, 1.9, 2.1, 1, 2.3, 1.9, 1.2, 0.7, 1.3, 0.4, 0.3] }]
line_8 = Daru::View::Plot.new(series_dt, opts)
line_8.show_in_iruby

# line chart : chart with line-log-axis/
opts = {
  title: { text: 'Logarithmic axis demo' },
  xAxis: { tickInterval: 1 },
  yAxis: { type: 'logarithmic', minorTickInterval: 0.1 },
  tooltip: { headerFormat: '<b>{series.name}</b><br />', pointFormat: 'x = {point.x}, y = {point.y}' },
  chart: {type: 'line'}
}
series_dt = [{ data: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512], pointStart: 1 }]
line_9 = Daru::View::Plot.new(series_dt, opts)
line_9.show_in_iruby
spec/dummy_iruby/Highcharts - line graphs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# ## Introduction
#
# This tutorial is primarily focused on introducing you to "Natural Language Processing (NLP)". Natural language, i.e. language used by humans for daily communication (like Japanese, English, German, etc.), has evolved over the years, and NLP is an attempt for computers to fully understand human language. From analysing parts of sentences based on word frequencies and identifying common writing styles, to the point where the computer can give an appropriate response to a person, NLP aims to improve a computer's understanding of human speech.
#
# This tutorial is designed and created on Python 2.7 using [Natural Language Toolkit (NLTK)](http://www.nltk.org/). For working in computational linguistics using Python, NLTK is an amazing library for playing with natural language.

# ### Tutorial content
#
# In this tutorial, we will be covering Language Processing tasks using the functionalities provided by the NLTK modules.
#
# We plan to cover the following topics in this tutorial:
# - [Installing the libraries](#Installing-the-libraries)
# - [Language Processing](#Language-Processing)
# - [Accessing Text Corpora and Lexical Resources](#Accessing-Text-Corpora-and-Lexical-Resources)
# - [Processing Raw Text](#Processing-Raw-Text)
# - [Categorizing and Tagging Words](#Categorizing-and-Tagging-Words)

# ## Installing the libraries

# Before getting started, you'll need to install the following libraries: Numpy, Matplotlib and NLTK. You can install them using the pip command as follows:
#
#     $ pip install -U numpy nltk Matplotlib

# We will be using the data from the NLTK Book collection for the examples in the tutorial below.
import nltk from nltk.book import * # ## Language Processing # Access the examples by entering their names: text1 # Check the NLTK text type: type(text1) # Access the contents in the sample NLTK text: print text1[:20] print (' '.join(str(text1[idx]) for idx, words in enumerate(text1[:42]))) # ### Searching Text # We first examine the context of a language, and instead of simply reading the text, we can use the concordance view to get the occurance of a given word along woth some context: text1.concordance("pictures") # Further, using the dispersion_plot attribute you can see the location of a word and the frequency of its occurrences in the below plot. text1.dispersion_plot(["old", "whale", "boat", "God", "Captain", 'the']) # From the above plot, it is clearly evident how frequently "the" word occurs in the text. # ### Counting Vocabulary # A good term to identify here is the lexical diversity of given text. # We use the lexical_diversity function to calculate on an average how many times each word is used in a text: def lexical_diversity(text): #calculate the length of text len_text = len(text) #calculate unique words in text vocab_text = len(set(text)) return len_text / float(vocab_text) lexical_diversity(text1) # Similarly, the percentage function gets the count of a word as a percentage of total number of words in the text: def percentage(word, text): #calculate the count of word in text word_count = text.count(word) #calculate the length of text len_text = len(text) return 100 * word_count / float(len_text) percentage('whale', text1) # ### Computing with Language: Simple Statistics # NLTK provides a built-in feature "FreqDist(text)" to calculate the Frequency distribution of each vocabulary in the text. 
# Calculate the Frequency distribution of text1: freq_dist_1 = FreqDist(text1) # Check the frequency of a particular vocabulary in text1: (Example: 'the') freq_dist_1['the'] # Attribute: items() - returns the sample with its count in the text: vocab = freq_dist_1.items() print vocab[0:15] # Attribute: N() - returns the total number of samples in the text i.e. length of text: freq_dist_1.N() # Attribute: max - returns the vocabulary with their total counts in the given text: freq_dist_1.max # We can also generate a Frequency Distribution plot. Following is a cumulative plot that shows 40 most frequently occuring words in text1: freq_dist_1.plot(40, cumulative=True) # ### Collocations # A collocation in NLTK is just a bigram that occurs frequently in the text and is calculated based on the frequency of individual words. The result of a collocation involves all the bigrams that occur frequently but involve rare words. text1.collocations() # ## Accessing Text Corpora and Lexical Resources # A text corpora is just a large body of text and NLTK package provides a list of corpus that we can use. In particular, we will be importing the Brown Corpus from NLTK and using it. # ### Accessing Text Corpora from nltk.corpus import brown # Check the NLTK corpora type: type(brown) # Attribute: words() - returns a list of words in the corpus: brown.words() # Attribute: sents() - returns a list of sentences where each sentence itself is a list of words: [' '.join(str(word) for word in brown.sents()[0])] # Since the sources have been categorized by the Genre, we can find all the categories available: # # Attribute: categories() print brown.categories() # We can further specify the categories of corpus to read using the following commands: print brown.words(categories='romance')[:20] [' '.join(str(word) for word in brown.sents(categories='romance')[0])] # ### NLTK's Frequency Distribution # We can calculate the count of words using NLTK's Freq Distribution. 
# + """ Source: #2.1 (http://www.nltk.org/book_1ed/ch02.html) """ humor_text = brown.words(categories='humor') humor_freqDist = nltk.FreqDist([w for w in humor_text]) samples = ['can', 'could', 'may', 'might', 'must', 'will'] for s in samples: print(s + ': %s' % humor_freqDist[s]) # - # We can further calculate the count of words based on different categories using NLTK's Conditional Freq Distribution and plot the counts of each sample for different categories. # + """ Source: #2.1 (http://www.nltk.org/book_1ed/ch02.html) """ cfd = nltk.ConditionalFreqDist((genre, word)\ for genre in brown.categories()\ for word in brown.words(categories=genre)) genres = ['news', 'religion', 'hobbies', 'romance', 'humor'] samples = ['can', 'could', 'may', 'might', 'must', 'will'] cfd.plot(conditions=genres, samples=samples) # - # ### Using Language Processing # It is important to understand and distinguish between the data types used here. # # To use the attributes of NLTK text, we first need to convert the corpus into type NLTK.text and then we can apply its functions. romance_text = nltk.Text(brown.words(categories='romance')) # Using attributs of NLTK text as described earlier: romance_text.concordance('may') print lexical_diversity(romance_text) print percentage('could', romance_text) # ### Lexical Resources # Lexical resouce is a collection of words/phrases along with some information that is associated with it. # # For instance, if vocab = set(text1) then vocab is a simple lexical resource. To put simply, a list of sorted words is another simplest leical resource. # ### Wordlist Corpora # NLTK contains corpora of lists of words that can be used to find an unusual or most commonly occuring words. # For instance, there is a corpus of stopwords i.e. frequently occuring words like 'the', 'of', 'a', etc and we usually want to filter these out before processing the document. 
from nltk.corpus import stopwords # Stopwords are available in different languages: print stopwords.words('english')[:15] print stopwords.words('german')[:15] # Here's a simple function that compares the input text with the NLTK's dictionary and returns a list of words that are unusual: def unusual_words(text): """ Source: #2.4 (http://www.nltk.org/book_1ed/ch02.html) """ text_vocab = set(w.lower() for w in text) english_vocab = set(w.lower() for w in nltk.corpus.words.words()) unusual = text_vocab.difference(english_vocab) return (unusual) unusual_words(['Hi','my','naame','is','Andrew','Carnegie','Mellon','!!!!','My','E-mail','id', 'is','<EMAIL>']) # ## Processing Raw Text # While it's conveniet to use existing collections of text corpora as seen above, the most common source of texts is undoubtedly the Web. And such texts need some form of processing. # # Using a sample of texts from Project Gutenberg that is available in the NLTK corpus collection, we will focus on concepts like tokenization, stemming and lemmatization functionality offered by NLTK library: # + from urllib import urlopen url = "http://www.gutenberg.org/files/2554/2554.txt" raw = urlopen(url).read() print type(raw) print len(raw) print raw[:75] # - # The raw content includes details like whitespaces, punctuations, line breaks, etc. that we are not interested in. Thus, for language processing, we break the strings into a list of words and punctuations. This process is called tokenization. # + tokens = nltk.word_tokenize(raw) print type(tokens) print len(tokens) print tokens[:10] # - # The next step is to create the NLTK text from this list to enable us to carry out all of the NLTK linguistic processing as explained earlier: # + text = nltk.Text(tokens) print type(text) print ' ' print text[1020:1060] print ' ' print text.collocations() # - # ### Normalizing Text # We often normalize the text by converting texts to lower case and thereby ignoring the distinction between "The" and "the". 
To further normalize the text, we strip off any affixes. This process is known as stemming. # Stemming is not a well-defined process, and we typically pick the stemmer that best suits the application we have in mind. Typically, NLTK provides us with a couple of stemmers. For instance, Porter and Lancaster stemmers. Each stemmer follows its own rules for stripping affixes and thus will often return different results. print tokens[265:300] # Porter Stemmer: porter = nltk.PorterStemmer() print [str(porter.stem(t)) for t in tokens[265:300]] # Lancaster Stemmer: lancaster = nltk.LancasterStemmer() print [str(lancaster.stem(t)) for t in tokens[265:300]] # The next step is to make sure that the resulting form is a known word in a dictionary. This process is known as lemmatization. # # The WordNet lemmatizer only removes affixes if the resulting word is in its dictionary. This additional checking process makes the lemmatizer slower than the above stemmers. lemmatizer = nltk.WordNetLemmatizer() print [str(lemmatizer.lemmatize(t)) for t in tokens[265:300]] # Note: Apart from the functions described above, Regular expressions can be widely used to create functions that will allow us to have more control over how we want to process/normalise texts (e.g. split the words, remove punctuations, remove non-string values and so on). # # Categorizing and Tagging Words # Back in elementary school we learnt the word classed such as nouns, verbs, adjectives, and adverbs. These "word classes" are useful categories for many language processing tasks. Our goal here is to understand how they are used in Natural Language Processing. # The process of classifying words into their parts of speech and labeling them accordingly is known as part-of-speech tagging (POS-tagging) or simply tagging. The collection of tags used for a particular task is known as a tagset. Our emphasis in this module is on understanding tags, and tagging text automatically. 
# ### Using a Tagger # Attribute: POS_tag: returns (word, tag) by processing a sequence of words, and attaching a part of speech tag to each word text = nltk.word_tokenize("They refuse to permit us to obtain the refuse permit") nltk.pos_tag(text) # You can use the following commands to understand the meaning of the tags: nltk.help.upenn_tagset() nltk.help.upenn_tagset('NN') # Let's look at another example, this time including some homonyms: text = nltk.word_tokenize("They refuse to permit us to obtain the refuse permit") nltk.pos_tag(text) # Notice that refuse and permit both appear as a present tense verb (VBP) and a noun (NN). The use of words in different parts of speech refer to a different meaning and thus, we need to know which word is being used in order to pronounce the text correctly. (For this reason, text-to-speech systems usually perform POS-tagging.) # ### Automatic Word Tagging # We see that the tag of a word depends on the word and its context within a sentence. For this reason, we will be working with data at the level of (tagged) sentences rather than words. Let's begin by loading the data we will be using. # The brown corpus from NLTK package has already been tagged for its part-of-speech. Since the corpus is also segmented into sentences, it has a tagged_sents() method that divides up the tagged words into sentences rather than presenting them as one big list. from nltk.corpus import brown brown_tagged_sents = brown.tagged_sents(categories='news') brown_sents = brown.sents(categories='news') # Example of 1st sentence in brown_tagged_sents: brown_tagged_sents[0] # Let's now analyse the different ways to tag the words and evaluate the functions performance. # #### The Default Tagger # The simplest possible tagger assigns the same tag to each word based on the most likely tag. 
Let's get the most likely tag:
# In the following code sample, we train a unigram tagger, use it to tag a sentence, then evaluate on the same data: """ Source: #5.5 (http://www.nltk.org/book_1ed/ch05.html) """ brown_tagged_sents = brown.tagged_sents(categories='news') brown_sents = brown.sents(categories='news') unigram_tagger = nltk.UnigramTagger(brown_tagged_sents) unigram_tagger.tag(brown_sents[2007]) unigram_tagger.evaluate(brown_tagged_sents) # Now that we are training a tagger on some data, we must be careful not to test it on the same data, as we did in the above example. A tagger that simply memorized its training data and made no attempt to construct a general model would get a perfect score, but would also be useless for tagging new text. Instead, we should split the data, training on 90% and testing on the remaining 10%: """ Source: #5.5 (http://www.nltk.org/book_1ed/ch05.html) """ size = int(len(brown_tagged_sents) * 0.9) print 'Size: ', size train_sents = brown_tagged_sents[:size] test_sents = brown_tagged_sents[size:] unigram_tagger = nltk.UnigramTagger(train_sents) unigram_tagger.evaluate(test_sents) # Although the score is worse, we now have a better picture of the usefulness of this tagger, i.e. its performance on new data. # ### Summary and references: # # The tutorial provides an overview of few terminologies and functions of NLTK package taht are taken from the below link. Following link gives a detailed description about each of these concepts. A lot of other materials on NLTK available online eventually refer to this link. # # 1. Natural Language Processing with Python: http://www.nltk.org/book_1ed/ # 2. Dive Into NLTK: http://textminingonline.com/category/nltk
2016/tutorial_final/112/NLTK_Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neurodesign (tutorial) # In the previous tutorial, you have seen that jittered-ISI designs display quite some variance in efficiency (ranging from approx. 2.3 tot 3.1). This shows you that there are very many different designs possible, even with only two conditions, each associated with its own efficiency. Ideally, we want the **best** design (i.e., most efficient) out of all the possible designs. This is, however, for many (slightly more complex) designs not possible, because you would need to simulate billions of designs to exhaustively test them all! Moreover, we have also seen that high statistical efficiency (long blocked designs) may also have unwanted psychological effects (predictability, boredom). # # So, how then should we look for (psychologically and statistically) efficient designs in the space of all possible designs? Fortunately, there exist methods to intelligently "search" for the optimal design among all possible designs without having to test them all and that allow you to define a balance between different aspects of your desired design. # # Many different packages exist that implement this type of "design optimisation" (like [optseq2](https://surfer.nmr.mgh.harvard.edu/fswiki/optseq2), and the MATLAB toolbox by [Kao](https://www.jstatsoft.org/article/view/v030i11)). Here, we will give a short demonstration of the "neurodesign" toolbox; `neurodesign` is a Python package, but they offer also a [web interface](http://neuropowertools.org/). Here, we will briefly walk you through the package. (Note that we use a slightly modified version of the original that works with Python3, which you can download [here](https://github.com/Neuroimaging-UvA/neurodesign).) 
# + import os # Neurodesigninternally paralellizes some computations using multithreading, # which is a massive burden on the CPU. So let's limit the number of threads os.environ["OMP_NUM_THREADS"] = "1" os.environ["OPENBLAS_NUM_THREADS"] = "1" os.environ["MKL_NUM_THREADS"] = "1" import numpy as np # we'll also need Numpy import neurodesign # - # The `neurodesign` package contains two important Python classes: the `experiment` class and the `optimisation` class. In the `experiment` class you define the most important parameters of your experiment. # # First, we have to define the intended functional scan's `TR` (let's choose 1.6) and the expected degree of autocorrelation, `rho`, in the BOLD signal (we'll discuss this in more detail next week; let's set it to 0.6). TR = 1.6 rho = 0.6 # Now, more importantly, we have to define parameters related to the experimental conditions: # * `n_stimuli`: the number of different stimulus *conditions*; # * `n_trials`: the number of different stimulus presentations per condition; # * `duration`: the duration of the experimental run (you can choose to set *either* `n_trials` *or* `duration`); # * `stim_duration`: the duration of each stimulus; # * `P`: the probability of occurring for each condition (e.g., [0.5, 0.5] for 50/50); # * `t_pre`: the time before the stimulus within each trial (e.g., fixation cross); # * `t_post`: the time after the stimulus within each trial (e.g., fixation cross; see image below); # # ![](neurodesign_trial_structure.png) # *Image from the [neurodesign preprint from Biorxiv](https://www.biorxiv.org/content/10.1101/119594v2)* # # Now, let's assume we our experiment is a simple "functional localizer", in which we want to find brain regions that respond more strongly to faces than to houses. Now, we want those to conditions to occur equally often and have a stimulus duration of 2 seconds. 
We assume that we don't want runs longer than 5 minutes (otherwise the participant might lose focus/fall asleep), so we'll set the `duration` instead of the `n_trials` parameter. And we assume that there is a pre and post trial time of 0.1 seconds. n_stimuli = 2 stim_duration = 1 duration = 5*60 P = [0.5, 0.5] t_pre = 0.1 t_post = 0.1 # Now, we have to define how `neurodesign` should create intertrial intervals (ITIs), the time between the offset of the previous trial and the onset of the next trial. In this package, you can specify a specific *distribution* and associated parameters from which these ITIs should be drawn (`ITImodel`). You can choose from `"fixed"`, `"uniform"`, and `"exponential"`. Let's pick `"uniform"` here, with a minimum ITI (`ITImin`) of 0.1 seconds and a maximum ITI (`ITImax`) of 8 seconds. ITImodel = "uniform" ITImin = 2 ITImax = 4 # Lastly, we need to define the contrast(s) that we want to test (`C`). Let's assume we want to test both "faces > houses" and "houses > faces". C = np.array([ [1, -1], [-1, 1] ]) # Now, with these parameters, let's create an `experiment` object! exp = neurodesign.experiment( TR=TR, rho=rho, n_stimuli=n_stimuli, stim_duration=stim_duration, P=P, duration=duration, t_pre=t_pre, t_post=t_post, ITImodel=ITImodel, ITImin=ITImin, ITImax=ITImax, C=C ) # Alright, now we need to create the `optimisation` object! This object defines how the experiment will be optimised. Neurodesign uses a ["genetic algorithm"](https://en.wikipedia.org/wiki/Genetic_algorithm) to efficiently look for the most efficient designs within the vast set of all possible designs, inspired by the way biological evolution works. We won't go into the specifics of the algorithm. # # The `weights` parameter of the `optimisation` class is very important: this allows you to balance different statistical and psychological aspects of your design. 
There are four aspects that can be optimized: # * $F_{e}$: the estimation efficiency (when you want to investigate the entire shape of the HRF); # * $F_{d}$: the detection efficiency (when you are only interested in amplitude changes/differences); # * $F_{f}$: how close the frequency of each condition is to the desired probability (given by `P` in the `experiment` object); # * $F_{c}$: how well the conditions are "counterbalanced" in time. "Counterbalancing", here, means that trying to make sure that each trial ($n$) has an equal probability to be followed by any condition (e.g., 50% condition A, 50% condition B for trial $n+1$), which can be extended to trials further in the future ($n+2, n+3$, etc.). You can think of this parameter reflecting "predictability" (higher = less predictable). # # Given that we're only interested in *detecting* the difference between faces and houses, let's set the weight for $F_{d}$ to 1 and the rest to zero. Also, let's tell `neurodesign` to save the best 10 designs (instead of only the very best) by setting the parameter `outdes` to 10. The other parameters (`preruncycles`, `cycles`, `seed`) are not important for now (but know that setting `cycles`, the number of iteration the algorithm runs for, to a higher number will increase the time it takes to run). # + weights = [0, 1, 0, 0] # order: Fe, Fd, Ff, Fc outdes = 10 opt = neurodesign.optimisation( experiment=exp, # we have to give our previously created `exp` object to this class as well weights=weights, preruncycles=10, cycles=1, seed=2, outdes=outdes ) # - # Now we can start the algorithm by calling the `optimize` method from the `opt` variable (this may take 5 minutes or so). opt.optimise() # Lastly, call the `evaluate` method to finalize the optimization run: opt.evaluate() # We can inspect the `opt` object, which now contains an attribute, `bestdesign`, that gives us the onsets (`.onsets`) and order (`.order`) of the best design found by the algorithm. 
print("Onsets: %s" % (opt.bestdesign.onsets,), end='\n\n') print("Order: %s" % (opt.bestdesign.order,)) # We even have access to the HRF-convolved design (`Xconv`; we'll plot the onsets on top): # + import matplotlib.pyplot as plt # %matplotlib inline Xconv = opt.bestdesign.Xconv plt.figure(figsize=(15, 5)) plt.plot(Xconv) for ons, cond in zip(opt.bestdesign.onsets, opt.bestdesign.order): c = 'tab:blue' if cond == 0 else 'tab:orange' plt.plot([ons, ons], [0.35, 0.37], c=c, lw=2) plt.legend(['Faces', 'Houses']) plt.grid() plt.xlim(0, Xconv.shape[0]) plt.show() # - # <div class='alert alert-warning'> # <b>ToDo</b> (0 points): Try changing the weights for the <tt>optimisation</tt> object and rerun the optimisation procedure. Plot the <tt>Xconv</tt> attribute of the resulting <tt>bestdesign</tt>. How does it differ from just optimizing detection efficiency? # </div> """ Try the ToDo here. """
NI-edu/fMRI-introduction/week_3/neurodesign.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf tf.__version__ import glob import imageio import matplotlib.pyplot as plt import numpy as np import os import PIL from IPython import display import numpy as np from keras.layers import Input from keras.models import Model, Sequential from keras.layers.core import Dense, Dropout, Flatten from keras.layers.advanced_activations import LeakyReLU from keras.datasets import mnist from keras.optimizers import Adam from keras import backend as K from keras import initializers (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data() train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32') train_images = (train_images - 127.5) / 127.5 nDim=100 X_train = train_images.reshape(60000, 784) adam = Adam(lr=0.0002, beta_1=0.5) # + generator = Sequential() generator.add(Dense(256, input_dim=nDim, kernel_initializer=initializers.RandomNormal(stddev=0.015))) generator.add(LeakyReLU(0.3)) generator.add(Dense(512)) generator.add(LeakyReLU(0.3)) generator.add(Dense(512)) generator.add(LeakyReLU(0.3)) generator.add(Dense(1024)) generator.add(LeakyReLU(0.3)) generator.add(Dense(784, activation='tanh')) generator.compile(loss='binary_crossentropy', optimizer=adam) # + discriminator = Sequential() discriminator.add(Dense(1024, input_dim=784, kernel_initializer=initializers.RandomNormal(stddev=0.015))) discriminator.add(LeakyReLU(0.3)) discriminator.add(Dropout(0.2)) discriminator.add(Dense(512)) discriminator.add(LeakyReLU(0.3)) discriminator.add(Dense(512)) discriminator.add(LeakyReLU(0.3)) discriminator.add(Dropout(0.2)) discriminator.add(Dense(256)) discriminator.add(LeakyReLU(0.3)) discriminator.add(Dropout(0.2)) discriminator.add(Dense(1, activation='sigmoid')) 
discriminator.compile(loss='binary_crossentropy', optimizer=adam)

# +
# Build the stacked GAN: freeze the discriminator so that training the
# combined model only updates the generator's weights.
discriminator.trainable = False
ganInput = Input(shape=(nDim,))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)


# +
def plotGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)):
    """Sample `examples` latent vectors and display the generator's output
    as a dim[0] x dim[1] grid of 28x28 grayscale digits."""
    latent = np.random.normal(0, 1, size=[examples, nDim])
    images = generator.predict(latent).reshape(examples, 28, 28)
    plt.figure(figsize=figsize)
    for idx in range(images.shape[0]):
        plt.subplot(dim[0], dim[1], idx + 1)
        plt.imshow(images[idx], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()


def train(epochs=1, batch_size=128):
    """Run the adversarial training loop: one discriminator update followed
    by one generator update per mini-batch, for `epochs` passes over X_train."""
    batch_count = int(X_train.shape[0] / batch_size)
    for epoch in range(1, epochs + 1):
        for _ in range(batch_count):
            # Fresh latent noise plus a random mini-batch of real digits.
            latent = np.random.normal(0, 1, size=[batch_size, nDim])
            real_batch = X_train[np.random.randint(0, X_train.shape[0], size=batch_size)]
            fake_batch = generator.predict(latent)
            combined = np.concatenate([real_batch, fake_batch])
            # One-sided label smoothing: real labels ~N(0.9, 0.03), fakes stay 0.
            disc_targets = np.zeros(2 * batch_size)
            disc_targets[:batch_size] = np.random.normal(0.9, 0.03, [batch_size])
            discriminator.trainable = True
            discriminator.train_on_batch(combined, disc_targets)
            # Generator step: train the stack to make the frozen discriminator
            # label pure noise-generated images as (smoothed) "real".
            gen_targets = np.random.normal(0.9, 0.03, [batch_size])
            discriminator.trainable = False
            gan.train_on_batch(latent, gen_targets)
        print(f"Epoch: {epoch}")
        if epoch % 20 == 0:
            plotGeneratedImages(epoch)
# -


train(100, 128)
2a.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ##### Approach #1: Using built in sort. # There are a few similar approaches we can take here, each with its own subtle differences. All are of an O(n-log-n) time complexity due to using the inbuilt sort, although they differ in their space complexity. # # a) Overwriting input: # # def sortedSquares(self, A: List[int]) -> List[int]: # for i in range(len(A)): # A[i] *= A[i] # A.sort() # return A # # This approach uses O(1) memory beyond the input array, and is truely in-place. However, it is not always a good idea to overwrite inputs. Remember that because we passed it by reference, the original is actually lost. Often functions like this are a part of an API, and in a lot of cases, nobody wants an API that clobbers their input. # # I think it's best to ask your interviewer if they want something done in-place or not. It is a common misconception that we should always be trying to do things in-place, overwriting the inputs. # # # b) Making a new array, not in place, O(n) auxillary space. # # def sortedSquares(self, A: List[int]) -> List[int]: # return sorted([v**2 for v in A]) # # Ahhhh, our good 'ol clever Python one-liner. There is a suble space inefficiency in it though. For a brief moment, we're using 3n memory, not 2n. The one line has 2 not-in-place operations in it; the list comprehension creates a new list, and so does sorted. The list comprehension list is promptly garbage collected upon function return, but because it was briefly there, the max memory usage was ultimately 3n. With lots of memory, this is totally fine, and the pythonic nature of it makes it a great solution. But we do need to be aware of it. # # c) Making a new array, not in place, O(1) auxillary space. # Making a new array, in place. 
# # def sortedSquares(self, A: List[int]) -> List[int]: # return_array = [v**2 for v in A] # return_array.sort() # This is in place! # return return_array # # So, is this O(1) space or O(n) space? Arguments can be made either way, sometimes people say to count the output data stucture in the calculation, and sometimes they don't. If we're talking about auxillary space, we generally don't count the output data structure, which would make it O(1). I think this is a more accurate way of putting it -- we are trying to measure what the algorithm itself is using, not just what the inevitable size of the output is. But it's important to be clear in interviews what you are and aren't counting. # # Overall thoughts on these approaches # You won't be coding any of these approaches in an interview (in my own very, very limited experience though!). By all means your interviewer will want to hear that you could do it this way, but there is 3 big problems if they are the only approaches you can come up with. # # We shouldn't need to use an O(n-log-n) sort operation on data that for the most part is already sorted. There's something not right with that. If this is the approach the interviewer wanted, they wouldn't have given you the numbers in a sorted list in the first place. # Following on from that, there are O(n) solutions. # Why would they be asking you to code something so trivial? Let's be honest. They want to see you writing some meaty code. # The remaining approaches exploit the existing sorting. If we were to go down the list squaring each number, we'd have a "v" sorted list, in that the squares of the negatives decrease, and then the squares of the positives increase, i.e. # [-4, -2, -1, 0, 1, 2, 3, 5] -> [16, 4, 1, 0, 1, 4, 9, 25] # # We can get this into a sorted list in O(n) time. 
# #
#
# https://leetcode.com/problems/squares-of-a-sorted-array/discuss/310865/Python%3A-A-comparison-of-lots-of-approaches!-Sorting-two-pointers-deque-iterator-generator


def sortedSquares(self, A: List[int]) -> List[int]:
    """Return the squares of the sorted list ``A`` in non-decreasing order.

    Because ``A`` is sorted, the largest square always sits at one of the
    two ends, so two pointers walk inward from both ends while the output
    array is filled from back (largest) to front in a single O(n) pass.
    """
    out = [0] * len(A)
    lo, hi = 0, len(A) - 1
    lo_sq = A[lo] ** 2
    hi_sq = A[hi] ** 2
    # Fill positions from the last (largest) slot down to the first.
    for pos in range(len(A) - 1, -1, -1):
        if lo_sq > hi_sq:
            out[pos] = lo_sq
            lo += 1
            lo_sq = A[lo] ** 2
        else:
            # Ties go to the right pointer, which also guarantees `lo`
            # never runs past the end of the list.
            out[pos] = hi_sq
            hi -= 1
            hi_sq = A[hi] ** 2
    return out
977. Squares of a Sorted Array.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sklearn import datasets from sklearn.neighbors import KNeighborsClassifier from sklearn import metrics iris = datasets.load_iris() print(iris.DESCR) iris.data iris.feature_names iris.target iris.target_names
Machine Learning/.ipynb_checkpoints/Machine Learning scikit-learn 1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf #if not tf.executing_eagerly(): tf.enable_eager_execution() from vacuum.datasets import generative_model from vacuum.io_ import preprocess, deprocess from vacuum.util import shift import matplotlib.pyplot as plt # %matplotlib inline train_batch = generative_model(psf_glob="/scratch/datasets/meerkat16_deep2like_morerange/*-bigpsf-psf.fits") iterator = train_batch.make_one_shot_iterator() index, min_flux, max_flux, psf, dirty, skymodel = iterator.get_next() scaled_skymodel = preprocess(skymodel, min_flux, max_flux) scaled_dirty = preprocess(dirty, min_flux, max_flux) deprocessed_output = deprocess(scaled_skymodel, min_flux, max_flux) shifted = shift(psf, x=-1) filter_ = tf.expand_dims(tf.expand_dims(tf.squeeze(shifted), 2), 3) convolved = tf.nn.conv2d(deprocessed_output, filter_, [1, 1, 1, 1], "SAME") scaled = (convolved/tf.reduce_max(convolved)) * tf.reduce_max(dirty) scaled_residuals = dirty - scaled residuals = dirty - convolved print(tf.reduce_max(convolved)/tf.reduce_max(dirty)) def render(f, a, imgdata, title): i = a.imshow(imgdata) f.colorbar(i, ax=a) a.set_title(title) fig, ((a1, a2), (a3, a4)) = plt.subplots(2, 2, figsize=(20, 20)) render(fig, a1, tf.squeeze(scaled), 'scaled') render(fig, a2, tf.squeeze(dirty), 'dirty') render(fig, a3, tf.squeeze(residuals), 'residuals') render(fig, a4, tf.squeeze(scaled_residuals), 'scaled_residuals')
notebooks/check if the tensorflow fft shift is correctly working.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Build PNG Files # # ### Basic Data Set # In this notebook, we'll take the *`basic`* data set, use `ibmseti` Python package to convert each data file into a spectrogram, then save as `.png` files. # # ### Split into training / test (cross-validation) and zip # Also, we'll split the data set into a training set and a test set and create a handful of zip files for each class. This will dovetail into the next tutorial where we will train a custom Watson Visual Recognition classifier (we will use the zip files of pngs) and measure it's performance with the test (cross-validation) set. # # ### Update for `primary` # # You may want to adapt this script to use the `primary` data set. # + from __future__ import division import cStringIO import glob import json import requests import ibmseti import os import zipfile import numpy as np import matplotlib.pyplot as plt # + #Making a local folder to put my data. mydatafolder = os.environ['PWD'] + '/' + 'current_data' if os.path.exists(mydatafolder) is False: os.makedirs(mydatafolder) print mydatafolder # + #If you are running this in IBM Apache Spark (via Data Science Experience) base_url = 'https://dal05.objectstorage.service.networklayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b' #ELSE, if you are outside of IBM: base_url = 'https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b' #NOTE: if you are outside of IBM, pulling down data will be slower. 
:/ # + ## You don't need to repeat this, of course, if you've already done this in the Step 1 notebook basic4zip = '{}/simsignals_basic_v2/basic4.zip'.format(base_url) os.system('curl {} > {}/{}'.format(basic4zip, mydatafolder, 'basic4.zip')) # - # !ls -alrht $mydatafolder outputpng_folder = mydatafolder + '/png' if os.path.exists(outputpng_folder) is False: os.makedirs(outputpng_folder) print outputpng_folder # + #Use `ibmseti`, or other methods, to draw the spectrograms def draw_spectrogram(data): aca = ibmseti.compamp.SimCompamp(data) spec = aca.get_spectrogram() # Instead of using SimCompAmp.get_spectrogram method # perform your own signal processing here before you create the spectrogram # # SimCompAmp.get_spectrogram is relatively simple. Here's the code to reproduce it: # # header, raw_data = r.content.split('\n',1) # complex_data = np.frombuffer(raw_data, dtype='i1').astype(np.float32).view(np.complex64) # shape = (int(32*8), int(6144/8)) # spec = np.abs( np.fft.fftshift( np.fft.fft( complex_data.reshape(*shape) ), 1) )**2 # # But instead of the line above, can you maniputlate `complex_data` with signal processing # techniques in the time-domain (windowing?, de-chirp?), or manipulate the output of the # np.fft.fft process in a way to improve the signal to noise (Welch periodogram, subtract noise model)? # # example: Apply Hanning Window # complex_data = complex_data.reshape(*shape) # complex_data = complex_data * np.hanning(complex_data.shape[1]) # spec = np.abs( np.fft.fftshift( np.fft.fft( complex_data ), 1) )**2 # Alternatively: # If you're using ibmseti 1.0.5 or greater, you can define a signal processing function, # which will be passed the 2D complex-value time-series numpy array. Your processing function should return a 2D # numpy array -- though it doesn't need to be complex-valued or even the same size. 
# The SimCompamp.get_spectrogram function will treat the output of your signals processing function # in the same way it treats the raw 2d complex-valued time-series data. # The fourier transform of each row in the 2D array will be calculated # and then squared to produce the spectrogram. # # def mySignalProcessing(compData): # return compData * np.hanning(compData.shape[1]) # # aca.sigProc(mySignalProcessing) # spc = aca.get_spectrogram() # # You can define more sophisticated signal processing inside your function. # fig, ax = plt.subplots(figsize=(10, 5)) # do different color mappings affect Watson's classification accuracy? # ax.imshow(np.log(spec), aspect = 0.5*float(spec.shape[1]) / spec.shape[0], cmap='hot') # ax.imshow(np.log(spec), aspect = 0.5*float(spec.shape[1]) / spec.shape[0], cmap='gray') # ax.imshow(np.log(spec), aspect = 0.5*float(spec.shape[1]) / spec.shape[0], cmap='Greys') # If you're going to plot the log, make sure there are no values less than or equal to zero spec_pos_min = spec[spec > 0].min() spec[spec <= 0] = spec_pos_min ax.imshow(np.log(spec), aspect = 0.5*float(spec.shape[1]) / spec.shape[0], cmap='gray') return fig, aca.header() # + from pyspark import SparkConf, SparkContext conf = SparkConf().set("spark.ui.showConsoleProgress", "false") sc = SparkContext(appName="PythonStatusAPIDemo", conf=conf) ## We're going to use Spark to distribute the job of creating the PNGs on the executor nodes myzipfilepath = os.path.join(mydatafolder,'basic4.zip') print myzipfilepath zz = zipfile.ZipFile(myzipfilepath) filenames = filter(lambda x: x.endswith('.dat'), zz.namelist()) #this filters out the top-level folder in the zip file, which is a separate entry in the namelist rdd = sc.parallelize(filenames, 8) #2 executors are available on free-tier IBM Spark clusters. 
If you have access to an Enterprise cluster, which has 30 executors, you should parallize to 120 partitions # + def extract_data(row): zzz = zipfile.ZipFile(myzipfilepath) return (row, zzz.open(row).read()) rdd = rdd.map(extract_data) # - def convert_to_spectrogram_and_save(row): name = os.path.basename(row[0]) fig, header = draw_spectrogram(row[1]) png_file = name + '.png' fig.savefig(outputpng_folder + '/' + png_file) plt.close(fig) return (name, header, png_file) rdd = rdd.map(convert_to_spectrogram_and_save) results = rdd.collect() #This took about 70s on an Enterprise cluster. It will take longer on your free-tier. results[0] # # Create Training / Test sets # # Using the `basic` list, we'll create training and test sets for each signal class. Then we'll archive the `.png` files into a handful of `.zip` files (We need the .zip files to be smaller than 100 MB because there is a limitation with the size of batches of data that are uploaded to Watson Visual Recognition when training a classifier.) # + # Grab the Basic file list in order to # Organize the Data into classes r = requests.get('{}/simsignals_files/public_list_basic_v2_26may_2017.csv'.format(base_url), timeout=(9.0, 21.0)) uuids_classes_as_list = r.text.split('\n')[1:-1] #slice off the first line (header) and last line (empty) def row_to_json(row): uuid,sigclass = row.split(',') return {'uuid':uuid, 'signal_classification':sigclass} uuids_classes_as_list = map(lambda row: row_to_json(row), uuids_classes_as_list) print "found {} files".format(len(uuids_classes_as_list)) uuids_group_by_class = {} for item in uuids_classes_as_list: uuids_group_by_class.setdefault(item['signal_classification'], []).append(item) # + #At first, use just 20 percent and 10 percent. This will be useful #as you prototype. Then you can come back here and increase these #percentages as needed. 
training_percentage = 0.20 test_percentage = 0.10 assert training_percentage + test_percentage <= 1.0 training_set_group_by_class = {} test_set_group_by_class = {} for k, v in uuids_group_by_class.iteritems(): total = len(v) training_size = int(total * training_percentage) test_size = int(total * test_percentage) training_set = v[:training_size] test_set = v[-1*test_size:] training_set_group_by_class[k] = training_set test_set_group_by_class[k] = test_set print '{}: training set size: {}'.format(k, len(training_set)) print '{}: test set size: {}'.format(k, len(test_set)) # - training_set_group_by_class['noise'][0] fnames = [outputpng_folder + '/' + vv['uuid'] + '.dat.png' for vv in v] zipfilefolder = mydatafolder + '/zipfiles' if os.path.exists(zipfilefolder) is False: os.makedirs(zipfilefolder) max_zip_file_size_in_mb = 25 # + #Create the Zip files containing the training PNG files #Note that this limits output files to be less than <max_zip_file_size_in_mb> MB because WatsonVR has a limit on the #size of input files that can be sent in single HTTP calls to train a custom classifier for k, v, in training_set_group_by_class.iteritems(): fnames = [outputpng_folder + '/' + vv['uuid'] + '.dat.png' for vv in v] #yes, files are <uuid>.dat.png :/ count = 1 for fn in fnames: archive_name = '{}/classification_{}_{}.zip'.format(zipfilefolder, count, k) if os.path.exists(archive_name): zz = zipfile.ZipFile(archive_name, mode='a') else: print 'creating new archive', archive_name zz = zipfile.ZipFile(archive_name, mode='w') zz.write(fn) zz.close() #if archive_name folder exceeds <max_zip_file_size_in_mb> MB, increase count to create a new one if os.path.getsize(archive_name) > max_zip_file_size_in_mb * 1024 ** 2: count += 1 # + # Create the Zip files containing the test PNG files using the following naming convention: # testset_<NUMBER>_<CLASS>.zip (The next notebook example using Watson will break if a # different naming convention is used) Refer to # 
https://www.ibm.com/watson/developercloud/visual-recognition/api/v3/#classify_an_image # for ZIP size and content limitations: # "The max number of images in a .zip file is limited to 20, and limited to 5 MB." for k, v, in test_set_group_by_class.iteritems(): fnames = [outputpng_folder + '/' + vv['uuid'] + '.dat.png' for vv in v] #yes, files are <uuid>.dat.png :/ count = 1 img_count = 0 for fn in fnames: archive_name = '{}/testset_{}_{}.zip'.format(zipfilefolder, count, k) if os.path.exists(archive_name): zz = zipfile.ZipFile(archive_name, mode='a') else: print 'creating new archive', archive_name zz = zipfile.ZipFile(archive_name, mode='w') zz.write(fn) zz.close() img_count += 1 #if archive_name folder exceeds 5 MB or there are more than 20 images, # increase count to create a new one if (os.path.getsize(archive_name) >= 4.7 * 1024 ** 2) or img_count == 20: count += 1 img_count = 0 # - # !ls -alrth $mydatafolder/zipfiles # # Exporting your Zip files # # If you've been running this on an IBM DSX Spark cluster and you wish to move your data from the local filespace, the easiest and fastest way is to push these PNG files to an IBM Object Storage account. An Object Storage instances was created for you when you signed up for DSX. # # You do NOT need to do this if you're going on to the next notebook where you use Watson to classify your images from this Spark cluster. That notebook will read the data from the local file space. # # # ### Get your Object Storage Credentials # 1. Log in to https://bluemix.net # 2. Scroll down and find your Object Storage instance. # * If you do not have one, find the "Catalog" link and look for the Object Storage service to create a new instance (5 GB of free space) # 3. Select the `Service Credentials` tab and `View Credentials` # 4. Copy these into your notebook below. # # ### Create a Container # 5. CREATE A CONTAINER in your Object Storage that you will use below. 
#

# +
import swiftclient.client as swiftclient

# Paste your own Object Storage service credentials here (Bluemix ->
# Object Storage instance -> Service Credentials -> View Credentials).
credentials = {
    'auth_uri':'',
    'global_account_auth_uri':'',
    'username':'xx',
    'password':"xx",
    'auth_url':'https://identity.open.softlayer.com',
    'project':'xx',
    'projectId':'xx',
    'region':'dallas',
    'userId':'xx',
    'domain_id':'xx',
    'domain_name':'xx',
    'tenantId':'xx'
}
# -

# BUGFIX: this cell previously referenced an undefined name `creds_seti_public`;
# per the instructions above ("Copy these into your notebook below") it should
# use the `credentials` dict defined in the previous cell.
conn_seti_data = swiftclient.Connection(
    key=credentials['password'],
    authurl=credentials['auth_url'] + "/v3",
    auth_version='3',
    os_options={
        "project_id": credentials['projectId'],
        "user_id": credentials['userId'],
        "region_name": credentials['region']})

# +
myObjectStorageContainer = 'seti_pngs'

someFile = os.path.join(zipfilefolder, 'classification_1_narrowband.zip')

# use a context manager so the file handle is closed after the upload
with open(someFile, 'rb') as fh:
    etag = conn_seti_data.put_object(myObjectStorageContainer, someFile, fh.read())
tutorials/Step_3_Build_Set_Of_PNG_Files.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="N7ITxKLUkX0v" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" id="yOYx6tzSnWQ3" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="6xgB0Oz5eGSQ" # # Introduction to graphs and tf.function # + [markdown] id="w4zzZVZtQb1w" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/intro_to_graphs"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/intro_to_graphs.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="RBKqnXI9GOax" # ## Overview # # This guide goes beneath the surface of TensorFlow and Keras 
to demonstrate how TensorFlow works. If you instead want to immediately get started with Keras, check out the [collection of Keras guides](https://www.tensorflow.org/guide/keras/). # # In this guide, you'll learn how TensorFlow allows you to make simple changes to your code to get graphs, how graphs are stored and represented, and how you can use them to accelerate your models. # # Note: For those of you who are only familiar with TensorFlow 1.x, this guide demonstrates a very different view of graphs. # # **This is a big-picture overview that covers how `tf.function` allows you to switch from eager execution to graph execution.** For a more complete specification of `tf.function`, go to the [`tf.function` guide](function). # # + [markdown] id="v0DdlfacAdTZ" # ### What are graphs? # # In the previous three guides, you ran TensorFlow **eagerly**. This means TensorFlow operations are executed by Python, operation by operation, and returning results back to Python. # # While eager execution has several unique advantages, graph execution enables portability outside Python and tends to offer better performance. **Graph execution** means that tensor computations are executed as a *TensorFlow graph*, sometimes referred to as a `tf.Graph` or simply a "graph." # # **Graphs are data structures that contain a set of `tf.Operation` objects, which represent units of computation; and `tf.Tensor` objects, which represent the units of data that flow between operations.** They are defined in a `tf.Graph` context. Since these graphs are data structures, they can be saved, run, and restored all without the original Python code. # # This is what a TensorFlow graph representing a two-layer neural network looks like when visualized in TensorBoard. 
# # + [markdown] id="FvQ5aBuRGT1o" # <img alt="A simple TensorFlow graph" src="https://github.com/tensorflow/docs/blob/master/site/en/guide/images/intro_to_graphs/two-layer-network.png?raw=1"> # + [markdown] id="DHpY3avXGITP" # ### The benefits of graphs # # With a graph, you have a great deal of flexibility. You can use your TensorFlow graph in environments that don't have a Python interpreter, like mobile applications, embedded devices, and backend servers. TensorFlow uses graphs as the format for [saved models](saved_model) when it exports them from Python. # # Graphs are also easily optimized, allowing the compiler to do transformations like: # # * Statically infer the value of tensors by folding constant nodes in your computation *("constant folding")*. # * Separate sub-parts of a computation that are independent and split them between threads or devices. # * Simplify arithmetic operations by eliminating common subexpressions. # # + [markdown] id="o1x1EOD9GjnB" # There is an entire optimization system, [Grappler](./graph_optimization.ipynb), to perform this and other speedups. # # In short, graphs are extremely useful and let your TensorFlow run **fast**, run **in parallel**, and run efficiently **on multiple devices**. # # However, you still want to define your machine learning models (or other computations) in Python for convenience, and then automatically construct graphs when you need them. # + [markdown] id="k-6Qi0thw2i9" # ## Setup # + id="goZwOXp_xyQj" import tensorflow as tf import timeit from datetime import datetime # + [markdown] id="pSZebVuWxDXu" # ## Taking advantage of graphs # # You create and run a graph in TensorFlow by using `tf.function`, either as a direct call or as a decorator. `tf.function` takes a regular function as input and returns a `Function`. **A `Function` is a Python callable that builds TensorFlow graphs from the Python function. 
You use a `Function` in the same way as its Python equivalent.** # # + id="HKbLeJ1y0Umi" # Define a Python function. def a_regular_function(x, y, b): x = tf.matmul(x, y) x = x + b return x # `a_function_that_uses_a_graph` is a TensorFlow `Function`. a_function_that_uses_a_graph = tf.function(a_regular_function) # Make some tensors. x1 = tf.constant([[1.0, 2.0]]) y1 = tf.constant([[2.0], [3.0]]) b1 = tf.constant(4.0) orig_value = a_regular_function(x1, y1, b1).numpy() # Call a `Function` like a Python function. tf_function_value = a_function_that_uses_a_graph(x1, y1, b1).numpy() assert(orig_value == tf_function_value) # + [markdown] id="PNvuAYpdrTOf" # On the outside, a `Function` looks like a regular function you write using TensorFlow operations. [Underneath](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/eager/def_function.py), however, it is *very different*. A `Function` **encapsulates [several `tf.Graph`s behind one API](#polymorphism_one_function_many_graphs).** That is how `Function` is able to give you the [benefits of graph execution](#the_benefits_of_graphs), like speed and deployability. # + [markdown] id="MT7U8ozok0gV" # `tf.function` applies to a function *and all other functions it calls*: # + id="rpz08iLplm9F" def inner_function(x, y, b): x = tf.matmul(x, y) x = x + b return x # Use the decorator to make `outer_function` a `Function`. @tf.function def outer_function(x): y = tf.constant([[2.0], [3.0]]) b = tf.constant(4.0) return inner_function(x, y, b) # Note that the callable will create a graph that # includes `inner_function` as well as `outer_function`. outer_function(tf.constant([[1.0, 2.0]])).numpy() # + [markdown] id="P88fOr88qgCj" # If you have used TensorFlow 1.x, you will notice that at no time did you need to define a `Placeholder` or `tf.Session`. 
# + [markdown] id="wfeKf0Nr1OEK" # ### Converting Python functions to graphs # # Any function you write with TensorFlow will contain a mixture of built-in TF operations and Python logic, such as `if-then` clauses, loops, `break`, `return`, `continue`, and more. While TensorFlow operations are easily captured by a `tf.Graph`, Python-specific logic needs to undergo an extra step in order to become part of the graph. `tf.function` uses a library called AutoGraph (`tf.autograph`) to convert Python code into graph-generating code. # # + id="PFObpff1BMEb" def simple_relu(x): if tf.greater(x, 0): return x else: return 0 # `tf_simple_relu` is a TensorFlow `Function` that wraps `simple_relu`. tf_simple_relu = tf.function(simple_relu) print("First branch, with graph:", tf_simple_relu(tf.constant(1)).numpy()) print("Second branch, with graph:", tf_simple_relu(tf.constant(-1)).numpy()) # + [markdown] id="hO4DBUNZBMwQ" # Though it is unlikely that you will need to view graphs directly, you can inspect the outputs to check the exact results. These are not easy to read, so no need to look too carefully! # + id="lAKaat3w0gnn" # This is the graph-generating output of AutoGraph. print(tf.autograph.to_code(simple_relu)) # + id="8x6RAqza1UWf" # This is the graph itself. print(tf_simple_relu.get_concrete_function(tf.constant(1)).graph.as_graph_def()) # + [markdown] id="GZ4Ieg6tBE6l" # Most of the time, `tf.function` will work without special considerations. 
However, there are some caveats, and the [tf.function guide](./function.ipynb) can help here, as well as the [complete AutoGraph reference](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md) # + [markdown] id="sIpc_jfjEZEg" # ### Polymorphism: one `Function`, many graphs # # A `tf.Graph` is specialized to a specific type of inputs (for example, tensors with a specific [`dtype`](https://www.tensorflow.org/api_docs/python/tf/dtypes/DType) or objects with the same [`id()`](https://docs.python.org/3/library/functions.html#id])). # # Each time you invoke a `Function` with new `dtypes` and shapes in its arguments, `Function` creates a new `tf.Graph` for the new arguments. The `dtypes` and shapes of a `tf.Graph`'s inputs are known as an **input signature** or just a **signature**. # # The `Function` stores the `tf.Graph` corresponding to that signature in a `ConcreteFunction`. **A `ConcreteFunction` is a wrapper around a `tf.Graph`.** # # + id="LOASwhbvIv_T" @tf.function def my_relu(x): return tf.maximum(0., x) # `my_relu` creates new graphs as it observes more signatures. print(my_relu(tf.constant(5.5))) print(my_relu([1, -1])) print(my_relu(tf.constant([3., -3.]))) # + [markdown] id="1qRtw7R4KL9X" # If the `Function` has already been called with that signature, `Function` does not create a new `tf.Graph`. # + id="TjjbnL5OKNDP" # These two calls do *not* create new graphs. print(my_relu(tf.constant(-2.5))) # Signature matches `tf.constant(5.5)`. print(my_relu(tf.constant([-1., 1.]))) # Signature matches `tf.constant([3., -3.])`. # + [markdown] id="UohRmexhIpvQ" # Because it's backed by multiple graphs, a `Function` is **polymorphic**. That enables it to support more input types than a single `tf.Graph` could represent, as well as to optimize each `tf.Graph` for better performance. # + id="dxzqebDYFmLy" # There are three `ConcreteFunction`s (one for each graph) in `my_relu`. 
# The `ConcreteFunction` also knows the return type and shape! print(my_relu.pretty_printed_concrete_signatures()) # + [markdown] id="V11zkxU22XeD" # ## Using `tf.function` # # So far, you've learned how to convert a Python function into a graph simply by using `tf.function` as a decorator or wrapper. But in practice, getting `tf.function` to work correctly can be tricky! In the following sections, you'll learn how you can make your code work as expected with `tf.function`. # + [markdown] id="yp_n0B5-P0RU" # ### Graph execution vs. eager execution # # The code in a `Function` can be executed both eagerly and as a graph. By default, `Function` executes its code as a graph: # # + id="_R0BOvBFxqVZ" @tf.function def get_MSE(y_true, y_pred): sq_diff = tf.pow(y_true - y_pred, 2) return tf.reduce_mean(sq_diff) # + id="zikMVPGhmDET" y_true = tf.random.uniform([5], maxval=10, dtype=tf.int32) y_pred = tf.random.uniform([5], maxval=10, dtype=tf.int32) print(y_true) print(y_pred) # + id="07r08Dh158ft" get_MSE(y_true, y_pred) # + [markdown] id="cyZNCRcQorGO" # To verify that your `Function`'s graph is doing the same computation as its equivalent Python function, you can make it execute eagerly with `tf.config.run_functions_eagerly(True)`. This is a switch that **turns off `Function`'s ability to create and run graphs**, instead executing the code normally. # + id="lKoF6NjPoI8w" tf.config.run_functions_eagerly(True) # + id="9ZLqTyn0oKeM" get_MSE(y_true, y_pred) # + id="cV7daQW9odn-" # Don't forget to set it back when you are done. tf.config.run_functions_eagerly(False) # + [markdown] id="DKT3YBsqy0x4" # However, `Function` can behave differently under graph and eager execution. The Python [`print`](https://docs.python.org/3/library/functions.html#print) function is one example of how these two modes differ. Let's check out what happens when you insert a `print` statement to your function and call it repeatedly. 
# # + id="BEJeVeBEoGjV" @tf.function def get_MSE(y_true, y_pred): print("Calculating MSE!") sq_diff = tf.pow(y_true - y_pred, 2) return tf.reduce_mean(sq_diff) # + [markdown] id="3sWTGwX3BzP1" # Observe what is printed: # + id="3rJIeBg72T9n" error = get_MSE(y_true, y_pred) error = get_MSE(y_true, y_pred) error = get_MSE(y_true, y_pred) # + [markdown] id="WLMXk1uxKQ44" # Is the output surprising? **`get_MSE` only printed once even though it was called *three* times.** # # To explain, the `print` statement is executed when `Function` runs the original code in order to create the graph in a process known as ["tracing"](function.ipynb#tracing). **Tracing captures the TensorFlow operations into a graph, and `print` is not captured in the graph.** That graph is then executed for all three calls **without ever running the Python code again**. # # As a sanity check, let's turn off graph execution to compare: # + id="oFSxRtcptYpe" # Now, globally set everything to run eagerly to force eager execution. tf.config.run_functions_eagerly(True) # + id="qYxrAtvzNgHR" # Observe what is printed below. error = get_MSE(y_true, y_pred) error = get_MSE(y_true, y_pred) error = get_MSE(y_true, y_pred) # + id="_Df6ynXcAaup" tf.config.run_functions_eagerly(False) # + [markdown] id="PUR7qC_bquCn" # `print` is a *Python side effect*, and there are [other differences](function#limitations) that you should be aware of when converting a function into a `Function`. # + [markdown] id="oTZJfV_tccVp" # Note: If you would like to print values in both eager and graph execution, use `tf.print` instead. 
# + [markdown] id="rMT_Xf5yKn9o" # ### Non-strict execution # # <a id="non-strict"></a> # # Graph execution only executes the operations necessary to produce the observable effects, which includes: # # - The return value of the function # - Documented well-known side-effects such as: # - Input/output operations, like `tf.print` # - Debugging operations, such as the assert functions in `tf.debugging` # - Mutations of `tf.Variable` # # This behavior is usually known as "Non-strict execution", and differs from eager execution, which steps through all of the program operations, needed or not. # # In particular, runtime error checking does not count as an observable effect. If an operation is skipped because it is unnecessary, it cannot raise any runtime errors. # # In the following example, the "unnecessary" operation `tf.gather` is skipped during graph execution, so the runtime error `InvalidArgumentError` is not raised as it would be in eager execution. Do not rely on an error being raised while executing a graph. # + id="OdN0nKlUwj7M" def unused_return_eager(x): # Get index 1 will fail when `len(x) == 1` tf.gather(x, [1]) # unused return x try: print(unused_return_eager(tf.constant([0.0]))) except tf.errors.InvalidArgumentError as e: # All operations are run during eager execution so an error is raised. print(f'{type(e).__name__}: {e}') # + id="d80Fob4MwhTs" @tf.function def unused_return_graph(x): tf.gather(x, [1]) # unused return x # Only needed operations are run during graph exection. The error is not raised. print(unused_return_graph(tf.constant([0.0]))) # + [markdown] id="def6MupG9R0O" # ###`tf.function` best practices # # It may take some time to get used to the behavior of `Function`. To get started quickly, first-time users should play around with decorating toy functions with `@tf.function` to get experience with going from eager to graph execution. # # *Designing for `tf.function`* may be your best bet for writing graph-compatible TensorFlow programs. 
Here are some tips: # - Toggle between eager and graph execution early and often with `tf.config.run_functions_eagerly` to pinpoint if/ when the two modes diverge. # - Create `tf.Variable`s # outside the Python function and modify them on the inside. The same goes for objects that use `tf.Variable`, like `keras.layers`, `keras.Model`s and `tf.optimizers`. # - Avoid writing functions that [depend on outer Python variables](function#depending_on_python_global_and_free_variables), excluding `tf.Variable`s and Keras objects. # - Prefer to write functions which take tensors and other TensorFlow types as input. You can pass in other object types but [be careful](function#depending_on_python_objects)! # - Include as much computation as possible under a `tf.function` to maximize the performance gain. For example, decorate a whole training step or the entire training loop. # # + [markdown] id="ViM3oBJVJrDx" # ## Seeing the speed-up # + [markdown] id="A6NHDp7vAKcJ" # `tf.function` usually improves the performance of your code, but the amount of speed-up depends on the kind of computation you run. Small computations can be dominated by the overhead of calling a graph. 
You can measure the difference in performance like so: # + id="jr7p1BBjauPK" x = tf.random.uniform(shape=[10, 10], minval=-1, maxval=2, dtype=tf.dtypes.int32) def power(x, y): result = tf.eye(10, dtype=tf.dtypes.int32) for _ in range(y): result = tf.matmul(x, result) return result # + id="ms2yJyAnUYxK" print("Eager execution:", timeit.timeit(lambda: power(x, 100), number=1000)) # + id="gUB2mTyRYRAe" power_as_graph = tf.function(power) print("Graph execution:", timeit.timeit(lambda: power_as_graph(x, 100), number=1000)) # + [markdown] id="Q1Pfo5YwwILi" # `tf.function` is commonly used to speed up training loops, and you can learn more about it in [Writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch#speeding-up_your_training_step_with_tffunction) with Keras. # # Note: You can also try [`tf.function(jit_compile=True)`](https://www.tensorflow.org/xla#explicit_compilation_with_tffunctionjit_compiletrue) for a more significant performance boost, especially if your code is heavy on TF control flow and uses many small tensors. # + [markdown] id="sm0bNFp8PX53" # ### Performance and trade-offs # # Graphs can speed up your code, but the process of creating them has some overhead. For some functions, the creation of the graph takes more time than the execution of the graph. **This investment is usually quickly paid back with the performance boost of subsequent executions, but it's important to be aware that the first few steps of any large model training can be slower due to tracing.** # # No matter how large your model, you want to avoid tracing frequently. The `tf.function` guide discusses [how to set input specifications and use tensor arguments](function#controlling_retracing) to avoid retracing. If you find you are getting unusually poor performance, it's a good idea to check if you are retracing accidentally. # + [markdown] id="F4InDaTjwmBA" # ## When is a `Function` tracing? 
# # To figure out when your `Function` is tracing, add a `print` statement to its code. As a rule of thumb, `Function` will execute the `print` statement every time it traces. # + id="hXtwlbpofLgW" @tf.function def a_function_with_python_side_effect(x): print("Tracing!") # An eager-only side effect. return x * x + tf.constant(2) # This is traced the first time. print(a_function_with_python_side_effect(tf.constant(2))) # The second time through, you won't see the side effect. print(a_function_with_python_side_effect(tf.constant(3))) # + id="inzSg8yzfNjl" # This retraces each time the Python argument changes, # as a Python argument could be an epoch count or other # hyperparameter. print(a_function_with_python_side_effect(2)) print(a_function_with_python_side_effect(3)) # + [markdown] id="rtN8NW6AfKye" # New Python arguments always trigger the creation of a new graph, hence the extra tracing. # # + [markdown] id="D1kbr5ocpS6R" # ## Next steps # # You can learn more about `tf.function` on the API reference page and by following the [Better performance with `tf.function`](function) guide.
site/en/guide/intro_to_graphs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1.1 Exercise 1 - Our first Quantum Program: Hello Quantum World # --- # ## 1.1.0 Import of libraries for the program # Full import of Qiskit library from qiskit import * # --- # ## 1.1.1 The Quantum Circuit # For our first Quantum Program, we will input 2x |0<b>⟩</b> Qubits, then measure the output of the Qubits, which should yield the output: 00.<br> # # When we initiate our quantum program, we need three components:<br> # **Quantum Registers:** Registers which consist of Qubits.<br> # **Classical Registers:** Registers containing classical bits.<br> # **Quantum Circuit:** A circuit to setup the experiment.<br> # # <strong style="color: orange;">Firstly</strong>, we create a quantum register with 2 Qubits and a classical register with 2 bits and set them into the circuit using the script below: # + try: # Create a Quantum Register with 2 Qubits. qr = QuantumRegister(2) # Create a classical register with 2 bits cr = ClassicalRegister(2) # Create a Quantum Circuit containing our QR and CR. circuit = QuantumCircuit(qr,cr) # Prepare the method to draw our quantum program circuit.draw(); except NameError: print("ERROR: You have not run the library import block above correctly\n"*10) # - # <div class="alert alert-block alert-warning"> # <b>Note:</b> When we execute our Quantum Program and measure the Qubits in the end, always reading either 1 or 0, the information is stored in the Classical Register and consequently returned to us as the output of the Quantum Program. # </div> # # --- # ## 1.1.2 Adding operations to the Quantum Circuit # <strong style="color: orange;">Secondly</strong>, we add the measurement operations. 
# + # Add a measurement gate here as presented on stage or if you are at home, write "circuit.measure(qr, cr)" #TODO: ⚠️⚠️⚠️⚠️⚠️ Add a Measurement Operation with two bits here ⚠️⚠️⚠️⚠️⚠️ # - # --- # ## 1.1.3 Visualising the Quantum Circuit # <strong style="color: orange;">Thirdly</strong>, as we want to view our circuit, we call the circuit.draw() function, which prints the Quantum Register (represented as single lines), the Classical Register (represented as double lines) and all the operations that have been added to the circuit (Which in this case is only measuring operations). circuit.draw(output='mpl') # --- # ## 1.1.4 Run the Quantum Program # <strong style="color: orange;">Lastly</strong>, to measure the Qubit, we need to run the Quantum Program using a tool from Qiskit called the BasicAer module. From the BasicAer module, we will import a Quantum Simulator, which simulates how our Quantum Computer would process the Quantum Program. *(Don't worry: The real thing comes later)* # + # Load the backend to run our Quantum Program backend = BasicAer.get_backend('statevector_simulator') # Execute the Quantum Program job = qiskit.execute(circuit, backend) # Get the results from the job result = job.result() # Print the result result.get_counts(circuit) # - # --- # ## Extra: Online IBM Q Composer # IBM Offers an interactive tool online, which can be seen below.<br> # It too has a Quantum Register with Qubits in it (q[0], q[1]) and a Classical Register that stores the measured values (the bottom line with a "c" in front of it). # # To the right of the cuircuit you may notice some colorful boxes with letters in them, these are Quantum Gates which containes one specific operation each that can be applied to a Qubit by drag-and-drop. # ![Sk%C3%A6rmbillede%202019-04-01%20kl.%2011.11.59.png](attachment:Sk%C3%A6rmbillede%202019-04-01%20kl.%2011.11.59.png) # --- # ## Next Jupyter notebook - Now let's perform some operations on the Qubit! 
# [1.2 Exercise - X-gate operation on Qubit](1.2%20-%20Gate%20operation%20-%20X-gate.ipynb)
notebook-exercises/1.1 - Hello Quantum World.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Maxwell Boltzman distribution of speed

# \begin{equation}
# f(v,T)=4\pi v^2(\frac{m}{2\pi kT})^{3/2}e^{\frac{-mv^2}{2 KT}}
# \end{equation}
# \begin{equation}
# RMS=\sqrt{\frac{3kT}{m}}
# \end{equation}
# \begin{equation}
# MPS=\sqrt{\frac{2kT}{m}}
# \end{equation}
# \begin{equation}
# MS=\sqrt{\frac{8kT}{\pi m}}
# \end{equation}

import math as mt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline


def maxwell(M, v, T):
    """Maxwell-Boltzmann speed distribution f(v, T).

    M : molar mass in kg/mol; v : speed(s) in m/s (scalar or array);
    T : temperature in K. Returns the probability density at v.
    """
    NA = 6.02e+23            # Avogadro's number (1/mol)
    k = 1.38e-23             # Boltzmann constant (J/K)
    m = M / NA               # mass of one molecule (kg)
    a = m / (2 * k)          # exponent scale m/(2k)
    b = (a / np.pi) ** 1.5   # prefactor (m/(2 pi k))^(3/2)
    prob = b * T ** (-1.5) * 4 * np.pi * v ** 2 * np.exp(-a * v ** 2 / T)
    return prob


def maxwell1(M, v, T):
    """Polynomial (4*pi*v^2) factor of the Maxwell-Boltzmann distribution,
    i.e. maxwell() without the exponential term, so that
    maxwell(M, v, T) == maxwell1(M, v, T) * maxwell2(M, v, T).
    """
    NA = 6.02e+23
    k = 1.38e-23
    m = M / NA
    # BUGFIX: was `a = m/(2*k)*v**2` — the stray *v**2 corrupted the
    # prefactor (it made b depend on v^3), breaking the decomposition
    # compared in the semilogy plot below.
    a = m / (2 * k)
    b = (a / np.pi) ** 1.5
    prob2 = b * T ** (-1.5) * 4 * np.pi * v ** 2
    return prob2


def maxwell2(M, v, T):
    """Exponential factor exp(-m v^2 / (2 k T)) of the Maxwell-Boltzmann
    distribution (the complement of maxwell1)."""
    NA = 6.02e+23
    k = 1.38e-23
    m = M / NA
    # BUGFIX: was `a = m/(2*k)*v**2`, which made the exponent
    # exp(-m v^4/(2kT)) — that underflows to 0 for the plotted speeds.
    # Also removed the unused prefactor `b`.
    a = m / (2 * k)
    prob3 = np.exp(-a * v ** 2 / T)
    return prob3


# Molar masses in kg/mol.
# NOTE(review): 16e-3 is atomic oxygen; molecular O2 would be 32e-3 — and
# likewise 2e-3 is H2, 28e-3 is N2. Confirm which species is intended.
Mo = 16e-3  #mass of oxygen
Mh = 2e-3   # mass of hydrogen
Mn = 28e-3  # mass of nitrogen

velocity = np.arange(100, 750, 10)

# probability for oxygen at 100K,200K,300K (comment fixed to match the code)
prob200 = maxwell(Mo, velocity, 100.0)
prob300 = maxwell(Mo, velocity, 200.0)
prob400 = maxwell(Mo, velocity, 300.0)

# polynomial factor for oxygen at 100K,200K,300K
prob2001 = maxwell1(Mo, velocity, 100.0)
prob3001 = maxwell1(Mo, velocity, 200.0)
prob4001 = maxwell1(Mo, velocity, 300.0)

# exponential factor for oxygen at 100K,200K,300K
prob2002 = maxwell2(Mo, velocity, 100.0)
prob3002 = maxwell2(Mo, velocity, 200.0)
prob4002 = maxwell2(Mo, velocity, 300.0)

# Full distribution at the three temperatures
# (savefig assumes an 'image/' folder exists alongside the notebook).
plt.plot(velocity, prob200, 'g-', label='100k')
plt.plot(velocity, prob300, 'b-', label='200k')
plt.plot(velocity, prob400, 'r-', label='300k')
plt.xlabel('Velocity(m/s)')
plt.ylabel('Probability')
plt.title('Maxwell Boltzman distribution of speed for Oxygen')
plt.legend()
plt.savefig("image/BoltmanO.png", dpi=600)  # dpi dot per inch
plt.show()

# Polynomial factor only
plt.plot(velocity, prob2001, 'g-', label='100k')
plt.plot(velocity, prob3001, 'b-', label='200k')
plt.plot(velocity, prob4001, 'r-', label='300k')
plt.xlabel('Velocity(m/s)')
plt.ylabel('Probability')
plt.title('Maxwell Boltzman distribution of speed for Oxygen')
plt.legend()
plt.savefig("image/Boltman1.png", dpi=600)  # dpi dot per inch
plt.show()

# Exponential factor only
plt.plot(velocity, prob2002, 'g-', label='100k')
plt.plot(velocity, prob3002, 'b-', label='200k')
plt.plot(velocity, prob4002, 'r-', label='300k')
plt.xlabel('Velocity(m/s)')
plt.ylabel('Probability')
plt.title('Maxwell Boltzman distribution of speed for Oxygen')
plt.legend()
plt.savefig("image/Boltman2.png", dpi=600)  # dpi dot per inch
plt.show()

# Compare the full distribution against its two factors on a log scale
plt.semilogy(velocity, prob300, 'g-', label='original')
plt.semilogy(velocity, prob3001, 'b-', label='v2')
plt.semilogy(velocity, prob3002, 'r-', label='exp')
plt.xlabel('Velocity(m/s)')
plt.ylabel('Probability')
plt.title('Maxwell Boltzman distribution of speed for Oxygen')
plt.legend()
plt.savefig("image/Boltman3.png", dpi=600)  # dpi dot per inch
plt.show()

# Tabulate and persist the curves (assumes a 'data/' folder exists).
M = {"Velocity(m/s)": velocity, "Probability(100K)": prob200, "Probability(200K)": prob300, "Probability(300K)": prob400}
DF = pd.DataFrame(M)
DF
DF.to_csv("data/Boltzman.csv")

# probability for hydrogen, nitrogen, oxygen at 300K
probh = maxwell(Mh, velocity, 300)  # for hydrogen
probn = maxwell(Mn, velocity, 300)  # for nitrogen
probo = maxwell(Mo, velocity, 300)  # for oxygen

plt.plot(velocity, probh, 'g-', label='Hydrogen')
plt.plot(velocity, probn, 'b-', label='Nitrogen')
plt.plot(velocity, probo, 'r-', label='Oxygen')
plt.xlabel('Velocity(m/s)')
plt.ylabel('Probability')
# fixed truncated title (was '...for gas at ')
plt.title('Maxwell Boltzman distribution of speed for gas at 300K')
plt.legend()
plt.savefig("image/Boltmanal.png", dpi=600)  # dpi dot per inch
plt.show()


def rms(M, T):
    """Root-mean-square speed sqrt(3kT/m) for molar mass M (kg/mol) and
    temperature(s) T (K, scalar or array)."""
    NA = 6.02e+23
    k = 1.38e-23
    m = M / NA
    vel = np.sqrt(3 * k * T / m)
    return vel


temp = np.arange(200, 500, 10)

# rms for gas
rmsh = rms(Mh, temp)  #for hydrogen
rmso = rms(Mo, temp)
#for oxygen rmsn = rms(Mn,temp) #for nitrogen plt.plot(temp, rmsh, 'g-',label='Hydrogen') plt.plot(temp, rmso, 'b-',label='Oxygen') plt.plot(temp, rmsn, 'r-',label='Nitrogen') plt.xlabel('Temperature(K)') plt.ylabel('RMS(m/s)') plt.title('RMS speed for gas') plt.legend() plt.savefig("image/rms.png", dpi = 600) # dpi dot per inch plt.show() # # Planck's formula of radiation # \begin{equation} # f(\lambda,T)=\frac{2hc^2}{\lambda^5}\frac{1}{e^{\frac{hc}{\lambda KT}}-1} # \end{equation} def planck(wav, T): h = 6.626e-34 c = 3.0e+8 k = 1.38e-23 a = 2.0*h*c**2 b = h*c/(wav*k*T) intensity = a/ ( (wav**5) * (np.exp(b) - 1.0) ) return intensity wavelengths = np.arange(6e-9, 2e-6, 1e-9) # wavelength in m # intensity at 4000K, 5000K, 6000K, 7000K intensity4000 = planck(wavelengths, 4000) intensity5000 = planck(wavelengths, 5000) intensity6000 = planck(wavelengths, 6000) intensity7000 = planck(wavelengths, 7000) plt.plot(wavelengths*1e9, intensity4000, 'r-',label='4000k') plt.plot(wavelengths*1e9, intensity5000, 'g-',label='5000k') plt.plot(wavelengths*1e9, intensity6000, 'k-',label='6000k') plt.xlabel('Wavelength(nm)') plt.ylabel('Spetral energy densiry(W/m$^3$)') plt.title('Black body radiation') plt.legend() plt.savefig("image/planck.png", dpi = 600) # dpi dot per inch plt.show() M1 = {"wavelength(m)": wavelengths,"Intensity(4000K)": intensity4000,"Intensity(5000K)": intensity5000,"Intensity(6000K)": intensity6000} DF1 = pd.DataFrame(M1) DF1 DF1.to_csv("data/planck.csv")
maxwell.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # default_exp losses # + # export import math import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import llamass.transforms as transforms # - # spl from llamass.core import unpack_body_models, npz_paths import tempfile import warnings import gaitplotlib.core as gpl body_model = gpl.init_body_model("neutral") fk_engine = transforms.SMPLHForwardKinematics() # # Losses # # > A selection of loss functions I've used with this dataset. # ## Axis-Angle Cosine Distance # # > Cosine similarity between rotation vectors. # This is probably not a correct distance metric for rotations. [This blog][belousov] makes an argument that there's only one correct distance metric for rotations and it's the angular distance between unit quaternions. But, this one is quick to evaluate on rotation vectors. # # There are also implementations of this correct distance metric using rotation matrices in Python [here][ahrs]. # # [ahrs]: https://github.com/Mayitzin/ahrs/blob/master/ahrs/utils/metrics.py # [belousov]: http://www.boris-belousov.net/2016/12/01/quat-dist/ # exports def aa_cosine(out, target): if out.ndim == 2: b, d = out.size() elif out.ndim == 3: b, f, d = out.size() assert f == 1, f'{out.size()}' j = d//3 out, target = out.view(b, j, 3), target.view(b, j, 3) def theta(x, eps=1e-6): return torch.sqrt(torch.clamp(torch.sum(x**2, 2, keepdims=True), eps, 2*math.pi)) theta_a = theta(out) theta_b = theta(target) cosine_sim = F.cosine_similarity(out, target, dim=2) cosine_sim_loss = 1. - cosine_sim cosine_angle_diff = 1. - torch.cos(theta_a - theta_b) return cosine_sim_loss + cosine_angle_diff[:,:,0] # ## VPoser # # > The loss function used by the VPoser VAE in the [SMPL-X][smplx] paper. 
# # Appears to cause NaNs when used on the sample AMASS data below in tests. # # [smplx]: https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/497/SMPL-X.pdf # exports class GeodesicLossR(nn.Module): def __init__(self, reduction='batchmean'): super(GeodesicLossR, self).__init__() self.reduction = reduction self.eps = 1e-6 # batch geodesic loss for rotation matrices def bgdR(self,m1,m2): assert m1.ndim == m2.ndim, \ f"Rotation matrices ndim must be equal but was {m1.ndim} {m2.ndim}" for m in [m1, m2]: assert m.size(-1) == 3 and m.size(-2) == 3, \ f"Trailing 2 dimensions must 3x3 rotation matrices {m.size()}" if m1.ndim == 2: # ndim 2 must be single rotation matrix m1 = m1.view(1, 3, 3) m2 = m2.view(1, 3, 3) elif m1.ndim > 3: m1 = m1.view(-1, 3, 3) m2 = m2.view(-1, 3, 3) batch = m1.shape[0] m = torch.bmm(m1, m2.transpose(1, 2)) # batch*3*3 cos = (m[:, 0, 0] + m[:, 1, 1] + m[:, 2, 2] - 1) / 2 cos = torch.min(cos, m1.new(np.ones(batch))) cos = torch.max(cos, m1.new(np.ones(batch)) * -1) return torch.acos(cos) def forward(self, ypred, ytrue): theta = self.bgdR(ypred,ytrue) if self.reduction == 'mean': return torch.mean(theta) elif self.reduction == 'batchmean': return torch.mean(torch.sum(theta, dim=theta.shape[1:])) elif self.reduction == 'none': return theta else: raise NotImplementedError(f"Reduction {self.reduction} not known") #exports class ContinuousRotReprDecoder(nn.Module): def __init__(self): super().__init__() def forward(self, module_input): b, d = module_input.size() assert d%6 == 0 reshaped_input = module_input.view(-1, 3, 2) b1 = F.normalize(reshaped_input[:, :, 0], dim=1) dot_prod = torch.sum(b1 * reshaped_input[:, :, 1], dim=1, keepdim=True) b2 = F.normalize(reshaped_input[:, :, 1] - dot_prod * b1, dim=-1) b3 = torch.cross(b1, b2, dim=1) return torch.stack([b1, b2, b3], dim=-1).view(b, -1, 3, 3) # exports class ForwardKinematicLoss(nn.Module): "Must be initialized with an SMPL-like `body_model`." 
def __init__(self, body_model): super().__init__() self.bm = body_model self.geodesic_loss = GeodesicLossR(reduction="mean") def kinematics(self, aa_out, pose_target): with torch.no_grad(): bm_orig = self.bm(pose_body=pose_target) bm_rec = self.bm(pose_body=aa_out.contiguous()) return bm_orig, bm_rec # exports class VPoserLikelihood(ForwardKinematicLoss): def forward( self, dec_out, aa_out, pose_target, pose_target_rotmat, bm_orig=None, bm_rec=None, loss_rec_wt=torch.tensor(4), loss_matrot_wt=torch.tensor(2), loss_jtr_wt=torch.tensor(2), callback=None ): """ Default settings for loss weights taken from: https://github.com/nghorbani/human_body_prior/blob/master/src/human_body_prior/train/V02_05/V02_05.yaml Inputs: - dec_out: output of network as rotation matrix, shape (batch, frames, joints, 3, 3) - aa_out: output of network as axis-angle vectors, shape (batch, frames, joints, 3) - pose_target: target as axis-angle vectors, shape (batch, frames, joints, 3) - pose_target_rotmat: target as rotation matrix, shape (batch, frames, joints, 3, 3) """ l1_loss = torch.nn.L1Loss(reduction="mean") # cast decoder output to aa bs, f, d = pose_target.size() # forward kinematics if bm_orig is None or bm_rec is None: bm_orig, bm_rec = self.kinematics(aa_out.view(bs*f, -1), pose_target.view(bs*f, d)) # Reconstruction loss - L1 on the output mesh v2v = l1_loss(bm_rec.v, bm_orig.v) # Geodesic loss between rotation matrices matrot_loss = self.geodesic_loss( dec_out.view(-1, 3, 3), pose_target_rotmat.view(-1, 3, 3) ) # L1 Loss on joint positions jtr_loss = l1_loss(bm_rec.Jtr, bm_orig.Jtr) # apply weights to make weighted loss weighted_loss = ( loss_matrot_wt * matrot_loss + loss_rec_wt * v2v + loss_jtr_wt * jtr_loss ) # log results with torch.no_grad(): unweighted_loss = matrot_loss + v2v + jtr_loss if callback is not None: callback(all_univariate_tensors_in(locals())) return weighted_loss # spl with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", 
tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) _poses = torch.tensor(cdata['poses'][::6, 3:66], dtype=torch.float, requires_grad=True) n, d = _poses.size() j = d//3 poses = _poses.reshape(n, 1, j*3) vposer_loss = VPoserLikelihood(body_model) pred, target = poses[:-1], poses[1:] n = pred.size(0) pred_rotmat = transforms.Rotation.from_rotvec(pred.reshape(-1, 3)).as_matrix().view(n, -1) target_rotmat = transforms.Rotation.from_rotvec(target.reshape(-1, 3)).as_matrix().view(n, -1) loss = vposer_loss(pred_rotmat, pred, target, target_rotmat) # loss = poses.sum() loss.backward() try: assert not torch.any(torch.isnan(_poses.grad)), "gradient contains NaNs" assert torch.abs(_poses.grad).max() > 1e-6, "gradient is zero" except AssertionError as e: warnings.warn(str(e)) break torch.any(torch.isnan(_poses.grad)) # ## Discretized Euler Angles # # > Intuitively, an ADC applied to Euler angles. Allows use of a NLL loss, which is can be useful for training a neural network. # + #exports def discretize(x, nquant, eps=1e-6, dither=False, zero_centered=True): if zero_centered: x = x + math.pi m = math.pi*2 assert x.max() < m x = x/m # scale to between zero and 1 x = x*nquant if dither: d = 2.*(torch.rand_like(x)-0.5) x = torch.clamp(x+d, 0, nquant-eps) return torch.floor(x).long() # bin account to nquant levels class DiscretizedEulerLoss(nn.Module): def __init__(self, nquant, dither=False, zero_centered=True): super().__init__() self.nquant, self.dither, self.zero_centered = nquant, dither, zero_centered def forward(self, out, target): assert out.size(-1) == self.nquant, f'trailing dimension should hold logits {out.size()}' target = discretize(target, self.nquant, dither=self.dither, zero_centered=self.zero_centered) return F.nll_loss(out.view(-1, self.nquant), target.view(-1)) # - # ## SPL Loss Functions # # > Loss functions adapted from the [SPL][] repository. 
# # [spl]: https://github.com/eth-ait/spl # To do: # # * `euler_angle_mse` is an inaccurate name, it's actually a pairwise distance # * Geodesic needs better documentation # In the standard evaluation on Human3.6M and AMASS the following loss functions are recommended: # # * Euler angle # * Geodesic joint angle (calculated above in the VPoser loss) # * Absolute position error (calculated above in the VPoser loss) # * PCK: "percentage of joints lying within a spherical threshold $\rho$ around the target joint position", typically reported as an AUC while varying the $\rho$ threshold # Required packages to run tests: # # * [quaternion][] # * [cv2][] # * [gaitplotlib][] # # And it's necessary to have unpacked body model files with gaitplotlib as described [here][unpack]. # # [quaternion]: https://quaternion.readthedocs.io/en/latest/ # [cv2]: https://pypi.org/project/opencv-python/ # [gaitplotlib]: https://github.com/gngdb/gaitplotlib # [unpack]: https://github.com/gngdb/gaitplotlib#unpack-body-models # spl # I don't want to install tensorflow to install their whole package for testing # !git clone https://github.com/eth-ait/spl.git 2>/dev/null import sys from pathlib import Path common_path = "./spl/common" if common_path not in sys.path: sys.path.append(common_path) from conversions import (is_valid_rotmat, rotmat2euler, aa2rotmat, get_closest_rotmat, sparse_to_full, local_rot_to_global) import cv2 # ### SPL Implementations # # > Included for testing. # + # spl """ SPL: training and evaluation of neural networks with a structured prediction layer. Copyright (C) 2019 ETH Zurich, <NAME>, <NAME> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. """ def euler_diff(predictions, targets): """ Computes the Euler angle error as in previous work, following https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/translate.py#L207 Args: predictions: np array of predicted joint angles represented as rotation matrices, i.e. in shape (..., n_joints, 3, 3) targets: np array of same shape as `predictions` Returns: The Euler angle error an np array of shape (..., ) """ assert predictions.shape[-1] == 3 and predictions.shape[-2] == 3 assert targets.shape[-1] == 3 and targets.shape[-2] == 3 n_joints = predictions.shape[-3] ori_shape = predictions.shape[:-3] preds = np.reshape(predictions, [-1, 3, 3]) targs = np.reshape(targets, [-1, 3, 3]) euler_preds = rotmat2euler(preds) # (N, 3) euler_targs = rotmat2euler(targs) # (N, 3) # reshape to (-1, n_joints*3) to be consistent with previous work euler_preds = np.reshape(euler_preds, [-1, n_joints*3]) euler_targs = np.reshape(euler_targs, [-1, n_joints*3]) # l2 error on euler angles idx_to_use = np.where(np.std(euler_targs, 0) > 1e-4)[0] euc_error = np.power(euler_targs[:, idx_to_use] - euler_preds[:, idx_to_use], 2) euc_error = np.sqrt(np.sum(euc_error, axis=1)) # (-1, ...) # reshape to original return np.reshape(euc_error, ori_shape) def angle_diff(predictions, targets): """ Computes the angular distance between the target and predicted rotations. We define this as the angle that is required to rotate one rotation into the other. This essentially computes || log(R_diff) || where R_diff is the difference rotation between prediction and target. 
Args: predictions: np array of predicted joint angles represented as rotation matrices, i.e. in shape (..., n_joints, 3, 3) targets: np array of same shape as `predictions` Returns: The geodesic distance for each joint as an np array of shape (..., n_joints) """ assert predictions.shape[-1] == predictions.shape[-2] == 3 assert targets.shape[-1] == targets.shape[-2] == 3 ori_shape = predictions.shape[:-2] preds = np.reshape(predictions, [-1, 3, 3]) targs = np.reshape(targets, [-1, 3, 3]) # compute R1 * R2.T, if prediction and target match, this will be the identity matrix r = np.matmul(preds, np.transpose(targs, [0, 2, 1])) # convert `r` to angle-axis representation and extract the angle, which is our measure of difference between # the predicted and target orientations angles = [] for i in range(r.shape[0]): aa, _ = cv2.Rodrigues(r[i]) angles.append(np.linalg.norm(aa)) angles = np.array(angles) return np.reshape(angles, ori_shape) def positional(predictions, targets): """ Computes the Euclidean distance between joints in 3D space. Args: predictions: np array of predicted 3D joint positions in format (..., n_joints, 3) targets: np array of same shape as `predictions` Returns: The Euclidean distance for each joint as an np array of shape (..., n_joints) """ return np.sqrt(np.sum((predictions - targets) ** 2, axis=-1)) def pck(predictions, targets, thresh): """ Percentage of correct keypoints. Args: predictions: np array of predicted 3D joint positions in format (..., n_joints, 3) targets: np array of same shape as `predictions` thresh: radius within which a predicted joint has to lie. 
Returns: Percentage of correct keypoints at the given threshold level, stored in a np array of shape (..., len(threshs)) """ dist = np.sqrt(np.sum((predictions - targets) ** 2, axis=-1)) pck = np.mean(np.array(dist <= thresh, dtype=np.float32), axis=-1) return pck # - # ### Euler Angle # # > I don't know what euler angle convention this corresponds to, but it matches the implementation used in prior papers. # exports def euler_angle_mse(predictions, targets, n_joints=21): "Inputs predictions and targets are assumed to be rotation matrices" predictions = transforms.Rotation.from_matrix(predictions).as_euler() targets = transforms.Rotation.from_matrix(targets).as_euler() predictions = predictions.view(-1, n_joints*3) targets = targets.view(-1, n_joints*3) # l2 error on euler angles #idx_to_use = np.where(np.std(euler_targs, 0) > 1e-4)[0] mask = (torch.std(targets, 0) > 1e-4).float().view(1, -1) euc_error = torch.square(targets*mask - predictions*mask) euc_error = torch.sqrt(torch.sum(euc_error, 1)) # (-1, ...) return euc_error # spl with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) poses = torch.tensor(cdata['poses'][:, 3:66]).float() n, d = poses.size() j = d//3 poses = poses.view(n, j, 3) # convert poses to rotation matrices rotmats = transforms.Rotation.from_rotvec(poses).as_matrix().reshape(n, j, 3, 3) outputs = rotmats[:-1:6] targets = rotmats[1::6] loss = euler_diff(outputs.numpy(), targets.numpy()) _loss = euler_angle_mse(outputs.reshape(-1, 3, 3), targets.reshape(-1, 3, 3)) assert np.abs(loss - _loss.numpy()).max() < 1e-4 print(outputs.shape, targets.shape, loss.shape) # ### Geodesic # # Basing this on the geodesic loss calculated in the combined Vposer loss above. 
#exports class GeodesicLossSPL(GeodesicLossR): def __init__(self, reduction='none'): super().__init__(reduction=reduction) def bgdR(self, m1, m2): assert m1.ndim == m2.ndim, \ f"Rotation matrices ndim must be equal but was {m1.ndim} {m2.ndim}" for m in [m1, m2]: assert m.size(-1) == 3 and m.size(-2) == 3, \ f"Trailing 2 dimensions must 3x3 rotation matrices {m.size()}" if m1.ndim == 2: # ndim 2 must be single rotation matrix m1 = m1.view(1, 3, 3) m2 = m2.view(1, 3, 3) elif m1.ndim > 3: m1 = m1.view(-1, 3, 3) m2 = m2.view(-1, 3, 3) batch = m1.shape[0] m = torch.bmm(m1, m2.transpose(1, 2)) # batch*3*3 aa = transforms.Rotation.from_matrix(m).as_rotvec().view(batch, 3) angles = torch.linalg.norm(aa, axis=1) return angles # spl with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) poses = torch.tensor(cdata['poses'][:, 3:66]) n, d = poses.size() j = d//3 poses = poses.view(n, j, 3) # convert poses to rotation matrices rotmats = transforms.Rotation.from_rotvec(poses).as_matrix().view(n, j, 3, 3) outputs = rotmats[:-1:6] targets = rotmats[1::6] loss = angle_diff(outputs.numpy(), targets.numpy()) geodesic_loss = GeodesicLossSPL() _loss = geodesic_loss(outputs.reshape(-1,3,3), targets.reshape(-1,3,3)) assert np.allclose(loss, _loss.numpy().reshape(*loss.shape)) print(outputs.shape, targets.shape, loss.shape) # ### Positional # # Basing this on the VPoser loss above, has to contain a body model to do forward kinematics to estimate the joint positions given a fixed body model. (Although, I think betas are often provided for AMASS examples, so it may be more correct to use those.) 
#exports class PositionalLossSPL(ForwardKinematicLoss): def forward( self, aa_out=None, pose_target=None, bm_orig=None, bm_rec=None, positions=None, target_positions=None ): for p in [positions, target_positions]: if p is not None: assert p.ndim == 3 assert p.size(-1) == 3, "final dim must contain 3D locations" if pose_target is not None: if pose_target.ndim == 3: bs, f, d = pose_target.size() n = bs*f assert d == n_joints*3 elif pose_target.ndim == 2: n, d = pose_target.size() # forward kinematics no_bm_output = bm_orig is None or bm_rec is None no_positions = positions is None or target_positions is None if no_bm_output and no_positions: bm_orig, bm_rec = self.kinematics(aa_out.reshape(n, d), pose_target.reshape(n, d)) positions = bm_rec.Jtr target_positions = bm_orig.Jtr return torch.sqrt(torch.square(positions - target_positions).sum(2)) # spl with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) _poses = torch.tensor(cdata['poses'][::6, 3:66], dtype=torch.float, requires_grad=True) n, d = _poses.size() j = d//3 poses = _poses.view(n, j, 3) poses = poses.view(-1, j*3) # convert poses to rotation matrices with torch.no_grad(): positions = body_model(pose_body=poses).Jtr outputs = positions[:-1] targets = positions[1:] loss = positional(outputs.numpy(), targets.numpy()) positional_loss_spl = PositionalLossSPL(body_model) _loss = positional_loss_spl(poses[:-1], poses[1:]) assert np.allclose(loss, _loss.detach().numpy()) _loss.mean().backward() assert not torch.any(torch.isnan(_poses.grad)), "Gradient contains NaNs" assert torch.abs(_poses.grad).max() > 1e-6, "gradient is zero" print(outputs.shape, targets.shape, loss.shape) # spl with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) _poses = 
torch.tensor(cdata['poses'][::6, :3*24], dtype=torch.float, requires_grad=True) n, d = _poses.size() j = d//3 poses = _poses.view(n, j, 3) poses = poses.view(-1, j*3) # convert poses to rotation matrices positions = fk_engine.from_aa(poses) outputs = positions[:-1] targets = positions[1:] loss = positional(outputs.detach().numpy(), targets.detach().numpy()) positional_loss_spl = PositionalLossSPL(None) _loss = positional_loss_spl(positions=outputs, target_positions=targets) assert np.allclose(loss, _loss.detach().numpy()) _loss.mean().backward() assert not torch.any(torch.isnan(_poses.grad)), "Gradient contains NaNs" assert torch.abs(_poses.grad).max() > 1e-6, "gradient is zero" print(outputs.shape, targets.shape, loss.shape) # ### PCK # # #exports class PCK_SPL(ForwardKinematicLoss): def forward( self, aa_out=None, pose_target=None, positions=None, target_positions=None, thresh=None, bm_orig=None, bm_rec=None, n_joints=21 ): assert thresh is not None for p in [positions, target_positions]: if p is not None: assert p.ndim == 3 assert p.size(-1) == 3, "final dim must contain 3D locations" if pose_target is not None: if pose_target.ndim == 3: bs, f, d = pose_target.size() n = bs*f assert d == n_joints*3 elif pose_target.ndim == 2: n, d = pose_target.size() # forward kinematics no_bm_output = bm_orig is None or bm_rec is None no_positions = positions is None or target_positions is None if no_bm_output and no_positions: bm_orig, bm_rec = self.kinematics(aa_out.reshape(n, d), pose_target.reshape(n, d)) positions = bm_rec.Jtr target_positions = bm_orig.Jtr # percentage of coordinates in the ball defined by thresh around a joint n, d, _ = positions.size() dist = torch.sqrt(torch.square(positions - target_positions).sum(2)) return torch.mean((dist <= thresh).float(), 1) # + # spl pck_thresholds = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.3] with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for 
npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) poses = torch.tensor(cdata['poses'][:, 3:66]).float() n, d = poses.size() j = d//3 poses = poses.view(n, j, 3) poses = poses[::6].view(-1, j*3) # convert poses to rotation matrices with torch.no_grad(): positions = body_model(pose_body=poses).Jtr outputs = positions[:-1] targets = positions[1:] _pck = PCK_SPL(body_model) for thresh in pck_thresholds: loss = pck(outputs.numpy(), targets.numpy(), thresh) _loss = _pck(poses[:-1], poses[1:], thresh=thresh) assert np.allclose(loss, _loss) # + # spl pck_thresholds = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.3] with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) _poses = torch.tensor(cdata['poses'][::6, :3*24], dtype=torch.float, requires_grad=True) n, d = _poses.size() j = d//3 poses = _poses.view(n, j, 3) poses = poses.view(-1, j*3) # convert poses to rotation matrices positions = fk_engine.from_aa(poses) outputs = positions[:-1] targets = positions[1:] _pck = PCK_SPL(None) pck_vals = [] for thresh in pck_thresholds: loss = pck(outputs.detach().numpy(), targets.detach().numpy(), thresh) pck_vals.append(loss) _loss = _pck(positions=outputs, target_positions=targets, thresh=thresh) assert np.allclose(loss, _loss) pck_vals = [np.mean(v) for v in pck_vals] print(f"AUC: {calculate_auc(pck_vals, pck_thresholds, 6)}") # - # ## PCK AUC # # > Area under curve for PCK losses # + # exports """ SPL: training and evaluation of neural networks with a structured prediction layer. Copyright (C) 2019 ETH Zurich, <NAME>, <NAME> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. """ def calculate_auc(pck_values, pck_thresholds, target_length): """Calculate area under a curve (AUC) metric for PCK. If the sequence length is shorter, we ignore some of the high-tolerance PCK values in order to have less saturated AUC. Args: pck_values (list): PCK values. pck_thresholds (list): PCK threshold values. target_length (int): determines for which time-step we calculate AUC. Returns: """ # Due to the saturation effect, we consider a limited number of PCK thresholds in AUC calculation. if target_length < 6: n_pck = 6 elif target_length < 12: n_pck = 7 elif target_length < 18: n_pck = 8 else: n_pck = len(pck_thresholds) norm_factor = np.diff(pck_thresholds[:n_pck]).sum() auc_values = [] for i in range(n_pck - 1): auc = (pck_values[i] + pck_values[i + 1]) / 2 * (pck_thresholds[i + 1] - pck_thresholds[i]) auc_values.append(auc) return np.array(auc_values).sum() / norm_factor # + # spl pck_thresholds = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.3] with tempfile.TemporaryDirectory() as tmpdirname: unpack_body_models("sample_data/", tmpdirname, 1, verify=False) for npz_path in npz_paths(tmpdirname): cdata = np.load(npz_path) _poses = torch.tensor(cdata['poses'][::6, :3*24], dtype=torch.float, requires_grad=True) n, d = _poses.size() j = d//3 poses = _poses.view(n, j, 3) poses = poses.view(-1, j*3) # convert poses to rotation matrices positions = fk_engine.from_aa(poses) outputs = positions[:-1] targets = positions[1:] _pck = PCK_SPL(None) pck_vals = [] for thresh in pck_thresholds: loss = pck(outputs.detach().numpy(), targets.detach().numpy(), thresh) pck_vals.append(loss) _loss = 
_pck(positions=outputs, target_positions=targets, thresh=thresh) assert np.allclose(loss, _loss) pck_vals = [np.mean(v) for v in pck_vals] print(f"AUC: {calculate_auc(pck_vals, pck_thresholds, 6)}") # - # ## Logging Utilities # # > Utility functions for logging losses during training. #exports def all_univariate_tensors_in(d): "Utility function for logging with a callback function" def is_univariate_tensor(x): if isinstance(x, torch.Tensor): return x.nelement() == 1 return {k: v for k, v in d.items() if is_univariate_tensor(v)}
05_losses.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo: Simulation of Tyrosine NMR Spectrum # This notebook shows how the **nmrsim** library can be used to compose an entire <sup>1</sup>H NMR spectrum from scratch. # # The nmrsim.plt routines are convenient for quick plots, but for entire spectrums their small size and low resolution is noticeable (e.g. misleading signal intensities). # # *{TODO: provide ways to customize the plots (e.g. have `plt.mplplot` return the actual matplotlib object for customization, or use the peaklist data in another visualization library).}* # This tutorial is adapted from the [nmrmint](https://nmrmint.readthedocs.io/en/latest/tutorial.html) tutorial. # # *(If you're interested in an app for the simulation of a complete NMR spectrum, see the [nmrmit project](https://github.com/sametz/nmrmint).)* # + import os import sys import numpy as np import matplotlib as mpl mpl.rcParams['figure.dpi']= 300 # %config InlineBackend.figure_format = 'svg' # makes inline plot look less blurry # %matplotlib inline home_path = os.path.abspath(os.path.join('..')) if home_path not in sys.path: sys.path.append(home_path) tests_path = os.path.abspath(os.path.join('..', 'tests')) if tests_path not in sys.path: sys.path.append(tests_path) # - # Here is the data for the spectrum of tyrosine in D<sub>2</sub>O: # # 1H NMR (500 MHz, Deuterium Oxide) δ 7.18 (d, J = 8.5 Hz, 1H), 6.89 (d, J = 8.5 Hz, 1H), 3.93 (dd, J = 7.7, 5.1 Hz, 1H), # 3.19 (dd, J = 14.7, 5.1 Hz, 1H), 3.05 (dd, J = 14.7, 7.8 Hz, 1H). # Data is provided in ppm on a 500 MHz spectrometer. 
We'll create a function to perform ppm-to-Hz conversions for us: def ppm_to_hz(ppm, spec_freq): """Given a chemical shift in ppm and spectrometer frequency in MHz, return the corresponding chemical shift in Hz.""" return [d * spec_freq for d in ppm] # The two "doublets" in the aromatic region actually comprise an AA'XX' system. This 4-nuclei spin system can be modeled using the SpinSystem class: from nmrsim import SpinSystem # Create a frequency list (in Hz) for the A, A', X, and X' nuclei: v_aaxx = ppm_to_hz([7.18, 7.18, 6.89, 6.89], 500) v_aaxx # For the *J* values, as a first approximation we'll assume J<sub>AX</sub> (an J<sub>A'X'</sub>) are close to the faux-doublet splitting of 8.5 Hz. We'll estimate that J<sub>AA'</sub> and J<sub>XX'</sub> are about 2 Hz, and that the J<sub>AX'</sub> and J<sub>A'X</sub> couplings are about 0 Hz. j_aaxx = [[0, 2, 8.5, 0], [2, 0, 0, 8.5], [8.5, 0, 0, 2], [0, 8.5, 2, 0]] aaxx = SpinSystem(v_aaxx, j_aaxx) from nmrsim.plt import mplplot, mplplot_lineshape mplplot(aaxx.peaklist()); # Next, we'll create the ABX system for the aliphatic protons. For this exercise, we are assuming that the coupling constants that the first-order analysis provided are close enough. # # *(If accuracy is critical, there are methods for solving the ABX system. For example, see https://www.chem.wisc.edu/areas/reich/nmr/05-hmr-12-abx.htm#solving%20ABX )* v_abx = ppm_to_hz([3.93,3.19, 3.05], 500) j_abx = [[0, 5.1, 7.75], [5.1, 0, -14.7], # geminal Js should be negative [7.75, -14.7, 0]] abx = SpinSystem(v_abx, j_abx) mplplot(abx.peaklist(), y_max=0.2); # These spin systems can be combined into a spectrum: tyr_spectrum = aaxx + abx mplplot(tyr_spectrum.peaklist(), y_max=0.2) type(tyr_spectrum) # Addition of the two SpinSystem objects returned a Spectrum object. # If peak intensities look off, try using more data points for the lineshape. 
Here is the same example with ~ 10 data points per Hz: points=int((tyr_spectrum.vmax - tyr_spectrum.vmin) * 10) print(points) mplplot(tyr_spectrum.peaklist(), y_max=0.5, points=points); # The Spectrum class can also provide lineshape data for the spectrum: mplplot_lineshape(*tyr_spectrum.lineshape(points=points)); # The Spectrum.linewidth() method has an advantage over the .peaklist() method: it can take into account the linewidths specified by its component Multiplet/SpinSystem objects. The default value is 0.5 Hz, but this can be set to other values. # # In D<sub>2</sub>O, the -OH and -NH protons are exchanged for D and are not seen in the spectrum. If we wanted to include these in the spectrum for pedagogical reasons, we could create broad singlets with the Multiplet class: from nmrsim import Multiplet # frequency in Hz, integration, [empty list for no coupling constants], peakwidth = 20 Hz nh3 = Multiplet(8.3 * 500, 3, [], 20) tyr_oh = Multiplet(9.8 * 500, 1, [], 10) tyr_spectrum2 = tyr_spectrum + nh3 + tyr_oh # A Spectrum can have its .vmin and .vmax attributes reset to give a full spectral window (defaults are to provide a 50 Hz margin): tyr_spectrum2.default_limits() # resets limits, and returns vmin, vmax tuple points2 = int((tyr_spectrum2.vmax - tyr_spectrum2.vmin) * 10) mplplot_lineshape(*tyr_spectrum2.lineshape(points=points2)); # What if you want the x axis to be in ppm? # + # A future version of nmrsim should extend the API to facilitate using ppm in simulations. # For now, simulations use Hz only, and ppm conversions need to be done manually. tyr_spectrum2.vmin = -0.5 * 500 tyr_spectrum2.vmax = 10.5 * 500 x, y = tyr_spectrum2.lineshape(points=50000) x_ppm = x / 500 mplplot_lineshape(x_ppm, y, limits=(-0.5, 10.5)); # -
jupyter/4-Build-Tyrosine-Spectrum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- l = [23245, 27, 546, 215, -1234] l # + #two types of arrays. 'arrayarrays are python. better version in numpy # - import numpy as np np.array(l) a = np.array(l) a b = np.array([12, 455, 63.3, True,'abc']) b #note that arrays have to all have the same datatype print(b) l.pop l.pop() l a.mean() # + #do it myself # - #create a few different arrays for a business intelligence problem #dim1: years between 2010 and 2018 #mea1: sales figures years = np.array(['2010', '2011','2012','2013','2014','2015','2016','2017','2018']) years sales = np.array([100,105,110,120,140,175,225,250,300]) len(years) len(sales) l l[1:] l[0:2] # + #------------ # - a a[2:] a[2:4] # + #whenever you slice an array, you do not create a copy of the array # + #example of this non-copy nature # - b = a[2:4] b b[:]=111 b a #you will see that modifying b also modified a # + # if you want a copy you have to create one like this # - c = a.copy() c
Arrays in Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="Logo.png" width="100" align="left"/> # # # <center> Unit 3 Project </center> # # <center> Third section : supervised task </center> # # In this notebook you will be building and training a supervised learning model to classify your data. # For this task we will be using another classification model "The random forests" model. # # Steps for this task: # 1. Load the already clustered dataset # 2. Take into consideration that in this task we will not be using the already added column "Cluster" # 3. Split your data. # 3. Build your model using the SKlearn RandomForestClassifier class # 4. classify your data and test the performance of your model # 5. Evaluate the model ( accepted models should have at least an accuracy of 86%). Play with hyper parameters and provide a report about that. # 6. Provide evidence on the quality of your model (not overfitted good metrics) # 7. Create a new test dataset that contains the testset + an additional column called "predicted_class" stating the class predicted by your random forest classifier for each data point of the test set. # ## 1. 
# Load the data and split the data:

from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.model_selection import train_test_split

# To-Do: load the data
df = pd.read_csv('clustered_dataset.csv')
df.head()

# +
# To-Do : keep only the columns to be used : all features except ID, cluster
# The target here is the Category column
# Do not forget to split your data (this is a classification task)
# test set size should be 20% of the data

# Keep only the feature columns: drop the first two columns (presumably ID
# and Category — TODO confirm against the CSV header) and the trailing
# "Cluster" column added by the earlier unsupervised step.
df_cols = list(df.columns)
cols = df_cols[2:-1]  # equivalent to df_cols[2:len(df_cols)-1], just idiomatic
print(cols)

# Get target
y = df['Category']

# Get data using right columns (copy so later column additions don't touch df)
X = df[cols].copy()

# Split the data: 80% train / 20% validation, fixed seed for reproducibility
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
# -

# ## 2. Building the model and training and evaluate the performance:

# +
# To-do build the model and train it
# note that you will be providing explanation about the hyper parameter tuning
# So you will be iterating a number of times before getting the desired performance

# Create the base model to tune
rf = RandomForestClassifier(n_estimators = 100, criterion ='entropy', random_state = 0)

# Check the different params of the classifier.
# BUG FIX: the original called model.get_params(), but no variable named
# `model` is ever defined in this notebook (the classifier is bound to `rf`
# here and in the tuning cell below), so that line raised NameError.
print(rf.get_params())

# +
import numpy as np
# Using RandomizedSearchCV to get best params
from sklearn.model_selection import RandomizedSearchCV

# Create a random grid to train the model on
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
# The search space for RandomizedSearchCV: each key is a RandomForestClassifier
# hyperparameter, each value the list of candidates sampled from.
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
print(random_grid)
# -

# Use the random grid to search for best hyperparameters
# First create the base model to tune
# NOTE(review): max_features='auto' in the grid above was removed in recent
# scikit-learn releases — confirm the installed version accepts it.
rf = RandomForestClassifier(n_estimators = 100, criterion ='entropy', random_state = 0)

# Random search of parameters, using 3 fold cross validation (We use cross validation to avoid overfitting),
# search across 100 different combinations, and use all available cores
# (n_iter=100 with cv=3 means 300 model fits — this can take a while).
rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1)

# Fit the random search model; after fitting, rf_random.predict uses the
# best estimator found during the search.
rf_random.fit(X_train, y_train)

# To-do : evaluate the model in terms of accuracy and precision
# Provide evidence that your model is not overfitting
y_hat = rf_random.predict(X_valid)

from sklearn.metrics import precision_score, accuracy_score

print('Random forest model accuracy:', accuracy_score(y_valid, y_hat)*100,"%")
# NOTE(review): with average='micro' on single-label multiclass data,
# precision is numerically identical to accuracy — consider 'macro' or
# 'weighted' if a distinct precision figure is wanted.
print('Random forest model precision:', precision_score(y_valid, y_hat, average='micro')*100,"%")

# > Hint : A Perfect accuracy on the train set suggest that we have an
# overfitted model So the student should be able to provide a detailed table
# about the hyper parameters / parameters tuning with a good conclusion
# stating that the model has at least an accuracy of 86% on the test set
# without signs of overfitting

# ## 3.
# Create the summary test set with the additional predicted class column:

# In this part you need to add the predicted class as a column to your test
# dataframe and save this one

# To-Do : create the complete test dataframe : it should contain all the
# feature column + the actual target and the ID as well
test_df = X_valid.copy()
# pandas aligns these assignments on the row index, so only the rows of df
# that survived into X_valid receive values — the labels stay paired with
# the right rows (presumably; verify the index was not reset upstream).
test_df['Category'] = df['Category']
test_df['ID'] = df['ID']
test_df.shape

# To-Do : Add the predicted_class column
# y_hat is positionally aligned with X_valid (output of predict on X_valid).
test_df["Predicted_class"] = y_hat
test_df.shape

# > Make sure you have 16 column in this test set

# Save the test set (index=False keeps the original row labels out of the CSV)
test_df.to_csv("test_summary.csv", index=False)
.ipynb_checkpoints/3. Supervised task-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Binary Tree Implementation Example # # Below is a representation of a Tree using a list of lists. # ## json file handling # ## (1) Input Data: {"a":"a0","b":"b1","c": "c2","d":"d3"} # ## (2) Output Data: [d3,[a0,b1,c2]] # + import json data = '{"a":"a0","b":"b1","c":"c2","d":"d3"}' jsonData = json.loads(data) a = str(jsonData['a']) b = str(jsonData['b']) c = str(jsonData['c']) d = str(jsonData['d']) print(a) print(b) print(c) print(d) # - # ## (3) Making Tree Structure # + def BinaryTree(r): #return [r, [], []] return [r, []] def insertLeft(root,newBranch): t = root.pop(1) if len(t) > 1: root.insert(1,[newBranch,t,[]]) else: root.insert(1,[newBranch, [], []]) return root def insertRight(root,newBranch): t = root.pop(2) if len(t) > 1: root.insert(2,[newBranch,[],t]) else: root.insert(2,[newBranch,[],[]]) return root def getRootVal(root): return root[0] def setRootVal(root,newVal): root[0] = newVal def getLeftChild(root): return root[1] def getRightChild(root): return root[2] # + # Output Data: [d3,[a0,b1,c2]] BinaryTree(d) # - root = BinaryTree(d) insertLeft(root, [a,b,c]) getRootVal(root) print(root) # + # END # - # ## Basic Tree Concept # <img src = 'https://github.com/leehaesung/BinaryTreeImplementation/raw/master/TreeConcept.jpg' > # + # END
02_ipynb/Binary_Tree_Implementation_With_Json_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
# NOTE(review): scipy.ndimage.filters is a deprecated alias of scipy.ndimage
# in modern SciPy — confirm the installed version still provides it.
from scipy.ndimage.filters import convolve
import skimage.io
import matplotlib.pyplot as plt
import sys

# %matplotlib inline

# A hand-built 5x5 RGB test image. NOTE: the name `input` shadows the
# built-in input() for the rest of this notebook.
input = np.array([[[10, 110, 210], [20, 120, 220], [30, 130, 230], [40, 140, 240], [50, 150, 250]],
                  [[20, 120, 220], [30, 130, 230], [40, 140, 240], [50, 150, 250], [10, 110, 210]],
                  [[30, 130, 230], [40, 140, 240], [50, 150, 250], [10, 110, 210], [20, 120, 220]],
                  [[40, 140, 240], [50, 150, 250], [10, 110, 210], [20, 120, 220], [30, 130, 230]],
                  [[50, 150, 250], [10, 110, 210], [20, 120, 220], [30, 130, 230], [40, 140, 240]]], dtype="uint8")

input_img = input
#input_img = plt.imread("lights.png")

skimage.io.imshow(input_img)

# Dry run of a manual 3x3 box blur over the interior pixels: for every
# interior (i, j) and channel k, print the 3x3 neighborhood, its sum, and
# the averaged value. Each sample is widened to uint64 so summing nine
# uint8 values cannot overflow.
for i in range(1,len(input_img)-1):
    for j in range(1,len(input_img[i])-1):
        for k in range(0, len(input_img[i][j])):
            tl = np.array(input_img[i+1][j-1][k], dtype="uint64")
            tm = np.array(input_img[i+1][j][k], dtype="uint64")
            tr = np.array(input_img[i+1][j+1][k], dtype="uint64")
            cl = np.array(input_img[i][j-1][k], dtype="uint64")
            cm = np.array(input_img[i][j][k], dtype="uint64")
            cr = np.array(input_img[i][j+1][k], dtype="uint64")
            bl = np.array(input_img[i-1][j-1][k], dtype="uint64")
            bm = np.array(input_img[i-1][j][k], dtype="uint64")
            br = np.array(input_img[i-1][j+1][k], dtype="uint64")
            print(bl, bm, br)
            print(cl,cm,cr)
            print(tl,tm,tr)
            print(bl + bm + br + cl + cm + cr + tl + tm + tr)
            print((1/9)*(bl + bm + br + cl + cm + cr + tl + tm + tr))
            print(np.round((1/9)*(bl + bm + br + cl + cm + cr + tl + tm + tr)))

# +
# Output image is 2 pixels smaller in each spatial dimension because the
# 1-pixel border has no full 3x3 neighborhood ("valid" convolution).
output_img = np.ones((len(input_img)-2, len(input_img[0])-2, 3), dtype="uint8")
#print(output_img)

# Same traversal as the dry run above, but this time the rounded 3x3 mean
# is written into the output image (shifted by -1 to account for the border).
for i in range(1,len(input_img)-1):
    for j in range(1,len(input_img[i])-1):
        for k in range(0, len(input_img[i][j])):
            tl = np.array(input_img[i+1][j-1][k], dtype="uint64")
            tm = np.array(input_img[i+1][j][k], dtype="uint64")
            tr = np.array(input_img[i+1][j+1][k], dtype="uint64")
            cl = np.array(input_img[i][j-1][k], dtype="uint64")
            cm = np.array(input_img[i][j][k], dtype="uint64")
            cr = np.array(input_img[i][j+1][k], dtype="uint64")
            bl = np.array(input_img[i-1][j-1][k], dtype="uint64")
            bm = np.array(input_img[i-1][j][k], dtype="uint64")
            br = np.array(input_img[i-1][j+1][k], dtype="uint64")
            new_pix = np.round((1/9)*(tl+tm+tr+cl+cm+cr+bl+bm+br))
            print(new_pix)
            output_img[i-1][j-1][k] = new_pix

# +
print(output_img)
skimage.io.imshow(output_img)
#skimage.io.imsave("lights_blur.png", output_img)

# Expected 3x3 blurred result for eyeballing against output_img.
input3 = np.array([[[30, 130, 230], [34, 134, 234], [33, 133, 233]],
                   [[34, 134, 234], [33, 133, 233], [27, 127, 227]],
                   [[33, 133, 233], [27, 127, 227], [26, 126, 226]]], dtype="uint8")
# -
.ipynb_checkpoints/blur-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#KRM
import os
import numpy as np
import matplotlib.pyplot as plt
#from scipy.stats import nanmean
from math import *
from pylab import *

# %matplotlib inline

# +
# These numbers come from DiffusivityExperimentsCalculations spread sheet on Google Drive.
# Each of these numbers was calculated in a separate notebook (see notebook names on spreadsheet)
# using the functions in NumDiffTools.py
#
# Kh*/Kv* are the implicit (prescribed) horizontal/vertical diffusivities;
# Khnum*/Kvnum* are the numerically recovered values for runs 01-04.
Kh1=[1.00E-07,1.00E-05,1.00E-02,1,20,50]
Kv1=[1.00E-05,1.00E-05,1.00E-05,1.00E-05,1.00E-05,1.00E-05]
Kh2=[1.00E-05,1.00E-05,1.00E-05,1.00E-05,1.00E-05,1.00E-05]
Kv2=[1.00E-07,1.00E-03,1.00E-02,1.00E-01,1,10]

Khnum1=[0.078,0.078,0.078,0.078,0.077,0.077]
Kvnum1=[1.39E-05,1.39E-05,1.39E-05,1.39E-05,1.38E-05,1.38E-05]
Khnum2=[0.079,0.029,-0.379,-3.65,-14.826,-12.06]
Kvnum2=[1.40E-05,5.34E-06,-7.46E-05,-7.67E-04,-2.56E-03,-1.41E-03]
Khnum3=[-0.92684996,-0.92685571,-0.93249396,-1.33754178,-3.62353446,-5.19363577]
Kvnum3=[-6.29094984e-05,-6.29095998e-05,-6.30104661e-05,-7.13881822e-05,-0.00012962,-0.00017344]
Khnum4=[-0.922,-1.17,-1.695,-2.15523471,-2.29423407,-1.98210887]
Kvnum4=[-6.27E-05,-6.85E-05,-7.83E-05,-8.75E-05,-8.87E-05,-7.81E-05]

# +
# Side-by-side panels: recovered Kh/Kv as a function of the implicit values.
plt.rcParams.update({'font.size': 13})
plt.figure(figsize=(36,6))

plt.subplot(1,4,1)
plt.plot(Kh1,Khnum1,'o-',label='run01')
plt.plot(Kh1,Khnum3,'o-',label='run03')
plt.xlabel('Kh implicit')
plt.ylabel('Kh num')
plt.legend(loc='lower left')
plt.title('Vertical vs horizontal concentration gradients')

plt.subplot(1,4,2)
plt.plot(Kh1,Kvnum1,'o-',label='run01')
plt.plot(Kh1,Kvnum3,'o-',label='run03')
plt.xlabel('Kh implicit')
plt.ylabel('Kv num')
plt.legend(loc='lower left')
plt.title('Vertical vs horizontal concentration gradients')

plt.subplot(1,4,3)
plt.plot(Kv1,Khnum1,'o-',label='run01')
plt.plot(Kv1,Khnum3,'o-',label='run03')
# BUG FIX: this panel plots Kv1 on x and Khnum* on y, but was labelled
# 'Kh implicit' / 'Kv num'. Labels corrected to match the plotted data.
plt.xlabel('Kv implicit')
plt.ylabel('Kh num')
plt.legend(loc='lower left')
plt.title('Vertical vs horizontal concentration gradients')

# +
# Dual-axis figure: recovered Kh (left, blue) and Kv (right, red) against
# the implicit Kh, with Kv held fixed at 1E-5.
rcParams['legend.numpoints'] = 1
fig, ax1 = plt.subplots(figsize=(10,8))
lns1=ax1.plot(Kh1,Khnum1, ':b^',label='run01 vgrad')
lns2=ax1.plot(Kh1,Khnum3, ':bs',label='run03 hgrad')
ax1.set_xlabel('Kh implicit')
ax1.set_xscale('log')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Kh num', color='b')
for tl in ax1.get_yticklabels():
    tl.set_color('b')

ax2 = ax1.twinx()
lns3=ax2.plot(Kh1,Kvnum1 , ':r^',label='run01 vgrad')
lns4=ax2.plot(Kh1,Kvnum3 , ':rs',label='run03 hgrad')
ax2.set_ylabel('Kv num', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')

# Merge the legend entries from both axes into one box.
lns = lns1+lns2+lns3+lns4
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc=0)

plt.title('Constant implicit Kv (1E-5)')
plt.show()

# +
# Same dual-axis layout, this time sweeping the implicit Kv with Kh fixed.
fig, ax1 = plt.subplots(figsize=(10,8))
lns1=ax1.plot(Kv2,Khnum2, ':b^', label='run02 vgrad')
lns2=ax1.plot(Kv2,Khnum4, ':bs',label='run04 hgrad')
ax1.set_xlabel('Kv implicit')
ax1.set_xscale('log')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Kh num', color='b')
for tl in ax1.get_yticklabels():
    tl.set_color('b')

ax2 = ax1.twinx()
lns3=ax2.plot(Kv2,Kvnum2 , ':r^',label='run02 vgrad')
lns4=ax2.plot(Kv2,Kvnum4 , ':rs',label='run04 hgrad')
ax2.set_ylabel('Kv num', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')

lns = lns1+lns2+lns3+lns4
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc=0)

plt.title('Constant implicit Kh (1E-5)')
# -
DiffusivityCalculationPlots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="D3QflLv0qdiy" # # Python 入門 # # 本章では、プログラミング言語 Python の基礎的な文法を学んでいきます。 # 次章以降に登場するコードを理解するにあたって必要となる最低限の知識について、最短で習得するのが目標です。 # より正確かつ詳細な知識を確認したい場合には、[公式のチュートリアル](https://docs.python.jp/3/tutorial/index.html)などを参照してください。 # # Pythonにはバージョンとして 2 系と 3 系の 2 つの系統があり、互換性のない部分もあります。 # 本チュートリアルでは、3 系である **Python 3.6** 以上を前提とした解説を行っています。 # # ## Python の特徴 # # プログラミング言語には、Python 以外にも C 言語や Java、Ruby、R のように様々なものがあります。それぞれの言語がすべての用途に適しているわけではなく、しばしば用途によって得手不得手があります。 # # 本チュートリアルでは基本的に Python というプログラミング言語を扱います。 # その理由は、Python はデータ解析・機械学習のためのライブラリが充実しており、データ解析や機械学習の分野で最もよく使われている言語だからです。 # また、Web アプリケーションフレームワークの開発も活発で、データ解析だけでなく Web サービス開発まで同じ言語で統一して行える点も魅力です。 # # # 本チュートリアルのテーマである Chainer も Python 向けに開発されています。 # + [markdown] colab_type="text" id="unYvpNZCv9aF" # さらには、初学者にとっても学びやすい言語です。 # 初学者がプログラミングを学び始めるときにつまづきがちな難しい概念が他の言語と比べ多くなく、入門しやすい言語といえます。 # # まとめると、Python には # # - データ解析や機械学習によく使われている # - Web アプリケーションの開発などでもよく使われている # - 初学者がはじめやすい言語 # # のような魅力があります。 # + [markdown] colab_type="text" id="jZNTuBQ54BSu" # ### 文法とアルゴリズム # # プログラミングによってある特定の処理をコンピュータで自動化したい場合、**文法**と**アルゴリズム**の 2 つを理解しておく必要があります。 # # プログラムでは、まずはじめにコンピュータに命令を伝えるためのルールとなる**文法**を覚える必要があります。 # 文法を無視した記述があるプログラムは、実行した際にエラーとなり処理が停止します。そのため、文法はしっかりと理解しておく必要があります。 # # ただし、文法さえ理解していれば十分かというとそうではありません。一般的に、初学者向けのプログラミングの参考書では、この文法だけを取り扱うことも多いのですが、コンピュータに処理を自動化させることが目的であれば、文法だけでなく**アルゴリズム**も理解する必要があります。アルゴリズムとは、どういう順番でどのような処理をしていくかの一連の手順をまとめたものです。 # # この章では、Python の文法について紹介し、機械学習やディープラーニングで必要となるアルゴリズムについてはこれ以降の章で紹介します。 # # ここでは以下 4 つの文法に主眼を置きながら説明していきます。 # # - 変数 # - 制御構文 # - 関数 # - クラス # + [markdown] colab_type="text" id="hO4YLiKjIfDi" # ## 変数 # # **変数 (variable)** とは、様々な値を格納することができる、**名前がついた入れ物**です。 # この変数に値を格納したり、更新したりすることで、計算結果を一時的に保持しておくことができます。 # # ### 代入と値の確認 # 
# それでは、`a` という名前の変数に、`1` を**代入**してみましょう。 # + colab_type="code" id="SjpDQJn-3Rct" colab={} a = 1 # + [markdown] colab_type="text" id="aitz3Hri_a-n" # 代入は `=` の記号を用います。 # 数学的には `=` は等しいという意味を持ちますが、Python では**「左辺の変数に、右辺の値を代入する」**という意味になります。 # # Jupyter Notebook 上では、変数名だけ、もしくは変数名を最後の行に記述したセルを実行すると、値を確認することができます。 # + colab_type="code" id="6SbAfM5Rv9aM" outputId="e8173162-1c03-418e-f01f-3785b6b255e9" colab={"base_uri": "https://localhost:8080/", "height": 35} a # + [markdown] colab_type="text" id="8b0i8oelv9aR" # このように、変数に格納されている値を確認することができました。 # また、値を確認するための他の方法として、`print()` と呼ばれる**関数 (function)** を使用することもできます。 # 関数について詳しくは後述しますが、`print()` のように Python には予め多くの関数が定義されています。 そのような関数を**組み込み関数 (built-in function)** といいます。 # + colab_type="code" id="oTHXMTjiv9aR" outputId="fc9f6804-a059-48f1-ae0a-45ca25eecc86" colab={"base_uri": "https://localhost:8080/", "height": 35} print(a) # + [markdown] colab_type="text" id="9yizNcBvv9aU" # 変数名だけをセルに記入して実行する場合と`print()`を利用する場合の違いについては、後述します。 # + [markdown] colab_type="text" id="nGNlqHW_v9aV" # 変数につける名前は、コードを書く人が自由に決めることができます。 # ただし、わかりやすい名前をつけることがとても大切です。 # 例えば、人の名前を格納するための変数が `a` という変数名だと、それがどのような使われ方をするのかを容易に類推することができません。 # `name` という名前であれば、ひと目で見て何のための変数かが分かるようになります。 # これは、自分のコードを読む他人や、未来の自分にとってコードを理解するための大きな手がかりとなります。 # + [markdown] colab_type="text" id="BQhk7-XfDFhS" # ### コメント # # Python では、`#` の後からその行の終わりまでに存在する全ての文字列は無視されます。 # この `#` の後ろに続く部分を**コメント (comment)**と呼び、すでに書かれたコードをコメントにすることを**コメントアウト (comment out)**と言います。 # コメントは、コード中に変数の意味や処理の意味をコードを読む人に伝えるためによく使われます。 # # Jupyter Notebook のコードセルに書かれたコードを行ごとコメントアウトしたい場合は、その行を選択した状態で `Ctrl + /` を入力することで自動的に行の先頭に `#` 記号を挿入することができます。複数行を選択していれば、選択された複数の行が同時にコメントアウトされます。また、コメントアウトされている行を選択した状態で同じキー入力を送ると、コメントアウトが解除されます。これを**アンコメント (uncomment)**と呼ぶこともあります。 # # 下のセルを実行してみましょう。 # + colab_type="code" id="YK_XjChXDXmH" colab={} # この行及び下の行はコメントアウトされているため実行時に無視されます # print(a) # + [markdown] colab_type="text" id="_xAqta5WIfGJ" # `print(a)` が書かれているにも関わらず、何も表示されませんでした。 # これは、`print(a)` 
関数が書かれた行がコメントアウトされていたためです。 # + [markdown] colab_type="text" id="I_Z6hhts_v4c" # ### 変数の型 # # プログラミングで扱う値には種類があります。 # Python では、**整数 (integer)**、**実数 (real number)**、**文字列 (string)** などが代表的な値の種類です。 # それぞれの種類によって、コンピュータ内での取扱い方が異なり、この種類のことは一般に**型 (type)** と呼びます。 # # 例えば、整数、実数、そして文字列をそれぞれ別々の変数に代入するコードは以下のとおりです。 # + colab_type="code" id="FChuhC8Jv9aa" colab={} a = 1 # + colab_type="code" id="qCdjuQqNv9ac" colab={} b = 1.2 # + colab_type="code" id="VLMovMgnv9ae" colab={} c = 'Chainer' # + [markdown] colab_type="text" id="DwMajDvAv9ag" # コンピュータの中での取り扱い方は異なりますが、Python では**変数の型を自動的に決定する**ため、初めのうちはあまり気にする必要はありません。 # ただし、違う型同士の演算では場合によってエラーが発生するなどの問題が生じるため、簡単に型の特徴は把握しておく必要があります。 # # まずは、上記の `a`, `b`, `c` の型を確認する方法を紹介します。 # 型の確認は `type()` という組み込み関数を使用します。 # + colab_type="code" id="RUflpSu6v9ag" outputId="3219c3be-c030-478f-ef28-b2da097fc8f4" colab={"base_uri": "https://localhost:8080/", "height": 35} type(a) # + colab_type="code" id="YBtHAigUv9aj" outputId="36b7795d-d590-4b51-c02e-b3aa656d6bd5" colab={"base_uri": "https://localhost:8080/", "height": 35} type(b) # + colab_type="code" id="6PaQmlMCv9an" outputId="a0b6ebc3-0871-47f6-b8e1-46937b23d623" colab={"base_uri": "https://localhost:8080/", "height": 35} type(c) # + [markdown] colab_type="text" id="gatuDQAyv9aq" # `a` は `int` という整数の型をもつ変数であり、`b` は `float` という実数の型をもつ変数です。 # この `float` という型の名前は、コンピュータ内で実数を扱うときの形式である**浮動小数点数 (floating-point number)** に由来しています。 # # `c` は `str` という文字列の型をもつ変数であり、値を定義するにはシングルクォーテーション `' '` もしくはダブルクォーテーション `" "` で対象の文字列をくくる必要があります。 # + [markdown] colab_type="text" id="Chdmx6HGIfEV" # Python では、`.` を含まない連続した数字を `int`、直前・直後も含め `.` が含まれる連続した数字を `float` だと自動的に解釈します。 # 例えば、`7` や `365` は `int` ですが、`2.718`、`.25`、`10.` などは `float` になります。 # # 実数の `0` は `0.0` とも `.0` とも `0.` とも書くことができます。 # + colab_type="code" id="sUKUmRgwIfEY" outputId="797e97a5-ea05-4a08-ac99-d6b7cbd39f49" colab={"base_uri": "https://localhost:8080/", "height": 35} type(0) # + colab_type="code" id="VOiscn8OIfEc" 
outputId="f84135ab-0652-4a21-cfad-f93518575455" colab={"base_uri": "https://localhost:8080/", "height": 35} type(0.) # + colab_type="code" id="442-_TpmIfEf" outputId="3516a8d3-02b6-4b2d-b366-aec051d785e5" colab={"base_uri": "https://localhost:8080/", "height": 35} type(.0) # + [markdown] colab_type="text" id="by0V-W7nIfEj" # 例えば、実数の `5` は以下のように書くことができます。 # + colab_type="code" id="uUYCOBaAIfEj" outputId="00c3e638-e7dd-4d7a-affa-863d9407c9e0" colab={"base_uri": "https://localhost:8080/", "height": 35} type(5.0) # + colab_type="code" id="y26NMdQ2IfEo" outputId="cc8fe9ce-1a65-444c-cc59-3d11de8862b0" colab={"base_uri": "https://localhost:8080/", "height": 35} type(5.) # + [markdown] colab_type="text" id="g3BdGDJaIfEr" # 一方、`.5` と書くと、これは `0.5` の略記と解釈されることに注意してください。 # + colab_type="code" id="aXpxR7WmIfEs" outputId="f6ade488-902d-4d96-9cc0-e0<PASSWORD>aca<PASSWORD>" colab={"base_uri": "https://localhost:8080/", "height": 35} type(.5) # + colab_type="code" id="enBKbL9fIfEx" outputId="992db59f-1989-4d2d-db0d-241aa434cc83" colab={"base_uri": "https://localhost:8080/", "height": 35} print(.5) # + [markdown] colab_type="text" id="xIr4oyIav9a-" # ### 複数同時の代入 # # Python では複数の変数に対する代入を一度に行うことができ、**複数同時の代入 (multiple assignment)** と呼びます。 # 例えば、上記の `a = 1` と `b = 1.2` を同時に一行で記述すると以下のようになります。 # + colab_type="code" id="GxERn_L6v9a-" colab={} a, b = 1, 1.2 # + [markdown] colab_type="text" id="MrlNU5sQv9bA" # 3 つ以上の変数に対して、複数同時の代入を行うことも可能です。 # + colab_type="code" id="pzzK0Ocfv9bB" colab={} a, b, c = 1, 1.2, 'Chainer' # + [markdown] colab_type="text" id="oJCqJg0jv9bD" # 複数同時の代入は後の章で頻出するため、覚えておきましょう。 # + [markdown] colab_type="text" id="VsdsnV9uiQev" # ### 算術演算子 # # 様々な計算を意味する**演算子**と呼ばれる記号があります。 # はじめに紹介するのは**算術演算子 (arithmetic operator)** と呼ばれるもので、 2 つの変数または値を取り、 1 つの演算結果を返します。 # # 代表的な演算として**四則演算(加算・減算・乗算・除算)**があります。 # 四則演算に対応する演算子として、Python では以下の記号が用いられます。 # # | 演算 | 記号 | # |------|------| # | 加算(足し算) | `+` | # | 減算(引き算) | `-` | # | 乗算(掛け算) | `*` | # | 除算(割り算) | `/` | # # 
具体例を見ながら使い方を説明します。 # + colab_type="code" id="MBaRZ2RkBWbd" outputId="744c489e-75bd-4fbb-f6c1-7e73d751ff07" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で加算 -> 結果は整数 1+1 # + [markdown] colab_type="text" id="DE56EVRvIfFJ" # このように、演算子を使う際には、**記号の両側に値を書きます。** # このとき、演算子の両側にひとつずつ空白を空けることが多いです。 # 文法的な意味はありませんが、コードが読みやすくなります。 # この空白は Python のコーディング規約である [PEP8](https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator) でも推奨されています。 # + colab_type="code" id="BSsWGE_Ev9bH" outputId="3a1c9e29-c5de-4d34-b3b3-3a240378e4bd" colab={"base_uri": "https://localhost:8080/", "height": 35} 1 + 1 # + [markdown] colab_type="text" id="ai1hzuv6v9bJ" # 値が代入されている変数との演算も下記のように行うことができます。 # + colab_type="code" id="yvA4BP5hIfFJ" outputId="cedf23b1-33b6-4f96-c655-9b64346e755c" colab={"base_uri": "https://localhost:8080/", "height": 35} a + 2 # + [markdown] colab_type="text" id="y6xkeITrv9bL" # また、`int` と `float` は異なる型同士ですが、計算を行うことができます。 # `int` と `float` の演算結果の型は `float` になります。 # + colab_type="code" id="IjU6vwcdIfFM" outputId="21f73fdf-108e-4988-f2b4-4cfcffc219bf" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と実数で加算 -> 結果は実数 a + b # + [markdown] colab_type="text" id="PwsK4SxPIfFN" # 他の演算子の例を示します。 # + colab_type="code" id="hL5QmnGgBYNF" outputId="d408993d-3a50-4e49-9e40-f15696984208" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で減算 -> 結果は整数 2 - 1 # + colab_type="code" id="huifgJVkv9bR" outputId="1b6ddcff-4a15-4306-b615-cc27c0ccb0b2" colab={"base_uri": "https://localhost:8080/", "height": 35} # 実数と整数で減算 -> 結果は実数 3.5 - 2 # + colab_type="code" id="v82yE4keBZeo" outputId="40c8b70c-1e5a-4649-c5d4-141a9cefc76e" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で乗算 -> 結果は整数 3 * 5 # + colab_type="code" id="kYYOMXTHv9bX" outputId="a1fde9ed-7a8f-4184-f2e4-86e1a76b6c3d" colab={"base_uri": "https://localhost:8080/", "height": 35} # 実数と整数で乗算 -> 結果は実数 2.5 * 2 # + colab_type="code" 
id="KHRGnOcNBi05" outputId="a988ca7c-861f-4529-b243-c195573c0e2c" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で除算 -> 結果は実数 3 / 2 # + colab_type="code" id="NxnIvty-v9bd" outputId="d1057e6c-490d-4dfc-fea6-53b6121bda09" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で除算 -> 結果は実数 4 / 2 # + [markdown] colab_type="text" id="TDJRJR5vIfFe" # Python 3 では、 `/` 記号を用いて除算を行う場合、除数(割る数)と被除数(割られる数)が整数であっても、計算結果として実数が返ります。 # 計算結果として実数を返す除算のことを特に、**真の除算 (true division)** と言います。 # 一方、商(整数部分)を返すような除算演算子として、 `//` 記号が用意されています。 `/` 記号を 2 回、間を空けずに繰り返します。計算結果として商を返す除算のことを、 **切り捨て除算 (floor division)** と呼びます。 # 商を計算したい場合に便利な演算子であるため、こちらも覚えておきましょう。 # # ※ Python 2 では、除数も被除数も整数であった場合、 `/` 記号を用いても切り捨て除算が行われるので注意してください。 # + colab_type="code" id="PFwX_4_pBbY0" outputId="260dd494-deb0-49c6-edb6-19d1662137e7" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で切り捨て除算 -> 結果は整数 3 // 2 # + colab_type="code" id="nCyFpJHKv9bh" outputId="32099b02-2f65-46c5-89d9-343396552ed9" colab={"base_uri": "https://localhost:8080/", "height": 35} # 整数と整数で切り捨て除算 -> 結果は整数 4 // 2 # + [markdown] colab_type="text" id="Mywi7srHv9bj" # また、ここで注意すべき点として、整数や実数と文字列の演算は基本的にエラーになります。 # + colab_type="code" id="ia8mAAr5v9bj" outputId="38cd2f68-c08d-4abe-c8bf-300d8c8b1154" colab={"base_uri": "https://localhost:8080/", "height": 172} # error a + c # + [markdown] colab_type="text" id="T24iNnwSv9bk" # **エラーメッセージを読みましょう。** # # > TypeError: unsupported operand type(s) for +: 'int' and 'str' # # と言われています。「+ にとって int と str はサポートされていない被作用子(+ が作用する対象のこと。operand)です」と書かれています。「int に str を足す」ということはできないというわけです。 # # `int` もしくは `float` と、 `str` の間の加算、減算、除算では上記のエラーが生じます。 # ただし、`str` と `int` の**乗算**は特別にサポートされており、計算を実行することができます。 # + colab_type="code" id="oKJmWQKcv9bl" outputId="513c4f7e-081e-4b3f-826a-39e8e5885dff" colab={"base_uri": "https://localhost:8080/", "height": 35} # str と int で乗算 c * 3 # + [markdown] colab_type="text" id="xPX4dcGUv9bm" # 上のコードは、`c` という文字列を `3` 回繰り返す、という意味になります。 # + 
[markdown] colab_type="text" id="mmlvVMcIv9bm" # `str` 同士は足し算を行うことができます。 # + colab_type="code" id="DOK7H8LJv9bn" outputId="5654572a-9d44-423a-9ada-da324b5f7dfa" colab={"base_uri": "https://localhost:8080/", "height": 35} name1 = 'Chainer' name2 = 'チュートリアル' name1 + name2 # + [markdown] colab_type="text" id="QoCq87hkv9bq" # 整数と文字列を連結したいこともあります。 # 例えば、`1` という整数に、 `'番目'` という文字列を足して `'1番目'` という文字列を作りたいような場合です。 # その場合には、型を変換する**キャスト (cast)** という操作をする必要があります。 # # 何かを `int` にキャストしたい場合は `int()` という組み込み関数を使い、`str` にキャストしたい場合は `str()` という組み込み関数を使います。では、`1` という整数を `str` にキャストして、 `'番目'` という文字列と足し算を行ってみましょう。 # + colab_type="code" id="H8E1Puhmv9bq" outputId="31d58419-7b97-4bc6-f923-79f2c8fd9edb" colab={"base_uri": "https://localhost:8080/", "height": 35} 1 # + colab_type="code" id="TlnTtXUnv9br" outputId="b970fbfe-4151-42f9-ad44-0e607f912e02" colab={"base_uri": "https://localhost:8080/", "height": 35} type(1) # + colab_type="code" id="EK9fF95av9bt" outputId="a5ef7604-c6ed-4ffd-e19c-39b39496c752" colab={"base_uri": "https://localhost:8080/", "height": 35} str(1) # + colab_type="code" id="JxGEBOZzv9bw" outputId="cca1f61a-82c9-4f12-9921-97068ab3a6d0" colab={"base_uri": "https://localhost:8080/", "height": 35} type(str(1)) # + colab_type="code" id="2boojYfbv9by" outputId="9ac453cd-d210-4f5c-fcac-1502f0b69482" colab={"base_uri": "https://localhost:8080/", "height": 35} str(1) + '番目' # + [markdown] colab_type="text" id="RzvUMnTQv9bz" # また、`+=` や `-=` もよく使います。 # これは、演算と代入を合わせて行うもので、**累積代入文 (augmented assignment statement)** と呼ばれます。 # # 下記に示すとおり、`+=` では左辺の変数に対して右辺の値を足した結果で、左辺の変数を更新します。 # + colab_type="code" id="g7g0hOrOv9b0" outputId="914559f8-251d-4d5c-9edd-6cc9a778c484" colab={"base_uri": "https://localhost:8080/", "height": 35} # 累積代入文を使わない場合 count = 0 count = count + 1 count # + colab_type="code" id="bdgvqEjZv9b2" outputId="0cbef171-cbc1-4cb8-b965-1f52fcd005a6" colab={"base_uri": "https://localhost:8080/", "height": 35} # 累積代入文を使う場合 count = 0 count += 1 count # + [markdown] 
colab_type="text" id="AZ7QMIt-v9b3" # 四則演算の全てで累積代入文を利用することができます。 # つまり、`+=`, `-=`, `*=`, `/=` がそれぞれ利用可能です。 # + [markdown] colab_type="text" id="36GMdun-IfFn" # Python には、他にも幾つかの算術演算子が用意されています。 # 例えば以下の演算子です。 # # | 演算 | 記号 | # |------|------| # | 累乗 | `**` | # | 剰余  | `%` | # # `**` を使うと、$2^3$ は以下のように記述することができます。 # + colab_type="code" id="cK6zHxguIfFo" outputId="eb9bdd74-8294-49f5-aae4-a9566f3d7ddf" colab={"base_uri": "https://localhost:8080/", "height": 35} # 累乗 2 ** 3 # + [markdown] colab_type="text" id="g8RG0jTDv9b6" # `%` を使って、`9` を `2` で割った余りを計算してみましょう。 # + colab_type="code" id="CbsiwPcev9b6" outputId="27688f90-fef0-4d2d-8633-960ea0e8b128" colab={"base_uri": "https://localhost:8080/", "height": 35} # 剰余 9 % 2 # + [markdown] colab_type="text" id="Z62QzE-4Bc4a" # ### 比較演算子 # # 比較演算子は、2 つの値の比較を行うための演算子です。 # # | 演算 | 記号 | # |------|------| # | 小なり | `<` | # | 大なり | `>` | # | 以下 | `<=` | # | 以上 | `>=` | # | 等しい | `==` | # | 等しくない | `!=` | # # 比較演算子は、その両側に与えられた値が決められた条件を満たしているかどうか計算し、満たしている場合は `True` を、満たしていない場合は `False` を返します。 # `True` や `False` は、**ブール (bool) 型**と呼ばれる型を持った値です。 # ブール型の値は `True` もしくは `False` の 2 つしか存在しません。 # # いくつかの比較演算子の計算例を示します。 # + colab_type="code" id="5AXOptIWCN53" outputId="2525fafa-f2dd-40b6-adaa-9af505b059c4" colab={"base_uri": "https://localhost:8080/", "height": 35} 1 < 2 # + colab_type="code" id="ETSRMdkDv9b-" outputId="fb2a62ee-87a1-488a-9a77-e99854e5a083" colab={"base_uri": "https://localhost:8080/", "height": 35} # 型の確認 type(1 < 2) # + colab_type="code" id="wR2OPEwiCP5_" outputId="bd6701e6-b948-4ebb-f852-12ac81244631" colab={"base_uri": "https://localhost:8080/", "height": 35} 2 == 5 # + colab_type="code" id="TfEDslvKCQ7o" outputId="629c30b4-ab73-49be-e8dc-e9bd78ba460b" colab={"base_uri": "https://localhost:8080/", "height": 35} 1 != 2 # + colab_type="code" id="W1v_F8-7CWI-" outputId="789351f8-d4a5-41a7-8732-72f5ab885abd" colab={"base_uri": "https://localhost:8080/", "height": 35} 3 >= 3 # + colab_type="code" id="XapxlGSYCSOU" 
outputId="7f4fac18-bed4-490d-cb25-598737c2f7e2" colab={"base_uri": "https://localhost:8080/", "height": 35} 'test' == 'test' # + [markdown] colab_type="text" id="aBzVaMJ1IfF7" # 等しいかどうかを判定する比較演算子 `==` を使う際は、代入演算子 `=` と間違えないように気をつけてください。 # + [markdown] colab_type="text" id="Wr7NohtiCUN_" # ## エスケープシーケンス # # 通常の文字列では表せない特殊な文字を、規定された特別な文字の並びにより表したものを**エスケープシーケンス (escape sequence)** と呼びます。 # # よく使用するものとして、**改行**を意味する `\n`(もしくは `¥n`)、**タブ**を意味する `\t`(もしくは `¥t`)があります。 # + colab_type="code" id="m3CBKEsrC4L-" outputId="a3f2481b-b0d5-4b2b-f9b8-e30d9ad65422" colab={"base_uri": "https://localhost:8080/", "height": 53} print('Hello\nWorld') # + colab_type="code" id="FosQ7-tzC_45" outputId="c1e2bf56-0067-4983-fc0b-359561451085" colab={"base_uri": "https://localhost:8080/", "height": 35} print('Hello\tWorld') # + [markdown] colab_type="text" id="2_FlL8P5v9cI" # 最初に Jupyter Notebook 上で変数の値を確認する際に、`print()` を使う場合と使わない場合の違いについて触れましたが、エスケープシーケンスを評価したい場合には、`print()` を使う必要があります。 # + colab_type="code" id="cTxeZsrPv9cJ" colab={} d = 'Hello\nWorld' # + colab_type="code" id="ZgVfZOKRv9cL" outputId="e0a850a4-2094-405c-877a-1a54ae61022f" colab={"base_uri": "https://localhost:8080/", "height": 35} # エスケープシーケンスが評価されない d # + colab_type="code" id="2DcxLEa1v9cM" outputId="7da2adaf-417a-4d22-fda2-b61c5f72f05a" colab={"base_uri": "https://localhost:8080/", "height": 53} # エスケープシーケンスが評価される print(d) # + [markdown] colab_type="text" id="Udw0u53Lv9cO" # ## 文字列メソッド # # `str` 型の変数には、いくつか便利な機能がついています。 # 例えば、その変数が持つ全ての文字を小文字や大文字に変換する `lower()` や `upper()` といった機能があります。 # このような型が持っている関数を**メソッド (method)** と呼びます。 # # + colab_type="code" id="tDhIrHvev9cP" outputId="0209e82b-f9fd-4868-b6c8-fc2b9e414d3f" colab={"base_uri": "https://localhost:8080/", "height": 35} name = 'Chainer' name # + colab_type="code" id="pUb1lrhzv9cQ" outputId="56044dec-afe5-4577-e8b9-db46b16c8617" colab={"base_uri": "https://localhost:8080/", "height": 35} # すべてを小文字に変換 name.lower() # + colab_type="code" id="Ma-TiLUsv9cS" 
outputId="0e35604e-7314-442c-ebab-6d6bf4a296ec" colab={"base_uri": "https://localhost:8080/", "height": 35} # すべてを大文字に変換 name.upper() # + [markdown] colab_type="text" id="qvpJWVYkv9cT" # よく使う文字列メソッドの一つに、 `format()` があります。 # これは、ある文字列の一部分に、あとから別な文字列を埋め込むために使用します。 # 対象の文字列には `{}` で予め値を埋め込みたい場所を指定しておきます。 # + colab_type="code" id="UBDe56zvv9cT" outputId="e3324141-b48b-4668-841d-442abe5a6ffc" colab={"base_uri": "https://localhost:8080/", "height": 35} name = 'Chainer' '{} チュートリアルへようこそ'.format(name) # + colab_type="code" id="mN4n4NYMv9cV" outputId="64f48915-fb3b-49f1-f00d-0dd55cf302b9" colab={"base_uri": "https://localhost:8080/", "height": 35} name1 = 'Chainer' name2 = 'チュートリアル' '{} {}へようこそ'.format(name1, name2) # + [markdown] colab_type="text" id="G_LHyH7Uv9cW" # `format()` メソッドを用いると `int` 型 や `float` 型の変数を、`str` 型へ明示的にキャストすることなく文字列に埋め込むことができます。 # + colab_type="code" id="hnNv40_Pv9cX" outputId="0d06d60c-4a0b-4af3-a660-ff7ebbe3dad2" colab={"base_uri": "https://localhost:8080/", "height": 35} version = 3.7 'Python {}'.format(version) # + [markdown] colab_type="text" id="Pm4cvrnQv9cY" # ## 浮動小数点数がもつメソッド # # 「メソッド」は `str` 型の変数だけが持つものではありません。 # `int` 型の変数や、`float` 型の変数にも、その型の特徴に合わせた機能が、メソッドとして提供されています。 # # 例えば、`float` 型の変数には、`as_integer_ratio()` というメソッドがあり、比がその浮動小数点数の値となるような整数の組を返します。 # # 例えば、0.5 という値は、分数で表すと $\frac{1}{2}$ です。 # これは、以下のようにして調べることができます。 # + colab_type="code" id="pFPtOpz-v9cY" outputId="0b615cbf-b97a-413e-edcb-8116b0dcbe90" colab={"base_uri": "https://localhost:8080/", "height": 35} 0.5.as_integer_ratio() # + [markdown] colab_type="text" id="EZNdyQZLv9cZ" # 0.25 であれば、$\frac{1}{4}$ となります。 # + colab_type="code" id="OuwnCcupv9cZ" outputId="c987b2ef-bec9-4e0b-ab90-27bffc0e88a0" colab={"base_uri": "https://localhost:8080/", "height": 35} 0.25.as_integer_ratio() # + [markdown] colab_type="text" id="P1-Ak83pv9ca" # このような、型に紐付いたメソッドなどについては、この章の最後にある「クラス」という概念の説明の際にもう少し詳しく解説します。 # + [markdown] colab_type="text" id="VsBRE0Wyv9ca" # ## 複合データ型 # # これまでは `a = 1` のように 1 
つの変数に 1 つの値を代入する場合を扱ってきましたが、複数の値をまとめて取り扱いたい場面もあります。 # Python では複数の変数や値をまとめて扱うのに便利な、以下の 3 つの複合データ型があります。 # # - リスト (list) # - タプル (tuple) # - 辞書 (dictionary) # + [markdown] colab_type="text" id="S1TLaajMDtmr" # ### リスト # # 複数の変数を `,` (カンマ)区切りで並べ、それらの全体を `[ ]` で囲んだものを **リスト (list)** と言います。 # リストに含まれる値を**要素**と呼び、整数の**インデックス** (要素番号)を使ってアクセスします。 # + colab_type="code" id="MJ7ZYj82D-a1" outputId="970de678-1f56-4078-8f10-c7042c0fb8e5" colab={"base_uri": "https://localhost:8080/", "height": 35} # リスト型の変数を定義 numbers = [4, 5, 6, 7] # 値の確認 print(numbers) # + colab_type="code" id="HH3NT-y4EC4Z" outputId="cbcc163e-040f-4878-d3f5-65b6659bffa3" colab={"base_uri": "https://localhost:8080/", "height": 35} # 型の確認 type(numbers) # + [markdown] colab_type="text" id="Rk_jY_TTIfGY" # `numbers` には 4 つの数値が入っており、**要素数** は 4 です。 # リストの要素数は、リストの**長さ (length)** とも呼ばれ、組み込み関数の `len()` を用いて取得することができます。 # `len()` はよく使う関数であるため、覚えておきましょう。 # + colab_type="code" id="DK6aH0AaoELi" outputId="76d0dab8-bd1b-4c86-95b8-e25d6cca02c5" colab={"base_uri": "https://localhost:8080/", "height": 35} # 要素数の確認 len(numbers) # + [markdown] colab_type="text" id="RiJeRUdrEEUq" # リストの各要素へアクセスする方法はいくつかあります。 # 最も簡単な方法は `[]` を使ってアクセスしたい要素番号を指定して、リストから値を取り出したり、その位置の値を書き換えたりする方法です。 # ここで、注意が必要な点として、Python では先頭の要素のインデックス番号が `0` である点があります。 # インデックス番号 `1` は 2 番目の要素を指します。 # + colab_type="code" id="DeCsf_L1Em29" outputId="4cfc90bc-62e4-466d-ab0d-0a29877decef" colab={"base_uri": "https://localhost:8080/", "height": 35} # 先頭の要素にアクセス numbers[0] # + colab_type="code" id="ox-Ma4yv5xqO" outputId="7c6ee95d-c6cd-472d-9ea5-c4a44e8d9f0f" colab={"base_uri": "https://localhost:8080/", "height": 35} # 先頭から3番目の要素にアクセス numbers[2] # + colab_type="code" id="ve-ETFIrv9cg" colab={} # 2 番目の要素を書き換え numbers[1] = 10 # + colab_type="code" id="AXpG-tqNv9cg" outputId="1fa368f6-7164-4896-89fd-981d033adaa3" colab={"base_uri": "https://localhost:8080/", "height": 35} # 値の確認 numbers # + [markdown] colab_type="text" id="kOVYiVOwIfGW" # 
また、インデックスに負の値を指定すると、末尾からの位置となります。 # 要素番号 `-1` で最後の要素を参照することができます。 # + colab_type="code" id="WSaIdvb4IfGX" outputId="5bbb1cee-081d-4a68-a918-ad275b3b1172" colab={"base_uri": "https://localhost:8080/", "height": 35} # 末尾の要素にアクセス numbers[-1] # + colab_type="code" id="jP5fkl3V54WL" outputId="aaf2ac71-f21a-4c41-aab4-337e25f54331" colab={"base_uri": "https://localhost:8080/", "height": 35} # 末尾から3番目の要素にアクセス numbers[-3] # + [markdown] colab_type="text" id="DLzLjTQ8EoG9" # 次に、リストから一度に複数の要素を取り出す操作である**スライス (slice)** を紹介します。 # `開始位置:終了位置` のようにコロン `:` を用いてインデックスを範囲指定し、複数の部分要素にアクセスします。 # このスライスの処理は、この後の章でも多用するため、慣れておきましょう。 # # 例えば、先頭から 2 つの要素を取り出したい場合、以下のように指定します。 # + colab_type="code" id="MPI2H38DEy2N" outputId="623cb4f8-54fc-4f34-924c-7da10dacdeef" colab={"base_uri": "https://localhost:8080/", "height": 35} numbers[0:2] # + [markdown] colab_type="text" id="gK6_JXXTE2AY" # `開始位置:終了位置` と指定することで、開始位置から**終了位置のひとつ手前**までの要素を抽出します。 # 終了位置に指定したインデックスの値は含まれないことに注意してください。 # # また、指定する開始番号が `0` である場合、以下のような略記がよく用いられます。 # + colab_type="code" id="hBo2_C5aFPHR" outputId="1b249507-b615-4adf-dd4b-3c399799ba88" colab={"base_uri": "https://localhost:8080/", "height": 35} numbers[:2] # + [markdown] colab_type="text" id="Qqem8sujFQVP" # このように、先頭のインデックスは省略することができます。 # このような記法を使う場合は、終了位置を示す数字を**取り出したい要素の個数**と捉えて、**先頭から 2 つを取り出す**操作だと考えると分かりやすくなります。 # # 同様に、ある位置からリストの末尾までを取り出す場合も、終了位置のインデックスを省略することができます。 # 例えば、2 個目の要素から最後までを取り出すには以下のようにします。 # + colab_type="code" id="gItFJqoUFYzX" outputId="2bc2f7e8-b6cb-4bbb-8245-ec8835e87447" colab={"base_uri": "https://localhost:8080/", "height": 35} numbers[1:] # + [markdown] colab_type="text" id="RRlf3vqOv9cq" # この場合は、取り出される要素の個数は `len(numbers) - 1` 個となることに注意してください。 # # 以上から、`numbers[:2]` と `numbers[2:]` は、ちょうど 2 個目の要素を境に `numbers` の要素を 2 分割した前半部分と後半部分になっています。 # ここで、インデックスが 2 の要素自体は**後半部に含まれる**ということに注意してください。 # # また、開始位置も終了位置も省略した場合は、すべての要素が選択されます。 # + colab_type="code" id="nav8WoxNv9cq" outputId="b35c3b9d-f123-42a4-9d5b-505a31be278e" colab={"base_uri": 
"https://localhost:8080/", "height": 35} numbers[:] # + [markdown] colab_type="text" id="9qPrP6sYv9cs" # 現状では、`numbers[:]` と `numbers` の結果が同じであるため、どのように使用するか疑問に思われるかも知れません。 # しかし、後の章では NumPy というライブラリを用いてリストの中にリストが入ったような**多次元配列 (multidimensional array)** を扱っていきます。 # そして多次元配列を用いて行列を表す場合には、`0 列目のすべての値`を抽出するために `[:, 0]` のような記法を用いるケースが登場します。 # これは Python 標準の機能ではありませんが、Python 標準のスライス表記を拡張したものになっています。 # + [markdown] colab_type="text" id="AyoiOU5OFfET" # リストは数値以外に、文字列を扱うこともでき、また複数の型を同一のリスト内に混在させることもできます。 # + colab_type="code" id="9JglgRiWFyo8" outputId="f8700d40-519f-4e92-db32-60fcadc145c0" colab={"base_uri": "https://localhost:8080/", "height": 35} # 文字列を格納したリスト array = ['hello', 'world'] array # + colab_type="code" id="Kcf8MyrBv9cu" outputId="c5f989f4-138a-4a1c-ac4a-60760f318618" colab={"base_uri": "https://localhost:8080/", "height": 35} # 複数の型が混在したリスト array = [1, 1.2, 'Chainer'] array # + [markdown] colab_type="text" id="9fKT2sjNv9cu" # リストにリストを代入することもできます。 # また、Python 標準のリストでは入れ子になったリスト内の要素数がばらばらでも問題ありません。 # + colab_type="code" id="Q8JxfHS8v9cv" outputId="572e4d48-aae1-46e9-cf6a-f2fbf1d68ac4" colab={"base_uri": "https://localhost:8080/", "height": 35} array = [[1, 1.2, 'Chainer', True], [3.2, 'Tutorial']] array # + [markdown] colab_type="text" id="5lYy-LP6F3wP" # リストを使う際に頻出する操作として、**リストへの値の追加**があります。 # リスト型には `append()` というメソッドが定義されており、これを用いてリストの末尾に新しい値を追加することができます。 # # 上記の `array` に値を追加してみましょう。 # + colab_type="code" id="RmgtEN8Lv9cw" colab={} # 末尾に 2.5 を追加 array.append(2.5) # + colab_type="code" id="2E8q5y94v9cx" outputId="c2993fd6-a0f4-4786-b61a-f5462cae0817" colab={"base_uri": "https://localhost:8080/", "height": 35} # 値の確認 array # + [markdown] colab_type="text" id="y5dQJA57v9cz" # また、今後頻出する処理として、**空のリスト**を定義しておき、そこに後段の処理の中で適宜新たな要素を追加していくという使い方があります。 # + colab_type="code" id="00AEmnpxGPky" outputId="7f95e646-3264-4f63-cb58-98fc6bd42e73" colab={"base_uri": "https://localhost:8080/", "height": 35} # 空のリストを定義 array = [] # 空のリストに要素を追加 array.append('Chainer') 
array.append('チュートリアル') array # + [markdown] colab_type="text" id="CV1072C3M7Qy" # ### タプル # # **タプル (tuple)** はリストと同様に複数の要素をまとめた型ですが、リストとは異なる点として、定義した後に**中の要素を変更できない**という性質を持ちます。 # # タプルの定義には `( )`を用います。 # + colab_type="code" id="mo4lGZN6NGAI" outputId="8234b24e-fd6d-4e77-f409-0681ad27c406" colab={"base_uri": "https://localhost:8080/", "height": 35} # タプルを定義 array = (4, 5, 6, 7) array # + colab_type="code" id="LEUgknZov9c3" outputId="96b964b2-97ef-4312-d552-ee24b5d47238" colab={"base_uri": "https://localhost:8080/", "height": 35} # 型の確認 type(array) # + [markdown] colab_type="text" id="9w2M0A02v9c3" # タプルの定義する際に `( )` を使用したため、要素へのアクセスも `( )` を使うように感じるかもしれませんが、実際にはリストと同様 `[ ]` を使用します。 # + colab_type="code" id="Fq7zIHMfNIXG" outputId="8cc0dcde-bfc6-4d74-cb73-6a716394bb98" colab={"base_uri": "https://localhost:8080/", "height": 35} # 先頭の要素へアクセス array[0] # + colab_type="code" id="3XdB2S5pv9c4" outputId="c270d121-31e8-4b38-f1c7-0f15b49f4b1e" colab={"base_uri": "https://localhost:8080/", "height": 35} # リストと同様、スライスも使用可能 array[:3] # + [markdown] colab_type="text" id="KjdkYtSmNSl3" # 先述の通り、タプルは各要素の値を変更することができません。 # この性質は、定数項などプログラムの途中で書き換わってしまうことが望ましくないものをまとめて扱うのに便利です。 # # 実際に、タプルの要素に値の書き換えを行うとエラーが発生します。 # + colab_type="code" id="DRoToOjUNI51" outputId="3db3cb2f-673f-4463-dbe3-82ced4e09830" colab={"base_uri": "https://localhost:8080/", "height": 172} # error array[0] = 10 # + [markdown] colab_type="text" id="6aq_3yNZv9c6" # `tuple` のように中身が変更できない性質のことを**イミュータブル (immutable)**であると言います。反対に、`list` のように中身が変更できる性質のことを**ミュータブル (mutable)**であると言います # # タプルも Chainer でデータセットを扱うときなどに頻出する型です。その性質と取り扱い方を覚えておきましょう。 # + [markdown] colab_type="text" id="nV8Lp5jLNO66" # ### 辞書 # # リストやタプルでは、複数の値をまとめて扱うことができました。 # そこで、定期テストの結果をまとめることを考えてみましょう。 # # 例えば、数学 90 点、理科 75 点、英語 80 点だったという結果を `scores = [90, 75, 80]` とリストで表してみます。 # しかし、これでは**何番目がどの教科の点数に対応するか**、一見して分かりにくいと思われます。 # # Python の `dict` 型は、**キー (key)** とそれに対応する**値 (value)** をセットにして格納することができる型であり、このようなときに便利です。 # # 
リストやタプルでは、各要素にアクセスする際に整数のインデックスを用いていましたが、辞書ではキーでインデックス化されているため、整数や文字列など、色々なものを使って要素を指定することができます。 # # 辞書は `{}` を用いて定義し、要素にアクセスする際には、リストやタプルと同様に `[ ]` を使用し、`[ ]` の中にキーを指定して対応する値を取り出します。 # + colab_type="code" id="NyxphI5fNMO8" outputId="c4d9a198-171f-4c0e-ec96-de8cdc161c0d" colab={"base_uri": "https://localhost:8080/", "height": 35} # 辞書を定義 scores = {'Math': 90, 'Science': 75, 'English': 80 } scores # + colab_type="code" id="M-V2XOEdNu2a" outputId="ee4bdf90-c4e5-4a92-a6d0-d79e3bcf2b1c" colab={"base_uri": "https://localhost:8080/", "height": 35} # key が Math の value にアクセス scores['Math'] # + colab_type="code" id="TA5V7SUBv9c8" outputId="90c16e6d-1de9-4931-efbe-b2164e075b6b" colab={"base_uri": "https://localhost:8080/", "height": 35} # key に日本語を使用することも可能 scores = {'数学': 90, '理科': 75, '英語': 80} scores # + colab_type="code" id="IL2y81t-v9c9" outputId="7ea8f85a-c3d2-4ad2-cda5-74bd524868f3" colab={"base_uri": "https://localhost:8080/", "height": 35} scores['数学'] # + [markdown] colab_type="text" id="Jd_XpPMqfrSL" # 他の人が定義した辞書に、**どのようなキーが存在するのか**を調べたいときがあります。 # 辞書には、そのような場合に使える便利なメソッドがいくつか存在します。 # # - `keys()`: キーのリストを取得。`dict_keys` というリストと性質が似た型が返る # - `values()`: 値のリストを取得。`dict_values` というリストと性質が似た型が返る # - `items()`: 各要素の `(key, value)` のタプルが並んだリストを取得。`dict_items` というリストと性質が似た型が返る # + colab_type="code" id="FkJjSlbShKjK" outputId="93597067-1c5e-4ab0-ab5f-3301524f6bff" colab={"base_uri": "https://localhost:8080/", "height": 35} # キーのリスト scores.keys() # + colab_type="code" id="YOAOTDtUhNBz" outputId="ad624c25-5b59-4544-dde2-94ec3adde7ea" colab={"base_uri": "https://localhost:8080/", "height": 35} # 値のリスト scores.values() # + colab_type="code" id="mWA7opwAhVEa" outputId="26ee1d21-3011-49df-f1ff-e6188e698b1c" colab={"base_uri": "https://localhost:8080/", "height": 35} # (キー, 値)というタプルを要素とするリスト scores.items() # + [markdown] colab_type="text" id="I9WEXgSZv9dC" # `dict_keys`, `dict_values`, `dict_items` と新しい型が登場しましたが、これは辞書型特有の型であり厳密には標準のリストとは異なりますが、リストと性質の似た型であるという程度の認識で問題ありません。 # + 
[markdown] colab_type="text" id="qpoTmSY8bTDw" # 辞書に要素を追加する場合は、新しいキーを指定して値を代入します。 # + colab_type="code" id="nDZ4RRcjnERu" colab={} scores['国語'] = 85 # + colab_type="code" id="P0HJVZJSnKal" outputId="700a1c1a-d175-40c9-c008-56dca29eb5f8" colab={"base_uri": "https://localhost:8080/", "height": 35} scores # + [markdown] colab_type="text" id="NIC2fwAknPf9" # また、既に存在するキーを指定した場合には、値が上書きされます。 # + colab_type="code" id="teLZbAHQnUfN" colab={} scores['数学'] = 95 # + colab_type="code" id="kQESKoXgnYRN" outputId="1b2e3718-26d7-4d5e-fa90-e7fc4850795a" colab={"base_uri": "https://localhost:8080/", "height": 35} scores # + [markdown] colab_type="text" id="zTYLayT3qEzA" # ## 制御構文 # # 複雑なプログラムを記述しようとすると、繰り返しの処理や、条件によって動作を変える処理が必要となります。 # これらは**制御構文**を用いて記述します。 # # ここでは最も基本的な制御構文を 2 つ紹介します。 # # - 繰り返し (`for`, `while`) # - 条件分岐 (`if`) # # Python の制御構文は、**ヘッダ (header)** と **ブロック (block)** と呼ばれる 2 つの部分で構成されています。 # これらを合わせて **複合文 (compound statement)** と呼びます。 # # ![ヘッダーとブロック](images/02/02_05.png) # + [markdown] colab_type="text" id="ciZc5_PCv9dJ" # 上図に示すように、制御構文ではヘッダ行に `for` 文や `if-else` 句を記述し、行末に `:` 記号を書きます。次に、ヘッダ行の条件で実行したい一連の処理文を、ブロックとしてその次の行以降に記述していきます。その際、 **インデント (indent)** と呼ばれる空白文字を先頭に挿入することで、ブロックを表現します。同じ数の空白でインデントされた文がブロックとみなされます。 # Python では、インデントとして**スペース 4 つ**を用いることが推奨されています。 # + [markdown] colab_type="text" id="oELnTpERNwCQ" # ### 繰り返し(for 文) # # 同じ内容のメールを宛名だけ個別に変えて、1000 人に一斉送信したい場合など、繰り返す処理を記述する制御構文である `for` を使います。 # # ![for文](images/02/02_06.png) # + [markdown] colab_type="text" id="_-50XwKOv9dJ" # `for` 文の文法は上図のとおりです。 # # **イテラブルオブジェクト (iterable object)** とは、反復可能オブジェクトのことであり、要素を一度に 1 つずつ返せるオブジェクトのことを指します。 # `range()` という組み込み関数を使うと、引数に与えた整数の回数だけ順番に整数を返すイテラブルオブジェクトを作ることができます。 # `range(5)` と書くと、0, 1, 2, 3, 4 という整数 5 つを順番に返すイテラブルオブジェクトになります。 # # 後述しますが、このイテラブルオブジェクトとして、リストやタプルも指定することができます。 # + colab_type="code" id="f7Y4APgkON_7" outputId="8d3fedd9-b0af-4cf4-ec66-bffb78ce9569" colab={"base_uri": "https://localhost:8080/", "height": 106} # 5回繰り返す for i in range(5): print(i) # 
+ [markdown] colab_type="text" id="oLpTgIv4OPnt" # 上記の例では、イテラブルオブジェクトが1 つずつ返す値を変数 `i` で受け取っています。 # 最初は `i = 0` から始まっていることに注意してください。 # 最後の値も、`5` ではなく `4` となっています。 # このように、`range()` に 1 つの整数を与えた場合は、その整数 - 1 まで 0 から 1 つずつ増えていく整数を順番に返します。 # + colab_type="code" id="SUTjSLuKv9dK" outputId="be21ab95-21c5-4a56-8e73-35f157f13954" colab={"base_uri": "https://localhost:8080/", "height": 35} # 繰り返し処理が終わった後の値の確認 i # + [markdown] colab_type="text" id="4sMmj2Ewv9dL" # Jupyter Notebook では変数名をコードセルの最後の行に書いて実行するとその変数に代入されている値を確認できましたが、for 文の中のブロックでは明示的に `print()` を使う必要があります。 # `print()` を用いないと、以下のように何も表示されません。 # + colab_type="code" id="2SlR9vcGv9dL" colab={} # 変数の値は表示されない for i in range(5): i # + [markdown] colab_type="text" id="tEczQdb9v9dM" # for 文を使って、0 から始まって 1 ずつ大きくなっていく整数を順番に取得し、これをリストのインデックスに利用すれば、リストの各要素に順番にアクセスすることができます。 # + colab_type="code" id="b6XXuQ_gOnxk" outputId="ad85251e-4bcf-4b1f-c9d0-0aead91f025e" colab={"base_uri": "https://localhost:8080/", "height": 70} names = ['佐藤', '鈴木', '高橋'] for i in range(3): print(names[i]) # + [markdown] colab_type="text" id="ci6hVBb-v9dN" # 少し応用して、自動的に敬称をつけて表示してみましょう。 # + colab_type="code" id="B24Fyoe9v9dN" outputId="2157b8d3-b75a-4f19-c7c4-46c4a18bf4fa" colab={"base_uri": "https://localhost:8080/", "height": 70} for i in range(3): print('{}さん'.format(names[i])) # + [markdown] colab_type="text" id="msGhS1ISv9dN" # つぎに、さらに汎用性の高いプログラムを目指します。 # # 上記のコードに関して、汎用性が低い点として、`range(3)` のように `3` という値を直接記述していることが挙げられます。 # この `3` はリストの要素の数を意味していますが、リストの要素の数が変わると、このプログラムも書き換える必要があり、手間がかかったり、ミスが発生する原因となったりします。 # # リスト内の要素の数は、組み込み関数である `len()` を用いて取得できるため、これを使用した汎用性の高いプログラムに書き換えましょう。 # + colab_type="code" id="e4gbC4CIOzve" outputId="b5472139-d75b-46ac-de43-4ab365a4d14c" colab={"base_uri": "https://localhost:8080/", "height": 35} len(names) # + colab_type="code" id="4i21QYMwPB1C" outputId="0dffef88-45d8-4025-c5bd-75a333a70e0c" colab={"base_uri": "https://localhost:8080/", "height": 70} for i in range(len(names)): print('{}さん'.format(names[i])) # +
[markdown] colab_type="text" id="9wx-qJovPEC0" # これでリストの要素数に依存しないプログラムにすることができました。 # + [markdown] colab_type="text" id="KIO9gkUnv9dR" # また、リスト自体をイテラブルオブジェクトとして指定することにより、リスト要素数の取得も `[]` でのインデックス番号の指定もせずに、より可読性の高いプログラムを書くことができます。 # + colab_type="code" id="EDqeGWucv9dR" outputId="261c1319-2e24-474f-f283-0d3352e24a27" colab={"base_uri": "https://localhost:8080/", "height": 70} # リストをイテラブルオブジェクトに指定 for name in names: print('{}さん'.format(name)) # + [markdown] colab_type="text" id="iWEhrw_9v9dS" # 最初のケースと比べていかがでしょうか。 # 動作としては変わりがありませんが、可読性という観点も重要です。 # + [markdown] colab_type="text" id="eeTHJ3hzPW7U" # リストをイテラブルオブジェクトとして指定した場合、要素番号を取得できませんが、状況によっては要素番号を使用したいことがあります。 # # そのような場合は、`enumerate()` という組み込み関数を使います。 # これにイテラブルオブジェクトを渡すと、`(要素番号, 要素)` というタプルを 1 つずつ返すイテラブルオブジェクトになります。 # + colab_type="code" id="8dT-qUhGv9dT" outputId="6cdbe801-9f59-49c4-96e6-6d3c54da2e04" colab={"base_uri": "https://localhost:8080/", "height": 70} for i, name in enumerate(names): message = '{}番目: {}さん'.format(i, name) print(message) # + [markdown] colab_type="text" id="wGwCa9wmP2r4" # `enumerate()` と同様、`for` 文と合わせてよく使う組み込み関数に `zip()` があります。 # # `zip()` は、複数のイテラブルオブジェクトを受け取り、その要素のペアを順番に返すイテラブルオブジェクトを作ります。 # このイテラブルオブジェクトは、渡されたイテラブルオブジェクトそれぞれの先頭の要素から順番に、タプルに束ねて返します。 # このイテラブルオブジェクトの長さは、渡されたイテラブルオブジェクトのうち最も短い長さと一致します。 # + colab_type="code" id="5w8huhHcQR8F" outputId="528f66f9-c589-481b-f82e-4bf4dfb9982d" colab={"base_uri": "https://localhost:8080/", "height": 53} names = ['Python', 'Chainer'] versions = ['3.7', '5.3.0'] suffixes = ['!!', '!!', '?'] for name, version, suffix in zip(names, versions, suffixes): print('{} {} {}'.format(name, version, suffix)) # + [markdown] id="IeQCA00UQYDq" colab_type="text" # `suffixes` の要素数は 3 ですが、より短いイテラブルオブジェクトと共に `zip` に渡されたため、先頭から 2 つ目までしか値が取り出されていません。 # + [markdown] colab_type="text" id="WguFiYgqQnQT" # ### 条件分岐(if 文) # # `if` は、指定した条件が `True` か `False` かによって、処理を変えるための制御構文です。 # # ![if文](images/02/02_08.png) # + [markdown] colab_type="text" id="J-Z8g-Klv9dV" # `elif` 
と `else` は任意であり、`elif` は 1 つだけでなく複数連ねることができます。 # # 例えば、0 より大きいことを条件とした処理を書いてみます。 # + colab_type="code" id="E-x_6gXwSsm7" outputId="86d289f1-92f1-4ada-ff3e-017e71f7c4b4" colab={"base_uri": "https://localhost:8080/", "height": 35} # if の条件を満たす場合 a = 1 if a > 0: print('0より大きいです') else: print('0以下です') # + colab_type="code" id="BmcHf-w5v9dV" outputId="134ddbe4-5605-4286-945e-a128ad6abc23" colab={"base_uri": "https://localhost:8080/", "height": 35} # if の条件を満たさない場合 a = -1 if a > 0: print('0より大きいです') else: print('0以下です') # + [markdown] colab_type="text" id="u0F8tX6FStcQ" # また、`if` に対する条件以外の条件分岐を追加する場合は、下記のように `elif` を使います。 # + colab_type="code" id="6li0UyjpStjg" outputId="8e0cecde-fce4-4db6-8d1c-0dbeb80e36b0" colab={"base_uri": "https://localhost:8080/", "height": 35} a = 0 if a > 0: print('0より大きいです') elif a == 0: print('0です') else: print('0より小さいです') # + [markdown] colab_type="text" id="tZ15Hv9Gv9dY" # ### 繰り返し(while 文) # # 繰り返し処理は、`for` 以外にも `while` を用いて記述することもできます。 # `while` 文では、以下のように**ループを継続する条件**を指定します。 # 指定された条件文が `True` である限り、ブロックの部分に記述された処理が繰り返し実行されます。 # # ![while文](images/02/02_09.png) # + [markdown] colab_type="text" id="Zjv8d3Rmv9dZ" # `while` 文を使用した 3 回繰り返すプログラムは下記のとおりです。 # + colab_type="code" id="aFhfzaDFv9dZ" outputId="075646ab-8c87-41ea-fd6e-d7ee37334441" colab={"base_uri": "https://localhost:8080/", "height": 70} count = 0 while count < 3: print(count) count += 1 # + [markdown] colab_type="text" id="1Q3QReThv9da" # ここで使われている `count` という変数は、ループの中身が何回実行されたかを数えるために使われています。 # まず `0` で初期化し、ループ内の処理が一度行われるたびに `count` の値に 1 を足しています。 # この `count` を使った条件式を `while` 文に与えることで、ループを回したい回数を指定しています。 # # 一方、`while True` と指定すると、`True` は変数ではなく値なので、変更されることはなく、ループは無限に回り続けます。 # `while` 文自体は無限ループの状態にしておき、ループの中で `if` 文を使って、ある条件が満たされた場合はループを中断する、という使い方ができます。 # これには `break` 文が用いられます。 # # 以下は、`break` 文を使って上のコードと同様に 3 回ループを回すコードです。 # + colab_type="code" id="wj65vycFv9da" outputId="14670424-de4d-4493-bf64-2339416c69db" colab={"base_uri": "https://localhost:8080/", "height": 70} count = 
0 while True: print(count) count += 1 if count == 3: break # + [markdown] colab_type="text" id="AQFnBfgEv9da" # `count` の値が 3 と等しいかどうかが毎回チェックされ、等しくなっていれば `break` 文が実行されて `while` ループが終了します。 # + [markdown] colab_type="text" id="2rtEn2APv9db" # `while` 文を使って、指定された条件を満たして**いない**間ループを繰り返すという処理も書くことができます。`while` 文自体の使い方は同じですが、条件を反転して与えることで、与えた条件が `False` である間繰り返されるようにすることができます。 # # これには、ブール値を反転する `not` を用います。 # `not True` は `False` を返し、`not False` は `True` を返します。 # + colab_type="code" id="0PucNNA0v9db" outputId="dc0151bc-a617-48c1-c63b-5826c8ed201c" colab={"base_uri": "https://localhost:8080/", "height": 35} not True # + colab_type="code" id="9iX7JkEOv9db" outputId="3d72981a-bde9-46e1-de00-4fad8b237fda" colab={"base_uri": "https://localhost:8080/", "height": 35} not False # + colab_type="code" id="TkSUZ1iZv9dc" outputId="5577f165-0fa0-4ffa-af45-ac847a311049" colab={"base_uri": "https://localhost:8080/", "height": 35} not 1 == 2 # + [markdown] colab_type="text" id="0aM2yZ7qv9dd" # このように、`not` はあとに続くブール値を反転します。 # これを用いて、`count` が 3 **ではない**限りループを繰り返すというコードを `while` 文を使って書いてみましょう。 # + colab_type="code" id="vhvBNYCXv9dd" outputId="c7651cd7-4f86-47ad-b8a6-c505dd996de5" colab={"base_uri": "https://localhost:8080/", "height": 70} count = 0 while not count == 3: print(count) count += 1 # + [markdown] colab_type="text" id="e96zsBv7IfDn" # ## 関数 # # 何かひとまとまりの処理を書いた際には、その処理のためのコードをまとめて、プログラム全体の色々な箇所から再利用できるようにしておくと、便利な場合があります。 # ここでは、処理をひとまとめにする方法の一つとして**関数 (function)** を定義する方法を紹介します。 # + [markdown] colab_type="text" id="RCB98_8xv9df" # ### 関数を定義する # # ![関数の定義](images/02/02_10.png) # # 例えば、**受け取った値を 2 倍して表示する関数**を作ってみましょう。 # # 関数を定義するには、まず名前を決める必要があります。 # 今回は `double()` という名前の関数を定義してみます。 # # 関数も制御構文と同じく**ヘッダー**と**ブロック**を持っています。 # + colab_type="code" id="iDh3Mpi_IfDp" colab={} # 関数 double() の定義 def double(x): print(2 * x) # + [markdown] colab_type="text" id="yG-qN405v9dg" # **関数は定義されただけでは実行されません。** # 定義した関数を使用するためには、定義を行うコードとは別に、実行を行うコードが必要です。 # + colab_type="code" id="6QUecRiUv9dg" 
outputId="7241aa8c-2a9c-4c95-8d63-ebe27a2a6fd8" colab={"base_uri": "https://localhost:8080/", "height": 35} # 関数の実行 double(3) # + colab_type="code" id="dR7DqN2fv9dh" outputId="89bb6598-567d-4247-8e0e-2d15072322df" colab={"base_uri": "https://localhost:8080/", "height": 35} double(5) # + colab_type="code" id="wjStuxcBv9di" outputId="138a4607-ebf9-48f3-e4f5-96863b9eec0f" colab={"base_uri": "https://localhost:8080/", "height": 35} double(1.5) # + [markdown] colab_type="text" id="CRggp-QcIfDs" # `double(x)` における `x` のように、関数に渡される変数や値のことを**引数 (argument)** といいます。 # 上の例は、名前が `double` で、1つの引数 `x` をとり、`2 * x` という計算を行い、その結果を表示しています。 # + [markdown] colab_type="text" id="VDqeEdX5v9dj" # ### 複数の引数をとる関数 # # 複数の引数をとる関数を定義する場合は、関数名に続く `()` の中に、カンマ `,` 区切りで引数名を並べます。 # # 例えば、引数を 2 つとり、足し算を行う関数 `add()` を作ってみましょう。 # + colab_type="code" id="ARE2LU8Qv9dl" colab={} # 関数の定義 def add(a, b): print(a + b) # + colab_type="code" id="Zte-_M1-v9dl" outputId="cf78e6e7-522c-4551-b876-8f7f4f582d92" colab={"base_uri": "https://localhost:8080/", "height": 35} # 関数の実行 add(1, 2) # + colab_type="code" id="7HUAVoGhv9dm" outputId="8e0d0ec9-9774-42b6-ef79-52b20cf88b8a" colab={"base_uri": "https://localhost:8080/", "height": 35} add(3, 2.5) # + colab_type="code" id="IjMBFjvmv9dm" outputId="75da673b-d203-460c-f054-205cbe9ace97" colab={"base_uri": "https://localhost:8080/", "height": 35} add(1, -5) # + [markdown] colab_type="text" id="mwZ-T1CTv9dn" # 今回の `double()` や `add()` は定義を行い自作した関数ですが、Python には予め多くの関数が定義されています。 # そのような関数を**組み込み関数 (built-in function)** と呼びます。 # すでに使用している `print()` や `len()`, `range()` などが、これに該当します。 # 組み込み関数の一覧は[こちら](https://docs.python.org/ja/3/library/functions.html)で確認することができます。 # # + [markdown] colab_type="text" id="A8XpuE_av9dn" # ### 引数をとらない関数 # # 引数をとらない関数を定義する場合でも、関数名の後に `()` を加える必要があります。 # # 例えば、実行するとメッセージを表示する関数を定義して、実行してみましょう。 # + colab_type="code" id="z2Xej00Bv9dn" colab={} # 引数のない関数の定義 def hello(): print('Chainerチュートリアルにようこそ') # + colab_type="code" id="peShhHg2v9dn" 
outputId="b437e0f9-1c01-4d93-a13c-2d50cf8202f9" colab={"base_uri": "https://localhost:8080/", "height": 35} # 引数のない関数の実行 hello() # + [markdown] colab_type="text" id="BZGSKTbcv9do" # ### 引数のデフォルト値 # # 引数には、あらかじめ値を与えておくことができます。 # これは、引数をとる関数を定義する際に、何も引数に値が渡されなかったときにどのような値がその引数に渡されたことにするかをあらかじめ決めておける機能で、その値のことを**デフォルト値**と呼びます。 # # 例えば、上の `hello()` という関数に、`message` という引数をもたせ、そこにデフォルト値を設定しておきます。 # + colab_type="code" id="uw2d8qDov9do" colab={} def hello(message='Chainerチュートリアルにようこそ'): print(message) # + [markdown] colab_type="text" id="7i8TQcfzv9do" # この関数は引数に何も与えずに呼び出すと、「Chainerチュートリアルにようこそ」というメッセージを表示し、引数に別な値が渡されると、その値を表示します。 # + colab_type="code" id="Fh4VEHmMv9dp" outputId="d284fd9b-4a96-4039-f585-a01804e360e9" colab={"base_uri": "https://localhost:8080/", "height": 35} hello() # + colab_type="code" id="p2g-ULOWv9dp" outputId="9dd78b5c-4185-4f40-a40f-9d1bdfe3309a" colab={"base_uri": "https://localhost:8080/", "height": 35} hello('Welcome to Chainer tutorial') # + [markdown] colab_type="text" id="GWhAbt3ov9dq" # デフォルト値が与えられていない引数は、関数呼び出しの際に必ず何らかの値が渡される必要がありますが、デフォルト値を持つ場合は、何も指定しなくても関数を呼び出すことができるようになります。 # + [markdown] colab_type="text" id="DpwRYy4ov9dq" # ### 返り値のある関数 # # 上で定義した足し算を行う関数 `add()` では、計算結果を表示するだけで、計算結果を呼び出し元に戻していませんでした。 # そのため、このままでは計算結果を関数の外から利用することができません。 # # そこで、`add()` 関数の末尾に `return` 文を追加して、計算結果を呼び出し元に返すように変更してみましょう。 # + colab_type="code" id="ZCtzpxG1v9dq" colab={} # 返り値のある関数の定義 def add(a, b): return a + b # + [markdown] colab_type="text" id="M9c1_e-sv9dr" # このように、呼び出し元に返したい値を `return` に続いて書くと、その値が `add()` 関数を呼び出したところへ戻されます。 # `return` で返される値のことを**返り値 (return value)** と言います。 # # 以下に、計算結果を `result` という変数に格納し、表示する例を示します。 # + colab_type="code" id="RPJnjuL5v9dr" outputId="48546d3e-a444-4109-ddf0-7df7b14b9236" colab={"base_uri": "https://localhost:8080/", "height": 35} result = add(1, 3) result # + [markdown] colab_type="text" id="tagr61vUv9ds" # 計算結果が `result` に格納されているので、この結果を用いてさらに別の処理を行うことができます。 # + colab_type="code" id="Qe8Enb6Ev9ds" 
outputId="99526b92-37c3-4832-caef-f17aa45701d9" colab={"base_uri": "https://localhost:8080/", "height": 35} result = add(1, 3) result_doubled = result * 2 result_doubled # + [markdown] colab_type="text" id="fP4OdcI-v9ds" # また、返り値は「呼び出し元」に返されると書きました。 # この「呼び出し元」というのは、関数を呼び出す部分のことで、上のコードは `add(1, 3)` の部分が `4` という結果の値になり、それが左辺の `result` に代入されています。 # # これを用いると、例えば「2 と 3 を足した結果と、1 と 3 を足した結果を、掛け合わせる」という計算が、以下のように書けます。 # + colab_type="code" id="gs0hBjg6v9ds" outputId="4c76fe15-686d-4aed-f87c-ec55d177728c" colab={"base_uri": "https://localhost:8080/", "height": 35} add(2, 3) * add(1, 3) # + [markdown] colab_type="text" id="BJnkcc_Qv9dt" # ### 変数のスコープ # # 関数の中で定義した変数は基本的には関数の外では利用できません。 # 例えば、以下の例を見てみましょう。 # + colab_type="code" id="L8vtxEV7v9dt" outputId="509fbb4d-0b47-4f37-d38c-51018e6f19b2" colab={"base_uri": "https://localhost:8080/", "height": 35} a = 1 # 関数の内部で a に 2 を代入 def change(): a = 2 change() a # + [markdown] colab_type="text" id="Z9slKEKXv9du" # 関数の外で `a = 1` と初期化した変数と同じ名前の変数に対して、`change()` 関数の内部で `a = 2` という代入を行っているにもかかわらず、`change()` 関数の実行後にも関数の外側では `a` の値は 1 のままになっています。 # **関数の外側で定義された変数** `a` **に、関数内部での処理が影響していないことがわかります。** # # なぜこうなるかというと、関数の中で変数に値が代入されるとき、その変数はその関数の**スコープ (scope)** でだけ有効な**ローカル変数**になり、関数の外にある同じ名前の変数とは別のものを指すようになるためです。 # スコープとは、その変数が参照可能な範囲のことです。 # 上の例では、`a = 2` の代入を行った時点で`change()` 関数のスコープに `a` という変数が作られ、`change()` 関数の中からは `a` といえばこれを指すようになります。関数から抜けると、`a` は 1 を値に持つ外側の変数を指すようになります。 # # ただし、代入を行わずに、参照するだけであれば、関数の内側から外側で定義された変数を利用することができます。 # + colab_type="code" id="XgGcEUSQv9du" outputId="0871b4ef-d71a-4f3f-c556-fc6429e0ee94" colab={"base_uri": "https://localhost:8080/", "height": 53} a = 1 def change(): print('From inside:', a) change() print('From outside:', a) # + [markdown] colab_type="text" id="vAHe5xDGv9dv" # この場合は、`change()` 関数のスコープには `a` という変数は作られないので、関数の中で `a` といえば外側で定義された変数を指します。 # # 関数の外で定義された変数は**グローバル変数**と呼ばれます。 # グローバル変数は、特に特別な記述を要せず参照することはできますが、関数の中で**代入**を行う場合は、`global` 文を使って、代入先をグローバル変数とする宣言を行う必要があります。 # + 
colab_type="code" id="D1uNClamv9dv" outputId="d44e9ee4-8250-4236-9476-0cc610cb9572" colab={"base_uri": "https://localhost:8080/", "height": 35} a = 1 def change(): global a # a をグローバル変数である宣言 a = 2 # グローバル変数への代入 # 関数の実行 change() # 結果の確認 <- a の値が上書きされている a # + [markdown] colab_type="text" id="MSTyBWulv9dv" # `global a` という行を `change()` 関数内で `a` という変数を使用する前に追加すると、その行以降は `a` という変数への代入も関数の外側で定義されたグローバル変数の `a` に対して行われます。 # + [markdown] colab_type="text" id="FBe0bEnnVoE5" # ## クラス # # **オブジェクト指向プログラミング (object-oriented programming)** の特徴の一つである**クラス (class)** は、**オブジェクト (object)** を生成するための設計図にあたるものです。 # まず、クラスとは何か、オブジェクトとは何かについて説明します。 # # ここで、唐突に感じられるかもしれませんが、家を何軒も建てるときのことを考えましょう。 # それぞれの家の形や大きさ、構造は同じですが、表札に書かれている名前は異なっているとします。 # この場合、家の設計図は同じですが、表札に何と書くか、において多少の変更がそれぞれの家ごとに必要となります。 # この**全ての家に共通した設計図の役割を果たすのがクラス**です。 # そして、設計図は、家として現実に存在しているわけではありませんが、個別の家は、現実に家としての**実体**を持って存在しています。 # よって、**設計図に基づいて個別の家を建てる**ということを抽象的に言うと、**クラスから実体を作成する**、となります。 # クラスから作成された実体のことを**インスタンス (instance)** または**オブジェクト (object)** とも呼び、**クラスから実体を作成する**という操作のことを**インスタンス化 (instantiation)** と呼びます。 # + [markdown] colab_type="text" id="M1XEvf6Kv9dw" # ### クラスの定義 # # それでは、家の設計図を表す `House` というクラスを定義してみましょう。 # `House` クラスには、インスタンス化されたあとに、各インスタンス、すなわち誰か特定の人の家ごとに異なる値を持つ、`name_plate` という変数を持たせてみます。 # # `name_plate` という変数には、個別の家の表札に表示するための文字列が与えられますが、クラスを定義する際には「`name_plate` という変数を持つことができる」ようにしておくだけでよく、**実際にその変数に何か具体的な値を与える必要はありません。** # クラスは、**設計図**であればよく、具体的な値を持たせなくてもよいためです。 # 具体的な値は、個別の家を作成するとき、すなわちインスタンス化の際に与え、各インスタンスが `name_plate` という値に自分の家の表札の名前を保持するようにします。 # # このような、インスタンスに属している変数を**属性 (attribute)** と呼びます。同様に、インスタンスから呼び出すことができる関数のことを**メソッド (method)** と呼びます。 # # クラスは、以下のような構文を使って定義します。 # # ![クラス](images/02/02_11.png) # # 具体的には、以下のようになります。 # + colab_type="code" id="9ZrUz4kHv9dw" colab={} # クラスの定義 class House: # __init__() メソッドの定義 def __init__(self, name): self.name_plate = name # + [markdown] colab_type="text" id="151zWM4Dv9dw" # ここで、`__init__()` という名前のメソッドが `House` クラスの中に定義されています。 # 
メソッドの名前は自由に名付けることができますが、いくつか特別な意味を持つメソッド名が予め決められています。 # `__init__()` はそういったメソッドの一つで、**インスタンス化する際に自動的に呼ばれるメソッド**です。 # # `House` クラスの `__init__()` は、`name` という引数をとり、これを `self.name_plate` という変数に代入しています。 # この `self` というのは、クラスがインスタンス化されたあと、作成されたインスタンス自身を参照するのに用いられます。 # これを使って、`self.name_plate = name` とすることで、作成された個別のインスタンスに属する変数 `self.name_plate` へ、引数に渡された `name` が持つ値を代入することができます。 # `self` が指すものは、各インスタンスから見た「自分自身」なので、各インスタンスごとに異なります。 # これによって、`self.name_plate` は各インスタンスに紐付いた別々の値を持つものとなります。 # # メソッドは、インスタンスから呼び出されるとき自動的に第一引数にそのインスタンスへの参照を渡します。 # そのため、メソッドの第一引数は `self` とし、渡されてくる自分自身への参照を受け取るようにしています。 # ただし、呼び出す際には**そのインスタンスを引数に指定する必要はありません。** # 以下に具体例を示し、再度このことを確認します。 # # それでは、上で定義した `House` クラスのインスタンスを作成してみます。 # クラスのインスタンス化には、クラス名のあとに `()` を追加して、クラスを呼び出すような記法を使います。 # この際、関数を呼び出すときと同様にして、`()` に引数を渡すことができます。 # その引数は、`__init__()` メソッドに渡されます。 # + colab_type="code" id="PE3dT2ytv9dx" colab={} my_house = House('Chainer') # + [markdown] colab_type="text" id="9xHHa6rwv9dx" # `House` というクラスの `__init__()` メソッドに、`'Chainer'` という文字列を渡しています。 # `my_house` が、`House` クラスから作成されたインスタンスです。 # ここで、クラス定義では `__init__()` メソッドは `self` と `name` という 2 つの引数をとっていましたが、呼び出しの際には `'Chainer'` という一つの引数しか与えていませんでした。 # この `'Chainer'` という文字列は、1 つ目の引数であるにも関わらず、`__init__()` メソッドの定義では 2 つ目の引数であった `name` に渡されます。 # 前述のように、**メソッドは、インスタンスから呼び出されるとき自動的に第一引数にそのインスタンスへの参照を渡す**ためです。 # この自動的に渡される自身への参照は、呼び出しの際には明示的に指定しません。 # また、かならず 1 つ目の引数に自動的に渡されるため、呼び出し時に明示的に与えられた引数は 2 つ目以降の引数に渡されたものとして取り扱われます。 # # それでは次に、このクラスに `hello()` というメソッドを追加し、呼び出すと誰の家であるかを表示するという機能を実装してみます。 # + colab_type="code" id="k2Zcvs1xv9dx" colab={} # クラスの定義 class House: # __init__() の定義 def __init__(self, name): self.name_plate = name # メソッドの定義 def hello(self): print('{}の家です。'.format(self.name_plate)) # + [markdown] colab_type="text" id="oTQagqYdv9dz" # それでは、2 つのインスタンスを作成して、それぞれから `hello()` メソッドを呼び出してみます。 # + colab_type="code" id="MP8iXQM0v9dz" outputId="74495bc8-073e-4d46-addf-ee02f45ed8b8" colab={"base_uri": "https://localhost:8080/", "height": 53} sato 
= House('佐藤') suzuki = House('スズキ') sato.hello() # 実行の際には hello() の引数にある self は無視 suzuki.hello() # 実行の際には hello() の引数にある self は無視 # + [markdown] colab_type="text" id="_vI_Dqjdv9d0" # `sato` というインスタンスの `name_plate` 属性には、`'佐藤'` という文字列が格納されています。 # `suzuki` というインスタンスの `name_plate` 属性には、`'スズキ'` という文字列が格納されています。 # それぞれのインスタンスから呼び出された `hello()` メソッドは、`self.name_plate` に格納された別々の値を `print()` を用いて表示しています。 # # このように、同じ機能を持つが、インスタンスによって保持するデータが異なったり、一部の動作が異なったりするようなケースを扱うのにクラスを利用します。 # Python の `int` 型、`float` 型、`str` 型…などは、実際には `int` クラス、`float` クラス、`str` クラスであり、それらの中では個別の変数(インスタンス)がどのような値になるかには関係なく、同じ型であれば共通して持っている機能が定義されています。 # `5` や `0.3` や `'Chainer'` などは、それぞれ `int` クラスのインスタンス、`float` クラスのインスタンス、`str` クラスのインスタンスです。 # # 以上から、クラスを定義するというのは、**新しい型を作る**ということでもあると分かります。 # + [markdown] colab_type="text" id="0M1dmJSacA7g" # ### 継承 # # あるクラスを定義したら、その一部の機能を変更したり、新しい機能を付け足したりしたくなることがあります。 # これを実現する機能が**継承 (inheritance)** です。 # 例えば、`Link` というクラスを定義し、そのクラスを継承した `Chain` という新しいクラスを作ってみましょう。 # まず、`Link` クラスを定義します。 # + colab_type="code" id="wn9XJxjpv9d0" colab={} class Link: def __init__(self): self.a = 1 self.b = 2 # + [markdown] colab_type="text" id="Qn0fSwvyv9d1" # この `Link` というクラスは、インスタンス化を行う際には 1 つも引数をとりませんが、属性として `a` と `b` の 2 つの変数を保持し、それぞれには `__init__()` メソッドで 1 と 2 という値が代入されます。 # このクラスのインスタンスを作成してみます。 # + colab_type="code" id="BsfYK88qv9d1" outputId="fe561266-04ce-4e67-9c08-79027499b92f" colab={"base_uri": "https://localhost:8080/", "height": 35} l = Link() l.a # + colab_type="code" id="JA6sXOrvv9d2" outputId="e58234d5-268a-4668-e946-e9fe18ef7c23" colab={"base_uri": "https://localhost:8080/", "height": 35} l.b # + [markdown] colab_type="text" id="DxipToKmv9d2" # `l` という `Link` クラスのインスタンスが持つ 2 つの属性を表示しています。 # インスタンス化を行った際に `__init__()` メソッドの中で代入していた値が、表示されています。 # # 次に、このクラスを**継承**する、`Chain` というクラスを定義してみます。 # 継承を行う場合は、クラス定義の際にクラス名に続けて `()` を書き、その中にベースにしたいクラスの名前を書きます。 # `()` の中に書かれたクラスのことを、定義されるクラスの**親クラス**といいます。 # それに対し、`()` の中に書かれたクラスからみると、定義されるクラスは**子クラス**と呼ばれます。 # 
親から子へ機能が受け継がれるためです。 # + colab_type="code" id="A4eTa6jZv9d3" colab={} class Chain(Link): def sum(self): return self.a + self.b # + [markdown] colab_type="text" id="dhxR4J0mv9d3" # `Chain` クラスは `__init__()` メソッドの定義を持ちません。 # `__init__()` メソッドが定義されていない場合、親クラスの `__init__()` メソッドが自動的に呼び出されます。 # そのため、`Chain` クラスでは一見何も属性を定義していないように見えますが、インスタンス化を行うと親クラスである `Link` の `__init__()` メソッドが自動的に実行され、`a`、`b` という属性が定義されます。 # 以下のコードで確認してみましょう。 # + colab_type="code" id="ipmqPwsKv9d3" outputId="2c340a24-3414-4c84-da5f-48f911366135" colab={"base_uri": "https://localhost:8080/", "height": 35} # Chain クラスをインスタンス化 c = Chain() c.a # + colab_type="code" id="lWL0VTMOv9d4" outputId="0342c542-8a1a-4793-f54e-496f31a8a5ba" colab={"base_uri": "https://localhost:8080/", "height": 35} c.b # + [markdown] colab_type="text" id="nVjBNK1Cv9d4" # `Chain` クラスの `sum()` メソッドでは、この親クラスの `__init__()` メソッドで定義されている 2 つの属性を足し合わせて返しています。 # 今作成したインスタンスから、この `sum()` メソッドを呼び出してみます。 # + colab_type="code" id="pEf10A44v9d5" outputId="18759ed5-2255-473c-d7c5-ab922b8d1491" colab={"base_uri": "https://localhost:8080/", "height": 35} # sum メソッドを実行 c.sum() # + [markdown] colab_type="text" id="QI1v_Ib0v9d5" # このように、**親クラスを継承し、親クラスに無かった新しい機能が追加された、新しいクラスを定義することができます。** # # それでは、この `Chain` というクラスにも `__init__()` メソッドを定義して、新しい属性 `c` を定義し、`sum()` メソッドでは親クラスの `a`、`b` という属性とこの新たな `c` という属性の 3 つの和を返すように変更してみます。 # + colab_type="code" id="ds9A6Xfpv9d5" colab={} class Chain(Link): def __init__(self): self.c = 5 # self.c を新たに追加 def sum(self): return self.a + self.b + self.c # インスタンス化 C = Chain() # + colab_type="code" id="_2_7avSHv9d6" outputId="c80707d3-ae9d-4b24-a0c9-ee6c492ed6c7" colab={"base_uri": "https://localhost:8080/", "height": 296} # error C.sum() # + [markdown] colab_type="text" id="ZfTe1NMUv9d6" # エラーが出ました。 # # **エラーメッセージを読みましょう。** # # > AttributeError: 'Chain' object has no attribute 'a' # # `'Chain'` というオブジェクトは、`'a'` という名前の属性を持っていない、と言われています。 # `a` という属性は、`Chain` の親クラスである `Link` の `__init__()` メソッドで定義されています。 # そのため、`Chain` 
クラスをインスタンス化する際に、親クラスである `Link` の `__init__()` メソッドが呼ばれているのであれば、このエラーは起こらないはずです。 # なぜエラーとなってしまったのでしょうか。 # # それは、`Chain` クラスにも `__init__()` メソッドを定義したため、親クラスである `Link` の `__init__()` メソッドが上書きされてしまい、実行されなかったためです。 # しかし、親クラスの `__init__()` メソッドを明示的に呼ぶことで、これは解決できます。 # # それには、`super()` という組み込み関数を用います。 # これを用いると、子クラスから親クラスを参照することができます。 # + colab_type="code" id="_NcT6aN7v9d6" colab={} class Chain(Link): def __init__(self): # 親クラスの `__init__()` メソッドを呼び出す super().__init__() # self.c を新たに追加 self.c = 5 def sum(self): return self.a + self.b + self.c # インスタンス化 c = Chain() # + colab_type="code" id="FW__tTtGv9d7" outputId="4fab420e-d2af-4d1f-9c3a-f3e5a1fe5820" colab={"base_uri": "https://localhost:8080/", "height": 35} c.sum() # + [markdown] colab_type="text" id="HcDciFKRv9d7" # 今回はエラーが起きませんでした。 # `Link` クラスの `__init__()` メソッドの冒頭で、まず親クラスの `__init__()` メソッドを実行し、`a`、`b` という属性を定義しているためです。 # # あるクラスを継承して作られたクラスを、さらに継承して別のクラスを定義することもできます。 # + colab_type="code" id="RmL3FvD-v9d7" colab={} class MyNetwork(Chain): def mul(self): return self.a * self.b * self.c # + [markdown] colab_type="text" id="21eb0EOKv9d8" # `MyNetwork` クラスは、`Link` クラスを継承した `Chain` クラスをさらに継承したクラスで、`a`、`b`、`c` という 3 つの属性を掛け合わせた結果を返す `mul()` というメソッドを持ちます。 # # このクラスのインスタンスを作成し、`mul()` を実行してみましょう。 # + colab_type="code" id="Cq4lJIm3v9d8" outputId="afcc0ba0-f917-41ca-d06a-64b42e1e2456" colab={"base_uri": "https://localhost:8080/", "height": 35} net = MyNetwork() net.mul() # + [markdown] colab_type="text" id="VujV1nqfv9d8" # $1 \times 2 \times 5 = 10$ が返ってきました。 # + [markdown] colab_type="text" id="mIPsXEy0v9d9" # 以上で、Python の基本についての解説を終了します。 # Python には他にもここでは紹介されていない多くの特徴や機能があります。 # さらに詳しく学びたい方は、[Pythonチュートリアル](https://docs.python.org/ja/3/tutorial/index.html) などを参照してください。
ja/02_Basics_of_Python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo: build ImageNet-pretrained models from the `pretrainedmodels` package
# and run a single image through one of them.

import pretrainedmodels

# List every architecture the package can build.
print(pretrainedmodels.model_names)

model_name = 'nasnetalarge'  # could be fbresnet152 or inceptionresnetv2
model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
model.eval()  # inference mode: disables dropout, freezes batch-norm statistics

# +
import torch
import pretrainedmodels.utils as utils

load_img = utils.LoadImage()

# transformations depending on the model
# rescale, center crop, normalize, and others (ex: ToBGR, ToRange255)
tf_img = utils.TransformImage(model)

path_img = 'a1.jpg'

input_img = load_img(path_img)
input_tensor = tf_img(input_img)          # 3x400x225 -> 3x299x299 size may differ
input_tensor = input_tensor.unsqueeze(0)  # 3x299x299 -> 1x3x299x299

# `torch.autograd.Variable` has been deprecated since PyTorch 0.4 — tensors are
# fed to the model directly.  `torch.no_grad()` replaces the old
# `requires_grad=False` for inference and also skips building the autograd
# graph entirely.  (The previous name `input` shadowed the built-in of the
# same name, so the tensor is simply reused here.)
with torch.no_grad():
    output_logits = model(input_tensor)  # 1x1000
# -

model_name = 'fbresnet152'  # could be fbresnet152 or inceptionresnetv2
model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
model.eval()
Load_Pretrained_ImageNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Cross Compilation and RPC # ========================= # **Author**: `<NAME> <https://github.com/ZihengJiang/>`_, `<NAME> <https://github.com/merrymercy/>`_ # # This tutorial introduces cross compilation and remote device # execution with RPC in TVM. # # With cross compilation and RPC, you can **compile a program on your # local machine then run it on the remote device**. It is useful when # the remote device resource are limited, like Raspberry Pi and mobile # platforms. In this tutorial, we will use the Raspberry Pi for a CPU example # and the Firefly-RK3399 for an OpenCL example. # # Build TVM Runtime on Device # --------------------------- # # The first step is to build the TVM runtime on the remote device. # # <div class="alert alert-info"><h4>Note</h4><p>All instructions in both this section and the next section should be # executed on the target device, e.g. Raspberry Pi. We assume the target # is running Linux.</p></div> # # Since we do compilation on the local machine, the remote device is only used # for running the generated code. We only need to build the TVM runtime on # the remote device. # # .. code-block:: bash # # git clone --recursive https://github.com/apache/tvm tvm # cd tvm # make runtime -j2 # # After building the runtime successfully, we need to set environment variables # in :code:`~/.bashrc` file. We can edit :code:`~/.bashrc` # using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM # directory is in :code:`~/tvm`): # # .. code-block:: bash # # export PYTHONPATH=$PYTHONPATH:~/tvm/python # # To update the environment variables, execute :code:`source ~/.bashrc`. 
# # # Set Up RPC Server on Device # --------------------------- # To start an RPC server, run the following command on your remote device # (Which is Raspberry Pi in this example). # # .. code-block:: bash # # python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090 # # If you see the line below, it means the RPC server started # successfully on your device. # # .. code-block:: bash # # INFO:root:RPCServer: bind to 0.0.0.0:9090 # # # # Declare and Cross Compile Kernel on Local Machine # ------------------------------------------------- # # <div class="alert alert-info"><h4>Note</h4><p>Now we go back to the local machine, which has a full TVM installed # (with LLVM).</p></div> # # Here we will declare a simple kernel on the local machine: # # # + import numpy as np import tvm from tvm import te from tvm import rpc from tvm.contrib import utils n = tvm.runtime.convert(1024) A = te.placeholder((n,), name="A") B = te.compute((n,), lambda i: A[i] + 1.0, name="B") s = te.create_schedule(B.op) # - # Then we cross compile the kernel. # The target should be 'llvm -mtriple=armv7l-linux-gnueabihf' for # Raspberry Pi 3B, but we use 'llvm' here to make this tutorial runnable # on our webpage building server. See the detailed note in the following block. # # # + local_demo = True if local_demo: target = "llvm" else: target = "llvm -mtriple=armv7l-linux-gnueabihf" func = tvm.build(s, [A, B], target=target, name="add_one") # save the lib at a local temp folder temp = utils.tempdir() path = temp.relpath("lib.tar") func.export_library(path) # - # <div class="alert alert-info"><h4>Note</h4><p>To run this tutorial with a real remote device, change :code:`local_demo` # to False and replace :code:`target` in :code:`build` with the appropriate # target triple for your device. The target triple which might be # different for different devices. For example, it is # :code:`'llvm -mtriple=armv7l-linux-gnueabihf'` for Raspberry Pi 3B and # :code:`'llvm -mtriple=aarch64-linux-gnu'` for RK3399. 
# # Usually, you can query the target by running :code:`gcc -v` on your # device, and looking for the line starting with :code:`Target:` # (Though it may still be a loose configuration.) # # Besides :code:`-mtriple`, you can also set other compilation options # like: # # * -mcpu=<cpuname> # Specify a specific chip in the current architecture to generate code for. By default this is inferred from the target triple and autodetected to the current architecture. # * -mattr=a1,+a2,-a3,... # Override or control specific attributes of the target, such as whether SIMD operations are enabled or not. The default set of attributes is set by the current CPU. # To get the list of available attributes, you can do: # # .. code-block:: bash # # llc -mtriple=<your device target triple> -mattr=help # # These options are consistent with `llc <http://llvm.org/docs/CommandGuide/llc.html>`_. # It is recommended to set target triple and feature set to contain specific # feature available, so we can take full advantage of the features of the # board. # You can find more details about cross compilation attributes from # `LLVM guide of cross compilation <https://clang.llvm.org/docs/CrossCompilation.html>`_.</p></div> # # # Run CPU Kernel Remotely by RPC # ------------------------------ # We show how to run the generated CPU kernel on the remote device. # First we obtain an RPC session from remote device. # # if local_demo: remote = rpc.LocalSession() else: # The following is my environment, change this to the IP address of your target device host = "10.77.1.162" port = 9090 remote = rpc.connect(host, port) # Upload the lib to the remote device, then invoke a device local # compiler to relink them. Now `func` is a remote module object. 
# # # + remote.upload(path) func = remote.load_module("lib.tar") # create arrays on the remote device ctx = remote.cpu() a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx) # the function will run on the remote device func(a, b) np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) # - # When you want to evaluate the performance of the kernel on the remote # device, it is important to avoid the overhead of network. # :code:`time_evaluator` will returns a remote function that runs the # function over number times, measures the cost per run on the remote # device and returns the measured cost. Network overhead is excluded. # # time_f = func.time_evaluator(func.entry_name, ctx, number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) # Run OpenCL Kernel Remotely by RPC # --------------------------------- # For remote OpenCL devices, the workflow is almost the same as above. # You can define the kernel, upload files, and run via RPC. # # <div class="alert alert-info"><h4>Note</h4><p>Raspberry Pi does not support OpenCL, the following code is tested on # Firefly-RK3399. You may follow this `tutorial <https://gist.github.com/mli/585aed2cec0b5178b1a510f9f236afa2>`_ # to setup the OS and OpenCL driver for RK3399. # # Also we need to build the runtime with OpenCL enabled on rk3399 board. In the TVM # root directory, execute</p></div> # # .. code-block:: bash # # cp cmake/config.cmake . # sed -i "s/USE_OPENCL OFF/USE_OPENCL ON/" config.cmake # make runtime -j4 # # The following function shows how we run an OpenCL kernel remotely # # def run_opencl(): # NOTE: This is the setting for my rk3399 board. You need to modify # them according to your environment. 
target_host = "llvm -mtriple=aarch64-linux-gnu" opencl_device_host = "10.77.1.145" opencl_device_port = 9090 # create schedule for the above "add one" compute declaration s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.x")) func = tvm.build(s, [A, B], "opencl", target_host=target_host) remote = rpc.connect(opencl_device_host, opencl_device_port) # export and upload path = temp.relpath("lib_cl.tar") func.export_library(path) remote.upload(path) func = remote.load_module("lib_cl.tar") # run ctx = remote.cl() a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx) func(a, b) np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1) print("OpenCL test passed!") # Summary # ------- # This tutorial provides a walk through of cross compilation and RPC # features in TVM. # # - Set up an RPC server on the remote device. # - Set up the target device configuration to cross compile the kernels on the # local machine. # - Upload and run the kernels remotely via the RPC API. # #
_downloads/b20f5152cdb316ef2deae48432f548b7/cross_compilation_and_rpc.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Micro-benchmark: measure how long `sort` takes to order one million
# randomly scored FoodRecommendation objects.

from sort_recommendations import sort
from FoodItem import FoodRecommendation
import random
import time

# One million recommendations, ids 0..999999, each with a random
# score in [0, 1000).
ftArray = [FoodRecommendation(item_id, random.randrange(0, 1000))
           for item_id in range(1000000)]

start = time.perf_counter()
sort(ftArray)
end = time.perf_counter()

print(f"Done in {end - start:0.4f} seconds")
recommender/sort_food_recommendations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PythonData
#     language: python
#     name: pythondata
# ---

# Matplotlib practice: the same monthly fare data drawn as line, bar,
# scatter, pie and errorbar charts.

# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import statistics
import pandas as pd

np.arange(0, 22, step=2)

# +
# Set the x-axis to a list of strings for each month.
x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"]

# Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month.
y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09]

# Create plot and labels and title
plt.plot(x_axis, y_axis, marker='d', color='green', linewidth=1, label='Boston')
plt.xlabel('Date')
plt.ylabel('Fare($)')
plt.ylim(0, 45)
# NOTE(review): with a categorical x-axis, xlim(0, 8) shows only Jan–Sept —
# confirm that truncating the year is intentional.
plt.xlim(0, 8)
plt.title('PyBer Fare by Month')
plt.legend()

# Add a grid
plt.grid()
# -

# Create a bar chart with the same data, labels and title.
# barh is horizontal: fares run along x, months along y — the axis labels
# were previously swapped (xlim(0, 45) already matched the fare axis).
plt.barh(x_axis, y_axis, label='Boston', color='green')
plt.xlabel('Fare($)')
plt.ylabel('Date')
plt.xlim(0, 45)
plt.title('PyBer Fare by Month')
plt.legend()

plt.barh(x_axis, y_axis, color='magenta', label='Boston')
plt.gca().invert_yaxis()  # list the months top-to-bottom
plt.legend()
plt.xticks(np.arange(0, 46, step=5))
plt.title('PyBer Fare by Month')
plt.xlabel('Fare($)')
plt.ylabel('Date')

# Create a scatter plot with one marker size per point.
# Bug fix: the original `s=100*[10, 200, ...]` repeated the 12-element list
# 100 times (1200 sizes for 12 points, which matplotlib rejects).  The list
# of per-point areas is passed directly; if a uniform scale factor was
# actually intended, multiply element-wise instead — TODO confirm.
plt.scatter(x_axis, y_axis,
            s=[10, 200, 300, 420, 55, 600, 70, 25, 45, 500, 20, 100],
            c='skyblue', edgecolor='black', alpha=0.8, label='Boston')
plt.xlabel('Date')
plt.ylabel('Fare($)')
plt.legend()
plt.title('PyBer Fare by Month')

# +
plt.subplots(figsize=(8, 8))

# Assign 12 colors, one for each month.
colors = ["slateblue", "magenta", "lightblue", "green", "yellowgreen", "greenyellow",
          "yellow", "orange", "gold", "indianred", "tomato", "mistyrose"]
# Pull the 4th and 7th wedges out of the pie.
explode_values = (0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0, 0, 0)

plt.pie(y_axis, explode=explode_values, labels=x_axis, colors=colors, autopct='%.1f%%')
plt.show()
# -

# +
# Get the standard deviation of the values in the y-axis.
stdev = statistics.stdev(y_axis)

plt.errorbar(x_axis, y_axis, yerr=stdev, capsize=3)
# -

fig, ax = plt.subplots()
ax.errorbar(x_axis, y_axis, yerr=stdev, capsize=3)
plt.show()

plt.bar(x_axis, y_axis, yerr=stdev, capsize=3)

# Create the plot with ax.plot
fig1, ax = plt.subplots()
ax.plot(x_axis, y_axis)

# Create the plot with ax.bar
fig, ax = plt.subplots()
ax.bar(x_axis, y_axis)
matplotlib_practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Language Level Classification # --------- # ## Dimensionality Reduction through Feature Agglomeration of Weak Features # # # <NAME> <br> # February 2021<br> # Project: Language Level Analysis and Classification <br> # # # ## Project Description # # *lang-level* is a tool for **identifying linguistic constructions and characteristics** in order to determine the **German language level** of the writer (or target reader). It also provides a **visualization** of the found characteristics in text. Furthermore, the extracted features can be used to train an SVM classifier for **language level or author classification**, which clearly outperforms the baseline. # # ### Features # # The **80 manually designed linguistic features** include information about part-of-speech, tempus, mode, use of objects, subjunctive clauses, or passive voice. The tool returns information about the frequency occurrence of these linguistic features, which can be used for numerically analyzing texts. It also provides an option for visualizing the results by highlighting the features interest for any given text. # # <a href="url"><img src="figures/text_visualization.png" width="800"></a> # # # # # # ---- # # # ### Classification # # Experiments with a linear support vector classifier trained on these features demonstrate that this approach is **fit for assigning language levels to unseen texts**. After investigating the feature sparsity and relevance (computed by the coefficient strength), weaker features are merged by means of **feature agglomeration** in order to reduce feature space. This raises the performance to 93% accuracy. 
# # # We chose a linear model based on the assumption that the levels A1-B2 are linearly separable since they represent increasing levels of language skills. Indeed, experiments show that a non-linear SVC with a radial # basis function (RBF) kernel does not outperform a linear model. The simple linear SVC is highly advantageous as it takes considerably less training time and allows inspection of the coefficient of each feature, revealing which features contribute most to the prediction. # # ### Project Steps # # 1. Collection of 264 German texts representing four different language levels (A1-B2) # 2. Feature extraction (linguistic constructions and characteristics) # 3. Visualization # 4. SVM classifier for language levels and authors # 5. Feature selection/agglomeration # # # ---- # # ### Structure of this Jupyter Notebook<a id='top'></a> # # This Jupyter Notebook focuses on the project steps 4 and 5. It shows how load and preprocess the data and then train and optimize a classifier for language level classification (or author clasisfication) based on linguistic features. In order to reduce dimensionality, we agglomerate weaker features into feature clusters. 
# # Language_Level_Classification_with_Feature_Agglomeration.ipynb # # - [Loading, Analysing and Preprocessing](#part1) # - Load the data # - Inspect and preprocess the data # - Principal Component Analysis # # - [Training and Evaluating an SVM Classifier](#part2) # - Train a linear SVM classifier # - Cross-validation # - Hyper-parameter screening # # - [Feature Agglomeration](#agglo) # - Find sparse and less relevant features # - Agglomerate features into clusters # - Evaluate new feature sets # # - [Results and Discussion](#disc) # - Interpret results # - Discuss findings # # # ## Loading, Analysing and Preprocessing the Data<a id='part1'></a> # [[back to top]](#top) # # In this section, # - we import the necessary packages and modules # - we load the precomputed linguistic features for all samples # - we look at the data saved as pandas dataframe and visualize the distributions # - we standardize the features by removing the mean and scaling to unit variance # ### Imports # # Required packages/libraries: # # - scikit-learn # - pandas # - numpy # - matplotib # - ipywidgets # # Custom modules can be found [here](https://github.com/athrado/language_levels): # - language_level_features # - data_visualization # # You can find the `requirements.txt` in the project folder. 
# + # Import Statements import sklearn from sklearn import svm from sklearn import cluster from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split from sklearn.metrics import make_scorer from sklearn.metrics import recall_score import pandas as pd import numpy as np # %matplotlib inline import matplotlib.pyplot as plt from ipywidgets import interact # Import own modules import language_level_features as lang_levels import data_visualization as data_vis # - # ### Settings # # There are two pre-processed datasets available: # - **lang_levels**: 264 German texts from language levels A1, A2, B1 and B2 # - **classical_lit**: 7365 German classical literature texts by 20 different authors # # By default 80 linguistic features are loaded.<br> # For the baseline version, only two features are considered (words/sentence ratio and LIX score). # + # Technical settings RANDOM_SEED = 42 # Data and version settings dataset_name = "lang_levels" # or: classical_lit # - # ### Load dataset # # Load the linguistic and baseline features for the dataset of choice. # + # Load linguistic features and labels dataset, solutions, label_set = lang_levels.load_data(dataset_name=dataset_name, baseline=False) feature_names = dataset.columns # Load the baseline features baseline_features, _, _ = lang_levels.load_data(dataset_name=dataset_name, baseline=True) # Print dataset size print("N samples: {}".format(dataset.shape[0])) print("N linguistic features: {}".format(dataset.shape[1])) print("N baseline features: {}".format(baseline_features.shape[1])) # - # ### Analyse dataset # # Inspect the data both as tabular representation and as histograms. # Show 5 samples dataset.head() # Get statitics about features dataset.describe() # ### Analysis with Histograms # # Plot the histograms for all features. This helps finding features that may not be suitable, for instance because of sparsity. 
# + # Plot the histograms for the features dataset.hist(bins=50, figsize=(15,30), layout=(20,4)) # Save and plot data_vis.save_fig("attribute_histogram_plots") plt.show() # - # ### Zero features # # The histograms show that some features have only zeros values. Here we count how many features consist of only zeros (or near zeros). We find that possessive pronouns, reflexive pronouns and future 1 forms do not appear in any of the language level texts. # + # Get mask for features with only zero values mask = (np.isclose(dataset, 0.0)).all(axis=0) # Get zero features zero_features = dataset.loc[:, mask] # How many/what zero features? print("Number of zero features: {}".format(zero_features.shape[1])) print("Names of zero features: {}".format(", ".join(list(zero_features.columns)))) # - # ### Feature scaling # # We standardize the features by removing the mean and scaling to unit variance. # + # Rename the feature array features = dataset # Scale features scaler = sklearn.preprocessing.StandardScaler() features = scaler.fit_transform(features) # Plot and save features after scaling plt.boxplot(features) data_vis.save_fig("post_scaling") plt.show() # - # ### Principal Component Analysis # # Here we perform a principal component analysis for our data. Later on we use the transformed features as a new feature set for classification. # # PCA is by far the most common dimensionality reduction algorithm. It identifies the hyperplane that lies closes to the data and then projects the data onto it. 
# + # Principal component analysis pca = PCA() pca_transformed = pca.fit_transform(features) if dataset_name == "lang_levels": data_vis.plot_PCA(pca, pca_transformed, features, solutions) # Get top components (with combined explained variance ratio of 95%) pca_95 = PCA(n_components=0.95) pca_reduced_features = pca_95.fit_transform(features) print("Number of features after PCA: {}".format(pca_reduced_features.shape[1])) # - # ## Training and Evaluating an SVM Classifier<a id='part2'></a> # # [[back to top]](#top) # # In this section, we will train and evaluate a linear SVM classifier using the linguistic features for language level analysis. # # - We split the data into train, val and test set with similar label distributions # - We train a linear SVM classifier # - We evaluate on train and dev set and plot the confusion matrix # - We evaluate using cross-validation # - We also perform hyper-parameter screening # # # ### Split the data into train, val and test set # # Split the data and make sure you each set contains a similar amount of samples for each label. def get_train_val_test_split(features, solutions=None, train_ratio=0.8, val_ratio=0.1, test_ratio=0.1): """Split data (features) into train, validation/development and test set according to given ratios. If solutions/labels are given, equally statify the among the classes. 
Parameters: features (numpy array) -- dataset features Keyword arguments: solutions (numpy array) -- dataset solutions, default=None train_ratio (float) -- relative size of training set, default = 80% val_ratio (float) -- relative size of validation set, default = 10 test_ratio (float) -- relatize size of test set, default = 10% Return: X_train (numpy array) -- features of training set X_val (numpy array) -- features of validation/develepment set X_test (numpy array) -- features of test set y_train (numpy array) -- labels of training set y_val (numpy array) -- labels of validation/development set y_test (numpy array) -- labels of test set""" # Check whether ratios add up to 1.0 ratio_sum = train_ratio + val_ratio + test_ratio # Raise error if ratios are set incorrectly if ratio_sum != 1.0: raise IOError("Train, val and test ratios have to sum up to 1.0.") # Split the data a first time into train and test set (= validation+test set) X_train, X_test, y_train, y_test = train_test_split(features, solutions, train_size=train_ratio, stratify=solutions, random_state=RANDOM_SEED, shuffle=True) # Set test size test_size = test_ratio/(test_ratio + val_ratio) # Split data (previous "test") a second time into validation and test set X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=test_size, stratify=y_test, random_state=RANDOM_SEED, shuffle=True) # Return all sets return X_train, X_val, X_test, y_train, y_val, y_test # + # Get training, validation and test data X_train, X_val, X_test, y_train, y_val, y_test = get_train_val_test_split(features, solutions) # Print size print('Train size: {}'.format(y_train.shape[0])) print('Val size: {}'.format(y_val.shape[0])) print('Test size: {}'.format(y_test.shape[0])) # Print distribution of labels/classes among sets print('\nLabel distributions\n********************') print("\nTrain set:", np.unique(y_train,return_counts=True)[1]) print("Val set: ", np.unique(y_val, return_counts=True)[1]) print("Test set: ", 
np.unique(y_test, return_counts=True)[1]) # - # ### Linear SVM Classifier # # Initialize a linear SVM classifier and fit it using the training data. # + # Set the classifier classifier = svm.LinearSVC(random_state=RANDOM_SEED, max_iter=3000) # Fit the data classifier.fit(X_train, y_train) # - # ### Evaluation # # We evaluate the classifier on the training and validation data. # Accuracy is only one way of inspecting the performance of our system. We can also compute the confusion matrix and check which classes were predicted correctly and which were mixed up. For instance, B2 texts are sometimes confused as B1 texts. # # Both accuracies and confusion matrices show that the classifier overfits the training data. Overfitting can be counteracted using the following methods: # # - Adding more training data (not possible here) # - Cross-validation # - Adjust the regularization parameter C # - Dimensionality reduction # + # Predict the train and val set pred_train = classifier.predict(X_train) pred_val = classifier.predict(X_val) pred_test = classifier.predict(X_test) # Print Accuracies print('Accuracy train: {}%'.format(np.round(sklearn.metrics.accuracy_score(y_train, pred_train)*100),2)) print('Accuracy val: {}%'.format(np.round(sklearn.metrics.accuracy_score(y_val, pred_val)*100),2)) print('Accuracy test: {}%'.format(np.round(sklearn.metrics.accuracy_score(y_test, pred_test)*100),2)) # - # ### Confusion matrix # # The confusion matrix shows the ground truths and prediction and lets find out which labels were often confused. # + # Plot the confusion matrix for training set data_vis.plot_confusion_matrix(y_train, pred_train, label_set, 'Training set') # Plot the confusion matrix for validation set data_vis.plot_confusion_matrix(y_val, pred_val, label_set, 'Validation set') # - # ### Cross-Validation # # Evaluating on one singe validation or test set can be problematic if the set is very small, as it is the case here. 
# Different validation sets yield different performance scores, as we see above with 85% and 88%.
#
# Cross-validation uses different splits of the data as training and test data, which gives a more balanced evaluation.

def cross_validation(classifier, features, solutions, hold_out_test_set=False,
                     train_ratio=0.8, n_split=20):
    """Get performance scores for cross validation.

    Arguments:
    classifier (sklearn model) -- classifier/model to evaluate (does not have to be fitted)
    features (np.array) -- features for training
    solutions (np.array) -- correct labels

    Keyword arguments:
    hold_out_test_set (Boolean) -- whether or not to split data and hold out test set (default = False)
    train_ratio (float) -- ratio of training size (default = 0.8)
    n_split (int) -- number of cross validation splits; NOTE: currently
        overridden by the dataset-specific values below (50 for
        "lang_levels", 10 otherwise)

    Return:
    train_acc (float) -- mean accuracy on training sets
    test_acc (float) -- mean accuracy on test sets
    scores (dict) -- dictionary with macro averaged precision, recall, accuracy and F1 score"""

    # The number of splits is fixed per dataset (reads the module-level
    # `dataset_name`); the `n_split` argument is kept for interface
    # compatibility but is not consulted here.
    if dataset_name == 'lang_levels':
        n_splits = 50
    else:
        n_splits = 10

    # If test set has to be left untouched
    if hold_out_test_set:

        # Get train, val and test split.
        # Bug fix: this used to call the non-existent name
        # `get_train_dev_test_split`, raising a NameError whenever
        # hold_out_test_set=True.
        X_train, X_val, X_test, y_train, y_val, y_test = get_train_val_test_split(features, solutions)

        # Concatenate train and val set for cross validation
        # EXCLUDE test set!
        X_train_val = np.concatenate([X_train, X_val])
        y_train_val = np.concatenate([y_train, y_val])

    # Otherwise use full feature set
    else:
        X_train_val = features
        y_train_val = solutions

    # Set metrics of interest
    scoring = {'precision_macro': 'precision_macro',
               'recall_macro': 'recall_macro',
               'accuracy': 'accuracy',
               'F1_macro': 'f1_weighted'}

    # Set up cross validation splits.  `train_ratio` was previously ignored
    # in favour of a hard-coded 0.8; honouring it is backward-compatible
    # because the default is the same 0.8.
    cross_val = sklearn.model_selection.ShuffleSplit(n_splits=n_splits,
                                                     train_size=train_ratio,
                                                     random_state=RANDOM_SEED)

    # Get accuracies over all cross validations
    scores = sklearn.model_selection.cross_validate(classifier, X_train_val, y_train_val,
                                                    cv=cross_val, scoring=scoring,
                                                    n_jobs=5, return_train_score=True)

    # Get mean training and test accuracy
    train_acc = np.round(np.mean(scores["train_accuracy"]), 4)
    test_acc = np.round(np.mean(scores["test_accuracy"]), 4)

    # Return train/test accuracy and all collected scores
    return train_acc, test_acc, scores


# +
# Merge train and val set for cross validation
X_train_val = np.concatenate([X_train, X_val])
y_train_val = np.concatenate([y_train, y_val])

# Get cross validation results
train_acc, test_acc, scores = cross_validation(classifier, X_train_val, y_train_val)

# Print accuracies
print('Training accuracy: {}'.format(train_acc))
print('Val accuracy: \t {}'.format(test_acc))
# -

# ### Hyper-parameter screening
#
# Since we clearly overfit the training data, it's a good idea to find a better value for the regularization parameter (*C*). **A smaller hyper-parameter *C* leads to a wider margin area but more margin violations**.
# We also optimize the tolerance for stopping criteria (*t*).
#
# The hyper-parameter screening yields the optimal values and improves the accuracy by more than 1%.
# + # Import (warning ignoring) from sklearn.utils.testing import ignore_warnings from sklearn.exceptions import ConvergenceWarning @ignore_warnings(category=ConvergenceWarning) def parameter_screening(classifier, X_train, y_train, parameter_dict): """Find the best parameter settings for a given classifier using grid search. Paramters: classifier (sklearn model) -- classifier/model (e.g. sklearn.svm._classes.LinearSVC) parameter_dict (dict) -- dictionary containing parameters and values to screen Return: para_search.best_score_ (np.float) -- highest score achieved by Grid Search para_search.best_params_ (dict) -- parameters that yield highest score""" # Set up cross validation cross_val = sklearn.model_selection.ShuffleSplit(n_splits=50, train_size=0.8, random_state=RANDOM_SEED) # Find most suitable parameters using Grid Search para_search = sklearn.model_selection.GridSearchCV(classifier, parameter_dict, cv=cross_val) para_search.fit(X_train, y_train) # Return best score and its parameters return para_search.best_score_, para_search.best_params_ # + # Set a classifier classifier = svm.LinearSVC(random_state=RANDOM_SEED, max_iter=2000) # Get performance before hyper-parameter screening train_acc, test_acc, scores = cross_validation(classifier, X_train_val, y_train_val) # Printing results print("Accuracy before parameter screening") print('Training accuracy: {}'.format(train_acc)) print('Validation accuracy: \t {}'.format(test_acc)) # Set ranges of paramters to screen parameter_dict = {'C': [0.05, 0.1, 0.5, 1.0, 1.5, 2.0, 2.5], 'tol': [0.0001, 0.001, 0.01, 0.1, 1.0]} # Parameter screening to retrieve best hyper-paramters (C, tol) best_score, best_params = parameter_screening(classifier, X_train_val, y_train_val, parameter_dict) # Print best results print('\nBest parameters:', best_params, '\n') # Train new classifier with ideal hyper-parameters classifier = svm.LinearSVC(random_state=RANDOM_SEED, max_iter=2000, C=best_params["C"], tol=best_params["tol"]) # # Get 
performance after hyper-parameter screening train_acc, test_acc, scores = cross_validation(classifier, X_train_val, y_train_val) # Printing results print("Accuracy before parameter screening") print('Training accuracy: \t{}'.format(train_acc)) print('Validation accuracy: \t{}'.format(test_acc)) # - # ### Problem # # The SVM classifier is **highly overfitting** the training data. The large number of features (80) given the relatively small number of samples (264) reduces the performance of the algorithm, a problem commonly knowns as the **curse of dimensionality**. Sparse features further enhance this effect. # # What can we do against it? # # - Add more training data (not possible) # - Regularization # - Dimensionality reduction # # Dimensionality reduction using feature projection is a common method for counter-acting problems associated with high dimensionality. It transforms the data from the high-dimensional space to a space of fewer dimensions. Common techniques are PCA (Principal Component Analysis) or LDA (Linear Discriminant Analysis). # # A lesser known method for reducing feature space is feature agglomeration by hierarchical clustering: similar features are merged, which reduces the feature space. # # ----- # ## Feature Agglomeration<a id='agglo'></a> # # [[back to top]](#top) # # In this section, we will reduce the feature dimensionality by agglomerating a selection features. # # - We identify a sets of weaker features # - We agglomerate these features into clusters # - We train and evaluate classifier with the set of unchanged strong features and the agglomerated features # # # ---- # # Feature agglomeration is very similar to [agglomerative clustering](https://scikit-learn.org/stable/modules/clustering.html#hierarchical-clustering), which is used to build clusters of similar samples. For feature agglomeration we use the transformed data, so instead of samples we cluster similar features. 
# # <a href="url"><img src="figures/dendrogram.png" width="600"></a> # # # # **Agglomerative** indicates a "bottom-up" approach: each observation starts in its own cluster, and pairs of clusters are merged as one moves up the hierarchy. The **linkage** criteria determines the metric used for the merge strategy. We use the **Ward linkage**, which minimizes the sum of the squared differences within all clusters. It **minimizes the variance** of the clusters being merged. The **affinity** is the metric used to compute the linkage. For **Ward linkage** the only option is euclidean. We do not add a **connectivity** constraint as our features are not structured in a way that would benefit from it. # # # Here is an example for feature agglomeration of pixel features found on [scikit-learn](https://sklearn.org/auto_examples/cluster/plot_digits_agglomeration.html): # # # <a href="url"><img src="figures/f_agglo.png" width="400"></a> # # # ### Only agglomerate weak features # # Normally feature agglomeration is performed on the entire set of features. However, we do not want to reduce the effect of strong features and thus only agglomerate the weaker features, i.e. the sparse or less relevant features. Thus, we agglomerate the weak features and combine them with the strong features to achieve the highest performance. # # ### Identify sparse features # # Here we identify sparse features or, in other words, the features that are zero for many samples and have a low variance. We consider all features that have a 0 value in more than one third of the samples as sparse. This method classifies **27 features as sparse - a third of all features**. # # In the feature agglomeration process, those features are merged into 5 clusters. The original 27 features are discarded and the 5 new clustered features are included instead. Combined with the 27 non-sparse features, this yields a total of 32 features. 
This method counter-acts sparsity, reduces the number of features and mitigates the impact of noise. The feature agglomeration step led to an improvement in the classifier’s performance # + # Reload unscaled features features = np.array(dataset) # Count how many times feature is empty non_zeros_in_features = np.count_nonzero(features, axis=0) # Get mask for features that are empty more than half the time sparse_feature_mask = (non_zeros_in_features <= features.shape[0]*0.33) non_sparse_feature_mask = np.invert(sparse_feature_mask) # Get indices of sparse and non-sparse features sparse_index = np.where(sparse_feature_mask)[0] non_sparse_index = np.where(np.invert(sparse_feature_mask))[0] # Scale the features features = scaler.fit_transform(features) # Get sparse and non-sparse features sparse_features = features[:, sparse_index] non_sparse_features = features[:, non_sparse_index] # Get sparse and non-sparse features sparse_f_names = feature_names[sparse_feature_mask] non_sparse_f_names = feature_names[non_sparse_feature_mask] # Print number of sparse and non-sparse features; print sum print('Sparse features\n------------------') print('Total features: {}'.format(sparse_index.shape[0] + non_sparse_index.shape[0])) print('Sparse features: {}'.format(sparse_index.shape[0])) print('Non-sparse features: {}'.format(non_sparse_index.shape[0])) print() print('Sparse feature names:') print(sparse_f_names.tolist()) # - # ### Identify less relevant features # # Sparsity does not necessarily make a feature irrelevant. The two sparse features *subjunctive 1 forms* and *genitives*, for example, are in the top 15 relevant features when looking at their coefficient strengths. # # Linear support vector classifiers allow inspection of the **feature coefficients**. They show how much a feature contributes to the prediction of each class. 
def get_sorted_coefficients(classifier, feature_names):
    """Get features and coefficients sorted by coefficient strength in a linear SVM.

    A feature's "strength" is the largest absolute coefficient any class
    assigns to it.

    Parameters:
    classifier (sklearn.svm._classes.LinearSVC) -- linear SVM classifier (has to be fitted!)
    feature_names (list or np.array) -- feature names

    Return:
    sort_idx (np.array) -- sorting array for features (feature with strongest coefficient first)
    sorted_coef (np.array) -- coefficient columns reordered by strength
    sorted_fnames (list) -- feature names sorted by coefficient strength"""

    # BUGFIX: the docstring allows a plain list, but the fancy indexing
    # below requires an ndarray -- convert defensively (no-op for arrays).
    feature_names = np.asarray(feature_names)

    # Sort the feature indices according to absolute coefficients
    # (negated so the strongest coefficient comes first)
    sort_idx = np.argsort(-abs(classifier.coef_).max(axis=0))

    # Get sorted coefficients and feature names
    sorted_coef = classifier.coef_[:, sort_idx]
    sorted_fnames = feature_names[sort_idx].tolist()

    return sort_idx, sorted_coef, sorted_fnames
# def feature_agglomeration(n_clusters, agglo_features, agglo_f_names): """Reduce the feature space by agglomerating the given agglo_features to a specific number (n_clusters) using feature agglomeration. Parameters: n_clusters (int) -- number of clusters for feature agglomeration agglo_features (np.array) -- features to agglomerate agglo_f_names (list) -- feature names for agglo_features Return: agglomerated_features (np.array) -- combined array with agglomerated and keep features agglomerated_f_names (np.array) -- feature names for reduced features""" # How to compute feature clusters with feature agglomeration agglo_cluster = sklearn.cluster.FeatureAgglomeration(n_clusters=n_clusters, affinity="euclidean", linkage="ward", connectivity=None, pooling_func=np.mean) # Fit data agglo_cluster.fit(agglo_features) # Get agglomerated feature set agglomerated_features = agglo_cluster.transform(agglo_features) # Get labels labels = agglo_cluster.labels_ # Sort the label indices sorted_label_idx = np.argsort(labels) # Get sorted cluster components cluster_components = labels[sorted_label_idx] # Sort the feature names by cluster sorted_agglo_f_names = agglo_f_names[sorted_label_idx] # Plot feature clusters data_vis.plot_cluster_components(cluster_components, sorted_agglo_f_names) # Agglomerated feature names agglomerated_names = ["cluster_"+(str(i+1)) for i in range(n_clusters)] # Check number of features and clusters assert agglomerated_features.shape[1] == n_clusters # Return agglomerated features and feature names return agglomerated_features, agglomerated_names # ### Feature Sets # # We have divided our features into sparse and non-sparse features as well as into relevant and less relevant features. We have also reduced the features using feature agglomeration. Those agglomerated features of weaker features can be combined with the stronger features. 
def generate_feature_set(feature_set, n_clusters=5):
    """Generate the feature set of choice. Some feature sets agglomerate weaker
    features and/or combine them with strong features.

    Relies on the module-level feature arrays (features, sparse_features,
    relevant_features, ...) prepared in earlier cells.

    Parameters:
    feature_set (string) -- feature set to compute

    Keyword arguments:
    n_clusters (int) -- number of clusters for feature agglomeration

    Return:
    feature set of choice (np.array)

    Raises:
    NotImplementedError -- if an unknown feature set name is given"""

    if feature_set == "original features":
        return features
    elif feature_set == "only relevant features":
        return relevant_features
    elif feature_set == "baseline features":
        return baseline_features
    elif feature_set == "PCA components (explained variance of 95%)":
        return pca_reduced_features
    elif feature_set == "only less relevant features":
        return less_relevant_features
    elif feature_set == "only sparse features":
        return sparse_features
    elif feature_set == "only non-sparse features":
        return non_sparse_features
    elif feature_set == "original agglomerated features":
        # Get agglomerated original features
        agglomerated_features, agglomerated_f_names = feature_agglomeration(n_clusters, features, feature_names)
        return agglomerated_features
    elif feature_set == "only agglomerated sparse features":
        # Get agglomerated sparse features
        agglomerated_features, agglomerated_f_names = feature_agglomeration(n_clusters, sparse_features, sparse_f_names)
        return agglomerated_features
    elif feature_set == "only agglomerated less relevant features":
        # Get agglomerated less relevant features
        agglomerated_features, agglomerated_f_names = feature_agglomeration(n_clusters, less_relevant_features, less_relevant_f_names)
        return agglomerated_features
    elif feature_set == "agglomerated sparse features + non-sprase features":
        # NOTE(review): the "non-sprase" typo is preserved on purpose -- this
        # string appears to match an option emitted by the data_vis widget;
        # fix both sides together or the branch becomes unreachable.
        # Get agglomerated for sparse features
        agglomerated_features, agglomerated_f_names = feature_agglomeration(n_clusters, sparse_features, sparse_f_names)
        # Combine agglomerated sparse + non-sparse features
        combined_features = np.concatenate([agglomerated_features, non_sparse_features], axis=1)
        return combined_features
    elif feature_set == "agglomerated less relevant features + relevant features":
        agglomerated_features, agglomerated_f_names = feature_agglomeration(n_clusters, less_relevant_features,
                                                                            less_relevant_f_names)
        # Combine agglomerated less relevant + relevant features
        combined_features = np.concatenate([agglomerated_features, relevant_features], axis=1)
        return combined_features
    else:
        # In case unknown feature set is given
        raise NotImplementedError("Unknown feature set: {}".format(feature_set))

    return None
def evaluate_feature_set(new_features, solutions, svm_parameters=None):
    """Evaluate (and generate) the feature set of choice using cross-validation.

    Prints the feature counts and the cross-validated training/validation
    accuracy of a linear SVM on the given feature set.

    Parameters:
    new_features (np.array) -- feature set to evaluate
    solutions (np.array) -- labels/solutions for features

    Keyword arguments:
    svm_parameters (dict) -- linear SVM parameters {'C': ..., 'tol': ...} (default=None)

    Return:
    None -- results are printed"""

    # Get training, validation and test data
    X_train, X_val, X_test, y_train, y_val, y_test = get_train_val_test_split(new_features, solutions)
    X_train_val = np.concatenate([X_train, X_val])
    y_train_val = np.concatenate([y_train, y_val])

    # If no svm parameters available, use default
    if svm_parameters is None:
        # Define a classifier with default values
        classifier = svm.LinearSVC(random_state=RANDOM_SEED, max_iter=2000)
    else:
        # Define a classifier with screened svm parameters
        classifier = svm.LinearSVC(random_state=RANDOM_SEED, max_iter=2000,
                                   C=svm_parameters["C"], tol=svm_parameters["tol"])

    # BUGFIX: removed the stray statement `svm.LinearSVC(ConvergenceWarning)`,
    # which constructed (and discarded) a classifier with a warning class as
    # its first positional argument -- clearly leftover debugging code.

    # Get cross-validated performance
    train_acc, test_acc, scores = cross_validation(classifier, X_train_val, y_train_val)

    # Compare against the original (module-level) feature count
    print("\n\nOriginal number of features: {}".format(features.shape[1]))
    print("New number of features: {}".format(new_features.shape[1]))

    # Printing results
    print("\nPerformance\n*************")
    print('Training accuracy: {}'.format(train_acc))
    print('Val accuracy: \t {}'.format(test_acc))
# Widget interaction
@interact(feature_set=data_vis.feature_set_widi, n_clusters=data_vis.n_cluster_widi)
def generate_and_evaluate_feature_set(feature_set, n_clusters):
    """Generate and evaluate feature set of choice.

    The widget interaction decorator calls this function each time a
    different feature set or number of clusters is selected.

    Parameters:
    feature_set (string) -- feature set of choice
    n_clusters (int) -- number of clusters for feature agglomeration

    Return:
    None"""

    # Get the features for the respective feature set
    # (local name deliberately shadows the module-level `features` array)
    features = generate_feature_set(feature_set, n_clusters)

    # Re-scale the features
    features = scaler.fit_transform(features)

    # Evaluate the new feature set using the screened hyper-parameters
    evaluate_feature_set(features, solutions, best_params)
The feature set with the agglomerated relevant features achieves the highest results, **outperforming the sparse/non-sparse feature set by 3%**. The less relevant features also clearly outperform the baseline while the sparse features do not. # | Feature set | # Features | Training Accuracy | Validation Accuracy | # | --- | --- | --- | --- | # | Original | 80 | 99.99% | **85.21%** | # | Baseline | 2 | 51.11% | **48.38%** | # | | | | # |Non-sparse features | 53| 99.93% | **88.33%**| # |Sparse features | 27 | 59.19% | **42.88%**| # |Relevant features | 15 | 95.92% | **91.96%**| # |Less relevant features | 65 | 97.14% | **73.38%**| # | | | | # |Agglomerated original features | 5 | 78.60% | **74.96%**| # |Agglomerated sparse features | 5 | 40.47% | **31.54%**| # |Agglomerated less relevant features | 5 | 56.12% | **50.29%**| # |||| # | PCA components (explained variance of 95%) | 36 | 96.42% | **76.00%** | # | | | | # |Agglomerated sparse + non-sparse features | 58 | 99.95% | **89.96%**| # |Agglomerated less relevant + relevant features | 20 | 97.93% | **92.79%**| # # For the full analysis, please read the report [here](https://github.com/athrado/lang-level/tree/master/4_Reports). # ## Conclusion # # - Feature agglomeration on a select set of features can considerably improve performance # - Coefficient strengths are good indicators for selecting relevant features # - Concatenating strong and agglomerated weak features reduces overfitting while preserving high accuracy # # ## Take Home Poem # # # <br> # # ``` # Linguistic features everywhere, # about domain they do not care, # perform just fine on any type # of text, no matter who does write. # # Sev'ral, though, are oh! so rare, # just won't occur, to our despair! # While others shine just like a hero, # they often simply show a zero. # # Those troublemakers are a threat, # overfit our training set! # Shall we discard those features, then? # Or could there be a better plan? 
# # Why not let birds of a feather, # share a feature space together. # Reduce dimensions and you'll get # a nice and smaller feature set. # # Combine them now with features strong, # and - hey, look there! - they get along! # They help each other and then beat # the orig'nal model, ain't that neat? # # ``` #
2_Scripts/Complete_Language_Level_Classification_with_Feature_Agglomeration.ipynb
def kronSum(A, B):
    """Return the Kronecker sum of matrices A and B.

    The Kronecker sum is A (+) B = A x I_n + I_m x B, where m and n are the
    column counts of A and B respectively and x is the Kronecker product.
    """
    # Identity sizes come from each operand's column count
    a_cols = np.size(A, 1)
    b_cols = np.size(B, 1)
    # Lift each operand into the combined space, then add
    lifted_a = np.kron(A, np.eye(b_cols))
    lifted_b = np.kron(np.eye(a_cols), B)
    return lifted_a + lifted_b
outputId="341db4ab-645f-435f-9fe0-9c06de55dde5" print(tempo_processamento) # + id="RsGa13ITeyTD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 627} outputId="c4126625-c56d-474a-d1ec-e1c457d204f1" sistema # + id="eQcAoDzjsMMP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="aa6fabcf-929c-4f88-fa0b-16ff048a6d70" momento_inicio = datetime.now() colunas = ['de_noh', 'para_noh', 'aresta'] grafo = pd.DataFrame(columns=colunas) r = 1 c = 1 for j in range(np.size(sistema,0)): for i in range(np.size(sistema,0)): if sistema.loc[r,c]==0 and c < np.size(sistema,0): c += 1 elif c < np.size(sistema,0): grafo.loc[len(grafo)+1] = (r, c, sistema.loc[r,c]) c += 1 else: c = 1 r += 1 tempo_processamento = datetime.now() - momento_inicio print(tempo_processamento) # + id="OUFRHbS79w6x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 731} outputId="4a559bcf-3087-4aed-c409-cd6935b79fea" grafo['aresta'] = grafo['aresta'].astype('str') grafo # + id="5eI6U5izUOhl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 731} outputId="b11354b0-8515-4b85-e9c2-ef9a3aa9dd7c" new = grafo["aresta"].str.split("*", n = -1, expand = True) grafo["aresta"]=new[1] grafo["semaforo_secao"]=new[2] new = grafo["aresta"].str.split("_", n = -1, expand = True) grafo["semaforo_trem"]=new[1] grafo['coincide'] = np.where(grafo['semaforo_secao']==grafo['semaforo_trem'], True, False) grafo # + id="KJpWXieMWDN5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="1d43ee9e-a1c0-4be5-c6bd-46793cbeef10" grafo.drop(grafo[grafo.coincide == False].index, inplace=True) grafo
0010_Example_4_4_Thesis_p61.ipynb
def compare_answers():
    """Yield (answer_words, text) pairs for every answer in `aug_data`.

    For each paragraph, the per-sentence dependency parses are unpacked into
    token lists; sentences without a parse contribute an empty list. Each
    answer's word span is then sliced out of its start sentence so it can be
    compared against the raw answer text.

    Reads the module-level `aug_data` structure.
    """
    for article in aug_data['data']:
        for para in article['paragraphs']:
            deps = para['deps']
            nodess = []
            for dep in deps:
                # BUGFIX: unpack only after the None check. The original
                # executed `nodes, edges = dep` first, which raises
                # TypeError whenever a sentence has no dependency parse
                # (dep is None) -- the sibling counters check first.
                if dep is not None:
                    nodes, edges = dep
                    nodess.append(nodes)
                else:
                    nodess.append([])
            # First element of each node tuple is the token string
            wordss = [[node[0] for node in nodes] for nodes in nodess]
            for qa in para['qas']:
                for answer in qa['answers']:
                    text = answer['text']
                    word_start = answer['answer_word_start']
                    word_stop = answer['answer_word_stop']
                    # (sentence_idx, word_idx) pairs; slice within the start sentence
                    answer_words = wordss[word_start[0]][word_start[1]:word_stop[1]]
                    yield answer_words, text
def mult_sent_answer_counter():
    """Count and print how many answers span more than one sentence.

    An answer crosses a sentence boundary when the sentence index of its
    start position differs from that of its stop position. Reads the
    module-level `aug_data` structure.
    """
    total = 0
    for article in aug_data['data']:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                for ans in qa['answers']:
                    # Read kept to preserve the original KeyError behavior
                    # on malformed entries
                    text = ans['text']
                    start = ans['answer_word_start']
                    stop = ans['answer_word_stop']
                    crosses_sentences = start is not None and start[0] != stop[0]
                    if crosses_sentences:
                        total += 1
    print(total)
squad/eda_aug_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fitzhugh-Nagumo simplified action-potential model # # This example shows how the [Fitzhugh-Nagumo simplified action potential (AP) model](http://www.scholarpedia.org/article/FitzHugh-Nagumo_model) can be used. # # The model is based on a simplification and state-reduction of the original squid axon model by Hodgkind and Huxley. # It has two state variables, a voltage-like variable and a recovery variable. # # + import pints import pints.toy import numpy as np import matplotlib.pyplot as plt # Create a model model = pints.toy.FitzhughNagumoModel() # Run a simulation parameters = [0.1, 0.5, 3] times = np.linspace(0, 20, 200) values = model.simulate(parameters, times) # Plot the results plt.figure() plt.xlabel('Time') plt.ylabel('Value') plt.plot(times, values) plt.legend(['Voltage', 'Recovery']) plt.show() # - # With these parameters, the model creates wide AP waveforms that are more reminiscent of muscle cells than neurons. # We now set up a simple optimisation problem with the model. # + # First add some noise sigma = 0.5 noisy = values + np.random.normal(0, sigma, values.shape) # Plot the results plt.figure() plt.xlabel('Time') plt.ylabel('Noisy values') plt.plot(times, noisy) plt.show() # - # Next, we set up a problem. Because this model has multiple outputs (2), we use a [MultiOutputProblem](http://pints.readthedocs.io/en/latest/core_classes_and_methods.html#multi-output-problem). problem = pints.MultiOutputProblem(model, times, noisy) score = pints.SumOfSquaresError(problem) # Finally, we choose a wide set of boundaries and run! 
# + # Select boundaries boundaries = pints.RectangularBoundaries([0., 0., 0.], [10., 10., 10.]) # Select a starting point x0 = [1, 1, 1] # Perform an optimization found_parameters, found_value = pints.optimise(score, x0, boundaries=boundaries) print('Score at true solution:') print(score(parameters)) print('Found solution: True parameters:' ) for k, x in enumerate(found_parameters): print(pints.strfloat(x) + ' ' + pints.strfloat(parameters[k])) # - # Plot the results plt.figure() plt.xlabel('Time') plt.ylabel('Values') plt.plot(times, noisy, '-', alpha=0.25, label='noisy signal') plt.plot(times, values, alpha=0.4, lw=5, label='original signal') plt.plot(times, problem.evaluate(found_parameters), 'k--', label='recovered signal') plt.legend() plt.show() # This shows the parameters are not retrieved entirely correctly, but the traces still strongly overlap.
examples/toy-model-fitzhugh-nagumo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from urllib.request import urlopen as uReq from bs4 import BeautifulSoup my_url = 'http://stockpage.10jqka.com.cn/000566/' uClient = uReq(my_url) html_doc =uClient.read() uClient.close()
Python/WebScraping_Stock/.ipynb_checkpoints/WebScraping_China_Stock_BS4-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import torch
import time
from datetime import datetime
import os
import matplotlib.pyplot as plt
import torch.nn.functional as F
import seaborn as sns
from scipy.stats import norm

# # CIFAR-10 constituent samples' extraction
# This notebook shows how to construct a dataset that has only CIFAR samples. This can be used for other tasks or for assessment of models trained on the imagenet constituents, to understand how well these models deal with distribution shift.
#
# #### ENSURE THAT CINIC-10 IS DOWNLOADED AND STORED IN ../data/cinic-10

# +
# Per-split CSVs of detector/base-model scores produced upstream.
br_file = '../cifar10-vs-ti/benrecht_cifar10.csv'
cifar_file = '../cifar10-vs-ti/cifar10_test.csv'
cinic_file = '../cifar10-vs-ti/cinic10_test.csv'
ti_file = '../cifar10-vs-ti/tinyimages.csv'
cifar_train_file = '../cifar10-vs-ti/cifar10_train.csv'
cinic_train_file = '../cifar10-vs-ti/cinic10_train.csv'

br_df = pd.read_csv(br_file)
cifar_df = pd.read_csv(cifar_file)
cinic_df = pd.read_csv(cinic_file)
cifar_train_df = pd.read_csv(cifar_train_file)
cinic_train_df = pd.read_csv(cinic_train_file)
ti_df = pd.read_csv(ti_file)
# -

print(br_df.shape)
print(cifar_df.shape)
print(cinic_df.shape)
print(cifar_train_df.shape)
print(cinic_train_df.shape)
print(ti_df.shape)

ti_df[0:5].iloc[:, 0:15]

# +
# Column layout (inferred from the slicing below -- TODO confirm against the
# CSV writer): col 1 = target, cols 2..12 = detector scores (10 CIFAR classes
# + an 11th "not CIFAR" class), cols 13..22 = base-model scores,
# col 23 = detector prediction, col 24 = base-model prediction.
br_det_scores = br_df.iloc[:, 2:13]
cifar_det_scores = cifar_df.iloc[:, 2:13]
cinic_det_scores = cinic_df.iloc[:, 2:13]
ti_det_scores = ti_df.iloc[:, 2:13]
cifar_train_det_scores = cifar_train_df.iloc[:, 2:13]
cinic_train_det_scores = cinic_train_df.iloc[:, 2:13]

br_base_scores = br_df.iloc[:, 13:23]
cifar_base_scores = cifar_df.iloc[:, 13:23]
cinic_base_scores = cinic_df.iloc[:, 13:23]
ti_base_scores = ti_df.iloc[:, 13:23]
cifar_train_base_scores = cifar_train_df.iloc[:, 13:23]
cinic_train_base_scores = cinic_train_df.iloc[:, 13:23]

br_targets = br_df.iloc[:, 1]
cifar_targets = cifar_df.iloc[:, 1]
cinic_targets = cinic_df.iloc[:, 1]
ti_targets = ti_df.iloc[:, 1]
cifar_train_targets = cifar_train_df.iloc[:, 1]
cinic_train_targets = cinic_train_df.iloc[:, 1]

br_preds = br_df.iloc[:, 23]
cifar_preds = cifar_df.iloc[:, 23]
cinic_preds = cinic_df.iloc[:, 23]
ti_preds = ti_df.iloc[:, 23]
cifar_train_preds = cifar_train_df.iloc[:, 23]
cinic_train_preds = cinic_train_df.iloc[:, 23]

br_base_preds = br_df.iloc[:, 24]
cifar_base_preds = cifar_df.iloc[:, 24]
cinic_base_preds = cinic_df.iloc[:, 24]
ti_base_preds = ti_df.iloc[:, 24]
cifar_train_base_preds = cifar_train_df.iloc[:, 24]
cinic_train_base_preds = cinic_train_df.iloc[:, 24]
# -

# br_det_scores[0:5]
print(ti_det_scores[0:5])
print(ti_base_scores[0:5])
print(ti_targets[0:5])
print(ti_preds[0:5])
print(ti_base_preds[0:5])

# +
# Exploratory scratch cell kept for reference.
# (pd.to_numeric(cifar_det_scores.iloc[:,0:10].idxmax(axis=1)) - 2 == cifar_targets).sum()
# (cifar_preds == cifar_targets).sum()
((pd.to_numeric(cifar_det_scores.iloc[:, 0:10].idxmax(axis=1)) - 2) == cifar_targets).sum()
# pd.to_numeric(cifar_det_scores[cifar_preds == 10].iloc[:,0:10].idxmax(axis=1)) - 2
# ((pd.to_numeric(cifar_det_scores[cifar_preds == 10].iloc[:,0:10].idxmax(axis=1)) - 2) == cifar_targets[cifar_preds == 10]).sum()
# ((pd.to_numeric(cifar_det_scores[cifar_preds != 10].iloc[:,0:10].idxmax(axis=1)) - 2) == cifar_targets[cifar_preds != 10]).sum()
# (cifar_preds == 10).sum()
# (cifar_base_preds[cifar_preds != 10] == cifar_targets[cifar_preds != 10]).sum()
# len(cifar_targets)
# -

def verify(df, df_det_scores, df_base_scores, df_targets, df_preds, df_base_preds):
    """Print accuracy statistics for one dataset split.

    Compares (a) the detector restricted to its first 10 (CIFAR) columns and
    (b) the base classifier against ground-truth targets, broken down by
    whether the detector predicted class 10 ("not CIFAR").

    The detector-score column labels are numeric strings offset by 2, hence
    ``pd.to_numeric(...) - 2`` to recover class ids -- assumed from the
    exploratory cell above; TODO confirm against the CSV writer.
    ``df`` and ``df_base_scores`` are accepted for signature symmetry but
    unused here.
    """
    # Detector argmax over its 10 CIFAR classes vs. targets, over all rows.
    cifar10_correct = ((pd.to_numeric(df_det_scores.iloc[:, 0:10].idxmax(axis=1)) - 2) == df_targets).sum()
    total = len(df_targets)
    # Same argmax, restricted to rows the detector labelled 10 / not-10.
    predti_pseudo_corr = ((pd.to_numeric(df_det_scores[df_preds == 10].iloc[:, 0:10].idxmax(axis=1)) - 2) == df_targets[df_preds == 10]).sum()
    predcifar_corr = ((pd.to_numeric(df_det_scores[df_preds != 10].iloc[:, 0:10].idxmax(axis=1)) - 2) == df_targets[df_preds != 10]).sum()
    predti_count = (df_preds == 10).sum()
    predcifar_count = (df_preds != 10).sum()
    # Base classifier accuracy, overall and on each detector partition.
    base_cifar_correct = (df_base_preds == df_targets).sum()
    base_predcifar_correct = (df_base_preds[df_preds != 10] == df_targets[df_preds != 10]).sum()
    base_predti_correct = (df_base_preds[df_preds == 10] == df_targets[df_preds == 10]).sum()
    print("Total %d cifar10 correct %d" % (total, cifar10_correct))
    print("Predicted c10 count %d predicted c10 correct %d, predicted ti count %d pred ti pseudo correct %d" \
          % (predcifar_count, predcifar_corr, predti_count, predti_pseudo_corr))
    # BUGFIX: "corect" -> "correct" in the printed report line.
    print("base cifar10 correct %d, base pred c10 correct %d, base predicted ti correct %d" \
          % (base_cifar_correct, base_predcifar_correct, base_predti_correct))
    print('------------------------------------------------------------------------')

# +
verify(cifar_df, cifar_det_scores, cifar_base_scores, cifar_targets, cifar_preds, cifar_base_preds)
verify(br_df, br_det_scores, br_base_scores, br_targets, br_preds, br_base_preds)
verify(cinic_df, cinic_det_scores, cinic_base_scores, cinic_targets, cinic_preds, cinic_base_preds)
verify(ti_df, ti_det_scores, ti_base_scores, ti_targets, ti_preds, ti_base_preds)
verify(cifar_train_df, cifar_train_det_scores, cifar_train_base_scores, cifar_train_targets, cifar_train_preds, cifar_train_base_preds)
verify(cinic_train_df, cinic_train_det_scores, cinic_train_base_scores, cinic_train_targets, cinic_train_preds, cinic_train_base_preds)
# -

# Default working split for ad-hoc cells below.
df, df_det_scores, df_base_scores, df_targets, df_preds, df_base_preds = (cifar_df, cifar_det_scores, cifar_base_scores, cifar_targets, cifar_preds, cifar_base_preds)

br_det_scores[br_preds == 10].iloc[:, -1]

def plot_df_hist(df):
    """Histogram of a 1-D series of confidences with a fitted normal overlaid."""
    mu, std = norm.fit(df)
    plt.figure()
    plt.hist(df, bins=100, histtype='bar')
    xmin, xmax = plt.xlim()
    x = np.linspace(xmin, xmax, 100)
    p = norm.pdf(x, mu, std)
    plt.plot(x, p, 'k', linewidth=2)
    title = "Softmax confidence Fit results: mu = %.2f, std = %.2f, count = %d" % (mu, std, len(df))
    plt.title(title)
    plt.xlabel("Max confidence of softmax outputs")
    plt.ylabel('Count')
    # plt.title('Histogram of cross entropy loss for unlabeled data')
    plt.show()

# +
# BUGFIX: this function was defined twice; the second definition shadowed the
# first and forwarded BOTH arguments to plot_df_hist (which takes a single
# series), raising TypeError on every call. One working definition kept.
def plot_det_11thclass_conf_on_hard_part_hist(df_det_scores, df_preds):
    """Histogram of the detector's 11th-class ("not CIFAR") confidence on the
    samples the detector predicted as class 10 (the "hard" part)."""
    assert len(df_det_scores) == len(df_preds), 'length of dfs unequal'
    plot_df_hist(df_det_scores[df_preds == 10].iloc[:, -1])


def plot_base_target_conf_on_hard_part_hist(df_base_scores, df_preds):
    """Histogram of the base model's maximum confidence on the samples the
    detector predicted as class 10.

    NOTE(review): the original body passed two arguments to the one-argument
    plot_df_hist and could never run; the intent here is reconstructed from
    the function name -- confirm with the author.
    """
    assert len(df_base_scores) == len(df_preds), 'length of dfs unequal'
    plot_df_hist(df_base_scores[df_preds == 10].max(axis=1))
# -

# BUGFIX: the original called undefined plot_hard_part_hist() (NameError);
# call the fixed helper on the benrecht split instead.
plot_det_11thclass_conf_on_hard_part_hist(br_det_scores, br_preds)

plot_df_hist(br_det_scores[br_preds == 10].iloc[:, -1])

plot_df_hist(ti_det_scores[ti_preds == 10].iloc[:, -1])

plot_df_hist(cifar_det_scores[cifar_preds == 10].iloc[:, -1])

plot_df_hist(cifar_train_det_scores[cifar_train_preds == 10].iloc[:, -1])
dataset_utils/.ipynb_checkpoints/confidence_analysis-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ___
#
# <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Copyright by Pierian Data Inc.</em></center>
# <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>

# # Categorical Plots - Statistical Estimation within Categories
#
# Often we have **categorical** data, meaning the data is in distinct groupings, such as Countries or Companies. There is no country value "between" USA and France and there is no company value "between" Google and Apple, unlike continuous data where we know values can exist between data points, such as age or price.
#
# To begin with categorical plots, we'll focus on statistical estimation within categories. Basically this means we will visually report back some statistic (such as mean or count) in a plot. We already know how to get this data with pandas, but often its easier to understand the data if we plot this.

# ## Imports

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# ## The Data

# Sales dataset; columns used below: 'division', 'level of education',
# 'training level', 'salary'.
df = pd.read_csv("dm_office_sales.csv")

df.head()

# ## Countplot()
#
# A simple plot, it merely shows the total count of rows per category.

# Count of rows per sales division.
plt.figure(figsize=(10,4),dpi=200)
sns.countplot(x='division',data=df)

# Count of rows per education level.
plt.figure(figsize=(10,4),dpi=200)
sns.countplot(x='level of education',data=df)

# ### Breakdown within another category with 'hue'

plt.figure(figsize=(10,4),dpi=200)
sns.countplot(x='level of education',data=df,hue='training level')

# **NOTE: You can always edit the palette to your liking to any matplotlib [colormap](https://matplotlib.org/3.1.1/gallery/color/colormap_reference.html)**

plt.figure(figsize=(10,4),dpi=200)
sns.countplot(x='level of education',data=df,hue='training level',palette='Set1')

plt.figure(figsize=(10,4),dpi=200)
# Paired would be a good choice if there was a distinct jump from 0 and 1 to 2 and 3
sns.countplot(x='level of education',data=df,hue='training level',palette='Paired')

# # barplot()
#
# So far we've seen the y axis default to a count (similar to a .groupby(x_axis).count() call in pandas). We can expand our visualizations by specifying a specific continuous feature for the y-axis. Keep in mind, you should be careful with these plots, as they may imply a relationship continuity along the y axis where there is none.

plt.figure(figsize=(10,6),dpi=200)
# By default barplot() will show the mean
# Information on the black bar: https://stackoverflow.com/questions/58362473/what-does-black-lines-on-a-seaborn-barplot-mean
# NOTE(review): ci='sd' is the pre-0.12 seaborn spelling; newer versions use
# errorbar='sd' -- confirm the pinned seaborn version before upgrading.
sns.barplot(x='level of education',y='salary',data=df,estimator=np.mean,ci='sd')

plt.figure(figsize=(12,6))
sns.barplot(x='level of education',y='salary',data=df,estimator=np.mean,ci='sd',hue='division')

# +
plt.figure(figsize=(12,6),dpi=100)
# https://stackoverflow.com/questions/30490740/move-legend-outside-figure-in-seaborn-tsplot
sns.barplot(x='level of education',y='salary',data=df,estimator=np.mean,ci='sd',hue='division')
# Anchor the legend outside the axes so it does not cover the bars.
plt.legend(bbox_to_anchor=(1.05, 1))
# -

# --------

# --------
Data Science Resources/Jose portila - ML/05-Seaborn/02-Categorical-Plots-Stat-Estimation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="x_nhyH9XPiBo"
# # Linear Regression with Tensorflow
#
# Reference 1:
# https://www.tensorflow.org/tutorials/keras/regression
#
# Reference 2:
# https://stackabuse.com/tensorflow-2-0-solving-classification-and-regression-problems/
#
# Reference 3:
# https://towardsdatascience.com/what-is-one-hot-encoding-and-how-to-use-pandas-get-dummies-function-922eb9bd4970
#
# The goal of this notebook is to build a linear regression model with TensorFlow

# + colab={"base_uri": "https://localhost:8080/"} id="YtwEqT5YPbTA" outputId="8a303a11-77b0-42d2-c399-bc152efc4e91"
# import tensorflow
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

print(tf.__version__)

# + id="S0pZNcv0P2Cf"
# import additional libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

# + [markdown] id="oJWHD3lCR9Ou"
# ## Reading in data
# 1. Uploading a csv file
# 2. From github
# 3. Using tensorflow datasets
# 4. External resource

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="AHaR1hH0mFBL" outputId="ef4bd4da-66b6-4b04-cac4-6925ef0c182f"
# Loading data from a csv file
# Loading from a github account
df = pd.read_csv('https://raw.githubusercontent.com/oluwole-packt/datasets/main/Fuel_Efficiency.csv', index_col=0)
df.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="4pyxicE3QCNP" outputId="87071baf-e5ba-4f9a-f7c7-6a37b343a5b8"
# Loading data from a Local csv file
df = pd.read_csv('/content/Fuel_Efficiency.csv', index_col=0)
df.head()

# + colab={"base_uri": "https://localhost:8080/"} id="m1QbG9tXSi1A" outputId="5343fbed-b145-47b7-bd2e-1fa6b82c2b36"
# checking the columns
df.columns

# + [markdown] id="3S-gUSHiTCUP"
# ## Data preprocessing

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="Xuo4wbf_SgG2" outputId="f7ed6959-3d5a-4161-82c4-6c68b58e7923"
# Rearranging the columns so the target (MPG) comes last
# (comment fixed: this reorders columns, not rows)
df = df[['Cylinders', 'Displacement', 'Horsepower', 'Weight',
         'Acceleration', 'Model Year', 'Origin', 'MPG']]
# viewing the first five rows in the dataframe
df.head()

# + colab={"base_uri": "https://localhost:8080/"} id="GDAlDDbvS8bw" outputId="9fa6d276-c8f9-4f0e-c971-1b0650d44206"
df.info()

# + [markdown] id="9K6_cK5nTJkX"
# We can see the total entries are 398 rows, however the horsepower column has a value of 392. Let us check this column to find out what is going on there.

# + colab={"base_uri": "https://localhost:8080/"} id="lQJ_SabAS8ZJ" outputId="ed016207-7741-4802-e12d-426e1ea6bb1e"
# Lets check if we have some missing values
df['Horsepower'].isnull().sum()

# + colab={"base_uri": "https://localhost:8080/"} id="CyMPG1W5S8WX" outputId="6f924282-0543-41c0-a678-0f19f846691e"
# Lets check the entire dataset for missing values
df.isnull().sum()

# + [markdown] id="zwEXrdG2Txbn"
# The number of missing values are quite small, in this scenario we will drop them. There are other methods of handling missing values. Although the exam will focus more on testing your modelling skills, data preprocessing will come in handy so I will refer you to this excellent book on data preprocessing by packt
#
# Hands-On Data Preprocessing in Python: Learn how to effectively prepare data for successful data analytics by <NAME>.

# + id="Z7zowmG2S8UA"
# drop the missing values
df = df.dropna()

# + colab={"base_uri": "https://localhost:8080/"} id="LXPkkl8VUXw3" outputId="6354e0e8-9519-42cd-d55a-54bf3d3a7755"
# Lets check the entire dataset for missing values
df.isnull().sum()

# + colab={"base_uri": "https://localhost:8080/"} id="eV3XoRufUe1o" outputId="bcee877c-4956-45be-e27a-f982e6b38b28"
# Checking the data types of each attribute
df.dtypes

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="FZ00VlExUa1G" outputId="5bae037d-87f8-46f9-bfb0-ce0a5a131ee9"
# Converting categorical variables to numeric
# NOTE(review): get_dummies only one-hot encodes object/category dtype
# columns -- confirm 'Origin' is non-numeric here, otherwise this is a no-op.
df = pd.get_dummies(df, drop_first=True, prefix='Origin')
df.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 331} id="LVSMYJocos21" outputId="4a726e48-ef23-4375-bfbf-e283289e2fd7"
df.corr()

# + colab={"base_uri": "https://localhost:8080/", "height": 920} id="eRipJj1lSfZ4" outputId="b0646dd4-a722-425c-84bb-a97b0a830f69"
# Lets us visualize the relationship between these strongly correlated features and the Miles per gallon
sns.pairplot(df[['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'MPG']])

# + [markdown] id="g9rxRzh1pRpR"
# # Model Building

# + id="iOtUAEfAQCLX"
# Create X & y values
X = df.drop("MPG", axis=1)
y = df["MPG"]

# + id="K-tKSXW7QCIv"
# Create training and test sets
# We set the random state to ensure reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)

# + colab={"base_uri": "https://localhost:8080/"} id="L_O0jha3QCGQ" outputId="4c76342c-4022-40bb-c967-c7785e2e64d1"
# Set random seed for reproducibility
tf.random.set_seed(10)

# create a model using the Keras API: a single dense unit = linear regression
model = Sequential([Dense(units=1, input_shape=[len(X_train.columns)])])

# compile the model
model.compile(loss=tf.keras.losses.mae,
              optimizer=tf.keras.optimizers.SGD(),
              metrics=['mae'])

# Fit the model
model.fit(X_train, y_train, epochs=50)  # number of times the model will go through training examples

# + colab={"base_uri": "https://localhost:8080/"} id="dgnxacBnQCD4" outputId="4bf3b796-49b3-4b93-db71-7624719644a4"
# Set the random seed
tf.random.set_seed(10)

# create a model (same architecture, retrained for longer and keeping history)
model = Sequential([Dense(units=1, input_shape=[len(X_train.columns)])])

# compile the model
model.compile(loss="mae", optimizer="SGD", metrics="mae")

# fit the model
history = model.fit(X_train, y_train, epochs=500)  # number of times the model will go through training examples

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="uU-kiX5PpukH" outputId="09ce98d1-27b2-4453-ac94-075aec4bd704"
print(history.history.keys())
# Lets plot the loss
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Number of epochs')
plt.legend(['loss plot'], loc='upper right')
plt.show()

# + [markdown] id="uT_lWOoCqzjO"
# The model is clearly underfitting, hence we have to figure out how to improve our model to ensure it can make accurate predictions.

# + colab={"base_uri": "https://localhost:8080/"} id="xOmtId_EQCBQ" outputId="8fbf9859-b527-46f4-8225-63ad79739df8"
# Set random seed
tf.random.set_seed(25)

# create a model: one hidden ReLU layer on top of the linear output
model2 = Sequential([
    Dense(units=64, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=1)
])

# compile the model
model2.compile(loss="mae", optimizer="SGD", metrics="mae")

# fit the model
history2 = model2.fit(X_train, y_train, epochs=500)

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="h8gEfLdArM4w" outputId="6ae32c04-91fa-4fb9-bcd9-3f00f22b62df"
print(history2.history.keys())
# Lets plot the loss
plt.plot(history2.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Number of epochs')
plt.legend(['loss plot'], loc='upper right')
plt.ylim([0, 40])
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="Q7hGAe-aQB_B" outputId="ee91cb30-713e-44e8-b4ce-2a7bfcdb7cd5"
# Set random seed
tf.random.set_seed(10)

# create a model using the sequential API: same as model2 but with Adam
model3 = Sequential([
    Dense(units=64, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=1)
])

# compile the model
model3.compile(loss="mae", optimizer="Adam", metrics=['mae'])

# fit our model
history3 = model3.fit(X_train, y_train, epochs=500)

# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="ZFQc-cBnsuPN" outputId="b8ad3e43-ef0d-499f-a296-65ed71366908"
print(history3.history.keys())
# Lets plot the loss
plt.plot(history3.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Number of epochs')
plt.legend(['loss plot'], loc='upper right')
plt.ylim([0, 15])
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="4qoGHMT4QB8g" outputId="f5782618-421d-49c9-ad23-f8dfdaf9f3e6"
# Set random seed
tf.random.set_seed(25)

# create a model using the sequential API: two hidden layers, SGD
model4 = Sequential([
    Dense(units=64, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=64, activation='relu'),
    Dense(units=1)
])

# compile the model
model4.compile(loss="mae", optimizer=tf.keras.optimizers.SGD(), metrics="mae")

# fit our model
history4 = model4.fit(X_train, y_train, epochs=500)

# + colab={"base_uri": "https://localhost:8080/", "height": 530} id="njADjT9Is558" outputId="5531de2b-f4ba-4e19-c27a-7246fab739b4"
print(history4.history.keys())
# Lets plot the loss
plt.figure(figsize=[10, 8])
plt.plot(history4.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Number of epochs')
plt.legend(['loss plot'], loc='upper right')
plt.ylim([0, 60])
plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="7-pR2EDGX3gx" outputId="ec5edf17-c649-49b0-ee9a-b2762793576a"
# Set random seed
tf.random.set_seed(25)

# BUGFIX/NOTE(review): this cell originally re-assigned `model3`/`history3`,
# silently clobbering the Adam model trained above, so the model3.evaluate()
# cell below reported THIS 50-unit model instead. Renamed to model3b so each
# experiment keeps its own variable -- confirm this matches the author's
# intent.
model3b = Sequential([
    Dense(units=50, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=1)
])

# compile the model
model3b.compile(loss="mae", optimizer="Adam", metrics='mae')

# fit our model
history3b = model3b.fit(X_train, y_train, epochs=500)

# + colab={"base_uri": "https://localhost:8080/"} id="enCIb3PBt8h_" outputId="aa378a7b-640b-4573-a797-76ee404c106d"
# Evaluate the baseline linear model on the held-out test set
model.evaluate(X_test, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="I9OK2VhWuDNd" outputId="bd0c3e08-5946-42b6-eccf-512b4ac31d13"
# Evaluate model2 (64-unit hidden layer, SGD)
model2.evaluate(X_test, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="NEwLEstCuFgM" outputId="ccee964d-6d7a-49e9-fbe3-c114d4267222"
# Evaluate model3 (64-unit hidden layer, Adam)
model3.evaluate(X_test, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="R8e4AeCluHg0" outputId="f9e7837d-80cf-4e4e-ac8f-003d04b9df7b"
# Evaluate model4 (two hidden layers, SGD)
model4.evaluate(X_test, y_test)

# + [markdown] id="SY8ZWQcLxKsM"
# # Improving our model's performance

# + [markdown] id="OMH_U3HPXIgX"
# ### Normalization

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="xmr3V4StXP3h" outputId="0db5a2a3-7e39-4863-b4b0-b90bd28a4e6a"
X.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="9vOglJUsXTwG" outputId="e73b7b69-d241-46d7-e188-d8e8e28e69c5"
X.describe()

# + [markdown] id="1iThcZA5Xh0H"
# We can see the attributes are of a different scale. Hence we want to bring the attributes to a uniform scale, hence we apply normalization.

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="hejogviQQ_nQ" outputId="44a1e12c-6ac1-46e1-a192-51f0291718dc"
# create a scaler object
scaler = MinMaxScaler()

# fit and transform the data
X_norm = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
X_norm.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="v4p5LbtvuUi0" outputId="8521680b-2c5c-4a65-dcc7-2fa9b3ffb23e"
X_norm.describe()

# + id="rjgnrMgPXQoJ"
# Create training and test sets on the normalized features
X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.2, random_state=42)  # set random state for reproducible splits

# + colab={"base_uri": "https://localhost:8080/"} id="V3KJsnWFXyRB" outputId="0a122737-7e98-4c25-8aaf-daf31ae86d9f"
# Set random seed
tf.random.set_seed(10)

# create a model using the sequential API
model5 = Sequential([
    Dense(units=64, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=64, activation="relu"),
    Dense(units=1)
])

# compile the model
model5.compile(loss="mae",
               optimizer=tf.keras.optimizers.RMSprop(0.001),
               metrics=['mae'])

# fit our model, stopping early once the loss stops improving for 10 epochs
early_stop = keras.callbacks.EarlyStopping(monitor='loss', patience=10)
history5 = model5.fit(X_train, y_train, epochs=500, callbacks=[early_stop])

# + colab={"base_uri": "https://localhost:8080/"} id="rYoK0PAYXyOX" outputId="c55b292b-2c1a-44f9-887a-fb8e9b9f7379"
# Set random seed
tf.random.set_seed(25)

# create a model using the sequential API (same arch as model5, Adam)
model6 = Sequential([
    Dense(units=64, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=64, activation="relu"),
    Dense(units=1)
])

# compile the model
model6.compile(loss="mae",
               optimizer=tf.keras.optimizers.Adam(),
               metrics=['mae'])

# fit our model
early_stop = keras.callbacks.EarlyStopping(monitor='loss', patience=10)
history6 = model6.fit(X_train, y_train, epochs=500, callbacks=[early_stop])

# + colab={"base_uri": "https://localhost:8080/"} id="lvxgjLB1vHm_" outputId="27f4f9f4-f18e-47da-cf54-16e6055491b1"
# Set random seed
tf.random.set_seed(25)

# create a model (single hidden layer, RMSprop)
model7 = Sequential([
    Dense(units=64, activation='relu', input_shape=[len(X_train.columns)]),
    Dense(units=1)
])

# compile the model
model7.compile(loss="mae",
               optimizer=tf.keras.optimizers.RMSprop(0.001),
               metrics="mae")

# fit the model
early_stop = keras.callbacks.EarlyStopping(monitor='loss', patience=10)
history7 = model7.fit(X_train, y_train, epochs=500, callbacks=[early_stop])

# + colab={"base_uri": "https://localhost:8080/"} id="SNnXv-4_vcs1" outputId="b659385d-639b-44b0-dafa-c18e506da315"
# Evaluate model5 on the test set
model5.evaluate(X_test, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="lKiCbLYQviuH" outputId="83767845-38cf-47b8-95ee-91470b4aeb33"
# Evaluate model6 on the test set
model6.evaluate(X_test, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="6T12nadAvjMU" outputId="8f03f235-04da-4ba7-c96c-bf509409a643"
# Evaluate model7 on the test set
model7.evaluate(X_test, y_test)

# + colab={"base_uri": "https://localhost:8080/"} id="OftgMLynXyL-" outputId="0f55da2e-2f93-4520-84b4-9118c81ccdd7"
loss, mae = model5.evaluate(X_test, y_test, verbose=0)
print("mae: {:5.2f} MPG".format(mae))

# + colab={"base_uri": "https://localhost:8080/"} id="d2wDpD7mZbNG" outputId="955dd679-e7b4-49fa-c692-2efe3bf3c2b0"
y_preds = model5.predict(X_test).flatten()
y_preds

# + id="OowPb2czXyJe"
df_predictions = pd.DataFrame({'Ground_Truth': y_test, 'Model_prediction': y_preds},
                              columns=['Ground_Truth', 'Model_prediction'])

# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="9uI_TIpaaY1G" outputId="5d412b43-5091-4d08-91ae-1196510118d5"
df_predictions.head(10)

# + [markdown] id="9Lx8JpaOw3AX"
# Now we have successfully ran a good number of experiments, trying out various hyperparameters in improving our model.
Chapter 3/Regression_Task.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simple Imputer
# (BUGFIX: this title was a bare, uncommented text line, which is a
# SyntaxError when the script form of the notebook is executed.)

import numpy as np
import pandas as pd

# Toy data: five numeric columns (0-4) with NaN holes and one categorical
# column (5) with a missing value.
data = [
    [1.0, 2.0, np.nan, 2.0, 5.0, "Cat1"],
    [np.nan, 8.0, 4.0, 5.0, 1.0, "Cat2"],
    [3.0, 4.0, 7.0, np.nan, 2.0, np.nan],
    [4.0, np.nan, 1.0, 6.0, 3.0, "Cat1"],
    [5.0, 1.0, 2, 3.0, np.nan, "Cat2"],
    [9.0, 4.0, np.nan, 3.0, np.nan, "Cat2"],
]

data = pd.DataFrame(data, columns=[0, 1, 2, 3, 4, 5])
data.head()

from sklearn.impute import SimpleImputer

# Numeric columns: replace NaN with the column mean.
num_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# Categorical column: replace NaN with the most frequent value.
cat_imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')

X = data[[0, 1, 2, 3, 4]]
data[[0, 1, 2, 3, 4]] = num_imputer.fit_transform(X)
data.head()

Y = data[[5]]
data[[5]] = cat_imputer.fit_transform(Y)
data.head()

# Thanks
1_Useful_Methods/Simple_Imputer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: authorid
#     language: python
#     name: authorid
# ---

# # Spooky Author Identification: fastText Models

# ## Implementation

# ### Import packages

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2

# +
import os

# Must be set before any CUDA-aware library initializes the GPU context.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = ''  # '' to run on CPU, '0' to run on the first GPU
# -

# If you don't already have these packages, run this cell
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')

# Project-local helpers (load_data, process_text, compute_word_index,
# load_embeddings, construct_embedding_matrix, save_line_to_file,
# integer_encode_classes, one_hot_encode_classes, calculate_mean_logloss, ...).
from packages import *

# +
MODEL_NAME = 'test_fasttext'

# Input/output directory layout, all relative to the notebook location.
INPUT_DIR = '../input/'
TRAIN_FILE_PATH = f'{INPUT_DIR}train.csv'
TEST_FILE_PATH = f'{INPUT_DIR}test.csv'
SAMPLE_SUBMISSION_FILE_PATH = f'{INPUT_DIR}sample_submission.csv'
EMBEDDINGS_DIR = f'{INPUT_DIR}embeddings/'
EMBEDDINGS_FILE_PATH = f'{EMBEDDINGS_DIR}crawl-300d-2M.vec'
OUTPUT_DIR = '../output/'
OUTPUT_LOGS_DIR = f'{OUTPUT_DIR}{MODEL_NAME}/logs/'
OUTPUT_MODELS_DIR = f'{OUTPUT_DIR}{MODEL_NAME}/models/'
OUTPUT_SCORES_DIR = f'{OUTPUT_DIR}{MODEL_NAME}/scores/'
OUTPUT_SUBMISSIONS_DIR = f'{OUTPUT_DIR}{MODEL_NAME}/submissions/'
OUTPUT_SUMMARIES_DIR = f'{OUTPUT_DIR}{MODEL_NAME}/summaries/'

# Create the output directories if they do not exist (the `_` is necessary
# in order to create intermediate directories and is itself not created)
os.makedirs(os.path.dirname(f'{OUTPUT_LOGS_DIR}_'), exist_ok=True)
os.makedirs(os.path.dirname(f'{OUTPUT_MODELS_DIR}_'), exist_ok=True)
os.makedirs(os.path.dirname(f'{OUTPUT_SCORES_DIR}_'), exist_ok=True)
os.makedirs(os.path.dirname(f'{OUTPUT_SUBMISSIONS_DIR}_'), exist_ok=True)
os.makedirs(os.path.dirname(f'{OUTPUT_SUMMARIES_DIR}_'), exist_ok=True)

# Dimensionality of the pre-trained fastText vectors (crawl-300d-2M).
EMBEDDING_DIM = 300
MAX_FEATURES = None  # The top most common words if an integer; otherwise, all words are used
MAX_SEQUENCE_LENGTH = 128
N_SPLITS = 10

# Fix a random seed for reproducibility
SEED = 42
np.random.seed(SEED)
# -

train, test, submission = load_data(TRAIN_FILE_PATH, TEST_FILE_PATH, SAMPLE_SUBMISSION_FILE_PATH)

# +
# Apply text preprocessing on each sentence
# (all normalization flags off: tokenization only -- see packages.process_text)
X_train_sequences = list(train['text'].apply(
    lambda x: process_text(x, lower=False, remove_punc=False, normalize_spelling=False,
                           stem=False, lemmatize=False, remove_stopwords=False)).values)
X_test_sequences = list(test['text'].apply(
    lambda x: process_text(x, lower=False, remove_punc=False, normalize_spelling=False,
                           stem=False, lemmatize=False, remove_stopwords=False)).values)

# Tokenize and pad the sentences
X_train_tokenized, X_test_tokenized, word_index = compute_word_index(X_train_sequences,
                                                                     X_test_sequences,
                                                                     MAX_FEATURES,
                                                                     MAX_SEQUENCE_LENGTH)
# -

embeddings_index = load_embeddings(EMBEDDINGS_FILE_PATH)

embedding_matrix, vocab_size, num_unknown = construct_embedding_matrix(word_index, embeddings_index, EMBEDDING_DIM, MAX_FEATURES)

# Here we subtract 1 from the vocab size because 1 has been added to the
# actual number of tokens to account for masking in the embedding matrix
unknown_word_percentage = (num_unknown / (vocab_size - 1)) * 100
unknown_word_lines = ('Number of vocabulary words not found in the pre-trained embeddings: '
                      f'{num_unknown} of {vocab_size - 1} '
                      f'({unknown_word_percentage:.2f}%)')
print(unknown_word_lines)

# Log the coverage figure for later inspection ('w' truncates any old log).
preprocessing_file_path = f'{OUTPUT_LOGS_DIR}preprocessing.log.txt'
save_line_to_file(unknown_word_lines, preprocessing_file_path, 'w')

# The target classes need to be converted to integers so that
# EAP --> 0
# HPL --> 1
# MWS --> 2
y_train_integers = integer_encode_classes(train['author'].values)

# The target classes need to be one-hot encoded so that
# EAP --> 0 --> [1, 0, 0]
# HPL --> 1 --> [0, 1, 0]
# MWS --> 2 --> [0, 0, 1]
y_train_encoded = one_hot_encode_classes(y_train_integers)

# Import model-dependent files
from models import build_embedding_layer, build_model_callbacks, save_model_summary
from models import get_random_cnn_params as get_random_model_params
from models import build_cnn_model as build_model

# Set this to the best number of epochs based on the evaluation phase
final_num_epochs = 3

# Select the best model params based on the evaluation phase
# CNN
final_model_params = {
    'batch_size': 64,
    'filters': 250,
    'kernel_size': 3,
    'dropout_rate': 0.2,
    'optimizer': 'adam',
    'use_special_arch': True,
    'normal_arch_params': {}
}

# RNN (alternative configuration, kept for reference)
# final_model_params = {
#     'batch_size': 64,
#     'use_gru_layer': True,
#     'use_global_max_pooling_layer': True,
#     'units': 128,
#     'spatial_dropout_rate': 0.2,
#     'optimizer': 'adam',
#     'num_rnn_stacks': 1,
# }

final_batch_size = final_model_params['batch_size']

# Build the embedding layer
embedding_layer = build_embedding_layer(embedding_matrix, vocab_size, EMBEDDING_DIM, MAX_SEQUENCE_LENGTH)

# Build the model with the best model params
model = build_model(embedding_layer, MAX_SEQUENCE_LENGTH, final_model_params)

# Save the model architecture, weights, and optimizer state to file
# NOTE(review): saved BEFORE fit() below, so this checkpoint holds the
# untrained weights -- confirm that is intended.
model.save(f'{OUTPUT_MODELS_DIR}final.model.hdf5')

# Save the model summary to file
save_model_summary(model, f'{OUTPUT_SUMMARIES_DIR}final.model_summary.txt')

# Train the model
history = model.fit(X_train_tokenized, y_train_encoded,
                    batch_size=final_batch_size,
                    epochs=final_num_epochs,
                    verbose=1,
                    shuffle=True)

predictions = model.predict(X_test_tokenized, batch_size=final_batch_size, verbose=1)

# Check the final submission values
predictions[0:3]

# Per-class probabilities for the three authors, in submission column order.
submission[['EAP', 'HPL', 'MWS']] = predictions

submission_num = 27
submission_description = 'fasttext_best_manual_cnn_after_tp_e_2'
submission_filename = f'{submission_num:03d}_{submission_description}.csv'
submission_file_path = f'{OUTPUT_SUBMISSIONS_DIR}{submission_filename}'
submission.to_csv(submission_file_path, index=False)

calculate_mean_logloss(0.48172, 0.51083)
code/test_fasttext_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Challenge Notebook # ## Problem: Implement a priority queue backed by an array. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # * [Solution Notebook](#Solution-Notebook) # ## Constraints # # * Do we expect the methods to be insert, extract_min, and decrease_key? # * Yes # * Can we assume there aren't any duplicate keys? # * Yes # * Do we need to validate inputs? # * No # * Can we assume this fits memory? # * Yes # ## Test Cases # # ### insert # # * `insert` general case -> inserted node # # ### extract_min # # * `extract_min` from an empty list -> None # * `extract_min` general case -> min node # # ### decrease_key # # * `decrease_key` an invalid key -> None # * `decrease_key` general case -> updated node # ## Algorithm # # Refer to the [Solution Notebook](priority_queue_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
# ## Code

# +
class PriorityQueueNode(object):
    """A single (object, key) entry stored in the priority queue."""

    def __init__(self, obj, key):
        self.obj = obj
        self.key = key

    def __repr__(self):
        return str(self.obj) + ': ' + str(self.key)


class PriorityQueue(object):
    """Minimum priority queue backed by an unsorted array (Python list).

    insert is O(1); extract_min and decrease_key are O(n) linear scans,
    the expected trade-off for the array-backed variant of this structure.
    Keys are assumed unique (per the challenge constraints).
    """

    def __init__(self):
        self.array = []

    def __len__(self):
        return len(self.array)

    def insert(self, node):
        """Append the node and return it; returns None for a None input."""
        if node is None:
            return None
        self.array.append(node)
        return self.array[-1]

    def extract_min(self):
        """Remove and return the node with the smallest key, or None if empty."""
        if not self.array:
            return None
        # Linear scan for the index of the minimum key.
        minimum = 0
        for index, node in enumerate(self.array):
            if node.key < self.array[minimum].key:
                minimum = index
        return self.array.pop(minimum)

    def decrease_key(self, obj, new_key):
        """Set the key of the node holding `obj` and return the node.

        Returns None if no node holds `obj` (invalid key case).
        """
        for node in self.array:
            if node.obj == obj:
                node.key = new_key
                return node
        return None
# -

# ## Unit Test

# **The unit test below passes now that the challenge methods are implemented.**

# +
# # %load test_priority_queue.py
def assert_equal(actual, expected):
    # Minimal stand-in for nose.tools.assert_equal: nose is unmaintained
    # and a plain assert covers the equality check.
    assert actual == expected, '{!r} != {!r}'.format(actual, expected)


class TestPriorityQueue(object):

    def test_priority_queue(self):
        priority_queue = PriorityQueue()
        assert_equal(priority_queue.extract_min(), None)
        priority_queue.insert(PriorityQueueNode('a', 20))
        priority_queue.insert(PriorityQueueNode('b', 5))
        priority_queue.insert(PriorityQueueNode('c', 15))
        priority_queue.insert(PriorityQueueNode('d', 22))
        priority_queue.insert(PriorityQueueNode('e', 40))
        priority_queue.insert(PriorityQueueNode('f', 3))
        priority_queue.decrease_key('f', 2)
        priority_queue.decrease_key('a', 19)
        mins = []
        while priority_queue.array:
            mins.append(priority_queue.extract_min().key)
        assert_equal(mins, [2, 5, 15, 19, 22, 40])
        print('Success: test_min_heap')


def main():
    test = TestPriorityQueue()
    test.test_priority_queue()


if __name__ == '__main__':
    main()
# -

# ## Solution Notebook
#
# Review the [Solution Notebook](priority_queue_solution.ipynb) for a discussion on algorithms and code solutions.
arrays_strings/priority_queue/priority_queue_challenge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv-datascience # language: python # name: venv-datascience # --- # # Keras Regression - House Price Prediction Project # # ![house](house.jpg) # ## The Data # # We will be using data from a Kaggle data set: # # https://www.kaggle.com/harlfoxem/housesalesprediction # # #### Feature Columns # # * id - Unique ID for each home sold # * date - Date of the home sale # * price - Price of each home sold # * bedrooms - Number of bedrooms # * bathrooms - Number of bathrooms, where .5 accounts for a room with a toilet but no shower # * sqft_living - Square footage of the apartments interior living space # * sqft_lot - Square footage of the land space # * floors - Number of floors # * waterfront - A dummy variable for whether the apartment was overlooking the waterfront or not # * view - An index from 0 to 4 of how good the view of the property was # * condition - An index from 1 to 5 on the condition of the apartment, # * grade - An index from 1 to 13, where 1-3 falls short of building construction and design, 7 has an average level of construction and design, and 11-13 have a high quality level of construction and design. 
# * sqft_above - The square footage of the interior housing space that is above ground level # * sqft_basement - The square footage of the interior housing space that is below ground level # * yr_built - The year the house was initially built # * yr_renovated - The year of the house’s last renovation # * zipcode - What zipcode area the house is in # * lat - Lattitude # * long - Longitude # * sqft_living15 - The square footage of interior housing living space for the nearest 15 neighbors # * sqft_lot15 - The square footage of the land lots of the nearest 15 neighbors # # Data import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv('data/kc_house_data.csv') df.head() # # Exploratory Data Analysis df.isnull().sum() df.info() df.describe().T sns.displot(df['price'], bins=30); # We might want to consider removing those extremely expensive houses when we train our model. # # Since it's really not that many houses on the market that are that expensive. # # It may not be useful to actually have our model train on those extreme outliers. sns.countplot(data=df, x='bedrooms'); # We can see that most of the houses have 2-3 bedrooms. However there are some houses with 33 bedrooms. # ## Checking correlation df.corr()['price'].sort_values(ascending=False) # We can see `sqft_living` is highly correlated. So we will continue exploring those highly corrleated features using scatterplot. plt.figure(figsize=(12,5)) sns.scatterplot(data=df, x='sqft_living', y='price'); plt.figure(figsize=(12,8)) sns.boxplot(x='bedrooms', y='price', data=df); # ------ # # Geographical Properties # let's check the distribution of prices per latitude and longtitude. plt.figure(figsize=(12,8)) sns.scatterplot(data=df, x='price', y='long'); # We can see that longtitude `-122.2` around there, seem to be expensive houses. 
plt.figure(figsize=(12,8)) sns.scatterplot(data=df, x='price', y='lat'); # We can see that latitdue 47.6 and 47.6 around there, seem to be expensive housing area. # # ## let's plot out the king county area heat map plt.figure(figsize=(12,8)) sns.scatterplot(data=df, x='long', y='lat', hue='price'); # We are not getting the color gradient as we would like. That's because of those expensive outliers. # # So let's clean up those data a little bit more. df.sort_values('price', ascending=False).head(20) # We can see most expensive house is 7.7 million dollars and prices get quickly drop off to 3 millions range. # # So we will ignore top 1 percent of our housing dataset (outliers). # # In this case till 216 houses indexes are top 1 percent. # len(df)*0.01 # + # we will grab all dataset start from 216 index (ignore the top one percent) non_top_1_percent = df.sort_values('price', ascending=False).iloc[216:] # - plt.figure(figsize=(12,8)) sns.scatterplot(data=non_top_1_percent, x='long', y='lat', hue='price', edgecolor=None, alpha=0.2, palette='RdYlGn'); # We can see Green color around the water front areas, which makes sense because those types of houses are more expensive. sns.boxplot(data=df, x='waterfront', y='price'); # ------ # # Working with Feature Data (Feature Engineering) # ### As we know `id` column doesn't provide any value for our model prediction, we will drop it. df = df.drop('id', axis=1) # ----- # ### for `date` object, currently it is in object. We want to convert to datetime. # # In this way, we can manipulate date data. 
df['date'].dtype df['date'] = pd.to_datetime(df['date']) df['date'].dtype df['date'] # #### Extracing date information df['year'] = df['date'].apply(lambda date: date.year) df['month'] = df['date'].apply(lambda date: date.month) df.head(2) plt.figure(figsize=(10,6)) sns.boxplot(data=df, x='month', y='price'); # #### check the mean price per month to see if there is any seasonality df.groupby('month').mean()['price'] df.groupby('month').mean()['price'].plot(); df.groupby('year').mean()['price'].plot(); # we will drop the original date column df = df.drop('date', axis=1) df.head(2) df.columns # ### Zip code df['zipcode'].value_counts() # As zipcode is not necessarily to keep it as integer, we might want to treat it as categorical values. # # So if we look at the unique zip code, there are 70. # # If we convert this to dummy variables, we will get extra 70 categories. For this particular case, it is too much. So we decided to drop this column. # # + Alternatively, we can categorize them into `Expensive Zip Code` and `InExpensive Zip Code` using domain experiences. # + Another way is to group them as `North`, `South`, `East`, `West` etc. df = df.drop('zipcode', axis=1) # ------- # ### Year Renovated df['yr_renovated'].value_counts() df['yr_renovated'].dtype # Most of the data are `0` meaning `No Renovation data`. Other are like `2014` ones are 91. etc. # # So we can categroize them as `Renovated` or `No Renovated` categories, rather than integer number. # # **However, we can also imagine that recently rennovated houses may tend to correlate with higher price than old houses.** # # Therefor in this case, the more recent the year, the higher the price. We can leave the column as it is, as `year value (such as 2014)` are higher than`0 value: No Renovated` and it is aligned with `the more recent the year, the higher the price` . 
df['yr_renovated'].apply(lambda year: 1 if year > 0 else 0) # ----- # ### Square Feet Basement # # This is the similar situation as `yr_renovated` column. # # `0` means there is no basement. So it makes sense to keep it as continuous. df['sqft_basement'].value_counts() # ----- # ----- # # Scaling and Train Test Split # Tensorflow will complain if we don't pass numeric array. It can't handle Pandas series or dataframe X = df.drop('price', axis=1).values y = df['price'].values from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101) # ## Scaling from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # # Modelling from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense # ### Typically, we try to base the number of neurons in our layer as the size of actual features data. # # We can see there are 19 features in our data. X_train.shape model = Sequential() # + model.add(Dense(19, activation='relu')) model.add(Dense(19, activation='relu')) model.add(Dense(19, activation='relu')) model.add(Dense(19, activation='relu')) model.add(Dense(1)) model.compile(optimizer='adam', loss='mse') # - # # Training the Model # # ### validation_data: # + it is used for validation on each epoch. It will quickly runs the test data and check our loss on the test data. In this way, we can keep a tracking of how well we are performing, not just on the training data, but also on test data. # + This test data won't affect the weights and bias of the network. # # ### batch_size: typical values is power of 2 (128, 256, 518, etc) # + If the data is too large, we can pass data in batches. 
# + **The smaller the batch size, the longer it takes to train**, the **less likely you are going to overfit** because you are passing in entire training data set at once.Instead you are focusing on these batches. model.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=128, epochs=400) # ### Explore the history of losses # # There are two columns `loss` and `val_loss`. # + loss on training data # + loss on validation data df_losses = pd.DataFrame(model.history.history) df_losses.head() # This is a good sign. We normally want both loss and val_loss to be decreasing. # # **Always take note of spikes in val_loss which indicates the overfitting of training data.** df_losses.plot(); # # Evaluation on Test Data # # # https://scikit-learn.org/stable/modules/model_evaluation.html#regression-metrics from sklearn.metrics import mean_absolute_error, mean_squared_error, explained_variance_score predictions = model.predict(X_test) mean_absolute_error(y_test, predictions) mean_squared_error(y_test, predictions) np.sqrt(mean_squared_error(y_test, predictions)) # ### Check the mean price of original dataset df['price'].describe() 5.402966e+05 # Average price of house is around 54000\$. # # And our MAE is 102180\$ which is about 19%. This is not great and not horrible either too. 100* 102180 /5.402966e+05 # ## explained_variance_score # + tells you how much variance is being explained by your actual model. # + `Best possible score is 1.0, lower values are worse.` # # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.explained_variance_score.html # # https://www.statisticshowto.com/explained-variance-variation/ # explained_variance_score(y_test, predictions) # Is it good or bad? # # This is based on context. If we have a previously trained model, we might want to compare this value. 
# ### Visualization of predictions vs true value

# +
plt.figure(figsize=(12,8))

# Our predictions
plt.scatter(y_test, predictions);

# Perfect predictions (y == y line for reference)
plt.plot(y_test, y_test, color='r', label='perfect fit line')
plt.legend();
# -

predictions.shape

# ### Plot the error
# Use -1 so NumPy infers the row count from y_test itself; the previous
# hard-coded 6480 only worked for one specific train/test split size.
errors = y_test.reshape(-1, 1) - predictions
sns.displot(errors);

# -------
# # Predicting on Brand New Data

single_house = df.drop('price',axis=1).iloc[0]

single_house.values.reshape(-1, 19)

# Scale the single row with the scaler fitted on the training data.
single_house = scaler.transform(single_house.values.reshape(-1, 19))

model.predict(single_house)

df.iloc[0]

# Our model predicts 283173 and actual price is 221900.
#
# We are kinda overshooting. What we can do is drop off 1 or 2% of top value and re-train the model, see if it can reduce MSE on our dataset.
Complete Tensorflow 2 and Keras Deep Learning Bootcamp - JP/03-ANNs/01-Keras-Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from pathlib import Path import matplotlib.pyplot as plt import seaborn as sns color = sns.color_palette() # %matplotlib inline # - # ## load and inspect data link_df = pd.read_csv(Path('../../resources/prices/coin_ChainLink.csv'), index_col='SNo') link_df link_df['Date'] = pd.to_datetime(link_df['Date']).dt.date link_df['Date'] = pd.to_datetime(link_df['Date']) link_df['Spread'] = link_df.High - link_df.Low link_df.info() # ## Plot the closing value of Chainlink over time # + import matplotlib.dates as mdates fig, ax = plt.subplots(figsize=(12,8)) # sns.lineplot(y = link_df.Close.values, x=link_df.Date_mpl.values, alpha=0.8, color=color[3]) sns.lineplot(y = link_df.Close.values, x=link_df.Date.values, alpha=0.8, color=color[3]) ax.xaxis.set_major_locator(mdates.AutoDateLocator()) ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y.%m.%d')) # fig.autofmt_xdate() plt.xlabel('Date', fontsize=12) plt.ylabel('Price in USD', fontsize=12) plt.title("Closing price distribution of LINK", fontsize=15) plt.show() # - # ## Candlestick chart # + import matplotlib.ticker as mticker # from matplotlib.finance import candlestick_ohlc import mplfinance as mpf link_df['Date_mpl'] = link_df['Date'].apply(lambda x: mdates.date2num(x)) temp_link_df = link_df.copy(deep=False) temp_link_df = temp_link_df.set_index(['Date']) temp_link_df = temp_link_df.drop(['Name', 'Symbol', 'Marketcap','Spread'], axis=1) temp_link_df # - mpf.plot(temp_link_df.loc['2020-9-1':], type='candle', mav=(5,10), volume=True) # ## Price prediction from fbprophet import Prophet # + INPUT_FILE = "coin_ChainLink.csv" price_predict_df = pd.read_csv("../../resources/prices/" + INPUT_FILE, parse_dates=['Date'], usecols=["Date", "Close"]) 
price_predict_df.columns = ["ds", "y"] price_predict_df = price_predict_df[price_predict_df['ds']>'2020-9-1'] m = Prophet() m.fit(price_predict_df); future = m.make_future_dataframe(periods=7) forecast = m.predict(future) forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail() # - m.plot(forecast) m.plot_components(forecast)
notebooks/by_coin/chainlink_notebook_from_CSV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Dicas # # * Antes de ir pro código, leia a descrição do desafio e busque definir pequenos passos para chegar a solução. # * Leia o código para se familiarizar com a solução que está em desenvolvimento; # # * Tente entender o que o código faz ou significa. # # * Busque encontrar as partes que faltam ser colocadas e erros no código que precisam ser corrigidos; # # * Se aparecer erros, procure corrigir e/ou repita os passos acima. # # # # # 1. Desafio: dicionário # # Faça um programa para descobrir quantas pessoas candidatas no seu estado seriam mais novas do que você usando como referência a coluna "NR_IDADE_DATA_POSSE". Além disso, descubra quantos porcentos isso representa em relação ao total de pessoas candidatas. # # - Use o conjunto de dados consulta_cand_2020_PE_desafio1.csv # - Use o dicionário de dados "leiame.pdf" para tirar dúvidas sobre as colunas # # > Fonte: TSE. Repositório de dados eleitorais. Conjunto de dados [Candidatos - 2020](https://www.tse.jus.br/eleicoes/estatisticas/repositorio-de-dados-eleitorais-1) # ### Importe as bibliotecas que usuará no código # ### Determine sua idade # ### Leia o csv e conversa para dicionário # ### Quantas pessoas candidatas no seu estado seriam mais novas do que você # ### Quantos porcentos isso representa em relação ao total de pessoas candidatas.
modulo_1/desafios/desafio_1/desafio_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pyspark
import os
from datetime import date
import functools
from IPython.core.display import display, HTML
#import findspark
#findspark.init()
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql.functions import to_timestamp, count, isnan, mean, col, countDistinct, format_number, dayofmonth, hour, dayofyear, month, year, weekofyear, date_format, when,datediff, months_between
import re
from functools import reduce
# -

spark = SparkSession.builder.appName("SRAG").getOrCreate()

# # Read data

df = spark.read.csv('/Volumes/My Passport/PAMEpi_datalake/raw_data_covid19_version-21-12-07/data-sindrome_respiratoria_aguda_grave_incluindo_covid/INFLUD-29-11-2021.csv',sep=';', header = True, encoding="utf-8", inferSchema = True)

df1 = spark.read.csv('/Volumes/My Passport/PAMEpi_datalake/raw_data_covid19_version-21-12-07/data-sindrome_respiratoria_aguda_grave_incluindo_covid/INFLUD21-29-11-2021.csv',sep=';', header = True, encoding="utf-8", inferSchema = True)

df.count()

df1.count()

df.count() + df1.count()

df.limit(5).toPandas()

df.printSchema()

# Union by column name so the 2020 and 2021 files align even if their
# column sets differ; missing columns are filled with nulls.
df_concat= df.unionByName(df1, allowMissingColumns=True)

df_concat.limit(5).toPandas()

# # Convert date columns to date

df_concat.printSchema()

list_var_date = ['DT_NOTIFIC', 'DT_SIN_PRI', 'DT_NASC', 'DT_INTERNA','DT_ENTUTI','DT_SAIDUTI','DT_COLETA','DT_ENCERRA']

# Loop variable renamed from `col` to `date_col`: the original name
# shadowed the imported pyspark.sql.functions.col, making that function
# unusable in later cells.
for date_col in list_var_date:
    df_concat = df_concat.withColumn(date_col, F.col(date_col).cast(T.DateType()))
Data Description/Data analysis SRAG.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.6 64-bit # language: python # name: python3 # --- # # 2. RoseTTAFold on Azure ML - Run Experiment # # ## Introduction # # Azure is collaborating with the Baker Lab to expose their RoseTTAFold model as a service. This document describes how to get started exploring RoseTTAFold on Azure Machine Learning (Azure ML). # # **This is notebook #2 of 3**. In this notebook, we run RosettaFold as an Experiment on Azure ML. We'll create an input file, submit a Run, check status, and get the results. # # In *previous* notebook, [1-setup-workspace.ipynb](1-setup-workspace.ipynb), we ran some one-time setup steps to prepare our Azure ML Workspace with the dependency Datasets and a Compute Cluster. # # In the *next* notebook, [3-batch-endpoint.ipynb](3-batch-endpoint.ipynb), we'll create a Batch Endpoint that can be called from the Azure CLI or as a REST call. # # **Note.** These RoseTTAFold endpoints are not designed to run in production environments, and is strictly for non-production test environments. # ## Setup # ### Load Workspace Config # You must first download the config.json file from your Azure ML workspace (see previous steps in "Create a workspace"). This file should be saved locally, in the same directory as this notebook. # # Like in the first notebook, run the following block to load your workspace config and ensure it succeeds before proceeding. Watch the output feed, as it may require you to launch your browser and sign in. 
# +
from azureml.core import Workspace

try:
    ws = Workspace.from_config()
    # NOTE(review): ws.location is printed twice here — possibly one of
    # them was meant to be another field (e.g. subscription_id); confirm.
    print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
    print('Azure ML workspace loaded')
# Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
# swallowed; any workspace-loading failure still falls through to the
# friendly message.
except Exception:
    print('Azure ML workspace not found')
# -

# ### Get the Azure ML Compute Cluster Reference
# In the first notebook, we created a compute cluster within our Azure Machine Learning workspace. Now, we'll get a reference to it using the `compute_name`.

# Specify the name for your compute cluster
compute_name = 'gpu-cluster'

# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Check if a compute cluster with this name already exists. If so, use it.
try:
    compute_target = ComputeTarget(workspace=ws, name=compute_name)
    print('Found an existing cluster with this name, use it!')
except ComputeTargetException:
    print('Could not find compute target. Please follow the setup instructions in notebook 1, and specify the correct compute_name in the box above')
# -

# ### Specify input data
# The RoseTTAFold job requires inputs in [FASTA format](https://en.wikipedia.org/wiki/FASTA_format). Each input should be in its own file.
#
# Here are the contents of a sample input file:
# ```
# >T1078 Tsp1, Trichoderma virens, 138 residues|
# MAAPTPADKSMMAAVPEWTITNLKRVCNAGNTSCTWTFGVDTHLATATSCTYVVKANANASQASGGPVTCGPYTITSSWSGQFGPNNGFTTFAVTDFSKKLIVWPAYTDVQVQAGKVVSPNQSYAPANLPLEHHHHHH
# ```
#
# **Notes:** The leading `>` indicates the beginning of a record, and **is required**. The remaining record data on the first line is useful for labelling sequence and is part of the FASTA standard, but it is not used by the RoseTTAFold algorithm.
# The amino acid sequence begins on the second line with no leading characters, and should be continuous (no spaces, no line breaks). This information is the actual input for the RoseTTAFold algoritm.
#
# Specify your sequence data below as shown, then *run* the cell.
This will overwrite the contents of the file `inputs/my-sequence.fa`. # + active="" # %%writefile inputs/my-sequence.fa # >T1078 Tsp1, Trichoderma virens, 138 residues| # MAAPTPADKSMMAAVPEWTITNLKRVCNAGNTSCTWTFGVDTHLATATSCTYVVKANANASQASGGPVTCGPYTITSSWSGQFGPNNGFTTFAVTDFSKKLIVWPAYTDVQVQAGKVVSPNQSYAPANLPLEHHHHHH # - # Now upload the `inputs/my-sequence.fa` file to your Azure ML Datastore. This will create a input Dataset, which is a reference to a subdirectory within the workspace's default Datastore. # + from azureml.core import Dataset from azureml.data.datapath import DataPath ds = ws.get_default_datastore() input_dataset = Dataset.File.upload_directory( src_dir='./inputs/', target=DataPath(ds, 'input_data'), show_progress=True, overwrite=True) # - # ### Submit the Experiment # - Define the `Environment`, and specify the dockerfile (included in the same folder as this notebook) to be used. # - Specify inputs and outputs # - Create `ScriptRunConfig` that includes all of the args and configuration needed # - Wrap it up in an `Experiment` and submit to your Azure Machine Learning workspace. # # This will queue a job in Azure Machine Learning, which **is a billable activity**. # # A link to this job's status web page will be printed below the cell. 
# + from azureml.core import Environment, ScriptRunConfig from azureml.core.runconfig import DockerConfiguration from azureml.core import Experiment from azureml.data import OutputFileDatasetConfig env = Environment(name="rosettaenv") env.docker.base_image = None env.docker.base_dockerfile = "./Dockerfile" outputs_path = 'outputs/' args = ['--inputs', input_dataset.as_mount(), '--outputs', outputs_path] run_config = ScriptRunConfig(source_directory='.', script="score.py", arguments=args, compute_target=compute_target, docker_runtime_config=DockerConfiguration(use_docker=True), environment=env) run = Experiment(ws, 'RoseTTAFold-Scoring').submit(run_config) run # - run.wait_for_completion(show_output=True) # ### Get the outputs run.download_files(prefix=outputs_path)
2-run-experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """ Applying transformations to large files in batches: BatchProcessor.multi_channel_apply lets you apply transformations to batches of data where every batch has observations from every channel. This example show how to extract information from a large file by processing it in batches. """ import logging import os import numpy as np from yass.batch import BatchProcessor # - # configure logging to get information about the process logging.basicConfig(level=logging.INFO) # raw data file path_to_neuropixel_data = (os.path.expanduser('~/data/ucl-neuropixel' '/rawDataSample.bin')) # on each batch, we find the maximum value in every channel def max_in_channel(batch): """Add one to every element in the batch """ return np.max(batch, axis=0) # + # create batch processor for the data bp = BatchProcessor(path_to_neuropixel_data, dtype='int16', n_channels=385, data_format='wide', max_memory='10MB') # appply a multi channel transformation, each batch will be a temporal # subset with observations from all selected n_channels, the size # of the subset is calculated depending on max_memory. Results # from every batch are returned in a list res = bp.multi_channel_apply(max_in_channel, mode='memory', channels=[0, 1, 2]) # - # we have one element per batch len(res) # output for the first batch res[0] # stack results from every batch arr = np.stack(res, axis=0) # let's find the maximum value along every channel in all the dataset np.max(arr, axis=0)
notebooks/batch/multi_channel_apply_memory.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/thiru2024/Swidish-leaf-prediction/blob/main/Code-1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
# Swedish-leaf classifier: a frozen ImageNet VGG19 backbone with a new 15-way
# softmax head, trained on 256x256 RGB leaf images, then evaluated with a
# confusion matrix, per-class metrics, ROC curves, and Jaccard scores.
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import Model, Sequential

# importing other required libraries
import numpy as np
import pandas as pd
from sklearn.utils.multiclass import unique_labels
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
import itertools
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras import Sequential
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.layers import Flatten, Dense, BatchNormalization, Activation, Dropout
from tensorflow.keras.utils import to_categorical

# +
# Backbone: VGG19 pretrained on ImageNet, 256x256x3 inputs, classifier top removed.
image_size = [256, 256]
# model = VGG19(input_shape = image_size+[3],include_top=False,weights='imagenet')
model = VGG19(include_top=False, weights='imagenet', input_shape=(256, 256, 3))

# +
# Freeze every convolutional layer so only the new head below is trained.
for layer in model.layers:
    layer.trainable = False
model.summary()

# +
model.summary()

# +
# New head: flatten the VGG19 feature maps into a 15-way softmax classifier.
final = Model(inputs=model.input,
              outputs=Dense(15, activation='softmax')(Flatten()(model.output)))

# +
# sparse_categorical_crossentropy: labels are integer class indices, not one-hot.
final.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

from google.colab import drive
drive.mount('/content/drive')

# +
final.summary()

# +
# Root folder containing one sub-folder of images per species.
train = '/content/drive/My Drive/swidish leaf prediction'

# +
# Load every image, resize it to 256x256, and label it with its folder's index.
import matplotlib.pyplot as plt
import random
import os
import cv2

categories = ['Acer', 'Alnus incana', 'Betula pubescens', 'Fagus silvatica',
              'Populus', 'Populus tremula', 'Quercus', 'Salix alba',
              'Salix aurita', 'Salix sinerea', 'Sorbus aucuparia',
              'Sorbus intermedia', 'Tilia', 'Ulmus carpinifolia', 'Ulmus glabra']
data = []
for cat in categories:
    folder = os.path.join(train, cat)
    label = categories.index(cat)
    for img in os.listdir(folder):
        img_path = os.path.join(folder, img)
        img_arr = cv2.imread(img_path)
        img_arr = cv2.resize(img_arr, (256, 256))
        data.append([img_arr, label])
plt.imshow(img_arr)  # sanity check: show the last image that was loaded

# +
# Split the (image, label) pairs into parallel feature/label arrays.
x = []
y = []
for features, labels in data:
    x.append(features)
    y.append(labels)
import numpy as np
x = np.array(x)
y = np.array(y)
x.shape

# +
# 70/30 train/test split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# NOTE(review): these augmentation generators are fitted but never passed to
# final.fit() below, so no augmentation is actually applied -- confirm intent.
train_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)
test_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True, zoom_range=.1)

# Fitting the augmentation defined above to the data
train_generator.fit(X_train)
test_generator.fit(X_test)

# +
# Train the head for 15 epochs; `vgg19` holds the Keras History object.
vgg19 = final.fit(X_train, y_train, epochs=15, batch_size=100, validation_data=(X_test, y_test))

# +
final.save('/content/drive/My Drive/imagevgg19.h5')

# +
# import tensorflow as tf
# final = tf.keras.models.load_model('/content/drive/My Drive/imagevgg19.h5')
# Show the model architecture

# +
# Predict class probabilities on the held-out set and take the argmax per row.
import numpy as np
predictions = final.predict(X_test, steps=None, callbacks=None, max_queue_size=10,
                            workers=1, use_multiprocessing=False, verbose=0)  # vector of probabilities
pred_labels = np.argmax(predictions, axis=1)  # we take the highest probability
pred_labels

# +
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, pred_labels)
print('Accuracy: %.2f' % acc)

# +
# Training / validation curves from the fit history.
# %matplotlib inline
import matplotlib.pyplot as plt
# FIX: dropped "from sklearn.metrics import plot_confusion_matrix" -- the symbol
# was removed from scikit-learn >= 1.2 (ImportError) and was shadowed by the
# local plot_confusion_matrix defined below anyway.
acc = vgg19.history['accuracy']
val_acc = vgg19.history['val_accuracy']
loss = vgg19.history['loss']
val_loss = vgg19.history['val_loss']
epochs = range(len(acc))  # FIX: was range(15); derive from the history so the plots stay correct if the epoch count changes
plt.figure(figsize=(5, 5))
plt.plot(epochs, acc, label='Training Accuracy')
plt.plot(epochs, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.show()

plt.subplot(1, 1, 1)
plt.plot(epochs, loss, label='Training Loss')
plt.plot(epochs, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()

# +
# Confusion matrix over the 15 species.
# %matplotlib inline
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt

cm = confusion_matrix(y_true=y_test, y_pred=pred_labels)


def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization (per true-class row) can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    # Threshold that decides white vs. black cell text for readability.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


cm_plot_labels = ['Acer', 'Alnus incana', 'Betula pubescens', 'Fagus silvatica',
                  'Populus', 'Populus tremula', 'Quercus', 'Salix alba',
                  'Salix aurita', 'Salix sinerea', 'Sorbus aucuparia',
                  'Sorbus intermedia', 'Tilia', 'Ulmus carpinifolia', 'Ulmus glabra']
plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')

# +
# Same confusion matrix, larger figure.
cm_plot_labels = ['Acer', 'Alnus incana', 'Betula pubescens', 'Fagus silvatica',
                  'Populus', 'Populus tremula', 'Quercus', 'Salix alba',
                  'Salix aurita', 'Salix sinerea', 'Sorbus aucuparia',
                  'Sorbus intermedia', 'Tilia', 'Ulmus carpinifolia', 'Ulmus glabra']
plt.figure(figsize=(20, 8))
plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')

# +
# Scalar metrics on the test set.
from sklearn.datasets import make_circles
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.layers import Dense

# predict probabilities for test set
yhat_probs = final.predict(X_test, verbose=0)
# predict crisp classes for test set
yhat_classes = pred_labels

# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(y_test, yhat_classes)
print('Accuracy: %f' % accuracy)
# precision tp / (tp + fp), micro-averaged over the 15 classes
print("Precision Score : ", precision_score(y_test, pred_labels, pos_label='positive', average='micro'))
print("Recall Score : ", recall_score(y_test, pred_labels, pos_label='positive', average='micro'))
# f1: 2 tp / (2 tp + fp + fn)
print("f1_Score : ", f1_score(y_test, pred_labels, pos_label='positive', average='micro'))
# kappa
kappa = cohen_kappa_score(y_test, yhat_classes)
print('Cohens kappa: %f' % kappa)
# ROC AUC (one-vs-rest, macro-averaged over classes)
auc = roc_auc_score(y_test, yhat_probs, multi_class="ovr", average='macro')
print('ROC AUC: %f' % auc)
# confusion matrix
matrix = confusion_matrix(y_test, yhat_classes)
print(matrix)

# +
# Per-class precision/recall/F1/support report with one-vs-rest AUC.
import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import LabelBinarizer
# FIX: dropped "from scipy import interp" -- removed in SciPy 1.12;
# np.interp is the drop-in replacement and is used below.


def class_report(y_true, y_pred, y_score=None, average='micro'):
    """Build a per-class precision/recall/f1/support table.

    When `y_score` (per-class probabilities) is given, a per-class AUC column
    is added, plus a micro- or macro-averaged "avg / total" AUC depending on
    `average`. Returns the report as a DataFrame, or None on a shape mismatch.
    """
    if y_true.shape != y_pred.shape:
        print("Error! y_true %s is not the same shape as y_pred %s" % (
            y_true.shape, y_pred.shape))
        return

    lb = LabelBinarizer()
    if len(y_true.shape) == 1:
        lb.fit(y_true)

    # Value counts of predictions
    labels, cnt = np.unique(y_pred, return_counts=True)
    n_classes = len(labels)
    pred_cnt = pd.Series(cnt, index=labels)

    metrics_summary = precision_recall_fscore_support(
        y_true=y_true, y_pred=y_pred, labels=labels)
    avg = list(precision_recall_fscore_support(
        y_true=y_true, y_pred=y_pred, average='weighted'))

    metrics_sum_index = ['precision', 'recall', 'f1-score', 'support']
    # NOTE(review): the hard-coded 15 column names assume every class occurs
    # in y_pred; with fewer predicted classes this constructor raises.
    class_report_df = pd.DataFrame(
        list(metrics_summary), index=metrics_sum_index,
        columns=['Acer', 'Alnus incana', 'Betula pubescens', 'Fagus silvatica',
                 'Populus', 'Populus tremula', 'Quercus', 'Salix alba',
                 'Salix aurita', 'Salix sinerea', 'Sorbus aucuparia',
                 'Sorbus intermedia', 'Tilia', 'Ulmus carpinifolia', 'Ulmus glabra'])

    support = class_report_df.loc['support']
    total = support.sum()
    class_report_df['avg / total'] = avg[:-1] + [total]

    class_report_df = class_report_df.T
    class_report_df['pred'] = pred_cnt
    # FIX: was class_report_df['pred'].iloc[-1] = total -- chained assignment
    # that modern pandas (copy-on-write) silently ignores; write through .loc.
    class_report_df.loc[class_report_df.index[-1], 'pred'] = total

    if not (y_score is None):
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for label_it, label in enumerate(labels):
            fpr[label], tpr[label], _ = roc_curve(
                (y_true == label).astype(int), y_score[:, label_it])
            roc_auc[label] = auc(fpr[label], tpr[label])

        if average == 'micro':
            if n_classes <= 2:
                fpr["avg / total"], tpr["avg / total"], _ = roc_curve(
                    lb.transform(y_true).ravel(), y_score[:, 1].ravel())
            else:
                fpr["avg / total"], tpr["avg / total"], _ = roc_curve(
                    lb.transform(y_true).ravel(), y_score.ravel())
            roc_auc["avg / total"] = auc(
                fpr["avg / total"], tpr["avg / total"])
        elif average == 'macro':
            # First aggregate all false positive rates
            all_fpr = np.unique(np.concatenate([fpr[i] for i in labels]))
            # Then interpolate all ROC curves at these points
            mean_tpr = np.zeros_like(all_fpr)
            for i in labels:
                # FIX: scipy.interp was removed; np.interp is equivalent here.
                mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
            # Finally average it and compute AUC
            mean_tpr /= n_classes
            fpr["macro"] = all_fpr
            tpr["macro"] = mean_tpr
            roc_auc["avg / total"] = auc(fpr["macro"], tpr["macro"])

        class_report_df['AUC'] = pd.Series(roc_auc)

    return class_report_df


report_with_auc = class_report(
    y_true=y_test,
    y_pred=pred_labels,
    y_score=yhat_probs)
print(report_with_auc)

# +
# One-vs-rest ROC curve for every class.
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import roc_auc_score
# FIX: dropped "from scipy import interp" (removed in SciPy 1.12, unused here).

roc_auc_score(y_test, yhat_probs, average='macro', sample_weight=None,
              max_fpr=None, multi_class='ovo', labels=None)

fpr = {}
tpr = {}
thresh = {}
n_class = 15
for i in range(n_class):
    fpr[i], tpr[i], thresh[i] = roc_curve(y_test, yhat_probs[:, i], pos_label=i)

# plotting
fig = plt.figure(figsize=(15, 8))
plt.plot(fpr[0], tpr[0], linestyle='--', color='orange', label='Acer vs Rest', linewidth=3)
plt.plot(fpr[1], tpr[1], linestyle='--', color='green', label='Alnus incana vs Rest', linewidth=3)
plt.plot(fpr[2], tpr[2], linestyle='--', color='blue', label='Betula pubescens vs Rest', linewidth=3)
plt.plot(fpr[3], tpr[3], linestyle='--', color='yellow', label='Fagus silvatica vs Rest', linewidth=3)
plt.plot(fpr[4], tpr[4], linestyle='--', color='pink', label='Populus vs Rest', linewidth=3)
plt.plot(fpr[5], tpr[5], linestyle='--', color='black', label='Populus tremula vs Rest', linewidth=3)
plt.plot(fpr[6], tpr[6], linestyle='--', color='aqua', label='Quercus vs Rest', linewidth=3)
plt.plot(fpr[7], tpr[7], linestyle='--', color='purple', label='Salix alba vs Rest', linewidth=3)
plt.plot(fpr[8], tpr[8], linestyle='--', color='gray', label='Salix aurita vs Rest', linewidth=3)
plt.plot(fpr[9], tpr[9], linestyle='--', color='brown', label='Salix sinerea vs Rest', linewidth=3)
plt.plot(fpr[10], tpr[10], linestyle='--', color='gold', label='Sorbus aucuparia vs Rest', linewidth=3)
plt.plot(fpr[11], tpr[11], linestyle='--', color='silver', label='Sorbus intermedia vs Rest', linewidth=3)
plt.plot(fpr[12], tpr[12], linestyle='--', color='lime', label='Tilia vs Rest', linewidth=3)
plt.plot(fpr[13], tpr[13], linestyle='--', color='red', label='Ulmus carpinifolia vs Rest', linewidth=3)
plt.plot(fpr[14], tpr[14], linestyle='--', color='maroon', label='Ulmus glabra vs Rest', linewidth=3)
plt.plot(np.arange(0, 1.01, 0.01), np.arange(0, 1.01, 0.01), linewidth=3)  # chance diagonal
plt.title('Multiclass ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive rate')
plt.legend(loc='best')
plt.savefig('Multiclass ROC', dpi=300);

# +
# Matthews correlation coefficient over all classes.
from sklearn.metrics import matthews_corrcoef
matthews_corrcoef(y_test, yhat_classes)

# +
# Per-class Jaccard scores (intersection-over-union of label sets).
from sklearn.metrics import jaccard_score
sc = jaccard_score(y_test, yhat_classes, average=None)
sc

# +
# Bar chart of the per-class Jaccard scores.
import numpy as np
import matplotlib.pyplot as plt

data = {}
labelss = ['Acer', 'Alnus incana', 'Betula pubescens', 'Fagus silvatica',
           'Populus', 'Populus tremula', 'Quercus', 'Salix alba',
           'Salix aurita', 'Salix sinerea', 'Sorbus aucuparia',
           'Sorbus intermedia', 'Tilia', 'Ulmus carpinifolia', 'Ulmus glabra']
for i in range(len(sc)):
    data[labelss[i]] = sc[i]

# creating the dataset
courses = list(data.keys())
values = list(data.values())

fig = plt.figure(figsize=(10, 5))

# creating the bar plot
plt.bar(courses, values, color='maroon', width=0.5)
plt.xticks(range(len(courses)), courses, rotation='vertical', fontweight='bold', fontsize=10)
plt.xlabel("leaf types", fontweight='bold', fontsize=15)
plt.ylabel("jaccard score", fontweight='bold', fontsize=15)
plt.title("jaccard score for all leaf labels", fontweight='bold', fontsize=15)
plt.legend(loc='best')
plt.show()

# +
Code-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Summary
#
# In this notebook we load a network trained to solve Sudoku puzzles and use this network to solve a single Sudoku.
#
# ----

# + [markdown] colab_type="text" id="eoBMUoW2Hvhp"
# ## Imports

# + colab={} colab_type="code" id="TbKxMUZWHvhq"
import os
import sys
import tempfile
import time
from collections import deque
from pathlib import Path

import tqdm
from torch_geometric.data import DataLoader

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyarrow
import torch
import torch.nn as nn
from matplotlib import cm

# + colab={"base_uri": "https://localhost:8080/", "height": 334} colab_type="code" id="rczy7pPiHvhs" outputId="8e4673d7-4cc7-4f8e-ffc4-de94302e5fab"
import proteinsolver
import proteinsolver.datasets
from proteinsolver.utils import gen_sudoku_graph_featured

# +
# %matplotlib inline

# Save the pristine rcParams the first time this cell runs so that re-running
# it does not capture the already-modified values.
try:
    inline_rc
except NameError:
    inline_rc = mpl.rcParams.copy()

mpl.rcParams.update({"font.size": 12})

# + [markdown] colab_type="text" id="dsEY3dtLHvhy"
# ## Parameters
# -

# Identifier of the training run whose model definition and weights are loaded below.
UNIQUE_ID = "c8de7e56"

DATA_ROOT = Path(tempfile.gettempdir())
DATA_ROOT.mkdir(exist_ok=True)
DATA_ROOT

NOTEBOOK_NAME = "sudoku_demo"

NOTEBOOK_PATH = Path(NOTEBOOK_NAME)
NOTEBOOK_PATH.mkdir(exist_ok=True)
NOTEBOOK_PATH

DATAPKG_DATA_DIR = Path(f"~/datapkg_data_dir").expanduser().resolve()
DATAPKG_DATA_DIR

# Run on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device

proteinsolver.settings.data_url = DATAPKG_DATA_DIR.as_posix()
proteinsolver.settings.data_url

# ## Load model

# Executes the training run's model.py, which defines the `Net` class used below.
# %run sudoku_train/{UNIQUE_ID}/model.py

# +
# Sort checkpoints by the counters embedded in the file stem so that the last
# element is the most recent checkpoint.
state_files = sorted(
    Path("sudoku_train").joinpath(UNIQUE_ID).glob("*.state"),
    key=lambda s: (int(s.stem.split("-")[3].strip("amv")), int(s.stem.split("-")[2].strip("d"))),
)
state_files[-5:]
# -

state_file = state_files[-1]
state_file

# +
# Instantiate the network and restore the trained weights onto `device`.
net = Net(
    x_input_size=13, adj_input_size=3, hidden_size=162, output_size=9, batch_size=8
).to(device)
net.load_state_dict(torch.load(state_file, map_location=device))
net = net.eval()
net = net.to(device)
# -

# ## Define helper functions

def encode_puzzle(puzzle):
    # Shift digits 1-9 to class indices 0-8; blank cells (0) become class 9.
    puzzle = puzzle - 1
    puzzle = torch.where(puzzle >= 0, puzzle, torch.tensor(9))
    return puzzle

def decode_puzzle(puzzle):
    # Inverse of encode_puzzle: classes 0-8 -> digits 1-9, class 9 -> 0 (blank).
    puzzle = (puzzle + 1) % 10
    return puzzle

# Round-trip sanity check for the encode/decode pair.
puzzle = torch.tensor([1, 1, 1])
assert torch.equal(decode_puzzle(encode_puzzle(puzzle)), puzzle)

def solve_sudoku(net, puzzle):
    # Build the fixed sudoku constraint graph, run the network once over the
    # encoded puzzle, and decode the per-cell argmax back into a 9x9 grid of digits.
    sudoku_graph = torch.from_numpy(gen_sudoku_graph_featured()).to_sparse(2)
    edge_index = sudoku_graph.indices()
    edge_attr = sudoku_graph.values()
    output = net(
        encode_puzzle(puzzle).view(-1).to(device),
        edge_index.clone().to(device),
        edge_attr.clone().to(device)
    ).to("cpu")
    output = torch.softmax(output, dim=1)
    _, predicted = output.max(dim=1)
    return decode_puzzle(predicted).reshape(9, 9)

def show_sudoku(puzzle, solved=None, pred=None, ax=None):
    # Simple plotting statement that ingests a 9x9 array (n), and plots a sudoku-style grid around it.
    # Given cells are drawn in black; when `solved` and `pred` are supplied,
    # correct predictions are drawn in blue and mismatches show the prediction
    # in red next to the true digit in green.
    if ax is None:
        _, ax = plt.subplots()
    for y in range(10):
        ax.plot([-0.05, 9.05], [y, y], color="black", linewidth=1)
    for y in range(0, 10, 3):
        ax.plot([-0.05, 9.05], [y, y], color="black", linewidth=3)
    for x in range(10):
        ax.plot([x, x], [-0.05, 9.05], color="black", linewidth=1)
    for x in range(0, 10, 3):
        ax.plot([x, x], [-0.05, 9.05], color="black", linewidth=3)
    ax.axis("image")
    ax.axis("off")  # drop the axes, they're not important here
    for x in range(9):
        for y in range(9):
            puzzle_element = puzzle[8 - y][
                x
            ]  # need to reverse the y-direction for plotting
            if puzzle_element > 0:  # ignore the zeros
                T = f"{puzzle_element}"
                ax.text(x + 0.3, y + 0.2, T, fontsize=20)
            elif solved is not None and pred is not None:
                solved_element = solved[8 - y][x]
                pred_element = pred[8 - y][x]
                if solved_element == pred_element:
                    T = f"{solved_element}"
                    ax.text(x + 0.3, y + 0.2, T, fontsize=20, color="b")
                else:
                    ax.text(x + 0.1, y + 0.3, f"{pred_element}", fontsize=13, color="r")
                    ax.text(
                        x + 0.55, y + 0.3, f"{solved_element}", fontsize=13, color="g"
                    )

# ## Solve a simple puzzle

# ### Input sudoku puzzle

# 0 marks a blank cell to be filled in by the network.
puzzle = torch.tensor(
    [
        [0, 8, 0, 0, 3, 2, 0, 0, 1],
        [7, 0, 3, 0, 8, 0, 0, 0, 2],
        [5, 0, 0, 0, 0, 7, 0, 3, 0],
        [0, 5, 0, 0, 0, 1, 9, 7, 0],
        [6, 0, 0, 7, 0, 9, 0, 0, 8],
        [0, 4, 7, 2, 0, 0, 0, 5, 0],
        [0, 2, 0, 6, 0, 0, 0, 0, 9],
        [8, 0, 0, 0, 9, 0, 3, 0, 5],
        [3, 0, 0, 8, 2, 0, 0, 1, 0],
    ]
)

# ### Input solution (for reference)

solution = torch.tensor(
    [
        [4, 8, 9, 5, 3, 2, 7, 6, 1],
        [7, 1, 3, 4, 8, 6, 5, 9, 2],
        [5, 6, 2, 9, 1, 7, 8, 3, 4],
        [2, 5, 8, 3, 4, 1, 9, 7, 6],
        [6, 3, 1, 7, 5, 9, 2, 4, 8],
        [9, 4, 7, 2, 6, 8, 1, 5, 3],
        [1, 2, 5, 6, 7, 3, 4, 8, 9],
        [8, 7, 6, 1, 9, 4, 3, 2, 5],
        [3, 9, 4, 8, 2, 5, 6, 1, 7],
    ]
)

# Verify the reference solution really is a valid sudoku.
assert proteinsolver.utils.sudoku.sudoku_is_solved(solution.to("cpu"))

show_sudoku(puzzle, solution, solution)

# ### Use trained network to solve puzzle

solution_pred = solve_sudoku(net, puzzle)

proteinsolver.utils.sudoku.sudoku_is_solved(solution_pred)

show_sudoku(puzzle, solution, solution_pred)
notebooks/20_sudoku_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''.dewloosh'': venv)' # language: python # name: python3 # --- import numpy as np # ## Input Parameters # + # geometry L = 1.0 w = 0.05 h = 0.1 # mesh shape = (100, 10) # material E = 1000 nu = 0.2 # load F = 100 # - # ## Analytic Bernoulli Solution EI = E * w * h**3 / 12 UY_Bernoulli = -F * L**3 / (3 * EI) # ## Finite Element Solution # Hooke model A = np.array([ [1, nu, 0], [nu, 1, 0], [0, 0, (1-nu)/2]]) * (E /(1-nu**2)) * w from dewloosh.geom import grid, Q9, PolyData coords, topo = grid(size=(L, h), shape=shape, eshape='Q9') coords_ = np.zeros((coords.shape[0], 3)) coords_[:, :2] = coords coords = coords_ mesh = PolyData(coords=coords, topo=topo, celltype=Q9) # essential boundary conditions cond_ebc = np.abs(coords[:, 0]) <= 1e-5 ebcinds = np.where(cond_ebc)[0] fixity = np.zeros((coords.shape[0], 2), dtype=bool) fixity[ebcinds, :] = True # natural boundary conditions target = np.array([L, h/2, 0]) nbcinds = [mesh.index_of_closest_point(target)] loads = np.zeros((coords.shape[0], 2)) loads[nbcinds, 1] = -F from dewloosh.solid.fem import FemMesh, Structure, Q9M mesh = FemMesh(coords=coords, topo=topo, celltype=Q9M, fixity=fixity, loads=loads, model=A) structure = Structure(mesh=mesh) structure.linsolve() structure.summary UY_FEM = structure.pointdata.dofsol.to_numpy()[nbcinds[0]][1] # ## Comparison print("Analytic Bernoulli Solution : {}".format(UY_Bernoulli)) print("FEM Solution : {}".format(UY_FEM)) diff = 100 * (UY_FEM - UY_Bernoulli) / UY_Bernoulli print("Difference : {} %".format(diff)) structure.plot()
verification/fem/console/Q9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ARIMA forecasting demo on two classic series: daily minimum temperatures
# (seasonal period 365) and monthly air passengers (seasonal period 12).
# Each series is seasonally differenced, fitted with ARIMA, forecast over a
# held-out tail, and scored with RMSE.

import pandas as pd
import numpy as np
# NOTE(review): statsmodels.tsa.arima_model.ARIMA was removed in statsmodels
# 0.13; migrating requires statsmodels.tsa.arima.model.ARIMA, whose fit() has
# no `disp` argument -- confirm the pinned statsmodels version before upgrading.
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller
# FIX: the scipy.ndimage.interpolation namespace was removed in SciPy 1.10;
# `shift` lives directly in scipy.ndimage.
from scipy.ndimage import shift
from sklearn.metrics import mean_squared_error
from math import sqrt
import matplotlib
import matplotlib.pyplot as plt

temperatureData = pd.read_csv("Data/daily-minimum-temperatures-in-me.csv", dtype={'Temperature': float})
temperatureData.head()

fig = matplotlib.pyplot.gcf()
fig.set_size_inches(18.5, 10.5)
plt.plot(temperatureData['Temperature'].values, label="Temperature data")
plt.legend()
plt.savefig("TemperatureData.png")

passengersData = pd.read_csv("Data/AirPassengers.csv")
passengersData.head()

fig = matplotlib.pyplot.gcf()
fig.set_size_inches(18.5, 10.5)
plt.plot(passengersData['#Passengers'].values, label="Passenger Data")
plt.legend()
plt.savefig("PassengerData.png")


# ### Test stationarity

def isSeriesStationary(series):
    """Augmented Dickey-Fuller test: stationary iff the p-value is <= 0.05."""
    pValue = adfuller(series)[1]
    if pValue > 0.05:
        return False
    else:
        return True


def isSeriesStationaryAvg(series, delta=2):
    """Crude stationarity check: the means (and variances) of the two halves
    of the series must agree within `delta` (resp. `delta**2`)."""
    split = int(len(series)/2)
    split1, split2 = series[:split], series[split:]
    avg1, avg2 = split1.mean(), split2.mean()
    var1, var2 = split1.var(), split2.var()
    if abs(avg1 - avg2) > delta or abs(var1 - var2) > delta**2:
        return False
    else:
        return True


# ## Make time series stationary

# +
def difference(dataset, interval=1):
    """Seasonally difference `dataset` with lag `interval`; the result is
    `interval` elements shorter than the input."""
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return np.array(diff)


def inverse_difference(history, yhat, interval=1):
    """Invert one step of `difference`: add back the observation `interval`
    steps before the end of `history`."""
    return yhat + history[-interval]


def describeSeries(data, label):
    """Plot the series with its rolling(2) mean and std and save to `label`."""
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(18.5, 10.5)
    plt.plot(data, label="Series")
    # FIX: pd.rolling_mean / pd.rolling_std were removed in pandas 0.23; the
    # Series.rolling(...) methods are the drop-in replacement (wrapping in a
    # Series also keeps plain numpy-array inputs working).
    plt.plot(pd.Series(data).rolling(window=2).mean(), '--', label="Rolling mean")
    plt.plot(pd.Series(data).rolling(2).std(), ":", label="Rolling Std")
    plt.legend()
    plt.savefig(label)
    plt.clf()
# -

describeSeries(passengersData['#Passengers'], "DescribePassengers.png")

describeSeries(temperatureData['Temperature'].values, "DescribeTemperature.png")


def splitTrainTest(series, testSplit):
    """Chronological split: the last `testSplit` fraction becomes the test set."""
    totalData = len(series)
    trainSplit = int(totalData * (1 - testSplit))
    trainSet = series[:trainSplit]
    testSet = series[trainSplit:]
    return trainSet, testSet


# ### For temperature data

isSeriesStationaryAvg(temperatureData["Temperature"].values)

isSeriesStationary(temperatureData["Temperature"].values)

trainSet, testSet = splitTrainTest(temperatureData["Temperature"].values, 0.1)

differencedTrainSet = difference(trainSet, 365)  # remove the yearly seasonality

model = ARIMA(differencedTrainSet, order=(7, 0, 1))
# Fit model with non constant trend and no displacement.
model_fit = model.fit(disp=0)

forecast = model_fit.predict(len(differencedTrainSet), len(differencedTrainSet) + len(testSet))

# Undo the seasonal differencing step by step, feeding each prediction back
# into the history so later steps can reference it.
yPrediction = []
history = list(trainSet)
for f in forecast:
    yPredict = inverse_difference(history, f, 365)
    yPrediction.append(yPredict)
    history.append(yPredict)

fig = matplotlib.pyplot.gcf()
fig.set_size_inches(18.5, 10.5)
plt.plot(testSet, color='green', label="Test")
plt.plot(yPrediction[:-1], color='red', label="Prediction")  # predict() returns one extra step
plt.legend()
plt.savefig("TemperaturePrediction.png")

print(sqrt(mean_squared_error(testSet, yPrediction[:-1])))

# ### For passenger data set

isSeriesStationaryAvg(passengersData["#Passengers"].values)

isSeriesStationary(passengersData["#Passengers"].values)

# +
series = passengersData["#Passengers"].values
series = difference(series, 12)  # remove the 12-month seasonality
print(isSeriesStationary(series))
# -

trainSet, testSet = splitTrainTest(passengersData['#Passengers'], 0.1)

# +
differencedTrainSet = difference(trainSet, 12) * 1.0  # cast to float for ARIMA

model = ARIMA(differencedTrainSet, order=(1, 0, 1))
# Fit model with non constant trend and no displacement.
model_fit = model.fit(disp=0)

forecast = model_fit.predict(len(differencedTrainSet), len(differencedTrainSet) + len(testSet))

yPrediction = []
history = list(trainSet)
for f in forecast:
    yPredict = inverse_difference(history, f, 12)
    yPrediction.append(yPredict)
    history.append(yPredict)
# +
import matplotlib
import matplotlib.pyplot as plt

fig = matplotlib.pyplot.gcf()
fig.set_size_inches(18.5, 10.5)
plt.plot(testSet.values, color='green', label="Test")
plt.plot(yPrediction, color='red', label="Prediction")
plt.legend()
plt.savefig("PassengerPrediction.png")
# -

print(sqrt(mean_squared_error(testSet, yPrediction[:-1])))
Arima.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np


class Spotify_recommendation(object):
    """Content-based song recommender over the Spotify Top-50 dataset.

    Pipeline: load top50.csv, normalise column names and text casing, min-max
    scale the numeric features, cluster with KMeans, then precompute for every
    track the distance-ranked list of all other tracks (`fit`) so that
    `predict` is a simple lookup.
    """

    def __init__(self):
        # NOTE: reads top50.csv from the current working directory.
        self.df = pd.read_csv('top50.csv')
        self.numarical_df = None  # numeric-only view, set by normalizing_col()
        self.results = dict()     # trackname -> ranked list of similar-track records

    def columns_renaming(self):
        """Lower-case every column name and strip dots, e.g. 'Track.Name' -> 'trackname'."""
        self.df.columns = [i.lower().replace('.', '') for i in self.df]

    def cleaning_str_columns(self):
        """Lower-case the free-text columns so lookups are case-insensitive."""
        for i in ['trackname', 'artistname', 'genre']:
            self.df[i] = self.df[i].str.lower()

    def remove_column(self):
        """Drop the CSV's exported index column.

        FIX: pandas names that column 'Unnamed: 0'; the previous 'Unamed: 0'
        spelling always raised a KeyError.
        """
        self.df.drop(columns=['Unnamed: 0'], axis=1, inplace=True)

    def normalization_algo(self, col):
        """Return column `col` min-max scaled to [0, 1].

        NOTE(review): a constant column yields 0/0 here -- confirm the data
        contains no constant numeric columns.
        """
        max_d = self.df[col].max()
        min_d = self.df[col].min()
        return (self.df[col] - min_d) / (max_d - min_d)

    def normalizing_col(self):
        """Min-max scale every int64 column of the frame in place."""
        df_numarical = self.df.select_dtypes(include=['int64'])
        self.numarical_df = df_numarical
        for i in self.numarical_df.columns:
            self.df[i] = self.normalization_algo(i)

    def cluster_creation(self):
        """Label every track with a KMeans cluster id (new column 'cluster_val')."""
        # Imported lazily so the rest of the class works without scikit-learn.
        from sklearn.cluster import KMeans
        kn = KMeans(n_clusters=10)
        pred = kn.fit_predict(self.numarical_df)
        self.df['cluster_val'] = pred

    def fit(self):
        """For every track, rank all other tracks by L1 distance over the
        non-text columns and cache the ranking in `self.results`."""
        total_songs = list(self.df['trackname'].unique())
        for i in total_songs:
            distance = []
            song_res = self.df[self.df['trackname'] == i].head(1).values[0]
            # FIX: .copy() so adding the 'distance' column below does not
            # trigger pandas' SettingWithCopy warning on a filtered view.
            rem_data = self.df[self.df['trackname'] != i].copy()
            for r_song in rem_data.values:
                dist = 0
                for idx, col in enumerate(rem_data.columns):
                    if not col in ['trackname', 'artistname', 'genre']:
                        dist = dist + np.absolute(float(song_res[idx] - float(r_song[idx])))
                distance.append(dist)
            rem_data['distance'] = distance
            rem_data = rem_data.sort_values('distance')
            temp = rem_data.to_dict(orient='records')
            self.results[i] = temp

    def predict(self, song_name, top_songs=5):
        """Return the `top_songs` most similar tracks to `song_name` as dict records."""
        return self.results[song_name][0:top_songs]


# Demo, guarded so that importing this module has no side effects.
if __name__ == "__main__":
    s = Spotify_recommendation()
    s.columns_renaming()
    s.cleaning_str_columns()
    s.normalizing_col()
    s.cluster_creation()
    s.fit()
    s.predict('bad guy')
Spotify's recommendation engine/Spotify's recommendation engine.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Variational Autoencoders with Gluon # # Recent progress in variation autoencoders (VAE) has made it one of the most popular frameworks for building deep generative models. In this notebook, we will first introduce the necessary background. Then, we proceed to build a VAE model based on the paper [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114) and apply it to MNIST dataset for representation learning and sample generation tasks. In this implementation we use the MXNet Gluon API, i.e. `gluon.HybridBlock` and `autograd`. # # # ## Introduction of Variational Autoencoders (VAE) # # ### A Quick Recap of Expectation-Maximization (EM) # A straightforward way to approach VAE is through the construction of the well-known Expectation-Maximization (EM) algorithm. Please refer to this [tutorial](https://www2.ee.washington.edu/techsite/papers/documents/UWEETR-2010-0002.pdf) or this [blog](https://dingran.github.io/algorithm/EM/) as a refresher on EM. Just to quicly recap a few key elements in EM: insteand of optimizing the log-liklihood ($\ell(\theta)$) directly with observable data $x$, latent variable $z$, EM constructs and optimize on a lower bound $\mathcal{L}(q,\theta)$ often referred to as **Evidence Lower Bond (EBLO)**. The following equation derives from Jensen's inequality and holds for any $q(z)$ as long as it is a valid probability distribution. 
# # $$ # \begin{equation*} # \begin{split} # \ell(\theta) &= \sum_i \log\left( p_{\theta}(x_i) \right) \\ # & = \sum_i \log\left( \int p_{\theta}(x_i, z) dz \right)\\ # &= \sum_i \log\left( \mathbb{E}_{z \sim Q} \left[ \frac {p_{\theta}(x_i, z)}{q(z)} \right]\right)\\ # & {\ge}\underbrace{ \sum_i \mathbb{E}_{z \sim Q} \left[\log\left( \frac {p_{\theta}(x_i,z)}{q(z)} \right)\right]}_{ELBO: \mathcal{L}(q,\theta)} # \end{split} # \end{equation*} # $$ # # Importantly, among all choices of $q(z)$, we'd be able to maximize the ELBO $\mathcal{L}(q,\theta)$ with respect to $q$ if $q$ is chosen to be the inferred posterior, i.e. at $t$-th iteration $q^t(z) = p(z\vert x_i; \hat\theta^{t-1}) = \frac{p(x_i\vert z; \hat\theta^{t-1})p(z; \hat\theta^{t-1})}{\int p(x_i\vert z; \hat\theta^{t-1})p(z; \hat\theta^{t-1}) dz}$. This is the essesce of the E-step in EM algorithm. In M-step, we then maximize over $\theta$. The particular choice of $q(z)$ in E-step ensures that EM would monotonically increase the ELBO $\mathcal{L}(q,\theta)$, thus the log-liklihood $\ell(\theta)$. The chain of improvements through E-step and M-step are illustrated below. # # $$\ell(\theta^{t-1}) \underset{E-step}{=} \mathcal L(q^t,\theta^{t-1}) \underset{M-step}{\le} \mathcal L(q^t,\theta^t) \underset{Jensen}{\le} \ell(\theta^{t})$$ # # ### From EM to VAE # # With more complex distributions of $p_\theta(x\vert z)$, the integration in E-step for **exact inference** of the posterier $p_\theta(z\vert x)$ is intractable. This posterier inference problem can be addressed with **variational inference** methods such as mean-field approximation (where we assume factorizable $q(z)$) or sampling based methods (e.g. collapsed Gibbs sampling for solving Latent Dirichlet allocation). Mean-field approximation put undue constraints on the variational family $q(z)$, and sampling based methods could have slow convergence problems. 
Moreover, both methods involves arduous derivation of update functions, that would require rederivation even for small changes in model and thus could limit the exploration of more complex models. # # [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114) brought about a flexible neural-network based approach. In this framework, the variational inference / variational optimization task of finding the optimal $q$ become a matter of finding the best parameters of a neural network via backpropagation and stochastic gradient descent. Thus making blackbox inference possible as well as allowing scalable to trainng for deeper and larger neural network models. We refer to this framework as **Neural Variational Inference**. # # Here is how it works: # - Select a prior for latent variable $p_\theta(z)$, which may or may not actually involve parameters. # - Use a neural network to parameterize the distribution $p_\theta(x\vert z)$. Because this part of the model maps latent varibale (code) $z$ to observed data $x$, it is viewed as a "decoder" network. # - Rather than explictly calculating the intractable $p(z\vert z)$, use another neural network to parameterize the distribution $q_\phi(z\vert x)$ as the approximate posterior. Due to the mapping from from data $x$ to latent variable (code) $z$, this part of the model is viewed as a "encoder" network. # - The objective is still to maxmize ELBO $\mathcal{L}(\phi,\theta)$. But now instead of separately finding the optimal $\phi$ (corresponding to $q$ in EM) and $\theta$ like EM, we can find the parameters $\theta$ and $\phi$ jointly via standard stochastic gradient descent. # # The resulted model resembles an encoder-decoder structure, thus commonly referred to as **variational auto-encoder (VAE)**. 
# # In the classic example in [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114), we have the prior $p(z)$ as a standard isotropic Gaussian $\mathcal N(0, I)$, and the approximate posterior $q_\phi(z\vert x)$ also be isotropic Gaussian $\mathcal N(\mu_\phi(x), \sigma_\phi(x) I)$, where $\mu_\phi(x)$ and $\sigma_\phi(x)$ are functions implemented as neural networks and their outputs are used as the parameters for the Guassian distribution $q_\phi(z\vert x)$. This model configuration is often referred as **Gaussian VAE**. # # With this setup the training loss to minimize is the negative of ELBO and can be expressed as follows: # # $$ # \begin{equation*} # \begin{split} # - \mathcal L(x_i, \phi,\theta) & = - \mathbb{E}_{z \sim Q_\phi(z|x_i)} \left[\log p_{\theta}(x_i \vert z) + \log p_\theta(z) - \log q_\phi(z\vert x_i) \right] \\ # & = - \mathbb{E}_{z \sim Q_\phi(z|x_i)} \left[\log p_{\theta}(x_i \vert z) \right] + D_{KL}\left[\log q_\phi(z\vert x_i) \| p_\theta(z)\right] \\ # & \approx \underbrace{\frac{1}{L} \sum_s^L \left[-\log p_{\theta}(x_i \vert z_s) \right]}_{\text{Sampling}\ z_s \sim Q_\phi(z\vert x_i)} + \underbrace{D_{KL}\left[\log q_\phi(z\vert x_i) \| p_\theta(z)\right]}_{\text{Can be calculated analytically between Gaussians}} # \end{split} # \end{equation*} # $$ # # where the ELBO above is the same as the ELBO expression in EM but with $p(x,z)$ expanded and with $D_{KL}$ denoting the KL-divergence, i.e. $D_{KL}(Q \| P)= \mathbb{E}_{x\sim Q}[\log(\frac{q(x)}{p(x)}]$. As indicated, the first term can be approximated by drawing $L$ Monte Carlo samples from the distribution $q_\phi(z\vert x)$ (a very feasible task of drawing from an isotropic Gaussian distribution), while the $D_{KL}$ has convenient analytical solutions which is preferred over Monte Carlo samples in order to have lower variance gradient. # # With sampling involved, the remaining question is how do we backpropagate through a sampling node in the computation graph. 
The authors of [Auto-Encoding Variational Bayes](https://arxiv.org/abs/1312.6114) proposed **Reparameterize Trick (RT)**. Instead of sampling $z$ from $\mathcal N(\mu_\phi(x), \sigma_\phi(x) I)$ directly, we sample $\epsilon$ from fixed distribution $\mathcal{N}(0,I)$ and construct $z = \mu(x) + \sigma(x) \cdot \epsilon$. This way the random sampling is based on $\epsilon$, and $z$ deterministically depends on $\mu(x)$ and $\sigma(x)$ allowing gradient to flow through them. RT is a generally applicable technique for distribution that allows location-scale transformation or has analytical inverse CDFs. # # Implementing a Gaussian VAE # # With the theoretical side of things out of the way, let's start implemting a VAE model! import time import numpy as np import mxnet as mx from tqdm import tqdm, tqdm_notebook from mxnet import nd, autograd, gluon from mxnet.gluon import nn import matplotlib.pyplot as plt # %matplotlib inline # + def gpu_exists(): try: mx.nd.zeros((1,), ctx=mx.gpu(0)) except: return False return True data_ctx = mx.cpu() if gpu_exists(): print('Using GPU for model_ctx') model_ctx = mx.gpu(0) else: print('Using CPU for model_ctx') model_ctx = mx.cpu() # - mx.random.seed(1) output_fig = False # ## Load MNIST # + mnist = mx.test_utils.get_mnist() #print(mnist['train_data'][0].shape) #plt.imshow(mnist['train_data'][0][0],cmap='Greys') n_samples = 10 idx = np.random.choice(len(mnist['train_data']), n_samples) _, axarr = plt.subplots(1, n_samples, figsize=(16,4)) for i,j in enumerate(idx): axarr[i].imshow(mnist['train_data'][j][0], cmap='Greys') #axarr[i].axis('off') axarr[i].get_xaxis().set_ticks([]) axarr[i].get_yaxis().set_ticks([]) plt.show() # - train_data = np.reshape(mnist['train_data'],(-1,28*28)) test_data = np.reshape(mnist['test_data'],(-1,28*28)) mnist['test_label'].shape batch_size = 100 n_batches = train_data.shape[0]/batch_size train_iter = mx.io.NDArrayIter(data={'data': train_data}, label={'label': mnist['train_label']}, batch_size = 
batch_size) test_iter = mx.io.NDArrayIter(data={'data': test_data}, label={'label': mnist['test_label']}, batch_size = batch_size) #train_iter = mx.io.NDArrayIter(data={'data': train_data}, batch_size = batch_size) #test_iter = mx.io.NDArrayIter(data={'data': test_data}, batch_size = batch_size) # ## Define model class VAE(gluon.HybridBlock): def __init__(self, n_hidden=400, n_latent=2, n_layers=1, n_output=784, batch_size=100, act_type='relu', **kwargs): self.soft_zero = 1e-10 self.n_latent = n_latent self.batch_size = batch_size self.output = None self.mu = None # note to self: requring batch_size in model definition is sad, not sure how to deal with this otherwise though super(VAE, self).__init__(**kwargs) # self.use_aux_logits = use_aux_logits with self.name_scope(): self.encoder = nn.HybridSequential(prefix='encoder') for i in range(n_layers): self.encoder.add(nn.Dense(n_hidden, activation=act_type)) self.encoder.add(nn.Dense(n_latent*2, activation=None)) self.decoder = nn.HybridSequential(prefix='decoder') for i in range(n_layers): self.decoder.add(nn.Dense(n_hidden, activation=act_type)) self.decoder.add(nn.Dense(n_output, activation='sigmoid')) def hybrid_forward(self, F, x): h = self.encoder(x) #print(h) mu_lv = F.split(h, axis=1, num_outputs=2) mu = mu_lv[0] lv = mu_lv[1] self.mu = mu #eps = F.random_normal(loc=0, scale=1, shape=mu.shape, ctx=model_ctx) # this would work fine only for nd (i.e. 
non-hybridized block) eps = F.random_normal(loc=0, scale=1, shape=(self.batch_size, self.n_latent), ctx=model_ctx) z = mu + F.exp(0.5*lv)*eps y = self.decoder(z) self.output = y KL = 0.5*F.sum(1+lv-mu*mu-F.exp(lv),axis=1) logloss = F.sum(x*F.log(y+self.soft_zero)+ (1-x)*F.log(1-y+self.soft_zero), axis=1) loss = -logloss-KL return loss # + n_hidden=400 n_latent=2 n_layers=2 # num of dense layers in encoder and decoder respectively n_output=784 model_prefix = 'vae_gluon_{}d{}l{}h.params'.format(n_latent, n_layers, n_hidden) net = VAE(n_hidden=n_hidden, n_latent=n_latent, n_layers=n_layers, n_output=n_output, batch_size=batch_size) # - # ## Model training net.collect_params().initialize(mx.init.Xavier(), ctx=model_ctx) net.hybridize() trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': .001}) # + n_epoch = 50 print_period = n_epoch // 10 start = time.time() training_loss = [] validation_loss = [] for epoch in tqdm_notebook(range(n_epoch), desc='epochs'): epoch_loss = 0 epoch_val_loss = 0 train_iter.reset() test_iter.reset() n_batch_train = 0 for batch in train_iter: n_batch_train +=1 data = batch.data[0].as_in_context(model_ctx) with autograd.record(): loss = net(data) loss.backward() trainer.step(data.shape[0]) epoch_loss += nd.mean(loss).asscalar() n_batch_val = 0 for batch in test_iter: n_batch_val +=1 data = batch.data[0].as_in_context(model_ctx) loss = net(data) epoch_val_loss += nd.mean(loss).asscalar() epoch_loss /= n_batch_train epoch_val_loss /= n_batch_val training_loss.append(epoch_loss) validation_loss.append(epoch_val_loss) if epoch % max(print_period,1) == 0: tqdm.write('Epoch{}, Training loss {:.2f}, Validation loss {:.2f}'.format(epoch, epoch_loss, epoch_val_loss)) end = time.time() print('Time elapsed: {:.2f}s'.format(end - start)) # - net.save_parameters(model_prefix) batch_x = np.linspace(1, n_epoch, len(training_loss)) plt.plot(batch_x, -1*np.array(training_loss)) plt.plot(batch_x, -1*np.array(validation_loss)) 
plt.legend(['train', 'valid']) # ## Load model net2 = VAE(n_hidden=n_hidden, n_latent=n_latent, n_layers=n_layers, n_output=n_output, batch_size=batch_size) net2.load_parameters(model_prefix, ctx=model_ctx) # ## Visualizing reconstruction quality test_iter.reset() test_batch = test_iter.next() net2(test_batch.data[0].as_in_context(model_ctx)) result = net2.output.asnumpy() original = test_batch.data[0].asnumpy() n_samples = 10 idx = np.random.choice(batch_size, n_samples) _, axarr = plt.subplots(2, n_samples, figsize=(16,4)) for i,j in enumerate(idx): axarr[0,i].imshow(original[j].reshape((28,28)), cmap='Greys') if i==0: axarr[0,i].set_title('original') #axarr[0,i].axis('off') axarr[0,i].get_xaxis().set_ticks([]) axarr[0,i].get_yaxis().set_ticks([]) axarr[1,i].imshow(result[j].reshape((28,28)), cmap='Greys') if i==0: axarr[1,i].set_title('reconstruction') #axarr[1,i].axis('off') axarr[1,i].get_xaxis().set_ticks([]) axarr[1,i].get_yaxis().set_ticks([]) plt.show() # ## Visualizing latent space (when it is 2D) n_batches = 10 counter = 0 results = [] labels = [] for batch in test_iter: net2(batch.data[0].as_in_context(model_ctx)) results.append(net2.mu.asnumpy()) labels.append(batch.label[0].asnumpy()) counter +=1 if counter >= n_batches: break result= np.vstack(results) labels = np.hstack(labels) if result.shape[1]==2: from scipy.special import ndtri from scipy.stats import norm fig, axarr = plt.subplots(1,2, figsize=(10,4)) im=axarr[0].scatter(result[:, 0], result[:, 1], c=labels, alpha=0.6, cmap='Paired') axarr[0].set_title(r'scatter plot of $\mu$') axarr[0].axis('equal') fig.colorbar(im, ax=axarr[0]) im=axarr[1].scatter(norm.cdf(result[:, 0]), norm.cdf(result[:, 1]), c=labels, alpha=0.6, cmap='Paired') axarr[1].set_title(r'scatter plot of $\mu$ on norm.cdf() transformed coordinates') axarr[1].axis('equal') fig.colorbar(im, ax=axarr[1]) plt.tight_layout() if output_fig: plt.savefig('2d_latent_space_for_test_samples.png') # ## Sample latent space and generate images 
# ### Random sampling

# Draw latent codes from the prior N(0, I) and decode them into images.
n_samples = 10
zsamples = nd.array(np.random.randn(n_samples*n_samples, n_latent))

images = net2.decoder(zsamples.as_in_context(model_ctx)).asnumpy()

# Tile the decoded digits onto a single canvas for display.
canvas = np.empty((28*n_samples, 28*n_samples))
for i, img in enumerate(images):
    x = i // n_samples
    y = i % n_samples
    canvas[(n_samples-y-1)*28:(n_samples-y)*28, x*28:(x+1)*28] = img.reshape(28, 28)
plt.figure(figsize=(4, 4))
plt.imshow(canvas, origin="upper", cmap="Greys")
plt.axis('off')
plt.tight_layout()
if output_fig:
    plt.savefig('generated_samples_with_{}D_latent_space.png'.format(n_latent))

# ### Grid scan 2D latent space

if n_latent==2:
    n_pts = 20
    idx = np.arange(0, n_pts)

    # Place grid points at equal probability-mass intervals of the prior:
    # evenly spaced values in [norm.cdf(-3), norm.cdf(3)] mapped back through
    # the inverse normal CDF (ndtri).
    x = np.linspace(norm.cdf(-3), norm.cdf(3), n_pts)
    x = ndtri(x)

    # Bug fix: the original called np.matlib.repmat, but numpy.matlib is never
    # imported in this notebook (plain `import numpy` does not expose it), so
    # that line raises AttributeError. Passing the 1-D vector to meshgrid once
    # per latent dimension produces the same Cartesian grid.
    x_grid = np.array(np.meshgrid(*([x] * n_latent)))
    id_grid = np.array(np.meshgrid(*([idx] * n_latent)))

    zsamples = nd.array(x_grid.reshape((n_latent, -1)).transpose())
    zsamples_id = id_grid.reshape((n_latent, -1)).transpose()

    images = net2.decoder(zsamples.as_in_context(model_ctx)).asnumpy()

    # plot
    canvas = np.empty((28*n_pts, 28*n_pts))
    for i, img in enumerate(images):
        x, y = zsamples_id[i]
        canvas[(n_pts-y-1)*28:(n_pts-y)*28, x*28:(x+1)*28] = img.reshape(28, 28)
    plt.figure(figsize=(6, 6))
    plt.imshow(canvas, origin="upper", cmap="Greys")
    plt.axis('off')
    plt.tight_layout()
    if output_fig:
        plt.savefig('2d_latent_space_scan_for_generation.png')
chapter13_unsupervised-learning/vae-gluon.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # [Pressure](https://www.youtube.com/watch?v=8tb3RLqY9Xo)
#
# ## Neutral pressure gauges
#
# The calibrated data for the neutral pressure gauges is stored in the IOC shotfile. The gauges have a position and measuring direction that can change during vessel openings. Some gauges might not be available during some shots due to problems with the electronics. But when they work, they are a wonderful diagnostic.

import sys
sys.path.append('ipfnlite/')
sys.path.append('/afs/ipp/aug/ads-diags/common/python/lib/')

from getsig import getsig
import matplotlib.pyplot as plt
#plt.style.use('./Styles/darklab.mplstyle')

shotnr = 34532
f12 = getsig(shotnr, 'IOC', 'F12')
# Bug fix: this signal is gauge F19, but the original bound it to a variable
# named `f18` and plotted it with the label 'F18 [Pa]', mislabelling the data.
f19 = getsig(shotnr, 'IOC', 'F19')

f12.unit  # inspect the unit of the calibrated signal (counts in e/s/m^-2)

# The conversion from e/s/m-2 to Pa takes the ideal gas into account. You can derive it from a kinetic standpoint. The conversion factor is the following:

conv_factor = 1.5e23

plt.plot(f19.time, f19.data/conv_factor, label='F19 [Pa]', lw=0.5)
plt.plot(f12.time, f12.data/conv_factor, label='F12 [Pa]')
plt.ylabel('Pressure [Pa]')
plt.xlabel('time [s]')
plt.ylim(bottom=0)
plt.legend()
plt.show()

# ## Baratrons
#
# There are three baratron ports at AUG. The best description so far of this system is in Arne's [paper](https://iopscience.iop.org/article/10.1088/1361-6587/aaab21/meta). The data is stored in the `MSQ` shotfile.
from getsig import getsig
import matplotlib.pyplot as plt
plt.style.use('./Styles/darklab.mplstyle')

shotnr = 30733
bmid = getsig(shotnr, 'MSQ', 'Pre_MiPl')  # midplane baratron
bhfs = getsig(shotnr, 'MSQ', 'Pre_InDi')  # inner (HFS) divertor baratron
blfs = getsig(shotnr, 'MSQ', 'Pre_OuDi')  # outer (LFS) divertor baratron

blfs.unit  # inspect the stored unit of the raw signal

# Bug fix: the midplane label was misspelled 'Midplabe' in the original.
# The *100 scaling presumably converts the stored unit to Pa (mbar -> Pa)
# — TODO confirm against the shotfile unit above.
plt.plot(bmid.time, bmid.data*100, label='Midplane')
plt.plot(bhfs.time, bhfs.data*100, label='Inner')
plt.plot(blfs.time, blfs.data*100, label='Outer')
plt.legend()
plt.ylabel('Pressure [Pa]')
plt.xlabel('time [s]')
plt.ylim(bottom=0)
plt.show()

# ## All the Pressure, [remixed](https://www.youtube.com/watch?v=IH-CyuXfC-Q)
#
# <NAME> has developed a very nice set of packages to handle pressure data that also include Residual Gas Analysis.
#
# To install all the packages you need to clone the several git repos:
#
# * [pressure_gauge_data](ssh://git@gitlab.<EMAIL>.ipp.mpg.de:2222/adrenik/pressure_gauge_data.git)
# * [rga_min](<EMAIL>:adrenik/rga_min.git)
# * [rga_wrapper_raw](ssh://git@gitlab.aug.ipp.mpg.de:2222/adrenik/rga_wrapper_raw.git)
# * [RGA_public](ssh://git@gitlab.<EMAIL>.ipp.mpg.de:2222/adrenik/RGA_results_library_public.git)
#
# And then run the following command under each folder to install the package:
#
# `python setup.py install --user`
#
# There is also an RGA tutorial:
#
# * [RGA tutorial](https://github.com/sdrenik/rga_data_handling_tutorials.git)

import pressure_gauge_data as pgd
import matplotlib.pyplot as plt
#plt.style.use('./Styles/darklab.mplstyle')

bar = pgd.baratrons(30733)
bar.plot_signals()

# Or alternatively:

plt.plot(bar.joined_signals['Co'][0], bar.joined_signals['Co'][1]*100, label='midplane')
plt.plot(bar.joined_signals['Fu'][0], bar.joined_signals['Fu'][1]*100, label='inner')
plt.plot(bar.joined_signals['Cu'][0], bar.joined_signals['Cu'][1]*100, label='outer')
plt.xlim(0,9)
plt.ylim(0,6)
plt.xlabel('time [s]')
plt.ylabel('Pressure [Pa]')
plt.title('#30733',loc='left')
plt.legend(loc='best')
plt.show()
10-Pressure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import optuna from optuna import Trial from fbprophet import Prophet import warnings from sklearn.metrics import mean_squared_error warnings.filterwarnings("ignore") df = pd.read_csv( "../input/10min/ontune2016.csv", usecols=["date", "value"], parse_dates=["date"] ) print(df.shape) df.head() df.rename(columns={"date": "ds", "value": "y"}, inplace=True) df.tail() train = df[df['ds'] < '2021-03-20'] valid = df[df['ds'] >= '2021-03-20'].copy() valid["days"] = valid["ds"].apply(lambda x: x.day) valid = valid.groupby("days")["y"].agg("mean").reset_index() print(train.shape) print(valid.shape) cap = np.max(train.y) floor = np.min(train.y) def objective(trial: Trial) -> float: params = { "changepoint_range": trial.suggest_discrete_uniform( "changepoint_range", 0.8, 0.95, 0.001 ), "n_changepoints": trial.suggest_int("n_changepoints", 20, 35), "changepoint_prior_scale": trial.suggest_discrete_uniform( "changepoint_prior_scale", 0.001, 0.5, 0.001 ), "seasonality_prior_scale": trial.suggest_discrete_uniform( "seasonality_prior_scale", 1, 25, 0.5 ), "yearly_fourier": trial.suggest_int("yearly_fourier", 5, 15), "monthly_fourier": trial.suggest_int("monthly_fourier", 3, 12), "weekly_fourier": trial.suggest_int("weekly_fourier", 3, 7), "quaterly_fourier": trial.suggest_int("quaterly_fourier", 3, 10), "yearly_prior": trial.suggest_discrete_uniform("yearly_prior", 1, 25, 0.5), "monthly_prior": trial.suggest_discrete_uniform("monthly_prior", 1, 25, 0.5), "weekly_prior": trial.suggest_discrete_uniform("weekly_prior", 1, 25, 0.5), "quaterly_prior": trial.suggest_discrete_uniform("quaterly_prior", 1, 25, 0.5), "growth": "logistic", "seasonality_mode": "additive", 
"weekly_seasonality": True, "daily_seasonality": True, } # fit_model m = Prophet( changepoint_range=params["changepoint_prior_scale"], n_changepoints=params["n_changepoints"], changepoint_prior_scale=params["changepoint_prior_scale"], seasonality_prior_scale=params["seasonality_prior_scale"], yearly_seasonality=False, weekly_seasonality=True, daily_seasonality=True, growth="logistic", seasonality_mode="additive", ) m.add_seasonality( name="yearly", period=365.25, fourier_order=params["yearly_fourier"], prior_scale=params["yearly_prior"], ) m.add_seasonality( name="monthly", period=30.5, fourier_order=params["monthly_fourier"], prior_scale=params["monthly_prior"], ) m.add_seasonality( name="weekly", period=7, fourier_order=params["weekly_fourier"], prior_scale=params["weekly_prior"], ) m.add_seasonality( name="quaterly", period=365.25 / 4, fourier_order=params["quaterly_fourier"], prior_scale=params["quaterly_prior"], ) train["cap"] = cap train["floor"] = floor m.fit(train) future = m.make_future_dataframe(periods=144, freq="d") future["cap"] = cap future["floor"] = floor forecast = m.predict(future) valid_forecast = forecast.tail(7) val_mse = mean_squared_error(valid.y, valid_forecast.yhat, squared=False) return val_mse study = optuna.create_study(direction="minimize", sampler=optuna.samplers.TPESampler(seed=42)) study.optimize(objective, n_trials=20) # + # fit_model m = Prophet( changepoint_range=study.best_params["changepoint_prior_scale"], n_changepoints=study.best_params["n_changepoints"], seasonality_prior_scale=study.best_params["seasonality_prior_scale"], changepoint_prior_scale=study.best_params["changepoint_prior_scale"], yearly_seasonality=False, weekly_seasonality=False, daily_seasonality=False, growth="logistic", seasonality_mode="additive", ) m.add_seasonality( name="yearly", period=365.25, fourier_order=study.best_params["yearly_fourier"], prior_scale=study.best_params["yearly_prior"], ) m.add_seasonality( name="monthly", period=30.5, 
fourier_order=study.best_params["monthly_fourier"], prior_scale=study.best_params["monthly_prior"], ) m.add_seasonality( name="weekly", period=7, fourier_order=study.best_params["weekly_fourier"], prior_scale=study.best_params["weekly_prior"], ) m.add_seasonality( name="quaterly", period=365.25 / 4, fourier_order=study.best_params["quaterly_fourier"], prior_scale=study.best_params["quaterly_prior"], ) train["cap"] = cap train["floor"] = floor m.fit(train) future = m.make_future_dataframe(periods=365, freq="d") future["cap"] = cap future["floor"] = floor forecast = m.predict(future) forecast_plot = m.plot(forecast) # - cap = np.max(df.y) floor = np.min(df.y) df['cap'] = cap df['floor'] = floor # fit_model m = Prophet( changepoint_range=study.best_params["changepoint_prior_scale"], n_changepoints=study.best_params["n_changepoints"], seasonality_prior_scale=study.best_params["seasonality_prior_scale"], changepoint_prior_scale=study.best_params["changepoint_prior_scale"], yearly_seasonality=False, weekly_seasonality=False, daily_seasonality=False, growth="logistic", seasonality_mode="additive", ) m.add_seasonality( name="yearly", period=365.25, fourier_order=study.best_params["yearly_fourier"], prior_scale=study.best_params["yearly_prior"], ) m.add_seasonality( name="monthly", period=30.5, fourier_order=study.best_params["monthly_fourier"], prior_scale=study.best_params["monthly_prior"], ) m.add_seasonality( name="weekly", period=7, fourier_order=study.best_params["weekly_fourier"], prior_scale=study.best_params["weekly_prior"], ) m.add_seasonality( name="quaterly", period=365.25 / 4, fourier_order=study.best_params["quaterly_fourier"], prior_scale=study.best_params["quaterly_prior"], ) m.fit(df) future = m.make_future_dataframe(periods=365, freq="d") future['cap'] = cap future['floor'] = floor forecast1 = m.predict(future) fig1 = m.plot(forecast1) m.plot_components(forecast1)
notebook/ontune2016.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Programming Exercise 3 - Multi-class Classification and Neural Networks # + # # %load ../../standard_import.txt import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # load MATLAB files from scipy.io import loadmat from scipy.optimize import minimize from sklearn.linear_model import LogisticRegression pd.set_option('display.notebook_repr_html', False) pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', 150) pd.set_option('display.max_seq_items', None) # #%config InlineBackend.figure_formats = {'pdf',} # %matplotlib inline import seaborn as sns sns.set_context('notebook') sns.set_style('white') # - # #### Load MATLAB datafiles data = loadmat('data/ex3data1.mat') data.keys() weights = loadmat('data/ex3weights.mat') weights.keys() # + y = data['y'] # Add constant for intercept X = np.c_[np.ones((data['X'].shape[0],1)), data['X']] print('X: {} (with intercept)'.format(X.shape)) print('y: {}'.format(y.shape)) # + theta1, theta2 = weights['Theta1'], weights['Theta2'] print('theta1: {}'.format(theta1.shape)) print('theta2: {}'.format(theta2.shape)) # - sample = np.random.choice(X.shape[0], 20) plt.imshow(X[sample,1:].reshape(-1,20).T) plt.axis('off'); # ### Multiclass Classification # #### Logistic regression hypothesis # #### $$ h_{\theta}(x) = g(\theta^{T}x)$$ # #### $$ g(z)=\frac{1}{1+e^{−z}} $$ def sigmoid(z): return(1 / (1 + np.exp(-z))) # #### Regularized Cost Function # #### $$ J(\theta) = \frac{1}{m}\sum_{i=1}^{m}\big[-y^{(i)}\, log\,( h_\theta\,(x^{(i)}))-(1-y^{(i)})\,log\,(1-h_\theta(x^{(i)}))\big] + \frac{\lambda}{2m}\sum_{j=1}^{n}\theta_{j}^{2}$$ # #### Vectorized Cost Function # #### $$ J(\theta) = \frac{1}{m}\big((\,log\,(g(X\theta))^Ty+(\,log\,(1-g(X\theta))^T(1-y)\big) 
+ \frac{\lambda}{2m}\sum_{j=1}^{n}\theta_{j}^{2}$$ def lrcostFunctionReg(theta, reg, X, y): m = y.size h = sigmoid(X.dot(theta)) J = -1*(1/m)*(np.log(h).T.dot(y)+np.log(1-h).T.dot(1-y)) + (reg/(2*m))*np.sum(np.square(theta[1:])) if np.isnan(J[0]): return(np.inf) return(J[0]) def lrgradientReg(theta, reg, X,y): m = y.size h = sigmoid(X.dot(theta.reshape(-1,1))) grad = (1/m)*X.T.dot(h-y) + (reg/m)*np.r_[[[0]],theta[1:].reshape(-1,1)] return(grad.flatten()) # #### One-vs-all Classification def oneVsAll(features, classes, n_labels, reg): initial_theta = np.zeros((X.shape[1],1)) # 401x1 all_theta = np.zeros((n_labels, X.shape[1])) #10x401 for c in np.arange(1, n_labels+1): res = minimize(lrcostFunctionReg, initial_theta, args=(reg, features, (classes == c)*1), method=None, jac=lrgradientReg, options={'maxiter':50}) all_theta[c-1] = res.x return(all_theta) theta = oneVsAll(X, y, 10, 0.1) # #### One-vs-all Prediction def predictOneVsAll(all_theta, features): probs = sigmoid(X.dot(all_theta.T)) # Adding one because Python uses zero based indexing for the 10 columns (0-9), # while the 10 classes are numbered from 1 to 10. return(np.argmax(probs, axis=1)+1) pred = predictOneVsAll(theta, X) print('Training set accuracy: {} %'.format(np.mean(pred == y.ravel())*100)) # #### Multiclass Logistic Regression with scikit-learn clf = LogisticRegression(C=10, penalty='l2', solver='liblinear') # Scikit-learn fits intercept automatically, so we exclude first column with 'ones' from X when fitting. clf.fit(X[:,1:],y.ravel()) pred2 = clf.predict(X[:,1:]) print('Training set accuracy: {} %'.format(np.mean(pred2 == y.ravel())*100)) # ### Neural Networks def predict(theta_1, theta_2, features): z2 = theta_1.dot(features.T) a2 = np.c_[np.ones((data['X'].shape[0],1)), sigmoid(z2).T] z3 = a2.dot(theta_2.T) a3 = sigmoid(z3) return(np.argmax(a3, axis=1)+1) pred = predict(theta1, theta2, X) print('Training set accuracy: {} %'.format(np.mean(pred == y.ravel())*100))
notebooks/.ipynb_checkpoints/Programming Exercise 3 - Multi-class Classification and Neural Networks-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Official data indicators # # Here we collect data and create indicators of industrial activity in NUTS2 areas based on BRES and NOMIS data. # # <!-- We have already collected the data by running `make data` in the terminal. This has stored the BRES and IDBR data in the `data/external/` folder, and processed it into Nesta segments (a shorter number of industrial categories) in `data/processed` # --> # # This involves running the script `make_dataset.py`. This already results in indicators that are relevant for the project, such as complexity. # # We will also create a clean table and an indicator of the share of employment working in high median salary occupations according to the ashe data we calculate in `0-jmg-ashe_sectoral`. # # ## Preamble # %run ../notebook_preamble.ipy #Need to put this in utils def make_dirs(name,dirs = ['raw','processed','interim']): ''' Utility that creates directories to save the data ''' for d in dirs: if name not in os.listdir(f'../../data/{d}'): os.mkdir(f'../../data/{d}/{name}') def create_local_industry_dataset(path,salary_lookup,cluster_name,year,save=False): ''' This creates a long dataset with industry activity per NUTS area and extra variables with share of activity in top two deciles of salary, and bottom two deciles of salary Arguments: path (str) path to a tidy dataframe with the industrial activity information (could be employment or establishments) salary_lookup (dict) a lookup between industry segments and position in the salary distribution cluster_name (str) name of the cluster variable in the industry df ''' #Read the data industry = pd.read_csv(path,dtype={'SIC4':str}) #Label with salary info industry['median_salary_decile'] = industry[cluster_name].map(ashe_lookup) #Create wide dataset with industry activity per geography industry_long = industry.groupby( 
['geo_nm','geo_cd',cluster_name])['value'].sum().reset_index(drop=False).pivot_table( index=['geo_nm','geo_cd'],columns=cluster_name,values='value') #Share of activity in top and bottom of salary distribution salary_long = industry.groupby( ['geo_nm','geo_cd','median_salary_decile'])['value'].sum().reset_index(drop=False).pivot_table( index=['geo_nm','geo_cd'],columns='median_salary_decile',values='value') #Top of distro high_salary = salary_long.apply(lambda x: x/x.sum(),axis=1)[[8,9]].sum(axis=1) #Bottom of distro low_salary = salary_long.apply(lambda x: x/x.sum(),axis=1)[[0,1]].sum(axis=1) salary_stats = pd.concat([high_salary,low_salary],axis=1) #Names salary_stats.columns = ['top_20_salary_share','bottom_20_salary_share'] #Concatenate combined = pd.concat([industry_long,salary_stats],axis=1) if save==True: #Take the informative bit of the name name = '_'.join(path.split('_')[1:3]) combined.to_csv(f'../../data/interim/industry/{today_str}_{name}_industry_salary.csv') salary_stats['year']=year return(salary_stats) #Return everything #return(salary_long) # + def extract_segment(path,sector_list,sector_variable,sector_name): ''' This function takes official data from a path and returns a segment of interest. We will use it to produce indicators about cultural activities in different NUTS2 regions. Arguments: path (str) is the path we use segment (list) is the list of codes we are interested in - could be segments or sectors sector_variable (str) is the variable that we use to identify sectors. It could be the sic code or the Nesta segment. 
''' #Read data all_sectors = pd.read_csv(path,dtype={'SIC4':str}) #Activity in sector sector = all_sectors.loc[[x in sector_list for x in all_sectors[sector_variable]]].reset_index( drop=True) #Regroup and aggregate sector_agg = sector.groupby(['geo_nm','geo_cd','year'])['value'].sum() #Add the name sector_agg.name = sector_name #Create dataframe so we can add years #sector_agg = pd.DataFrame(sector_agg) #And add years #sector_agg['year'] = year return(pd.DataFrame(sector_agg)) # + def make_indicator(table,target_path,var_lookup,year_var,nuts_var='nuts_code',nuts_spec=2018,decimals=3): ''' We use this function to create and save indicators using our standardised format. Args: table (df) is a df with relevant information target_path (str) is the location of the directory where we want to save the data (includes interim and processed) var_lookup (dict) is a lookup to rename the variable into our standardised name year (str) is the name of the year variable nuts_var (str) is the name of the NUTS code variable. We assume it is nuts_code nuts_spec (y) is the value of the NUTS specification. 
We assume we are working with 2018 NUTS ''' #Copy t = table.reset_index(drop=False) #Reset index (we assume that the index is the nuts code, var name and year - this might need to be changed) #Process the interim data into an indicator #This is the variable name and code var_name = list(var_lookup.keys())[0] var_code = list(var_lookup.values())[0] #Focus on those t = t[[year_var,nuts_var,var_name]] #Add the nuts specification t['nuts_year_spec'] = nuts_spec #Rename variables t.rename(columns={var_name:var_code,year_var:'year',nuts_var:'region_id'},inplace=True) #Round variables t[var_code] = [np.round(x,decimals) if decimals>0 else int(x) for x in t[var_code]] #Reorder variables t = t[['year','region_id','nuts_year_spec',var_code]] print(t.head()) #Save in the processed folder t.to_csv(f'../../data/processed/{target_path}/{var_code}.csv',index=False) # - make_dirs('industry') # ## Read data # ### Sector names # + #Cultural industries cultural = ['services_cultural','services_recreation','services_entertainment'] # - # ### metadata (ASHE) # # This is a lookup indicating the position in the salary distribution of various industries based on the analysis in the `ashe` notebook # + #Read ashe and turn it into a lookup ashe = pd.read_csv('../../data/interim/industry/2020_02_18_ashe_rankings.csv') ashe_lookup = ashe.set_index('cluster')['ashe_median_salary_rank'].to_dict() # + #bres bres_2018 = pd.read_csv('../../data/interim/industry/nomis_BRES_2018_TYPE450.csv',dtype={'SIC4':str}, index_col=None) bres_2018['sal'] = bres_2018['cluster_name'].map(ashe_lookup) bres_2018 # - # ### Make indicators # #### Level of employment in the cultural industries bres_cult = pd.concat([extract_segment( f'../../data/interim/industry/nomis_BRES_{y}_TYPE450.csv',cultural,'cluster_name', 'culture_entertainment_recreation') for y in [2016,2017,2018]]) make_indicator(bres_cult, 'industry', {'culture_entertainment_recreation':'employment_culture_entertainment_recreation'},year_var='year', 
nuts_spec=2013,nuts_var='geo_cd',decimals=0) # #### Level of employment and business activity in sectors with different salaries # # We are not saving these for now as they are not key to the project bres_nuts,idbr_nuts = [pd.concat([create_local_industry_dataset( f'../../data/interim/industry/nomis_{data}_{y}_TYPE450.csv',ashe_lookup,'cluster_name',y) for y in [2016,2017,2018]]) for data in ['BRES','IDBR']] # #### Complexity compl= pd.read_csv('../../data/interim/industry/nomis_ECI.csv') # + #make_indicator(compl.loc[compl['source']=='BRES'], # 'industry', # {'eci':'economic_complexity_index'},year_var='year',nuts_spec=2013,nuts_var='geo_cd') # -
ds/notebooks/dev/08_jmg_bres_idbr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Aufgabe 4 # a) Erforderlicher Außenluftvolumenstrom # Da die Bedingungen die gleichen sind, wie in Aufgabe 3, muss auch der Außenluftvolumenstrom der gleiche sein, wie dort. # b) Verlauf der Schadstoffkonzentration im Raum # + V_ra = 40*2.5 # m**3 k_au = k_0 = 400e-6 # 400 ppM k_zul = 1000e-6 # 1000 ppM dV_co2 = 4*30e-3 # m**3/h dV_au = dV_co2/(k_zul-k_au) beta = dV_au/V_ra beta # + # %config InlineBackend.figure_format='retina' # nur MacBook Pro import numpy as np import pandas as pd # + lt = np.linspace(0,4) # Zeitintervall von vier Stunden df = pd.DataFrame( { 't': lt, 'k': (k_zul + (k_0-k_zul)*np.exp(-beta*lt))*1e6 # in ppM } ) df.head().T # + ax = df.plot(x='t',y='k',label='$k=k(t)$') ax.axhline(k_zul*1e6,c='r') # in ppM ax.set( xlim=(0,4),xlabel='Zeit $t$ in Stunden', ylim=(0,1200),ylabel='CO_2-Konzentration $k$ in ppM', ) ax.grid()
Aufgaben/A_4_Lsg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neural Architecture Search # Developing neural network models often requires significant architecture engineering. User can sometimes get by with **transfer learning**, but if user really want the best possible performance it’s usually best to design your own network. This requires specialised skills and is challenging in general. # # We may not even know the limits of the current state-of-the-art techniques! It’s a lot of trial and error and the experimentation itself is time consuming and expensive. # # This is where **NAS** comes in. NAS is an algorithm that searches for the best **neural network architecture**. # ## Katib AutoML/NAS Support # Katib is a scalable Kubernetes-native general AutoML platform that can # support a range of AutoML algorithms including both hyperparameter tuning and neural architecture search. # # AutoML algorithms share the common ground that they run in an iterative manner. The user first defines the search space, metrics target and maximum iterations. The algorithm searches for the optimal solution until the target metrics or the maximum number of iterations is reached. However, they may vary in terms of their internal mechanisms # ## NAS Examples # !kubectl apply -f nasjob-example-RL.yaml # !kubectl describe experiment nas-rl-example # ### Monitoring # # You can monitor your results in the Katib UI. If you installed Kubeflow using the deployment guide, you can access the Katib UI at `https://<your kubeflow endpoint>/katib/` # # For this job, please go to `NAS (HypterParameter)` -> `Monitor` -> `nas-rl-example`. # ### Pick up best parameters in from results # # Once you click job and go the detail page, you will see different combination of parameters and accuracy. 
# # # | trialName | Validation-accuracy | accuracy | --lr | --num-layers | --optimizer| # |----------------------------|----------|----------|----------------------|---|------| # | random-experiment-rfwwbnsd | 0.974920 | 0.984844 | 0.013831565266960293 | 4 | sgd | # | random-experiment-vxgwlgqq | 0.113854 | 0.116646 | 0.024225789898529138 | 4 | ftrl | # | random-experiment-wclrwlcq | 0.979697 | 0.998437 | 0.021916171239020756 | 4 | sgd | # | random-experiment-7lsc4pwb | 0.113854 | 0.115312 | 0.024163810384272653 | 5 | ftrl | # | random-experiment-86vv9vgv | 0.963475 | 0.971562 | 0.02943228249244735 | 3 | adam | # | random-experiment-jh884cxz | 0.981091 | 0.999219 | 0.022372025623908262 | 2 | sgd | # | random-experiment-sgtwhrgz | 0.980693 | 0.997969 | 0.016641686851083654 | 4 | sgd | # | random-experiment-c6vvz6dv | 0.980792 | 0.998906 | 0.0264125850165842 | 3 | sgd | # | random-experiment-vqs2xmfj | 0.113854 | 0.105313 | 0.026629394628228185 | 4 | ftrl | # | random-experiment-bv8lsh2m | 0.980195 | 0.999375 | 0.021769570793012488 | 2 | sgd | # | random-experiment-7vbnqc7z | 0.113854 | 0.102188 | 0.025079750575740783 | 4 | ftrl | # | random-experiment-kwj9drmg | 0.979498 | 0.995469 | 0.014985919312945063 | 4 | sgd | # # # ![katib-experiment-result.png](./images/katib-experiment-result.png) # # You can also click a trial name to check Trial data.
notebooks/08_Hyperparameter_Tuning/08_02_Neural_Architecture_Search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Converting Ó Raghallaigh (2010) # > "Re-implementation of the 'global' phonetiser, plus Kerry" # # - toc: false # - branch: master # - badges: true # - comments: true # - categories: [irish, g2p, kerry] # This notebook contains a re-implementation of the "global" phonetiser from Ó Raghallaigh's Ph.D. thesis using [rbg2p](https://github.com/stts-se/rbg2p), along with the "local" phonetiser for Kerry. # # The global phonetiser here is essentially the same as [the earlier one]({% post_url 2021-05-16-o-raghallaigh-thesis-attempt-1 %}), except the output phonemes are lowercase, and there are no spaces between output phonemes, to work around some slight limitations of rbg2p. # # Ó Raghallaigh (2010). *Multi-dialect phonetisation for Irish text-to-speech synthesis: a modular approach.* (Doctoral thesis, Trinity College, Dublin), Appendix B.1 # + # %%writefile oraghallaigh.g2p CHARACTER_SET "aábcdeéfghiíjklmnoópqrstuúvwxyz'-’" DEFAULT_PHONEME "_" PHONEME_DELIMITER "" VAR CONS [bcdfghjklmnpqrstvwxyz] VAR CONSS [bcdfghjklmnpqrstvwxyz]* VAR CONSP [bcdfghjklmnpqrstvwxyz]+ VAR NONSYLLABIC [ábcdfghjklmnópqrstúvwxyz] // broad future/conditional endings VAR BFCE (á|adh|aidh|aidís|aimid|aimis|ainn|as) // slender future/conditional endings VAR SFCE (eá|eadh|idh|idís|imid|imis|inn) VAR FMP [fmp] VAR LNRP [lnr]+ VAR DNLST [dnlst] VAR RDNLR (rd|rn|rl|rr) VAR VOWEL [aáeéiíoóuú] VAR VOWELS [aáeéiíoóuú]* VAR VOWELP [aáeéiíoóuú]+ // left context short broad vowel VAR LCSBV (ea|io|iu|a|o|u) // left context short slender vowel VAR LCSSV (ai|eai|ei|e|iui|i|oi|ui) // left context broad vowel VAR LCBV (adh|ae|ao|aá|ea|eá|eo|éa|io|iu|iú|ío|oó|uío|ua|u|ú) // left context slender vowel VAR LCSV (aei|aidh|ai|aí|aoi|ái|eai|eái|ei|eoi|e|éi|é|iai|iui|iúi|i|í|oi|ói|uai|ui|uí|úi) VAR LCSVS 
(aei|aidh|ai|aí|aoi|ái|eai|eái|ei|eoi|e|éi|é|iai|iui|iúi|i|í|oi|ói|uai|ui|uí|úi)* // right context slender vowel VAR RCSV (eai|ea|eái|eá|ei|eoi|eo|e|éa|éi|é|iai|ia|io|iui|iu|iúi|iú|i|ío|í) // left context long vowel VAR LCLV (aei|ae|aoi|ao|ái|á|eái|eá|eoi|eó|eo|éi|é|iúi|iú|ío|í|ói|ó|uío|uí|úi|ú) // left context slender long vowel VAR LCSLV (aei|aidh|aoi|ái|eái|eoi|éi|é|iúi|í|ói|uai|uí|úi) ádh -> aa ái -> aa á -> aa abh -> abh adh -> adh / _ # adh -> ai agh -> ai aei -> ee ae -> ee aíodh -> íodh / _ # aío -> aío aí -> ii aidh -> idh / _ # aidh -> ai aigh -> igh / _ # aigh -> ai aithe -> ithe / _ # ai -> ∅ / # CONSS VOWELS (abh|adh|agh|amh|ódh|ogh|omh|umh) _ CONSP ai -> ∅ / # CONSS (obh|odh) _ CONSP ai -> aa / # CONSS _ RDNLR ai -> a / # CONSS _ ai -> @@ / VOWELP CONSP _ ai -> a amh -> amh / _ # amh -> au aoi -> ao ao -> ao a -> ∅ / # CONSS VOWELS (abh|adh|agh|amh|ódh|ogh|omh|umh) _ CONSP a -> ∅ / # CONSS (obh|odh) _ CONSP # addition a -> ∅ / # CONSS VOWELS ogh _ # omh -> OO / (gc|ch|c) _ (ai|a) r a -> aa / # CONSS _ RDNLR a -> a / # CONSS _ a -> @@ / VOWELP CONSP _ a -> a éa -> ee éi -> ee é -> ee eái -> aa eá -> aa eabh -> abh eadh -> adh / _ # eadh -> au eagh -> ai eai -> a eamh -> amh / _ # eamh -> au / # CONSS _ # VOWEL, or VOWELS ?? 
ea -> ∅ / # CONSS VOWEL igh _ CONSP ea -> aa / # CONSS _ RDNLR ea -> a / # CONSS _ ea -> @@ / VOWELP CONSP _ ea -> a eidh -> eidh eigh -> eigh ei -> ee / # CONSS _ RDNLR # ei -> ee / # CONSS _ RDNLR NONSYLLABIC ei -> e / # CONSS _ ei -> e eódh -> oo eoi -> oo eó -> oo eo -> oo e -> e / # CONSS _ e -> @@ / VOWELP CONSP _ e -> e íodh -> íodh / _ # ío -> ii í -> ii iadh -> i@ iath -> i@ iai -> i@ ia -> i@ idh -> idh igh -> igh io -> io ithe -> ithe / _ # iúi -> uu iú -> uu iubh -> ubh iumh -> uu iui -> uu iu -> u i -> ∅ / # CONSS VOWEL idh _ CONSP # i -> ∅ / # CONSS VOWEL igh _ CONSP i -> @@ / VOWELP CONSP _ i -> I ódh -> ódh / _ # ói -> oo ó -> oo obh -> obh odh -> odh ogh -> ogh oí -> ii oidh -> oidh oigh -> oigh oi -> oo / # CONSS _ RDNLR oi -> @@ / # VOWELP CONSP _ oi -> oi omh -> omh o -> oo / # CONSS _ RDNLR # o -> oo / # CONSS _ RDNLR NONSYLLABIC o -> o / # VOWELP CONSP _ o -> o úi -> uu ú -> uu uath -> u@ uai -> u@ ua -> u@ ubh -> ubh ue -> e ui -> uu / # CONSS _ RDNLR # ui -> uu / # CONSS _ RDNLR NONSYLLABIC ui -> i / # CONSS _ ui -> @@ / VOWELP CONSP _ ui -> ui uío -> ii uí -> ii umh -> uu u -> uu / # CONSS _ RDNLR u -> u / # CONSS _ u -> @@ / VOWELP CONSP _ u -> u bf -> p / _ BFCE # bf -> pj / _ SFCE # bhf -> vj / # _ CONSS RCSV # bhf -> vj / # _ CONSS RCSV NONSYLLABIC bhf -> v / # _ bhf -> f / _ BFCE # bhf -> fj / _ SFCE # bh -> @@ v / # LCSBV LNRP _ bh -> @@ v / NONSYLLABIC LCSBV LNRP _ bh -> @@ vj / # LCSSV LNRP _ bh -> @@ vj / NONSYLLABIC LCSSV LNRP _ bh -> vj / # _ CONSS RCSV # bh -> vj / # _ CONSS RCSV NONSYLLABIC bh -> v / # _ bh -> vj / _ CONSS RCSV # bh -> vj / _ CONSS RCSV NONSYLLABIC bh -> vj / # LCSV CONSP _ bh -> vj / NONSYLLABIC LCSV CONSP _ bh -> vj / # LCSV _ bh -> vj / NONSYLLABIC LCSV _ bh -> v bp -> bj / # _ CONSS RCSV # bp -> bj / # _ CONSS RCSV NONSYLLABIC bp -> b / # _ bth -> p / LCBV CONSS _ bth -> pj / LCSV CONSS _ b -> @@ b / # LCSBV LNRP _ b -> @@ b / NONSYLLABIC LCSBV LNRP _ b -> @@ bj / # LCSSV LNRP _ b -> @@ bj / NONSYLLABIC 
LCSSV LNRP _ b -> bj / _ CONSS RCSV # b -> bj / _ CONSS RCSV NONSYLLABIC b -> bj / # LCSV CONSP _ b -> bj / NONSYLLABIC LCSV CONSP _ b -> bj / # LCSV _ b -> bj / NONSYLLABIC LCSV _ b -> b cf -> k / _ BFCE # cf -> kj / _ SFCE # chf -> x / _ BFCE # chf -> xj / _ SFCE # ch -> @@ x / # LCSBV LNRP _ ch -> @@ x / NONSYLLABIC LCSBV LNRP _ ch -> @@ xj / # LCSSV LNRP _ ch -> @@ xj / NONSYLLABIC LCSSV LNRP _ ch -> xj / # _ CONSS RCSV # ch -> xj / # _ CONSS RCSV NONSYLLABIC ch -> x / # _ ch -> xj / _ CONSS RCSV # ch -> xj / _ CONSS RCSV NONSYLLABIC ch -> xj / # LCSV CONSP _ ch -> xj / NONSYLLABIC LCSV CONSP _ ch -> xj / # LCSV _ ch -> xj / NONSYLLABIC LCSV _ ch -> x cth -> k / LCBV CONSS _ cth -> kj / LCSV CONSS _ c -> kj / _ CONSS RCSV # c -> kj / _ CONSS RCSV NONSYLLABIC c -> kj / # LCSV CONSP _ c -> kj / NONSYLLABIC LCSV CONSP _ c -> kj / # LCSV _ c -> kj / NONSYLLABIC LCSV _ c -> k df -> t / _ BFCE # df -> tj / _ SFCE # dha -> ∅ / # LCLV _ dha -> ∅ / NONSYLLABIC LCLV _ dh -> gfj / # LCSLV _ dh -> gfj / NONSYLLABIC LCSLV _ dh -> gfj / # CONSS LCSVS _ # dh -> ∅ / # LCLV _ dh -> ∅ / NONSYLLABIC LCLV _ dh -> gfj / # _ CONSS RCSV # dh -> gfj / # _ CONSS RCSV NONSYLLABIC dh -> gf / # _ dh -> gfj / _ CONSS RCSV # dh -> gfj / _ CONSS RCSV NONSYLLABIC dh -> gfj / # LCSV CONSP _ dh -> gfj / NONSYLLABIC LCSV CONSP _ dh -> gfj / # LCSV _ dh -> gfj / NONSYLLABIC LCSV _ dh -> gf dt -> dj / # _ CONSS RCSV # dt -> dj / # _ CONSS RCSV NONSYLLABIC dt -> d / # _ dt -> t / LCBV CONSS _ dt -> tj / LCSV CONSS _ d -> dj / _ CONSS RCSV # d -> dj / _ CONSS RCSV NONSYLLABIC d -> dj / # LCSV CONSP _ d -> dj / NONSYLLABIC LCSV CONSP _ d -> dj / # LCSV _ d -> dj / NONSYLLABIC LCSV _ d -> d fh -> ∅ f -> h / VOWEL _ BFCE # f -> hj / VOWEL _ SFCE # f -> @@ f / # LCSBV LNRP _ f -> @@ f / NONSYLLABIC LCSBV LNRP _ f -> @@ fj / # LCSSV LNRP _ f -> @@ fj / NONSYLLABIC LCSSV LNRP _ f -> fj / _ CONSS RCSV # f -> fj / _ CONSS RCSV NONSYLLABIC f -> fj / # LCSV CONSP _ f -> fj / NONSYLLABIC LCSV CONSP _ f -> fj / 
# LCSV _ f -> fj / NONSYLLABIC LCSV _ f -> f gc -> gj / # _ CONSS RCSV # gc -> gj / # _ CONSS RCSV NONSYLLABIC gc -> g / # _ gf -> k / _ BFCE # gf -> kj / _ SFCE # gh -> gfj / # _ CONSS RCSV # gh -> gfj / # _ CONSS RCSV NONSYLLABIC gh -> gf / # _ gh -> gfj / # LCSLV _ gh -> gfj / NONSYLLABIC LCSLV _ gh -> gfj / # CONSS LCSVS _ # gh -> ∅ / # LCLV _ gh -> ∅ / NONSYLLABIC LCLV _ gh -> gfj / _ CONSS RCSV # gh -> gfj / _ CONSS RCSV NONSYLLABIC gh -> gfj / # LCSV CONSP _ gh -> gfj / NONSYLLABIC LCSV CONSP _ gh -> gfj / # LCSV _ gh -> gfj / NONSYLLABIC LCSV _ gh -> gf gth -> k / LCBV CONSS _ gth -> kj / LCSV CONSS _ g -> @@ g / # LCSBV LNRP _ g -> @@ g / NONSYLLABIC LCSBV LNRP _ g -> @@ gj / # LCSSV LNRP _ g -> @@ gj / NONSYLLABIC LCSSV LNRP _ g -> gj / _ CONSS RCSV # g -> gj / _ CONSS RCSV NONSYLLABIC g -> gj / # LCSV CONSP _ g -> gj / NONSYLLABIC LCSV CONSP _ g -> gj / # LCSV _ g -> gj / NONSYLLABIC LCSV _ g -> g h -> hj / _ CONSS RCSV # h -> hj / _ CONSS RCSV NONSYLLABIC h -> hj / # LCSV CONSP _ h -> hj / NONSYLLABIC LCSV CONSP _ h -> hj / # LCSV _ h -> hj / NONSYLLABIC LCSV _ h -> h j -> djzj k -> kj / _ CONSS RCSV # k -> kj / _ CONSS RCSV NONSYLLABIC k -> kj / # LCSV CONSP _ k -> kj / NONSYLLABIC LCSV CONSP _ k -> kj / # LCSV _ k -> kj / NONSYLLABIC LCSV _ k -> k llf -> ll_d / _ BFCE # llf -> llj_d / _ SFCE # llth -> ll_d / LCBV CONSS _ llth -> llj_d / LCSV CONSS _ ll -> llj / _ CONSS RCSV # ll -> llj / _ CONSS RCSV NONSYLLABIC ll -> llj / # LCSV CONSP _ ll -> llj / NONSYLLABIC LCSV CONSP _ ll -> llj / # LCSV _ ll -> llj / NONSYLLABIC LCSV _ ll -> ll lf -> ll_d / _ BFCE # lf -> lj_d / _ SFCE # lth -> ll_d / LCBV CONSS _ lth -> lj_d / LCSV CONSS _ l -> lj / _ CONSS RCSV # l -> lj / _ CONSS RCSV NONSYLLABIC l -> lj / # LCSV CONSP _ l -> lj / NONSYLLABIC LCSV CONSP _ l -> lj / # LCSV _ l -> lj / NONSYLLABIC LCSV _ l -> ll mb -> mj / # _ CONSS RCSV # mb -> mj / # _ CONSS RCSV NONSYLLABIC mb -> m / # _ mf -> m_d / _ BFCE # mf -> mj_d / _ SFCE # mhf -> f / _ BFCE # mhf -> 
fj / _ SFCE # mh -> vj / # _ CONSS RCSV # mh -> vj / # _ CONSS RCSV NONSYLLABIC mh -> v / # _ mh -> @@ v / # LCSBV LNRP _ mh -> @@ v / NONSYLLABIC LCSBV LNRP _ mh -> @@ vj / # LCSSV LNRP _ mh -> @@ vj / NONSYLLABIC LCSSV LNRP _ mh -> vj / _ CONSS RCSV # mh -> vj / _ CONSS RCSV NONSYLLABIC mh -> vj / # LCSV CONSP _ mh -> vj / NONSYLLABIC LCSV CONSP _ mh -> vj / # LCSV _ mh -> vj / NONSYLLABIC LCSV _ mh -> v mth -> m_d / LCBV CONSS _ mth -> mj_d / LCSV CONSS _ m -> @@ m / # LCSBV LNRP _ m -> @@ m / NONSYLLABIC LCSBV LNRP _ m -> @@ mj / # LCSSV LNRP _ m -> @@ mj / NONSYLLABIC LCSSV LNRP _ m -> mj / _ CONSS RCSV # m -> mj / _ CONSS RCSV NONSYLLABIC m -> mj / # LCSV CONSP _ m -> mj / NONSYLLABIC LCSV CONSP _ m -> mj / # LCSV _ m -> mj / NONSYLLABIC LCSV _ m -> m nnf -> nn_d / _ BFCE # nnf -> nnj_d / _ SFCE # nnth -> nn_d / LCBV CONSS _ nnth -> nnj_d / LCSV CONSS _ nn -> nnj / _ CONSS RCSV # nn -> nnj / _ CONSS RCSV NONSYLLABIC nn -> nnj / # LCSV CONSP _ nn -> nnj / NONSYLLABIC LCSV CONSP _ nn -> nnj / # LCSV _ nn -> nnj / NONSYLLABIC LCSV _ nn -> nn n- -> nj / # _ RCSV # n- -> nj / # _ RCSV NONSYLLABIC n- -> nn / # _ nd -> nnj / # _ CONSS RCSV # nd -> nnj / # _ CONSS RCSV NONSYLLABIC nd -> nn / # _ nf -> nn_d / _ BFCE # nf -> nj_d / _ SFCE # ngf -> ng_d / _ BFCE # ngf -> ngj_d / _ SFCE # ngth -> ng_d / LCBV CONSS _ ngth -> ngj_d / LCSV CONSS _ ng -> ngj / # _ CONSS RCSV # ng -> ngj / # _ CONSS RCSV NONSYLLABIC ng -> ng / # _ ng -> nj / # LCSV _ t # ng -> nj / NONSYLLABIC LCSV _ t # ng -> ngj / _ CONSS RCSV # ng -> ngj / _ CONSS RCSV NONSYLLABIC ng -> ngj / # LCSV CONSP _ ng -> ngj / NONSYLLABIC LCSV CONSP _ ng -> ngj / # LCSV _ ng -> ngj / NONSYLLABIC LCSV _ ng -> ng nth -> nn_d / LCBV CONSS _ nth -> nj_d / LCSV CONSS _ n -> ngj / # LCSV _ c n -> ngj / NONSYLLABIC LCSV _ c n -> ng / _ c n -> nj / _ CONSS RCSV # n -> nj / _ CONSS RCSV NONSYLLABIC n -> nj / # LCSV CONSP _ n -> nj / NONSYLLABIC LCSV CONSP _ n -> nj / # LCSV _ n -> nj / NONSYLLABIC LCSV _ n -> nn pf -> p / 
_ BFCE # pf -> pj / _ SFCE # ph -> fj / # _ CONSS RCSV # ph -> fj / # _ CONSS RCSV NONSYLLABIC ph -> f / # _ ph -> fj / _ CONSS RCSV # ph -> fj / _ CONSS RCSV NONSYLLABIC ph -> fj / # LCSV CONSP _ ph -> fj / NONSYLLABIC LCSV CONSP _ ph -> fj / # LCSV _ ph -> fj / NONSYLLABIC LCSV _ ph -> f pth -> p / LCBV CONSS _ pth -> pj / LCSV CONSS _ p -> pj / _ CONSS RCSV # p -> pj / _ CONSS RCSV NONSYLLABIC p -> pj / # LCSV CONSP _ p -> pj / NONSYLLABIC LCSV CONSP _ p -> pj / # LCSV _ p -> pj / NONSYLLABIC LCSV _ p -> p # really? there's a 'W' in the phoneset q -> k v rrf -> rr_d / _ BFCE # rrf -> rrj_d / _ SFCE # rrth -> rr_d / LCBV CONSS _ rrth -> rrj_d / LCSV CONSS _ rr -> rrj / _ CONSS RCSV # rr -> rrj / _ CONSS RCSV NONSYLLABIC rr -> rrj / # LCSV CONSP _ rr -> rrj / NONSYLLABIC LCSV CONSP _ rr -> rrj / # LCSV _ rr -> rrj / NONSYLLABIC LCSV _ rr -> rr rf -> r_d / _ BFCE # rf -> rj_d / _ SFCE # rth -> r_d / LCBV CONSS _ rth -> rj_d / LCSV CONSS _ r -> r / # s _ r -> r / # _ r -> r / _ DNLST r -> rj / _ CONSS RCSV # r -> rj / _ CONSS RCSV NONSYLLABIC r -> rj / # LCSV CONSP _ r -> rj / NONSYLLABIC LCSV CONSP _ r -> rj / # LCSV _ r -> rj / NONSYLLABIC LCSV _ r -> r sf -> s / _ BFCE # sf -> sj / _ SFCE # shl -> lj_d / _ CONSS RCSV # shl -> lj_d / _ CONSS RCSV NONSYLLABIC shl -> ll_d shm -> mj_d / _ CONSS RCSV # shm -> mj_d / _ CONSS RCSV NONSYLLABIC shm -> m_d shn -> nj_d / _ CONSS RCSV # shn -> nj_d / _ CONSS RCSV NONSYLLABIC shn -> nn_d shr -> rj_d / _ CONSS RCSV # shr -> rj_d / _ CONSS RCSV NONSYLLABIC shr -> r_d sh -> xj / # _ CONSS RCSV # sh -> xj / # _ CONSS RCSV NONSYLLABIC sh -> h / # _ sh -> xj / _ CONSS RCSV # sh -> xj / _ CONSS RCSV NONSYLLABIC sh -> xj / # LCSV CONSP _ sh -> xj / NONSYLLABIC LCSV CONSP _ sh -> xj / # LCSV _ sh -> xj / NONSYLLABIC LCSV _ sh -> h s -> s / # _ r s -> s / # _ FMP CONSS RCSV # s -> s / # _ FMP CONSS RCSV NONSYLLABIC s -> sj / _ CONSS RCSV # s -> sj / _ CONSS RCSV NONSYLLABIC s -> sj / # LCSV CONSP _ s -> sj / NONSYLLABIC LCSV CONSP _ s 
-> sj / # LCSV _ s -> sj / NONSYLLABIC LCSV _ s -> s t- -> tj / # _ RCSV # t- -> tj / # _ RCSV NONSYLLABIC t- -> t / # _ tf -> t / _ BFCE # tf -> tj / _ SFCE # // "hack for compound boundaries" th -> ∅ / _ CONS h thb -> pj / _ CONSS RCSV # thb -> pj / _ CONSS RCSV NONSYLLABIC thb -> p thc -> kj / _ CONSS RCSV # thc -> kj / _ CONSS RCSV NONSYLLABIC thc -> k thd -> tj / _ CONSS RCSV # thd -> tj / _ CONSS RCSV NONSYLLABIC thd -> t thf -> h / _ BFCE # thf -> xj / _ SFCE # thl -> lj_d / _ CONSS RCSV # thl -> lj_d / _ CONSS RCSV NONSYLLABIC thl -> ll_d thm -> mj_d / _ CONSS RCSV # thm -> mj_d / _ CONSS RCSV NONSYLLABIC thm -> m_d thn -> nj_d / _ CONSS RCSV # thn -> nj_d / _ CONSS RCSV NONSYLLABIC thn -> nn_d thp -> pj / _ CONSS RCSV # thp -> pj / _ CONSS RCSV NONSYLLABIC thp -> p thr -> rj_d / _ CONSS RCSV # thr -> rj_d / _ CONSS RCSV NONSYLLABIC thr -> r_d ths -> sj / _ CONSS RCSV # ths -> sj / _ CONSS RCSV NONSYLLABIC ths -> s tht -> tj / _ CONSS RCSV # tht -> tj / _ CONSS RCSV NONSYLLABIC tht -> t th -> hj / # _ CONSS RCSV # th -> hj / # _ CONSS RCSV NONSYLLABIC th -> h / # _ th -> hj / _ CONSS RCSV # th -> hj / _ CONSS RCSV NONSYLLABIC th -> hj / # LCSV CONSP _ th -> hj / NONSYLLABIC LCSV CONSP _ th -> hj / # LCSV _ th -> hj / NONSYLLABIC LCSV _ th -> h ts -> tj / # _ CONSS RCSV # ts -> tj / # _ CONSS RCSV NONSYLLABIC ts -> t / # _ t -> tj / _ CONSS RCSV # t -> tj / _ CONSS RCSV NONSYLLABIC t -> tj / # LCSV CONSP _ t -> tj / NONSYLLABIC LCSV CONSP _ t -> tj / # LCSV _ t -> tj / NONSYLLABIC LCSV _ t -> t v -> vj / _ CONSS RCSV # v -> vj / _ CONSS RCSV NONSYLLABIC v -> vj / # LCSV CONSP _ v -> vj / NONSYLLABIC LCSV CONSP _ v -> vj / # LCSV _ v -> vj / NONSYLLABIC LCSV _ v -> v w -> v x- -> e kj s x -> zj y -> gfj z -> zj / _ CONSS RCSV # z -> zj / _ CONSS RCSV NONSYLLABIC z -> zj / # LCSV CONSP _ z -> zj / NONSYLLABIC LCSV CONSP _ z -> zj / # LCSV _ z -> zj / NONSYLLABIC LCSV _ z -> z ' -> ∅ ’ -> ∅ - -> ∅ TEST ádh -> aa TEST áiseanna -> aa sj @@ nn @@ TEST áthas -> aa 
h @@ s TEST abhainn -> abh nnj TEST bualadh -> b u@ ll adh TEST sadhbh -> s ai v TEST saghas -> s ai s TEST gaeilge -> g ee lj gj @@ TEST saolaíodh -> s ao ll íodh TEST gardaí -> g aa r d ii TEST dúnfaidh -> d uu nn_d idh TEST aidhm -> ai mj TEST cheadaigh -> xj a d igh TEST aighneas -> ai nj @@ s TEST diúltaithe -> dj uu ll t ithe TEST seabhaic -> sj abh kj TEST feadhain -> fj au nj TEST teaghais -> tj ai sj TEST eamhain -> au nj TEST lobhair -> ll obh rj TEST leódhais -> lj oo sj TEST bodhair -> b odh rj TEST eoghain -> oo nj TEST broghais -> b r ogh sj TEST comhair -> k oo rj TEST ciumhais -> kj uu sj TEST airde -> aa r dj @@ TEST cait -> k a tj TEST sodair -> s o d @@ rj TEST ait -> a tj TEST déanamh -> dj ee nn amh TEST amharc -> au r k TEST gaoil -> g ao lj TEST gaol -> g ao ll TEST seabhac -> sj abh k TEST ceadharlach -> kj au r ll @@ x TEST teaghasán -> tj ai s aa nn TEST lobhar -> ll obh r TEST leódhas -> lj oo s TEST bodhar -> b odh r TEST eoghan -> oo nn TEST bogha -> b ogh TEST comhar -> k oo r TEST dumhach -> d uu x TEST ard -> aa r d TEST cat -> k a t TEST sodar -> s o d @@ r TEST at -> a t TEST éan -> ee nn TEST éiníní -> ee nj ii nj ii TEST é -> ee TEST sheáin -> xj aa nj TEST seán -> sj aa nn TEST seabhac -> sj abh k TEST seinneadh -> sj e nnj adh TEST ceadharlach -> kj au r ll @@ x TEST teaghlach -> tj ai ll @@ x TEST beairic -> bj a rj @@ kj TEST áireamh -> aa rj amh TEST sleamhnán -> sj lj au nn aa nn TEST oighear -> oigh r TEST ceard -> kj aa r d TEST cead -> kj a d TEST áireamhán -> aa rj @@ v aa nn TEST eas -> a s TEST feidhm -> fj eidh mj TEST leigheas -> lj eigh s TEST ceird -> kj ee r dj TEST deis -> dj e sj TEST eitpheil -> e tj fj e lj TEST ceoil -> kj oo lj TEST bainseó -> b a nj sj oo TEST ceol -> kj oo ll TEST uile -> i lj @@ TEST ceannaíodh -> kj a nn íodh TEST síos -> sj ii s TEST sí -> sj ii TEST siadhail -> sj i@ lj TEST sciath -> sj kj i@ TEST riail -> r i@ lj TEST siad -> sj i@ d TEST seinnfidh -> sj e nnj_d idh TEST cheannaigh 
-> xj a nn igh TEST fios -> fj io s TEST imithe -> i mj ithe TEST siúil -> sj uu lj TEST siúl -> sj uu ll TEST tiubh -> tj ubh TEST ciumhais -> kj uu sj TEST giuirléid -> gj uu r lj ee dj TEST fiuch -> fj u x TEST leighis -> lj eigh sj TEST foighid -> f oigh dj TEST aithris -> a rj_d @@ sj TEST sin -> sj i nj TEST cheannódh -> xj a nn ódh TEST óil -> oo lj TEST ól -> oo ll TEST lobhadh -> ll obh adh TEST todhchaí -> t odh x ii TEST toghadh -> t ogh adh TEST oíche -> ii xj @@ TEST oidhe -> oidh @@ TEST oighear -> oigh r TEST boird -> b oo r dj TEST soir -> s oi rj TEST comhar -> k oo r TEST bord -> b oo r d TEST bos -> b o s TEST súil -> s uu lj TEST súl -> s uu ll TEST uathúil -> u@ uu lj TEST uaine -> u@ nj @@ TEST uan -> u@ nn TEST subh -> s ubh TEST bhuel -> v e ll TEST guird -> g uu r dj TEST cuid -> k i dj TEST uile -> i lj @@ TEST bruíon -> b r ii nn TEST bruíne -> b r ii nj @@ TEST cumhacht -> k uu x t TEST burdún -> b uu r d uu nn TEST cur -> k u r TEST bus -> b u s TEST scuabfaidh -> s k u@ p idh TEST clibfidh -> kj lj i pj idh TEST scuabfadh -> s k u@ p adh TEST clibfeadh -> kj lj i pj adh TEST bhfáinne -> v aa nnj @@ TEST bhfianaise -> vj i@ nn @@ sj @@ TEST scríobhfaidh -> sj kj rj ii f idh TEST díbhfidh -> dj ii fj idh TEST scríobhfadh -> sj kj rj ii f adh TEST díbhfeadh -> dj ii fj adh TEST searbh -> sj a r @@ v TEST seirbhís -> sj e rj @@ vj ii sj TEST bhrostaigh -> v r o s t igh TEST bhris -> vj rj i sj TEST coibhín -> k oi vj ii nj TEST bpáistí -> b aa sj tj ii TEST bpéisteanna -> bj ee sj tj @@ nn @@ TEST scuabtha -> s k u@ p @@ TEST clibthe -> kj lj i pj @@ TEST borb -> b o r @@ b TEST seirbiach -> sj e rj @@ bj i@ x TEST bróna -> b r oo nn @@ TEST brian -> bj rj i@ nn TEST leódhas -> lj oo s TEST t-uisce -> t ui sj kj @@ TEST t-éabhlóidí -> tj ee v ll oo dj ii TEST atfaidh -> a t idh TEST titfidh -> tj i tj idh TEST athdhéanamh -> a gfj ee nn amh TEST meathfadh -> mj a h adh TEST rithfeadh -> r i xj adh TEST bláthra -> b ll aa r_d @@ TEST tharla 
-> h aa r ll @@ TEST thit -> hj i tj TEST tseachtain -> tj a x t @@ nj TEST tsagairt -> t a g @@ r tj TEST teann -> tj a nn TEST tit -> tj i tj TEST togra -> t o g r @@ TEST sadhbh -> s ai v TEST bhéal -> vj ee ll TEST bhéil -> vj ee lj # + # %%writefile oraghallaigh-cd.g2p CHARACTER_SET "abcdefghiíjklmnoóprstuvwxz@.012-_" DEFAULT_PHONEME "_" PHONEME_DELIMITER " " VAR BC (p|b|f|v|m_d|m|t_|t|d_|d|ll_d|ll|l_d|l|nn_d|nn|n_d|n|rr_d|r_d|rr|r|s|z|k|g|x|gf|ng_d|ng|h) VAR LLJ (llj|nnj|rrj|mj) VAR LL (ll|nn|rr|m) aa -> aa abh -> au adh -> @ x agh -> ai aío -> i@ amh -> @ v ai -> ai ao -> ee au -> au a -> ai / _ LLJ a -> au / _ LL a -> a ee -> i@ / _ BC ee -> ee eidh -> e gj / _ # eidh -> ai eigh -> ai e -> e idh -> @ gj igh -> @ gj íodh -> i@ x ithe -> @ . 0 h @ i@ -> i@ ii -> ii io -> i i -> ii / _ LLJ # i -> i obh -> au ódh -> oo x odh -> au ogh -> au oidh -> ai oigh -> ai oi -> ii / _ LLJ # oi -> i omh -> au oo -> oo o -> au / _ LL # o -> o ubh -> u v / _ # ubh -> @ v u@ -> u@ ui -> ii / _ LLJ ui -> i uu -> uu u -> u @@ -> @ pj -> pj p -> p bj -> bj b -> b fj -> fj f -> f vj -> vj v -> v w -> w mj_d -> mj_d m_d -> m_d mj -> mj m -> m tjsj -> tjsj djzj -> djzj t_- -> t_- tj -> tj t -> t d_- -> d_- dj -> dj d -> d llj_d -> lj_d ll_d -> l_d llj -> lj ll -> l lj_d -> lj_d l_d -> l_d lj -> lj l -> l nnj_d -> nj_d nn_d -> n_d nnj -> nj nn -> n nj_d -> nj_d n_d -> n_d rrj_d -> rj_d rj_d -> rj_d rr_d -> r_d r_d -> r_d rrj -> rj rj -> rj rr -> r r -> r sj -> sj s -> s zj -> zj z -> z kj -> kj k -> k gj -> gj g -> g xj -> xj x -> x gfj -> ∅ / _ # gfj -> gfj ngj_d -> ngj h ng_d -> ng h ngj -> nj / _ # ngj -> ngj ng -> ng nj -> nj n -> n hj -> h h -> h 0 -> 0 1 -> 1 2 -> 2 . -> . 
- -> ∅ @ -> ∅ c -> ∅ j -> ∅ í -> ∅ ó -> ∅ _ -> ∅ TEST faas -> f aa s TEST abhnnj -> au nj TEST saghd -> s ai d TEST saíoxt -> s i@ x t TEST djeennamh -> dj ee n @ v TEST taig -> t ai g TEST saoll -> s ee l TEST saulj -> s au lj TEST kallj -> k ai lj TEST krann -> k r au n TEST kad -> k a d TEST eenn -> i@ n TEST sjee -> sj ee TEST beidh -> b e gj TEST fjeidhmj -> fj ai mj TEST ljeighs -> lj ai s TEST djesj -> dj e sj TEST bjrjisjidh -> bj rj i sj @ gj TEST xjannigh -> xj a n @ gj TEST kjanníodh -> kj a n i@ x TEST kjannithe -> kj a n @ h @ TEST i@rr -> i@ r TEST sjiinj -> sj ii nj TEST fjios -> fj i s TEST imj -> ii mj TEST sjinj -> sj i nj TEST llobhr -> l au r TEST kjannódh -> kj a n oo x TEST bodhr -> b au r TEST toghxaann -> t au x aa n TEST oidhrjaxt -> ai rj a x t TEST oighr -> ai r TEST koillj -> k ii lj TEST koillj@@ -> k i lj @ TEST domhnn -> d au n TEST moor -> m oo r TEST poll -> p au l TEST polladh -> p o l @ x TEST ubh@@gaann -> @ v @ g aa n TEST bu@ -> b u@ TEST suimj -> s ii mj TEST kuidj -> k i dj TEST kuur -> k uu r TEST kur -> k u r TEST farjsj@@ngj -> f a rj sj @ nj
_notebooks/2021-05-17-o-raghallaigh-thesis-attempt-2-cd.ipynb