code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ZtrZWV8q5_4U" colab_type="text"
# ## Simply Convert Data to COCO Format
#
# - Split the training data to train and validation
# - Convert both to COCO Formatted JSON
# + id="RIKa6VOx6YrX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4c64f4ed-1849-4a96-c117-8e789ee12037"
from google.colab import drive
drive.mount('/content/drive/')
# + id="MaaDRKeY5_4V" colab_type="code" colab={}
# Some basic setup:
# import some common libraries
import numpy as np
import pandas as pd
import os
import json
import random
import cv2
import matplotlib.pyplot as plt
import ast
from tqdm import tqdm
from sklearn.model_selection import train_test_split
# + id="aOMXNgXO5_4a" colab_type="code" colab={}
TOP_INPUT_DIR = "/content/drive/My Drive/GWD/global-wheat-detection/"
DATA_TRAIN_DIR = f"{TOP_INPUT_DIR}train/"
DATA_TEST_DIR = f"{TOP_INPUT_DIR}test/"
# + [markdown] id="eXnhdRfZ5_4e" colab_type="text"
# ### Create COCO Base
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="qJ5nD4GT5_4f" colab_type="code" colab={}
coco_base = { "info": {},
"licenses": [],
"images": [],
"annotations": [],
"categories": []}
# + [markdown] id="nTRL5W1w5_4i" colab_type="text"
# ### Write dataset info to COCO Format
# + id="Ba83Uz5Y5_4i" colab_type="code" colab={}
coco_base["info"] = {
"description": "Global Wheat Detection Dataset, Kaggle 2020",
"url": "https://www.kaggle.com/c/global-wheat-detection/data",
"version": "1.0",
"year": 2020,
"contributor": "http://www.global-wheat.com/contributors/",
"date_created": "2020/05/29"
}
# + [markdown] id="YAxbS9M15_4l" colab_type="text"
# ### Write licenses to COCO Format
# + id="JMmGO-wF5_4m" colab_type="code" colab={}
coco_base["licenses"].append(
{
"url": "https://opensource.org/licenses/MIT",
"id": 1,
"name": "MIT License"
}
)
# + [markdown] id="9Dz2zpRb5_4p" colab_type="text"
# ### Write category to COCO Format
# + id="iPYIpDkj5_4q" colab_type="code" colab={}
coco_base["categories"].append({"supercategory": "grain","id": 1,"name": "wheat"})
# + id="5M74XXXE5_4u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="b6f00844-87c3-4bf5-c774-aadccbd06853"
train_df = pd.read_csv(f'{TOP_INPUT_DIR}train.csv')
train_df.head()
# + id="ttWibA-45_4y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="eea9b3e3-7dfb-4016-d13d-5777c772a4db"
uniq_images = train_df.image_id.unique()
len(uniq_images)
# + [markdown] id="ZT7yc7py5_41" colab_type="text"
# #### There are 3373 unique images in the training set
# + id="C3ITBD4Q5_42" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="cf260ab4-8ec9-4217-dfc3-aef22f213557"
print(train_df.width.unique()[0])
print(train_df.height.unique()[0])
# + [markdown] id="GmvpmQvV5_46" colab_type="text"
# #### All images are of size 1024x1024
# + [markdown] id="wREi7Ce75_47" colab_type="text"
# ### Group bboxes by image
#
# Create one row per image - combining all bboxes for that image into one column. This helps significantly reduce the number of rows to traverse.
# + id="-SGDUhGG5_47" colab_type="code" colab={}
def get_bboxes_per_image(df):
    """Collapse per-box rows into one row per image.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain an ``image_id`` column and a ``bbox`` column whose
        values are string-encoded lists such as ``"[x, y, w, h]"``.

    Returns
    -------
    pandas.DataFrame
        Two columns: ``image_id`` and ``bboxes``, where ``bboxes`` is the
        list of all [x, y, w, h] boxes for that image.

    Notes
    -----
    Mutates ``df.bbox`` in place (strings are parsed into Python lists).
    author: @impiyush
    """
    # convert from string representation (e.g. "[1, 2, 3, 4]") to a real list
    df.bbox = df.bbox.apply(ast.literal_eval)
    # BUG FIX: group the *argument* df, not the module-level train_df --
    # the original silently ignored its parameter.
    return df.groupby('image_id')['bbox'].apply(list).reset_index(name='bboxes')
# + id="34AtuSHJ5_4_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="9a8e78e1-5bec-444a-e306-862ce04fae7d"
train_df_bboxes_grped = get_bboxes_per_image(train_df)
train_df_bboxes_grped.head()
# + id="MofHSPiw5_5C" colab_type="code" colab={}
assert train_df_bboxes_grped.shape[0] == len(uniq_images), "Number of images differ when grouped"
# + [markdown] id="QJ5uf1uC5_5F" colab_type="text"
# ### Split to train and validation
# + id="2Qah98sz5_5H" colab_type="code" colab={}
df_train, df_valid = train_test_split(
train_df_bboxes_grped,
test_size=0.05,
random_state=32,
shuffle=True
)
df_train = df_train.reset_index(drop=True)
df_valid = df_valid.reset_index(drop=True)
# + id="rfd00lnb5_5L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="40f371bc-b756-484e-e7ef-7b14429a69f9"
print(df_train.shape[0], df_valid.shape[0])
# + [markdown] id="uIEtCKID5_5Q" colab_type="text"
# ### Write images to COCO Format
# + id="mwn7RAj05_5S" colab_type="code" colab={}
# Use deep copies so the train/valid dicts share no mutable state with
# coco_base or each other.  The original shallow dict.copy() left
# "licenses"/"categories" (and, until reassigned, "images"/"annotations")
# pointing at the *same* list objects in all three dicts -- appending to
# one would have silently changed the others.
import copy

coco_base_train = copy.deepcopy(coco_base)
coco_base_valid = copy.deepcopy(coco_base)
# + id="B3pwzO9w5_5W" colab_type="code" colab={}
def set_coco_images(df):
    """Build the COCO ``images`` list for every unique image in *df*.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain an ``image_id`` column.

    Returns
    -------
    list[dict]
        One COCO image record per unique image id.  Height/width are
        hard-coded to 1024 because all images in this dataset are
        1024x1024 (verified earlier in the notebook).

    author: @impiyush
    """
    images = []
    # (the original wrapped this in enumerate() and discarded the index)
    for img in tqdm(df.image_id.unique()):
        images.append({
            "license": 1,
            "file_name": f"{img}.jpg",
            "height": 1024,
            "width": 1024,
            "id": img,
        })
    return images
# + id="VMa2QTvr5_5a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3f80473f-929e-4021-9fda-080d4744ea5a"
coco_base_train["images"] = set_coco_images(df_train)
coco_base_valid["images"] = set_coco_images(df_valid)
# + [markdown] id="SPfx3Hzw5_5d" colab_type="text"
# Let's check the first three images in the list
# + id="iEBuJhrr5_5d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="bc1a9250-64e9-4044-f475-1f0312e24ddf"
coco_base_train["images"][:3]
# + id="P-njfEvi5_5h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="87564971-45a6-4d73-b6c2-9bcfaf826bac"
coco_base_valid["images"][:3]
# + [markdown] id="LlWpVTOw5_5k" colab_type="text"
# Let's also verify the number of images match, just for sanity
# + id="jJJdtQB25_5k" colab_type="code" colab={}
assert len(coco_base_train["images"])==len(df_train), "Number of images differ from df_train"
assert len(coco_base_valid["images"])==len(df_valid), "Number of images differ from df_valid"
# + [markdown] id="oU3I2ngy5_5n" colab_type="text"
# ### Write annotations to COCO Format
#
# The most important piece of this puzzle
# + id="vFWpWe3e5_5n" colab_type="code" colab={}
def set_coco_annotations(df):
    """Build the COCO ``annotations`` list from the grouped-bboxes dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        One row per image, with ``image_id`` and ``bboxes`` (list of
        [x, y, w, h] boxes) columns.

    Returns
    -------
    list[dict]
        One COCO annotation record per bounding box, with ids numbered
        from 1 across the whole dataframe.

    author: @impiyush
    """
    annos = []
    id_cnt = 1
    for _, row in tqdm(df.iterrows(), total=len(df)):
        for box in row['bboxes']:
            # Build a fresh dict per box; the original reused one dict and
            # relied on .copy() to avoid every entry aliasing the last box.
            annos.append({
                'segmentation': [],
                'iscrowd': 0,
                'image_id': row['image_id'],
                'category_id': 1,
                'bbox': box,               # x, y, w, h
                'area': box[2] * box[3],   # w * h
                # BUG FIX: COCO requires integer annotation ids; the
                # original zero-padded string (f"{id_cnt:05}") breaks
                # pycocotools-based consumers.
                'id': id_cnt,
            })
            id_cnt += 1
    return annos
# + id="ibwZP3F95_5q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="7a7030b5-44c0-49b8-f7a2-122bbbcb57c5"
coco_base_train['annotations'] = set_coco_annotations(df_train)
coco_base_valid['annotations'] = set_coco_annotations(df_valid)
# + [markdown] id="9dLIkaVx5_5t" colab_type="text"
# ### Dump COCO formatted JSON for train and validation sets
# + id="Ydcwaea65_5t" colab_type="code" colab={}
with open(f'/content/drive/My Drive/GWD/coco_train.json','w') as train_coco:
json.dump(coco_base_train, train_coco)
# + id="rLs0___95_5w" colab_type="code" colab={}
with open(f'/content/drive/My Drive/GWD/coco_valid.json','w') as valid_coco:
json.dump(coco_base_valid, valid_coco)
# + id="QsIZgCRALcZK" colab_type="code" colab={}
# NOTE(review): this cell writes the literal JSON value `null` to
# coco_test.json -- no test-set COCO dict is ever built in this notebook.
# Presumably a placeholder; confirm whether a real test-set conversion was
# intended before relying on this file.  (The variable name `valid_cocoÑ`
# also looks like a stray-character artifact.)
with open(f'/content/drive/My Drive/GWD/coco_test.json','w') as valid_cocoÑ:
    json.dump(None, valid_cocoÑ)
# + [markdown] id="heWc7wDm5_50" colab_type="text"
# ### There you go, now you have your data in COCO Format. Hope you liked this quick Kernel!
| simply_convert_data_to_coco_format.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Biblioteca Pandas
# ### Series y DataFrames
import pandas as pd
obj = pd.Series([4, 7, -5, 3])
obj
animales = ['Tortuga', 'Zorro', 'Paloma', 'Elefante']
# BUG FIX: the original literals were mojibake ("mamÃ­fero") from a broken
# encoding round-trip; restore the intended UTF-8 text "mamífero".
tipo = ['reptil', 'mamífero', 'ave', 'mamífero']
# A Series of animal types indexed by animal name.
obj = pd.Series(tipo, index=animales)
obj
# DataFrames
d = {'tipo_vivienda': ['casa', 'departamento'], 'm2': [35, 49]}
df = pd.DataFrame(data=d)
df
# ### Cargando datos
df = pd.read_csv('fifa-statistics.csv')
df
pd.read_table('fifa-statistics.csv',sep=',')
# ## Selección e Indexing
# ### loc e iloc
df.loc[0, 'Team']
df.iloc[0, [1]]
df.iloc[2]
# ## Análisis descriptivo en Pandas
# ### describe, info y shape
df.describe()
df.info()
df.shape
df.columns
df.head()
df.tail(4)
# ### unique y value_counts()
df['Team'].unique()
df.Team.value_counts()
# ### count, min, max, mean, median, std y corr
#
df.corr()
# ### Quantiles
df['Distance Covered (Kms)'].quantile(0.90) # 90th percentile (quantile(q) returns the q-th quantile; the original comment said "10th" by mistake)
df['Distance Covered (Kms)'].median() # 50th percentile
# ### Ordenando datos
df.sort_index()
# ## Combinando datasets
# ### concat y append
# +
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
frames = [df1, df2, df3]
# -
df3
pd.concat(frames)
df4 = pd.DataFrame({'B': ['B2', 'B3', 'B6', 'B7'],
'D': ['D2', 'D3', 'D6', 'D7'],
'F': ['F2', 'F3', 'F6', 'F7']},
index=[2, 3, 6, 7])
df4
pd.concat([df1, df4], axis=1) # Concatenación horizontal
pd.concat([df1, df4], axis=1, join='inner') # Cambiando la forma de concatenar con join
df1.append(df2) # Append es otra forma de fácilmente concatenerar
# ### Merge
# +
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
pd.merge(left, right, on='key')
# -
import numpy as np
# ## apply y map
# ### apply
usuarios_df = pd.DataFrame({'edad': [np.nan, 20, 41, 20],
'clics_por_hora': [156, np.nan, 210, 210]})
usuarios_df
import numpy as np
usuarios_df.apply(np.mean) # Se puede aplicar cualquier función que toma una fila o columna por entrada.
# ### applymap
usuarios_df.applymap(lambda x: x/2) # Se aplica elemento a elemento. En las series podemos usar directamente serie.map
# ## Missing values
# ### Filtrando valores faltantes
usuarios_df
usuarios_df.isna()
usuarios_df.dropna() # Puede borrarse toda fila que contenga un NaN o solamente en las columnas que especificamos
usuarios_df.dropna(subset=['edad'])
copia_df = usuarios_df
usuarios_df
# ### Completando valores
copia_df['edad'].fillna(usuarios_df.edad.mean())
copia_df.fillna(0)
copia_df
# +
copia_df.fillna(usuarios_df.edad.mean(), inplace=True) # Podemos elegir con qué completar los valores faltantes.
# -
copia_df
# ### Borrando duplicados
usuarios_df
usuarios_df.drop_duplicates(subset=['edad'], keep='first') # Se puede jugar con el first, last, etc. Y también elegir en qué columnas
| NoteBooks/Curso de Análisis de datos/biblioteca-pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ronva-h/technology_fundamentals/blob/main/C1%20Fundamentals/LABS_PROJECT/Tech%20Fun%20C1%20L1%20Practice%20with%20Python%20and%20Jupyter%20Notebooks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="vhe1yX4AMckE"
# # Technology Fundamentals Course 1, Lab 1: Practice with Python and Jupyter Notebooks
#
# **Instructor**: <NAME>
#
# **Contact**: <EMAIL>
#
# **Teaching Assistants**: <NAME>, <NAME>
#
# **Contact** <EMAIL>, <EMAIL>
#
# <br>
#
# ---
#
# <br>
#
# This first lab assignment will be a review of what we discussed today.
#
# <br>
#
# ---
#
#
#
# + [markdown] id="O4pwVw5ZdIqS"
# #### 1 Python variables
#
# In the empty code cell below, create the following four variables:
# - A string variable named `favorite_movie` that represents your favorite movie
# - A string variable named `national_chain` that represents your favorite fast food restaurant
# - An integer variable named `streaming_video_hours` that represents the whole number of hours you watch any streaming video service (ex. Netflix, Hulu, Disney+) per week.
# - A float variable named `headphone_cost` that represents the most money you had to pay, in dollar-cent amount (0.00), for headphones. Do not include a '$' symbol.
#
# Then after they are declared, print each one using the print() function explained in section 1.1.2.
# + id="1AXZpA-cdXKP"
# + [markdown] id="TlvYLQj7gBat"
# To check if each variable is the correct data type, use the type() function explained from section 1.1.3, in the empty code cell below. For example, `type(favorite_movie)` should return the output `<class 'str'>`, which indicates the string type.
# + id="tFyoOMp7Q-l2"
# + [markdown] id="lXoPAgV1htlX"
# #### 2 Practice with math in Python
#
# Let's start with a few basic operators that was covered in section 1.1.4. Write the expression for multiplying 23 by 31, so that running this function will correctly output the product of these two numbers.
# + id="Mh2_aoVdiBdo"
# + [markdown] id="IaRbtO6KiCBM"
# Consider the operation written in Python: `27 / 3 + 6`. Write the same syntax in the empty cell below, and modify it to include parentheses in the right location so that the result/answer of the math is 3.0, rather than 15.0.
# + id="KYGOO284i2Jq"
# + [markdown] id="3YNWUvgfi3LV"
# Let's learn more operators beyond the ones we covered earlier. Write the line of code `3 ** 2` in the cell below. From the output, what do you think the double asterisks (`**`) operator represents in Python?
# + id="vJL7ExVMjq6f"
# + [markdown] id="_zfMqrrtkINb"
# Write the line of code `28 / 3` below. Then just below the line in the same cell, write the code `28 // 3`. Compare the differences in output between the two. Can you decipher what `//` means in Python?
# + id="bLzljZ2nkHnP"
# + id="4IRVZY16NKat"
# + [markdown] id="fTGXdUqvlrJK"
# Now for more complicated mathematical operations! Try to write the Python equivalent of the following:
#
# $\frac{14 + 28}{28 - 14}$
# + id="MMqvz_JXmaGv"
# + [markdown] id="wmntjBztmatb"
# Now try this one:
#
# $\frac{15 + 984}{-(217+4)}$
# + id="ZrDfdW1bmjUF"
# + [markdown] id="Z9PBEXQKm0W_"
# And finally, write the Python equivalent for this:
#
# $\frac{-(3655 * 44)}{(8 * 16)^3}$
# + id="34Zw1uLhnN3-"
# + [markdown] id="UgTm-9K5HX0F"
# #### 3 Practice writing helpful comments
#
# Consider the following code below. No need to decipher and understand every piece but just be aware of the output when you run the code cell. Based on the output, modify the code cell by adding a code comment at the top of the cell briefly explaining what the code does. This comment can be as many lines as you'd like, and may or may not include direct references to the example print statements below.
# + colab={"base_uri": "https://localhost:8080/"} id="cIBcxtcPHpII" outputId="c135a23d-312d-4d9f-f851-18a70ed8cf4d"
# Reverses the characters of the input string and joins them with single
# spaces: list(x) splits x into its characters, y[::-1] reverses that list,
# and " ".join(...) glues the reversed characters back together with a
# space between each pair (e.g. "AvocadoToast" -> "t s a o T o d a c o v A").
def mystery_function(x):
    """Return the characters of *x* in reverse order, separated by spaces."""
    y = list(x)
    return " ".join(y[::-1])
print(mystery_function("UniversityOfWashington"))
print(mystery_function("AvocadoToast"))
print(mystery_function("RacecaR"))
# + [markdown] id="EPQvTgAWoCvB"
# #### 4 More Markdown
#
# Consider the vision statement of the Global Innovation Exchange: "Our mission is to build the talent that leverages emerging technologies in new and impactful ways". Type that same statement in a new text cell below, only add a `_` (underscore) at the beginning and end. What ends up happening to the format of the text as a result?
# + [markdown] id="f3PKpVzLpfRc"
# In a new text cell, list all of the potential data science projects you might work on post DSE, with each one on its own separate line. Then add a `- ` at the beginning of each line; include a single space between the hyphen and the first letter in your project name! Based on the output, what can you decipher that this `- ` changes in the formatting?
# + [markdown] id="fQknBYvNjo7A"
# #### 5 Get familiar with the Python community
#
# Python has strong support from a community of avid developers and computer scientists. The Python Software Foundation (PSF) tries to maintain input and activity through their own website, [python.org](https://). Please explore their community section - https://www.python.org/community/ - and answer the following questions in a new text cell just below this one:
# - What is the name of the mailing list that the PSF manages for those who have questions about Python code?
# - In your own words, what is the goal of their Community Survey?
# - According to their most recent annual report, which continent provides the highest proportion of grants to the PSF?
# - Name at least three ways that the PSF recommends you can get involved with the community.
# + [markdown] id="ANQDw40wl8li"
# #### 6 Advanced - Understanding the switch from Python 2 to 3
#
# Inside of the following link, https://www.python.org/doc/, is an article about the Python Software Foundation's decision to end support for Python version 2, and move with support for version 3. From the article, answer the following questions in a new text cell just below this one:
# - What official date was Python 2 no longer supported?
# - What is the version number of the last supported Python 2?
# - In your own words, describe why the Python Software Foundation made the decision to stop supporting Python 2.
# + [markdown] id="miOpG431h5Id"
# #### 7 Advanced - Create your own Google Colab notebook!
#
# Create your own separate Google Colab notebook with the following rules and content:
# - The file name of your notebook should be in the format ***lastname_C1S1_breakout_custom_notebook.ipynb***.
# - Include a header (of any size) that lists your first name and last name, followed by "**Technology Explorers**".
# - Create a short paragraph bio of yourself in a text cell.
# - Include/embed an image of the Python logo, which can be found here: https://www.python.org/community/logos/.
# - Create a code cell with just the line of code: `import this`
# - Take your favorite line from that output and paste it into a text cell below, both bolding and italicizing it
#
| C1 Fundamentals/LABS_PROJECT/Tech Fun C1 L1 Practice with Python and Jupyter Notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''env'': venv)'
# name: python3
# ---
# # Aggregating data with pandas and numpy
#
# ## About the Data
# In this notebook, we will be working with 2 datasets:
# - Facebook's stock price throughout 2018 (obtained using the [`stock_analysis` package](https://github.com/stefmolin/stock-analysis)).
# - Daily weather data for NYC from the [National Centers for Environmental Information (NCEI) API](https://www.ncdc.noaa.gov/cdo-web/webservices/v2).
#
# *Note: The NCEI is part of the National Oceanic and Atmospheric Administration (NOAA) and, as you can see from the URL for the API, this resource was created when the NCEI was called the NCDC. Should the URL for this resource change in the future, you can search for "NCEI weather API" to find the updated one.*
#
# ## Background on the weather data
#
# Data meanings:
# - `AWND`: average wind speed
# - `PRCP`: precipitation in millimeters
# - `SNOW`: snowfall in millimeters
# - `SNWD`: snow depth in millimeters
# - `TMAX`: maximum daily temperature in Celsius
# - `TMIN`: minimum daily temperature in Celsius
#
# ## Setup
# +
import numpy as np
import pandas as pd
fb = pd\
.read_csv(
'data/fb_2018.csv',
index_col='date',
parse_dates=True
)\
.assign(
trading_volume=lambda x: pd.cut(
x.volume,
bins=3,
labels=['low', 'med', 'high']
)
)
fb.head()
# +
weather = pd.read_csv(
'data/weather_by_station.csv',
index_col='date',
parse_dates=True
)
weather.head()
# -
# Before we dive into any calculations, let's make sure `pandas` won't put things in scientific notation. We will modify how floats are formatted for displaying. The format we will apply is `.2f`, which will provide the float with 2 digits after the decimal point:
pd.set_option(
'display.float_format',
lambda x: '%.2f' % x
)
# ## Summarizing DataFrames
# We learned about `agg()` in the [`2-dataframe_operations.ipynb`](./2-dataframe_operations.ipynb) notebook when we learned about window calculations; however, we can call this on the dataframe directly to aggregate its contents into a single series:
# Aggregate the whole frame into a single series, one aggregation per column.
# Use string aggregation names: passing numpy callables (np.mean, np.max, ...)
# to agg() is deprecated in pandas 2.x and emits FutureWarning -- and the
# later agg() cells in this notebook already use strings.
fb.agg(
    {
        'open': 'mean',
        'high': 'max',
        'low': 'min',
        'close': 'mean',
        'volume': 'sum'
    }
)
# We can use this to find the total snowfall and precipitation recorded in Central Park in 2018:
# +
weather\
.query('station == "GHCND:USW00094728"')\
.pivot(columns='datatype', values='value')[['SNOW', 'PRCP']]\
.sum()
# -
# This is equivalent to passing `'sum'` to `agg()`:
weather\
.query('station == "GHCND:USW00094728"')\
.pivot(columns='datatype', values='value')[['SNOW', 'PRCP']]\
.agg('sum')
# Note that we aren't limited to providing a single aggregation per column. We can pass a list, and we will get a dataframe back instead of a series. Null values are placed where we don't have a calculation result to display:
fb.agg(
{
'open': 'mean',
'high': ['min', 'max'],
'low': ['min', 'max'],
'close': 'mean'
}
)
# ## Using `groupby()`
# Often we won't want to aggregate on the entire dataframe, but on groups within it. For this purpose, we can run `groupby()` before the aggregation. If we group by the `trading_volume` column, we will get a row for each of the values it takes on:
fb\
.groupby('trading_volume')\
.mean()
# After we call `groupby()`, we can still select columns for aggregation:
fb\
.groupby('trading_volume')['close']\
.agg(['min', 'max', 'mean'])
# We can still provide a dictionary specifying the aggregations to perform, but passing a list for a column will result in a hierarchical index for the columns:
# +
fb_agg = fb\
.groupby('trading_volume')\
.agg(
{
'open': 'mean',
'high': ['min', 'max'],
'low': ['min', 'max'],
'close': 'mean'
}
)
fb_agg
# -
# The hierarchical index in the columns looks like this:
fb_agg.columns
# Using a list comprehension, we can join the levels (in a tuple) with an `_` at each iteration:
# +
fb_agg.columns = list(
'_'.join(col_agg)
for col_agg in fb_agg.columns
)
fb_agg.head()
# -
# We can group on values in the index if we tell `groupby()`, which `level` to use:
weather.loc['2018-10']\
.query('datatype == "PRCP"')\
.groupby(level=0)\
.mean().head().squeeze()
# We can also create a `Grouper` object, which can also roll up the datetimes in the index. Here, we find the quarterly total precipitation per station:
weather\
.query('datatype == "PRCP"')\
.groupby(['station_name', pd.Grouper(freq='Q')])\
.sum().unstack()\
.sample(5, random_state=1)
# Note that we can use `filter()` to exclude some groups from aggregation. Here, we only keep groups with names ending in "NY US" in the group's `name` attribute, which is the station name in this case:
weather\
.groupby('station_name')\
.filter(lambda x: x.name.endswith('NY US'))\
.query('datatype == "SNOW"')\
.groupby('station_name')\
.sum().squeeze() # aggregate and make a series (squeeze)
# Let's see which months have the most precipitation. First, we need to group by day and average the precipitation across the stations. Then we can group by month and sum the resulting precipitation. We use `nlargest()` to give the 5 months with the most precipitation:
weather\
.query('datatype == "PRCP"')\
.groupby(level=0).mean()\
.groupby(pd.Grouper(freq='M')).sum()\
.value.nlargest()
# Perhaps the previous result was surprising. The saying goes "April showers bring May flowers"; yet April wasn't in the top 5 (neither was May for that matter). Snow will count towards precipitation, but that doesn't explain why summer months are higher than April. Let's look for days that accounted for a large percentage of the precipitation in a given month.
#
# In order to do so, we need to calculate the average daily precipitation across stations and then find the total per month. This will be the denominator. However, in order to divide the daily values by the total for their month, we will need a series of equal dimensions. This means we will need to use `transform()`:
weather\
.query('datatype == "PRCP"')\
.rename(dict(value='prcp'), axis=1)\
.groupby(level=0).mean()\
.groupby(pd.Grouper(freq='M'))\
.transform(np.sum)['2018-01-28':'2018-02-03']
# Notice how we have the same value repeated for each day in the month it belongs to. This will allow us to calculate the percentage of the monthly precipitation that occurred each day and then pull out the largest values:
weather\
.query('datatype == "PRCP"')\
.rename(dict(value='prcp'), axis=1)\
.groupby(level=0).mean()\
.assign(
total_prcp_in_month=lambda x: \
x\
.groupby(pd.Grouper(freq='M'))\
.transform(np.sum),
pct_monthly_prcp=lambda x: \
x.prcp\
.div(x.total_prcp_in_month)
)\
.nlargest(5, 'pct_monthly_prcp')
# `transform()` can be used on dataframes as well. We can use it to easily standardize the data:
fb[['open', 'high', 'low', 'close']]\
.transform(
lambda x: (x - x.mean()).div(x.std())
)\
.head()
# ## Pivot tables and crosstabs
# We saw pivots in [`ch_03/4-reshaping_data.ipynb`](../ch_03/4-reshaping_data.ipynb); however, we weren't able to provide any aggregations. With `pivot_table()`, we get the mean by default. In its simplest form, we provide a column to place along the columns:
fb.pivot_table(columns='trading_volume')
# By placing the trading volume in the index, we get the transpose:
fb.pivot_table(index='trading_volume')
# With `pivot()`, we also weren't able to handle multi-level indices or indices with repeated values. For this reason we haven't been able to put the weather data in the wide format. The `pivot_table()` method solves this issue:
weather\
.reset_index()\
.pivot_table(
index=['date', 'station', 'station_name'],
columns='datatype',
values='value',
aggfunc='median'
)\
.reset_index()\
.tail()
# We can use the `pd.crosstab()` function to create a frequency table. For example, if we want to see how many low-, medium-, and high-volume trading days Facebook stock had each month, we can use crosstab:
pd.crosstab(
index=fb.trading_volume,
columns=fb.index.month,
colnames=['month'] # name the columns index
)
# We can normalize with the row or column totals with the `normalize` parameter. This shows percentage of the total:
pd.crosstab(
index=fb.trading_volume,
columns=fb.index.month,
colnames=['month'],
normalize='columns'
)
# If we want to perform a calculation other than counting the frequency, we can pass the column to run the calculation on to `values` and the function to use to `aggfunc`:
pd.crosstab(
index=fb.trading_volume,
columns=fb.index.month,
colnames=['month'],
values=fb.close,
aggfunc=np.mean
)
# We can also get row and column subtotals with the `margins` parameter. Let's count the number of times each station recorded snow per month and include the subtotals:
# +
snow_data = weather.query('datatype == "SNOW"')
pd.crosstab(
index=snow_data.station_name,
columns=snow_data.index.month,
colnames=['month'],
values=snow_data.value,
aggfunc=lambda x: (x > 0).sum(),
margins=True, # show row and column subtotals
margins_name='total observations of snow' # name the subtotals
)
# -
# <hr>
# <div>
# <a href="./2-dataframe_operations.ipynb">
# <button>← Previous Notebook</button>
# </a>
# <a href="./4-time_series.ipynb">
# <button style="float: right;">Next Notebook →</button>
# </a>
# </div>
# <hr>
| ch_04/3-aggregations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # test_car
#
# In this file we test a simple car dynamics with a linear controller
# The car has to stop before the obstacle, but it gets noisy readings of the
# location of the obstacle at every time.
# We consider a horizon of length 100 and show that using KernelPCA produces
# more counterexamples than using random sampling.
#
# Requirement G_[0,H] (x - x_obs) > 0 where x_obs = 5.0
# min of this is a smooth function, and so we need only one node to represent it
# +
from adversarial_testing import pred_node
from adversarial_testing import test_module
import numpy as np
import GPy
# +
# Car dynamics + controller
def car_dyn(x, x_obs, delta_t, eig1, eig2):
    """One Euler step of the closed-loop car.

    x       : current state [position, velocity]
    x_obs   : current (noisy) obstacle position reading
    delta_t : integration step
    eig1/2  : magnitudes of the desired closed-loop eigenvalues

    Returns the next state as a numpy array [position, velocity].
    """
    # PD-style acceleration toward the obstacle, saturated to [-3, 3].
    accel = -(eig1 + eig2) * x[1] + (eig1 * eig2) * (x_obs - x[0])
    accel = max(-3, min(3, accel))
    next_pos = x[0] + x[1] * delta_t
    next_vel = x[1] + accel * delta_t
    return np.array([next_pos, next_vel])
# Set the eigen values of the closed loop control system to be -0.15, -0.25
eig1 = 0.15
eig2 = 0.25
delta_t = 0.025
def compute_traj(x0, xobs_n):
    """Roll out the closed-loop car along a sequence of obstacle readings.

    x0      : initial state [position, velocity]
    xobs_n  : 1-D array of noisy obstacle positions (a 2-D input is
              reduced to its first row)

    Returns the list of states, starting with x0
    (so its length is len(xobs_n) + 1).
    """
    if len(xobs_n.shape) > 1:
        xobs_n = xobs_n[0]
    states = [x0]
    for reading in xobs_n:
        states.append(car_dyn(states[-1], reading, delta_t, eig1, eig2))
    return states
def f_prednode(traj):
    """Robustness of G_[0,H] (x - x_obs) > 0 with x_obs = 5: the worst-case
    margin (5 - position) over the whole trajectory."""
    margins = np.asarray([5 - state[0] for state in traj])
    return margins.min()
# +
bounds = [(4.5, 5.5)] * 100  # one noisy obstacle reading per time step, horizon H = 100
x0 = np.array([0., 3.])      # initial state: position 0, velocity 3
rand_nums = []       # random seed used in each repetition
rand_details = []    # per-run [TM.rand_min_val, TM.rand_count] from random sampling
smooth_details = []  # per-run [TM.smooth_min_val, TM.smooth_count] from the smooth/GP search
# Repeat the experiment 15 times with fresh random seeds.
for _ in range(15):
    r = np.random.randint(2**32 - 1)
    np.random.seed(r)
    node0 = pred_node(f=f_prednode)  # single predicate node: the spec's min is smooth
    TM = test_module(bounds=bounds, sut=lambda x: compute_traj(x0, x), f_tree=node0,
                     with_random = True, init_sample = 100, optimize_restarts = 1,
                     exp_weight = 10, kernel_type = GPy.kern.RBF)
    TM.initialize()
    TM.run_BO(150)  # 150 Bayesian-optimization iterations
    smooth_details.append([TM.smooth_min_val, TM.smooth_count])
    rand_details.append([TM.rand_min_val, TM.rand_count])
    rand_nums.append(r)
# -
| test_car.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # å€åæ³ãšå€åéååºæå€ãœã«ããŒæ³ãåŠç¿ãã
# ãã®å®ç¿ã§ã¯ãå€åæ³ã®åºæ¬çãªèãæ¹ãšããã®æ¹æ³ã«åºã¥ããå€åéååè·¯ãšåŒã°ããéåèšç®ã®ææ³ãåŠã³ãŸããç¹ã«ãéåèšç®ãšå€å
žèšç®ãçµã¿åããã**ãéåã»å€å
žãã€ããªããã¢ã«ãŽãªãºã ã**ãšããŠã®å€åéååè·¯ã«çç®ããŸãããã®ææ³ãçšããŠãè¿äŒŒçãªåºæå€èšç®ãå¯èœã«ãã**å€åéååºæå€ãœã«ããŒæ³**ãšåŒã°ããæ¹æ³ãžæ¡åŒµããŠãããŸãã
#
# ãã®ææã¯ãQiskit textbookã®["Simulating Molecules using VQE"](https://qiskit.org/textbook/ch-applications/vqe-molecules.html)ãåèã«ããŠããŸãã
#
# ## å
容
# 1. [ã¯ããã«](#introduction)
# 2. [éåååŠã«ãããå€åæ³](#varmethod)
# 1. [æ°åŠçèæ¯](#backgroundmath)
# 2. [åºåºç¶æ
ã®è¿äŒŒ](#groundstate)
# 3. [å€åéååºæå€ãœã«ããŒæ³](#vqe)
# 1. [å€åéååè·¯](#varforms)
# 2. [åçŽãªå€åãã©ãŒã ](#simplevarform)
# 3. [ãã©ã¡ãŒã¿ã®æé©å](#optimization)
# 4. [å€åãã©ãŒã ã䜿ã£ãå®äŸ](#example)
# 4. [åèæç®](#references)
# ## ã¯ããã«<a id='introduction'></a>
# è¡åã§è¡šçŸãããããç©çç³»ã«å¯ŸããŠããã®åºæå€ã®æå°å€ãèŠã€ãããšããæäœã¯ãå€ãã®ã¢ããªã±ãŒã·ã§ã³ã§å¿
èŠãšãªãéèŠãªæè¡ã§ããäŸãã°ååŠã§ã¯ãååãç¹åŸŽã¥ãããšã«ããŒãè¡åã®æå°åºæå€ã¯ããã®ã·ã¹ãã ã®åºåºç¶æ
ã®ãšãã«ã®ãŒã«ãªããŸããæå°åºæå€ãèŠã€ããã«ã¯**ãéåäœçžæšå®ã**[1]ãšåŒã°ããææ³ïŒãã®[ããŒãããã¯](shor.ipynb#qpe)ãåç
§ïŒã䜿ãããšãã§ããŸãããå®çšçãªå¿çšåé¡ã®å®è£
ã«å¿
èŠãªåè·¯ã¯ãNISQã³ã³ãã¥ãŒã¿ã§ã¯å®çŸã§ããªãã»ã©é·ããªãããšãç¥ãããŠããŸãããã®ããã«ãçãéååè·¯ãå©çšããŠååã®åºåºç¶æ
ãšãã«ã®ãŒãæšå®ããææ³ãšããŠã**å€åéååºæå€ãœã«ããŒæ³**ïŒ*Variational Quantum Eigensolver*, VQEïŒãææ¡ãããŸãã [[2]](https://www.nature.com/articles/ncomms5213)ã
#
# ãŸããVQEã®å
ã«ãªãé¢ä¿ã圢åŒçã«è¡šçŸããŠã¿ãŸããããäœãåãããªãæå°åºæå€$\lambda_{min}$ãšãã®åºæç¶æ
$|\psi_{min}\rangle$ããã£ããšã«ããŒãè¡å$H$ãäžãããããšããŠãVQEã¯ãšãã«ã®ãŒã®äžéã§ãã$\lambda_{min}$ã®è¿äŒŒè§£$\lambda_{\theta}$ãæ±ããææ³ã§ããã€ãŸã
#
# \begin{align*}
# \lambda_{min} \le \lambda_{\theta} \equiv \langle \psi(\theta) |H|\psi(\theta) \rangle
# \end{align*}
#
# ãæºãããã§ããã ãå°ãã$\lambda_{\theta}$ãæ±ããããšã«å¯Ÿå¿ããŸããããã§$|\psi(\theta)\rangle$ã¯è¿äŒŒè§£$\lambda_{\theta}$ã«å¯Ÿå¿ããåºæç¶æ
ã§ã$\theta$ã¯ãã©ã¡ãŒã¿ã§ããã€ãŸããé©åœãªåæç¶æ
$|\psi\rangle$ã«$U(\theta)$ã§è¡šçŸããããã©ã¡ãŒã¿åãããåè·¯ãé©çšããããšã§ã$|\psi_{min}\rangle$ãè¿äŒŒããç¶æ
$|\psi(\theta)\rangle \equiv U(\theta)|\psi\rangle$ãåŸãããšããã¢ã€ãã¢ã§ããæé©ãªãã©ã¡ãŒã¿$\theta$ã®å€ã¯ãæåŸ
å€ $\langle \psi(\theta) |H|\psi(\theta) \rangle$ãæå°ã«ãªãããã«å€å
žèšç®ãç¹°ãè¿ããªããæ±ããŠããããšã«ãªããŸãã
#
# ## éåååŠã«ãããå€åæ³<a id='varmethod'></a>
# ### æ°åŠçèæ¯<a id='backgroundmath'></a>
#
# VQEã¯éåååŠã®**å€åæ³**ãå¿çšããææ³ã§ããå€åæ³ãããè¯ãçè§£ããããã«ãåºç€çãªæ°åŠçèæ¯ã説æããŸããè¡å$A$ã®åºæãã¯ãã«$|\psi_i\rangle$ãšãã®åºæå€$\lambda_i$ã¯ã$A |\psi_i\rangle = \lambda_i |\psi_i\rangle$ã®é¢ä¿ã«ãããŸããè¡å$H$ããšã«ããŒãè¡å$H = H^{\dagger}$ã®å Žåãã¹ãã¯ãã«å®çãã$H$ã®åºæå€ã¯å®æ°ã«ãªããŸãïŒ$\lambda_i = \lambda_i^*$ïŒã枬å®ã§ããéã¯å®æ°ã§ããå¿
èŠããããããéåç³»ã®ããã«ããã¢ã³ãèšè¿°ããããã«ã¯ãšã«ããŒãè¡åãé©åã§ããããã«ã$H$ã¯ä»¥äžã®ããã«è¡šçŸã§ããŸãã
#
# $$
# H = \sum_{i = 1}^{N} \lambda_i |\psi_i\rangle \langle \psi_i |
# $$
#
# ããã§ãå$\lambda_i$ã¯å¯Ÿå¿ããåºæãã¯ãã«$|\psi_i\rangle$ã®åºæå€ã§ããä»»æã®éåç¶æ
ã«å¯ŸããŠèŠ³æž¬é$H$ãæž¬å®ããå Žåã®æåŸ
å€ã¯ã以äžã®åŒã§æ±ããããŸãã
#
# $$
# \langle H \rangle_{\psi} \equiv \langle \psi | H | \psi \rangle
# $$
#
# äžåŒã®$H$ãæåŸ
å€ã®åŒã«ä»£å
¥ãããš
#
# $$
# \begin{aligned}
# \langle H \rangle_{\psi} = \langle \psi | H | \psi \rangle &= \langle \psi | \left(\sum_{i = 1}^{N} \lambda_i |\psi_i\rangle \langle \psi_i |\right) |\psi\rangle\\
# &= \sum_{i = 1}^{N} \lambda_i \langle \psi | \psi_i\rangle \langle \psi_i | \psi\rangle \\
# &= \sum_{i = 1}^{N} \lambda_i | \langle \psi_i | \psi\rangle |^2
# \end{aligned}
# $$
#
# ã«ãªããŸããæåŸã®åŒã¯ãä»»æã®ç¶æ
$|\psi\rangle$ã«å¯Ÿãã$H$ã®æåŸ
å€ã¯ã$\lambda_i$ãéã¿ãšããåºæãã¯ãã«$|\psi_i\rangle$ãš$|\psi\rangle$ã®å
ç©ïŒã®çµ¶å¯Ÿå€ã®äºä¹ïŒã®ç·åœ¢çµåãšããŠäžããããããšã瀺ããŠããŸãããã®åŒããã$| \langle \psi_i | \psi\rangle |^2 \ge 0$ ã§ããããã«
#
# $$
# \lambda_{min} \le \langle H \rangle_{\psi} = \langle \psi | H | \psi \rangle = \sum_{i = 1}^{N} \lambda_i | \langle \psi_i | \psi\rangle |^2
# $$
#
# ãæãç«ã€ããšã¯æããã§ããäžèšã®åŒã**å€åæ³**ãšåŒã°ãããã®ïŒããã¹ãã«ãã£ãŠã¯**å€ååç**ãšåŒã¶ïŒ[3]ã§ã$H$ã®æå°åºæå€ãäžéãšããŠãä»»æã®æ³¢å颿°ã®æåŸ
å€ãè¿äŒŒçã«æ±ããããšãã§ããããšã衚ããŠããŸãããã®åŒããã$|\psi_{min}\rangle$ç¶æ
ã®æåŸ
å€ã¯$\langle \psi_{min}|H|\psi_{min}\rangle = \langle \psi_{min}|\lambda_{min}|\psi_{min}\rangle = \lambda_{min}$ã«ãªãããšãåããã§ãããã
#
#
# ### åºåºç¶æ
ã®è¿äŒŒ<a id='groundstate'></a>
# ç³»ã®ããã«ããã¢ã³ããšã«ããŒãè¡å$H$ã§è¡šçŸãããŠããå Žåãç³»ã®åºåºç¶æ
ã®ãšãã«ã®ãŒã¯$H$ã®æå°åºæå€ã«ãªããŸãããŸã$|\psi_{min}\rangle$ã®åææšå®ãšããŠä»»æã®æ³¢å颿°$|\psi \rangle$ïŒ*Ansatz*ãšåŒã°ããïŒãéžã³ããã®ç¶æ
ã§ã®æåŸ
å€$\langle H \rangle_{\psi}$ãèšç®ããŸããå€åæ³ã®éµã¯ããã®æåŸ
å€ãå°ãããªãããã«æ³¢å颿°ãæŽæ°ããªããèšç®ãç¹°ãè¿ããããã«ããã¢ã³ã®åºåºç¶æ
ãšãã«ã®ãŒã«è¿ã¥ããŠãããšããã«ãããŸãã
# ## å€åéååºæå€ãœã«ããŒæ³<a id='vqe'></a>
# ### å€åéååè·¯<a id='varforms'></a>
# éåã³ã³ãã¥ãŒã¿ãŒäžã§å€åæ³ãå®è£
ããã«ã¯ã系統çã«*Ansatz*ãæŽæ°ããæ¹æ³ãå¿
èŠã§ããVQEã¯ãããæ±ºãŸã£ãæ§é ãæã€ãã©ã¡ãŒã¿åãããéååè·¯ïŒ**å€åéååè·¯**ïŒã䜿ã£ãŠè¡ããŸãããã®åè·¯ã¯ãã°ãã°**å€åãã©ãŒã **ïŒ*variational form*ïŒãšãåŒã°ãããŠãã¿ãªãŒå€æ$U(\theta)$ã§è¡šçŸãããŸãïŒ$\theta$ã¯ãã©ã¡ãŒã¿ïŒã
# å€åãã©ãŒã ãåæç¶æ
$|\psi\rangle$ïŒäŸãã°æšæºç¶æ
$|0\rangle$ïŒã«é©çšãããšãåºåãšããŠ$|\psi(\theta)\rangle \equiv U(\theta)|\psi\rangle$ãçæãããŸãããã®ç¶æ
ã®å
ã§æåŸ
å€$\langle \psi(\theta)|H|\psi(\theta)\rangle$ã$\lambda_{min}$ã«è¿ä»ãããã«ã$|\psi(\theta)\rangle$ã«å¯ŸããŠãã©ã¡ãŒã¿$\theta$ã®æé©åãè¡ããŸãã
# ãã©ã¡ãŒã¿ã®æé©åã¯å€å
žèšç®ã§å®è¡ããããšãæ³å®ããŠããããã®æå³ã§VQEã¯å
žåçãª**ãéåã»å€å
žãã€ããªããã¢ã«ãŽãªãºã ã**ã®äžã€ã§ãã
#
# å€åãã©ãŒã ã®æ±ºãæ¹ã§ãããè§£ãããåé¡ã®ãã¡ã€ã³ã«å¿ããŠç¹å®ã®æ§é ãæã€å€åãã©ãŒã ãå°å
¥ããããšããããŸããããã§ã¯ãªããå¹
åºãåé¡ãžã®å¿çšãã§ããããã«ãã¡ã€ã³ã«äŸåããªã圢ã®å€åãã©ãŒã ïŒäŸãã°$R_Y$ã²ãŒãïŒã䜿ãããšããããŸããé«ãšãã«ã®ãŒå®éšãžã®å¿çšã§ã¯ããã®$R_Y$ã²ãŒãã䜿ã£ãå€åãã©ãŒã ãå®è£
ããŸãã
#
# ### åçŽãªå€åãã©ãŒã <a id='simplevarform'></a>
# å€åãã©ãŒã ãæ±ºããæã«ã¯ã2ã€ã®çžåããç®çã«å¯ŸããŠãã©ã³ã¹ãèããå¿
èŠããããŸãã$n$éåãããã®å€åãã©ãŒã ã¯ããã©ã¡ãŒã¿ã®æ°ãå¢ããã°$|\psi\rangle \in \mathbb{C}^N$ïŒ$N=2^n$ïŒã®ä»»æã®ç¶æ
ãã¯ãã«$|\psi\rangle$ãçæã§ããã§ãããããããããã©ã¡ãŒã¿ã®æé©åã®ããšãèããã°ãã§ããã°å¯èœãªéãå°ãªããã©ã¡ãŒã¿ã§å€åãã©ãŒã ãæ§ç¯ãããã§ãããã
#
# ããã§ã¯ããŸã$n=1$ã®å ŽåãèããŸãã$U3$ã²ãŒãã¯3ã€ã®ãã©ã¡ãŒã¿$\theta$ã$\phi$ã$\lambda$ã䜿ã£ãŠä»¥äžã®å€æã衚çŸããŸã:
#
# $$
# U3(\theta, \phi, \lambda) = \begin{pmatrix}\cos\frac{\theta}{2} & -e^{i\lambda}\sin\frac{\theta}{2} \\ e^{i\phi}\sin\frac{\theta}{2} & e^{i\lambda + i\phi}\cos\frac{\theta}{2} \end{pmatrix}
# $$
#
# ã°ããŒãã«äœçžãé€ããŠã3ã€ã®ãã©ã¡ãŒã¿ãé©åã«èšå®ããŠå®è£
ããã°ä»»æã®åäžéåãããã®å€æãè¡ããŸãããã®**ãŠãããŒãµã«**ãªå€åãã©ãŒã ã¯ã3ã€ãããã©ã¡ãŒã¿ããªãããå¹ççã«æé©åã§ãããšããç¹åŸŽããããŸãããã 匷調ãã¹ãç¹ã¯ãä»»æã®ç¶æ
ãçæã§ãããšããããšã¯ããã®å€åãã©ãŒã ãçæããç¶æ
ã¯$H$ã®æåŸ
å€ãèšç®ããäžã§å¿
èŠã«ãªãç¶æ
ã«éå®ãããªããšããããšã§ããã€ãŸãããã®æ§è³ªãæå³ããã®ã¯ãæå°ã®æåŸ
å€ãæ±ãŸããã©ããã¯å€å
žèšç®ã®æé©åã®èœåã ãã«äŸåãããšããããšã§ãã
#
# ### ãã©ã¡ãŒã¿ã®æé©å<a id='optimization'></a>
# ãã©ã¡ãŒã¿åãããå€åãã©ãŒã ãéžæããããã¿ãŒã²ãããšãªãããã«ããã¢ã³ã®æåŸ
å€ãæå°åããããã«ãå€åæ³ã«åŸã£ãŠãã©ã¡ãŒã¿ãæé©åããå¿
èŠããããŸãããã©ã¡ãŒã¿ã®æé©åã®ããã»ã¹ã«ã¯æ§ã
ãªèª²é¡ããããŸããäŸãã°ãéåããŒããŠã§ã¢ã«ã¯æ§ã
ãªã¿ã€ãã®ãã€ãºãããããããã®ç¶æ
ã§ãšãã«ã®ãŒã枬å®ããŠãæ£ããçããè¿ã£ãŠãããšããä¿èšŒã¯ãããŸããããã®ããã«ããã©ã¡ãŒã¿ã®æé©åã«äœ¿ãç®ç颿°ã®è©äŸ¡ãå®éã®ãšãã«ã®ãŒã®å€ãããããŠããŸããæ£ãããã©ã¡ãŒã¿ã®æŽæ°ãã§ããªãå¯èœæ§ããããŸãããŸããæé©åã®ææ³ïŒ**ãªããã£ãã€ã¶ãŒ**ïŒã«ãã£ãŠã¯ããã©ã¡ãŒã¿ã®æ°ã«äŸã£ãŠç®ç颿°ãè©äŸ¡ããåæ°ãå¢ããããšããããããã«ãã€ãºã®åœ±é¿ãåãããããªããŸããã€ãŸããã¢ããªã±ãŒã·ã§ã³ã®èŠæ±ãèæ
®ããªããããªããã£ãã€ã¶ãŒã®éžæã«ãæ°ãé
ãå¿
èŠããããŸãã
#
# æãäžè¬çãªæé©åææ³ã¯ããšãã«ã®ãŒã®æžå°ã極倧ã«ãªããããªæ¹åã«åãã©ã¡ãŒã¿ãæŽæ°ãã**åŸé
éäžæ³**ã§ããåãã©ã¡ãŒã¿ããšã«åŸé
ãèšç®ãããããæé©åãã¹ããã©ã¡ãŒã¿ã®æ°ã«å¿ããŠç®ç颿°ãè©äŸ¡ããåæ°ã¯å¢ããŸãããŸãããã®æ§è³ªããæ¢çŽ¢ç©ºéã®äžã§å±æçãªæé©å€ãçŽ æ©ãçºèŠããããšã¯å¯èœã§ãããéã«æ¢çŽ¢ã屿çãªæå°å€ã«çãŸã£ãŠããŸãããšããããŸããåŸé
éäžæ³ã¯çŽæçã§çè§£ããããæé©åã®ææ³ã§ãããå°ãªããšãçŸåšã®NISQã³ã³ãã¥ãŒã¿ã§ã¯ç²ŸåºŠè¯ãå®è¡ããã®ã¯é£ãããšèããããŠããŠãçŸç¶ã§ã¯ããŸãæšå¥šãããŠã¯ããŸããã
#
# ãã€ãºã®ããéåã³ã³ãã¥ãŒã¿ã§ç®ç颿°ãæé©åããé©åãªãªããã£ãã€ã¶ãŒãšããŠã*Simultaneous Perturbation Stochastic Approximation*ïŒ**SPSA**ïŒããããŸããSPSAã¯ïŒåã®æž¬å®ã ãã§ç®ç颿°ã®åŸé
ãè¿äŒŒã§ãããšããç¹åŸŽããããŸããåŸé
éäžæ³ã§ã¯åãã©ã¡ãŒã¿ãç¬ç«ã«å€åãããã®ã«å¯ŸããŠãSPSAã§ã¯å
šãŠã®ãã©ã¡ãŒã¿ãåæã«ã©ã³ãã ã«å€åãããŸãã以äžã®ããšãããçŸåšã®ãšããVQEãå©çšããå Žåã®ãªããã£ãã€ã¶ãŒãšããŠã¯SPSAãæšå¥šãããŠããããã§ãã
#
# ãã€ãºããªãéåã³ã³ãã¥ãŒã¿ã§ç®ç颿°ãè©äŸ¡ããå ŽåïŒäŸãã°ç¶æ
ãã¯ãã«ã·ãã¥ã¬ãŒã¿ã§å®è¡ããå Žåãªã©ïŒã¯ãPythonã®[SciPy](https://www.scipy.org/scipylib/index.html)ããã±ãŒãžã§æäŸãããŠãããªããã£ãã€ã¶ãŒãªã©æ§ã
ãªéžæè¢ããããŸãããã®å®ç¿ã§ã¯ãQiskit Aquaã§ãµããŒããããŠãããªããã£ãã€ã¶ãŒã®äžã§ãç¹ã«*Constrained Optimization by Linear Approximation*ïŒ**COBYLA**ïŒãšåŒã°ãããªããã£ãã€ã¶ãŒã䜿çšããŸããCOBYLAã¯ç®ç颿°ã®è©äŸ¡ã1åããå®è¡ããªãïŒã€ãŸãè©äŸ¡ã®åæ°ããã©ã¡ãŒã¿ã®æ°ã«äŸåããªãïŒããããã€ãºããªãç¶æ
ã§ãã€è©äŸ¡ã®åæ°ãå°ãªããããå Žåã«ã¯COBYLAã®å©çšãæšå¥šãããŠããããã§ãããã¥ãã«ãããã©ã®ãªããã£ãã€ã¶ãŒããã¹ããã¯VQEã¢ã«ãŽãªãºã ã®å®è£
圢åŒãå®è¡ç°å¢ã«ãã£ãŠå€ãããããããçšåºŠçµéšã«ãã£ãŠæ±ºããå¿
èŠããããšèããããŸãã
#
# ### å€åãã©ãŒã ã䜿ã£ãå®äŸ<a id='example'></a>
# ã§ã¯ããã§ãåäžéåãããã®å€åãã©ãŒã ãå©çšããŠãã©ã¡ãŒã¿æé©åã®äŸãå®è¡ããŠã¿ãŸããããäŸãšããŠãã©ã³ãã ãªç¢ºçååžã®ãã¯ãã«$\vec{x}$ãå
¥åãšããŠäžããæãåºåã®ç¢ºçååžã$\vec{x}$ã«è¿ããªãããã«åäžéåãããã®å€åãã©ãŒã ãæ±ºå®ãããšããåé¡ãèããŸãïŒ2ã€ã®ç¢ºçååžã®è¿ãã¯L1è·é¢ã«ãã£ãŠå®çŸ©ããŸãïŒã
#
# <img src="figs/vqe_u3.png" width=40%>
#
# æåã«ãPythonã§ã©ã³ãã ãªç¢ºçååžã®ãã¯ãã«ãäœæããŸãã
# + pycharm={"name": "#%%\n"}
# Tested with python 3.7.9, qiskit 0.23.5, numpy 1.20.1
import numpy as np
import matplotlib.pyplot as plt
# Fix the RNG seed for reproducibility, then draw a random 2-entry vector and
# normalize it so it sums to 1 (a target probability distribution over {0, 1}).
np.random.seed(999999)
target_distr = np.random.rand(2)
target_distr /= sum(target_distr)
# -
# 次ã«ãåäžã®$U3$å€åãã©ãŒã ã®3ã€ã®ãã©ã¡ãŒã¿ãåŒæ°ãšããŠåãåãã察å¿ããéååè·¯ãè¿ã颿°ãå®çŸ©ããŸãã
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
def get_var_form(params):
    """Build the single-qubit variational circuit: a U(theta, phi, lambda) gate
    applied to |0>, followed by a measurement into one classical bit.

    params: sequence of the three U-gate angles (theta, phi, lambda).
    """
    q_reg = QuantumRegister(1, name="q")
    c_reg = ClassicalRegister(1, name='c')
    circuit = QuantumCircuit(q_reg, c_reg)
    circuit.u(params[0], params[1], params[2], q_reg[0])
    circuit.measure(q_reg, c_reg[0])
    return circuit
# å€åãã©ãŒã ã®ãã©ã¡ãŒã¿ã®ãªã¹ããå
¥åãšãããã©ã¡ãŒã¿ã«å¯Ÿå¿ããã³ã¹ããèšç®ããç®ç颿°ãå®çŸ©ããŸããã¢ã«ãŽãªãºã ãå®è¡ããããã¯ãšã³ããšããŠã**QASMã·ãã¥ã¬ãŒã¿**ã䜿çšããŸãã
# +
from qiskit import Aer, execute
# Shot-based (QASM) simulator backend used for all circuit executions below.
backend = Aer.get_backend("qasm_simulator")
NUM_SHOTS = 10000  # number of circuit executions used to estimate the output distribution

def get_probability_distribution(counts):
    """Convert a measurement-counts dict (e.g. {'0': 6013, '1': 3987}) into [P(0), P(1)].

    Bug fix: the original iterated over ``counts.values()``, whose order is the
    dict's insertion order and is not guaranteed to be '0' then '1', so the two
    probabilities could come out swapped relative to ``target_distr``. Looking
    the outcomes up by key (defaulting to 0 for an unobserved outcome) makes
    the ordering explicit and also covers the single-outcome case that the
    original patched with a trailing ``append(0)``.
    """
    return [counts.get('0', 0) / NUM_SHOTS, counts.get('1', 0) / NUM_SHOTS]
def objective_function(params):
    """Cost of a parameter vector: L1 distance between the circuit's measured
    output distribution and the global ``target_distr``."""
    circuit = get_var_form(params)
    run_result = execute(circuit, backend, shots=NUM_SHOTS).result()
    measured = get_probability_distribution(run_result.get_counts(circuit))
    return sum(abs(measured[i] - target_distr[i]) for i in range(2))
# -
# æåŸã«COBYLAãªããã£ãã€ã¶ãŒã®ã€ã³ã¹ã¿ã³ã¹ãäœæããã¢ã«ãŽãªãºã ãå®è¡ããŸããåºåããã確çååžã¯å®è¡ã®åºŠã«ç°ãªããã¿ãŒã²ããã®ç¢ºçååžãšå®å
šã«ã¯åãã«ãªããªãããšã«æ³šæããŠãã ãããåºåã®ç²ŸåºŠã¯éåèšç®ã®åæ°ïŒã·ã§ããæ°ïŒNUM_SHOTSïŒã«äŸåããã®ã§ãã·ã§ããæ°ã墿žãããæã®äžèŽå
·åã確èªããŠã¿ãŠãã ããã
# +
from qiskit.aqua.components.optimizers import COBYLA
# Classical optimizer: COBYLA evaluates the objective once per iteration.
optimizer = COBYLA(maxiter=500, tol=0.0001)
# Random initial guess for the three U-gate angles.
params = np.random.rand(3)
# ret[0] = optimal parameters found, ret[1] = objective value at the optimum.
ret = optimizer.optimize(num_vars=3, objective_function=objective_function, initial_point=params)
# Re-run the circuit at the optimal parameters and report the result.
qc = get_var_form(ret[0])
counts = execute(qc, backend, shots=NUM_SHOTS).result().get_counts(qc)
output_distr = get_probability_distribution(counts)
print("Target Distribution:", target_distr)
print("Obtained Distribution:", output_distr)
print("Output Error (L1-Distance):", ret[1])
print("Parameters Found:", ret[0])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## åèæç®<a id='references'></a>
# 1. <NAME> and Chuang, <NAME>, "Quantum Computation and Quantum Information", Cambridge University Pres, 2000.
# 2. <NAME>, et al., "A variational eigenvalue solver on a photonic quantum processor", [Nature commun. 5, 4213 (2014)](https://www.nature.com/articles/ncomms5213).
#
| source/jp/vqe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Reviews
# This is the reference implementation for [Reviews](https://github.com/chaoss/wg-evolution/blob/master/metrics/Reviews.md),
# a metric specified by the
# [Evolution Working Group](https://github.com/chaoss/wg-evolution) of the
# [CHAOSS project](https://chaoss.community).
# This implementation is specific to Git repositories.
#
# Have a look at [README.md](../README.md) to find out how to run this notebook (and others in this directory) as well as to get a better understanding of the purpose of the implementations.
#
# The implementation is described in two parts (see below):
#
# * Class for computing Reviews
# * An explanatory analysis of the class' functionality
#
# Some more auxiliary information in this notebook:
#
# * Examples of the use of the implementation
# As discussed in the [README](../README.md) file, the scripts required to analyze the data fetched by Perceval are located in the `scripts` package. Due to python's import system, to import modules from a package which is not in the current directory, we have to either add the package to `PYTHONPATH` or simply append a `..` to `sys.path`, so that `code_df` can be successfully imported.
from datetime import datetime
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from scripts import utils
from scripts import conditions
from scripts.pullrequest_github import PullRequestGitHub
# %matplotlib inline
class ReviewsGitHub(PullRequestGitHub):
    """
    Reviews metric for GitHub pull-request data fetched by Perceval.
    """

    def compute(self):
        """
        Compute the total number of reviews created, from the Perceval data.

        Counts distinct 'hash' values among the (optionally date-filtered) items.

        :returns: The total number of reviews created
        """
        unique_ids = {entry['hash'] for entry in self.items}
        return len(unique_ids)
# ## Performing the Analysis
# Using the above class, we can perform several kinds of analysis on the JSON data file, fetched by Perceval.
#
# For starters, we can perform a simple count of all reviews in the data. For this analysis, we can vary the value passed to the `date_range` parameter.
# ### Counting the total number of reviews
# We first read the JSON file containing Perceval data using the `read_json_file` utility function.
items = utils.read_json_file('../pull_requests.json')
# Let's use the `compute` method to count the total number of reviews created. First, we will do it without passing any since and until dates.
# Next, we can pass in the start and end dates as a tuple. The format would be `%Y-%m-%d`.
#
# A third kind of analysis we can perform is passing only one of the dates to `date_range` --- either `since` or `until`.
# +
# Fix: the class defined above is ``ReviewsGitHub``; the original cell referred
# to an undefined name ``Reviews``, which raises NameError at runtime.
reviews = ReviewsGitHub(items)
print("The total number of reviews "
      "in the file is {}.".format(reviews.compute()))
date_since = datetime.strptime("2018-01-01", "%Y-%m-%d")
date_until = datetime.strptime("2018-07-01", "%Y-%m-%d")
# Count only reviews created inside the (since, until) window.
reviews_dated = ReviewsGitHub(items,
                              date_range=(date_since, date_until))
print("The total number of reviews created between "
      "2018-01-01 and 2018-07-01 is {}.".format(reviews_dated.compute()))
# Open-ended range: everything created after date_since.
reviews_after = ReviewsGitHub(items,
                              date_range=(date_since, None))
print("The total number of reviews created after "
      "2018-01-01 is {}.".format(reviews_after.compute()))
| implementations/notebooks/reviews_github.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from math import pi
from random import choice, uniform, randint
from ipycanvas import Canvas, hold_canvas
from ipywidgets import Button
def draw_tree(canvas, start_x, start_y, length, angle, branch_width):
    """Recursively draw one fractal tree branch on `canvas`.

    Draws a segment of `length` pixels from (start_x, start_y), rotated by
    `angle` degrees, then recurses twice with shorter, thinner, randomly
    re-angled child branches until segments fall below 10 pixels.
    """
    canvas.begin_path()
    canvas.save()  # snapshot the transform so it can be restored before returning
    canvas.line_width = branch_width
    # Work in a local frame: origin at the branch base, rotated by `angle`.
    canvas.translate(start_x, start_y)
    canvas.rotate(angle * pi / 180)
    canvas.move_to(0, 0)
    canvas.line_to(0, -length)  # draw "upward" in the rotated frame
    if length > 25:
        # Long segments: trunk/branch colour.
        canvas.stroke_style = '#c68a47'
    else:
        # Short segments: one of three leaf-green shades, chosen at random.
        canvas.stroke_style = choice(['#559b37', '#379b4b', '#879b37'])
    canvas.stroke()
    if length < 10:
        # Base case: branch too small to subdivide further.
        canvas.restore()
        return
    # Two child branches: 80-90% of the parent's length, 70% of its width,
    # with opposite-biased random changes to the lean angle.
    draw_tree(canvas, 0, -length, length * uniform(0.8, 0.9), angle + uniform(-1, 10), branch_width * 0.7)
    draw_tree(canvas, 0, -length, length * uniform(0.8, 0.9), angle + uniform(-10, 1), branch_width * 0.7)
    canvas.restore()
# Shared drawing surface; a slight drop shadow gives the branches depth.
canvas = Canvas()
canvas.shadow_color = 'black'
canvas.shadow_offset_x = 1
canvas.shadow_offset_y = 0
canvas.shadow_blur = 3
canvas.line_cap = 'round'  # rounded stroke ends so branch joints look smooth
def draw_random_forest(*args):
    """Clear the canvas and draw a fresh forest of 6-10 random trees plus a
    ground strip. Extra positional args (e.g. the Button click event) are
    ignored so this can double as a widget callback."""
    with hold_canvas(canvas):
        canvas.clear()
        for _ in range(randint(6, 10)):
            draw_tree(
                canvas,
                uniform(0, canvas.width),   # random horizontal position
                canvas.height,              # trunk starts at the bottom edge
                uniform(40, 70),            # trunk length
                uniform(-10, 10),           # initial lean angle
                uniform(8, 15),             # trunk width
            )
        canvas.fill_style = '#559b37'
        canvas.fill_rect(0, canvas.height - 5, canvas.width, canvas.height)
# Draw an initial forest, then wire up a button that regenerates it on click.
draw_random_forest()
button = Button(description='Generate new forest')
button.on_click(draw_random_forest)
display(canvas, button)
| examples/fractals_forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Illustrating the relationship between random walks and diffusion
#
# Imagine a group of particles that are initially arranged in a vertical line, and then start undergoing random motions. The simulation below illustrates how they spread out over time.
#
# (<NAME>, University of Colorado Boulder; last update November 2021)
# ## Part 1: create the model and set up the animation
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
class RandomWalkSimulator(object):
    """A collection of 2-D random walkers, grouped into one or more types.

    Positions live in two (num_particles, num_types) arrays, ``self.x`` and
    ``self.y``; each call to :meth:`run_one_step` moves every particle by one
    random unit-length step scaled by ``step_size``.
    """

    def __init__(self, num_particles=100, num_types=1, initial_position='line',
                 step_size=0.01):
        """Initialize the RandomWalkSimulator.

        Parameters
        ----------
        num_particles : int (default = 100)
            number of particles
        num_types : int (default = 1)
            number of particle 'types,' each of which will be in a different color
        initial_position : string ('line' or 'scatter')
            'line' means all particles are lined up along the y axis.
            'scatter' means they are scattered, with type 1 between -1 and 0,
            and type 2 (if present) between 0 and 1.
        step_size : float (default = 0.01)
            Scale factor for size of random steps.
        """
        self.np = num_particles  # particle count (the attribute, not the numpy module)
        self.nt = num_types
        self.step_size = step_size
        self.x = np.zeros((num_particles, num_types))
        self.y = np.zeros((num_particles, num_types))
        if initial_position == 'line':
            self.x[:] = 0.0  # already zero; kept explicit for clarity
            for type_idx in range(num_types):
                self.y[:, type_idx] = np.linspace(0, 1, num_particles)
        else:
            # 'scatter': x in [0, 1) for even type indices, [-1, 0) for odd ones.
            for type_idx in range(num_types):
                self.x[:, type_idx] = np.random.rand(num_particles) - (type_idx % 2)
                self.y[:, type_idx] = np.random.rand(num_particles)

    def run_one_step(self):
        """Apply one random offset (scaled by step_size) to each particle."""
        shape = (self.np, self.nt)
        # x-component: magnitude drawn from U[0, 1), sign from a second draw.
        dx = np.random.rand(*shape) * np.sign(2 * np.random.rand(*shape) - 1)
        # y-component completes a unit-length step, with its own random sign.
        dy = np.sqrt(1.0 - dx**2) * np.sign(2 * np.random.rand(*shape) - 1)
        self.x += self.step_size * dx
        self.y += self.step_size * dy
# 1000 single-type particles, initially lined up along the y axis.
rws = RandomWalkSimulator(num_particles=1000)
# +
# First, set up a blank figure
fig, ax = plt.subplots()
# And placeholder lists for data
obj = ax.plot([], [], color = 'k')
# And initialize the number of iterations per animation frame
num_iter_per_frame = 10
# -
# -
def init():
    """FuncAnimation initializer: fix the axis limits and plot the starting positions."""
    ax.set_xlim(-1, 1)
    ax.set_ylim(0, 1)
    return ax.plot(rws.x, rws.y, '.')
# Next, define the update function
def update(i):
    """FuncAnimation callback: advance the walkers several steps, then redraw."""
    for _ in range(num_iter_per_frame):
        rws.run_one_step()
    ax.cla()  # wipe the previous frame
    ax.set_xlim(-1, 1)
    ax.set_ylim(0, 1)
    for type_idx in range(rws.nt):
        artists = ax.plot(rws.x[:, type_idx], rws.y[:, type_idx], '.')
    return artists
# ## Part 2: Run and animate the model with particles initially evenly spaced along a vertical line
#
# Note that the `FuncAnimation` function automatically runs our model by iteratively calling the `update` function, which in turn calls the `rws.run_one_step` function to advance the model.
#
# Run the animation!
anim = FuncAnimation(fig, update, 300, init_func = init, blit = True)
plt.show()
# Convert the animation to HTML
vid = HTML(anim.to_jshtml())
vid
# ## Part 3: run and animate the model with two groups of particles, one on each side of the domain
rws2 = RandomWalkSimulator(num_particles=1000, num_types=2, initial_position='scatter')
def init2():
    """FuncAnimation initializer for the two-type run: fixed limits, initial scatter."""
    ax.set_xlim(-1, 1)
    ax.set_ylim(0, 1)
    return ax.plot(rws2.x, rws2.y, '.')
# Next, define the update function
def update2(i):
    """FuncAnimation callback for the two-type run: advance and redraw."""
    for _ in range(num_iter_per_frame):
        rws2.run_one_step()
    ax.cla()  # wipe the previous frame
    ax.set_xlim(-1, 1)
    ax.set_ylim(0, 1)
    for type_idx in range(rws2.nt):
        artists = ax.plot(rws2.x[:, type_idx], rws2.y[:, type_idx], '.')
    return artists
# Run the animation! (300 frames, reusing the same figure/axes)
anim = FuncAnimation(fig, update2, 300, init_func = init2, blit = True)
plt.show()
# Convert the animation to HTML
vid2 = HTML(anim.to_jshtml())
vid2
# Notice that the two populations gradually mix together.
| random_walks_and_diffusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Tomorrow's Map Generation
# In this notebook we will use discrete optimization to generate maps from predefined tiles in two-dimensional space.
# # !pip install dimod==0.10.1 dwave-system==1.7.0
import numpy as np
import seaborn as sns
# Let's define the number of available tiles, adjacency weights and the dimensions of the output map we would like to generate.
n_tiles = 4       # number of distinct tile types (named grass/tree/water/road in the plotting cell)
map_dim = [8, 8]  # output map size: [rows, columns]
# ## Create BQM object
#
# - Decision variables: $x_{ij} = k$ if the map cell i, j is assigned with the tile k
# - Total number of variables: number of map cells times number of tiles
# ## Representation of map as Decision variables
#
# Below is an example of variable construction.
#
# row\column | 1 | 2 | 3 | 4 | 5 | 6
# :----:|:---:|:----:|:----:|:----:|:-----:|:-----:
# 1 | x00 | x01 |x02 | x03 | x04 | x05
# 2 | x10 | x11 |x12 | x13 | x14 | x15
# 3 | x20 | x21 |x22 | x23 | x24 | x25
#
# here each cell value corresponds to the respective tile index in the output map
# +
from dimod import AdjVectorBQM
bqm = AdjVectorBQM('BINARY')
# One binary variable per (row, column, tile): x[i, j, k] == 1 means map cell
# (i, j) is assigned tile k. Variables are labelled (f'x_{i}_{j}', k) and start
# with a linear bias of 0.
x = {(i, j, k): bqm.add_variable((f'x_{i}_{j}', k), 0)
     for i in range(map_dim[0])
     for j in range(map_dim[1])
     for k in range(n_tiles)
     }
# +
# x
# -
# ## Add constraint
# - Only one tile can be placed in a given map cell
# - The sum of xijk for all values of k (all possible tiles) amounts to one
one_hot_penalty = 10  # Lagrange multiplier for the one-tile-per-cell constraint
# Hard constraint: exactly one tile per map cell, i.e. sum_k x[i, j, k] == 1,
# encoded as a quadratic penalty with the multiplier above.
for i in range(map_dim[0]):
    for j in range(map_dim[1]):
        bqm.add_linear_equality_constraint(
            [(x[i, j, k], 1.0) for k in range(n_tiles)],
            constant=-1.0,
            lagrange_multiplier=one_hot_penalty
        )
# ## Add soft constraints
# - pairwise interactions between every pair of tiles represent their relative association
# - the penalty is inverse proportional to the interaction parameter
# +
# tile_interaction = [[-2, 2],
# [2, -2]]
# tile_interaction = [[-2, 2, 2],
# [2, -2, 2],
# [2, 2, -2]]
# Pairwise co-occurrence counts between the four tile types (symmetric matrix).
coincidences = np.array([[4., 6., 4., 8.],
                         [6., 0., 0., 4.],
                         [4., 0., 0., 0.],
                         [8., 4., 0., 5.]])

# Normalise the counts into a joint probability table and take the negative
# log-likelihood: rare pairings receive a large penalty.
coincidence_prob = coincidences / coincidences.sum()
coincidence_nll = -np.log(coincidence_prob)

# Additive smoothing so never-observed pairs get a large-but-finite penalty
# instead of infinity.
noise = 0.5
smoothed = coincidences + noise
concidence_nll_noise = -np.log(smoothed / smoothed.sum())
# sns.heatmap(concidence_nll_noise)
# -
# Soft constraints: for each map cell, couple its tile variables with those of
# its vertical and horizontal neighbours, weighted by the smoothed negative
# log-likelihood of the two tiles co-occurring.
# NOTE(review): each neighbouring pair is visited from both cells, so every
# interaction weight is effectively added twice; this scales all pairwise
# terms uniformly and is presumably intended -- confirm.
# Idiom fix: the original bounds checks were written as
# `if ((k<0) | (k>=map_dim[0]))==False:`; replaced with chained comparisons.
for i in range(map_dim[0]):
    for j in range(map_dim[1]):
        # Vertical neighbours (row above and row below).
        for k in (i - 1, i + 1):
            if 0 <= k < map_dim[0]:
                for t_i in range(n_tiles):
                    for t_k in range(n_tiles):
                        bqm.add_interaction(x[i, j, t_i], x[k, j, t_k], concidence_nll_noise[t_i][t_k])
        # Horizontal neighbours (column left and column right).
        for l in (j - 1, j + 1):
            if 0 <= l < map_dim[1]:
                for t_j in range(n_tiles):
                    for t_l in range(n_tiles):
                        bqm.add_interaction(x[i, j, t_j], x[i, l, t_l], concidence_nll_noise[t_j][t_l])
# ## Run on Simulator/Real hardware
# +
from neal import SimulatedAnnealingSampler
# Solve the BQM with simulated annealing and keep only the 3 best samples.
response = SimulatedAnnealingSampler().sample(bqm, num_reads=100, num_sweeps=10000).truncate(3)
print(response)
# -
# ## Optimal solution
# +
# Get the best solution
best_solution = response.first.sample
# Keep only the variables set to 1; each maps a grid-cell label to its tile id.
assignments = {grid_cell: tile_id for (grid_cell, tile_id), value in best_solution.items() if value}
print(assignments)
# -
# ## Convert the solution assignments to map
output_map = np.array(list(assignments.values())).reshape(map_dim[0], map_dim[1])
# +
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(5,4))
tile_names = ['grass', 'tree', 'water', 'road']
tile_colors = ['#0c0', '#050', '#00c', '#cc6']
# Render the tile map as a discrete heatmap, one colour per tile type.
sns.heatmap(output_map, cmap=tile_colors, linewidth=0.1, linecolor='black', ax=ax)
colorbar = ax.collections[0].colorbar
M = output_map.max().max()  # (unused)
# Centre one tick per tile type on the colorbar and label it with the tile name.
colorbar.set_ticks([(i + 0.5) * (n_tiles - 1) / n_tiles for i in range(n_tiles)])
colorbar.set_ticklabels(tile_names)
plt.show()
# -
# # Thank You!
| The Tomorrow/rb_notebooks/01_map_generation_dwave.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ingesting Data
#
#
#
# # Ingest some data
#
# *Clean up any previous ingest:* If you have previously used this notebook to ingest the sample data into your data cube you will find the ingest data on your filesystem. This should be deleted. As ingest takes a significant amount of time the notebook doesn't include clean up commands and you should remove the files from your host. The ingest file places them under `easi-training-pc/output/ingest_data`
# # Ingest LS7 data
#
# The sample data for LS7 is a single timeslice so ingestion won't take long. We'll use that as a first example, then we will do the LS8 sample data, which will take a LOT longer.
#
# Whilst ingestion takes a while it is because it is performing all the reprojections required against the scene to ensure all your data ready for analysis in your chosen projection. When you just index those reprojections still need to be done, but they are done when you load. With ingestion, all that repeated projection can be done just once and from then on loading and use will be a lot faster.
# !datacube ingest -c ~/work/data-pipelines/landsat-usgs/ls7_ledaps_usgs_netcdf_ingest.yaml
# # Ingest LS8 data
#
# This will take a while...even with two processors being used.
# +
# # !datacube ingest -c /home/jovyan/work/data-pipelines/landsat-usgs/ls8_usgs_sr_albers_netcdf_ingest.yaml --executor multiproc 2
| work/easi-pc-notebooks/01b (optional) - PC Getting Started - Ingesting Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Display the images resulting from tests
# +
# %matplotlib inline
import os
import sys
sys.path.append(os.path.join('..', '..'))
from matplotlib import pylab
import numpy
from matplotlib import pyplot as plt
from data_models.parameters import arl_path
from wrappers.serial.image.operations import show_image, qa_image, import_image_from_fits
import logging
log = logging.getLogger()
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
# +
pylab.rcParams['figure.figsize'] = (12.0, 12.0)
pylab.rcParams['image.cmap'] = 'rainbow'
# Collect all FITS files from the MPI results directory, sorted by name.
fits_files = []
results_dir = './results/mpi'
for file in os.listdir(results_dir):
    if file.endswith(".fits"):
        fits_files.append(file)
fits_files = numpy.sort(fits_files)
# Record which images contain only zeros (all-zero pixel data).
empty = list()
for file in fits_files:
    im = import_image_from_fits('%s/%s' % (results_dir,file))
    if numpy.max(numpy.abs(im.data)) == 0.0:
        empty.append(file)
import pprint
p=pprint.PrettyPrinter()
print("The following files are empty:")
p.pprint(empty)
# +
# Show each FITS image together with its quality-assessment summary.
for file in fits_files:
    print('******************************************************************')
    im = import_image_from_fits('%s/%s' % (results_dir,file))
    print(qa_image(im, context=file))
    try:
        show_image(im, title=file)
        plt.show()
    except IndexError:
        # NOTE(review): IndexError from show_image is skipped silently --
        # presumably for images it cannot slice for display; confirm.
        pass
# -
| deprecated_code/workflows/mpi/plot_fits_images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CapÃtulo 2 - Conceptos básicos de programación orientada a objetos
# ___
# ### Index
# - [Variables](#Variables)
# - [Condicionales](#Condicionales)
# - [Listas](#Listas)
# - [Bucles](#Bucles)
# - [Diccionarios](#Diccionarios)
# - [Funciones](#Funciones-(Methods))
# - [Clases](#Clases)
# - [Capturar Errores](#Capturar-Errores)
# - [Inputs](#Inputs)
# - [Módulos](#Módulos)
# - [JSON](#JSON)
# - [Bloque with](#Bloque-with)
# - [Decoradores](#Decoradores)
# - [Leer y escribir en un archivo](#Leer-y-escribir-en-un-archivo)
# ## Variables
# ___
# ### cadenas de texto (strings)
# declarar el valor `value1` a la variable `var1`:
var1 = 'value1'
var2 = "value2"
var3 = 'val"ue2'
var4 = "val'ue2"
var5 = 'val\'ue2' # \ es el sÃmbolo para "escapar" el caracter situado a la derecha
var6 = "val\"ue2"
# mostrar el valor de la variable `var1`: (clicar el cuadrado de abajo y pulsar `shit + enter` evalua el bloque)
print(var1)
# anexar una cadena de texto a otra:
# +
var2 = ' and more'
var3 = var1 + var2
print(var3)
# -
# dar formato a una cadena de texto:
# +
a = "una"
b = "otra"
print( f'{a} y {b}' )
# -
# guardar como texto un salto de lÃnea:
# +
multi = """una lÃnea
otra lÃnea
última lÃnea"""
print(multi)
# -
# pero también se pueden se puede poner como `\n`:
# +
# Fix: the original line ended in three quotes ("""), which Python parses as
# the closing quote plus an adjacent empty string literal "" -- a harmless but
# confusing no-op concatenation. Removed the stray pair of quotes.
multi = "una lÃnea\notra lÃnea\núltima lÃnea"
print(multi)
# -
# No se puede hacer referencia a una variable sin declarar antes su valor, hacerlo elevará un error de tipo `NameError`:
print(var0)
# escribir un comentario:
# print('esto no se evalúa')
print('esto sÃ') # esto no se evalúa
# sentencia vacÃa:
pass
# ### Números
# +
x = 2 # int
y = 3.14 # float
print('suma:', x + y)
print('resta:', x - y)
print('multiplicación:', x * y)
print('división:', x / y)
# -
# transformar texto en número: (cast)
# +
print( int('1') )
print( int('00002') )
print( int(' 3 ') )
print( float('1') )
print( float('00002.20000') )
print( float(' 3.33 ') )
print( int('1') + int('1') )
# -
# transformar número en texto:
# +
print( str(1) )
print( str(2.2) )
print( str(1) + str(1) )
# -
# ### Booleanos
# +
ver = True
fal = False
print(ver)
print(fal)
print(not ver)
print(not fal)
# -
# este tipo de variables no se suelen almacenar en una varible tal cual, sino que surgen del resultado de una evaluación:
print(1 == 1)
print(not 1 == 1)
# operadores:
print('igualdad:', 1 == 1)
print('desigualdad:', 1 != 1)
print('mayor que:', 1 > 1)
print('menor que:', 1 < 1)
print('mayor o igual que:', 1 >= 1)
print('menor o igual que:', 1 <= 1)
# ### Nada
# + tags=[]
nada = None
print( nada )
print( bool(nada) )
# -
# transformar una variable en booleano:
print( '1.', bool("") )
print( '2.', bool("algo") )
print( '3.', bool(0) )
print( '4.', bool(-1) )
print( '5.', bool(None) )
# ## Condicionales
# ___
# usar un condicional (la indentación es fundamental en Python):
# +
if 1 == 1:
print('esta sentencia se va a ejecutar')
print('y esta sentencia también')
print('se acabó el primer condicional')
if 1 == 2:
print('esta sentencia no se va a ejecutar')
print('se acabó el segundo condicional')
# -
# ejecutar una sentencia si la condición no se cumple
if 1 < 2:
print('esta sentencia no se va a ejecutar')
else:
print('esta sentencia se va a ejecutar')
# ejecutar una sentencia si otra condición se cumple
if 1 > 2:
print('esta sentencia no se va a ejecutar')
elif 1 < 2:
print('esta sentencia se va a ejecutar')
else:
print('esta sentencia no se va a ejecutar porque ya se cumplió una de las condiciones')
# operadores condicionales:
if fal or ver:
print('al menos de las dos variables es verdadera')
# +
if ver and 1 == 1:
print('las dos variables son verdaderas')
if fal and 1 == 1:
print('esta sentencia no se va a ejecutar')
# -
# comprobar si una cadena de texto está dentro de otra:
if "texto" in "un texto más largo":
print("contiene 'texto'")
# ## Listas
# ___
# declarar una lista y acceder a un elemento de ella:
# +
lista = [1, "algo", True, None]
print( '1.', lista[0] )
print( '2.', lista[1] )
print( '3.', lista[2] )
print( '4.', lista[3] )
print( '5.', lista[-1] )
print( '6.', lista[-2] )
print( '7.', lista[-3] )
print( '8.', lista[-4] )
# -
# las listas no tienen porque definirse en una sola lÃnea:
# +
lista = [1, "algo",
True, None]
lista = [
1, "algo",
True, None
]
print(lista)
# -
# sustituir el valor de un elemento de la lista:
# +
lista[1] = "otro"
print(lista)
# -
# longitud de una lista:
# +
lista = [1, 4, 5, 56]
print( len(lista) )
# -
# añadir un elemento a la lista:
# +
lista = ['uno', 'dos']
lista.append('tres')
print(lista)
lista = lista + ['cuatro', 'cinco']
print(lista)
# -
# listas y condicionales:
# +
lista_de_frutas = ["manzana", "plátano", "cereza"]
if "manzana" in lista_de_frutas:
print("hay manzana")
else:
print("no hay manzana")
if "pera" in lista_de_frutas:
print("hay pera")
else:
print("no hay pera")
# -
# ## Loops
# ___
# execute a statement a fixed number of times:
for i in range(3):
    print(i)
# iterate over the items of a list:
# + tags=[]
lista_de_frutas = ["manzana", "plátano", "cereza", "pera"]
for fruta in lista_de_frutas:
    print(fruta)
# -
# mixing a loop and a conditional; `break` exits the loop early:
for fruta in lista_de_frutas:
    if fruta != "cereza":
        print(fruta + " no es cereza")
    else:
        print('ya encontré cereza')
        break
# execute a statement while a condition holds:
# +
var = 0
fin = 3
while var < fin:
    var = var + 1
    print(var)
# -
# -
# ## Dictionaries
# ___
# +
diccionario = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
print( '1.', diccionario['key2'] )
# `.get` returns None for a missing key instead of raising KeyError
print( '2.', diccionario.get('key') )
print( '3.', diccionario.get('key4') )
# a second argument to `.get` supplies the default for a missing key
print( '4.', diccionario.get('key5', 'val5') )
# -
# modify the value of an existing entry:
diccionario = {"marca": 'Ford', "modelo": "Mustang", "año": 1964,}
diccionario["año"] = 2018
print(diccionario)
# add a new entry:
diccionario["puertas"] = 5
print(diccionario)
# dictionaries and conditionals:
# +
if "marca" in diccionario.keys():
    print('"marca" es una entrada del diccionario')
if "Ford" in diccionario.values():
    print('"Ford" es un valor del diccionario')
# -
# -
# ## Functions (Methods)
# ___
#
# define a function:
# +
def mi_funcion(argumento1, argumento2):
    """Print both arguments (positional or keyword)."""
    print(f"estos son mis argumentos: {argumento1}, {argumento2}")
mi_funcion("uno", "dos")
# keyword arguments may be passed in any order
mi_funcion(argumento2="uno", argumento1="dos")
# -
# optional arguments are declared by giving them a default value:
# +
def mi_funcion(argumento, argumento_opcional=None):
    """Print the required argument and the optional one (None if omitted)."""
    print(f"estos son mis argumentos: {argumento}, {argumento_opcional}")
mi_funcion("uno", "dos")
mi_funcion("uno")
# -
# returning a value from a function:
# +
def mi_funcion(a):
    """Return a fixed list (the argument is unused in this demo)."""
    var = [2, 3, 5]
    return var
print( mi_funcion(3) )
# -
# -
# ## Classes
# ___
# definition and use of a class:
# +
class mi_clase(object):
    """Demo class with two class attributes and one method."""
    # class attributes, shared by every instance
    atributo1 = "uno"
    atributo2 = "dos"
    def metodo_suma(self, argumento1, argumento2):
        """Return the sum of the two arguments."""
        return argumento1 + argumento2
instancia_de_la_clase = mi_clase()
print( instancia_de_la_clase.atributo1 )
print( instancia_de_la_clase.atributo2 )
print( instancia_de_la_clase.metodo_suma(1, 1) )
# -
# class with a constructor:
# +
class mi_clase(object):
    """Demo class whose attributes are set by the constructor."""
    def __init__(self, constructor1, constructor2):
        self.atributo1 = constructor1
        self.atributo2 = constructor2
    def suma_de_atributos(self, argumento1, argumento2):
        """Return both instance attributes plus both arguments."""
        return self.atributo1 + self.atributo2 + argumento1 + argumento2
instancia_de_la_clase = mi_clase(1, 1)
print( instancia_de_la_clase.suma_de_atributos(1, 1) )
# instance attributes can be reassigned after construction
instancia_de_la_clase.atributo1 = 2
instancia_de_la_clase.atributo2 = 2
print( instancia_de_la_clase.suma_de_atributos(2, 2) )
# -
# -
# ## Catching Errors
# ___
# define code to run when a specific exception is raised:
# +
try:
    pass
except ZeroDivisionError as e:
    # no exception was raised, so this handler never runs
    print('esta sentencia no se va a ejecutar')
try:
    1/0
    # execution jumps to the handler, skipping this line
    print('esta sentencia no se va a ejecutar')
except ZeroDivisionError as e:
    print(e)
# -
# a `finally` clause runs whether or not an exception occurred in the `try` block:
try:
    1/0
except ZeroDivisionError as e:
    print(e)
finally:
    print("esta sentencia se ejecuta pase lo que pase")
# a specific error can also be raised deliberately, without one having occurred:
try:
    raise ZeroDivisionError("mensaje de error")
    print('esta sentencia no se va a ejecutar')
except ZeroDivisionError as e:
    print(e)
# or the current error can be re-raised:
try:
    raise ZeroDivisionError("mensaje de error")
except ZeroDivisionError as e:
    print(e)
    raise
# ## Inputs
# ___
# +
var = input("introduce algo y pulsa Enter: ")
print(var)
# -
# ## Módulos
# ___
# importar módulos o una clase de un módulo:
# +
import os
print( os.name ) # atributo del módulo
# +
import platform
print( platform.system() ) # método del módulo
# +
import configparser
config = configparser.ConfigParser() # clase del módulo
# o
from configparser import ConfigParser
config = ConfigParser()
# o
from configparser import ConfigParser as Cp
config = Cp()
# -
# Instalar un módulo (En JupyterLab poner `!` delante de la sentencia se ejecutará en la shell del sistema, no es código Python):
# + tags=[]
# ! pip install selenium
# -
# Desinstalar un modulo:
# ! pip uninstall -y selenium
# la opción `-y` sirve para que no requiera confirmación
# ## JSON
# ___
# es una estructura de información similar a xml. Ejemplo:
# ```
# data = {
# "key1": "value1",
# "key2": 2,
# "key3": true,
# "key4": null,
# "key5": [
# "value51",
# "value52"
# ],
# "key6": {
# "key61": "value61",
# "key62": "value62"
# }
# }
#
# ```
# se puede meter ese texto en una variable y luego transformarlo en un diccionario:
# +
json_raw = """{
"key1": "value1",
"key2": 2,
"key3": true,
"key4": null,
"key5": [
"value51",
"value52"
],
"key6": {
"key61": "value61",
"key62": "value62"
}
}
"""
import json
data = json.loads(json_raw) # json -> dict
print('1: ', data["key1"] )
print('2: ', data["key2"] )
print('3: ', data["key3"] )
print('4: ', data["key4"] )
print('5: ', data["key5"] )
print('5.1: ', data["key5"][0] )
print('5.2: ', data["key5"][1] )
print('6: ', data["key6"] )
print('6.1: ', data["key6"]["key61"] )
print('6.2: ', data["key6"]["key62"] )
# -
# también se puede transforma un diccionario en una cadena JSON:
# +
data = {"key1": "value1",
"key2": 2,
"key3": True,
"key4": None,
"key5": ["value51", "value52"],
"key6": {"key61": "value61", "key62": "value62"}}
data_json = json.dumps(data, indent=4) # dict -> json
print(data_json)
# -
# ## Bloque with
# ___
# con with se define un bloque para un objeto definido, el cual se autodestruirá al terminar:
# + tags=[]
var = 0
class Object(object):
    """Minimal context-manager demo.

    `value` is 1 while inside the `with` block and 2 once it has exited.
    """

    def __enter__(self):
        # entering the `with` block: mark the object as "inside"
        self.value = 1
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # leaving the `with` block (with or without an exception):
        # mark the object as "finished"
        self.value = 2
print('VALOR INICIAL:', var)
with Object() as o:
var = o.value
print('VALOR FINAL:', var)
# -
# ## Inheritance
# ___
# A class can inherit all the methods and attributes of another with this syntax. Example of an inherited class:
# +
class ClaseA(object):
    """Base class with two attributes and two methods."""
    a = 1
    b = 2
    def c(self):
        return 3
    def d(self):
        return 4
class ClaseB(ClaseA):
    """Subclass: overrides `b` and `d`, inherits `a` and `c`, adds `e` and `f`."""
    b = 5
    e = 6
    def d(self):
        return 7
    def f(self):
        return 8
clase_a = ClaseA()
clase_b = ClaseB()
print('a1:', clase_a.a)
print('a2:', clase_a.b)
print('a3:', clase_a.c())
print('a4:', clase_a.d())
print()
print('b1:', clase_b.a)
print('b2:', clase_b.b)
print('b2:', clase_b.e)
print('b3:', clase_b.c())
print('b4:', clase_b.d())
print('b4:', clase_b.f())
# -
# -
# ## Decoradores
# ___
# Los decoradores alteran de manera dinámica la funcionalidad de una función sin tener que cambiar el código fuente de la función decorada:
def di_adios_tambien(f):
    """Decorator: run the wrapped function, then also print a farewell."""
    def envoltura():
        # call the original function first...
        f()
        # ...then add the extra goodbye
        print('adiós')
    return envoltura
# +
@di_adios_tambien
def di_hola():
print('hola')
di_hola()
# -
# ## Leer y escribir en un archivo
# ___
# Para leer el contenido de un archivo y guardarlo en una variable:
# +
with open('file.txt', 'r', encoding='utf8') as f:
texto = f.read()
print(texto)
# -
# Para escribir el texto de una variable como el contenido de un archivo:
with open('file.txt', 'w') as f:
f.write(texto)
| test-lab/02 - base.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"source_hidden": true}
import pandas as pd
from beakerx import *
from beakerx.object import beakerx
# -
# # Heights of surfaces data analysis
# For each image the heights have been measured in three channels, c1=blue, c2=green and c3=red. The values are in the interval (0, 1]. The heights have been normalized with respect to the maximum value of a fourth channel, c4=magenta.
# + jupyter={"outputs_hidden": true, "source_hidden": true}
beakerx.pandas_display_default()
# Load the surface-height measurements and keep a reference for later cells.
# BUG FIX: the original called pd.read_csv without assigning the result, so
# the next cell's references to `data` raised NameError.
data = pd.read_csv('./data.csv')
data
# + jupyter={"source_hidden": true}
import math
images = ['10', '2', '8']
channels = ['1', '2', '3']
# ic[(image, channel)] -> list of height values for that image/channel column,
# with NaN entries (missing measurements) filtered out
ic = {}
for image in images:
    for channel in channels:
        ic[image, channel] = [y for y in data[image+'_Airyscan Processing.czi-surface_'+channel] if not math.isnan(y)]
# + jupyter={"source_hidden": true}
import matplotlib.pyplot as plt
import numpy as np
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(8, 10))
fig.suptitle('heights of surfaces')
ax1.boxplot((ic['10','1'], ic['10','2'], ic['10','3']), notch=True, sym="D", labels=["10_1", "10_2", "10_3"])
ax2.boxplot((ic['2','1'], ic['2','2'], ic['2','3']), notch=True, sym="D", labels=["2_1", "2_2", "2_3"])
ax3.boxplot((ic['8','1'], ic['8','2'], ic['8','3']), notch=True, sym="D", labels=["8_1", "8_2", "8_3"])
plt.show()
# + jupyter={"source_hidden": true}
pd.read_csv('./summary.csv')
# + jupyter={"source_hidden": true}
xic = {}
for image in images:
for channel in channels:
xic[image, channel] = list(set(ic[image, channel]))
xic[image, channel].sort()
# + jupyter={"source_hidden": true}
Histogram(title= "histograms of relative heights 10",
showLegend = True,
normed= True,
data= [ic['10', '1'], ic['10', '2'], ic['10', '3']],
color= [Color(0, 0, 255, 128), Color(0, 255, 0, 128), Color(255, 0, 0, 128)],
binCount= len(xic['10', '2']))
# + jupyter={"source_hidden": true}
Histogram(title= "histograms of relative heights 2",
showLegend = True,
normed= True,
data= [ic['2', '1'], ic['2', '2'], ic['2', '3']],
color= [Color(0, 0, 255, 128), Color(0, 255, 0, 128), Color(255, 0, 0, 128)],
binCount= len(xic['2', '2']))
# + jupyter={"source_hidden": true}
Histogram(title= "histograms of relative heights 8",
showLegend = True,
normed= True,
data= [ic['8', '1'], ic['8', '2'], ic['8', '3']],
color= [Color(0, 0, 255, 128), Color(0, 255, 0, 128), Color(255, 0, 0, 128)],
binCount= len(xic['8', '2']))
| volker/toolsets/heights_of_surfaces_tool/heights_of_surfaces_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Hyenni/practice_tensorflow/blob/master/softmax.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uFe23OYofU1O" colab_type="text"
# # SOFTMAX (Image Classification)
#
# * Multinomial Classification
# + [markdown] id="O_1C1wIjfU1R" colab_type="text"
# ### CF) MNIST IMAGE PRINT
# + id="B2qPRxp2fU1V" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="31eafcdc-3c38-4fee-efc6-4881c1101fd9"
#MNIST data set download
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_DATA/", one_hot=True)
#train, test data load
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
#data shape
print("X_train.shape: ", X_train.shape)
print("Y_train.shape: ", Y_train.shape)
print("X_test.shape: ", X_test.shape)
print("Y_test.shape: ", Y_test.shape)
import matplotlib.pyplot as plt
#show image
digit_img = X_train[0]
digit_label = Y_train[0]
print("digit_img.shape: ", digit_img.shape)
print("digit_label.shape: ", digit_label.shape)
plt.imshow(digit_img.reshape(28, 28), cmap='gray')
plt.title(digit_label)
plt.show()
# + id="SlnCQQICfU1i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 437} outputId="0135c282-06f7-4f73-d763-cfcbefdff112"
plt.imshow(digit_img.reshape(28, 28), cmap="winter")
plt.show()
print(digit_img.shape)
print(digit_img.reshape(28,28)[10,0:20])
# + [markdown] id="8vIra7p7fU1u" colab_type="text"
# # MNIST Image Classification using Softmax
# + id="1SBHAFCSfU1y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 89} outputId="b28eb5d6-a3be-46df-b7f1-2403309a6e94"
#MNIST data set load
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("./MNIST_DATA", one_hot=True)
# + id="tw0TVsmwfU2B" colab_type="code" colab={}
# parameters
NUM_STEPS = 1000
MINIBATCH_SIZE = 100
# + id="qAt35EZgfU2P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 181} outputId="ca7aa6cd-e0d8-47ff-87a6-ae29d52e0025"
# input placeholders: flattened 28x28 images and one-hot digit labels
x = tf.placeholder(tf.float32, [None, 784])
y_true = tf.placeholder(tf.float32, [None, 10])
# trainable weight matrix (note: no bias term in this simple model)
W = tf.Variable(tf.zeros([784, 10]))
# logit: raw class scores, one per digit
y_pred = tf.matmul(x, W)
# loss: mean softmax cross-entropy between logits and one-hot labels
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(
        logits=y_pred, labels=y_true))
# optimizer: one gradient-descent step with learning rate 0.5
gd_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# accuracy: fraction of samples whose argmax prediction matches the label
correct_mask = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_mask, tf.float32))
# + id="qnfPE-2KfU2X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="bcc62485-a058-41fc-ba0a-fc11f912c0cd"
# %%time
# training
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for _ in range(NUM_STEPS):
batch_xs, batch_ys = data.train.next_batch(MINIBATCH_SIZE)
sess.run(gd_step, feed_dict={x: batch_xs,
y_true: batch_ys})
# + id="boId227pfU2n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="b5507922-cbf9-4b87-fd22-6719ec8b5198"
# %%time
# testing
ans = sess.run(accuracy, feed_dict={x: data.test.images,
y_true: data.test.labels})
print("Accuracy: {:.4}%".format(ans*100))
# + id="A2YElbyAfU2y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="48774e51-ef9c-4973-bd66-04a5b1b9c86f"
import numpy as np
# predict
test_num = np.random.randint(0, data.test.images.shape[0])
img = mnist.test.images[test_num]
pred = sess.run(tf.argmax(y_pred, 1), feed_dict={x: [img]})
print("Predicted Label: ", pred[0])
print("Real Label: ", np.argmax(data.test.labels[test_num]))
img = np.resize(img, (28,28))
plt.imshow(img, cmap='gray')
plt.show()
# + id="1xlfbZloko9_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="b0e3a5ac-c6da-4ed4-c601-2b583615a392"
test_num2 = np.random.randint(0, data.test.images.shape[0])
img2 = mnist.test.images[test_num2]
pred2 = sess.run(tf.argmax(y_pred, 1), feed_dict={x: [img2]})
print("Predicted Label: ", pred2[0])
print("Real Label: ", np.argmax(data.test.labels[test_num2]))
img2 = np.resize(img2, (28,28))
plt.imshow(img2, cmap='gray')
plt.show()
| softmax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from simtk import unit
# # Free Particle
#
# The free particle is characterized by having no external potential governing its motion. In the absence of temperature and friction the particle moves in a uniform rectilinear trajectory. When temperature and friction are present, we have a stochastic Brownian particle, or random walker, characterized by magnitudes such as diffusion.
# ## Working with this test system
#
# The python class representing this test system is fully documented in [FreeParticle class API](../api/_autosummary/uibcdf_test_systems.FreeParticle.html). Let's see an example of how to interact with it:
# +
from openmolecularsystems import FreeParticle
open_molecular_system = FreeParticle(n_particles=1, mass=64*unit.amu)
# -
open_molecular_system.parameters
open_molecular_system.coordinates
open_molecular_system.topology
open_molecular_system.system
# ## Newtonian dynamics
from openmolecularsystems.tools.md import newtonian
initial_velocities = np.zeros([1, 3], np.float32) * unit.nanometers/unit.picoseconds
initial_velocities[0,0] = 0.10 * unit.nanometers/unit.picoseconds
traj_dict = newtonian(open_molecular_system, time=0.5*unit.nanoseconds, saving_timestep=1.0*unit.picoseconds,
integration_timestep=0.01*unit.picoseconds, initial_velocities=initial_velocities)
traj_dict.keys()
# We can plot the trajectory of the system along the $X$ axis:
plt.plot(traj_dict['time'], traj_dict['coordinates'][:,0,0])
plt.xlabel('time ({})'.format(traj_dict['time'].unit))
plt.ylabel('X ({})'.format(traj_dict['coordinates'].unit))
plt.show()
# And with external friction:
traj_dict = newtonian(open_molecular_system, time=0.5*unit.nanoseconds, saving_timestep=1.0*unit.picoseconds,
integration_timestep=0.01*unit.picoseconds, friction=0.10/unit.picoseconds,
initial_velocities=initial_velocities)
plt.plot(traj_dict['time'], traj_dict['coordinates'][:,0,0])
plt.xlabel('time ({})'.format(traj_dict['time'].unit))
plt.ylabel('X ({})'.format(traj_dict['coordinates'].unit))
plt.show()
# ## Stochastic Dynamics
from openmolecularsystems.tools.md import langevin_NVT
open_molecular_system = FreeParticle(n_particles = 100, mass = 64 * unit.amu)
traj_dict = langevin_NVT(open_molecular_system, time=0.5*unit.nanoseconds, saving_timestep=1.0*unit.picoseconds,
integration_timestep=0.01*unit.picoseconds,
friction=1.00/unit.picoseconds, temperature=300.0*unit.kelvin,
initial_velocities='boltzmann')
# We represent now the stochastic trajectory of our free particle along the axis $X$ in time:
# +
particle_index=50
plt.plot(traj_dict['time'], traj_dict['coordinates'][:,particle_index,0])
plt.plot(traj_dict['time'], traj_dict['coordinates'][:,particle_index,1])
plt.plot(traj_dict['time'], traj_dict['coordinates'][:,particle_index,2])
plt.xlabel('time ({})'.format(traj_dict['time'].unit))
plt.ylabel('X ({})'.format(traj_dict['coordinates'].unit))
plt.show()
# -
# Or over the plane $X,Y$:
plt.plot(traj_dict['coordinates'][:,particle_index,0], traj_dict['coordinates'][:,particle_index,1])
plt.xlabel('X ({})'.format(traj_dict['coordinates'].unit))
plt.ylabel('Y ({})'.format(traj_dict['coordinates'].unit))
plt.show()
| docs/contents/free_particle/Free_Particle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Regression Challenge
#
# Predicting the selling price of a residential property depends on a number of factors, including the property age, availability of local amenities, and location.
#
# In this challenge, you will use a dataset of real estate sales transactions to predict the price-per-unit of a property based on its features. The price-per-unit in this data is based on a unit measurement of 3.3 square meters.
#
# > **Citation**: The data used in this exercise originates from the following study:
# >
# > *<NAME>., & <NAME>. (2018). Building real estate valuation models with comparative approach through case-based reasoning. Applied Soft Computing, 65, 260-271.*
# >
# > It was obtained from the UCI dataset repository (<NAME>. and <NAME>. (2019). [UCI Machine Learning Repository]([http://archive.ics.uci.edu/ml). Irvine, CA: University of California, School of Information and Computer Science).
#
# ## Review the data
#
# Run the following cell to load the data and view the first few rows.
# +
import pandas as pd
# load the training dataset
data = pd.read_csv('data/real_estate.csv')
data.head()
# -
# The data consists of the following variables:
#
# - **transaction_date** - the transaction date (for example, 2013.250=2013 March, 2013.500=2013 June, etc.)
# - **house_age** - the house age (in years)
# - **transit_distance** - the distance to the nearest light rail station (in meters)
# - **local_convenience_stores** - the number of convenience stores within walking distance
# - **latitude** - the geographic coordinate, latitude
# - **longitude** - the geographic coordinate, longitude
# - **price_per_unit** house price of unit area (3.3 square meters)
#
# ## Train a Regression Model
#
# Your challenge is to explore and prepare the data, identify predictive features that will help predict the **price_per_unit** label, and train a regression model that achieves the lowest Root Mean Square Error (RMSE) you can achieve (which must be less than **7**) when evaluated against a test subset of data.
#
# Add markdown and code cells as required to create your solution.
# ### View the label distribution
# +
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Get the label column
label = data[data.columns[-1]]
# Create a figure for 2 subplots (2 rows, 1 column)
fig, ax = plt.subplots(2, 1, figsize = (9,12))
# Plot the histogram
ax[0].hist(label, bins=100)
ax[0].set_ylabel('Frequency')
# Add lines for the mean, median, and mode
ax[0].axvline(label.mean(), color='magenta', linestyle='dashed', linewidth=2)
ax[0].axvline(label.median(), color='cyan', linestyle='dashed', linewidth=2)
# Plot the boxplot
ax[1].boxplot(label, vert=False)
ax[1].set_xlabel('Label')
# Add a title to the Figure
fig.suptitle('Label Distribution')
# Show the figure
fig.show()
# -
# ### Remove outliers
# +
data = data[data['price_per_unit']<70]
# Get the label column
label = data[data.columns[-1]]
# Create a figure for 2 subplots (2 rows, 1 column)
fig, ax = plt.subplots(2, 1, figsize = (9,12))
# Plot the histogram
ax[0].hist(label, bins=100)
ax[0].set_ylabel('Frequency')
# Add lines for the mean, median, and mode
ax[0].axvline(label.mean(), color='magenta', linestyle='dashed', linewidth=2)
ax[0].axvline(label.median(), color='cyan', linestyle='dashed', linewidth=2)
# Plot the boxplot
ax[1].boxplot(label, vert=False)
ax[1].set_xlabel('Label')
# Add a title to the Figure
fig.suptitle('Label Distribution')
# Show the figure
fig.show()
# -
# ### View numeric correlations
for col in data[data.columns[0:-1]]:
fig = plt.figure(figsize=(9, 6))
ax = fig.gca()
feature = data[col]
correlation = feature.corr(label)
plt.scatter(x=feature, y=label)
plt.xlabel(col)
plt.ylabel('Correlations')
ax.set_title('Label vs ' + col + '- correlation: ' + str(correlation))
plt.show()
# ### View categorical features
#
# (**transaction_date** and **local_convenience_stores** seem to be discrete values, so might work better if treated as categorical features)
# plot a boxplot for the label by each categorical feature
for col in data[['transaction_date', 'local_convenience_stores']]:
fig = plt.figure(figsize=(9, 6))
ax = fig.gca()
data.boxplot(column = 'price_per_unit', by = col, ax = ax)
ax.set_title('Label by ' + col)
ax.set_ylabel("Label Distribution by Categorical Variable")
plt.show()
# ### Separate features and label and split data for training and validation
#
# (**transaction_date** doesn't seem to be very predictive, so omit it)
# +
from sklearn.model_selection import train_test_split
# Separate features (columns 1 [house_age] to the last but one) and labels (the last column)
X, y = data[data.columns[1:-1]].values, data[data.columns[-1]].values
# Split data 70%-30% into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
print ('Training Set: %d, rows\nTest Set: %d rows' % (X_train.size, X_test.size))
# -
# ### Preprocess the data and train a model in a pipeline
#
# Normalize the numeric features, then use a RandomForestRegressor to train a model.
# +
# Train the model
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
import numpy as np
# Define preprocessing for numeric columns (scale them)
numeric_features = [0,1,3,4]
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
# Combine preprocessing steps
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
])
# Create preprocessing and training pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', RandomForestRegressor())])
# fit the pipeline to train a linear regression model on the training set
model = pipeline.fit(X_train, (y_train))
print (model)
# -
# ### Evaluate the model
# +
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, r2_score
# %matplotlib inline
# Get predictions
predictions = model.predict(X_test)
# Display metrics
mse = mean_squared_error(y_test, predictions)
print("MSE:", mse)
rmse = np.sqrt(mse)
print("RMSE:", rmse)
r2 = r2_score(y_test, predictions)
print("R2:", r2)
# Plot predicted vs actual
plt.scatter(y_test, predictions)
plt.xlabel('Actual Labels')
plt.ylabel('Predicted Labels')
plt.title('Predictions vs Actuals')
z = np.polyfit(y_test, predictions, 1)
p = np.poly1d(z)
plt.plot(y_test,p(y_test), color='magenta')
plt.show()
# -
# ## Use the Trained Model
#
# Save your trained model, and then use it to predict the price-per-unit for the following real estate transactions:
#
# | transaction_date | house_age | transit_distance | local_convenience_stores | latitude | longitude |
# | ---------------- | --------- | ---------------- | ------------------------ | -------- | --------- |
# |2013.167|16.2|289.3248|5|24.98203|121.54348|
# |2013.000|13.6|4082.015|0|24.94155|121.50381|
# +
import joblib

# Save the trained pipeline as a pickle file
filename = './real_estate_model.pkl'
joblib.dump(model, filename)

# Load the model back from the file
loaded_model = joblib.load(filename)

# An array of features for each transaction (don't include the transaction date):
# [house_age, transit_distance, local_convenience_stores, latitude, longitude]
# BUG FIX: the second longitude was truncated to 121.5038; the transaction
# table above specifies 121.50381.
X_new = np.array([[16.2,289.3248,5,24.98203,121.54348],
                  [13.6,4082.015,0,24.94155,121.50381]])

# Use the model to predict price-per-unit for each transaction
results = loaded_model.predict(X_new)
print('Predictions:')
for prediction in results:
    print(round(prediction,2))
| challenges/02 - Real Estate Regression Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/01-vyom/Machine-Learning-Projects/blob/master/Kaggle/Digit%20Recognizer/Digit_Recog_Kaggle.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="515Gm9So4xy6" colab_type="code" outputId="054d4b1b-d848-471a-a08f-3f38b66943b1" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
# + id="Z6gtexYj45St" colab_type="code" colab={}
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
#This is the code used for Tensorflow 2.x in Google Colab!!
# first step is to import the libraries that are currently required
import pandas as pd
import matplotlib.pyplot as plt
# x_data = range(0,l_data.shape[0]
import seaborn as sns
#LotFrontage
#LotArea
#YearBuilt
#TotalBsmtsf
#GarageArea
#PoolArea
import time
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from sklearn import metrics
from sklearn import preprocessing
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
from sklearn.utils import shuffle
from tensorflow.python.data import Dataset
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
pd.options.display.max_rows = 25
pd.options.display.float_format = '{:.5f}'.format
# + id="_7FEseUk5_bQ" colab_type="code" colab={}
Digit_Recog_Data = pd.read_csv("/content/drive/My Drive/Digit Recog/train.csv")
Digit_Recog_Data = shuffle(Digit_Recog_Data)
# + id="-m_rAj1n6SNr" colab_type="code" colab={}
Digit_Target = Digit_Recog_Data[['label']]
Digit_Feature = Digit_Recog_Data.drop(columns=['label'])
# + id="wG4wHA0KLGmx" colab_type="code" colab={}
Digit_Feature = Digit_Feature.values
Digit_Target = Digit_Target.values
Digit_Feature = Digit_Feature.reshape(-1,28,28,1)
Digit_Feature = Digit_Feature/255.0
# + id="qMZ5fZ4U9X2P" colab_type="code" colab={}
#.980 with validation
#.97854 without validation
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# + id="D5c8-m9Ikd5j" colab_type="code" colab={}
#.980 with validation
#.97854 without validation
# with validation and a very different type of network
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, kernel_size=(5,5),padding = 'Same', activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(32, kernel_size=(5,5),padding = 'Same', activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(64, kernel_size=(3,3),padding = 'Same', activation='relu'),
tf.keras.layers.Conv2D(64, kernel_size=(3,3),padding = 'Same', activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2,2),strides=(2,2)),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# + id="UNaBsDna-CS6" colab_type="code" outputId="c0c104ac-7d10-48d6-a44e-7e048c9fc2d8" colab={"base_uri": "https://localhost:8080/", "height": 1000}
learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
patience=2,
verbose=1,
factor=0.5,
min_lr=0.00001)
history = model.fit(
x=Digit_Feature,
y=Digit_Target,
batch_size = 86,
validation_split = 0.1,
epochs = 30,
callbacks=[learning_rate_reduction]
)
# + id="6AMhB6uGAOen" colab_type="code" outputId="886c2405-96fc-4b25-ec39-57864e1aebc7" colab={"base_uri": "https://localhost:8080/", "height": 499}
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
EPOCHS = 30
epochs_range = range(EPOCHS)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# + id="M2LCwaRs_O65" colab_type="code" colab={}
Digit_Recog_TestData = pd.read_csv("/content/drive/My Drive/Digit Recog/test.csv")
Digit_Predict = Digit_Recog_TestData.values.reshape(-1,28,28,1)
Digit_Predict = Digit_Predict / 255.0
# + id="OHZrH1IY-Y9G" colab_type="code" colab={}
predictions = model.predict(Digit_Predict)
preds = np.argmax(predictions, axis=-1)
# + id="-pS_AKooA1lG" colab_type="code" outputId="c422cf88-b012-4842-eaa4-361175ee3e48" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(type(preds))
k = 1
main = []
for i in range(len(preds)):
l=[k+i,preds[i]]
main.append(l)
dfans = pd.DataFrame(main)
# + id="dBlku0qHA6Ui" colab_type="code" colab={}
dfans.to_csv('/content/drive/My Drive/Digit Recog/Digit_Recog_6.csv', index=False)
# + id="pfOLteO-lyWm" colab_type="code" outputId="0834eef1-2840-4fff-c00a-badd17e5cfce" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Save the trained model in Keras HDF5 (.h5) format, named by the current
# Unix timestamp so repeated saves never collide.
t = time.time()
export_path_keras = "/content/drive/My Drive/Digit Recog/{}.h5".format(int(t))
print(export_path_keras)
model.save(export_path_keras)
# + id="kHhZ9x8K8F3M" colab_type="code" outputId="e3ea0e36-1b31-4433-fcbf-951c675a206c" colab={"base_uri": "https://localhost:8080/", "height": 190}
# Also export in TensorFlow SavedModel format (a directory, not a single file).
t = time.time()
export_path_sm = "/content/drive/My Drive/Digit Recog/{}".format(int(t))
print(export_path_sm)
tf.saved_model.save(model, export_path_sm)
| Kaggle/Digit Recognizer/Digit_Recog_Kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# Silence all Python warnings for cleaner notebook output.
# NOTE(review): this also hides deprecation warnings — consider narrowing.
import warnings
warnings.filterwarnings('ignore')
# -
# # Handle imports
# +
from exact_sync.v1.api.annotations_api import AnnotationsApi
from exact_sync.v1.api.images_api import ImagesApi
from exact_sync.v1.api.image_sets_api import ImageSetsApi
from exact_sync.v1.api.annotation_types_api import AnnotationTypesApi
from exact_sync.v1.api.products_api import ProductsApi
from exact_sync.v1.api.teams_api import TeamsApi
from exact_sync.v1.models import ImageSet, Team, Product, AnnotationType, Image, Annotation, AnnotationMediaFile
from exact_sync.v1.rest import ApiException
from exact_sync.v1.configuration import Configuration
from exact_sync.v1.api_client import ApiClient
# -
import pandas as pd
# # Connect to EXACT
# +
configuration = Configuration()
configuration.username = 'exact'
# NOTE(review): '<PASSWORD>' is a placeholder — set real credentials before running.
configuration.password = '<PASSWORD>'
configuration.host = "http://127.0.0.1:1337"
# One shared HTTP client, reused by each endpoint-specific API wrapper below.
client = ApiClient(configuration)
image_sets_api = ImageSetsApi(client)
annotations_api = AnnotationsApi(client)
annotation_types_api = AnnotationTypesApi(client)
images_api = ImagesApi(client)
product_api = ProductsApi(client)
team_api = TeamsApi(client)
# # Download all annotations from one imageset
# Show the signature/filter options accepted by list_image_sets.
help(image_sets_api.list_image_sets)
# Fetch the image set(s) whose name contains the study title.
image_set = image_sets_api.list_image_sets(name__contains="MICCAI Mitotic Figure Study")
image_set
annos = []
# Collect every non-deleted annotation of every image in the first matching
# set. pagination=False returns all results in one call; expand/fields limit
# the payload to the attributes appended below.
for image_id in image_set.results[0].images:
    for anno in annotations_api.list_annotations(image=image_id, deleted=False,pagination=False,expand="user,annotation_type",fields="id,user,time,unique_identifier,annotation_type,vector").results:
        annos.append([anno.id, anno.user["username"], anno.time, anno.unique_identifier, anno.annotation_type["name"], anno.vector])
# NOTE(review): the bare `anno` below only echoes the last annotation in the
# notebook output — leftover debugging, safe to remove.
anno
annos = pd.DataFrame(annos, columns=["id", "user", "time", "unique_identifier", "type", "vector"])
annos
| doc/DownloadStudyAnnotations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="wJcYs_ERTnnI"
# ##### Copyright 2021 The TensorFlow Authors.
# + cellView="form" id="HMUDt0CiUJk9"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="77z2OchJTk0l"
# # Migrate multi-worker CPU/GPU training
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/migrate/multi_worker_cpu_gpu_training">
# <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
# View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/migrate/multi_worker_cpu_gpu_training.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/multi_worker_cpu_gpu_training.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/migrate/multi_worker_cpu_gpu_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="meUTrR4I6m1C"
# This guide demonstrates how to migrate your multi-worker distributed training workflow from TensorFlow 1 to TensorFlow 2.
#
# To perform multi-worker training with CPUs/GPUs:
#
# - In TensorFlow 1, you traditionally use the `tf.estimator.train_and_evaluate` and `tf.estimator.Estimator` APIs.
# - In TensorFlow 2, use the Keras APIs for writing the model, the loss function, the optimizer, and metrics. Then, distribute the training with Keras `Model.fit` API or a custom training loop (with `tf.GradientTape`) across multiple workers with `tf.distribute.experimental.ParameterServerStrategy` or `tf.distribute.MultiWorkerMirroredStrategy`. For more details, refer to the following tutorials:
# - [Distributed training with TensorFlow](../../guide/distributed_training.ipynb)
# - [Parameter server training with Keras Model.fit/a custom training loop](../../tutorials/distribute/parameter_server_training.ipynb)
# - [MultiWorkerMirroredStrategy with Keras Model.fit](../../tutorials/distribute/multi_worker_with_keras.ipynb)
# - [MultiWorkerMirroredStrategy with a custom training loop](../../tutorials/distribute/multi_worker_with_ctl.ipynb).
# + [markdown] id="YdZSoIXEbhg-"
# ## Setup
# + [markdown] id="28f46832b54d"
# Start with some necessary imports and a simple dataset for demonstration purposes:
# + id="iE0vSfMXumKI"
# Install tf-nightly as the notebook uses a dataset instance for `Model.fit`
# with `ParameterServerStrategy`, which depends on symbols in TF 2.7.
# !pip uninstall -q -y tensorflow keras
# !pip install -q tf-nightly
# Install a utility needed for this demonstration
# !pip install portpicker
import tensorflow as tf
import tensorflow.compat.v1 as tf1
# + id="m7rnGxsXtDkV"
features = [[1., 1.5], [2., 2.5], [3., 3.5]]
labels = [[0.3], [0.5], [0.7]]
eval_features = [[4., 4.5], [5., 5.5], [6., 6.5]]
eval_labels = [[0.8], [0.9], [1.]]
# + [markdown] id="T2uaw9QaDM_X"
# You will need the `'TF_CONFIG'` configuration environment variable for training on multiple machines in TensorFlow. Use `'TF_CONFIG'` to specify the `'cluster'` and the `'task'`s' addresses. (Learn more in the [Distributed training](../../guide/distributed_training.ipynb) guide.)
# + id="4OUzwoQgXgkG"
import json
import os

# Cluster layout: one coordinator ('chief'), three workers, two parameter
# servers, all addressed on localhost for this notebook demo.
_cluster_spec = {
    'chief': ['localhost:11111'],
    'worker': ['localhost:12345', 'localhost:23456', 'localhost:21212'],
    'ps': ['localhost:12121', 'localhost:13131'],
}
tf_config = {'cluster': _cluster_spec, 'task': {'type': 'chief', 'index': 0}}

# TensorFlow reads the cluster description from the TF_CONFIG env variable.
os.environ['TF_CONFIG'] = json.dumps(tf_config)
# + [markdown] id="PbeoSbbmDdc0"
# Note: Unfortunately, since multi-worker training with `tf.estimator` APIs in TensorFlow 1 requires multiple clients (which would be especially tricky to be done here in this Colab notebook), you will make the notebook runnable without a `'TF_CONFIG'` environment variable, so it falls back to local training. (Learn more in the *Setting up the `'TF_CONFIG'` environment variable* section in the [Distributed training with TensorFlow](../../guide/distributed_training.ipynb) guide.)
#
# Use the `del` statement to remove the variable (but in real-world multi-worker training in TensorFlow 1, you won't have to do this):
# + id="AHuynAR5D8sU"
del os.environ['TF_CONFIG']
# + [markdown] id="4uXff1BEssdE"
# ## TensorFlow 1: Multi-worker distributed training with tf.estimator APIs
# + [markdown] id="MpyINdiLEN3c"
# The following code snippet demonstrates the canonical workflow of multi-worker training in TF1: you will use a `tf.estimator.Estimator`, a `tf.estimator.TrainSpec`, a `tf.estimator.EvalSpec`, and the `tf.estimator.train_and_evaluate` API to distribute the training:
# + id="lqe9obf7suIj"
# TF1 canonical Estimator workflow: input functions, a model function, and
# train_and_evaluate to run training + evaluation.
def _input_fn():
  # Training dataset: one (features, labels) example per batch.
  return tf1.data.Dataset.from_tensor_slices((features, labels)).batch(1)
def _eval_input_fn():
  # Evaluation dataset, batched the same way.
  return tf1.data.Dataset.from_tensor_slices(
      (eval_features, eval_labels)).batch(1)
def _model_fn(features, labels, mode):
  # Single dense unit -> scalar prediction; MSE loss; Adagrad optimizer.
  logits = tf1.layers.Dense(1)(features)
  loss = tf1.losses.mean_squared_error(labels=labels, predictions=logits)
  optimizer = tf1.train.AdagradOptimizer(0.05)
  train_op = optimizer.minimize(loss, global_step=tf1.train.get_global_step())
  return tf1.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
estimator = tf1.estimator.Estimator(model_fn=_model_fn)
train_spec = tf1.estimator.TrainSpec(input_fn=_input_fn)
eval_spec = tf1.estimator.EvalSpec(input_fn=_eval_input_fn)
tf1.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# + [markdown] id="KEmzBjfnsxwT"
# ## TensorFlow 2: Multi-worker training with distribution strategies
# + [markdown] id="Syb66qsbEp1x"
# In TensorFlow 2, distributed training across multiple workers with CPUs, GPUs, and TPUs is done via `tf.distribute.Strategy`s.
#
# The following example demonstrates how to use two such strategies: `tf.distribute.experimental.ParameterServerStrategy` and `tf.distribute.MultiWorkerMirroredStrategy`, both of which are designed for CPU/GPU training with multiple workers.
#
# `ParameterServerStrategy` employs a _coordinator_ (`'chief'`), which makes it more friendly with the environment in this Colab notebook. You will be using some utilities here to set up the supporting elements essential for a runnable experience here: you will create an _in-process cluster_, where threads are used to simulate the parameter servers (`'ps'`) and workers (`'worker'`). For more information about parameter server training, refer to the [Parameter server training with ParameterServerStrategy](../../tutorials/distribute/parameter_server_training.ipynb) tutorial.
#
# In this example, first define the `'TF_CONFIG'` environment variable with a `tf.distribute.cluster_resolver.TFConfigClusterResolver` to provide the cluster information. If you are using a cluster management system for your distributed training, check if it provides `'TF_CONFIG'` for you already, in which case you don't need to explicitly set this environment variable. (Learn more in the *Setting up the `'TF_CONFIG'` environment variable* section in the [Distributed training with TensorFlow](../../guide/distributed_training.ipynb) guide.)
# + id="rp-gFY0H5rF-"
# Find ports that are available for the `'chief'` (the coordinator),
# `'worker'`s, and `'ps'` (parameter servers).
import portpicker
chief_port = portpicker.pick_unused_port()
worker_ports = [portpicker.pick_unused_port() for _ in range(3)]
ps_ports = [portpicker.pick_unused_port() for _ in range(2)]
# Dump the cluster information to `'TF_CONFIG'`.
tf_config = {
    'cluster': {
        'chief': ["localhost:%s" % chief_port],
        'worker': ["localhost:%s" % port for port in worker_ports],
        'ps': ["localhost:%s" % port for port in ps_ports],
    },
    'task': {'type': 'chief', 'index': 0}
}
os.environ['TF_CONFIG'] = json.dumps(tf_config)
# Use a cluster resolver to bridge the information to the strategy created below.
cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
# + [markdown] id="o_8uVvJb6dqq"
# Then, create `tf.distribute.Server`s for the workers and parameter servers one-by-one:
# + id="ZJopinmG6b2z"
# Workers need some inter_ops threads to work properly.
# This is only needed for this notebook to demo. Real servers
# should not need this.
worker_config = tf.compat.v1.ConfigProto()
worker_config.inter_op_parallelism_threads = 4
# Start three in-process worker servers sharing the cluster spec.
for i in range(3):
  tf.distribute.Server(
      cluster_resolver.cluster_spec(),
      job_name="worker",
      task_index=i,
      config=worker_config)
# Start two in-process parameter-server processes.
for i in range(2):
  tf.distribute.Server(
      cluster_resolver.cluster_spec(),
      job_name="ps",
      task_index=i)
# + [markdown] id="IpfCcF0g6Ao8"
# In real-world distributed training, instead of starting all the `tf.distribute.Server`s on the coordinator, you will be using multiple machines, and the ones that are designated as `"worker"`s and `"ps"` (parameter servers) will each run a `tf.distribute.Server`. Refer to *Clusters in the real world* section in the [Parameter server training](../../tutorials/distribute/parameter_server_training.ipynb) tutorial for more details.
#
# With everything ready, create the `ParameterServerStrategy` object:
# + id="t45iQeBT7Us_"
strategy = tf.distribute.experimental.ParameterServerStrategy(cluster_resolver)
# + [markdown] id="diNsps1MGRS6"
# Once you have created a strategy object, define the model, the optimizer, and other variables, and call the Keras `Model.compile` within the `Strategy.scope` API to distribute the training. (Refer to the `Strategy.scope` API docs for more information.)
#
# If you prefer to customize your training by, for instance, defining the forward and backward passes, refer to *Training with a custom training loop* section in [Parameter server training](../../tutorials/distribute/parameter_server_training.ipynb) tutorial for more details.
# + id="atVciNgPs0fw"
# Infinite, shuffled training stream and an infinite eval stream; explicit
# steps_per_epoch/steps below bound each repeated dataset.
dataset = tf.data.Dataset.from_tensor_slices(
    (features, labels)).shuffle(10).repeat().batch(64)
eval_dataset = tf.data.Dataset.from_tensor_slices(
    (eval_features, eval_labels)).repeat().batch(1)
# Model, optimizer and compile must happen under the strategy scope so the
# variables are placed/distributed by ParameterServerStrategy.
with strategy.scope():
  model = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])
  optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.05)
  model.compile(optimizer, "mse")
model.fit(dataset, epochs=5, steps_per_epoch=10)
# + id="akZ0aaaS1vA9"
model.evaluate(eval_dataset, steps=10, return_dict=True)
# + [markdown] id="pXbS71XmMSoO"
# > **Partitioners (`tf.distribute.experimental.partitioners`)**
# >
# > `ParameterServerStrategy` in TensorFlow 2 supports variable partitioning and offers same partitioners as TensorFlow 1, with less confusing names:
# > - `tf.compat.v1.variable_axis_size_partitioner` -> `tf.distribute.experimental.partitioners.MaxSizePartitioner`: a partitioner that keeps shards under a maximum size).
# > - `tf.compat.v1.min_max_variable_partitioner` -> `tf.distribute.experimental.partitioners.MinSizePartitioner`: a partitioner that allocates a minimum size per shard.
# > - `tf.compat.v1.fixed_size_partitioner` -> `tf.distribute.experimental.partitioners.FixedShardsPartitioner`: a partitioner that allocates a fixed number of shards.
# + [markdown] id="Ig0-uCUbGprd"
# Alternatively, you can use a `MultiWorkerMirroredStrategy` object:
# + id="xHXP8bOBGtXL"
# To clean up the `TF_CONFIG` used for `ParameterServerStrategy`.
del os.environ['TF_CONFIG']
# Without TF_CONFIG set, this strategy falls back to single-machine training.
strategy = tf.distribute.MultiWorkerMirroredStrategy()
# + [markdown] id="tOsmqefTGwUf"
# You can replace the strategy used above with a `MultiWorkerMirroredStrategy` object to perform training with this strategy.
#
# As with the `tf.estimator` APIs, since `MultiWorkerMirroredStrategy` is a multi-client strategy, there is no easy way to run distributed training in this Colab notebook. Therefore, replacing the code above with this strategy ends up running things locally. The Multi-worker training [with Keras Model.fit](../../tutorials/distribute/multi_worker_with_keras.ipynb)/[a custom training loop](../../tutorials/distribute/multi_worker_with_ctl.ipynb) tutorials demonstrate how to run multi-worker training with
# the `'TF_CONFIG'` variable set up, with two workers on a localhost in Colab. In practice, you would create multiple workers on external IP addresses/ports, and use the `'TF_CONFIG'` variable to specify the cluster configuration for each worker.
# + [markdown] id="917ef6135660"
# ## Next steps
# + [markdown] id="e76fd9d5c98c"
# To learn more about multi-worker distributed training with `tf.distribute.experimental.ParameterServerStrategy` and `tf.distribute.MultiWorkerMirroredStrategy` in TensorFlow 2, consider the following resources:
#
# - Tutorial: [Parameter server training with ParameterServerStrategy and Keras Model.fit/a custom training loop](../../tutorials/distribute/parameter_server_training.ipynb)
# - Tutorial: [Multi-worker training with MultiWorkerMirroredStrategy and Keras Model.fit](../../tutorials/distribute/multi_worker_with_keras.ipynb)
# - Tutorial: [Multi-worker training with MultiWorkerMirroredStrategy and a custom training loop](../../tutorials/distribute/multi_worker_with_ctl.ipynb)
# - Guide: [Distributed training with TensorFlow](../../guide/distributed_training.ipynb)
# - Guide: [Optimize TensorFlow GPU performance with the TensorFlow Profiler](../../guide/gpu_performance_analysis.ipynb)
# - Guide: [Use a GPU](../../guide/gpu.ipynb) (the Using multiple GPUs section)
| site/en-snapshot/guide/migrate/multi_worker_cpu_gpu_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: python3
# ---
# # RiskChanges Desktop : Exposure
# In Risk Changes the procedure for calculation is: Data Management, Exposure Analysis, Loss Analysis, Risk Analysis, Cost Benefit Analysis and Multi Criteria Analysis.
#
# In this Section we will talk about the procedure for Data Preparation for SDSS Desktop, Exposure Analysis, Loss Analysis and Risk Analysis
# ## Exposure Analysis: Data Preparation and Templates
# The input for analysis can be a list of objects or an Excel file with the required parameters and its directory. For Exposure Analysis the template for the CSV sheet is as follows.
#
# 
# In this sheet you can fill in as many rows as needed, each of which will be an exposure combination; you can make different combinations of hazard and EAR as well as admin units. There are two functions, ExposureAGG and Exposure: the Exposure function outputs the exposure itself, while the ExposureAGG function outputs exposure aggregated over specific administrative units.
# You have to provide directory [absolute or relative] for EAR, Hazard, Admin Unit and the aggregation column name in admin unit if you want the aggregated results. Output folder is the location of output, output file is file name for output and output format can be either *.shp or *.csv. If csv the geometries will not be saved but if *.shp geometries might get copied based on the number of classess each EAR have
# Once you have the Template load the Risk Changes Desktop and run Exposure by following command
from RiskChangesDesktop import Exposure
# The below one is standalone example without csv file where you can create single exposure
ear="Sample_Data/Elements at risk/Buildings/BU_2020_A0_S0.shp"
hazard="Sample_Data/Hazards/Landslide susceptibility/Susceptibility.tif"
ear_key="BU"
outputdir="test"
# NOTE(review): admin_unit and agg_col are defined but never passed to
# ComputeExposure below, so this call produces non-aggregated exposure only.
admin_unit="Sample_Data/admin units/admin_units.shp"
agg_col="ADMIN_UNIT"
Exposure.ComputeExposure(ear,hazard,ear_key,outputdir,outputformat="csv")
# If you want to use Excel Template and compute for multiple use the following sample
import pandas as pd
exposure_combinations=pd.read_csv("Exposure_template.csv")
# One exposure computation per row of the template CSV.
for index,combination in exposure_combinations.iterrows():
    ear=combination["EAR"]
    hazard=combination["Hazard"]
    ear_key=combination["EAR Key"]
    # NOTE(review): the trailing space in "Output File " must match the CSV
    # header exactly — confirm the template really uses that column name.
    outputdir=combination["Output Folder"]+"/"+combination["Output File "]
    outputformat=combination["Output format"]
    admin_unit=combination["Admin Unit"]
    agg_col=combination["Aggregation Column"]
    # NOTE(review): outputformat is passed positionally here but by keyword in
    # the standalone example above — confirm the fifth positional parameter of
    # ComputeExposure is indeed outputformat.
    Exposure.ComputeExposure(ear,hazard,ear_key,outputdir,outputformat)
# NOTE(review): incomplete leftover cell — `pip install` with no package name.
# !pip install
| Tutorial1-exposure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
# %matplotlib inline
# -
sns.set_style('darkgrid')
# Load the scikit-learn breast cancer dataset (a dict-like Bunch object).
cancer = load_breast_cancer()
type(cancer)
cancer.keys()
cancer['target_names']
# One DataFrame column per feature name in the Bunch.
df = pd.DataFrame(data=cancer['data'], columns=cancer['feature_names'])
df.head()
# ### Preparando o modelo PCA
from sklearn.preprocessing import StandardScaler
# Standardize features (zero mean, unit variance) before PCA so components
# are not dominated by features with large numeric scales.
scaler = StandardScaler()
scaler.fit(df)
scaled_data = scaler.transform(df)
from sklearn.decomposition import PCA
# Project onto the first two principal components for 2-D visualization.
pca = PCA(n_components=2)
pca.fit(scaled_data)
x_pca = pca.transform(scaled_data)
x_pca.shape
scaled_data.shape
plt.figure(figsize=(10, 5))
# Color points by the original class label to inspect class separation.
plt.scatter(x_pca[:, 0], x_pca[:, 1], c=cancer['target'], cmap='viridis')
plt.xlabel('Primeiro Componente Principal')
plt.ylabel('Segundo Componente Principal')
pca.components_
# Each row of components_ is one principal component as feature loadings.
df_comp = pd.DataFrame(pca.components_, columns=cancer['feature_names'])
df_comp.head()
plt.figure(figsize=(10, 6))
sns.heatmap(df_comp, cmap='viridis')
| secao22 - Analise do Componente Principal/aula105_analise_componente_principal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir( os.path.join('..', 'notebook_format') )
from formats import load_style
load_style()
# +
os.chdir(path)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# %matplotlib inline
# %load_ext watermark
# %load_ext autoreload
# %autoreload 2
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
# %watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,sklearn
# -
# # Logistic Regression
#
# **Logistic regression** is an excellent tool to know for classification problems, which are problems where the output value that we wish to predict only takes on only a small number of discrete values. Here we'll focus on the binary classification problem, where the output can take on only two distinct classes. To make our examples more concrete, we will consider the Glass dataset.
# +
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'
col_names = ['id', 'ri', 'na', 'mg', 'al', 'si', 'k', 'ca', 'ba', 'fe', 'glass_type']
# Download the UCI Glass dataset (the file has no header row) and sort by the
# aluminium ('al') column so later line plots over 'al' are ordered in x.
glass = pd.read_csv(url, names = col_names, index_col = 'id')
glass.sort_values('al', inplace = True)
# convert the glass type into binary outcome
# types 1, 2, 3 are window glass
# types 5, 6, 7 are household glass
# NOTE(review): type 4 is absent from the mapping; any such row would map to
# NaN — presumably type 4 does not occur in this data, verify before reuse.
glass['household'] = glass['glass_type'].map({1: 0, 2: 0, 3: 0, 5: 1, 6: 1, 7: 1})
glass.head()
# -
# Our task is to predict the `household` column using the `al` column. Let's visualize the relationship between the input and output and also train the logistic regression to see the outcome that it produces.
# +
# C=1e9 effectively disables regularization, so the fit approximates plain
# maximum-likelihood logistic regression.
logreg = LogisticRegression(C = 1e9)
# FIX: Series.reshape was deprecated and then removed from pandas — reshape
# the underlying NumPy array instead. sklearn needs a 2-D feature matrix.
X = glass['al'].values.reshape(-1, 1)
y = np.array(glass['household'])
logreg.fit(X, y)

# predict the probability that each observation belongs to class 1
# The first column indicates the predicted probability of class 0,
# and the second column indicates the predicted probability of class 1
glass['household_pred_prob'] = logreg.predict_proba(X)[:, 1]
# +
# plot the predicted probability (familiarize yourself with the S-shape)
# change default figure and font size
plt.rcParams['figure.figsize'] = 8, 6
plt.rcParams['font.size'] = 12
# Raw 0/1 labels as scatter points, fitted probability curve drawn on top.
plt.scatter(glass['al'], glass['household'])
plt.plot(glass['al'], glass['household_pred_prob'])
plt.xlabel('al')
plt.ylabel('household')
plt.show()
# -
# As we can see logistic regression can output the probabilities of an observation belonging to a specific class and these probabilities can be converted into class predictions by choosing a cutoff value (e.g. probability higher than 0.5 is classified as class 1).
#
#
# ## Logsitic Function
#
# In **Logistic Regression**, the log-odds of a categorical response being "true" (1) is modeled as a linear combination of the features:
#
# \begin{align*}
# \log \left({p\over 1-p}\right) &= w_0 + w_1x_1, ..., w_jx_j \nonumber \\
# &= w^Tx \nonumber
# \end{align*}
#
# Where:
#
# - $w_{0}$ is the intercept term, and $w_1$ to $w_j$ represents the parameters for all the other features (a total of j features).
# - By convention of we can assume that $x_0 = 1$, so that we can re-write the whole thing using the matrix notation $w^Tx$.
#
# This is called the **logit function**. The equation can be re-arranged into the **logistic function**:
#
# $$p = \frac{e^{w^Tx}} {1 + e^{w^Tx}}$$
#
# Or in the more commonly seen form:
#
# $$h_w(x) = \frac{1}{ 1 + e^{-w^Tx} }$$
#
# Let's take a look at the plot of the function:
# Plot the logistic (sigmoid) function over [-5, 5].
x_values = np.linspace(-5, 5, 100)
y_values = [1 / (1 + np.exp(-x)) for x in x_values]
plt.plot(x_values, y_values)
plt.title('Logistic Function')  # FIX: typo "Logsitic" in the displayed title
plt.show()
# The **logistic function** has some nice properties. The y-value represents the probability and it is always bounded between 0 and 1, which is what we want for probabilities. For an x value of 0 you get a 0.5 probability. Also as you get more positive x values you get a higher probability; on the other hand, a more negative x value results in a lower probability.
#
# Toy sample code of how to predict the probability given the data and the weight is provided below.
def predict_probability(data, weights):
    """Class-1 probabilities of a logistic regression model.

    Computes the linear score ``data . weights`` and passes it through the
    logistic (sigmoid) function 1 / (1 + exp(-score)).
    """
    linear_score = np.dot(data, weights)
    return 1.0 / (1.0 + np.exp(-linear_score))
# ## Interpreting the Intercept
#
# We can check at the coefficient of the logistic regression does in fact generate the log-odds.
# +
# compute predicted log-odds for al = 2 using the equation
# convert log-odds to odds
# convert odds to probability
logodds = logreg.intercept_ + logreg.coef_[0] * 2
odds = np.exp(logodds)
prob = odds / (1 + odds)
print(prob)
# FIX: predict_proba requires a 2-D (n_samples, n_features) array; a bare
# scalar was only ever accepted (with a warning) by very old sklearn versions.
logreg.predict_proba([[2]])[:, 1]
# -
# examine the coefficient for al
print('a1', logreg.coef_[0])
# **Interpretation:** 1 unit increase in `al` is associated with a 4.18 unit increase in the log-odds of the observation being classified as `household 1`. We can confirm that again by doing the calculation ourselves.
# +
# increasing al by 1 (so that al now becomes 3)
# increases the log-odds by 4.18
logodds = logodds + logreg.coef_[0]
odds = np.exp(logodds)
prob = odds / (1 + odds)
print(prob)
# FIX: predict_proba requires a 2-D (n_samples, n_features) array.
logreg.predict_proba([[3]])[:, 1]
# -
# ## Defining The Cost Function
#
# When utilizing logistic regression, we are trying to learn the $w$ values in order to maximize the probability of correctly classifying our glasses. Let's say someone did give us some $w$ values of the logisic regression model, how would we determine if they were good values or not? What we would hope is that for the household of class 1, the probability values are close to 1 and for the household of class 0 the probability is close to 0.
#
# But we don't care about getting the correct probability for just one observation, we want to correctly classify all our observations. If we assume our data are independent and identically distributed (think of it as all of them are treated equally), we can just take the product of all our individually calculated probabilities and that is the **cost function we want to maximize**. So in math:
#
# $$\prod_{class1}h_w(x)\prod_{class0}1 - h_w(x)$$
#
# The $\prod$ symbol means take the product of the $h_w(x)$ for the observations that are classified as that class. You will notice that for observations that are labeled as class 0, we are taking 1 minus the logistic function. That is because we are trying to find a value to maximize, and since observations that are labeled as class 0 should have a probability close to zero, 1 minus the probability should be close to 1. Next we will re-write the original cost function as:
#
# $$\ell(w) = \sum_{i=1}^{N}y_{i}log(h_w(x_{i})) + (1-y_{i})log(1-h_w(x_{i}))$$
#
# Where:
#
# - We define $y_{i}$ to be 1 when the $i_{th}$ observation is labeled class 1 and 0 when labeled as class 0, then we only compute $h_w(x_{i})$ for observations that are labeled class 1 and $1 - h_w(x_{i})$ for observations that are labeled class 0, which is still the same idea as the original function.
# - Next we'll transform the original $h_w(x_{i})$ by taking the log. As we'll later see this logarithm transformation will make our cost function more convenient to work with, and because the logarithm is a monotonically increasing function, the logarithm of a function achieves its maximum value at the same points as the function itself. When we take the log, our product across all data points, it becomes a sum. See [log rules](http://www.mathwords.com/l/logarithm_rules.htm) for more details (Hint: log(ab) = log(a) + log(b)).
# - The $N$ simply represents the total number of the data.
#
# Often times you'll also see the notation above be simplified in the form of a maximum likelihood estimator:
#
# $$ \ell(w) = \sum_{i=1}^{N} log \big( P( y_i \mid x_i, w ) \big) $$
#
# The equation above simply denotes the idea that , $\mathbf{w}$ represents the parameters we would like to estimate the parameters $w$ by maximizing conditional probability of $y_i$ given $x_i$.
#
# Now by definition of probability in the logistic regression model: $h_w(x_{i}) = 1 \big/ 1 + e^{-w^T x_i}$ and $1- h_w(x_{i}) = e^{ -w^T x_i } \big/ ( 1 + e^{ -w^T x_i } )$. By substituting these expressions into our $\ell(w)$ equation and simplifying it further we can obtain a simpler expression.
# \begin{align*}
# \ell(w) &= \sum_{i=1}^{N}y_{i}log(h_w(x_{i})) + (1-y_{i})log(1-h_w(x_{i})) \nonumber \\
# &= \sum_{i=1}^{N} y_{i} log( \frac{1}{ 1 + e^{ -w^T x_i } } ) + ( 1 - y_{i} )
# log( \frac{ e^{ -w^T x_i } }{ 1 + e^{ -w^T x_i } } ) \nonumber \\
# &= \sum_{i=1}^{N} -y_{i} log( 1 + e^{ -w^T x_i } ) + ( 1 - y_{i} )
# ( -w^T x_i - log( 1 + e^{ -w^T x_i } ) ) \nonumber \\
# &= \sum_{i=1}^{N} ( y_{i} - 1 ) ( w^T x_i ) - log( 1 + e^{ -w^T x_i } ) \nonumber
# \end{align*}
# We'll use the formula above to compute the log likelihood for the entire dataset, which is used to assess the convergence of the algorithm. Toy code provided below.
def compute_avg_log_likelihood(data, label, weights):
    """Average log likelihood of the data under a logistic model.

    Includes an overflow guard: for strongly negative scores,
    np.exp(-score) overflows to inf and log(1 + exp(-score)) becomes inf
    too; in that regime the asymptotic value -score is used instead, e.g.

        scores = np.array([-10000, 200, 300])
        logexp = np.log(1 + np.exp(-scores))   # first entry overflows to inf
    """
    margins = np.dot(data, weights)
    log_terms = np.log(1 + np.exp(-margins))
    # Replace overflowed entries by their asymptote: log(exp(-m)) == -m.
    overflowed = np.isinf(log_terms)
    log_terms[overflowed] = -margins[overflowed]
    return np.sum((label - 1) * margins - log_terms) / data.shape[0]
# **Note:** We made one tiny modification to the log likelihood function We added a ${1/N}$ term which averages the log likelihood accross all data points. The ${1/N}$ term will make it easier for us to compare stochastic gradient ascent with batch gradient ascent later.
#
# ## Gradient
#
# Now that we obtain the formula to assess our algorithm, we'll dive into the meat of the algorithm, which is to derive the gradient for the formula (the derivative of the formula with respect to each coefficient):
#
# $$\ell(w) = \sum_{i=1}^{N} ( y_{i} - 1 ) ( w^T x_i ) - log( 1 + e^{ -w^T x_i } )$$
#
# And it turns out the derivative of log likelihood with respect to to a single coefficient $w_j$ is as follows (the form is the same for all coefficients):
#
# $$
# \frac{\partial\ell(w)}{\partial w_j} = \sum_{i=1}^N (x_{ij})\left( y_i - \frac{1}{ 1 + e^{-w^Tx_i} } \right )
# $$
#
# To compute it, you simply need the following two terms:
#
# - $\left( y_i - \frac{1}{ 1 + e^{-w^Tx_i} } \right )$ is the vector containing the difference between the predicted probability and the original label.
# - $x_{ij}$ is the vector containing the $j_{th}$ feature's value.
#
# For detail of the derivation, consider going through the [Coursersa's Course: Washington Classification](https://www.coursera.org/learn/ml-classification), or search it up on the web. Put it in code, the formula above is simply:
def feature_derivative(errors, feature):
    """Partial derivative of the log likelihood with respect to one
    coefficient: the dot product of the prediction errors with the
    corresponding feature column."""
    grad_j = np.dot(errors, feature)
    return grad_j
# ## Stochastic/Mini-batch Gradient
#
# The problem with computing the gradient (or so called batched gradient) is the term $\sum_{i=1}^{N}$. This means that we must sum the contributions over all the data points to calculate the gradient, and this can be problematic if the dataset we're studying is extremely large. Thus, in stochastic gradient, we can use a single point as an approximation to the gradient:
#
# $$
# \frac{\partial\ell_i(w)}{\partial w_j} = (x_{ij})\left( y_i - \frac{1}{ 1 + e^{-w^Tx_i} } \right )
# $$
#
# **Note1:** Because the **Stochastic Gradient** algorithm uses each row of data in turn to update the gradient, if our data has some sort of implicit ordering, this will negatively affect the convergence of the algorithm. At an extreme, what if we had the data sorted so that all positive reviews came before negative reviews? In that case, even if most reviews are negative, we might converge on an answer of +1 because we never get to see the other data. To avoid this, one practical trick is to shuffle the data before we begin so the rows are in random order.
#
# **Note2:** Stochastic gradient compute the gradient using only 1 data point to update the the parameters, while batch gradient uses all $N$ data points. An alternative to these two extremes is a simple change that allows us to use a **mini-batch** of $B \leq N$ data points to calculate the gradient. This simple approach is faster than batch gradient but less noisy than stochastic gradient that uses only 1 data point. Given a mini-batch (or a set of data points) $\mathbf{x}_{i}, \mathbf{x}_{i+1} \ldots \mathbf{x}_{i+B}$, the gradient function for this mini-batch of data points is given by:
#
# $$
# \sum_{s = i}^{i+B} \frac{\partial\ell_s(w)}{\partial w_j} = \frac{1}{B} \sum_{s = i}^{i+B} (x_{sj})\left( y_i - \frac{1}{ 1 + e^{-w^Tx_i} } \right )
# $$
#
# Here, the $\frac{1}{B}$ means that we are normalizing the gradient update rule by the batch size $B$. In other words, we update the coefficients using the **average gradient over data points** (instead of using a pure summation). By using the average gradient, we ensure that the magnitude of the gradient is approximately the same for all batch sizes. This way, we can more easily compare various batch sizes and study the effect it has on the algorithm.
# ## Implementation
#
# Recall our task is to find the optimal value for each individual weight to lower the cost. This requires taking the partial derivative of the cost/error function with respect to a single weight, and then running gradient descent for each individual weight to update them. Thus, for any individual weight $w_j$, we'll compute the following:
#
# $$ w_j^{(t + 1)} = w_j^{(t)} + \alpha * \sum_{s = i}^{i+B} \frac{\partial\ell_s(w)}{\partial w_j}$$
#
# Where:
#
# - $\alpha$ denotes the the learning rate or so called step size, in other places you'll see it denoted as $\eta$.
# - $w_j^{(t)}$ denotes the weight of the $j_{th}$ feature at iteration $t$.
#
# And we'll do this iteratively for each weight, many times, until the whole network's cost function is minimized.
# +
# put the code together into one cell
def predict_probability(data, weights):
    """Sigmoid of the linear scores: P(y = 1 | x, w) for every row of `data`."""
    linear_scores = np.dot(data, weights)
    probabilities = 1 / (1 + np.exp(-linear_scores))
    return probabilities
def compute_avg_log_likelihood(data, label, weights):
    """Average log likelihood of the data under a logistic model.

    Uses a simple overflow guard: for very negative scores exp(-score)
    overflows to inf, and log(1 + exp(-score)) ~= -score, so the linear
    term replaces any infinite entries, e.g.:
    scores = np.array([-10000, 200, 300]); np.log(1 + np.exp(-scores))
    """
    scores = np.dot(data, weights)
    penalty = np.log(1 + np.exp(-scores))
    # substitute the asymptote wherever the exponential overflowed
    overflowed = np.isinf(penalty)
    penalty[overflowed] = -scores[overflowed]
    per_point = (label - 1) * scores - penalty
    return np.sum(per_point) / data.shape[0]
# -
def logistic_regression(data, label, step_size, batch_size, max_iter):
    """Fit a logistic regression model with mini-batch gradient ascent.

    Parameters
    ----------
    data : ndarray, shape (n_samples, n_features)
        Feature matrix (including the intercept column, if any).
    label : ndarray, shape (n_samples,)
        Binary labels in {0, 1}.
    step_size : float
        Learning rate for each gradient update.
    batch_size : int
        Rows per update; `n_samples` gives batch gradient ascent, small
        values give stochastic/mini-batch gradient ascent.
    max_iter : int
        Number of weight updates to perform.

    Returns
    -------
    (weights, log_likelihood_record)
        Final weight vector and the per-iteration average log likelihood
        (useful for plotting convergence).
    """
    data_num, feature_num = data.shape
    weights = np.zeros(feature_num)

    # `i` keeps track of the starting index of the current batch;
    # shuffle up front so implicit ordering in the data doesn't bias updates
    i = 0
    permutation = np.random.permutation(data_num)
    data, label = data[permutation], label[permutation]

    # store the log likelihood after each update to visualize convergence
    log_likelihood_record = []
    for _ in range(max_iter):
        # predicted probabilities and errors for the current batch
        batch = slice(i, i + batch_size)
        batch_data, batch_label = data[batch], label[batch]
        predictions = predict_probability(batch_data, weights)
        errors = batch_label - predictions

        # vectorized gradient for all coefficients at once:
        # d l / dw = X^T (y - p), averaged over the batch; this is the same
        # math as updating each weights[j] with np.dot(errors, batch_data[:, j])
        weights += step_size * np.dot(batch_data.T, errors) / batch_size

        # track whether log likelihood is increasing after each update
        log_likelihood = compute_avg_log_likelihood(
            data = batch_data,
            label = batch_label,
            weights = weights
        )
        log_likelihood_record.append(log_likelihood)

        # advance the batch window; once a full pass over the data is done,
        # reshuffle and restart from the beginning
        i += batch_size
        if i + batch_size > data_num:
            permutation = np.random.permutation(data_num)
            data, label = data[permutation], label[permutation]
            i = 0

    # we return the list of log likelihoods for plotting purposes
    return weights, log_likelihood_record
# ## Comparing Result and Convergence Behavior
#
# We'll use the logistic regression code that we've implemented and compare the predicted auc score with the scikit-learn's implementation. This only serves to check that the predicted results are similar and that our toy code is correctly implemented. Then we'll also explore the convergence difference between batch gradient descent and stochastic gradient descent.
# +
# manually append the intercept column of ones to the features;
# most open-source implementations add this term for the user
data = np.c_[np.ones(X.shape[0]), X]
# using our logistic regression code
weights_batch, log_likelihood_batch = logistic_regression(
    data = data,
    label = np.array(y),
    step_size = 5e-1,
    batch_size = X.shape[0], # every row per update = batch gradient descent
    max_iter = 200
)
# +
# compare both logistic regression's auc score; a very large C effectively
# disables scikit-learn's regularization so the two models are comparable
logreg = LogisticRegression(C = 1e9)
logreg.fit(X, y)
pred_prob = logreg.predict_proba(X)[:, 1]
proba = predict_probability(data, weights_batch)
# check that the auc score is similar
auc1 = metrics.roc_auc_score(y, pred_prob)
auc2 = metrics.roc_auc_score(y, proba)
print('auc', auc1, auc2)
# -
weights_sgd, log_likelihood_sgd = logistic_regression(
    data = data,
    label = y,
    step_size = 5e-1,
    batch_size = 30, # small batch = stochastic gradient descent
    max_iter = 200
)
weights_minibatch, log_likelihood_minibatch = logistic_regression(
    data = data,
    label = y,
    step_size = 5e-1,
    batch_size = 100, # mini-batch gradient descent
    max_iter = 200
)
# mini-batch gradient descent is often a good default: it strikes a balance
# between batch gradient, which converges steadily but can be computationally
# too expensive when the dataset is large, and stochastic gradient, which is
# faster to train but whose updates can be too noisy
plt.figure(figsize = (10, 7))
plt.plot(log_likelihood_sgd, label = 'stochastic gradient descent')
plt.plot(log_likelihood_batch, label = 'batch gradient descent')
plt.plot(log_likelihood_minibatch, label = 'mini-batch gradient descent')
plt.legend(loc = 'best')
plt.xlabel('# of iterations')
plt.ylabel('Average log likelihood')
plt.title('Convergence Plot')
plt.show()
# ## Pros and Cons of Logistic Regression
#
# **Pros:**
#
# - Highly interpretable (if you remember how).
# - Model training and prediction are fast.
# - Almost no parameter tuning is required (excluding regularization).
# - Outputs well-calibrated predicted probabilities.
#
# **Cons:**
#
# - Presumes a linear relationship between the features
# - Performance is (generally) not competitive with the best supervised learning methods.
# - Can't automatically learn feature interactions.
# # Reference
#
# - [Notebook: Logistic Regression](http://nbviewer.jupyter.org/github/justmarkham/DAT8/blob/master/notebooks/12_logistic_regression.ipynb)
# - [Coursera: Washington Classification](https://www.coursera.org/learn/ml-classification)
| text_classification/logistic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
import json
from pathlib import Path
import numpy as np
from copy import deepcopy
import pandas as pd
from deeppavlov.core.commands.train import read_data_by_config, train_evaluate_model_from_config
from deeppavlov.core.commands.infer import interact_model, build_model
from deeppavlov.core.commands.utils import expand_path, parse_config
from deeppavlov.core.common.params import from_params
from deeppavlov.core.common.errors import ConfigError
# -
# read unlabelled data for label propagation
def read_unlabelled_data(UNLABELLED_DATA_PATH):
    """Load unlabelled samples for label propagation: one sample per line,
    empty lines dropped."""
    with open(UNLABELLED_DATA_PATH, "r") as handle:
        lines = handle.read().splitlines()
    return [line for line in lines if line != '']
def make_pl_config(CONFIG_PATH):
    """Clone a training config for pseudo-labeling.

    The clone points its dataset_reader "train" file at a "<stem>_pl.csv"
    variant and is saved next to the original as "<stem>_pl.json".
    Returns (original_config, pseudo_label_config).
    """
    source = Path(CONFIG_PATH)
    target = source.parent / (source.stem + "_pl.json")
    with open(CONFIG_PATH, "r") as f:
        config = json.load(f)
    config_pl = deepcopy(config)
    train_name = config_pl["dataset_reader"].get("train", "train.csv")
    config_pl["dataset_reader"]["train"] = Path(train_name).stem + "_pl.csv"
    with open(target, "w") as f:
        json.dump(config_pl, f, indent=2)
    return config, config_pl
def save_extended_data(config, samples, labels, new_config = None):
    """Append (sample, label) pairs to the training split described by
    `config` and write the extended split back to disk.

    Parameters
    ----------
    config : dict
        Parsed DeepPavlov config whose dataset_reader describes the data.
    samples : list of str
        New pseudo-labelled texts to add to the train split.
    labels : list
        Labels for `samples`; each is an iterable of class names that is
        joined with the reader's class separator before saving.
    new_config : dict, optional
        If given, the extended split is written to the location described
        by this config instead (used to seed the *_pl train file).

    Raises
    ------
    ConfigError
        If the dataset_reader format is neither "csv" nor "json".
    """
    train_data = read_data_by_config(deepcopy(config))
    for sample, label in zip(samples, labels):
        train_data["train"].append((sample, label))
    df = pd.DataFrame(train_data["train"],
                      columns=[config["dataset_reader"]["x"],
                               config["dataset_reader"]["y"]])
    # multi-label targets are stored as strings joined by the class separator
    df[config["dataset_reader"]["y"]] = df[config["dataset_reader"]["y"]].apply(
        lambda x: config["dataset_reader"].get("class_sep", ",").join(x))
    # write to the destination config's location when one is supplied
    if new_config is not None:
        config = new_config
    file = expand_path(Path(config["dataset_reader"]["data_path"]) /
                       Path(config["dataset_reader"]["train"]))
    fmt = config["dataset_reader"].get("format", "csv")
    if fmt == "csv":
        df.to_csv(file,
                  index=False,
                  sep=config["dataset_reader"].get("sep", ","))
    elif fmt == "json":
        df.to_json(file,
                   index=False,
                   orient=config["dataset_reader"].get("orient", None),
                   lines=config["dataset_reader"].get("lines", False))
    else:
        raise ConfigError("Can not work with current data format")
# +
# manually given parameters for pseudo-labeling
# path to config file
CONFIG_PATH = "../deeppavlov/configs/classifiers/convers_vs_info.json"
# read config, compose new one, save it
config, config_pl = make_pl_config(CONFIG_PATH)
config, config_pl = parse_config(config), parse_config(config_pl)
config
# -
# path to file with unlabelled data
UNLABELLED_DATA_PATH = expand_path(Path(config["dataset_reader"]["data_path"])) / Path("question_L6.txt")
# number of samples that are going to be labelled during one iteration of label propagation
ONE_ITERATION_PORTION = 100
# number of iterations
N_ITERATIONS = 10
# index (in chainer pipe) of the component holding the classes vocabulary
CLASSES_VOCAB_ID_IN_PIPE = 0
# minimal predicted probability for a pseudo-label to be accepted
CONFIDENT_PROBA = 0.9
# +
# read unlabelled dataset
unlabelled_data = read_unlabelled_data(UNLABELLED_DATA_PATH)
# save initial dataset as extended
save_extended_data(config, [], [], new_config=config_pl)
# +
available_unlabelled_ids = np.arange(len(unlabelled_data))
np.random.seed(42)
for i in range(N_ITERATIONS):
    samples = []
    labels = []
    # NOTE(review): np.random.randint samples with replacement, so
    # ids_to_label may contain duplicates, and np.delete below treats these
    # values as positional indices -- after the first iteration the values in
    # available_unlabelled_ids no longer equal their positions. Confirm this
    # is intended before reusing this loop.
    ids_to_label = available_unlabelled_ids[
        np.random.randint(low=0,
                          high=len(available_unlabelled_ids),
                          size=ONE_ITERATION_PORTION)]
    available_unlabelled_ids = np.delete(available_unlabelled_ids, ids_to_label)
    # retrain the model on the current extended dataset, then rebuild it
    # for inference
    train_evaluate_model_from_config(deepcopy(config_pl))
    model = build_model(deepcopy(config_pl))
    classes = np.array(list(from_params(
        deepcopy(config_pl["chainer"]["pipe"][CLASSES_VOCAB_ID_IN_PIPE])).keys()))
    for j, sample_id in enumerate(ids_to_label):
        prediction = model([unlabelled_data[sample_id]])[0]
        # keep only samples where the model is confident about some class
        if len(np.where(np.array(prediction) > CONFIDENT_PROBA)[0]):
            samples.append(unlabelled_data[sample_id])
            labels.append(classes[np.where(np.array(prediction) > CONFIDENT_PROBA)])
    print("Iteration {}: add {} samples to train dataset".format(i, len(samples)))
    save_extended_data(config_pl, samples, labels)
# -
| examples/Pseudo-labeling for classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Read a txt file with the built-in `open`
with open("../../data/example.txt", 'r') as f:
    data = f.readlines()
print(data)
# ## Convert the txt contents into a pandas DataFrame
# +
import pandas as pd

data = []
with open("../../data/example.txt", 'r') as f:
    for line in f:
        # strip the trailing newline from each row, then split on commas
        line = line.replace('\n', '').split(',')
        data.append(line)
data
# -
df = pd.DataFrame(data[1:])
df.columns = data[0]
df
# ## Write the data out as a json file
# After reading the json back, is the layout the same as what was written
# (with `id` as the key)?
import json
df.to_json('../../data/example01.json')
# with the default orient, column names become the top-level keys
# and row indices become the nested keys
with open('../../data/example01.json', 'r') as f:
    j1 = json.load(f)
j1
df.set_index('id', inplace=True)
df
df.to_json('../../data/example02.json', orient='index')
with open('../../data/example02.json', 'r') as f:
    j2 = json.load(f)
j2
# ## Save the data as a npy file
# npy is a file format dedicated to storing numpy arrays;
# loading data from npy files is usually much faster!
# [recommended reading](https://towardsdatascience.com/why-you-should-start-using-npy-file-more-often-df2a13cc0161)
import numpy as np
# convert the data rows (excluding the header row) into a numpy array
array = np.array(data[1:])
array
np.save(arr=array, file='../../data/example.npy')
array_back = np.load('../../data/example.npy')
array_back
# ## Pickle
# reading and writing pickle files:
# [Pickle](https://docs.python.org/3/library/pickle.html) can serialize
# almost any Python object to a file.
# e.g. the [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset
# is distributed as pickle files!
import pickle
with open('../../data/example.pkl', 'wb') as f:
    pickle.dump(file=f, obj=data)
with open('../../data/example.pkl', 'rb') as f:
    pkl_data = pickle.load(f)
pkl_data
| 2nd-ML100Days/homework/D-003/Day_003-2_read_and_write_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["pdf-title"]
# # Style Transfer
# In this notebook we will implement the style transfer technique from ["Image Style Transfer Using Convolutional Neural Networks" (Gatys et al., CVPR 2015)](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Gatys_Image_Style_Transfer_CVPR_2016_paper.pdf).
#
# The general idea is to take two images, and produce a new image that reflects the content of one but the artistic "style" of the other. We will do this by first formulating a loss function that matches the content and style of each respective image in the feature space of a deep network, and then performing gradient descent on the pixels of the image itself.
#
# The deep network we use as a feature extractor is [SqueezeNet](https://arxiv.org/abs/1602.07360), a small model that has been trained on ImageNet. You could use any network, but we chose SqueezeNet here for its small size and efficiency.
#
# Here's an example of the images you'll be able to produce by the end of this notebook:
#
# 
#
#
# -
# ## Part 0: Setup
# + tags=["pdf-ignore"]
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Helper functions to deal with image preprocessing
from cs231n.image_utils import load_image, preprocess_image, deprocess_image
from cs231n.classifiers.squeezenet import SqueezeNet
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# Older versions of scipy.misc.imresize yield different results
# from newer versions, so we check to make sure scipy is up to date.
def check_scipy():
    """Fail fast when the installed SciPy predates 0.16.0, since older
    scipy.misc.imresize versions yield different results."""
    import scipy
    parts = scipy.__version__.split('.')
    if int(parts[0]) < 1:
        assert int(parts[1]) >= 16, "You must install SciPy >= 0.16.0 to complete this notebook."

check_scipy()
# + [markdown] tags=["pdf-ignore"]
# Load the pretrained SqueezeNet model. This model has been ported from PyTorch, see `cs231n/classifiers/squeezenet.py` for the model architecture.
#
# To use SqueezeNet, you will need to first **download the weights** by descending into the `cs231n/datasets` directory and running `get_squeezenet_tf.sh` . Note that if you ran `get_assignment3_data.sh` then SqueezeNet will already be downloaded.
#
# **NOTE**: Ignore Tensorflow warnings in cells below
# + tags=["pdf-ignore"]
# Load pretrained SqueezeNet model
# Choose exactly one SAVE_PATH below depending on where you run the notebook.
SAVE_PATH = None
# Local
# SAVE_PATH = 'cs231n/datasets/squeezenet.ckpt'
# Colab
#SAVE_PATH = '/content/drive/My Drive/{}/{}'.format(FOLDERNAME, 'cs231n/datasets/squeezenet.ckpt')
assert SAVE_PATH is not None, "[!] Choose path to squeezenet.ckpt"
# TF checkpoints are stored as several files; the ".index" file existing
# means the checkpoint has been downloaded
if not os.path.exists(SAVE_PATH + ".index"):
    raise ValueError("You need to download SqueezeNet!")
# reference answers and style images live next to the checkpoint
CHECKS_PATH = SAVE_PATH.replace('cs231n/datasets/squeezenet.ckpt', 'style-transfer-checks-tf.npz')
STYLES_FOLDER = CHECKS_PATH.replace('style-transfer-checks-tf.npz', 'styles')
# + tags=["pdf-ignore"]
# the network is a frozen feature extractor: weights are loaded, not trained
model=SqueezeNet()
model.load_weights(SAVE_PATH)
model.trainable=False
# Load data for testing
content_img_test = preprocess_image(load_image('%s/tubingen.jpg' % (STYLES_FOLDER), size=192))[None]
style_img_test = preprocess_image(load_image('%s/starry_night.jpg' % (STYLES_FOLDER), size=192))[None]
answers = np.load(CHECKS_PATH)
# -
# # Part 1: Computing Loss
#
# We're going to compute the three components of our loss function now. The loss function is a weighted sum of three terms: content loss + style loss + total variation loss. You'll fill in the functions that compute these weighted terms below.
# ## Part 1A: Content loss
# We can generate an image that reflects the content of one image and the style of another by incorporating both in our loss function. We want to penalize deviations from the content of the content image and deviations from the style of the style image. We can then use this hybrid loss function to perform gradient descent **not on the parameters** of the model, but instead **on the pixel values** of our original image.
#
# Let's first write the content loss function. Content loss measures how much the feature map of the generated image differs from the feature map of the source image. We only care about the content representation of one layer of the network (say, layer $\ell$), that has feature maps $A^\ell \in \mathbb{R}^{1 \times H_\ell \times W_\ell \times C_\ell}$. $C_\ell$ is the number of filters/channels in layer $\ell$, $H_\ell$ and $W_\ell$ are the height and width. We will work with reshaped versions of these feature maps that combine all spatial positions into one dimension. Let $F^\ell \in \mathbb{R}^{M_\ell \times C_\ell}$ be the feature map for the current image and $P^\ell \in \mathbb{R}^{M_\ell \times C_\ell}$ be the feature map for the content source image where $M_\ell=H_\ell\times W_\ell$ is the number of elements in each feature map. Each row of $F^\ell$ or $P^\ell$ represents the vectorized activations of a particular filter, convolved over all positions of the image. Finally, let $w_c$ be the weight of the content loss term in the loss function.
#
# Then the content loss is given by:
#
# $L_c = w_c \times \sum_{i,j} (F_{ij}^{\ell} - P_{ij}^{\ell})^2$
#
# Implement `content_loss` in `cs231n/style_transfer_tensorflow.py`
# Test your content loss. The error should be less than 1e-8.
# +
from cs231n.style_transfer_tensorflow import content_loss, extract_features, rel_error
def content_loss_test(correct):
    """Sanity-check the student's content_loss against the reference value."""
    layer, weight = 2, 6e-2
    source_feats = extract_features(content_img_test, model)[layer]
    zero_img = tf.zeros(content_img_test.shape)
    zero_feats = extract_features(zero_img, model)[layer]
    student_output = content_loss(weight, source_feats, zero_feats)
    print('Maximum error is {:.3f}'.format(rel_error(correct, student_output)))

content_loss_test(answers['cl_out'])
# -
# ## Part 1B: Style loss
#
#
# Now we can tackle the style loss. For a given layer $\ell$, the style loss is defined as follows:
#
# First, compute the Gram matrix $G$ which represents the correlations between the values in each channel of the feature map (i.e. the "responses" of the filter responsible for that channel), where $F$ is as above. The Gram matrix is an approximation of the covariance matrix -- it tells us how every channel's values (i.e. that filter's activations) correlate with every other channel's values. If we have $C$ channels, matrix $G$ will be of shape $(C, C)$ to capture these correlations.
#
# We want the activation statistics of our generated image to match the activation statistics of our style image, and matching the (approximate) covariance is one way to do that. There are a variety of ways you could do this, but the Gram matrix is nice because it's easy to compute and in practice shows good results.
#
# Given a feature map $F^\ell$ of shape $(C_\ell, H_\ell, W_\ell)$, we can flatten the height and width dimensions so they're just 1 dimension $M_\ell = H_\ell \times W_\ell$: the new shape of $F^\ell$ is $(C_\ell, M_\ell)$. Then, the Gram matrix has shape $(C_\ell, C_\ell)$ where each element is given by the equation:
#
# $$G_{ij}^\ell = \sum_k F^{\ell}_{ik} F^{\ell}_{jk}$$
#
# Assuming $G^\ell$ is the Gram matrix from the feature map of the current image, $A^\ell$ is the Gram Matrix from the feature map of the source style image, and $w_\ell$ a scalar weight term, then the style loss for the layer $\ell$ is simply the weighted Euclidean distance between the two Gram matrices:
#
# $$L_s^\ell = w_\ell \sum_{i, j} \left(G^\ell_{ij} - A^\ell_{ij}\right)^2$$
#
# In practice we usually compute the style loss at a set of layers $\mathcal{L}$ rather than just a single layer $\ell$; then the total style loss is the sum of style losses at each layer:
#
# $$L_s = \sum_{\ell \in \mathcal{L}} L_s^\ell$$
#
# Begin by implementing the Gram matrix computation function `gram_matrix` inside `cs231n\style_transfer_tensorflow.py`:
# Test your Gram matrix code. You should see errors less than 0.001.
# +
from cs231n.style_transfer_tensorflow import gram_matrix
def gram_matrix_test(correct):
    """Check gram_matrix on the style image's features at layer 4
    (the second MaxPooling layer, instead of 5)."""
    style_feats = extract_features(style_img_test, model)
    student_gram = gram_matrix(style_feats[4])
    print('Maximum error is {:.3f}'.format(rel_error(correct, student_gram)))

gram_matrix_test(answers['gm_out'])
# -
# Next, put it together and implement the style loss function `style_loss` in `cs231n/style_transfer_tensorflow.py`
# Test your style loss implementation. The error should be less than 0.001.
# +
from cs231n.style_transfer_tensorflow import style_loss
def style_loss_test(correct):
    """Check the student's style_loss against the reference output."""
    layers = [0, 3, 5, 6]
    layer_weights = [300000, 1000, 15, 3]
    content_feats = extract_features(content_img_test, model)
    reference_feats = extract_features(style_img_test, model)
    # the style targets are the Gram matrices of the style image's features
    targets = [gram_matrix(reference_feats[idx]) for idx in layers]
    s_loss = style_loss(content_feats, layers, targets, layer_weights)
    print('Error is {:.3f}'.format(rel_error(correct, s_loss)))

style_loss_test(answers['sl_out'])
# -
# ## Part 1C: Total-variation regularization
# It turns out that it's helpful to also encourage smoothness in the image. We can do this by adding another term to our loss that penalizes wiggles or "total variation" in the pixel values.
#
# You can compute the "total variation" as the sum of the squares of differences in the pixel values for all pairs of pixels that are next to each other (horizontally or vertically). Here we sum the total-variation regularization for each of the 3 input channels (RGB), and weight the total summed loss by the total variation weight, $w_t$:
#
# $L_{tv} = w_t \times \left(\sum_{c=1}^3\sum_{i=1}^{H-1}\sum_{j=1}^{W} (x_{i+1,j,c} - x_{i,j,c})^2 + \sum_{c=1}^3\sum_{i=1}^{H}\sum_{j=1}^{W - 1} (x_{i,j+1,c} - x_{i,j,c})^2\right)$
#
# In `cs231/style_transfer_tensorflow.py`, fill in the definition for the TV loss term in `tv_loss`. To receive full credit, your implementation should not have any loops.
# Test your TV loss implementation. Error should be less than 0.001.
# +
from cs231n.style_transfer_tensorflow import tv_loss
from inspect import getsourcelines
import re
def tv_loss_test(correct):
    """Check tv_loss against the reference value and warn when the
    implementation contains a Python-level loop."""
    t_loss = tv_loss(content_img_test, 2e-2)
    print('Error is {:.4f}'.format(rel_error(correct, t_loss)))
    source_lines, _ = getsourcelines(tv_loss)
    if any(re.search(r"for \S* in", line) for line in source_lines):
        print("WARNING!!!! - Your implementation of tv_loss contains a loop! To receive full credit, your implementation should not have any loops")

tv_loss_test(answers['tv_out'])
# -
# # Part 2: Style Transfer
# Lets put it all together and make some beautiful images! The `style_transfer` function below combines all the losses you coded up above and optimizes for an image that minimizes the total loss.
# + tags=["pdf-ignore-input"]
def style_transfer(content_image, style_image, image_size, style_size, content_layer, content_weight,
                   style_layers, style_weights, tv_weight, init_random = False):
    """Run style transfer!

    Inputs:
    - content_image: filename of content image
    - style_image: filename of style image
    - image_size: size of smallest image dimension (used for content loss and generated image)
    - style_size: size of smallest style image dimension
    - content_layer: layer to use for content loss
    - content_weight: weighting on content loss
    - style_layers: list of layers to use for style loss
    - style_weights: list of weights to use for each layer in style_layers
    - tv_weight: weight of total variation regularization term
    - init_random: initialize the starting image to uniform random noise

    Displays the source images and the generated image; returns nothing.
    """
    # Extract features from the content image
    content_img = preprocess_image(load_image(content_image, size=image_size))
    feats = extract_features(content_img[None], model)
    content_target = feats[content_layer]

    # Extract features from the style image
    style_img = preprocess_image(load_image(style_image, size=style_size))
    s_feats = extract_features(style_img[None], model)
    style_targets = []
    # Compute list of TensorFlow Gram matrices (the style targets)
    for idx in style_layers:
        style_targets.append(gram_matrix(s_feats[idx]))

    # Set up optimization hyperparameters: the learning rate is dropped once
    # from initial_lr to decayed_lr at iteration decay_lr_at
    initial_lr = 3.0
    decayed_lr = 0.1
    decay_lr_at = 180
    max_iter = 200

    step = tf.Variable(0, trainable=False)
    boundaries = [decay_lr_at]
    values = [initial_lr, decayed_lr]
    learning_rate_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)

    # Later, whenever we perform an optimization step, we pass in the step.
    learning_rate = learning_rate_fn(step)
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

    # Show both source images side by side before optimizing
    f, axarr = plt.subplots(1,2)
    axarr[0].axis('off')
    axarr[1].axis('off')
    axarr[0].set_title('Content Source Img.')
    axarr[1].set_title('Style Source Img.')
    axarr[0].imshow(deprocess_image(content_img))
    axarr[1].imshow(deprocess_image(style_img))
    plt.show()
    plt.figure()

    # Initialize generated image to noise or to the content image; gradient
    # descent runs on the pixels of img_var, not on the model's weights
    if init_random:
        initializer = tf.random_uniform_initializer(0, 1)
        img = initializer(shape=content_img[None].shape)
        img_var = tf.Variable(img)
        print("Intializing randomly.")
    else:
        img_var = tf.Variable(content_img[None])
        print("Initializing with content image.")

    for t in range(max_iter):
        with tf.GradientTape() as tape:
            tape.watch(img_var)
            feats = extract_features(img_var, model)
            # Compute loss: weighted content + style + total-variation terms
            c_loss = content_loss(content_weight, feats[content_layer], content_target)
            s_loss = style_loss(feats, style_layers, style_targets, style_weights)
            t_loss = tv_loss(img_var, tv_weight)
            loss = c_loss + s_loss + t_loss
        # Compute gradient with respect to the image pixels and take a step
        grad = tape.gradient(loss, img_var)
        optimizer.apply_gradients([(grad, img_var)])
        # keep pixel values inside the valid preprocessed range
        img_var.assign(tf.clip_by_value(img_var, -1.5, 1.5))

        # periodically display the work-in-progress image
        if t % 100 == 0:
            print('Iteration {}'.format(t))
            plt.imshow(deprocess_image(img_var[0].numpy(), rescale=True))
            plt.axis('off')
            plt.show()
    # show the final result
    print('Iteration {}'.format(t))
    plt.imshow(deprocess_image(img_var[0].numpy(), rescale=True))
    plt.axis('off')
    plt.show()
# -
# ## Generate some pretty pictures!
#
# Try out `style_transfer` on the three different parameter sets below. Make sure to run all three cells. Feel free to add your own, but make sure to include the results of style transfer on the third parameter set (starry night) in your submitted notebook.
#
# * The `content_image` is the filename of content image.
# * The `style_image` is the filename of style image.
# * The `image_size` is the size of smallest image dimension of the content image (used for content loss and generated image).
# * The `style_size` is the size of smallest style image dimension.
# * The `content_layer` specifies which layer to use for content loss.
# * The `content_weight` gives weighting on content loss in the overall loss function. Increasing the value of this parameter will make the final image look more realistic (closer to the original content).
# * `style_layers` specifies a list of which layers to use for style loss.
# * `style_weights` specifies a list of weights to use for each layer in style_layers (each of which will contribute a term to the overall style loss). We generally use higher weights for the earlier style layers because they describe more local/smaller scale features, which are more important to texture than features over larger receptive fields. In general, increasing these weights will make the resulting image look less like the original content and more distorted towards the appearance of the style image.
# * `tv_weight` specifies the weighting of total variation regularization in the overall loss function. Increasing this value makes the resulting image look smoother and less jagged, at the cost of lower fidelity to style and content.
#
# Below the next three cells of code (in which you shouldn't change the hyperparameters), feel free to copy and paste the parameters to play around them and see how the resulting image changes.
# +
# Composition VII + Tubingen
# (fixed hyperparameter sets -- run as-is; copy below to experiment)
params1 = {
    'content_image' : '%s/tubingen.jpg' % (STYLES_FOLDER),
    'style_image' : '%s/composition_vii.jpg' % (STYLES_FOLDER),
    'image_size' : 192,
    'style_size' : 512,
    'content_layer' : 2,
    'content_weight' : 5e-2,
    'style_layers' : (0, 3, 5, 6),
    'style_weights' : (20000, 500, 12, 1),
    'tv_weight' : 5e-2
}

style_transfer(**params1)
# +
# Scream + Tubingen
params2 = {
    'content_image':'%s/tubingen.jpg' % (STYLES_FOLDER),
    'style_image':'%s/the_scream.jpg' % (STYLES_FOLDER),
    'image_size':192,
    'style_size':224,
    'content_layer':2,
    'content_weight':3e-2,
    'style_layers':[0, 3, 5, 6],
    'style_weights':[200000, 800, 12, 1],
    'tv_weight':2e-2
}

style_transfer(**params2)
# +
# Starry Night + Tubingen
params3 = {
    'content_image' : '%s/tubingen.jpg' % (STYLES_FOLDER),
    'style_image' : '%s/starry_night.jpg' % (STYLES_FOLDER),
    'image_size' : 192,
    'style_size' : 192,
    'content_layer' : 2,
    'content_weight' : 6e-2,
    'style_layers' : [0, 3, 5, 6],
    'style_weights' : [300000, 1000, 15, 3],
    'tv_weight' : 2e-2
}

style_transfer(**params3)
# -
# # Part 3: Feature Inversion
#
# The code you've written can do another cool thing. In an attempt to understand the types of features that convolutional networks learn to recognize, a recent paper "[Understanding Deep Image Representations by Inverting Them](https://arxiv.org/pdf/1412.0035.pdf)" attempts to reconstruct an image from its feature representation. We can easily implement this idea using image gradients from the pretrained network, which is exactly what we did above (but with two different feature representations).
#
# Now, if you set the style weights to all be 0 and initialize the starting image to random noise instead of the content source image, you'll reconstruct an image from the feature representation of the content source image. You're starting with total noise, but you should end up with something that looks quite a bit like your original image.
#
# (Similarly, you could do "texture synthesis" from scratch if you set the content weight to 0 and initialize the starting image to random noise, but we won't ask you to do that here.)
#
# Run the following cell to try out feature inversion.
#
# [1] <NAME>, <NAME>, "Understanding Deep Image Representations by Inverting Them", CVPR 2015
#
# +
# Feature Inversion -- Starry Night + Tubingen
# Reconstructs the content image from its feature representation alone:
# style contributions are zeroed out and optimization starts from noise.
params_inv = {
    'content_image' : '%s/tubingen.jpg' % (STYLES_FOLDER),
    'style_image' : '%s/starry_night.jpg' % (STYLES_FOLDER),
    'image_size' : 192,
    'style_size' : 192,
    'content_layer' : 2,
    'content_weight' : 6e-2,
    'style_layers' : [0, 3, 5, 6],
    'style_weights' : [0, 0, 0, 0], # we discard any contributions from style to the loss
    'tv_weight' : 2e-2,
    'init_random': True # we want to initialize our image to be random
}

style_transfer(**params_inv)
# -
| assignment3/StyleTransfer-TensorFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 8.0
# language: ''
# name: sagemath
# ---
# # EJERCICIOS Laboratorio.pdf - <NAME>
# EJERCICIO 1 - Demuestra por inducción sobre $n \in \mathbb{N}$ las afirmaciones siguientes:
# 1. $1^2 + 2^2 + ... + n^2 = \frac{n(n+1)(2n+1)}{6}$
[(sum(k^2 for k in srange (1, n+1))) == ((n*(n+1)*((2*n)+1))/6)
for n in srange(10, 20)]
#incrementar el valor de n en el for para comprobar un mayor rango de números
# 2.$\frac{1}{1*2} + \frac{1}{2*3} + ... + \frac{1}{n(n+1)} = \frac{n}{n+1} , \forall n >= 1$
[(sum(1/(k*(k+1)) for k in srange (1, n+1))) == (n/(n+1)) for n in srange(1, 10)]
#incrementar el valor de n en el for para comprobar un mayor rango de números
# 3.$1*1! + 2*2! + ... + n*n! = (n+1)! - 1$
[(sum(k*factorial(k) for k in srange (1, n+1))) == (factorial(n+1) - 1)
for n in srange (1, 10)]
#incrementar el valor de n en el for para comprobar un mayor rango de números
# 4.$\frac{1}{2} + \frac{2}{2^2} + \frac{3}{2^3} + ... + \frac{n}{2^n} = 2 - \frac{n+2}{2^n}$
[(sum(k/(2^k) for k in srange (1, n+1))) == (2-((n+2)/(2^n)))
for n in srange (1, 10)]
#incrementar el valor de n en el for para comprobar un mayor rango de números
# 5.$(1+q)(1+q^2)(1+q^4) ... (1+q^{2^n}) = \frac{1-q^{2^{n+1}}}{1-q}$
[(mul(1+(q^(2^k)) for k in srange(0,n+1))) == ((1-(q^(2^(n+1))))/(1-q))
for n in srange (1, 5) for q in srange (2, 5)]
#incrementar el valor de n y el de q(NUNCA IGUAL A 1)
#para comprobar un mayor rango de números
# EJERCICIO 3 - Demostrar por inducción la fórmula para la suma de los n primeros cubos: $$1^3 + 2^3 + ... + n^3 = \frac{(n+1)^2n^2}{4}$$
[(sum(k^3 for k in srange(1, n+1))) == ((((n+1)^2)*(n^2))/4) for n in srange (1,10)]
#incrementar el valor de n en el for para comprobar un mayor rango de números
# EJERCICIO 4 - Estudiar el límite de las siguientes sucesiones:
# a) $\left\lbrace\frac{n^2}{n+2}\right\rbrace$
var('n')
l(n) = (n^2)/(n+2)
l.limit(n = infinity)
# b) $\left\lbrace\frac{n^3}{n^3 + 2n + 1}\right\rbrace$
var('n')
l(n) = (n^3)/(n^3 + 2*n + 1)
l.limit(n = infinity)
# c) $\left\lbrace\frac{n}{n^2 - n -4}\right\rbrace$
var('n')
l(n)=(n)/(n^2 -n -4)
l.limit(n = infinity)
# d) $\left\lbrace\frac{\sqrt{2n^2-1}}{n + 2}\right\rbrace$
var('n')
l(n)=(sqrt(2*(n^2)-1))/(n+2)
l.limit(n = infinity)
# e) $\left\lbrace\frac{\sqrt{n^3+2n}+n}{n^2 + 2}\right\rbrace$
var('n')
l(n)=(sqrt(n^3+2*n)+n)/(n^2+2)
l.limit(n = infinity)
# f) $\left\lbrace\frac{\sqrt{n+1}+n^2}{\sqrt{n + 2}}\right\rbrace$
var('n')
l(n)=(sqrt(n+1)+n^2)/(sqrt(n+2))
l.limit(n = infinity)
# g) $\left\lbrace\frac{(-1)^nn^2}{n^2+2}\right\rbrace$
var('n')
l(n)=(((-1)^n)*n^2)/(n^2+2)
l.limit(n = infinity)
# h) $\left\lbrace\frac{n+(-1)^n}{n}\right\rbrace$
var('n')
l(n)=(n+(-1)^n)/(n)
l.limit(n = infinity)
# i) $\left\lbrace(\frac{2}{3})^n\right\rbrace$
var('n')
l(n)=(2/3)^n
l.limit(n = infinity)
# j) $\left\lbrace(\frac{5}{3})^n\right\rbrace$
var('n')
l(n)=(5/3)^n
l.limit(n = infinity)
# k) $\left\lbrace\frac{2^n}{4^n+1}\right\rbrace$
var('n')
l(n) = (2^n)/(4^n+1)
l.limit(n = infinity)
# l) $\left\lbrace\frac{3^n+(-2)^n}{3^{n+1}+(-2)^{n+1}}\right\rbrace$
var('n')
l(n) = (3^n+(-2)^n)/(3^(n+1)+(-2)^(n+1))
l.limit(n = infinity)
# m) $\left\lbrace\frac{n}{n+1}-\frac{n+1}{n}\right\rbrace$
var('n')
l(n) = ((n)/(n + 1) - (n+1)/n)
l.limit(n = infinity)
# n) $\left\lbrace\sqrt{n+1}-\sqrt{n}\right\rbrace$
var('n')
l(n) = (sqrt(n+1)-sqrt(n))
l.limit(n = infinity)
# ñ) $\left\lbrace\frac{1}{n^2}+\frac{2}{n^2}+...+\frac{n}{n^2}\right\rbrace$
# EJERCICIO 5 - Compute, if they exist, the limits of the sequences whose general terms are:
# a) $a_{n} = (\frac{n^2+1}{n^2})^{2n^2-3}$
var('n')
# NOTE: in Sage `^` binds tighter than `/`, so the original expression
# (n^2+1)/(n^2)^(2*n^2-3) raised only n^2 to the exponent instead of the
# whole fraction.  Parenthesise the base so it matches the formula above.
l(n) = ((n^2+1)/(n^2))^(2*n^2-3)
l.limit(n = infinity)
# b) $b_{n} = (\frac{n^2-1}{n^2})^{2n^2-3}$
var('n')
l(n) = ((n^2-1)/(n^2))^(2*n^2-3)
l.limit(n = infinity)
# c) $c_{n} = a_{n}+\frac{1}{b_{n}}$
var('n')
# Same parenthesisation fix applied to both a_n and b_n.
f(n) = ((n^2+1)/(n^2))^(2*n^2-3)
g(n) = ((n^2-1)/(n^2))^(2*n^2-3)
l(n) = f(n)+(1/g(n))
l.limit(n = infinity)
| 2_Curso/Laboratorio/SAGE-noteb/IPYNB/IPYNB-mios/Ejercicios Laboratorio.pdf (parte1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hermite Polynomials
# These are a set of polynomials $H_n$ on the real line, orthogonal over the interval $[-\infty, \infty]$.
#
# Physicists tend to encounter them in the eigenfunctions of the quantum harmonic oscillator:
#
# $$ \psi_n(x) = \frac{1}{\sqrt{2^n\,n!}} \cdot \left(\frac{m\omega}{\pi \hbar}\right)^{1/4} \cdot e^{
# - \frac{m\omega x^2}{2 \hbar}} \cdot H_n\left(\sqrt{\frac{m\omega}{\hbar}} x \right), \qquad n = 0,1,2,\ldots $$
#
# They are also important in probability and statistics. There is disagreement about scaling the functions: physicists use a leading term of $2^n$ and this seems to be what Python libraries deliver. In probability a leading term of 1 is preferred.
# +
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
# -
# Python implements these functions in a variety of ways (see the Legendre function notebook for fuller details). The two simplest are:
#
# - `scipy.special.hermite()` for numerical work and plotting
# - `sympy.functions.special.polynomials.hermite()` for symbolic math
# ### scipy.special
#
# Calling `hermite(n)` returns the nth-order polynomial as a function which can then itself be called with one or more x-values.
import scipy.special as sp

# hermite(3) returns the 3rd-order polynomial (physicists' convention,
# leading coefficient 2^n) as a callable orthopoly1d object.
H_3_sp = sp.hermite(3)
display(H_3_sp)

# Evaluate H_3 at ten evenly spaced points in [-1, 1].
x10 = np.linspace(-1, 1, 10)
display(H_3_sp(x10))
# ## sympy.functions.special.polynomials
#
# This is symbolic math, which will give you differentiation, integration, etc, as well as nice $LaTeX$ output. Not so convenient for plotting.
# +
from IPython.display import Math
from sympy import latex, hermite
from sympy.abc import x

# Render H_0 .. H_9 symbolically; latex() gives the LaTeX source and
# Math() displays it typeset in the notebook.
for i in range(10):
    l_i = latex(hermite(i, x))
    display(Math('H_{} = {}'.format(i, l_i)))
# -
# Note that the coefficients rapidly get large.
#
# When __plotting__ the first few Hermite polymomials over a limited range it is clear that these are very different curves, so scaling the y-axis is a problem.
# +
import scipy.special as sp

# Plot H_0 .. H_5 over [-1, 1] on shared axes.
xlims = (-1, 1)
x = np.linspace(xlims[0], xlims[1], 100)

plt.figure(figsize=(9, 9))
for v in range(0, 6):
    plt.plot(x, sp.hermite(v)(x))
plt.xlim(xlims)
# plt.ylim((-1.1, 1.1))
# Raw strings: '\m' in an ordinary literal is an invalid escape sequence
# (a SyntaxWarning from Python 3.12 onwards).  Generating the labels in a
# comprehension keeps them in sync with the loop above.
plt.legend([rf'$\mathcal{{H}}_{v}(x)$' for v in range(6)], loc = 0)
plt.xlabel('$x$')
plt.ylabel(r'$\mathcal{H}_n(x)$')
plt.title('Plots of the first six Hermite Polynomials')
plt.grid(True)
# -
# ## References:
#
# - Boas, "Mathematical methods in the physical sciences", 3rd ed, section 12.22
# - MathWorld, http://mathworld.wolfram.com/HermitePolynomial.html
# - Wikipedia, https://en.wikipedia.org/wiki/Hermite_polynomials
# - Griffiths & Schroeter, "Introduction to Quantum Mechanics", 3rd ed, section 2.3.2
| math/Hermite.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit (conda)
# language: python
# name: python3
# ---
# # Relatório de Análises VI
# ## Criando novas variáveis
import pandas as pd
# Semicolon-separated listing of residential rental offers.
dados = pd.read_csv('../data/aluguel/aluguel_residencial.csv', sep = ';')
dados.head(10)

# ## Criando uma nova variável

# Gross price = rent + condo fee + property tax.
dados['Valor Bruto'] = dados['Valor'] + dados['Condominio'] + dados['IPTU']
dados.head(10)

# Rent per square metre, rounded to 2 decimal places in the next cell.
dados['Valor m²'] = dados['Valor'] / dados['Area']
dados.head(10)

dados['Valor m²'] = dados['Valor m²'].round(2)
dados.head(10)

dados['Valor Bruto m²'] = (dados['Valor Bruto'] / dados['Area']).round(2)
dados.head(10)

# Collapse the house subtypes into a single 'Casa' category; everything
# else is labelled 'Apartamento'.
casa = ['Casa', 'Casa de CondomÃnio', 'Casa de Vila']
dados['Tipo Agregado'] = dados['Tipo'].apply(lambda x: 'Casa' if x in casa else 'Apartamento')
dados

# ## Excluindo Variáveis

dados_aux = pd.DataFrame(dados[['Tipo Agregado', 'Valor m²', 'Valor Bruto', 'Valor Bruto m²']])
dados_aux.head(10)

# del removes a single column in place.
del dados_aux['Valor Bruto']
dados_aux.head(10)

dados_aux.pop('Valor Bruto m²') # pop removes one column at a time (and returns it)
dados_aux

dados.drop(['Valor Bruto', 'Valor Bruto m²'], axis = 1, inplace = True) # drop removes columns (axis=1) or rows (the default)
dados.head(10)

dados.to_csv('../data/aluguel/aluguel_residencial.csv', sep = ';', index = False)
# ## Exercicios
# ## Falando da criação de novas variáveis para um DataFrame, analise as opções abaixo e indique a que apresenta um erro durante a execução. Considere o DataFrame abaixo:
import pandas as pd

# Roster used by the exercise options below.
alunos = pd.DataFrame({'Nome': ['Ary', 'Cátia', 'Denis', 'Beto', 'Bruna', 'Dara', 'Carlos', 'Alice'],
                       'Sexo': ['M', 'F', 'M', 'M', 'F', 'F', 'M', 'F'],
                       'Idade': [15, 27, 56, 32, 42, 21, 19, 35],
                       'Notas': [7.5, 2.5, 5.0, 10, 8.2, 7, 6, 5.6]},
                      columns = ['Nome', 'Idade', 'Sexo', 'Notas'])
alunos

# Deviation of each grade from the class mean.  The vectorised subtraction
# replaces the original .apply(lambda x: x - alunos['Notas'].mean()), which
# re-evaluated the mean once per row; the resulting values are identical.
alunos['Notas-Média(Notas)'] = alunos['Notas'] - alunos['Notas'].mean()
alunos

# Age bracket: under 20 / 20-40 / over 40.
alunos['Faixa Etária'] = alunos['Idade'].apply(lambda x: 'Menor que 20 anos' if x < 20
                                               else ('Entre 20 e 40 anos' if (x >= 20 and x <= 40)
                                                     else 'Maior que 40 anos'))
alunos
# +
# This option is the one that fails: `elif` is a statement keyword and cannot
# appear inside a lambda's conditional expression, so this cell raises a
# SyntaxError -- which is the answer to the quiz.  Kept as-is on purpose.
alunos['Faixa Etária'] = alunos['Idade'].apply(lambda x: 'Menor que 20 anos' if x < 20
                         elif ('Entre 20 e 40 anos' if (x >= 20 and x <= 40)
                         else 'Maior que 40 anos'))
alunos
# -

# Valid alternative: plain vectorised subtraction instead of .apply().
alunos['Notas-Média(Notas)'] = alunos.Notas - alunos.Notas.mean()
alunos
| notebook/criando-novas-variaveis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
import os
import glob
def get_filenames(path):
    '''
    Return all filenames matching a glob pattern.

    Parameters
    ----------
    path : str
        Glob pattern, e.g. "data/*.xlsx" (wildcards allowed).

    Returns
    -------
    list of str
        Matching paths; empty list when nothing matches.
    '''
    # glob.glob already returns a list, so no accumulator is needed
    # (the original initialised file_list = [] and immediately overwrote it).
    return glob.glob(path)
file_list = get_filenames("data/*.xlsx")
file_list

# --- Project lists: read each sheet, tag its group, and normalise the
# --- verbose survey column headers to short names.
test_projects_df = pd.read_excel('data\\INFORM Pilot Projects.xlsx', sheet_name = 'Test Group')
test_projects_df['Group Type'] = 'Test Group'
test_projects_df = test_projects_df.rename(index=str, columns={
    "FAS Procurement Status\n(provide planned solicitation date, closing date, etc.)\n":"FAS Procurement Status",
    'PBS Estimated Award Date \n(provide specific date if possible -- if not put "TBD")': "PBS Estimated Award Date",
    "PBS Procurement Status\n(provide planned solicitation date, closing date, etc.)\n(provide brief description of any obstacles, issues, etc.)": "PBS Procurement Status"
})

control_projects_df = pd.read_excel('data\\INFORM Pilot Projects.xlsx', sheet_name = 'Control Group')
control_projects_df['Group Type'] = 'Control Group'
control_projects_df = control_projects_df.rename(index=str, columns={
    "ri": "Service",
    'PBS Estimated Award Date \n(provide specific date if possible -- if not put "TBD")': "PBS Estimated Award Date",
    "PBS Procurement Status\n(provide planned solicitation date, closing date, etc.)\n(provide brief description of any obstacles, issues, etc.)": "PBS Procurement Status"
})

# DataFrame.append is deprecated (removed in pandas 2.0); pd.concat is the
# supported equivalent and builds the same stacked frame here.
INFORM_projects_df = pd.concat([control_projects_df, test_projects_df], sort=False)
INFORM_projects_df.to_excel('INFORM Projects.xlsx')

# --- Hours surveys: same treatment for the test and control responses.
test_hours_df = pd.read_excel('data\\Copy of Acquisition Workforce Feedback Report-TEST Projects (Responses).xlsx')
test_hours_df['Group Type'] = 'Test Group'
control_hours_df = pd.read_excel('data\\INFORM Workforce Feedback Report Control Projects (Responses).xlsx')
control_hours_df['Group Type'] = 'Control Group'

test_hours_df = test_hours_df.rename(index=str, columns={
    "Solicitation Number ": "Solicitation Number",
    "Date of Award (to be completed by Contracting Officers Only. If not Contracting Officer Enter 10/01/2018 ": "Date of Award",
    "Point of Contact (name of person who is completing survey)": "Point of Contact",
    "Position of Person Completing Survey": "POC Position",
    "NLES-Number of Hours Spent Preparing and Delivering (E.G. 1.5 HRS)":"NLES: Hours",
    "NLES-Number of NLES Sent (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"NLES: Number Sent",
    "On average Were NLS Issued Within 3 Business Days of Award? (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"On average Were NLS Issued Within 3 Business Days of Award?",
    "Provide Offeror(s) Information DUNS Number and Company Name for All Offerors (awardee and non awardees) (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"DUNS Number and Company Name for All Offerors",
    "Provide PIID Award Number for Each Awardee (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"PIID Award Number for Each Awardee",
    "Was There a Protest (to be completed by Contracting Officers Only. Put NA if not Contracting Officers":"Protest?",
    "Number of Offerors Submitting Written Questions. Put 0 if not Contracting Officers":"Number of Offerors Submitting Written Questions",
    "On average Written Questions Answered within 5 Business Days of Request? (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"Written Questions Answered within 5 Business Days of Request?",
    "Number of Offerors Submitting Post Oral Feedback Meeting Questions (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Number of Offerors Submitting Post Oral Feedback Meeting Questions",
    "Number of Hours Spent Preparing and Delivering (E.G. 1.5 HRS)":"Number of Hours Spent Preparing and Delivering Post Oral Feedback Meeting Questions Responses",
    "On Average Were Post Oral Feedback Written Questions Answered Within 5 Business Days (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"On Average Were Post Oral Feedback Written Questions Answered Within 5 Business Days",
    "Oral Feedback Meeting-Number of Hours Spent Preparing and Conducting (E.G. 1.5 HRS)":"Oral Feedback Meeting-Number of Hours Spent Preparing and Conducting",
    "Numbers of Offerors Requesting Oral Feedback Meeting (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Numbers of Offerors Requesting Oral Feedback Meeting",
    "Number of Hours Spent Preparing and Delivering (E.G. 1.5 HRS).1":"Number of Hours Spent Preparing and Delivering Oral Feedback Meeting",
    "On Average Were Oral Feedback Written Questions Answered Within 5 Business Days (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"On Average Were Oral Feedback Written Questions Answered Within 5 Business Days",
    "Modification number , if applicable (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Modification number",
    "Provide REF_IDV_PIID Number for each Awardee (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"REF_IDV_PIID Number for each Awardee",
    "Actual Amount of Award (Obligated) (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Actual Amount of Award (Obligated)",
    "Contracting Office (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Contracting Office",
    "Number of Written Questions Submitted and Answered":"Number of Written Questions Received",
    "Choose the Reporting Quarter":"Reporting Quarter",
    "Number of Offerors who responded to solicitation":"Number of Offerors",
    "Name of Solicitatio":"Name of Solicitation"
})
# Drop the spreadsheet's stray unnamed columns.
test_hours_df = test_hours_df.drop(['Unnamed: 18','Unnamed: 22','Unnamed: 38'], axis=1)

control_hours_df = control_hours_df.rename(index=str, columns={
    "Solicitation Number ": "Solicitation Number",
    "Contracting Office Number (Contracting officer (will complete) Put 0 if not Contracting Officer": "Contracting Office",
    "Date of Award (to be completed by Contracting Officer (CO) only) Put 10/01/2018 if you are not the CO": "Date of Award",
    "Point of Contact (name of person completing form)": "Point of Contact",
    "Position of Person Completing Survey": "POC Position",
    "Indicate which Quarter the Project is Reporting ": "Reporting Quarter",
    "Provide Offeror(s) Information DUNS Number and Company Name for All Offerors (awardee and non awardees) (to be completed by Contracting OfficersOnly. Put 0 if not Contracting Officer":"DUNS Number and Company Name for All Offerors",
    "Provide PIID Award Number for Each Awardee (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"PIID Award Number for Each Awardee",
    "Was There a Protest (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"Protest?",
    "Modification number , if applicable (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Modification number",
    "Provide REF_IDV_PIID Number for each Awardee (to be completed Contracting Officer only) Put 0 if not Contracting Officer":"REF_IDV_PIID Number for each Awardee",
    "Actual Amount of Award (Obligated) (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"Actual Amount of Award (Obligated)",
    "Solicitation Name as indicated in project list. Contracting Officer to complete only":"Name of Solicitation",
    "Provide PIID Award Number for Each Awardee (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"PIID Award Number for Each Awardee",
    "Notification of Award Letter-Number of Hours Spent Preparing and Delivering (to be completed by Contracting Officers only) all others put 0 if not Contracting Officer":"Notification of Award Letter-Number of Hours Spent Preparing and Delivering",
    "Number of Notification of Award Letter Sent (to be completed by Contracting Officers only. If not Contracting Officer put in 0":"Number of Notification of Award Letter Sent",
    "On average Were Notification of Award Letter Issued Within 3 Calendar Days of Award? (to be completed by Contracting Officers Only. Put NA if not CO/CS":"On average Were Notification of Award Letter Issued Within 3 Calendar Days of Award?",
    "Number of Written Questions Received (do not count duplicate questions )":"Number of Written Questions Received",
    "Number of Offeror's requesting debriefs or explanations (to be completed by Contracting Officer (CO). If not Contracting Officer enter 0":"Number of Offeror's requesting debriefs or explanations",
    "Number of Hours Spent Preparing and Conducting to be completed by all":"Number of Hours Spent Preparing and Conducting debriefs or explanations",
    "On average Were Oral Debriefs or Explanations Held Within 5 Business Days of Request? (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer":"On average Were Oral Debriefs or Explanations Held Within 5 Business Days of Request?",
    "Number of Offerors Receiving Written Debrief or Explanation (to be completed by Contracting Officers Only. Put 0 if not Contracting Officer.":"Number of Offerors Receiving Written Debrief or Explanation",
    "Number of Hours Spent Preparing and Submitting (E.G. 1.5 HRS)":"Number of Hours Spent Preparing and Submitting Written Debrief or Explanation",
    "On Average Were Written Debriefs Held Within 5 Business Days (to be completed by Contracting Officers Only. Put NA if not Contracting Officer":"On Average Were Written Debriefs Held Within 5 Business Days?",
    "Number of Offerors to be completed by Contracting Officer only":"Number of Offerors"
})

# Stack both groups (append is deprecated; concat is equivalent here).
INFORM_hours_df = pd.concat([control_hours_df, test_hours_df])
# A sentinel row in the control sheet identifies itself via the Timestamp column.
INFORM_hours_df.loc[INFORM_hours_df['Timestamp'] == 'This is control group project', 'Group Type'] = 'Control Group'

# Free-text hour columns to be reduced to bare numbers.
hours_cols = {"NLES: Hours",
    "Notification of Award Letter-Number of Hours Spent Preparing and Delivering",
    "Number of Hours Spent Preparing and Conducting debriefs or explanations",
    "Number of Hours Spent Preparing and Delivering Oral Feedback Meeting",
    "Number of Hours Spent Preparing and Delivering Post Oral Feedback Meeting Questions Responses",
    "Number of Hours Spent Preparing and Submitting Written Debrief or Explanation",
    "Oral Feedback Meeting-Number of Hours Spent Preparing and Conducting"}

for col in hours_cols:
    INFORM_hours_df[col] = INFORM_hours_df[col].astype(str)
    # Raw string avoids the invalid "\d" escape-sequence warning.
    # NOTE(review): joining ALL matches concatenates digits when a cell holds
    # more than one number (e.g. "1.5 and 3" -> "1.53"); confirm whether only
    # the first match was intended before changing behaviour.
    INFORM_hours_df[col] = [''.join(re.findall(r"\d*\.?\d+", item)) for item in INFORM_hours_df[col]]

INFORM_hours_df.to_excel('INFORM Hours.xlsx')
| INFORM Hours Data Transformation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4
# language: python
# name: python-374
# ---
# # HTML exportieren
#
# Es kann auch eine eigenständige HTML-Datei mit Bokeh-Inhalten generiert werden. Dies wird erreicht durch den Aufruf der Funktion `output_file("….html")`:
from bokeh.io import output_notebook, show
output_notebook()

import bokeh.sampledata
# Downloads the bokeh sample data sets (AAPL stock prices among them) on first run.
bokeh.sampledata.download()

# +
import pandas as pd

from bokeh.plotting import figure
from bokeh.sampledata.stocks import AAPL

df = pd.DataFrame(AAPL)
df['date'] = pd.to_datetime(df['date'])
# -

# Line chart of AAPL closing prices on a datetime x-axis.
p = figure(plot_width=800, plot_height=250, x_axis_type="datetime")
p.line(df['date'], df['close'], color='navy', alpha=0.5)

# +
from bokeh.io import output_file, show
# Redirect subsequent bokeh output to a standalone HTML file.
# NOTE(review): no show(p)/save(p) follows in this notebook, so nothing is
# actually written to plot.html -- presumably a call was meant to come next.
output_file("plot.html")
| docs/bokeh/embedding-export/export-html.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:zero] *
# language: python
# name: conda-env-zero-py
# ---
import hermesql
from hermesql import terms
from hermesql.terms import *
from hermesql import Table
from hermesql.queries import *
# # table
# ## class
class Table2(Selectable):
    """A pypika-style Selectable table that additionally carries a mapping of
    model-defined fields (``field``) and optional filter ``criteria``, so it
    can be built from a ``model`` via :meth:`from_model`."""

    @staticmethod
    def _init_schema(schema: Union[str, list, tuple, Schema, None]) -> Union[str, list, tuple, Schema, None]:
        # This is a bit complicated in order to support backwards compatibility. It should probably be cleaned up for
        # the next major release. Schema is accepted as a string, list/tuple, Schema instance, or None
        if isinstance(schema, Schema):
            return schema
        if isinstance(schema, (list, tuple)):
            # Fold ["a", "b"] into nested Schema("b", parent=Schema("a")).
            return reduce(
                lambda obj, s: Schema(s, parent=obj), schema[1:], Schema(schema[0])
            )
        if schema is not None:
            return Schema(schema)
        return None

    def __init__(self, table_name: str, alias: str, field: dict, criteria: Optional[dict] = None, schema: Optional[Union[Schema, str]] = None, query_cls: Optional[Type["Query"]] = None) -> None:
        """Store the table name, alias, field mapping and criteria.

        ``field`` is expected to map field names to Field_piece-like objects;
        ``criteria`` is an optional dict of named filter expressions.
        """
        super().__init__(alias)
        self._table_name = table_name
        self._field = field
        self._criteria = criteria
        self._schema = self._init_schema(schema)
        self._query_cls = query_cls or Query
        if not issubclass(self._query_cls, Query):
            raise TypeError("Expected 'query_cls' to be subclass of Query")

    # Alternate constructor: unpacks the (name, alias, fields, criteria)
    # tuple produced by model.load_table().
    @classmethod
    def from_model(cls, model, table_name): return cls(*model.load_table(table_name))

    def get_table_name(self) -> str:
        """Return the alias when one is set, otherwise the raw table name."""
        return self.alias or self._table_name

    def get_sql(self, **kwargs: Any) -> str:
        """Render the table reference, including schema prefix and alias."""
        quote_char = kwargs.get("quote_char")
        # FIXME escape
        table_sql = format_quotes(self._table_name, quote_char)

        if self._schema is not None:
            table_sql = "{schema}.{table}".format(
                schema=self._schema.get_sql(**kwargs), table=table_sql
            )

        return format_alias_sql(table_sql, self.alias, **kwargs)

    def __str__(self) -> str:
        return self.get_sql(quote_char='"')

    def __eq__(self, other) -> bool:
        # NOTE(review): this checks isinstance(other, Table) -- the pypika
        # base class -- not Table2, so two Table2 instances never compare
        # equal unless Table2 also derives from Table.  Confirm intended.
        if not isinstance(other, Table):
            return False

        if self._table_name != other._table_name:
            return False

        if self._schema != other._schema:
            return False

        if self.alias != other.alias:
            return False

        return True

    def __repr__(self) -> str:
        if self._schema:
            return "Table('{}', schema='{}')".format(self._table_name, self._schema)
        return "Table('{}')".format(self._table_name)

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)

    def __hash__(self) -> int:
        # Hash on the rendered SQL so equal tables hash equally.
        return hash(str(self))

    def select(self, *terms: Sequence[Union[int, float, str, bool, Term, Field]]) -> "QueryBuilder":
        """
        Perform a SELECT operation on the current table

        :param terms:
            Type: list[expression]

            A list of terms to select. These can be any type of int, float, str, bool or Term or a Field.

        :return: QueryBuilder
        """
        return self._query_cls.from_(self).select(*terms)

    def update(self) -> "QueryBuilder":
        """
        Perform an UPDATE operation on the current table

        :return: QueryBuilder
        """
        return self._query_cls.update(self)

    def insert(self, *terms: Union[int, float, str, bool, Term, Field]) -> "QueryBuilder":
        """
        Perform an INSERT operation on the current table

        :param terms:
            Type: list[expression]

            A list of terms to select. These can be any type of int, float, str, bool or any other valid data

        :return: QueryBuilder
        """
        return self._query_cls.into(self).insert(*terms)
# ## test create table
booking = Table2("booking", "b", {})
booking
# # Field_piece
# ## class
class Field_piece(Criterion, JSON):
    """A query field described by a model: an output alias (``name``), an
    optional SQL ``calculation``, a field ``type`` (e.g. "dimension") and the
    alias of the table it belongs to."""

    def __init__(self, name: str, calculation: Optional[str] = None, type: Optional[str] = None, table_alias: Optional[str] = None) -> None:
        self.alias = name
        self.calculation = calculation
        self.type = type
        self.table_alias = table_alias
        # nodes_() and replace_table() below read self.table, but the original
        # __init__ never assigned it, so calling either raised AttributeError.
        # Default to None (meaning "no table object attached").
        self.table = None

    def nodes_(self) -> Iterator[NodeT]:
        """Yield this node and, when a table object is attached, its nodes."""
        yield self
        if self.table is not None:
            yield from self.table.nodes_()

    @builder
    def replace_table(self, current_table: Optional["Table"], new_table: Optional["Table"]) -> "Field":
        """
        Replaces all occurrences of the specified table with the new table. Useful when reusing fields across queries.

        :param current_table:
            The table to be replaced.
        :param new_table:
            The table to replace with.
        :return:
            A copy of the field with the tables replaced.
        """
        self.table = new_table if self.table == current_table else self.table

    def get_sql(self, with_alias = True, with_namespace = False, quote_char = None, **kwargs: Any) -> str:
        """Render the field as SQL: ``[table_alias.]calculation [AS alias]``."""
        field_sql = format_quotes(self.calculation, quote_char)

        # Prefix with the table alias when one is set or a namespace is forced.
        if with_namespace or self.table_alias:
            field_sql = "{namespace}.{name}".format(
                namespace=format_quotes(self.table_alias, quote_char), name=field_sql,
            )

        field_alias = getattr(self, "alias", None)
        if with_alias: return format_alias_sql(field_sql, field_alias, quote_char=quote_char, **kwargs)
        return field_sql
# ## test field piece

# Example field definitions as they appear in a model's "field" section.
field_dict = {
    "invite_id": {
        "calculation": "id",
        "type": "dimension"
    },
    "invite_at": {
        "calculation": "fct.date_add(created_at)",
        "type": "dimension"
    },
    "invite_at_date": {
        "calculation": "fct.date_date_add(created_at)",
        "type": "dimension"
    }
}

# Create the field BEFORE it is inserted into the dict (the original
# referenced test_field two cells before defining it, a NameError when run
# top to bottom) and pass table_alias= -- Field_piece has no `table` keyword.
test_field = Field_piece(name = "invite_id", calculation = "id", type = "dimension", table_alias = booking.alias)
test_field.get_sql()

field_dict_1 = {}
field_dict_1["invite_id"] = test_field
field_dict_1["invite_at"] = test_field
field_dict_1["invite_id"].get_sql()

# Attach the fields to the table built earlier.
booking.field = field_dict_1
# # model
import json
# ## model dict
model_dict = {
"table": {
"invites": {
"alias": "ivt",
"field": {
"invite_id": {
"calculation": "id",
"type": "dimension"
},
"invite_at": {
"calculation": "fct.date_add(created_at)",
"type": "dimension"
},
"invite_at_date": {
"calculation": "fct.date_date_add(created_at)",
"type": "dimension"
},
"referrer_id": {
"type": "dimension"
},
"referee_id": {
"calculation": "friend_id",
"type": "dimension"
}
},
"criteria": ""
},
"invite_credit": {
"name": "invite_credit",
"alias": "ivt_crt",
"field": {
"dimension": {
"created_at": {
"calculation": "fct.date_add(created_at)",
"alias": "created_at"
},
"created_at_date": {
"calculation": "fct.date_date_add(created_at",
"alias": "created_at_date"
},
"invite_id": {
"calculation": "invite_id",
"alias": "invite_id"
},
"credit": {
"calculation": "credit",
"alias": "credit"
},
"status": {
"calculation": "status",
"alias": "status"
},
"type": {
"calculation": "type",
"alias": "type"
},
"booking_id": {
"calculation": "tybooking_idpe",
"alias": "booking_id"
},
"desc": {
"calculation": "`desc`",
"alias": "`desc`"
}
},
"measure": "",
"criteria": {
"default": "invite_id is not null"
}
}
}
},
"join": {
},
"function": {
"date_add": "sql.date_add($field, interval + 7 hour)",
"date_date_add": "sql.date(fct.date_add)"
}
}
# ## class
class model:
    """An ETL model: table, join and function definitions loaded from a dict
    (see model_dict above) or from JSON."""

    def __init__(self, model: dict):
        self.tables = model['table']
        self.joins = model['join']
        self.functions = model['function']

    @classmethod
    def from_json_file(cls, file_path):
        """Load a model from a JSON file on disk."""
        # Parse inside the with-block: the original closed the handle
        # before from_json() tried to read from it.
        with open(file_path) as f:
            return cls.from_json(f)

    @classmethod
    def from_json(cls, json):
        """Load a model from an open file-like object containing JSON."""
        # The parameter shadows the json module, so bind the module under an
        # alias; the original's json.load(json) called .load on the file
        # object itself and raised AttributeError.
        import json as _json
        model_dict = _json.load(json)
        return cls(model_dict)

    def load_table(self, table_name):
        """Return (name, alias, fields, criteria) for Table2.from_model()."""
        self.table_alias = self.tables[table_name]['alias']
        fields = self.load_field_from_dict(self.tables[table_name]['field'])
        criteria = self.tables[table_name]['criteria']
        return (table_name, self.table_alias, fields, criteria)

    def load_field_from_dict(self, field_dict):
        """Build a {name: Field_piece} mapping from a model field dict."""
        fields = {}
        for field in field_dict.keys():
            # "calculation" is optional; default to None when absent.
            calculation = field_dict[field]['calculation'] if "calculation" in field_dict[field].keys() else None
            type = field_dict[field]['type']
            fields[field] = self.load_field(field, calculation, type, self.table_alias)
        return fields

    def load_field(self, field_name, calculation, type, table_alias):
        """Wrap one field definition in a Field_piece."""
        return Field_piece(field_name, calculation, type, table_alias)

    def load_join(self, join_name):
        """Placeholder: joins are not implemented yet."""
        return None

    def load_function(self, function_name):
        """Placeholder: functions are not implemented yet."""
        return None
# ## test model
# Build a model from the literal dict above and materialise one table from it.
etl_model = model(model_dict)
invites = Table2.from_model(etl_model,"invites")
# The "invite_id" field should carry the calculation "id" from the model.
invites._field['invite_id'].calculation
| dev/hermesql dev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## ThinkDSP
#
# This notebook contains solutions to exercises in Chapter 3: Non-periodic signals
#
# Copyright 2015 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)
# +
from __future__ import print_function, division
# %matplotlib inline
import thinkdsp
import thinkplot
import numpy as np
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# -
# ### Exercise
#
# Run and listen to the examples in chap03.ipynb. In the leakage example, try replacing the Hamming window with one of the other windows provided by NumPy, and see what effect they have on leakage.
#
# See http://docs.scipy.org/doc/numpy/reference/routines.window.html
#
#
# ### Solution
#
# Here's the leakage example:
# +
signal = thinkdsp.SinSignal(freq=440)
# 30.25 periods: the non-integer number of cycles is what causes spectral leakage.
duration = signal.period * 30.25
wave = signal.make_wave(duration)
spectrum = wave.make_spectrum()
spectrum.plot(high=880)
thinkplot.config(xlabel='Frequency (Hz)')
# -
# The following figure shows the effect of 4 different windows.
# +
thinkplot.preplot(4)

# Apply each window function to a fresh copy of the wave and overlay the
# resulting spectra so the leakage suppression can be compared directly.
for window_func in [np.bartlett, np.blackman, np.hamming, np.hanning]:
    wave = signal.make_wave(duration)
    wave.ys *= window_func(len(wave.ys))

    spectrum = wave.make_spectrum()
    spectrum.plot(high=880, label=window_func.__name__)

thinkplot.config(xlabel='Frequency (Hz)', legend=True)
# -
# All four do a good job of reducing leakage. The Bartlett filter leaves some residual "ringing". The Hamming filter dissipates the least amount of energy.
# ### Exercise
#
# Write a class called `SawtoothChirp` that extends `Chirp` and overrides evaluate to generate a sawtooth waveform with frequency that increases (or decreases) linearly.
# +
import math

PI2 = 2 * math.pi


class SawtoothChirp(thinkdsp.Chirp):
    """A chirp whose waveform is a sawtooth instead of a sinusoid."""

    def _evaluate(self, ts, freqs):
        """Evaluate the signal at the given times.

        ts: float array of times
        freqs: float array of frequencies during each interval
        """
        # Integrate frequency over time to obtain phase, then keep only the
        # fractional part of each cycle, which yields the sawtooth shape.
        intervals = np.diff(ts)
        phase = np.cumsum(PI2 * freqs * intervals)
        phase = np.insert(phase, 0, 0)
        frac, _ = np.modf(phase / PI2)
        return thinkdsp.normalize(thinkdsp.unbias(frac), self.amp)
# -
# Here's what it sounds like.
# Render one second at a deliberately low frame rate so aliasing is audible.
signal = SawtoothChirp(start=220, end=880)
wave = signal.make_wave(duration=1, framerate=10000)
wave.apodize()
wave.make_audio()
# And here's the spectrogram.
sp = wave.make_spectrogram(1024)
sp.plot()
thinkplot.config(xlabel='Time (s)', ylabel='Frequency (Hz)')
# At a relatively low frame rate, you can see the aliased harmonics bouncing off the folding frequency. And you can hear them as a background hiss. If you crank up the frame rate, they go away.
#
# By the way, if you are a fan of the original Star Trek series, you might recognize the sawtooth chirp as the red alert signal:
thinkdsp.read_wave('tos-redalert.wav').make_audio()
# ### Exercise
#
# Make a sawtooth chirp that sweeps from 2500 to 3000 Hz, then
# make a wave with duration 1 and framerate 20 kHz. Draw a sketch of
# what you think the spectrum will look like. Then plot the
# spectrum and see if you got it right.
#
# ### Solution
#
# Since the fundamental sweeps from 2500 to 3000 Hz, I expect to see something like the
# Eye of Sauron in that range. The first harmonic sweeps from 5000 to 6000 Hz, so I expect a shorter tower in that range, like the Outhouse of Sauron. The second harmonic sweeps from 7500 to 9000 Hz, so I expect something even shorter in that range, like the Patio of Sauron.
#
# The other harmonics get aliased all over the place, so I expect to see some energy at all other frequencies. This distributed energy creates some interesting sounds.
# With a 20 kHz frame rate the folding frequency is 10 kHz, so the upper
# harmonics alias and smear energy across the whole spectrum.
signal = SawtoothChirp(start=2500, end=3000)
wave = signal.make_wave(duration=1, framerate=20000)
wave.make_audio()
wave.make_spectrum().plot()
# ### Exercise
#
# In musical terminology, a "glissando" is a note that slides from one pitch to another, so it is similar to a chirp. Find or make a recording of a glissando and plot its spectrogram.
#
# One suggestion: <NAME>'s *Rhapsody in Blue* starts with a famous clarinet glissando; you can download a recording from http://archive.org/details/rhapblue11924.
wave = thinkdsp.read_wave('72475__rockwehrmann__glissup02.wav')
wave.make_audio()
wave.make_spectrogram(512).plot(high=5000)
# ### Exercise
#
# A trombone player can play a glissando by extending the trombone slide while blowing continuously. As the slide extends, the total length of the tube gets longer, and the resulting pitch is inversely proportional to length.
# Assuming that the player moves the slide at a constant speed, how
# does frequency vary with time?
#
# Write a class called `TromboneGliss` that extends `Chirp` and
# provides `evaluate`. Make a wave that simulates a trombone
# glissando from F3 down to C3 and back up to F3. C3 is 262 Hz; F3 is
# 349 Hz.
#
# Plot a spectrogram of the resulting wave. Is a trombone glissando
# more like a linear or exponential chirp?
class TromboneGliss(thinkdsp.Chirp):
    """Chirp whose tube length, rather than frequency, varies linearly in time."""

    def evaluate(self, ts):
        """Evaluates the signal at the given times.

        ts: float array of times
        returns: float wave array
        """
        # The slide moves at constant speed, so interpolate linearly in
        # length space (length is proportional to 1/frequency) and invert.
        len_start = 1.0 / self.start
        len_end = 1.0 / self.end
        tube_lengths = np.linspace(len_start, len_end, len(ts) - 1)
        return self._evaluate(ts, 1 / tube_lengths)
# Here's the first part of the wave:
low = 262   # C3 (Hz)
high = 349  # F3 (Hz)
# Downward glissando: F3 -> C3.
signal = TromboneGliss(high, low)
wave1 = signal.make_wave(duration=1)
wave1.apodize()
wave1.make_audio()
# And the second part:
# Upward glissando: C3 -> F3.
signal = TromboneGliss(low, high)
wave2 = signal.make_wave(duration=1)
wave2.apodize()
wave2.make_audio()
# Putting them together:
wave = wave1 | wave2
wave.make_audio()
# Here's the spectrogram:
sp = wave.make_spectrogram(1024)
sp.plot(high=1000)
# ### Exercise
#
# Make or find a recording of a series of vowel sounds and look at the
# spectrogram. Can you identify different vowels?
wave = thinkdsp.read_wave('87778__marcgascon7__vocals.wav')
wave.make_audio()
wave.make_spectrogram(1024).plot(high=1000)
# The stripe across the bottom is probably background noise. The peaks in the spectrogram are called "formants".
#
# In general, vowel sounds are distinguished by the amplitude ratios of the first two formants relative to the fundamental. For more, see https://en.wikipedia.org/wiki/Formant
#
# We can see the formants more clearly by selecting a segment during 'ah'.
# +
high = 1000  # NOTE: rebinds `high` (previously the F3 frequency) as a plot limit
thinkplot.preplot(5)
segment = wave.segment(start=1, duration=0.25)
segment.make_spectrum().plot(high=high)
# -
# The fundamental is near 100 Hz. The next highest peaks are at 200 Hz and 700 Hz. People who know more about this than I do can identify vowels by looking at spectrums, but I can't.
# The 'eh' segment has a high-amplitude formant near 500 Hz.
segment = wave.segment(start=2.2, duration=0.25)
segment.make_spectrum().plot(high=high)
# The 'ih' segment has no high frequency components.
segment = wave.segment(start=3.5, duration=0.25)
segment.make_spectrum().plot(high=high)
# The 'oh' segment has a high-amplitude formant near 500 Hz, even higher than the fundamental.
segment = wave.segment(start=5.1, duration=0.25)
segment.make_spectrum().plot(high=high)
# The 'oo' segment has a high-amplitude formant near 300 Hz and no high-frequency components
segment = wave.segment(start=6.5, duration=0.25)
segment.make_spectrum().plot(high=high)
| ThinkDSP-master/code/chap03soln.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append('..')
from lya.data import get_data_transfer_function, plot_pk
# Measured transfer-function data points (fields: x, y, y_unc).
data = get_data_transfer_function()
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# Quick look at the data with error bars.
plt.errorbar(data.x, data.y, data.y_unc);
# +
from lya.analysis import objective
def log_prob(pars, data):
    """Log posterior for the (alpha, beta, gamma) transfer-function fit.

    pars: length-3 parameter vector
    data: dataset understood by lya.analysis.objective
    returns: -0.5 * objective(pars, data) inside the prior box;
        -np.inf outside it or when the objective evaluates to NaN.
    """
    # Flat priors: reject any draw outside the allowed box.
    bounds = [(0, 0.5), (0, 10), (-10, 0)]
    for value, (lo, hi) in zip(pars, bounds):
        if value < lo or value > hi:
            return -np.inf
    lnp = -0.5 * objective(pars, data)
    # Guard against numerical failures inside the objective.
    return -np.inf if np.isnan(lnp) else lnp
# +
import numpy as np

ndim = 3       # parameters: alpha, beta, gamma
nwalkers = 64
p0_fiducial = [0.03, 5, -5]
# Scatter the walkers in a small cube around the fiducial point.
p0 = p0_fiducial + np.random.rand(nwalkers, ndim)*0.1 # initial guess for each of the walkers
# -
# Sanity check: every starting point must have a finite log-probability.
np.isfinite([log_prob(p, data) for p in p0]).all()
# +
import emcee

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=[data])
# Burn-in: run 300 steps, then discard them but keep the final walker state.
state = sampler.run_mcmc(p0, 300)
sampler.reset()
# -
# Production run from the burned-in state.
sampler.run_mcmc(state, 1000);
samples = sampler.get_chain(flat=True)
# +
from corner import corner

corner(samples, labels=['alpha', 'beta', 'gamma']);
# -
# compare to Fig 4 of Murgia+ (2018)
plt.hist(np.abs(samples[:, 1] / samples[:, 2]), bins=np.linspace(0, 12, 100));
| notebooks/fit_abg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import scipy as sp
import math
import sympy as sym
import time
# +
# Rows of the priors table: row 0 picks the distribution shape for that
# parameter (0=uniform, 1=log-uniform, 2=normal, 3=log-normal); the other
# rows hold the hyperparameters of each candidate distribution.
PRIOR_ROWS = ['dist. shape', 'uni-low', 'uni-up', 'log-uni-low', 'log-uni-up',
              'norm-cen', 'norm-spread', 'log-norm-cen', 'log-norm-spread']

# One column per fitted quantity; the parameter table additionally carries
# the object names.
PARAM_COLS = ['name_1', 'mass_1', 'name_2', 'mass_2', 'sma_2', 'ecc_2',
              'aop_2', 'inc_2', 'lan_2', 'mea_2', 'name_3', 'mass_3',
              'sma_3', 'ecc_3', 'aop_3', 'inc_3', 'lan_3', 'mea_3',
              'j2r2_1', 'c22r2_1', 'spaop_1', 'spinc_1', 'splan_1', 'sprate_1']
PRIOR_COLS = [c for c in PARAM_COLS if not c.startswith('name')]

priorsDF = pd.DataFrame(columns=PRIOR_COLS, index=PRIOR_ROWS)

# Set Parameters, or known data of objects (Haumea system: masses in kg,
# semi-major axes in km, angles in degrees).
observed = {
    'name_1': 'Haumea', 'name_2': 'Hi\'aka', 'name_3': 'Namaka',
    'mass_1': 4.006*10**21,
    'mass_2': 1.79*10**19,
    # BUG FIX: was 1.79*10*18 (== 322.2 kg); the priors below use 1.79*10**18.
    'mass_3': 1.79*10**18,
    'sma_2': 49880, 'sma_3': 25657,
    'ecc_2': 0.0513, 'ecc_3': 0.249,
    'aop_2': 154.1, 'aop_3': 178.9,
    'inc_2': 126.356, 'inc_3': 113.013,
    'lan_2': 206.766, 'lan_3': 205.016,
    'mea_2': 152.8, 'mea_3': 178.5,
    'j2r2_1': 1, 'c22r2_1': 12,
    # Spin state of the primary is not characterized yet (placeholders).
    'spaop_1': 'idk', 'spinc_1': 'irdk', 'splan_1': 'isdk', 'sprate_1': 'wotfam',
}
allParamsDF = pd.DataFrame([observed], columns=PARAM_COLS, index=[0])

# Set Priors: one 9-element vector per column, in PRIOR_ROWS order:
# [shape, uni-low, uni-up, log-uni-low, log-uni-up,
#  norm-cen, norm-spread, log-norm-cen, log-norm-spread]
prior_table = {
    'mass_1': [2, 0, 0, 0, 0, 4.006*10**21, 0.0040*10**21, 4.006*10**21, 0.0040*10**21],
    'mass_2': [2, 0, 0, 0, 0, 1.79*10**19, 0.11*10**19, 1.79*10**19, 0.11*10**19],
    'mass_3': [2, 0, 0, 0, 0, 1.79*10**18, 1.48*10**18, 1.79*10**18, 1.48*10**18],
    'sma_2':  [2, 0, 0, 0, 0, 49880, 198, 49880, 198],
    'sma_3':  [2, 0, 0, 0, 0, 25657, 91, 25657, 91],
    'ecc_2':  [2, 0, 1, 0, 1, 0.0513, 0.0078, 0.0513, 0.0078],
    'ecc_3':  [2, 0, 1, 0, 1, 0.249, 0.015, 0.249, 0.015],
    'aop_2':  [2, 0, 180, 0, 180, 154.1, 5.8, 154.1, 5.8],
    'aop_3':  [2, 0, 180, 0, 180, 178.9, 2.3, 178.9, 2.3],
    'inc_2':  [2, 0, 180, 0, 180, 126.356, 0.064, 126.356, 0.064],
    'inc_3':  [2, 0, 180, 0, 180, 113.013, 0.075, 113.013, 0.075],
    'lan_2':  [2, 0, 1, 0, 1, 206.766, 0.033, 206.766, 0.033],
    'lan_3':  [2, 0, 1, 0, 1, 205.016, 0.228, 205.016, 0.228],
    'mea_2':  [2, 0, 1, 0, 1, 152.8, 6.1, 152.8, 6.1],
    'mea_3':  [2, 0, 1, 0, 1, 178.5, 1.7, 178.5, 1.7],
}
for col, rows in prior_table.items():
    # dtype=object keeps ints as ints, matching the original cell-by-cell
    # assignments (so the tab-separated CSV output is unchanged).
    priorsDF[col] = pd.Series(rows, index=priorsDF.index, dtype=object)

# Gravity-field / spin columns of the primary: single placeholder values.
priorsDF['j2r2_1'] = 1
priorsDF['c22r2_1'] = 12
priorsDF['spaop_1'] = 'idk'
priorsDF['spinc_1'] = 'irdk'
priorsDF['splan_1'] = 'isdk'
priorsDF['sprate_1'] = 'wotfam'

# print(allParamsDF.transpose(),'\n\n',priorsDF.transpose())
priorsDF.to_csv(r'Priors.csv', sep='\t', index=True)
priorsDF.transpose().to_csv(r'TransposedPriors.csv', sep='\t', index=True)
# -
'''
NAME:
mm_priors
PURPOSE:
Given a dataframe of priors, and a dataframe of observed parameters for the same data,
this function calculates the likelihood of the distribution.
CALLING SEQUENCE:
totalLogProb = mm_priors(priors, params)
INPUTS
priors - a dataframe of 9 rows which holds the prior for the data points given, and the distribution shape wanted.
params - A single rowed dataframe of the actual observed parameters for the object.
OUTPUTS:
totalLogProb - The total Log of the probability of all the priors against the parameters
'''
def mm_priors(priors, params):
    """Total log prior probability of `params` under `priors`.

    priors: DataFrame with 9 rows per column; row 0 is the distribution
        shape (0=uniform, 1=log-uniform, 2=normal, 3=log-normal) and rows
        1-8 are the hyperparameters (uni-low, uni-up, log-uni-low,
        log-uni-up, norm-cen, norm-spread, log-norm-cen, log-norm-spread).
    params: single-row DataFrame of observed values, same column names.
    returns: sum of the natural logs of each parameter's (un-normalized)
        prior density; -inf when a value falls outside a uniform bound.
    """
    columnList = list(priors)
    totalLogProb = 0
    allProbs = []
    numNaNs = 0  # unobserved (NaN) parameters are skipped, not penalized

    # Evaluate the probability density of the requested shape per column.
    for i in columnList:
        shape = priors[i][0]
        value = params[i][0]
        # Uniform: 1 inside the open interval (uni-low, uni-up), else 0.
        if shape == 0:
            if np.isnan(value):
                # BUG FIX: was `np.isnan(x[count])` with `x` undefined.
                numNaNs += 1
            elif priors[i][1] < value < priors[i][2]:
                allProbs.append(1)
            else:
                allProbs.append(0)
        # Log-uniform. BUG FIX: this branch previously re-tested `== 0`,
        # so shape 1 was unreachable and fell through to "Invalid input".
        elif shape == 1:
            if np.isnan(value):
                numNaNs += 1
            elif priors[i][3] < value < priors[i][4]:
                allProbs.append(1)
            else:
                allProbs.append(0)
        # Normal. BUG FIX: center (row 5) and spread (row 6) were swapped.
        elif shape == 2:
            if not np.isnan(value):
                allProbs.append(np.exp(-1/2 * ((value - priors[i][5]) / priors[i][6])**2))
        # Log-normal. BUG FIX: center (row 7) and spread (row 8) were swapped.
        elif shape == 3:
            if not np.isnan(value):
                allProbs.append(
                    np.exp(-1/2 * (((np.log(value) - priors[i][7])**2) / (priors[i][8])**2)) / value)
        else:
            print('Invalid input for: ', i)

    # Sum the log densities; log(0) contributes -inf as intended.
    for x in allProbs:
        totalLogProb = totalLogProb + np.log(x)
    return totalLogProb
# Evaluate the log prior probability of the adopted system parameters.
dist = mm_priors(priorsDF, allParamsDF)
# BUG FIX: mm_priors sums np.log (natural log) terms, so the total is an
# exponent of e, not of 10 as the old message claimed.
print('Total Probability: e^(', dist, ')')
| Priors Code/mm_priors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# NOTE(review): these Windows paths are not raw strings; \I, \d, \p are
# invalid escape sequences (DeprecationWarning) and \' is an escaped quote.
# Prefer raw strings (r'...') or forward slashes. Paths left untouched here.
df = pd.read_csv('C:\Inna\'s files\data\purchases.csv')
print(df)
df.to_csv('C:\Inna\'s files\data\my_data.csv')
import pandas as pd
df = pd.read_json('C:\Inna\'s files\data\purchases.json')
df.to_json('C:\Inna\'s files\data\my_data.json')
# JSON string of the dataframe, kept in memory rather than written to disk.
serialized_purchases = df.to_json()
| CSV_and_JSON.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kmeans
# Source: http://stanford.edu/~cpiech/cs221/handouts/kmeans.html
#
# K-Means is one of the most popular "clustering" algorithms.K-Means finds the best centroids by alternating between (1) assigning data points to clusters based on the current centroids (2) chosing centroids (points which are the center of a cluster) based on the current assignment of data points to clusters.
# ## The Algorithm
# In the clustering problem, we are given a training set $x^{(1)},...,x^{(m)}$, and want to group the data into a few cohesive "clusters." Here, we are given feature vectors for each data point $x^{(i)} \in R^n$ as usual; but no labels $y^{(i)}$ (making this an unsupervised learning problem). Our goal is to predict $k$ centroids and a label $c^{(i)}$ for each datapoint. The k-means clustering algorithm is as follows:
# 1. Initialize cluster centroids $u_1,...,u_k \in R^n$ randomly.
# 2. Repeat until Convergence:
#
# (1) For every $i$, set $c^{(i)}:=arg{min}_j{||x^{(i)}-u_j||}^2$
#
# (2) For each $j$, set $u_j:=\frac{\sum_{i=1}^m {1{(c^{(i)}=j)}x^{(i)}}}{\sum_{i=1}^m {1{(c^{(i)}=j)}}}$
# <img src="./kmeans_image/1.png" weight=640 height=320 />
# <img src="./kmeans_image/2.png" weight=640 height=320 />
# <img src="./kmeans_image/3.png" weight=640 height=320 />
# <img src="./kmeans_image/4.png" weight=640 height=320 />
# <img src="./kmeans_image/5.png" weight=640 height=320 />
# <img src="./kmeans_image/6.png" weight=640 height=320 />
import numpy as np
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# calculate Euclidean distance
def euclDistance(vector1, vector2):
    """Return the Euclidean (L2) distance between two equal-length vectors."""
    delta = vector2 - vector1
    return np.sqrt(np.multiply(delta, delta).sum())
# init centroids with random samples
def initCentroids(dataSet, k):
    """Pick k distinct random samples of dataSet as the initial centroids.

    dataSet: (numSamples, dim) array of samples
    k: number of clusters (must be <= numSamples)
    returns: (k, dim) array — a copy, safe to mutate during clustering
    """
    numSamples = dataSet.shape[0]
    # BUG FIX: the original uniform draw could repeat an index, yielding
    # duplicate centroids and, later, empty clusters (np.mean of an empty
    # slice is NaN). replace=False guarantees k distinct starting points.
    index = np.random.choice(numSamples, k, replace=False)
    return dataSet[index]
# show your cluster (only available with 2-D data)
def showCluster(dataSet, k, centroids, clusterAssment):
    """Scatter-plot a 2-D clustering: samples colored by cluster, centroids as diamonds.

    dataSet: (numSamples, 2) array of points
    k: number of clusters (at most 10 — one marker style per cluster)
    centroids: (k, 2) array of cluster centers
    clusterAssment: (numSamples, 1) array; clusterAssment[i, 0] is sample i's cluster
    returns: 1 on argument errors, otherwise None (shows a matplotlib figure)
    """
    numSamples, dim = dataSet.shape
    if dim != 2:
        print ("Sorry! I can not draw because the dimension of your data is not 2!")
        return 1
    # One matplotlib style string per cluster, e.g. 'or' = red circles.
    mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
    if k > len(mark):
        print ("Sorry! Your k is too large!")
        return 1
    # draw all samples
    for i in range(numSamples):
        # assign colors for samples
        markIndex = int(clusterAssment[i, 0])
        plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex])
    # Diamond markers ('D?') for the centroids.
    mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']
    # draw the centroids
    for i in range(k):
        plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize = 12)
    plt.show()
# k-means cluster
def kmeans(dataSet, k):
    """Cluster dataSet into k groups with Lloyd's algorithm.

    dataSet: (numSamples, dim) array-like
    k: number of clusters
    returns: (centroids, clusterAssment) where clusterAssment[i, 0] is the
        cluster index assigned to sample i

    Plots the intermediate clustering for the first 5 iterations via
    showCluster (2-D data only).

    BUG FIX: the original body contained encoding-corrupted comment lines
    whose continuations were no longer commented (a SyntaxError) plus a
    stray no-op `np.nan` statement; both removed, logic unchanged.
    """
    numSamples = dataSet.shape[0]
    # clusterAssment[i, 0] stores which cluster sample i belongs to.
    clusterAssment = np.zeros([numSamples, 1])
    clusterChanged = True

    ## step 1: init centroids
    centroids = initCentroids(dataSet, k)

    epoch = 0
    while clusterChanged:
        clusterChanged = False
        ## for each sample
        for i in range(numSamples):
            minDist = float('inf')
            minIndex = 0
            ## step 2: find the centroid closest to sample i
            for j in range(k):
                distance = euclDistance(centroids[j, :], dataSet[i, :])
                if distance < minDist:
                    minDist = distance
                    minIndex = j
            ## step 3: update the sample's cluster; keep iterating while
            ## any sample still switches cluster
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
                clusterAssment[i, :] = minIndex
        ## step 4: recompute each centroid as the mean of its members
        for j in range(k):
            # np.nonzero yields the row indices currently assigned to cluster j.
            # NOTE(review): an empty cluster makes np.mean return NaN (as in
            # the original); initCentroids' distinct seeds make this unlikely.
            pointsInCluster = dataSet[np.nonzero(clusterAssment[:, 0] == j)[0], :]
            centroids[j, :] = np.mean(pointsInCluster, axis=0)
        # Visualize the first few iterations only.
        if epoch < 5:
            print('epoch: ' + str(epoch))
            showCluster(dataSet, k, centroids, clusterAssment)
        epoch = epoch + 1

    print ('Congratulations, cluster complete!')
    return centroids, clusterAssment
# k-means cluster
def kmeans_simple(dataSet, k):
    """Vectorized Lloyd's algorithm: same contract as kmeans(), no plotting.

    dataSet: (numSamples, dim) array-like
    k: number of clusters
    returns: (centroids, clusterAssment) with clusterAssment shaped (numSamples, 1)
    """
    numSamples = dataSet.shape[0]
    clusterAssment = np.zeros([numSamples, 1])
    ## step 1: init centroids
    centroids = initCentroids(dataSet, k)
    changed = True
    while changed:
        # All pairwise sample-to-centroid distances in a single call.
        allDists = cdist(dataSet, centroids)
        # Index of the nearest centroid per sample, as a column vector.
        nearest = np.argmin(allDists, 1).reshape(-1, 1)
        # Converged once no sample switches cluster.
        changed = bool((nearest != clusterAssment).any())
        if changed:
            clusterAssment = nearest
            # Recompute each centroid as the mean of its members.
            for j in range(k):
                members = dataSet[np.nonzero(clusterAssment == j)[0], :]
                centroids[j, :] = np.mean(members, 0)
    print ('Congratulations, cluster complete!')
    return centroids, clusterAssment
def customReadFile(fileName):
    """Read a tab-separated file of 2-D points into an np.mat.

    fileName: path to a text file with one "x<TAB>y" pair per line
    returns: np.mat of shape (numSamples, 2)
    """
    dataSet = []
    # Context manager guarantees the handle is closed even if a line is bad.
    with open(fileName, 'r') as fileIn:
        for line in fileIn:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines / a trailing newline
            lineArr = line.split('\t')
            dataSet.append([float(lineArr[0]), float(lineArr[1])])
    return np.mat(dataSet)
# +
## step 1: load data
fileIn = '../input/testSet.txt'
print ('Step 1: Load data ' + fileIn + '...')
dataSet = customReadFile(fileIn)
print('Number of samples: ' + str(dataSet.shape[0]))

## step 2: clustering...
print ("Step 2: clustering..." )
k = 4
# Alternatives: the vectorized kmeans_simple or sklearn's KMeans (commented out).
centroids, clusterAssment = kmeans(dataSet, k)
# centroids, clusterAssment = kmeans_simple(dataSet, k)
# clusteringResult = KMeans(n_clusters=k).fit(dataSet)
# clusterAssment = np.reshape(clusteringResult.labels_, [-1, 1])
# centroids = clusteringResult.cluster_centers_

## step 3: show the result
print ("Step 3: show the result..." )
showCluster(dataSet, k, centroids, clusterAssment)
# -
# # PlayGround
# http://stanford.edu/class/ee103/visualizations/kmeans/kmeans.html
# # Code
# https://github.com/wojiushimogui/kmeans
| k-means.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Thai2Vec Embeddings Examples
#
# We use the embeddings from `v0.1` since it was trained specifically for word2vec as opposed to latter versions which garner to classification. The `thai2vec.bin` 51,556 word embeddings of 300 dimensions, in descending order by their frequencies (See `thai2vec.vocab`). The files are in word2vec format readable by `gensim`. Most common applications include word vector visualization, word arithmetic, word grouping, cosine similarity and sentence or document vectors. For sample code, see `thwiki_lm/word2vec_examples.ipynb`.
# ## Imports
# +
# #uncomment if you are running from google colab
# # !pip install sklearn_crfsuite
# # !pip install https://github.com/PyThaiNLP/pythainlp/archive/dev.zip
# # !pip install fastai==1.0.46
# # !pip install emoji
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from pythainlp.tokenize import word_tokenize
from gensim.models import KeyedVectors
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import dill as pickle
import pandas as pd
# Directory holding the pretrained thai2vec artifacts (embeddings, t-SNE cache).
model_path = 'thwiki_data/models/'
# -
#load into gensim
# model = KeyedVectors.load_word2vec_format(f'{model_path}thai2vec.bin',binary=True)
from pythainlp import word_vector
model = word_vector.get_model()
#create dataframe
# Map every vocabulary word to its embedding vector, then tabulate.
thai2dict = {}
for word in model.index2word:
    thai2dict[word] = model[word]
thai2vec = pd.DataFrame.from_dict(thai2dict,orient='index')
thai2vec.head(10)
# Using t-SNE, we can compress the 300 dimensions of each word into a 2D plane and plot their relationships.
# +
# Vocabulary in descending frequency order; row order matches thai2plot.
labels = model.index2word
# #tnse
# tsne = TSNE(n_components=2, init='pca', n_iter=1000)
# thai2plot = tsne.fit_transform(thai2vec)
# pickle.dump(thai2plot,open(f'{model_path}thai2plot.pkl','wb'))
# Load the cached 2-D t-SNE projection instead of recomputing it.
# NOTE(review): open() without a context manager leaks the file handle.
thai2plot = pickle.load(open(f'{model_path}thai2plot.pkl','rb'))
# -
labels[:10]
# +
#stolen from https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27
#stolen from https://blog.manash.me/how-to-use-pre-trained-word-vectors-from-facebooks-fasttext-a71e6d55f27
def plot_with_labels(low_dim_embs, labels, filename=None, figsize=(10,10),
                     axis_lims = None):
    """Scatter-plot 2-D embeddings with a Thai-capable label beside each point.

    low_dim_embs: (n, 2) array of projected embeddings
    labels: sequence of at most n strings, one per plotted point
    filename: if given, the figure is also saved to this path
    figsize: matplotlib figure size in inches
    axis_lims: optional [xmin, xmax, ymin, ymax] forwarded to plt.axis
    """
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=figsize)  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        # Thai-script font, loaded from the working directory; the f-string
        # has no placeholders (plain path).
        prop = fm.FontProperties(fname=f'THSarabunNew.ttf',size=20)
        plt.annotate(label,
                     fontproperties=prop,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    if axis_lims is not None: plt.axis(axis_lims)
    if filename: plt.savefig(filename)
# Plot a mid-frequency slice of the vocabulary.
plot_with_labels(thai2plot[200:500],labels[200:500],axis_lims = [0,30,0,30])
# -
# ## Word Arithmetic
# You can do simple "arithmetic" with words based on the word vectors such as:
# * àžàž¹à¹àž«àžàžŽàž + àžàž£àž°àž£àž²àžàž² - àžàž¹à¹àžàž²àž¢ = àžàž£àž°àž£àž²àžàžŽàžàžµ
# * àžàž²àž¢àžàž£àž±àžàž¡àžàžàž£àžµ - àžàž³àžàž²àž = àžàž£àž°àžàž²àžàž²àžàžŽàžàžàžµ
# * àžàžŽà¹àžàžà¹àž² + à¹àžàž£àž²àž = à¹àžà¹àžà¹àžªàž²àž£à¹
#word arithmetic
model.most_similar_cosmul(positive=['àžàž£àž°àž£àž²àžàž²','àžàž¹à¹àž«àžàžŽàž'], negative=['àžàž¹à¹àžàž²àž¢'])
sample_words = ['àžàž¹à¹àž«àžàžŽàž','àžàž£àž°àž£àž²àžàž²','àžàž¹à¹àžàž²àž¢','àžàž£àž°àž£àž²àžàžŽàžàžµ']
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
model.most_similar_cosmul(positive=['àžàž²àž¢àžàž£àž±àžàž¡àžàžàž£àžµ'],negative=['àžàž³àžàž²àž'])
sample_words = ['àžàž²àž¢àžàž£àž±àžàž¡àžàžàž£àžµ','àžàž³àžàž²àž','àž£àžàžàžàž²àž¢àžàž£àž±àžàž¡àžàžàž£àžµ']
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
#word arithmetic
model.most_similar_cosmul(positive=['àžªàž±àžàž§à¹','àžàž·àž'], negative=[])
sample_words = ['àžªàž±àžàž§à¹','àžàž·àž','àžªàžŽà¹àžàž¡àžµàžàžµàž§àžŽàž']
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
# + [markdown] heading_collapsed=true
# ## Doesn't Match
# + [markdown] hidden=true
# It can also be used to do word groupings. For instance:
# * àžàž²àž«àž²àž£à¹àžà¹àž² àžàž²àž«àž²àž£àžªàž±àžàž§à¹ àžàž²àž«àž²àž£à¹àž¢à¹àž àžàž²àž«àž²àž£àžàž¥àž²àžàž§àž±àž - àžàž²àž«àž²àž£àžªàž±àžàž§à¹ is type of food whereas others are meals in the day
# * àž¥àž²àž àžàž¶àž àžàž¹àž àžàž±àž - àžàž±àž is pushing while the rest is pulling.
# * àžàž àžàž±àž àžàžŽàž à¹àžàžµà¹àž¢àž§ - àžàž is not verbs for the eating process
# Note that this could be relying on a different "take" than you would expect. For example, you could have answered àž¥àž¹àžà¹àžàž¢ in the second example because it is the one associated with male gender.
# + hidden=true
model.doesnt_match("àžàž²àž«àž²àž£à¹àžà¹àž² àžàž²àž«àž²àž£àžªàž±àžàž§à¹ àžàž²àž«àž²àž£à¹àž¢à¹àž àžàž²àž«àž²àž£àžàž¥àž²àžàž§àž±àž".split())
# + hidden=true
sample_words = "àžàž²àž«àž²àž£à¹àžà¹àž² àžàž²àž«àž²àž£àžªàž±àžàž§à¹ àžàž²àž«àž²àž£à¹àž¢à¹àž àžàž²àž«àž²àž£àžàž¥àž²àžàž§àž±àž".split()
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
# + hidden=true
model.doesnt_match("àž¥àž²àž àžàž¶àž àžàž¹àž àžàž±àž".split())
# + hidden=true
sample_words = "àž¥àž²àž àžàž¶àž àžàž¹àž àžàž±àž".split()
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
# + hidden=true
model.doesnt_match("à¹àž¡àž§ àž«àž¡àž² àž«àž¡àž¹ àž«àž¡àž".split())
# + hidden=true
sample_words = "à¹àž¡àž§ àž«àž¡àž² àž«àž¡àž¹ àž«àž¡àž".split()
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
# -
# ## Cosine Similarity
print('China + Beijing:', model.similarity('àžàž±àžàžàžŽà¹àž', 'àžàžµàž'))
print('Italy + Rome:', model.similarity('à¹àž£àž¡','àžàžŽàžàž²àž¥àžµ'))
print('Beijing + Rome:', model.similarity('à¹àž£àž¡', 'àžàž±àžàžàžŽà¹àž'))
print('Italy + Beijing:', model.similarity('àžàž±àžàžàžŽà¹àž', 'àžàžŽàžàž²àž¥àžµ'))
print('China + Rome:', model.similarity('à¹àž£àž¡','àžàžµàž'))
print('China + Italy:', model.similarity('àžàžŽàžàž²àž¥àžµ','àžàžµàž'))
sample_words = "àžàž±àžàžàžŽà¹àž àžàžµàž à¹àž£àž¡ àžàžŽàžàž²àž¥àžµ à¹àžà¹àžàžµàž¢àž§ àžàžµà¹àžàžžà¹àž".split()
sample_idx = []
for word in sample_words:
sample_idx.append(labels.index(word))
sample_plot = thai2plot[sample_idx]
plot_with_labels(sample_plot,sample_words)
# ## Spellchecking
# Originally contributed by [Sakares ATV](https://github.com/sakares), adapted from [Kaggle Spell Checker using Word2vec by CPMP](https://www.kaggle.com/cpmpml/spell-checker-using-word2vec).
# +
# Rank every vocabulary word by frequency: rank 0 = most common word.
# NOTE: the `words` list is shadowed below by the `words()` tokenizer
# function; only w_rank/WORDS are used after this cell.
words = model.index2word
w_rank = {}
for i,word in enumerate(words):
    w_rank[word] = i
WORDS = w_rank  # word -> frequency rank, consumed by the spell checker below
# +
# Thai character set used to generate single-edit spelling candidates.
# BUG FIX: the original literal was corrupted by an encoding round-trip into
# a multi-line, unterminated string (a SyntaxError). Rebuilt here from the
# Unicode Thai block (U+0E01..U+0E4E), which covers the consonants, vowels,
# and tone marks the original enumerated. NOTE(review): verify against the
# upstream notebook if exact character-set parity matters.
thai_letters = ''.join(chr(cp) for cp in range(0x0E01, 0x0E4F))
def words(text):
    """Return all lower-cased word tokens found in `text`."""
    # BUG FIX: `re` is never imported at the top of this notebook, so the
    # original one-liner raised NameError; a local import keeps the cell
    # self-contained.
    import re
    return re.findall(r'\w+', text.lower())
def P(word):
    """Probability proxy for `word`: the negated frequency rank.

    Less-negative values are more probable (rank 0 = most frequent word).
    NOTE(review): an unknown word also scores 0, tying it with the most
    frequent word; harmless here because candidates() only compares words
    that are all known (or the single original word), but worth confirming.
    """
    # use inverse of rank as proxy
    # returns 0 if the word isn't in the dictionary
    return - WORDS.get(word, 0)
def correction(word):
    """Return the most probable spelling correction for `word`."""
    best = max(candidates(word), key=P)
    return best
def candidates(word):
    """Known words at edit distance 0, then 1, then 2; else `word` itself."""
    for pool in ([word], edits1(word), edits2(word)):
        found = known(pool)
        if found:
            return found
    return [word]
def known(words):
    """The subset of `words` that appear in the WORDS dictionary."""
    return {w for w in words if w in WORDS}
def edits1(word):
    """All edits that are one edit away from `word`."""
    letters = thai_letters
    # Every way to cut the word in two: (prefix, suffix).
    pairs = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = {L + R[1:] for L, R in pairs if R}
    transposes = {L + R[1] + R[0] + R[2:] for L, R in pairs if len(R) > 1}
    replaces = {L + c + R[1:] for L, R in pairs if R for c in letters}
    inserts = {L + c + R for L, R in pairs for c in letters}
    return deletes | transposes | replaces | inserts
def edits2(word):
    """All edits that are two edits away from `word` (lazy generator)."""
    return (two for one in edits1(word) for two in edits1(one))
# -
correction('àžàž±àžàžàž²')
correction('àžàž£àžŽàž')
correction('àžàž¢à¹àž²')
correction('àžàž°àžà¹àž°')
| thwiki_lm/word2vec_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
from datetime import datetime
sys.path.insert(0, "../../../../..")
sys.path.insert(0, "../../../../../../acquire")
from HUGS.Processing import search
from HUGS.Client import Process, Search, Retrieve
from Acquire.ObjectStore import datetime_to_string
from Acquire.Client import User, Drive, Service, PAR, Authorisation, StorageCreds
from datetime import datetime
import matplotlib.cm as cm
import xarray as xr
import numpy as np
import json
import ipyleaflet as ipl
import ipywidgets as ipw
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# -
# Site metadata (names and coordinates) used by the plotting widgets.
with open("../site_data/acrg_with_locations.json", "r") as f:
    site_data = json.load(f)
# +
base_url= "https://hugs.acquire-aaai.com/t"
search = Search(service_url=base_url)

# Find all CRDS-type CH4 records; an empty locations list means "everywhere".
search_terms = ["ch4"]
locations = []
results = search.search(search_terms=search_terms, locations=locations, data_type="CRDS")
# +
retrieve = Retrieve(service_url=base_url)
# One list of object-store download keys per matching record.
download_keys = {key: results[key]["keys"] for key in results}
ch4_data = retrieve.retrieve(keys=download_keys)
# -
# Please select data using the tickboxes to compare CH4 measurements from Bilsdale and Tacolneston. The daterange covered by the plot may be changed using the slider below the figure.
#
# Please allow some time for figure below to load as the notebook is performing a search of the HUGS object store and downloading the requested data.
# +
from HUGS.Interface import Interface

i = Interface()
# NOTE(review): writes a private attribute of Interface directly — confirm
# there is no public setter for site locations.
i._site_locations = site_data
box = i.plotting_interface(selected_results=results, data=ch4_data)
box
# -
| user/notebooks/openghg/example_notebooks/Figure_3/Figure_3_Data_and_footprint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # MadMiner particle physics tutorial
#
# # Part 4b: The Fisher information
#
# <NAME>, <NAME>, <NAME>, and <NAME> 2018-2019
# ## 0. Preparations
# +
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import logging
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
# %matplotlib inline
from madminer.fisherinformation import FisherInformation
from madminer.plotting import plot_fisher_information_contours_2d
# +
# MadMiner logging: terse HH:MM timestamps, INFO level for madminer output.
logging.basicConfig(
    format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',
    datefmt='%H:%M',
    level=logging.INFO
)

# Silence chattier third-party loggers (e.g. matplotlib): every logger
# already registered that is not madminer's is bumped to WARNING.
for logger_name in logging.Logger.manager.loggerDict:
    if "madminer" not in logger_name:
        logging.getLogger(logger_name).setLevel(logging.WARNING)
# -
# ## 1. Calculating the Fisher information
# We can use SALLY estimators (see part 3b of this tutorial) not just to define optimal observables, but also to calculate the (expected) Fisher information in a process. In `madminer.fisherinformation` we provide the `FisherInformation` class that makes this more convenient.
# Build the Fisher-information calculator from the shuffled LHE sample;
# swap in the commented line to use the Delphes sample instead.
fisher = FisherInformation('data/lhe_data_shuffled.h5')
# fisher = FisherInformation('data/delphes_data_shuffled.h5')
# This class provides different functions:
# - `rate_information()` calculates the Fisher information in total rates,
# - `histo_information()` calculates the Fisher information in 1D histograms,
# - `histo_information_2d()` calculates the Fisher information in 2D histograms,
# - `full_information()` calculates the full detector-level Fisher information using a SALLY estimator, and
# - `truth_information()` calculates the truth-level Fisher information.
#
# Here we use the SALLY approach:
# +
# Full detector-level Fisher information via the SALLY estimator, evaluated
# at theta = (0, 0) for luminosity 30000 (i.e. 30 ifb per the print-out).
fisher_information, _ = fisher.full_information(
    theta=[0.,0.],
    model_file='models/sally',
    luminosity=30000.,
)
print('Fisher information after 30 ifb:\n{}'.format(fisher_information))
# -
# For comparison, we can calculate the Fisher information in the histogram of observables:
# +
# Fisher information in a 1D histogram of pt_j1. `bins` presumably gives
# bin edges over `histrange` -- confirm against the madminer API docs.
info_histo_1d, cov_histo_1d = fisher.histo_information(
    theta=[0.,0.],
    luminosity=30000.,
    observable="pt_j1",
    bins=[30.,100.,200.,400.],
    histrange=[30.,400.],
)
print('Histogram Fisher information after 30 ifb:\n{}'.format(info_histo_1d))
# -
# We can do the same thing in 2D:
# +
# 2D version: pt_j1 (explicit edges) x delta_phi_jj (5 bins over [0, 6.2]).
info_histo_2d, cov_histo_2d = fisher.histo_information_2d(
    theta=[0.,0.],
    luminosity=30000.,
    observable1="pt_j1",
    bins1=[30.,100.,200.,400.],
    histrange1=[30.,400.],
    observable2="delta_phi_jj",
    bins2=5,
    histrange2=[0.,6.2],
)
print('Histogram Fisher information after 30 ifb:\n{}'.format(info_histo_2d))
# -
# ## 2. Plot Fisher distances
# We also provide a convenience function to plot contours of constant Fisher distance `d^2(theta, theta_ref) = I_ij(theta_ref) * (theta-theta_ref)_i * (theta-theta_ref)_j`:
# Contours of constant Fisher distance for the three estimates
# (SALLY full info vs 1D/2D histogram info); None means no covariance given.
_ = plot_fisher_information_contours_2d(
    [fisher_information, info_histo_1d, info_histo_2d],
    [None, cov_histo_1d, cov_histo_2d],
    inline_labels=["SALLY", "1d", "2d"],
    xrange=(-2,2),
    yrange=(-2,2)
)
| examples/tutorial_particle_physics/4b_fisher_information.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 9, Optional - visualizing galaxy data
# Note: this cannot be uploaded into MeshLab - direct upload to Sketchfab is necessary.
# import our usual stuffs
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from galaxy_movies_library import read_in_galaxy_data
# Read in our 2 planet kepler system:
# +
from hermite_library import read_hermite_solution_from_file
# as a test:
t_h, E_h, r_h, v_h = read_hermite_solution_from_file('myPlanetSystem_kepler101_solution1.txt')
# -
# we also want to grab a unit conversion for later:
from hermite_library import AUinCM
# To load and save things correctly, we need to define a few file structures:
#
# First:
#
# This will point to the directory where generic.obj, generic.mtl, make3dplanets.py, etc is stored.
#
# This is the unzipped "genericPlanetFiles.zip" you can download from the website & unpack.
#
# Probably the easiest thing to do is do it in your local, current directory.
#
# **NOTE: SHOW THIS IN CLASS**
make3dplanets_libdir = '/Users/jillnaiman1/Downloads/planetFiles/'
# Now we'll name your planet system. This is the name of the directory where the .obj and .mtl and texture files will be stored.
SystemName = 'MyPlanetSystemSingle'
# Now list the full path where the SystemName directory where all the generated obj and mtl files will be stored.
#
# If this directory doesn't exist, make sure to mkdir it with something like:
# ```!mkdir /Users/jillnaiman1/Downloads/outPlanets```
output_planet_dir = '/Users/jillnaiman1/Downloads/outPlanets/'
# Now we need to make some decisions about what our planets will look like.
#
# If we recall the order of our planets will be:
#
# ```planet 1, planet 2, star```
#
# Let's remind ourselves of their masses:
# +
# make sure convert_kepler_data.py is in your directory as well
from convert_kepler_data import read_kepler_data
# make sure this datafile is in your local directory
kepler_data = read_kepler_data('kepler101data.txt')
# +
# masses:
kepler_data['pMass']
# so, 0.16 and 0.01 Jupiter masses
# -
kepler_data['sMass']
# There aren't any planets in our solar system (so the textures that we have) with those exact masses but we can estimate that we have a Saturn-ish ($0.3 M_J$) and Neptune-ish ($0.054 M_J$) sized planets and use their textures.
#
# Similarly, while this star's mass is a little larger than our Sun, let's assume its about a solar mass and use this mass. So, in summary:
#
# The masses will be:
#
# ```0.16 Jupiter, 0.01 Jupiter, Solar mass```
#
# So we can use the following textures as a first approximation:
#
# ```Saturn, Neptune, Sun```
#
# with comparable radii.
# Texture maps standing in for the estimated body types of this Kepler
# system (order matches the bodies: planet 1, planet 2, star).
texture_files = ['saturn_1024.jpg', 'neptunemap_1000.jpg', 'sun_texture1.jpg']
# Saturn, Neptune, Sun
PlanetRadius = [5.8232e9, 2.4622e9, 69.551e9] # cm
# Placeholder RGB colors; unused here because textures are supplied above.
PlanetColors = [ (1.0, 0, 0), (1.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
# Now let's grab the planet solving library from what we downloaded from the course website:
from sys import path
# point to where the library is stored
path.append(make3dplanets_libdir)
# now import make3dplanets
from make3dplanets import make3dplanets
# Just in case things are stored in different places we'll label these explicitly, but you shouldn't have to change this though.
generic_dir = make3dplanets_libdir
textures_dir = make3dplanets_libdir + 'textureMaps/'
# We also need to pick a timestep to make our static model out of:
Nplot = 100
# Now let's make the planet files! Note the weird switching of axis in the for loop - this is to make the planet textures "upright" w/rt z-axis.
# +
# Build an (N_bodies, 3) position array [cm] at timestep Nplot.
PlanetLocation = np.zeros( [len(PlanetRadius), 3] )
for p in range(0,len(PlanetRadius)):
    # Axes are permuted (sim x -> model z, y -> x, z -> y) so the planet
    # textures render "upright" with respect to the model z-axis.
    PlanetLocation[p,2] = r_h[p,0,Nplot]*AUinCM # units: AU -> cm
    PlanetLocation[p,0] = r_h[p,1,Nplot]*AUinCM
    PlanetLocation[p,1] = r_h[p,2,Nplot]*AUinCM
# Write the .obj/.mtl/texture files for this snapshot of the system.
fname = make3dplanets(SystemName, PlanetLocation, PlanetRadius,
                      output_planet_dir, generic_dir, textures_dir=textures_dir,
                      texture_file=texture_files, DistanceUnits=AUinCM)
fname # see what this is
# -
# Now, let's open this up in MeshLab and see what we can see! **do it**
# It is very likely you will see absolutely nothing! Why is this? Well this is because if we do everything *to scale* the space between the planets is so much larger than the planets & stars themselves that we can't actually see them. So we are going to make an aesthetic choice here and scale up the radii by a large factor.
#
# This is something we would have to add somewhere to the documentation of our viz in order not to confuse our viewers!
# Saturn, Neptune, and Sun radii in cm, inflated 1000x purely so the bodies
# are visible: at true scale they vanish next to the orbital distances.
PlanetRadius = [radius * 1000 for radius in [5.8232e9, 2.4622e9, 69.551e9]]
# +
# Rebuild the 3D scene with the inflated radii (same timestep Nplot,
# same axis permutation to keep textures upright w/rt the z-axis).
PlanetLocation = np.zeros( [len(PlanetRadius), 3] )
for p in range(0,len(PlanetRadius)):
    PlanetLocation[p,2] = r_h[p,0,Nplot]*AUinCM # units: AU -> cm
    PlanetLocation[p,0] = r_h[p,1,Nplot]*AUinCM
    PlanetLocation[p,1] = r_h[p,2,Nplot]*AUinCM
fname = make3dplanets(SystemName, PlanetLocation, PlanetRadius,
                      output_planet_dir, generic_dir, textures_dir=textures_dir,
                      texture_file=texture_files, DistanceUnits=AUinCM)
# -
# Now if we open this up in MeshLab **(do it)** we see that we can see our little planets! Hurray!
# Now we will go through the process of uploading to Sketchfab: **DO IT**
# ### Exercise
#
# Do this activity with your planetary model. First import into MeshLab and then upload to your Sketchfab account.
#
# Things to consider:
# 1. Should you rescale anything in your model? Why or why not?
# 1. What should you choose for the radii of your planets and star? How do you justify this?
# 1. What should you choose for the texture maps? What kind of planets and star do you "think" these are?
| lesson09/_Optional_displayingGalaxyData_lesson09_notUsed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Feb 2017 Analysis of Director Sentiment
#
# ## Setup
# +
# %matplotlib inline
from pattern.en import sentiment
from scipy import stats
import csv
import pandas as pd
# Column names in the input CSVs.
CODE = 'ASX Code'
NAME = 'Company name'
SECTOR = 'Sector '  # NOTE(review): trailing space presumably matches the CSV header -- verify before "fixing"
STATEMENT = 'Statement'
# Output column names for the computed sentiment scores.
POLARITY = 'Polarity'
SUBJECTIVITY = 'Subjectivity'
# Input columns copied through to the output rows, plus the score columns.
COPY_FIELDS = [CODE, NAME, SECTOR]
FIELDNAMES = COPY_FIELDS + [POLARITY, SUBJECTIVITY]
# Input/output CSV paths for the Feb-2016 and Feb-2017 snapshots.
IN16 = 'data/directors-reports-FEB2016-20170306.csv'
OUT16 = 'data/directors-reports-FEB2016-20170306-sentiment.csv'
IN17 = 'data/directors-reports-FEB2017-20170306.csv'
OUT17 = 'data/directors-reports-FEB2017-20170306-sentiment.csv'
# -
# ## 2016
#
# ### Read statements
# Read the Feb-2016 director statements into a list of dicts
# (one dict per CSV row, keyed by the header).
with open(IN16) as ifh:
    r = csv.DictReader(ifh)
    comments16 = list(r)
# ### Calculate sentiment
# Score each 2016 statement with pattern.en's sentiment analyser.
# Empty statements get neutral scores of (0.0, 0.0).
sentiments16 = []
for comment in comments16:
    entry = {field: comment[field] for field in COPY_FIELDS}
    text = comment[STATEMENT].strip()
    entry[POLARITY], entry[SUBJECTIVITY] = sentiment(text) if text else (0.0, 0.0)
    sentiments16.append(entry)
# ### Save raw sentiment scores
# Write the raw per-company sentiment scores. Mode 'wb' is correct for the
# csv module on this notebook's Python 2 kernel (Python 3 would need
# 'w' with newline='').
with open(OUT16, 'wb') as ofh:
    w = csv.DictWriter(ofh, fieldnames=FIELDNAMES)
    w.writeheader()
    w.writerows(sentiments16)
# ### Make data frame
df16 = pd.DataFrame(sentiments16)
df16
# ### Overall descriptive statistics
ax = df16[[POLARITY, SUBJECTIVITY]].boxplot(vert=False)
df16.describe()
# ### Descriptive statistics by sector
# Polarity boxplots grouped by sector
ax = df16[[SECTOR,POLARITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
# Subjectivity boxplots grouped by sector
ax = df16[[SECTOR,SUBJECTIVITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
for sector in df16[SECTOR].unique():
print '{}\n'.format(sector)
print df16[df16[SECTOR]==sector].describe()
print '\n\n'
# ### Does sentiment differ by sector?
#
# No: The null hypothesis that medians of all groups are equal is not rejected using the Kruskal-Wallis H-test.
polarities_by_sector = [df16[df16[SECTOR]==sector][POLARITY] for sector in df16[SECTOR].unique()]
stats.kruskal(*polarities_by_sector)
subjectivities_by_sector = [df16[df16[SECTOR]==sector][SUBJECTIVITY] for sector in df16[SECTOR].unique()]
stats.kruskal(*subjectivities_by_sector)
# ### Print words for some interesting companies
# +
def iter_words(statement):
    """Yield (words, polarity, subjectivity) for each sentiment assessment.

    Also prints each assessment as a side effect. This is a generator, so
    nothing runs until it is consumed (print_words feeds it to writerows).
    """
    print 'Key words from analysis:'
    for words, polarity, subjectivity, label in sentiment(statement).assessments:
        print '  {:20} (p:{:.3f}, s:{:.3f})'.format(' '.join(words), polarity, subjectivity)
        yield ' '.join(words), polarity, subjectivity
def print_words(code, comments, year='2016'):
    """Print the statement for ASX ticker `code` and dump its key sentiment
    words to data/example-FEB<year>-<code>.csv.

    Only the first row in `comments` whose CODE matches is processed
    (the loop breaks after it); non-matching rows are skipped silently.
    """
    print '{}\n'.format(code)
    for c in comments:
        if c[CODE] == code:
            print '{}\n'.format(c[STATEMENT])
            f = 'data/example-FEB{}-{}.csv'.format(year, code)
            with open(f, 'w') as fh:
                w = csv.writer(fh)
                # writerows consumes the iter_words generator, which both
                # prints the assessments and yields the CSV rows.
                w.writerows(iter_words(c[STATEMENT]))
            print '\n\n'
            break
# -
EXAMPLES16 = [
'AMC', # AMCOR - high Materials sentiment of 0.200
'ABC', # Adelaide Brighton - high Materials sentiment of 0.146
'RIO', # Rio Tinto - low Materials sentiment of -0.033
'S32', # South32 - low Materials sentiment of -0.050
'GPT', # GPT - high Real Estate sentiment of 0.330
'MGR', # Mirvac - high Real Estate sentiment of 0.247
'SGP', # Stockland - low Real Estate sentiment of -0.013
'SCG', # Scentre - low Real Estate sentiment of -0.221
]
for example in EXAMPLES16:
print_words(example, comments16)
# ## 2017
#
# ### Read statements
# + deletable=true editable=true
with open(IN17) as ifh:
r = csv.DictReader(ifh)
comments17 = list(r)
# + [markdown] deletable=true editable=true
# ### Calculate sentiment
# + deletable=true editable=true
# Score each 2017 statement with pattern.en's sentiment analyser.
# Empty statements get neutral scores of (0.0, 0.0).
sentiments17 = []
for comment in comments17:
    entry = {field: comment[field] for field in COPY_FIELDS}
    text = comment[STATEMENT].strip()
    entry[POLARITY], entry[SUBJECTIVITY] = sentiment(text) if text else (0.0, 0.0)
    sentiments17.append(entry)
# + [markdown] deletable=true editable=true
# ### Save raw sentiment scores
# + deletable=true editable=true
with open(OUT17, 'wb') as ofh:
w = csv.DictWriter(ofh, fieldnames=FIELDNAMES)
w.writeheader()
w.writerows(sentiments17)
# + [markdown] deletable=true editable=true
# ### Make data frame
# + deletable=true editable=true
df17 = pd.DataFrame(sentiments17)
df17
# + [markdown] deletable=true editable=true
# ### Overall descriptive statistics
# + deletable=true editable=true
ax = df17[[POLARITY, SUBJECTIVITY]].boxplot(vert=False)
# + deletable=true editable=true
df17.describe()
# + [markdown] deletable=true editable=true
# ### Descriptive statistics by sector
# + deletable=true editable=true
# Polarity boxplots grouped by sector
ax = df17[[SECTOR,POLARITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
# + deletable=true editable=true
# Subjectivity boxplots grouped by sector
ax = df17[[SECTOR,SUBJECTIVITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
# + deletable=true editable=true
for sector in df17[SECTOR].unique():
print '{}\n'.format(sector)
print df17[df17[SECTOR]==sector].describe()
print '\n\n'
# + [markdown] deletable=true editable=true
# ### Does sentiment differ by sector?
#
# No: The null hypothesis that medians of all groups are equal is not rejected using the Kruskal-Wallis H-test.
# + deletable=true editable=true
# Group 2017 polarities by the sectors present in the 2017 frame.
# Bug fix: the unique() was previously taken from df16, so 2017 data was
# grouped by the 2016 sector list -- producing wrong or empty groups
# whenever the two years' sector sets differ.
polarities_by_sector = [df17[df17[SECTOR]==sector][POLARITY] for sector in df17[SECTOR].unique()]
stats.kruskal(*polarities_by_sector)
# + deletable=true editable=true
subjectivities_by_sector = [df17[df17[SECTOR]==sector][SUBJECTIVITY] for sector in df17[SECTOR].unique()]
stats.kruskal(*subjectivities_by_sector)
# + [markdown] deletable=true editable=true
# ### Print words for some interesting companies
# + deletable=true editable=true
EXAMPLES17 = [
'AMC', # AMCOR - high Materials sentiment for 2016
'ABC', # Adelaide Brighton - high Materials sentiment for 2016
'RIO', # Rio Tinto - low Materials sentiment for 2016
'S32', # South32 - low Materials sentiment for 2016
'GPT', # GPT - high Real Estate sentiment for 2016
'MGR', # Mirvac - high Real Estate sentiment for 2016
'SGP', # Stockland - low Real Estate sentiment for 2016
'SCG', # Scentre - low Real Estate sentiment for 2016
]
for example in EXAMPLES17:
print_words(example, comments17, year='2017')
# -
# ## Compare across years
#
# ### Overall subjectivity and polarity
ax = df16[[POLARITY, SUBJECTIVITY]].boxplot(vert=False)
ax.set_xlim(-1,1)
ax = df17[[POLARITY, SUBJECTIVITY]].boxplot(vert=False)
ax.set_xlim(-1,1)
# ### Polarity by sector
# Polarity boxplots grouped by sector
ax = df16[[SECTOR,POLARITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
ax.set_xlim(-1,1)
# Polarity boxplots grouped by sector
ax = df17[[SECTOR,POLARITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
ax.set_xlim(-1,1)
# + [markdown] deletable=true editable=true
# ### Subjectivity by sector
# + deletable=true editable=true
# Polarity boxplots grouped by sector
ax = df16[[SECTOR,SUBJECTIVITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
ax.set_xlim(0,1)
# + deletable=true editable=true
# Polarity boxplots grouped by sector
ax = df17[[SECTOR,SUBJECTIVITY]].boxplot(by=SECTOR, vert=False)
ax.set_xlabel('')
ax.set_title('')
ax.set_xlim(0,1)
# -
# ### Do polarity and sentiment differ across years?
#
# No. The null hypothesis that means/medians are identical is not rejected using t-test/Mann-Whitney.
print stats.ttest_ind(df16[POLARITY], df17[POLARITY])
print stats.mannwhitneyu(df16[POLARITY], df17[POLARITY], alternative='two-sided')
print stats.ttest_ind(df16[SUBJECTIVITY], df17[SUBJECTIVITY])
print stats.mannwhitneyu(df16[SUBJECTIVITY], df17[SUBJECTIVITY], alternative='two-sided')
| notebooks/1703/Feb 2017 Analysis of Director Sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <font size = "5"> **Chapter 3: [Imaging](Ch3-Imaging.ipynb)** </font>
#
#
# <hr style="height:1px;border-top:4px solid #FF8200" />
#
# # Energy-Loss Near-Edge Structure
#
# part of
#
# <font size = "5"> **[Analysis of Transmission Electron Microscope Data](_Analysis_of_Transmission_Electron_Microscope_Data.ipynb)**</font>
#
#
# by <NAME>, 2019
#
# Microscopy Facilities<br>
# Joint Institute of Advanced Materials<br>
# The University of Tennessee, Knoxville
#
# Model based analysis and quantification of data acquired with transmission electron microscopes
#
# ## Content
#
# - Retrieving and Plotting of reference EELS spectra from the [EELS database](https://eelsdb.eu/spectra/)
# - Discussion of the energy-loss near-edge structure (ELNES) of specific edges.
# ## Load important packages
#
# ### Check Installed Packages
# +
import sys
from pkg_resources import get_distribution, DistributionNotFound
def test_package(package_name):
"""Test if package exists and returns version or -1"""
try:
version = get_distribution(package_name).version
except (DistributionNotFound, ImportError) as err:
version = '-1'
return version
# Colab setup ------------------
# NOTE(review): under jupytext the "# !pip" lines are shell magics; as plain
# Python this if-branch contains only comments, so the script form of this
# file will not parse -- run it as a notebook (or add a `pass`).
if 'google.colab' in sys.modules:
    # !pip install pyTEMlib -q
# pyTEMlib setup ------------------
else:
    # NOTE(review): string version comparison is lexicographic, not semantic
    # (e.g. '0.0.10' < '0.0.5' under this test) -- confirm this is acceptable.
    if test_package('sidpy') < '0.0.5':
        # NOTE(review): message says sidpy but the magic installs pyTEMlib --
        # presumably pyTEMlib pulls sidpy in as a dependency; verify.
        print('installing sidpy')
        # !{sys.executable} -m pip install --upgrade pyTEMlib -q
    if test_package('pyTEMlib') < '0.2021.4.20':
        print('installing pyTEMlib')
        # !{sys.executable} -m pip install --upgrade pyTEMlib -q
# ------------------------------
print('done')
# -
# ### Import all relevant libraries
#
# Please note that the EELS_tools package from pyTEMlib is essential.
# +
import sys
if 'google.colab' in sys.modules:
# %pylab --no-import-all inline
else:
# %pylab --no-import-all notebook
# %gui qt
import warnings
warnings.filterwarnings('ignore')
from scipy.ndimage.filters import gaussian_filter
## import the configuration files of pyTEMlib (we need access to the data folder)
import pyTEMlib
import pyTEMlib.file_tools as ft
import pyTEMlib.eels_tools as eels
# For archiving reasons it is a good idea to print the version numbers out at this point
print('pyTEM version: ',pyTEMlib.__version__)
# -
# ## Chemical Shift
#
# The chemical shift is the first feature that we discuss in respect of the shape or appearance of the ionization edges: the energy-loss near edge structure (ELNES).
# This section and the following one explain how to do simple analysis of near--edge features.
#
#
# The chemical shift refers to small changes (up to a few eV) of the edge onset, and how this shift depends on the bonding of an element in a solid.
# Going back to figure in the [Introduction to Core-Loss Spectra](CH4_07-Introduction_Core_Loss.ipynb), we see that such a change can be caused by a change of the band-gap, in which case the final states are moving or by a movement of the core-levels (initial states).
#
# Please note, that this explanation above is a simplification; what we measure is the energy difference of an excited atom to one in ground state. In the excited atom all states react to the new electronic configuration and not only the final and initial states. In fact, to calculate the energy-difference, one cannot use the difference between core-levels and bottom of the conduction band.
#
# However, we want to know which of the above effects (band gap changes or core-level shift) is the major one, so that we can conclude back on bonding of the element in question.
#
#
#
#
#
# As an example of chemical shift we look at reference data of the silicon L$_{2,3}$ edge.
#
# ### Load reference data
Si_L_reference_spectra = eels.get_spectrum_eels_db(element='Si',edge='L2,3')
# ### Plot silicon spectra
# +
# Plot selected Si-L2,3 reference spectra, each normalised to its maximum.
plt.figure()
# NOTE(review): this first loop is a no-op (plot call commented out, body is
# `pass`) -- apparently left over from exploring the available titles.
for name, spectrum in Si_L_reference_spectra.items():
    if 'Core' in spectrum['TITLE'] or 'L3' in spectrum['TITLE']:
        #plt.plot(spectrum['enery_scale'],spectrum['data']/np.max(spectrum['data']), label = spectrum['TITLE'])
        pass
# Pick four specific reference spectra by title. 'enery_scale' (sic) is the
# key as returned by the EELS-DB loader -- do not "correct" the spelling
# without checking eels.get_spectrum_eels_db.
for name, spectrum in Si_L_reference_spectra.items():
    if 'a_SiO2_Si_L3_S_Schamm_63' in spectrum['TITLE']:
        plt.plot(spectrum['enery_scale'],spectrum['data']/np.max(spectrum['data']), label = 'Si-L$_{2,3}$: a-SiO$_2$')
    if 'Si_Si_L3_S_Schamm_58' in spectrum['TITLE']:
        plt.plot(spectrum['enery_scale'],spectrum['data']/np.max(spectrum['data']), label = 'Si-L$_{2,3}$: Si')
    if 'Si3N4_(alpha)_Si_L3_S_Schamm_62' in spectrum['TITLE']:
        plt.plot(spectrum['enery_scale'],spectrum['data']/np.max(spectrum['data']), label = 'Si-L$_{2,3}$: Si$_3$N$_4$')
    if 'SiC(6H)_Si_L3_S_Schamm_66'in spectrum['TITLE']:
        plt.plot(spectrum['enery_scale'],spectrum['data']/np.max(spectrum['data']), label = 'Si-L$_{2,3}$: SiC')
plt.legend();
# -
# The shift of the edges as above can be caused by the initial and/or the final states.
#
# ### Band gap
# The band gap changes are treated in the solid state theory of band structure and are, therefore, well covered in other textbooks. The trend is that with increased oxidation (or more general increased electronegativity of the reaction partner as in the series: B, C, N, O), the band gap opens and the edge shifts to higher energies.
# This is seen in the figure above, where the onset of the Si-L$_{2,3}$ edge shifts to higher energies with increasing Pauling electronegativity of the reaction partner.
#
#
# In fact, one can monitor band gap changes with stoichiometry at interfaces by the shift of the edge.
# Please be aware that we see only the shift of the conduction band bottom and not the whole band gap change. This effect of the band gap is obvious between Si and SiO$_2$, where the edge shifts by about 6.5 eV.
#
# ### Core-level shift
# The initial state, the ``core-level``, can also shift, for example after oxidation. Some electrons will transfer to an anion (for example oxygen) and less electrons are available to fill the band structure. This is shown below for the case of Cu and its two oxides Cu$_2$O and CuO.
#
#
# The more electrons transfer to oxygen for the ionic bonding of these materials, the more the edges shift to lower energies, even though a band gap opens up. The opening up of the band gap will cause a shift to higher energies and counteracts the effect of ionization. Due to lower electron densities at the Cu atoms in the oxides, the core levels are assumed to shift to higher energies (see below) and compensate a little for the effect.
#
# >
# >
# >
#
#
# The core-level shift is generally a small effect. The core states react to an increase of electron density at the atom site with a decrease and vice versa. Simplified, we can think of the core level electrons getting repulsed from an increased electron density (through Coulomb interaction) and pushed closer (lower in energy) to the core.
#
# Fetch copper L2,3 reference spectra from the EELS database (eelsdb.eu).
Cu_L_reference_spectra = eels.get_spectrum_eels_db(edge='L2,3', element='Cu')
# +
plt.figure()
# Substring of the database TITLE -> readable plot label (Cu and its oxides).
cu_labels = {
    'Cu_Cu_L3_Y_Kihn_124': 'Cu-L$_{2,3}$: Cu',
    'CuO_Cu_L3_Y_Kihn_127': 'Cu-L$_{2,3}$: CuO',
    'Cu2O_Cu_L3_Y_Kihn_129': 'Cu-L$_{2,3}$: Cu$_2$O',
}
for spectrum in Cu_L_reference_spectra.values():
    for title_key, label in cu_labels.items():
        if title_key in spectrum['TITLE']:
            # 'enery_scale' (sic) is the key used by the EELS-DB interface.
            plt.plot(spectrum['enery_scale'],
                     spectrum['data'] / np.max(spectrum['data']),
                     label=label)
plt.legend();
plt.xlim(910, 980)
# -
# In the case of oxidized Cu, the slightly oxidized Cu$_2$O does shift to slightly lower energies compared to pure Cu:
# the shift to lower energies due to ionic bonding is a little larger than the opening of the band gap and the core level shift to higher energies (further away from the nucleus) caused by less Coulomb repulsion from the valence electrons.
#
# This effect is even more pronounced for CuO with a larger band gap than Cu$_2$O.
#
# In the figure below we see that the Si-L$_{3,2}$ edge shifts to higher energies with increased Ge content, when the spectra are taken from different locations at the diffuse Si - Ge interface. Intuitively, we would expect a shift to lower energies, because the band gap of SiGe alloys and of Ge is smaller than Si.
#
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/si-ge-shift.jpg">
#
#
# *We see that, as we acquire spectra at the diffuse interface between Si and Ge, the Si-L$_{3,2}$ edge shifts to higher energies. This is surprising as SiGe and Ge possess a smaller band gap than Si and one would expect the opposite.*
# This shift can be explained by a shift of core levels. All-electron calculations can determine the 2p$_{3/2}$ core levels of an atom in a compound, which is shown for Si-Ge and Si-GaAs alloys in the figure below. The calculations show that there is a core level shift to lower energies with increased Ge and GaAs content. Ge and GaAs will add additional electrons to the electronic structure and the Coulomb repulsion between core level electrons and valence electrons will increase, pushing the core levels to lower energies.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/core-level-shift.jpg">
#
# *All electron ab initio calculations of the core level states for Si-Ge and Si-GaAs alloys with different composition. The calculations show an 2p$_{3/2}$ core level shift to lower energies with deviation of composition from pure Si (on the left).*
#
#
# The shift of the core--levels to lower energies will increase the distance between core--level and conduction band bottom, which results in a simple picture in a shift to higher energies. We see that for pure Si, the 2p$_{3/2}$ core level is at about 89 eV but the edge is at 99.8 eV. The difference in energy is caused by relaxation of valence and core electrons. Effectively, we measure with the EELS edge onset the energy difference between an excited atom and an atom in its ground state.
#
# All electrons will relax according to the overall electron density at the atom sites and the calculated core--level shifts can not be used for predicting the edge shifts. However, these calculations can explain the physical origin of the edge shift.
#
# ### Conclusion
# In summation, we can say that the following effects (strongest first, weakest last) cause a chemical shift:
#
# - band gap opening
# - ionic bonding
# - core level shift
#
# All of these effects can be present at once, but usually only one dominates the chemical shift.
#
# ## White Line
#
# In this section, we try to analyze a distinct feature of the transition metal elements. The d-states of transition metal elements form a very flat band in the band structure. This flat band, creates a strong peak in the density of states. This analysis is based on the following simplification:
#
# In the figure below, we see an s or p free-electron-like band in the general shape of a parabola. This parabola gives rise to a saw-tooth-like feature in the density of states (DOS) because flat bands have a higher density of states than steep ones. The DOS of the conduction band (above the Fermi level) is closely related to our EELS spectra. A flat d-band will cause a very prominent peak, a so called white line (in the age of photographic recording, these peaks appeared as white lines).
#
#
# <img title="si-ge-shift" width="30%" alt="core-loss" src="images/DOS-bandstructure.jpg">
# *A schematic of the relationship of density of states (DOS on the left) and band structure of a transition metal element on the right). The s and p free electron like bands (parabola) give rise to saw tooth like feature in the DOS and the flat d bands (red) cause a sharp peak in the DOS.*
#
# Since these d-bands are so prominent we can easily separate them from the rest. In figure \ref{fig:Ti-whiteline} we use the calculated cross--section as a model of the s and p free electron like states. After a subtraction, we get the pure d-band contribution.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/Ti-L-whiteline.jpg">
# *We use the cross section of the Ti-L$_{2,3}$ edge (green) as a model for the free electron gas and subtract it from the experimental Ti-L$_{2,3}$ edge (red). The residual peaks (blue) can be analyzed as pure d-states. The two double peaks of the Ti-L$_{2,3}$ edge indicate that there is some structure to the d-bands (here crystal field splitting).*
#
# A simple analysis of the white line ratios of Ti-L$_3$ to Ti-L$_2$ of SrTiO$_3$ yields an intensity ratio of 242 / 314 = 0.8. However, just considering the initial states (and assuming the transition probability (or more accurately the transition matrix elements) is the same for both edges) with 4 electrons in p$_{3/2}$ and 2 electrons in p$_{1/2}$ would let us expect a ratio of 2 to 1.
#
# >Please, note that both the Ti-L$_3$ and Ti-L$_2$ edge are split in two. We will discuss this crystal field splitting in chapter \ref{sec:Titanium} as an ELNES feature. Here we just consider the sum over the the whole Ti-L$_3$ or/and Ti-L$_2$ and ignore this splitting.
#
# The deviation from the 2 : 1 white line ratio is assumed to be caused by J-J coupling, and is, therefore, symmetry dependent. The anomalous white line ratios have been used to determine valency of transition elements in compounds. Physically this approach is on shaky ground, because we do not know all the reasons for the change in the ratios, it has, however, been shown to be reliable for binary metallic alloys.
#
# Fortunately, there is an easier method (from the physical point of view). We compare the total amount of white line intensities (which corresponds to the empty d-states) and normalize them by the free-electron gas like intensity beyond the white lines.
#
# We use the method of Okamoto et al. \cite{Okamoto-Disko1992}.
#
#
# The energy-window for the free-electron like part of the edge can be chosen arbitrarily and consistently.
# Following Okamoto et al., a 50 eV integration window should be used 50 eV beyond the edge onset. These will allow to compare the results to values in the literature.
# The edge has to be taken in very thin areas and if possible corrected for single scattering distribution, because otherwise the free electron like part contains plasmon contributions, which change the analysis.
#
# For the above spectrum in the figure above, we get for the white line / free electron gas ratio (50 eV - 50 eV beyond edge onset) = 556 / 974 = 0.57. Ti in SrTiO$_3$ can be considered as Ti$^{4+}$ with no electrons in the d-bands, but using this ratio in the paper of Okamoto et al. would yield a d band occupancy of 4 as in a metal. The difference may lie in the usage of a Hartree-Slater X-section for the analysis while Okamoto et al. use a hydrogenic one. Also, the SrTiO$_3$ spectrum was presumably taken under completely different acquisition conditions than Okamoto's spectra.
# \
# For example, the SrTiO$_3$ spectrum was not corrected for convergence angle, even though it was acquired in Z-contrast mode. Another source of error is of course the background fit, which could change especially the free electron integration result. The fact that the SrTiO$_3$ spectrum was not corrected for single scattering distribution may also overestimate the free electron gas contribution, even though the spectrum was taken in a very thin area.
#
# For TiO$_2$ spectrum of the core-loss atlas I get for the white line / free electron gas 256 / 494 = 0.52. TiO$_2$ contains also only Ti$^{4+}$. This is the level of agreement we can expect, if we use two spectra with completely different acquisition parameters.
#
#
# In the plot of the Cu-L edges above, we can see that Cu has no empty d-states but with oxidation the d-bands get unoccupied and white lines appear. The more electrons get transferred to the oxygen neighbors, the more empty d-states and the more prominent the white lines will appear.
#
# This analysis of the occupancy of d-states is extremely important for magnetic materials where the strength depends on the unpaired (d-band or f-band) electrons.
#
# The same analysis can be done for the empty f bands of M--edges, which are also rather flat. Usually, the M$_{4,5}$ and the M$_{2,3}$ edges form doublets of white lines.
#
#
#
# ## ELNES
#
# So far, we have only interpreted distinct features of the shape of the ionization edges. A general approach is to look at the shape of the edges in total and use this shape as a kind of fingerprint for the interpretation. Another one is to try to understand the different features as means of electronic structure calculations of various sophistication.
#
# In order to understand the different approaches ( and their level of confidence in the results), we will discuss the most important edges one by one.
#
# The shape of the ELNES is closely related to the density of states of the conduction band. The next chapters discuss the basics for an electronic structure interpretation of ELNES
#
#
#
# ### Transition matrix and electronic structure
# The single scattering intensity of an ionization edge $J_k^1(E)$ is related to the band structure through Fermi's Golden Rule: The transition rate is proportional to the density of final states $N(E)$ multiplied with the square of an atomic transition matrix $M(E)$
#
# \begin{equation} \Large
# J_k^1(E) \propto |M(E)|^2 N(E)
# \end{equation}
#
# The transition matrix describes the transition probability between the core states and the final states (given by $N(E)$). Because the transition probability generally decreases with higher energies above the edge threshold, the transition matrix gives the overall shape of the edge (sawtooth) and can be determined by atomic physics.
#
#
# The density of final states (conduction band) ($N(E)$) expresses the chemical environment and its symmetry.
# Because the core--levels are highly localized the final states $N(E)$ present the local density of states. This localization causes a different shape for different elements in the same compound, even if they are nearest neighbors (with a distance of only a few Angstrom). The $N(E)$ will of course be different for elements in materials with different (local) symmetry, coordination or chemical composition.
#
# ### Life-time broadening
# For arbitrary excitations, the $N(E)$ is the joint density of states, which means a convolution of the initial and the final states. The density of final states $N(E)$ is broadened in the spectrum by the energy resolution of the experimental setup $\delta E$ and the width of the initial state $\Gamma_i$. $\Gamma_i$ can be approximated with the uncertainty principle:
#
# \begin{equation} \Large
# \Gamma_i \pi_h \approx \hbar
# \end{equation}
#
# The lifetime of the core-hole $\pi_h$ is determined by how fast the core-hole is filled and the additional energy is dissipated through emission of Auger electrons (for light elements) or X-ray photons (heavier atoms). The value of $\Gamma_i$ depends on the threshold energy of the edge and is calculated to be between 0.1 and 2 eV for K-edges of the first 40 elements.
#
#
# Further broadening of the $N(E)$ is induced by the lifetime of the final states $\pi_f$. The inelastic mean free path of the ejected electron is only a few nm (assuming a kinetic energy of less than 50eV).
# Using the free electron approximation ($E_{kin} = m_0 v^2 / 2$), we get for the energy broadening of the final states:
# \begin{equation} \Large
# \Gamma_f \approx \frac {\hbar}{\pi_f} = \frac{\hbar v}{\lambda_i } = \frac{\hbar}{\lambda_i} \sqrt{\frac{2E_{kin}}{m_0}}
# \end{equation}
#
#
# Since the inelastic mean free path $\lambda_i$ varies inversely with kinetic energy $E_{kin}$ below 50 eV (and rises only slightly above 50 eV), the observed density of states structure is more and more broadened the further the distance from the onset.
#
# The next two chapters discuss the density of final states $N(E)$ and the transition matrix $M(E)$ in detail.
#
# ### Dipole-selection rule
#
# Assuming a single electron approximation (and almost no electronic structure theory solves the many-particle problem fully) for the excitation, we can replace the many electron transition matrix elements with single electron matrix elements $M(\vec{q},E)$.
#
# \begin{equation} \Large
# M(\vec{q},E) = <f|\exp(i\vec{q}\vec{r})|i>
# \end{equation}
#
# with the initial wave function $|i> = \phi_i$ and the complex conjugated final wave function $<f|=\phi_f$.
# The integration is over the volume $\tau$ surrounding the initial state (and we get a high localization again).
#
# We now expand the operator $\exp(i\vec{q}\vec{r})$ as :
#
# \begin{equation} \Large
# \exp(i\vec{q}\vec{r}) = 1 + i\vec{q}\vec{r} + \mbox{higher--order terms}
# \end{equation}
#
# Because the final and initial wave functions must be orthogonal to each other, the first term gives zero.
# The higher--order terms are assumed to be negligible.
#
# The second term has odd symmetry and determines the dipole selection rule:
# - if the initial and final wave functions are both even or odd, then the result is zero.
# - only if one of the initial and final wave are odd and one of them is even the result is not zero.
#
# There is no transition probability for s to s transitions or p-p transitions or d-d or s-d transitions.
# However, transitions are possible from s to p states and from p to s and d states.
#
# The (optical) dipole selection rule says the angular quantum number $l$ of the initial and final state must differ by 1: $\delta l = \pm 1$.
#
# For the dipole selection rule to be valid, obviously the higher order terms must be very small, otherwise a third integral (of the dipole--forbidden reflections) will modify the observed ELNES.
#
# A rough estimate leads to the requirement $\vec{q}\vec{r} \ll 1$ for all $r$, which is equivalent to $q \ll q_d = 1/r_c$ where $r_c$ is the radius of the core state. This radius $r_c$ denotes the volume where the transition occurs.
# And we get for the carbon K-shells ($Z^* = Z- 0.3$) and 100kV acceleration energy that the dipole selection rule should be applicable for $\theta << \theta_d = 63 mrad$. For the Si L$_{3,2}$ edge the dipole selection rule should prevail up to about 12 mrad.
#
#
# Therefore, we must use a rather small aperture ($< 10 - 20$ mrad) centered at the zero beam to ensure that the dipole selection rule is valid.
#
# ### Core--hole effects and excitons
#
# So far, we can say that our ELNES can be calculated by the site and angular momentum projected density of states. This is true for almost all metals, but the core-hole effects (through excitonic interactions of the core--hole with the excited electron and valence electrons) further modifies the final density of states $N(E)$.
#
# The missing electron in the core hole and the extra electron in the conduction band causes changes in the band structure (in the core, valence and conduction states regions).
# These relaxation processes of all the electrons involved can be rather complicated and we have to know when these complications have to be applied.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/onset.jpg">
# *A method to determine the edge onset from XPS data and the band gap, if core--hole effects are negligible. This is only true for metals with an almost complete screening of the core--hole.*
#
# As illustrated in the figure above (Pantelides, PRL 1975), if there is no influence of the core-electron, the edge onset of the ionization edge should be determined by:
# \begin{eqnarray}
# E_{\mbox{onset}} &=& E_{\mbox{core-state}} - E_{\mbox{CB}}\\
# &=& (E_{\mbox{core-state}} - E_{\mbox{vacuum}} ) + (E_{\mbox{vacuum}} - E_{\mbox{CB}} )\\
# &=& (E_{\mbox{core-state}} - E_{\mbox{vacuum}} ) + (E_{\mbox{vacuum}} - E_{\mbox{VB}} ) +(E_{\mbox{VB}} - E_{\mbox{CB}} )
# \end{eqnarray}
#
# The distances between core--states and vacuum and valence band (VB) and vacuum are directly accessible with XPS, while the band gap ($E_{\mbox{VB}} - E_{\mbox{CB}} $) is available from optical spectroscopy.
#
# Therefore, the calculated density of states not including core holes should start at that energy. If the agreement is not good, we must assume that the (excitonic) core-hole effects modify the $N(E)$.
#
# This modification of $N(E)$ can be compared to doping effects in semiconductors as shown in the figure below, where the whole band structure shifts due to the extra electron. This shifting of bands (up and down) is due to the different screened potential of an excited atom compared to an atom in ground state.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/comp-doping.jpg">
# *Comparison of doping and core--hole effects on the electronic structure. Both effects modify the band structure.*
#
# The modifications of the core hole effect cannot be predicted without extensive calculations.
# Two methods have proven to be successful for ELNES calculations:
# - explicit core hole in an all electron calculation (like FLAPW)
# - Z+1 approximation in pseudo potential calculations (like VASP)
#
# In the explicit core-hole approach, we just take one electron from a core level of one atom in the supercell and place it (energetically) on top of the other electrons of that (now excited) atom.
# Now, the electronic structure calculations give the ELNES as the local and angular projected density of states of the excited atom.
#
# In the Z+1 calculation, the atom with the explicit core-hole is replaced by an atom with an atomic number increased by one. The core hole is simulated by a nuclear charge that is increased by one and the excess electron is there naturally. This Z+1 approximation can be used in combination with the much faster pseudo potential methods. The results of this approximation are very close to the explicit core hole simulations.
#
# ## ELNES Examples
#
# ### Carbon
#
#
#
# Carbon has 4 valence electrons.
# As a free atom, it has two electrons in 2s and two electrons in 2p states.
# In a compound, it is easier to understand the bonding assuming that there is only one electron in the 2s state and three electrons in the 2p state.
#
# Carbon can bond in three modes and as an elemental material occurs in several modifications:
# - Diamond: sp$_3$ hybridization
# - lonsdaleite: sp$_3$ hybridization
# - Graphite: sp$_2$ hybridization
#
#
# The energy levels of the 2s and 2p levels are so close together that they can hybridize (as in many elements in the first row of the periodic table). Hybridization means the states are indistinguishable (in energy). Carbon is unique in a sense as it can hybridize so that 2, 3 or 4 electrons hybridize. The 2s (occupied by one electron) and two or all three p states hybridize into states with p character, which means they are directional and form covalent bonds.
#
# In the case of the sp$_2$ bonds, one of the p electrons does not contribute to bonding and is de-localized. This de-localized electron is the reason for graphite to be conducting, while in diamond all electrons are involved in covalent bonding and consequently diamond is an insulator. This extra (non-hybridized) 2p electron does the double bonding in molecules and polymers.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/sp2-sp3.jpg">
# *Comparison between $sp_2$ and $sp_3$ hybridization. The $sp_2$ hybridization causes a layered structure and a de--localized electron. The $sp_3$ hybridization causes bond angles of 109.5$^{\circ}$.*
#
#
#
# Graphite has three covalent bonds and forms a layered structure with three nearest neighbors within the layers \ref{fig:sp2}. Within the layer, the $sp_2$ hybridization does the covalent bonding. The layers are bonded together with the weak Van-der-Waals force. Diamond has 4 covalent bonds and forms with its 4 neighbors a tetrahedron. The bond angles in diamond are 109.5$^{\circ}$ and the bonds fill all three dimensions.
#
#
# Fetch carbon K-edge reference spectra from the EELS database (eelsdb.eu).
C_reference_spectra = eels.get_spectrum_eels_db(edge='K', element='C')
# +
plt.figure()
# Map a unique substring of each database TITLE to a readable plot label.
# (The earlier loop that iterated all spectra and did nothing was dead code
# and has been removed.)
c_labels = {
    'C_(graphite)_C_K_David_Muller_(at_Cornell_University)_1995_23': 'C-K: graphite',
    'C_C_K_M__Jaouen,_G__Tourillon_21xray': 'C-K: diamond',
    'C_C_K__243': 'C-K: ',
    'CN_C_K_Susana_Trasobares_122': 'C-K: CN',
}
for spectrum in C_reference_spectra.values():
    for title_key, label in c_labels.items():
        if title_key in spectrum['TITLE']:
            # 'enery_scale' (sic) is the key used by the EELS-DB interface.
            plt.plot(spectrum['enery_scale'],
                     spectrum['data'] / np.max(spectrum['data']),
                     label=label)
plt.legend()
plt.xlim(275, 310)
# -
# Looking at the bonding of carbon in molecular orbital theory or (its predecessor) Ligand--Field theory the non--hybridized p electron in graphite will form an occupied $\pi$ bond and an unoccupied $\pi^*$ bond. In figure \ref{fig:C-K} we see that the unoccupied $\pi^*$ state is visible in the graphite spectrum.
# In diamond, there is no molecule like p electron and consequently there is no $\pi$ or $\pi^*$ bond.
# The appearance of the $\pi^*$ state in a carbon spectrum is used as a fingerprint for $sp_2$ hybridization. In the case of so called diamond like carbon, an amorphous carbon with $sp_2$ and $sp_3$ bonds, the quality (amount of $sp_3$ bonds) of the diamond like carbon can be assessed by the intensity of the $\pi^*$ peak (or rather the lack of it).
#
# Both spectra have a $\sigma_1^*$ and $\sigma_2^*$ peak which are associated with the molecule like s states.
# The C-K edge should show only the p density of states due to the dipole selection rule. The $\sigma^*$ states show up in the C-K edge because of these states are already related to the s-p like free electron gas density of states (s-p hybridization) above the edge. The $\sigma^*$ states are the non-bonding states of the ( $sp_2$ or $sp_3$) hybridized states and are, therefore, present in any carbon compound.
#
# The quantification of $sp_2$ versus $sp_3$ hybridization is also important in polymers (where the non hybridized p electron in a $sp_2$ configuration forms the conducting double bonds. In Buckminster fullerens (bucky balls) and carbon nanotubes the $sp_3$ hybridization is always associated with a defect (dislocation like), where a carbon atom has now 4 nearest neighbors.
#
# ### Silicon
#
# The calculation of the transition matrix $M(E)$ for
# the Si-L$_{3,2}$ edge shows that the intensity of the ELNES consists almost exclusively of d-states. Less than 5\% of the intensity is from the also dipole--allowed s-DOS.
#
# We can, therefore, assume that only d-states form the Si-L$_{3,2}$ ELNES.
# The spin orbit splitting of the initial p states is 0.7 eV, and this means that the L$_3$ and the L$_2$ are separated by 0.7 eV, which cannot be resolved with most instrumental setups.
# To the calculated (local) d-DOS the same DOS has to be added (with a ratio of about 2:1) but shifted by 0.7 eV.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/Si-pure.jpg">
# *Comparison of experimental and theoretical data. While an effective mass exciton would explain the sharp rise, the effect is too small; the electronic structure calculation without core hole effect, placed at the correct onset, does not agree with the experiment.*
#
# The edge onset of the Si-L$_{3,2}$ of pure Si should be at 100 eV without core-hole effects. A d--Dos calculated without influence of a core--hole is shown in figure \ref{fig:Si-L-pure} beginning at this value. We can clearly see that this DOS of state cannot reproduce the experimental ELNES. From this disagreement between experiment and DOS without core-hole effect, we conclude that the
# core-hole effects must be included.
#
# The main feature of the Si-L$_{3,2}$ of pure Si is the extremely sharp rise of the edge at the onset.
# This feature cannot be explained by the d-DOS calculated without core--hole effects, which does not rise as steeply as the experimental ELNES.
#
# This steep rise is another indication of the core--hole and must have its origin in an excitonic effect (an interaction of the excess electron in the conduction band and the hole in the core state).
# In the figure above, the calculations of an effective mass electron (due to a state that is created just below the conduction band) are compared to the experimental ELNES. Such an effective mass electron must be considered delocalized. We see that the rise is steep enough to explain the experimental rise, but we also see that the effect (intensity) is too small to change the ELNES.
#
#
# Only the explicit inclusion of the core--hole or the Z+1 calculations in figure \ref{fig:Si-L-pure2} can explain this steep onset. We can, therefore, conclude on localized excitonic enhancement of the states at the bottom of conduction band. This is a rather localized excitonic effect.
#
# We can also see in the comparison of the explicit inclusion of the core-hole and the Z+1 approximation that both simulations lead to the same ELNES, however only the explicit core-hole calculation can predict the exact intensity (cross section) of the Si-L$_{3,2}$ edge.
#
# The same calculations are also successful for SiO$_2$ (quartz) as can be seen in figure \ref{fig:Si-L-sio2}. The experimental data show the spin--orbit splitting in the first peak; all other features are too smeared out to show a clear distinction between transitions originating from 2p$_{3/2}$ and 2p$_{1/2}$.
# Due to the simple addition of the shifted spectra, the splitting in the first peak is reproduced rather easily and cannot be used for further analysis. Again, this edge is completely dominated by the local d-DOS.
#
# ### Oxygen and Nitrogen
#
# Oxygen and nitrogen edges are usually very similar. Here we will discuss mostly the oxygen edge, but this can be easily transferred to nitrogen.
#
# The Si-SiO$_2$ interface shows oxygen deficiency in the oxide at the interface. In the following, I will show that the oxygen K edge ELNES cannot be used to probe the oxygen deficiency. Experimentally, the oxygen K edge has a chemical shift by about 1 eV. The structure of the edge is washed out at the interface as shown in figure . Higher spatial resolution experiments by Muller (nature 2003) show a completely structureless O-K edge. Simulation of the O-K edge show that this shift and the featureless structure is due to the dimer like structure (Si-O-Si) which is not avoidable at any Si-SiO$_2$ interface.
#
# Another approach is the so called "finger-print" method. In this method, one compares edges from different but known materials and hopes that similar features are conclusive for different coordinations within the unknown material. This approach can be improved by using simulations of the ELNES of the edges as seen in the plot below.
O_reference_spectra = eels.get_spectrum_eels_db(element='O',edge='K')
# +
# Reference spectra to plot: database titles and their human-readable
# material labels, index-aligned.
O_reference_titles = ['SrTiO3_O_K_imhoff_232',
                      'MgO_O_K_Giovanni_Bertoni_112',
                      'ZnO_O_K_Wilfried_Sigle_169',
                      'Cr2O3_(alpha)_O_K_Rik_Brydson_158'
                     ]
O_reference_materials = ['SrTiO$_3$', 'MgO', 'ZnO', 'Cr$_2$O$_3$']
plt.figure()
for name, spectrum in O_reference_spectra.items():
    if spectrum['TITLE'] in O_reference_titles:
        # Look the label up by title instead of a running counter: the
        # counter silently mislabels curves whenever the dictionary
        # iteration order differs from the order of O_reference_titles.
        material = O_reference_materials[O_reference_titles.index(spectrum['TITLE'])]
        plt.plot(spectrum['enery_scale'], spectrum['data']/np.max(spectrum['data']),
                 label='O-K:' + material)
plt.legend();
plt.xlim(525,570)
# -
# A comparison shows that the cubic oxides MgO and MgAl$_2$O$_4$ (spinel structure) have a similar ELNES which differs from the ELNES of the rhombohedral oxide Al$_2$O$_3$.
#
#
# Correlation effects of valence electrons cause the so called Hubbard band. These bands dominate the electronic structure in high T$_c$ semiconductors, giant magnet resistors and other materials with high correlations.
#
# In the figure below, we see that such a correlation effect takes place in the giant magneto-resistor LaMnO$_3$ but not in the perovskite LaAlO$_3$, which does not have this splitting of the d bands (Al does not have d electrons but Mn has), a precondition for the formation of a Hubbard band.
#
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/O-K-LMO.jpg">
#
# *The O-K edge in LaMnO$_3$ has a pre-peak that is interpreted as Hubbard band and is not present in LaAlO$_3$ even so it has the same structure.*
#
#
# The same Hubbard band is visible in the high T$_c$ superconductor YBa$_2$Cu$_3$O$_{7-\delta}$.
# In figure \ref{fig:O-K-YBCO-DL}, we see the probing of this Hubbard band at a dislocation in YBa$_2$Cu$_3$O$_{7-\delta}$.
#
# <img title="si-ge-shift" width="50%" alt="core-loss" src="images/O-K-YBCO-DL.jpg">
#
# *The O-K edge at a dislocation in YBa$_2$Cu$_3$O$_{7-\delta}$ has less of the signature of a Hubbard band than far away in the bulk. This lack of holes in the dislocation disturbs the superconductivity and is utilized in grain boundaries for Josephson junctions but is detrimental in polycrystalline high T$_c$ wires.*
#
#
# The prepeak that is caused by the Hubbard band is reduced in the vicinity of the dislocation and vanishes completely within the dislocation core. This lack of holes in the dislocation disturbs the superconductivity and is utilized in grain boundaries for Josephson junctions but is detrimental in polycrystalline high T$_c$ wires.
#
# ## Spatial Resolution in EELS
# % images/spat-dif-resolution.jpg}
#
# ### Methods to achieve spatial resolution
# - Spot Mode
# - Area Mode = Spatial Difference
# - Lines Scan Mode
# - Each Spot of Line one Spectrum
# - Each Segement of Line one Spectrum
# - Each Area of Line one Spectrum
# - Spectrum Imaging
#
# ### Spot mode
# % {images/spat-dif-spot.jpg}
#
# ### Spatial difference
# % images/spat-dif-spatdif1.jpg
# % images/spat-dif-spatdif2.jpg
# % images/spat-dif-spatdif3.jpg
# *EELS at Bi doped Cu grain boundary*
# % images/spat-dif-spatdif4.jpg
# % images/spat-dif-ls1.jpg
# *As segregation at Si/SiO$_2$ interface*
# % images/spat-dif-ls2.jpg
# *As segregation at Si/SiO$_2$ interface*
#
# ### Energy Filtered Imaging (EFTEM)
# % images/spat-dif-eftem1.jpg}
# % images/spat-dif-eftem2.jpg}
# % images/spat-dif-eftem3.jpg}
#
# ## Summary
#
# The core--loss part of the electron energy--loss spectrum allows us to determine:
# - chemical composition
# - bonding
# - magnetic moment through ionic charge
#
# >
# > with high spatial resolution!!!
# >
# ## Navigation
# - <font size = "3"> **Up Chapter 4: [Imaging](CH4_00-Spectroscopy.ipynb)** </font>
# - <font size = "3"> **Back: [Analysis of Core-Loss](CH4_09-Analysis_Core_Loss.ipynb)** </font>
# - <font size = "3"> **List of Content: [Front](../_MSE672_Intro_TEM.ipynb)** </font>
| Spectroscopy/CH4_10-ELNES.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Subject Correlations & Year Counts
# +
# Import necessary libraries.
import re, nltk, warnings, csv, sys, os, pickle, string, json
import pandas as pd
import numpy as np
from itertools import chain
import itertools as iter
import networkx as nx
from networkx.algorithms import community
from networkx.readwrite import json_graph
from json import JSONEncoder
from operator import itemgetter
from collections import Counter
# Read in config.py (git ignored file) for API username and pw.
config_path = os.path.abspath(os.path.join(os.path.dirname('config.py'), '../Scripts'))
sys.path.append(config_path)
import config
# Import project-specific functions.
# Python files (.py) have to be in same folder to work.
lib_path = os.path.abspath(os.path.join(os.path.dirname('Correspondence_XML_parser.py'), '../Scripts'))
sys.path.append(lib_path)
from Correspondence_XML_parser import *
# Ignore warnings related to deprecated functions.
warnings.filterwarnings('ignore')
url = 'https://dsg.xmldb-dev.northeastern.edu/BaseX964/rest/psc/'
user = config.username
pw = config.password
# -
# ## Gather XML Files
# +
# %%time
# Declare directory location to shorten filepaths later.
abs_dir = "/Users/quinn.wi/Documents/"
# NOTE(review): `glob` is not imported in this notebook's import cell —
# presumably it arrives via `from Correspondence_XML_parser import *`;
# confirm, or add an explicit `import glob`.
files = glob.glob(abs_dir + "Data/PSC/Sedgwick/*.xml")
# Display the number of XML files found.
len(files)
# +
# # %%time
# # Must be connected to Northeastern's VPN.
# r = requests.get(url,
# auth = (user, pw),
# headers = {'Content-Type': 'application/xml'}
# )
# # Read in contents of pipeline.
# soup = BeautifulSoup(r.content, 'html.parser')
# # Split soup's content by \n (each line is a file path to an XML doc).
# # Use filter() to remove empty strings ('').
# # Convert back to list using list().
# files = list(filter(None, soup.text.split('\n')))
# # Filter list and retrieve only jqa/ files.
# files = [i for i in files if '......./' in i]
# len(files)
# -
# ## Build Dataframe
# +
# %%time
# Parse the XML files into a dataframe (build_dataframe is provided by
# Correspondence_XML_parser) and reshape it to one row per subject heading.
df = build_dataframe(files)
# Split the comma-separated subject strings and unnest them into rows,
# trimming whitespace left around each heading by the split.
df['subjects'] = df['subjects'].str.split(',')
df = df.explode('subjects')
df['subjects'] = df['subjects'].str.strip()
# Discard the spurious "The" heading.
is_the = df['subjects'].isin(['The'])
df = df[~is_the]
# Convert empty strings to NaN so dropna() removes incomplete rows too.
df.replace('', np.nan, inplace=True)
df.dropna(inplace=True)
df.head(3)
# -
# ## Count Subject Headings by Year
# +
# %%time
# Extract month, year from date.
# Unparseable dates become NaT (errors='coerce') and are dropped below.
df['date'] = pd.to_datetime(df['date'], format = '%Y-%m-%d', errors = 'coerce')
# NOTE(review): comparing a datetime column to the string "NaT" inside
# query() is fragile across pandas versions — df[df['date'].notna()] is the
# conventional form; confirm it behaves as intended on the pinned version.
df = df.query('date != "NaT"') # remove Not-a-Time values.
df['month'] = df['date'].dt.month
df['year'] = df['date'].dt.year
# Group by year & subject to get count of subjects per year.
# NOTE(review): the shape of groupby(...).size().reset_index() output is
# pandas-version dependent; the hard-coded column rename below assumes
# exactly three columns come back — verify after any pandas upgrade.
subjects = df.groupby(['year', 'subjects'], as_index = False)['subjects'] \
    .size() \
    .reset_index()
subjects.columns = ['year', 'subjects', 'count']
# Group by year and get total number of subjects per year.
subjects['total'] = subjects.groupby('year')['count'].transform('sum')
# Get percentage of subject for each year (rounded to whole percents).
subjects['percentage'] = round(subjects['count'] / subjects['total'], 2) * 100
subjects.to_csv(abs_dir + 'Github/dsg-mhs/lab_space/projects/sedgwick/subjects/data/subject-year-count.csv',
                sep = ',', index = False)
subjects.head()
# -
# ## Create Adjacency Matrix of Subjects
# +
# %%time
# Build the subject co-occurrence (adjacency) matrix.
# Document-by-subject incidence matrix: one row per file, one column per subject.
incidence = pd.crosstab(df['file'], df['subjects'])
# Project onto subject space: entry (i, j) counts the documents in which
# subjects i and j co-occur.
adj = incidence.T.dot(incidence)
# Zero out self-pairs on the diagonal.
np.fill_diagonal(adj.values, 0)
# Replace raw co-occurrence counts with pairwise correlations.
adj = adj.corr()
adj
# -
# ## Save Subject Adj. as Network Object
# +
# %%time
# Move the index into a 'source' column so the matrix can be melted into an
# edge list.
adj['source'] = adj.index
# Long format: one row per (source, target) pair; keep only strongly
# correlated pairs (weight > 0.8) and drop self-loops.
df = pd.melt(adj, id_vars = ['source'], var_name = 'target', value_name = 'weight') \
    .query('(source != target) & (weight > 0.8)')
df
# -
# ## Create Graph Object
# +
# %%time
# Build a weighted, undirected graph from the thresholded edge list.
G = nx.from_pandas_edgelist(df, 'source', 'target', 'weight')
# Add nodes explicitly (dict.fromkeys de-duplicates while preserving order).
nodes = list( dict.fromkeys( df['source'].values.tolist() + df['target'].values.tolist() ))
G.add_nodes_from(nodes)
# NOTE(review): nx.info() is deprecated and removed in networkx 3.0 —
# confirm the pinned networkx version before upgrading.
print (nx.info(G))
# Set degree attributes.
nx.set_node_attributes(G, dict(G.degree(G.nodes())), 'degree')
# Sort nodes by degree and print top results.
sorted_degree = sorted(dict(G.degree(G.nodes())).items(),
                       key = itemgetter(1), reverse = True)
print ("Top 10 nodes by degree:")
for d in sorted_degree[:10]:
    print (f'\t{d}')
# Measure network density.
density = nx.density(G)
print (f"Network density: {density:.3f}")
# Related to diameter, check if network is connected and, therefore, can have a diameter.
print (f"Is the network connected? {nx.is_connected(G)}")
# Get a list of network components (communities).
# Find the largest component.
components = nx.connected_components(G)
largest_component = max(components, key = len)
# Create a subgraph of the largest component and measure its diameter.
subgraph = G.subgraph(largest_component)
diameter = nx.diameter(subgraph)
print (f"Network diameter of the largest component: {diameter:.3f}")
# Find triadic closure (similar to density).
triadic_closure = nx.transitivity(G)
print (f"Triadic closure: {triadic_closure:.3f}\n")
# Find centrality measures.
betweenness_dict = nx.betweenness_centrality(G) # Run betweenness centrality
eigenvector_dict = nx.eigenvector_centrality(G) # Run eigenvector centrality
degree_cent_dict = nx.degree_centrality(G)
# Assign each centrality measure to an attribute.
nx.set_node_attributes(G, betweenness_dict, 'betweenness')
nx.set_node_attributes(G, eigenvector_dict, 'eigenvector')
nx.set_node_attributes(G, degree_cent_dict, 'degree_cent')
# Find communities (computed on the largest component only).
communities = community.naive_greedy_modularity_communities(subgraph)
# Create a dictionary that maps nodes to their community.
modularity_dict = {}
for i, c in enumerate(communities):
    for name in c:
        modularity_dict[name] = i
# Add modularity information to graph object.
# NOTE(review): nodes outside the largest component get no 'modularity'
# attribute — downstream consumers must tolerate the missing key.
nx.set_node_attributes(G, modularity_dict, 'modularity')
# -
# ## Write Graph Object
# +
# %%time
# Serialize the graph as node-link JSON and write it out for the web app.
data = json_graph.node_link_data(G)
out_file = abs_dir + "Github/dsg-mhs/lab_space/projects/sedgwick/subjects/data/sedgwick-subjects-network.json"
with open(out_file, "w") as f:
    f.write(json.dumps(data))
# -
| Jupyter_Notebooks/Subjects/.ipynb_checkpoints/Sedgwick_Subject-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Wrangling: WeRateDogs Twitter Data
#importing required libraries
import pandas as pd
import numpy as np
import tweepy
import requests
import json
import re
import os
import datetime
import matplotlib.pyplot as plt
# %matplotlib inline
# # 1. Data Wrangling
# ## 1.1 Twitter archive file
#save twitter-archive-enhanced.csv file in twitter_archive dataframe
twitter_archive = pd.read_csv('twitter-archive-enhanced.csv')
twitter_archive.head()
# ## 1.2 Image Predictions file
#Downloading and saving image predictions data using Requests
url = 'https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
r = requests.get(url)
r
file_name = url.split('/')[-1]
file_name
if not os.path.isfile(file_name):
with open(file_name, 'wb') as f:
f.write(r.content)
#save image-prediction.tsv file in image_prediction dataframe
image_prediction = pd.read_csv('image-predictions.tsv', sep='\t')
image_prediction.head()
# ## 1.3 Twitter API Data for the favourites and retweets counts
# +
consumer_key = '****************************'
consumer_secret = '*************************'
access_token = '****************************'
access_secret = '***************************'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# +
#Experimenting to extract one tweet's id information
exp_tweet = api.get_status(twitter_archive.tweet_id[1000], tweet_mode='extended')
content = exp_tweet._json
content
# -
#added for experimenting new way of getting the data
exp_tweet.full_text
exp_tweet.retweet_count, exp_tweet.id, exp_tweet.favorite_count
#checking the keys of the test tweet
content.keys()
#Getting the retweet and favourite counts
content['retweet_count'], content['id'], content['favorite_count']
#investigating the user information
content['user'].keys()
content['user']['followers_count'], content['user']['location']
# ### 1.3.1 Quering The Twitter API
#creating a file for the tweets' text data
# Collect the ids of tweets that could not be fetched (deleted, protected, …).
errors = []
if not os.path.isfile('tweet_json.txt'):
    #create the file and write on it
    with open ('tweet_json.txt', 'w') as file:
        for tweet_id in twitter_archive['tweet_id']:
            try:
                # wait_on_rate_limit / wait_on_rate_limit_notify are
                # tweepy.API constructor options (already set on `api`
                # above), not get_status() parameters, so they must not be
                # passed here.
                status = api.get_status(tweet_id, tweet_mode='extended')
                json.dump(status._json, file)
                file.write('\n')
            except Exception as e:
                print("Error on tweet id {}".format(tweet_id) + ";" + str(e))
                errors.append(tweet_id)
# ### 1.3.2 Reading the tweet_json.txt
# Parse the line-delimited JSON written above into per-tweet engagement
# metrics (one dict per line → one dataframe row).
df_list = []
with open('tweet_json.txt', 'r') as file:
    for line in file:
        tweet = json.loads(line)
        df_list.append({'tweet_id': tweet['id'],
                        'retweet_count': tweet['retweet_count'],
                        'favorite_count': tweet['favorite_count'],
                        'user_count': tweet['user']['followers_count']})
api_df = pd.DataFrame(df_list)
api_df.head()
# # 2. Data Assessment
# ## Visual Assesment
twitter_archive
image_prediction
api_df
# ## Programmatic Assesment
#Data types of each column and number of entries
twitter_archive.info()
sum(twitter_archive['tweet_id'].duplicated())
twitter_archive.describe()
print(twitter_archive['doggo'].value_counts())
print(twitter_archive['floofer'].value_counts())
print(twitter_archive['pupper'].value_counts())
print(twitter_archive['puppo'].value_counts())
twitter_archive.rating_denominator.value_counts()
twitter_archive['tweet_id'].loc[twitter_archive['rating_denominator']==2]
image_prediction.info()
sum(image_prediction['tweet_id'].duplicated())
api_df.info()
# ### Quality issues
# #### `twitter_archive` table
# - Missing values in name column and invalid names less than 2 characters.
# - By comparing the number of rows in `image_prediction` and `twitter_archive` tables, we found that there are many tweets in `twitter_archive` table has no image. This rows should be dropped.
# - Nan values in 'expanded_urls' column, it represnt tweets with no image, should be dropped.
# - Some tweets are actually retweets and replies not original tweets that have to be deleted.
# - Some columns have represntations of null values as 'None' not 'NaN'
# - 'retweeted_stauts_timestamp' and 'timestamp' should be datetime not object.
# - Deal with rating_numerator and rating_denominator to make sure it extracted in right way from the text
#
# #### `image_prediction` table
# - create 1 column for image prediction and 1 column for confidence level
# - drop rewteets and replies from the table
#
# #### `api_df` table
# - Keep original tweets only
# ### Tidiness issues
# - values are column names ('doggo','floofer','pupper','puppo') in `twitter_archive` table
# - Merge `twittwer_archive` with `api_df` tables
# - columns headers are values, not variable names in `image_prediction` table
# # 3. Data Cleaning
archive_clean = twitter_archive.copy()
image_prediction_clean = image_prediction.copy()
api_clean = api_df.copy()
# ## 3.1 'retweeted_status_timestamp' and 'timestamp' should be datatime not object in `twitter_archive` table
# ### Define
# - 'retweeted_stauts_timestamp' and 'timestamp' should be datetime not object.
# - we should convert data type of each column from object to datetime
# ### Code
archive_clean['timestamp'] = pd.to_datetime(archive_clean['timestamp'])
archive_clean['retweeted_status_timestamp'] = pd.to_datetime(archive_clean['retweeted_status_timestamp'])
# ### Test
archive_clean.info()
# ## 3.2 Some columns have represntations of null values as 'None' not 'NaN' in `twitter_archive` table
# ### Define
# convert 'None' values with "" as empty string, in the columnns 'doggo','floofer', 'pupper'and 'puppo' in `twitter_archive` table
# ### Code
# Normalize the four dog-stage columns: the literal string "None" becomes an
# empty string so the stages can later be concatenated into one column.
for stage_col in ['doggo', 'floofer', 'pupper', 'puppo']:
    archive_clean[stage_col].replace({"None": ""}, inplace=True)
# ### Test
archive_clean.sample(5)
# ## 3.3 values are column names ('doggo','floofer','pupper','puppo') in `twitter_archive` table
# ### Define
# - concatenate the columns in one column 'dog_breed'
# - drop the old columns
# - Replace the empty string to np.nan
# - if the value of 'dog_breed' is combined two type, make it readable
# ### Code
#combine the four stage columns into a single 'dog_breed' column
stage_cols = ['doggo', 'floofer', 'pupper', 'puppo']
archive_clean['dog_breed'] = archive_clean[stage_cols].apply(
    lambda row: "".join(row.values.astype(str)), axis=1)
#the individual stage columns are no longer needed
archive_clean.drop(stage_cols, axis=1, inplace=True)
#empty strings become NaN; rows combining two stages get a readable
#hyphenated form (few enough cases to map explicitly)
archive_clean['dog_breed'].replace(
    {"": np.nan,
     "doggopuppo": "doggo-puppo",
     "doggofloofer": "doggo-floofer",
     "doggopupper": "doggo-pupper"},
    inplace=True)
# ### Test
#check that all old columns combined successfully in new column 'dog_breed'
archive_clean['dog_breed'].value_counts()
#make sure old columns deleted
archive_clean.info()
#check all empty strings is converted to 'NaN'
archive_clean['dog_breed'].isnull().sum()
archive_clean.info()
#check values changed
archive_clean['dog_breed'].value_counts()
# ## 3.4 Nan values in 'expanded_urls' column, it represnt tweets with no image, should be dropped in `twitter_archive` table
# ### Define
# - drop any row with 'NaN' value in column 'expanded_urls' with dropna()
# ### Code
#the number of 'NaN' values
archive_clean.expanded_urls.isnull().sum()
#drop all rows have 'NaN' value in 'expanded_urls'
archive_clean.dropna(subset=['expanded_urls'], axis=0, inplace=True)
#to reset the index without any problem
archive_clean.reset_index(drop=True, inplace=True)
# ### Test
#check if there are any 'NaN' values
archive_clean.expanded_urls.isnull().sum()
#check the new number of rows after dropping
archive_clean.info()
# ## 3.5 Some tweets are actually retweets and replies not original tweets that have to be deleted in `twitter_archive` table
# ### Define
# - drop any row has value (not 'NaN') in column 'retweeted_status_id' because it is retweet not original tweet
# - drop any row has value (not 'NaN') in column 'in_reply_to_status_id' because it is reply not original tweet
# - drop retweets and replies columns as we don't need them anymore
# ### Code
# +
#get list of 'tweet_id' of replies
replies_tweet_id_list = list(archive_clean[archive_clean['in_reply_to_status_id'].notnull()]['tweet_id'])
#get list of 'tweet_id' of retweets
retweets_tweet_id_list = list(archive_clean[archive_clean['retweeted_status_id'].notnull()]['tweet_id'])
# -
archive_clean.drop(archive_clean[archive_clean['in_reply_to_status_id'].notnull()].index, inplace=True)
archive_clean.drop(archive_clean[archive_clean['retweeted_status_id'].notnull()].index, inplace=True)
#to reset the index without any problem
archive_clean.reset_index(drop=True, inplace=True)
#drop retweets and replies columns
archive_clean.drop(['in_reply_to_status_id', 'in_reply_to_user_id', 'retweeted_status_id', 'retweeted_status_user_id', 'retweeted_status_timestamp'], axis=1, inplace=True)
# ### Test
archive_clean.info()
# ## 3.6 drop retweets and replies in `image_prediction_clean` table
# ### Define
# - drop and 'tweet_id' that matches 'tweet_id' of replies or retweets
# ### Code
image_prediction_clean.info()
# +
#convert 'tweet_id' column in 'image_prediction_clean' table to list
image_tweet_list = list(image_prediction_clean['tweet_id'])
#get the intersection between tweet_id in image and replies
image_reply = list(set(image_tweet_list) & set(replies_tweet_id_list))
print(len(image_reply))
image_reply
# -
#drop all replies from 'image_prediction_clean' table
# Vectorized isin() drop replaces the original per-row iterrows() loop:
# same rows removed in one pass, and no mutation of the frame while
# iterating over it.
image_prediction_clean.drop(
    image_prediction_clean[image_prediction_clean['tweet_id'].isin(image_reply)].index,
    inplace=True)
#to reset the index without any problem
image_prediction_clean.reset_index(drop=True, inplace=True)
#get the intersection between tweet_id in image and retweets
image_retweet = list(set(image_tweet_list) & set(retweets_tweet_id_list))
print(len(image_retweet))
image_retweet
#drop all retweets from 'image_prediction_clean' table
# Vectorized isin() drop replaces the original per-row iterrows() loop:
# same rows removed in one pass, and no mutation of the frame while
# iterating over it.
image_prediction_clean.drop(
    image_prediction_clean[image_prediction_clean['tweet_id'].isin(image_retweet)].index,
    inplace=True)
#to reset the index without any problem
image_prediction_clean.reset_index(drop=True, inplace=True)
# ### Test
image_prediction_clean.info()
# ## 3.7 By comparing the number of rows in `image_prediction` and `twitter_archive` tables, we found that there are many tweets in `twitter_archive` table has no image. This rows should be dropped.
# ### Define
# - check 'tweet_id' in 'image_prediction' table and 'twitter_archive' table, then drop rows in the 'twitter_archive' that their 'tweet_id' not in 'image_prediction' table
# ### Code
archive_clean.info()
image_prediction_clean.info()
# +
#get column 'tweet_id' in 'archive_clean' and 'image_prediction_clean' tables into list
# (fixes a NameError: the list was created as 'archive_twet_list' but used
# below as 'archive_tweet_list')
archive_tweet_list = list(archive_clean['tweet_id'])
image_prediction_list = list(image_prediction_clean['tweet_id'])
#intersect the two id sets to find the original tweets that DO have an
#image prediction; rows outside this set are dropped in the next cell
messy_images = list(set(archive_tweet_list) & set(image_prediction_list))
print(len(messy_images))
messy_images
# -
#keep only rows of 'archive_clean' whose tweet has an image prediction
# Vectorized ~isin() drop replaces the original per-row iterrows() loop:
# same rows removed in one pass, and no mutation of the frame while
# iterating over it.
archive_clean.drop(
    archive_clean[~archive_clean['tweet_id'].isin(messy_images)].index,
    inplace=True)
#to reset the index without any problem
archive_clean.reset_index(drop=True, inplace=True)
# ### Test
archive_clean.info()
archive_list = list(archive_clean['tweet_id'])
image_list = list(image_prediction_clean['tweet_id'])
archive_list.sort()
image_list.sort()
if archive_list == image_list:
print("Matched")
# ## 3.8 Missing values in name column and invalid names less than 2 characters in `twitter_archive` table
# ### Define
# - correct names which have value 'a'
# ### Code
archive_clean.name.value_counts()
#check the text of name 'a' to make sure that it extracted right or not
text_name_a = list(archive_clean['text'].loc[(archive_clean['name'] == "a")])
text_name_a
#check the text of name 'an' to make sure that it extracted right or not
text_name_an = list(archive_clean['text'].loc[(archive_clean['name'] == "an")])
text_name_an
#try to re-extract the dog's name from the tweet text for rows where the
#original extraction picked up the article "a"/"an"; fall back to "None"
pattern = re.compile(r'(?:name(?:d)?)\s{1}(?:is\s)?([A-Za-z]+)')
for index, row in archive_clean.iterrows():
    if row['name'] in ("a", "an"):
        try:
            # row['name'] is exactly "a" or "an" here, so replacing the
            # whole value with the first regex capture is equivalent to
            # the original str.replace() on the full cell.
            archive_clean.loc[index, 'name'] = pattern.findall(row['text'])[0]
        except IndexError:
            # no name-like phrase found in the tweet text
            archive_clean.loc[index, 'name'] = "None"
# ### Test
#check all 'a' values are replaced
archive_clean.query('name == "a"')
#check all 'an' values are replaced
archive_clean.query('name == "an"')
#check None values are increased
archive_clean['name'].value_counts()
# ### Define
# - replace 'None' values with 'NaN' with np.nan
# ### Code
#replace "None" values with 'NaN'
archive_clean['name'].replace({"None": np.nan}, inplace=True)
# ### Test
#check that 640 "None" values are converted to "NaN"
archive_clean['name'].isnull().sum()
# ## 3.9 Keep original tweets only in `api_clean` table
# ### Define
# - delete any row its 'tweet_id' not found in 'tweet_id' column in 'archive_clean' table
# ### Code
api_clean.info()
# +
#get column 'tweet_id' in 'archive_clean' and 'api_df' tables into list
archive_list = list(archive_clean['tweet_id'])
api_list = list(api_clean['tweet_id'])
#get the intersection between the tables
out_list = list(set(api_list) & set(archive_list))
print(len(out_list))
out_list
# -
# Keep only rows whose tweet_id appears in both tables; the vectorized isin()
# mask replaces the original O(n^2) iterrows()+drop() loop and preserves the
# original row order.
api_clean = api_clean[api_clean['tweet_id'].isin(out_list)]
#to reset the index without any problem
api_clean.reset_index(drop=True, inplace=True)
# ### Test
api_clean.info()
# verify every tweet_id left in 'api_clean' also exists in 'archive_clean'
api_list = list(api_clean['tweet_id'])
print(len(list(set(archive_list) - set(api_list))))
flag = 0
if(set(api_list).issubset(set(archive_list))):
    flag = 1
if(flag):
    print("Done")
else:
    print("something went wrong!")
# ## 3.10 Deal with rating_numerator and rating_denominator to make sure it extracted in right way from the text in `twitter_archive` table
# ### Define
# - slice the records to investigate the right value of denominators that below or above 10
# ### Code
archive_clean['rating_denominator'].value_counts()
# Use single-step df.loc[mask, col] = value indexing; the original chained
# df[col].loc[mask] = value form triggers SettingWithCopyWarning and is not
# guaranteed to write back to the frame. The ==2 case is subsumed by < 10.
archive_clean.loc[archive_clean['rating_denominator'] < 10, 'rating_denominator'] = 10
pd.set_option('display.max_colwidth', 0)
archive_clean.loc[archive_clean['rating_denominator'] > 10, 'text']
archive_clean.loc[archive_clean['rating_denominator'] == 11, 'rating_denominator'] = 10
#count number of dogs in the picture related to denominator values above 10
dogs_count = archive_clean.rating_denominator[archive_clean['rating_denominator'] > 10] /10
dogs_count
archive_clean.duplicated().sum()
# scale each numerator by the number of dogs pictured and reset the denominator to 10
archive_clean.loc[archive_clean.rating_denominator > 10, ['rating_numerator', 'rating_denominator']] = [archive_clean.rating_numerator[archive_clean.rating_denominator > 10]/dogs_count , 10]
# ### Test
archive_clean['rating_denominator'].value_counts()
# ### Define
# - slice the records to investigate the right value of numerator that below 6 and above 15
# ### Code
archive_clean.rating_numerator.value_counts()
pd.set_option('display.max_colwidth', 0)
archive_clean.loc[archive_clean['rating_numerator'] > 15]
# +
# Fix the problem of numerators above 15 manually, using single-step
# df.loc[mask, col] indexing (the chained df[col].loc[mask] = value form
# raises SettingWithCopyWarning and may fail to write back to the frame).
archive_clean.loc[archive_clean['rating_numerator'] == 75, 'rating_numerator'] = 5
archive_clean.loc[archive_clean['rating_numerator'] == 27, 'rating_numerator'] = 11
# in the original image (it's link in the end of the text) the face of dog was cropped
archive_clean.loc[archive_clean['rating_numerator'] == 1776, 'rating_numerator'] = 15
archive_clean.loc[archive_clean['rating_numerator'] == 26, 'rating_numerator'] = 11
#in the image is snoop dogg not a real dog
archive_clean.loc[archive_clean['rating_numerator'] == 420, 'rating_numerator'] = 0
# -
#show the numerators less than 6
archive_clean.loc[archive_clean['rating_numerator'] < 6]
# Re-extract numerators (decimal-aware) from the tweet text; raw string avoids
# the invalid-escape warning, and the unused pre-compiled num_p was removed.
# NOTE(review): this blanket extraction overwrites the manual corrections made
# above (e.g. 1776 and 420 come straight back from the text) — consider doing
# the extraction first and the manual overrides afterwards.
archive_clean['rating_numerator'] = archive_clean.text.str.extract(r'(\d+\.?\d?\d?)\/\d{1,3}', expand = False).astype('float')
# ### Test
archive_clean.rating_numerator.value_counts()
# ## 3.11 columns headers are values, not variable names in `image_prediction` table
# ### Define
# - create 1 column for image-prediction and 1 column for confidence
# ### Code
image_prediction_clean.head()
# +
# Give the columns explicit names so the three prediction slots form one stub
# family (prediction/confidence/breed) per prediction level.
new_names = ['tweet_id', 'jpg_url', 'img_num']
for level in (1, 2, 3):
    new_names += ['prediction_%d' % level, 'confidence_%d' % level, 'breed_%d' % level]
image_prediction_clean.columns = new_names
# Melt the three wide prediction levels into long format: one row per
# (tweet, prediction_level) pair.
image_prediction_clean = pd.wide_to_long(
    image_prediction_clean,
    stubnames=['prediction', 'confidence', 'breed'],
    i=['tweet_id', 'jpg_url', 'img_num'],
    j='prediction_level',
    sep="_",
).reset_index()
# -
# ### Test
image_prediction_clean.head()
image_prediction_clean.duplicated().sum()
# ## 3.12 Merging `archive_clean` with `api_clean`
# ### Define
# - merging tables with merge() function
# ### Code
archive_clean.info()
api_clean.info()
# tweet ids present in 'archive_clean' but missing from 'api_clean'
# (these rows will get NaN API columns after the left merge below)
archive_list = list(archive_clean['tweet_id'])
api_list = list(api_clean['tweet_id'])
diff = list(set(archive_list) - set(api_list))
print(len(diff))
diff
#merge two tables with left merge to take all original tweets from 'archive_clean' table
# bug fix: merge with the cleaned API table (api_clean), not the raw api_df —
# otherwise the retweet/favorite columns would come from uncleaned data and the
# NaN check below (for ids missing from api_clean) could never succeed
df_combined = archive_clean.merge(api_clean, on="tweet_id", how="left")
df_combined
# ### Test
#make sure that the tweet_id not in 'api_clean' has NaN values on retweet, favorite, and user count columns
df_combined.query('tweet_id == 779123168116150273')
df_combined.info()
# # 4. Storing the Data
#store combined 'archive_clean' and 'api_clean' in 'twitter_archive_master.csv'
df_combined.to_csv('twitter_archive_master.csv', index=False)
#store 'image_prediction' table in another file
image_prediction_clean.to_csv('image_prediction_cleaned.csv', index=False)
# # 5. Analyze and Visualize
# ## 5.1 The relation between retweets and favorite
df_combined.info()
# NOTE(review): this palette is never used by the scatter plot below
color = ['#eff3ff', '#c6dbef', '#9ecae1', '#6baed6', '#4292c6', '#2171b5', '#084594']
df_combined.plot(kind="scatter", x="favorite_count", y="retweet_count", alpha=0.5)
plt.xlabel("Likes")
plt.ylabel("Retweets")
plt.title("The Relation between Retweets and Favorites");
plt.savefig('Retweets_with_Likes.png', bbox_inches='tight')
# - Retweets are positively correlated with favorites
# ## 5.2 The most popular dog breed
df_combined['dog_breed'].value_counts().plot(kind = 'barh')
plt.title('Most Popular Dog Breed')
plt.xlabel('Count')
plt.ylabel('Dog Breed');
plt.savefig('most_popular_dog.png', bbox_inches='tight')
# - Pupper is the most popular dog breed
# ## 5.3 Which are the top 10 predicted dog breeds
image_prediction_clean.head()
# keep only rows where the prediction actually is a dog breed
predicted = image_prediction_clean[image_prediction_clean['breed'] == True]
highest_predicted = predicted.groupby(['prediction']).mean()['confidence']
highest_predicted.sort_values(ascending=False).head(10)
colors = ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#fb8072', '#fdb462', '#b3de69', '#fccde5']
highest_predicted.sort_values(ascending=False).head(10).plot(kind="bar", color= colors, figsize=(15,8))
plt.ylabel("Average confidence", size=15)
plt.xlabel("Dog Breed", size=15)
plt.title("The top 10 predicted dogs", size=20);
plt.savefig('top_10_predicted_dogs.png', bbox_inches='tight')
# - The highest average confidence level in predicting the dog breed is the Bernese Mountain Dog. The second is the Komondor. Both breeds have a unique appearance, which probably made the prediction easier. The highest confidence level is 65% on average. In my opinion, this level is too low and for deeper analysis we should use another algorithm to be more accurate.
# ## 5.4 How changed the Retweet and Favorite Count over time?
# monthly means of retweets/favorites, grouped by (year, month)
df_combined.retweet_count.groupby([df_combined['timestamp'].dt.year, df_combined['timestamp'].dt.month]).mean().plot(kind='line')
df_combined.favorite_count.groupby([df_combined['timestamp'].dt.year, df_combined['timestamp'].dt.month]).mean().plot(kind='line')
plt.title('Retweet and Favorite over time', size =15)
plt.ylabel('Number of Tweets')
plt.xlabel('Time (Year, Month)')
plt.legend(('Retweet Count', 'Favorite Count'), fontsize=18);
plt.savefig('retweet_and_favorite_overtime');
# - number of retweets and favorites increased overtime
# ## 5.5 How many dogs are rated above 10 ?
df_combined['rating_numerator'].value_counts().sort_index().plot(kind='bar', figsize=(18,10))
plt.title ('Rating Numerator Distribution', size=15)
plt.xlabel('Rating Numerator')
plt.ylabel('Number of Ratings');
plt.savefig('rating_numerator_distribution');
# - the most often assigned numerator is 12
| wrangle_act.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np
# Load the color-quantization histogram (313 color bins) and the hull points,
# then scatter-plot the per-bin counts.
a = np.load('color_quantization/quantized_counts.npy')
b = np.load('color_quantization/pts_in_hull.npy')
print (a)
# a = np.reshape(a, [1, 313])
print (a.shape)
# print(a)
plt.scatter(range(0,313),a)
plt.show()
# bug fix: use function-call syntax like the other prints above — the bare
# `print a` statement only parses under Python 2
print(a)
| experiments/notebooks/View Color Quantizations.ipynb |
# + deletable=false editable=false
# Initialize OK (the okpy autograder) with this exam's configuration file
from client.api.notebook import Notebook
ok = Notebook("test-exam.ok")
# -
# # Exam
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ### Question 1
# Simulate 1000 rolls of a die.
rolls = ...  # student answer placeholder (e.g. np.random.randint(1, 7, 1000))
# ### Question 2
# Write an infinite generator of the Fibonacci sequence.
def fib():
    """Yield the Fibonacci sequence 0, 1, 1, 2, 3, 5, ... indefinitely."""
    prev, curr = 0, 1
    yield prev
    while True:
        yield curr
        prev, curr = curr, prev + curr
# ## End Exam
# + [markdown] deletable=false editable=false
# ## Submission
#
# Once you're finished, select "Save and Checkpoint" in the File menu and then execute the submit cell below. The result will contain a link that you can use to check that your assignment has been submitted successfully.
# + deletable=false editable=false
# Save your notebook first, then run this cell to submit.
_ = ok.submit()
# -
#
| test/dist-correct-ok/exam_65/test-exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HackerMath for ML
#
# # Introduction
#
# Intro to Stats & Maths for Machine Learning
#
#
#
# ---
#
# > What I cannot create, I do not understand
# -- <NAME>
#
# ---
#
# # Philosophy of HackerMath
#
# > Hacker literally means developing mastery over something.
# -- <NAME>
#
# <br>
#
# Here we will aim to learn Math essential for Data Science in this hacker way.
#
# ---
#
# # **Three Key Questions**
#
# - Why do you need to understand the math?
# - What math knowledge do you need?
# - Why approach it the hacker's way?
#
# ---
#
# # Approach
# - Understand the Math.
# - Code it to learn it.
# - Play with code.
#
# ---
#
# # Module 1: Linear Algebra
# ## Supervised ML - Regression, Classification
# - Solve $Ax = b$ for $ n \times n$
# - Solve $Ax = b$ for $ n \times p + 1$
# - Linear Regression
# - Ridge Regularization (L2)
# - Bootstrapping
# - Logistic Regression (Classification)
#
# ---
#
# # Module 2: Statistics
# ## Hypothesis Testing: A/B Testing
# - Basic Statistics
# - Distributions
# - Shuffling
# - Bootstrapping & Simulation
# - A/B Testing
#
# ---
#
# # Module 3: Linear Algebra contd.
# ## Unsupervised ML: Dimensionality Reduction
# - Solve $Ax = \lambda x$ for $ n \times n$
# - Eigenvectors & Eigenvalues
# - Principal Component Analysis
# - Cluster Analysis (K-Means)
#
# ---
#
#
#
# > It's tough to make predictions, especially about the future.
# -- <NAME>
#
# ## What is Machine Learning (ML)?
#
# > [Machine learning is the] field of study that gives computers the ability to learn without being explicitly programmed.
# -- *<NAME>*
#
# > Machine learning is the study of computer algorithm that improve automatically through experience
# -- *<NAME>*
# ## ML Problems
# - "Is this cancer?"
# - "What is the market value of this house?"
# - "Which of these people are friends?"
# - "Will this person like this movie?"
# - "Who is this?"
# - "What did you say?"
# - "How do you fly this thing?"
#
# ## ML in use Everyday
# - Search
# - Photo Tagging
# - Spam Filtering
# - Recommendation
# - ...
#
# ## Broad ML Application
# - Database Mining e.g. Clickstream data, Business data
# - Automating e.g. Handwriting, Natural Language Processing, Computer Vision
# - Self Customising Program e.g. Recommendations
#
# ---
# ## ML Thought Process
#
# 
#
#
# ## Learning Paradigm
# - *Supervised* Learning
# - *Unsupervised* Learning
# - *Reinforcement* Learning
# - *Online* Learning
#
# ## Supervised Learning
# - Regression
# - Classification
#
# 
#
# ## Unsupervised Learning
# - Clustering
# - Dimensionality Reduction
#
# 
# ## ML Pipeline
#
# - *Frame*: Problem definition
# - *Acquire*: Data ingestion
# - *Refine*: Data wrangling
# - *Transform*: Feature creation
# - *Explore*: Feature selection
# - *Model*: Model creation & assessment
# - *Insight*: Communication
# ## Linear Regression
#
# 
#
# ---
#
# ### Linear Relationship
#
# $$ y_i = \alpha + \beta_1 x_1 + \beta_2 x_2 + .. $$
#
# ### Objective Function
#
# $$ \epsilon = \sum_{k=1}^n (y_i - \hat{y_i} ) ^ 2 $$
#
# *Interactive Example: [http://setosa.io/ev/](http://setosa.io/ev/ordinary-least-squares-regression/)*
#
# ## Logit Function
#
# $$ \sigma (t)={\frac {e^{t}}{e^{t}+1}}={\frac {1}{1+e^{-t}}}$$
#
# 
#
#
# ## Logistic Regression
#
# 
#
#
# ## Logistic Relationship
#
# Find the $ \beta $ parameters that best fit:
# $ y=1 $ if $\beta _{0}+\beta _{1}x+\epsilon > 0$
# $ y=0$, otherwise
#
# Follows:
#
# $$ P(x)={\frac {1}{1+e^{-(\beta _{0}+\beta _{1}x)}}} $$
#
# ---
# ## Fitting a Model
#
# 
#
# ## Bias-Variance Tradeoff
#
# 
#
# ## Train and Test Datasets
#
# Split the Data - 80% / 20%
#
# 
# ## Train and Test Datasets
#
# Measure the error on Test data
#
# 
#
#
# ## Model Complexity
# 
#
#
# ## Cross Validation
# 
#
# ## Regularization
#
# Attempts to impose Occam's razor on the solution
#
# 
#
#
# ## Model Evaluation
#
# Mean Squared Error
#
# $$ MSE = 1/n \sum_{k=1}^n (y_i - \hat{y_i} ) ^ 2 $$
#
#
# ## Model Evaluation
#
# Confusion Matrix
#
# 
#
#
# ## Model Evaluation
#
# **Classification Metrics**
#
# 
#
# Recall (TPR) = TP / (TP + FN)
# <br>
# Precision = TP / (TP + FP)
# <br>
# Specificity (TNR) = TN / (TN + FP)
#
# ## Model Evaluation
#
# **Receiver Operating Characteristic Curve**
#
# Plot of TPR vs FPR at different discrimination threshold
#
# 
#
# ---
#
# ## Decision Tree
#
# Example: Survivor on Titanic
#
# 
#
# ## Decision Tree
#
# - Easy to interpret
# - Little data preparation
# - Scales well with data
# - White-box model
# - Instability — changing variables, altering sequence
# - Overfitting
#
# ## Bagging
# - Also called bootstrap aggregation, reduces variance
# - Uses decision trees and uses a model averaging approach
#
# ## Random Forest
# - Combines bagging idea and random selection of features.
# - Similar to how decision trees are constructed — but at each split, a random subset of features is used.
#
# 
# ## Challenges
#
# > If you torture the data enough, it will confess.
# -- <NAME>
#
# - Data Snooping
# - Selection Bias
# - Survivor Bias
# - Omitted Variable Bias
# - Black-box model Vs White-Box model
# - Adherence to regulations
| Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/MACHINE_LEARNING/HACKERMATH_FOR_ML/Module_0_Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import os
import re
# +
# Read the LD2011_2014 electricity dataset (semicolon separated, decimal
# commas) and keep only the consumption column of customer `cid`.
# `with` guarantees the file handle is closed even if parsing raises
# (the original open()/close() pair leaked the handle on exceptions).
data=[]
cid=250
with open(os.path.join("e:/", "LD2011_2014.txt"), "r") as fld:
    for line in fld:
        if line.startswith("\"\";"):  # header row: empty first field
            continue
        cols = [float(re.sub(",",".", x)) for x in line.strip().split(";")[1:]]
        data.append(cols[cid])
# -
NUM_ENTRIES = 1000
# quick sanity plot of the first 1000 readings (each point = 15 minutes)
plt.plot(range(NUM_ENTRIES), data[:NUM_ENTRIES])
plt.xlabel("time (1pt = 15mins)")
plt.ylabel("electricity consumption")
plt.show()
# cache the extracted customer series so later runs can skip the raw file
np.save(os.path.join("./data", "LD_250.npy"), np.array(data))
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import math
# reload the cached series and scale it into [0, 1] for the LSTM
data = np.load(os.path.join("./data", "LD_250.npy"))
data[0]
data[:10]
data.shape
data = data.reshape(-1,1)
data.shape
scaler = MinMaxScaler(feature_range=(0,1), copy=False)
data = scaler.fit_transform(data)
data.shape
data[:10]
# +
NUM_TIMESTEPS=20
# Sliding windows: X[i] holds NUM_TIMESTEPS consecutive readings and Y[i] the
# reading one step after the window's end (note the +1 skips one point).
# Sizing the arrays to the number of complete windows fixes the original
# over-allocation of data.shape[0] rows, which left ~NUM_TIMESTEPS+1
# all-zero padding samples at the tail of X and Y.
num_windows = len(data) - NUM_TIMESTEPS - 1
X = np.zeros((num_windows, NUM_TIMESTEPS))
Y = np.zeros((num_windows, 1))
for i in range(num_windows):
    X[i] = data[i:i+NUM_TIMESTEPS].T
    Y[i] = data[i+NUM_TIMESTEPS+1]
X = np.expand_dims(X,axis=2)
# -
X.shape
X[-1]
Y.shape
X[0]
Y[0]
sp = int(0.7*len(data))
Xtrain,Xtest,Ytrain,Ytest = X[:sp], X[sp:], Y[:sp],Y[sp:]
Xtrain.shape
Xtest.shape
Ytrain.shape
# +
HIDDEN_UNITS = 10
BATCH_SIZE = 96 # 96 fifteen-minute intervals per day
# stateless LSTM: hidden state is reset after every batch
model1 = Sequential()
model1.add(LSTM(HIDDEN_UNITS, input_shape=(NUM_TIMESTEPS, 1), return_sequences=False))
model1.add(Dense(1))
model1.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_squared_error"])
# stateful LSTM: hidden state carries across batches, so the batch size must
# be fixed in the input shape and the data must not be shuffled
model2 =Sequential()
model2.add(LSTM(HIDDEN_UNITS, stateful=True, batch_input_shape=(BATCH_SIZE, NUM_TIMESTEPS, 1), return_sequences=False))
model2.add(Dense(1))
model2.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_squared_error"])
# -
print(model1.summary())
print(model2.summary())
EPOCHS =5
model1.fit(Xtrain, Ytrain, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(Xtest,Ytest), shuffle=False)
score1,_ = model1.evaluate(Xtest, Ytest, batch_size=BATCH_SIZE)
rmse1 = math.sqrt(score1)
print("Stateless, MSE: {:.3f}, RMSE: {:.3f}".format(score1, rmse1))
# +
# the stateful model needs dataset sizes that are exact multiples of BATCH_SIZE
train_size = (Xtrain.shape[0] // BATCH_SIZE) * BATCH_SIZE
test_size = (Xtest.shape[0] // BATCH_SIZE) * BATCH_SIZE
Xtrain2, Ytrain2 = Xtrain[:train_size], Ytrain[:train_size]
Xtest2, Ytest2 = Xtest[:test_size], Ytest[:test_size]
# -
Xtrain2.shape
Xtest2.shape
# train one epoch at a time so the carried state can be reset between epochs
for i in range(EPOCHS):
    model2.fit(Xtrain2, Ytrain2, batch_size=BATCH_SIZE, epochs=1, validation_data=(Xtest2, Ytest2), shuffle=False)
    model2.reset_states()
score2, _ = model2.evaluate(Xtest2, Ytest2, batch_size=BATCH_SIZE)
rmse2 = math.sqrt(score2)
print("MSE: {:.3f}, RMSE: {:.3f}".format(score2, rmse2))
| deep-learning/keras/electricity-consumption/Electricity-consumption.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from urllib.request import urlopen
from bs4 import BeautifulSoup
from bs4 import Comment
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import VotingRegressor
import scipy.stats as st
import re
from openpyxl import load_workbook
import seaborn as sns
import timeit
import sqlite3
# # Load Data From CSV
# load the prepared training features (5 seasons, 5-game averages, no playoffs)
df_train = pd.read_csv('train_data/n_seasons5_avgGames5_playoffsFalse_skipDays18_minmin25.csv')
df_train.drop(columns=['Unnamed: 0'], inplace=True)  # drop the saved index column
df_train.head()
# # Look for Strong Correlations
# correlation matrix over the feature columns (columns before index 6 are metadata)
c = df_train.iloc[:, 6:].corr()
fig = plt.subplots(figsize=(15,10))
sns.heatmap(c,cmap=sns.color_palette("vlag", as_cmap=True))
mean_corr_before = np.mean(np.mean(c))
# +
# collect every feature pair with Pearson r > 0.8 (each unordered pair once)
mask = c > 0.8
corr = []
for ind in c.index:
    for col in c.columns:
        if mask.loc[ind, col]:
            if col != ind:
                if (col, ind) not in corr:
                    corr.append((ind, col))
# scatter-plot each highly correlated pair on a grid
cols = 5
rows = int(np.ceil(len(corr) / cols))
fig, axs = plt.subplots(rows, cols, figsize=(15, 18))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
i = 0
for row in range(rows):
    for col in range(cols):
        axs[row, col].scatter(df_train[corr[i][0]], df_train[corr[i][1]])
        axs[row, col].set_xlabel(corr[i][0])
        axs[row, col].set_ylabel(corr[i][1])
        i+=1
        # NOTE(review): this break only exits the inner loop; if the grid has
        # a whole spare row, corr[i] would raise IndexError on the next row
        if i == len(corr):
            break
# -
# The above plots show features with Pearson R correlations greater than 0.8. It may be wise to remove features with high collinearity. Compare model accuracy with and without redundant features. Potential features to remove are:
# 1. FG (collinear with FGA, USG%, and PTS)
# 2. team_ORtg (collinear with team_avg_pts, team_eFGp)
# 3. ORB% (collinear with ORB)
# 4. DRB% (collinear with DRB, TRB%)
# 5. STL% (collinear with STL)
# 6. BLK% (collinear with BLK)
# 7. opp_diff_eFGp (collinear with off_diff_ORtg)
# 8. FT (collinear with FTA, PTS, USG%)
# 9. USG% (collinear with PTS, FG, FGA)
# 10. 3P (collinear with 3PA)
# 11. AST% (collinear with AST)
# 12. TS% (collienar with eFG%, ORtg)
# +
# collect every feature pair with Pearson r < -0.5 (each unordered pair once)
mask = c < -0.5
corr = []
for ind in c.index:
    for col in c.columns:
        if mask.loc[ind, col]:
            if col != ind:
                if (col, ind) not in corr:
                    corr.append((ind, col))
# scatter-plot each negatively correlated pair on a grid
cols = 5
rows = int(np.ceil(len(corr) / cols))
fig, axs = plt.subplots(rows, cols, figsize=(15,12))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
i = 0
for row in range(rows):
    for col in range(cols):
        axs[row, col].scatter(df_train[corr[i][0]], df_train[corr[i][1]])
        axs[row, col].set_xlabel(corr[i][0])
        axs[row, col].set_ylabel(corr[i][1])
        i+=1
        # NOTE(review): break only exits the inner loop — see the note on the
        # positive-correlation grid above
        if i == len(corr):
            break
# -
# The negative correlations aren't particularly strong so there may not be a need to remove any features.
# # Outliers
# +
# box plots of every feature column (from index 10 on) to spot outliers
cols = 8
rows = int(np.ceil((len(df_train.columns) - 10) / cols))
fig, axs = plt.subplots(rows, cols, figsize=(15,30))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
i = 10
for row in range(rows):
    for col in range(cols):
        sns.boxplot(data=df_train[df_train.columns[i]], ax=axs[row, col])
        axs[row, col].set_title(df_train.columns[i])
        i+=1
        # NOTE(review): checking after the increment only exits the inner
        # loop; a grid with a full spare row would still raise IndexError
        if i == len(df_train.columns):
            break
# -
# # look for non-normally distributed features
# If a feature does not have an approximately normal distribution, it may need a transformation such as a log transform.
# +
# Histogram grid for every feature column from index 10 onward.
cols = 5
rows = int(np.ceil((len(df_train.columns) - 10) / cols))
fig, axs = plt.subplots(rows, cols, figsize=(15,30))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
i = 10
for row in range(rows):
    for col in range(cols):
        # guard BEFORE indexing: the original compared i to len(df_train)
        # (the row count), so the loop could never stop in time and
        # df_train.columns[i] raised IndexError once the grid had spare cells
        if i == len(df_train.columns):
            break
        axs[row, col].hist(df_train[df_train.columns[i]])
        axs[row, col].set_title(df_train.columns[i])
        i+=1
# -
# record each feature's skewness before any transformation
skew_before = []
for col in df_train.columns[10:]:
    skew_before.append(st.skew(df_train[col]))
# +
# For each feature, try several power transforms (x, sqrt(x), cbrt(x), ...)
# and keep the exponent that brings the skewness closest to zero.
powers = [1, 1/2, 1/3, 1/4, 1/5]
power_for_cols = []
skew_for_cols = []
for col in df_train.columns[10:]:
    skew = []
    for power in powers:
        x = df_train[col].apply(lambda x: np.power(x, power))
        x = st.skew(x)
        # 999 is a sentinel that disqualifies transforms with extreme skew
        if x < 5 and x > -5:
            skew.append(x)
        else:
            skew.append(999)
    ind = np.argmin(np.abs(skew))
    power = powers[ind]
    # apply the winning transform in place
    df_train[col] = df_train[col].apply(lambda x: np.power(x, power))
    power_for_cols.append(power)
    skew_for_cols.append(skew[ind])
# record the skewness after transformation for comparison
skew_after = []
for col in df_train.columns[10:]:
    skew_after.append(st.skew(df_train[col]))
# +
# Re-plot the histograms after the power transforms; each title shows the
# column name, the chosen power exponent, and the resulting skewness.
cols = 5
rows = int(np.ceil((len(df_train.columns) - 10) / cols))
fig, axs = plt.subplots(rows, cols, figsize=(15,30))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
i = 10
for row in range(rows):
    for col in range(cols):
        # guard BEFORE indexing: the original compared i to len(df_train)
        # (the row count), which could never stop the loop in time and made
        # df_train.columns[i] raise IndexError on grids with spare cells
        if i == len(df_train.columns):
            break
        axs[row, col].hist(df_train[df_train.columns[i]])
        axs[row, col].set_title(df_train.columns[i] + '_' +
                                str(round(power_for_cols[i-10],2)) + '_' +
                                str(round(skew_for_cols[i-10],2)), fontsize=10)
        i+=1
# -
# summarize how much the transforms reduced skewness overall
skew_before_range = np.max(skew_before) - np.min(skew_before)
skew_after_range = np.max(skew_after) - np.min(skew_after)
skew_before_avg = np.mean(skew_before)
skew_after_avg = np.mean(skew_after)
print('Max skewness before transform: ', np.max(skew_before))
print('Max skewness after transform: ', np.max(skew_after))
print('Range of skewness before transform: ', skew_before_range)
print('Range of skewness after transform: ', skew_after_range)
print('Average skewness before transform: ', skew_before_avg)
print('Average skewness after transform: ', skew_after_avg)
# # Remove multicollinearity
# features flagged as redundant in the r > 0.8 analysis above
remove_cols = ['FG', 'team_ORtg', 'ORB%', 'DRB%', 'STL%', 'BLK%', 'opp_diff_eFGp', 'FT',
               'USG%', '3P', 'AST%', 'TS%']
df_train.drop(columns=remove_cols, inplace=True)
c = df_train.iloc[:, 6:].corr()
fig = plt.subplots(figsize=(15,10))
_ = sns.heatmap(c,cmap=sns.color_palette("vlag", as_cmap=True))
mean_corr_after = np.mean(np.mean(c))
print('mean correlation before removing columns with high collinearity: ', mean_corr_before)
print('mean correlation after removing columns with high collinearity: ', mean_corr_after)
# # relationship of features and target variable
# +
# scatter each remaining feature against the prediction target, annotating
# the Pearson correlation in each subplot title
target = 'target_PTS'
cols = 5
rows = int(np.ceil((len(df_train.columns) - 10) / cols))
fig, axs = plt.subplots(rows, cols, figsize=(15,30))
plt.subplots_adjust(wspace=0.4, hspace=0.4)
i = 10
for row in range(rows):
    for col in range(cols):
        corr = round(df_train[[df_train.columns[i], target]].corr().iloc[0,1], 2)
        axs[row, col].scatter(df_train[df_train.columns[i]], df_train[target])
        axs[row, col].set_title(df_train.columns[i] + '_' + str(corr))
        i+=1
        # NOTE(review): post-increment check only exits the inner loop —
        # same IndexError hazard as the other plot grids in this notebook
        if i == len(df_train.columns):
            break
# -
# This exploratory analysis reveals the need to drop certain columns to reduce multicollinearity and to transform certain features to get a more approximately normal distribution. This will be implemented as a function at the beginning of Jupyter Notebook prior to a deep learning model.
| NBA_exploratory_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bilstm-crf
# language: python
# name: bilstm-crf
# ---
# ## Introduction
# Even though the project as such focuses on implementing a Bidirectional Long Short Term Memory - Conditional Random Field model to help us label sentences, this notebook mostly focuses on exploring a new methodology inspired by the paper.
#
# The contrasting difference between this and our project is that instead of using pre-trained word embeddings on our custom corpus, we will make use of a CNN layer to generate character embeddings.
#
# So the aim of this notebook would be to implement a state of the art Bi-directional LSTM-CNN-CRF architecture (Published at ACL'16. [Link To Paper](http://www.aclweb.org/anthology/P16-1101)) for Named Entity Recognition using Pytorch.
#
| BiLSTM-CRF Notebooks/BiLSTM-CNN-CRF - PyTorch Approach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice Assignment: Understanding Distributions Through Sampling
#
# ** *This assignment is optional, and I encourage you to share your solutions with me and your peers in the discussion forums!* **
#
#
# To complete this assignment, create a code cell that:
# * Creates a number of subplots using the `pyplot subplots` or `matplotlib gridspec` functionality.
# * Creates an animation, pulling between 100 and 1000 samples from each of the random variables (`x1`, `x2`, `x3`, `x4`) for each plot and plotting this as we did in the lecture on animation.
# * **Bonus:** Go above and beyond and "wow" your classmates (and me!) by looking into matplotlib widgets and adding a widget which allows for parameterization of the distributions behind the sampling animations.
#
#
# Tips:
# * Before you start, think about the different ways you can create this visualization to be as interesting and effective as possible.
# * Take a look at the histograms below to get an idea of what the random variables look like, as well as their positioning with respect to one another. This is just a guide, so be creative in how you lay things out!
# * Try to keep the length of your animation reasonable (roughly between 10 and 30 seconds).
# + jupyter={"outputs_hidden": false}
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib notebook
# generate 4 random variables from the random, gamma, exponential, and uniform distributions
x1 = np.random.normal(-2.5, 1, 10000)
x2 = np.random.gamma(2, 1.5, 10000)
x3 = np.random.exponential(2, 10000)+7
x4 = np.random.uniform(14,20, 10000)
# plot the histograms
# NOTE(review): the `normed=` keyword was removed in matplotlib 3.1 —
# newer versions require density=True instead
plt.figure(figsize=(9,3))
plt.hist(x1, normed=True, bins=20, alpha=0.5)
plt.hist(x2, normed=True, bins=20, alpha=0.5)
plt.hist(x3, normed=True, bins=20, alpha=0.5)
plt.hist(x4, normed=True, bins=20, alpha=0.5);
plt.axis([-7,21,0,0.6])
plt.text(x1.mean()-1.5, 0.5, 'x1\nNormal')
plt.text(x2.mean()-1.5, 0.5, 'x2\nGamma')
plt.text(x3.mean()-1.5, 0.5, 'x3\nExponential')
plt.text(x4.mean()-1.5, 0.5, 'x4\nUniform')
# + jupyter={"outputs_hidden": false}
# 2x2 grid: one static histogram per distribution (fresh 1000-point samples)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, sharey=True)
fig.tight_layout()
axs = [ax1, ax2, ax3, ax4]
x1 = np.random.normal(-2.5, 1, 1000)
x2 = np.random.gamma(2, 1.5, 1000)
x3 = np.random.exponential(2, 1000)
x4 = np.random.uniform(14,20, 1000)
axs[0].hist(x1, normed=True, bins=20, color='lightcoral')
axs[1].hist(x2, normed=True, bins=20, color='skyblue')
axs[2].hist(x3, normed=True, bins=20, color='gray')
axs[3].hist(x4, normed=True, bins=20, color='tan')
axs[0].set_title('Normal Distribution', fontsize=10)
axs[1].set_title('Gamma Distribution', fontsize=10)
axs[2].set_title('Exponential Distribution', fontsize=10)
axs[3].set_title('Uniform Distribution', fontsize=10)
# + jupyter={"outputs_hidden": false}
# The bins have to be adjusted to see a better plot; the animation tends to be slow
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
# %matplotlib notebook
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, sharey=True)
# fig.tight_layout()
axs = [ax1, ax2, ax3, ax4]
# Generate the different distributions
x1 = np.random.normal(-2.5, 1, 10000)
x2 = np.random.gamma(2, 1.5, 10000)
x3 = np.random.exponential(2, 10000)
x4 = np.random.uniform(14,20, 10000)
x = [x1, x2, x3, x4]
# Axis limits for all plots
a1 = [-7.5,2.5,0,0.6]
a2 = [-1,10,0,0.6]
a3 = [-1,17,0,0.6]
a4 = [13,21,0,0.6]
aa = [a1, a2, a3, a4]
# Number of bins (fixed bin edges per distribution)
b1 = np.arange(-7.5,2.5,0.2)
b2 = np.arange(-1,10,0.2)
b3 = np.arange(-1,17,0.2)
b4 = np.arange(13,21,0.2)
b= [b1, b2, b3, b4]
# Generate the titles
titles = ['Normal Distribution','Gamma Distribution','Exponential Distribution','Uniform Distribution']
# Annotation positions (x coordinate of the sample-count label per subplot)
anno = [-1, 6.5, 11, 18]
# selecting colors
c = ['lightcoral', 'skyblue','violet','tan']
# Creating the animation function; n is the frame count (100 samples per frame)
n = 100
def update(curr):
    """Animation callback: redraw every histogram with the first 100*curr samples."""
    # stop the animation once all frames have been shown
    if curr == n:
        a.event_source.stop()
    # redraw each of the four distributions with the current sample prefix
    for idx, samples in enumerate(x):
        ax = axs[idx]
        ax.cla()
        ax.hist(samples[:100*curr], normed=True, bins=b[idx], color=c[idx])
        ax.axis(aa[idx])
        ax.set_title(titles[idx], fontdict={'fontweight':'bold', 'fontsize':10})
        ax.set_ylabel('Normalized Frequency')
        ax.set_xlabel('Value')
        ax.annotate('n = {}'.format(100*curr), [anno[idx], 0.5])
    plt.tight_layout()
# drive update() every 100 ms; `a` must stay referenced or the animation is GC'd
a = animation.FuncAnimation(fig, update, interval=100)
# ani.save('sample.mp4', fps=60, dpi=200)
| Assignment Submissions/Week 3/Sample Assign 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
@author: <NAME> <<EMAIL>>
@brief: group based relevance features
@note: such features are not used in final submission (except GroupRelevance_Size)
"""
# +
import string
import numpy as np
import pandas as pd
import config
from config import TRAIN_SIZE
from utils import dist_utils, ngram_utils, nlp_utils, np_utils
from utils import logging_utils, time_utils, pkl_utils
from feature_base import BaseEstimator, StandaloneFeatureWrapper
# -
class GroupRelevance(BaseEstimator):
    """Group-based relevance features: aggregate the relevance scores of the
    other training samples that share the same search term.

    Rows with relevance == 0 are excluded from the reference frame, and the
    current sample's own row is dropped per lookup to avoid target leakage.
    """
    def __init__(self, obs_corpus, target_corpus, id_list, dfTrain, aggregation_mode=""):
        super().__init__(obs_corpus, target_corpus, aggregation_mode, id_list)
        # keep only labeled rows; copy so later filtering never touches the caller's frame
        self.dfTrain = dfTrain[dfTrain["relevance"] != 0].copy()
    def __name__(self):
        # feature name(s): one per aggregation statistic
        if isinstance(self.aggregation_mode, str):
            feat_name = "GroupRelevance_%s"%string.capwords(self.aggregation_mode)
        elif isinstance(self.aggregation_mode, list):
            feat_name = ["GroupRelevance_%s"%string.capwords(m) for m in self.aggregation_mode]
        return feat_name
    def transform_one(self, obs, target, id):
        """Return relevance values of all OTHER rows with search_term == obs,
        or [MISSING_VALUE_NUMERIC] when the group is empty.

        The original `if df is not None` guards were dead code: boolean
        indexing on a DataFrame always yields a DataFrame, never None.
        """
        group = self.dfTrain[self.dfTrain["search_term"] == obs]
        group = group[group["id"] != id]  # exclude the current sample itself
        if group.shape[0] > 0:
            return group["relevance"].values.tolist()
        return [config.MISSING_VALUE_NUMERIC]
# -------------------------------- Main ----------------------------------
def main():
    """Generate group-relevance features per CV fold and for the full data.

    Side effects: writes a log file under config.LOG_DIR and feature files
    under config.FEAT_DIR via StandaloneFeatureWrapper.
    """
    logname = "generate_feature_group_relevance_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfTrain = dfAll.iloc[:TRAIN_SIZE].copy()
    ## run python3 splitter.py first
    split = pkl_utils._load("%s/splits_level1.pkl"%config.SPLIT_DIR)
    n_iter = len(split)
    ## for cv
    for i in range(n_iter):
        trainInd, validInd = split[i][0], split[i][1]
        # Only this fold's training rows may contribute relevance values.
        dfTrain2 = dfTrain.iloc[trainInd].copy()
        sub_feature_dir = "%s/Run%d" % (config.FEAT_DIR, i+1)
        # [1:] keeps only "product_title"; "search_term" is deliberately skipped.
        obs_fields = ["search_term", "product_title"][1:]
        aggregation_mode = ["mean", "std", "max", "min", "median", "size"]
        param_list = [dfAll["id"], dfTrain2, aggregation_mode]
        sf = StandaloneFeatureWrapper(GroupRelevance, dfAll, obs_fields, param_list, sub_feature_dir, logger)
        sf.go()
    ## for all
    sub_feature_dir = "%s/All" % (config.FEAT_DIR)
    obs_fields = ["search_term", "product_title"][1:]
    aggregation_mode = ["mean", "std", "max", "min", "median", "size"]
    param_list = [dfAll["id"], dfTrain, aggregation_mode]
    sf = StandaloneFeatureWrapper(GroupRelevance, dfAll, obs_fields, param_list, sub_feature_dir, logger)
    sf.go()
if __name__ == "__main__":
    main()
| Code/feature_group_relevance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Project : Wrangle and Analyze Twitter Archive
# ### By <NAME>
# > **In this project, I will wrangle data to create interesting and trustworthy analyses and visualizations. The Twitter archive is great, but it only contains very basic tweet information. Additional gathering, then assessing and cleaning is required for "Wow!"-worthy analyses and visualizations.**
# ## Introduction
# > The dataset that I will be wrangling (and analyzing and visualizing) is the tweet archive of Twitter user @dog_rates, also known as WeRateDogs. WeRateDogs is a Twitter account that rates people's dogs with a humorous comment about the dog. These ratings almost always have a denominator of 10. The numerators, though? Almost always greater than 10. 11/10, 12/10, 13/10, etc. Why? Because "they're good dogs Brent." WeRateDogs has over 4 million followers and has received international media coverage.
# ## Data Gathering
#
# The first step in our project is to gather data from different sources and in diffrent formats, which is the most challenging task of this project.
# Here, I will be gathering data from three different sources.
# - Import the packages
# +
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import requests
import seaborn as sns
import tweepy
from datetime import datetime
from functools import reduce
# %matplotlib inline
# -
# ### First Source - Local file
# > ### Enhanced Twitter Archive
# - The WeRateDogs Twitter archive contains basic tweet data for all 5000+ of their tweets but here we have filtered around 2000+ tweets with ratings.
# NOTE(review): absolute local path — this only runs on the original author's
# machine; a relative path would make the notebook portable.
df_local=pd.read_csv('C:/Users/somya/Desktop/wrangle/twitter-archive-enhanced.csv')
df_local.head()
# ## Second Source - URL
# > ## Image predictions
# - The tweet image predictions, i.e., what breed of dog (or other object, animal, etc.) is present in each tweet according to a neural network. This file (image_predictions.tsv) is hosted on Udacity's servers and we will be downloading it programmatically using the Requests library and the given URL-https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv
# Download the TSV once and cache it to disk before parsing.
given_url='https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
response=requests.get(given_url)
with open(os.path.join('image_predictions.tsv'), mode ='wb') as file:
    file.write(response.content)
df_url = pd.read_csv('image_predictions.tsv', sep = '\t')
df_url.head()
# ## Third Source - Twitter API
#
import time
# +
# Placeholder credentials — replace with real keys before running the API cell.
consumer_key = '...'
consumer_secret = '...'
access_token = '...'
access_secret = '...'
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token, access_secret)
# JSONParser makes API calls return plain dicts instead of tweepy models.
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
#api = tweepy.API(auth, wait_on_rate_limit=True)
# NOTE TO STUDENT WITH MOBILE VERIFICATION ISSUES:
# df_1 is a DataFrame with the twitter_archive_enhanced.csv file. You may have to
# change line 17 to match the name of your DataFrame with twitter_archive_enhanced.csv
# NOTE TO REVIEWER: this student had mobile verification issues so the following
# Twitter API code was sent to this student from a Udacity instructor
# Tweet IDs for which to gather additional data via Twitter's API
#tweet_ids = df_local.tweet_id.values
#len(tweet_ids)
#tweet_ids = list(df_local['tweet_id'])
# Query the Twitter API for each tweet ID, tracking successes and failures.
start = time.time()
tweet_ids = df_local.tweet_id.values
tweet_data = []
tweet_id_success = []
tweet_id_missing = []
for tweet_id in tweet_ids:
    try:
        # BUG FIX: wait_on_rate_limit / wait_on_rate_limit_notify are
        # tweepy.API() constructor options, not get_status() arguments.
        # Passing them here raised TypeError on every call, and the bare
        # except then silently marked *all* tweets as missing.
        data = api.get_status(tweet_id, tweet_mode='extended')
        tweet_data.append(data)
        tweet_id_success.append(tweet_id)
    except Exception:
        # Tweet deleted/protected, or a transient API error.
        tweet_id_missing.append(tweet_id)
        print(tweet_id)
end = time.time()
print(end - start)
# -
# Preview the first line of the API dump supplied with the project, then load
# the whole newline-delimited JSON file into a DataFrame.
with open('tweet-json.txt','r') as data:
    tweet_json = data.readline()
print(tweet_json)
df_tweet=pd.read_json('tweet-json.txt',orient='records',lines=True)
#Checking Dataset Dimensions
print(df_tweet.shape)
df_tweet.head()
df_tweet.columns
# Keep only the columns needed for the merge: id plus engagement counts.
df_tweet=df_tweet[["id","favorite_count","retweet_count"]]
df_tweet.head()
# > Now, we are done with the data gathering process from three different sources.
# ## Assessing
# > Now, we will assess our raw and unclean datasets one by one to find out the quality and tidiness issues.
# ### First Dataset - Twitter Archive
# - Assessing Visually
df_local
# > We observe that many rows did not mention the stage of dog that is all the four stages in many rows are None.
# - Assess Programmatically
df_local.shape
df_local.columns
df_local.info()
#
# - DataType of columns such as 'timestamp','retweeted_status_timestamp' are defined as String whereas it should be datetime.
# - There are missing expanded urls in the dataset.
# - There are 181 retweeted_status_id which means that our dataset contains retweets as well.
#
df_local['name'].value_counts()
# > Some of the names are 'a', 'an', 'the' which are not valid.
df_local['source'].head(50)
df_local['source'].nunique()
df_local['source'].value_counts()
# - Source names need to be redefined without tags.
# > **After assessing the dataset visually, we find that some of the rows are 'None' for all the four stages of a particular dog.We will find the rows which do not have a stage of dog.**
df_local.query('doggo=="None" and floofer=="None" and pupper=="None" and puppo=="None"')
df_local.query('doggo=="None" and floofer=="None" and pupper=="None" and puppo=="None"').shape[0]
# - There are 1976 rows with no definition of the dog's stage.
df_local['rating_numerator'].value_counts()
# > The common numerator ratings given by @weratedogs are 11,12,13,16 so on. But,here we find that most of the ratings are too high such as 1776,960,666 etc.
df_local['rating_denominator'].value_counts()
# > We know that @WeRateDogs keep their denominator as 10 always while rating dogs but here some of the ratings are 11,50,2,7,0,110 etc.
# ## Second Dataset - Image Prediction
# - Assessing visually
df_url
# - After assessing visually, we find that for the last row, all the predictions of dog breed are false, which means, some images are not dogs.
# - We will find the number of rows which do not contain the images of Dog.
df_url.query('p1_dog==False and p2_dog==False and p3_dog==False').shape[0]
df_url.info()
df_url.describe()
df_url['p1'].value_counts()
# - Some of the names of dog breed are not defined, like 'bookshop','bakery','book_jacket', 'orange'.
df_url['p2'].value_counts()
df_url['jpg_url'].value_counts()
# - The Image Urls are same for some images.
# ### Third Dataset - Twitter API
# - Assess Visually
df_tweet
df_tweet['id'].duplicated().sum()
df_tweet['favorite_count'].value_counts()
df_tweet['favorite_count'].duplicated().sum()
# ### Observations Summary from the above performed Assessments-
# > #### Quality Issues-
# - Many rows in the twitter enhanced dataset did not mention the stage of dog that is all the four stages in many rows are None.
# - There are 1976 rows with no definition of the dog's stage.
# - DataType of columns in the twitter enhanced dataset such as 'timestamp','retweeted_status_timestamp' are defined as String whereas it should be datetime.
# - There are missing expanded urls in the twitter enhanced dataset.
# - There are 181 retweeted_status_id which means that our dataset contains retweets as well.
# - We do not need retweets in our dataset for analysis so we need to remove retweet_user_id and other columns related to retweets.
# - Some of the names are 'a', 'an', 'the' which are not valid.
# - Source names need to be redefined without tags.
# - The common numerator ratings given by @weratedogs are 11,12,13,16 so on. But,here we find that most of the ratings are too high such as 1776,960,666 etc.
# - We know that @WeRateDogs keep their denominator as 10 always while rating dogs but here some of the ratings are 11,50,2,7,0,110 etc.
# - After assessing the image prediction dataset visually, we find that for the last row, all the predictions of dog breed are false, which means, some images are not dogs.
# - Some of the names of dog breed are not defined, like 'bookshop','bakery','book_jacket', 'orange'.
# - The Image Urls are same for some images.
# - The names of dog in Image prediction Dataset are separated by underscore instead of space.
#
# > #### Tidiness Issues-
# - There are four columns namely doggo, floofer,puppo, pupper for the stages of a particular dog. We don't need four columns for the stage, only one column will be enough.
# - We only need one master dataset for our analysis and visualizations, so we will merge all the three datasets collected from different sources.
#
# ## Cleaning the Data
# - First we will make copies of the dataframe.
# Work on copies so the raw gathered frames stay untouched.
df_local_new=df_local.copy()
df_url_new=df_url.copy()
df_tweet_new=df_tweet.copy()
# **Define**
# - Select the rows with null retweeted_status_id and remove the non-null retweets from the dataset.
# **Code**
# Keep only original tweets: a NaN retweeted_status_id means "not a retweet".
df_local_new = df_local_new[np.isnan(df_local_new.retweeted_status_id)]
# **Test**
df_local_new.info()
# **Define**
#
# Select the columns related to retweets and drop them as it is of no use further.
# **Code**
df_local_new.drop(["retweeted_status_id","retweeted_status_user_id","retweeted_status_timestamp","in_reply_to_status_id","in_reply_to_user_id"], axis=1, inplace=True)
# **Test**
df_local_new.info()
# **Define**
# - Select the four columns of stages and make a new dataframe.
# - Add a new column 'Stage' to the new dataframe.
# - Append the non-null values to column Stage.
# - Add the new column 'Stage' to our original dataset.
# - Drop the four columns 'Doggo', 'Floofer', 'Pupper', 'Puppo' from original dataset.
#
# **Code**
#
# BUG FIX: indexing a DataFrame with a *set* literal is deprecated (and a
# TypeError in newer pandas) and yields an arbitrary column order; a list
# selects the four dog-stage columns in a stable, explicit order.
a = pd.DataFrame(df_local_new[['doggo', 'floofer', 'pupper', 'puppo']])
a.head()
# Turn the literal string 'None' into real NaN so dropna() can skip it below.
a['floofer'].replace('None', np.nan, inplace=True)
a['doggo'].replace('None', np.nan, inplace=True)
a['pupper'].replace('None', np.nan, inplace=True)
a['puppo'].replace('None', np.nan, inplace=True)
a.head()
a["Stage"]=None
a.head()
# Join the surviving stage names per row (comma-separated when a dog has
# more than one stage).
# NOTE(review): the duplicated .astype(str).astype(str) is redundant — the
# second call is a no-op.
a['Stage'] = a.apply(lambda row: ','.join(row.dropna()
                     .astype(str).astype(str)), axis=1)
# Rows with no stage produced an empty string; convert those to NaN.
a.replace(r'^\s*$', np.nan, regex=True,inplace=True)
a.head(5)
a['Stage'].value_counts()
# Replace the four stage columns with the single tidy 'Stage' column.
df_local_new.drop(["doggo","floofer","pupper","puppo"],axis=1, inplace=True)
df_local_new['Stage']=a["Stage"]
# **Test**
df_local_new.head(10)
df_local_new.info()
# **Define**
#
# Select the column 'timestamp' and change the DataType of timestamp from string to datetime.
#
# **Code**
df_local_new['timestamp'] = pd.to_datetime(df_local_new['timestamp'])
# **Test**
df_local_new.info()
# **Define**
#
# Select rows with missing values of expand urls and remove them.
#
# **Code**
df_local_new.dropna(subset=['expanded_urls'],inplace=True)
# **Test**
df_local_new.info()
# **Define**
#
# Select invalid Names, which most probably starts with lower case letter and set those cells to None.
#
# **Code**
# Extracted "names" like 'a', 'an', 'the' are all lowercase; real names are capitalized.
df_local_new.loc[df_local_new['name'] == df_local_new['name'].str.lower(), 'name'] = None
# **Test**
df_local_new['name'].value_counts()
# **Define**
# - Set the numerator rating in terms of denominator as most of the times denominator is 10 and then remove the denominator column with ratings not equal to 10.
#
# **Code** and **Test**
# Rescale every rating onto the common /10 scale.
# NOTE(review): the assessment found a denominator of 0 — that row divides by
# zero here and becomes inf; confirm it is dropped downstream.
df_local_new.rating_numerator=(df_local_new.rating_numerator/df_local_new.rating_denominator)*10
# +
df_local_new[df_local_new['rating_denominator']!=10]
# -
# **Define**
#
# Select the source column and extract the text between anchor tags.
#
# **Code**
# Pull the human-readable client name out of the <a ...>name</a> markup.
df_local_new['source'] = df_local_new['source'].apply(lambda x: re.findall(r'>(.*)<', x)[0])
# **Test**
df_local_new['source'].value_counts()
# ## Image Prediction
df_url_new.info()
# **Define**
#
# Select the columns for which dog breed classifier is true and remove the images which are not dogs.
#
# **Code**
# NOTE(review): this keeps only rows where *all three* predictions are dogs,
# which is stricter than the stated intent of removing rows where *none* of
# the predictions is a dog (all three False). Rows where one or two
# predictors disagree are dropped too — confirm this is deliberate.
df_url_new= df_url_new.query('p1_dog==True and p2_dog==True and p3_dog==True')
# **Test**
df_url_new.query('p1_dog==False and p2_dog==False and p3_dog==False').shape[0]
# **Define**
# Select the dog breed prediction columns that is p1, p2 and p3 and then replace underscore in dog breed's name with space.
#
# **Code**
df_url_new['p1']=df_url_new['p1'].replace('_', ' ', regex=True)
df_url_new['p2']=df_url_new['p2'].replace('_', ' ', regex=True)
df_url_new['p3']=df_url_new['p3'].replace('_', ' ', regex=True)
# **Test**
df_url_new.head()
# ### Twitter API Dataset
df_tweet_new.info()
df_tweet_new['id'].duplicated().sum()
# **Define**
#
# - Merging all the datasets using join and make tweet_id the main key as it is unique for everyone.
# - Merge two datasets first and then merge the third dataset in the master dataset.
# **Code**
# +
# Inner join keeps only tweets present in both the archive and the API data.
df_master=pd.merge(left=df_local_new,right=df_tweet_new,left_on='tweet_id',right_on='id',how='inner')
# 'id' duplicates 'tweet_id' after the merge, so drop it.
df_master.drop(['id'],axis=1,inplace=True)
df_master.head()
# -
df_master_final=pd.merge(left=df_master,right=df_url_new,on='tweet_id',how='inner')
df_master_final.head()
# **Test**
df_master_final.info()
# **Store the final dataset as csv**
# +
df_master_final.to_csv('twitter_archive_master.csv',index=False)
# Re-read the stored file so the analysis below runs on exactly what was saved.
df=pd.read_csv('twitter_archive_master.csv')
df.head()
# -
df.columns
# ## Analyze and Visualize Data
# #### Question 1: Is retweet_count related to favorite_count that is whether the post will have more retweets if that tweet is favorite?
import seaborn as sns
sns.regplot(data=df,x='retweet_count',y='favorite_count');
# By the scatterplot above, we see that retweet count and favorite count are strongly related to each other with a positive correlation.
# #### Question 2 - Which is the most common Dog Stage?
df['Stage'].value_counts()
sns.countplot(data=df,x='Stage')
plt.xticks(rotation=70);
plt.title('Most common Stage of Dog')
# Pupper is the most common dog stage among all the dogs.
# #### Question 3 - Which is the most common breed of Dog predicted?
# Tall figure so every breed label stays readable on the y axis.
fig_dims = (8, 20)
fig, ax = plt.subplots(figsize=fig_dims)
sns.countplot(y = "p1", ax=ax, data=df)
plt.xticks(rotation=90);
plt.title('Most common breed of Dog')
plt.ylabel('Breed of Dogs');
# Golden retriever, Labrador retriever, Pembroke, Chihuahua, Pug are the most common breeds of Dog.
# #### Question 4 - Which is the top 10 breed of Dog which receives more retweets?
# +
# Total retweets per predicted breed, then keep the top 10.
df_favorite = df.groupby('p1')['retweet_count'].sum().reset_index()
df_sorted = df_favorite.sort_values('retweet_count', ascending=False).head(10)
ser_ret = df_sorted['retweet_count']
ret_breed = df_sorted['p1']
fig, ax = plt.subplots(figsize=(10,7))
fav = plt.barh(ret_breed, ser_ret)
plt.ylabel('Breed of Dog')
plt.xlabel('Retweet count')
plt.title('No of retweets per breed');
# -
# The breed golden Retriever receives the most retweets.
#
| wrangle_act.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MichaelOblego/Linear-Algebra-58020/blob/main/Practical_Lab_Exam_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Q67Af9au_cl1"
# #Practical Lab Exam 1
# + id="Wdg4takQ_bkK"
# numpy is imported under the alias LA for this lab exercise.
import numpy as LA
# + id="ZkolmS6iBZxV"
# Matrices used throughout the exercises.
W = LA.array([[1,-3,0],[2,-2,0],[3,1,0]])
X = LA.array([[3,2,1],[-1,2,3],[2,-1,1]])
Y = LA.array([[1,2,0],[0,-1,2],[0,0,0]])
Z = LA.array ([[0,1],[2,3]])
# + colab={"base_uri": "https://localhost:8080/"} id="ePHkLwo5NW9R" outputId="a35ebb37-0c66-4d10-8123-d9d09cae0373"
#a
output = LA.dot(W,Y)
print(output)
# + colab={"base_uri": "https://localhost:8080/"} id="pGmeYpDsN0oB" outputId="3e0b4c30-8959-4eb8-9745-5d3e19dbae78"
#b
output = LA.add(Z,Z)
print(output)
# + colab={"base_uri": "https://localhost:8080/"} id="fPLAUXVON6_2" outputId="575e817c-c909-4861-b875-fc072cd02e62"
#c
# BUG FIX: numpy is imported as ``LA`` in this notebook, so ``np`` was an
# undefined name and this cell raised NameError.
output = LA.dot(3,X)
print(output)
# + [markdown] id="JklWriNqN_i-"
# Problem #2
# + colab={"base_uri": "https://localhost:8080/"} id="4xXxfrvYOCo5" outputId="b11601c8-6883-43ca-c5eb-0653ecabbb02"
#a
# NOTE(review): LA.cross is the row-wise *vector* cross product, not matrix
# multiplication — confirm this is what the exercise asks for.
A1=LA.cross(W,Y)
B2=2*A1
print(B2)
# + colab={"base_uri": "https://localhost:8080/"} id="XjfbxlUzOfDO" outputId="bb0c3004-3a2f-46b1-cb1f-7c65037593cb"
#b
C1 = LA.dot(W,Y)
D2 = LA.subtract(C1,X)
print(D2)
# + colab={"base_uri": "https://localhost:8080/"} id="bSsQLAomOzv4" outputId="6e108410-11c9-47c5-ff46-e2237ea973ad"
#c
# NOTE(review): Z1 is computed via LA.dot and then immediately overwritten by
# a hard-coded 2x2 list, discarding the dot-product result — verify intent.
Z1 = LA.dot(Y,W)
X2 = 2*Z
Z1 = ([[5,-7],[4,4]])
V3 = LA.subtract(Z1,X2)
print(V3)
# + [markdown] id="Iu64WKDZPI7M"
# #Statement B is True
| Practical_Lab_Exam_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Package:-Sklearn" data-toc-modified-id="Package:-Sklearn-1"><span class="toc-item-num">1 </span>Package: Sklearn</a></span></li><li><span><a href="#Package:-Spatial" data-toc-modified-id="Package:-Spatial-2"><span class="toc-item-num">2 </span>Package: Spatial</a></span><ul class="toc-item"><li><span><a href="#Cdist" data-toc-modified-id="Cdist-2.1"><span class="toc-item-num">2.1 </span>Cdist</a></span></li></ul></li><li><span><a href="#NAN" data-toc-modified-id="NAN-3"><span class="toc-item-num">3 </span>NAN</a></span></li></ul></div>
# -
# # Cosine Similarity
# Small one-hot style example vectors; e4 is the all-zero vector, used below
# to demonstrate the degenerate (NaN) case.
e1 = [1,1,1,0,0,0]
e2 = [1,0,0,1,0,0]
e3 = [0,0,0,0,1,1]
e4 = [0,0,0,0,0,0]
# ## Package: Sklearn
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise similarity matrix for the two vectors.
cosine_similarity([e1,e2])
# ## Package: Spatial
from scipy import spatial
import numpy as np
# scipy returns cosine *distance*; 1 - distance gives the similarity.
1 - spatial.distance.cosine(e1,e2)
# ### Cdist
from scipy.spatial.distance import cdist
# cdist expects 2-D inputs, hence the reshape to (1, n) row vectors.
1 - cdist(np.array(e1).reshape(1,-1),np.array(e2).reshape(1,-1),metric='cosine')
# ## NAN
# if the vector is all 0s
# Cosine distance against the zero vector is undefined (division by a zero
# norm) and yields NaN with a runtime warning.
spatial.distance.cosine(e1,e4)
L = [1,2,4,np.nan]
np.isnan(L)
L
# Two equivalent ways of replacing NaN entries with 0.
np.nan_to_num(L)
np.where(np.isnan(L), 0, L)
| *Python_Basics/Cosine_Similarity/cosine_similarity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What do I want?
#
# - Match COSMOS galaxies to HSC objects
# - Remove non-matched HSC objects
# - Create a classification based on COSMOS features
# - (z < 0.15) and ($10^8$ < M_stellar < $10^9$ M$_\odot$)
# - Use that COSMOS classification to create an HSC training set
# - Run Random Forest & Logistic Regression on that data set (pure HSC features! no COSMOS features)
# - Create visual diagnostics
#
# I'll test two basic classifiers: a RandomForest classifier and a Logistic Regression classifier. I'll try a very simplistic weak-learner: a straight i-band cut.
#
# For my training data, I started by getting objects and labels from COSMOS. For input features, I then matched those COSMOS galaxies to their nearest HSC counterpart. I then used HSC i-band magnitude, along with HSC g-r, r-i, i-z, z-y colors. Finally, I augment it with some HSC photo-z information (`FRANKEN-Z`).
#
# In this notebook I'll look at the full decision curves for the classifiers, in hopes of better understanding my results.
# # Code
# +
# give access to importing dwarfz
import os, sys
# The repo root is everything before the first "dwarfz" path component.
dwarfz_package_dir = os.getcwd().split("dwarfz")[0]
if dwarfz_package_dir not in sys.path:
    sys.path.insert(0, dwarfz_package_dir)
import dwarfz
# back to regular import statements
# %matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(context="poster", style="ticks", font_scale=1.4)
import numpy as np
import pandas as pd
from scipy.special import expit
import pathlib
# -
# Global figure defaults for every plot in this notebook.
import matplotlib as mpl
mpl.rcParams['savefig.dpi'] = 80
mpl.rcParams['figure.dpi'] = 80
mpl.rcParams['figure.figsize'] = 2*np.array((8,6))
mpl.rcParams['figure.facecolor'] = "white"
# Load the COSMOS reference catalog, the HSC forced photometry, and the
# COSMOS->HSC cross-match table shipped with the dwarfz package.
COSMOS_filename = pathlib.Path(dwarfz.data_dir_default) / "COSMOS_reference.sqlite"
COSMOS = dwarfz.datasets.COSMOS(COSMOS_filename)
HSC_filename = pathlib.Path(dwarfz.data_dir_default) / "HSC_COSMOS_median_forced.sqlite3"
HSC = dwarfz.datasets.HSC(HSC_filename)
matches_filename = pathlib.Path(dwarfz.data_dir_default) / "matches.sqlite3"
matches_df = dwarfz.matching.Matches.load_from_filename(matches_filename)
# +
# Keep only matched objects; pull position, redshift and mass from COSMOS.
combined = matches_df[matches_df.match].copy()
combined["ra"] = COSMOS.df.loc[combined.index].ra
combined["dec"] = COSMOS.df.loc[combined.index].dec
combined["photo_z"] = COSMOS.df.loc[combined.index].photo_z
combined["log_mass"] = COSMOS.df.loc[combined.index].mass_med
# HSC photometry columns (flux, error, quality flag, magnitude per band).
photometry_cols = [
    "gcmodel_flux","gcmodel_flux_err","gcmodel_flux_flags", "gcmodel_mag",
    "rcmodel_flux","rcmodel_flux_err","rcmodel_flux_flags", "rcmodel_mag",
    "icmodel_flux","icmodel_flux_err","icmodel_flux_flags", "icmodel_mag",
    "zcmodel_flux","zcmodel_flux_err","zcmodel_flux_flags", "zcmodel_mag",
    "ycmodel_flux","ycmodel_flux_err","ycmodel_flux_flags", "ycmodel_mag",
]
# catalog_2_ids are the HSC object ids of each match.
for col in photometry_cols:
    combined[col] = HSC.df.loc[combined.catalog_2_ids][col].values
# -
# ## Turn magnitudes into colors
# Adjacent-band colors used as classifier features.
combined["g_minus_r"] = combined.gcmodel_mag - combined.rcmodel_mag
combined["r_minus_i"] = combined.rcmodel_mag - combined.icmodel_mag
combined["i_minus_z"] = combined.icmodel_mag - combined.zcmodel_mag
combined["z_minus_y"] = combined.zcmodel_mag - combined.ycmodel_mag
# ## Filter out bad data
# +
# Require finite colors/magnitude and clean photometry flags in every band.
mask = np.isfinite(combined["g_minus_r"]) & np.isfinite(combined["r_minus_i"]) \
    & np.isfinite(combined["i_minus_z"]) & np.isfinite(combined["z_minus_y"]) \
    & np.isfinite(combined["icmodel_mag"]) \
    & (~combined.gcmodel_flux_flags) & (~combined.rcmodel_flux_flags) \
    & (~combined.icmodel_flux_flags) & (~combined.zcmodel_flux_flags) \
    & (~combined.ycmodel_flux_flags)
combined = combined[mask]
# -
# ## Get FRANKENZ photo-z's
#
# +
# Load the FRANKEN-Z photometric redshift table, indexed by HSC object id.
df_frankenz = pd.read_sql_table("photo_z",
                                "sqlite:///{}".format(
                                    pathlib.Path(dwarfz.data_dir_default)
                                    / "HSC_matched_to_FRANKENZ.sqlite"),
                                index_col="object_id")
df_frankenz.head()
# -
# Attach photo-z point estimate and its risk score via the HSC id column.
combined = combined.join(df_frankenz[["photoz_best", "photoz_risk_best"]],
                         on="catalog_2_ids")
# # Create classification labels
# Positive class: nearby (z < 0.15) dwarfs with 10^8 < M* < 10^9 Msun.
low_z = (combined.photo_z < .15)
low_mass = (combined.log_mass > 8) & (combined.log_mass < 9)
combined["low_z_low_mass"] = (low_z & low_mass)
combined.low_z_low_mass.mean()
combined.low_z_low_mass.sum()
combined.shape
# # Build Classifiers
# +
# HSC-only input features: four colors, i-band magnitude, and photo-z info.
features = combined.loc[:,["g_minus_r", "r_minus_i", "i_minus_z", "z_minus_y",
                           "icmodel_mag",
                           "photoz_best",
                           "photoz_risk_best" # The risk of photoz_best being outside of the range z_true +- 0.15(1+z_true). It ranges from 0 (safe) to 1(risky)
                          ]]
target = combined.loc[:,["low_z_low_mass"]]
# -
target.mean()
# +
COSMOS_field_area = 2 # sq. degree
N_COSMOS_total = HSC.df.shape[0]
N_COSMOS_good = combined.shape[0]
true_dwarf_density = target.sum().values[0] / COSMOS_field_area
print("true dwarf density: {:.2f} / sq. deg.".format(true_dwarf_density))
# -
# ## Partition training and testing sets
# +
# Simple 90/10 shuffled split, reproducible via the fixed seed.
testing_fraction = .1
np.random.seed(0)
shuffled_indices = np.random.permutation(target.index.values)
N_testing_indices = int(testing_fraction*shuffled_indices.size)
testing_set_indices = shuffled_indices[:N_testing_indices]
training_set_indices = shuffled_indices[N_testing_indices:]
features_train = features.loc[training_set_indices]
features_test = features.loc[testing_set_indices]
target_train = target.loc[training_set_indices]
target_test = target.loc[testing_set_indices]
# Boolean views of the held-out labels, used by the plotting cells below.
true_dwarf = target_test.values.flatten()
true_non_dwarf = ~target_test.values.flatten()
# -
# +
# def get_classification_characteristics(target_prob, threshold_prob, verbose=False):
# target_prediction = (target_prob > threshold_prob)
# prediction_dwarf = target_prediction
# prediction_non_dwarf = ~target_prediction
# completeness = (true_dwarf & prediction_dwarf).sum() / true_dwarf.sum()
# purity = (true_dwarf & prediction_dwarf).sum() / prediction_dwarf.sum()
# sample_size_reduction = prediction_dwarf.size / prediction_dwarf.sum()
# true_positives = np.sum(true_dwarf & prediction_dwarf)
# false_positives = np.sum(true_non_dwarf & prediction_dwarf)
# true_negatives = np.sum(true_non_dwarf & prediction_non_dwarf)
# false_negatives = np.sum(true_dwarf & prediction_non_dwarf)
# true_positive_rate = true_positives / true_dwarf.sum()
# false_positive_rate = false_positives / true_non_dwarf.sum()
# objects_per_sq_deg = N_COSMOS_good / COSMOS_field_area / sample_size_reduction
# if verbose:
# print("completeness: ", completeness)
# print("purity: ", purity)
# print("sample_size_reduction: ", sample_size_reduction)
# print("true positive rate: ", true_positive_rate)
# print("false positive rate: ", false_positive_rate)
# print("objects per sq deg: ", objects_per_sq_deg)
# return {
# "completeness": completeness,
# "purity": purity,
# "sample_size_reduction": sample_size_reduction,
# "threshold_prob": threshold_prob,
# "true_positive_rate": true_positive_rate,
# "false_positive_rate": false_positive_rate,
# "objects_per_sq_deg" : objects_per_sq_deg,
# }
# -
# ## Shared Plotting Parameters
# +
# One color/label per classifier so every figure uses a consistent legend.
color_RF = "g"
color_LR = "b"
color_MC = "r"
label_RF = "Random Forest"
label_LR = "Logistic Regression"
label_MC = "Magnitude Cut"
linewidth = 4
# -
# # Cross-validation Wrapper
# **Note**: this doesn't perfectly handle when the number of objects doesn't evenly divide by the number of folds. In practice an example might be held out multiple times, and I'll just throw those few examples away.
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# Number of cross-validation folds used throughout this notebook.
n_folds_default = 10
def get_cross_validation_matrix(classifier, seed=0, folds=n_folds_default, only_i_mag=False):
    """Collect k-fold cross-validated dwarf probabilities for every HSC object.

    Parameters
    ----------
    classifier : sklearn-style estimator exposing ``fit`` / ``predict_proba``
    seed : int, RNG seed for the index shuffle (reproducibility)
    folds : int, number of cross-validation folds
    only_i_mag : bool, restrict the feature set to the i-band magnitude column

    Returns
    -------
    dict mapping HSC_id -> list of predicted probabilities (one entry each
    time the object landed in a held-out fold).

    Relies on the module-level ``features``, ``target`` and ``combined``.
    """
    np.random.seed(seed)
    testing_fraction = 1/folds
    shuffled_indices = np.random.permutation(target.index.values)
    all_indices_set = set(shuffled_indices)
    results = {HSC_id: []
               for HSC_id in combined.loc[shuffled_indices].catalog_2_ids.drop_duplicates().values}
    # BUG FIX: the fold size must be derived from the *local* fold count.
    # The original sliced with the module-level ``N_testing_indices`` (10% of
    # the data, from the train/test split cell) while ``testing_fraction``
    # computed above went unused — silently mis-sizing every fold whenever
    # ``folds`` != 10. Behavior is unchanged at the default folds=10.
    fold_size = int(testing_fraction * shuffled_indices.size)
    for fold in range(folds):
        print("\rfold: {} / {}".format(fold+1, folds), end="", flush=True)
        if fold == folds-1:
            # last fold absorbs any remainder from integer division
            testing_set_indices = shuffled_indices[fold*fold_size:]
        else:
            testing_set_indices = shuffled_indices[fold*fold_size:(fold+1)*fold_size]
        training_set_indices = np.array(list(all_indices_set - set(testing_set_indices)))
        features_train = features.loc[training_set_indices]
        features_test = features.loc[testing_set_indices]
        if only_i_mag:
            features_train = features_train[["icmodel_mag"]]
            features_test = features_test[["icmodel_mag"]]
        target_train = target.loc[training_set_indices]
        target_test = target.loc[testing_set_indices]
        classifier.fit(features_train, target_train.values.flatten())
        # probability of the positive (dwarf) class
        target_prob = classifier.predict_proba(features_test)[:,1]
        for i, COSMOS_id in enumerate(testing_set_indices):
            HSC_id = combined.loc[COSMOS_id].catalog_2_ids
            results[HSC_id].append(target_prob[i])
    return results
# ## i-mag cut
# Logistic regression on i-band magnitude alone (the weak baseline;
# the single-feature restriction is applied via only_i_mag at fit time).
classifier_i_mag = LogisticRegression(class_weight=None,
                                      solver="lbfgs",
                                      max_iter=300)
# ## Logistic Regression
# Same configuration, but fit on the full feature set.
classifier_LR = LogisticRegression(class_weight=None,
                                   solver="lbfgs",
                                   max_iter=300)
# ## Random Forest
classifier_RF = RandomForestClassifier(n_estimators=1000,
                                       n_jobs=4)
# ## Save all model results
# Note, I previously saved this data at `../data/galaxy_images_training/2017_09_26-dwarf_galaxy_scores.csv`. That's the version I use in constructing my DNN training set. The file created below isn't likely to be significantly better/different, but it uses different random seeds and thus will have some small variations in the probabilities assigned to each galaxy.
# +
# Cache the (slow) cross-validation runs to HDF5; set overwrite=True to redo.
hdf_file = pathlib.Path("results_cross-validated_all.hdf5")
overwrite = False
if (not hdf_file.is_file()) or overwrite:
    results_RF = get_cross_validation_matrix(classifier_RF)
    results_LR = get_cross_validation_matrix(classifier_LR)
    results_i_mag = get_cross_validation_matrix(classifier_i_mag,
                                                only_i_mag=True)
    HSC_ids = list(sorted(results_LR.keys()))
    # Keep only objects scored exactly once (held out in exactly one fold).
    HSC_ids = [HSC_id for HSC_id in HSC_ids
               if len(results_LR[HSC_id])==1]
    df_results = pd.DataFrame({
        "HSC_id": HSC_ids,
        "LR_prob": [results_LR[HSC_id][0] for HSC_id in HSC_ids],
        "RF_prob": [results_RF[HSC_id][0] for HSC_id in HSC_ids],
        "i_mag_prob": [results_i_mag[HSC_id][0] for HSC_id in HSC_ids],
        "target": combined.set_index("catalog_2_ids").loc[HSC_ids].low_z_low_mass
    })
    df_results.to_hdf(hdf_file, key="results")
else:
    df_results = pd.read_hdf(hdf_file)
df_results.head()
# -
# # Get Metrics from cross-validated results
# Threshold grid: logistic-spaced points in (0, 1), plus sentinels just
# outside [0, 1] so the curves reach both endpoints.
threshold_probs = expit(np.linspace(-9, 6))
threshold_probs = np.array([-1e-6, *threshold_probs, 1+1e-6])
# +
def get_purities(key, df_results=df_results, threshold_probs=threshold_probs):
    """Fraction of selected objects that are true dwarfs, per threshold.

    Entries are NaN wherever a threshold selects no objects at all
    (mean over an empty selection).
    """
    scores = df_results[key]
    labels = df_results["target"]
    return np.array([labels[scores > cut].mean() for cut in threshold_probs])
def get_completenesses(key, df_results=df_results, threshold_probs=threshold_probs):
    """Fraction of true dwarfs recovered above each probability threshold."""
    dwarf_scores = df_results.loc[df_results.target, key]
    return np.array([(dwarf_scores > cut).mean() for cut in threshold_probs])
def get_selected_object_density(key, df_results=df_results, threshold_probs=threshold_probs):
    """Number of selected objects per square degree at each threshold."""
    scores = df_results[key]
    counts = np.array([(scores > cut).sum() for cut in threshold_probs])
    return counts / COSMOS_field_area
def get_FPRs(key, df_results=df_results, threshold_probs=threshold_probs):
    """False-positive rate: fraction of non-dwarfs scoring above each threshold."""
    non_dwarf_scores = df_results.loc[~df_results.target, key]
    return np.array([(non_dwarf_scores > cut).mean() for cut in threshold_probs])
# +
# Evaluate the full metric curves for each of the three classifiers.
purities_RF = get_purities("RF_prob")
completenesses_RF = get_completenesses("RF_prob")
TPR_RF = completenesses_RF
FPR_RF = get_FPRs("RF_prob")
object_density_RF = get_selected_object_density("RF_prob")
purities_LR = get_purities("LR_prob")
completenesses_LR = get_completenesses("LR_prob")
TPR_LR = completenesses_LR
FPR_LR = get_FPRs("LR_prob")
object_density_LR = get_selected_object_density("LR_prob")
purities_i_mag = get_purities("i_mag_prob")
completenesses_i_mag = get_completenesses("i_mag_prob")
TPR_i_mag = completenesses_i_mag
FPR_i_mag = get_FPRs("i_mag_prob")
object_density_i_mag = get_selected_object_density("i_mag_prob")
# -
import sklearn
import sklearn.metrics
# +
AUC_RF = sklearn.metrics.average_precision_score(df_results.target, df_results.RF_prob)
plt.plot(completenesses_RF, purities_RF,
marker="o", color=color_RF, label="Random Forest (AUC={:.2f})".format(AUC_RF),
linewidth=linewidth,
)
AUC_LR = sklearn.metrics.average_precision_score(df_results.target, df_results.LR_prob)
plt.plot(completenesses_LR, purities_LR,
marker="o", color=color_LR, label="Logistic Regression (AUC={:.2f})".format(AUC_LR),
linestyle="dashed",
linewidth=linewidth,
)
AUC_i_mag = sklearn.metrics.average_precision_score(df_results.target, df_results.i_mag_prob)
plt.plot(completenesses_i_mag, purities_i_mag,
marker="o", color=color_MC, label="$i$-band cut (AUC={:.2f})".format(AUC_i_mag),
linestyle="dotted",
linewidth=linewidth,
)
plt.xlabel("Completeness")
plt.ylabel("Purity")
plt.ylim(0,1)
leg = plt.legend(loc="best")
filename = "plots_for_thesis/purity-completeness-all"
plt.tight_layout()
plt.savefig(filename + ".pdf")
plt.savefig(filename + ".png")
# +
AUC_RF = sklearn.metrics.roc_auc_score(df_results.target, df_results.RF_prob)
plt.plot(FPR_RF, TPR_RF,
marker="o", color=color_RF, label="Random Forest (AUC={:.3f})".format(AUC_RF),
drawstyle="steps-post",
linewidth=linewidth,
)
AUC_LR = sklearn.metrics.roc_auc_score(df_results.target, df_results.LR_prob)
plt.plot(FPR_LR, TPR_LR,
marker="o", color=color_LR, label="Logistic Regression (AUC={:.3f})".format(AUC_LR),
linestyle="dashed",
drawstyle="steps-post",
linewidth=linewidth,
)
AUC_i_mag = sklearn.metrics.roc_auc_score(df_results.target, df_results.i_mag_prob)
plt.plot(FPR_i_mag, TPR_i_mag,
marker="o", color=color_MC, label="$i$-band cut (AUC={:.3f})".format(AUC_i_mag),
linestyle="dotted",
drawstyle="steps-post",
linewidth=linewidth,
)
plt.plot([0,1], [0,1 ], linestyle="dotted", color="k", label="Random guessing",
linewidth=linewidth,
)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
# plt.xlim(0,1)
# plt.ylim(0,1)
plt.legend(loc="best")
filename = "plots_for_thesis/ROC-all"
plt.tight_layout()
plt.savefig(filename + ".pdf")
plt.savefig(filename + ".png")
# +
f, (ax1, ax2) = plt.subplots(2, sharex=True)
f.subplots_adjust(hspace=0.1)
ax1.plot(object_density_RF, purities_RF,
marker="o", color=color_RF, label=label_RF,
linewidth=linewidth,
)
ax1.axvline(1e3,
color="black", linestyle="dashed", label="DNN Training Set Density")
ax1.set_ylabel("Purity")
ax1.set_xscale("log")
ax1.set_ylim(0,1)
ax2.plot(object_density_RF, completenesses_RF,
marker="o", color=color_RF, label=label_RF,
linewidth=linewidth,
)
ax2.axvline(1e3,
color="black", linestyle="dashed", label="DNN Training Set Density",
linewidth=linewidth,
)
ax2.set_xlabel("Number of Selected Objects per sq. deg.")
ax2.set_ylabel("Completeness")
ax2.set_xscale("log")
ax2.set_ylim(0,1)
ax2.legend(loc="best")
plt.tight_layout()
filename = "plots_for_thesis/purity-completeness-RF"
plt.tight_layout()
plt.savefig(filename + ".pdf")
plt.savefig(filename + ".png")
# +
theoretical_probs=np.linspace(0,1,num=11)
empirical_probs_RF = np.empty(theoretical_probs.size-1)
num_in_bin_RF = np.empty_like(empirical_probs_RF)
for i in range(theoretical_probs.size-1):
prob_lim_low = theoretical_probs[i]
prob_lim_high = theoretical_probs[i+1]
mask_RF = (df_results["RF_prob"] >= prob_lim_low) & (df_results["RF_prob"] < prob_lim_high)
empirical_probs_RF[i] = df_results["target"][mask_RF].mean()
num_in_bin_RF[i] = df_results["target"][mask_RF].size
f, (ax1, ax2) = plt.subplots(2, sharex=True,
gridspec_kw = {'height_ratios':[1, 3]},
)
ax1.plot(theoretical_probs, [num_in_bin_RF[0], *num_in_bin_RF],
drawstyle="steps", color=color_RF,
linewidth=linewidth,
)
ax1.set_yscale("log")
ax1.set_ylim(bottom=10**-.5, top=10**6.5)
ax1.yaxis.set_ticks([1e0, 1e3, 1e6])
ax1.set_ylabel("Number of \nGalaxies in Bin")
ax2.step(theoretical_probs, [empirical_probs_RF[0], *empirical_probs_RF],
linestyle="steps", color=color_RF, label=label_RF,
linewidth=linewidth,
)
ax2.fill_between(theoretical_probs, theoretical_probs-theoretical_probs[1], theoretical_probs,
step="pre", color="black", label="ideal", alpha=.2,
linewidth=linewidth,
)
plt.xlabel("Reported Probability")
plt.ylabel("Actual (Binned) Probability")
plt.legend(loc="best")
plt.xlim(0,1)
plt.ylim(0,1)
plt.tight_layout()
filename = "plots_for_thesis/probability-calibration-RF"
plt.tight_layout()
plt.savefig(filename + ".pdf")
plt.savefig(filename + ".png")
# -
# Log-loss of the raw Random Forest probabilities.
sklearn.metrics.log_loss(df_results.target, df_results.RF_prob)
# +
# Soften the RF probabilities away from exact 0 and 1 by adding a couple of
# balanced pseudo-observations per tree vote (Laplace-style smoothing); this
# avoids infinite log-loss when the ensemble is unanimously -- and sometimes
# wrongly -- confident.
n_trees = classifier_RF.n_estimators
n_pseudo_obs = 2
pseudo_obs_class_balance = 0.5
df_results["RF_prob_softened"] = (df_results["RF_prob"] * n_trees + n_pseudo_obs * pseudo_obs_class_balance) \
    / (n_trees + n_pseudo_obs)
sklearn.metrics.log_loss(df_results.target, df_results.RF_prob_softened)
# -
# Log-loss of the competing models for comparison.
sklearn.metrics.log_loss(df_results.target, df_results.LR_prob)
sklearn.metrics.log_loss(df_results.target, df_results.i_mag_prob)
# ## Predicted Probabilities
# +
plt.hist(df_results["RF_prob"], bins=np.linspace(0,1), alpha=.5, color=color_RF, label=label_RF)
plt.hist(df_results["LR_prob"], bins=np.linspace(0,1), alpha=.5, color=color_LR, label=label_LR)
plt.yscale("log")
plt.xlabel("p(dwarf | model)")
plt.ylabel("Number of objects")
plt.legend(loc="best")
# +
plt.hist(1-df_results["RF_prob"],
cumulative=True, alpha=0.9,
label="RF",
color=color_RF)
plt.hist(1-df_results["LR_prob"],
cumulative=True,
label="LR",
color=color_LR)
plt.ylim(ymin=1e-5)
plt.yscale("log")
plt.legend(loc="best")
plt.xlabel("1 - prob(dwarf)")
plt.ylabel("CDF")
# -
# # How do I actually get the images for the best candidate galaxies?
# Check out `../data/get_training_galaxy_images.ipynb`
| dwarfz/catalog_only_classifier/classifier_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:EviDENce]
# language: python
# name: conda-env-EviDENce-py
# ---
# # Tokenizer/preprocesser
import gensim
dutch_stopwords = ['aan',
'aangaande',
'aangezien',
'achte',
'achter',
'achterna',
'af',
'afgelopen',
'al',
'aldaar',
'aldus',
'alhoewel',
'alias',
'alle',
'allebei',
'alleen',
'alles',
'als',
'alsnog',
'altijd',
'altoos',
'ander',
'andere',
'anders',
'anderszins',
'beetje',
'behalve',
'behoudens',
'beide',
'beiden',
'ben',
'beneden',
'bent',
'bepaald',
'betreffende',
'bij',
'bijna',
'bijv',
'binnen',
'binnenin',
'blijkbaar',
'blijken',
'boven',
'bovenal',
'bovendien',
'bovengenoemd',
'bovenstaand',
'bovenvermeld',
'buiten',
'bv',
'daar',
'daardoor',
'daarheen',
'daarin',
'daarna',
'daarnet',
'daarom',
'daarop',
'daaruit',
'daarvanlangs',
'dan',
'dat',
'de',
'deden',
'deed',
'der',
'derde',
'derhalve',
'dertig',
'deze',
'dhr',
'die',
'dikwijls',
'dit',
'doch',
'doe',
'doen',
'doet',
'door',
'doorgaand',
'drie',
'duizend',
'dus',
'echter',
'een',
'eens',
'eer',
'eerdat',
'eerder',
'eerlang',
'eerst',
'eerste',
'eigen',
'eigenlijk',
'elk',
'elke',
'en',
'enig',
'enige',
'enigszins',
'enkel',
'er',
'erdoor',
'erg',
'ergens',
'etc',
'etcetera',
'even',
'eveneens',
'evenwel',
'gauw',
'ge',
'gedurende',
'geen',
'gehad',
'gekund',
'geleden',
'gelijk',
'gemoeten',
'gemogen',
'genoeg',
'geweest',
'gewoon',
'gewoonweg',
'haar',
'haarzelf',
'had',
'hadden',
'hare',
'heb',
'hebben',
'hebt',
'hedden',
'heeft',
'heel',
'hem',
'hemzelf',
'hen',
'het',
'hetzelfde',
'hier',
'hierbeneden',
'hierboven',
'hierin',
'hierna',
'hierom',
'hij',
'hijzelf',
'hoe',
'hoewel',
'honderd',
'hun',
'hunne',
'ieder',
'iedere',
'iedereen',
'iemand',
'iets',
'ik',
'ikzelf',
'in',
'inderdaad',
'inmiddels',
'intussen',
'inzake',
'is',
'ja',
'je',
'jezelf',
'jij',
'jijzelf',
'jou',
'jouw',
'jouwe',
'juist',
'jullie',
'kan',
'klaar',
'kon',
'konden',
'krachtens',
'kun',
'kunnen',
'kunt',
'laatst',
'later',
'liever',
'lijken',
'lijkt',
'maak',
'maakt',
'maakte',
'maakten',
'maar',
'mag',
'maken',
'me',
'meer',
'meest',
'meestal',
'men',
'met',
'mevr',
'mezelf',
'mij',
'mijn',
'mijnent',
'mijner',
'mijzelf',
'minder',
'miss',
'misschien',
'missen',
'mits',
'mocht',
'mochten',
'moest',
'moesten',
'moet',
'moeten',
'mogen',
'mr',
'mrs',
'mw',
'na',
'naar',
'nadat',
'nam',
'namelijk',
'nee',
'neem',
'negen',
'nemen',
'nergens',
'net',
'niemand',
'niet',
'niets',
'niks',
'noch',
'nochtans',
'nog',
'nogal',
'nooit',
'nu',
'nv',
'of',
'ofschoon',
'om',
'omdat',
'omhoog',
'omlaag',
'omstreeks',
'omtrent',
'omver',
'ondanks',
'onder',
'ondertussen',
'ongeveer',
'ons',
'onszelf',
'onze',
'onzeker',
'ooit',
'ook',
'op',
'opnieuw',
'opzij',
'over',
'overal',
'overeind',
'overige',
'overigens',
'paar',
'pas',
'per',
'precies',
'recent',
'redelijk',
'reeds',
'rond',
'rondom',
'samen',
'sedert',
'sinds',
'sindsdien',
'slechts',
'sommige',
'spoedig',
'steeds',
'tamelijk',
'te',
'tegen',
'tegenover',
'tenzij',
'terwijl',
'thans',
'tien',
'tiende',
'tijdens',
'tja',
'toch',
'toe',
'toen',
'toenmaals',
'toenmalig',
'tot',
'totdat',
'tussen',
'twee',
'tweede',
'u',
'uit',
'uitgezonderd',
'uw',
'vaak',
'vaakwat',
'van',
'vanaf',
'vandaan',
'vanuit',
'vanwege',
'veel',
'veeleer',
'veertig',
'verder',
'verscheidene',
'verschillende',
'vervolgens',
'via',
'vier',
'vierde',
'vijf',
'vijfde',
'vijftig',
'vol',
'volgend',
'volgens',
'voor',
'vooraf',
'vooral',
'vooralsnog',
'voorbij',
'voordat',
'voordezen',
'voordien',
'voorheen',
'voorop',
'voorts',
'vooruit',
'vrij',
'vroeg',
'waar',
'waarom',
'waarschijnlijk',
'wanneer',
'want',
'waren',
'was',
'wat',
'we',
'wederom',
'weer',
'weg',
'wegens',
'weinig',
'wel',
'weldra',
'welk',
'welke',
'werd',
'werden',
'werder',
'wezen',
'whatever',
'wie',
'wiens',
'wier',
'wij',
'wijzelf',
'wil',
'wilden',
'willen',
'word',
'worden',
'wordt',
'zal',
'ze',
'zei',
'zeker',
'zelf',
'zelfde',
'zelfs',
'zes',
'zeven',
'zich',
'zichzelf',
'zij',
'zijn',
'zijne',
'zijzelf',
'zo',
'zoals',
'zodat',
'zodra',
'zonder',
'zou',
'zouden',
'zowat',
'zulk',
'zulke',
'zullen',
'zult',
'a',
'aan',
'aangaande',
'aangezien',
'achter',
'achterna',
'aen',
'af',
'afd',
'afgelopen',
'agter',
'al',
'aldaar',
'aldus',
'alhoewel',
'alias',
'alle',
'allebei',
'alleen',
'alleenlyk',
'allen',
'alles',
'als',
'alsnog',
'altijd',
'altoos',
'altyd',
'ander',
'andere',
'anderen',
'anders',
'anderszins',
'anm',
'b',
'behalve',
'behoudens',
'beide',
'beiden',
'ben',
'beneden',
'bent',
'bepaald',
'beter',
'betere',
'betreffende',
'bij',
'bijna',
'bijvoorbeeld',
'bijv',
'binnen',
'binnenin',
'bizonder',
'bizondere',
'bl',
'blz',
'boven',
'bovenal',
'bovendien',
'bovengenoemd',
'bovenstaand',
'bovenvermeld',
'buiten',
'by',
'daar',
'daarheen',
'daarin',
'daarna',
'daarnet',
'daarom',
'daarop',
'daarvanlangs',
'daer',
'dan',
'dat',
'de',
'deeze',
'den',
'der',
'ders',
'derzelver',
'des',
'deszelfs',
'deszelvs',
'deze',
'dezelfde',
'dezelve',
'dezelven',
'dezen',
'dezer',
'dezulke',
'die',
'dien',
'dikwijls',
'dikwyls',
'dit',
'dl',
'doch',
'doen',
'doet',
'dog',
'door',
'doorgaand',
'doorgaans',
'dr',
'dra',
'ds',
'dus',
'echter',
'ed',
'een',
'eene',
'eenen',
'eener',
'eenig',
'eenige',
'eens',
'eer',
'eerdat',
'eerder',
'eerlang',
'eerst',
'eerste',
'eersten',
'effe',
'egter',
'eigen',
'eigene',
'elk',
'elkanderen',
'elkanderens',
'elke',
'en',
'enig',
'enige',
'enigerlei',
'enigszins',
'enkel',
'enkele',
'enz',
'er',
'erdoor',
'et',
'etc',
'even',
'eveneens',
'evenwel',
'ff',
'gauw',
'ge',
'gebragt',
'gedurende',
'geen',
'geene',
'geenen',
'gegeven',
'gehad',
'geheel',
'geheele',
'gekund',
'geleden',
'gelijk',
'gelyk',
'gemoeten',
'gemogen',
'geven',
'geweest',
'gewoon',
'gewoonweg',
'geworden',
'gezegt',
'gij',
'gt',
'gy',
'haar',
'had',
'hadden',
'hadt',
'haer',
'haere',
'haeren',
'haerer',
'hans',
'hare',
'heb',
'hebben',
'hebt',
'heeft',
'hele',
'hem',
'hen',
'het',
'hier',
'hierbeneden',
'hierboven',
'hierin',
'hij',
'hoe',
'hoewel',
'hun',
'hunne',
'hunner',
'hy',
'ibid',
'idd',
'ieder',
'iemand',
'iet',
'iets',
'ii',
'iig',
'ik',
'ikke',
'ikzelf',
'in',
'indien',
'inmiddels',
'inz',
'inzake',
'is',
'ja',
'je',
'jezelf',
'jij',
'jijzelf',
'jou',
'jouw',
'jouwe',
'juist',
'jullie',
'kan',
'klaar',
'kon',
'konden',
'krachtens',
'kunnen',
'kunt',
'laetste',
'lang',
'later',
'liet',
'liever',
'like',
'm',
'maar',
'maeken',
'maer',
'mag',
'martin',
'me',
'mede',
'meer',
'meesten',
'men',
'menigwerf',
'met',
'mezelf',
'mij',
'mijn',
'mijnent',
'mijner',
'mijzelf',
'min',
'minder',
'misschien',
'mocht',
'mochten',
'moest',
'moesten',
'moet',
'moeten',
'mogelijk',
'mogelyk',
'mogen',
'my',
'myn',
'myne',
'mynen',
'myner',
'myzelf',
'na',
'naar',
'nabij',
'nadat',
'naer',
'net',
'niet',
'niets',
'nimmer',
'nit',
'no',
'noch',
'nog',
'nogal',
'nooit',
'nr',
'nu',
'o',
'of',
'ofschoon',
'om',
'omdat',
'omhoog',
'omlaag',
'omstreeks',
'omtrent',
'omver',
'onder',
'ondertussen',
'ongeveer',
'ons',
'onszelf',
'onze',
'onzen',
'onzer',
'ooit',
'ook',
'oorspr',
'op',
'opdat ',
'opnieuw',
'opzij',
'opzy',
'over',
'overeind',
'overigens',
'p',
'pas',
'pp',
'precies',
'pres',
'prof',
'publ',
'reeds',
'rond',
'rondom',
'rug',
's',
'sedert',
'sinds',
'sindsdien',
'sl',
'slechts',
'sommige',
'spoedig',
'st',
'steeds',
'sy',
't',
'tamelijk',
'tamelyk',
'te',
'tegen',
'tegens',
'ten',
'tenzij',
'ter',
'terwijl',
'terwyl',
'thans',
'tijdens',
'toch',
'toe',
'toen',
'toenmaals',
'toenmalig',
'tot',
'totdat',
'tusschen',
'tussen',
'tydens',
'u',
'uit',
'uitg',
'uitgezonderd',
'uw',
'uwe',
'uwen',
'uwer',
'vaak',
'vaakwat',
'vakgr',
'van',
'vanaf',
'vandaan',
'vanuit',
'vanwege',
'veel',
'veeleer',
'veelen',
'verder',
'verre',
'vert',
'vervolgens',
'vgl',
'vol',
'volgens',
'voor',
'vooraf',
'vooral',
'vooralsnog',
'voorbij',
'voorby',
'voordat',
'voordezen',
'voordien',
'voorheen',
'voorop',
'voort',
'voortgez',
'voorts',
'voortz',
'vooruit',
'vrij',
'vroeg',
'vry',
'waar',
'waarom',
'wanneer',
'want',
'waren',
'was',
'wat',
'we',
'weer',
'weg',
'wege',
'wegens',
'weinig',
'weinige',
'wel',
'weldra',
'welk',
'welke',
'welken',
'welker',
'werd',
'werden',
'werdt',
'wezen',
'wie',
'wiens',
'wier',
'wierd',
'wierden',
'wij',
'wijzelf',
'wil',
'wilde',
'worden',
'wordt',
'wy',
'wyze',
'wyzelf',
'zal',
'ze',
'zeer',
'zei',
'zeker',
'zekere',
'zelf',
'zelfde',
'zelfs',
'zelve',
'zelven',
'zelvs',
'zich',
'zichzelf',
'zichzelve',
'zichzelven',
'zie',
'zig',
'zij',
'zijn',
'zijnde',
'zijne',
'zijner',
'zo',
"zo'n",
'zoals',
'zodra',
'zommige',
'zommigen',
'zonder',
'zoo',
'zou',
'zoude',
'zouden',
'zoveel',
'zowat',
'zulk',
'zulke',
'zulks',
'zullen',
'zult',
'zy',
'zyn',
'zynde',
'zyne',
'zynen',
'zyner',
'zyns',
'eh',
'eeh',
'uhm',
'uhhum',
'hum',
'humum',
'ehhh',
'he',
'nou']
# +
dutch_stopwords_processed = gensim.utils.simple_preprocess(' '.join(dutch_stopwords),deacc=True)
# -
def preprocess_fragment(fragment, stopwords=dutch_stopwords_processed):
    """Tokenize `fragment` (lowercased, accents stripped) with gensim and
    drop Dutch stopwords; return the surviving tokens re-joined by spaces."""
    tokens = gensim.utils.simple_preprocess(fragment, deacc=True)
    kept = (token for token in tokens if token not in stopwords)
    return ' '.join(kept)
| EviDENce_doc2vec_framework/tokenizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import re

# Read the raw text dump of the scraped ventilator table.
content = ''
with open('ventilator.txt', 'r') as fin:
    content = fin.read()

# Parse one state per line: name, then four numeric columns; we keep
# column 2 (total ventilators) and column 4 (pediatric-capable).
# BUG FIX: DataFrame.append returns a *new* frame (and was removed in
# pandas 2.0), so the original discarded every parsed row and left the
# frame empty.  Collect plain dicts and build the DataFrame once instead.
rows = []
for line in content.split('\n'):
    m = re.match(r'([\w\s]+)\s+([\d\.]+)\s+([\d\.]+)\s+([\d\.]+)\s+([\d\.]+).*', line)
    if (m):
        rows.append({'State': m[1], 'Ventilators': m[2], 'Pediatric Capable Ventilators': m[4]})
ventilators = pd.DataFrame(rows, columns=['State', 'Ventilators', 'Pediatric Capable Ventilators'])
| experiments/COVID-19_EWD/Ventilator Scrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import torch.nn.functional as Fh
import torch.optim as optim
def myloss(theta, i, show=False):
    """Loss for learning a near-binary sensing matrix.

    Squashes theta through a sigmoid of sharpness 2**i, L2-normalises the
    columns, and penalises (a) the Gram matrix's distance from the identity
    and (b) ||theta|| drifting away from 1.  Side effect: updates the
    global matrix A.  Returns (total loss, orthogonality term alone).
    """
    global A
    sharpness = torch.ones([m, n], dtype=torch.float64) * (2 ** i)
    A = torch.sigmoid(sharpness * theta)
    # Normalise every column of A to unit L2 norm.
    col_norms = torch.sum(A * A, 0) ** 0.5
    A = A / col_norms.repeat(m, 1)
    gram = torch.matmul(torch.t(A), A)
    if show:
        print(A)
    ortho_term = torch.norm(gram - I) ** 2
    norm_penalty = lam * abs(torch.norm(theta) ** 2 - 1)
    return ortho_term + norm_penalty, ortho_term
# +
m = 16
n = 40
I = torch.eye(n, dtype=torch.float64)
lam = 0.01
# beta = torch.ones([m, n], dtype=torch.float64, requires_grad=True)
x = torch.rand([m, n], dtype=torch.float64, requires_grad=True)
# print(x)
# x=x-0.5
# print(x)
# temp = torch.norm(x)
# x.data = x.data/temp
# y, fnorm = myloss(x,0,True)
# print("hey1")
# print(y)
# print(A)
# print("hey2")
# -
optimizer = torch.optim.Adam([x])
# +
# x = torch.tensor(.0, requires_grad=True)
# y = (x-2)**2
# optimizer = torch.optim.SGD([x], lr=0.0001)
# initilizae
# print(x,y)
# Optimisation loop.  The outer j loop was apparently meant to anneal the
# sigmoid sharpness (see the commented-out re-initialisation code), but
# myloss is always called with i=0, so every round optimises the same
# beta = 2**0 objective.
for j in range(10):
#     if (j!=0):
#         x.data = x.data/2
#     temp = torch.norm(x)
#     x.data = x.data/temp
    for i in range(40000):
        optimizer.zero_grad()
        # y = full loss, fnorm = orthogonality (Frobenius-norm) term alone.
        y, fnorm = myloss(x,0)
        # NOTE(review): retain_graph=True looks unnecessary -- a fresh graph
        # is built by myloss every step; confirm before removing.
        y.backward(retain_graph=True)
        optimizer.step()
#         beta = torch.ones([m, n], dtype=torch.float64)*(2**j)
#         A = torch.sigmoid(beta*x)
#         Anorm = torch.sum(A*A,0)**0.5
#         Anorm = Anorm.repeat(m,1)
#         A = A/Anorm
#         nt = -torch.log(1/A-1)/beta
#         x.data = nt.data
#         A = torch.sigmoid(beta*x)
        # Peek at the globally-updated matrix A once per round.
        if (i==1000):
            print(A)
#         temp = torch.norm(x)
#         x.data = x.data/temp
        # Progress report every 1000 steps.
        if (i + 1) % 1000 == 0:
            print('beta:',2**j,' epoch:',i + 1,' loss:',y.item(),' F-norm:',fnorm.item())
# -
beta = torch.ones([m, n], dtype=torch.float64)*(32)
C = torch.sigmoid(beta*x)
# print(C)
# B is the final sensing matrix
B=torch.round(C)
torch.set_printoptions(threshold=5000)
print(B)
# print((B!=0))
| matrix_gen/sensing_matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Estimating the total number of phages
# In order to estimate the total number of phages world-wide, we sum our estimates for the total biomass of phages in all of the environments we cover: the marine environment, soils, the marine deep subsurface, and the terrestrial deep subsurface.
# Here is a summary of our estimates for the total number of phages in each of the environments:
# +
import pandas as pd
import sys
import sys
sys.path.insert(0, '../../statistics_helper/')
from CI_helper import *
pd.options.display.float_format = '{:,.1e}'.format
estimate = pd.read_excel('phage_num_estimate.xlsx')
estimate
# -
# Our best estimate of the total number of phages is the sum of our estimates for the number of phages in all the environments we cover:
# +
# Calculate the total number of phages
best_estimate = estimate.sum()['Value']
print('Our best estimate for the total number of phages is %.1e' %best_estimate)
# -
# # Uncertainty analysis
# We could only produce projections for the number of phages in soils and in the terrestrial deep subsurface. For the number of phages in the marine environment and in the marine deep subsurface, we did not have a methodology which we believe represents well the uncertainty associated with our estimate. We therefore chose to use an uncertainty of about one and a half orders of magnitude for both the number of phages in the marine environments and in the marine deep subsurface. We hope further studies could come up with a better methodology for assessing the uncertainty of the estimate of the total number of phages in those environments.
#
# We combine the uncertainties for the number of phages in each of the environments to produce our projection for the uncertainty associated with our estimate of the total number of phages:
# +
# Set the uncertainty associated with our estimate of the total number of phages
# in the marine environment and in the marine deep subsurface as one and a half
# orders of magnitude
estimate.loc[0,'Uncertainty'] = 10**1.5
estimate.loc[1,'Uncertainty'] = 10**1.5
# Combine the uncertainties for all environments to produce our best projection
mul_CI = CI_sum_prop(estimates=estimate['Value'],mul_CIs=estimate['Uncertainty'])
print('Our best projection for the uncertainty associated with our estimate of the total number of phages is %.1f-fold' %mul_CI)
# -
# Our final parameters are:
# +
print('Our best estimate for the total number of phages : %.0e' % best_estimate)
print('Uncertainty associated with the estiamte of the total number of phages: %.0f-fold' % mul_CI)
old_results = pd.read_excel('../phage_biomass_estimate.xlsx')
result = old_results.copy()
result.loc[0] = pd.Series({
'Parameter': 'Total number of phages',
'Value': best_estimate,
'Units': 'Number of individuals',
'Uncertainty': mul_CI
})
result.to_excel('../phage_biomass_estimate.xlsx',index=False)
| viruses/phage_num/.ipynb_checkpoints/tot_phage_num-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SamuelaAnastasi/RNN_Sentiment_Analysis/blob/master/Sentiment_Analysis_RNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="B5_W3AptCHNe" colab_type="code" colab={}
from google.colab import drive
drive.mount('/gdrive')
# + id="L4ZtWgknGQ5O" colab_type="code" colab={}
import numpy as np
# read txt files of reviews and labels
with open('/gdrive/My Drive/Colab Notebooks/sentiment_analysis/data/reviews.txt', 'r') as f:
reviews = f.read()
with open('/gdrive/My Drive/Colab Notebooks/sentiment_analysis/data/labels.txt', 'r') as f:
labels = f.read()
# + id="c6RxsuHAHumU" colab_type="code" outputId="b4c2b877-b90c-4357-be94-18fdc49600cc" colab={"base_uri": "https://localhost:8080/", "height": 121}
print(reviews[:200])
print()
print(labels[:26])
# + id="iRPHmfRjJiCG" colab_type="code" outputId="1f970ebc-f55a-4873-f09e-964486e997ce" colab={"base_uri": "https://localhost:8080/", "height": 34}
#preprocess and tokenize text data
#convert to lowercase
#clean data: remove punctuation
from string import punctuation
#string.punctuation python 3.0
print(punctuation)
reviews = reviews.lower()
clean_reviews = ''.join([c for c in reviews if c not in punctuation])
# + id="3T2lT9xTMVDY" colab_type="code" colab={}
#clean data: remove \n chars that separates reviews from each-other
# split clean reviews by \n and join them again
reviews_split = clean_reviews.split('\n')
clean_reviews = ' '.join(reviews_split)
# + id="3WcTTOnGNObQ" colab_type="code" outputId="b3635769-31d0-483f-fc17-d5b29a3fdef5" colab={"base_uri": "https://localhost:8080/", "height": 353}
#create list of all words in cleaned reviews and print some of them
words = clean_reviews.split()
words[:20]
# + id="dkPvZyvfOCC3" colab_type="code" colab={}
#encode each word and label as int
# create a dict that maps each unique word to int vals
# subclass of dict: counts the hashtable object
#creates a dict that maps obj to the n of times they apear in the input
from collections import Counter
#create dict of words where most frequent words are assigned lowest int vals
w_counts = Counter(words)
w_sorted = sorted(w_counts, key=w_counts.get, reverse=True)
# vocab = sorted(counts, key=counts.get, reverse=True)
#create dict and assign 1 to most frequent word
w_to_int = {word: i for i, word in enumerate(w_sorted, 1)}
# create a list that will contain all int values assigned to each word for each review
reviews_ints = []
# get each review in reviews previously splitted by \n
for review in reviews_split:
#then for each word in this review get the int val from the w_to_int dict
#and append it to the reviews_ints.
#Now each word in each review is stored as int inside reviews_ints
reviews_ints.append([w_to_int[word] for word in review.split()])
# + [markdown] id="1vwovLugclAD" colab_type="text"
# ###Test data preprocessing
# + id="mbCjyr9HcogZ" colab_type="code" outputId="7a2e601b-dd15-4519-837f-186d2504209a" colab={"base_uri": "https://localhost:8080/", "height": 104}
# stats about vocabulary
print('Unique words: ', len((w_to_int))) # should ~ 74000+
print()
# print tokens in first review
print('Tokenized review: \n', reviews_ints[:1])
# + [markdown] id="e1jq7neDfL3f" colab_type="text"
# ###Convert labels
# Labels have values positive and negative that should be converted to 1 and 0 respectively
# + id="NDD4-2ereyzh" colab_type="code" colab={}
#convert labels to be all 1 and 0
# 1=positive, 0=negative label conversion
labels_split = labels.split('\n')
encoded_labels = np.array([1 if label == 'positive' else 0 for label in labels_split])
# + [markdown] id="TNeTRUt5ihKg" colab_type="text"
# ###Remove Outliers
# Some of the reviews are too long or too short. The model requires length of input data to be consistent. So extremely long or short reviews should be eliminated and the rest of reviews should either be truncated or padded with new values to reach the appropriate length.
# + id="iCqtFa42jAgU" colab_type="code" outputId="56b16ac1-f14f-4341-a91d-4fde9ce7d5a2" colab={"base_uri": "https://localhost:8080/", "height": 50}
# check for outliers in reviews
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
# + id="GP9IQPyhnYLx" colab_type="code" outputId="99f62397-023d-4238-facb-f142d3f71cfd" colab={"base_uri": "https://localhost:8080/", "height": 50}
#remove 0-length reviews and respective labels
print('Number of reviews before removing outliers: ', len(reviews_ints))
# get indices of any reviews with length 0
non_zero_idx = [i for i, review in enumerate(reviews_ints) if len(review) != 0]
# remove 0-length reviews and their labels
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
encoded_labels = np.array([encoded_labels[ii] for ii in non_zero_idx])
print('Number of reviews after removing outliers: ', len(reviews_ints))
# + id="HeJYM2nTpf6X" colab_type="code" colab={}
#truncate long reviews or pad the short ones with columns of 0 on the left
def pad_reviews(reviews_ints, r_length):
    """Return a 2D int array of shape (n_reviews, r_length).

    Each encoded review is truncated to its first r_length tokens if too
    long, or left-padded with zeros if too short, so every row has the
    same length.  Assumes every review is non-empty (zero-length reviews
    are removed upstream).
    """
    features = np.zeros((len(reviews_ints), r_length), dtype=int)
    for row, tokens in enumerate(reviews_ints):
        # Keep at most r_length tokens; writing from the right end of the
        # row leaves the zero padding on the left for short reviews.
        head = np.array(tokens)[:r_length]
        features[row, -len(tokens):] = head
    return features
# + [markdown] id="1dGFkTNxxvc6" colab_type="text"
# ### Test implementation
# + id="zVJUS1HlxzuI" colab_type="code" outputId="8a013b89-6b0c-4550-81eb-538788cc7ad3" colab={"base_uri": "https://localhost:8080/", "height": 353}
# Input size for each review
r_length = 200
features = pad_reviews(reviews_ints, r_length=r_length)
assert len(features)==len(reviews_ints), "Your features should have as many rows as reviews."
assert len(features[0])==r_length, "Each feature row should contain seq_length values."
# print first 10 word values of the first 20 batches
print(features[:20,:10])
# + [markdown] id="FkiFQbPo5yWt" colab_type="text"
# ### Split data in training, validation and test set
# + id="HH1_sXGL58M-" colab_type="code" outputId="cd262075-0ef8-4525-fb55-1eec4b08f2bb" colab={"base_uri": "https://localhost:8080/", "height": 67}
# 0.8 train - 0.1 validation - 0.1 test
split_factor = 0.8
split_index = int(len(features) * split_factor)
train_data, rest_of_data = features[:split_index], features[split_index:]
train_y, rest_of_data_y = encoded_labels[:split_index], encoded_labels[split_index:]
test_index = int(len(rest_of_data) * 0.5)
valid_data, test_data = rest_of_data[:test_index], rest_of_data[test_index:]
val_y, test_y = rest_of_data_y[:test_index], rest_of_data_y[test_index:]
print("Train set: \t\t{}".format(train_data.shape),
"\nValidation set: \t{}".format(valid_data.shape),
"\nTest set: \t\t{}".format(test_data.shape))
# + id="kbX7X0wr9YhW" colab_type="code" colab={}
import torch
from torch.utils.data import TensorDataset, DataLoader
batch_size = 50
# convert to Tensor
train_set = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_y))
valid_set = TensorDataset(torch.from_numpy(valid_data), torch.from_numpy(val_y))
test_set = TensorDataset(torch.from_numpy(test_data), torch.from_numpy(test_y))
# load in batches
train_loader = DataLoader(train_set, shuffle=True, batch_size=batch_size)
valid_loader = DataLoader(valid_set, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_set, shuffle=True, batch_size=batch_size)
# + id="yoGDqAqm-uwM" colab_type="code" outputId="a70e6efc-8439-445a-9dbe-7815edc31862" colab={"base_uri": "https://localhost:8080/", "height": 269}
# Pull one batch from the training loader to sanity-check tensor shapes.
# BUG FIX: `dataiter` was never created, and Python 3 iterators are
# advanced with next(it), not it.next().
dataiter = iter(train_loader)
sample_x, sample_y = next(dataiter)
print('Sample input size: ', sample_x.size())  # batch_size, seq_length
print('Sample input: \n', sample_x)
print()
print('Sample label size: ', sample_y.size())  # batch_size
print('Sample label: \n', sample_y)
# + [markdown] id="klz0MAV9ODTa" colab_type="text"
# ###Create model
# + id="Cvryi57nOH-V" colab_type="code" outputId="e1f10ce9-ce84-<KEY>" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Check if GPU is available
train_on_gpu=torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU.')
else:
print('No GPU available, training on CPU.')
# + id="PeDHhsfoy-MS" colab_type="code" colab={}
import torch.nn as nn
class SentimentNet(nn.Module):
    """LSTM-based binary sentiment classifier.

    Pipeline: embedding -> stacked LSTM -> dropout -> linear -> sigmoid.
    The forward pass returns the sigmoid score of the *last* time step of
    each sequence, plus the updated LSTM hidden state.
    """

    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
        super(SentimentNet, self).__init__()
        self.output_size = output_size
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        # Trainable word embeddings feed a stacked, batch-first LSTM.
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            dropout=drop_prob, batch_first=True)
        self.dropout = nn.Dropout(0.3)
        self.fc = nn.Linear(hidden_dim, output_size)
        self.sig = nn.Sigmoid()

    def forward(self, x, hidden):
        """Run a batch of token-id sequences (batch, seq) through the net.

        Returns (per-sequence sigmoid scores for the final time step,
        updated hidden state).
        """
        n_batch = x.size(0)
        embedded = self.embedding(x.long())
        lstm_out, hidden = self.lstm(embedded, hidden)
        # Flatten to (batch*seq, hidden_dim) so fc scores every time step.
        flat = lstm_out.contiguous().view(-1, self.hidden_dim)
        scores = self.sig(self.fc(self.dropout(flat)))
        # Back to (batch, seq); keep only the last time step per sequence.
        scores = scores.view(n_batch, -1)
        return scores[:, -1], hidden

    def init_hidden(self, batch_size):
        """Create zeroed (h, c) LSTM state tensors, each sized
        n_layers x batch_size x hidden_dim, on GPU when available."""
        # weight.new(...) keeps dtype/device consistent with the parameters.
        weight = next(self.parameters()).data
        state_shape = (self.n_layers, batch_size, self.hidden_dim)
        if (train_on_gpu):
            hidden = (weight.new(*state_shape).zero_().cuda(),
                      weight.new(*state_shape).zero_().cuda())
        else:
            hidden = (weight.new(*state_shape).zero_(),
                      weight.new(*state_shape).zero_())
        return hidden
# + id="Kmz-YSOKvVhe" colab_type="code" outputId="2908f1b1-1520-48eb-c959-6e55a827e4cb" colab={"base_uri": "https://localhost:8080/", "height": 134}
# Instantiate the model w/ hyperparams
# +1 reserves integer 0 for the padding token.
vocab_size = len(w_to_int) + 1
output_size = 1
embedding_dim = 400
hidden_dim = 256
n_layers = 2
net = SentimentNet(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
# + [markdown] id="wDF7jlyQvZUL" colab_type="text"
# ##Training
# + id="FLk9X1Myvctl" colab_type="code" colab={}
# Binary cross-entropy on the sigmoid output, optimized with Adam.
lr=0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# + id="3cUNooGGvgnE" colab_type="code" outputId="4a11ddb5-344b-4134-8cf1-1b6084e6ea1b" colab={"base_uri": "https://localhost:8080/", "height": 286}
epochs = 4
counter = 0
print_every = 100
# gradient clipping
clip=5
if(train_on_gpu):
    net.cuda()
# Enable training mode (activates dropout) and run the training epochs.
net.train()
for e in range(epochs):
    h = net.init_hidden(batch_size)
    for inputs, labels in train_loader:
        counter += 1
        if(train_on_gpu):
            inputs, labels = inputs.cuda(), labels.cuda()
        # Detach hidden state so gradients don't flow across batches.
        h = tuple([each.data for each in h])
        net.zero_grad()
        output, h = net(inputs, h)
        loss = criterion(output.squeeze(), labels.float())
        loss.backward()
        # prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(net.parameters(), clip)
        optimizer.step()
        if counter % print_every == 0:
            # Periodic validation pass.
            # NOTE(review): runs without torch.no_grad(), so autograd graphs
            # are built needlessly during validation; harmless but wasteful.
            val_h = net.init_hidden(batch_size)
            val_losses = []
            net.eval()
            for inputs, labels in valid_loader:
                val_h = tuple([each.data for each in val_h])
                if(train_on_gpu):
                    inputs, labels = inputs.cuda(), labels.cuda()
                output, val_h = net(inputs, val_h)
                val_loss = criterion(output.squeeze(), labels.float())
                val_losses.append(val_loss.item())
            net.train()
            print("Epoch: {}/{}...".format(e+1, epochs),
                  "Step: {}...".format(counter),
                  "Loss: {:.6f}...".format(loss.item()),
                  "Val Loss: {:.6f}".format(np.mean(val_losses)))
# + [markdown] id="JnjvXu9zvnQY" colab_type="text"
# ##Testing
# + id="1gnnv7M6vlyj" colab_type="code" outputId="10401fdd-5c1c-4271-c9f0-7a7ac3fb254a" colab={"base_uri": "https://localhost:8080/", "height": 50}
# Evaluate average BCE loss and accuracy on the held-out test loader.
test_losses = []
num_correct = 0
h = net.init_hidden(batch_size)
net.eval()
for inputs, labels in test_loader:
    # Detach hidden state between batches, mirroring the training loop.
    h = tuple([each.data for each in h])
    if(train_on_gpu):
        inputs, labels = inputs.cuda(), labels.cuda()
    output, h = net(inputs, h)
    test_loss = criterion(output.squeeze(), labels.float())
    test_losses.append(test_loss.item())
    # convert output probabilities to predicted class (0 or 1)
    pred = torch.round(output.squeeze())
    correct_tensor = pred.eq(labels.float().view_as(pred))
    # Move to CPU before numpy conversion when running on GPU.
    correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
    num_correct += np.sum(correct)
print("Test loss: {:.3f}".format(np.mean(test_losses)))
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
# + id="vtsW1L0_vvTl" colab_type="code" colab={}
# negative test review
test_review_neg = 'The worst movie I have seen; acting was terrible and I want my money back. This movie had bad acting and the dialogue was slow.'
# + id="mmSqMl_2vz8n" colab_type="code" outputId="92a5b68b-51ce-4b19-e2a4-e0b5044c9611" colab={"base_uri": "https://localhost:8080/", "height": 34}
from string import punctuation
def tokenize_review(test_review):
    """Lowercase a review, strip punctuation, and map words to integer ids.

    Returns a single-element list wrapping the id list, matching the
    batch-of-one shape that pad_reviews/predict expect. Words missing from
    the global `w_to_int` vocabulary raise KeyError, as before.
    """
    cleaned = ''.join(ch for ch in test_review.lower() if ch not in punctuation)
    return [[w_to_int[word] for word in cleaned.split()]]
# Tokenize the sample negative review into word ids.
test_ints = tokenize_review(test_review_neg)
print(test_ints)
# + id="M3v0umiTv3J_" colab_type="code" outputId="e607020c-09dd-448d-b2da-b8b8973ab0af" colab={"base_uri": "https://localhost:8080/", "height": 218}
# Pad/truncate the tokenized review to the fixed model input length.
seq_length=200
features = pad_reviews(test_ints, seq_length)
print(features)
# + id="HC8QrrnVv6gf" colab_type="code" outputId="4bf8a982-4b19-4206-d574-bc3ad488b492" colab={"base_uri": "https://localhost:8080/", "height": 34}
feature_tensor = torch.from_numpy(features)
print(feature_tensor.size())
# + id="nfB30fl7v_Xg" colab_type="code" colab={}
def predict(net, test_review, sequence_length=200):
    """Print the model's sentiment verdict for one raw review string.

    Tokenizes and pads the review to `sequence_length`, runs it through
    `net` in eval mode, and prints the raw sigmoid score followed by a
    positive/negative label. Returns None.
    """
    net.eval()
    # Text -> padded integer features -> tensor of shape (1, sequence_length).
    padded = pad_reviews(tokenize_review(test_review), sequence_length)
    feature_tensor = torch.from_numpy(padded)
    h = net.init_hidden(feature_tensor.size(0))
    if(train_on_gpu):
        feature_tensor = feature_tensor.cuda()
    output, h = net(feature_tensor, h)
    # convert output probabilities to predicted class (0 or 1)
    pred = torch.round(output.squeeze())
    print('Prediction value, pre-rounding: {:.6f}'.format(output.item()))
    if(pred.item()==1):
        print("Positive review detected!")
    else:
        print("Negative review detected.")
# + id="Lp53x0z0wDBc" colab_type="code" colab={}
# positive test review
test_review_pos = 'This movie had the best acting and the dialogue was so good. I loved it.'
# + id="Vm0hk5SAwHDs" colab_type="code" outputId="aabaf9ad-f01b-4fe7-972c-3d71fac82377" colab={"base_uri": "https://localhost:8080/", "height": 50}
# NOTE(review): this passes test_review_neg, not the test_review_pos just
# defined — possibly intentional, but verify which demo was meant.
seq_length=200
predict(net, test_review_neg, seq_length)
| Sentiment_Analysis_RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: arviz3.6
# language: python
# name: arviz3_6
# ---
# # Inference Data Cookbook
# `InferenceData` is the central data format for ArviZ. `InferenceData` itself is just a container that maintains references to one or more `xarray.Dataset`. Below are various ways to generate an `InferenceData` object. See [here](XarrayforArviZ.ipynb) for more on xarray.
import arviz as az
import numpy as np
# ## From 1d numpy array
# Convert a bare 1-d sample array into an InferenceData object.
size = 100
dataset = az.convert_to_inference_data(np.random.randn(size))
print(dataset)
dataset.posterior
# ## From nd numpy array
#
# Convert an n-dimensional random array.
shape = (1, 2, 3, 4, 5)
dataset = az.convert_to_inference_data(np.random.randn(*shape))
print(dataset)
dataset.posterior
# ## From a dictionary
# Each dictionary key becomes a posterior variable name.
datadict = {
    'a': np.random.randn(100),
    'b': np.random.randn(1, 100, 10),
    'c': np.random.randn(1, 100, 3, 4),
}
dataset = az.convert_to_inference_data(datadict)
print(dataset)
dataset.posterior
# ## From dictionary with coords and dims
# +
# `coords` supplies index values; `dims` maps each variable to its coords.
datadict = {
    'a': np.random.randn(100),
    'b': np.random.randn(1, 100, 10),
    'c': np.random.randn(1, 100, 3, 4),
}
coords = {'c1' : np.arange(3), 'c2' : np.arange(4), 'b1' : np.arange(10)}
dims = {'b' : ['b1'], 'c' : ['c1', 'c2']}
dataset = az.convert_to_inference_data(datadict, coords=coords, dims=dims)
print(dataset)
dataset.posterior
# -
# ## From pymc3
# +
import pymc3 as pm
draws = 500
chains = 2
# Classic eight-schools data: J school effects y with known std devs sigma.
eight_school_data = {'J': 8,
                     'y': np.array([28., 8., -3., 7., -1., 1., 18., 12.]),
                     'sigma': np.array([15., 10., 16., 11., 9., 11., 10., 18.])
                     }
with pm.Model() as model:
    mu = pm.Normal('mu', mu=0, sd=5)
    tau = pm.HalfCauchy('tau', beta=5)
    # Non-centered parameterization: theta = mu + tau * theta_tilde.
    theta_tilde = pm.Normal('theta_tilde', mu=0, sd=1, shape=eight_school_data['J'])
    theta = pm.Deterministic('theta', mu + tau * theta_tilde)
    pm.Normal('obs', mu=theta, sd=eight_school_data['sigma'], observed=eight_school_data['y'])
    trace = pm.sample(draws, chains=chains)
    prior = pm.sample_prior_predictive()
    posterior_predictive = pm.sample_posterior_predictive(trace, 500, model)
# Bundle trace + prior + posterior predictive into one InferenceData.
data = az.from_pymc3(
    trace=trace,
    prior=prior,
    posterior_predictive=posterior_predictive,
    coords={'school': np.arange(eight_school_data['J'])},
    dims={'theta': ['school'], 'theta_tilde': ['school']},
)
data
# -
# ## From pystan
# +
import pystan
schools_code = '''
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta_tilde[J];
}
transformed parameters {
real theta[J];
for (j in 1:J)
theta[j] = mu + tau * theta_tilde[j];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta_tilde ~ normal(0, 1);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
'''
stan_model = pystan.StanModel(model_code=schools_code)
fit = stan_model.sampling(data=eight_school_data,
iter=draws,
warmup=0,
chains=chains)
data = az.from_pystan(posterior=fit,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords={'school': np.arange(eight_school_data['J'])},
dims={'theta': ['school'],
'y': ['school'],
'log_lik': ['school'],
'y_hat': ['school'],
'theta_tilde': ['school']
}
)
data
# -
# ## From pyro
# +
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer.mcmc import MCMC, NUTS
pyro.enable_validation(True)
pyro.set_rng_seed(0)
draws = 1000
warmup_steps = 0
# Same eight-schools data, this time as torch tensors for pyro.
eight_school_data = {'J' : 8,
                     'y' : torch.tensor([28, 8, -3, 7, -1, 1, 18, 12]).type(torch.Tensor),
                     'sigma' : torch.tensor([15, 10, 16, 11, 9, 11, 10, 18]).type(torch.Tensor)
                     }
def model(sigma):
    """Non-centered eight-schools model: obs ~ Normal(mu + tau * eta, sigma)."""
    J = eight_school_data['J']
    eta = pyro.sample('eta', dist.Normal(torch.zeros(J), torch.ones(J)))
    mu = pyro.sample('mu', dist.Normal(torch.zeros(1), 10 * torch.ones(1)))
    tau = pyro.sample('tau', dist.HalfCauchy(scale=25 * torch.ones(1)))
    return pyro.sample("obs", dist.Normal(mu + tau * eta, sigma))
def conditioned_model(model, sigma, y):
    """Condition `model`'s "obs" site on the observed outcomes y, then run it."""
    conditioned = poutine.condition(model, data={"obs": y})
    return conditioned(sigma)
# Run NUTS over the conditioned model and convert the result for ArviZ.
nuts_kernel = NUTS(conditioned_model, adapt_step_size=True)
posterior = MCMC(nuts_kernel,
                 num_samples=draws,
                 warmup_steps=warmup_steps).run(model, eight_school_data['sigma'], eight_school_data['y'])
pyro_data = az.from_pyro(posterior)
pyro_data
# -
# ## From emcee
# +
import emcee
# Eight-schools data again, as plain numpy arrays for the emcee sampler.
eight_school_data = {'J': 8,
                     'y': np.array([28., 8., -3., 7., -1., 1., 18., 12.]),
                     'sigma': np.array([15., 10., 16., 11., 9., 11., 10., 18.])
                     }
def log_prior_8school(theta, J):
    """Unnormalized log-prior for the eight-schools parameter vector.

    `theta` packs [mu, tau, eta_1..eta_J]. Returns -inf when tau < 0
    (outside the half-Cauchy support), otherwise the sum of the mu, tau
    and eta log-priors (normalizing constants dropped).
    """
    mu, tau, eta = theta[0], theta[1], theta[2:]
    if tau < 0:
        # Half-cauchy prior
        return -np.inf
    hwhm = 25
    log_p_tau = -np.log(tau**2 + hwhm**2)   # half-Cauchy(25), up to a constant
    log_p_mu = -(mu / 10)**2                # normal prior, loc=0, scale=10
    log_p_eta = -np.sum(eta**2)             # normal prior, loc=0, scale=1
    return log_p_mu + log_p_tau + log_p_eta
def log_likelihood_8school(theta, y, sigma):
    """Unnormalized Gaussian log-likelihood of the school effects y.

    `theta` packs [mu, tau, eta_1..eta_J]; each school j contributes
    -((mu + tau*eta_j - y_j) / sigma_j)**2 (the 1/2 factor and constants
    are dropped, matching the prior's normalization).
    """
    mu, tau, eta = theta[0], theta[1], theta[2:]
    scaled_resid = (mu + tau * eta - y) / sigma
    return -np.sum(scaled_resid**2)
def lnprob_8school(theta, J, y, sigma):
    """Unnormalized log-posterior (prior + likelihood) — the emcee target."""
    log_prior = log_prior_8school(theta, J)
    if log_prior <= -np.inf:
        # The prior already rules this point out; skip the likelihood.
        return -np.inf
    return log_likelihood_8school(theta, y, sigma) + log_prior
# One dimension per parameter: mu, tau, then J eta values.
nwalkers = 40
ndim = eight_school_data['J']+2
draws = 1500
pos = np.random.normal(size=(nwalkers,ndim))
# tau (column 1) must be non-negative to start inside the prior support.
pos[:,1] = np.absolute(pos[:,1])
sampler = emcee.EnsembleSampler(nwalkers,
                                ndim,
                                lnprob_8school,
                                args=(eight_school_data['J'],
                                      eight_school_data['y'],
                                      eight_school_data['sigma']
                                      )
                                )
sampler.run_mcmc(pos, draws)
# define variable names, it cannot be inferred from emcee
var_names = ['mu','tau']+['eta{}'.format(i) for i in range(eight_school_data['J'])]
emcee_data = az.from_emcee(sampler, var_names = var_names)
emcee_data
# -
# ## From cmdstan
# See [from_cmdstan](https://arviz-devs.github.io/arviz/generated/arviz.from_cmdstan.html#arviz.from_cmdstan) for details. Cookbook documentation coming soon.
| doc/notebooks/InferenceDataCookbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Theil-Sen Regression
#
#
# Computes a Theil-Sen Regression on a synthetic dataset.
#
# See `theil_sen_regression` for more information on the regressor.
#
# Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
# estimator is robust against outliers. It has a breakdown point of about 29.3%
# in case of a simple linear regression which means that it can tolerate
# arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
# case.
#
# The estimation of the model is done by calculating the slopes and intercepts
# of a subpopulation of all possible combinations of p subsample points. If an
# intercept is fitted, p must be greater than or equal to n_features + 1. The
# final slope and intercept is then defined as the spatial median of these
# slopes and intercepts.
#
# In certain cases Theil-Sen performs better than `RANSAC
# <ransac_regression>` which is also a robust method. This is illustrated in the
# second example below where outliers with respect to the x-axis perturb RANSAC.
# Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
# general a priori knowledge about the data and the nature of the outliers is
# needed.
# Due to the computational complexity of Theil-Sen it is recommended to use it
# only for small problems in terms of number of samples and features. For larger
# problems the ``max_subpopulation`` parameter restricts the magnitude of all
# possible combinations of p subsample points to a randomly chosen subset and
# therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
# problems with the drawback of losing some of its mathematical properties since
# it then works on a random subset.
#
# +
# Author: <NAME> -- <<EMAIL>>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
# Three estimators compared on the same corrupted datasets.
estimators = [('OLS', LinearRegression()),
              ('Theil-Sen', TheilSenRegressor(random_state=42)),
              ('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
# #############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
    # Time each fit and plot its fitted line over the scatter.
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
# #############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
    t0 = time.time()
    estimator.fit(X, y)
    elapsed_time = time.time() - t0
    y_pred = estimator.predict(line_x.reshape(2, 1))
    plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
             label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| sklearn/sklearn learning/demonstration/auto_examples_jupyter/linear_model/plot_theilsen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# all imports necessary for this notebook
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import gc
import copy
import xgboost
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold, cross_validate, train_test_split
from sklearn.metrics import roc_auc_score
import os
# List every file available under the Kaggle input directory.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# +
#read data
folder_path = '/kaggle/input/ieee-fraud-detection/'
train_identity = pd.read_csv(f'{folder_path}train_identity.csv')
train_transaction = pd.read_csv(f'{folder_path}train_transaction.csv')
test_identity = pd.read_csv(f'{folder_path}test_identity.csv')
test_transaction = pd.read_csv(f'{folder_path}test_transaction.csv')
sample_submission = pd.read_csv(f'{folder_path}sample_submission.csv')
# Merge identity and transaction data
train_df = pd.merge(train_transaction, train_identity, on='TransactionID', how='left')
test_df = pd.merge(test_transaction, test_identity, on='TransactionID', how='left')
del train_identity, train_transaction, test_identity, test_transaction
# -
# Split off the target so train_df and test_df share the same columns.
target_df = train_df[['TransactionID', 'isFraud']]
train_df.drop(columns=['isFraud'], inplace = True)
random.seed(12345)
np.random.seed(12345)
train_df.head()
test_df.head()
target_df.head()
print(train_df.shape)
print(test_df.shape)
print(target_df.shape)
# +
# Partition the columns into categorical vs. continuous sets
# (the target column is excluded from both).
cols_all = train_df.columns
cols_target = 'isFraud'
cols_cat = {'id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22',
            'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_30', 'id_31', 'id_32', 'id_33',
            'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'ProductCD', 'card4',
            'card6', 'M4','P_emaildomain', 'R_emaildomain', 'card1', 'card2', 'card3', 'card5', 'addr1',
            'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9'}
cols_cont = set([col for col in cols_all if col not in cols_cat and col != cols_target] )
# cols_cont.remove(cols_target)
print(len(cols_cat))
print(len(cols_cont))
print(len(cols_cat) + len(cols_cont))
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
def get_adverserial_df(df_1, df_2, n_sample, label = 'test_data'):
    """Build a labelled frame for adversarial validation.

    Draws `n_sample` rows from each input; rows from df_1 get `label` == 0,
    rows from df_2 get `label` == 1. Returns the concatenation with a fresh
    index, or None (after printing an error) when both inputs are 2-d and
    their column counts differ.
    """
    both_2d = len(df_1.shape) > 1 and len(df_2.shape) > 1
    if both_2d and df_1.shape[1] != df_2.shape[1]:
        print("Error!!")
        return
    first = df_1.sample(n_sample)
    first[label] = 0
    second = df_2.sample(n_sample)
    second[label] = 1
    return pd.concat([first, second], ignore_index=True)
# -
def encode_cat_columns(df, cat_cols):
    """Label-encode, in place, every column of `cat_cols` present in df.

    Each listed column is replaced by its integer category codes (missing
    values become -1); names absent from df are silently skipped. Returns
    the same DataFrame for chaining.
    """
    for col in (c for c in cat_cols if c in df.columns):
        df[col] = df[col].astype('category').cat.codes
    return df
def run_adversrial_analysis(data, target, test_size=1/3, print_result = True):
    """Fit an XGBoost classifier to separate the two adversarial classes.

    Splits (data, target) into stratified train/test parts, fits a default
    XGBClassifier, and scores it with ROC-AUC — an AUC near 0.5 means the
    two samples are indistinguishable. When `print_result` is set, prints
    the score and plots the top-20 feature importances (gain and weight).

    Returns (roc_score, fitted_classifier). Callers that accumulate the
    score must unpack the tuple first.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        data, target, test_size=test_size, stratify=target, random_state=0)
    clf = XGBClassifier()
    clf.fit(X_train, y_train)
    positive_prob = clf.predict_proba(X_test)[:, 1]
    roc_score = roc_auc_score(y_test, positive_prob)
    if print_result:
        print("roc_auc score %.2f" % roc_score)
        xgboost.plot_importance(clf, max_num_features=20, importance_type='gain')
        xgboost.plot_importance(clf, max_num_features=20, importance_type='weight')
    return roc_score, clf
def reset_cols_excluded(print_excluded = False):
    """Return a fresh default set of columns excluded from adversarial models.

    The label itself, the row id, and the raw timestamp are always excluded;
    optionally prints the set for logging.
    """
    defaults = {'test_data', 'TransactionID', 'TransactionDT'}
    if print_excluded:
        print("Excluded columns:\n", defaults, sep='')
    return defaults
cols_excluded = reset_cols_excluded(print_excluded=True)
# Sanity checks: sampling a frame against itself should be indistinguishable
# (AUC expected near 0.5).
print("Run adversarial: train vs. train")
adverserial_df = get_adverserial_df(train_df, train_df, n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
print("Run adversarial: test vs. test")
adverserial_df = get_adverserial_df(test_df, test_df, n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
# Order rows by time so positional slices become temporal slices.
train_df.sort_values(by=['TransactionDT', 'TransactionID'], inplace=True)
test_df.sort_values(by=['TransactionDT', 'TransactionID'], inplace=True)
# Compare the first quarter vs. the last quarter of the (time-ordered) data.
print("Run adversarial: train vs. train (temporally ordered)")
cols_excluded = reset_cols_excluded()
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
adverserial_df = get_adverserial_df(train_df[:temp_q1_len], train_df[temp_q3_len:], n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
print("Run adversarial: test vs. test (temporally ordered)")
cols_excluded = reset_cols_excluded()
temp_q1_len = int(len(test_df) / 4)
temp_half_len = int(len(test_df) / 2)
temp_q3_len = int(len(test_df) / 4) + temp_half_len
adverserial_df = get_adverserial_df(test_df[:temp_q1_len], test_df[temp_q3_len:], n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
# +
print("Run adversarial: train vs. test ")
cols_excluded = reset_cols_excluded()
adverserial_df = get_adverserial_df(train_df[:], test_df[:], n_sample=10000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
rocScore, clf = run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
# +
print("Run adversarial: train vs. train (temporally ordered)")
cols_excluded = reset_cols_excluded()
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
adverserial_df = get_adverserial_df(train_df[:temp_q1_len], train_df[temp_q3_len:], n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
rocScore, clf = run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
# +
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
# NOTE(review): .loc slicing here is label-based; after sort_values above the
# index keeps the original row labels, so these are not clean positional
# quartile slices — .iloc was probably intended. Verify.
temp_train_df = train_df.loc[:temp_q1_len]
temp_test_df = train_df.loc[temp_q3_len:]
temp_train_df.loc[:, cols_cat] = encode_cat_columns(temp_train_df.loc[:, cols_cat], cols_cat)
temp_test_df.loc[:, cols_cat] = encode_cat_columns(temp_test_df.loc[:, cols_cat], cols_cat)
# Score early-period rows with the adversarial classifier fitted above.
pred_prob_q1 = clf.predict_proba( temp_train_df.drop(columns = ['TransactionDT', 'TransactionID']))
# -
temp_train_df['pred_prob_q1'] = np.nan
temp_train_df.loc[:, ['pred_prob_q1']] = pred_prob_q1[:, 1]
#temp_train_df['pred_prob_q1'].head()
# +
# Build fraud-prediction train/test sets from the early/late period frames.
X_train = temp_train_df.drop(columns=['TransactionDT']).sample(10000)
y_train = pd.merge(X_train, target_df, on='TransactionID', how='left', left_index=True).loc[:, 'isFraud']
X_test = temp_test_df.drop(columns=['TransactionDT']).sample(10000)
y_test = pd.merge(X_test, target_df, on='TransactionID', how='left', left_index=True).loc[:, 'isFraud']
X_train.drop(columns= [ 'TransactionID'], inplace= True)
X_test.drop(columns= ['TransactionID'], inplace= True)
# -
# Baseline fraud model (pred_prob_q1 dropped so train/test features match).
clf = XGBClassifier(max_depth=5)
clf.fit(X_train.drop(columns=['pred_prob_q1']), y_train)
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.4f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20, importance_type='gain')
xgboost.plot_importance(clf, max_num_features=20, importance_type='weight')
# +
# Re-fit with per-row sample weights derived from the adversarial score
# (fraud rows get their original weight back via the *1.5 rescale).
X_train.loc[:, ['pred_prob_q1']] = X_train.loc[:, ['pred_prob_q1']] / 1.5
X_train.loc[y_train==1, ['pred_prob_q1']] = X_train.loc[y_train==1, ['pred_prob_q1']] * 1.5
clf = XGBClassifier(max_depth=5)
clf.fit(X_train.drop(columns=['pred_prob_q1']), y_train, sample_weight = X_train['pred_prob_q1'] )
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.4f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20, importance_type='gain')
xgboost.plot_importance(clf, max_num_features=20, importance_type='weight')
# -
X_train['pred_prob_q1'].hist()
adverserial_df['predict_train'] = np.nan
# NOTE(review): X_train has no 'predict_train' column at this point, so this
# drop raises KeyError — 'pred_prob_q1' was likely intended. Also note clf was
# fitted without 'pred_prob_q1', so predicting on a frame that still carries
# it would mismatch the trained feature set. Verify this cell.
X_train['predict_train']=clf.predict(X_train.drop(columns= 'predict_train'))
plt.hist(X_train['predict_train'].loc[y_train == 0] )
# NOTE(review): mixes X_train's index with adverserial_df's boolean mask and
# labels — confirm the two frames actually share an index before trusting this.
temp_df = X_train['predict_train'].loc[adverserial_df['test_data'] == 0]
indeces = temp_df[X_train['predict_train'] == 1].index
adverserial_df.loc[indeces].shape
#clf = XGBClassifier(max_depth=5)
clf.fit(X_train.drop(columns= 'predict_train').loc[indeces], y_train[indeces])
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.4f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20, importance_type='gain')
xgboost.plot_importance(clf, max_num_features=20, importance_type='weight')
# Focus on one variable that shows temporal dependence
# For example M7
print("Run adversarial: train vs. train (temporally ordered) on M7")
cols_excluded = reset_cols_excluded()
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
adverserial_df = get_adverserial_df(train_df.loc[:temp_q1_len, ['M7']], train_df.loc[temp_q3_len:, ['M7']], n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns='test_data'), adverserial_df['test_data'])
# +
# Same temporal check with the V/M/D/C/id feature families excluded.
cols_excluded = reset_cols_excluded()
cols_excluded.update(train_df.filter(regex = '^V|^M|^D|^C|^id').columns )
print("Run adversarial: train vs. train (temporally ordered) **Without many variables**")
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
adverserial_df = get_adverserial_df(train_df[:temp_q1_len], train_df[temp_q3_len:], n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
cols_excluded = reset_cols_excluded()
# -
cols_excluded = reset_cols_excluded()
cols_excluded.update(test_df.filter(regex = '^V|^M|^D|^C|^id').columns )
print("Run adversarial: test vs. test (temporally ordered) **Without many variables**")
temp_q1_len = int(len(test_df) / 4)
temp_half_len = int(len(test_df) / 2)
temp_q3_len = int(len(test_df) / 4) + temp_half_len
adverserial_df = get_adverserial_df(test_df[:temp_q1_len], test_df[temp_q3_len:], n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
run_adversrial_analysis(adverserial_df.drop(columns=cols_excluded), adverserial_df['test_data'])
cols_excluded = reset_cols_excluded()
# +
# Fraud model trained on early data, tested on late data, WITHOUT the
# V/M/D/C/id feature families (the time-correlated ones).
cols_excluded = reset_cols_excluded()
cols_excluded.update(train_df.filter(regex = '^V|^M|^D|^C|^id').columns )
cols_excluded.remove('test_data')
cols_excluded.remove('TransactionID')
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
X_train = train_df.drop(columns=cols_excluded).loc[:temp_q1_len ]
y_train = pd.merge(X_train, target_df, on='TransactionID', how='left').loc[:, 'isFraud']
X_test = train_df.drop(columns=cols_excluded).loc[temp_q3_len: ]
y_test = pd.merge(X_test, target_df, on='TransactionID', how='left').loc[:, 'isFraud']
X_train.drop(columns= 'TransactionID', inplace= True)
X_test.drop(columns= 'TransactionID', inplace= True)
X_train = encode_cat_columns(X_train, cols_cat)
X_test = encode_cat_columns(X_test, cols_cat)
clf = XGBClassifier()
print("Start fitting!")
clf.fit(X_train, y_train)
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.2f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20, importance_type='gain')
cols_excluded = reset_cols_excluded()
# +
# Same model WITH all features, for comparison against the reduced run above.
cols_excluded = reset_cols_excluded()
#cols_excluded.update(train_df.filter(regex = '^V|^M|^D|^C|^id').columns )
cols_excluded.remove('test_data')
cols_excluded.remove('TransactionID')
temp_q1_len = int(len(train_df) / 4)
temp_half_len = int(len(train_df) / 2)
temp_q3_len = int(len(train_df) / 4) + temp_half_len
X_train = train_df.drop(columns=cols_excluded).loc[:temp_q1_len ]
y_train = pd.merge(X_train, target_df, on='TransactionID', how='left').loc[:, 'isFraud']
X_test = train_df.drop(columns=cols_excluded).loc[temp_q3_len: ]
y_test = pd.merge(X_test, target_df, on='TransactionID', how='left').loc[:, 'isFraud']
X_train.drop(columns= 'TransactionID', inplace= True)
X_test.drop(columns= 'TransactionID', inplace= True)
X_train = encode_cat_columns(X_train, cols_cat)
X_test = encode_cat_columns(X_test, cols_cat)
clf = XGBClassifier()
print("Start fitting!")
clf.fit(X_train, y_train)
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.2f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20, importance_type='gain')
cols_excluded = reset_cols_excluded()
# -
# # Conclusion
#
# Removing the time correlated factors seems to hurt (rather than improve) the model's quality. Whether or not there is benefit in reengineering these time-dependent features is of course another matter.
del temp_df, test_df
# One-hot encode each categorical column, keeping only the 20 most frequent
# levels per column (the rest are pooled into 'infrequent_category').
cols_cat_dummified = set()
n_max_top_categories_to_keep = 20
for col in cols_cat:
    temp_col = train_df.loc[:, [col]]
    top_cats = temp_col[col].value_counts(ascending = False, normalize=False).iloc[:n_max_top_categories_to_keep].index
    temp_col.loc[temp_col[col].notnull() & ~temp_col[col].isin(top_cats), [col]] = 'infrequent_category'
    # NOTE(review): astype returns a new object that is discarded here —
    # this line has no effect as written.
    temp_col.astype('category')
    #print(temp_col.head(10))
    #print(temp_col.describe())
    temp_col = pd.get_dummies(temp_col, dummy_na = True)
    #print(temp_col.describe())
    #temp_col.shape
    train_df.drop(columns=col, inplace=True)
    cols_cat_dummified.update(list(temp_col.columns))
    train_df[temp_col.columns] = temp_col
train_df.head()
# Engineer this feature to remove time dependence
#temp_df = train_df.loc[:, ['D15', 'TransactionDT']]
temp_df = train_df[train_df['D15'].notnull()].loc[:, ['D15', 'TransactionDT', 'TransactionID']]
temp_df = pd.merge(temp_df, target_df, on='TransactionID', how='left')
#temp_df = train_df[train_df['D15'].notnull()].loc[:, ['D15', 'TransactionDT']]
# Rolling mean/std over the previous 2000 rows, used to z-score D15 locally.
temp_rolling_size = 2000
#temp_min_periods = int(temp_rolling_size * 0.5)
#temp_min_periods = temp_rolling_size -1
temp_df.reset_index(drop=True, inplace=True)
temp_df['D15_mean_prev_100'] = temp_df.loc[:, ['D15']].rolling(temp_rolling_size).mean()
temp_df['D15_std_prev_100'] = temp_df.loc[:, ['D15']].rolling(temp_rolling_size).std()
temp_df['D15_z_rel_prev_100'] = (temp_df['D15'] - temp_df['D15_mean_prev_100']) / temp_df['D15_std_prev_100']
#temp_df.head(20)
# +
# Drop the leading rows whose rolling statistics are still NaN.
temp_df.dropna(inplace=True)
temp_q1_len = int(len(temp_df) / 4)
temp_half_len = int(len(temp_df) / 2)
temp_q3_len = int(len(temp_df) / 4) + temp_half_len
# -
temp_trials =20
# +
# Average the adversarial AUC over repeated resamples of the raw D15 feature:
# a high mean AUC means early vs. late D15 distributions differ (time drift).
auc_sum = 0
for i in range(temp_trials):
    adverserial_df = get_adverserial_df(temp_df.loc[temp_q1_len:temp_half_len, ['D15']], temp_df.loc[temp_q3_len:, ['D15']], n_sample=1000)
    #adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
    # Bug fix: run_adversrial_analysis returns (roc_score, clf); the original
    # `auc_sum += run_adversrial_analysis(...)` added that tuple to a number,
    # raising TypeError. Unpack the score before accumulating.
    roc_score, _ = run_adversrial_analysis(adverserial_df.drop(columns='test_data'), adverserial_df['test_data'], print_result = False)
    auc_sum += roc_score
print(auc_sum / temp_trials)
# +
# Same adversarial check on the rolling-z-scored D15: a lower mean AUC than
# the raw feature indicates the transform reduced the time dependence.
auc_sum = 0
for i in range(temp_trials):
    adverserial_df = get_adverserial_df(temp_df.loc[temp_q1_len:temp_half_len, ['D15_z_rel_prev_100']], temp_df.loc[temp_q3_len:, ['D15_z_rel_prev_100']], n_sample=1000)
    #adverserial_df = encode_cat_columns(adverserial_df, cols_cat)
    # Bug fix: unpack the (roc_score, clf) tuple instead of adding it directly,
    # which raised TypeError in the original.
    roc_score, _ = run_adversrial_analysis(adverserial_df.drop(columns='test_data'), adverserial_df['test_data'], print_result=False)
    auc_sum += roc_score
print(auc_sum / temp_trials)
# -
# Percentage of missing values in each feature variant (should be 0 after dropna).
temp_df['D15'].isnull().sum() * 100 / len(temp_df['D15'])
temp_df['D15_z_rel_prev_100'].isnull().sum() * 100 / len(temp_df['D15_z_rel_prev_100'])
temp_df.dropna(inplace=True)
# Train on the early slice, evaluate on the late slice, using only the
# z-scored feature — checks predictive value under temporal split.
X_train = temp_df.loc[:temp_q1_len, ['D15_z_rel_prev_100']]
y_train = temp_df.loc[:temp_q1_len, ['isFraud']]
X_test = temp_df.loc[temp_q3_len:, ['D15_z_rel_prev_100']]
y_test = temp_df.loc[temp_q3_len:, ['isFraud']]
clf = XGBClassifier()
clf.fit(X_train, y_train)
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.2f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20)
# Same temporal-split evaluation using raw D15, for comparison.
X_train = temp_df.loc[:temp_q1_len, ['D15']]
y_train = temp_df.loc[:temp_q1_len, ['isFraud']]
X_test = temp_df.loc[temp_q3_len:, ['D15']]
y_test = temp_df.loc[temp_q3_len:, ['isFraud']]
clf = XGBClassifier()
clf.fit(X_train, y_train)
pred_prob = clf.predict_proba(X_test)
pred_prob[:, 1]
roc_score = roc_auc_score(y_test, pred_prob[:, 1])
print("roc_auc score %.2f" % roc_score)
xgboost.plot_importance(clf, max_num_features=20)
# Exclude the adversarial target (test_data) and the row id (TransactionID)
# from the adversarial models. (Comment originally said "three features"
# but only two are listed here; TransactionDT is added just below.)
excluded_cols = ['test_data', 'TransactionID']
print("Excluded columns:\n", excluded_cols, sep='')
temp_half_len = int(len(train_df) / 2)
adverserial_df = get_adverserial_df(train_df, test_df, n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cat_cols)
run_adversrial_analysis(adverserial_df.drop(columns=excluded_cols), adverserial_df['test_data'])
# Now, I'll re-engineer the TransactionDT and exclude it from the main models
excluded_cols += ['TransactionDT']
excluded_cols
# +
## Thanks to FChmiel (https://www.kaggle.com/fchmiel) for these two functions
def make_day_feature(df, offset=0, tname='TransactionDT'):
    """Return a day-of-week feature encoded as 0-6.

    Parameters
    ----------
    df : pd.DataFrame
        Frame holding the timestamp column.
    offset : float (default=0)
        Fraction of a day by which to shift where a "day" starts/ends
        (0.58 was found to work well for this dataset).
    tname : str
        Name of the timestamp column (seconds since the reference epoch).
    """
    seconds_per_day = 3600 * 24
    day_index = df[tname] / seconds_per_day
    # Shift by the offset, floor to whole days, wrap into a 0-6 weekday code.
    return np.floor(day_index + offset - 1) % 7
def make_hour_feature(df, tname='TransactionDT'):
    """Return an hour-of-day feature encoded as 0-23.

    Parameters
    ----------
    df : pd.DataFrame
        Frame holding the timestamp column.
    tname : str
        Name of the timestamp column (seconds since the reference epoch).
    """
    seconds_per_hour = 3600
    # Whole hours since the epoch, wrapped into a 24-hour clock.
    return np.floor(df[tname] / seconds_per_hour) % 24
# +
# Derive cyclical time features from TransactionDT for both splits.
train_df['weekday'] = make_day_feature(train_df, offset=0.58)
test_df['weekday'] = make_day_feature(test_df, offset=0.58)
train_df['hours'] = make_hour_feature(train_df)
test_df['hours'] = make_hour_feature(test_df)
# NOTE(review): extend() implies cat_cols is a list here, but later cells call
# .add()/.update()/.discard() on it (set methods) — confirm its actual type.
cat_cols.extend(['weekday', 'hours'])
# -
# Re-run adversarial validation with the new time features included.
print("Excluded columns:\n", excluded_cols, sep='')
adverserial_df = get_adverserial_df(train_df, test_df, n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cat_cols)
run_adversrial_analysis(adverserial_df.drop(columns=excluded_cols), adverserial_df['test_data'])
# Side-by-side category stats for card1 in test vs train.
print(pd.concat([test_df['card1'].astype('category'),train_df['card1'].astype('category')], axis=1).describe())
# +
# How much of each split is covered by the top-N card1 categories, and how
# large the train/test overlap of those top categories is.
temp_top_n = 1000
temp_cat_test_list = test_df['card1'].value_counts(normalize=True)
print("Percentage of samples in top %d categories in test is %.2f" %(temp_top_n, temp_cat_test_list[0:temp_top_n].sum()))
temp_cat_test_set = set(temp_cat_test_list[0:temp_top_n].index)
temp_cat_train_list = train_df['card1'].value_counts(normalize=True)
print("Percentage of samples in top %d categories in train is %.2f" %(temp_top_n, temp_cat_train_list[0:temp_top_n].sum()))
temp_cat_train_set = set(temp_cat_train_list[0:temp_top_n].index)
# Categories that are top-N in BOTH splits, and the mass they cover in each.
temp_cat_set = temp_cat_train_set.intersection(set(temp_cat_test_set))
print("No. of categories where both intersect is %d. Which cover:" %len(set(temp_cat_set)))
sum_top_test = sum([temp_cat_test_list[index] for index in temp_cat_set])
sum_top_train = sum([temp_cat_train_list[index] for index in temp_cat_set])
print("%.4f %% of test cases" %sum_top_test)
print("%.4f %% of train cases" %sum_top_train)
# -
# Rebuild the adversarial frame with rare card1 values bucketed: mark values
# outside the shared top-N set, blank them out, then one-hot the survivors.
print("Excluded columns:\n", excluded_cols, sep='')
adverserial_df = get_adverserial_df(train_df, test_df, n_sample=1000)
adverserial_df['card1_other'] = ~adverserial_df['card1'].isin(temp_cat_set)
print(adverserial_df['card1_other'].describe())
cat_cols.add('card1_other')
adverserial_df.loc[adverserial_df['card1_other'], 'card1'] = np.nan
adverserial_df = pd.concat([adverserial_df.drop(columns='card1'), pd.get_dummies(adverserial_df['card1'], prefix='card1')], axis=1)
adverserial_df.head()
# Track the new dummy column names; the raw card1 column no longer exists.
cat_cols.update([col for col in adverserial_df.columns if 'card1' in col ])
cat_cols.discard('card1')
adverserial_df = encode_cat_columns(adverserial_df, cat_cols)
run_adversrial_analysis(adverserial_df.drop(columns=excluded_cols), adverserial_df['test_data'])
adverserial_df.head()
# Adversarial check using ONLY the card1 dummy columns (bucketed version).
run_adversrial_analysis(adverserial_df[[col for col in adverserial_df.columns if 'card1' in col ]], adverserial_df['test_data'])
# Same check but with ALL card1 values one-hot encoded (no rare-value bucketing).
adverserial_df = get_adverserial_df(train_df, test_df, n_sample=1000)
adverserial_df = pd.concat([adverserial_df.drop(columns='card1'), pd.get_dummies(adverserial_df['card1'], prefix='card1')], axis=1)
adverserial_df = encode_cat_columns(adverserial_df, [col for col in adverserial_df.columns if 'card1' in col ])
run_adversrial_analysis(adverserial_df[[col for col in adverserial_df.columns if 'card1' in col ]], adverserial_df['test_data'])
# Baseline adversarial run with plain label-encoded categoricals.
adverserial_df = get_adverserial_df(train_df, test_df, n_sample=1000)
adverserial_df = encode_cat_columns(adverserial_df, cat_cols)
adverserial_df
print("Excluded columns:\n", excluded_cols, sep='')
run_adversrial_analysis(adverserial_df.drop(columns=excluded_cols), adverserial_df['test_data'])
# Try one-hot encoding card2 as well and re-run.
adverserial_df = pd.concat([adverserial_df.drop(columns='card2'), pd.get_dummies(adverserial_df['card2'], prefix='card2')], axis=1)
adverserial_df.head()
print("Excluded columns:\n", excluded_cols, sep='')
run_adversrial_analysis(adverserial_df.drop(columns=excluded_cols), adverserial_df['test_data'])
# Spot-check the frequency tables and overlap sets computed above.
temp_cat_train_list[0:10]
# +
sum([temp_cat_test_list[index] for index in temp_cat_set])
# -
print(set(temp_cat_set))
# +
# Manual comparison of card1 coverage in each split.
set(test_df['card1'].astype('category'))
set(test_df['id_31']).difference(train_df['id_31'])
print(test_df['card1'].astype('category').value_counts(normalize=True)[0:1000].sum())
print(80 * '-')
print(train_df['card1'].astype('category').value_counts(normalize=True)[0:1000].sum())
# -
# Train/test distribution comparison for several suspect columns.
print(pd.concat([test_df['id_31'].astype('category'),train_df['id_31'].astype('category')], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['id_13'].astype('category'),train_df['id_13'].astype('category')], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['addr1'].astype('category'),train_df['addr1'].astype('category')], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['card1'].astype('category'),train_df['card1'].astype('category')], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['D11'],train_df['D11']], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['D15'],train_df['D15']], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['D10'],train_df['D10']], axis=1).describe())
print(80 * '-')
print(pd.concat([test_df['dist1'],train_df['dist1']], axis=1).describe())
# Browser/OS strings (id_31) present in test but never seen in train —
# a drift source for any model using raw id_31 categories.
temp_list = list(set(test_df['id_31']).difference(train_df['id_31']))
temp_list.sort()
print(temp_list)
temp_list = list(set(test_df['id_31']).union(train_df['id_31']))[1:]
temp_list.sort()
print(temp_list)
# Most common browsers among fraud vs non-fraud transactions.
print(train_df[train_df['isFraud']==1].id_31.value_counts()[0:10])
print(80 * '-')
print(train_df[train_df['isFraud']==0].id_31.value_counts()[0:10])
train_df.id_31.value_counts()[0:20]
# Share of each browser family among non-null id_31 values
# (substring matches sidestep version-number fragmentation).
print((train_df.id_31.str.contains('hrome')).sum() / train_df.id_31.count())
print((train_df.id_31.str.contains('afari')).sum() / train_df.id_31.count())
print((train_df.id_31.str.contains('amsun')).sum() / train_df.id_31.count())
print((train_df.id_31.str.contains('^ie')).sum() / train_df.id_31.count())
print((train_df.id_31.str.contains('android')).sum() / train_df.id_31.count())
# Two-sample Kolmogorov-Smirnov tests: are train/test samples drawn from
# the same distribution for these numeric columns?
from scipy import stats
stats.ks_2samp(train_df['TransactionAmt'].sample(1000), test_df['TransactionAmt'].sample(1000))
stats.ks_2samp(train_df['D15'].sample(1000), test_df['D15'].sample(1000))
# +
# Log-transform TransactionAmt to compress the heavy right tail; stored in
# separate temp frames so the originals stay untouched.
temp_train['TransactionAmt'] = np.log(train_df['TransactionAmt'])
temp_test['TransactionAmt'] = np.log(test_df['TransactionAmt'] )
'''
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
temp_train['TransactionAmt'] = pd.DataFrame(scaler.fit_transform(train_df['TransactionAmt'].values.reshape(-1, 1)))
temp_test['TransactionAmt'] = pd.DataFrame(scaler.fit_transform(test_df['TransactionAmt'].values.reshape(-1, 1)))
'''
# -
temp_test['TransactionAmt'].describe()
# +
# Repeatedly compare train/test TransactionAmt distributions with the KS test,
# before and after the log transform, and count how often the transformed
# version looks MORE similar across splits (i.e. has the higher KS p-value).
n_experiments = 100
n_transformed_better = 0
for i in range(n_experiments):
    orig_result = stats.ks_2samp(train_df['TransactionAmt'].sample(1000), test_df['TransactionAmt'].sample(1000))
    transformed_result = stats.ks_2samp(temp_train['TransactionAmt'].sample(1000), temp_test['TransactionAmt'].sample(1000))
    # index [1] is the KS p-value; higher p-value = distributions look more alike
    if transformed_result[1] > orig_result[1]:
        n_transformed_better += 1
# Bug fix: the message says "percentage" but the original printed the raw
# fraction (e.g. 0.85 instead of 85.00 %); scale by 100 so the printed
# number matches its label.
print("percentage where transformed was better is %.2f %%" % (100 * n_transformed_better / n_experiments))
# -
train_df['TransactionDT'].describe()
| Adversarial analysis - IEEE - v1.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Project: Create a neural network class
#
# ---
#
# Based on previous code examples, develop a neural network class that is able to classify any dataset provided. The class should create objects based on the desired network architecture:
#
# 1. Number of inputs
# 2. Number of hidden layers
# 3. Number of neurons per layer
# 4. Number of outputs
# 5. Learning rate
#
# The class must have the train, and predict functions.
#
# Test the neural network class on the datasets provided below: Use the input data to train the network, and then pass new inputs to predict on. Print the expected label and the predicted label for the input you used. Print the accuracy of the training after predicting on different inputs.
#
# Use matplotlib to plot the error that the train method generates.
#
# **Don't forget to install Keras and tensorflow in your environment!**
#
# ---
# ### Import the needed Packages
# +
import numpy as np
import matplotlib.pyplot as plt
# Needed for the mnist data
from keras.datasets import mnist
from keras.utils import to_categorical
# -
# ### Define the class
class NeuralNetwork:
    """A small fully connected feed-forward network: sigmoid hidden layers,
    softmax output, trained with full-batch gradient descent."""

    def __init__(self, architecture, alpha):
        '''
        architecture: [n_inputs, n_hidden_layers, n_neurons_per_hidden_layer, n_outputs].
        alpha: Learning rate.
        '''
        # Unpack the four architecture numbers.
        _inputs, _layer, _neurons, _output = architecture
        self.alpha = alpha
        self.layers = _layer      # number of hidden layers (an int)
        self.neurons = _neurons   # neurons per hidden layer
        # Define weights
        # input -> first hidden layer
        self.initialWeight = np.random.randn(_inputs, _neurons)
        # hidden -> hidden, one square matrix per extra hidden layer
        # (allocated as zeros here, filled with random values in the loop below)
        self.middleWeight = np.zeros((_layer-1, _neurons, _neurons))
        # last hidden layer -> output
        self.lastWeight = np.random.randn(_neurons, _output)
        # cache of per-layer activations, (re)built by predict()
        self.calcWeight = []
        # Define bias
        self.initialBias = np.random.randn(_neurons)
        self.middleBias = np.random.randn(_layer - 1, _neurons)
        self.lastBias = np.random.randn(_output)
        for i in range(_layer - 1):
            self.middleWeight[i] = np.random.randn(_neurons, _neurons)

    def __repr__(self):
        # construct and return a string that represents the network architecture.
        # NOTE(review): self.layers is an int, so iterating it here would raise
        # TypeError if __repr__ is ever called — confirm intended attribute.
        return "NeuralNetwork: {}".format( "-".join(str(l) for l in self.layers))

    def softmax(self, X):
        # Row-wise softmax over a (batch, outputs) matrix.
        expX = np.exp(X)
        return expX / expX.sum(axis=1, keepdims=True)

    def sigmoid(self, x):
        # Logistic sigmoid activation.
        return 1.0 / (1.0 + np.exp(-x))

    def sigmoid_deriv(self, x):
        # Derivative of the sigmoid, expressed in terms of its OUTPUT value x.
        return x * (1 - x)

    def predict(self, inputs):
        # Forward pass. Every hidden-layer activation is cached in
        # self.calcWeight (shape: layers x batch x neurons) for reuse in train().
        self.calcWeight = np.zeros( (self.layers, inputs.shape[0], self.neurons) )
        self.calcWeight[0] = self.sigmoid( np.dot(inputs, self.initialWeight) + self.initialBias )
        for i in range(self.layers - 1):
            self.calcWeight[i + 1] = self.sigmoid( np.dot(self.calcWeight[i], self.middleWeight[i]) + self.middleBias[i] )
        # Output layer: linear projection of the last hidden activation + softmax.
        return self.softmax( np.dot(self.calcWeight[len(self.calcWeight)-1], self.lastWeight) + self.lastBias )

    def train(self, inputs, labels, epochs = 1000, displayUpdate = 100):
        # Full-batch gradient descent; records mean absolute error per epoch
        # and prints it every `displayUpdate` epochs. Returns the error list
        # so it can be plotted with matplotlib.
        errors = []
        for i in range(epochs):
            # Forward propagation
            prediction = self.predict(inputs)
            level_error = labels - prediction
            errors.append(np.average(np.abs(level_error)))
            # Back propagation: delta at the output, then error pushed back
            # to the last hidden layer.
            level_delta_last = level_error * self.sigmoid_deriv(prediction)
            level_error_middle = np.dot(level_delta_last, self.lastWeight.T)
            level_delta_middle = level_error_middle * self.sigmoid_deriv(self.calcWeight[-1])
            # Update output-layer weights/bias (bias delta summed over the batch).
            b_delta_last = np.sum(level_delta_last)
            self.lastWeight += np.dot(self.calcWeight[-1].T, level_delta_last) * self.alpha
            self.lastBias += b_delta_last * self.alpha
            # NOTE(review): the input-layer update below uses the delta of the
            # LAST hidden layer; for networks with more than one hidden layer
            # this skips propagating through the middle layers first — correct
            # only when self.layers == 1. Confirm for deeper configurations.
            b_delta_last = np.sum(level_delta_middle)
            self.initialWeight += np.dot(inputs.T, level_delta_middle) * self.alpha
            self.initialBias += b_delta_last * self.alpha
            # Walk the middle layers from last to first.
            for j in range(self.layers - 1):
                tmp = (len(self.middleWeight) - 1) - j
                tmp2 = (len(self.calcWeight) - 2) - j
                # NOTE(review): weights are indexed with the reversed index `tmp`
                # while the bias update uses the forward index `j`; these differ
                # whenever there are 3+ hidden layers — confirm intended indexing.
                level_error_middle = np.dot(level_delta_middle, self.middleWeight[tmp])
                self.middleWeight[tmp] += np.dot(self.calcWeight[tmp2].T, level_delta_middle) * self.alpha
                b_delta_middle = np.sum(level_delta_middle)
                self.middleBias[j] += b_delta_middle * self.alpha
                level_delta_middle = level_error_middle * self.sigmoid_deriv(self.calcWeight[tmp2])
            if i % displayUpdate == 0:
                print("Error: ", errors[-1])
        return errors
# ### Test datasets
# #### XOR
# +
# input dataset: all four 2-bit combinations
XOR_inputs = np.array([
    [0,0],
    [0,1],
    [1,0],
    [1,1]
])
# labels dataset: XOR truth table, converted to one-hot below
XOR_labels = np.array([[0,1,1,0]]).T
one_hot_labels = np.zeros((4, 2))
# NOTE(review): this `labels` array is never used for XOR (one_hot_labels is
# built from XOR_labels) and is overwritten by the multiclass cell below.
labels = np.array([1,0,1,0])
for x in range(4):
    one_hot_labels[x, XOR_labels[x]] = 1
# +
# Train a 2-input / 1-hidden-layer / 4-neuron / 2-output network on XOR
# and plot the per-epoch training error.
architecture_1 = [2, 1, 4, 2]
neural_network_1 = NeuralNetwork(architecture_1, 1)
error_1 = neural_network_1.train(XOR_inputs, one_hot_labels)
f, p = plt.subplots(1,1)
p.set_xlabel('Epoch')
p.set_ylabel('Error')
p.plot(error_1)
# -
# #### Multiple classes
# +
# Creates the data points for each class: three Gaussian blobs centered at
# (0,-3), (3,3) and (-3,3), 700 points each.
class_1 = np.random.randn(700, 2) + np.array([0, -3])
class_2 = np.random.randn(700, 2) + np.array([3, 3])
class_3 = np.random.randn(700, 2) + np.array([-3, 3])
feature_set = np.vstack([class_1, class_2, class_3])
labels = np.array([0]*700 + [1]*700 + [2]*700)
# One-hot encode the three class labels.
one_hot_labels = np.zeros((2100, 3))
for i in range(2100):
    one_hot_labels[i, labels[i]] = 1
# Visualize the blobs colored by class.
plt.figure(figsize=(10,10))
plt.scatter(feature_set[:,0], feature_set[:,1], c=labels, s=30, alpha=0.5)
plt.show()
# +
# Train a 2-input / 2-hidden-layer / 5-neuron / 3-output network and
# plot the per-epoch training error.
architecture_2 = [2, 2, 5, 3]
neural_network_2 = NeuralNetwork(architecture_2, 0.01)
error_2 = neural_network_2.train(feature_set, one_hot_labels)
f, p2 = plt.subplots(1,1)
p2.set_xlabel('Epoch')
p2.set_ylabel('Error')
p2.plot(error_2)
# -
# #### On the mnist data set
#
# ---
# Train the network to classify hand drawn digits.
#
# For this data set, if the training step is taking too long, you can try to adjust the architecture of the network to have fewer layers, or you could try to train it with fewer input. The data has already been loaded and preprocesed so that it can be used with the network.
#
# ---
# +
# Load the train and test data from the mnist data set
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Plot a sample data point
plt.title("Label: " + str(train_labels[0]))
plt.imshow(train_images[0], cmap="gray")
# +
# Standardize the data
# Flatten the 28x28 images into 784-element vectors
train_images = train_images.reshape((60000, 28 * 28))
# turn pixel values from 0-255 into 0-1 floats
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
# Create one hot encoding for the labels (both arrays are one-hot from here on)
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# +
# Test the class with the mnist data: train on a 5000-image subset, then
# predict on 1000 test images and collect the misclassified examples.
architecture_3 = [train_images.shape[1], 1, 64, 10]
neural_network_3 = NeuralNetwork(architecture_3, 0.0007)
error_3 = neural_network_3.train(train_images[0:5000], train_labels[0:5000])
f, p3 = plt.subplots(1,1)
p3.set_xlabel('Epoch')
p3.set_ylabel('Error')
p3.plot(error_3)
tests = neural_network_3.predict(test_images[0:1000])
# Bug fix: test_labels was ALREADY one-hot encoded above via to_categorical();
# applying to_categorical() a second time would encode the 0/1 entries of the
# one-hot rows and produce garbage labels. Just slice the encoded array.
one_hot_test_labels = test_labels[0:1000]
np.set_printoptions(precision=3, suppress= True, linewidth=75)
# Threshold the softmax outputs into hard 0/1 predictions.
# Bug fix: the original used `> 0.5` and `< 0.5`, leaving values exactly
# equal to 0.5 untouched; `>=` makes the two masks cover every value.
predictions = np.copy(tests)
predictions[predictions >= 0.5] = 1
predictions[predictions < 0.5] = 0
# Record (index, predicted one-hot, true one-hot) for each mismatch.
error_predictions = []
for index, (prediction, label) in enumerate(zip(predictions[0:10], one_hot_test_labels[0:10])):
    if not np.array_equal(prediction, label):
        error_predictions.append((index, prediction, label))
# -
# After predicting on the *test_images*, use matplotlib to display some of the images that were not correctly classified. Then, answer the following questions:
#
# 1. **Why do you think those were incorrectly classified?**
# - It all comes down to the similarities in the features of each digit. In most incorrect cases the number shares lines or curvature in almost the same region as the misinterpreted number, combined with the limits of this network.
# 2. **What could you try doing to improve the classification accuracy?**
# - Possibly with more layers in charge of differentiating between digits that have similar characteristics.
| Neural Network Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !pip install --upgrade albumentations -q
import os
import pandas as pd
import pickle
import shutil
import numpy as np
import seaborn as sns
from tqdm import tqdm
import matplotlib.pyplot as plt
from PIL import ImageFile
import glob
import shutil
import cv2
from sklearn.datasets import load_files
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras import utils
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from tensorflow.keras.layers import Dropout, Flatten, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import image
import albumentations as A
from tensorflow.keras.models import Model
import tensorflow.keras.layers
from tensorflow.keras.layers import Dense, ReLU
from tensorflow.keras.layers import Dropout, BatchNormalization
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers.schedules import PolynomialDecay
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.models import load_model
# -
def draw_plot_from_history(history, metric, n_epochs, stage):
    """Show the training (and, if recorded, validation) curve for one metric.

    Parameters
    ----------
    history : Keras History object whose .history dict maps metric names to
        per-epoch value lists.
    metric : str, e.g. 'loss' or 'accuracy'.
    n_epochs : int, number of epochs plotted on the x axis.
    stage : str, prefix used in the plot title.
    """
    epochs = np.arange(0, n_epochs)
    plt.style.use("ggplot")
    plt.figure(figsize=(9, 5))
    plt.plot(epochs, history.history[metric], label="train_%s" % metric)
    val_key = "val_%s" % metric
    if val_key in history.history:
        plt.plot(epochs, history.history[val_key], label=val_key)
    title = stage + '_%s' % metric
    plt.title(title)
    plt.xlabel("Epoch #")
    plt.ylabel("Value")
    plt.legend()
    # Kept for parity with the original implementation (never written to disk).
    filename = title + '.png'
    plt.show()
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence that streams batches of resized (and optionally
    augmented) RGB images from disk with one-hot class targets."""

    def __init__(self, data, batch_size, augmentation, image_size, shuffle=True):
        # data: list of dicts with 'path' (image file) and 'label' such as
        # 'c3' — the leading character is stripped and the remainder parsed
        # as the integer class id.
        self.batch_size = batch_size
        self.augmentation = augmentation  # albumentations Compose, or None
        self.shuffle = shuffle
        self.image_size = image_size
        self.image_names = np.array([item['path'] for item in data])
        self.targets = to_categorical(np.array([int(item['label'][1:]) for item in data]))
        self.samples = len(self.targets)
        self.indexes = np.arange(self.samples)
        if shuffle:
            np.random.shuffle(self.indexes)

    def __len__(self):
        # Number of batches per epoch; the last batch may be smaller.
        return int(np.ceil(self.samples / self.batch_size))

    def on_epoch_end(self):
        # Reshuffle sample order between epochs when requested.
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def transform_image(self, image):
        # Resize to the model input size, then apply augmentations (if any).
        image = cv2.resize(image, (self.image_size, self.image_size), interpolation = cv2.INTER_CUBIC)
        if self.augmentation:
            image = self.augmentation(image=image)['image']
        return image

    def __getitem__(self, index):
        # Assemble one batch of images (X) and one-hot targets (y).
        take_ind = self.indexes[index * self.batch_size: min((index + 1) * self.batch_size, len(self.targets))]
        X = np.empty((len(take_ind), self.image_size, self.image_size, 3))
        y = self.targets[take_ind, :]
        for i in range(len(take_ind)):
            img = cv2.imread(self.image_names[take_ind[i]], cv2.IMREAD_COLOR)
            # OpenCV loads BGR; the model pipeline expects RGB.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = self.transform_image(img)
            X[i] = img
        return X, y
# Create augmentations: geometric + photometric jitter applied to training images only.
transform = A.Compose(
    [
        A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=30,
                           p=0.5, border_mode=cv2.BORDER_CONSTANT),
        A.RandomBrightnessContrast(p=0.5),
        A.RGBShift(p=0.25),
        A.GaussNoise(p=0.25),
        A.HorizontalFlip(p=0.5),
    ]
)
# Create generators:
TRAIN_DIR = "/kaggle/input/state-farm-distracted-driver-detection/imgs/train"
TEST_DIR = "/kaggle/input/state-farm-distracted-driver-detection/imgs/test"
INPUT_SHAPE = 224   # square input edge expected by EfficientNetB0 below
BATCH_SIZE = 32
num_classes = 10
# +
# Index every training image by its class-folder name (c0..c9), then make a
# stratified 80/20 train/validation split.
train_data = []
label_stat = []
for label in os.listdir(TRAIN_DIR):
    for img_path in glob.glob(os.path.join(TRAIN_DIR, label, "*.jpg")):
        train_data.append({'path': img_path, 'label': label})
        label_stat.append(label)
train_data, val_data = train_test_split(train_data, test_size=0.2, stratify=label_stat, shuffle=True)
# Augmentation on the training stream only; validation is deterministic.
train_generator = DataGenerator(train_data, BATCH_SIZE, transform, INPUT_SHAPE)
validation_generator = DataGenerator(val_data, BATCH_SIZE, None, INPUT_SHAPE, shuffle=False)
# -
# Build model: frozen ImageNet EfficientNetB0 backbone + small classification head.
# +
model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(INPUT_SHAPE, INPUT_SHAPE, 3))
# Freeze the backbone so only the new head trains during warmup.
for layer in model.layers:
    layer.trainable = False
x = model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.2)(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=model.input, outputs=predictions)
# -
# Warmup: short training of the head only, at a higher learning rate.
NUM_EPOCHS = 3
LEARNING_RATE = 0.001
# +
opt = Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=["accuracy"])
history = model.fit(
    train_generator,
    steps_per_epoch = train_generator.samples // BATCH_SIZE,
    validation_data = validation_generator,
    validation_steps = validation_generator.samples // BATCH_SIZE,
    epochs=NUM_EPOCHS, verbose=1)
draw_plot_from_history(history, 'loss', NUM_EPOCHS, 'STAGE_1')
draw_plot_from_history(history, 'accuracy', NUM_EPOCHS, 'STAGE_1')
# -
model.save("model2_stage1.hdf5")
# Unfreezing deeper layers: fine-tune the last 20 layers (except BatchNorm,
# which is kept frozen to preserve its ImageNet statistics).
NUM_EPOCHS = 15
LEARNING_RATE = 0.0003
FINE_TUNE_FROM_LAYER = -20
for layer in model.layers:
    layer.trainable = False
for layer in model.layers[FINE_TUNE_FROM_LAYER:]:
    # `tensorflow` is available here via the `import tensorflow.keras.layers` above.
    if not isinstance(layer, tensorflow.keras.layers.BatchNormalization):
        layer.trainable = True
# +
# Linearly decay the learning rate from LEARNING_RATE to LEARNING_RATE/10
# over the whole fine-tuning run.
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=LEARNING_RATE,
    decay_steps=NUM_EPOCHS * train_generator.samples // BATCH_SIZE,
    end_learning_rate=LEARNING_RATE / 10,
    power=1.0)
opt = Adam(learning_rate=learning_rate_fn)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=["accuracy"])
# Keep only the checkpoint with the best validation loss.
save_best_checkpoint = ModelCheckpoint("model2_stage2.hdf5", verbose=1, monitor='val_loss', save_best_only=True, mode='auto')
# NOTE(review): fit_generator is deprecated in newer TF releases; model.fit
# accepts generators directly — confirm the TF version pinned for this notebook.
history = model.fit_generator(
    train_generator,
    steps_per_epoch = train_generator.samples // BATCH_SIZE,
    validation_data = validation_generator,
    validation_steps = validation_generator.samples // BATCH_SIZE,
    epochs = NUM_EPOCHS,
    callbacks=[save_best_checkpoint],
    verbose=1)
draw_plot_from_history(history, 'loss', NUM_EPOCHS, 'STAGE_2')
draw_plot_from_history(history, 'accuracy', NUM_EPOCHS, 'STAGE_2')
# -
def save_plot_from_history(history, metric, n_epochs, stage):
    """Render the train/validation curve for `metric` and write it to '<metric>.png'.

    Same data as draw_plot_from_history, but on a larger canvas with bigger
    fonts, saved to disk instead of shown. `stage` is accepted for signature
    symmetry and does not affect the output.
    """
    xs = np.arange(0, n_epochs)
    plt.style.use("ggplot")
    plt.figure(figsize=(21, 15))
    plt.plot(xs, history.history[metric], label="train_%s" % metric)
    val_key = "val_%s" % metric
    if val_key in history.history:
        plt.plot(xs, history.history[val_key], label=val_key)
    plt.title(metric, fontsize=18)
    plt.xlabel("Epoch #", fontsize=18)
    plt.ylabel("Value", fontsize=18)
    plt.legend(fontsize=18)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.savefig(metric + '.png')
# Make predictions:
from tqdm.auto import tqdm
def classify(model, test_img_dir):
    """Score every .jpg in `test_img_dir` with the model.

    Returns a dict with a 'name' list (file names, sorted) and 'c0'..'c9'
    lists holding the softmax probability of each class per image — the
    column layout expected by the submission builder below.
    """
    result = {'name': []}
    for i in range(10):
        result['c%d' % i] = []
    for path in tqdm(sorted(os.listdir(test_img_dir))):
        if not path.endswith('.jpg'):
            continue
        # Load as RGB and resize to the network input resolution.
        img = cv2.imread(os.path.join(test_img_dir, path), cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (INPUT_SHAPE, INPUT_SHAPE), interpolation=cv2.INTER_CUBIC)
        # Single-image batch; take the first (only) prediction row.
        scores = model.predict(np.expand_dims(img, 0))[0]
        result['name'].append(path)
        for i in range(10):
            result['c%d' % i].append(scores[i])
    return result
# Reload the best fine-tuned checkpoint and score the whole test directory.
model = load_model('model2_stage2.hdf5')
result = classify(model, TEST_DIR)
# +
# Assemble the submission frame: image name + ten class-probability columns.
dct = {'img': result['name']}
for i in range(10):
    col = 'c%d' % i
    dct[col] = result[col]
df = pd.DataFrame(dct)
df.to_csv('submission.csv', index=False)
df
# -
# Improve score a bit with clipping hack: bounding probabilities away from
# 0/1 limits the log-loss penalty for confident mistakes.
# +
df_new = df.copy()
cols = ['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9']
arr, names = df_new.values[:, 1:], df_new.values[:, 0]
arr = np.clip(arr, 0.025, 0.975)
# Renormalize rows so the clipped probabilities still sum to 1.
arr /= np.sum(arr, axis=1, keepdims=True)
new_df = pd.DataFrame(np.hstack([names.reshape((-1, 1)), arr]), columns=df.columns)
new_df.to_csv('submission_new.csv', index=False)
| driver state analysis/research/activity recognition/driver-activity-classification-student.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#pip install psycopg2-binary
# +
# Dependencies
# ----------------------------------
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Connect to the local Employees Postgres database.
# NOTE(review): credentials are hardcoded here; move them to an environment
# variable or config file before sharing/committing this notebook.
engine = create_engine('postgresql://postgres:postgres@localhost:5432/Employees')
connection = engine.connect()
# Declarative base class that the ORM models below inherit from.
base = declarative_base()
# -
#Get the Employees table
class Employees(base):
    """ORM mapping for the Employees table (one row per employee)."""
    __tablename__ = 'Employees'
    emp_no = Column(String, primary_key=True)   # employee number (primary key)
    birth_date = Column(Date)
    first_name = Column(String)
    last_name = Column(String)
    gender = Column(String)
    hire_date = Column(Date)

    def __repr__(self):
        # Debug-friendly representation showing all mapped columns.
        return "<Employee(emp_no='{}', birth_date='{}', first_name='{}', last_name='{}', gender='{}', hire_date='{}')>"\
            .format(self.emp_no, self.birth_date, self.first_name, self.last_name, self.gender, self.hire_date)
# +
# NOTE(review): ForeignKey and relationship are imported here but never used
# below — the comment mentions a relationship to Employees, yet none is declared.
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship

#Get the Salaries table with relationship to Employees Table
class Salary(base):
    """ORM mapping for the Salary table (salary record per employee)."""
    __tablename__ = 'Salary'
    emp_no = Column(String, primary_key=True)   # employee number (primary key)
    salary = Column(Integer)
    from_date = Column(Date)
    to_date = Column(Date)

    def __repr__(self):
        # Debug-friendly representation showing all mapped columns.
        return "<Salary(emp_no='{}', salary='{}', from_date='{}', to_date='{}')>"\
            .format(self.emp_no, self.salary, self.from_date, self.to_date)
# -
#Get the Titles table
class Titles(base):
    """ORM mapping for the Titles table (job title per employee)."""
    __tablename__ = 'Titles'
    emp_no = Column(String, primary_key=True)   # employee number (primary key)
    title = Column(String)
    from_date = Column(Date)
    to_date = Column(Date)

    def __repr__(self):
        # Debug-friendly representation showing all mapped columns.
        return "<Titles(emp_no='{}', title='{}', from_date='{}', to_date='{}')>"\
            .format(self.emp_no, self.title, self.from_date, self.to_date)
# Open an ORM session bound to the Postgres engine created above.
Session = sessionmaker(bind=engine)
session = Session()
# +
from collections import defaultdict
from sqlalchemy.inspection import inspect
#read the data into a dictionary
def query_to_dict(rset):
    """Convert a SQLAlchemy ORM result set into a column-oriented dict.

    Each mapped attribute name becomes a key whose value is the list of that
    attribute's values across all objects in `rset` — a shape that feeds
    directly into pd.DataFrame().
    """
    columns = defaultdict(list)
    for obj in rset:
        state = inspect(obj)
        for name, attr in state.attrs.items():
            columns[name].append(attr.value)
    return columns
# -
#get the employee data from the db table
rset1 = session.query(Employees).all()
#get the salary data from the db table
rset2 = session.query(Salary).all()
#get the title data from the db table
rset3 = session.query(Titles).all()
# +
# Build a dataframe from the employee records.
df_empl = pd.DataFrame(query_to_dict(rset1))
# Some employees have "True" as a first name. Instead of removing the records
# as invalid, change the name to TBD — each still has a unique emp_no, so the
# record itself should be valid; the names ought to be fixed upstream.
# Bug fix: DataFrame.replace returns a NEW frame; the original call discarded
# the result, so the "TRUE" -> "TBD" substitution never actually happened.
df_empl = df_empl.replace(to_replace="TRUE", value="TBD")
# If there were any null values, they could be handled similarly, e.g.
# df_empl = df_empl.replace(to_replace=np.nan, value='no value')
#print(df_empl)
# -
#create a dataframe from the salary data
df_sal = pd.DataFrame(query_to_dict(rset2))
#print(df_sal)
#create a dataframe from the title data
df_title = pd.DataFrame(query_to_dict(rset3))
#print(df_title)
#merge the employee and salary dataframe
df_merge_col = pd.merge(df_empl, df_sal, on='emp_no')
df_merge_col.head()
#merge dataframe with title dataframe
df_merge_col2 = pd.merge(df_merge_col, df_title, on='emp_no')
df_merge_col2.head()
# create histogram for salary data
df_merge_col2[['salary']].hist(figsize=(12, 6), bins=20, grid=True)
plt.xlabel('Salary')
plt.ylabel('Frequency')
plt.title('Histogram for Employee Salary')
plt.savefig('salary_histogram.png')
#Average salary by title: group once, take the mean, highest salaries first
average_salary = (
    df_merge_col2.groupby('title')['salary']
    .mean()
    .sort_values(ascending=False)
)
average_salary.plot(kind='bar', figsize=(12, 6), color='r', alpha=0.5)
plt.xlabel('Title')
plt.ylabel('Salary')
plt.title('Salary by Title')
plt.savefig('salary_by_title_bar_graph.png')
# Box plot of the salary distribution per title; rot=90 keeps long labels readable.
ax = df_merge_col2[['salary', 'title']].boxplot(
    by='title', figsize=(10, 6), rot=90)
ax.set_xlabel('Title')
ax.set_ylabel('Salary')
ax.set_title('Salary by Title')
plt.savefig('salary_by_title_boxplot.png')
| EmployeeSQL/EmployeesDBViz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Winning the GHZ Game on Real IBM Quantum Computer<a name="top"></a>
# *<NAME>, Dr. Jan-<NAME>, October 2020*
# The GHZ game is a serious game for quantum computing to show the quantum mechanical property of entanglement. Three players are asked for two different properties A and B of an object, and each property can be one of two conditions 1 or 0. When all are asked for property A, 1 should appear two times or zero times. When two players are asked for property B, 1 should appear three times or one time. When the players don't know what the other two were asked, respectively, there is no strategy in classical logic that can lead to winning in all cases. Using Quantum Computing, however, there is. If you wish to find out more about this game, click [here](https://github.com/JanLahmann/Fun-with-Quantum/blob/master/GHZ-Game.ipynb).
# The Qiskit built-in QASM Aer-simulator for running the Quantum circuit of the GHZ game is perfect, giving us the results that we expected from theory. In reality, however, the imperfection of real Quantum Computing hardware returns "noisy" output due to various sources of errors. Luckily, there is a variety of options available to mitigate these effects, some of which we will apply and inspect in this notebook.
# In the following, we will have a closer look by:
# 1. Comparing IBM Quantum devices [[Go](#devices)]
# 2. Manually optimizing our circuit for a specific device [[Go](#manual)]
# 3. Using the Qiskit transpiler optimization [[Go](#transpiler)]
# 4. Running Measurement Error Mitigation [[Go](#mem)]
# 5. Combining the techniques [[Go](#all)]
#
# Appendix [[Go](#appendix)]
# * A: Solution for chapter 5. [[Go](#appendix-a)]
# * B: Running this notebook on real Quantum Computers [[Go](#appendix-b)]
#
# _____
# ## Setup
# [[Top](#top)]
# First, let's do the necessary imports:
# +
from qiskit import QuantumCircuit, QuantumRegister, execute, transpile, Aer, IBMQ # circuit creation and execution
from qiskit.providers.aer.noise import NoiseModel # simulate backends
from qiskit.visualization import plot_histogram # visualize results
from qiskit.quantum_info import Statevector, state_fidelity, hellinger_fidelity # statevector and metrics
from qiskit.providers.ibmq import least_busy # get least busy device
import qiskit.tools.jupyter # visualize backends
from qiskit.ignis.mitigation.measurement import complete_meas_cal,CompleteMeasFitter # measurement error mitigation
import pandas as pd # handy tools for calculation
# -
# Next we'll define our global variables: 3 qubits are needed for our circuit (GHZ state), and we use a high number of shots (=times a circuit is executed per run on a device, out of which the relative result distribution is calculated) to get consistent results:
# global vars
n = 3 # number of qubits in the GHZ state
shots = 8000 # high number of shots per execution for statistically stable results
# For the remainder of this notebook, this will be our reference circuit for assessment. Note that the GHZ game uses 4 distinct circuits. These, however, only differ in their measurement base and order, but all have the same core - the GHZ state, which is the entanglement of 3 qubits (see part before the first barrier).
# basic circuit: GHZ state followed by a basis change to measure in Y,Y,X
name = "GHZ yyx default" # ghz in measurement base Y,Y,X
ghz = QuantumCircuit(n,n, name=name) # create circuit with n qubits and n classical bits
ghz.h(0) # h gate: put qubit 0 into superposition
ghz.cx(0,1) # cnot gate: entangle qubit 1 with qubit 0
ghz.cx(0,2) # cnot gate: entangle qubit 2 -> 3-qubit GHZ state
ghz.barrier()
ghz.sdg([0,1]) # s dagger gate: rotate qubits 0 and 1 toward the Y basis
ghz.h(range(n)) # h gates: complete the basis change before measurement
ghz.barrier()
ghz.measure(range(n), range(n)) # measurements into the classical bits
ghz.draw("mpl") # draw circuit
# In order to evaluate the accuracy of the results of executing the GHZ state on different devices and with different mitigation techniques, we need a metric which we will define as state accuracy. It represents the percentage of counts resulted from the experiment that belong to one of the four target states of the GHZ game ('001', '010', '100', '111'). Reaching either of them equals winning the game, therefore our metric represents the fidelity with which we win the GHZ game on our quantum computer. We will use the state accuracy to measure the effectiveness of our different optimization approaches.
# +
expected_states = ['001', '010', '100', '111'] # target (winning) states of the GHZ game

def get_state_accuracy(counts, total=None):
    """Return the percentage of counts on a GHZ winning state, e.g. '97.5%'.

    counts: dict mapping measured bitstrings to the number of shots observed.
    total:  number of shots the counts were taken over; defaults to the
            notebook-wide ``shots`` so all existing call sites keep working.
            Passing it explicitly removes the dependency on the global.
    """
    if total is None:
        total = shots  # backward-compatible fallback to the global shot count
    expected_counts = sum(c for state, c in counts.items()
                          if state in expected_states)
    state_accuracy = expected_counts / total
    return str(state_accuracy*100)+"%"
# -
# For reference, this is what the state accuracy would look like for results from a perfect backend (100%).
# Ideal (noise-free) reference run on the Aer QASM simulator: expect 100%.
counts = execute(ghz, backend=Aer.get_backend('qasm_simulator'), shots=shots).result().get_counts() # execution on simulator
state_accuracy = get_state_accuracy(counts)
print(f"state accuracy on the simulator: {state_accuracy}")
# Note that the state accuracy is a custom metric designed for our needs. Depending on your research aim, other metrics such as the built-in [hellinger fidelity](https://qiskit.org/documentation/stubs/qiskit.quantum_info.hellinger_fidelity.html?highlight=hellinger_fidelity) might be a better choice to assess circuit execution on different devices.
# Great, now we're all set to run our experiments!
# ## 1. Comparing IBM quantum devices<a name=devices><a/>
# [[Top](#top)]
# To start with, we need to find available quantum devices that are suitable to run our experiment. However, in case IBM Quantum devices are busy with jobs from other users, the execution of the following experiments take very long. To avoid this, we will use Qiskit built-in mock implementations of them, executed locally.
#
# (If you want to try out this notebook on real systems, execute the cell above again and read the [appendix](#appendix-b) for detailed instructions.)
# +
# mock devices: locally simulated snapshots of real IBM Q backends, each
# carrying the real device's coupling map and calibrated noise model
from qiskit.test.mock import *
backends = [FakeYorktown(), FakeMelbourne(), FakeVigo(), FakeOurense(), FakeValencia(), FakeLondon(),
            FakeBurlington(), FakeEssex()] # no mock santiago available
# -
# Now, let's compare the results from the execution of the GHZ circuit on these backends:
# +
results_backends = []
for device in backends:
    # run the GHZ circuit untouched (optimization_level=0) on each device
    device_counts = execute(ghz, backend=device, shots=shots,
                            optimization_level=0).result().get_counts()
    accuracy = get_state_accuracy(device_counts)  # share of winning states
    results_backends.append((device.name(), device_counts, accuracy))

# plot the count distribution of every backend side by side
plot_histogram([c for _, c, _ in results_backends],
               legend=[name for name, _, _ in results_backends],
               sort="asc",
               color=["red", "orange", "yellow", "green", "blue", "violet",
                      "purple", "black"],
               bar_labels=False)
# -
# The bars represent the relative amount (probability) to read a certain state from the execution of our circuit on the respective backend over the 8000 times the experiment was executed there. Remember that ideally, only four states should be returned by our circuit: 001, 010, 100, 111. Therefore, the best devices are those with the highest probability on these states while having the lowest on all other, undesired states.
# To make it easier to interpret, we use the state accuracy as indicator of the results' quality:
# +
# setup ordered table of per-backend state accuracies
results_backends_table = pd.DataFrame(
    [(backend_name, state_accuracy)
     for backend_name, counts, state_accuracy in results_backends],
    columns=["backend", "state_accuracy"])
# state_accuracy is a string like '85.3%'; sort on its numeric value, not
# lexicographically (a plain string sort would e.g. rank '100.0%' below '85%')
results_backends_table = results_backends_table.sort_values(
    by="state_accuracy",
    key=lambda col: col.str.rstrip("%").astype(float),
    ascending=False).reset_index(drop=True)
results_backends_table
# -
# Now we know the ranking of the devices based on their fidelity. Let's pick one with a high state accuracy to test the optimization techniques in the remaining chapters. In addition, we will pick one system with a lower accuracy to compare against later.
# +
# Devices used for the remaining experiments, one good and one weaker reference.
# high accuracy
backend1 = FakeVigo()
# lower accuracy
backend2 = FakeYorktown()
# -
# ## 2. Manually optimizing circuits<a name=manual><a/>
# [[Top](#top)]
# Why is it that the IBM Quantum computers show different performances? As mentioned earlier, hardware optimization is a major research field at the moment. Essentially, our Quantum computers consist of different numbers of qubits that are connected with each other and manipulated according to our circuit. However, the way in which they are connected - the so-called coupling map - as well as the error rates when performing gates on single qubits, multiple qubits and when reading the state of the qubits differ across our devices due to manufacturing. Finally, the qubits possess varying coherence times (T1/T2).
#
# Let's have a closer look on how the coupling map and error rates look like on our two picked devices.
# %matplotlib inline
# Display the interactive backend widget (configuration, error map, qubit properties).
backend1
# On the Configuration tab you can see the coupling map of the devices. Qubits that are connected with a line can perform a multiple qubit gate such as CNOT directly. Trying to apply CNOT on non-connected qubits would require Qiskit to internally change the circuit before execution to fit it to the coupling map of the device.
#
# On the Error Map tab you can see the quality of each qubit and each connection where a darker color indicates a lower error rate (more desirable). If you are interested in the exact error rates, you can find them on the Qubit Properties tab (U2 - single qubit error rate, readout error rate) and on the Multi-Qubit Gates tab.
# **Coupling Map based optimization**
# The Qiskit transpiler prepares our code for execution on the real device. In case our circuit layout does not match the device's configuration, it will introduce swaps into our circuit so to make it applicable to the coupling map. Sometimes this can produce a worse accuracy than that from our original circuit. Therefore, let's try to create custom coupling map specific circuits of the GHZ state to avoid the transpiler interfering.
# Backend 1:
# +
# backend1: create coupling map optimised circuit
# map: qubit 0 -> qubit 1
#      qubit 1 -> qubit 0
#      qubit 2 -> qubit 2
# By swapping logical qubits 0 and 1, every CNOT lands on a physical
# connection of the device, so the transpiler needs to insert no SWAPs.
ghz_backend1 = QuantumCircuit(5,3, name=f"GHZ yyx {backend1.name()} optimized") # note that we use 5 qubits here to match the configuration of the device
ghz_backend1.h(1) # we map qubit 0 to qubit 1 and qubit 1 to qubit 0 ..
ghz_backend1.cx(1,0) # .. to ensure that all logical CNOT gates are performed on physical connections
ghz_backend1.cx(1,2)
ghz_backend1.barrier()
ghz_backend1.sdg([1,0])
ghz_backend1.h(range(n))
ghz_backend1.barrier()
ghz_backend1.measure([1,0,2], range(n)) # classical bit order matches the logical qubit order
ghz_backend1.draw("mpl")
# -
# Let's compare the two circuits:
# +
# compare results between original and coupling map optimized circuit on backend1
results_backend1_coupling = []
circuits = [ghz, ghz_backend1]
for circuit in circuits:
    # execute() transpiles internally; the standalone transpile() call that
    # used to sit here was dead code (its result was never used) and is removed
    counts = execute(circuit, backend=backend1, shots=shots, optimization_level=0).result().get_counts() # get results
    state_accuracy = get_state_accuracy(counts) # calculate state accuracy
    results_backend1_coupling.append((circuit.name, counts, state_accuracy)) # store

print("State Accuracy \n"+
      "\n".join([str(circuit_name)+': '+str(state_accuracy)
                 for circuit_name, counts, state_accuracy in results_backend1_coupling]))
plot_histogram([counts for circuit_name, counts, state_accuracy in results_backend1_coupling],
               legend=[circuit_name for circuit_name, counts, state_accuracy in results_backend1_coupling],
               title=backend1.name(),
               bar_labels=False)
# -
# As you can see, the state accuracy improved by around 2% on our system.
# **Error Map based optimization**
# As mentioned, this was one of two ways we are considering to manually optimize the circuits. The second builds off of the results from the coupling map based optimization and additionally takes into account the error rates. For this, we have another look at the error map and pick qubits that have the lowest single qubit gate, multiple qubit gate and readout error rates (see colors for intuition). Sometimes you need to trade off between these as for instance some qubits have excellent readout error rates while not as good single qubit gate error rates. Though, as this approach is manual, you do not need to try out each possible configuration to find the best one.
# Backend 1:
# +
# backend1: create coupling map + error rate optimised circuit
# map: qubit 0 -> qubit 3
#      qubit 1 -> qubit 1
#      qubit 2 -> qubit 4
# Qubits 3, 1 and 4 are chosen from the device's error map so that CNOTs run
# on the connections with the lowest two-qubit error rates.
ghz_backend1_error = QuantumCircuit(5,3, name=f"GHZ yyx {backend1.name()} error rate optimised") # note that we use 5 qubits here to match the configuration of the device
ghz_backend1_error.h(3) # qubit with best adjacent multi-qubit gates error rates (cnot)
ghz_backend1_error.cx(3,1)
ghz_backend1_error.cx(3,4)
ghz_backend1_error.barrier()
ghz_backend1_error.sdg([3,1]) # avoid 4 as it has a worse single qubit error rate
ghz_backend1_error.h([3,1,4]) # best readout error rates
ghz_backend1_error.barrier()
ghz_backend1_error.measure([3,1,4], range(n)) # readout still in same order
ghz_backend1_error.draw("mpl")
# +
# compare results between original, coupling map, and error rates optimized circuit on backend1
results_backend1_error = []
circuits = [ghz_backend1_error]
for circuit in circuits:
    # execute() transpiles internally; the standalone transpile() call that
    # used to sit here was dead code (its result was never used) and is removed
    counts = execute(circuit, backend=backend1, shots=shots, optimization_level=0).result().get_counts() # get results
    state_accuracy = get_state_accuracy(counts) # calculate state accuracy
    results_backend1_error.append((circuit.name, counts, state_accuracy)) # store

print("State Accuracy \n"+
      "\n".join([str(circuit_name)+': '+str(state_accuracy)
                 for circuit_name, counts, state_accuracy in results_backend1_coupling+results_backend1_error]))
plot_histogram([counts for circuit_name, counts, state_accuracy in results_backend1_coupling+results_backend1_error],
               legend=[circuit_name for circuit_name, counts, state_accuracy in results_backend1_coupling+results_backend1_error],
               title=backend1.name(),
               bar_labels=False)
# -
# As can be seen, the results from the coupling map and error rate optimised circuit are around the same as those from the circuit only optimised for its coupling map. This is because while the new circuit uses qubits with improved CNOT error rates, the old one natively had good readout error rates so that effects of swapping qubit mappings do not produce a significantly better result. However, there might be still a better circuit layout than the one found here.
# ## 3. Qiskit Transpiler Optimization<a name=transpiler><a/>
# [[Top](#top)]
# Qiskit comes with a built-in transpiler to translate logical circuits into circuits that are executable on the target backend. In addition, the transpile function (and execute function calling it) has four different levels of optimization available that can be set as parameter. See an extract of the [documentation](https://qiskit.org/documentation/apidoc/transpiler_preset.html) here to understand their differences:
# Optimization Levels
#
# * Level 0: no explicit optimization other than **mapping to backend**.
#
# * level 1: light optimization by simple **adjacent gate collapsing**.
#
# * level 2: medium optimization by **initial layout selection** and **gate cancellation** using commutativity rules.
#
# * level 3: heavy optimization by **noise adaptive qubit mapping** and gate cancellation using commutativity rules and **unitary synthesis**.
# Next to other actions taken such as combining gates and mapping to the device's coupling map (which takes place necessarily on every level), note that only optimization level 3 applies noise adaptive mapping, which we attempted to do manually in the previous chapter.
# To separately view the effects from each chapter, we will now again use the basic GHZ YYX circuit and compare the different transpiler optimization levels on backend 1.
# +
# compare results from different optimization levels on backend 1
results_backend1_transpiler = []
optimization_levels = [0,1,2,3]
for optim_level in optimization_levels:
    # execute() transpiles internally with the requested optimization level;
    # the unused transpile() call and commented-out debug prints were removed
    counts = execute(ghz, backend=backend1, shots=shots, optimization_level=optim_level).result().get_counts() # get results
    state_accuracy = get_state_accuracy(counts) # calculate state accuracy
    results_backend1_transpiler.append((optim_level, counts, state_accuracy)) # store

# compare state accuracies
print("State Accuracy \n"+
      "\n".join(["optimization_level_"+str(optim_level)+': '+str(state_accuracy)
                 for optim_level, counts, state_accuracy in results_backend1_transpiler]))
# plot results
plot_histogram([counts for optim_level, counts, state_accuracy in results_backend1_transpiler],
               legend=["optimization_level_"+str(optim_level) for optim_level, counts, state_accuracy in results_backend1_transpiler],
               title=backend1.name(),
               bar_labels=False)
# -
# Did you notice? Counterintuitively, there was only a visible improvement from optimization level 0 to 1, and from 1 to 2, while levels 2 and 3 produce similar results on the backend. But why is there no improvement on each level? To understand this phenomenon better, let's see what happens under the hood by inspecting the transpiled circuits that the transpiler produces to execute them on our backend:
# +
# compare transpiled circuits for different optimization levels on backend 1
optimization_levels = [0,1,2,3]
print("original circuit")
print(ghz)
# Print the circuit the transpiler would actually send to the device per level.
for optim_level in optimization_levels:
    print(f"optimization level: {optim_level}")
    transpiled_circuit = transpile(ghz, backend=backend1, optimization_level=optim_level)
    print(transpiled_circuit)
# -
# The circuits of level 2 and 3 are identical! Also circuits 0 and 1 behave similarly, even though the compiler picks different qubits to map our circuit. This implies that the circuit with optimization level 2 is already the best the transpiler can do for us. The small deviation between optimization level 2 and 3 only comes due to probabilistic quantum readout randomness.
# Remember that the compiler is supposed to follow the idea from our manual optimization in a more sophisticated manner. Therefore let's compare its output again with what we produced in chapter 2:
# +
# Side-by-side: our hand-optimised circuit vs. the level-3 transpiler result.
print("manual optimization")
print(ghz_backend1_error)
print("State Accuracy")
print("manual optimization: "+str(results_backend1_error[0][2]))
print("transpiler optimization: "+str(results_backend1_transpiler[3][2]))
# -
# As can be seen, the structure of the circuit remains the same even on level 3 of the transpiler optimized circuit, matching our circuit from chapter 2. However, they differ in the qubits that were selected for mapping the logical circuit onto the real physical hardware. Taking this into account, the transpiler apparently has a better intuition on how to balance the importances of different kinds of errors.
# ## 4. Measurement Error Mitigation<a name=mem><a/>
# [[Top](#top)]
# So far, we achieved some improvements of the state accuracy with manual optimization and to a small extent with transpiler optimization. While these are rather simple steps to take, there is an entire research field dealing with how to mitigate quantum computer hardware imperfection with software driven techniques, one of which we will examine now.
#
#
# As discussed earlier, there are various types of errors that can produce noisy results when executing a quantum circuit. A significant one is what happens when the quantum states are read from qubits, forcing them to collapse into classical states, i.e. our experiment should return one of our four targeted classical states (001, 010, 100, or 111). However, we've seen that in our results a lot more states than these incorrectly appear.
#
# These returned results represent the real results plus the readout error. Hence, knowing the devices' readout error behavior, we can estimate the real results from our obtained results through matrix multiplication, which is called Measurement Error Mitigation. For this, each basis state (one of the 2^n combinations of 0 and 1 per each of the n qubits, e.g. 00, 01, 10, 11 for 2 qubits) is measured on the device so to derive its measurement errors, allowing us to restore our real counts.
#
# For a detailed explanation of how Measurement Error Mitigation works have a look at the [Qiskit Textbook](https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html) or the well explained Qiskit [youtube video](https://www.youtube.com/watch?v=yuDxHJOKsVA) on MEM.
# +
# measurement error mitigation on backend 1
results_backend1_mem = []
# normal execution (baseline without mitigation)
noisy_counts = execute(ghz, backend=backend1, shots=shots, optimization_level=0).result().get_counts() # get results
noisy_state_accuracy = get_state_accuracy(noisy_counts) # calculate state accuracy from original results
results_backend1_mem.append(("noisy", noisy_counts, noisy_state_accuracy))
# measurement error mitigation: measure every 2^n basis state on the device to
# build a calibration matrix, then apply its inverse to the noisy counts
qr = QuantumRegister(n)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal') # setup calibration measures on basis states
cal_results = execute(meas_calibs, backend=backend1, shots=shots).result() # execute calibration measures
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal') # fit the calibration matrix
meas_filter = meas_fitter.filter # get the filter object from calibration
mitigated_counts = meas_filter.apply(noisy_counts) # apply filter to get mitigated results
mitigated_state_accuracy = get_state_accuracy(mitigated_counts) # calculate state accuracy from mitigated results
results_backend1_mem.append(("mitigated", mitigated_counts, mitigated_state_accuracy))
# compare state accuracies
print("State Accuracy \n"+
      "\n".join([str(label)+': '+str(state_accuracy) for label, counts, state_accuracy in results_backend1_mem]))
# plot results
plot_histogram([counts for optim_level, counts, state_accuracy in results_backend1_mem],
               legend=[label for label, counts, state_accuracy in results_backend1_mem],
               title=backend1.name(),
               bar_labels=False)
# -
# As can be seen, there is a visible improvement on the system with a high accuracy as result from the measurement error mitigated results.
# While this is what we looked for, let's compare the results to the mitigated results from another, natively less accurate system in order to understand the capabilities of measurement error mitigation.
backend2
# +
# measurement error mitigation on backend 2 (same procedure as for backend 1)
results_backend2_mem = []
# normal execution (baseline without mitigation)
noisy_counts = execute(ghz, backend=backend2, shots=shots, optimization_level=0).result().get_counts() # get results
noisy_state_accuracy = get_state_accuracy(noisy_counts) # calculate state accuracy from original results
results_backend2_mem.append(("noisy", noisy_counts, noisy_state_accuracy))
# measurement error mitigation: calibrate on the basis states, then filter
qr = QuantumRegister(n)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal') # setup calibration measures on basis states
cal_results = execute(meas_calibs, backend=backend2, shots=shots).result() # execute calibration measures
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal') # fit the calibration matrix
meas_filter = meas_fitter.filter # get the filter object from calibration
mitigated_counts = meas_filter.apply(noisy_counts) # apply filter to get mitigated results
mitigated_state_accuracy = get_state_accuracy(mitigated_counts)
results_backend2_mem.append(("mitigated", mitigated_counts, mitigated_state_accuracy))
# compare state accuracies
print("State Accuracy \n"+
      "\n".join([str(label)+': '+str(state_accuracy) for label, counts, state_accuracy in results_backend2_mem]))
# plot results
plot_histogram([counts for optim_level, counts, state_accuracy in results_backend2_mem],
               legend=[label for label, counts, state_accuracy in results_backend2_mem],
               title=backend2.name(),
               bar_labels=False)
# -
# As can be seen, this is a truly remarkable improvement, demonstrating the power of measurement error mitigation. This implies that a large part of the erroneous results on backend 2 is due to measurement errors. In addition, it is worth noting that the effect of the measurement error mitigation fluctuates over different applications on the same backend, which in turn depends on the device's quality in the first place. Try it by executing the above cell multiple times.
# ## 5. Combining the optimization techniques<a name=all><a/>
# [[Top](#top)]
# We've now successfully gone through a variety of optimization techniques that differ in approach, complexity and results.
# However, the results for our system are still under 100%, which is what we aim for. Therefore, let's attempt to get the maximum fidelity through combining all of the measures discussed.
# +
# backend1: apply each optimization technique in turn and collect the results
results_backend1_all = []
# basic circuit (no optimization at all)
noisy_counts = execute(ghz, backend=backend1, shots=shots, optimization_level=0).result().get_counts() # get results
noisy_state_accuracy = get_state_accuracy(noisy_counts) # calculate state accuracy from original results
results_backend1_all.append(("noisy", noisy_counts, noisy_state_accuracy))
# manually optimized circuit (coupling map + error map, chapter 2)
manual_counts = execute(ghz_backend1_error, backend=backend1, shots=shots, optimization_level=0).result().get_counts() # get results
manual_state_accuracy = get_state_accuracy(manual_counts)
results_backend1_all.append(("manual", manual_counts, manual_state_accuracy))
# transpiler optimization (level 3 overrides manually optimized circuit)
transpiler_counts = execute(ghz, backend=backend1, shots=shots,
                            optimization_level=3).result().get_counts() # get results
transpiler_state_accuracy = get_state_accuracy(transpiler_counts)
results_backend1_all.append(("transpiler", transpiler_counts, transpiler_state_accuracy))
# transpiler optimization + measurement error mitigation (chapter 4)
qr = QuantumRegister(n)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal') # setup calibration measures on basis states
cal_results = execute(meas_calibs, backend=backend1, shots=shots).result() # execute calibration measures
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal') # fit the calibration matrix
meas_filter = meas_fitter.filter # get the filter object from calibration
mitigated_counts = meas_filter.apply(transpiler_counts) # apply filter on previous results to get mitigated results
mitigated_state_accuracy = get_state_accuracy(mitigated_counts)
results_backend1_all.append(("transpiler - mitigated", mitigated_counts, mitigated_state_accuracy))
# compare state accuracies
print("State Accuracy \n"+
      "\n".join([str(label)+': '+str(state_accuracy) for label, counts, state_accuracy in results_backend1_all]))
# plot results
plot_histogram([counts for label, counts, state_accuracy in results_backend1_all],
               legend=[label for label, counts, state_accuracy in results_backend1_all],
               title=backend1.name(),
               bar_labels=False)
# -
# If we wish to understand the effects better, we can inspect the transpiled circuit for each technique in order:
# +
# %matplotlib inline
# inspect the circuit actually executed for each technique, in order
print("*** ",backend1.name()," ***")
print("basic circuit")
print(transpile(ghz, backend=backend1, optimization_level=0))
print("manually optimized circuit (coupling map and error map)")
print(transpile(ghz_backend1_error, backend=backend1, optimization_level=0))
print("transpiler optimized circuit")
print(transpile(ghz, backend=backend1, optimization_level=3))
# (measurement error mitigation doesn't transform the circuit)
# -
# Great, we can see that the combination of methods really produces a tremendous value for the state accuracy of backend 1, close to the ideal value of 100%.
# **Applying the combined techniques on another backend**
#
# Now it's your turn! Can you combine all the methods for backend 2? To get started, we provided a possible configuration of a manually optimized circuit for backend 2:
# +
# backend 2: coupling map + error rate optimised circuit
# map: qubit 0 -> qubit 1
#      qubit 1 -> qubit 2
#      qubit 2 -> qubit 0
# Starter layout for the exercise below, chosen from backend 2's error map.
ghz_backend2_error = QuantumCircuit(5,3, name=f"GHZ yyx {backend2.name()} error rate optimised") # note that we use 5 qubits here to match the configuration of the device
ghz_backend2_error.h(1)
ghz_backend2_error.cx(1,2)
ghz_backend2_error.cx(1,0)
ghz_backend2_error.barrier()
ghz_backend2_error.sdg([1,2]) # better single qubit error rate
ghz_backend2_error.h([1,2,0])
ghz_backend2_error.barrier()
ghz_backend2_error.measure([1,2,0], range(n)) # readout still in same order
ghz_backend2_error.draw("mpl")
# -
# Now, your task is to fill in the following cell to execute each optimization step in order just as we did with backend 1.
#
#
# *(Hint: You can copy the code from the previous cell and change the variables for those relevant for backend 2. The solution can be found in the [appendix](#appendix-a) at the end of this notebook.)*
# +
### YOUR WORK ###
# Exercise scaffold: the *_counts placeholders keep plot_histogram from
# crashing, but the *_state_accuracy names stay undefined (NameError on run)
# until you fill in the marked sections. See the appendix for the solution.
# backend 2
results_backend2_all = []
noisy_counts = {}
manual_counts = {}
transpiler_counts = {}
mitigated_counts = {}
# basic circuit
## YOUR CODE HERE ##
results_backend2_all.append(("noisy", noisy_counts, noisy_state_accuracy))
# manually optimized circuit
## YOUR CODE HERE ##
results_backend2_all.append(("manual", manual_counts, manual_state_accuracy))
# transpiler optimization (level 3 overrides manually optimized circuit)
## YOUR CODE HERE ##
results_backend2_all.append(("transpiler", transpiler_counts, transpiler_state_accuracy))
# transpiler optimization + measurement error mitigation
## YOUR CODE HERE ##
results_backend2_all.append(("transpiler - mitigated", mitigated_counts, mitigated_state_accuracy))
# compare state accuracies
print("State Accuracy \n"+
      "\n".join([str(label)+': '+str(state_accuracy) for label, counts, state_accuracy in results_backend2_all]))
# plot results
plot_histogram([counts for label, counts, state_accuracy in results_backend2_all],
               legend=[label for label, counts, state_accuracy in results_backend2_all],
               title=backend2.name(),
               bar_labels=False)
# -
# If you get a state accuracy of around 95% you did a good job!
#
# Finally, we can inspect the transpiled circuits to understand the results better:
# +
# inspect circuits on backend2: one transpiled circuit per technique, in order
print("*** ",backend2.name()," ***")
print("basic circuit")
print(transpile(ghz, backend=backend2, optimization_level=0))
print("manually optimized circuit (coupling map and error map)")
print(transpile(ghz_backend2_error, backend=backend2, optimization_level=0))
print("transpiler optimized circuit")
print(transpile(ghz, backend=backend2, optimization_level=3))
# (measurement error mitigation doesn't transform the circuit)
# -
#
#
# ---
#
# Congratulations! In this tutorial, we found ways to greatly increase the probability with which we can win the Quantum GHZ game on a quantum computer. Please note that there is a large field of research revolving around optimising quantum hardware and software - and we only covered a few simple ones of them. Therefore feel free to dig deeper into the topic or run your own circuits on [IBM Quantum Experience](https://quantum-computing.ibm.com).
#
# Thank you!
# *<NAME>, IBM Germany, <EMAIL>*
#
# *Dr. <NAME>, IBM Germany, <EMAIL>*
# <a name=end></a>**Important:** If running this notebook on a shared system (e.g. mybinder.org), remember to log off from your IBM Q account in case you used it for execution on real devices by running the following cell:
# Deactivate the IBM Q session and remove the stored credentials from this
# machine — safety measure when running on a shared system.
disable_account()
delete_account()
# ---
# ## .
# ## .
# ## .
# ## .
#
#
# ## *Appendix*<a name=appendix><a/>
# [[Top](#top)]
#
# ### A: Solutions (Chapter 5)<a name=appendix-a><a/>
# +
#### SOLUTION TO CHAPTER 5 #####
# Build a four-way comparison on backend2; each entry is
# (label, measured counts, accuracy as computed by get_state_accuracy
# defined earlier in the notebook).
results_backend2_all = []
# basic circuit
noisy_counts = execute(ghz, backend=backend2, shots=shots, optimization_level=0).result().get_counts() # get results
noisy_state_accuracy = get_state_accuracy(noisy_counts) # calculate state accuracy from original results
results_backend2_all.append(("noisy", noisy_counts, noisy_state_accuracy))
# manually optimized circuit (coupling map and error map)
manual_counts = execute(ghz_backend2_error, backend=backend2, shots=shots, optimization_level=0).result().get_counts() # get results
manual_state_accuracy = get_state_accuracy(manual_counts)
results_backend2_all.append(("manual", manual_counts, manual_state_accuracy))
# transpiler optimization (level 3 overrides manual optimization circuit)
transpiler_counts = execute(ghz, backend=backend2, shots=shots,
                            optimization_level=3).result().get_counts() # get results
transpiler_state_accuracy = get_state_accuracy(transpiler_counts)
results_backend2_all.append(("transpiler", transpiler_counts, transpiler_state_accuracy))
# transpiler optimization + measurement error mitigation:
# calibrate readout on the computational basis states, then apply the
# resulting filter to the transpiler-optimized counts.
qr = QuantumRegister(n)
meas_calibs, state_labels = complete_meas_cal(qr=qr, circlabel='mcal') # setup calibration measures on basis states
cal_results = execute(meas_calibs, backend=backend2, shots=shots).result() # execute calibration measures
meas_fitter = CompleteMeasFitter(cal_results, state_labels, circlabel='mcal')
meas_filter = meas_fitter.filter # get the filter object from calibration
mitigated_counts = meas_filter.apply(transpiler_counts) # apply filter on previous results to get mitigated results
mitigated_state_accuracy = get_state_accuracy(mitigated_counts)
results_backend2_all.append(("transpiler - mitigated", mitigated_counts, mitigated_state_accuracy))
# compare state accuracies
print("State Accuracy \n"+
      "\n".join([str(label)+': '+str(state_accuracy) for label, counts, state_accuracy in results_backend2_all]))
# plot results
plot_histogram([counts for label, counts, state_accuracy in results_backend2_all],
               legend=[label for label, counts, state_accuracy in results_backend2_all],
               title=backend2.name(),
               bar_labels=False)
# -
# ---
# ### B: Running this notebook on real quantum computers<a name=appendix-b></a>
# You can run the entire notebook on real quantum hardware provided and made accessible by IBM on the [IBM Quantum Experience](https://quantum-computing.ibm.com).
# You should take into consideration that this will take longer, as each job submitted for execution to either of the quantum computers is queued based on a fair share algorithm. Therefore, you will most likely need to wait until other jobs queued before yours are finished. You can see the status of your jobs under "Pending Results" on the [Results page](https://quantum-computing.ibm.com/results).
#
# As the first chapter executes circuits on all suitable devices, it will take especially long. For this reason we'll provide guidance for that chapter separately.
# **Connect IBM Quantum Experience Account**
# We need to connect to our IBM Quantum Experience account in order to be able to execute the results on real hardware.
#
# If you haven't linked your IBM Quantum Experience Account yet, follow this [guide](https://qiskit.org/documentation/install.html#install-access-ibm-q-devices-label) and paste your credentials in the following cell. Remember to remove your credentials at the [end of the tutorial](#end).
# save IBM Quantum Experience account credentials
# NOTE(review): replace 'MY_API_TOKEN' with your personal token; the token is
# written to disk, so remember to delete it again on shared machines (see the
# cell at the end of the tutorial).
IBMQ.save_account('MY_API_TOKEN')
# Having stored your account credentials, run the following to activate your account.
# load IBM Quantum Experience account
IBMQ.load_account()
provider = IBMQ.get_provider('ibm-q') # get provider
# **Using real Quantum Computers in chapter 1**
# We simply need to change the "backend" variable. Just execute the following cell or use it to replace the corresponding cell above, then execute the cell in chapter 1 in which the circuits are executed and review the results.
# Query the provider for real devices that can run the n-qubit GHZ circuit.
backends = provider.backends(filters=lambda x: # get available quantum computer from IBM Q provider where
                             x.configuration().n_qubits >= n and # number of qubits high enough
                             not x.configuration().simulator and # only real devices (no simulator)
                             x.status().operational==True) # only devices that work
print(f"IBM Q backends: {[str(backend.name()) for backend in backends]}")
# **Using real Quantum Computers in chapters 2-5**
# First, we need to change our backend variables for the real devices we would like to examine. You can pick the real devices corresponding to the mock ones we used above or choose your own based on the selection returned by the previous cell. (Again execute the cell here or replace the code in the corresponding cell above.)
# +
# Pick two real devices to compare; swap in your own choices from the
# backend list printed above if these are unavailable.
# higher accuracy
backend1 = provider.get_backend("ibmq_vigo")
# lower accuracy
backend2 = provider.get_backend("ibmqx2") # yorktown is named x2 on IBM Q experience
# -
# Almost done! However, as the name indicates, chapter 2 uses circuits that are manually optimized to specifically fit the default devices used so far. In order to get reasonable results in its sections, you now need to review the coupling and error map of the devices you picked by executing the first two cells in chapter 2.
#
# Based on this, your job is to create a new circuit for each (or decide to leave the default one if it is already fitting) that fits its coupling map, i.e. so that all qubits that interact with a cnot gate in the circuit are physically connected as indicated in the coupling map. Then, adapt the circuit again so to pick qubits with the lowest error rates. This doesn't need to be accurate, as it is hard to weigh the effects of the different kinds of errors present. A good indicator if you did a good job is that the state accuracy of the result is not significantly lower than that of the default circuit or that of the coupling map sensitive circuit without error optimization, respectively. Once you're done, execute the remaining cells in chapters 2-5.
# +
# Exercise scaffolds: fill in a GHZ circuit per backend whose CNOTs only use
# qubit pairs that are physically connected on that device's coupling map;
# the *_error variants should additionally prefer the lowest-error qubits.
# backend1: coupling map optimised circuit
ghz_backend1 = QuantumCircuit(5,3, name=f"GHZ yyx {backend1.name()} optimised")
### YOUR CODE HERE ###
ghz_backend1.draw("mpl")
# +
# backend2: coupling map optimised circuit
ghz_backend2 = QuantumCircuit(5,3, name=f"GHZ yyx {backend2.name()} optimised")
### YOUR CODE HERE ###
ghz_backend2.draw("mpl")
# +
# backend1: coupling map + error rate optimised circuit
ghz_backend1_error = QuantumCircuit(5,3, name=f"GHZ yyx {backend1.name()} error rate optimised")
### YOUR CODE HERE ###
ghz_backend1_error.draw("mpl")
# +
# backend2: coupling map + error rate optimised circuit
ghz_backend2_error = QuantumCircuit(5,3, name=f"GHZ yyx {backend2.name()} error rate optimised")
### YOUR CODE HERE ###
ghz_backend2_error.draw("mpl")
| GHZ-on-Real-Devices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Input and Output History
# ## Notebook's ``In`` and ``Out`` Objects
#
# By now I imagine you're quite familiar with the ``In [1]:``/``Out[1]:`` style prompts used by Jupyter Notebook.
# But it turns out that these are not just pretty decoration: they give a clue as to how you can access previous inputs and outputs in your current session.
# Imagine you start a session that looks like this:
# Each of these three cells gets an In[n] entry in the notebook; the two bare
# expressions also produce Out entries (the import returns nothing).
import math
math.sin(2)
math.cos(2)
# We've imported the built-in ``math`` package, then computed the sine and the cosine of the number 2.
# These inputs and outputs are displayed in the shell with ``In``/``Out`` labels. It also creates some Python variables called ``In`` and ``Out`` that are automatically updated to reflect this history:
# In and Out are injected by IPython/Jupyter; they do not exist in plain Python.
print(In)
Out
# The ``In`` object is a list, which keeps track of the commands in order (the first item in the list is a place-holder so that ``In[1]`` can refer to the first command):
print(In[1])
# The ``Out`` object is not a list but a dictionary mapping input numbers to their outputs (if any):
print(Out[2])
# Note that not all operations have outputs: for example, ``import`` statements and ``print`` statements don't affect the output.
# The latter may be surprising, but makes sense if you consider that ``print`` is a function that returns ``None``; for brevity, any command that returns ``None`` is not added to ``Out``.
#
# Where this can be useful is if you want to interact with past results.
# For example, let's check the sum of ``sin(2) ** 2`` and ``cos(2) ** 2`` using the previously-computed results:
# sin^2(2) + cos^2(2) == 1, computed from the cached outputs of cells 2 and 3
Out[2] ** 2 + Out[3] ** 2
# The result is ``1.0`` as we'd expect from the well-known trigonometric identity.
# In this case, using these previous results probably is not necessary, but it can become very handy if you execute a very expensive computation and want to reuse the result!
# ## Underscore Shortcuts and Previous Outputs
#
# The standard Python shell contains just one simple shortcut for accessing previous output; the variable ``_`` (i.e., a single underscore) is kept updated with the previous output; this works in Jupyter Notebook as well:
# _ holds the most recent output (IPython/Jupyter only)
print(_)
# But Notebook takes this a bit further — you can use a double underscore to access the second-to-last output, and a triple underscore to access the third-to-last output (skipping any commands with no output):
print(__)
print(___)
# Jupyter Notebook stops there: more than three underscores starts to get a bit hard to count, and at that point it's easier to refer to the output by line number.
#
# There is one more shortcut we should mention, however — a shorthand for ``Out[X]`` is ``_X`` (i.e., a single underscore followed by the line number):
Out[2]
_2
# ## Suppressing Output
# Sometimes you might wish to suppress the output of a statement (this is perhaps most common with the plotting commands that we'll explore in Introduction to Matplotlib.
# Or maybe the command you're executing produces a result that you'd prefer not to store in your output history, perhaps so that it can be deallocated when other references are removed.
# The easiest way to suppress the output of a command is to add a semicolon to the end of the line:
# Trailing semicolon suppresses the cell's Out entry in IPython/Jupyter.
# BUGFIX: the surrounding text promises a semicolon at the end of this line,
# but the example was missing it, so the output was NOT suppressed and the
# `14 in Out` check below would not demonstrate the point.
math.sin(2) + math.cos(2);
# Note that the result is computed silently, and the output is neither displayed on the screen nor stored in the ``Out`` dictionary:
14 in Out
# ## Related Magic Commands
# For accessing a batch of previous inputs at once, the ``%history`` magic command is very helpful.
# Here is how you can print the first four inputs:
# %history -n 1-4
# As usual, you can type ``%history?`` for more information and a description of options available.
# Other similar magic commands are ``%rerun`` (which will re-execute some portion of the command history) and ``%save`` (which saves some set of the command history to a file).
| #0-1 Introduction and Jupyter Notebook/#1.5 - Input and Output History.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.016304, "end_time": "2021-03-22T20:29:23.476444", "exception": false, "start_time": "2021-03-22T20:29:23.460140", "status": "completed"} tags=[]
# # SparkSQL
# -
# Execute arbitrary SQL queries againts CSV and PARQUET files
# + papermill={"duration": 2.246299, "end_time": "2021-03-22T20:29:25.734626", "exception": false, "start_time": "2021-03-22T20:29:23.488327", "status": "completed"} tags=[] language="bash"
# export version=`python --version |awk '{print $2}' |awk -F"." '{print $1$2}'`
#
# if [ $version == '36' ]; then
# pip install pyspark==2.4.8 wget==3.2 pyspark2pmml==0.5.1
# elif [ $version == '38' ]; then
# pip install pyspark==3.1.2 wget==3.2 pyspark2pmml==0.5.1
# else
# echo 'Currently only python 3.6 and 3.8 is supported, in case you need a different version please open an issue at https://github.com/elyra-ai/component-library/issues'
# exit -1
# fi
# + papermill={"duration": 0.164002, "end_time": "2021-03-22T20:29:25.951504", "exception": false, "start_time": "2021-03-22T20:29:25.787502", "status": "completed"} tags=[]
import wget
# Download the shared helper module next to this notebook so it can be imported
# below. NOTE(review): requires network access; fails when run offline.
wget.download(
    'https://raw.githubusercontent.com/IBM/claimed/master/component-library/claimed_utils.py'
)
from claimed_utils import parse_args_to_parameters
import glob
import logging
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import to_timestamp
import os
import shutil
import sys
import re
# + papermill={"duration": 0.012801, "end_time": "2021-03-22T20:29:25.972462", "exception": false, "start_time": "2021-03-22T20:29:25.959661", "status": "completed"} tags=[]
# name of resulting file (default: data_result.csv)
output_result_file = os.environ.get('output_result_file', 'data_result.csv')
# file name for CSV or PARQUET file - must end with .csv or .parquet (default: data.csv)
data_file = os.environ.get('data_file', 'data.csv')
# master url of spark master (default: local mode)
master = os.environ.get('master', "local[*]")
# data_dir temporal data storage for local execution (default: ../../data/)
data_dir = os.environ.get('data_dir', '../../data/')
# sql statement to execute, table name == df, example: select * from df
data_dir = os.environ.get('data_dir', '../../data/')
parameters = list(
map(
lambda s: re.sub('$', '"', s),
map(
lambda s: s.replace('=', '="'),
filter(
lambda s: s.find('=') > -1 and bool(re.match('[A-Za-z0-9_]*=[.\/A-Za-z0-9]*', s)),
sys.argv
)
)
)
)
for parameter in parameters:
logging.warning('Parameter: '+parameter)
exec(parameter)
# + papermill={"duration": 4.178678, "end_time": "2021-03-22T20:29:30.176328", "exception": false, "start_time": "2021-03-22T20:29:25.997650", "status": "completed"} tags=[]
# Create (or reuse) a SparkContext against the configured master, then a
# SparkSession on top of it for the DataFrame/SQL API.
sc = SparkContext.getOrCreate(SparkConf().setMaster(master))
spark = SparkSession.builder.getOrCreate()
# + papermill={"duration": 4.693188, "end_time": "2021-03-22T20:29:34.875295", "exception": false, "start_time": "2021-03-22T20:29:30.182107", "status": "completed"} tags=[]
# Load the input file into a DataFrame, choosing the reader by file extension.
if '.csv' in data_file:
    # header row + schema inference so columns get usable names and types
    df = spark.read.option('header', 'true').option("inferSchema", "true").csv(data_dir + data_file)
elif '.parquet' in data_file:
    # BUGFIX: was `spark.read.parquet.csv(...)`, which raises AttributeError —
    # DataFrameReader.parquet is a method, not a nested reader object.
    df = spark.read.parquet(data_dir + data_file)
else:
    raise Exception('Neither .csv nor .parquet in file name')
# -
# Expose the DataFrame under the table name `df` and run the user-supplied
# SQL statement against it; the result replaces the original DataFrame.
df.createOrReplaceTempView('df')
df = spark.sql(sql)
# + papermill={"duration": 458.264571, "end_time": "2021-03-22T20:37:13.150144", "exception": false, "start_time": "2021-03-22T20:29:34.885573", "status": "completed"} tags=[]
# Write the result as a single flat file. Spark writes a directory of part
# files, so coalesce to one partition, then move the single part file up and
# replace the directory with it.
if os.path.exists(data_dir + output_result_file):
    # NOTE(review): os.remove fails if a previous interrupted run left a
    # *directory* here rather than the final file — TODO confirm intended.
    os.remove(data_dir + output_result_file)
# output format follows the *input* file's extension
if '.csv' in data_file:
    df.coalesce(1).write.option("header", "true").csv(data_dir + output_result_file)
elif '.parquet' in data_file:
    df.coalesce(1).write.parquet(data_dir + output_result_file)
else:
    raise Exception('Neither .csv nor .parquet in file name')
# promote the single part file to the requested output path
file = glob.glob(data_dir + output_result_file + '/part-*')
shutil.move(file[0], data_dir + output_result_file + '.tmp')
shutil.rmtree(data_dir + output_result_file)
shutil.move(data_dir + output_result_file + '.tmp', data_dir + output_result_file)
| component-library/transform/spark-sql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # Hauptkomponentenanalyse
# # (Principal Component Analysis, PCA)
# # vs.
# # Denoising Variational Autoencoders
#
# ### _an Hand von Beispielen_
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "skip"}
# jupyter nbconvert PCAvsDVAEde.ipynb --to slides --post serve
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # Eine intuitive Perspektive ...
#
# #### "... realistische, hochdimensionale Daten konzentrieren sich in der NÀhe einer nichtlinearen, niedrigdimensionalen Mannigfaltigkeit ..." [Lei et al., 2018]
#
# 
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # Eine intuitive Perspektive ...
#
# #### Aber wie lernt man die Mannigfaltigkeit und die Wahrscheinlichkeitsverteilung darauf?
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # PCA vs. DVAE an Hand von Beispielen
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # PCA vs. DVAE an Hand von Beispielen
#
# Der __MNIST (Modified National Institute of Standards and Technology) Datensatz__ von handgeschriebenen Zahlen besteht aus __60,000 Trainings- und 10,000 Test-Beispielen__. Die Zahlen wurden hinsichtlich ihrer Größe __normalisiert und in einem Bild fester Größe zentriert__.
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Vorstellung der Wettbewerber
# + [markdown] slideshow={"slide_type": "slide"}
# # PCA
# + [markdown] slideshow={"slide_type": "slide"}
# # PCA
#
# * __Unüberwachtes__ Lernen
# * __Lineare Transformation__
# 
# * __"Transformiere"__ eine Menge von Beobachtungen in ein __anderes Koordinatensystem__, in dem die Werte der ersten Koordinate (Komponente) die __größtmögliche Varianz__ aufweisen [Friedman et al., 2017]
# * Die __resultierenden Koordinaten (Komponenten)__ sind __nicht__ mit den ursprünglichen Koordinaten __korreliert__
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # PCA
#
# 
# * Wird zur __Dimensions-Reduzierung__ genutzt (Komprimierung)
# * Die __Rekonstruktion der Beobachtungen__("decoding") aus den fÃŒhrenden __Hauptkomponenten__ hat den __niedrigsten quadratischen Fehler__
# + [markdown] slideshow={"slide_type": "slide"}
# # Autoencoders
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # Autoencoders
#
# * unÃŒberwachtes __neuronales Netz__
# * __minimiert__ den Fehler zwischen Rekonstruktionen und Beobachtungen [Goodfellow et al., 2016]
# * lernt die __IdentitÀtsfunktion__
# * wird mit Hilfe von __FehlerrÃŒckfÃŒhrung (Backpropagation) trainiert__
# * aufgetrennt um __Kodierung und Dekodierung__
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # Autoencoders
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # PCA vs. Autoencoders
# ## Implementierung
# + hideCode=false hidePrompt=false slideshow={"slide_type": "skip"}
# import all necessary libs
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import tensorflow as tf
# we use Keras to implement, layer-by-layer the DVAE and PCA
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Conv2DTranspose,Reshape
from sklearn.decomposition import PCA
import os
# %matplotlib inline
# Make Graphviz binaries discoverable for model plotting — Windows-specific
# path; harmless no-op on other systems.
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# + slideshow={"slide_type": "slide"}
# we use Keras to implement, layer-by-layer the DVAE and PCA
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Conv2DTranspose,Reshape
from sklearn.decomposition import PCA
# + [markdown] slideshow={"slide_type": "slide"}
# ## Experiment Parametrierung
# + slideshow={"slide_type": "slide"}
# training params for PCA vs. DVAE
num_train = 50000       # pool size from which display examples are drawn
n_images = 6            # number of example digits shown per comparison figure
batch_size = 205        # DVAE training mini-batch size
original_dim = 784      # 28*28 pixels per flattened MNIST digit
latent_dim = 8          # size of the DVAE latent bottleneck
epochs = 1000           # DVAE training epochs
epsilon_std = 1.0       # stddev of the reparameterization noise epsilon
noise_factor = 0.5      # scale of Gaussian noise added for denoising training
# + slideshow={"slide_type": "slide"}
# get the MNIST digits (uint8 images in [0, 255])
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# prepare data for DVAE: scale pixels to [0, 1] and add a channel axis
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), 28,28,1))
x_test = x_test.reshape((len(x_test), 28,28,1))
# corrupt the inputs with Gaussian noise for the *denoising* objective
noise_train = x_train + noise_factor * np.random.randn(*x_train.shape)
noise_test = x_test + noise_factor * np.random.randn(*x_test.shape)
# clip the images to be between 0 and 1
noise_train = np.clip(noise_train, 0., 1.)
noise_test = np.clip(noise_test, 0., 1.)
# prepare data for PCA: flatten each test image to a 784-vector.
# BUGFIX: x_test is already scaled to [0, 1] above, so the previous extra
# `.astype('float32')/255` divided by 255 a *second* time, feeding PCA values
# in [0, 1/255] and unfairly crippling its reconstruction in the comparison.
x_test_pca = x_test
shape_x_test = x_test_pca.shape
pcaInputTest = np.reshape(x_test, [shape_x_test[0], shape_x_test[1]*shape_x_test[2]])
# + slideshow={"slide_type": "skip"}
# display the images (28x28 px)
digit_size = 28
# two rows (clean on top, noisy below) by n_images columns in one array
figure = np.zeros((digit_size * 2, digit_size * n_images))
showidx=np.random.randint(0,num_train,n_images)
# display input, noisy input
for i,idx in enumerate (showidx):
    figure[0: 28,i *28: (i + 1) * 28] = np.reshape(x_train[idx], [28, 28])
    figure[28: 56,i *28: (i + 1) * 28] = np.reshape(noise_train[idx], [28, 28])
# NOTE(review): figsize is in inches, so (56, 168) is enormous — confirm intended
plt.figure(figsize=(28*2, 28*n_images))
plt.imshow(figure, cmap='Greys_r')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Grundlegende Mathematik der PCA
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### Lineare Transformation
#
# Es sei $\{y_i\}^N_{i=1}$ eine Menge von $N$ Beobachtungs-Vektoren der Dimension $n$ mit $n\leq N$.
#
# Eine __lineare Transformation__ eines __endlich-dimensionalen__ Vektors kann als __Matrix Multiplikation__ ausgedrÃŒckt werden:
#
# $$ \begin{align} x_i = W y_i \end{align} $$
#
# mit $y_i \in R^{n}, x_i \in R^{m}$ und $W \in R^{nxm}$.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### Lineare Transformation
#
# * Das $j-te$ Element in $x_i$ ist das __Innere Produkt__ von $y_i$ und der $j-ten$ Spalte der Matrix $W$, welche wir durch $w_j$ bezeichen. Es sei $Y \in R^{nxN}$ die Matrix, welche wir durch horizontale Aneinanderreihung der Vektoren $\{y_i\}^N_{i=1}$ erhalten,
#
# $$ Y = \begin{bmatrix} | ... | \\ y_1 ... y_N \\ | ... | \end{bmatrix} $$
#
# * Aus der __linearen Transformation__ folgt:
#
# $$ X = W^TY, X_0 = W^TY_0, $$
#
# wobei $Y_0$ die __Matrix der zentrierten Elemente__ (d.h. wir subtrahieren den Mittelwert von jeder Beobachtung) bezeichnet, und __Kovarianzmatrix__ $Y_0Y_0^T$.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### Dimensionsreduzierung, Komprimierung
#
# PCA wird zur __Dimensions-Reduktion__ verwendet, da sie durch die durch eine lineare Transformation die __Anzahl der Variablen reduziert__.
#
# Da nur die ersten __$m$ Hauptkomponenten erhalten__ werden, __verliert__ PCA __information__ (d.h. __verlustreiche Komprimierung__).
#
# Der __Verlust__ ( _Summe des quadratischen Rekonstruktions-Fehlers_ ) wird jedoch durch die __Maximierung der Komponenten-Varianzen minimiert__
#
# $$ \min_{W \in R^{nxm}} \| Y_0 - WW^TY_0 \|_F^2, W^TW = I_{mxm}$$
#
# wobei $F$ die Frobenius-Norm bezeichnet.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "skip"}
# ### Minimaler quadratischer Gesamtrekonstruktionsfehler
#
# Die Transformations-Matrix $P_m$ kann ebenfalls durch Lösung der folgenden Gleichung berechnet werden:
#
# $$ \min_{W \in R^{nxm}} \| Y_0 - WW^TY_0 \|_F^2, W^TW = I_{mxm}$$
#
# wobei $F$ die Frobenius-Norm bezeichnet.
#
# Daraus folgt, dass $P_m$ __jeden zentrierten Vektor__ der LÀnge $n$ in einen Vektor der LÀnge $m$ mit ($ m < n$) derart __komprimiert__, dass die __Summe des quadratischen Rekonstruktions-Fehlers minimiert wird__.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### Skalierung
#
# Zur Berechung der PCA können viele verschiedene __iterative Algorithmen__ eingesetzt werden
# * QR Algorithmen
# * Jacobi Algorithmus
# * Power methode
# * SingulÀrwert-Zerlegung (Singular Value Decomposition, SVD)
#
# FÃŒr __sehr groÃe Datenmengen__ eignen sich diese Algorithmen __nicht__!
# + slideshow={"slide_type": "slide"}
# Full PCA reconstruction, used as the linear baseline against the DVAE.
def analytical_pca(y):
    """Fit a PCA with all components on `y` and return its reconstruction of `y`.

    With no component truncation the round trip transform -> inverse_transform
    reproduces the data up to numerical error; lower `n_components` to make
    the comparison lossy.
    """
    model = PCA()
    model.fit(y)
    # principal axes (kept for parity with the original notebook; unused)
    axes = model.components_
    # project onto the principal components, then map back to pixel space
    scores = model.transform(y)
    reconstruction = model.inverse_transform(scores)
    return reconstruction
# + [markdown] slideshow={"slide_type": "slide"}
# ## Grundlegende Mathematik der Autoencoder
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# FÌr jeden Eingangsvektor $x$ der Dimension $d$ des kompletten Datensaztes der LÀnge $n$ generiert das neuronale Netz eine Rekonstruktion $x'$ durch:
#
# * __Kodierung der Eingangsdaten__ (d.h. verwende die lineare / nicht-lineare Transformation $g_\phi(.)$)
# * dies liefert eine __komprimierte Kodierung__ in der dÃŒnnsten Netzwerk-Ebene, $z$
# * __Dekodierung der komprimierten Eingangsdaten__ durch Anwendung der linearen / nicht-linearen Transformation $f_\theta(.)$
#
# 
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "skip"}
# Die __Parameter $(\theta, \phi)$ werden im Verlauf des Training derart optimiert__, dass ein den Eingangsdaten möglichst Àhnliches Ergebnis , $x \approx f_\theta(g_\phi(x))$, produziert wird. In anderen Worten: __die IndentitÀts-Funktion wird erlernt__ mit __Cross-Entropy (bei sigmoid Aktivierungsfuntionen)__, __mittlere quadratische Fehler (MSE)__ etc.:
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Denoising Variational Autoencoders (DVAE)
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# Das Funktionsprinzip __unterscheidet sich__ vom grundlegenden Autoencoder dahingehend, dass ein gewisses Maà an __Störrauschen__ (einer __gewissen Wahrscheinlichkeitsverteilung__ folgend) den __Eingangsdaten hinzugefÌgt wird__ und dass die __verborgenen Ebenen__ dieses Rauschen __ausgleichen muss__ um die Eingangsdaten zu __rekonstruieren__ [Im, Bengio et al., 2017, Kingma et al., 2017].
# 
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "skip"}
# FÌr jeden gestörten Eingangsvektor $\tilde x$ eines originalen Vektors $x$ der Dimension $d$, generiert das neuronale Netz eine Rekonstruktion $x'$ durch:
# * __Kodierung der Eingangsdaten__, welche die Abbildung als Wahrscheinlichkeit der SchÀtzung von $z$ unter Verwendung der Eingangsdaten darstellt
# * dies liefert eine __komprimierte Kodierung in der dÃŒnnsten Netzwerk-Ebene__ $z$, welche der Verteilung $q_\phi(z|x)$ folgt
# * __Dekodierung der komprimierten Eingangsdaten__ an der Ausgangsebene unter Einhaltung des __Beobachtungs-Modells__ $p_\theta(x|z)$
#
# 
# + hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# Implement the DVAE
# encoder part: conv stack on the 28x28x1 input, flattened, then two dense
# heads producing the mean and log-variance of the latent Gaussian q(z|x).
x_noise = Input(shape=(28,28,1))
conv_1 = Conv2D(64,(3, 3), padding='valid',activation='relu')(x_noise)
conv_2 = Conv2D(64,(3, 3), padding='valid',activation='relu')(conv_1)
pool_1 = MaxPooling2D((2, 2))(conv_2)
conv_3 = Conv2D(32,(3, 3), padding='valid',activation='relu')(pool_1)
pool_2 = MaxPooling2D((2, 2))(conv_3)
h=Flatten()(pool_2)
z_mean = Dense(latent_dim)(h)       # mean of q(z|x)
z_log_var = Dense(latent_dim)(h)    # log-variance of q(z|x)
# + slideshow={"slide_type": "skip"}
# Implement the DVAE
# reparameterization trick
def sampling(args):
    """Sample z = mean + sigma * epsilon so the draw stays differentiable
    with respect to z_mean and z_log_var."""
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
# latent sample layer wired between encoder heads and decoder
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# + slideshow={"slide_type": "slide"}
# Implement the DVAE
# decoder part: transposed-conv stack growing the latent vector back to a
# 28x28x1 image; the trailing comments track the spatial size per layer.
# we instantiate these layers separately so as to reuse them later
z=Reshape([1,1,latent_dim])(z)
conv_0T = Conv2DTranspose(128,(1, 1), padding='valid',activation='relu')(z)#1*1
conv_1T = Conv2DTranspose(64,(3, 3), padding='valid',activation='relu')(conv_0T)#3*3
conv_2T = Conv2DTranspose(64,(3, 3), padding='valid',activation='relu')(conv_1T)#5*5
conv_3T = Conv2DTranspose(48,(3, 3), strides=(2, 2),padding='same',activation='relu')(conv_2T)#10*10
conv_4T = Conv2DTranspose(48,(3, 3), padding='valid',activation='relu')(conv_3T)#12*12
conv_5T = Conv2DTranspose(32,(3, 3), strides=(2, 2),padding='same',activation='relu')(conv_4T)#24*24
conv_6T = Conv2DTranspose(16,(3, 3), padding='valid',activation='relu')(conv_5T)#26*26
# sigmoid keeps reconstructed pixels in [0, 1]
x_out = Conv2DTranspose(1,(3, 3), padding='valid',activation='sigmoid')(conv_6T)#28*28
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ## DVAE
#
# * DVAE __Verlustfunktion__ beinhaltet die Erstellung von Beispielen aus $z \backsim q_\phi(z|x)$. Dies ist ein __stochastischer Prozess__ und eignet sich daher __nicht zur FehlerrÃŒckfÃŒhrung__.
#
#
# * Die __geschÀtzte Posteriori-Verteilung $q_\phi(z|x)$__ approximiert die tatsÀchliche Verteilung $p_\theta(z|x)$.
#
#
# * Wir können die __Kullback-Leibler Abweichung__, $D_{KL}$ benutzen um die __Differenz der beiden Verteilungen__ zu quantifizieren.
# + slideshow={"slide_type": "slide"}
# Implement the DVAE
# reparameterization trick
# NOTE(review): this cell duplicates the earlier `sampling`/`z` definition
# (the slide deck repeats it); re-running it redefines both from the same
# z_mean / z_log_var tensors.
def sampling(args):
    """Sample z = mean + sigma * epsilon (differentiable reparameterization)."""
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ## DVAE
#
# Durch __Minimierung des Verlusts__, __maximieren__ wir daher die __untere Schranke der Wahrscheinlichkeit__ (__evidence lower bound (ELBO)__) zur Generierung echter Daten-Beispiele.
# + slideshow={"slide_type": "slide"}
# Implement the DVAE
# instantiate model
dvae = Model(x_noise, x_out)
dvae.summary()
# Compute loss
def DVAE_loss(x_origin, x_out):
    """ELBO-based loss: reconstruction cross-entropy + KL(q(z|x) || N(0, I)).

    Uses the encoder tensors `z_mean` / `z_log_var` captured from module scope.
    """
    flat_true = K.flatten(x_origin)
    flat_pred = K.flatten(x_out)
    recon_loss = original_dim * metrics.binary_crossentropy(flat_true, flat_pred)
    kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return K.mean(recon_loss + kl_loss)
# compile the model
# The custom loss closes over the encoder tensors, so it is passed directly.
dvae.compile(optimizer='adam', loss=DVAE_loss)
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + slideshow={"slide_type": "slide"}
# Train the DVAE
# Noisy images are the inputs; the clean images are the reconstruction targets.
dvae.fit(noise_train,x_train, shuffle=True, epochs=epochs, batch_size=batch_size, validation_data=(noise_test, x_test))
# Comparison PCA vs. DVAE
# testing the DVAE
# Pick n_images random test indices to visualize side by side.
num_test=10000
showidx=np.random.randint(0,num_test,n_images)
x_out=dvae.predict(x_test[showidx])
# prepare data for testing PCA
# Flatten each image into a vector and rescale to [0, 1] for the PCA baseline.
pcaInputTest = np.reshape(x_test,[shape_x_test[0],shape_x_test[1]*shape_x_test[2]]).astype('float32')/255
pcaOutput = analytical_pca(pcaInputTest)
# + [markdown] slideshow={"slide_type": "skip"}
# 
# + slideshow={"slide_type": "skip"}
# Display results
# Build a 4-row image strip: original, noisy input, DVAE output, PCA output.
digit_size = 28
figure = np.zeros((digit_size * 4, digit_size * n_images))
for i,idx in enumerate (showidx):
    figure[0: 28,i *28: (i + 1) * 28] = np.reshape(x_test[idx], [28, 28]) # input data
    figure[28: 28 * 2,i *28: (i + 1) * 28] = np.reshape(noise_test[idx], [28, 28]) # noisy input data
    figure[28 * 2: 28 * 3,i *28: (i + 1) * 28] = np.reshape(x_out[i], [28, 28]) # DVAE output
    figure[28 * 3: 28 * 4,i *28: (i + 1) * 28] = np.reshape(pcaOutput[idx], [28, 28]) # PCA output
# NOTE(review): figsize is (width, height); (28*4, 28*n_images) looks transposed
# relative to the grid (4 rows tall, n_images wide) — confirm intended proportions.
plt.figure(figsize=(28 * 4, 28*n_images))
plt.imshow(figure, cmap='Greys_r')
# plt.savefig('inference_output.png')
plt.show()
# + slideshow={"slide_type": "skip"}
# %%HTML
# <style>
# td {
#     font-size: 15px
# }
# </style>
# + [markdown] slideshow={"slide_type": "slide"}
# # Vergleich von PCA und DVAE
#
# ### Inferenz
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Vergleich von PCA und DVAE
#
# ### Kostenfunktion
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Vergleich zwischen PCA und DVAE
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Vergleich zwischen PCA und DVAE
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # PCA vs. Autoencoders
# ## "Zwei identische Fremde"
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # PCA vs. Autoencoders
#
# * Ein __Autoencoder__ mit einer einzelnen __voll verbundenen (fully-connected) versteckten Ebene__, einer __linearen Aktivierungsfunktion__ und dem __quadratischen Fehler als Kostenfunktion__ ist __eng mit der PCA verwandt__ - seine __Gewichten__ spannen den __Untervektorraum der Hauptkomponenten__ auf [Plaut, 2018]
#
#
# * Bei __Autoencodern__ sorgt die __diagonale Approximation beim Kodiervorgang__ zusammen mit der __inhÀrenten StochastizitÀt__ fÌr lokale __OrthogonalitÀt beim Dekodieren__ [Rolinek et al, 2019]
#
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# # Lieraturverzeichnis
#
# [Goodfellow et al., 2016] <NAME>, <NAME> and <NAME>, Deep Learning, MIT Press, 2016.
#
# [Friedman et al., 2017] <NAME>, <NAME>, and <NAME>, The Elements of Statistical Learning: Data Mining, Inference, and Prediction, Springer, 2017.
#
# [Plaut, 2018] <NAME>., 2018. From principal subspaces to principal components with linear autoencoders. arXiv preprint arXiv:1804.10253.
#
# [<NAME> et al., 2017] <NAME>., <NAME>., <NAME>. and <NAME>., 2017, February. Denoising criterion for variational auto-encoding framework. In Thirty-First AAAI Conference on Artificial Intelligence.
#
# [Rolinek et al, 2019] <NAME>., <NAME>. and <NAME>., 2019. Variational Autoencoders Pursue PCA Directions (by Accident). In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (pp. 12406-12415).
#
# [Lei et al., 2018] <NAME>., <NAME>., <NAME>. and <NAME>., 2018. Geometric understanding of deep learning. arXiv preprint arXiv:1805.10451.
#
# [Kingma et al., 2013] <NAME>. and <NAME>., 2013. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### Maximale Varianzkomponenten, Kovarianz und Dekorrelation
#
# * Der erste Ladungs-Vektor ist derjenige Einheitsvektor mit dem das innere Produkt der Beobachtungs-Vektoren die __gröÃte Varianz__ aufweisen:
#
# $$ \max w_1^T Y_0Y_0^Tw_1, w_1^Tw_1 = 1$$
#
# * Die Lösung der vorherigen Gleichung ist der erste Eigenvektor der __Kovarianz-Matrix__ $Y_0Y_0^T$, welcher zum größten Eigenwert gehört.
#
# * Die Matrix $P$ kann durch __Diagonalisierung der Kovarianz-Matrix__ berechnet werden:
#
# $$ Y_0Y_0^T = P \Lambda P^{-1} = P \Lambda P^T $$
#
# $\Lambda$ ist eine Diagonal-Matrix, deren Diagonal-Elemente $\{\lambda_i\}^N_{i=1}$ (die Eigenwerte von $Y_0Y_0^T$) der Größe nach absteigend sortiert sind. $ Y = PX $ liefert die inverse Transformation. Da die Kovarianz-Matrix von $X$ diagonal ist, ist die PCA eine __dekorrelierende Transformation__.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### SingulÀrwert-Zerlegung
# ### (Singular Value Decomposition, SVD)
#
# Ein Vektor $v$ der Dimension $N$ ist ein __Eigenvektor__ einer quadratischen $N \times N$ Matrix $A$, wenn diese die folgende __lineare Gleichung__ erfüllt
#
# $$Av =\lambda v$$
#
# wobei $λ$ ein skalarer Wert ist, welcher als der __zum Eigenvektor v gehörende Eigenwert__ bezeichnet wird.
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# ### SingulÀrwert-Zerlegung
# ### (Singular Value Decomposition, SVD)
#
# Die Matrix $Y_0 \in R^{nxN}$ kann __faktorisert__ werden als $Y_0 = U \Sigma V^T$, wobei $U \in R^{nxn}$ und $V \in R^{NxN}$ __orthogonale Matrizen__ sind und $\Sigma \in R^{nxN}$ abgesehen von der Diagonalwerten (den sogenannten __SingulÀr-Werten__) nur aus Nullen besteht.
#
# Die Singulärwertzerlegung von $Y_0$ ist äquivalent zur __Eigenwertzerlegung__ von $Y_0Y_0^T$.
#
# 
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# <style>
# td {
# font-size: 15px
# }
# </style>
#
# # Vergleich von PCA und DVAE
#
# ### Lernen der Mannigfaltigkeit
#
# |__PCA__|__DVAE__|
# |:-----|:---|
# | Kodierung/Dekodierung, keine Robustheit gegen Rauschen | nicht-linear, probabilistische Kodierung/Dekodierung mit Robustheit gegen Rauschen und nicht-linearen Aktivierungsfunktionen|
# | unkorrelierte Koordinaten | korrelierte Ausgansdaten an der dÃŒnnsten Netzwerkebene |
# | Koordinaten sind in absteigener Reihenfolge der Varianz geordnet | Koordinaten sind ungeordnet |
# | die Spalten der Transformations-Matrix sind orthonormal | die Spalten der Transformations-Matrix sind nicht notwendigerweise orthonormal |
# | Robustheit gegenÃŒber moderatem Rauschen mit bekannten Verteilungen | Robustheit gegen eine Vielzahl verschiedener Arten und GröÃenordnungen an injeziertem Rauschen (masking noise, Gaussian noise, salt-and-pepper noise), da das Entrauschen entscheidung fÃŒr die Generalisierung ist |
# | einfacher Algorithmus (ohne Regularisierung), geringe Robustheit | die Punkte in niedrig-dimensionalen Mannifaltigkeiten sind robust gegen Rauschen im hoch-dimensionalen Beobachtungs-Raum |
# + [markdown] hideCode=false hidePrompt=false slideshow={"slide_type": "slide"}
# <style>
# td {
# font-size: 15px
# }
# </style>
# # Vergleich zwischen PCA und DVAE
#
# ### Training
#
# |__PCA__|__DVAE__|
# |:-----|:---|
# | Abbildung der Eingangsdaten auf einen festen Vektor | Abbildung der Eingangsdaten auf eine Wahrscheinlichkeitsverteilung |
# | iterative Methoden: QR Zerlegung, Jacobi Algorithmus, SingulÀrwertzerlegung | FehlerrÌckfÌhrung (Backpropagation) |
# | aufgrund der Kovarianz-Berechnung ineffizient bei groÃen Datenmengen | effizient bei groÃen Datenmengen aufgrund der starken FÀhigkeit des Erlernens der Mannigfaltigkeit |
# | basiert auf der Korrelations-/Kovarianz-Matrix, welche - zumindest in der Theorie - sehr empfindlich gegenÃŒber AusreiÃern sein kann | kann Beispiele direkt aus dem Eingangsraum generieren und daher die Eigenschfaten des Eingangsrauschens beschreiben ("reparametrization trick") |
# PCAvsDVAEde.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sorayabailey23/Group-93/blob/main/Assessor_Data_Cleaning_2016.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="NCkY_HRffOx3"
# # Assessor Data Cleaning - 2016
#
# ---
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="-VAEn9kMc05w" outputId="94673c50-197e-4f63-df1a-62cbe34a19eb"
#importing libraries + drive
# Mount Google Drive so the raw assessor CSV can be read from shared storage.
from google.colab import drive
drive.mount('/content/gdrive')
# + id="ljH4c6VVc-Yc"
import numpy as np
import pandas as pd
import math
# + [markdown] id="JuXhCc6DnxXp"
# ##Uploading + Exploring the Data
#
# I'll first upload the raw data and see what features it includes.
#
# + colab={"base_uri": "https://localhost:8080/"} id="lxrs7T_IddbX" outputId="98f5adcd-8059-4043-d13f-229b7ae8c2eb"
#Importing the 2016 assessor parcel records for Los Angeles County
records_df = pd.read_csv("gdrive/My Drive/Team 93 Capstone Project/data/Assessor_Parcels_Data_2016.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 496} id="vMCCCrxGflQ0" outputId="5797b4ba-88de-42a7-e4b5-7fb4560fa4cf"
records_df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="gQbh0Kgye-UL" outputId="6d521782-a6f0-4ac1-afda-8d7d344a2799"
records_df.columns
# + colab={"base_uri": "https://localhost:8080/"} id="lfqk_ipUfD13" outputId="5eb687b9-bd07-4cd9-dc27-35ffaa828230"
#verifying that we pulled data for the correct city + year
records_df["taxratearea_city"].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="GYuUpNCe4QkN" outputId="005ba10e-9584-490a-f6e2-ec04e901db42"
records_df["rollyear"].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="M2J81KULgBsX" outputId="ed90c536-266a-4d01-ab8b-4385079fe408"
records_df.info()
# + [markdown] id="YnAWXm-moobd"
# ## Relevance
#
# The dataset includes a lot of information that I don't need. For this section, I'll
#
#
# 1. Drop the columns that I don't need
# 2. Check the remaining columns for unique values
# 1. Handle missing values, including dropping data that does not include zip codes
#
#
#
#
#
# + [markdown] id="7gfuNxT-vPjj"
# ###Determining which columns to drop
# + id="eADWVKnugHQ_"
#dropping irrelevant columns
# Keep only the identification, land-use, valuation, and zip-code fields needed
# downstream; every other column of the raw export is discarded.
records_df = records_df.drop(columns=['Unnamed: 0', 'situszip','ain',
    'taxratearea', 'usetype', 'usecode','usecodedescchar3', 'usecodedescchar4',
    'totbuildingdatalines', 'yearbuilt', 'effectiveyearbuilt', 'sqftmain',
    'bedrooms', 'bathrooms', 'units', 'recordingdate', 'roll_landvalue',
    'roll_landbaseyear', 'roll_impvalue', 'roll_impbaseyear',
    'roll_totlandimp', 'roll_homeownersexemp', 'roll_realestateexemp',
    'roll_fixturevalue', 'roll_fixtureexemp', 'roll_perspropvalue',
    'roll_perspropexemp', 'istaxableparcel', 'roll_totalvalue',
    'roll_totalexemption',
    'adminregion', 'cluster', 'parcelboundarydescription', 'situshouseno',
    'situsfraction', 'situsdirection', 'situsstreet', 'situsunit',
    'situscity', 'rowid', 'center_lat', 'center_lon', 'location_1','propertylocation', "parcelclassification"])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="eO3Qf49aiSfa" outputId="286f9712-c428-4d27-ab4c-1a38e57d787b"
records_df.head()
# + id="dTZFuqdtjZhG"
#renaming columns to more recognizable names
records_df.columns=["city", "year", "assessor_id","general_use", "specific_use","net_value", "zip_code"]
# + [markdown] id="7CDOlnKKy3QF"
# ###Handling Missing Values in ["assessor_id"] (none!)
# + colab={"base_uri": "https://localhost:8080/"} id="hd6pxjgay4UW" outputId="05bd7464-3183-41c9-e1a9-1dc28f97bff8"
len(records_df['assessor_id'].unique())
# + colab={"base_uri": "https://localhost:8080/"} id="y7h03GazzKd_" outputId="c69c0700-4bcb-4032-a618-2487b8f121a6"
records_df['assessor_id'].isnull().sum()
# + id="2cFxx0KyzW7m"
#this series is good to go
# + [markdown] id="ReWrRY0oporI"
# ### Handling Missing Values in ["general_use"]
# + colab={"base_uri": "https://localhost:8080/"} id="l4e1TOk4jt9R" outputId="05a70749-3c14-44bc-de25-09f5e4d11244"
#we have entries w/ nan and (unavailable) string
records_df["general_use"].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="WDSETMuEmk0f" outputId="0399caae-adea-4c57-a790-a23241c2a504"
#checking how many nans in general_use
records_df['general_use'].isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="piYOjeBwnJDo" outputId="14dd7f91-185b-4627-8ab2-fdf9621a32c7"
#replacing nan with (unavailable)
# Matches the '(unavailable)' sentinel string already present in the raw data.
records_df["general_use"] = records_df["general_use"].fillna('(unavailable)')
records_df["general_use"].unique()
# + [markdown] id="0GufVSxFqCKE"
# ###Handling Missing Values in ["specific_use"]
# + colab={"base_uri": "https://localhost:8080/"} id="HAHuT5nbluSs" outputId="7f1e755b-1eed-4bad-95ee-cc10deb4005f"
#there is an empty string in this column that we should replace with nan or Not Available
records_df["specific_use"].unique()
# + id="jq8kMCG_rEXt"
records_df["specific_use"]= records_df["specific_use"].fillna('(unavailable)')
# + colab={"base_uri": "https://localhost:8080/"} id="uh9gY_ejrrnt" outputId="025ff495-694a-4921-f85f-8233291ab507"
records_df["specific_use"].unique()
# + [markdown] id="Z7qACCGMsjCh"
# ###Handling Missing Values in ["net_value"]
# + colab={"base_uri": "https://localhost:8080/"} id="HrviLMgEsqx0" outputId="58e795d1-013d-4df9-d3de-97ba17937be3"
#this is good to go
records_df['net_value'].isnull().sum()
# + [markdown] id="-e6SP1P5tWl2"
# ###Handling Missing Values in ["zip_code"]
# + colab={"base_uri": "https://localhost:8080/"} id="c25sl2oHtsKp" outputId="3ed654b5-aa96-49e2-c9a8-a4084a700f38"
#good to go
records_df['zip_code'].unique()
# + [markdown] id="ts4jqOcDvaDz"
#
# + [markdown] id="lY2D0rTM1LBf"
# ###Dropping Data w/o Zip Codes
# + id="iGDU6SRZ1V2_"
# Drop rows without a zip code, then rebuild a contiguous integer index.
records_df = records_df[records_df['zip_code'].notna()]
# + id="txKUvLdi38Lc"
records_df = records_df.reset_index()
records_df = records_df.drop(columns=['index'])
# + [markdown] id="6JgE_Lf0vcMY"
# ##Consistency
# Now that I've handled missing values, I will check consistency over:
#
#
# 1. Data type
# 2. Categorical
# 2. Referential integrity
#
#
#
# + [markdown] id="QXWw3cnZzgKu"
# ###Data type
# + colab={"base_uri": "https://localhost:8080/"} id="c1uhByKgzy8L" outputId="1b603790-b116-4335-c34c-b09c36442ae9"
records_df.info()
# + id="-2O6lEu0z68z"
#changing to category type
records_df["assessor_id"] = records_df["assessor_id"].astype("category")
records_df["general_use"] = records_df["general_use"].astype("category")
records_df["specific_use"] = records_df["specific_use"].astype("category")
#since all NaN have been removed, change type of zip code column to int. this may change later
records_df["zip_code"] = records_df["zip_code"].astype("int")
#change year to datetime format
records_df["year"] = pd.to_datetime(records_df['year'], format='%Y')
# + id="Lqz_AATz7DwX"
# Reduce the datetime back to a plain integer year for easier grouping.
records_df["year"] = records_df["year"].dt.year
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="_xgmtqrP9K7G" outputId="0b05bb8b-07c5-4686-9e74-e9cb4f5339f6"
records_df
# + colab={"base_uri": "https://localhost:8080/"} id="Utix0Z9B81v6" outputId="76bd3d00-c193-4769-aa0f-d83881396eb4"
records_df.info()
# + [markdown] id="bzxGEx1u9j9Q"
# ###Categorical Consistency
# + colab={"base_uri": "https://localhost:8080/"} id="x7CqQViw9m2f" outputId="f1b68b25-401e-48e6-ad15-b18744ee7110"
#consistent
records_df["general_use"].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="bMibny9_9_5R" outputId="d298b4e0-62f0-4bec-85d4-7a38a068c95c"
#consistent
records_df["specific_use"]
# + [markdown] id="zXeLAAXO-jbL"
# ### Referential integrity
# + id="9lA8x_6G-qgS"
#making the assessor_id a unique ID per parcel per year
# Suffix with the roll year so IDs remain unique if multiple years are merged.
records_df['assessor_id'] = records_df["assessor_id"].apply(lambda x: x +"-2016")
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="n_OkPZtP-xy-" outputId="17cd6852-1e06-49f0-cde1-de60b2f4aa91"
records_df
# + [markdown] id="aYDRoKMS_nb5"
# ##Exporting Data
# + id="Wc8-dw69_qmW"
# Persist the cleaned 2016 parcel data for downstream notebooks.
records_df.to_csv("gdrive/My Drive/Team 93 Capstone Project/data/Assessor_Parcels_Data_2016_CLEANED.csv")
# + id="zHEo40z8_9R0"
# Assessor_Data_Cleaning_2016.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/KartikChugh/MachineLearning/blob/master/svm_classification_moons.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="TBq3yEPxSEWC"
# # Classification Task: Moons
#
# In this project, we implement kernel SVMs capable of classifying non-linear data and evaluate their performance against standard library solutions using Scikit-Learn's [cluster generation](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html) utilities.
#
# + id="UHuNMst1Wqy2"
# Import some common packages
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
# Setup matplotlib for graphical display
# %matplotlib inline
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# To make this notebook's output stable across runs
# One seed reused for data generation, splitting, and model fitting.
SEED = 42
np.random.seed(SEED)
# + [markdown] id="xxGTRvPXTeDj"
# ## 1. Dataset
#
# Sklearn provides several utility functions for loading and generating datasets to work with, including `make_moons`.
#
# > `make_circles` and **`make_moons` generate 2d binary classification datasets that are challenging to certain algorithms** (e.g. centroid-based clustering or linear classification), including optional Gaussian noise. They are useful for visualisation. `make_circles` produces Gaussian data with a spherical decision boundary for binary classification, while **`make_moons` produces two interleaving half circles.**
#
# We start by using the function to generate 500 such data points (jittered with a bit of random noise), and then divide them into train and test sets.
# + id="GofgUJhsDuAO"
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
# Generate two interleaving half-moons jittered with Gaussian noise.
X, y = make_moons(n_samples=500, noise=0.15, random_state=SEED)
y = y*2-1.0 # convert the labels from {0,1} to {-1, +1}
# 80/20 train/test split, seeded for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
# + [markdown] id="v4N1ZXQSFaWK"
# The `plot_svm()` function below visualizes the decision boundary, margin, and support vectors in a 2D feature space for the classifiers we will train.
#
# It assumes that a provided classifier has a `support_vectors_` attribute, used to render all support vectors in red.
# + id="0Wht-_GHTXrn"
def plot_svm (clf, X, y, axes=(-2, 3, -2, 2)):
    """
    Generate a simple plot of SVM including the decision boundary, margin, and its training data

    Parameters
    ----------
    clf: classifier handle; must provide predict(), decision_function() and a
         `support_vectors_` attribute (support vectors are drawn in red)
    X: feature matrix shape(m_samples, n_features)
    y: label vector shape(m_samples, ) with values in {-1, +1}
    axes: (optional) the axes of the plot in format [xmin, xmax, ymin, ymax]
          (default changed to an immutable tuple to avoid a mutable default arg)
    """
    # Create a mesh grid based on the provided axes (100 x 100 resolution)
    x0s = np.linspace(axes[0], axes[1], 100)
    x1s = np.linspace(axes[2], axes[3], 100)
    x0, x1 = np.meshgrid(x0s, x1s)  # create a mesh grid
    X_mesh = np.c_[x0.ravel(), x1.ravel()]  # flatten mesh into (10000, 2) points
    y_pred = clf.predict(X_mesh).reshape(x0.shape)  # predictions back in grid shape
    y_decision = clf.decision_function(X_mesh).reshape(x0.shape)
    # BUG FIX: the original `plt.figsize = (16, 9)` only set an unused module
    # attribute and had no effect; resize the current figure instead.
    plt.gcf().set_size_inches(16, 9)
    plt.plot(X[:, 0][y==-1], X[:, 1][y==-1], "bo", label="Class -1")
    plt.plot(X[:, 0][y==1], X[:, 1][y==1], "go", label="Class +1")
    # Plot out the support vectors (in red)
    plt.scatter(clf.support_vectors_[:,0], clf.support_vectors_[:,1], s=80, c="r", label="Support Vectors")
    # Plot decision boundary and margins: filled class regions, shaded decision
    # values, and dashed lines at the +/-1 margins.
    plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.1)
    plt.contourf(x0, x1, y_decision, cmap=plt.cm.brg, alpha=0.2)
    plt.contour(x0, x1, y_decision, colors='k',
                levels=[-1, 0, 1], alpha=0.5,
                linestyles=['--', '-', '--'])
    plt.legend(loc="lower right")
    plt.axis("auto")
    plt.grid(True, which='both')
    plt.xlabel(r"$x_1$", fontsize=20)
    plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
# + [markdown] id="b_aV2q47DOdT"
# - - -
# ## 2. Model Training, Tuning, & Testing
#
# Next, we feed the train data to sklearn's Support Vector Classifier (SVC) module, training three models with different **kernels:** (1) **Linear**, (2) **Polynomial**, and (3) **Gaussian RBF**.
#
# Our hyperparameter selection process consists of a **grid search**, which iterates across combinations of hyperparameters and selects the best for each kernel (based on **5-fold cross validation**).
#
# We then report their respective confusion matrices, classification metrics, and plot visualizations computed with the test data.
#
# + [markdown] id="WQB-0g-7Sg5u"
# The below code sets up and executes a grid search across wide ranges of values.
# + id="Bo_YIX46X1GO"
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
svc = SVC(random_state=SEED)
# Grid-search space: one parameter dictionary per kernel family.
hyperparams = [
    {
        'kernel': ['linear'],
        'C': [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e4]
    },
    {
        'kernel': ['poly'],
        'C': [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e4],
        'degree': [2, 3, 4],
        'coef0': [0, 1, 10, 100]
    },
    {
        'kernel': ['rbf'],
        'C': [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e4],
        'gamma': [1e-4, 1e-2, 1e0, 1e2, 1e4],
    }
]
# + id="uOWbs3eHOkB3"
# Exhaustive search over all kernel/hyperparameter combinations,
# scored by accuracy with 5-fold cross-validation on the training split.
grid_search = GridSearchCV(svc, param_grid=hyperparams, scoring='accuracy', cv=5)
grid_search.fit(X_train, y_train);
# + [markdown] id="kaGS5M3_Svfh"
# We then define utility functions to retrieve the optimal hyperparameters and report the relevant ones for a given kernel, alongside its results on test data.
# + id="9FjPgeZhpNHB"
# Returns the best model for each kernel from grid search results
def get_models_from_grid_search(search_results, kernels):
    """Return {kernel: unfitted SVC} built from each kernel's best-ranked params."""
    order = np.argsort(search_results['rank_test_score'])
    # Parameter dicts sorted from best to worst CV score.
    params_by_rank = np.array(search_results['params'])[order]
    # Kernel name of each ranked entry, in the same order.
    kernel_by_rank = np.array([params['kernel'] for params in params_by_rank])
    best_models = {}
    for kernel in kernels:
        top_idx = np.where(kernel_by_rank == kernel)[0][0]  # first hit = best rank
        best_models[kernel] = SVC(**params_by_rank[top_idx], random_state=SEED)
    return best_models
# + id="JFzGktS2AFdN"
# Determines whether a hyperparameter is relevant to report for a given kernel
def relevant(kernel, hyperparam):
    """Return True if `hyperparam` should be reported for `kernel`.

    'C' applies to every kernel; 'degree'/'coef0' only to 'poly';
    'gamma' only to 'rbf'.  FIX: the fall-through case (e.g. a linear kernel
    with any non-'C' hyperparameter) previously returned an implicit None;
    it now returns False explicitly (identical truthiness for callers).
    """
    if hyperparam == 'C':
        return True
    if kernel == 'poly':
        return hyperparam in ['degree', 'coef0']
    if kernel == 'rbf':
        return hyperparam == 'gamma'
    return False
# + id="3zkd752nPelK"
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
def evaluate_models(models_dict, kernels):
    """Fit each model on the training split, then print its test-set confusion
    matrix, classification report, and a decision-boundary plot."""
    for fig_idx, kernel_name in enumerate(kernels):
        clf = models_dict[kernel_name]
        clf.fit(X_train, y_train)
        # Only show the hyperparameters meaningful for this kernel.
        shown_params = {name: val for name, val in clf.get_params().items()
                        if relevant(kernel_name, name)}
        predictions = clf.predict(X_test)
        print('====================================================')
        print('Kernel: ', kernel_name)
        print('====================================================')
        print('Support vectors: ', len(clf.support_vectors_))
        print('Hyperparameters: ', shown_params, '\n')
        print('Confusion Matrix: ')
        print(confusion_matrix(y_test, predictions), '\n')
        print('Classification Report:')
        print(classification_report(y_test, predictions))
        print('Visualization: ')
        plt.figure(fig_idx)
        plot_svm(clf, X_test, y_test)
        plt.show()
# + [markdown] id="NUbsTDvLTdfw"
# Now we can see the configurations for our fully-tuned classifiers, and assess their impacts on performance. In particular, we're interested in each classifier's:
#
# * **Precision**: the percent of classifications for a label that are correct
# * **Recall**: the percent of instances for a label that are correctly classified
# * **Confusion Matrix**: number of true/false positives/negatives (from which the above metrics are calculated)
# * **Plot**: visualization of the margin, decision boundaries, and support vectors
#
#
#
# + id="DQn_O_5MC54R" outputId="cdf9391c-2329-42c7-ce67-061e997e67cc" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Build the best configuration per kernel found by the grid search,
# then fit and report each on the held-out test set.
kernels = ['linear', 'poly', 'rbf']
svm_models = get_models_from_grid_search(grid_search.cv_results_, kernels)
evaluate_models(svm_models, kernels)
# + [markdown] id="yN4aOUVoW55K"
# Our soft-margin **linear SVC struggles** (understandably) **to separate the data**, admitting noticeable amounts of violations on both sides of the margin. By contrast, our polynomial and RBF classifiers perform excellently, with just 2 and 1 misclassifications respectively (hence, 98% and 99% accuracies).
#
# We can see how **non-linear kernels allow us to model much more complex relationships** within our data.
# + [markdown] id="ce_XKlbNZvXX"
# ## 3. Implementation: Custom SVCs
#
# Now that we've seen how the standard library SVCs perform on the dataset, we attempt to implement our own version from scratch. Adopting template code from [<NAME> article](http://web.archive.org/web/20160617002127/http://mblondel.org/journal/2010/09/19/support-vector-machines-in-python/), we work on 6 subtasks, identified in comments below.
#
# + [markdown] id="CrJ7lo9XZlPe"
# The first step is to implement three kernel functions — which we use to transform data into non-linear feature spaces — taking care to implement the appropriate hyperparameters.
# + id="5ty2ssgfcw1E"
# Linear Kernel
def linear_kernel(u, v):
    """Plain inner product <u, v> — no feature-space mapping."""
    dot_product = np.dot(u, v)
    return dot_product
# SUBTASK 1: Polynomial Kernel (of degree up to and including p)
def polynomial_kernel(u, v, p=3, r=1):
    """Polynomial kernel (<u, v> + r)**p; r > 0 mixes in lower-order terms."""
    shifted = np.dot(u, v) + r
    return shifted ** p
# SUBTASK 2: Gaussian RBF Kernel
def rbf_kernel(u, v, gamma=0.1):
    """Gaussian radial basis kernel exp(-gamma * ||u - v||**2)."""
    squared_distance = np.linalg.norm(u - v) ** 2
    return np.exp(-gamma * squared_distance)
# + id="7ocjh6xBSEMG"
import cvxopt # The optimization package for Quadratic Programming
import cvxopt.solvers
from sklearn.base import BaseEstimator, ClassifierMixin # Sklearn compatability
class MySVM(BaseEstimator, ClassifierMixin):
    """The Implementation of the SVM class.

    A hard/soft-margin SVM trained by solving the dual quadratic program with
    CVXOPT.  Inherits the sklearn mixins so it works with sklearn utilities.
    """
    # Note that a C of none is not recommended: C=None means a hard margin
    # (multipliers unbounded above), which may be infeasible on noisy data.
    def __init__(self, kernel=linear_kernel, C=None):
        self.kernel = kernel  # kernel function k(u, v) -> scalar
        self.C = C if C is None else float(C)  # soft-margin box-constraint bound

    def fit(self, X, y=None):
        """
        Train SVM based on the training set

        Parameters
        ----------
        X: feature matrix shape(m_samples, n_features)
        y: label vector shape(m_samples, ) with values in {-1, +1}
        """
        m_samples, n_features = X.shape
        # To speed up repeated applications of the kernel function, dynamic programming is used
        # All pairs of points have the kernel function computed over them and the result stored in K
        # K is indexed by indices in X, so K[i,j] = kernel_function(X[i], X[j])
        K = np.zeros((m_samples, m_samples))
        for i in range(m_samples):
            for j in range(m_samples):
                K[i,j] = self.kernel(X[i], X[j])
        self.K = K
        # This part requires some understanding of Quadratic Programming (QP)
        # Below is the user's guide for the QP from CVXOPT
        # http://cvxopt.org/userguide/coneprog.html#quadratic-programming
        # DO NOT edit any of this code until Subtask 3
        # Dual problem: minimize (1/2) a^T P a + q^T a  s.t.  G a <= h, A a = b
        P = cvxopt.matrix(np.outer(y,y) * K)
        q = cvxopt.matrix(np.ones(m_samples) * -1)
        A = cvxopt.matrix(y, (1,m_samples))  # equality constraint: sum_i a_i y_i = 0
        b = cvxopt.matrix(0.0)
        if self.C is None:
            # Hard margin: only the constraint a_i >= 0
            G = cvxopt.matrix(np.diag(np.ones(m_samples) * -1))
            h = cvxopt.matrix(np.zeros(m_samples))
        else:
            # Soft margin: box constraint 0 <= a_i <= C
            tmp1 = np.diag(np.ones(m_samples) * -1)
            tmp2 = np.identity(m_samples)
            G = cvxopt.matrix(np.vstack((tmp1, tmp2)))
            tmp1 = np.zeros(m_samples)
            tmp2 = np.ones(m_samples) * self.C
            h = cvxopt.matrix(np.hstack((tmp1, tmp2)))
        # solve QP problem
        solution = cvxopt.solvers.qp(P, q, G, h, A, b, options={'show_progress': False});
        # Lagrange multipliers for each point in X
        a = np.ravel(solution['x'])
        # SUBTASK 3: Find support vectors (non-zero lagrange multipliers)
        # Numerical tolerance 1e-3: multipliers below it are treated as zero.
        sv = a > 1e-3
        X_sv = X[sv]
        self.support_vectors_ = X_sv
        self.y_sv = y[sv]
        self.a_sv = a[sv]
        if self.kernel == linear_kernel:
            # SUBTASK 4: Compute weight vector w = sum_i a_i y_i x_i
            self.w = np.sum((self.a_sv * self.y_sv)[:, np.newaxis] * X_sv, axis=0)
        else:
            # No need to compute for w in non-linear case, as we will use the kernel trick
            self.w = None
        # SUBTASK 5: Compute intercept b from the first support vector
        k = np.where(sv)[0][0] # unpack tuple before access
        self.b = y[k] - np.sum(self.a_sv * self.y_sv * K[k][sv])

    def decision_function(self, X):
        """The decision function is essentially w^T . x + b"""
        if self.w is not None:
            return np.dot(X, self.w) + self.b
        else:
            # SUBTASK 6: Implement kernel trick for non-linear label prediction
            # Iterative approach:
            # y_predict = np.zeros(len(X))
            # for i in range(len(X)):
            #     for a, y, x in zip(self.a_sv, self.y_sv, self.support_vectors_):
            #         y_predict[i] += a * y * self.kernel(x, X[i])
            # return y_predict + self.b
            # Vectorized approach:
            # Combine each test example with support vector / related data values
            examples_sv_data = np.array([(x_test, self.a_sv, self.y_sv, self.support_vectors_) for x_test in X])
            # For every example, perform kernel trick on it and each support vector, then multiply each a and y value
            products = np.array([ row[1][i] * row[2][i] * self.kernel(row[3][i], row[0]) for row in examples_sv_data for i in range(len(self.support_vectors_)) ])
            # Sum all the products generated for each example
            sums = np.add.reduceat(products, np.arange(0, len(products), len(self.support_vectors_)))
            # Add bias to prediction and return!
            return sums + self.b

    def predict(self, X):
        """Predicts -1,+1 based on the sign of the decision function"""
        return np.sign(self.decision_function(X))
# + [markdown] id="TuI0EfvTJTIH"
# ---
# ## Results & Comparison
#
# Now that we've implemented our own SVC class with some kernel functions to go with it, we are ready to use it!
#
# As before, we fit the models to the train data and evaluate them on the test data; but this time, we can use the findings of our grid search earlier to preset our hyperparameters â no tuning process required!
# + [markdown] id="LfQGQZvzuAOe"
# To recap, the best hyperparameters found for the standard library SVMs were:
#
# - **Linear:** C=10000
# - **Polynomial:** C=1, r=1, degree=4
# - **RBF:** C=100, gamma=1
#
# Below we instantiate custom SVMs with the same configuration.
#
# + id="d_4ju2X6IUDt"
from functools import partial
# Instantiate the custom SVMs with the best hyperparameters found by the
# earlier grid search over the standard library implementations (see the
# recap in the markdown cell above).
mysvm_linear = MySVM(kernel=linear_kernel, C=10000)
mysvm_poly = MySVM(kernel=partial(polynomial_kernel, p=4, r=1), C=1)  # degree-4 polynomial, r=1
mysvm_rbf = MySVM(kernel=partial(rbf_kernel, gamma=1), C=100)
mysvm_models = {'linear': mysvm_linear, 'poly': mysvm_poly, 'rbf': mysvm_rbf}
# + [markdown] id="avk_bA1KKrpk"
# And assess their performance using the same metrics and visualization as earlier.
# + id="cQ67ou7TEpVZ" outputId="7628f34f-551a-47b7-92c3-441e2826ff42" colab={"base_uri": "https://localhost:8080/", "height": 1000}
evaluate_models(mysvm_models, kernels)
# + [markdown] id="Qp4XFS14Llcn"
# Amazing! We see that **our hand-made classifiers achieve results comparable to standard library implementations**. Specifically,
#
# * **Linear**: The same number of misclassifications occur (14), hence the same accuracy. The actual margins are shifted a bit, changing which labels were misclassified. Also of note is the increased number of support vectors, from 116 to 130.
#
# * **Polynomial**: Very similar plots, with noticeable differences in the margin further out from the clusters. We pick up one additional correct classification in our custom implementation (98% → 99% accuracy)
#
# * **RBF**: We get identical margins, boundaries, and support vectors, and thus the same results as well. The two plots are carbon copies. An interesting sight!
#
#
# + [markdown] id="KU1_Bd4whqam"
# ## Conclusion
#
# While more challenging than implementing gradient descent or the closed-form normal equation for linear regression, **building state-of-the-art kernel SVMs from the ground up is a very doable task**, and one that benefits from NumPy's vectorized operations.
#
# Further work might explore the basis for the difference in support vectors, and how this affects weights and biases for different kernels.
| svm_classification_moons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recherche d'un seuil pour le score sparse
#
# ## Objectif
#
# On cherche à déterminer un seuil optimal pour lequel on décide d'afficher la réponse ou pas. <br>
# *exemple: si seuil < 0.3, ne pas afficher la réponse*
# - D'abord on fait une *analyse statistique* rapide pour voir quelle est la distribution du score pour l'erreur et pour le succes du sparse
# - On tente ensuite une approche Machine Learning pour *classifier* 0: erreur (= on affiche pas la réponse) et 1: success (=on affiche la réponse)
# - On ouvre sur des potentielles pistes un peu plus 'probabilistes' et on propose d'aller plus loin dans la recherche d'une solution au problÚme
# <br>
#
# *Attention*, ici on utilise le dataset 'analysis_results.csv' (Robin), on s'intéresse exclusivement au **score sparse** et on se restreint au cas **k=5**. <br>
# D'abord le cas **with lemma**.
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# Per-question retrieval results; restrict to k = 5 for the lemmatized
# sparse retriever, as announced in the introduction above.
data = pd.read_csv("analysis_results.csv")
len(data)
data = data[data['position_sparse_lemma']==5.0]
len(data) # 626 results for k = 5 (with lemma)
# Split the k=5 results into successes / errors and tag each subset with a
# 'label' column. Work on explicit copies so the column assignment does not
# hit pandas' SettingWithCopyWarning (currently masked by the global
# warnings filter above).
suc = data[data['fiche_ok'] == True].copy()
print(len(suc))  # 111 successes for k = 5 and the sparse retriever (with lemma)
suc['label'] = 'succes'
err = data[data['fiche_ok'] == False].copy()  # 512 errors for sparse and k = 5
err = err[~pd.isnull(err['score_sparse_lemma'])]  # drop rows with no sparse score
print(len(err))
err['label'] = 'error'
data2 = pd.concat([suc, err])
# ## Analyse statistique
# **Boxplot pour erreurs et succes du sparse**
# Side-by-side score distributions for the two outcomes.
ax = sns.boxplot(x="label", y="score_sparse_lemma", data=data2)
plt.subplot(1, 2, 1)
ax1 = sns.boxplot(x=suc['score_sparse_lemma']) # distribution of successes
plt.title("Distribution of successes")
plt.subplot(1, 2, 2)
ax2 = sns.boxplot(x=err['score_sparse_lemma']) # distribution of errors
plt.title("Distribution of errors")
# **Statistiques déscriptives**
# Descriptive statistics per label, plus the median (describe() does not
# label it explicitly as such).
from statistics import median
median_error = median(err['score_sparse_lemma'])
median_success = median(suc['score_sparse_lemma'])
stata = data2.groupby(['label']).describe()['score_sparse_lemma']
# groupby orders the labels alphabetically: row 0 = 'error', row 1 = 'succes',
# so reuse the medians computed above instead of recomputing them inline.
stata['median'] = [median_error, median_success]
stata
# **Correlation**
# Encode the string labels as integers so they can enter the correlation
# computation below.
from sklearn.preprocessing import LabelEncoder
data2['label_encoded'] = LabelEncoder().fit_transform(data2['label'])
# 0: error, 1: success
# Pairwise correlation of the numeric columns (computed, not displayed here).
corr = data2.corr()
# **Density**
import scipy.stats as stats
# Histogram of each score distribution with a normal pdf overlaid.
# The hard-coded (mean, std) pairs are presumably taken from the describe()
# table above -- TODO confirm against stata.
plt.subplot(1, 2, 1)
x =[i for i in range(len(err['score_sparse_lemma']))]
plt.hist(err['score_sparse_lemma'],density=True)
y = [i for i in range(0,80)]
plt.plot(y, stats.norm.pdf(y, 19.614372, 8.302991))
plt.subplot(1, 2, 2)
x =[i for i in range(len(suc['score_sparse_lemma']))]
plt.hist(suc['score_sparse_lemma'],density=True)
y = [i for i in range(0,50)]
plt.plot(y, stats.norm.pdf(y,19.614263, 7.799816))
# ## Approche ML: classification ERROR /SUCCESS
#
# *On souhaiterait obtenir un classifieur qui, en fonction du score, puisse prédire 0: 'le retiever sparse va faire une erreur', 1: 'le retriever sparse va trouver la bonne réponse'. Ceci nous permetterait de décider si afficher la réponse (1) ou pas (0).* <br>
# On fait une regression logistique.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# Single feature (the sparse score) -> predict 0: error / 1: success.
X_clf = data2['score_sparse_lemma']
Y_clf = data2['label_encoded']
X_train_clf, X_test_clf, y_train_clf, y_test_clf = train_test_split(X_clf, Y_clf, test_size=0.2, random_state=42)
# scikit-learn wants a 2-D feature matrix but 1-D label vectors; the original
# column-vector reshape of y triggers a DataConversionWarning in fit()
# (hidden here by the global warnings filter).
X_train_clf = np.array(X_train_clf).reshape((-1, 1))
X_test_clf = np.array(X_test_clf).reshape((-1, 1))
y_train_clf = np.array(y_train_clf).ravel()
y_test_clf = np.array(y_test_clf).ravel()
clf = LogisticRegression()
clf.fit(X_train_clf, y_train_clf)
y_pred_clf = clf.predict(X_test_clf)
print(y_pred_clf)
# Metrics:
from sklearn import metrics
from sklearn.metrics import roc_auc_score
print(metrics.classification_report(y_test_clf, y_pred_clf))
print("roc_auc_score: ", roc_auc_score(y_test_clf, y_pred_clf))
# ModÚle pas performant, le problÚme pourrait venir du fait que la classe *success* est sous-représentée. <br>
# **Dans la suite on balance les classes**
# Balance the classes automatically: weights inversely proportional to class
# frequencies (the 'success' class is under-represented).
from sklearn.utils import class_weight
X_clf2 = data2['score_sparse_lemma']
Y_clf2 = data2['label_encoded']
X_train_clf2, X_test_clf2, y_train_clf2, y_test_clf2 = train_test_split(X_clf2, Y_clf2, test_size=0.2, random_state=42)
# Features stay 2-D; labels stay 1-D (a column vector would trigger a
# DataConversionWarning in fit()).
X_train_clf2 = np.array(X_train_clf2).reshape((-1, 1))
X_test_clf2 = np.array(X_test_clf2).reshape((-1, 1))
y_train_clf2 = np.array(y_train_clf2).ravel()
y_test_clf2 = np.array(y_test_clf2).ravel()
clf2 = LogisticRegression(class_weight='balanced')
clf2.fit(X_train_clf2, y_train_clf2)
y_pred_clf2 = clf2.predict(X_test_clf2)
print(y_pred_clf2)
print(metrics.classification_report(y_test_clf2, y_pred_clf2))
print("roc_auc_score: ", roc_auc_score(y_test_clf2, y_pred_clf2))
# Force explicit class weights (heavier penalty on the minority class).
from sklearn.utils import class_weight
X_clf3 = data2['score_sparse_lemma']
Y_clf3 = data2['label_encoded']
X_train_clf3, X_test_clf3, y_train_clf3, y_test_clf3 = train_test_split(X_clf3, Y_clf3, test_size=0.2, random_state=42)
X_train_clf3 = np.array(X_train_clf3).reshape((-1, 1))
X_test_clf3 = np.array(X_test_clf3).reshape((-1, 1))
# Labels kept 1-D to avoid sklearn's DataConversionWarning in fit().
y_train_clf3 = np.array(y_train_clf3).ravel()
y_test_clf3 = np.array(y_test_clf3).ravel()
class_weights = {0: .17, 1: .83}  # 0: error, 1: success
clf3 = LogisticRegression(class_weight=class_weights)
clf3.fit(X_train_clf3, y_train_clf3)
y_pred_clf3 = clf3.predict(X_test_clf3)
print(y_pred_clf3)
print(metrics.classification_report(y_test_clf3, y_pred_clf3))
print("roc_auc_score: ", roc_auc_score(y_test_clf3, y_pred_clf3))
# ## Approche probabiliste/autres pistes possibles
#
#
# **Idée 1**: traiter les questions comme des variables suivant une loi de Bernouilli(p), c'est à dire:
# - avec proba p, j'aurai 0 (= erreur)
# - avec proba 1-p, j'aurai 1 (= succes)
#
# et faire un test/des test statistiques avec différentes valeurs de $\ p = p_{0}$ pour déterminer le seuil optimal $\ p_{0}$
#
# Avantage:
#
# - solide du point de vue théorique
#
# Gros problÚme:
#
# - est-ce que la proba calculée par le retriever est une 'vraie' proba au sens mathématique du terme? Est ce que le score (ramené sur [0,1]) peut etre considéré / traité comme une proba?
#
# **Idée 2**: puisque errors et successes semblent (un peu prÚs) suivre un modÚle gaussien, tenter un GMM (gaussian mixture model) à traiter avec l'algorithme EM.
#
# ## Conclusions et suite
#
# - une approche de classification ne semble par etre trÚs performante pour traiter le problÚme
# - problÚme principal: les erreurs et les success semblent suivre une distribution quasiment égale (même moyenne, quasiement même maximum et minimum etc.). Il semblerait donc que le score pour chaque question soit 'indépendant' (40% peut etre un bon score pour une question mais pas pour une autre).
#
# A faire:
# - plus creuser les stats (ici on s'est restraint à k = 5, voir si pour k!= 5 le score se comporte de maniÚre différente)
# - (avec Pavel): faire un peu de recherche pour mieux comprendre pourquoi on a cette distribution et éventuellement voir si le problÚme a déjà été resolu
# - peut-être passer à la recherche d'un score plus performant pour le dense ? (peut etre plus facile à traiter). Le sparse donne cependant des meilleurs résultats donc par forçement utile.
#
#
#
| docs/piaf/retriever/score_threshold_sparse_retriever.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# <img src="../../img/ods_stickers.jpg">
#
# ## <center> [mlcourse.ai](https://mlcourse.ai) â ПÑкÑÑÑÑй кÑÑÑ OpenDataScience пП ЌаÑÐžÐœÐœÐŸÐŒÑ ÐŸÐ±ÑÑеМОÑ
#
# ÐвÑПÑÑ ÐŒÐ°ÑеÑОала: <NAME> (@aiho в Slack ODS), <NAME> (@yorko в Slack ODS). ÐаÑеÑОал ÑаÑпÑПÑÑÑаМÑеÑÑÑ ÐœÐ° ÑÑлПвОÑÑ
лОÑеМзОО [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). ÐПжМП ОÑпПлÑзПваÑÑ Ð² лÑбÑÑ
ÑелÑÑ
(ÑеЎакÑОÑПваÑÑ, пПпÑавлÑÑÑ Ðž бÑаÑÑ Ð·Ð° ПÑМПвÑ), кÑПЌе кПЌЌеÑÑеÑкОÑ
, МП Ñ ÐŸÐ±ÑзаÑелÑМÑÐŒ ÑпПЌОМаМОеЌ авÑПÑа ЌаÑеÑОала.
# # <center>ÐПЌаÑМее заЎаМОе â 7 (ЎеЌП)
# ## <center> ÐбÑÑеМОе без ÑÑОÑелÑ: ЌеÑПЎ главМÑÑ
ÐºÐŸÐŒÐ¿ÐŸÐœÐµÐœÑ Ðž клаÑÑеÑОзаÑОÑ
# Ð ÑÑПЌ заЎаМОО ÐŒÑ ÑазбеÑеЌÑÑ Ñ ÑеЌ, как ÑабПÑаÑÑ ÐŒÐµÑÐŸÐŽÑ ÑÐœÐžÐ¶ÐµÐœÐžÑ ÑазЌеÑМПÑÑО О клаÑÑеÑОзаÑОО ЎаММÑÑ
. ÐаПЎМП еÑе Ñаз пПпÑакÑОкÑеЌÑÑ Ð² заЎаÑе клаÑÑОÑОкаÑОО.
#
# ÐÑ Ð±ÑЎеЌ ÑабПÑаÑÑ Ñ ÐœÐ°Ð±ÐŸÑПЌ ЎаММÑÑ
[Samsung Human Activity Recognition](https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones). СкаÑайÑе ЎаММÑе [ПÑÑÑЎа](https://drive.google.com/file/d/14RukQ0ylM2GCdViUHBBjZ2imCaYcjlux/view?usp=sharing). ÐаММÑе пПÑÑÑпаÑÑ Ñ Ð°ÐºÑелеÑПЌеÑÑПв О гОÑПÑкПпПв ЌПбОлÑМÑÑ
ÑелеÑПМПв Samsung Galaxy S3 (пПЎÑПбМее пÑП пÑОзМакО â пП ÑÑÑлке Ма UCI вÑÑе), Ñакже ОзвеÑÑеМ вОЎ акÑОвМПÑÑО ÑелПвека Ñ ÑелеÑПМПЌ в каÑЌаМе â Ñ
ПЎОл лО ПМ, ÑÑПÑл, лежал, ÑОЎел ОлО Ñел ввеÑÑ
/вМОз пП леÑÑМОÑе.
#
# ÐМаÑале ÐŒÑ Ð¿ÑеЎÑÑавОЌ, ÑÑП вОЎ акÑОвМПÑÑО МаЌ МеОзвеÑÑМеМ, О пПпÑПбÑеЌ клаÑÑеÑОзПваÑÑ Ð»ÑЎей ÑОÑÑП Ма ПÑМПве ОЌеÑÑОÑ
ÑÑ Ð¿ÑОзМакПв. ÐаÑеЌ ÑеÑОЌ заЎаÑÑ ÐŸÐ¿ÑÐµÐŽÐµÐ»ÐµÐœÐžÑ Ð²ÐžÐŽÐ° ÑОзОÑеÑкПй акÑОвМПÑÑО ОЌеММП как заЎаÑÑ ÐºÐ»Ð°ÑÑОÑОкаÑОО.
#
# ÐапПлМОÑе кПЎ в клеÑкаÑ
(гЎе МапОÑаМП "ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ") О ПÑвеÑÑÑе Ма вПпÑПÑÑ Ð² [веб-ÑПÑЌе](https://docs.google.com/forms/d/1qzcrfsNFy-e4TW59v2fqMj_OTom2SIOxtq4MWlI92p0).
# +
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm_notebook
# %matplotlib inline
from matplotlib import pyplot as plt
plt.style.use(['seaborn-darkgrid'])
plt.rcParams['figure.figsize'] = (12, 9)
plt.rcParams['font.family'] = 'DejaVu Sans'
from sklearn import metrics
from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
RANDOM_STATE = 17
# +
# Samsung HAR data: 561 precomputed features per time window; labels are
# integer activity codes 1-6 (see the legend below).
X_train = np.loadtxt("../../data/samsung_HAR/samsung_train.txt")
y_train = np.loadtxt("../../data/samsung_HAR/samsung_train_labels.txt").astype(int)
X_test = np.loadtxt("../../data/samsung_HAR/samsung_test.txt")
y_test = np.loadtxt("../../data/samsung_HAR/samsung_test_labels.txt").astype(int)
# -
# ÐÑПвеÑОЌ ÑазЌеÑМПÑÑО
assert(X_train.shape == (7352, 561) and y_train.shape == (7352,))
assert(X_test.shape == (2947, 561) and y_test.shape == (2947,))
# ÐÐ»Ñ ÐºÐ»Ð°ÑÑеÑОзаÑОО МаЌ Ме МÑжеМ векÑÐŸÑ ÐŸÑвеÑПв, пПÑÑÐŸÐŒÑ Ð±ÑЎеЌ ÑабПÑаÑÑ Ñ ÐŸÐ±ÑеЎОМеМОеЌ ПбÑÑаÑÑей О ÑеÑÑПвПй вÑбПÑПк. ÐбÑеЎОМОÑе *X_train* Ñ *X_test*, а *y_train* â Ñ *y_test*.
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
X =
y =
# ÐпÑеЎелОЌ ÑОÑлП ÑМОкалÑМÑÑ
зМаÑеМОй ЌеÑПк ÑелевПгП клаÑÑа.
np.unique(y)
n_classes = np.unique(y).size
# [ÐÑО ЌеÑкО ÑППÑвеÑÑÑвÑÑÑ:](https://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.names)
# - 1 - Ñ
ПЎÑбе
# - 2 - пПЎÑÐµÐŒÑ Ð²Ð²ÐµÑÑ
пП леÑÑМОÑе
# - 3 - ÑпÑÑÐºÑ Ð¿ÐŸ леÑÑМОÑе
# - 4 - ÑОЎеМОÑ
# - 5 - ÑÑПÑМОÑ
# - 6 - лежаМОÑ
#
# *Ñж пÑПÑÑОÑе, еÑлО звÑÑаМОе ÑÑОÑ
ÑÑÑеÑÑвОÑелÑМÑÑ
кажеÑÑÑ ÐºÐŸÑÑвÑÐŒ :)*
# ÐÑЌаÑÑÑабОÑÑйÑе вÑбПÑÐºÑ Ñ Ð¿ÐŸÐŒÐŸÑÑÑ `StandardScaler` Ñ Ð¿Ð°ÑаЌеÑÑаЌО пП ÑЌПлÑаМОÑ.
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
scaler =
X_scaled =
# ÐПМОжаеЌ ÑазЌеÑМПÑÑÑ Ñ Ð¿ÐŸÐŒÐŸÑÑÑ PCA, ПÑÑавлÑÑ ÑÑПлÑкП кПЌпПМеМÑ, ÑкПлÑкП МÑжМП ÐŽÐ»Ñ ÑПгП, ÑÑÐŸÐ±Ñ ÐŸÐ±ÑÑÑМОÑÑ ÐºÐ°Ðº ЌОМОЌÑÐŒ 90% ЎОÑпеÑÑОО ОÑÑ
ПЎМÑÑ
(ПÑЌаÑÑÑабОÑПваММÑÑ
) ЎаММÑÑ
. ÐÑпПлÑзÑйÑе ПÑЌаÑÑÑабОÑПваММÑÑ Ð²ÑбПÑÐºÑ Ðž заÑОкÑОÑÑйÑе random_state (кПМÑÑаМÑа RANDOM_STATE).
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
pca =
X_pca =
# **ÐПпÑÐŸÑ 1:**<br>
# ÐакПе ЌОМОЌалÑМПе ÑОÑлП главМÑÑ
ÐºÐŸÐŒÐ¿ÐŸÐœÐµÐœÑ ÐœÑжМП вÑЎелОÑÑ, ÑÑÐŸÐ±Ñ ÐŸÐ±ÑÑÑМОÑÑ 90% ЎОÑпеÑÑОО ОÑÑ
ПЎМÑÑ
(ПÑЌаÑÑÑабОÑПваММÑÑ
) ЎаММÑÑ
?
# +
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
# -
# **ÐаÑОаМÑÑ:**
# - 56
# - 65
# - 66
# - 193
# **ÐПпÑÐŸÑ 2:**<br>
# СкПлÑкП пÑПÑеМÑПв ЎОÑпеÑÑОО пÑОÑ
ПЎОÑÑÑ ÐœÐ° пеÑвÑÑ Ð³Ð»Ð°Ð²ÐœÑÑ ÐºÐŸÐŒÐ¿ÐŸÐœÐµÐœÑÑ? ÐкÑÑглОÑе ЎП ÑелÑÑ
пÑПÑеМÑПв.
#
# **ÐаÑОаМÑÑ:**
# - 45
# - 51
# - 56
# - 61
# +
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
# -
# ÐОзÑалОзОÑÑйÑе ЎаММÑе в пÑПекÑОО Ма пеÑвÑе Ўве главМÑе кПЌпПМеМÑÑ.
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
plt.scatter(, , c=y, s=20, cmap='viridis');
# **ÐПпÑÐŸÑ 3:**<br>
# ÐÑлО вÑе пПлÑÑОлПÑÑ Ð¿ÑавОлÑМП, ÐÑ ÑвОЎОÑе ÑкПлÑкП-ÑП клаÑÑеÑПв, пПÑÑО ОЎеалÑМП ПÑЎелеММÑÑ
ÐŽÑÑг ÐŸÑ ÐŽÑÑга. ÐакОе Ð²ÐžÐŽÑ Ð°ÐºÑОвМПÑÑО вÑ
ПЎÑÑ Ð² ÑÑО клаÑÑеÑÑ?<br>
#
# **ÐÑвеÑ:**
# - 1 клаÑÑеÑ: вÑе 6 акÑОвМПÑÑей
# - 2 клаÑÑеÑа: (Ñ
ПЎÑба, пПЎÑеЌ ввеÑÑ
пП леÑÑМОÑе, ÑпÑÑк пП леÑÑМОÑе) О (ÑОЎеМОе, ÑÑПÑМОе, лежаМОе)
# - 3 клаÑÑеÑа: (Ñ
ПЎÑба), (пПЎÑеЌ ввеÑÑ
пП леÑÑМОÑе, ÑпÑÑк пП леÑÑМОÑе) О (ÑОЎеМОе, ÑÑПÑМОе, лежаМОе)
# - 6 клаÑÑеÑПв
# ------------------------------
# СЎелайÑе клаÑÑеÑОзаÑÐžÑ ÐŽÐ°ÐœÐœÑÑ
ЌеÑПЎПЌ `KMeans`, ПбÑÑОв ÐŒÐŸÐŽÐµÐ»Ñ ÐœÐ° ЎаММÑÑ
ÑП ÑМОжеММПй за ÑÑÐµÑ PCA ÑазЌеÑМПÑÑÑÑ. РЎаММПЌ ÑлÑÑае ÐŒÑ Ð¿ÐŸÐŽÑкажеЌ, ÑÑП МÑжМП ОÑкаÑÑ ÐžÐŒÐµÐœÐœÐŸ 6 клаÑÑеÑПв, МП в ПбÑеЌ ÑлÑÑае ÐŒÑ ÐœÐµ бÑЎеЌ зМаÑÑ, ÑкПлÑкП клаÑÑеÑПв МаЎП ОÑкаÑÑ.
#
# ÐаÑаЌеÑÑÑ:
#
# - **n_clusters** = n_classes (ÑОÑлП ÑМОкалÑМÑÑ
ЌеÑПк ÑелевПгП клаÑÑа)
# - **n_init** = 100
# - **random_state** = RANDOM_STATE (ÐŽÐ»Ñ Ð²ÐŸÑпÑПОзвПЎОЌПÑÑО ÑезÑлÑÑаÑа)
#
# ÐÑÑалÑМÑе паÑаЌеÑÑÑ ÑП зМаÑеМОÑЌО пП ÑЌПлÑаМОÑ.
# +
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
# -
# ÐОзÑалОзОÑÑйÑе ЎаММÑе в пÑПекÑОО Ма пеÑвÑе Ўве главМÑе кПЌпПМеМÑÑ. РаÑкÑаÑÑÑе ÑПÑкО в ÑППÑвеÑÑÑвОО Ñ Ð¿ÐŸÐ»ÑÑеММÑЌО ЌеÑкаЌО клаÑÑеÑПв.
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
plt.scatter(, , c=cluster_labels, s=20, cmap='viridis');
# ÐПÑЌПÑÑОÑе Ма ÑППÑвеÑÑÑвОе ÐŒÐµÐ¶ÐŽÑ ÐŒÐµÑкаЌО клаÑÑеÑПв О ОÑÑ
ПЎМÑЌО ЌеÑкаЌО клаÑÑПв О Ма ÑП, какОе Ð²ÐžÐŽÑ Ð°ÐºÑОвМПÑÑей алгПÑОÑÐŒ `KMeans` пÑÑаеÑ.
tab = pd.crosstab(y, cluster_labels, margins=True)
tab.index = ['Ñ
ПЎÑба', 'пПЎÑеЌ ввеÑÑ
пП леÑÑМОÑе',
'ÑпÑÑк пП леÑÑМОÑе', 'ÑОЎеМОе', 'ÑÑПÑМОе', 'лежаМОе', 'вÑе']
tab.columns = ['cluster' + str(i + 1) for i in range(6)] + ['вÑе']
tab
# ÐОЎОЌ, ÑÑП ÐºÐ°Ð¶ÐŽÐŸÐŒÑ ÐºÐ»Ð°ÑÑÑ (Ñ.е. кажЎПй акÑОвМПÑÑО) ÑППÑвеÑÑÑвÑÑÑ ÐœÐµÑкПлÑкП клаÑÑеÑПв. ÐавайÑе пПÑЌПÑÑОЌ Ма ЌакÑОЌалÑМÑÑ ÐŽÐŸÐ»Ñ ÐŸÐ±ÑекÑПв в клаÑÑе, ПÑМеÑеММÑÑ
к какПЌÑ-ÑП ÐŸÐŽÐœÐŸÐŒÑ ÐºÐ»Ð°ÑÑеÑÑ. ÐÑП бÑÐŽÐµÑ Ð¿ÑПÑÑПй ЌеÑÑОкПй, Ñ
аÑакÑеÑОзÑÑÑей, МаÑкПлÑкП легкП клаÑÑ ÐŸÑЎелÑеÑÑÑ ÐŸÑ ÐŽÑÑгОÑ
пÑО клаÑÑеÑОзаÑОО.
#
# ÐÑОЌеÑ: еÑлО ÐŽÐ»Ñ ÐºÐ»Ð°ÑÑа "ÑпÑÑк пП леÑÑМОÑе", в кПÑПÑПЌ 1406 ПбÑекÑПв, ÑаÑпÑеЎелеМОе клаÑÑеÑПв ÑакПе:
# - клаÑÑÐµÑ 1 â 900
# - клаÑÑÐµÑ 3 â 500
# - клаÑÑÐµÑ 6 â 6,
#
# ÑП ÑÐ°ÐºÐ°Ñ ÐŽÐŸÐ»Ñ Ð±ÑÐŽÐµÑ 900 / 1406 $\approx$ 0.64.
#
#
# **ÐПпÑÐŸÑ 4:**<br>
# ÐакПй вОЎ акÑОвМПÑÑО ПÑЎелОлÑÑ ÐŸÑ ÐŸÑÑалÑМÑÑ
лÑÑÑе вÑегП в ÑеÑЌОМаÑ
пÑПÑÑПй ЌеÑÑОкО, ПпОÑаММПй вÑÑе?<br>
#
# **ÐÑвеÑ:**
# - Ñ
ПЎÑба
# - ÑÑПÑМОе
# - ÑпÑÑк пП леÑÑМОÑе
# - пеÑеÑОÑлеММÑе ваÑОаМÑÑ ÐœÐµ пПЎÑ
ПЎÑÑ
# ÐОЎМП, ÑÑП kMeans Ме ПÑÐµÐœÑ Ñ
ПÑПÑП ПÑлОÑÐ°ÐµÑ ÑПлÑкП акÑОвМПÑÑО ÐŽÑÑг ÐŸÑ ÐŽÑÑга. ÐÑпПлÑзÑйÑе ЌеÑПЎ лПкÑÑ, ÑÑÐŸÐ±Ñ Ð²ÑбÑаÑÑ ÐŸÐ¿ÑОЌалÑМПе кПлОÑеÑÑвП клаÑÑеÑПв. ÐаÑаЌеÑÑÑ Ð°Ð»Ð³ÐŸÑОÑЌа О ЎаММÑе ОÑпПлÑзÑеЌ Ñе же, ÑÑП ÑаМÑÑе, ЌеМÑеЌ ÑПлÑкП `n_clusters`.
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
inertia = []
for k in tqdm_notebook(range(1, n_classes + 1)):
#
#
# **ÐПпÑÐŸÑ 5:**<br>
# ÐакПе кПлОÑеÑÑвП клаÑÑеÑПв ПпÑОЌалÑМП вÑбÑаÑÑ, ÑПглаÑМП ЌеÑÐŸÐŽÑ Ð»ÐŸÐºÑÑ?<br>
#
# **ÐÑвеÑ:**
# - 1
# - 2
# - 3
# - 4
# ------------------------
# ÐПпÑПбÑеЌ еÑе ПЎОМ ЌеÑПЎ клаÑÑеÑОзаÑОО, кПÑПÑÑй ПпОÑÑвалÑÑ Ð² ÑÑаÑÑе â аглПЌеÑаÑОвМÑÑ ÐºÐ»Ð°ÑÑеÑОзаÑОÑ.
# Ward-linkage agglomerative clustering on the PCA-reduced data, with one
# cluster per activity class.
ag = AgglomerativeClustering(n_clusters=n_classes,
                             linkage='ward').fit(X_pca)
# ÐПÑÑОÑайÑе Adjusted Rand Index (`sklearn.metrics`) ÐŽÐ»Ñ Ð¿ÐŸÐ»ÑÑОвÑегПÑÑ ÑÐ°Ð·Ð±ÐžÐµÐœÐžÑ ÐœÐ° клаÑÑеÑÑ Ðž ÐŽÐ»Ñ `KMeans` Ñ Ð¿Ð°ÑаЌеÑÑаЌО Оз Ð·Ð°ÐŽÐ°ÐœÐžÑ Ðº 4 вПпÑПÑÑ.
# +
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
# -
# **ÐПпÑÐŸÑ 6:**<br>
# ÐÑЌеÑÑÑе вÑе веÑМÑе ÑÑвеÑжЎеМОÑ.<br>
#
# **ÐаÑОаМÑÑ:**
# - СПглаÑМП ARI, KMeans ÑпÑавОлÑÑ Ñ ÐºÐ»Ð°ÑÑеÑОзаÑОей Ñ
Ñже, ÑеЌ Agglomerative Clustering
# - ÐÐ»Ñ ARI Ме ÐžÐŒÐµÐµÑ Ð·ÐœÐ°ÑÐµÐœÐžÑ ÐºÐ°ÐºÐžÐµ ОЌеММП ЌеÑкО пÑОÑÐ²ÐŸÐµÐœÑ ÐºÐ»Ð°ÑÑеÑаЌ, ÐžÐŒÐµÐµÑ Ð·ÐœÐ°ÑеМОе ÑПлÑкП ÑазбОеМОе ПбÑекÑПв Ма клаÑÑеÑÑ
# - Ð ÑлÑÑае ÑлÑÑайМПгП ÑÐ°Ð·Ð±ÐžÐµÐœÐžÑ ÐœÐ° клаÑÑеÑÑ ARI бÑÐŽÐµÑ Ð±Ð»ÐžÐ·ÐŸÐº к МÑлÑ
# -------------------------------
# ÐПжМП заЌеÑОÑÑ, ÑÑП заЎаÑа Ме ПÑÐµÐœÑ Ñ
ПÑПÑП ÑеÑаеÑÑÑ ÐžÐŒÐµÐœÐœÐŸ как заЎаÑа клаÑÑеÑОзаÑОО, еÑлО вÑЎелÑÑÑ ÐœÐµÑкПлÑкП клаÑÑеÑПв (> 2). ÐавайÑе ÑепеÑÑ ÑеÑОЌ заЎаÑÑ ÐºÐ»Ð°ÑÑОÑОкаÑОО, вÑпПЌМОв, ÑÑП ЎаММÑе Ñ ÐœÐ°Ñ ÑазЌеÑеМÑ.
#
# ÐÐ»Ñ ÐºÐ»Ð°ÑÑОÑОкаÑОО ОÑпПлÑзÑйÑе ЌеÑПЎ ПпПÑМÑÑ
векÑПÑПв â клаÑÑ `sklearn.svm.LinearSVC`. ÐÑ Ð² кÑÑÑе ПÑЎелÑМП Ме ÑаÑÑЌаÑÑОвалО ÑÑÐŸÑ Ð°Ð»Ð³ÐŸÑОÑÐŒ, МП ПМ ПÑÐµÐœÑ ÐžÐ·Ð²ÐµÑÑеМ, пПÑОÑаÑÑ Ð¿ÑП МегП ЌПжМП, МапÑОЌеÑ, в ЌаÑеÑОалаÑ
ÐÐ²Ð³ÐµÐœÐžÑ Ð¡ÐŸÐºÐŸÐ»ÐŸÐ²Ð° â [ÑÑÑ](https://github.com/esokolov/ml-course-msu/blob/master/ML16/lecture-notes/Sem11_linear.pdf).
#
# ÐаÑÑÑПйÑе ÐŽÐ»Ñ `LinearSVC` гОпеÑпаÑаЌеÑÑ `C` Ñ Ð¿ÐŸÐŒÐŸÑÑÑ `GridSearchCV`.
#
# - ÐбÑÑОÑе МПвÑй `StandardScaler` Ма ПбÑÑаÑÑей вÑбПÑке (ÑП вÑеЌО ОÑÑ
ПЎМÑЌО пÑОзМакаЌО), пÑЌОеМОÑе ЌаÑÑÑабОÑПваМОе к ÑеÑÑПвПй вÑбПÑке
# - Ð `GridSearchCV` ÑкажОÑе cv=3.
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
#
X_train_scaled =
X_test_scaled =
svc = LinearSVC(random_state=RANDOM_STATE)
svc_params = {'C': [0.001, 0.01, 0.1, 1, 10]}
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
best_svc =
# +
# ÐÐ°Ñ ÐºÐŸÐŽ зЎеÑÑ
# -
# **ÐПпÑÐŸÑ 7**<br>
# ÐакПе зМаÑеМОе гОпеÑпаÑаЌеÑÑа `C` бÑлП вÑбÑаМП лÑÑÑОЌ пП ОÑПгаЌ кÑПÑÑ-валОЎаÑОО?<br>
#
# **ÐÑвеÑ:**
# - 0.001
# - 0.01
# - 0.1
# - 1
# - 10
y_predicted = best_svc.predict(X_test_scaled)
tab = pd.crosstab(y_test, y_predicted, margins=True)
tab.index = ['Ñ
ПЎÑба', 'пПЎÑеЌ ввеÑÑ
пП леÑÑМОÑе', 'ÑпÑÑк пП леÑÑМОÑе',
'ÑОЎеМОе', 'ÑÑПÑМОе', 'лежаМОе', 'вÑе']
tab.columns = tab.index
tab
# **ÐПпÑÐŸÑ 8:**<br>
# ÐакПй вОЎ акÑОвМПÑÑО SVM ПпÑеЎелÑÐµÑ Ñ
Ñже вÑегП в ÑеÑЌОМаÑ
ÑПÑМПÑÑО? ÐПлМПÑÑ? <br>
#
# **ÐÑвеÑ:**
# - пП ÑПÑМПÑÑО â пПЎÑеЌ ввеÑÑ
пП леÑÑМОÑе, пП пПлМПÑе â лежаМОе
# - пП ÑПÑМПÑÑО â лежаМОе, пП пПлМПÑе â ÑОЎеМОе
# - пП ÑПÑМПÑÑО â Ñ
ПЎÑба, пП пПлМПÑе â Ñ
ПЎÑба
# - пП ÑПÑМПÑÑО â ÑОЎеМОе, пП пПлМПÑе â ÑÑПÑМОе
# ÐакПМеÑ, пÑПЎелайÑе ÑП же ÑаЌПе, ÑÑП в 7 вПпÑПÑе, ÑПлÑкП ЎПбавОв PCA.
#
# - ÐÑпПлÑзÑйÑе вÑбПÑкО `X_train_scaled` О `X_test_scaled`
# - ÐбÑÑОÑе ÑÐŸÑ Ð¶Ðµ PCA, ÑÑП ÑаМÑÑе, Ма ПÑЌаÑÑабОÑПваММПй ПбÑÑаÑÑей вÑбПÑке, пÑОЌеМОÑе пÑеПбÑазПваМОе к ÑеÑÑПвПй
# - ÐаÑÑÑПйÑе гОпеÑпаÑаЌеÑÑ `C` Ма кÑПÑÑ-валОЎаÑОО пП ПбÑÑаÑÑей вÑбПÑке Ñ PCA-пÑеПбÑазПваМОеЌ. ÐÑ Ð·Ð°ÐŒÐµÑОÑе, МаÑкПлÑкП ÑÑП пÑПÑ
ÐŸÐŽÐžÑ Ð±ÑÑÑÑее, ÑеЌ ÑаМÑÑе.
#
# **ÐПпÑÐŸÑ 9:**<br>
# ÐакПва ÑазМПÑÑÑ ÐŒÐµÐ¶ÐŽÑ Ð»ÑÑÑОЌ каÑеÑÑвПЌ (ЎПлей веÑМÑÑ
ПÑвеÑПв) Ма кÑПÑÑ-валОЎаÑОО в ÑлÑÑае вÑеÑ
561 ОÑÑ
ПЎМÑÑ
пÑОзМакПв О вП вÑПÑПЌ ÑлÑÑае, кПгЎа пÑОЌеМÑлÑÑ ÐŒÐµÑПЎ главМÑÑ
кПЌпПМеМÑ? ÐкÑÑглОÑе ЎП ÑелÑÑ
пÑПÑеМÑПв.<br>
#
# **ÐаÑОаМÑÑ:**
# - ÐаÑеÑÑвП ПЎОМакПвПе
# - 2%
# - 4%
# - 10%
# - 20%
#
# **ÐПпÑÐŸÑ 10:**<br>
# ÐÑбеÑОÑе вÑе веÑМÑе ÑÑвеÑжЎеМОÑ:
#
# **ÐаÑОаМÑÑ:**
# - ÐеÑПЎ главМÑÑ
ÐºÐŸÐŒÐ¿ÐŸÐœÐµÐœÑ Ð² ЎаММПЌ ÑлÑÑае пПзвПлОл ÑЌеМÑÑОÑÑ Ð²ÑÐµÐŒÑ ÐŸÐ±ÑÑÐµÐœÐžÑ ÐŒÐŸÐŽÐµÐ»Ðž, пÑО ÑÑПЌ каÑеÑÑвП (ÐŽÐŸÐ»Ñ Ð²ÐµÑМÑÑ
ПÑвеÑПв Ма кÑПÑÑ-валОЎаÑОО) ПÑÐµÐœÑ Ð¿ÐŸÑÑÑаЎалП, бПлее ÑеЌ Ма 10%
# - PCA ЌПжМП ОÑпПлÑзПваÑÑ ÐŽÐ»Ñ Ð²ÐžÐ·ÑалОзаÑОО ЎаММÑÑ
, ПЎМакП ÐŽÐ»Ñ ÑÑПй заЎаÑО еÑÑÑ Ðž лÑÑÑе пПЎÑ
ПЎÑÑОе ЌеÑПЎÑ, МапÑОЌеÑ, tSNE. ÐаÑП PCA ÐžÐŒÐµÐµÑ ÐŒÐµÐœÑÑÑÑ Ð²ÑÑОÑлОÑелÑМÑÑ ÑлПжМПÑÑÑ
# - PCA ÑÑÑÐŸÐžÑ Ð»ÐžÐœÐµÐ¹ÐœÑе кПЌбОМаÑОО ОÑÑ
ПЎМÑÑ
пÑОзМакПв, О в МекПÑПÑÑÑ
заЎаÑаÑ
ПМО ЌПгÑÑ Ð¿Ð»ÐŸÑ
П ОМÑеÑпÑеÑОÑПваÑÑÑÑ ÑелПвекПЌ
| jupyter_russian/assignments_demo/assignment07_unsupervised_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating a System
# ## Conventional methods
# Systems are defined by a recycle stream (i.e. a tear stream; if any), and a path of unit operations and nested systems. A System object takes care of solving recycle streams by iteratively running its path of units and subsystems until the recycle converges to steady state. Systems can be manually created or automatically generated via the flowsheet or by context management.
# ### Manually generated
# Manually creating a system is **not recommended** as it requires an exponential amount of time and effort for an individual to layout an accurate path. Here we create a trivial system manually as a simple exercise:
# + tags=["nbval-ignore-output"]
import biosteam as bst
bst.settings.set_thermo(['Water'])
feed = bst.Stream('feed', Water=100)
recycle = bst.Stream('recycle')  # the recycle (tear) stream the System solves iteratively
effluent = bst.Stream('effluent')
T1 = bst.MixTank('T1', ins=[feed, recycle])
P1 = bst.Pump('P1', T1-0)  # -pipe- notation: T1-0 is T1.outs[0]
S1 = bst.Splitter('S1', P1-0, [effluent, recycle], split=0.5)
# Manually specify the unit path and the recycle stream that closes the loop.
manual_sys = bst.System('manual_sys', path=[T1, P1, S1], recycle=recycle)
manual_sys.simulate()
manual_sys.diagram(
    kind='cluster', # Cluster diagrams highlight recycle streams and nested systems.
    number=True, # This numbers each unit according to their path order
)
# -
manual_sys.show()
# Note that the inlets and outlets to a system are inherently connected to the unit operations within the system, but we can still connect systems just like unit operations, as depicted future examples.
# ### Autogenerated from the flowsheet
# The **recommended** way of creating systems is to use the flowsheet. Here we expand on the existing process and create a new system using the flowsheet:
# + tags=["nbval-ignore-output"]
water = bst.Stream('water', Water=10)
P2 = bst.Pump('P2', manual_sys-0) # -pipe- notation equivalent to manual_sys.outs[0]
M2 = bst.Mixer('M2', [P2-0, water])
flowsheet_sys = bst.main_flowsheet.create_system('flowsheet_sys')
flowsheet_sys.simulate()
flowsheet_sys.diagram(kind='cluster', number=True)
# -
flowsheet_sys.show()
# ### Autogenerated by context management
# System objects' context management feature allows for creating systems of only the units created within the given context:
# + tags=["nbval-ignore-output"]
downstream_recycle = bst.Stream('downstream_recycle')
product = bst.Stream('product')
with bst.System('context_sys') as context_sys:
T2 = bst.MixTank('T2', ins=['', downstream_recycle])
P3 = bst.Pump('P3', T2-0)
S2 = bst.Splitter('S2', P3-0, [product, downstream_recycle], split=0.5)
# The feed is empty, no need to run system (yet)
context_sys.diagram('cluster')
# -
context_sys.show()
# Let's connect two systems together and create a new system from the flowsheet:
# + tags=["nbval-ignore-output"]
# -pipe- notation equivalent to context_sys.ins[:] = [flowsheet_sys.outs[0]]
flowsheet_sys-0-context_sys
complete_sys = bst.main_flowsheet.create_system('complete_sys')
complete_sys.simulate()
complete_sys.diagram('cluster')
# -
complete_sys.show()
# ## Drop-in systems
# ### A simple example
# When a system is created by a function, it's called a drop-in system. Here, we create a sugarcane to ethanol production system without facilities (e.g., cooling tower, boiler) by using drop-in systems:
# + tags=["nbval-ignore-output"]
from biorefineries.sugarcane import chemicals
from biosteam import Stream, System, settings, main_flowsheet
from biorefineries.sugarcane import (
create_juicing_system_with_fiber_screener as create_juicing_system,
create_sucrose_to_ethanol_system
)
main_flowsheet.clear() # Remove previous unit operations to prevent ID-conflict warnings
settings.set_thermo(chemicals)
denaturant = Stream('denaturant',
Octane=230.69,
units='kg/hr',
price=0.756)
sucrose_solution = Stream('sucrose_solution')
juicing_sys = create_juicing_system(
ID='juicing_sys', # ID of system
outs=[sucrose_solution], # Place sucrose_solution at the 0th outlet (all other streams are defaulted)
)
sucrose_to_ethanol_sys = create_sucrose_to_ethanol_system(ins=[sucrose_solution, denaturant])
# Here are a couple of other ways to connect systems:
# Manually:
# >>> sucrose_to_ethanol_sys.ins[0] = juicing_sys.outs[0]
# With -pipe- notation:
# >>> juicing_sys-0-0-sucrose_to_ethanol_sys
# Manually create a new system and simulate
sugarcane_to_ethanol_sys = System('sugarcane_to_ethanol_sys',
path=[juicing_sys, sucrose_to_ethanol_sys])
sugarcane_to_ethanol_sys.simulate()
sugarcane_to_ethanol_sys.diagram(kind='surface')
# -
sugarcane_to_ethanol_sys.show(data=False)
# The number of inlets and outlets are rather large. It may be helpful to specify what inlets and outlets do we want to expose:
s = main_flowsheet.stream
sugarcane_to_ethanol_sys.load_inlet_ports([s.sugarcane])
sugarcane_to_ethanol_sys.load_outlet_ports([s.ethanol, s.bagasse])
sugarcane_to_ethanol_sys.show(data=False)
# The ethanol product is now the 0th stream
sucrose_to_ethanol_sys.outs[0].show()
# ### System factories
# Both `create_juicing_system` and `create_sucrose_to_ethanol_system` are [SystemFactory](../process_tools/SystemFactory.txt) objects, which accept the system `ID`, `ins`, and `outs` (similar to unit operations) and return a new system. Let's first have a look at some of the system factories in the [biorefineries.sugarcane](https://github.com/BioSTEAMDevelopmentGroup/Bioindustrial-Park/tree/master/BioSTEAM%202.x.x/biorefineries/sugarcane) library:
create_juicing_system.show()
print()
create_sucrose_to_ethanol_system.show()
# [SystemFactory](../process_tools/SystemFactory.txt) objects are composed of a function `f` which creates the unit operations, a predefined system `ID`, and `ins` and `outs` dictionaries that serve as keyword arguments to initialize the system's default inlets and outlets.
#
# The signature of a SystemFactory is `f(ID=None, ins=None, outs=None, mockup=False, area=None, udct=None, ...)`. The additional parameters (i.e. mockup, area, and udct) will be discussed in the next section.
# ### Saving time with mock systems
# When creating a biorefinery, we may not be interested in all the subsystems we created with SystemFactory objects. We can save a few milliseconds in computational time (per system) by using mock systems:
main_flowsheet.clear() # Remove previous unit operations to prevent ID-conflict warnings
juicing_sys = create_juicing_system(
outs=[sucrose_solution],
mockup=True
)
sucrose_to_ethanol_sys = create_sucrose_to_ethanol_system(
ins=[sucrose_solution, denaturant],
mockup=True
)
# Note that mock systems don't have anything other than `ins`, `outs`, and `units`
juicing_sys.show()
sucrose_to_ethanol_sys.show()
# + tags=["nbval-ignore-output"]
# We can create the system using the flowsheet
sugarcane_to_ethanol_sys = main_flowsheet.create_system('sugarcane_to_ethanol_sys')
sugarcane_to_ethanol_sys.simulate()
sugarcane_to_ethanol_sys.diagram()
# -
sucrose_to_ethanol_sys.outs[0].show()
# ### Using the area naming convention
# The area naming convention follows {letter}{area + number} where the letter depends on
# the unit operation as follows:
#
# * C: Centrifuge
# * D: Distillation column
# * E: Evaporator
# * F: Flash tank
# * H: Heat exchange
# * M: Mixer
# * P: Pump (including conveying belt)
# * R: Reactor
# * S: Splitter (including solid/liquid separator)
# * T: Tank or bin for storage
# * U: Other units
# * J: Junction, not a physical unit (serves to adjust streams)
# * PS: Process specification, not a physical unit (serves to adjust streams)
#
# For example, the first mixer in area 100 would be named M101. When calling a SystemFactory object, we can pass the `area` to name unit operations according to the area convention. In the following example, we name all unit operations in the juicing system under area 300:
main_flowsheet.clear() # Remove previous unit operations
juicing_sys = create_juicing_system(area=300, mockup=True)
juicing_sys.show()
# To access unit operations by their default ID (as originally defined in SystemFactory code), you can request a unit dictionary by passing `udct`=True:
main_flowsheet.clear() # Remove previous unit operations
# When udct is True, both the system and the unit dictionary are returned
juicing_sys, udct = create_juicing_system(mockup=True, area=300, udct=True)
unit = udct['T201']
print(repr(unit)) # Originally, this unit was named T201
# ### Creating system factories
# Create a SystemFactory object for creating sugarcane to ethanol systems:
# +
from biosteam import System, SystemFactory
@SystemFactory(
    ID='sugarcane_to_ethanol_sys',
    ins=[create_juicing_system.ins[0],  # Reuse default from juicing system factory
         dict(ID='denaturant',
              price=0.756)],
    outs=[dict(ID='ethanol',
               price=0.789),
          dict(ID='bagasse')]
)
def create_sugarcane_to_ethanol_system(ins, outs):
    """Build the sugarcane-to-ethanol system by chaining the juicing and
    sucrose-to-ethanol subsystems (created as mockups, so the enclosing
    SystemFactory assembles the final system from their units)."""
    # ins and outs will be stream objects
    sugarcane, denaturant = ins
    ethanol, bagasse = outs
    juicing_sys = create_juicing_system(
        ins=sugarcane,
        outs=[None, bagasse], # None will default to a stream
        mockup=True
    )
    sucrose_to_ethanol_sys = create_sucrose_to_ethanol_system(
        ins=(juicing_sys-0, denaturant),
        outs=ethanol,
        mockup=True,
    )
# The system factory builds a system from units created by the function
create_sugarcane_to_ethanol_system.show()
# -
# Create the sugarcane to ethanol system and simulate:
main_flowsheet.clear() # Remove previous unit operations
sugarcane_to_ethanol_sys = create_sugarcane_to_ethanol_system()
sugarcane_to_ethanol_sys.simulate()
sugarcane_to_ethanol_sys.show()
# Biorefinery systems can be created by connecting smaller systems, allowing us to create alternative configurations with ease. The [biorefineries](https://github.com/BioSTEAMDevelopmentGroup/Bioindustrial-Park) library has yet to fully implement SystemFactory objects across all functions that create systems, but that is the goal.
| docs/tutorial/Creating_a_System.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# =============================================================
# Receiver Operating Characteristic (ROC) with cross validation
# =============================================================
#
# Example of Receiver Operating Characteristic (ROC) metric to evaluate
# classifier output quality using cross-validation.
#
# ROC curves typically feature true positive rate on the Y axis, and false
# positive rate on the X axis. This means that the top left corner of the plot is
# the "ideal" point - a false positive rate of zero, and a true positive rate of
# one. This is not very realistic, but it does mean that a larger area under the
# curve (AUC) is usually better.
#
# The "steepness" of ROC curves is also important, since it is ideal to maximize
# the true positive rate while minimizing the false positive rate.
#
# This example shows the ROC response of different datasets, created from K-fold
# cross-validation. Taking all of these curves, it is possible to calculate the
# mean area under curve, and see the variance of the curve when the
# training set is split into different subsets. This roughly shows how the
# classifier output is affected by changes in the training data, and how
# different the splits generated by K-fold cross-validation are from one another.
#
# <div class="alert alert-info"><h4>Note</h4><p>See also :func:`sklearn.metrics.auc_score`,
# :func:`sklearn.model_selection.cross_val_score`,
# `sphx_glr_auto_examples_model_selection_plot_roc.py`,</p></div>
#
#
#
# +
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle

from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold

# `scipy.interp` was deprecated and removed in SciPy 1.6; `np.interp` has the
# same signature (x, xp, fp). Keep the old name as an alias in case later
# cells still refer to it.
interp = np.interp

# #############################################################################
# Data IO and generation

# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]  # keep classes 0 and 1 -> binary problem
n_samples, n_features = X.shape

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# #############################################################################
# Classification and ROC analysis

# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
                     random_state=random_state)

tprs = []   # per-fold TPR curves, resampled onto a common FPR grid
aucs = []   # per-fold AUC values
mean_fpr = np.linspace(0, 1, 100)

i = 0
for train, test in cv.split(X, y):
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve for this fold
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    # Resample the fold's TPR onto mean_fpr so the folds can be averaged
    tprs.append(np.interp(mean_fpr, fpr, tpr))
    tprs[-1][0] = 0.0
    roc_auc = auc(fpr, tpr)
    aucs.append(roc_auc)
    plt.plot(fpr, tpr, lw=1, alpha=0.3,
             label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))

    i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| scikit-learn-official-examples/model_selection/plot_roc_crossval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time Series Decomposition
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# Load the monthly construction-spending series from the local CSV.
df = pd.read_csv('ConstructionTimeSeriesDatav2.csv')
# -
# # Public Construction Spending - Basic Decomposition Method
# First, take a look at the data using a simple visualization.
# +
# View the data: month index on x, public construction spending on y.
plt.plot(df['Month'],df['Public Construction'])
# -
# The graph shows the data has a linear upward trend with seasonal cycles.
#
# Now generate the intercept and slope values.
# +
# Fit an ordinary least-squares trend line to the series, using the row
# index as the time variable.
from scipy import stats
slope, intercept, r_value, p_value, std_err = stats.linregress(df.index,df['Public Construction'])
print('intercept =', intercept, ' slope =', slope, ' p_value = ',p_value)
# -
# Create a new column that calculates a simple regression.
# +
#Create the R1 regression column
def create_regress_col(row, intercept, slope):
    """Trend-line estimate for one row: intercept + slope * Month.

    The intercept and the row's 'Month' value are coerced to float so
    string-typed CSV values still work.
    """
    month = float(row['Month'])
    return float(intercept) + month * slope
# Evaluate the fitted trend line for every row, then detrend: R1 is the
# residual left after removing the linear trend.
df['regress'] = df.apply(create_regress_col,args = (intercept,slope),axis = "columns")
df['R1'] = df['Public Construction'] - df['regress']
# Thousands separators, no decimals, for display only.
df.style.format({
    'Public Construction': '{:,.0f}'.format,
    'regress': '{:,.0f}'.format,
    'R1': '{:,.0f}'.format
})
# -
# Plot the results of the R1 simple regression column.
plt.plot(df.index,df.R1)
# The results of R1 show a seasonality pattern that repeats about every 12 months.
#
# Check the seasonality using auto-correlation.
# +
# Create column with a 12-month lag of the raw series.
lag = 12
# BUG FIX: the original loop copied `iloc[i-4]` (a 4-month shift) even though
# the stated and intended lag is 12 months. pandas shift() applies the
# intended lag and leaves the first `lag` rows NaN, matching the loop's
# NaN initialization.
df['lag12'] = df['Public Construction'].shift(lag)
print(df.head(n=10))
# Autocorrelation of the detrended residual R1 at every lag up to n/2;
# a peak near lag 12 confirms annual seasonality.
for i in range(int(len(df.index)/2)):
    print('autocorrelation, lag =', i, ':', df.R1.autocorr(lag=i))
# Overlay the raw series (black) and its lagged copy (blue).
fig, ax = plt.subplots()
ax.plot(df['Month'], df['Public Construction'], c='k')
ax.plot(df['Month'], df.lag12, c='b')
ax.set_xlim([0, 150])
ax.text(155, 17000, 'Public Construction', color='k')
ax.text(155, 26000, 'Lagged\nPublic Construction', color='b')
ax.set_xlabel('Month')
# -
# The lagged public construction is highly correlated to the actual Public Construction spending amounts.
#
# Plot each subset of the annual pattern.
# +
# Slice the detrended residual R1 into consecutive 12-month cycles, one
# column per year, so the seasonal shapes can be compared side by side.
dfQtr = pd.DataFrame()
cycleLen = 12
for i in range(int(len(df.index)/cycleLen)):
    # Column i holds year i's 12 residual values, re-indexed 0..11.
    newData = pd.DataFrame({i:df['R1'].iloc[i*cycleLen:(i+1)*cycleLen]})
    newData.index = range(0,len(newData))
    dfQtr = pd.concat([dfQtr,newData],axis=1)
fig,ax = plt.subplots()
ax.plot(dfQtr)
# -
# This confirms that the 12 seasonal patterns look the same.
#
# Next, average the spending for each of the years in the data.
# +
# Average the seasonal residual across years, month by month; the 'avg'
# column is the estimated seasonal profile.
avg = []
for i in range(len(dfQtr.index)):
    avg.append(dfQtr.iloc[i].mean())
dfQtr = pd.concat([dfQtr, pd.DataFrame({'avg': avg})], axis=1)
print(dfQtr)
# Plot each year's residual (black) with the cross-year average in red.
# (Removed the unused local `c = 180`; the plot calls pass their own
# `c=` keyword argument, so the variable was dead.)
fig, ax = plt.subplots()
for col in dfQtr.columns.values:
    if col == 'avg':
        ax.plot(dfQtr[col], c='r')
    else:
        ax.plot(dfQtr[col], c='k')
# -
# Now, calculate the seasonal effect for each year of spending.
df['S'] = np.NaN
df['R2Public'] = np.NaN
df['Composite'] = np.NaN
df['R2ErrorPublic'] = np.NaN
# S holds the 12-value seasonal profile estimated above.
S = dfQtr['avg'].tolist()
for i in df.index:
    # Seasonal component repeats every cycleLen (12) months.
    df.loc[i, 'S'] = S[i % cycleLen]
    # BUG FIX: the original wrote to 'R2X'/'R2ErrorX', leaving the
    # pre-created 'R2Public'/'R2ErrorPublic' columns all-NaN (so the styled
    # table below displayed empty columns). Write to the intended columns.
    df.loc[i, 'R2Public'] = df.loc[i, 'R1'] - df.loc[i, 'S']
    # Composite model = linear trend + seasonal component.
    df.loc[i, 'Composite'] = df.loc[i, 'regress'] + df.loc[i, 'S']
    # Remaining error as a percentage of the observed value.
    df.loc[i, 'R2ErrorPublic'] = 100 * df.loc[i, 'R2Public'] / df.loc[i, 'Public Construction']
df.style.format({
    'Public Construction': '{:,.0f}'.format,
    'regress': '{:,.0f}'.format,
    'R1': '{:,.0f}'.format,
    'S': '{:,.0f}'.format,
    'R2Public': '{:,.0f}'.format,
    'Composite': '{:,.0f}'.format,
    'R2ErrorPublic': '{:.2f}%'.format
})
# Visualize how the model fits the original Public Construction spending series.
fig, ax = plt.subplots()
# Raw series in black, trend+seasonal composite model in blue.
ax.plot(df['Public Construction'],c='k')
ax.plot(df['Composite'],c='b')
ax.set_xlim([0,150])
ax.text(150,16000,'Public Construction', color='k')
ax.text(150,20500,'Model', color='b')
# Hide the top/right spines for a cleaner chart.
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel('Months')
ax.set_ylabel('Construction Spending (in millions of $)')
| timeseriesdecom/Public_TimeSeriesDecomposition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="oTce78yJBIQr"
# # Setup project
#
# + [markdown] id="LP25zIjVzhLN"
# ### git clone
# + colab={"base_uri": "https://localhost:8080/"} id="vIs_IWGJBHDw" outputId="00e08066-fa78-47ef-a9d1-768727123426"
# !git clone -b setup-for-pip https://github.com/Ronnypetson/gym-minigrid.git
# + colab={"base_uri": "https://localhost:8080/"} id="jXjKf9PWCc6J" outputId="e1732e59-bb87-406f-a97e-7a6349d0d2eb"
# !cd /content/gym-minigrid/
# !pip install /content/gym-minigrid/
# + id="bGlhbcRhDLEQ"
from gym_minigrid.wrappers import *
from project_RL.linear_sarsa.sarsa_lambda_agent import LinearSarsaLambda
import project_RL.linear_sarsa.train as train
# Training hyperparameters for the linear SARSA(lambda) agent.
hyperparameters = {
    # 'env_name': 'MiniGrid-Empty-5x5-v0',
    'env_name': 'MiniGrid-DoorKeyObst-6x6-v0',
    'discount_rate': 0.9,   # gamma
    'learning_rate': 1e-3,
    'lambda': 0.6,          # eligibility-trace decay
    'epsilon': 0.3,         # exploration rate
    'n0': 3000              # presumably an exploration-schedule constant -- TODO confirm
}
# NOTE(review): `gym` is expected to come from the wildcard wrappers import
# above -- confirm gym_minigrid.wrappers re-exports it.
env = gym.make(hyperparameters['env_name'])
agent = train.train(env, hyperparameters, num_episodes=int(3e5))
#play(env, agent, linear_parse_observation_to_state, episodes=10)
# + id="gwYTxOL1MEXv"
# plot('log_MiniGrid-Dynamic-Obstacles-6x6-v0_21-10-24-22-32-22', 1000)
from google.colab import files
# export mydate=`date +%Y-%m-%d-%H-%M-%S`
# Zip (and delete, -m) all MiniGrid artifacts, then download via Colab.
# !zip -m minigrid.zip /content/*MiniGrid*
files.download('minigrid.zip')
| project_RL/notebooks/mo436_gym_minigrid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Low-Rank Matrix Completion
#
# **Published**: September 18, 2019
#
# **Author**: <NAME> [[**GitHub homepage**](https://github.com/xinychen)]
#
# **Download**: This Jupyter notebook is at our GitHub repository. If you want to evaluate the code, please download the notebook from the repository of [**tensor-learning**](https://github.com/xinychen/tensor-learning/blob/master/content/LRMC.ipynb).
# --------------------
#
# If you search **matrix completion** as a keyword on [**wiki**](https://en.wikipedia.org/wiki/Matrix_completion), you might see the following explaination for this concept:
#
# > Matrix completion is the task of filling in the missing entries of a partially observed matrix. A wide range of datasets are naturally organized in matrix form.
#
# From this brief explaination, we could on the one hand see that the matrix completion task requires us to use a mainfold of approaches to impute the missing values for a given matrix which is partially observed. On the other hand, it seems that there are tremendous real-world datasets organized in matrix form, and this means that you have an access to solve real-world missing data problems using matrix completion techniques.
#
# **About this chapter**: This chapter does not intend to give a thorough review of low-rank matrix completion, but rather to highlight the basic idea of low-rank matrix completion, and to provide an overview of the key matrix completion techniques that have been successfully used. We direct the interested reader to the more focused references that are provided throughout the chapter.
#
# ## Motivation: Low-Rank Matrix Completion and Recovery
# The data incompleteness problem appears often in machine learning. If you want to fully understand the matrix completion, then the following applications may be helpful:
#
# - Recommender systems
# - Image inpainting
# ### Recommender Systems
# A strong motivation for the matrix completion in recommender systems comes from user ratings of some items like products, movies, or musics which are put into a matrix $\boldsymbol{X}\in\mathbb{R}^{m\times n}$. The entries $x_{ij},\forall i\in\left\{1,...,m\right\},j\in\left\{1,...,n\right\}$ of the matrix correspond to the $i$-th user's rating of item $j$.
#
# **A simple example**: In Figure 1, the question is how to predict unobserved entries of the matrix according to the limited observations.
#
# 
# > Figure 1: User ratings of some movies are naturally organized in matrix form, where the matrix has some unobserved entries (i.e., unseen user ratings). Note that this figure is from http://www.princeton.edu/~yc5/ele538b_sparsity/lectures/matrix_recovery.pdf.
#
# ### Image Inpainting
# According to the definition on [**wiki**](https://en.wikipedia.org/wiki/Inpainting), inpainting is the process of reconstructing lost or deteriorated parts of images and videos. For the image inpainting task which involves with missing data, we should first review the image data representation.
#
# In general, each image simply consists of three color channels: Red (R), Blue (B), and Green (G) that form the RGB color image. Therefore, color images can be stored in a third-order tensor with dimensions as (width, height, channels). Formally, we could represent a color image by using a tensor like $\boldsymbol{\mathcal{Y}}\in\mathbb{R}^{m\times n\times 3}$, in which $3$ is used to indicate RGB and any $(i,j)$-th pixel is denoted by the vector $\left[y_{ij1},y_{ij2},y_{ij3}\right]^\top$.
#
# On the other hand, we could also convert a color image to a grey image, and then, any matrix $\boldsymbol{Y}\in\mathbb{R}^{m\times n}$ can be used to represent the pixels of a grey image. Below is a color image which is originally stored in a third-order tensor, and we load an image as grayscale using `imageio`.
#
# 
# > Figure 2: An image from https://raw.githubusercontent.com/qbzhao/BCPF/master/TestImages/lena.bmp.
#
# > `Imageio` is a Python library that provides an easy interface to read and write a wide range of image data, including animated images, volumetric data, and scientific formats. It is cross-platform, runs on Python 2.7 and 3.4+, and is easy to install.
#
# > Main website: http://imageio.github.io
# +
import numpy as np
import imageio
import matplotlib.pyplot as plt
# Load the image as a 2-D grayscale array (as_gray collapses the RGB channels).
lena = imageio.imread('../images/lena.bmp', as_gray = True)
print('The shape of the image is {}.'.format(lena.shape))
dim1, dim2 = lena.shape
# Rounding uniform noise yields a random 0/1 mask (each pixel kept with
# probability ~0.5); multiplying zeroes out the "missing" pixels.
mask = np.round(np.random.rand(dim1, dim2)) # Generate a binary mask.
sparse_lena = np.multiply(lena, mask)
# Show the original and the partially observed image side by side.
plt.subplots(1, 2, figsize=(8,15))
plt.subplot(1, 2, 1)
plt.imshow(lena)
plt.title('The original Lena')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(sparse_lena)
plt.title('The incomplete Lena')
plt.axis("off")
plt.show()
# -
# **Matrix completion problem description**: Suppose we observe partial entries of a matrix $\boldsymbol{Y}\in\mathbb{R}^{m\times n}$, indexed by the set $\Omega$. Then, the goal is to recover missing (or unobserved) entries in $\boldsymbol{Y}$.
#
# It is not hard to understand the matrix completion problem if you see the following example:
#
# > Suppose that we have a matrix like $\boldsymbol{Y}=\left[\begin{array}{ll}{2} & {\color{red}{?}} \\ {\color{red}{?}} & {4}\end{array}\right] \in \mathbb{R}^{2 \times 2}$, which is obviously a partially observed matrix. The observed entries of $\boldsymbol{Y}$ are indicated by the set $\Omega=\left\{\color{red}{(1,1),(2,2)}\right\}$. Then, applying the matrix completion technique, our final goal is to find a matrix like $\boldsymbol{X}=\left[\begin{array}{ll}{2} & {\color{red}{8}} \\ {\color{red}{1}} & {4}\end{array}\right] \in \mathbb{R}^{2 \times 2}$ that acts as the recovered matrix.
#
# ## Low-Rank Matrix Completion
# #### Basic Formulation for Matrix Completion
#
# To ease the reader's journey, there are two steps towards understanding the basic idea of the matrix completion techniques. The first is from the view of data noise. Indeed, the matrix completion for data without noise implies that the recovered matrix $\boldsymbol{X}$ has absolutely same entries on $\Omega$ as the original matrix $\boldsymbol{Y}$. It means that
# $$\boldsymbol{X}_{\Omega}=\boldsymbol{Y}_{\Omega}.~~~~~~~~~~\text{(1)}$$
#
# > For example, if we have a partially observed matrix $\boldsymbol{Y}\in\mathbb{R}^{2\times 2}$ like $$\boldsymbol{Y}=\left[\begin{array}{cc}y_{11} & \text{?} \\ \text{?} & y_{22} \\ \end{array}\right],$$ then, the recovered matrix $\boldsymbol{X}\in\mathbb{R}^{2\times 2}$ must satisfy $$\left\{\begin{array}{l}x_{11}=y_{11}, \\x_{22}=y_{22}. \\ \end{array}\right.$$
#
# Then, the idea of the matrix completion is rank minimization. We assume that $\boldsymbol{X}$ is a low-rank matrix and write the matrix completion via rank minimization as
# $$\begin{array}{ll} \min_{\boldsymbol{X}} & \text{rank}\left(\boldsymbol{X}\right)\\
# \text{s.t.} & \boldsymbol{X}_{\Omega}=\boldsymbol{Y}_{\Omega}. \\ \end{array}~~~~~~~~~~\text{(2)}$$
#
# More formally, Eq. (1) is written in another form:
# $$\begin{array}{ll} \min_{\boldsymbol{X}} & \text{rank}\left(\boldsymbol{X}\right)\\
# \text{s.t.} & \mathcal{P}_{\Omega}\left(\boldsymbol{X}\right)=\mathcal{P}_{\Omega}\left(\boldsymbol{Y}\right). \\ \end{array}$$
# where as $\Omega$ stores the indices of the observed set, the operator $\mathcal{P}_{\Omega}:\mathbb{R}^{m\times n} \mapsto \mathbb{R}^{m\times n}$ is introduced as an orthogonal projection onto subspace of matrices supported on $\Omega$, namely,
# $$\left[\mathcal{P}_{\Omega}\left(\boldsymbol{{X}}\right)\right]_{ij}=\left\{
# \begin{array}{ll}
# x_{ij}, &\text{if} \left(i,j\right)\in\Omega, \\
# 0, & \text{otherwise}.
# \end{array}\right.$$
#
# However, this formulation is not intuitive for our understanding. In the following, we will use the equation system mentioned in Eq. (2).
# #### Convex Formulation for Matrix Completion
#
# The optimization problem in Eq. (2) is a nonconvex optimization problem since the objective function $\text{rank}\left(\boldsymbol{X}\right)$ is nonconvex. Fortunately, one common approach proved in the previous studies is to use the trace norm $\left\|\boldsymbol{X}\right\|_{*}$ to approximate the rank of matrices. The advantage of the trace norm is that $\left\|\boldsymbol{X}\right\|_{*}$ is the tightest convex envelop for the rank of matrices. This leads to the following convex optimization problem for the matrix completion in Eq. (2):
#
# $$\begin{array}{ll} \min_{\boldsymbol{X}} & \left\|\boldsymbol{X}\right\|_{*} \\
# \text{s.t.} & \boldsymbol{X}_{\Omega}=\boldsymbol{Y}_{\Omega}. \\ \end{array}~~~~~~~~~~\text{(3)}$$
#
# Before solving this optimization problem, we first learn about the concept of trace norm in ways that will help your understanding of matrix completion.
#
# **Definition 1.** For a $m\times n$ matrix $\boldsymbol{X}$, the trace norm of $\boldsymbol{X}$ is defined as the sum of singular values, namely, $\left\|\boldsymbol{X}\right\|_{*}=\sum_{i}^{r}\sigma_{i}$, and $r=\text{rank}\left(\boldsymbol{X}\right)$ and $\sigma_{1} \geq \cdots \geq \sigma_{r} \geq 0$ are singular values of $\boldsymbol{X}$.
#
# > For example, for a given matrix like $\boldsymbol{X}=\left[\begin{array}{cc}4 & 4 \\ -3 & 3 \\ \end{array}\right]$, the Singluar Value Decomposition (SVD) is $$\boldsymbol{X}=\boldsymbol{U}\boldsymbol{\Sigma}\boldsymbol{V}^\top=\left[\begin{array}{cc}{1} & {0} \\ {0} & {1}\end{array}\right]\left[\begin{array}{cc}{\color{red}{4 \sqrt{2}}} & {0} \\ {0} & {\color{red}{3 \sqrt{2}}}\end{array}\right]\left[\begin{array}{cc}{\frac{\sqrt{2}}{2}} & {\frac{\sqrt{2}}{2}} \\ {-\frac{\sqrt{2}}{2}} & {\frac{\sqrt{2}}{2}}\end{array}\right],$$
# where we could derive the sum of sigular values as $\color{red}{7\sqrt{2}}$ which is the trace norm of $\boldsymbol{X}$.
# #### Proximal Gradient Descent
#
# In the matrix completion, the optimization problem is
# $$\min_{\boldsymbol{X}}~\frac{1}{2}\left\|\boldsymbol{Y}_{\Omega}-\boldsymbol{X}_{\Omega}\right\|_{F}^{2}+\rho\left\|\boldsymbol{X}\right\|_{*},$$
# where the objective function includes two parts: $$\left\{\begin{array}{l}g\left(\boldsymbol{X}\right)=\frac{1}{2}\left\|\boldsymbol{Y}_{\Omega}-\boldsymbol{X}_{\Omega}\right\|_{F}^{2}, \\ h\left(\boldsymbol{X}\right)=\left\|\boldsymbol{X}\right\|_{*}, \end{array}\right.$$
# and $g\left(\boldsymbol{X}\right)$ is convex and differentiable while $h\left(\boldsymbol{X}\right)$ is convex and not necessarily differentiable.
#
# For this type of optimization, we should introduce the proximal methods.
# #### Solution to Matrix Completion
# ### Experiments on Image Data
# Examine algorithms
| content/LRMC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Ic4_occAAiAT"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" id="ioaprt5q5US7"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" id="yCl0eTNH5RS3"
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] id="ItXfxkxvosLH"
# # Classificazione di testo con TensorFlow Hub: Recensioni di film
# + [markdown] id="hKY4XMc9o8iB"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/text_classification_with_hub"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Visualizza su TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/it/tutorials/keras/text_classification_with_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Esegui in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/it/tutorials/keras/text_classification_with_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Visualizza il sorgente su GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/it/tutorials/keras/text_classification_with_hub.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Scarica il notebook</a>
# </td>
# </table>
# + [markdown] id="BYzaKBe8YXg0"
# Note: La nostra comunità di Tensorflow ha tradotto questi documenti. Poiché queste traduzioni sono *best-effort*, non è garantito che rispecchino in maniera precisa e aggiornata la [documentazione ufficiale in inglese](https://www.tensorflow.org/?hl=en).
# Se avete suggerimenti per migliorare questa traduzione, mandate per favore una pull request al repository Github [tensorflow/docs](https://github.com/tensorflow/docs).
# Per proporsi come volontari alla scrittura o alla review delle traduzioni della comunità contattate la
# [mailing list <EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
# + [markdown] id="Eg62Pmz3o83v"
# Questo notebook classifica recensioni di film come *positive* o *negative* usando il testo delle revisioni. Questo Ú un esempio di classificazione *binaria*âo a due classi, un importante tipo di problema di machine learning largamente applicabile.
#
# Il tutorial dimostra l'applicazione di base del trasferimento di conoscenza (transfer learning n.d.t) con TensorFlow Hub e Keras.
#
# Useremo il [dataset IMDB](https://www.tensorflow.org/datasets/catalog/imdb_reviews) che contiene il testo di 50.000 recensioni di film dall'[Internet Movie Database](https://www.imdb.com/). Esse sono divise in 25,000 recensioni per l'addestramento e 25,000 revisioni per la verifica. Gli insiemi di addestramento e verifica sono *bilanciati*, nel senso che essi contengono un eguale numero di recensioni positive e negative.
#
# Questo notebook usa [tf.keras](https://www.tensorflow.org/guide/keras), un'API di alto livello per costruire ed addestrare modelli in TensorFlow, e [TensorFlow Hub](https://www.tensorflow.org/hub), una libreria ed una piattaforma per il trasferimento di conoscenza. Per un tutorial più avanzato di classificazione del testo che usa `tf.keras`, vedere la [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/).
# + id="2ew7HTbPpCJH"
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# Colab-only magic: select TF 2.x when running under Colab (no-op elsewhere).
try:
  # # %tensorflow_version only exists in Colab.
  # %tensorflow_version 2.x
except Exception:
  pass
import tensorflow as tf
# !pip install tensorflow-hub
# !pip install tfds-nightly
import tensorflow_hub as hub
import tensorflow_datasets as tfds
# Report library versions and GPU availability for reproducibility.
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
# + [markdown] id="iAsKG535pHep"
# ## Scarichiamo il dataset IMDB
#
# Il dataset IMDB Ú disponibile su [imdb reviews](https://www.tensorflow.org/datasets/catalog/imdb_reviews) o su [TensorFlow datasets](https://www.tensorflow.org/datasets). Il codice seguente scarica il dataset IMDB sulla vostra macchina (o eseguibile colab):
# + id="zXXx5Oc3pOmN"
# Split the training set into 60% and 40%, so we'll end up with 15,000 examples
# for training, 10,000 examples for validation and 25,000 examples for testing.
# as_supervised=True yields (text, label) pairs instead of feature dicts.
train_data, validation_data, test_data = tfds.load(
    name="imdb_reviews",
    split=('train[:60%]', 'train[60%:]', 'test'),
    as_supervised=True)
# + [markdown] id="l50X3GfjpU4r"
# ## Esploriamo i dati
#
# Prendiamoci un momento per capire la struttura dei dati. Ogni esempio Ú una frase che rappresenta la recensione di un film e l'etichetta corrispondente. La frase non Ú pre-elaborata in alcun modo. L'etichetta Ú un valore intero tra 0 e 1, ove 0 Ú una recensione negativa, ed 1 Ú una recensione positiva.
#
# Stampiamo i primi 10 esempi.
# + id="QtTS4kpEpjbi"
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch
# + [markdown] id="IFtaCHTdc-GY"
# E stampiamo anche le prime 10 etichette.
# + id="tvAjVXOWc6Mj"
train_labels_batch
# + [markdown] id="LLC02j2g-llC"
# ## Costruiamo il modello
#
# La rete neurale viene creata impilando livelliâciò richiede tre decisioni architetturali principali:
#
# * Come rappresentare il testo?
# * Quanti livelli usare nel modello?
# * Quante *unità nascoste* usare per ogni livello?
#
# In questo esempio, i dati di input sono frasi. Le etichette da predire sono 0 o 1.
#
# Un modo di rappresentare il testo Ú convertire le frasi in vettori di assorbimento (embedding n.d.t.). Possiamo usare un (livello n.d.t) di assorbimento del testo pre-addestrato come primo livello, ed avremo tre vantaggi:
#
# * non dovremo preoccuparci della pre-elaborazione del testo,
# * potremo beneficiare del trasferimento di conoscenza,
# * il risultato dell'assorbimento ha una lunghezza fissa, e quindi sarà più semplice da elaborare.
#
# Per questo esempio useremo un **modello di assorbimento pre-addestrato** preso da [TensorFlow Hub](https://www.tensorflow.org/hub) e chiamato [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1).
#
# Qui ci sono altri tre modelli pre-addestrati, da provare, che rendono ancora più interessante questo tutorial:
#
# * [google/tf2-preview/gnews-swivel-20dim-with-oov/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1) - lo stesso di [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1), ma con il 2.5% del vocabolario convertito in OOV buckets. Ciò può essere d'aiuto se il vocabolario del nostro caso e il vocabolario del modello non coincidono completamente.
# * [google/tf2-preview/nnlm-en-dim50/1](https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1) - Un modello più ampio con un vocabolario da ~1M di vocaboli e 50 dimensioni.
# * [google/tf2-preview/nnlm-en-dim128/1](https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1) - Un modello ancora più ampio con un vocabolario da ~1M di vocaboli e 128 dimensioni.
# + [markdown] id="In2nDpTLkgKa"
# Per prima cosa creiamo un livello Keras che usa un modello TensorFlow Hub per eseguire l'assorbimento (embedding n.d.t) delle frasi, e proviamolo su una coppia di esempi di input. Notate che, indipendentemente dalla lunghezza del testo di input, il formato dell'output dell'assorbimento Ú: `(num_examples, embedding_dimension)`.
# + id="_NUbzVeYkgcO"
# Pre-trained 20-dim sentence-embedding module from TF Hub; trainable=True
# fine-tunes the embedding weights together with the classifier.
embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[],
                           dtype=tf.string, trainable=True)
hub_layer(train_examples_batch[:3])
# + [markdown] id="dfSbV6igl1EH"
# Ora costruiamo il modello completo:
# + id="xpKOoWgu-llD"
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
# Single logit output (no sigmoid); pair with from_logits=True in the loss.
model.add(tf.keras.layers.Dense(1))
model.summary()
# + [markdown] id="6PbKQ6mucuKL"
# I livelli sono impilati sequenzialmente per realizzare il classificatore:
#
# 1. Il primo livello Ú un livello TensorFlow Hub. Questo livello usa un modello Saved Model pre-addestrato per mappare una frase nel suo vettore di assorbimento. Il modello di assorbimento del testo pre-addestrato che useremo ([google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1)) divide le frasi in token, assorbe ciascun token e poi combina gli assorbimenti. Le dimensioni risultanti sono: `(num_examples, embedding_dimension)`.
# 2. Questo vettore di output di lunghezza fissa Ú passato attraverso un livello completamente connesso (`Denso`) con 16 unità nascoste.
# 3. L'ultimo livello Ú connesso densamente con un singolo nodo di output. Usando la funzione di attivazione `sigmoid`, questo valore Ú un float tra 0 e 1, che rappresenta una probabilità , o un livello di confidenza.
#
# Compiliamo il modello.
# + [markdown] id="L4EqVWg4-llM"
# ### Funzione obiettivo e ottimizzatore
#
# Un modello, per l'addestramento, ha bisogno di una funzione obiettivo e di un ottimizzatore. Essendo questo un problema di classificazione binaria e l'output del modello una probabilità (un livello a unità singola con un'attivazione sigmoid), useremo la funzione obiettivo `binary_crossentropy`.
#
# Questa non Ú l'unica scelta possibile per una funzione obiettivo, potreste, per esempio, scegliere la `mean_squared_error`. In generale, però, `binary_crossentropy` Ú migliore per gestire probabilità âessa misura la "distanza" tra distribuzioni di probabilità o, nel nostro caso, tra la distribuzione dei dati reali e le previsioni.
#
# Nel seguito, quando esploreremo i problemi di regressione (diciamo, per prevedere il prezzo di una casa), vedremo come usare un'altra funzione obiettivo chiamata scarto quadratico medio.
#
# Adesso, configuriamo il modello per usare un ottimizzatore ed una funzione obiettivo:
# + id="Mr0GP-cQ-llN"
# Configure training: Adam optimizer, binary cross-entropy on logits
# (the final Dense layer emits a raw logit), accuracy as the metric.
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
# + [markdown] id="35jv_fzP-llU"
# ## Addestriamo il modello
#
# Addestriamo il modello per 20 epoche in mini-lotti di 512 campioni l'uno. CioÚ 20 iterazioni su tutti i campioni nei tensori `x_train` e `y_train`. Durante l'addestramento, controlliamo l'obiettivo e l'accuratezza del modello sui 10.000 campioni dell'insieme di validazione:
# + id="tXSGrjWZ-llW"
# Train for 20 epochs in mini-batches of 512 samples, validating on the
# 10,000-example validation split after every epoch.
history = model.fit(train_data.shuffle(10000).batch(512),
                    epochs=20,
                    validation_data=validation_data.batch(512),
                    verbose=1)
# + [markdown] id="9EEGuDVuzb5r"
# ## Valutiamo il modello
#
# E andiamo a vedere come si comporta il modello. Saranno restituiti due valori. Obiettivo (un numero che rappresenta il nostro errore, per cui valori piccoli sono migliori), e accuratezza.
# + id="zOMKywn4zReN"
# Evaluate on the held-out test set; returns one value per compiled metric
# (here: loss, then accuracy).
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
    print("%s: %.3f" % (name, value))
# + [markdown] id="z1iEXVTR0Z2t"
# Questo approccio abbastanza ingenuo raggiunge un'accuratezza di circa l'87%. Con approcci più avanzati, il modello potrebbe avvicinarsi al 95%.
# + [markdown] id="5KggXVeL-llZ"
# ## Ulteriori letture
#
# Per un metodo più generale di trattamento degli input stringa e per un'analisi più dettagliata del progresso di accuratezza e obiettivo durante l'addestramento, date un'occhiata [qui](https://www.tensorflow.org/tutorials/keras/basic_text_classification).
| site/it/tutorials/keras/text_classification_with_hub.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Py3-GPU (Python3.5.2)
# language: python
# name: py3-gpu
# ---
# +
import os
import json
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from src.data_loader import Shifted_Data_Loader,upsample_dataset
from src.plot import orig_vs_transformed as plot_ovt
from src.plot import enc_dec_samples
# from src.models import GResNet,EDense,EResNet,EConvNet
from src.test_models.drduplex import DRDuplex
from src.config import get_config
from src.trainer import Trainer
from src.utils import prepare_dirs_and_logger
from keras.datasets import fashion_mnist,mnist
from keras.layers import Dense
from keras.models import Model
from keras.utils import to_categorical
from keras.optimizers import adadelta
# +
config,_ = get_config()
# Boilerplate
setattr(config, 'proj_root', '/home/elijahc/projects/vae')
setattr(config, 'log_dir', '/home/elijahc/projects/vae/logs')
setattr(config, 'dev_mode',True)
setattr(config, 'seed', 7)
setattr(config, 'project','vae')
setattr(config, 'ecc_max',4.8/8.0)
setattr(config, 'bg_noise',0.8)
setattr(config, 'contrast_level',0.8)
# setattr(config, 'rot_max',90.0/360.0)
setattr(config, 'rot_max',0)
# Training Params
setattr(config, 'batch_size', 512)
setattr(config, 'dataset', 'fashion_mnist')
setattr(config, 'epochs',1000)
setattr(config, 'monitor', None)
# setattr(config, 'lr', 10)
# setattr(config, 'min_delta', 0.25)
# setattr(config, 'monitor', 'val_loss')
setattr(config, 'optimizer', 'adam')
setattr(config, 'label_corruption',0.0)
# -
# Architecture Params
setattr(config, 'enc_blocks', [128,256,512])
setattr(config, 'enc_arch', 'dense')
setattr(config, 'dec_blocks', [4,2,1])
setattr(config, 'z_dim', 35)
setattr(config, 'y_dim', 35)
# +
if config.ecc_max == 0.:
translation_amt = None
else:
translation_amt = config.ecc_max
if config.rot_max == 0.:
rot_max = None
else:
rot_max = config.rot_max
if config.bg_noise == 0.:
bg_noise = None
else:
bg_noise = config.bg_noise
# Loss Weights
setattr(config, 'xcov', 0)
setattr(config, 'recon', 1)
setattr(config, 'xent', 15)
# setattr(config,'model_dir','/home/elijahc/projects/vae/models/2019-06-07/recon_{}_xent_{}/label_corruption_{}'.format(config.recon,config.xent,config.label_corruption))
setattr(config,'model_dir','/home/elijahc/projects/vae/models/2019-06-05/xent_{}_recon_{}_{}/bg_noise_{}'.format(config.xent,config.recon,config.enc_arch,config.bg_noise))
# -
# +
np.random.seed(7)
if not config.dev_mode:
print('setting up...')
prepare_dirs_and_logger(config)
vars(config)
# -
oversample_factor=2
DL = Shifted_Data_Loader(dataset=config.dataset,flatten=False,num_train=60000*oversample_factor,
translation=translation_amt,
rotation=rot_max,
# contrast_level=config.contrast_level,
# bg='natural',
# blend=None,
noise_mode='uniform',
noise_kws={
'amount':1,
'width':config.bg_noise,
},
bg_only=True,
)
pt,idx = plot_ovt(DL,cmap='gray')
# +
# plt.imshow(DL.fg_train[50].reshape(56,56),cmap='gray',vmin=0,vmax=1)
# -
DL.sx_test.shape
mod = DRDuplex(img_shape=(56,56,1),
num_classes=DL.num_classes,
recon=config.recon,
xent=config.xent,n_residual_blocks=4,
# kernel_regularization=1e-5,
)
mod.combined.summary()
DL.sx_test.shape
val_pct = 0.05
val_idxs = np.random.choice(np.arange(10000),int(val_pct*60000),replace=False)
validation_set = (DL.sx_test[val_idxs],
{'Classifier':DL.y_test_oh[val_idxs],
'Generator':DL.fg_test[val_idxs]}
)
mod.train(config.epochs,DL,config.batch_size,verbose=0,shuffle=True,
validation_data=validation_set,
)
hist_df = pd.DataFrame.from_records(mod.combined.history.history)
hist_df.head()
sns.set_context('paper')
metrics = ['loss','Generator_loss','Classifier_acc']
fig,axs = plt.subplots(nrows=len(metrics),sharex=True,figsize=(10,10))
for metric_name,ax in zip(metrics,axs):
sns.scatterplot(data=hist_df[[metric_name,'val_'+metric_name]],ax=ax)
# ax.set_xscale('log')
axs[2].hlines(y=(1.0/DL.num_classes),xmin=0,xmax=hist_df.index.values.max(),linestyles='dashed')
def enc_dec(model, DL):
    """Visualize one random training sample: image, its one-hot label, the
    predicted class vector, and the generator's reconstruction.

    Parameters
    ----------
    model : object
        Must expose Keras-style sub-models ``E`` (encoder), ``Q`` (classifier
        head) and ``G`` (generator/decoder) — matches DRDuplex above.
    DL : Shifted_Data_Loader
        Provides ``x_train``/``sx_train`` images and ``y_train_oh`` labels.

    Side effects: draws a 2x2 matplotlib figure; returns None.
    """
    # Pick one random training example to round-trip through the network.
    rand_im = np.random.randint(0,DL.x_train.shape[0])
    im = DL.sx_train[rand_im]
    y_true = DL.y_train_oh[rand_im]
    # NOTE(review): assumes shifted images are 56x56 single-channel — confirm
    # against Shifted_Data_Loader's output shape.
    latent_rep = model.E.predict(im.reshape(1,56,56,1))
    y_pred = model.Q.predict(im.reshape(1,56,56,1))
    fig,axs = plt.subplots(2,2,figsize=(8,6))
    # Bottom row: true vs predicted class vectors rendered as 1-row heatmaps.
    y_pred_axs = axs[1]
    y_pred_axs[0].imshow(y_true.reshape(1,-1))
    y_pred_axs[1].imshow(y_pred.reshape(1,-1))
    # Top row: original image and its reconstruction from the latent code.
    im_axs = axs[0]
    im_axs[0].imshow(im.reshape(56,56),cmap='gray')
    im_axs[0].set_title('Image; class: {}'.format(np.argmax(y_true)))
    im_axs[1].set_title('Recon; class: {}'.format(np.argmax(y_pred)))
    im_axs[1].imshow(model.G.predict(latent_rep).reshape(56,56),cmap='gray')
    # Hide tick marks on every panel.
    for ax in axs.ravel():
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.tight_layout()
enc_dec(mod,DL)
hist_df['generalization_error'] = hist_df.val_loss - hist_df.loss
hist_df['G_generalization_error'] = hist_df.val_Generator_loss - hist_df.Generator_loss
hist_df['class_generalization_error'] = hist_df.val_Classifier_loss - hist_df.Classifier_loss
sns.lineplot(data=hist_df[['class_generalization_error']])
# plt.yscale('log')
import datetime as dt
def clean_config(config, keys=('dev_mode', 'log_dir', 'log_level', 'proj_root')):
    """Strip machine-local fields from *config* and stamp upload metadata.

    Mutates and returns ``vars(config)`` — the config object's own
    ``__dict__`` — so the caller's ``config`` loses the listed attributes too.

    Parameters
    ----------
    config : object
        Any object with a ``__dict__`` (e.g. the argparse-style namespace
        returned by ``get_config()``).
    keys : iterable of str, optional
        Attribute names to drop if present. The default is now an immutable
        tuple (the original used a mutable list default, a Python foot-gun).

    Returns
    -------
    dict
        The config's attribute dict with ``uploaded_by`` and
        ``last_updated`` (stringified local timestamp) added.
    """
    c = vars(config)
    for k in keys:
        # pop(..., None) tolerates keys that are absent or were already
        # removed — this function is called twice in this notebook.
        c.pop(k, None)
    c['uploaded_by'] = 'elijahc'
    c['last_updated'] = str(dt.datetime.now())
    return c
run_meta = clean_config(config)
run_meta['project']='vae'
# run_meta['ecc_max']=0.8
run_meta
# +
# NOTE(review): `trainer` is never defined in this notebook (only the Trainer
# class is imported and never instantiated; the model lives in `mod`), so this
# call will raise NameError — presumably stale code from an earlier revision.
# Confirm and either instantiate a Trainer or save via `mod` instead.
trainer.save_model()
# Persist the cleaned run configuration and training history next to the model.
run_conf = clean_config(config)
with open(os.path.join(run_conf['model_dir'],'config.json'), 'w') as fp:
    json.dump(run_conf, fp)
hist_df.to_parquet(os.path.join(run_conf['model_dir'],'train_history.parquet'))
# -
generator = mod.G
# +
z_encoder = Model(mod.combined.input,mod.E.z_lat)
y_encoder = Model(trainer.input,trainer.y_lat)
classifier = Model(trainer.input,trainer.y_class)
l3_encoder = Model(trainer.input,trainer.model.get_layer(name='dense_1').output)
l1_encoder = Model(trainer.input,trainer.model.get_layer(name='conv2d_1').output)
# l2_encoder = Model(trainer.input,trainer.model.get_layer(name='block_2_Add_2').output)
# l2_encoder = Model(trainer.input,trainer.model.get_layer(name='block_4_Add_1').output)
l2_encoder = Model(trainer.input,trainer.model.get_layer(name='conv2d_3').output)
# -
mod = trainer.model
# +
# mod.summary()
# -
def get_weight_grad(model, inputs, outputs):
    """ Gets gradient of model for given inputs and outputs for all weights"""
    # NOTE(review): `K` (the Keras backend) is not imported in this notebook's
    # visible imports, so this raises NameError as written. It also relies on
    # private Keras internals (_feed_inputs, _standardize_user_data, ...) that
    # were removed in later tf.keras releases — confirm the Keras version.
    grads = model.optimizer.get_gradients(model.total_loss, model.trainable_weights)
    symb_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)
    # Build a backend function mapping feed tensors -> gradient tensors.
    f = K.function(symb_inputs, grads)
    # Standardize to the (inputs, targets, sample_weights) feed order K expects.
    x, y, sample_weight = model._standardize_user_data(inputs, outputs)
    output_grad = f(x + y + sample_weight)
    return output_grad
classifier.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['acc'])
res = classifier.evaluate(DL.sx_test,DL.y_test_oh,batch_size=config.batch_size)
ts_error = 1-res[1]
print(res[1])
df = pd.DataFrame.from_records({'test_acc':[res[1]],
'label_corruption':[config.label_corruption],
'recon':[config.recon],
'xent':[config.xent],
'ecc_max':[config.ecc_max],
'xcov': [config.xcov]})
df.to_json(os.path.join(config.model_dir,'performance.json'))
out_s = l1_encoder.output_shape
type(out_s)
# +
l1_enc = l1_encoder.predict(DL.sx_test,batch_size=config.batch_size).reshape(10000,np.prod(l1_encoder.output_shape[1:]))
l2_enc = l2_encoder.predict(DL.sx_test,batch_size=config.batch_size).reshape(10000,np.prod(l2_encoder.output_shape[1:]))
l3_enc = l3_encoder.predict(DL.sx_test,batch_size=config.batch_size).reshape(10000,np.prod(l3_encoder.output_shape[1:]))
z_enc = z_encoder.predict(DL.sx_test,batch_size=config.batch_size)
# y_lat = y_lat_encoder.predict(DL.sx_test,batch_size=config.batch_size)
y_enc = y_encoder.predict(DL.sx_test,batch_size=config.batch_size)
# -
l1_enc.shape
import xarray
import hashlib
import random
def raw_to_xr(encodings,l_2_depth,stimulus_set):
    """Pack per-layer activation matrices into one xarray DataArray.

    Parameters
    ----------
    encodings : dict[str, ndarray]
        Maps layer name -> (n_images, n_units) activation matrix. All layers
        must share the same presentation (row) order as `stimulus_set`.
    l_2_depth : dict[str, int]
        Maps layer name -> integer depth used for the 'layer' coordinate.
    stimulus_set : pandas.DataFrame
        One row per image with columns image_id, dx, dy, rxy, numeric_label.

    Returns
    -------
    xarray.DataArray
        dims (presentation, neuroid); layers are concatenated along the
        neuroid axis, with MultiIndex coords describing units and stimuli.
    """
    # Fashion-MNIST class names, indexed by numeric_label (0-9).
    obj_names = [
        "T-shirt",
        "Trouser",
        "Pullover",
        "Dress",
        "Coat",
        "Sandal",
        "Dress Shirt",
        "Sneaker",
        "Bag",
        "Ankle boot",
    ]
    all_das = []
    for layer,activations in encodings.items():
        neuroid_n = activations.shape[1]
        # Unit-axis coords: unique id per unit, plus layer depth and name.
        n_idx = pd.MultiIndex.from_arrays([
            pd.Series(['{}_{}'.format(layer,i) for i in np.arange(neuroid_n)],name='neuroid_id'),
            pd.Series([l_2_depth[layer]]*neuroid_n,name='layer'),
            pd.Series([layer]*neuroid_n,name='region')
        ])
        # Stimulus-axis coords; tx/ty normalize pixel shifts by 28 (the base
        # image size) and 's' is a constant scale placeholder.
        p_idx = pd.MultiIndex.from_arrays([
            stimulus_set.image_id,
            stimulus_set.dx,
            stimulus_set.dy,
            stimulus_set.rxy,
            stimulus_set.numeric_label.astype('int8'),
            pd.Series([obj_names[i] for i in stimulus_set.numeric_label],name='object_name'),
            pd.Series(stimulus_set.dx.values/28, name='tx'),
            pd.Series(stimulus_set.dy.values/28, name='ty'),
            pd.Series([1.0]*len(stimulus_set),name='s'),
        ])
        da = xarray.DataArray(activations.astype('float32'),
                     coords={'presentation':p_idx,'neuroid':n_idx},
                     dims=['presentation','neuroid'])
        all_das.append(da)
    # Stack all layers side by side along the unit axis.
    return xarray.concat(all_das,dim='neuroid')
encodings = {
'pixel':DL.sx_test.reshape(10000,np.prod(DL.sx_test.shape[1:])),
'dense_1':l1_enc,
'dense_2':l2_enc,
'dense_3':l3_enc,
'y_lat':y_enc,
'z_lat':z_enc
}
depths = {
'pixel':0,
'dense_1':1,
'dense_2':2,
'dense_3':3,
'y_lat':4,
'z_lat':4
}
slug = [(dx,dy,float(lab),float(random.randrange(20))) for dx,dy,rxy,lab in zip(DL.dx[1],DL.dy[1],DL.dtheta[1],DL.y_test)]
image_id = [hashlib.md5(json.dumps(list(p),sort_keys=True).encode('utf-8')).digest().hex() for p in slug]
stim_set = pd.DataFrame({'dx':DL.dx[1]-14,'dy':DL.dy[1]-14,'numeric_label':DL.y_test,'rxy':DL.dtheta[1],'image_id':image_id})
out = raw_to_xr(encodings,depths,stim_set)
# +
out = raw_to_xr(encodings,depths,stim_set)
from collections import OrderedDict
def save_assembly(da, run_dir, fname, **kwargs):
    """Write DataArray *da* to ``run_dir/fname`` as NetCDF.

    MultiIndex coordinates are flattened with reset_index and attrs are
    cleared first, since neither serializes cleanly to NetCDF. Extra
    keyword args (e.g. ``format=``) pass through to ``to_netcdf``.
    """
    flat = da.reset_index(da.coords.dims)
    flat.attrs = OrderedDict()
    target = os.path.join(run_dir, fname)
    with open(target, 'wb') as handle:
        flat.to_netcdf(handle, **kwargs)
save_assembly(out,run_dir=config.model_dir,fname='dataset.nc',
format='NETCDF3_64BIT',
# engine=
# encoding=enc,
)
# +
# z_enc_tr = z_encoder.predict(DL.sx_train,batch_size=config.batch_size)
# y_lat = y_lat_encoder.predict(DL.sx_test,batch_size=config.batch_size)
# y_enc_tr = y_encoder.predict(DL.sx_train,batch_size=config.batch_size)
# -
np.save(os.path.join(config.model_dir,'z_enc'),z_enc)
np.save(os.path.join(config.model_dir,'l1_enc'),l1_enc)
np.save(os.path.join(config.model_dir,'l2_enc'),l2_enc)
np.save(os.path.join(config.model_dir,'y_enc'),y_enc)
y_enc.shape
_lat_vec = np.concatenate([y_enc,z_enc],axis=1)
_lat_vec.shape
z_enc_mu = np.mean(z_enc,axis=0)
z_enc_cov = np.cov(z_enc,rowvar=False)
np.random.multivariate_normal(z_enc_mu,z_enc_cov,size=50).shape
regen = generator.predict(_lat_vec,batch_size=config.batch_size)
rand_im = np.random.randint(0,10000)
plt.imshow(regen[rand_im].reshape(56,56),cmap='gray')
_lat_vec[rand_im]
# +
# enc_dec_samples(DL.x_train,DL.sx_train,z_enc_tr,y_enc_tr,generator)
# -
enc_dec_samples(DL.x_test,DL.sx_test,z_enc,y_enc,generator)
z_enc2 = z_encoder.predict(DL2.sx_test,batch_size=config.batch_size)
y_lat2 = y_encoder.predict(DL2.sx_test,batch_size=config.batch_size)
_lat_vec2 = np.concatenate([y_lat2,z_enc2],axis=1)
regen2 = generator.predict(_lat_vec2,batch_size=config.batch_size)
from src.plot import remove_axes,remove_labels
from src.utils import gen_trajectory
# +
examples = 5
rand_im = np.random.randint(0,10000,size=examples)
fix,axs = plt.subplots(examples,11,figsize=(8,4))
_lat_s = []
regen_s = []
out = gen_trajectory(z_enc[rand_im],z_enc2[rand_im],delta=.25)
out_y = gen_trajectory(y_enc[rand_im],y_lat2[rand_im],delta=.25)
for z,y in zip(out,out_y):
_lat = np.concatenate([y,z],axis=1)
_lat_s.append(_lat)
regen_s.append(generator.predict(_lat,batch_size=config.batch_size))
i=0
for axr,idx in zip(axs,rand_im):
axr[0].imshow(DL.x_test[idx].reshape(28,28),cmap='gray')
axr[1].imshow(DL.sx_test[idx].reshape(56,56),cmap='gray')
axr[2].imshow(regen[idx].reshape(56,56),cmap='gray')
for j,a in enumerate(axr[3:-3]):
a.imshow(regen_s[j][i,:].reshape(56,56),cmap='gray')
# a.imshow(s.reshape(56,56),cmap='gray')
axr[-3].imshow(regen2[idx].reshape(56,56),cmap='gray')
axr[-2].imshow(DL2.sx_test[idx].reshape(56,56),cmap='gray')
axr[-1].imshow(DL2.x_test[idx].reshape(28,28),cmap='gray')
for a in axr:
remove_axes(a)
remove_labels(a)
i+=1
# plt.imshow(regen[rand_im].reshape(56,56),cmap='gray')
# +
# fix.savefig('../../updates/2019-02-05/assets/img/translocate_{}.png'.format(translation_amt))
# -
fdjsakl;fdsa
# +
from collections import Counter
import dit
from dit import Distribution
def mutual_information(X, Y):
    """Empirical mutual information between paired samples X and Y.

    Builds the joint empirical distribution of (x, y) pairs with a Counter,
    then delegates to ``dit.shannon.mutual_information`` on the two
    coordinates (units are whatever dit's default log base yields).

    Parameters
    ----------
    X, Y : iterable of hashable
        Paired observations; must be the same length (zip truncates to the
        shorter one otherwise).
    """
    XY_c = Counter(zip(X, Y))
    # Hoisted: the original recomputed sum(XY_c.values()) inside the dict
    # comprehension for every distinct pair, making normalization quadratic.
    total = float(sum(XY_c.values()))
    XY_pmf = {k: v / total for k, v in XY_c.items()}
    XY_jdist = Distribution(XY_pmf)
    return dit.shannon.mutual_information(XY_jdist, [0], [1])
| notebooks/06.1-MNISTM-Dense-Res.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py39
# language: python
# name: py39
# ---
# # Notebook to Start Pulling it All Together
# ## Salinity top 5 m
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
import pandas as pd
import statsmodels.api as sm
foramt = "{:.2}"
myformat = {'bias': foramt, 'rmse': foramt, 'swillmott': foramt, 'slopedev': foramt, 'const': foramt,
'systematic': foramt, 'nonsystematic':foramt,
'spread': foramt}
def bias(df, obs, mod):
    """Return (n, mean) of model-minus-observation differences.

    n is the count of non-NaN differences; mean is the mean bias
    (positive = model overestimates).
    """
    residual = df[mod].sub(df[obs])
    n_valid = residual.count()
    mean_bias = residual.mean()
    return n_valid, mean_bias
def rmse(df, obs, mod):
    """Root-mean-square error between the model and observation columns."""
    squared_error = (df[mod] - df[obs]) ** 2
    return np.sqrt(squared_error.mean())
def swillmott(df, obs, mod):
    """Willmott skill score in its "1 - d" form: 0 is perfect agreement,
    larger values indicate worse model/observation agreement."""
    obs_mean = df[obs].mean()
    numerator = ((df[mod] - df[obs]) ** 2).sum()
    denominator = (((df[mod] - obs_mean).abs()
                    + (df[obs] - obs_mean).abs()) ** 2).sum()
    return numerator / denominator
def slope_inter(df, obs, mod):
    """Quadratic OLS fit of model values against observations.

    Fits mod ~ const + x1*obs + x2*obs**2 with statsmodels, then splits the
    RMSE into a systematic part (explained by the fit) and a non-systematic
    part (residual scatter around the fit).

    Returns
    -------
    (x2, x1, const, systematic, nonsystematic) : tuple of floats
        Quadratic/linear/constant coefficients and the two RMSE components.
    """
    X = df[obs]
    y = df[mod]
    # Design matrix: [const, obs, obs^2]; statsmodels names them const/x1/x2.
    X2 = X**2
    X = np.column_stack((X, X2))
    X = sm.add_constant(X)
    # Fit and make the predictions by the model
    model = sm.OLS(y, X, missing='drop').fit()
    predictions = model.predict(X)
    # Scatter of the model values around the fitted curve.
    nonsyst = np.sqrt(((y - predictions)**2).mean())
    # Distance of the fitted curve from the 1:1 line (vs observations).
    systematic = np.sqrt(((predictions - df[obs])**2).mean())
    return model.params['x2'], model.params['x1'], model.params['const'], systematic, nonsyst
def spread(df, obs, mod):
    """1 - (model variance / observation variance).

    0 means the model reproduces the observed variability; positive values
    mean the model under-spreads, negative values over-spreads.
    """
    model_var = ((df[mod] - df[mod].mean()) ** 2).mean()
    obs_var = ((df[obs] - df[obs].mean()) ** 2).mean()
    return 1 - model_var / obs_var
def read_pieces(pieces):
    """Load and concatenate CSV files into one DataFrame.

    Any path containing 'ferry' gets a synthetic depth index ``k = 1.5``
    (ferry files carry no depth column of their own). The result uses a
    fresh RangeIndex.
    """
    frames = []
    for path in pieces:
        frame = pd.read_csv(path)
        if 'ferry' in path:
            frame['k'] = 1.5
        frames.append(frame)
    return pd.concat(frames, ignore_index=True)
def plot_and_stats(temp1, name):
    """2-D histogram of model vs observed surface salinity plus a score table.

    Uses only rows with depth index k < 5 ("top 5 m"). Draws the 1:1 line,
    the quadratic fit from slope_inter, and arrow glyphs whose gap encodes
    bias / systematic / non-systematic error magnitudes.

    Parameters
    ----------
    temp1 : pandas.DataFrame
        Must have columns SA (observed), mod_vosaline (model) and k (depth).
    name : str
        Run label used for the plot title and the result row index.

    Returns
    -------
    pandas.DataFrame
        Single row of scores (bias, rmse, swillmott, slopedev, const,
        systematic, nonsystematic, spread) indexed by *name*.
    """
    fig, ax = plt.subplots(1, 1, figsize=(6, 5))
    # Salinity display range (g/kg) for both axes.
    vmax = 34
    vmin = 0
    condition = temp1.k < 5
    title = 'Salinity top 5 m'
    counts, xedges, yedges, color = ax.hist2d(temp1.SA[condition], temp1.mod_vosaline[condition],
                                         bins=np.arange(vmin, vmax, 0.5), norm=LogNorm());
    fig.colorbar(color)
    # Scores from the sibling metric helpers defined above.
    number, tbias = bias(temp1[condition], 'SA', 'mod_vosaline')
    trmse = rmse(temp1[condition], 'SA', 'mod_vosaline')
    tswillmott = swillmott(temp1[condition], 'SA', 'mod_vosaline')
    m2, m, c, syst, nonsyst = slope_inter(temp1[condition], 'SA', 'mod_vosaline')
    tspread = spread(temp1[condition], 'SA', 'mod_vosaline')
    # 1:1 reference line and the quadratic fit curve.
    ax.plot([vmin, vmax], [vmin, vmax], 'w-');
    xr = np.arange(vmin, vmax, 0.5)
    ax.plot(xr, c + m * xr + m2 * xr**2 , 'r-');
    # Paired arrows: the remaining gap between arrowheads visualizes each
    # error magnitude (columns at x = 3, 4, 5).
    ax.arrow(3, 27, 0, 1-tbias/2, head_width=0.5, head_length=0.2, length_includes_head=True)
    ax.arrow(3, 29, 0, -1+tbias/2, head_width=0.5, head_length=0.2, length_includes_head=True)
    ax.arrow(4, 27, 0, 1-syst/2, head_width=0.5, head_length=0.2, length_includes_head=True)
    ax.arrow(4, 29, 0, -1+syst/2, head_width=0.5, head_length=0.2, length_includes_head=True)
    ax.arrow(5, 27, 0, 1-nonsyst/2, head_width=0.5, head_length=0.2, length_includes_head=True)
    ax.arrow(5, 29, 0, -1+nonsyst/2, head_width=0.5, head_length=0.2, length_includes_head=True);
    Cp2 = {'number': number,
       'bias': tbias,
       'rmse': trmse,
       'swillmott': tswillmott,
       'slopedev': 1-m,
       'const': c,
       'systematic': syst,
       'nonsystematic': nonsyst,
       'spread': tspread}
    ax.text(3-0.2, 22, 'bias', rotation=90)
    ax.text(4-0.2, 21, 'systematic', rotation=90)
    ax.text(5-0.2, 20, 'non-systematic', rotation=90)
    ax.set_title(f'{name}, {title}');
    dCp2 = pd.DataFrame(data=Cp2, index=[name])
    return dCp2
pieces = ('/home/sallen/202007/H201812/ObsModel_H201812_ctd_20150101_20151231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_ctd_20160101_20161231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_ctd_20170101_20171231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_psfts_20150101_20151231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_psfts_20160101_20161231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_psfts_20170101_20171231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_pugts_20150101_20151231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_pugts_20160101_20161231.csv',
'/home/sallen/202007/H201812/ObsModel_H201812_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
d201812 = plot_and_stats(temp1, 'H201812')
d201812.style.format(myformat)
pieces = ('/home/sallen/202007/H201905/ObsModel_H201905_ctd_20150101_20151231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_ctd_20160101_20161231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_ctd_20170101_20171231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_psfts_20150101_20151231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_psfts_20160101_20161231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_psfts_20170101_20171231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_pugts_20150101_20151231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_pugts_20160101_20161231.csv',
'/home/sallen/202007/H201905/ObsModel_H201905_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
d201905 = plot_and_stats(temp1, 'H201905')
d201905.style.format(myformat)
pieces = ('/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_ctd_20150101_20151231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TSPUG_20150101-20151231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_ctd_20160101_20161231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TSPUG_20160101-20161231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TS_20170101-20170331.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TS_20170401-20170630.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TS_20170701-20170930.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TS_20171001-20171231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_TSPUG_20170101-20171231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_psfts_20150101_20151231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_psfts_20160101_20161231.csv',
'/home/sallen/202007/202007C-p2/ObsModel_202007Cp2_psfts_20170101_20171231.csv'
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
dCp2 = plot_and_stats(temp1, 'Cp2')
dCp2.style.format(myformat)
pieces = ('/home/sallen/202007/202007D-again/ObsModel_202007D-again_TS_20150101-20150331.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_TS_20150401-20150630.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_TS_20150701-20150930.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_TS_20151001-20151231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_psfts_20150101_20151231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_pugts_20150101_20151231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_ctd_20160101_20161231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_psfts_20160101_20161231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_pugts_20160101_20161231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_ctd_20170101_20171231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_psfts_20170101_20171231.csv',
'/home/sallen/202007/202007D-again/ObsModel_202007D-again_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
Dagain = plot_and_stats(temp1, 'Dagain')
Dagain.style.format(myformat)
pieces = ('/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_ctd_20150101_20151231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_ctd_20160101_20161231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_ctd_20170101_20171231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_psfts_20150101_20151231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_psfts_20160101_20161231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_psfts_20170101_20171231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_pugts_20150101_20151231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_pugts_20160101_20161231.csv',
'/home/sallen/202007/202007D-nociliates/ObsModel_202007D-nociliates_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
DnoC = plot_and_stats(temp1, 'D-noCil')
DnoC.style.format(myformat)
pieces = ('/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_ctd_20150101_20151231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_psfts_20150101_20151231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_pugts_20150101_20151231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_ctd_20160101_20161231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_psfts_20160101_20161231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_pugts_20160101_20161231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_ctd_20170101_20171231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_psfts_20170101_20171231.csv',
'/home/sallen/202007/202007D-lowR/ObsModel_202007D-lowR_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
DlowR = plot_and_stats(temp1, 'D-lowR')
DlowR.style.format(myformat)
pieces = ('/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_ctd_20150101_20151231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_ctd_20160101_20161231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_ctd_20170101_20171231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_TSPUG_20150101-20151231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_pugts_20160101_20161231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_pugts_20170101_20171231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_psfts_20150101_20151231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_psfts_20160101_20161231.csv',
'/home//sallen/202007/202007C-p3/ObsModel_202007Cp3_psfts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
Cp3 = plot_and_stats(temp1, 'Cp3')
Cp3.style.format(myformat)
pieces = ('/home/sallen/202007/202007F/ObsModel_202007F_ctd_20150101_20151231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_psfts_20150101_20151231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_pugts_20150101_20151231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_ctd_20160101_20161231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_psfts_20160101_20161231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_pugts_20160101_20161231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_ctd_20170101_20171231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_psfts_20170101_20171231.csv',
'/home/sallen/202007/202007F/ObsModel_202007F_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
modF = plot_and_stats(temp1, 'F')
modF.style.format(myformat)
pieces = ('/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_ctd_20150101_20150331.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_ctd_20150401_20150630.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_ctd_20150701_20150930.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_ctd_20151001_20151231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_psfts_20150101_20150331.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_psfts_20150401_20150630.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_psfts_20150701_20150930.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_psfts_20151001_20151231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_pugts_20150101_20150331.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_pugts_20150401_20150630.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_pugts_20150701_20150930.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_pugts_20151001_20151231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_ctd_20160101_20161231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_psfts_20160101_20161231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_pugts_20160101_20161231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_ctd_20170101_20171231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_psfts_20170101_20171231.csv',
'/home/sallen/202007/202007G-p1/ObsModel_202007Gp1_pugts_20170101_20171231.csv',
)
temp1 = read_pieces(pieces)
#temp1['SA'] = temp1.SA.fillna(value=temp1['salinity (g/kg)'])
#temp1['SA'] = temp1['salinity (g/kg)']
temp1.drop(columns='Unnamed: 0', inplace=True)
temp1.drop_duplicates(inplace=True)
modGp1 = plot_and_stats(temp1, 'Gp1')
modGp1.style.format(myformat)
def highlight_max_min(s):
    """Styler helper: color text of the largest-|value| entry red and the
    smallest-|value| entry darkgreen; everything else black.

    The original docstring claimed "highlight the maximum ... yellow",
    which did not match the implementation. If one entry is both max and
    min (all values equal in magnitude), red wins, as before.

    Parameters
    ----------
    s : pandas.Series
        One column of scores (compared by absolute value).

    Returns
    -------
    list of str
        One 'color: <name>' CSS declaration per element, for Styler.apply.
    """
    magnitudes = s.abs()
    is_max = magnitudes == magnitudes.max()
    is_min = magnitudes == magnitudes.min()
    colors = []
    for at_max, at_min in zip(is_max, is_min):
        if at_max:
            colors.append('red')
        elif at_min:
            colors.append('darkgreen')
        else:
            colors.append('black')
    return ['color: %s' % c for c in colors]
alltogether = pd.concat([d201905, dCp2, Dagain, DnoC, DlowR, Cp3, modF, modGp1], axis=0)
foramt = "{:.2}"
alltogether.style.format(myformat).apply(highlight_max_min)
# Without Ferry data, new rivers are a definite improvement.
# Systematic error and bias decreased with new bathymetry.
| notebooks/PhysTuning/SurfaceSalinity-NoFerry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Stock Price Movement Prediction Using The Deutsche Börse Public Dataset & Machine Learning - Notebook 3 (Applying A Neural Network)
#
# Here we apply the neural network approach suggested in Notebook 1 to the data set product in Notebook 2.
# ## Important: 20 minute by 20 minute predictions
#
# We adapt the approaches of notebook `03-stock-price-prediction-machine-learning.ipynb` and
# notebook `supporting/simple-linear-model.ipynb` to work on 20 minutes by 20 minute basis rather than on a minute by minute.
# +
import pandas as pd
import numpy as np
import glob, os
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from pandas import DataFrame
from pandas import concat
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
mpl.rcParams['figure.figsize'] = (15, 10) # use bigger graphs
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, Flatten, Reshape, Conv1D, MaxPooling1D, BatchNormalization, LeakyReLU
from keras.layers import LSTM
from keras import regularizers
import datetime
# -
# We read the file we processed in the second notebook
input_file = '/data/processed/cooked_v3.pkl'
df = pd.read_pickle(input_file)
# Checking what stocks are available in the file
all_mnemonics = df.Mnemonic.unique()
df.Mnemonic.unique(), df.Mnemonic.unique().shape[0]
# Next we will select the most liquid stocks from this set.
# +
df['Liquidity'] = df['TradedVolume']*df['EndPrice']
tmp = df.groupby('Mnemonic')['Liquidity'].sum().to_frame()
tmp['LiquidityNormalized'] = 100.0*tmp['Liquidity']/(tmp['Liquidity'].sum())
df = df.drop(columns=['Liquidity'])
tmp.sort_values('LiquidityNormalized', ascending=False).head()
# -
most_liquid_stocks = list(tmp.sort_values('LiquidityNormalized', ascending=False).index[0:50])
print("50 Most liquid stocks:", ", ".join(most_liquid_stocks))
# Check what dates are available in the file. We will split the original set
# into three parts, train, valid, test based on the dates.
# If the dates are ordered chronologically, we take the first dates for the test set,
# then we take the next dates for the validation set and finally we take what is
# left for the test set
def date_part(dt):
    """Return the 'YYYY-MM-DD' prefix of a timestamp's string form."""
    return str(dt).partition(' ')[0]
unique_days = sorted(list(set(map(date_part , list(df.index.unique())))))
len(unique_days), unique_days[0:2], unique_days[-3:-1]
# +
percent_train = 60.0
percent_valid = 5.0
percent_test = 100.0 - percent_train - percent_valid
offset_train = int(len(unique_days)*percent_train/100.0)
offset_test = offset_train + int(len(unique_days)*percent_valid/100.0)
train_valid_days = list(set(unique_days[0:offset_test]))
np.random.seed(484811945)
np.random.shuffle(train_valid_days)
train_days = train_valid_days[0:offset_train]
valid_days = train_valid_days[offset_train:]
test_days = set(unique_days[offset_test:])
'train', len(train_days), 'valid', len(valid_days), 'test', len(test_days)
# -
df['CalcDateTime'] = df.index
df['Date'] = df['CalcDateTime'].dt.strftime("%Y-%m-%d")
df_train = df[df.Date.isin(list(train_days))]
df_valid = df[df.Date.isin(list(valid_days))]
df_test = df[df.Date.isin(list(test_days))]
# We've prepared the train, test and valid sets. Make sure the days do not overlap
df_train[['CalcDateTime']].describe()
df_valid[['CalcDateTime']].describe()
df_test[['CalcDateTime']].describe()
# In the class below we create features from the raw features
# +
enable_non_linear_features = True
def closer_to_with_normalization(pnt, a, b, norm):
    """Signed, normalized "which point is closer" measure.

    Negative when *pnt* is closer to *a*, positive when closer to *b*,
    zero when equidistant; magnitudes are scaled by *norm*. Works
    element-wise on arrays as well as on scalars.
    """
    distance_to_a = np.absolute(pnt - a)
    distance_to_b = np.absolute(pnt - b)
    return (distance_to_a - distance_to_b) / norm
def resample_single_stock(single_stock, interval):
    """Downsample per-minute bars to *interval* bars, keeping traded bins only.

    OHLC-style aggregation: max of highs, min of lows, first open, last
    close, mean close, plus last Mnemonic/Date. HasTrade uses max(), so a
    bin counts as traded if any source minute traded; dropping untraded
    bins is safe because prices were forward-filled upstream.
    """
    aggregated = {
        'MaxPrice': single_stock['MaxPrice'].resample(interval).max(),
        'MinPrice': single_stock['MinPrice'].resample(interval).min(),
        'LastEndPrice': single_stock['EndPrice'].resample(interval).last(),
        'FirstStartPrice': single_stock['StartPrice'].resample(interval).first(),
        'MeanEndPrice': single_stock['EndPrice'].resample(interval).mean(),
        'HasTrade': single_stock['HasTrade'].resample(interval).max(),
        'Mnemonic': single_stock['Mnemonic'].resample(interval).last(),
        'Date': single_stock['Date'].resample(interval).last(),
    }
    bars = pd.DataFrame(aggregated)
    return bars[bars['HasTrade'] == 1.0]
def rev_pct_change(a, t):
    """Change over *t* steps, normalized by the CURRENT value.

    This is the "reversed" percent change: (a - a.shift(t)) / a rather than
    dividing by the past value; leading NaNs are replaced with 0.0.
    """
    lagged = a.shift(t).ffill()
    relative_move = (a - lagged) / a
    return relative_move.fillna(0.0)
def add_non_linear_features(main, resampled, interval):
    """Add sign-based momentum/reversal features for one resampling interval.

    main     : feature frame being built; must already contain
               'x:Direction@<interval>'.
    resampled: bars for *interval* re-aligned onto the main grid.
    Returns *main* with 'x:D@...', 'x:SignDirection-2@...' and 'x:Da@...'
    added and the temporary helper columns dropped.
    """
    # Sign of the per-bar direction (+1 up, -1 down, 0 flat).
    main['tmp:SignDirection@' + interval] = np.sign(main['x:Direction@' + interval])
    # Continuation: two consecutive up bars -> +1, two consecutive down -> -1.
    main['tmp:D1@' + interval] = np.where(
        (main['tmp:SignDirection@' + interval] == 1.0) &
        (main['tmp:SignDirection@' + interval].shift(1) == 1.0), 1.0, 0.0)
    main['tmp:D2@' + interval] = np.where(
        (main['tmp:SignDirection@' + interval] == -1.0) &
        (main['tmp:SignDirection@' + interval].shift(1) == -1.0), -1.0, 0.0)
    main['x:D@' + interval] = main['tmp:D1@' + interval] + main['tmp:D2@' + interval]
    # Sign of the two-bar move: close now vs. open two bars ago.
    main['x:SignDirection-2@' + interval] = np.sign(
        (resampled['LastEndPrice'] - resampled['FirstStartPrice'].shift(2).ffill()
         )).fillna(0.0)
    # Reversal: on a sign flip between consecutive bars, carry the two-bar
    # sign (the tmp:D1/tmp:D2 columns are deliberately reused as scratch).
    main['tmp:D1@' + interval] = np.where(
        (main['tmp:SignDirection@' + interval] == 1.0) &
        (main['tmp:SignDirection@' + interval].shift(1) == -1.0), main['x:SignDirection-2@' + interval], 0.0)
    main['tmp:D2@' + interval] = np.where(
        (main['tmp:SignDirection@' + interval] == -1.0) &
        (main['tmp:SignDirection@' + interval].shift(1) == 1.0), main['x:SignDirection-2@' + interval], 0.0)
    main['x:Da@' + interval] = main['tmp:D1@' + interval] + main['tmp:D2@' + interval]
    # Drop the scratch columns so only x:* features remain.
    main = main.drop(columns=[
        'tmp:SignDirection@' + interval,
        'tmp:D1@' + interval,
        'tmp:D2@' + interval
    ])
    return main
def prepare_single_stock_multi_intervals(single_stock, predicted_price, main_interval, intervals):
    """Build the learning frame for one stock across several time scales.

    single_stock   : fine-grained frame (forward-filled, HasTrade flagged).
    predicted_price: column of the main-interval bars to predict
                     (e.g. 'MeanEndPrice').
    main_interval  : bar size of the prediction grid (e.g. '20Min').
    intervals      : additional (finer) bar sizes contributing features.
    Returns (features, meta): *features* holds y/pseudo_y/baseline/x columns;
    *meta* keeps the raw price columns dropped from the features.
    """
    main = resample_single_stock(single_stock, main_interval)
    # we use the same anchor to normalize all price-difference features
    anchor = main['MeanEndPrice']
    # Target: next bar's price expressed as a return relative to the anchor.
    future_mean_price = main[predicted_price].shift(-1)
    main['y(Return)'] = (future_mean_price - anchor)/anchor
    # do not normalize
    main['pseudo_y(SignReturn)'] = np.sign(main['y(Return)'])
    # actual return won't be normalized
    main['pseudo_y(pctChange)'] = (future_mean_price - anchor)/anchor
    # baseline predictor = previous bar's return; it will be normalized later
    main['baseline'] = main['pseudo_y(pctChange)'].shift(1).fillna(0.0)
    all_intervals = [main_interval] + intervals
    for interval in all_intervals:
        sub = resample_single_stock(single_stock, interval)
        # Re-align the finer bars onto the main grid (last finer bar per main bar).
        resampled = sub.resample(main_interval).last()
        main['x:Direction@' + interval] = 2.0*(resampled['LastEndPrice'] - resampled['FirstStartPrice'])/anchor
        if enable_non_linear_features:
            main = add_non_linear_features(main, resampled, interval)
        # Position of the close between the bar's high and low (anchored).
        main['x:H1@' + interval] = - closer_to_with_normalization(
            resampled['LastEndPrice'],
            resampled['MaxPrice'],
            resampled['MinPrice'],
            anchor)
        main['x:EndToMean@' + interval] = (resampled['LastEndPrice'] - resampled['MeanEndPrice'])/anchor
        main['x:AdjustedPctChange@' + interval] = (resampled['LastEndPrice'] - resampled['MeanEndPrice'])/resampled['MeanEndPrice']
        main['x:RevPctChange@' + interval] = rev_pct_change(resampled['LastEndPrice'], 1)
    main = main[main['HasTrade'] == 1.0]
    # Keep raw price columns separately before dropping them from the features.
    meta = main[['MeanEndPrice', 'HasTrade', 'LastEndPrice']]
    main = main.drop(columns = [
        'MaxPrice',
        'MinPrice',
        'LastEndPrice',
        'FirstStartPrice',
        'MeanEndPrice',
        'HasTrade'
    ])
    return main, meta
# -
class NARemover:
    """Drops rows containing NA values and logs the fraction removed."""

    def __init__(self, name):
        # Label used in the log line (typically the stock mnemonic).
        self.name = name

    def transform(self, single_stock):
        """Return *single_stock* without NA rows, printing the share dropped."""
        n_before = single_stock.shape[0]
        cleaned = single_stock.dropna()
        n_after = cleaned.shape[0]
        # The tiny epsilon guards against division by zero on empty input.
        dropped_pct = 100.0*(n_before - n_after)/(0.0001 + n_before)
        print("{}: Dropped {:2.2f} % of records due to NA".format(self.name, dropped_pct))
        return cleaned
# The variable we use for predictions start with `x(`, while the variables that should be predicted start with `y(`
# +
# Quick smoke test of the feature pipeline on a single stock (BMW).
dummy = df_train[df_train.Mnemonic == 'BMW'].copy()
dummy = dummy[dummy.HasTrade == 1.0]
#main_interval_dummy, intervals_dummy = '20Min', ['5Min', '10Min', '15Min']
main_interval_dummy, intervals_dummy = '20Min', ['5Min', '10Min']
dummy, meta = prepare_single_stock_multi_intervals(dummy, 'MeanEndPrice', main_interval_dummy, intervals_dummy)
dummy.head()
# +
class TrainingSet:
    """Bundle of model inputs X, targets y, the naive baseline column and
    the full feature frame they were extracted from."""

    def __init__(self, X, y, orig_df):
        self.original_df = orig_df
        # Kept as a plain array for direct comparison against predictions.
        self.baseline = orig_df['baseline'].values
        self.X = X
        self.y = y
class TrainingSetBuilder:
    """Extracts the x-feature matrix and the sign target from a feature frame."""

    def transform(self, single_stock):
        """Build a TrainingSet from every column named 'x(...' or 'x:...'."""
        x_features = [col for col in single_stock.columns
                      if col.startswith('x(') or col.startswith('x:')]
        X = single_stock[x_features].values
        y = single_stock[['pseudo_y(SignReturn)']].values
        return TrainingSet(X, y, single_stock)
# +
class Predictions:
    """Wraps raw model outputs together with the set they were produced on
    and provides an evaluation report: correlations with the targets,
    accuracy at percentile cutoffs, sign-match statistics and total returns
    of simple trading strategies."""

    def __init__(self, predictions, training_set):
        # predictions: array of shape (n, 1); training_set: TrainingSet-like
        # object exposing .y and .original_df.
        self.predictions = predictions
        self.training_set = training_set

    def evaluate(self):
        """Return a dict with 'corr', 'accuracy_at_cutoff', 'matches' and
        'strategies'.

        NOTE: the 'random' strategy draws fresh random signs on every call,
        so that single entry is not deterministic.
        """
        single_feature = 'baseline'
        stats_df = pd.DataFrame({
            'predictions': self.predictions[:,0],
            'single_feature_pred': self.training_set.original_df[single_feature].values,
            'pseudo_y(SignReturn)': self.training_set.y[:,0],
            'pseudo_y(pctChange)': self.training_set.original_df['pseudo_y(pctChange)'].values,
            'y(Return)': self.training_set.original_df['y(Return)'].values})
        # Correlation of model and baseline predictions with the targets.
        corr = stats_df. \
            corr()[['predictions', 'single_feature_pred']]. \
            iloc[1:]
        pred_signs = np.sign(stats_df['predictions'])
        y_signs = np.sign(stats_df['y(Return)'])
        # A row "has an answer" when both prediction and target are non-zero.
        has_answer = np.absolute(pred_signs * y_signs).sum()
        correct = np.where(pred_signs * y_signs == 1.0, 1.0, 0.0).sum()
        thresholds = []
        accuracy = []
        correct_lst = []
        errors = []
        percent_has_answer = []
        abs_has_answer = []
        achieved_returns = []
        preds = stats_df['predictions']
        # Only act on predictions in the outer tails; sweep the tail size d.
        for d in range(5, 46, 5):
            low = np.percentile(preds, d)
            high = np.percentile(preds, 100 - d)
            thresholded = np.where(preds > high, 1.0, np.where(preds < low, -1.0, 0.0))
            c = np.where(np.sign(thresholded)*np.sign(y_signs) == 1.0, 1.0, 0.0).sum()
            e = np.where(np.sign(thresholded)*np.sign(y_signs) == -1.0, 1.0, 0.0).sum()
            achieved_ret = (stats_df['pseudo_y(pctChange)']*thresholded).sum()
            correct_lst.append(c)
            errors.append(e)
            accuracy.append(c/(c + e))
            percent_has_answer.append(100.0*(c + e)/pred_signs.shape[0])
            abs_has_answer.append((c + e))
            achieved_returns.append(achieved_ret)
            thresholds.append(d)
        # BUG FIX: this was a bare `DataFrame(...)`; the notebook imports
        # pandas as `pd`, so the unqualified name is a NameError.
        at_cutoff = pd.DataFrame({
            'thresholds': thresholds,
            'accuracy': accuracy,
            'percent_with_answer': percent_has_answer,
            'absolute_has_answer': abs_has_answer,
            'achieved_returns': achieved_returns,
            'correct': correct_lst,
            'errors': errors
        })
        at_cutoff['achieved_norm_returns'] = at_cutoff['achieved_returns']/at_cutoff['absolute_has_answer']
        ret = stats_df['pseudo_y(pctChange)']
        # Reference strategies to put the achieved return in context.
        rand_feature = np.where(np.random.rand(ret.shape[0]) > 0.5, 1.0, -1.0)
        random_returns = (ret * rand_feature).sum()
        always_up_returns = (ret*1.0).sum()
        always_down_returns = (ret*-1.0).sum()
        omnicient_returns = (np.absolute(ret)).sum()
        achieved = (ret * pred_signs).sum()
        return {
            'corr': corr,
            'accuracy_at_cutoff': at_cutoff,
            'matches': {
                'percent_correct': 100*correct/has_answer,
                # NOTE: despite the name this is a fraction, not a percent;
                # kept as-is for compatibility with downstream cells.
                'percent_has_answer': has_answer/pred_signs.shape[0],
                'absolute_with_answer': has_answer,
                'size': pred_signs.shape[0]
            },
            'strategies': {
                'omniscient': omnicient_returns,
                'random': random_returns,
                'always_up': always_up_returns,
                'always_down': always_down_returns,
                'achieved': achieved,
                'num_trials': np.absolute(pred_signs).sum()
            }
        }
class MLModel:
    """Small fully connected Keras regressor (10-5-1, L2-regularized).

    Relies on notebook-level Keras imports (Sequential, Dense, regularizers)
    and on pyplot for the loss curves; mirrors LinearModel's interface.
    """

    def __init__(self):
        self.model = None

    def fit(self, training_set, valid_set = None):
        """Train the network; when no validation set is given, validate on
        the training data itself. Plots train/valid loss after fitting."""
        train_X, train_y = training_set.X, training_set.y
        if valid_set is None:
            valid_X, valid_y = train_X, train_y
        else:
            valid_X, valid_y = valid_set.X, valid_set.y
        net = Sequential()
        net.add(Dense(10, activation='relu', input_shape =(train_X.shape[1],),
                      kernel_regularizer=regularizers.l2(0.1)))
        net.add(Dense(5, activation='relu', kernel_regularizer=regularizers.l2(0.1)))
        net.add(Dense(1))
        net.compile(loss='mean_squared_error', optimizer='adam')
        self.model = net
        # fit network
        history = net.fit(train_X, train_y, epochs=150, batch_size=2500,
                          validation_data=(valid_X, valid_y), verbose=2, shuffle=True)
        # plot history
        pyplot.plot(history.history['loss'], label='train')
        pyplot.plot(history.history['val_loss'], label='valid')
        pyplot.legend()
        pyplot.show()

    def transform(self, input_set):
        """Run the fitted network on *input_set* and wrap the result."""
        return Predictions(self.model.predict(input_set.X), input_set)

    def fit_transform(self, training_set, valid_set):
        """Fit, then return (train predictions, valid predictions)."""
        self.fit(training_set, valid_set)
        return self.transform(training_set), self.transform(valid_set)
# +
def make_inverter(m, s, th):
    """Build a function undoing the (x - m) / s standardization.

    *th* is unused here; it is accepted only for signature symmetry with
    make_transformer. Note clipping cannot be inverted.
    """
    def invert(y):
        return y * s + m
    return invert
def make_transformer(m, s, th):
    """Build a function that standardizes by (x - m) / s and clips the
    result to the interval [-th, th]."""
    def transform(fn):
        z = (fn - m) / s
        return np.where(z > th, th, np.where(z < -th, -th, z))
    return transform
def fit_normalize_features(prepared_single_stock):
    """Standardize and clip every feature column ('x(...', 'x:...') and the
    baseline column, fitting the statistics on this frame.

    Returns (normalized frame, transformers dict, inverters dict) so the
    same transform can later be applied to validation/test data.
    """
    clip_at = 2.5
    frame = prepared_single_stock.copy()
    inverters = {}
    transformers = {}
    for col in frame.columns:
        if not (col.startswith('x(') or col.startswith('x:') or col == 'baseline'):
            continue
        values = frame[col]
        # Epsilon keeps the scale finite for constant columns.
        spread = 0.0000001 + np.std(values.values)
        center = np.mean(values.values)
        inverters[col] = make_inverter(center, spread, clip_at)
        transformers[col] = make_transformer(center, spread, clip_at)
        frame[col] = transformers[col](values)
    return frame, transformers, inverters
def normalize_features(prepared_single_stock, transformers):
    """Apply previously fitted *transformers* to the feature columns
    ('x(...', 'x:...') and the baseline column of a copy of the frame."""
    frame = prepared_single_stock.copy()
    for col in frame.columns:
        if col.startswith('x(') or col.startswith('x:') or col == 'baseline':
            frame[col] = transformers[col](frame[col])
    return frame
# +
# Build per-stock feature frames for each split, then concatenate them into
# one training/validation/test set apiece.
combined_training_set = []
combined_valid_set = []
combined_test_set = []
#main_interval, intervals = '20Min', ['5Min', '10Min', '15Min']
main_interval, intervals = '20Min', ['5Min', '10Min']
#main_interval, intervals = '4H', ['30Min', '1H', '2H']
# Normalization statistics are fitted on the training split only and reused
# for valid/test, avoiding look-ahead leakage.
normalizers = {}
for mnemonic in most_liquid_stocks:
    single_stock = df_train[df_train.Mnemonic == mnemonic].copy()
    single_stock = single_stock[single_stock.HasTrade == 1.0]
    single_stock, meta = prepare_single_stock_multi_intervals(single_stock, 'MeanEndPrice', main_interval, intervals)
    single_stock = NARemover(mnemonic).transform(single_stock)
    single_stock, single_stock_transformer, _ = fit_normalize_features(single_stock)
    normalizers[mnemonic] = single_stock_transformer
    combined_training_set.append(single_stock)
    print("train", single_stock.shape)
    single_stock = df_valid[df_valid.Mnemonic == mnemonic].copy()
    single_stock = single_stock[single_stock.HasTrade == 1.0]
    single_stock, meta = prepare_single_stock_multi_intervals(single_stock, 'MeanEndPrice', main_interval, intervals)
    single_stock = NARemover(mnemonic).transform(single_stock)
    single_stock = normalize_features(single_stock, normalizers[mnemonic])
    combined_valid_set.append(single_stock)
    print("valid", single_stock.shape)
    single_stock = df_test[df_test.Mnemonic == mnemonic].copy()
    single_stock = single_stock[single_stock.HasTrade == 1.0]
    single_stock, meta = prepare_single_stock_multi_intervals(single_stock, 'MeanEndPrice', main_interval, intervals)
    single_stock = NARemover(mnemonic).transform(single_stock)
    single_stock = normalize_features(single_stock, normalizers[mnemonic])
    combined_test_set.append(single_stock)
    print("test", single_stock.shape)
# Stack all stocks into single frames and extract X / y matrices.
combined_training_set_df = pd.concat(combined_training_set, axis=0)
training_set = TrainingSetBuilder().transform(combined_training_set_df)
combined_valid_set_df = pd.concat(combined_valid_set, axis=0)
valid_set = TrainingSetBuilder().transform(combined_valid_set_df)
combined_test_set_df = pd.concat(combined_test_set, axis=0)
test_set = TrainingSetBuilder().transform(combined_test_set_df)
print(training_set.original_df.shape, valid_set.original_df.shape, test_set.original_df.shape)
# +
from sklearn.linear_model import Ridge
class LinearModel:
    """Ridge regression on the -1/+1 sign targets; mirrors MLModel's
    fit/transform/fit_transform interface."""

    def __init__(self):
        self.model = None

    def fit(self, training_set, valid_set = None):
        """Fit the Ridge model. The validation set is accepted only for
        interface compatibility; Ridge does not use it."""
        train_X, train_y = training_set.X[:,:], training_set.y
        if valid_set is None:
            valid_X, valid_y = train_X, train_y
        else:
            valid_X, valid_y = valid_set.X, valid_set.y
        self.model = Ridge(alpha=1.5)
        # train_y should be -1/+1
        self.model.fit(train_X, train_y)

    def transform(self, input_set):
        """Predict on *input_set* and wrap the result in Predictions."""
        return Predictions(self.model.predict(input_set.X), input_set)

    def fit_transform(self, training_set, valid_set):
        """Fit, then return (train predictions, valid predictions)."""
        self.fit(training_set, valid_set)
        return self.transform(training_set), self.transform(valid_set)
# Fit the linear model and inspect sign-match statistics on each split.
model = LinearModel()
train_predictions, valid_predictions = model.fit_transform(training_set, valid_set)
print("Fitted model")
# -
# Bare expressions below are notebook cells displaying the evaluation dicts.
train_predictions.evaluate()['matches']
valid_predictions.evaluate()['matches']
test_predictions = model.transform(test_set)
test_predictions.evaluate()['matches']
train_predictions.evaluate()['accuracy_at_cutoff']
# +
def pred_baseline(d):
    """Evaluate the naive baseline (previous bar's return) on the same set
    as the given Predictions object *d* and return its evaluation dict."""
    baseline_values = d.training_set.original_df['baseline'].values
    as_column = baseline_values.reshape((baseline_values.shape[0], 1))
    return Predictions(as_column, d.training_set).evaluate()
def readable_summary(which_set, p):
    """Print a plain-language interpretation of the achieved returns of the
    predictions *p* versus the naive baseline, on the split *which_set*.

    Fixes two typos in the printed text ("preditive" -> "predictive",
    "a financial advise" -> "financial advice"); numbers are unchanged.
    """
    achieved = p.evaluate()['strategies']['achieved']
    achieved_baseline = pred_baseline(p)['strategies']['achieved']
    # Mean absolute one-bar return: the per-play gain of a perfect guesser.
    per_change = np.mean(np.absolute(p.training_set.original_df['pseudo_y(pctChange)']))
    n = p.training_set.original_df.shape[0]
    print ("""Conclusion:
If you play {} times on the {} with 1 EUR and you always guess the movement,
ignoring all transactions cost (which would likely be significant), you will make {}.
Instead you make {} or {} percent of the ideally achievable.
If you use the baseline you will make {} or {} percent of ideal.
Disclaimer:
It is important to note that the purpose of this evaluation is to make
an interpretation of prediction accuracy in the context of financial gains.
However, the current evaluation does not mean that such gains are practically
achievable. For example, slippage is not taken into account.
For proper evaluation one should run backtests.
It's important to know that a predictive model in itself is insufficient
as there are other components.
Those experiments are not financial advice.
""".format(
        n, which_set, n * per_change, achieved, 100.0*achieved/(n*per_change),
        achieved_baseline, 100.0*achieved_baseline/(n*per_change)))
readable_summary('Training Set', train_predictions)
# -
# Strategy returns and cutoff accuracy for model vs. baseline on each split.
train_predictions.evaluate()['strategies']
pred_baseline(valid_predictions)['strategies']
valid_predictions.evaluate()['accuracy_at_cutoff']
# ### Baseline on Testset: Accuracy at different levels
# this is for the baseline
pred_baseline(test_predictions)['accuracy_at_cutoff']
# ### Baseline on Testset: Comparison of Returns
# this is for the baseline
pred_baseline(test_predictions)['strategies']
# ### ML Model on Testset: Accuracy at different levels
# this is for ML
test_predictions.evaluate()['accuracy_at_cutoff']
# ### ML Model on Testset: Comparison of Returns
# for ML
Predictions(test_predictions.predictions, test_predictions.training_set).evaluate()['strategies']
readable_summary('Test set', test_predictions)
# ### Analysis by Mnemonic and by Date
#
# It is known that when it comes to stock predictions, different stocks and different days will exhibit different performance
def plot_accuracy_by(grouping_feature, predictions):
    """Group predictions by *grouping_feature* ('Mnemonic' or 'Date'),
    compare model vs. baseline sign-accuracy per group, plot both curves
    and return the per-group frame."""
    df = predictions.training_set.original_df
    s = df[['Date', 'Mnemonic', 'pseudo_y(SignReturn)']].copy()
    s['Predictions'] = predictions.predictions
    s['Baseline'] = df['baseline']
    def agg(group):
        pred = group['Predictions']
        baseline = group['Baseline']
        rets = group['pseudo_y(SignReturn)']
        # BUG FIX: the correlation used to be assigned to `c` and then
        # immediately overwritten by the correct-count below, so the
        # reported "corr" was actually a count. Keep them separate.
        corr_value = pred.corr(rets)
        c = np.where(np.sign(pred)*np.sign(rets) == 1.0, 1.0, 0.0).sum()
        e = np.where(np.sign(pred)*np.sign(rets) == -1.0, 1.0, 0.0).sum()
        acc = c/(c + e)
        c_baseline = np.where(np.sign(baseline)*np.sign(rets) == 1.0, 1.0, 0.0).sum()
        e_baseline = np.where(np.sign(baseline)*np.sign(rets) == -1.0, 1.0, 0.0).sum()
        acc_baseline = c_baseline/(c_baseline + e_baseline)
        l = group.shape[0]
        return {"corr": corr_value, 'size': l, 'accuracy': acc, 'acc_baseline': acc_baseline}
    f = s.groupby(grouping_feature).apply(agg).to_frame("agg")
    # Unpack the per-group dicts into proper columns.
    f['AccuracyPred'] = f['agg'].map(lambda i: i['accuracy'])
    f['AccuracyBaseline'] = f['agg'].map(lambda i: i['acc_baseline'])
    f['AccPred - AccBaseline'] = f['AccuracyPred'] - f['AccuracyBaseline']
    f = f.drop(columns=['agg'])
    f = f[f.index != '2017-10-14'] # remove this date which has one data point
    f[['AccuracyPred', 'AccuracyBaseline']].plot()
    return f
# Per-stock accuracy: best and worst 20 mnemonics on the test set.
perf = plot_accuracy_by('Mnemonic', test_predictions)
perf.sort_values(by='AccuracyPred', ascending=False).head(20)
perf.sort_values(by='AccuracyPred', ascending=False).tail(20)
# Per-day accuracy: best and worst 20 dates on the test set.
perf = plot_accuracy_by('Date', test_predictions)
perf.sort_values(by='AccuracyPred', ascending=False).head(20)
perf.sort_values(by='AccuracyPred', ascending=False).tail(20)
# !echo "Last run on `date`"
| notebooks/supporting/adapted-03-stock-price-prediction-machine-learning-20-minutes-intervals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Tà i liá»u nà y mang giấy phép Creative Commons Attribution (CC BY). (c) <NAME> 06/2019.
#
# [@SangVn](https://github.com/SangVn) [@VnCFD](https://vncfdgroup.wordpress.com/)
#
# *Thá»±c hà nh CFD vá»i Python!*
# # Bà i 22. Phương pháp tÃnh dòng qua mặt, xá» lÜ kết quả, module solver
#
# ## 1. Phương pháp tÃnh dòng qua mặt
# Quay lại vá»i bà i toán Riemann hay còn gá»i là bà i toà n phân rã gián Äoạn Äã ÄÆ°á»£c giá»i thiá»u á» phần 2 vá»i phương pháp giải Godunov:
# $$U_t + F(U)_x = 0 \qquad(1)\\
# U(t=0)=
# \begin{cases}
# U_L; \quad x<x_0 \\
# U_R; \quad x>x_0
# \end{cases}
# $$
#
# Sá» dụng cÃŽng thức xấp xá»:
# $$\frac{U_i^{n+1} - U_i^n}{\Delta t} + \frac{F_{i+1/2} - F_{i-1/2}}{\Delta x} = 0 \qquad(2)$$
# Theo phương pháp Godunov dòng qua mặt $F_{i+1/2}=F(U_{i+1/2})$. Ngoà i ra, có thá» tÃnh dòng F bằng rất nhiá»u phương pháp khác như HLL, Roe, Osher... Hãy Äá»c quyá»n sách cá»§a `Eleuterio F. Toro Riemann Solvers and Numerical Methods for Fluid Dynamics`. Trong khuÃŽn khá» phần 3 ta sẜ sá» dụng thêm phương pháp Roe. Vá» code, nếu bạn biết ngÃŽn ngữ FORTRAN có thá» tìm hiá»u thêm trên trang [I do like CFD](http://www.cfdbooks.com/) cá»§a `<NAME>`. Phần python code dưá»i Äây ÄÆ°á»£c chuyá»n Äá»i từ fortran code cá»§a tác giả trên.
# +
# coding: utf-8
# Module fluxes.py
# Nguyên mẫu FORTRAN - <NAME>
import numpy as np
def flux_roe(side, PL, PR):
    """Roe approximate-Riemann flux for the 2-D Euler equations across one face.

    side : face object exposing .normal = (nx, ny) (unit normal) and .area.
    PL/PR: left/right primitive states [rho, vx, vy, p].
    Returns the numerical flux of the conserved variables scaled by the face
    area. Port of Katate Masatsuka's FORTRAN implementation (cfdbooks.com).
    """
    gamma = 1.4  # heat-capacity ratio of a calorically perfect gas
    nx = side.normal[0]
    ny = side.normal[1]
    # Unit tangent, 90 degrees counter-clockwise from the normal.
    tx = -ny
    ty = nx
    # Left state: normal/tangential velocity, sound speed, total enthalpy.
    rhoL, vxL, vyL, pL = PL[0], PL[1], PL[2], PL[3]
    vnL = vxL * nx + vyL * ny
    vtL = vxL * tx + vyL * ty
    aL = (gamma * pL / rhoL) ** 0.5
    HL = pL / rhoL * gamma / (gamma - 1) + 0.5 * (vxL ** 2 + vyL ** 2)
    # Right state.
    rhoR, vxR, vyR, pR = PR[0], PR[1], PR[2], PR[3]
    vnR = vxR * nx + vyR * ny
    vtR = vxR * tx + vyR * ty
    aR = (gamma * pR / rhoR) ** 0.5
    HR = pR / rhoR * gamma / (gamma - 1) + 0.5 * (vxR ** 2 + vyR ** 2)
    # Roe (density-weighted) averages.
    RT = (rhoR / rhoL) ** 0.5
    rho = RT * rhoL
    vx = (vxL + RT * vxR) / (1.0 + RT)
    vy = (vyL + RT * vyR) / (1.0 + RT)
    H = (HL + RT * HR) / (1.0 + RT)
    a = ((gamma - 1.0) * (H - 0.5 * (vx * vx + vy * vy))) ** 0.5
    vn = vx * nx + vy * ny
    vt = vx * tx + vy * ty
    # Wave strengths: projection of the jump onto the characteristic fields.
    drho = rhoR - rhoL
    dp = pR - pL
    dvn = vnR - vnL
    dvt = vtR - vtL
    dV = [0.0, 0.0, 0.0, 0.0]
    dV[0] = (dp - rho * a * dvn) / (2.0 * a * a)  # left acoustic wave
    dV[1] = rho * dvt / a                         # shear wave
    dV[2] = drho - dp / (a * a)                   # entropy wave
    dV[3] = (dp + rho * a * dvn) / (2.0 * a * a)  # right acoustic wave
    # Wave speeds (absolute eigenvalues).
    ws = [0.0, 0.0, 0.0, 0.0]
    ws[0] = abs(vn - a)
    ws[1] = abs(vn)
    ws[2] = abs(vn)
    ws[3] = abs(vn + a)
    # Harten's Entropy Fix JCP(1983), 49, pp357-393:
    # only for the nonlinear (acoustic) fields, to avoid vanishing
    # dissipation at sonic points.
    dws = [0.2, 0.0, 0.0, 0.2]
    if (ws[0] < dws[0]): ws[0] = 0.5 * (ws[0] * ws[0] / dws[0] + dws[0])
    if (ws[3] < dws[3]): ws[3] = 0.5 * (ws[3] * ws[3] / dws[3] + dws[3])
    # Right eigenvectors of the normal flux Jacobian, one column per wave.
    Rv = np.zeros((4, 4))
    Rv[0, 0] = 1.0
    Rv[1, 0] = vx - a * nx
    Rv[2, 0] = vy - a * ny
    Rv[3, 0] = H - vn * a
    Rv[0, 1] = 0.0
    Rv[1, 1] = a * tx
    Rv[2, 1] = a * ty
    Rv[3, 1] = vt * a
    Rv[0, 2] = 1.0
    Rv[1, 2] = vx
    Rv[2, 2] = vy
    # BUG FIX: this entry was written to Rv[3, 1], which both clobbered the
    # shear-wave energy component above and left Rv[3, 2] at zero. The energy
    # row of the entropy-wave eigenvector is the kinetic energy.
    Rv[3, 2] = 0.5 * (vx * vx + vy * vy)
    Rv[0, 3] = 1.0
    Rv[1, 3] = vx + a * nx
    Rv[2, 3] = vy + a * ny
    Rv[3, 3] = H + vn * a
    # Dissipation term: sum over waves of |lambda_j| * dV_j * r_j.
    diss = np.zeros(4)
    for i in range(4):
        for j in range(4):
            diss[i] += ws[j] * dV[j] * Rv[i, j]
    # Physical fluxes evaluated from each side.
    fL = np.zeros(4)
    fL[0] = rhoL * vnL
    fL[1] = fL[0] * vxL + pL * nx
    fL[2] = fL[0] * vyL + pL * ny
    fL[3] = fL[0] * HL
    fR = np.zeros(4)
    fR[0] = rhoR * vnR
    fR[1] = fR[0] * vxR + pR * nx
    fR[2] = fR[0] * vyR + pR * ny
    fR[3] = fR[0] * HR
    # Roe flux: central average minus characteristic dissipation, times area.
    Roe = 0.5 * (fL + fR - diss) * side.area
    return Roe
# -
# Hãy lưu phần code trên và o file `fluxes.py` cÅ©ng như hãy tìm hiá»u và thêm và o những phương pháp khác tÃnh flux.
#
# ## 2. XỠlÜ kết quả
#
# Chúng ta cần các hà m Äá» xá» lÜ kết quả hay trưá»ng thÃŽng sá» dòng chảy ban Äầu. Các hà m cần thiết gá»m: ghi, Äá»c và biá»u diá»
n. Các hà m nà y sẜ ÄÆ°á»£c lưu trong module `functions.py`. ThÃŽng sá» dòng chảy có thá» lưu á» ba dạng (tìm hiá»u: `Tecplot Data Format Guide`):
# - lưu tại tâm ÃŽ lưá»i: tá»a Äá» tâm ÃŽ lưá»i, P tại tâm (cell_data)
# - lưu theo kiá»u block: tạo Äá» Äiá»m lưá»i, P tại tâm (block_data)
# - lưu tại Äiá»m lưá»i: tá»a Äá» Äiá»m lưá»i, P tại Äiá»m lưá»i (point_data)
#
# Trong cấu trúc dữ liá»u Cell giá trá» P ÄÆ°á»£c xác tại tâm ÃŽ lưá»i, ta dá»
dà ng viết hà m lưu lưá»i theo cách má»t và hai. Tuy nhiên Äá» lưu dữ liá»u theo cách ba cần có hà m xác Äá»nh P tại các Äiá»m lưá»i. Viá»c nà y khá phức tạp nên ta sẜ dùng ParaView Äá» chuyá»n từ dạng hai vá» dạng ba (dùng filter: `Cell Data To Point Data`) thay vì tá»± viết code.
#
# **Thêm và o module functions.py:**
# +
# Compute the Mach number of a primitive state.
def Mach(P):
    """Mach number of P = [rho, u, v, p].

    Relies on a module-level `gamma` (heat-capacity ratio) — assumed to be
    defined alongside this function in functions.py; confirm when reusing.
    """
    speed_of_sound = (gamma * P[3] / P[0]) ** 0.5
    velocity_magnitude = (P[1] * P[1] + P[2] * P[2]) ** 0.5
    return velocity_magnitude / speed_of_sound
R_gas = 287.052873836  # specific gas constant of dry air, J/(kg*K)

# Ideal-gas equation of state: density from temperature and pressure.
def rho(T, p):
    """Density rho = p / (R_gas * T)."""
    return p / (R_gas * T)

# Ideal-gas equation of state: temperature from density and pressure.
def Temperature(rho, p):
    """Temperature T = p / (R_gas * rho)."""
    return p / (R_gas * rho)
# Save cell-centre coordinates and the flow state at each cell centre
# (Tecplot "Two-Dimensional Field Plots", POINT packing).
def write_cell_data(cells, iter, time, filename):
    """Write one Tecplot POINT zone: a line per cell with the centre (X, Y)
    and the primitive state [rho, u, v, p] plus Mach number and temperature.

    cells     : iterable of cells exposing .center and .P; cells.size is
                (Nj, Ni) of the structured grid (I gets size[1], J size[0]).
    iter, time: iteration counter and physical time, stored in the title so
                read_field can restore them later.
    """
    print('\nWrite cell data to: %s\n' % filename)
    f = open(filename, 'w')
    f.write('TITLE = "vncfd field: iter= %d, time= %f"\n' % (iter, time))
    f.write('VARIABLES = "X", "Y", "rho", "u", "v", "p", "Mach", "T"\n')
    f.write('ZONE T="1", I=%d, J=%d, DATAPACKING=POINT\n' % (cells.size[1], cells.size[0]))
    for cell in cells:
        C = cell.center
        P = cell.P
        M = Mach(P)  # derived Mach number
        T = Temperature(P[0], P[3])  # derived temperature from rho and p
        f.write('%f %f %f %f %f %f %f %f\n' % (C[0], C[1], P[0], P[1], P[2], P[3], M, T))
    f.close()
# Save grid-point coordinates plus cell-centred flow values
# (Tecplot BLOCK packing, "Cell-Centered Data").
def write_block_data(cells, points, iter, time, filename):
    """Write a Tecplot BLOCK zone: X/Y at grid points, all other variables
    located at cell centres (VARLOCATION=CELLCENTERED).

    points is assumed to be an (Nj, Ni, 2) array of node coordinates.
    Values are whitespace-delimited, so the missing newline between the
    Mach and T runs below is harmless for Tecplot/ParaView readers.
    """
    print('\nWrite block data to: %s\n' % filename)
    f = open(filename, 'w')
    f.write('TITLE = "vncfd field: iter= %d, time= %f"\n' % (iter, time))
    f.write('VARIABLES = "X", "Y", "rho", "u", "v", "p", "Mach", "T"\n')
    f.write('ZONE T="1", I=%d, J=%d, DATAPACKING=BLOCK, VARLOCATION=([3,4,5,6,7,8]=CELLCENTERED)\n' % (points.shape[1], points.shape[0]))
    # Node coordinates, one flat run per variable (BLOCK layout).
    X_p, Y_p = points[:, :, 0].ravel(), points[:, :, 1].ravel()
    for x in X_p: f.write('%f ' % x)
    f.write('\n')
    for y in Y_p: f.write('%f ' % y)
    f.write('\n')
    # Primitive variables rho, u, v, p at the cell centres.
    for i in range(4):
        for cell in cells: f.write('%f ' % cell.P[i])
        f.write('\n')
    # Derived Mach number and temperature at the cell centres.
    for cell in cells:
        M = Mach(cell.P)
        f.write('%f ' % M)
    for cell in cells:
        T = Temperature(cell.P[0], cell.P[3])
        f.write('%f ' % T)
    f.write('\n')
    f.close()
# Read the initial flow state (rho, u, v, p) back from a cell_data file.
def read_field(cells, filename):
    """Restore cell states from a file written by write_cell_data.

    Parses iter and time out of the TITLE line, then loads the rho/u/v/p
    columns and assigns them (and the conserved variables) to each cell.
    Relies on notebook-level imports for loadtxt (numpy) and P2U
    (primitive-to-conserved conversion from the project) — TODO confirm
    both are in scope where this module is used.
    Returns (iter, time).
    """
    print('\nRead cell data from: %s\n' % filename)
    f = open(filename, 'r')
    line = f.readline()  # first line holds iter and time in the zone title
    words = line.split()  # split the title line into whitespace-separated words
    time = float(words[-1].replace('"', ''))  # last word, minus the closing quote
    iter = int(words[-3].replace(',', ''))  # third word from the end, minus the comma
    f.close()
    data = loadtxt(filename, skiprows=3, usecols=(2, 3, 4, 5)) # rho u v p
    for i in range(cells.len):
        cells[i].P = data[i]
        cells[i].U = P2U(data[i])  # conserved variables from primitives
    return iter, time
# Plot the flow field with pyplot: porc=0 -> pcolor, porc=1 -> contourf.
def show_field(cells, points, porc=0):
    """Show rho, u, v, p in a 2x2 panel.

    porc=0 uses pcolor (needs node coordinates, cell-centred values);
    porc=1 uses contourf (needs centre coordinates and centre values).
    Relies on notebook-level imports for array (numpy) and plt.
    """
    Nj, Ni = cells.size[0], cells.size[1]
    if porc == 0:
        # pcolor/pcolormesh need the node coordinates with values at centres.
        X_p, Y_p = points[:, :, 0], points[:, :, 1]
    else:
        # contour/contourf/quiver/streamplot need coordinates at the centres.
        centers = array([cell.center for cell in cells]).reshape((Nj, Ni, 2))
        X_c, Y_c = centers[:, :, 0], centers[:, :, 1]
    fig, axs = plt.subplots(2, 2)
    titles = ['rho', 'u', 'v', 'p']
    i = 0
    for ax in axs.flat:
        value_c = array([cell.P[i] for cell in cells]).reshape((Nj, Ni))
        if porc == 0: c = ax.pcolor(X_p, Y_p, value_c)
        else: c = ax.contourf(X_c, Y_c, value_c)
        ax.set_title(titles[i])
        fig.colorbar(c, ax=ax)
        i += 1
    fig.tight_layout()
    plt.show()
# Converting block_data to point_data:
#   Step 1: open the block_data file in ParaView,
#   Step 2: apply the CellDataToPointData filter,
#   Step 3: export the point values: Save Data -> 'filename.txt'.
def show_point_data(Nj, Ni, filename):
    """Plot rho, p, Mach, T from a ParaView CSV export of point data and
    save the figure next to the input as a .png.

    The exported header is: "rho","u","v","p","Mach","T","Points:0",
    "Points:1","Points:2" — columns 0..8; relies on a notebook-level
    loadtxt import (numpy).
    """
    data = loadtxt(filename, skiprows=1, delimiter=',') # CSV produced by ParaView
    x = data[:, 6].reshape((Nj, Ni))  # Points:0 (x coordinate)
    y = data[:, 7].reshape((Nj, Ni))  # Points:1 (y coordinate)
    fig, axs = plt.subplots(2, 2)
    titles = ['rho', 'u', 'v', 'p', 'Mach', 'T']
    # Variables shown in the four panels: rho, p, Mach, T.
    iv = [0, 3, 4, 5]
    i = 0
    for ax in axs.flat:
        value = data[:, iv[i]].reshape((Nj, Ni))
        c = ax.contourf(x, y, value)
        ax.set_title(titles[iv[i]])
        # ax.set_xlim([-0.5, 1.5])
        # ax.set_ylim([-0.5, 0.5])
        fig.colorbar(c, ax=ax)
        i += 1
    fig.tight_layout()
    img_filename = filename.replace('.txt', '.png')
    plt.savefig(img_filename)
    plt.show()
# -
# ### 3. Module solver
#
# Hà m solver - giải há» phương trình Euler 2D có cấu trúc tương tá»± như trong trưá»ng hợp 1D á» phần 2. Nó bao gá»m các bưá»c:
# +
# coding: utf-8
# Module solver.py
from functions import write_cell_data
import setup
# eu_solver performs the time-marching iterations to advance the solution.
# Inputs: the grid cells, the faces (sides), and the starting iteration
# count and physical time.
def eu_solver(cells, sides, iter, time):
    """March the solution until setup.time_target, writing the field
    periodically (every setup.write_field_iter iterations) and at the end."""
    # apply the boundary conditions
    setup.set_boco(sides)
    while(time < setup.time_target):
        iter += 1 # advance the iteration counter
        # compute the global time step from the CFL condition
        dt = cells.time_step_global(setup.CFL) # a fixed dt may instead be set in setup: dt = setup.dt
        # clip the last step so we land exactly on time_target
        if(time+dt > setup.time_target): dt = setup.time_target - time
        time += dt
        print('iteration: %d, dt: %f, time: %f' % (iter, dt, time))
        # fluxes through the faces
        sides.flux_boundaries() # boundary faces
        sides.flux_inner_sides(setup.flux_func) # interior faces
        # update the conserved variables U
        cells.new_U(dt)
        # update the primitive variables P
        cells.new_P()
        # periodically dump the intermediate field
        if setup.write_field_iter is not None and not (iter%setup.write_field_iter):
            write_cell_data(cells, iter, time, setup.field_filename)
    # write the final field
    write_cell_data(cells, iter, time, setup.field_filename)
# -
# á» Äây, các biến như: time_target, flux_func, write_field_iter.. ÄÆ°á»£c xác Äá»nh trong module `setup.py`, sẜ ÄÆ°á»£c giá»i thiá»u á» bà i sau.
#
# Trong nhiá»u trưá»ng hợp ta muá»n chạy má»t sá» vòng lặp nhất Äá»nh chứ khÃŽng cần quan tâm thá»i gian cuá»i cùng, do Äó ta sá»a lại hà m `eu_solver` như sau:
# +
# eu_solver performs the time-marching iterations to find the solution.
# Inputs: the grid cells, the faces (sides), starting iteration count and time.
def eu_solver(cells, sides, iter, time):
    """March the solution either to setup.time_target (when set) or for
    setup.iter_target iterations; redefines the earlier eu_solver.
    Per-step work is delegated to iteration()."""
    # apply the boundary conditions
    setup.set_boco(sides)
    # march by physical time
    if setup.time_target is not None:
        while(time < setup.time_target):
            iter += 1 # advance the iteration counter
            # compute the global time step from the CFL condition
            dt = cells.time_step_global(setup.CFL) # a fixed dt may instead be set in setup: dt = setup.dt
            # clip the last step so we land exactly on time_target
            if (time + dt > setup.time_target): dt = setup.time_target - time
            time += dt
            print('iteration: %d, dt: %f, time: %f' % (iter, dt, time))
            iter, time = iteration(cells, sides, iter, time, dt)
    # march by iteration count
    elif setup.iter_target is not None:
        while(iter < setup.iter_target):
            iter += 1 # advance the iteration counter
            # compute the global time step from the CFL condition
            dt = cells.time_step_global(setup.CFL) # a fixed dt may instead be set in setup: dt = setup.dt
            time += dt
            print('iteration: %d, dt: %f, time: %f' % (iter, dt, time))
            iter, time = iteration(cells, sides, iter, time, dt)
    # write the final field
    write_cell_data(cells, iter, time, setup.field_filename)
def iteration(cells, sides, iter, time, dt):
    """One explicit update step: face fluxes, then U and P updates; dumps
    the field every setup.write_field_iter iterations. Returns (iter, time)
    unchanged (kept for call-site symmetry)."""
    # fluxes through the faces
    sides.flux_boundaries() # boundary faces
    sides.flux_inner_sides(setup.flux_func) # interior faces
    # update the conserved variables U
    cells.new_U(dt)
    # update the primitive variables P
    cells.new_P()
    # periodically dump the intermediate field
    if setup.write_field_iter is not None and not (iter % setup.write_field_iter):
        write_cell_data(cells, iter, time, setup.field_filename)
    return iter, time
# -
# Như váºy là chúng ta Äã có Äá»§ các hà m cần thiết. Bưá»c cuá»i cùng, ta cần thiết láºp các thÃŽng sá»: Äiá»u kiá»n ban Äầu, Äiá»u kiá»n biên, sá» CFL, thá»i Äiá»m kết thúc tÃnh toán... và gá»i các hà m cần thiết theo thứ tá»± hợp lÜ Äá» giải quyết bà i toán.
# Äá» thá»±c hiá»n những viá»c nà y ta dùng hai module `setup.py` và `run.py` sẜ ÄÆ°á»£c giá»i thiá»u á» bà i sau.
#
# ## [Bà i 23. Thiết láºp module setup, run. TÃnh bà i toán dòng chảy trên âm qua dá»c](Bai_23.ipynb)
| Bai_22.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Description and Analysis
# +
import numpy as np
import pandas as pd
# Use the fully-qualified option name; the bare 'max_columns' alias is not
# guaranteed across pandas versions.
pd.set_option('display.max_columns', 150)
import gc
import os
# matplotlib and seaborn for plotting
import matplotlib
matplotlib.rcParams['figure.dpi'] = 120 #resolution
matplotlib.rcParams['figure.figsize'] = (8,6) #figure size
import matplotlib.pyplot as plt
import seaborn as sns
# BUG FIX: sns.set_style was called before seaborn was imported (NameError);
# the import must precede its first use.
sns.set_style('darkgrid')
color = sns.color_palette()
# Local root folder of the Instacart competition data.
root = 'C:/Data/instacart-market-basket-analysis/'
# -
# The dataset contains relational set of files describing customers' orders over time. For each user, 4 to 100 orders are provided with the sequence of products purchased in each order. The data of the order's week and hour of the day as well as a relative measure of time between orders is provided.
#
# **Files in the Dataset:**
# List the raw files shipped with the dataset, then load each CSV once.
os.listdir(root)
aisles = pd.read_csv(root + 'aisles.csv')
departments = pd.read_csv(root + 'departments.csv')
orders = pd.read_csv(root + 'orders.csv')
order_products_prior = pd.read_csv(root + 'order_products__prior.csv')
order_products_train = pd.read_csv(root + 'order_products__train.csv')
products = pd.read_csv(root + 'products.csv')
# ### aisles:
# This file contains different aisles and there are total 134 unique aisles.
# Quick structural inspection of the aisles table.
aisles.head()
aisles.tail()
# Distinct aisle names (134 per the note above).
len(aisles.aisle.unique())
aisles.aisle.unique()
# ### departments:
# This file contains different departments and there are total 21 unique departments.
departments.head()
departments.tail()
len(departments.department.unique())
departments.department.unique()
# ### orders:
# This file contains all the orders made by different users. From below analysis, we can conclude following:
#
# - There are total 3421083 orders made by total 206209 users.
# - There are three sets of orders: Prior, Train and Test. The distributions of orders in Train and Test sets are similar whereas the distribution of orders in Prior set is different.
# - The total orders per customer ranges from 0 to 100.
# - Based on the plot of 'Orders VS Day of Week' we can map 0 and 1 as Saturday and Sunday respectively based on the assumption that most of the people buy groceries on weekends.
# - Majority of the orders are made during the day time.
# - Customers order once in a week which is supported by peaks at 7, 14, 21 and 30 in 'Orders VS Days since prior order' graph.
# - Based on the heatmap between 'Day of Week' and 'Hour of Day,' we can say that Saturday afternoons and Sunday mornings are prime time for orders.
# Basic inspection of the orders table.
orders.head(12)
orders.tail()
orders.info()
len(orders.order_id.unique())
len(orders.user_id.unique())
orders.eval_set.value_counts()
orders.order_number.describe().apply(lambda x: format(x, '.2f'))
# Max order_number per user == total orders placed by that user; then count
# how many users share each total.
order_number = orders.groupby('user_id')['order_number'].max()
order_number = order_number.value_counts()
fig, ax = plt.subplots(figsize=(15,8))
ax = sns.barplot(x = order_number.index, y = order_number.values, color = color[3])
ax.set_xlabel('Orders per customer')
ax.set_ylabel('Count')
ax.xaxis.set_tick_params(rotation=90, labelsize=10)
ax.set_title('Frequency of Total Orders by Customers')
fig.savefig('Frequency of Total Orders by Customers.png')
# Compare the order_number distribution across the prior/train/test splits.
fig, ax = plt.subplots(figsize = (8,4))
ax = sns.kdeplot(orders.order_number[orders.eval_set == 'prior'], label = "Prior set", lw = 1)
ax = sns.kdeplot(orders.order_number[orders.eval_set == 'train'], label = "Train set", lw = 1)
ax = sns.kdeplot(orders.order_number[orders.eval_set == 'test'], label = "Test set", lw = 1)
ax.set_xlabel('Order Number')
ax.set_ylabel('Count')
ax.tick_params(axis = 'both', labelsize = 10)
ax.set_title('Distribution of Orders in Various Sets')
fig.savefig('Distribution of Orders in Various Sets.png')
plt.show()
# Orders per day of week; 0/1 are presumed Sat/Sun (see the note above).
fig, ax = plt.subplots(figsize = (5,3))
ax = sns.countplot(orders.order_dow)
ax.set_xlabel('Day of Week', size = 10)
ax.set_ylabel('Orders', size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
ax.set_title('Total Orders per Day of Week')
fig.savefig('Total Orders per Day of Week.png')
plt.show()
# +
# Number of distinct users who ordered on each day of week.
temp_df = orders.groupby('order_dow')['user_id'].nunique()
fig, ax = plt.subplots(figsize = (5,3))
ax = sns.barplot(x = temp_df.index, y = temp_df.values)
ax.set_xlabel('Day of Week', size = 10)
ax.set_ylabel('Total Unique Users', size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
ax.set_title('Total Unique Users per Day of Week')
fig.savefig('Total Unique Users per Day of Week.png')
plt.show()
# -
# Orders by hour of day: most orders are placed during daytime hours.
fig, ax = plt.subplots(figsize = (10,5))
ax = sns.countplot(orders.order_hour_of_day, color = color[2])
ax.set_xlabel('Hour of Day', size = 10 )
ax.set_ylabel('Orders', size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
ax.set_title('Total Orders per Hour of Day')
fig.savefig('Total Orders per Hour of Day.png')
plt.show()
# Days since previous order: peaks at 7/14/21/30 suggest weekly/monthly cycles.
fig, ax = plt.subplots(figsize = (10,5))
ax = sns.countplot(orders.days_since_prior_order, color = color[2])
ax.set_xlabel('Days since prior order', size = 10)
ax.set_ylabel('Orders', size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
ax.set_title('Orders VS Days since prior order')
fig.savefig('Orders VS Days since prior order.png')
plt.show()
# Order counts cross-tabulated by day-of-week and hour-of-day.
temp_df = orders.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
# Keyword arguments keep this compatible with pandas >= 2.0, where positional
# arguments to DataFrame.pivot were removed.
temp_df = temp_df.pivot(index='order_dow', columns='order_hour_of_day', values='order_number')
temp_df.head()
# BUG FIX: plt.subplots returns (figure, axes); the original assigned the
# tuple to `ax` and later recovered the figure via ax.get_figure().
fig, ax = plt.subplots(figsize=(7,3))
sns.heatmap(temp_df, cmap="YlGnBu", linewidths=.5, ax=ax)
ax.set_title("Frequency of Day of week Vs Hour of day", size = 12)
ax.set_xlabel("Hour of Day", size = 10)
ax.set_ylabel("Day of Week", size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=10)
fig.savefig("Frequency of Day of week Vs Hour of day.png")
plt.show()
# ### order_products_prior:
#
# This file gives information about which products were ordered and in which order they were added in the cart. It also tells us that if the product was reordered or not.
#
# - In this file there is an information of total 3214874 orders through which total 49677 products were ordered.
# - From the 'Count VS Items in cart' plot, we can say that most of the people buy 1-15 items in an order and there were a maximum of 145 items in an order.
# - The percentage of reorder items in this set is 58.97%.
order_products_prior.head(10)
order_products_prior.tail()
len(order_products_prior.order_id.unique())
len(order_products_prior.product_id.unique())
# Items per order (cart size), then how many orders share each cart size.
add_to_cart_order_prior = order_products_prior.groupby('order_id')['add_to_cart_order'].count()
add_to_cart_order_prior = add_to_cart_order_prior.value_counts()
add_to_cart_order_prior.head()
add_to_cart_order_prior.tail()
add_to_cart_order_prior.index.max()
fig, ax = plt.subplots(figsize = (15,8))
ax = sns.barplot(x = add_to_cart_order_prior.index, y = add_to_cart_order_prior.values, color = color[3])
ax.set_xlabel('Items in cart')
ax.set_ylabel('Count')
ax.xaxis.set_tick_params(rotation=90, labelsize = 9)
ax.set_title('Frequency of Items in Cart in Prior set', size = 15)
fig.savefig('Frequency of Items in Cart in Prior set.png')
# Frequency of reordered (1) vs first-time (0) items in the prior set.
fig, ax = plt.subplots(figsize=(3,3))
ax = sns.barplot(x = order_products_prior.reordered.value_counts().index,
                 y = order_products_prior.reordered.value_counts().values, color = color[3])
ax.set_xlabel('Reorder', size = 10)
ax.set_ylabel('Count', size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
ax.ticklabel_format(style='plain', axis='y')
ax.set_title('Reorder Frequency in Prior Set')
# '.png' extension added for consistency with every other savefig in this
# notebook (the original omitted it, producing an extension-less file).
fig.savefig('Reorder Frequency in Prior Set.png')
plt.show()
print('Percentage of reorder in prior set:',
      format(order_products_prior[order_products_prior.reordered == 1].shape[0]*100/order_products_prior.shape[0], '.2f'))
# ### order_products_train:
#
# This file gives information about which products were ordered and in which order they were added in the cart. It also tells us that if the product was reordered or not.
#
# - In this file there is an information of total 131209 orders through which total 39123 products were ordered.
# - From the 'Count VS Items in cart' plot, we can say that most of the people buy 1-15 items in an order and there were a maximum of 145 items in an order.
# - The percentage of reorder items in this set is 59.86%.
order_products_train.head(10)
order_products_train.tail()
len(order_products_train.order_id.unique())
len(order_products_train.product_id.unique())
# BUG FIX: the original grouped order_products_PRIOR here (copy-paste error),
# so every "train" cart-size figure below actually described the prior set.
add_to_cart_order_train = order_products_train.groupby('order_id')['add_to_cart_order'].count()
add_to_cart_order_train = add_to_cart_order_train.value_counts()
add_to_cart_order_train.head()
add_to_cart_order_train.tail()
add_to_cart_order_train.index.max()
fig, ax = plt.subplots(figsize = (15,8))
ax = sns.barplot(x = add_to_cart_order_train.index, y = add_to_cart_order_train.values, color = color[2])
ax.set_xlabel('Items in cart')
ax.set_ylabel('Count')
ax.xaxis.set_tick_params(rotation=90, labelsize = 8)
ax.set_title('Frequency of Items in Cart in Train set', size = 15)
fig.savefig('Frequency of Items in Cart in Train set.png')
# Frequency of reordered (1) vs first-time (0) items in the train set.
fig, ax = plt.subplots(figsize=(3,3))
ax = sns.barplot(x = order_products_train.reordered.value_counts().index,
                 y = order_products_train.reordered.value_counts().values, color = color[2])
ax.set_xlabel('Reorder', size = 10)
ax.set_ylabel('Count', size = 10)
ax.tick_params(axis = 'both', labelsize = 8)
ax.set_title('Reorder Frequency in Train Set')
# '.png' extension added for consistency with the other saved figures.
fig.savefig('Reorder Frequency in Train Set.png')
plt.show()
print('Percentage of reorder in train set:',
      format(order_products_train[order_products_train.reordered == 1].shape[0]*100/order_products_train.shape[0], '.2f'))
# ### products:
#
# This file contains the list of total 49688 products and their aisle as well as department. The number of products in different aisles and different departments are different.
products.head(10)
products.tail()
# Distinct product names, aisles and departments.
len(products.product_name.unique())
len(products.aisle_id.unique())
len(products.department_id.unique())
# +
# Number of products stocked in each aisle.
temp_df = products.groupby('aisle_id')['product_id'].count()
fig, ax = plt.subplots(figsize = (15,6))
ax = sns.barplot(x = temp_df.index, y = temp_df.values, color = color[3])
ax.set_xlabel('Aisle Id')
ax.set_ylabel('Total products in aisle')
ax.xaxis.set_tick_params(rotation=90, labelsize = 7)
ax.set_title('Total Products in Aisle VS Aisle ID', size = 12)
fig.savefig('Total Products in Aisle VS Aisle ID.png')
# +
# Number of products carried by each department.
temp_df = products.groupby('department_id')['product_id'].count()
fig, ax = plt.subplots(figsize = (8,5))
ax = sns.barplot(x = temp_df.index, y = temp_df.values, color = color[2])
ax.set_xlabel('Department Id')
ax.set_ylabel('Total products in department')
ax.xaxis.set_tick_params(rotation=90, labelsize = 9)
ax.set_title('Total Products in Department VS Department ID', size = 10)
fig.savefig('Total Products in Department VS Department ID.png')
# +
# Number of distinct aisles inside each department.
temp_df = products.groupby('department_id')['aisle_id'].nunique()
fig, ax = plt.subplots(figsize = (8,5))
ax = sns.barplot(x = temp_df.index, y = temp_df.values)
ax.set_xlabel('Department Id')
ax.set_ylabel('Total Aisles in department')
ax.xaxis.set_tick_params(rotation=90, labelsize = 9)
ax.set_title('Total Aisles in Department VS Department ID', size = 10)
fig.savefig('Total Aisles in Department VS Department ID.png')
# -
| Data Description and Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Correlation Tests
# This section lists statistical tests that you can use to check if two samples are related.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
sns.set_style('whitegrid')
# 500-person gender/height/weight/BMI-index dataset used for the examples.
df = pd.read_csv('../data/500_Person_Gender_Height_Weight_Index.csv')
df.head()
# ## Pearson's Correlation Coefficient
# Tests whether two samples have a linear relationship.
#
# ### Assumptions
#
# - Observations in each sample are independent and identically distributed (iid).
# - Observations in each sample are normally distributed.
# - Observations in each sample have the same variance.
#
# ### Interpretation
#
# - H0: the two samples are independent.
# - Ha: there is a dependency between the samples.
# ## References
# https://machinelearningmastery.com/statistical-hypothesis-tests-in-python-cheat-sheet/
| hypothesis/notebooks/Correlation Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Flows between strata
#
# In the [previous introduction to flows](http://summerepi.com/examples/3-flow-types.html) and [introduction to stratification](http://summerepi.com/examples/6-stratification-introduction.html) we saw a workflow where:
#
# - An unstratified model is defined
# - Flows are added to that model
# - The model is then stratified, splitting the flows between new strata
#
# This approach works fine for many workflows, but in some cases, we want to define flows that move people between strata. For example, we might want to model people migrating from a rural location to an urban location over time.
#
# This example will show you how to implement flows between strata. Let's start with a baseline model, stratified by location.
# +
import numpy as np
import matplotlib.pyplot as plt
from summer import CompartmentalModel
def build_model():
    """Construct the baseline model for the stratification examples.

    A single 'pop' compartment of 20 million people, simulated from 1990
    to 2020 with a crude birth flow (2%/year) and a death flow (1%/year).
    """
    baseline = CompartmentalModel(
        times=[1990, 2020],
        compartments=["pop"],
        infectious_compartments=[],
        timestep=0.1,
    )
    baseline.set_initial_population(distribution={"pop": 20e6})
    baseline.add_crude_birth_flow("birth", 0.02, "pop")
    baseline.add_death_flow("death", 0.01, "pop")
    return baseline
def plot_compartments(model, times=None):
    """Plot every model compartment's size over time.

    Args:
        model: a run CompartmentalModel; columns of `outputs` are compartments.
        times: optional iterable of x-positions marked with dashed lines.
    """
    # Avoid the mutable-default-argument pitfall of the original `times=[]`.
    times = [] if times is None else times
    fig, ax = plt.subplots(1, 1, figsize=(12, 6), dpi=120)
    for i in range(model.outputs.shape[1]):
        ax.plot(model.times, model.outputs.T[i])
    for t in times:
        ax.axvline(x=t, color='k', linestyle='--', alpha=0.3)
    ax.set_title("Population")
    ax.set_xlabel("Days")
    ax.set_ylabel("Compartment size")
    start, end = ax.get_xlim()
    ax.xaxis.set_ticks(np.arange(start + 1.5, end, 5))
    ax.legend([str(c) for c in model.compartments], loc='lower right')
    plt.show()
# -
# ## Unstratified model
#
# In our example model, there is only one compartment with a birth and death rate.
# Build and run the unstratified baseline (net growth ~1%/year).
model = build_model()
model.run()
plot_compartments(model)
# ## Stratified model
#
# Next lets split the population into urban and rural.
# +
from summer import Stratification
model = build_model()
# Split the single 'pop' compartment into urban/rural strata, 30%/70%.
strat = Stratification('location', ['urban', 'rural'], ['pop'])
strat.set_population_split({'rural': 0.7, 'urban': 0.3})
model.stratify_with(strat)
model.run()
plot_compartments(model)
# -
# Note that, by default, 50% of the births, which are based on total population, are born into the urban/rural stratum respectively. This isn't physically realistic but we'll ignore it for simplicity's sake. A [function flow](http://summerepi.com/api/model.html#summer.model.CompartmentalModel.add_function_flow) could be used to more plausibly balance births between locations, based on their respective populations.
# ## Stratified model with migration
#
# Now we can add a [transition flow](http://summerepi.com/api/model.html#summer.model.CompartmentalModel.add_transition_flow) where 2% of the rural population migrates to the urban compartment per year.
# +
from summer import Stratification
model = build_model()
# Add an urban/rural stratification with an inter-location migration flow.
strat = Stratification('location', ['urban', 'rural'], ['pop'])
strat.set_population_split({'rural': 0.7, 'urban': 0.3})
model.stratify_with(strat)
# 2% of the rural population migrates to urban per year; source_strata /
# dest_strata select which strata of 'pop' the flow connects.
model.add_transition_flow(
    'migration',
    fractional_rate=0.02,
    source='pop',
    dest='pop',
    source_strata={'location': 'rural'},
    dest_strata={'location': 'urban'},
    # Expected flow count can be used as a sanity check,
    # to assert that the expected number of flows was added.
    expected_flow_count=1
)
model.run()
plot_compartments(model)
# -
# ## Stratified model with age-based migration
#
# We can take this example one step further with the observation that:
#
# - people aged 0-19 are unlikely to migrate
# - people aged 20-39 are likely to migrate
# - people aged 40+ are less likely to migrate
#
# We can use an [age stratification](http://summerepi.com/examples/7-age-stratification.html) to model the age strata and ageing flows.
# +
from summer import Stratification, AgeStratification, Overwrite
model = build_model()
# Add an urban/rural stratification with an inter-location migration flow.
strat = Stratification('location', ['urban', 'rural'], ['pop'])
strat.set_population_split({'rural': 0.7, 'urban': 0.3})
model.stratify_with(strat)
model.add_transition_flow(
    'migration',
    fractional_rate=0, # To be overwritten
    source='pop',
    dest='pop',
    source_strata={'location': 'rural'},
    dest_strata={'location': 'urban'},
    # Expected flow count can be used as a sanity check,
    # to assert that the expected number of flows was added.
    expected_flow_count=1
)
# Set age-specific migration rates.
age_strat = AgeStratification('age', [0, 20, 40], ['pop'])
age_strat.set_population_split({'0': 0.2, '20': 0.4, '40': 0.4})
age_strat.set_flow_adjustments("migration", {
    '0': Overwrite(0), # No migration
    '20': Overwrite(0.05), # 5% of 20-39 year olds per year
    '40': Overwrite(0.01), # 1% of 40+ year olds per year
})
model.stratify_with(age_strat)
# Track urban and rural populations
model.request_output_for_compartments(
    'urban_pop',
    compartments=["pop"],
    strata={"location": "urban"}
)
model.request_output_for_compartments(
    'rural_pop',
    compartments=["pop"],
    strata={"location": "rural"}
)
model.run()
plot_compartments(model)
# Plot rural/urban split; derived outputs were registered above via
# request_output_for_compartments.
fig, ax = plt.subplots(1, 1, figsize=(12, 6), dpi=120)
ax.plot(model.times, model.derived_outputs['urban_pop'])
ax.plot(model.times, model.derived_outputs['rural_pop'])
ax.set_title("Population")
ax.set_xlabel("Days")
ax.set_ylabel("Compartment size")
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start + 1.5, end, 5))
ax.legend(['urban_pop', 'rural_pop'], loc='lower right')
plt.show()
# -
# ## Summary
#
# Now you know how to add flows between strata after a model has been stratified.
| docs/examples/11-flows-between-strata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Sentiment Analysis
# + [markdown] deletable=true editable=true
#
#
# MY FUNCTIONS
# + deletable=true editable=true
def sentence(data):
    """Split raw review text into a list of stripped sentences.

    '?' and '!' are normalised to '.' before splitting.  Empty segments
    (e.g. from a trailing period or repeated punctuation) are dropped,
    which also fixes the original behaviour of unconditionally removing
    the final element even when it held a real sentence (text that did
    not end with punctuation lost its last sentence).
    """
    normalised = data.replace('?', '.').replace('!', '.')
    return [seg.strip() for seg in normalised.split('.') if seg.strip()]
# + [markdown] deletable=true editable=true
# word count
# + deletable=true editable=true
def words(data):
    """Tokenise each sentence: lowercase, strip commas, split on single spaces."""
    tokenised = []
    for text in data:
        tokenised.append(text.lower().replace(',', '').split(' '))
    return tokenised
# + [markdown] deletable=true editable=true
# Main function
# + deletable=true editable=true
# Build a Laplace-smoothed unigram probability model of the NEGATIVE reviews.
# NOTE(review): this cell is repeated almost verbatim twice more below —
# a shared helper function would remove the triplication.
from collections import Counter
d=Counter()
# NOTE(review): the file handle is never closed; `with open(...)` would be safer.
f=open("txt_sentoken/neg/output.txt","r")
content=f.read()
senten=sentence(content)
wordlist=words(senten)
wordcounter=0
for sen in wordlist:
    count=0   # words counted since the last negation trigger
    megj=0    # negation flag: 1 while inside a negation window
    for word in sen:
        # Skip punctuation-only tokens.
        if word!='' and word!='"' and word!='(' and word!=')'and word!=':'and word!=';'and word!='-':
            if megj==1:
                # Up to two words after an "n't" token are counted under a
                # NOT_ prefix to capture negated sentiment.
                d["NOT_"+word]+=1
                count+=1
                wordcounter+=1
            else:
                d[word]+=1
                wordcounter+=1
            if word[-3:len(word)]=="n't":
                megj=1
            if count==2:
                # Negation window closes after two prefixed words.
                megj=0
                count=0
Prob=Counter()
# Add-one (Laplace) smoothing over the observed vocabulary.
for word in d:
    Prob[word]=(d[word]+1)/(wordcounter+len(d))
Prob.most_common(10)
# + [markdown] deletable=true editable=true
# positive part
# + deletable=true editable=true
# Build the Laplace-smoothed unigram model of the POSITIVE reviews.
# NOTE(review): near-duplicate of the negative-set cell; consider a helper.
from collections import Counter
dpos=Counter()
f=open("txt_sentoken/pos/output.txt","r")
content=f.read()
senten=sentence(content)
wordlist=words(senten)
wordcounter=0
for sen in wordlist:
    count=0   # words counted since the last negation trigger
    megj=0    # negation flag: 1 while inside a negation window
    for word in sen:
        # Skip punctuation-only tokens.
        if word!='' and word!='"' and word!='(' and word!=')'and word!=':'and word!=';'and word!='-':
            if megj==1:
                dpos["NOT_"+word]+=1
                count+=1
                wordcounter+=1
            else:
                dpos[word]+=1
                wordcounter+=1
            if word[-3:len(word)]=="n't":
                megj=1
            if count==2:
                megj=0
                count=0
Probpos=Counter()
# Add-one (Laplace) smoothing, as for the negative model.
for word in dpos:
    Probpos[word]=(dpos[word]+1)/(wordcounter+len(dpos))
Probpos.most_common(10)
# + [markdown] deletable=true editable=true
# ### NEW TEXT
# we read a movie review from own_test.txt and the classifier trained previously decides whether it is a positive or negative review
#
# + deletable=true editable=true
# Score a new review: sum log-probabilities of its (negation-aware) tokens
# under the positive and negative models, then pick the larger score.
from collections import Counter
import math
dtest=Counter()
f=open("txt_sentoken/own_test.txt","r")
content=f.read()
senten=sentence(content)
wordlist=words(senten)
wordcounter=0
for sen in wordlist:
    count=0   # words counted since the last negation trigger
    megj=0    # negation flag: 1 while inside a negation window
    for word in sen:
        # Same tokenisation/negation handling as the training cells.
        if word!='' and word!='"' and word!='(' and word!=')'and word!=':'and word!=';'and word!='-':
            if megj==1:
                dtest["NOT_"+word]+=1
                count+=1
                wordcounter+=1
            else:
                dtest[word]+=1
                wordcounter+=1
            if word[-3:len(word)]=="n't":
                megj=1
            if count==2:
                megj=0
                count=0
test_pos=0
test_neg=0
# NOTE(review): a word present in only one model contributes to only that
# score, and words unseen by both are skipped — this asymmetry biases the
# comparison; smoothing unseen words in both models would be more principled.
# Also note only membership in dtest is used, not each word's frequency.
for word in dtest:
    if word in Probpos and word in Prob:
        test_pos+=math.log(Probpos[word])
        test_neg+=math.log(Prob[word])
    elif word in Probpos:
        test_pos+=math.log(Probpos[word])
    elif word in Prob:
        test_neg+=math.log(Prob[word])
if test_pos>test_neg:
    print("test_pos")
else: print("test_neg")
print(test_pos,test_neg)
| sentiment_analysis/sentiment_analysis2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (Cfox2)
# language: python
# name: cfox2
# ---
# *This GOSTnets example workbook will focus on Dar Es Salaam. We will build a multi-modal network comprised of the city streets from OSM, and the transit network as represented by their GTFS feed. We will then work out the shortest path travel time for each of 900 households, with access to the transit network and without. We will close by calculating the effect on trip times that having access to the GTFS-defined transit network affords the average citizen.*
#
# Author: <NAME>, G.O.S.T | SD Chief Economist Front Office
# ### Library Import
import geopandas as gpd
import pandas as pd
import networkx as nx
import peartree as pt
import osmnx as ox
import sys, os, time, json, copy
# Local GOSTnets checkout; the sys.path hack must run before `import GOSTnet`.
ghub = r'C:\Users\charl\Documents\GitHub\GOST\NetworkAnalysis\GOSTNets'
sys.path.append(ghub) # Allows system to look in the 'ghub' folder for any python libraries. Here used to import GOSTnets
import GOSTnet as gn
import importlib # This library lets you re-load libraries if you make a change to the underlying python
from shapely.geometry import Point
# ### Define paths, file names
# NOTE(review): machine-specific absolute Windows paths — adjust per machine.
pth = r'C:\Users\charl\Documents\GOST\DarEsSalaam'
pointz = r'workplace_geo.csv'
feed_path = os.path.join(pth, r'GTFS.zip')
# ### Preparing the MultiModal Network Step 1: The Walking Graph
# This function searches OSM for objects called 'Dar Es Salaam'. It is functionally equivalent to going to www.openstreetmap.org and manually entering the search term 'Dar Es Salaam'. If you don't get the desired area first time (it's a search function...) you can modify the result number the function takes as the area. To do this, follow the documentation steps here:
# https://osmnx.readthedocs.io/en/stable/osmnx.html
#
#
# Here, <NAME> gives the city relation, which is perfectly adequate for our purposes. We have imported the network type 'walk', which is all of the walkable streets in OSM. Switch keyword argument to 'drive' to get the driving network
# %time G_OSM = ox.graph_from_place('<NAME>', network_type = 'walk')
# This line checks to see if the graph is already simplified. It usually will be if it is from osmnx, but a useful check for other networkx objects
ox.simplify.is_simplified(G_OSM)
# This GOSTnets command takes a grpah object and makes an equivalent GeoDataFrame of the edges. The partner function, gn.node_gdf_from_graph, does the same thing for the nodes of a target graph object
G_OSM_gdf = gn.edge_gdf_from_graph(G_OSM)
# I wanted to get a summary of the roads broken down by highway type, for my personal interest. For this, each road must be labelled only as one type of highway. "highway = ['primary, secondary'] " is not admissable, nor helpful.
#
# Hence, this function iterates through each row, checks whether or not the contents of the column are a list, and if true, returns the first object in the list. Otherwise, it returns what it found. This removes all list objects, leaving only text strings.
# +
def check(x):
    """Return a single highway tag for a row.

    OSM sometimes stores `highway` as a list of tags; in that case the
    first entry is taken as representative, otherwise the value is
    returned unchanged.
    """
    # isinstance is the idiomatic type test (also handles list subclasses).
    if isinstance(x.highway, list):
        return x.highway[0]
    return x.highway
G_OSM_gdf['highway'] = G_OSM_gdf.apply(lambda x: check(x), axis = 1)
# -
# Print the total number of edges in the dataframe
len(G_OSM_gdf)
# ...that's a lot of edges. Let's get the summary of edges by the highway type as labelled in OpenStreetMap:
G_OSM_gdf.highway.value_counts()
# Send the GeoDataFrame to a .csv for visual inspection in QGIS / future reference.
G_OSM_gdf.to_csv(os.path.join(pth, 'walkingnet.csv'))
# In order for the network edges to be useful in calculating travel times, they need a value for how long it takes to 'traverse' that edge of the graph. There is a handy GOSTnets function for this, which automatically generates the traverse time for graphs which already include a 'length' property measured in metres.
#
# Users must specify what graph type they are working with.
#
# **'walk'** will return a traverse time measured in seconds, at the assumed walk speed (defined in kmph, default 4.5 for average human).
#
# **'drive'** will define traverse times according to highway types. Although there is a built in default dictionary of assumed speeds by highway type, it is best to pass an explicit dictionary of key:value pairs to the function to avoid unexpected results. Here, we are trying to generate travel time estimates for walking across this OSM network, so 'walk' is used, and I re-affirm the walk speed as the default, at 4.5 kmph
Gwalk = G_OSM.copy()
# %time Gwalk = gn.convert_network_to_time(Gwalk, distance_tag = 'length', graph_type = 'walk', speed_dict = None, walk_speed = 4.5)
# This function prints out an example edge in (u, v, {data}) format, typical of Networkx edge objects
gn.example_edge(Gwalk, 1)
# ### Preparing the MultiModal Network Step 2: The GTFS Graph
# Here I am making use of the peartree library to import a GTFS feed as a representative network graph. It is important to define the time of day for which we want to get the graph. This is because public transport is not like a road network - the 'edges' along which people can travel only exist at certain times of day, i.e. when buses are in service, for example. Here, I follow the peartree documentation example and use 7am to 9am as my time stretch to model a morning commute. We load this GTFS feed into a networkx object with 'load_feed_as_graph'. This will form another part of our multi-modal network.
# Model a morning commute: restrict the GTFS graph to services running 7-9am.
feed = pt.get_representative_feed(feed_path)
start = 7 * 60 * 60   # 07:00 in seconds since midnight
end = 9 * 60 * 60     # 09:00 in seconds since midnight
# %time Gtransit = pt.load_feed_as_graph(feed, start, end)
# I pass this to a GeoDataFrame and send to .csv for future reference / visual inspection
Gtransit_gdf = gn.edge_gdf_from_graph(Gtransit)
Gtransit_gdf.to_csv(os.path.join(pth, 'transitnet.csv'))
# One thing we do need to do is make sure every edge on the GTFS network has a property called 'time' in its data dictionary, to ensure that there is one 'key' across the combined multimodal network which will always represent the traverse time of an edge, no matter where it came from.
#
# This will act as our impedance later for calculating journey times. As such, here we iterate through all edges and make a new property 'time', equal to 'length'. At first sight this looks counterintuitive - 'length' isn't time, right? But it is not, for Peartree loads GTFS feeds into graphs with the edge 'length' measured in seconds. Ergo, we have the value we need, it is just currently mis-labelled. As good practice we also add an edge 'mode' of 'GTFS transit', so we can pick apart the GTFS edges easily later.
# +
gn.example_edge(Gtransit, 1)
# Peartree stores traverse time (seconds) under 'length'; copy it to 'time'
# so both sub-networks share one impedance key, and tag the edges' mode so
# GTFS edges can be picked apart later.
for u, v, data in Gtransit.edges(data = True):
    data['time'] = data['length']
    data['mode'] = 'GTFStransit'
gn.example_edge(Gtransit, 1)
# -
# ### Bind the graphs
# Having prepared the walking and transit graphs individually, we now bind these together with GOSTnets' 'bind_graphs' command. The order of the graphs is important - the first is the base graph onto which we want to bind the smaller graph (second).
#
# The connection_threshold parameter is the distance in metres for which we will accept nodes to be 'bound' via the creation of a new edge. If there are no nodes on the other graph within 50m, no connection is made. the .crs of both objects is irrelevant, as this distance is always returned in projected metres, irrespective of the network objects passed to it.
#
# The speed parameter is the speed assumed for the new binding edges - as these trips are expected to be made on foot, the default is the same as our default walking speed of 4.5kmph.
#
# This function takes some time to run for larger networks, hence the use of progress statements. These can be turned off by adding an optional parameter, 'verbose = False' to the function call.
importlib.reload(gn)
# %time Gbound = gn.bind_graphs(Gwalk, Gtransit, name = 'GTFS', connection_threshold = 50, exempt_nodes = [], speed = 4.5)
# Once again, we send the bound network to .csv for visual inspection.
Gbound_gdf = gn.edge_gdf_from_graph(Gbound)
Gbound_gdf.to_csv(os.path.join(pth, 'multinet.csv'))
# We also save the graphs we will use for travel time calculations in a handy format that will allow recall later
nx.write_gml(Gbound, os.path.join(pth, 'multinet.gml'))
nx.write_gml(Gwalk, os.path.join(pth, 'walknet.gml'))
# ### Preparing the Journey File
# Having prepared the network, we now prepare the journey file which describes which trips will be made.
#
# We import the file of origins and destinations as a standard pandas dataframe using pd.read_csv
# +
points = pd.read_csv(os.path.join(pth, pointz))
points.head(3)
# -
# Here, we define new columns, 'origin' and 'destination', which are shapely objects of the Lat / Long points for the origins and destinations. Longitude always comes first. We initially generate a tuple, then pass the tuple to shapely's Point() method to generate shapely point instances. These have geometric properties, as opposed to just being a string.
# Build shapely Points (longitude first, per convention) for origins and
# destinations in a single pass, instead of first materialising a column of
# plain tuples and then re-applying Point over it.
points['origin'] = [Point(xy) for xy in zip(points.gpsLongitude, points.gpsLatitude)]
points['destination'] = [Point(xy) for xy in zip(points.job_longitude, points.job_latitude)]
# GOSTnets' snap_points_to_graph function requires a GeoDataFrame, so we generate one - by defining which column is the geometry (we will start with the origin col), and passing in the definition of the CRS (here, WGS 84).
points = gpd.GeoDataFrame(points, crs = {'init':'epsg:4326'}, geometry = 'origin')
# We run the point snapper, which returns the nearest node's ID in a new column called 'Nearest_node'
# +
importlib.reload(gn)
# %time points_nn = gn.snap_points_to_graph(Gwalk, points, geomcol = 'origin')
# -
# This isn't very helpful if the file contains both origins and destinations, as we will need the IDs of the closest node for both origin and destination points (one for each). Therefore, we rename the column we just calculated as 'Origin_node'.
points_nn = points_nn.rename(columns = {'Nearest_node':'Origin_node'})
points_nn.head(3)
# We re-define the geometry property of the GeoDataFrame as the destination column. for more info on why this is relevant and necessary, check out: http://geopandas.org/data_structures.html
points_nn = points_nn.set_geometry('destination')
# We now re-run snap_points, asking it instead to snap the destination points to the graph and return the nearest node
# +
importlib.reload(gn)
# %time points_nn = gn.snap_points_to_graph(Gwalk, points_nn, geomcol = 'destination')
# -
# We rename the resulting column as 'Destination_node' for ease of keeping track of what's going on
points_nn = points_nn.rename(columns = {'Nearest_node':'Destination_node'})
# We check to make sure the dataframe looks how we want it to after the snapping process has been completed. We print the first 3 rows:
points_nn.head(3)
# Save our prepared dataframe as a .csv
points_nn.to_csv(os.path.join(pth, 'preparedpoints.csv'))
# ### Running the travel time calculations
# Re-import saved versions of our prepared graphs and points file
#Gbound = nx.read_gml(os.path.join(pth, 'multinet.gml'))
#Gwalk = nx.read_gml(os.path.join(pth, 'walknet.gml'))
points = pd.read_csv(os.path.join(pth, 'preparedpoints.csv'))
# For each origin and destination we now have an approximate node start and end point. We also have a fully connected multi-modal network with a consistently labelled traverse time for each edge. We are ready to start calculating travel times at this point.
#
# Networkx' shortest path calculation requires that we pass it the origins, the destination, and the time. So, we iterate through our DataFrame like so:
# +
points_nn['TT_multi'] = 0
points_nn['TT_walking'] = 0

# For every origin/destination pair, compute the shortest travel time
# (edge weight 'time') on both the multi-modal and the walk-only network.
for i in range(len(points_nn)):
    origin = points_nn.Origin_node.loc[i]
    destination = points_nn.Destination_node.loc[i]
    # .loc[row, col] avoids the chained-indexing (SettingWithCopy) pitfall of
    # the original points_nn['col'].loc[i] = ... pattern.
    try:
        points_nn.loc[i, 'TT_multi'] = nx.shortest_path_length(Gbound, source=origin, target=destination, weight='time')
    except Exception:
        # no path / missing node: record as missing (bare `except:` narrowed
        # so Ctrl-C still interrupts the run)
        points_nn.loc[i, 'TT_multi'] = None
    try:
        points_nn.loc[i, 'TT_walking'] = nx.shortest_path_length(Gwalk, source=origin, target=destination, weight='time')
    except Exception:
        points_nn.loc[i, 'TT_walking'] = None
    if i % 100 == 0 and i != 0:
        print('%d trips done' % i)
# fixed: the original `elif i == len(points_nn)` inside the loop could never
# fire (range stops at len-1), so the completion message never printed.
print('Analysis complete')
# -
# Convert the second-valued travel times to minutes and measure how much
# faster the multi-modal (transit) network is than walking alone.
out = points_nn.copy()
for col in ('TT_walking', 'TT_multi'):
    out[col] = out[col] / 60
out['perf_improvement'] = 1 - (out['TT_multi'] / out['TT_walking'])
# Report the mean trip-time reduction gained by using the transit network.
avg_gain_pct = int(out['perf_improvement'].mean() * 100)
print('Average performance improvement: %d percent' % avg_gain_pct)
# Persist the results alongside the other outputs.
out.to_csv(os.path.join(pth, 'output.csv'))
# Comments? Questions? <EMAIL>
| GOSTNets/Notebooks/Multimodal Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
df = pd.read_csv(r'C:\Users\sadiv\Documents\DataAnalytics\LasVegasTripAdvisorReviews.csv', sep=";")
df.head()
# -
df.size
df.dtypes
df.info
import psycopg2
import numpy as np
import os
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks", color_codes=True)
import matplotlib
import warnings
import yellowbrick as yb
from pandas.plotting import scatter_matrix
warnings.simplefilter(action='ignore', category=FutureWarning)
#plt.subplots(figsize = (30,30))
# NOTE(review): `aa` (the YES/NO -> 1/0 numeric frame) is only created further
# down in this notebook; run that cell first or this heatmap raises NameError.
sns.heatmap(aa.corr())
sns.set(style="whitegrid")
# fixed: a comma was missing between hue="Gym" and data=df (SyntaxError)
sns.violinplot(x="Casino", y="Score", hue="Gym", data=df, figsize = (15,15))
sns.set(style="whitegrid")
sns.violinplot(x="Gym", y="Score", hue="Casino", data=df, figsize = (15,15))
# +
features = ['Casino', 'Gym', 'Tennis court', 'Spa', 'Pool']
target = ['Score']
y = df[target]
X = df[features]
# -
aa = df.replace({'Pool': {'YES': 1, 'NO': 0}, 'Gym': {'YES': 1, 'NO': 0}, 'Tennis court': {'YES': 1, 'NO': 0}, 'Spa': {'YES': 1, 'NO': 0}, 'Casino': {'YES': 1, 'NO': 0}, 'Gym': {'YES': 1, 'NO': 0}})
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics

# fixed: fit on the numeric frame `aa` (YES/NO already mapped to 1/0) —
# DecisionTreeClassifier cannot fit on the raw 'YES'/'NO' strings in `df`.
y = aa[target]
X = aa[features]
# 70/30 split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
| ADS-Fall2019A/Las Vegas Hotel TripAdvisor Reviews/Vegas Hotel - See Heat Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
# +
# each point is Sepal length on Cm, Sepal width on Cm & type (0.1)
# 0 for Iris-setosa & 1 for Iris-versicolor
data = [[ 5.1, 3.5, 0 ],
[ 4.9, 3. , 0 ],
[ 4.7, 3.2, 0 ],
[ 4.6, 3.1, 0 ],
[ 5. , 3.6, 0 ],
[ 5.4, 3.9, 0 ],
[ 4.6, 3.4, 0 ],
[ 5. , 3.4, 0 ],
[ 4.4, 2.9, 0 ],
[ 4.9, 3.1, 0 ],
[ 5.4, 3.7, 0 ],
[ 4.8, 3.4, 0 ],
[ 4.8, 3. , 0 ],
[ 4.3, 3. , 0 ],
[ 5.8, 4. , 0 ],
[ 5.7, 4.4, 0 ],
[ 5.4, 3.9, 0 ],
[ 5.1, 3.5, 0 ],
[ 5.7, 3.8, 0 ],
[ 5.1, 3.8, 0 ],
[ 7. , 3.2, 1 ],
[ 6.4, 3.2, 1 ],
[ 6.9, 3.1, 1 ],
[ 5.5, 2.3, 1 ],
[ 6.5, 2.8, 1 ],
[ 5.7, 2.8, 1 ],
[ 6.3, 3.3, 1 ],
[ 4.9, 2.4, 1 ],
[ 6.6, 2.9, 1 ],
[ 5.2, 2.7, 1 ],
[ 5. , 2. , 1 ],
[ 5.9, 3. , 1 ],
[ 6. , 2.2, 1 ],
[ 6.1, 2.9, 1 ],
[ 5.6, 2.9, 1 ],
[ 6.7, 3.1, 1 ],
[ 5.6, 3. , 1 ],
[ 5.8, 2.7, 1 ],
[ 6.2, 2.2, 1 ],
[ 5.6, 2.5, 1 ],
[ 5.9, 3.2, 1 ],
[ 6.1, 2.8, 1 ],
[ 6.3, 2.5, 1 ],
[ 6.1, 2.8, 1 ],
[ 6.4, 2.9, 1 ]]
mystery_flower = [ 6.6, 3]
# +
def sigmoid(x):
    """Logistic activation: maps any real x into the open interval (0, 1)."""
    return 1 / (1 + np.exp(-x))


def sigmoid_p(x):
    """Derivative of the logistic function, expressed via its own value."""
    s = sigmoid(x)
    return s * (1 - s)
# -
T = np.linspace(-6, 6, 100)
plt.plot(T, sigmoid(T), c='r')
plt.plot(T, sigmoid_p(T), c='b')
# scatter data
plt.axis([0, 10, 0, 6])
plt.grid()
for i in range(len(data)) :
point = data[i]
color = "r"
if point[2] == 0 :
color = "b"
plt.scatter(point[0], point[1], c=color)
# +
# training loop: stochastic gradient descent on a single logistic neuron
learning_rate = 0.2
costs = []

# random initialisation of the two weights & the bias
w1 = np.random.randn()
w2 = np.random.randn()
b = np.random.randn()

# NOTE(review): 100,000,000 iterations is almost certainly far more than a
# 2-input neuron needs; kept unchanged here to preserve the original run.
for i in range(100000000):
    # pick one random training point per step (SGD)
    ri = np.random.randint(len(data))
    point = data[ri]

    z = point[0] * w1 + point[1] * w2 + b
    prediction = sigmoid(z)
    target = point[2]

    # squared-error cost and its derivative w.r.t. the prediction
    cost = np.square(prediction - target)
    dcost_prediction = 2 * (prediction - target)
    dprediction_dz = sigmoid_p(z)

    dz_dw1 = point[0]
    dz_dw2 = point[1]
    dz_db = 1

    # chain rule: slope of the cost w.r.t. z, then w.r.t. each parameter
    dcost_dz = dcost_prediction * dprediction_dz
    dcost_dw1 = dcost_dz * dz_dw1
    dcost_dw2 = dcost_dz * dz_dw2
    dcost_db = dcost_dz * dz_db

    # gradient-descent update
    w1 = w1 - learning_rate * dcost_dw1
    w2 = w2 - learning_rate * dcost_dw2
    b = b - learning_rate * dcost_db

    # every 100 steps, record the mean cost over the whole data set
    # (this is only for the convergence graph shown below)
    if i % 100 == 0:
        cost_sum = 0
        for j in range(len(data)):
            # fixed: the original used data[ri] here, so the "mean" cost was
            # just len(data) copies of the current random point's cost
            point = data[j]
            z = point[0] * w1 + point[1] * w2 + b
            prediction = sigmoid(z)
            target = point[2]
            cost_sum += np.square(prediction - target)
        costs.append(cost_sum / len(data))
plt.plot(costs)
# +
# prediction
#test data
test_data = [[ 5.4, 3.4, 0. ],
[ 4.6, 3.6, 0. ],
[ 5.1, 3.3, 0. ],
[ 4.8, 3.4, 0. ],
[ 5. , 3. , 0. ],
[ 5. , 3.4, 0. ],
[ 5.2, 3.5, 0. ],
[ 5.2, 3.4, 0. ],
[ 4.7, 3.2, 0. ],
[ 4.8, 3.1, 0. ],
[ 5.4, 3.4, 0. ],
[ 5.2, 4.1, 0. ],
[ 5.5, 4.2, 0. ],
[ 4.9, 3.1, 0. ],
[ 5. , 3.2, 0. ],
[ 5.5, 3.5, 0. ],
[ 4.9, 3.1, 0. ],
[ 4.4, 3. , 0. ],
[ 5.1, 3.4, 0. ],
[ 5. , 3.5, 0. ],
[ 4.5, 2.3, 0. ],
[ 4.4, 3.2, 0. ],
[ 5. , 3.5, 0. ],
[ 5.1, 3.8, 0. ],
[ 4.8, 3. , 0. ],
[ 5.1, 3.8, 0. ],
[ 4.6, 3.2, 0. ],
[ 5.3, 3.7, 0. ],
[ 5. , 3.3, 0. ],
[ 6.8, 2.8, 1. ],
[ 6.7, 3. , 1. ],
[ 6. , 2.9, 1. ],
[ 5.7, 2.6, 1. ],
[ 5.5, 2.4, 1. ],
[ 5.5, 2.4, 1. ],
[ 5.8, 2.7, 1. ],
[ 6. , 2.7, 1. ],
[ 5.4, 3. , 1. ],
[ 6. , 3.4, 1. ],
[ 6.7, 3.1, 1. ],
[ 6.3, 2.3, 1. ],
[ 5.6, 3. , 1. ],
[ 5.5, 2.5, 1. ],
[ 5.5, 2.6, 1. ],
[ 6.1, 3. , 1. ],
[ 5.8, 2.6, 1. ],
[ 5. , 2.3, 1. ],
[ 5.6, 2.7, 1. ],
[ 5.7, 3. , 1. ],
[ 5.7, 2.9, 1. ],
[ 6.2, 2.9, 1. ],
[ 5.1, 2.5, 1. ],
[ 5.7, 2.8, 1. ]]
for i in range(len(test_data)) :
point = test_data[i]
print(point)
z = point[0] * w1 + point[1] * w2 + b
prediction = sigmoid(z)
print("prediction : {}" .format(prediction))
# -
z = mystery_flower[0] * w1 + mystery_flower[1] * w2 + b
prediction = sigmoid(z)
prediction
# 0 for Iris-setosa and 1 for Iris-versicolor
def guess_flower(SepalLength, SepalWidth):
    """Print the predicted class for one flower using the trained w1, w2, b.

    Threshold 0.5 on the neuron output: below -> Iris-setosa (label 0),
    otherwise -> Iris-versicolor (label 1).
    """
    z = SepalLength * w1 + SepalWidth * w2 + b
    label = 'Iris-setosa' if sigmoid(z) < .5 else 'Iris-versicolor'
    print(label)
guess_flower(4.8, 3.0)
guess_flower(6.7, 3.0)
guess_flower(5.1, 3.7)
| flower_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score\
,fbeta_score,classification_report,confusion_matrix,precision_recall_curve,roc_auc_score\
,roc_curve
# -
# # Data-preprocessing
filepath = '/Users/mac/Desktop/Kaggle_datasets/Titanic/train.csv'
df = pd.read_csv(filepath)
df.head()
df.info()
df.describe()
df.Age.fillna(df.Age.median(), inplace=True)
columns = ['Pclass','Sex','Age','SibSp','Parch']
df_trim_features = df[columns]
df_trim_features.head()
df_dum_features = pd.get_dummies(df_trim_features, columns=['Pclass','Sex'])
df_dum_features.head()
df_label = df['Survived'].values
df_label[0:5]
# +
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
df_feat_trans = scaler.fit(df_dum_features).transform(df_dum_features)
df_feat_trans
# -
# # Model fit: the problem is binary classification
# ## naive_bayes.GaussianNB()
# +
from sklearn import naive_bayes, cross_validation

# split the data with cross_validation
# (fixed: the original Chinese comment was mojibake split over two lines,
# leaving a bare non-comment line that raised NameError at run time)
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# prefer sklearn.model_selection, already imported earlier in this file.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(df_feat_trans, df_label,
                                                                     test_size=0.25, random_state=0, stratify=df_label)
# fit the model
cls = naive_bayes.GaussianNB()
cls.fit(X_train, y_train)
print('Training Score: %.2f' % cls.score(X_train, y_train))
print('Testing Score: %.2f' % cls.score(X_test, y_test))
# -
# ## svm.LinearSVC(C=?)
# +
from sklearn import svm, cross_validation
X_train,X_test,y_train,y_test = cross_validation.train_test_split(df_feat_trans, df_label,
test_size=0.25, random_state=0,stratify=df_label)
cls = svm.LinearSVC(C=0.1)
cls.fit(X_train,y_train)
print('Training Score: %.2f' % cls.score(X_train,y_train))
print('Testing Score: %.2f' % cls.score(X_test,y_test))
# -
# ## DecisionTreeClassifier(max_depth=?)
# +
from sklearn import cross_validation
from sklearn.tree import DecisionTreeClassifier
X_train,X_test,y_train,y_test = cross_validation.train_test_split(df_feat_trans, df_label,
test_size=0.25, random_state=0,stratify=df_label)
depth=20
cls = DecisionTreeClassifier(max_depth=depth)
cls.fit(X_train,y_train)
print('Training Score: %.2f' % cls.score(X_train,y_train))
print('Testing Score: %.2f' % cls.score(X_test,y_test))
# -
# # Decomposition: ex. PCA, n_components=3
# +
from sklearn import decomposition
pca = decomposition.PCA(n_components=3)
pca.fit(df_feat_trans)
df_feat_trans = pca.transform(df_feat_trans)
from sklearn import cross_validation
from sklearn.tree import DecisionTreeClassifier
X_train,X_test,y_train,y_test = cross_validation.train_test_split(df_feat_trans, df_label,
test_size=0.25, random_state=0,stratify=df_label)
depth=20
cls = DecisionTreeClassifier(max_depth=depth)
cls.fit(X_train,y_train)
print('Training Score: %.2f' % cls.score(X_train,y_train))
print('Testing Score: %.2f' % cls.score(X_test,y_test))
# -
df_feat_trans
# +
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs=df_feat_trans[:,0],ys=df_feat_trans[:,1],zs=df_feat_trans[:,2],
c=df_label, s=500, alpha=0.5, #cmap=plt.cm.get_cmap('nipy_spectral', 10)
)
plt.show()
# -
# # Classification_matrics
# +
from sklearn import svm, cross_validation
X_train,X_test,y_train,y_test = cross_validation.train_test_split(df_feat_trans, df_label,
test_size=0.25, random_state=0,stratify=df_label)
cls = svm.LinearSVC(C=0.1)
cls.fit(X_train,y_train)
print('Training Score: %.2f' % cls.score(X_train,y_train))
print('Testing Score: %.2f' % cls.score(X_test,y_test))
y_pred = cls.predict(X_test) #ç²åé æž¬çæ¡
y_pred
# +
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score\
,fbeta_score,classification_report,confusion_matrix,precision_recall_curve,roc_auc_score\
,roc_curve
y_true = y_test
conf = confusion_matrix(y_true,y_pred)
conf
# -
sns.heatmap(conf, annot=True, fmt="d")
plt.show()
print(classification_report(y_true,y_pred))
# # Summary
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler

# clean the data
# (fixed: the original Chinese comment was mojibake split across two lines,
# leaving a bare non-comment line that raised NameError at run time)
def data_preprocessing(df):
    """Turn a raw Titanic frame into a [0, 1]-scaled numeric feature matrix.

    Steps: median-impute Age (in place, mutating the caller's frame as the
    original did), keep five predictor columns, one-hot encode the
    categoricals, then min-max scale every column.

    Returns a numpy array of shape (n_rows, n_dummy_features).
    """
    df.Age.fillna(df.Age.median(), inplace=True)
    columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch']
    df_trim_features = df[columns]
    # NOTE(review): get_dummies on train and test separately can yield
    # mismatched columns if a category is missing in one file — confirm
    # before relying on predictions from this pipeline.
    df_dum_features = pd.get_dummies(df_trim_features, columns=['Pclass', 'Sex'])
    scaler = MinMaxScaler(feature_range=(0, 1))
    # fit_transform is the idiomatic equivalent of fit(...).transform(...)
    return scaler.fit_transform(df_dum_features)
# Train: fit a linear SVM on the preprocessed Kaggle training file
filepath = '/Users/mac/Desktop/Kaggle_datasets/Titanic/train.csv'
df = pd.read_csv(filepath)
df_label = df['Survived'].values
# NOTE(review): `cross_validation` and `svm` come from imports in earlier
# notebook cells — this cell is not self-contained; confirm run order.
X_train,X_test,y_train,y_test = cross_validation.train_test_split(data_preprocessing(df), df_label,
                                    test_size=0.25, random_state=0,stratify=df_label)
cls = svm.LinearSVC(C=0.1)
cls.fit(X_train,y_train)
print('Training Score: %.2f' % cls.score(X_train,y_train))
print('Testing Score: %.2f' % cls.score(X_test,y_test))
# Predict: score the Kaggle test file with the fitted model
filepath = '/Users/mac/Desktop/Kaggle_datasets/Titanic/test.csv'
df = pd.read_csv(filepath)
X_test_ = data_preprocessing(df)
y_pred_ = cls.predict(X_test_)
# -
filepath = '/Users/mac/Desktop/Kaggle_datasets/Titanic/gender_submission.csv'
df = pd.read_csv(filepath)
df['Survived'] = y_pred_
df
df.to_csv('answer.csv', index_label='PassengerId')
| Skl_Titanic_(Binary_Classification).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Mining Versuch Music Clustering
# * Prof: Dr. <NAME>
# * Autor: <NAME>
# * Datum: 02.06.2020
#
# [Ãbersicht Ipython Notebooks im Data Mining Praktikum](Data Mining Praktikum.ipynb)
#
# # EinfÃŒhrung
# ## Lernziele:
# In diesem Versuch sollen Kenntnisse in folgenden Themen vermittelt werden:
#
# * Zugriff auf Musikdateien
# * Transcodierung von mp3 zu wav
# * Extraktion von Merkmalen in Musikdateien (Feature Extraction)
# * Optimierung mit dem genetischen Algorithmus
# * Selektion der aussagekrÀftigsten Merkmale (Feature Selection)
# * Clustering von Musikfiles (automatische Playlistgenerierung)
#
#
# ## Vor dem Versuch zu klÀrende Fragen
#
# ### Transcodierung von MP3 nach WAV und Merkmalsextraktion
# In diesem Versuch wird der MP3 Decoder [mpg123](http://www.mpg123.de/) eingesetzt. Installieren und testen sie diesen Decoder vor dem Versuch auf ihrem Rechner. Machen Sie sich zunÀchst mit dem in Kapitel [Gegebene Module zur Transcodierung und Feature Extraction](#Gegebene-Module-zur-Transcodierung-und-Feature-Extraction) aufgefÌhrten Code vertraut. Versuchen Sie Funktion und Ablauf dieses Programms zu verstehen und beantworten Sie folgende Fragen.
# 1. Was versteht man unter den statistischen GröÃen _Mittelwert, Standardabweichung, Skewness und Kurtosis_?
#
# #### Mittelwert
# Mit dem Begriff Mittelwert (_auch Durchschnitt genannt_) ist in der Statistik meistens das _artithmetische Mittel_ gemeint. Der Mittelwert ist dabei eine Kennzahl fÃŒr die zentrale Tendenz einer Verteilung. Das arithmetische Mittel ist rechnerisch die Summe der gegebenen Werte geteilt durch die Anzahl der Werte. Angewandt kann dies beispielsweise zum Berechnen einer Durchschnittsgeschwindigkeit werden. In diesem Fall wÃŒrden die Werte als Geschwindigkeit interpretiert werden. Der _Erwartungswert_ einer Zufallsvariablen beschreibt hingegen die Zahl, die die Zufallsvariable im Mittel annimmt.
#
# #### Standardabweichung
# Die Standardabweichung ist ein Maà dafÃŒr, wie weit einzelne DatensÀtze verteilt sind. Mathematisch wird die Standardabweichung definiert als die mittlere quadratische Abweichung einer reellen Zufallsvariablen von ihrem Erwartungswert. Wenn alle Werte gleich sind, so ist die Standardabweichung Null, da diese von der Varianz abgeleitet ist. Das heiÃt je weiter Daten vertreut sind, desto höher ist die Standardabweichung. Je enger/nÀher DatensÀtze jedoch beieinander liegen, desto niedriger fÀllt die Standardabweichung aus. Es gibt zwei verschiedene Formeln der Anwendung, welche sich darin unterscheiden, dass nicht durch _n_, sondern durch _n-1_ geteilt wird.
# - die Standardabweichung fÌr eine Stichprobe, wenn die Ergebnisse generalisierbar sein sollen. D.h. man möchte Ergebnisse erzielen, die es ermöglichen auf alle Menschen und nicht nur auf die ursprÌgnliche Versuchsgruppe _x_ RÌckschlÌsse zu ziehen.
# - die Standardabweichung fÃŒr die Grundgesamtheit, wenn die Ergebnisse nicht verallgemeinert werden sollen. D.h. die Ergebnisse gelten nur fÃŒr die Versuchsgruppe _x_.
#
# #### Skewness
# Die MaÃzahl der Schiefe (_eng. skewness_) gibt an, inwieweit eine Verteilungsfunktion sich zu einer Seite "neigt". Das heiÃt diese ist ein Maà fÃŒr die Asymmetrie einer Verteilung. Der Wert kann dabei _positiv_ (Verteilungsfunktion tendiert nach rechts), _negativ_ (Verteilungsfunktion tendiert nach links), _null_ (Verteilungsfunktion ist symmetrisch) und _undefiniert_ (0/0) sein.
#
# ###### Eigenschaften einer unimodalen Verteilung**
# - linksschief (identisch mit dem Begriff rechtssteil) Verteilungen ist der Median gröÃer als das arithmetische Mittel.
# - rechtsschief (identisch mit dem Begriff linkssteil) Verteilungen ist der Modus kleiner als der Erwartungswert
# - Ist die Verteilungsfunktion symmetrisch, so ist das arithmetische Mittel gleich dem Median und die Verteilung wird eine Schiefe von 0 haben.
#
# #### Kurtosis
# Die Kurtosis (_dt. Wölbung_) ist ein Maà fÃŒr die Steilheit einer Wahrscheinlichkeitsfunktion, oder HÀufigkeitsverteilung. Verteilungen mit geringer Wölbung streuen relativ gleichmÀÃig. Bei Verteilungen mit hoher Wölbung resultiert die Streuung dagegen deutlich mehr aus extremen, aber seltenen Ereignissen. Der Exzess gibt die Differenz der Wölbung der betrachteten Funktion zur Wölbung der Dichtefunktion einer normalverteilten ZufallsgröÃe an. Jede nicht-symmetrische Verteilungsfunktion ist schief. Verteilungen mit niederger Kurtosis streuen relativ gleichmÀÃig(breite Glocke), Verteilungen mit hoher Kurtosis haben eine mehr extreme auf einem Punkt verteilte Streuung(spitze Glocke).
# 2. Was beschreibt die Fourier-Transformierte eines zeitlich ausgedehnten Signals?
#
# #### Die Fourier Transformation
# Die Fourier-Transformation (genauer die kontinuierliche Fourier-Transformation ist eine mathematische Methode aus dem Bereich der Fourier-Analysis, mit der kontinuierliche, aperiodische Signale in ein kontinuierliches Spektrum zerlegt werden. Die Funktion, die dieses Spektrum beschreibt, nennt man auch Fourier-Transformierte oder Spektralfunktion. Es handelt sich dabei um eine Integraltransformation, die nach dem Mathematiker <NAME> benannt ist. Fourier fÃŒhrte im Jahr 1822 die Fourier-Reihe ein, die jedoch nur fÃŒr periodische Signale definiert ist und zu einem diskreten Frequenzspektrum fÃŒhrt. Die Entwicklung einer Funktion in ihre _Fourier-Reihe_ wird harmonische Analyse genannt. Die Funktion wird dabei als Ãberlagerung von sinusförmigen, Schwingungen dargestellt. Ist die Funktion ein Eingangssignal eines LTI-Systems, kann das Ausgangssignal relativ einfach berechnet werden, da das Signals als Ãberlagerung von Eigenfunktionen des Systems vorliegt.
# 3. Mit welcher Samplingrate werden die WAV Dateien abgetastet?
#
# #### Abtastrate von WAV Dateien
# Die Abtastrate oder Abtastfrequenz, auch Samplingrate, Samplerate oder Samplingfrequenz, ist in der Signalverarbeitung die HÀufigkeit, mit der ein Analogsignal (auch zeitkontinuierliches Signal genannt) in einer vorgegebenen Zeit abgetastet (das heiÃt, gemessen und in ein zeitdiskretes Signal umgewandelt) wird. Da es sich bei einer Abtastung immer um einen periodischen Vorgang handelt, ist ihre grundlegende Einheit das *Hertz* (abgekÃŒrzt: Hz), also Perioden pro Sekunde.
# - Ein Abtastvorgang pro Sekunde: 1 Hz = 1 S/s
#
# Das Waveform Audio File Format (_WAV_) ist ein von IBM und Microsoft
# entwickeltes Containerformat für die Speicherung von Audiodaten. Die in den Containern enthaltenen Dateien sind normalerweise unkomprimierte, in _Pulscodemodulation (PCM)_ codierte Audiosignale für die Speicherung und Bearbeitung von Audio-Informationen. WAV-Dateien sind unkomprimiert und fehlerfrei, aber relativ groß. Ihre Größe ergibt sich aus der Abtastrate und der Samplingtiefe bei der Digitalisierung des analogen Audiosignals. Daraus ergibt sich bei einer standardmäßigen Abtastrate von **44,1 kHz** und einer Samplingtiefe von **16 Bit** eine Dateigröße von **5,3 Megabyte (MB) pro Minute** für ein Monosignal. Es sind allerdings auch andere Abtastraten und Sampletiefen möglich.
# 4. Insgesamt werden 42 Merkmale pro Musiksequenz extrahiert. Beschreiben Sie kurz diese Merkmale
#
# #### Merkmalsextraktion bei WAV Dateien
# Die extrahierten Merkmale betreffen die Kurtuosis, ... Diese Merkmale liegen sowohl im Spektralbereich, als auch Merkmale im Zeitbereich.
# ### Matching der Teilsequenzen
#
# 1. Nachdem fÃŒr jedes MusikstÃŒck die beiden Teilsequenzen in Form der extrahierten Merkmale vorliegen: Wie kann die Ãhnlichkeit zwischen Teilsequenzen ermittelt
# werden?
#
# Die MusikstÃŒcke, welche als Trainingsdaten vorliegen, werden die einzelnen Teilsequencen in Vektoren umgewandelt und anschlieÃend auf deren Ãhnlichkeit miteinander vergliechen. Haben diese eine hohe Ãhnlichkeit, so so mÃŒsste das erste TeilstÃŒck und das zweite TeilstÃŒck gleich oder sehr Àhnlich sein.
#
#
# 2. Welche Numpy- bzw. Scipy-Module können Sie fÃŒr die Bestimmung der Ãhnlichkeit zwischen Teilsequenzen einsetzen?
#
# Aus der Scipy Libary können folgende Funktionen angewandt werden, um die Ãhnlichkeit der einzelnen Teilsequenzen zu berechnen.
#
# ```
# scipy.spatial.distance.euclidean (Euklid)
#
# scipy.stats.pearsonr (Pearson)
#
# scipy.spatial.distance.cosine (Kosinus)
# ```
# ### Genetischer Algorithmus fÃŒr die Merkmalsselektion
#
# 1. Beschreiben Sie die Prozesschritte im genetischen Algorithmus [Genetischer Algorithmus](https://www.hdm-stuttgart.de/~maucher/Python/FunktionenAlgorithmen/html/genAlgTSP.html)
#
# #### Der Genetische Algorithmus
# Ein Genetischer Algorithmus (GA) ist eine Such-Heuristik, welche durch _Charls Darwin's_ Theorie der natÌrlichen Evolution inspiriert wurde. Dieser reflektiert den Prozess, in welchem eine natÌrliche Selektion der besten (_fittest_) Individuen einer Population fÌr eine Reproduktion selektiert werden. Genetische Algorithmen (GA) sind sehr gut fÌr Probleme in der Suche, als auch fÌr Optimierungen einzusetzen. Ein Beispiel hierfÌr ist der Einsatz eines _GA_, um eine Lösung fÌr das "Travelling Salesman Problem" (TSP) zu finden.
# FÃŒr die Erkundung der besten Merkmale in diesem Fall werden die einzelnen Elemente des GA auf die Problemstellung wie folgt ÃŒbertragen:
#
#
# * **Gene**: Satz an Parametern (Variablen).
# * **Individual/Chromosome**: Ein Chromosom ist eine Zusammensetzung von Genen. In diesem Fall ein einzelnes Merkmal, welches die Bedingungen erfÃŒllt.
# * **Population**: Eine Sammlung möglicher Merkmale.
# * **Parents**: Zwei Merkmale, welche kombiniert werden, um ein neues Merkmal zu generieren.
# * **Mating Pool**: Eine Sammlung an Elternteilen, welche dazu verwendet werden, eine neue Population (nÀchste Generation) zu generieren.
# * **Fitness**: Eine Funktion, welche die GÃŒte der Mermale anhand ihres _mittleren Rangs_ bewertet.
# * **Mutation**: Eine Möglichkeit Variationen in der Population zu erzeugen, durch zufÀlliges austauschen von Elementen der Merkmale.
#
# #### Schritte der AusfÃŒhrung
#
# ##### Erzeugung zufÀlliger Population:
# Erzeugt eine Menge von Individuen mit zufÀlligen Chromosomen. Jedes Individum ist ein Lösungsvorschlag fÌr ein Problem.
#
# ##### Bestimme Fitness:
# Individuen werden anhand einer Fitnessfunktion bewertet.
#
# ##### Selektion:
# WÀhle ein Paar von Individuen aus. Die Auswahl ist von der Fitness abhÀngig. Je höher die Fitness, desto höher die Wahrscheinlichkeit, dass sie fÌr die Kreuzung ausgewÀhlt werden.
#
# ##### Kreuzung:
#
# Paare werden gekreuzt und erzeugen Nachkommen, welche die Chromosomen der Eltern erben. Hier wird ein zufÀlliger Schnittpunkt zur Vereinigung der Elternvektoren genutzt. Die Chromosome werden an dieser Stelle geteilt und miteinander vertauscht.
#
# ##### Mutation:
#
# Ein Teil der Chromosomen wird zufÀllig verÀndert. Es gibt eine Mutationswahrscheinlichkeit, folglich kommt es nicht immer zu einer Mutation.
#
# 2. In diesem Versuch wird davon ausgegangen, dass Merkmale dann gut sind, wenn durch sie die erste Teilsequenz eines MusikstÃŒcks durch einen Àhnlichen Vektor wie die jeweils zweite Teilsequenz beschrieben wird. Wie kann mit dieser Annahme der genetische Algorithmus fÃŒr die Merkmalsselektion angewandt werden. Unter Merkmalsselektion versteht man allgemein die Suche nach den $r$ besten Merkmalen aus einer Menge von insgesamt $R$ Merkmalen. In diesem Versuch werden initial $R=42$ Merkmale extrahiert, aus denen dann die besten $r<R$ Merkmale zu bestimmen sind. Ãberlegen Sie hierfÃŒr speziell wie die Fitnessfunktion, die Kreuzung und die Mutation zu realisieren sind.
#
# ##### Fitnessfunktion:
#
# Euklidische Distanz der verwendeten Features zwischen der ersten und der zweiten Teilsequenz. Diese errechneten Werte werden aufsteigend sortiert in einem Array abgespeichert. Danach wird aus dem Array ein Mittlerer Rang(siehe Aufgabebeschreibung Matching der Teilsequenzen 4.) berechnet. Dieser spiegelt die Fitness wieder.
#
# ##### Kreuzung:
#
# Indivduen mit niedriger Distanz bzw. mit gutem Fitnesswert werden miteinander gekreuzt. Dabei wird ein zufÀlliger Schnittpunkt zur Vereinigung der Elternvektoren genutzt. Die Chromosome werden an dieser Stelle geteilt und miteinander vertauscht.
#
# ##### Mutation:
#
# Mit niedriger Wahrscheinlichkeit wird ein Wert zufÀllig verÀndert.
# ### Clustering und Playlistgenerierung
#
# 1. Wie kann mit einem hierarchischen Clustering der Musikfiles eine Menge von Playlists erzeugt werden, so dass innerhalb einer Playlist möglichst Àhnliche Titel zu finden sind?
# # DurchfÃŒhrung
# ## Gegebene Module zur Transcodierung und Feature Extraction
# Mit dem in diesem Abschnitt gegebenen Code werden die im Unterverzeichnis _BandCollection_ befindlichen mp3-Files zunÀchst in wave decodiert. Danach werden aus den wave Dateien Audiomerkmale erhoben.
#
# Von jedem MusikstÌck werden zwei disjunkte Teilsequenzen erhoben und von beiden Teilsequenzen jeweils ein Merkmalsvektor gebildet. Der Grund hierfÌr ist: FÌr die spÀter folgende Bestimmung der wichtigsten Merkmale (Merkmalsselektion mit dem genetischen Algorithmus), wird angenommen dass Merkmale dann gut sind, wenn die aus ihnen gebildeten Merkmalsvektoren fÌr Teilsequenzen des gleichen MusikstÌcks nahe beieinander liegen und die Merkmalsvektoren von Teilsequenzen unterschiedlicher MusikstÌcke weiter voneinander entfernt sind. In der Merkmalsselektion werden dann die Merkmale als relevant erachtet, fÌr die diese Annahme zutrifft.
#
# **Aufgaben:**
#
# 1. Stellen Sie im unten gegebenen Code die Verzeichnisse fÃŒr Ihre Musikdateien (aktuell Unterverzeichnis _BandCollection_) und fÃŒr den Ort Ihres _mpg123_ Decoders richtig ein.
#
# Command template for the external mpg123 decoder: "-w <out>" writes a WAV,
# "-r 10000" resamples to 10 kHz, "-m" mixes to mono; filled via %-formatting
# with (output wav path, input mp3 path) in compute_chunk_features below.
MPG123_PATH = 'C:\Program Files (x86)\mpg123-1.24.0-x86\\mpg123.exe -w "%s" -r 10000 -m "%s"'
# Root directory of the MP3 collection: one subdirectory per band/artist.
MUSIC_FILE_PATH = '../data/BandCollection'
# +
# #!pip install Wave
# #!pip install pandas
import subprocess
import wave
import struct
import numpy
import os
import pandas as pd
# +
numpy.set_printoptions(precision=2,suppress=True)
#Names of features extracted in this module
FeatNames=["amp1mean","amp1std","amp1skew","amp1kurt","amp1dmean","amp1dstd","amp1dskew","amp1dkurt","amp10mean","amp10std",
"amp10skew","amp10kurt","amp10dmean","amp10dstd","amp10dskew","amp10dkurt","amp100mean","amp100std","amp100skew",
"amp100kurt","amp100dmean","amp100dstd","amp100dskew","amp100dkurt","amp1000mean","amp1000std","amp1000skew",
"amp1000kurt","amp1000dmean","amp1000dstd","amp1000dskew","amp1000dkurt","power1","power2","power3","power4",
"power5","power6","power7","power8","power9","power10"]
# -
def moments(x):
    """Return [mean, std, skewness, kurtosis] of the 1-D numpy array *x*."""
    mu = x.mean()
    sigma = x.var() ** 0.5
    centered = x - mu
    skew = (centered ** 3).mean() / sigma ** 3
    kurt = (centered ** 4).mean() / sigma ** 4
    return [mu, sigma, skew, kurt]
#Feature category 2: Frequency domain parameters
def fftfeatures(wavdata):
    """Feature category 2: frequency-domain parameters.

    Splits the positive-frequency magnitude spectrum (DC and first bin
    dropped) into 10 bands and returns each band's share of total power.
    """
    spectrum = numpy.abs(numpy.fft.fft(wavdata))
    spectrum = spectrum[2:int(spectrum.size / 2 + 1)]
    total_power = spectrum.sum()
    bands = numpy.array_split(spectrum, 10)
    return [band.sum() / total_power for band in bands]
#Creating the entire feature vector per music-file
def features(x):
    """Build the full 42-element feature vector for one audio chunk.

    For the raw signal and for block-averaged versions (block sizes 10, 100,
    1000 samples), append the four moments of the signal and of its first
    difference (8 values x 4 scales = 32), then the 10 FFT band powers.
    Requires len(x) to be divisible by 1000 for the reshapes.
    """
    x = numpy.array(x)
    f = []
    for scale in (1, 10, 100, 1000):
        xs = x if scale == 1 else x.reshape(-1, scale).mean(1)
        diff = xs[1:] - xs[:-1]
        f.extend(moments(xs))
        f.extend(moments(diff))
    f.extend(fftfeatures(x))
    return f
# 2. Die verwendete Musiksammlung sollte mindestens 5 verschiedene Interpreten möglichst unterschiedlicher Genres enthalten. Von jedem Interpret sollten mehrere Titel (evtl. ein ganzes Album) enthalten sein.
def read_wav(wav_file):
    """Return two disjoint 60-second chunks of sample data from *wav_file*.

    Assumes mono 16-bit audio at 10 kHz (n = 600000 frames per chunk).
    Chunk 1 covers frames [n, 2n) and chunk 2 frames [2n, 3n). Two
    subsequences of the same song are extracted because, later on, good
    features are assumed to be similar across chunks of one song but to
    differ between songs.

    Returns:
        (wav_data1, wav_data2): two tuples of n signed 16-bit samples.
    Raises:
        ValueError: if the file holds fewer than 3*n frames.
    """
    n = 60 * 10000
    # context manager guarantees the handle is closed (the original leaked
    # it, including on the "too short" error path)
    with wave.open(wav_file) as w:
        if w.getnframes() < n * 3:
            raise ValueError('Wave file too short')
        w.setpos(n)
        wav_data1 = struct.unpack('%dh' % n, w.readframes(n))
        wav_data2 = struct.unpack('%dh' % n, w.readframes(n))
    return wav_data1, wav_data2
def compute_chunk_features(mp3_file):
    """Return feature vectors for two 60 s chunks of an MP3 file.

    Decodes *mp3_file* to a temporary mono 10 kHz WAV ('temp.wav', reused
    and overwritten per call) via the external mpg123 binary configured in
    MPG123_PATH, then extracts the 42 statistical features of both chunks.

    Raises:
        RuntimeError: if the decoder exits with a non-zero status.
    """
    out_file = 'temp.wav'
    cmd = MPG123_PATH % (out_file, mp3_file)
    retcode = subprocess.call(cmd)
    if retcode != 0:
        # fixed: surface decoder failures here instead of failing later,
        # confusingly, inside wave.open on a stale/absent temp.wav
        raise RuntimeError('mpg123 failed with exit code %d for %s' % (retcode, mp3_file))
    wav_data1, wav_data2 = read_wav(out_file)
    return numpy.array(features(wav_data1)), numpy.array(features(wav_data2))
#
# 3. FÃŒhren Sie den in diesem Abschnitt gegebenen Programmcode zur Audiofeature-Extraction aus. Damit werden fÃŒr alle Musiksequenzen jeweils 42 Merkmale extrahiert. Die extrahierten Merkmalsvektoren der jeweils ersten Sequenz werden in das File _FeatureFileTrainingAllList1.csv_ geschrieben, die der zweiten Teilsequen in das File _FeatureFileTestAllList2.csv_.
# +
# Accumulators: song titles plus the feature vectors of chunk 1 and chunk 2.
fileList=[]
featureList1=[]
featureList2=[]
#Specify the name of the directory, which contains your MP3 files here.
# This directory should contain for each band/author one subdirectory, which contains all songs of this author
for path, dirs, files in os.walk(MUSIC_FILE_PATH):
    #print '-'*10,dirs,files
    for f in files:
        if not f.endswith('.mp3'):
            # Skip any non-MP3 files
            continue
        mp3_file = os.path.join(path, f)
        print(mp3_file)
        # Extract the track name (i.e. the file name) plus the names
        # of the two preceding directories. This will be useful
        # later for plotting.
        tail, track = os.path.split(mp3_file)
        tail, dir1 = os.path.split(tail)
        tail, dir2 = os.path.split(tail)
        # Compute features. feature_vec1 and feature_vec2 are lists of floating
        # point numbers representing the statistical features we have extracted
        # from the raw sound data.
        try:
            feature_vec1, feature_vec2 = compute_chunk_features(mp3_file)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # consider `except Exception` — left unchanged in this doc pass.
            print("Error: Chunk Features failed")
            continue
        #title=str(track)
        # Title is "<band dir>\<file name>"; used as the DataFrame index below.
        title=str(dir1)+'\\'+str(track)
        print('-'*20+ title +'-'*20)
        #print " feature vector 1:",feature_vec1
        #print " feature vector 2:",feature_vec2
        fileList.append(title)
        featureList1.append(feature_vec1)
        featureList2.append(feature_vec2)
# Write feature vecotrs of all music files to pandas data-frame
MusicFeaturesTrain = pd.DataFrame(index=fileList, data=numpy.array(featureList1), columns=FeatNames)
MusicFeaturesTrain.to_csv("FeatureFileTrainingAllList1.csv")
MusicFeaturesTest = pd.DataFrame(index=fileList, data=numpy.array(featureList2), columns=FeatNames)
MusicFeaturesTest.to_csv("FeatureFileTestAllList2.csv")
# -
# ## Matching der Teilsequenzen
# In diesem Abschnitt soll ein Verfahren implementiert werden, mit dem die Ãbereinstimmung der ersten Teilsequenz eines MusikstÃŒcks mit den zweiten Teilsequenzen aller anderen MusikstÃŒcke berechnet werden kann.
#
# **Aufgaben:**
#
# #!pip install prettyprint
import pprint
# Shared pretty-printer used by the (mostly commented-out) debug output below.
pp = pprint.PrettyPrinter(indent=4)
# 1. Lesen Sie die im vorigen Teilversuch angelegten zwei csv-Dateien in jeweils einen eigenen Pandas Dataframe ein.
# Read the two CSV files created in the previous step.
# NOTE(review): `names=FeatNames` presumably lists one name fewer than the
# file has columns, so pandas consumes the leading title column as the row
# index -- verify against the written CSV layout.
df_train_features = pd.read_csv("FeatureFileTrainingAllList1.csv", sep=",", header=0, names=FeatNames)
df_test_features = pd.read_csv("FeatureFileTestAllList2.csv", sep=",", header=0, names=FeatNames)
#pp.pprint(df_train_features.head(5))
#pp.pprint(df_test_features.head(5))
# 2. Skalieren Sie beide Teilsequenzmengen, so dass alle Merkmale eine Standardabweichung von 1 aufweisen. Z.B. mit [http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.scale.html).
# +
# #!pip install sklearn --upgrade
from sklearn import preprocessing
import numpy as np
# +
#sklearn.preprocessing.scale(X, *, axis=0, with_mean=True, with_std=True, copy=True)
# returns a numpy array
# Scale both sub-sequence sets column-wise to zero mean and unit standard
# deviation; preprocessing.scale returns a plain numpy array.
train_features_scaled = preprocessing.scale(df_train_features)
test_features_scaled = preprocessing.scale(df_test_features)
##pp.pprint(train_features_scaled)
##pp.pprint(test_features_scaled)
# +
##pp.pprint('Mean value \n {0}'.format(train_features_scaled.mean(axis=0)))
##pp.pprint('Standard deviation \n {0}'.format(train_features_scaled.std(axis=0)))
# +
##pp.pprint('Mean value \n {0}'.format(test_features_scaled.mean(axis=0)))
##pp.pprint('Standard deviation \n {0}'.format(test_features_scaled.std(axis=0)))
# -
# Wrap the scaled numpy arrays back into DataFrames, re-attaching the
# original row index (music titles) and feature column names.
df_train_features = pd.DataFrame(data=train_features_scaled, index=df_train_features.index, columns=df_train_features.columns)
df_test_features = pd.DataFrame(data=test_features_scaled, index=df_test_features.index, columns=df_test_features.columns)
# +
#pp.pprint('The train features Dataframe {}'.format(df_train_features))
#pp.pprint('The test features Dataframe {}'.format(df_test_features))
# -
# Die merkmalsausprÀgung von Objekten unterliegt _Streuungen_. Hierdurch kann eine Distanz _d{ij} zwischen den Objekten durch die Mermale dominiert werden, die eine entsprechend groÃe Streuung besitzen. Dieser Umstand ist besonders zu berÃŒcksichtigen, wenn zwischen den Objektmerkmalen, deutliche GröÃenunterschiede bestehen. Um die Streuung zu berÃŒcksichtigen, werden die Merkmale _skaliert_. Wird die Distanz ÃŒber die _L-2_ Norm bestimmt, kann die Skalierung ÃŒber die **Standardabweichung** _s_ durchgefÃŒhrt werden. Dazu wird _F2_ um die quadratische Standardabweichung _s_ ergÀnzt.
# #### Berechnung der skalierten Distanz
# **3.1 Euklidsche Distanz**: Bestimmen Sie zu jeder Teilsequenz aus der Datei _FeatureFileTrainingAllList1.csv_ die euklidische Distanz zu allen Teilsequenzen aus der Datei _FeatureFileTestAllList2.csv_ und schreiben Sie diese Distanzen in eine aufsteigend geordnete Liste.
import math
from scipy.spatial import distance
# euklidische Distanz zu allen Teilsequenzen
def calcEuclideandDist(df_one, df_two):
    """For every row of df_one, rank all rows of df_two by Euclidean distance.

    Returns a dict mapping each index label of df_one to a list of
    [distance, index_label_of_df_two] pairs sorted by ascending distance.
    """
    result = {}
    for label_a, vec_a in df_one.iterrows():
        pairs = [[distance.euclidean(vec_a, vec_b), label_b]
                 for label_b, vec_b in df_two.iterrows()]
        pairs.sort()
        result[label_a] = pairs
    return result
# Euclidean distance table: first sub-sequence of every track vs. the
# second sub-sequences of all tracks.
euclid_dist_dict = calcEuclideandDist(df_train_features, df_test_features)
print(len(euclid_dist_dict))
#pp.pprint(euclid_dist_dict)
# **3.2 Korrelative Distanz**: Schreiben Sie auch die zugehörigen Argumente (Teilsequenzen) in eine geordnete Liste, sodass fÌr jede Teilsequenz aus _FeatureFileTrainingAllList1.csv_ die am nÀchsten liegende Teilsequenz aus _FeatureFileTestAllList2.csv_ an erster Stelle steht, die zweitnÀchste Teilsequenz an zweiter usw.
# zugehörige Argumente fÌr beide Teilsequenzen
def calcCorrelationDist(df_one, df_two):
    """For every row of df_one, rank all rows of df_two by correlation distance.

    The correlation distance is 1 - Pearson correlation. Returns a dict
    mapping each index label of df_one to a list of
    [distance, index_label_of_df_two] pairs in ascending distance order.
    """
    ranking = {}
    for label_a, vec_a in df_one.iterrows():
        candidates = []
        for label_b, vec_b in df_two.iterrows():
            candidates.append([distance.correlation(vec_a, vec_b), label_b])
        ranking[label_a] = sorted(candidates)
    return ranking
# Correlation distance table, analogous to the Euclidean one above.
cor_dist_dict = calcCorrelationDist(df_train_features, df_test_features)
print(len(cor_dist_dict))
#pp.pprint(cor_dist_dict)
# #### Berechnung des Mittleren Rangs
# 4. Bestimmen Sie ÃŒber alle Teilsequenzen aus _FeatureFileTrainingAllList1.csv_ den **mittleren Rang** an dem die zugehörige zweite Teilsequenz erscheint. Liegt z.B. fÃŒr die erste Teilsequenz des MusikstÃŒcks A die zweite Teilsequenz nur an fÃŒnfter Stelle der geordneten nÀchsten Nachbarliste. Dann wÃŒrde diese Teilsequenz mit dem Rang 5 in den Mittelwert einflieÃen.
#
# Hinweis: Werden die verkÌrzten Files mit 50 anstelle von 60 genommen. Aufgrund dieser geÀnderten Datengrundlage sind die aktuellen Abweichungen vorhanden.
#
from numpy import mean
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
def calcMeanRank(dist_dict):
    """Average rank at which each track's own second sub-sequence appears.

    dist_dict maps a track label to an ascending list of
    [distance, track_label] pairs (as produced by the distance helpers
    above). Ranks are 1-based; a perfect match list yields a mean rank of 1.
    """
    ranks = []
    for track, neighbours in dist_dict.items():
        # start=1: rank zero is not allowed.
        for position, entry in enumerate(neighbours, start=1):
            if entry[1] == track:
                ranks.append(position)
    return mean(ranks)
# Mean rank using the correlation distance (lower is better).
cor_mean_rank = calcMeanRank(cor_dist_dict)
cor_mean_rank
# 5. Bestimmen Sie jetzt den mittleren Rang, fÃŒr den Fall, dass _correlation_ anstelle _euclidean_ als Ãhnlichkeitsmaà verwendet wird. Welches Ãhnlichkeitsmaà ist fÃŒr diese Anwendung zu bevorzugen?
#
# Mean rank using the Euclidean distance, for comparison.
euc_mean_rank = calcMeanRank(euclid_dist_dict)
euc_mean_rank
# 6. Diskutieren Sie das Ergebnis
#
# Der mittlere Rang bei der Berechnung mit dem Ähnlichkeitsmaß der _correlation_ ergibt einen höheren Wert, während die Berechnung mit dem Ähnlichkeitsmaß der _euclidean_ einen leicht besseren (niedrigeren) Wert ergibt.
#
# Daher wÀre das euklidsche Ãhnlichkeitsmaà hier zu bevorzugen
# Die euklidische Distanz wird auch _L2-Norm_ genannt und ist eine Variante der sogenannten Minkowski-Metrik zur Berechnung von distanzen zwischen Vektoren (Punkte) in einem höherdimensionalen Raum. Die Korrelation ist ein Maà fÃŒr den statistischen Zusammenhang zwischen zwei DatensÀtzen. Der mittlere Rang bei der Berechnung mit dem Ãhnlichkeitsmaà der _correlation_ ergibt einen Wert von 2.65, wÀhrend die Berechnung mit dem Ãhnlichkeitsmaà der _euclidean_ einen leicht besseren Wert von 2.65 ergibt. Das Ãhnlichkeitsmaà von Euklid ist in diesem Fall fÃŒr die Anwendung zu bevorzugen, da dies eine bessere Aussage ÃŒber die tatsÀchliche Ãhnlichkeit der Formen zwischen den Vektoren erlaubt.
#
# Hinweis zu anderen Bereichen: In der Signalverarbeitung wird hÀufig die Metrik der _Korrelation_ oder _Cross-Correlation_ eingesetzt. Dabei ist ein Wert gröÃer als 0.8 anzustreben.
# ## Merkmalsauswahl mit dem genetischen Algorithmus
# In diesem Abschnitt soll unter Anwendung eines selbst zu implementierenden genetischen Algorithmus eine Untermenge wichtiger Merkmale aus den insgesamt 42 angelegten Merkmalen berechnet werden.
# Als Vorlage kann hierfÌr die Implementierung fÌr die [Lösung des TSP Problems](https://www.hdm-stuttgart.de/~maucher/Python/FunktionenAlgorithmen/html/genAlgTSP.html) herangezogen werden. Anzupassen sind dann jedoch mindestens die Fitness-Funktion, die Kreuzungs- und die Mutationsfunktion.
#
# #### Der Genetische Algorithmus
# Ein Genetischer Algorithmus (GA) ist eine Such-Heuristik, welche durch _<NAME>'s_ Theorie der natÌrlichen Evolution inspiriert wurde. Dieser reflektiert den Prozess, in welchem eine natÌrliche Selektion der besten (_fittest_) Individuen einer Population fÌr eine Reproduktion selektiert werden. Genetische Algorithmen (GA) sind sehr gut fÌr Probleme in der Suche, als auch fÌr Optimierungen einzusetzen. Ein Beispiel hierfÌr ist der Einsatz eines _GA_, um eine Lösung fÌr das "Travelling Salesman Problem" (TSP) zu finden.
# FÃŒr die Erkundung der besten Merkmale in diesem Fall werden die einzelnen Elemente des GA auf die Problemstellung wie folgt ÃŒbertragen:
#
# * **Gene**: Einzelnes Element eines Merkmals, bzw. ein Satz an Parametern (Variablen).
# * **Individual/Chromosome**: Ein Chromosom ist eine Zusammensetzung von Genen. In diesem Fall ein einzelnes Merkmal, welches die Bedingungen erfÃŒllt.
# * **Population**: Eine Sammlung möglicher Merkmale.
# * **Parents**: Zwei Merkmale, welche kombiniert werden, um ein neues Merkmal zu generieren.
# * **Mating Pool**: Eine Sammlung an Elternteilen, welche dazu verwendet werden, eine neue Population (nÀchste Generation) zu generieren.
# * **Fitness**: Eine Funktion, welche die GÃŒte der Mermale anhand ihres _mittleren Rangs_ bewertet.
# * **Mutation**: Eine Möglichkeit Variationen in der Population zu erzeugen, durch zufÀlliges austauschen von Elementen der Merkmale.
# * **Elitism**: Eine Möglichkeit die besten Individuen in die nÀchste Generation zu Ìbertragen.
#
# Der hier dargestellte Genetische Algorithmus (GA) wird die folgenden Schritte ausfÃŒhren:
#
# 1. Erzeugung einer initialen, zufÀlligen Population.
# 2. Fitness der Individuen (_Chromosomen_) innerhalb der Population berechnen.
# 3. Selektion des _Mating Pools_, d.h. der fittesten Individuen.
# 4. Kreuzung zur Erzeugung einer neuen Generation.
# 5. Mutation.
# 6. Austausch gegen die neue Population.
# 7. Wiederhole von Schritt 1 bis 6, bis die Abbruchbedingung erfÃŒllt ist.
#
# **Aufgaben:**
# #### Genetischer Algorithmus fÃŒr die Music Feature Selection
#
# 1. Implementieren Sie die die Merkmalsauswahl mit dem genetischen Algorithmus entsprechend der o.g. Beschreibung
# #!pip install pandas --upgrade
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random, operator
# ##### Fitness-Funktion
# Die Fitness der Population wird mittels des _mittleren Rangs_, wie im vorherigen Abschnitt berechnet. Je geringer die GröÃe des mittleren Ranges, desto höher die Bedeutsamkeit der ausgewÀhlten Merkmale.
# calculate fitness
def fitness(df_train, df_test):
    """Fitness of a feature subset: the mean rank obtained when matching
    first sub-sequences against second sub-sequences (lower is better)."""
    distances = calcEuclideandDist(df_train, df_test)
    return calcMeanRank(distances)
# #### Selektions-Funktion
# Zur Selektion des _Mating Pools_, d.h. der Elternteile, welche zur Erzeugung der nÀchsten Generation herangezogen werden sollen, können verschiedene Methoden angewandt werden. Die populÀrsten Methoden sind _fitness proporionate selection_, Àhnlich eines Roulette Rades oder die _tournament selection_. Eine weitere Möglichkeit der Selektion ist die Methode des _elitism_. Hierbei werden die höchst Performer in der Population gegenÌber der gesamten Performance der Population bewertet.
def selection(popRanked, eliteSize):
    # NOTE(review): unimplemented placeholder -- it returns the function
    # object itself, not a selection result. The genetic algorithm in
    # genAlg() performs selection inline instead of calling this stub.
    return selection
def matingPool(population, selection):
    # NOTE(review): unimplemented placeholder -- returns the function object
    # itself; genAlg() does not call it.
    return matingPool
# ##### Kreuzungsfunktion
# Die Kreuzung mittels _crossover_ hat verschiedene Arten an Kreuzungsverfahren. Diese können sein: _blend, one point, two points, uniform_. Mittels __crossover_ soll die nÀchste Generation aus der selektierten Elternpopulation generiert werden. Man nennt dies auch "_breeding_". In diesem Fall wird eine Funktion des _ordered crossover_ verwendet. Dabei werden zufÀllige Elemente (_Gene_) des ersten Elternteils ausgewÀhlt und mit Elementen / Genen des zweiten Elternteils aufgefÌllt, ohne diese zu duplizieren.
#
# * Kreuzungsfunktion wie in der KI Vorlesung beim Travelling Salesman Problem.
# * Man legt einen Kreuzungspunkt fest, nimmt dann fÃŒr das erste Kind den ersten Kopf wie im Elternteil und fÃŒr den Tail des ersten Kindes, scannt man den ersten Elternteil und ÃŒbernimmt die Features die noch nicht drin sind
def crossover(parent1, parent2):
    # NOTE(review): unimplemented placeholder -- `child` is undefined, so
    # calling this raises NameError. Crossover is implemented inline in
    # genAlg() instead.
    return child
# Generierung der neuen Generation (_offspring population_). Zuerst wird hierbei _elitism_ verwendet, um die besten Merkmale zu erhalten, um diese dann mittels _crossover_ aufzufÃŒllen.
def crossoverPopulation(matingpool, eliteSize):
    # NOTE(review): unimplemented placeholder -- `children` is undefined
    # (NameError if called); genAlg() does not use it.
    return children
# ##### Mutationsfunktion
# Es gibt verschiedene Varianten, um Mutation zu erzeugen. Diese sind unter Anderem: _it flip, swap, inverse, uniform, non-uniform, gaussian, shrink_. Mutation hÀlt eine wichtige Funktion fÌr GAs inne, denn diese hilft lokale Komvergenz (_local convergence_), durch das EinfÌhren neuer, noch unbekannter Merkmale, zu vermeiden. Die EinfÌhrung neuer Merkmale ermöglicht es einen noch unbekannten Lösungsraum zu erkunden. Da einzelne Merkmale nicht einfach herausgelöscht werden dÌrfen, wird hier die Methode des _swap mutation_ angewandt. Dies bedeutet, dass mit einer geringen Wahrscheinlichkeit verschiedene Merkmale ihre Lokation austauschen (_swap_) werden. FÌr ein Individuum kann dies mittels der folgenden Funktion erzeugt werden.
def mutation(individual, mutationRate):
    # NOTE(review): unimplemented placeholder -- returns the individual
    # unchanged; mutation is implemented inline in genAlg().
    return individual
def mutatePopulation(population, mutationRate):
    # NOTE(review): unimplemented placeholder -- `mutatedPop` is undefined
    # (NameError if called).
    return mutatedPop
# ##### Generation der Population
# Eine zufÀllige Population (_set of features_) wird aus der gesamten Population herausgelöst. Diese dient als initiale Population fÌr den Generativen Algorithmus (GA).
def generate_population(popSize, featureAmount, featureNames):
    """Create a random initial population for the genetic algorithm.

    Each of the popSize individuals (rows) holds featureAmount distinct
    feature indices drawn from range(len(featureNames)). Using a
    permutation guarantees that no index occurs twice within an individual.
    """
    pop = np.zeros((popSize, featureAmount))
    for row in range(popSize):
        pop[row, :featureAmount] = np.random.permutation(len(featureNames))[:featureAmount]
    return pop
# #### Erzeugung der nÀchsten Generation
# Eine neue Generation soll mittels der unten aufgefÌhrten Funktion generiert werden. Hierzu werden alle Distanzen, bzw. die Fitness der Merkmale mittels dem _mittleren Rang_ bewertet. Hierauf werden potentielle Eltern aus der Population ausgewÀhlt und ein _Mating Pool_ definiert. Aus diesem kann dann eine neue Generation mittels Kreugung (_crossover_) und Mutation (_mutation_) generiert werden.
#
# #### Anmerkung
# Es ist wichtig zu beachten, dass eine Population eine feste GröÃe behÀlt. Einzelne, Individuen (_Chromosome_) werden nur gegen fittere Individuen ausgetauscht.
def generate_next_population(popRanked, currentGen, eliteSize, mutationRate):
    # NOTE(review): unimplemented placeholder -- `nextGeneration` is
    # undefined (NameError if called); genAlg() builds the next generation
    # inline.
    return nextGeneration
# #### Genetischer Algorithmus
#
# Die PopulationsgröÃe, die Anzahl der auszuwÀhlenden Merkmale und die Anzahl der Iterationen sollen als Parameter einstellbar sein.
# Der Fitnesswert des besten Individuums in der Population soll in jeder Iteration gespeichert werden. Der Verlauf dieses besten Fitness-Wertes ÃŒber den Fortlauf der Iterationen soll graphisch ausgegeben werden.
#
# Ein Pandas Frame, der nur die berechneten wichtigsten Merkmale aus _FeatureFileTrainingAllList1.csv_ enthÀlt soll angelegt und in die csv Datei _subFeaturesTrain1.csv_ geschrieben werden.
#
# #### Pseudo Code
# ```
# START
# Generate the initial population
# Compute fitness
# REPEAT
# Selection
# Crossover
# Mutation
# Compute fitness
# UNTIL population has converged
# STOP
# ```
# +
def fastEuclidean(x,y):
    """Euclidean distance between two numeric vectors.

    Computed as sqrt((y-x) . (y-x)); expects numpy-compatible arrays.
    """
    diff = y - x
    return math.sqrt(np.dot(diff, diff))
def mid_rank(df_train,df_test,similarity):
    """Mean 1-based rank at which each training row's matching test row
    appears when the test rows are sorted by `similarity` distance to it.

    Parameters
    ----------
    df_train, df_test : pandas.DataFrame
        Feature frames with corresponding rows (row i of df_train and
        row i of df_test are assumed to belong to the same track).
    similarity : callable
        Distance function taking two 1-d numpy arrays, e.g. fastEuclidean.

    Returns
    -------
    float
        The mean rank (lower is better; 1.0 is perfect).
    """
    # Scale column-wise to zero mean / unit variance.
    # BUGFIX: `axis` is keyword-only in modern scikit-learn
    # (scale(X, *, axis=0, ...)), so the original positional call
    # preprocessing.scale(df, 0) raises a TypeError there.
    FeatureFileTrainingDF_scaled = preprocessing.scale(df_train, axis=0)
    FeatureFileTestAllDF_scaled = preprocessing.scale(df_test, axis=0)
    size = len(FeatureFileTestAllDF_scaled)
    ranklist = np.zeros(size)
    dct_dist = np.zeros(size)
    for i, k in enumerate(FeatureFileTrainingDF_scaled):
        for j, l in enumerate(FeatureFileTestAllDF_scaled):
            dct_dist[j] = similarity(k, l)
        # Rank = 1 + position of the matching test row (same i) in the
        # ascending distance ordering.
        dct_index = np.argsort(dct_dist)
        ranklist[i] = np.where(dct_index == i)[0][0] + 1
    return ranklist.sum() / len(ranklist)
# +
import numpy as np
import matplotlib.pyplot as plt
def genAlg(iterations, popSize, anzahlMerkmale, mutationPopulation, label):
    """Genetic algorithm selecting `anzahlMerkmale` music features.

    Evolves a population of feature-index subsets, using the mean rank
    (mid_rank with fastEuclidean) as the fitness to minimise. Writes the
    best individual's training features to ./subFeaturesTrain1.csv, plots
    the best fitness per iteration and saves the plot.

    Parameters
    ----------
    iterations : int
        Number of GA iterations.
    popSize : int
        Number of individuals in the population.
    anzahlMerkmale : int
        Number of features per individual.
    mutationPopulation : float
        Per-child mutation probability.
    label : str
        Plot legend label; also used in the saved figure's file name.

    Returns
    -------
    float
        Best mean rank reached in the final iteration.
    """
    kreuzPopulation = 0.99  # crossover probability
    bestDist = np.zeros(iterations)  # best fitness (mean rank) of each iteration
    # NOTE: this array shadows the module-level fitness() helper inside genAlg.
    fitness = np.zeros(popSize)
    featureNames_fit = np.array(FeatNames)
    # BUGFIX: the initial population was generated but assigned to an unused
    # variable `pop`, so the GA actually started from an all-zeros population.
    population = generate_population(popSize, anzahlMerkmale, FeatNames)
    for j in range(iterations):
        # ---- fitness evaluation ------------------------------------------
        for k in range(popSize):
            selection = featureNames_fit[population[k].astype(int)]
            df1_temp = df_train_features[selection]
            df2_temp = df_test_features[selection]
            fitness[k] = mid_rank(df1_temp, df2_temp, fastEuclidean)
        sortedIndex = fitness.argsort(axis=0)  # indices ordered by ascending distance
        sortedDist = fitness[sortedIndex]      # ascending distances
        bestDist[j] = sortedDist[0]            # remember the iteration's best distance
        sortedPopulation = population[sortedIndex].astype(int)
        invertedDist = 1/sortedDist            # fitness derived from distance (higher = fitter)
        # ---- selection (fitness-proportionate / roulette wheel) ----------
        invertedDistSum = invertedDist.sum()
        rn1 = invertedDistSum * np.random.rand()
        found1 = False
        index = 1
        while not found1:
            if rn1 < invertedDist[:index].sum(axis=0):
                found1 = index
            else:
                index += 1
        found1 = found1-1
        # Draw a second, distinct parent.
        equal = True
        while equal:
            rn2 = invertedDistSum * np.random.rand()
            found2 = False
            index = 1
            while not found2:
                if rn2 < invertedDist[:index].sum(axis=0):
                    found2 = index
                else:
                    index += 1
            found2 = found2-1
            if found2 != found1:
                equal = False
        parent1 = sortedPopulation[found1]
        parent2 = sortedPopulation[found2]
        # ---- crossover (ordered crossover at a random cut point) ---------
        crossrn = np.random.rand()
        if crossrn < kreuzPopulation:
            crossIndex = np.random.randint(0, anzahlMerkmale-1)
            head1, tail = np.split(parent1, [crossIndex])
            head2, tail = np.split(parent2, [crossIndex])
            # child1's tail: features of parent2 that are not already in head1.
            tailind = 0
            taillength1 = anzahlMerkmale - len(head1)
            tail1 = np.zeros(taillength1, dtype=int)
            for i in range(0, anzahlMerkmale):
                if parent2[i] not in head1 and tailind < taillength1:
                    tail1[tailind] = parent2[i]
                    tailind = tailind + 1
            # child2's tail: features of parent1 that are not already in head2.
            # BUGFIX: the original scanned parent2 here, so child2 was not a
            # crossover of both parents (contradicting the comments below).
            # The original loop variable `j` also clobbered the outer
            # iteration counter; renamed to j2.
            tailind = 0
            taillength2 = anzahlMerkmale - len(head2)
            tail2 = np.zeros(taillength2, dtype=int)
            for j2 in range(0, anzahlMerkmale):
                if parent1[j2] not in head2 and tailind < taillength2:
                    tail2[tailind] = parent1[j2]
                    tailind = tailind + 1
            # child1 gets the left part of parent1 and the right part of parent2.
            child1 = np.append(head1, tail1)
            # child2 gets the left part of parent2 and the right part of parent1.
            child2 = np.append(head2, tail2)
            # ---- mutation (swap mutation) --------------------------------
            # child1: with probability mutationPopulation replace one feature
            # by a random feature from FeatNames that is not yet in the child.
            mutiere = np.random.rand() < mutationPopulation
            if mutiere:
                neuesMerkmal = np.ceil(np.random.rand()*(len(FeatNames))).astype(int)-1
                while neuesMerkmal in child1:
                    # If the new feature is already in the child, draw again.
                    neuesMerkmal = np.ceil(np.random.rand()*(len(FeatNames))).astype(int)-1
                # Pick a random position of the child to replace.
                altesMerkmalPos = np.ceil(np.random.rand()*anzahlMerkmale).astype(int)-1
                child1[altesMerkmalPos] = neuesMerkmal
            # child2: same swap mutation, drawn independently.
            mutiere = np.random.rand() < mutationPopulation
            if mutiere:
                neuesMerkmal = np.ceil(np.random.rand()*(len(FeatNames))).astype(int)-1
                while neuesMerkmal in child2:
                    neuesMerkmal = np.ceil(np.random.rand()*(len(FeatNames))).astype(int)-1
                altesMerkmalPos = np.ceil(np.random.rand()*anzahlMerkmale).astype(int)-1
                child2[altesMerkmalPos] = neuesMerkmal
            # ---- replacement: swap the worst individuals for better children
            merkmaleChild1 = featureNames_fit[child1]
            df1_child1 = df_train_features[merkmaleChild1]
            df2_child1 = df_test_features[merkmaleChild1]
            midRank_child1 = mid_rank(df1_child1, df2_child1, fastEuclidean)
            merkmaleChild2 = featureNames_fit[child2]
            # BUGFIX: train and test frames were swapped for child2, so its
            # fitness was computed in the opposite matching direction to
            # child1 and to the evaluation loop above.
            df1_child2 = df_train_features[merkmaleChild2]
            df2_child2 = df_test_features[merkmaleChild2]
            midRank_child2 = mid_rank(df1_child2, df2_child2, fastEuclidean)
            replace1 = False
            replace2 = False
            # index stays > 0: the best individual is never replaced (elitism).
            index = popSize - 1
            while index > 0:
                if sortedDist[index] > midRank_child1 and not replace1:
                    # Only insert a child that is not already in the population.
                    if not np.ndarray.any(np.ndarray.all(child1 == sortedPopulation, axis=1)):
                        sortedPopulation[index] = child1
                        replace1 = True
                elif sortedDist[index] > midRank_child2 and not replace2:
                    if not np.ndarray.any(np.ndarray.all(child2 == sortedPopulation, axis=1)):
                        sortedPopulation[index] = child2
                        replace2 = True
                if replace1 and replace2:
                    break
                index = index-1
        population = sortedPopulation
    # ---- report the best individual and plot the fitness history ---------
    bestIndividuum = featureNames_fit[population[0]]
    print("bestIndividuum ", bestIndividuum)
    subFeaturesTrain1DF = df_train_features[bestIndividuum]
    subFeaturesTrain1DF.to_csv('./subFeaturesTrain1.csv', sep=",")
    print("Best mid rank:", bestDist[-1])
    print("Population ", population[0])
    plt.subplot(122)
    plt.grid(True)
    plt.plot(range(iterations), bestDist, label=label)
    plt.legend()
    plt.savefig('genetic_algorithm_{}.png'.format(label))
    plt.show()
    return bestDist[-1]
############################################################
#genAlg(100, 100, 10, 0.1,'test') #iterationen, populationsize, #merkmale, mutationsRate, plotlabel
# -
# #### Training des Genetischen Algorithmus
# HierfÌr wird eine variierende Anzahl an Merkmalen ausgewÀhlt, um den Algorithmus, sowie die daraus generierten Modelle auf die beste Anzahl an Merkmalen zu untersuchen.
#
# WÀhrend die Merkmale _iterations_, _popSize_, sowie _mutationPopulation_ fÌr eine Vergleichbarkeit der Ergebnisse immer konstant gehalten werden, wird die Anzahl der Merkmale in immer gleich bleibenden Schritten zwischen dem Wert 10 bis 30 variiert.
# +
model_result = []
# Vary the number of selected features from 10 to 29 while keeping
# iterations, population size and mutation rate constant; collect
# [feature_count, best_mean_rank] pairs.
for merkmal in range(10, 30):
    m = genAlg(2000, 50, merkmal, 0.05, "{0}_features_genetic_algorithm: ".format(merkmal))
    model_result.append([merkmal, m])
    print("Distanz bei {0} Merkmalen: ".format(merkmal), m)
# -
# #### Music Feature Selection
# 2. Implementieren und beschreiben Sie kurz das Konzept ihrer Kreuzungs- und Mutationsfunktion.
# ##### Kreuzung:
#
# Indivduen mit niedriger Distanz bzw. mit gutem Fitnesswert werden miteinander gekreuzt. Dabei wird ein zufÀlliger Schnittpunkt zur Vereinigung der Elternvektoren genutzt. Die Chromosome werden an dieser Stelle geteilt und miteinander vertauscht.
#
# ##### Mutation:
#
# Mit niedriger Wahrscheinlichkeit wird ein Wert zufÀllig verÀndert.
#
# ( _Siehe den Code im oberen Abschnitt._ )
# 3. Bestimmen Sie eine möglichst kleine Merkmalsuntermenge mit einem möglichst guten mittleren Rang? Geben Sie sowohl die gefundenen wichtigsten Merkmale als auch den zugehörigen mittleren Rang an.
# Eine möglichst kleine Merkmalsuntermenge mit möglichst gutem _mittlerem Rang_ konnte bei 20 Merkmalen gefunden werden. Der daraus resultierende mittlere Rang beträgt 1.35, wobei die wichtigsten Merkmale sind:
#
# ```
# 'power7' 'amp1mean' 'amp1000std' 'amp10kurt' 'power2' 'power1' 'amp10skew'
# 'amp1dskew' 'amp1000mean' 'amp10mean' 'power9' 'power4' 'amp1dskew' 'power7'
# 'amp10skew' 'amp100std' 'amp10dskew' 'power8' 'power6' 'power10'
# ```
# 4. Um wieviel verschlechtert sich der Mittlere Rang, wenn nur die 10 wichtigsten Merkmale benutzt werden?
# 1.534 - 1.35 = 0.184
#
# Der Rang verschlechtert sich nur um 0.184. Der Unterschied zwischen 20 und 10 Merkmalen fÀllt nicht groà aus. Der Informationsgewinn bei zusÀtzlichen Features ist somit sehr gering.
# #### Untersuchung der Merkmale
# Des weiteren wird der Algorithmus untersucht auf die Variation der Mutationswahrscheinlichkeit. Diese findet in AbstÀnden zwischen _[0.01, 0.05, 0.1, 0.2]_ statt.
# Vary the mutation probability while keeping the other GA parameters fixed.
pop_mutation = [0.01, 0.05, 0.1, 0.2]
for f in pop_mutation:
    result = genAlg(2000, 50, 13, f, "{0}_features_genetic_algorithm: ".format(f))
    # BUGFIX: the original printed the arguments swapped
    # (format(result) with `f` appended) and labelled the mutation rate
    # as a feature count.
    print("Distanz bei Mutationsrate {0}: ".format(f), result)
# ## Clustering und automatische Playlistgenerierung
# Implementieren Sie ein hierarchisches Clustering aller Subsequenzen in _subFeaturesTrain1.csv_. Diese _.csv_-Datei enthÀlt nur die im vorigen Schritt ermittelten wichtigsten Merkmale. Das hierarchische Clustering ist in einem Dendrogram der Art wie in der unten gegebenen Abbildung zu visualisieren.
#
# Die gefundenen Cluster sind mit den zugehörigen Musiktiteln in der Konsole auszugeben.
# 
#
# ### Das Hierarchische Clustering
#
# Das Hierarchische Clustering ist ein Typ des unÃŒberwachten Lernens (engl., _Unsupervised Learning_). Dieser wird angewandt, um nicht gelabelte Datenpunkte zu clustern. Ãhnlich dem K-means Clustering Algorithmus grupiert der Hierarchische Clustering Algorithmus Datenpunkte zusammen, welche Àhnliche Charakteristiken aufweisen. In manchen FÀllen kann das Ergebnis des hierarchischen, als auch K-means Clustering Algorithmus daher Àhnliche sein. Die Hierarchie dieser Cluster wird als Baum Strukture (_Dendogram_) dargestellt. Die Wurzel dieses Baumes stellt das einzigartige Cluster dar, welches alle Stichproben an Datenpunkte (engl., _samples_) vereint. In den BlÀttern dagegen finden sich die einzelnen Cluster, die nur einen Datenpunktsatz enthalten.
#
# ### Agglomerierendes und spaltendes hierarchisches Clustering
# Es gibt zwei Arten von hierarchischem Clustering, welche als _agglomerierend_ und _spaltend_ (engl., _agglomerative and divisive_) bezeichnet werden. Beim _agglomerierenden_ hierarchischen Clustering wird ein _bottom-up_ Vorgehen, beginnend von den Datenpunkten angwandt, wÀhrend im _top-down_ Vorgehen alle Datenpunkte als ein groÃes Cluster betrachtet werden. Hierbei wird das Vorgehen des Spaltens (engl., _dividing_ ) des gesamten Clusters in kleinere Cluster vorgenommen.
#
# * **Agglomerativ** - Bottom-up-Ansatz. Beginnen Sie mit vielen kleinen Clustern und fÃŒgen Sie diese zu gröÃeren Clustern zusammen.
# * **Trennend** - Ansatz von oben nach unten. Beginnen Sie mit einem einzelnen Cluster und zerlegen Sie ihn dann in kleinere Cluster.
#
# ### Das VerknÃŒphungskriterium (_linkage criteria_)
# Das VerknÃŒpfungskriterium bestimmt die fÃŒr die ZusammenfÃŒhrungsstrategie verwendete Metrik:
#
# * _Ward_ minimiert die Summe der quadrierten Differenzen in allen Clustern. Es handelt sich um einen Varianz-minimierenden Ansatz und Àhnelt in diesem Sinne der k-Mittelwert-Zielfunktion, wird aber mit einem agglomerierenden hierarchischen Ansatz angegangen.
#
# * _Maximale oder vollstÀndige VerknÌpfung_ minimiert den maximalen Abstand zwischen den Beobachtungen von Paaren von Clustern.
#
# * _Durchschnittliche VerknÌpfung_ minimiert den Durchschnitt der AbstÀnde zwischen allen Beobachtungen von Haufenpaaren.
#
# * _EinzelverknÌpfung_ minimiert den Abstand zwischen den nÀchsten Beobachtungen von Haufenpaaren.
#
#
# #### Umsetzung des agglomerierenden Clusterings (_Agglomerative Clustering_)
#
# Der _FeatureAgglomeration_ Cluster Algorithmus ist Àhnlich dem _Agglomerative_ Clustering, jedoch werden rekursiv Merkmale anstelle von Stichproben zusammengefÌhrt. Der _agglomerierende, hierarchische Clustering Algorithmus_ wird im Folgenden mittels der Scikit-Learn Library umgesetzt.
#
# #### Pseudo Code
#
#
#
# **Aufgaben:**
#
# 1. Optimieren Sie fÃŒr die Anwendung des Hierarchischen Clustering Algorithmus die Parameter
#
# 1. metric (ÃhnlichkeitsmaÃ), in diesem Fall 'affinity'
# 2. linkage method, in diesem Fall 'linkage'
# 3. Clusteranzahl, in diesem Fall 'n_clusters'
import sklearn
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import normalize
# Load the feature subset written by the genetic algorithm; the unnamed
# first column holds the music title.
df_music_features = pd.read_csv("./subFeaturesTrain1.csv", sep=",", delimiter=None, header='infer', names=None, index_col=None)
df_music_features = df_music_features.rename(columns={'Unnamed: 0': 'music_title'})
df_music_features.head(5)
# +
# get all music labels
dict_music_label = df_music_features.music_title.to_dict()
# Invert to a title -> integer-id mapping.
music_label = {v: k for k, v in dict_music_label.items()}
df_music_features_copy = df_music_features.copy()
# Replace the title strings by their integer ids and use them as the index.
df_music_features_copy.music_title = df_music_features.music_title.map(music_label)
df_music_features_copy.set_index('music_title', inplace=True)
df_music_features_copy.head(5)
# -
# #### Skalierung der Input Variablen
# Um einen _Bias_ des Models zu verhindern werden die DatensÀtze normalisiert, sodass alle die selbe Skalierung aufweisen.
# +
from sklearn import preprocessing
# data preprocessing to normalize the data to bring them to the same scale
data_scaled = preprocessing.scale(df_music_features_copy)
# NOTE(review): the index attached here is df_music_features.index (a plain
# RangeIndex), not df_music_features_copy.index (the integer title ids).
# Both have the same length so this works, but verify the intended labels.
data_scaled = pd.DataFrame(data_scaled, index=df_music_features.index, columns=df_music_features_copy.columns)
data_scaled.head()
# -
# ### Datenanalyse mittels Heatmap
# Mittels einer Heatmap können die DatensÀtze auf deren Verteilung und Ãhnlichkeit zueinander untersucht werden. Je stÀrker die Felder gelb eingefÀrbt sind, desto gröÃer ist der Wert dieses Merkmals. Dies verhÀlt sich ebenso umgekehrt mit dunkel-blau eingefÀrbten Werten.
# Heatmap of the scaled features (viridis: yellow = high, dark blue = low),
# saved to heatmap_music_feature_dist.png.
fig, ax = plt.subplots(figsize=(30,30))
ax.axes.set_title("Heatmap of features from music samples", fontsize=24, y=1.01)
ax.set(xlabel='Feature X', ylabel='Importance Y');
sns.heatmap(data_scaled, annot=True, fmt="g", cmap='viridis', ax=ax)
plt.savefig('heatmap_music_feature_dist.png')
plt.show()
# +
from scipy.cluster.hierarchy import dendrogram, linkage
def create_dendogram(df_data, affinity, labels):
    """Plot, save and return a hierarchical-clustering dendrogram.

    Builds the linkage matrix for df_data with the given linkage method
    (passed here as `affinity`), renders a right-oriented dendrogram with
    the supplied leaf labels, saves it to disk, and returns scipy's
    dendrogram result dictionary.
    """
    # Linkage matrix encoding the hierarchical clustering of the samples.
    link_matrix = linkage(df_data, affinity)

    plt.figure(figsize=(20, 30))
    plt.title('Hierarchical Clustering Dendrogram', fontsize=20)
    plt.xlabel('Calculated distance, method {}'.format(affinity), fontsize=16)
    plt.ylabel('Music features per title', fontsize=16)

    # Render the dendrogram; the result dict holds leaf order, colors, etc.
    result = dendrogram(link_matrix, orientation="right", labels=labels)

    # Enlarge the major tick labels on both axes for readability.
    axes = plt.gca()
    for axis_name in ('x', 'y'):
        axes.tick_params(axis=axis_name, which='major', labelsize=15)

    plt.savefig('dendrogram {}.png'.format(affinity))
    plt.show()
    return result
# -
# #### Unterschiedliche Affinity und Linkage
affinity = ['euclidean', 'l1', 'l2', 'manhattan', 'cosine', 'precomputed']
method_linkage = ['ward', 'complete', 'average', 'single']
labels = df_music_features.music_title.tolist()
leaves_dict = {}
# Create dendograms with various methods
for method in method_linkage:
leaves1 = create_dendogram(data_scaled, method, labels)
leaves_dict[method] = leaves1
# 2. FÃŒr welche Parameterkonstellation erlangen Sie das fÃŒr Sie subjektiv betrachtet gÃŒnstigste Ergebnis?
# Subjektiv betrachtet ergeben die generierten Cluster (Playlists) Sinn. KÃŒnstler werden gröÃtenteils in eine Playlist zusammengefasst, mit wenigen anderen KÃŒnstlern z.B. eine Playlist mit 15 Adele Songs und mit nur einem einzigen Lana Del Rey Song. Zudem kann man AusreiÃer-Cluster anhand von kleinen Playlists erkennen.
# 3. Ãberlegen Sie sich AnsÀtze um diese Art der Musikgruppierung zu verbessern?
# Am nahe liegendsten wÀre die Extraktion von weiteren Features wie z.B. Sprache, BPM, Spieldauer der Songs. AuÃerdem könnte man die ClustergröÃe anhand von einer Mindest- oder Maximalanzahl von Songs vordefinieren, um z.B. eine Playlist mit einer gewissen Spieldauer zu erstellen. GrundsÀtzlich ist es möglich ein anderes Verfahren zur Clusterbildung zu verwenden (z.B. K-Means).
#
# Fazit:
#
# Ein Durchlauf des genetischen Algorithmus dauert sehr lange. Dies hat das Testen und Entwickeln sehr zeitaufwendig gemacht! Wir hÀtten diesbezÌglich von Anfang an mit Multithreading arbeiten sollen, stattdessen haben wir die Performance durch eine andere Euklidfunktion verbessert.
#
leaves_dict
# Das Dictionary _R_, das von der Scipy Funktion _dendrogram()_ zurÌck gegeben wird, enthÀlt Informationen Ìber das _Dendrogram_ und damit auch eine Liste an Werten der geclusterten MusikstÌcke.
# ### Zusatz: Visuelle Analyse der Clusteranzahl mittels Dendogram
# Die Daten können mittels einem Dendogram untersucht werden. Dies gibt Aufschluss darÃŒber, wie viele Cluster aus dem gesamten Datensatz (enlg., _sample_ ) im Optimum hervorgebracht werden können. Die _x-Achse_ enthÀlt die DatensÀtze und die _y-Achse_ stellt den Abstand (bzw. UnÀhnlichkeit) zwischen den DatensÀtzen dar. Je gröÃer der Abstand, desto unÀhnlicher sind sich die DatensÀtze und umgekehrt. Die senkrechte Linie mit dem maximalen Abstand ist die blaue Linie und daher kann in diesem Beispiel ein Schwellenwert von _t=5_ festgelegt werden, um hier das Dendrogramm visuell zu schneiden. Der Schwellenwert bei _t=5_ (engl., _threshold_ ) wird durch eine gestrichtelte Linie im _Dendogram_ dargestellt. Durch den Schnitt dieser mit drei zusammen laufenden Cluster Linien (engl., _fusions_) lÀsst sich erkennen, dass sich nun _n=3_ Cluster bilden.
import scipy.cluster.hierarchy as shc
# +
def plot_hierarchical_cluster(data, method):
    """Draw a dendrogram for *data* using linkage *method*, plus threshold lines."""
    plt.figure(figsize=(10, 7))
    plt.title("Cluster analysis Dendrogram, method={}".format(method))
    plt.xlabel('Music Titles')
    plt.ylabel('Euclidean distances')
    # Compute the linkage matrix first, then render it.
    link_matrix = shc.linkage(data, method)
    shc.dendrogram(link_matrix)
    # Dashed horizontal lines mark the candidate cut thresholds.
    for threshold in (5, 3):
        plt.axhline(y=threshold, color='r', linestyle='--')
# -
# Da die Methode=_'ward'_ die besten Ergebnisse erzielt hat, wird diese auch in dem folgenden Versuch angewandt. Das Ãhnlichkeitsmaà ist auch hier die _euklidsche Distanz_ .
plot_hierarchical_cluster(data_scaled, 'ward')
# #### Erkenntnis Dendrogram
# Visuelle Untersuchung des Cluster Baumes in einem _Dendogram_. Im untersten Knoten, dem Blatt (_engl., leaf_) werden die Àhnlichsten Merkmale in einem Cluster zusammen gefasst. Diese Cluster erhalten die Label zwischen _0_ und _n-1_ der festgelegten Cluster Anzahl _n_.
#
# #### AusreiÃer erkennen ( _outlier detection_ )
# In dem aktuellen Graphen sind keine AusreiÃer erkennbar. Diese könnten normalerweise anhand von nicht paarweise vorkommenden Zweigen ( _branches_ ) ausgemacht werden.
# ### Trainieren des Agglomerative Clustering Algorithmus
# Der Algorithmus benötigt keine initiale Angabe der Cluster Anzahl _k_, wie der k-means Clustering Algorithmus.
#
# #### Vorgehen
# ZunÀchst wird jeder Punkt als ein separates Cluster betrachtet, dann werden die Punkte rekursiv in AbhÀngigkeit von der Entfernung zwischen ihnen gebÃŒndelt. Die Datenpunkte werden so gruppiert, dass der Abstand zwischen den Datenpunkten innerhalb eines Clusters minimal und der Abstand zwischen den Punkten innerhalb verschiedener Clusters maximal ist. HÀufig verwendete EntfernungsmaÃe sind die euklidische Distanz, die Manhattan-Distanz oder die Mahalanobis-Distanz.
def agglomerativeCluster(data, n_clusters, affinity, linkage):
    """Fit sklearn's AgglomerativeClustering and return per-sample labels.

    Parameters:
        data: array-like of shape (n_samples, n_features) to cluster.
        n_clusters: number of clusters to form.
        affinity: distance metric, e.g. 'euclidean', 'manhattan', 'cosine'.
        linkage: linkage criterion: 'ward', 'complete', 'average' or 'single'.

    Returns:
        numpy array of shape (n_samples,) with the cluster label of each sample.
    """
    # BUG FIX: arguments must be passed by keyword. Positionally, the third
    # parameter of AgglomerativeClustering is `memory`, not `linkage`, so the
    # requested linkage was silently ignored (the default was always used)
    # and the linkage string was misinterpreted as a joblib cache directory.
    agglo_cluster_model = AgglomerativeClustering(
        n_clusters=n_clusters, affinity=affinity, linkage=linkage
    )
    return agglo_cluster_model.fit_predict(data)
# Aufgabe 1. Optimieren Sie fÃŒr die Anwendung des Hierarchischen Clustering Algorithmus die Parameter
#
# - metric (ÃhnlichkeitsmaÃ), in diesem Fall 'affinity'
# - linkage method, in diesem Fall 'linkage'
# - Clusteranzahl, in diesem Fall 'n_clusters'
cluster_lables = agglomerativeCluster(data_scaled, n_clusters=3, affinity='euclidean', linkage='ward')
cluster_lables
agglomerative = pd.DataFrame(cluster_lables)
df_music_features.insert((df_music_features.shape[1]), 'agglomerative', agglomerative)
df_music_features.head(5)
def get_cluster(df_data, n_cluster):
    """Split *df_data* into one sub-frame per cluster label.

    Parameters:
        df_data: pandas DataFrame with an integer 'agglomerative' label column.
        n_cluster: total number of clusters; labels run from 0 to n_cluster - 1.

    Returns:
        list of DataFrames, one per cluster label, in label order. Each
        cluster is also printed for inspection.
    """
    music_cluster = []
    # BUG FIX: cluster labels start at 0, so iterate over range(n_cluster).
    # The previous range(1, n_cluster) silently dropped cluster 0 entirely.
    for n in range(n_cluster):
        playlist_recom = df_data.loc[df_data['agglomerative'] == n]
        music_cluster.append(playlist_recom)
        print('Cluster{0}: {1}'.format(n, playlist_recom))
    return music_cluster
music_cluster = get_cluster(df_music_features, 3)
music_cluster
| src/proj3_music_clustering/model/proj3_music_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Doing Math in Python
#
#
# ## Numeric Data Types
# <p>
# There are two ways numbers are represented internally - integers and
# floating point numbers. Even though the numbers <i>1</i> and <i>1.0</i>
# have the same value their internal representation are very different.
# </p>
#
# <p>
# Computations with integers are exact, whereas those that involve
# floating point numbers are not. The accuracy of a floating point
# computation depends on the precision of the numbers used. Greater
# the precision, the more accurate the result. But there is limit to
# the precision of floating numbers. The precision is limited to the
# number of bits used. 32-bit floating point numbers have lower precision
# than 64-bit numbers. There is also a limit to how big or how small a
# floating point number you can represent. For a 32-bit representation
# the range is (+/-) 3.4E38 and for 64-bit representation the range is
# (+/-) 1.8E308.
# </p>
#
# <p>
# There is also a limit to the range of integers that you can represent.
# With 32 bits the range is -2<sup>31</sup> to (2<sup>31</sup> - 1). In
# mixed operations, those that involve both integers and floating numbers,
# integers are converted to floating point numbers and the computation
# performed.
#
# <b><i>Careful:</i></b> consider this expression: 3.5 + (6/4).
#
# In Python 3 the result is 5.0, because the / operator always performs
# true (floating point) division, so (6/4) evaluates to 1.5. To get
# integer (floor) division, which truncates the fractional portion of
# the quotient, use the // operator: 3.5 + (6//4) evaluates to 4.5.
# </p>
#
#
# You can explicitly convert numbers of one type to another with built-in
# functions that Python provides:
# +
x = 123
y = float(x) # y = 123.0
z = 34.89
w = int(z) # w = 34
x = 12345678987654321
# -
# <h4> Math Library </h4>
# <p>
# There are lots of useful functions in the
# <a href = "https://docs.python.org/3/library/math.html"> Math
# Library</a>. To use this library the first statement in your program
# must be
# <pre>
# import math
# </pre>
import math
#
#
# The Math Library not only has functions but also useful constants like
# <i>π</i> and <i>e</i>. To use the functions or the constants in your
# program you must apply the dot operator. The general syntax for usage
# is <i>math.function()</i> or <i>math.constant</i>. The table below
# gives just a subset of all the functions available.
# </p>
#
# <center>
# <table border = "1" width = "75%">
# <tr>
# <th width = "15%"> Function </th><th> Meaning </th>
# </tr>
# <tr>
# <td> ceil (x) </td>
# <td> Return the ceiling of x as a float, the smallest integer value greater
# than or equal to x. </td>
# </tr>
# <tr>
# <td> floor (x) </td>
# <td> Return the floor of x as a float, the largest integer value less than
# or equal to x. </td>
# </tr>
# <tr>
# <td> exp (x) </td>
# <td> Return e**x. </td>
# </tr>
# <tr>
# <td> log (x, base) </td>
# <td> Return the logarithm of x to the given base. If the base is not specified,
# return the natural logarithm of x (that is, the logarithm to base e). </td>
# </tr>
# <tr>
# <td> log10 (x) </td>
# <td> Return the base-10 logarithm of x. </td>
# </tr>
# <tr>
# <td> pow (x, y) </td>
# <td> Return x**y. </td>
# </tr>
# <tr>
# <td> sqrt (x) </td>
# <td> Return the square root of x. </td>
# </tr>
# <tr>
# <td> degrees (x) </td>
# <td> Converts angle x from radians to degrees. </td>
# </tr>
# <tr>
# <td> radians (x) </td>
# <td> Converts angle x from degrees to radians. </td>
# </tr>
# <tr>
# <td> sin (x) </td>
# <td> Return the sine of x radians. </td>
# </tr>
# <tr>
# <td> cos (x) </td>
# <td> Return the cosine of x radians. </td>
# </tr>
# <tr>
# <td> tan (x) </td>
# <td> Return the tangent of x radians. </td>
# </tr>
# <tr>
# <td> asin (x) </td>
# <td> Return the arc sine of x, in radians. </td>
# </tr>
# <tr>
# <td> acos (x) </td>
# <td> Return the arc cosine of x, in radians. </td>
# </tr>
# <tr>
# <td> atan (x) </td>
# <td> Return the arc tangent of x, in radians. </td>
# </tr>
# <tr>
# <td> hypot (x, y) </td>
# <td> Return the Euclidean norm, sqrt(x*x + y*y). This is the length of the
# vector from the origin to point (x, y). </td>
# </tr>
# <tr>
# <td> pi </td>
# <td> The mathematical constant pi. </td>
# </tr>
# <tr>
# <td> e </td>
# <td> The mathematical constant e. </td>
# </tr>
# </table>
# </center>
#
# <h4> Random Number Generation </h4>
# <p>
# There are computations that require you to generate random numbers. Python
# provides a
# <a href = "https://docs.python.org/3/library/random.html"> pseudo
# random number generator</a>. The word <i>pseudo</i> in this
# context means that the random number generator is deterministic and after
# a certain cycle of generating random numbers it starts repeating that cycle.
# </p>
#
# <p>
# However, for most simple computations the pseudo random number generator
# works fine since the cycle length is extremely large. All of the random
# number generating functions comes in a module called <i>random</i> and has
# to be imported in the program to be used. Your first line of code should
# read:
# <pre>
# import random
# </pre>
# To use the functions you must use the dot operator. The general usage is
# <i>random.function()</i>. The table below gives a subset of the functions
# that are available in the <i>random</i> module.
# </p>
#
# <center>
# <table border = "1" width = "75%">
# <tr>
# <th width = "25%"> Function </th><th> Meaning </th>
# </tr>
# <tr>
# <td> randrange (start, stop, step) </td>
# <td> Return a randomly selected element from range(start, stop, step). </td>
# </tr>
# <tr>
# <td> randint (a, b) </td>
# <td> Return a random integer N such that a <= N <= b. </td>
# </tr>
# <tr>
# <td> choice (seq) </td>
# <td> Return a random element from the non-empty sequence seq. </td>
# </tr>
# <tr>
# <td> shuffle (x) </td>
# <td> Shuffle the sequence x in place. </td>
# </tr>
# <tr>
# <td> sample (population, k) </td>
# <td> Return a k length list of unique elements chosen from the population
# sequence. Used for random sampling without replacement. </td>
# </tr>
# <tr>
# <td> random () </td>
# <td> Return the next random floating point number in the range [0.0, 1.0).</td>
# </tr>
# <tr>
# <td> uniform (a, b) </td>
# <td> Return a random real number N such that a <= N < b. </td>
# </tr>
#
# </table>
# </center>
# +
import random
# We can set the initialization of the random process with the seed() function to generate the same outputs
random.seed(123)
print(random.random())
# +
random.seed(123)
# Random float x in the range 0.0 <= x < 1.0
x = random.random() # x = 0.052363598850944326
x
# +
random.seed(123)
# Random float x in the range -1.0 <= x < 1.0
x = random.uniform (-1.0, 1.0) # x = -0.8952728022981113
x
# -
random.seed(123)
# Choose a random number from 1 to 100 that is divisible by 3
x = random.randrange(3, 100, 3) # x = 12
x
# +
random.seed(123)
# Choose a random element from a sequence
seq = ['a', 'e', 'i', 'o', 'u']
x = random.choice(seq) # x = 'a'
x
# -
random.seed(123)
# Choose 2 elements from a population
x = random.sample (seq, 2) # x = ['a', 'i']
x
random.seed(123)
# Shuffle a sequence
seq = ['a', 'e', 'i', 'o', 'u']
random.shuffle (seq) # seq = ['o', 'e', 'u', 'i', 'a']
seq
| Notebooks/Example-002-Math-in-Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="ZzJSVb6ETWhC"
import pandas as pd
import numpy as np
from ast import literal_eval
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
pd.options.display.precision = 3
sns.set_style('darkgrid')
plt.rcParams['figure.figsize'] = [7, 5]
plt.rcParams['figure.titlesize'] = 15
# %matplotlib inline
# + [markdown] id="Gc5kHt6GTWhN"
# # Loading The Files
# + id="-b1M80leTWhN" colab={"base_uri": "https://localhost:8080/"} outputId="50d5b2c7-aa6b-4778-855a-cc256c1caf4b"
# Load 'Movies' file
movies = pd.read_csv('movies_metadata.csv',)
# Malformed id values are patched so the column can be cast to int.
# NOTE(review): replacing '-' with '0' assumes bad ids look like dates
# (e.g. '1997-08-20'); the resulting ids are fabricated — confirm they
# never collide with real TMDB ids before merging on 'id'.
movies.id = movies.id.str.replace('-', '0').astype('int')
# Unparseable dates become NaT instead of raising.
movies.release_date = pd.to_datetime(movies.release_date, errors='coerce')
# Load 'Credits' file (keep only the first row per movie id)
credits = pd.read_csv('credits.csv')
credits = credits[~credits.id.duplicated()]
# Load 'keywords' file (keep only the first row per movie id)
keywords = pd.read_csv('keywords.csv')
keywords = keywords[~keywords.id.duplicated()]
# Load 'tags' file
tags = pd.read_csv('tags.csv')
# Load 'links' file; drop rows without a TMDB id so the cast to int is safe.
links = pd.read_csv('links.csv')
links = links.loc[links.tmdbId.notnull()]
links.tmdbId = links.tmdbId.astype('int')
# + colab={"base_uri": "https://localhost:8080/", "height": 479} id="7AYclk-MTWhO" outputId="30475bf5-4097-4584-bbec-26daf8635737"
clean_movies = movies.drop(['adult','belongs_to_collection','video','homepage','poster_path',
'tagline', 'vote_average','overview'], axis=1)
clean_movies.head()
# + id="j_jC1NVRs0oK"
clean_movies.dropna(inplace=True)
# + id="VzuGb4A9s04m"
clean_movies = clean_movies[clean_movies['status'].isin(['Released'])]
# + id="rUpzqWDFs7CX"
clean_movies.drop(columns=['status'], axis=1, inplace=True)
# + id="o8S2XynCtAIC"
clean_movies = clean_movies[clean_movies['original_language'].isin(['en'])]
# + id="U8KD_jqGtALK"
clean_movies.drop(columns=['original_language'], axis=1, inplace=True)
# + id="AQYhtjYsuUXW"
clean_movies['release_date'] = clean_movies['release_date'].astype(str)
# + id="lEK-lIUBtANg"
clean_movies[['release_year', 'release_month', 'release_day']] = clean_movies['release_date'].str.split('-', expand=True)
# + id="i6hHNoEutAQV"
clean_movies['release_year'] = clean_movies['release_year'].astype(int)
# + id="9xB75SrntATs"
clean_movies = clean_movies.drop(clean_movies[clean_movies['release_year'] < 1950].index)
# + id="EzxgUJaStAWU"
clean_movies.drop(columns=['release_date','release_day','release_month'], axis=1, inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 221} id="QN59Z4LWTWhP" outputId="d7b47b79-b112-4571-d5f3-8105171d5fbd"
print(f"Number of unique movies: {len(credits.id.unique())}")
credits.head()
# + colab={"base_uri": "https://localhost:8080/"} id="bkQYVWawTWhQ" outputId="e65f8856-1f4b-4ca0-941a-14ad206c36ab"
# Sanity check before merging: the symmetric difference (^) lists ids that
# are present in exactly one of the two frames.
# FIX: the old message had a typo ('celan_movies') and was inaccurate — the
# expression below is a symmetric difference, not a one-sided difference.
print("Movie ids that appear in only one of 'clean_movies' and 'credits': ")
print(set(clean_movies.id.unique()) ^ set(credits.id.unique()))
# + [markdown] id="Ld0LfMhrTWhQ"
# # Merging
# + id="0aTbwXLDTWhQ"
metadata = pd.merge(left=clean_movies, right=credits, on='id')
metadata = metadata.merge(keywords, on='id')
metadata_links = pd.merge(left=metadata, right=links, left_on='id', right_on='tmdbId')
tags_per_movie = tags.groupby('movieId')['tag'].apply(list).reset_index(drop=False)
metadata_all = pd.merge(left=metadata_links, right=tags_per_movie, on='movieId')
metadata_all = metadata_all.drop(['tmdbId', 'movieId', 'imdbId'], axis=1)
# + [markdown] id="wZ8lj_h5TWhR"
# # Data Cleaning
# + id="rTE7RuDtTWhR"
def get_director(x):
    """Return the name of the first crew member whose job is 'Director'.

    Falls back to NaN when no director entry exists in *x*.
    """
    director_entry = next(
        (member for member in x if member['job'] == 'Director'), None
    )
    if director_entry is not None:
        return director_entry['name']
    return np.nan
# + id="PmlVJ3RtTWhR"
def get_list(x):
    """Extract up to the first three 'name' values from a list of dicts.

    Non-list input (e.g. NaN for missing/malformed data) yields [].
    """
    if not isinstance(x, list):
        # Missing or malformed data arrives as a non-list value.
        return []
    names = [entry['name'] for entry in x]
    # Cap the result at the three top-billed entries.
    return names if len(names) <= 3 else names[:3]
# + id="2r9cfp9vTWhS"
def create_soup(x, dir_weight=4):
    """Build the bag-of-words 'soup' string for one movie row.

    Concatenates keywords, cast, tags, genres, production companies and
    countries, and the director. The director is repeated `dir_weight`
    extra times to boost its influence on the similarity score.

    FIX: `dir_weight` used to be accepted but ignored (the repetition was
    hard-coded to 4). It is now actually used; its default is 4 so the
    output for callers that omit it is unchanged.
    """
    parts = [
        ' '.join(x['keywords']),
        ' '.join(x['cast']),
        ' '.join(x['tag']),
        ' '.join(x['genres']),
        ' '.join(x['production_companies']),
        ' '.join(x['production_countries']),
        x['director'] + (' ' + x['director']) * dir_weight,
    ]
    return ' '.join(parts)
# + [markdown] id="15IUb9KxTWhS"
# ## Extracting Lists
# + id="8r3sIWThTWhS"
features_to_clean = ['genres', 'production_companies', 'production_countries', 'crew', 'cast', 'keywords']
for feature in features_to_clean:
metadata_all[feature] = metadata_all[feature].fillna('[]')
metadata_all[feature] = metadata_all[feature].apply(literal_eval)
if feature == 'crew':
metadata_all['director'] = metadata_all[feature].apply(get_director)
metadata_all = metadata_all.drop([feature], axis=1)
else:
metadata_all[feature] = metadata_all[feature].apply(get_list)
# + [markdown] id="VF0Wkm1kTWhT"
# ## Remove Spaces & Make Strings Lower
# + id="_dUO1D3sTWhT"
def clean_data(x):
    """Lower-case and strip spaces from a string, or from each string in a list.

    Any other input (e.g. a missing director stored as NaN) maps to ''.
    """
    if isinstance(x, list):
        return [item.replace(" ", "").lower() for item in x]
    if isinstance(x, str):
        return x.replace(" ", "").lower()
    # Not a list and not a string: treat as missing data.
    return ''
# + id="E8__gjyvTWhU"
def clean_tags(tags_list):
    """De-duplicate tags and normalize each to a lower-case, space-free string.

    Non-string entries (e.g. NaN tags) are mapped to ''.

    FIX: the previous implementation de-duplicated via list(set(...)),
    whose iteration order varies between runs, making the bag-of-words —
    and therefore the whole similarity matrix — non-deterministic. This
    version preserves first-appearance order instead.
    """
    res = []
    # dict.fromkeys gives an order-preserving de-duplication of the raw tags.
    for tag in dict.fromkeys(tags_list):
        try:
            normalized = tag.replace(' ', '').lower()
        except AttributeError:
            # Non-string tag (e.g. float NaN) — keep a placeholder.
            normalized = ''
        res.append(normalized)
    return res
# + id="h2bWO5BcTWhU"
features_to_clean = ['genres', 'production_companies',
'production_countries', 'cast',
'keywords', 'director', 'tag']
for feature in features_to_clean:
if feature == 'tag':
metadata_all[feature] = metadata_all[feature].apply(clean_tags)
else:
metadata_all[feature] = metadata_all[feature].apply(clean_data)
# + [markdown] id="Zn7OCtLYTWhU"
# ## Create Bag-of-Words
# + id="HWnOkQwATWhV"
metadata_all['bow'] = metadata_all.apply(create_soup, axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 813} id="qPbT7Q4Fa06R" outputId="e0916374-d92b-41a6-da41-eaaceaabcdb2"
metadata_all
# + [markdown] id="hPCOxyXjTWhV"
# # Recommend
# + id="xDMgTBJGTWhV"
def get_recommendations(title, ind_movies, cosine_sim, top_n=10):
    """Return the top_n movies most similar to *title* as {title: similarity}.

    Parameters:
        title: movie title to look up in ind_movies.
        ind_movies: pandas Series mapping original_title -> row position in
            metadata_all / cosine_sim (one title may map to several rows).
        cosine_sim: square similarity matrix aligned with metadata_all rows.
        top_n: number of recommendations to return (default 10).

    Relies on the module-level `metadata_all` dataframe to resolve titles.
    """
    # Get the index of the movie that matches the title
    idx = ind_movies[title]
    # Duplicate titles yield a Series instead of a scalar; keep the first hit.
    # NOTE(review): the scalar check assumes the index values are numpy
    # int64 — confirm this holds for the dtype of `ind_movies`.
    if not isinstance(idx, np.int64):
        print("There is more then one movie with this title")
        print("Choosing only the first one...")
        idx = idx[0]
    # Get the pairwise similarity scores of all movies with that movie
    sim_scores = list(enumerate(cosine_sim[idx]))
    # Sort the movies based on the similarity scores, best first
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    # Skip position 0 (the movie itself) and keep the next top_n entries
    sim_scores = sim_scores[1:top_n+1]
    # Split into row indices and rounded similarity values
    movie_indices = [i[0] for i in sim_scores]
    similarity_score = [round(i[1], 3) for i in sim_scores]
    # Map row indices back to titles and pair them with their scores
    rec_movies = metadata_all['original_title'].iloc[movie_indices].values
    return dict(zip(rec_movies, similarity_score))
#    return metadata_all['original_title'].iloc[movie_indices], sim_scores, movie_indices
# + [markdown] id="r_UlH0oFTWhV"
# ## Build Similarity Matrix
# + id="EXdPqaeDTWhV"
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(metadata_all['bow'])
cosine_sim = cosine_similarity(count_matrix, count_matrix)
# + colab={"base_uri": "https://localhost:8080/"} id="_8g7-TFbrc5k" outputId="df2a9ad8-806e-4c72-e040-7ffd98fe836c"
indices = pd.Series(metadata_all.index, index=metadata_all['original_title'])
get_recommendations('Amadeus', indices, cosine_sim, top_n=10)
# + [markdown] id="2Tt-d5FLTWhW"
# ## Final Output
# + id="x8E7D3bUTWhW" colab={"base_uri": "https://localhost:8080/"} outputId="6630d462-ecb0-430b-feb5-6c48e417dde8"
indices = pd.Series(metadata_all.index, index=metadata_all['original_title'])
get_recommendations('Amadeus', indices, cosine_sim, top_n=10)
# + id="vPRdtVvpaJB7"
| content_based_filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fe_test
# language: python
# name: fe_test
# ---
# ## Arbitrary value imputation with Scikit-learn ==> SimpleImputer
#
# Scikit-learn provides a class to make most of the most common data imputation techniques.
#
# The **SimpleImputer** class provides basic strategies for imputing missing values, including:
#
# - Mean and median imputation for numerical variables
# - Most frequent category imputation for categorical variables
# - Arbitrary value imputation for both categorical and numerical variables
#
# ### Advantages
#
# - Simple to use if applied to the entire dataframe
# - Maintained by the scikit-learn developers: good quality code
# - Fast computation (it uses numpy for calculations)
# - Allows for grid search over the various imputation techniques
# - Allows for different missing values encodings (you can indicate if the missing values are np.nan, or zeroes, etc)
#
# ### Limitations
#
# - Returns a numpy array instead of a pandas dataframe, inconvenient for data analysis
# - Needs to use additional classes to select which features to impute ==>
# - requires more lines of code
# - additional classes still in beta (may change without warning)
# - not so straightforward to use anymore.
#
# ### More details about the transformers
#
# - [SimpleImputer](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer)
# - [ColumnTransformer](https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html)
# - [Stackoverflow](https://stackoverflow.com/questions/54160370/how-to-use-sklearn-column-transformer)
#
#
# ## In this demo:
#
# We will do **arbitrary value imputation with Scikit-learn** using the Ames House Price Dataset.
#
# - To download the dataset please refer to the lecture **Datasets** in **Section 1** of this course.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# these are the objects we need to impute missing data
# with sklearn
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
# to split the datasets
from sklearn.model_selection import train_test_split
# +
# we use only the following variables for the demo:
# 3 of which contain NA
cols_to_use = [
'OverallQual', 'TotalBsmtSF', '1stFlrSF', 'GrLivArea', 'WoodDeckSF',
'BsmtUnfSF', 'LotFrontage', 'MasVnrArea', 'GarageYrBlt', 'SalePrice'
]
# +
# let's load the House Prices dataset
data = pd.read_csv('../houseprice.csv', usecols=cols_to_use)
print(data.shape)
data.head()
# -
# let's check the null values
data.isnull().mean()
# The variables LotFrontage, MasVnrArea and GarageYrBlt contain missing data.
# +
# let's separate into training and testing set
# first let's remove the target from the features
cols_to_use.remove('SalePrice')
X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use], # just the features
data['SalePrice'], # the target
test_size=0.3, # the percentage of obs in the test set
random_state=0) # for reproducibility
X_train.shape, X_test.shape
# -
# let's check the misssing data again
X_train.isnull().mean()
# ### SimpleImputer on the entire dataset
# +
# Now we impute the missing values with SimpleImputer
# create an instance of the simple imputer
# we indicate that we want to impute with a constant
# arbitrary value: 999
imputer = SimpleImputer(strategy='constant',
fill_value = 999)
# we fit the imputer to the train set
# the imputer assigns 999 to all variables
imputer.fit(X_train)
# -
# For arbitrary value imputation, we need to set the strategy to 'constant' and indicate the arbitrary value in the fill_value argument.
# we can look at the learnt imputation values like this:
imputer.statistics_
# +
# and now we impute the train and test set
# NOTE: the data is returned as a numpy array!!!
X_train = imputer.transform(X_train)
X_test = imputer.transform(X_test)
X_train
# +
# we can transform it back to a dataframe:
X_train = pd.DataFrame(X_train, columns=cols_to_use)
X_train.head()
# +
# Let's explore the distributions after the imputation
X_train.hist(bins=50, figsize=(10,10))
plt.show()
# -
# Note the tall bar at 999 in the LotFrontage's histogram, which appeared after the imputation. You can also see the imputation effect on GarageYrBlt, with the bar at the far left right at the 999 value.
#
# **Remember to be cautious with the order in which you pass the column names** as we discussed in the previous notebook.
#
# ### SimpleImputer: different procedures on different features
#
# On occasions, we want to implement different imputation techniques to different features, how can we do this using the SimpleImputer from Scikit-learn?
#
# We need to use another class called ColumnTransformer.
#
# **Note** ColumnTransformer has been recently released by Scikit-learn and it is therefore in beta version. This means that the developers can change the way it is used, or remove it altogether any time. The implication is that the code you write today, may not be useful after new releases from Scikit-learn. That is usually what happens with any open-source package when the developers are trying new functionality.
#
# More details about the ColumnTransformer in the [Sklearn website](https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html)
# +
# let's separate into training and testing set
X_train, X_test, y_train, y_test = train_test_split(data[cols_to_use],
data['SalePrice'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# -
# let's look at the missing values
X_train.isnull().mean()
# +
# let's look at the distributions of the variables to decide
# which arbitrary value to use for each feature
X_train.hist(bins=50, figsize=(10,10))
plt.show()
# -
# For this demo, I will impute each of LotFrontage, MasVnrArea and GarageYrBlt with different arbitrary values.
# +
# first we need to make lists, indicating which features
# will be imputed with each value
features_LotFrontAge = ['LotFrontage']
features_MasVnrArea = ['MasVnrArea']
features_GarageYrBlt = ['GarageYrBlt']
# then we put the features list and the transformers together
# using the column transformer
# in this example, I will use the default parameter of ColumnTransformer
# remainder = drop, which means that only the imputed features will
# be retained, and the rest dropped
preprocessor = ColumnTransformer(transformers=[
('imputer_LotFrontAge', SimpleImputer(
strategy='constant', fill_value=999), features_LotFrontAge),
('imputer_MasVnrArea', SimpleImputer(
strategy='constant', fill_value=-10), features_MasVnrArea),
('imputer_GarageYrBlt', SimpleImputer(
strategy='constant', fill_value=1700), features_GarageYrBlt)
], remainder='drop')
# -
# now we fit the preprocessor
preprocessor.fit(X_train)
# +
# we can explore the transformers like this:
preprocessor.transformers
# +
# and we can look at the parameters learnt like this:
# for LotFrontAge
preprocessor.named_transformers_['imputer_LotFrontAge'].statistics_
# +
# for MasVnrArea
preprocessor.named_transformers_['imputer_MasVnrArea'].statistics_
# +
# for GarageYrBlt
preprocessor.named_transformers_['imputer_GarageYrBlt'].statistics_
# +
# and now we can impute the data
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
X_train
# -
# Observe how the returned data contains only 3 variables, the ones for which we specified imputation values. This is the effect of specifying remainder='drop' in the columnTransformer.
# **ANOTHER NOTE OF CAUTION**
#
# ColumnTransformer is in essence just slicing the dataframe in the required features. Then the SimpleImputer operates on the sliced dataframe. If different features are required by the different imputation methods, it will slice the dataframe on the required features for each imputation, and then concatenate them to produce the final output.
#
# For our specific example, what happens is the following:
#
# - ColumnTransformer first creates 3 single column dataframes, one with each of the indicated variables
# - The SimpleImputers learn the parameters in their respective sliced dataframes
# - In this case they don't learn anything, because we hard code the arbitrary value
# - ColunmnTransformer contatenates back the 3 single column dataframes into the final result, which is a 3 column dataframe
#
# So note that you will not get all the variables from the original X_train, unless you create imputers for all the variables!!
#
# Also, there is a risk of messing up with the order of the features, if we do not use the transformers within a pipeline or we do not keep an eye on the order of the features in the train set, before they were converted to numpy arrays.
# see how the result of the imputation is a 3 column dataset
pd.DataFrame(X_train,
columns=features_LotFrontAge + features_MasVnrArea +
features_GarageYrBlt).head()
# +
X_train = pd.DataFrame(X_train,
columns=features_LotFrontAge + features_MasVnrArea +
features_GarageYrBlt)
X_train.hist(bins=50, figsize=(10,10))
plt.show()
# -
# Note the bars corresponding to the arbitrary values at 1700, 999 and -1 for each feature.
| Section-04-Missing-Data-Imputation/04.10-Arbitrary-Value-Imputation-Sklearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import pandas
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import History,LearningRateScheduler
from tensorflow.keras.layers import Dropout,Dense,Activation
# -
# Load data
# NOTE(review): `error_bad_lines` was deprecated in pandas 1.3 and removed in
# pandas 2.0 (replaced by `on_bad_lines`) — confirm the pinned pandas version.
dataset = pd.read_csv("data/training set values.csv", sep=',', error_bad_lines=False, header=0, index_col=0)
datalabel = pd.read_csv("data/training set labels.csv", sep=",", error_bad_lines=False, header=0, index_col=0)
testset = pd.read_csv("data/test set values.csv", sep=',', error_bad_lines=False, header=0, index_col=0)
# Keep the test ids so the submission file can be indexed by them later.
finalfile_index=testset.index
# Attach the labels to the training values (both frames are indexed by id).
dataframe = dataset.join(datalabel, on=["id"])
# +
# Add label-indicator columns and quantile-binned (categorical) features.
# One indicator column per class of the label:
dataframe['functional'] = (dataframe['status_group'] == "functional") * 1
dataframe['non_functional'] = (dataframe['status_group'] == "non functional") * 1
dataframe['functional_needs_repair'] = (dataframe['status_group'] == "functional needs repair") * 1

# Quantile-bin strictly-positive values into 5 buckets; zeros are treated as
# missing and excluded from the binning (their rows get NaN).
dataframe['categorical_population'] = pd.qcut(dataframe['population'][dataframe['population'] > 0], 5)
dataframe['categorical_gps_height'] = pd.qcut(dataframe['gps_height'][dataframe['gps_height'] > 0], 5)
dataframe['categorical_construction_year'] = pd.qcut(dataframe['construction_year'][dataframe['construction_year'] > 0], 5)

# BUG FIX: the original used .min() over the whole column, which is 0 whenever
# zeros are present, so "replace 0 with the min" was a no-op. Use the smallest
# *known* (non-zero) construction year instead.
min_known_year = dataframe.loc[dataframe['construction_year'] > 0, 'construction_year'].min()
dataframe.loc[dataframe['construction_year'] == 0, 'construction_year'] = min_known_year

# BUG FIX: date_recorded is ISO formatted (YYYY-MM-DD), so the year is
# split('-')[0]; the original's split('-')[2] picked the day of month.
# (assumes ISO dates in the raw CSV — TODO confirm)
dataframe['categorical_usage_time'] = dataframe.apply(lambda row: int(row.date_recorded.split('-')[0]) - row.construction_year, axis=1)
dataframe['categorical_usage_time'] = pd.qcut(dataframe['categorical_usage_time'][dataframe['categorical_usage_time'] > 0], 5)

# Same feature engineering for the test set.
testset['categorical_population'] = pd.qcut(testset['population'][testset['population'] > 0], 5)
testset['categorical_gps_height'] = pd.qcut(testset['gps_height'][testset['gps_height'] > 0], 5)
testset['categorical_construction_year'] = pd.qcut(testset['construction_year'][testset['construction_year'] > 0], 5)
min_known_year_test = testset.loc[testset['construction_year'] > 0, 'construction_year'].min()
testset.loc[testset['construction_year'] == 0, 'construction_year'] = min_known_year_test
testset['categorical_usage_time'] = testset.apply(lambda row: int(row.date_recorded.split('-')[0]) - row.construction_year, axis=1)
testset['categorical_usage_time'] = pd.qcut(testset['categorical_usage_time'][testset['categorical_usage_time'] > 0], 5)
dataframe
# +
# Mean label shares per region (geographic effect).
region_view = dataframe[['region', 'functional', 'non_functional', 'functional_needs_repair']]
print(region_view.groupby('region').mean().sort_values(by='functional'))
# +
# How the waterpoint is managed.
management_view = dataframe[['management', 'functional', 'non_functional', 'functional_needs_repair']]
print(management_view.groupby('management').mean().sort_values(by='functional'))
# +
# The kind of extraction the waterpoint uses.
extraction_view = dataframe[['extraction_type_class', 'functional', 'non_functional', 'functional_needs_repair']]
print(extraction_view.groupby('extraction_type_class').mean().sort_values(by='functional'))
# +
# What the water costs.
payment_view = dataframe[['payment', 'functional', 'non_functional', 'functional_needs_repair']]
print(payment_view.groupby('payment').mean().sort_values(by='functional'))
# +
# The quality of the water.
quality_view = dataframe[['quality_group', 'functional', 'non_functional', 'functional_needs_repair']]
print(quality_view.groupby('quality_group').mean().sort_values(by='functional'))
# +
# The quantity of water.
quantity_view = dataframe[['quantity', 'functional', 'non_functional', 'functional_needs_repair']]
print(quantity_view.groupby('quantity').mean().sort_values(by='functional'))
# +
# The source of the water.
source_view = dataframe[['source_type', 'functional', 'non_functional', 'functional_needs_repair']]
print(source_view.groupby('source_type').mean().sort_values(by='functional'))
# +
# The population number (quantile bins).
population_view = dataframe[['categorical_population', 'functional', 'non_functional', 'functional_needs_repair']]
print(population_view.groupby('categorical_population').mean().sort_values(by='functional'))
# +
# The gps height (quantile bins).
gps_view = dataframe[['categorical_gps_height', 'functional', 'non_functional', 'functional_needs_repair']]
print(gps_view.groupby('categorical_gps_height').mean().sort_values(by='functional'))
# +
# The construction year (quantile bins).
year_view = dataframe[['categorical_construction_year', 'functional', 'non_functional', 'functional_needs_repair']]
print(year_view.groupby('categorical_construction_year').mean().sort_values(by='functional'))
# +
# The usage duration (quantile bins).
usage_view = dataframe[['categorical_usage_time', 'functional', 'non_functional', 'functional_needs_repair']]
print(usage_view.groupby('categorical_usage_time').mean().sort_values(by='functional'))
# +
# Columns discarded from BOTH frames: redundant duplicates, free-text /
# high-cardinality identifiers, and raw columns superseded by the
# engineered categorical features above.
SHARED_DROP_COLUMNS = [
    "funder", "gps_height", "installer", "longitude", "latitude",
    "wpt_name", "num_private", "basin", "subvillage", "region_code",
    "district_code", "lga", "ward", "population", "public_meeting",
    "recorded_by", "scheme_management", "extraction_type_group",
    "management_group", "payment", "payment_type", "water_quality",
    "quantity_group", "source", "source_class", "waterpoint_type",
    "scheme_name", "permit", "construction_year", "extraction_type",
    "waterpoint_type_group", "date_recorded", "amount_tsh",
]
# The training frame additionally carries the label-indicator columns,
# which must not leak into the features.
dataframe.drop(SHARED_DROP_COLUMNS + ["functional", "non_functional", "functional_needs_repair"],
               inplace=True, axis=1)
testset.drop(SHARED_DROP_COLUMNS, inplace=True, axis=1)
# Transform non numeric values to numeric values
# NOTE(review): each category is replaced by its order of first appearance,
# computed *independently* for train and test. The same integer code can
# therefore mean different categories in the two sets (and the qcut Interval
# categories can never line up at all). A mapping fitted on the training set
# and reused on the test set would be needed for consistent encodings —
# verify before trusting the test-set predictions.
nonNumericKeys = [
    'region', 'management', 'extraction_type_class', 'quality_group', 'quantity', 'source_type',
    'categorical_population', 'categorical_gps_height', 'categorical_construction_year', 'categorical_usage_time',
]
for nonNumericKey in nonNumericKeys:
    print(dataframe[nonNumericKey].unique().tolist())
    # Transformation: i-th distinct value (in order of appearance) -> code i
    for (i, value) in enumerate(dataframe[nonNumericKey].unique().tolist()):
        dataframe[nonNumericKey].replace(value, i, inplace=True)
    for (i, value) in enumerate(testset[nonNumericKey].unique().tolist()):
        testset[nonNumericKey].replace(value, i, inplace=True)
    # Convert column to int
    dataframe[nonNumericKey] = dataframe[nonNumericKey].astype(int)
    testset[nonNumericKey] = testset[nonNumericKey].astype(int)
# Transformation for status_group
# Convert column to int: functional=1, needs repair=0, non functional=-1
dataframe['status_group'].replace('functional', 1, inplace=True)
dataframe['status_group'].replace('functional needs repair', 0, inplace=True)
dataframe['status_group'].replace('non functional', -1, inplace=True)
dataframe['status_group'] = dataframe['status_group'].astype(int)
# Move the label to the first column so values[:, 0] is the target below.
dataframe = dataframe.reindex(columns=(['status_group'] + list([a for a in dataframe.columns if a != 'status_group']) ))
dataframe
# -
from sklearn.model_selection import train_test_split

# Split the (now fully numeric) frame into features and label:
# column 0 holds the integer-encoded 'status_group', the rest are features.
all_values = dataframe.to_numpy()
X_alltrain = all_values[:, 1:]
y_alltrain = all_values[:, 0]
X_train, X_test, y_train, y_test = train_test_split(X_alltrain, y_alltrain, random_state=42)
print('%i X_train, %i X_test, %i y_train,%i y_test'%(
    X_train.shape[0], X_test.shape[0], y_train.shape[0], y_test.shape[0]))
print('%s X_alltrain, %s y_alltrain'%(X_alltrain.shape, y_alltrain.shape))
print(y_alltrain)
feature_names = list(dataframe.columns[1:])
target_names = ["Non fonctionnel","Fonctionnel"]
print('features:',feature_names)
print('target:',target_names)
# +
# Helper that builds the Kaggle submission file.
def generer_resultats(clf, data=None):
    """Retrain *clf* on the full training data and write a submission CSV.

    Parameters
    ----------
    clf : estimator
        scikit-learn style classifier used for the prediction.
    data : ndarray, optional
        Feature rows to predict. Defaults to the test-set values.
        (FIX: the original default, ``data=dataframe.values``, was evaluated
        at def time and still contained the label column, so it could never
        be fed to a model fitted on ``X_alltrain``; the docstring already
        said the default should be the test dataset.)
    """
    if data is None:
        # Resolved at call time, so it picks up the fully preprocessed testset.
        data = testset.values
    print(clf.get_params())
    # Refit on ALL labelled rows before predicting the submission rows.
    clf.fit(X_alltrain, y_alltrain)
    prediction = clf.predict(data)
    results = pd.DataFrame(prediction.astype(int), index=finalfile_index, columns=['status_group'])
    # Map the integer encoding back to the label strings Kaggle expects.
    results['status_group'].replace(1, 'functional', inplace=True)
    results['status_group'].replace(0, 'functional needs repair', inplace=True)
    results['status_group'].replace(-1, 'non functional', inplace=True)
    results.to_csv('resultats%s.csv' % clf.__class__.__name__)
# Function for the 2-D display of the results
from matplotlib.colors import ListedColormap
def plot_decision_boundary(clf,X,y, axes=[-0, 30, -5, 5], axis_name=['x1','x2'],alpha=0.5, contour=True):
    """
    Plot a classifier's 2-D decision regions together with the data points.

    Parameters
    ----------
    clf : fitted classifier exposing ``predict`` on 2-feature rows
    X : array of shape (n_samples, 2), features of the points to display
    y : labels of the points in X (only classes 0 and 1 are scattered)
    axes : [xmin, xmax, ymin, ymax] plot limits (also the prediction grid extent)
    axis_name : names for the x and y axes on the figure
    alpha : transparency of the scatter points
    contour : whether to draw the boundary contour lines
    """
    # Evaluate the classifier on a 100x100 grid covering `axes`.
    x1s = np.linspace(axes[0], axes[1], 100)
    x2s = np.linspace(axes[2], axes[3], 100)
    x1, x2 = np.meshgrid(x1s, x2s)
    X_new = np.c_[x1.ravel(), x2.ravel()]
    y_pred = clf.predict(X_new).reshape(x1.shape)
    # Filled regions: one colour per predicted class.
    custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
    plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
    if contour:
        custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
        plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
    # NOTE(review): only classes 0 and 1 are scattered; class -1
    # ("non functional" in this notebook's encoding) is never drawn.
    plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bo",label="Non fonctionnel", alpha=alpha)
    plt.plot(X[:, 0][y==1], X[:, 1][y==1], "ys", label="Fonctionnel",alpha=alpha)
    plt.axis(axes)
    plt.xlabel(axis_name[0], fontsize=18)
    plt.ylabel(axis_name[1]+ " ",fontsize=18, rotation=0)
    plt.legend(loc="lower right", fontsize=14)
# +
from sklearn.naive_bayes import GaussianNB,MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier

# Fit a random-forest baseline on the training split and score it on the
# held-out split. (The name gnb_clf is historical — it is a forest.)
gnb_clf = RandomForestClassifier(random_state=2)
y_train = y_train.astype(int)
gnb_clf.fit(X_train, y_train)
y_pred = gnb_clf.predict(X_test)
accuracy_pct = 100 * accuracy_score(y_test, y_pred)
print("Niveau de précision : %.2f" % accuracy_pct)
# +
# Plots
# %matplotlib inline
import matplotlib.pyplot as plt
# Machine learning
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
# Silence deprecation noise from the libraries
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
# Final model: refit on the full training data inside generer_resultats,
# then used to predict the test set and write the submission CSV.
rnd_clf = RandomForestClassifier(random_state=2)
# Run on the test data
generer_resultats(rnd_clf, testset.values)
# -
| notebooks/Pump it up.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Polynomial Fitting and Interpolation
#
# ### Variance of estimates
#
# Least-squares provides a method to estimate the parameters of the linear model
#
# $$
# y = Xw
# $$
#
# as
#
# $$
# \hat{w} = (X^{T}X)^{-1}X^{T}y
# $$
#
# This equation provides a method to find the most likely value of $w$ such that $\hat{y} = X\hat{w}$ is the closest to $y$ in the least square sense. That is
#
# $$
# \sum(e^2) = \sum(y-\hat{y})^2
# $$
#
# is minimum. The estimated value of $w$ represents a mean value; it can be shown that the variance of the estimate is given by
#
# $$
# \sigma_{w}^2 = (X^{T}X)^{-1}\sigma_{e}^2
# $$
#
# where
# $$
# \sigma_{e}^2 = \frac{1}{N-p}\Vert y - X\hat{w} \Vert = \frac{1}{N-p}\sum e^{2}
# $$
#
# where $N$ is the length of the vector $y$ and $p$ the length of the vector $w$.
#
#
# __Why is this important__
#
# The variance of an estimated parameter can help you to figure out if the parameter is different from zero (that is, if the parameter should be included in the model). As a rule of thumb, if the
# $$
# w \pm 2\sigma_{w} < 0
# $$
#
# then the parameter is likely not different from zero and should be removed from the model.
# +
# Numerical example 1
# Assume the data was generated by the following linear model:
#   displacement = 3 + 0.2 * force + noise
import numpy as np
import matplotlib.pyplot as plt
force = np.arange(0,20)
amount_of_noise = 0.1
# NOTE: no RNG seed is set, so the noise (and every estimate below)
# changes from run to run.
displacement = 3 + force*(0.2) + amount_of_noise*np.random.randn(*force.shape)
plt.scatter(force,displacement)
plt.xlabel('Force (N)')
# FIX: corrected the axis-label typo ('Dispacement' -> 'Displacement').
plt.ylabel('Displacement (cm)')
plt.show()
# -
# if we only have access to the data $force$ and $displacement$, we can use least squares to estimate the parameters of the model
# Design matrix: columns [1, force] for intercept + slope.
X = np.column_stack((np.ones_like(force.T), force.T))
# Normal-equation solution w = (X^T X)^{-1} X^T y.
w_est = (np.linalg.inv(X.T@X)@X.T)@displacement
print(w_est)
# we can also estimate the variance of the estimates
# FIX: use the unbiased estimator stated above, sigma_e^2 = sum(e^2)/(N - p);
# the original used np.var, which divides by N (and subtracts the residual mean).
residuals = displacement - X@w_est
sigma2_e = (residuals@residuals) / (len(displacement) - X.shape[1])
w_var = np.linalg.inv(X.T@X)*sigma2_e
print(w_var)
# So that the estimated parameter means and standard deviations are
print(f'{w_est[0]} +/- {np.sqrt(w_var[0,0])}')
print(f'{w_est[1]} +/- {np.sqrt(w_var[1,1])}')
# Clearly both parameters are needed in the model
# +
# Numerical example 2
# Same generating model but WITHOUT the intercept:
#   displacement = 0.2 * force + noise
import numpy as np
import matplotlib.pyplot as plt
force = np.arange(0,20)
amount_of_noise = 0.1
displacement = force*(0.2) + amount_of_noise*np.random.randn(*force.shape)
plt.scatter(force,displacement)
plt.xlabel('Force (N)')
# FIX: corrected the axis-label typo ('Dispacement' -> 'Displacement').
plt.ylabel('Displacement (cm)')
plt.show()
# -
# we will use the same model as before (intercept + slope)
X = np.column_stack((np.ones_like(force.T), force.T))
w_est = (np.linalg.inv(X.T@X)@X.T)@displacement
# FIX: unbiased residual variance sum(e^2)/(N - p), matching the formula in
# the introduction, instead of np.var (which divides by N).
residuals = displacement - X@w_est
w_var = np.linalg.inv(X.T@X)*((residuals@residuals) / (len(displacement) - X.shape[1]))
print(f'{w_est[0]} +/- {np.sqrt(w_var[0,0])}')
print(f'{w_est[1]} +/- {np.sqrt(w_var[1,1])}')
# as you can see, for the first parameter, we have
#
# $$
# -0.034 - 0.032 = -0.066< 0
# $$
#
# so that this parameter is likely not needed in the model.
#
# #### This approach is very sensitive to the noise level, and is not useful if the model does not approximate well the true data behavior. Other approaches that are more robust to noise are available and should be preferred, such as the adjusted R2 score, or the information criterion.
# ### Polynomial fitting
#
# Often, the relation between data is non-linear and cannot be explained by a simple linear model.
#
# Consider the following data points
# Sample points to fit. Note the extra leading axis: both arrays are shaped
# (1, 15), hence the .T transposes used throughout below to make samples rows.
x = np.array([[0., 0.22439948, 0.44879895, 0.67319843, 0.8975979 ,
               1.12199738, 1.34639685, 1.57079633, 1.7951958 , 2.01959528,
               2.24399475, 2.46839423, 2.6927937 , 2.91719318, 3.14159265]])
y = np.array([[0.00000000e+00, 2.11502684e-01, 3.52202913e-01, 3.81114667e-01,
               3.03928805e-01, 1.69611983e-01, 4.82741074e-02, 3.74939946e-33,
               4.82741074e-02, 1.69611983e-01, 3.03928805e-01, 3.81114667e-01,
               3.52202913e-01, 2.11502684e-01, 1.22464680e-16]])
plt.plot(x.T,y.T,'o')
# Assume for a moment that a linear model can explain the relation between variables, that is
# $$
# \hat{y} = w_{0} + w_{1}x +e
# $$
#
# where $w_{0}$ and $w_{1}$ are parameters that can be estimated with least-squares, and $e$ is the noise.
# Design matrix for the straight-line model: columns [1, x].
X = np.column_stack((np.ones_like(x.T), x.T))
# Normal-equation least-squares solution.
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
plt.plot(x.T,y.T ,'--o',label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# Clearly that is not a good model, so we can start increasing the dimensionality of our model by adding non-linear relations between input and output data, that is
# $$
# \hat{y} = w_{0} + w_{1}x + w_{2}x^{2} +e
# $$
# Degree-2 polynomial: design matrix columns [1, x, x^2].
X = np.column_stack((np.ones_like(x.T), x.T, (x**2).T))
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
plt.plot(x.T,y.T, '--o',label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# Degree-3 polynomial.
X = np.column_stack((np.ones_like(x.T), x.T, (x**2).T, (x**3).T))
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
plt.plot(x.T,y.T, '--o', label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# Degree-4 polynomial.
X = np.column_stack((np.ones_like(x.T), x.T, (x**2).T, (x**3).T, (x**4).T))
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
plt.plot(x.T,y.T, '--o', label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# Degree-5 polynomial.
X = np.column_stack((np.ones_like(x.T), x.T, (x**2).T, (x**3).T, (x**4).T, (x**5).T))
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
plt.plot(x.T,y.T, '--o',label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# Degree-6 polynomial.
# FIX: the intercept (ones) column was dropped here, unlike every other fit
# in this section and the design matrix written out in the markdown below —
# restore it so this is the full degree-6 polynomial model.
X = np.column_stack((np.ones_like(x.T), x.T, (x**2).T, (x**3).T, (x**4).T, (x**5).T, (x**6).T))
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
plt.plot(x.T,y.T, '--o', label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# We are basically creating a model that describes the data as
# $$
# \begin{bmatrix}
# y[1] \\
# y[2] \\
# \vdots \\
# y[n]
# \end{bmatrix} = \begin{bmatrix}
# 1 & x[1] & x[1]^2 & x[1]^3 & x[1]^4 & x[1]^5 & x[1]^6 \\
# 1 & x[2] & x[2]^2 & x[2]^3 & x[2]^4 & x[2]^5 & x[2]^6 \\
# \vdots & \vdots \\
# 1 & x[n] & x[n]^2 & x[n]^3 & x[n]^4 & x[n]^5 & x[n]^6
# \end{bmatrix}
# \begin{bmatrix}
# w_{0}\\
# w_{1} \\
# w_{2} \\
# w_{3} \\
# w_{4} \\
# w_{5} \\
# w_{6} \\
# \end{bmatrix}
# $$
#
# and used least-squares to estimate the unknown model parameters
# Now, we can estimate the parameters' variances and verify if they are needed in the model
# +
X = np.column_stack((np.ones_like(x.T), x.T, (x**2).T, (x**3).T, (x**4).T, (x**5).T, (x**6).T))
w_est = (np.linalg.inv(X.T@X)@X.T)@y.T
# FIX: unbiased residual variance sum(e^2)/(N - p), as defined in the
# introduction, instead of np.var (which divides by N). Here N=15, p=7.
residuals = (y.T - X@w_est).ravel()
w_var = np.linalg.inv(X.T@X) * ((residuals@residuals) / (X.shape[0] - X.shape[1]))
# The diagonal of the covariance matrix holds the per-parameter variances.
for i, (w, var) in enumerate(zip(w_est, np.diag(w_var))):
    print(f'w_{i} = {w[0]} +/- {np.sqrt(var)}')
plt.plot(x.T,y.T, '--o', label='Measured Data')
plt.plot(x.T,X@w_est, '-o', label='Predicted Data')
plt.legend()
# -
# ### Interpolation
#
# Now that you have a model to describe the measured data, it is possible to _fill the gaps_ in the data.
#
# Assume that you want to know the value of the signal at $x=2.1$, this value is not included in the original table, but it can be estimated with the model
# $$
# y[2.1] = w_{0} + (2.1)w_{1} + (2.1)^2w_{2}+(2.1)^3w_{3} + (2.1)^4w_{4} + (2.1)^5w_{5} + (2.1)^6w_{6}
# $$
#
# Finally, if we want to have a smoother signal, we could create more values for the $x$ variable and use our model to find the corresponding values of $y$
# +
# Densify the x grid and evaluate the fitted degree-6 polynomial on it.
new_x = np.array([np.linspace(x[0][0], x[0][-1], 1000)])
# Powers x^0 .. x^6 as columns (x^0 is the intercept column of ones).
new_X = np.column_stack([(new_x**k).T for k in range(7)])
new_y = new_X@w_est
plt.plot(x.T,y.T, '--o', label='Measured Data')
plt.plot(new_x.T,new_y, label='Predicted Data')
plt.legend()
# -
import statsmodels.api as sm
# Cross-check with statsmodels: OLS on the same design matrix X reports the
# coefficients together with their standard errors and p-values directly.
result = sm.OLS(y.T, X).fit()
print(result.summary())
| polynomial fitting.ipynb |