code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# ### Categorical Data
#
# Categoricals are a pandas data type, which correspond to categorical variables in statistics: a variable, which can take
# on only a limited, and usually fixed, number of possible values (categories; levels in R). Examples are gender, social
# class, blood types, country affiliations, observation time or ratings via Likert scales.
#
# In contrast to statistical categorical variables, categorical data might have an order (e.g. ‘strongly agree’ vs ‘agree’ or
# ‘first observation’ vs. ‘second observation’), but numerical operations (additions, divisions, ...) are not possible.
#
# All values of categorical data are either in categories or np.nan. Order is defined by the order of categories, not lexical
# order of the values.
#
# documentation: http://pandas.pydata.org/pandas-docs/stable/categorical.html
import pandas as pd
import numpy as np
# Path to the source workbook; 'NA' cells are parsed as missing values.
file_name_string = 'C:/Users/<NAME>/Desktop/Exercise Files/02_07/Begin/EmployeesWithGrades.xlsx'
employees_df = pd.read_excel(file_name_string, 'Sheet1', index_col=None, na_values=['NA'])
# ##### Change data type
# change data type for "Grade" column to category
#
# documentation for astype(): http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.astype.html
employees_df["Grade"] = employees_df["Grade"].astype("category")
# ##### Rename the categories
# Rename the categories to more meaningful names.
# NOTE: assigning to Series.cat.categories directly was deprecated and removed
# in pandas 2.0; rename_categories() is the supported, version-safe API and
# keeps the underlying codes (row values) unchanged.
employees_df["Grade"] = employees_df["Grade"].cat.rename_categories(
    ["excellent", "good", "acceptable", "poor", "unacceptable"])
# ### Values in data frame have not changed
# tabulate Department, Name, and YearsOfService, by Grade
employees_df.groupby('Grade').count()
| basics/second/Categorical Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cruises Portfolio Risk and Returns
# + outputHidden=false inputHidden=false
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import warnings
warnings.filterwarnings("ignore")
# yfinance is used to fetch data
import yfinance as yf
# Patch pandas-datareader to route through yfinance.
# NOTE(review): pdr_override() was removed in recent yfinance releases —
# confirm the pinned yfinance version before running.
yf.pdr_override()
# + outputHidden=false inputHidden=false
# input
# Cruise-line stocks (the original "Airlines Stock" label was a copy-paste
# error — these tickers are Carnival, Carnival plc, Lindblad, Norwegian, Royal Caribbean)
symbols = ['CCL','CUK','LIND','NCLH','RCL']
# Analysis window: roughly Dec 2019 through the COVID-19 crash of early 2020.
start = '2019-12-01'
end = '2020-04-14'
# + outputHidden=false inputHidden=false
# Download adjusted closing prices, one column per ticker, indexed by date.
# NOTE(review): newer yfinance defaults to auto_adjust=True, which drops the
# 'Adj Close' column — confirm the installed version exposes it.
df = pd.DataFrame()
for s in symbols:
    df[s] = yf.download(s,start,end)['Adj Close']
# + outputHidden=false inputHidden=false
from datetime import datetime
from dateutil import relativedelta
# Parse the window endpoints and report the elapsed whole years.
d1 = datetime.strptime(start, "%Y-%m-%d")
d2 = datetime.strptime(end, "%Y-%m-%d")
delta = relativedelta.relativedelta(d2,d1)
print('How many years of investing?')
# NOTE: relativedelta.years is whole years only — 0 for this ~4.5-month window.
print('%s years' % delta.years)
# + outputHidden=false inputHidden=false
number_of_years = delta.years
# + outputHidden=false inputHidden=false
# Calendar days actually covered by the downloaded price index.
days = (df.index[-1] - df.index[0]).days
days
# + outputHidden=false inputHidden=false
df.head()
# + outputHidden=false inputHidden=false
df.tail()
# + outputHidden=false inputHidden=false
# Raw closing-price time series for all five tickers.
plt.figure(figsize=(12,8))
plt.plot(df)
plt.title('Cruises Stocks Closing Price')
plt.legend(labels=df.columns)
# + outputHidden=false inputHidden=false
# Normalize the data (min-max scaling per column to [0, 1] so the
# differently-priced stocks can be compared on one axis).
normalize = (df - df.min())/ (df.max() - df.min())
# + outputHidden=false inputHidden=false
plt.figure(figsize=(18,12))
plt.plot(normalize)
plt.title('Cruises Stocks Normalize')
plt.legend(labels=normalize.columns)
# + outputHidden=false inputHidden=false
# Daily simple returns; the first row (NaN from pct_change) is dropped.
stock_rets = df.pct_change().dropna()
# + outputHidden=false inputHidden=false
plt.figure(figsize=(12,8))
plt.plot(stock_rets)
plt.title('Cruises Stocks Returns')
plt.legend(labels=stock_rets.columns)
# + outputHidden=false inputHidden=false
# Running sum of daily returns (additive, not compounded).
plt.figure(figsize=(12,8))
plt.plot(stock_rets.cumsum())
plt.title('Cruises Stocks Returns Cumulative Sum')
plt.legend(labels=stock_rets.columns)
# + outputHidden=false inputHidden=false
# Pairwise scatter of daily returns with histograms on the diagonal.
sns.set(style='ticks')
ax = sns.pairplot(stock_rets, diag_kind='hist')
nplot = len(stock_rets.columns)
# Limit each x-axis to ~6 ticks so labels stay readable.
for i in range(nplot) :
    for j in range(nplot) :
        ax.axes[i, j].locator_params(axis='x', nbins=6, tight=True)
# + outputHidden=false inputHidden=false
# Same grid, but scatter above the diagonal, KDE below, histogram on it.
ax = sns.PairGrid(stock_rets)
ax.map_upper(plt.scatter, color='purple')
ax.map_lower(sns.kdeplot, color='blue')
ax.map_diag(plt.hist, bins=30)
for i in range(nplot) :
    for j in range(nplot) :
        ax.axes[i, j].locator_params(axis='x', nbins=6, tight=True)
# + outputHidden=false inputHidden=false
# Correlation matrix of daily returns.
plt.figure(figsize=(7,7))
corr = stock_rets.corr()
# plot the heatmap
sns.heatmap(corr,
            xticklabels=corr.columns,
            yticklabels=corr.columns,
            cmap="Reds")
# + outputHidden=false inputHidden=false
# Box plot
stock_rets.plot(kind='box',figsize=(12,8))
# + outputHidden=false inputHidden=false
# Risk/return scatter: mean daily return (x) vs. std of daily returns (y),
# one point per ticker, each labelled with an annotation arrow.
rets = stock_rets.dropna()
plt.figure(figsize=(12,8))
plt.scatter(rets.mean(), rets.std(),alpha = 0.5)
plt.title('Stocks Risk & Returns')
plt.xlabel('Expected returns')
plt.ylabel('Risk')
plt.grid(which='major')
for label, x, y in zip(rets.columns, rets.mean(), rets.std()):
    plt.annotate(
        label,
        xy = (x, y), xytext = (50, 50),
        textcoords = 'offset points', ha = 'right', va = 'bottom',
        arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=-0.3'))
# + outputHidden=false inputHidden=false
# Same scatter restyled: fixed marker area, darkgrid theme, boxed labels.
rets = stock_rets.dropna()
area = np.pi*20.0
sns.set(style='darkgrid')
plt.figure(figsize=(12,8))
plt.scatter(rets.mean(), rets.std(), s=area)
plt.xlabel("Expected Return", fontsize=15)
plt.ylabel("Risk", fontsize=15)
plt.title("Return vs. Risk for Stocks", fontsize=20)
for label, x, y in zip(rets.columns, rets.mean(), rets.std()) :
    plt.annotate(label, xy=(x,y), xytext=(50, 0), textcoords='offset points',
                 arrowprops=dict(arrowstyle='-', connectionstyle='bar,angle=180,fraction=-0.2'),
                 bbox=dict(boxstyle="round", fc="w"))
# + outputHidden=false inputHidden=false
# Flatten the absolute correlation matrix into (ticker, ticker) pairs,
# sorted from most to least correlated.
rest_rets = rets.corr()
pair_value = rest_rets.abs().unstack()
pair_value.sort_values(ascending = False)
# + outputHidden=false inputHidden=false
# Normalized Returns Data (min-max scaled per column)
Normalized_Value = ((rets[:] - rets[:].min()) /(rets[:].max() - rets[:].min()))
Normalized_Value.head()
# + outputHidden=false inputHidden=false
Normalized_Value.corr()
# + outputHidden=false inputHidden=false
# Same pair ranking as above but on the normalized returns.
normalized_rets = Normalized_Value.corr()
normalized_pair_value = normalized_rets.abs().unstack()
normalized_pair_value.sort_values(ascending = False)
# + outputHidden=false inputHidden=false
# Per-ticker mean (return) and std (risk) of daily returns.
print("Stock returns: ")
print(rets.mean())
print('-' * 50)
print("Stock risks:")
print(rets.std())
# + outputHidden=false inputHidden=false
# Build a per-ticker summary table of return/risk statistics.
table = pd.DataFrame()
table['Returns'] = rets.mean()
table['Risk'] = rets.std()
table.sort_values(by='Returns')
# + outputHidden=false inputHidden=false
table.sort_values(by='Risk')
# + outputHidden=false inputHidden=false
# Sharpe ratio with a flat risk-free rate of 1% (applied to daily figures).
rf = 0.01
table['Sharpe Ratio'] = (table['Returns'] - rf) / table['Risk']
table
# + outputHidden=false inputHidden=false
table['Max Returns'] = rets.max()
# + outputHidden=false inputHidden=false
table['Min Returns'] = rets.min()
# + outputHidden=false inputHidden=false
table['Median Returns'] = rets.median()
# + outputHidden=false inputHidden=false
# NOTE(review): stock_rets[-1:] is only the FINAL day's percentage return,
# not the cumulative return over the window the column name suggests —
# confirm intent before relying on 'Total Return'.
total_return = stock_rets[-1:].transpose()
table['Total Return'] = 100 * total_return
table
# + outputHidden=false inputHidden=false
# Geometric average daily return implied by that final-day figure.
table['Average Return Days'] = (1 + total_return)**(1 / days) - 1
table
# + outputHidden=false inputHidden=false
# Compound annual growth rate, annualized with 252 trading days
# over the `days` calendar days spanned by the price index.
initial_value = df.iloc[0]
ending_value = df.iloc[-1]
table['CAGR'] = ((ending_value / initial_value) ** (252.0 / days)) -1
table
# + outputHidden=false inputHidden=false
table.sort_values(by='Average Return Days')
| Python_Stock/Portfolio_Strategies/Cruises_Portfolio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
import pandas as pd
import numpy as np
# +
# conda install pyarrow -c conda-forge
# -
# # Constants
# Parquet shards that make up the clickstream dataset.
# BUGFIX: the original list was missing the comma after the first entry, so
# Python's implicit string-literal concatenation silently merged parts 00000
# and 00001 into one bogus path, leaving 9 elements instead of 10.
data_files = ["clickstream/part-00000.parquet",
              "clickstream/part-00001.parquet",
              "clickstream/part-00002.parquet",
              "clickstream/part-00003.parquet",
              "clickstream/part-00004.parquet",
              "clickstream/part-00005.parquet",
              "clickstream/part-00006.parquet",
              "clickstream/part-00007.parquet",
              "clickstream/part-00008.parquet",
              "clickstream/part-00009.parquet"]
# # Data reading
# Load the whole clickstream directory of parquet shards plus the train/test
# target CSVs, then print their shapes and peek at the first rows.
data_clickstream_df = pd.read_parquet("clickstream", engine="pyarrow")
train_target_df = pd.read_csv("alfabattle2_abattle_train_target.csv")
test_target_df = pd.read_csv("alfabattle2_prediction_session_timestamp.csv")
print(f"{data_clickstream_df.shape=}")
print(f"{train_target_df.shape=}")
print(f"{test_target_df.shape=}")
data_clickstream_df.head(2)
train_target_df.head(2)
test_target_df.head(2)
# # Check user distr
# Unique client ids present in the clickstream.
data_clickstream_unique_user = data_clickstream_df.client.unique()
print(f"{data_clickstream_unique_user.shape=}")
# +
# Unique client ids in the train and test target files.
train_target_unique_user = train_target_df.client_pin.unique()
test_target_unique_user = test_target_df.client_pin.unique()
print(f"{train_target_unique_user.shape=}")
print(f"{test_target_unique_user.shape=}")
# -
# Sanity checks: every train/test client must also appear in the clickstream,
# and every test client must appear in train (messages are in Russian:
# "the sets of users differ between X and Y").
assert np.intersect1d(data_clickstream_unique_user, train_target_unique_user).shape[0] == train_target_unique_user.shape[0], f"Кол-во разных юзеров в кликстриме и train разное"
assert np.intersect1d(data_clickstream_unique_user, test_target_unique_user).shape[0] == test_target_unique_user.shape[0], f"Кол-во разных юзеров в кликстриме и test разное"
assert np.intersect1d(train_target_unique_user, test_target_unique_user).shape[0] == test_target_unique_user.shape[0], f"Кол-во разных юзеров в train и test разное"
| check_client_distr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# # Derivadas aproximadas
#
# Nem sempre é possível calcular a derivada de uma função.
# Às vezes, a função em questão não é dada de forma explícita.
# Por exemplo,
# $$f(x) = \min_{|y| < x} \Big( \frac{\cos(2x^2 - 3y)}{20x - y} \Big).$$
#
# Assim, teremos que _estimar_ a derivada de $f$, sem calculá-la explicitamente.
# A ideia principal é que
# $$ f'(x) = \lim_{h \to 0} \frac{f(x+h) - f(x)}{h}, $$
# ou seja, que a derivada é o limite do "quociente fundamental".
# Podemos usar o computador para estimar este limite:
def df(f, x, h=1e-5):
    """Estimate the derivative of ``f`` at ``x`` by the forward-difference
    quotient with step ``h``.

    Works element-wise when ``x`` is a NumPy array, provided ``f`` itself
    is vectorized (this is explored in the exercise below).
    """
    ahead = f(x + h)
    here = f(x)
    return (ahead - here) / h
# ### Exercício: "Esta função é vetorial"?
#
# Ou seja, se passarmos um vetor `x` em argumento, vai dar certo?
# Em que condições?
# ### Exercício
#
# Calcule a derivada do seno no intervalo $[0,7]$ por este método,
# e compare com o resultado teórico.
# +
xs = np.arange(0, 7, 0.05)
### BEGIN SOLUTION
# Analytic derivative of sin vs. the forward-difference estimate from df().
dfx = np.cos(xs)
dfx_approx = df(np.sin,xs)
_, [ax1, ax2] = plt.subplots(ncols=2, figsize=(15,4))
ax1.set_title('Cálculo da derivada')
ax1.plot(xs, dfx_approx, label='aproximação')
ax1.plot(xs, dfx, label='valor real')
ax1.legend(loc=0)
ax2.set_title('Erro de aproximação')
ax2.plot(xs, dfx_approx - dfx)
plt.show()
# NOTE: the closing nbgrader delimiter was mistakenly "BEGIN SOLUTION";
# it must be END SOLUTION for the autograder to strip this cell correctly.
### END SOLUTION
# -
# ### Exercício
#
# Muitas vezes, a função que vamos usar é "vetorial", como por exemplo `sin`, `exp`.
# Mas às vezes não é tão simples escrever uma forma vetorial para uma função.
# Nestes casos, não podemos usar tão diretamente as funções acima para fazer gráficos,
# e em vez disso devemos construir as listas (ou, melhor, `array`s) nós mesmos.
#
# Vejamos um exemplo:
#
# Seja $y = f(t)$ a raiz de $t\cos(x) = x$.
# Uma forma de calcular $f$ seria, por exemplo,
# usando o método da bisseção.
# Por exemplo:
from rootfinding import bissection
def f(t):
    """Return the root x of t*cos(x) = x, located by bisection on
    [-pi/2, pi/2] with tolerance 1e-8."""
    return bissection(lambda x: t*np.cos(x) - x, -np.pi/2, np.pi/2, tol=1e-8)
# Agora, escreva uma função `fvect` que recebe um array do numpy e retorna o array correspondente a todas as $f(t)$
# para cada $t$ no array.
### Resposta aqui
# E agora, veja o gráfico de f.
# NOTE: `fvect` is the vectorized wrapper the student must define in the
# exercise cell above; until then this cell raises NameError.
v = np.arange(-3,3,0.05)
plt.plot(v, fvect(v));
plt.show()
# Com a ajuda da fvect, faça um gráfico da derivada de $f$.
### Resposta aqui
# ## Estimando o erro
#
# Uma atividade importante ao se construir um método numérico é calcular (ou ao menos estimar) o erro cometido.
# Em geral, estimativas de erros são feitas com mais do que as hipóteses mínimas para o método.
# Por exemplo, no caso do método de Newton, basta a função ser derivável para podermos usá-lo,
# mas para mostrar convergência quadrática, temos que supor que ela terá duas derivadas,
# e que o quociente $\frac{f''(\xi)}{2f'(x)}$ seja limitado no intervalo de convergência.
#
# Vamos, então, seguir este padrão: queremos calcular a primeira derivada,
# e para estimar o erro suporemos que a função é duas vezes derivável.
# Assim:
# $$ \frac{f(x+h) - f(x)}{h} - f'(x) = \frac{\big(f(x) + h f'(x) + \frac{h^2}{2} f''(\xi) \big) - f(x)}{h} - f'(x)
# = \frac{h f''(\xi)}{2}.$$
# No caso de $f(x) = \sin(x)$, o erro estará aproximadamente entre $h (-\sin(x))/2$ e $h (-\sin(x+h))/2$.
# Vejamos o quão próximo isto é de fato:
# Difference between the observed error and the first-order error model
# h*f''/2; here h = 1e-5 (the default step of df) and f'' = -sin for f = sin.
plt.title('Erro na estimativa do erro ("erro do erro")')
plt.plot(xs, (dfx_approx - dfx) - (- 1e-5 * np.sin(xs) / 2))
plt.show()
# O exemplo anterior mostra que, se desejamos aproximar a derivada de uma função "bem-comportada" pelo quociente fundamental,
# o erro será proporcional ao **passo** e à derivada segunda (que, em geral, não conhecemos!).
# Assim, para diminuir o erro, teremos que diminuir igualmente o passo.
# Mas isso pode resultar em erros de truncamento...
# +
# Repeat the experiment with a much smaller step (h = 1e-10): floating-point
# cancellation in f(x+h) - f(x) now dominates, so the error model breaks down.
dfx_approx_2 = df(np.sin,xs, h=1e-10)
_, [ax1, ax2] = plt.subplots(ncols=2, figsize=(15,4))
ax1.set_title('Erro de aproximação')
ax1.plot(xs, dfx_approx_2 - dfx)
ax2.set_title('Erro na estimativa do erro')
ax2.plot(xs, (dfx_approx_2 - dfx) - (- 1e-10 * np.sin(xs)/2))
plt.show()
# -
# -
# ### Exercício: vendo o truncamento
#
# Porque faz sentido, dados os gráficos acima,
# atribuir o erro de aproximação à precisão numérica do computador,
# e não à derivada segunda?
# Note que o erro de aproximação não está mais proporcional a $\varepsilon$.
# Para resolver isso, precisamos de um método de cálculo cujo erro seja menor!
| comp-cientifica-I-2018-2/semana-6/raw_files/Semana6-Parte1-DerivadasNumericas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def mameada_astral(pedido, *algo, **el_69):
    """Print a jokey student/professor exchange about *pedido*.

    Any extra positional arguments are displayed as a tuple; keyword
    arguments are echoed afterwards, one per line as ``key : value``.
    """
    print("profesor, buenos dias, ya tiene mi", pedido, "terminado?")
    print("Joven, aun falta, pero si me da", algo, "lo hago a la brevedad")
    print("pero si te apena, me puedes dar", algo)
    print(".i." * 20)
    for clave in el_69:
        print(clave, ":", el_69[clave])
# Demo call: one required positional, one extra positional, two keyword args.
mameada_astral("nota","el culo",profe="sthy",estudiante="bruno")
| IntroToPython/Untitled4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Validation Playground
#
# **Watch** a [short tutorial video](https://greatexpectations.io/videos/getting_started/integrate_expectations) or **read** [the written tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data)
#
# We'd love it if you **reach out for help on** the [**Great Expectations Slack Channel**](https://greatexpectations.io/slack)
import json
import great_expectations as ge
from great_expectations.profile import ColumnsExistProfiler
import great_expectations.jupyter_ux
from great_expectations.datasource.types import BatchKwargs
from datetime import datetime
# ## 1. Get a DataContext
# This represents your **project** that you just created using `great_expectations init`. [Read more in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#get-a-datacontext-object)
# Load the project's DataContext (configured by `great_expectations init`
# via great_expectations.yml in the working directory).
context = ge.data_context.DataContext()
# ## 2. List the CSVs in your folder
#
# The `DataContext` will now introspect your pyspark `Datasource` and list the CSVs it finds. [Read more in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#list-data-assets)
# `ge.jupyter_ux` resolves because of the `import great_expectations.jupyter_ux`
# above (ge is an alias for the same module object).
ge.jupyter_ux.list_available_data_asset_names(context)
# ## 3. Pick a csv and the expectation suite
#
# Internally, Great Expectations represents csvs and dataframes as `DataAsset`s and uses this notion to link them to `Expectation Suites`. [Read more about the validate method in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#pick-a-data-asset-and-expectation-suite)
#
# Placeholder: must be replaced with one of the asset names listed above.
data_asset_name = "ONE_OF_THE_CSV_DATA_ASSET_NAMES_FROM_ABOVE" # TODO: replace with your value!
normalized_data_asset_name = context.normalize_data_asset_name(data_asset_name)
normalized_data_asset_name
# We recommend naming your first expectation suite for a table `warning`. Later, as you identify some of the expectations that you add to this suite as critical, you can move these expectations into another suite and call it `failure`. [Read more in the tutorial](https://docs.greatexpectations.io/en/latest/getting_started/pipeline_integration.html?utm_source=notebook&utm_medium=integrate_validation#choose-data-asset-and-expectation-suite)
# Name of the expectation suite to validate against ("warning" by convention).
expectation_suite_name = "warning" # TODO: replace with your value!
# #### 3.a. If you don't have an expectation suite, let's create a simple one
#
# You need expectations to validate your data. Expectations are grouped into Expectation Suites.
#
# If you don't have an expectation suite for this data asset, the notebook's next cell will create a suite of very basic expectations, so that you have some expectations to play with. The expectation suite will have `expect_column_to_exist` expectations for each column.
#
# If you created an expectation suite for this data asset, you can skip executing the next cell (if you execute it, it will do nothing).
#
# To create a more interesting suite, open the [create_expectations.ipynb](create_expectations.ipynb) notebook.
#
#
# If the suite already exists this is a no-op; otherwise create a minimal
# suite of expect_column_to_exist expectations by profiling one batch.
try:
    context.get_expectation_suite(normalized_data_asset_name, expectation_suite_name)
except great_expectations.exceptions.DataContextError:
    # NOTE(review): the bare name `great_expectations` is only bound because of
    # the `import great_expectations.jupyter_ux` line above — confirm
    # `exceptions` is exposed on the package, else use `ge.exceptions`.
    context.create_expectation_suite(data_asset_name=normalized_data_asset_name, expectation_suite_name=expectation_suite_name, overwrite_existing=True);
    batch_kwargs = context.yield_batch_kwargs(data_asset_name)
    batch = context.get_batch(normalized_data_asset_name, expectation_suite_name, batch_kwargs)
    ColumnsExistProfiler().profile(batch)
    batch.save_expectation_suite()
    expectation_suite = context.get_expectation_suite(normalized_data_asset_name, expectation_suite_name)
    context.build_data_docs()
# ## 4. Load a batch of data you want to validate
#
# To learn more about `get_batch` with other data types (such as existing pandas dataframes, SQL tables or Spark), see [this tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#load-a-batch-of-data-to-validate)
#
# Fetch a fresh batch of the data asset, bound to the chosen suite,
# and preview its first rows.
batch_kwargs = context.yield_batch_kwargs(data_asset_name)
batch = context.get_batch(normalized_data_asset_name, expectation_suite_name, batch_kwargs)
batch.head()
# ## 5. Get a pipeline run id
#
# Generate a run id, a timestamp, or a meaningful string that will help you refer to validation results. We recommend they be chronologically sortable.
# [Read more in the tutorial](https://docs.greatexpectations.io/en/latest/getting_started/pipeline_integration.html?utm_source=notebook&utm_medium=validate_data#set-a-run-id)
# Let's make a simple sortable timestamp. Note this could come from your pipeline runner.
# datetime.utcnow() is deprecated since Python 3.12; use an aware UTC clock.
# The strftime format reproduces the previous "YYYY-MM-DDTHHMMSS.ffffffZ"
# shape exactly (ISO timestamp with the colons stripped, Z suffix).
from datetime import timezone

run_id = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H%M%S.%fZ")
run_id
# ## 6. Validate the batch
#
# This is the "workhorse" of Great Expectations. Call it in your pipeline code after loading data and just before passing it to your computation.
#
# [Read more about the validate method in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#validate-the-batch)
#
# +
# Validate the batch against its expectation suite; the result dict's
# "success" key is True only when every expectation passed.
validation_result = batch.validate(run_id=run_id)
if validation_result["success"]:
    print("This data meets all expectations for {}".format(str(data_asset_name)))
else:
    print("This data is not a valid batch of {}".format(str(data_asset_name)))
# -
# ## 6.a. OPTIONAL: Review the JSON validation results
#
# Don't worry - this blob of JSON is meant for machines. Continue on or skip this to see this in Data Docs!
# +
# print(json.dumps(validation_result, indent=4))
# -
# ## 7. Validation Operators
#
# The `validate` method evaluates one batch of data against one expectation suite and returns a dictionary of validation results. This is sufficient when you explore your data and get to know Great Expectations.
# When deploying Great Expectations in a **real data pipeline, you will typically discover additional needs**:
#
# * validating a group of batches that are logically related
# * validating a batch against several expectation suites such as using a tiered pattern like `warning` and `failure`
# * doing something with the validation results (e.g., saving them for a later review, sending notifications in case of failures, etc.).
#
# `Validation Operators` provide a convenient abstraction for both bundling the validation of multiple expectation suites and the actions that should be taken after the validation.
#
# [Read more about Validation Operators in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#save-validation-results)
# +
# This is an example of invoking a validation operator that is configured by default in the great_expectations.yml file
# Runs the suite AND the operator's configured post-validation actions
# (storing results, notifications, etc.).
results = context.run_validation_operator(
    assets_to_validate=[batch],
    run_id=run_id,
    validation_operator_name="action_list_operator",
)
# -
# ## 8. View the Validation Results in Data Docs
#
# Let's now build and look at your Data Docs. These will now include an **data quality report** built from the `ValidationResults` you just created that helps you communicate about your data with both machines and humans.
#
# [Read more about Data Docs in the tutorial](https://docs.greatexpectations.io/en/latest/tutorials/validate_data.html?utm_source=notebook&utm_medium=validate_data#view-the-validation-results-in-data-docs)
# Build/refresh the static Data Docs site and open it in the browser.
context.open_data_docs()
# ## Congratulations! You ran Validations!
#
# ## Next steps:
#
# ### 1. Author more interesting Expectations
#
# Here we used some **extremely basic** `Expectations`. To really harness the power of Great Expectations you can author much more interesting and specific `Expectations` to protect your data pipelines and defeat pipeline debt. Go to [create_expectations.ipynb](create_expectations.ipynb) to see how!
#
# ### 2. Explore the documentation & community
#
# You are now among the elite data professionals who know how to build robust descriptions of your data and protections for pipelines and machine learning models. Join the [**Great Expectations Slack Channel**](https://greatexpectations.io/slack) to see how others are wielding these superpowers.
| great_expectations/init_notebooks/spark/validation_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jmestradag/thefts_and_arrests_Colombia/blob/main/Thefts_arrests_analysis_Colombia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jpHek_xrhhOV"
# # Descriptive exploratory data analysis for thefts and arrests data in Colombia from 2010 to 2020
#
#
# + [markdown] id="SyXq5UAkJJkC"
# ## Introduction
#
# The crime with the most impact in Colombia is theft, according to the National Police database. Being the most frequent crime, it helps to explain the security perception among citizens and the general population. Looking at social media, there appears to be a generalized concern about this specific crime during the reactivation period, after five months of permanent quarantine and lockdown due to the coronavirus pandemic.
#
# Both of these facts explain why this descriptive analysis is focused on this crime instead of others that might be more outrageous — and, of course, because there is more data publicly available to practice data science skills with.
#
# This project tries to analyse official data in order to either reject or confirm if citizens perception is based on data reported by themselves to National Police.
# + [markdown] id="hVcW7_PYNR7M"
# ### Questions for project:
#
# 1. Which areas (states/cities) are top 5 most impacted by thefts? Is there any coincidence with areas having most unsatisfied basic needs or poverty levels? Or is inequality in main cities affecting thefts levels?
#
# 2. How we can compare thefts levels on these areas based on differences in their population distribution? Is there a metric for comparison against foreign cities around the world?
#
# 3. Is there a trend in data that can be used to answer a deteriorating security perception within citizens?
#
# 4. How efficiently have performed National Police during analysis period to answer security perception of citizens regarding thefts?
#
# 5. How is recidivism and overcrowding levels in Colombia's prisons are affecting security perception on citizens?
# + [markdown] id="IXC-GkAEztW_"
# #### Author: <EMAIL>
# + [markdown] id="wNeruorEfxa4"
# ### Data Sources:
#
# [Crime Database - National Police](
# https://www.policia.gov.co/grupo-informaci%C3%B3n-criminalidad/estadistica-delictiva)
#
# [Population distribution for Colombia - DANE](https://www.dane.gov.co/files/censo2018/informacion-tecnica/CNPV-2018-Poblacion-Ajustada-por-Cobertura.xls)
#
# [Unsatisfied Basic Needs data in Colombia - DANE](https://www.dane.gov.co/files/censo2018/informacion-tecnica/CNPV-2018-NBI.xlsx)
#
# [Prison population dashboard - INPEC](http://192.168.127.12:8080/jasperserver-pro/flow.html?_flowId=dashboardRuntimeFlow&dashboardResource=/public/DEV/dashboards/Dash__Poblacion_Intramural&j_username=inpec_user&j_password=<PASSWORD>)
#
# [Stats dashboards - INPEC](https://www.inpec.gov.co/estadisticas-/tableros-estadisticos)
# + [markdown] id="X7TDyWhQJS9F"
#
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <img src="https://upload.wikimedia.org/wikipedia/commons/a/a1/Escudo_Polic%C3%ADa_Nacional_de_Colombia.jpg" width="250" align="center">
# </a>
# <img src="https://seeklogo.com/images/C/colombia-dane-logo-CEC894F9A8-seeklogo.com.png" width="250" align="center">
# </a>
# <img src="https://www.inpec.gov.co/documents/20143/46642/3110245.PNG/751d7e05-ee7e-b529-0f64-1b934ea9e114?t=1511790311995" width="250" align="center">
# </a>
#
# </div>
#
# + [markdown] id="j8vmOaAHG_Q3"
# DANE is the institution in charge of handling statistics in Colombia.
#
# INPEC is the institution in charge of managing prisons and inmates in Colombia.
# + colab={"base_uri": "https://localhost:8080/"} id="majZ04O99klg" outputId="edc4c665-6c92-4acf-fc00-6c1313d32418"
# Google Colab only: mount the user's Drive so the data files below are
# reachable under /content/drive.
from google.colab import drive
drive.mount('/content/drive')
# + id="7mKproMg-eDe"
# Disable warnings in Anaconda
import warnings
warnings.filterwarnings('ignore')
#Libraries for analysis
import pandas as pd
import numpy as np
#Plotting libraries
import plotly.express as px
import plotly.graph_objs as go
# + [markdown] id="BRgtoUjrhZ0Q"
# ## Data capture
# + [markdown] id="gGY1yf7PsIdg"
# ### Theft Data
# + id="ERLrW8p3_VUg"
# Read theft (hurtos a personas) records, 2010 through Nov 30 2020,
# exported from the National Police crime database.
df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/hurtos_a_personas_2010_2020.csv')
#df
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="J3x0oTUzaQYL" outputId="2fcc1681-0b78-41c0-abca-9bbe64b8da39"
# Gather state-level population (2018 DANE census, coverage-adjusted) so
# rates can be computed instead of comparing raw counts.
pop_states = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/CNPV-2018-Poblacion-Ajustada-por-Cobertura.xlsm',
                           sheet_name='Ajuste por Cobertura CNPV 2018')
#data cleaning: keep only state name + total; uppercase names for joining.
pop_states.drop(columns=['Código DIVIPOLA', 'CABECERA', 'CENTROS POBLADOS Y RURAL DISPERSO'], inplace=True)
# NOTE: the 'TOTAL':'TOTAL' mapping is a no-op; only DEPARTAMENTO is renamed.
pop_states.rename(columns={'NOMBRE DEPARTAMENTO':'DEPARTAMENTO', 'TOTAL':'TOTAL'}, inplace=True)
pop_states.DEPARTAMENTO = pop_states.DEPARTAMENTO.str.upper()
pop_states.sort_values(by='TOTAL', ascending=False, inplace=True)
pop_states.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="p2x9kPnidvBd" outputId="148e6ccd-2c6b-4b70-c93f-d552c2d11534"
# Gather city-level population (2018 DANE census, coverage-adjusted) for
# per-capita rate comparisons.
pop_cities = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/CNPV-2018-Poblacion-Ajustada-por-Cobertura.xlsm',
                           sheet_name='Ajuste por Cobertura CNPV Mpios')
#data cleaning: keep state + city + total; uppercase names for joining.
pop_cities.drop(columns=['Código DIVIPOLA', 'CABECERA', 'CENTROS POBLADOS Y RURAL DISPERSO'], inplace=True)
pop_cities.rename(columns={'NOMBRE DEPARTAMENTO':'DEPARTAMENTO', 'NOMBRE MUNICIPIO':'MUNICIPIO','TOTAL':'TOTAL'}, inplace=True)
pop_cities.DEPARTAMENTO = pop_cities.DEPARTAMENTO.str.upper()
pop_cities.MUNICIPIO = pop_cities.MUNICIPIO.str.upper()
pop_cities.sort_values(by='TOTAL', ascending=False, inplace=True)
pop_cities.head(12)
# + [markdown] id="YsOmJu1MTu2x"
# The most populated states are driven by its main capital cities as can be seen on last 2 dataframes.
#
# Most population distribution is focused on cities (urban) instead of towns or counties (rural).
#
# Top 5 most populated cities in Colombia are:
#
# 1. Bogota
# 2. Medellin
# 3. Cali
# 4. Barranquilla
# 5. Cartagena
#
# These data is based on 2018 census from DANE which is the latest official stat about population.
# + id="nOIRrkbgPaWy"
# Gather Unsatisfied Basic Needs (NBI) indicators per state — the original
# comment said "population" but the CNPV-2018-NBI workbook holds UBN data
# (see the Data Sources section above).
ubn_states = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/CNPV-2018-NBI.xlsx',
                           sheet_name='Departamento')
#ubn_states.head()
# + id="p7F4J_uxPZ7J"
# Unsatisfied Basic Needs (NBI) indicators per city, same workbook.
ubn_cities = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/CNPV-2018-NBI.xlsx',
                           sheet_name='Municipios')
#ubn_cities.head()
# + [markdown] id="IwCVTQXBr-u_"
# ### Arrests data
# + id="pxRHLcHj-f0K"
# Arrest (capturas) workbooks, one per year 2011-2020. In each file,
# skiprows skips the report header and nrows trims the footer; the row
# counts were presumably taken from the source files — verify if they change.
# TODO(review): consider replacing these ten near-identical cells with a loop.
#Create a new dataframe for reading arrests details from 2011 - 2010 not included in original data source from police
df2 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2011.xlsx',
                    skiprows=10, nrows=127624)
#df2
# + id="U6BN4maTDMTC"
#Create a new dataframe for reading arrests details from 2012
df3 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2012.xlsx',
                    skiprows=10, nrows=143860)
#df3
# + id="9R7dcR-pEaXZ"
#Create a new dataframe for reading arrests details from 2013
df4 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2013.xlsx',
                    skiprows=10, nrows=151160)
#df4
# + id="mMmSs3EXFGr4"
#Create a new dataframe for reading arrests details from 2014
df5 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2014.xlsx',
                    skiprows=10, nrows=149313)
#df5
# + id="IzBQ78s8FmlL"
#Create a new dataframe for reading arrests details from 2015
df6 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2015.xlsx',
                    skiprows=10, nrows=148721)
#df6
# + id="4QMm98CKGa9l"
#Create a new dataframe for reading arrests details from 2016
df7 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2016.xlsx',
                    skiprows=10, nrows=143952)
#df7
# + id="s3ZWKZA6GyiE"
#Create a new dataframe for reading arrests details from 2017
df8 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2017.xlsx',
                    skiprows=10, nrows=143418)
#df8
# + id="Oj_EWGYCHPl8"
#Create a new dataframe for reading arrests details from 2018
df9 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2018.xlsx',
                    skiprows=10, nrows=139293)
#df9
# + id="EBU7dot2IAbP"
#Create a new dataframe for reading arrests details from 2019
df10 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2019.xlsx',
                     skiprows=10, nrows=232682)
#df10
# + id="2C3KxAduIbPP"
#Create a new dataframe for reading arrests details from 2020 - upto Nov 30th
# NOTE: the 2020 file uses skiprows=9 (one fewer header row than the others).
df11 = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/capturas_2020.xlsx',
                     skiprows=9, nrows=99475)
#df11
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="ZbceMA-CJGPE" outputId="fd580fc0-175c-4a2b-9029-511584b8358d"
#Create one single dataframe with arrests details from 2011 through 2020. ---> 2010 not included in National Police Database ***
#dataframes list
data_frames = [df2, df3, df4, df5, df6, df7, df8, df9, df10, df11]
# Column layout expected in every yearly file
colNames = ('DEPARTAMENTO', 'MUNICIPIO', 'CODIGO DANE', 'FECHA', 'GENERO', 'DELITO', 'EDAD', 'CANTIDAD')
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# appending inside a loop re-copies the accumulated frame on every iteration
# (quadratic). A single concat is the supported, linear replacement.
arrests = pd.concat(data_frames, sort=False)
# Reproduce the original column order: expected columns first (present or
# not), then any extra columns a yearly file might carry.
arrests = arrests.reindex(columns=list(colNames)
                          + [c for c in arrests.columns if c not in colNames])
arrests
# + [markdown] id="23oK_a7GHt2X"
# This dataframe contains all of the arrests performed by National Police with multiple crimes not just due to thefts. There are almost 1.5 million cases recorded during 2011 - 2020 period.
# + [markdown] id="rzgOwYH000Aj"
# ### Prison data
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="FzHFdako07VD" outputId="060c53eb-ad82-42c3-912a-ebd491b74807"
#Create a new dataframe for reading recidivism totals from 2016 - 2020. Other periods not included in original data source from INPEC
rcdv = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/reincidencia_total.xlsx')
rcdv
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="FlB_p_f61shO" outputId="8b9fc3ee-7f80-4b23-936c-b5af9b67e115"
#Create a new dataframe for detention months of unconvicted inmates from 2017 - 2020. Other periods not included in original data source from INPEC
dmu = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/meses_detencion_sindicados.xlsx')
dmu
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="wq8T2yFY23-j" outputId="202cce83-00c6-4c55-9bdc-84c4125202df"
#Create a new dataframe for detention years of convicted inmates from 2017 - 2020. Other periods not included in original data source from INPEC
dyc = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/annos_prision_condenados.xlsx')
dyc
# + colab={"base_uri": "https://localhost:8080/", "height": 254} id="V2FdDK7v3YXV" outputId="c6cae174-1c97-49d3-a7c5-57136b10d83e"
#Create a new dataframe for capacity and total prison population from 2014 - 2020. Other periods not included in original data source from INPEC
tpp = pd.read_excel('/content/drive/MyDrive/Colab Notebooks/Projects/hurtos_personas_2010_2020/consolidadoSituacionJuridica.xlsx')
tpp
# + [markdown] id="0u6J_fv9sYf9"
# ## Data Wrangling - Country level
# + [markdown] id="USh87muRsfUo"
# ###Theft raw data
#
# These analyses and plots are for raw aggregated data without adjusting for population levels.
# + id="7fyHjhluLYA_"
# Derive a canonical date string, a DateTime index, and the calendar features
# (month / year / month_year / weekday) used throughout the analysis.
df["fecha"] = pd.to_datetime(df["FECHA HECHO"]).dt.strftime('%Y/%m/%d')
# To convert the canonical string back to a datetime
df["DateTime"] = pd.to_datetime(df["fecha"], format="%Y/%m/%d")
# Set index as DateTime for plotting purposes
df = df.set_index(["DateTime"])
# Parse the string column once instead of re-running pd.to_datetime for each
# derived feature (the original parsed the same column five times).
_fecha_parsed = pd.to_datetime(df["fecha"])
df["month"] = _fecha_parsed.dt.month
df["year"] = _fecha_parsed.dt.year
df['month_year'] = _fecha_parsed.dt.to_period('M')
df["day"] = _fecha_parsed.dt.weekday
#df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Tiir8iUyemK3" outputId="e6ecad7b-d14f-483b-96d6-81e090a3616d"
# Aggregate theft counts per calendar year and draw the country-level trend.
df_pivot_year = (
    df.groupby("year", as_index=False)["CANTIDAD"]
    .sum()
    .sort_values(by="year")
)
px.line(df_pivot_year, x="year", y="CANTIDAD",
        title="Thefts per Year in Colombia - Jan 01st 2010 upto Nov 30th 2020")
# + [markdown] id="yp_xXarkJCK3"
# #### Thefts per year finding:
#
# There is an upward trend in thefts at country level since 2010 upto 2019; but there is a change in slope after 2015.
#
# 2020 was an atypical year due to lockdown.
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="UI21lkbiOLqg" outputId="9c4ded69-2a23-4b5a-ca23-12355b33d038"
# Collapse synonym / inconsistently-named weapon categories into one
# canonical label each, so the per-weapon aggregation is not fragmented.
_weapon_synonyms = {
    'ARMA BLANCA / CORTOPUNZANTE': 'ARMAS BLANCAS',
    'CORTANTES': 'ARMAS BLANCAS',
    'CORTOPUNZANTES': 'ARMAS BLANCAS',
    'PUNZANTES': 'ARMAS BLANCAS',
    'NO REPORTADO': 'SIN EMPLEO DE ARMAS',
    '-': 'SIN EMPLEO DE ARMAS',
    'NO REGISTRA': 'SIN EMPLEO DE ARMAS',
    'PALANCAS': 'CONTUNDENTES',
    'SUSTANCIAS TOXICAS': 'JERINGA',
    'DIRECTA': 'LLAVE MAESTRA',
    'ESCOPOLAMINA': 'ALUCINOGENOS',
}
# The previous chain of Series.replace(..., inplace=True) calls on a column
# selection is chained assignment: it warns and becomes a no-op under pandas
# Copy-on-Write. Assign the replaced column back explicitly instead.
df["ARMAS MEDIOS"] = df["ARMAS MEDIOS"].replace(_weapon_synonyms)
#Creating a pivot table of theft counts per weapon category
df_pivot_weapons = df.groupby(["ARMAS MEDIOS"])["CANTIDAD"].sum().reset_index().sort_values(by="CANTIDAD", ascending=False)
fig = px.bar(df_pivot_weapons, x='ARMAS MEDIOS', y='CANTIDAD', title='Weapons used during robbery in Colombia - Jan 01st 2010 upto Nov 30th 2020')
fig.show()
# + id="WROajMQCTGWv"
#df_pivot_weapons.sort_values(by="CANTIDAD", ascending=False)
# + [markdown] id="2D2nI1yAKMtp"
# #### Weapons used finding:
#
# Almost 50% of thefts are made without weapons (non violent thefts), the remaining 50% are violent with risk of harm or life threat.
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="vGSTnlabV9Wr" outputId="c1844283-acea-43d7-fbad-5a48180f7f02"
# Unify the two "not reported" gender labels into one. Replaced the
# Series.replace(inplace=True) calls on df["GENERO"] (chained assignment,
# deprecated / no-op under pandas Copy-on-Write) with explicit assignment.
df["GENERO"] = df["GENERO"].replace({'NO REGISTRA': 'NO REPORTA',
                                     'NO REPORTADO': 'NO REPORTA'})
#Creating a pivot table of theft counts per victim gender
df_pivot_gender = df.groupby(["GENERO"])["CANTIDAD"].sum().reset_index().sort_values(by="GENERO")
fig = px.bar(df_pivot_gender, x='GENERO', y='CANTIDAD', title='Gender most impacted by robbery in Colombia - Jan 01st 2010 upto Nov 30th 2020')
fig.show()
# + id="SRfxJkddYFFv"
#df_pivot_gender.sort_values(by="CANTIDAD", ascending=False)
# + [markdown] id="iQZjFcgRLZ-m"
# #### Gender assaulted finding:
#
# 60% of thefts are made to males and 40% to females. It is curious, since most common belief is that females are more prone to be assaulted in robbery.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="ItmysBt2gKPe" outputId="44f80d27-cdb7-4c12-87ee-ec082f05f5cd"
#checking which 'departamentos' (states) are more prone to theft and robbery
df_pivot_states = df.groupby(["DEPARTAMENTO"])["CANTIDAD"].sum().reset_index().sort_values(by="CANTIDAD", ascending=False)
df_pivot_states.head(10)
# + id="QLpleeNhgaGJ" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="6180b0dc-f296-4b2c-dd47-a0ae11288bb4"
#Graph for top 5 of states in Colombia most impacted by thefts
fig = px.bar(df_pivot_states.head(), x='DEPARTAMENTO', y='CANTIDAD', title='Top 5 of Colombian states most impacted by thefts levels')
fig.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="kMh1mUErfBHp" outputId="bebf7761-1060-4608-ca8c-3874b2db4183"
#checking which 'municipios' (cities) are more prone to theft and robbery
df_pivot_cities = df.groupby(["MUNICIPIO"])["CANTIDAD"].sum().reset_index().sort_values(by="CANTIDAD", ascending=False)
# Drop the ' (CT)' capital-city marker. The previous str.rstrip(' (CT)')
# stripped any trailing run of the characters ' ', '(', 'C', 'T', ')' —
# mangling city names that legitimately end in those letters — so remove the
# literal suffix instead.
df_pivot_cities["MUNICIPIO"] = df_pivot_cities["MUNICIPIO"].str.replace(r'\s*\(CT\)$', '', regex=True)
df_pivot_cities.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Igs54YbYRAxR" outputId="264dc815-41c1-45ac-a950-85aac068f5c8"
#Graph for top 5 of cities in Colombia most impacted by thefts
fig = px.bar(df_pivot_cities.head(), x='MUNICIPIO', y='CANTIDAD', title='Top 5 of Colombian cities most impacted by thefts levels')
fig.show()
# + [markdown] id="qTqfstcGIk_l"
# #### Top 5 areas impacted by theft finding:
#
# The most affected states are driven by its main capital cities crimes as can be seen. Most thefts / robbery distribution is focused on cities instead of towns or counties.
#
# Top 5 most impacted cities in Colombia by thefts are:
#
# 1. Bogota
# 2. Cali
# 3. Medellin
# 4. Barranquilla
# 5. Bucaramanga
#
# Cali is 3rd biggest city after Medellin, even though managed to get in top 2 of theft levels.
#
# This data is based on the 2018 census from DANE, which is the latest official statistic about population. DANE is the institution in charge of handling statistics in Colombia.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="1ocoqafgQ55l" outputId="5732908a-f912-4e91-f0dd-8293f6324d9e"
# Rank states by share of people with unsatisfied basic needs (NBI), keeping
# only the headline indicator columns.
ubn_states = (
    ubn_states.reset_index()
    .sort_values(by="Prop de Personas en NBI (%)", ascending=False)
    .drop(columns=['Componente vivienda', 'Componente Servicios',
                   'Componente Hacinamiento', 'Componente Inasistencia',
                   'Componente dependencia económica', 'Código Departamento',
                   'index'])
)
ubn_states.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="pMd6SidwQ5lK" outputId="599a1324-05f5-4470-ce6d-cfec1d76c09e"
# Same ranking for cities and towns.
ubn_cities = (
    ubn_cities.reset_index()
    .sort_values(by="Prop de Personas en NBI (%)", ascending=False)
    .drop(columns=['Componente vivienda', 'Componente Servicios',
                   'Componente Hacinamiento', 'Componente Inasistencia',
                   'Componente dependencia económica', 'Código Departamento',
                   'index', 'Código Municipio'])
)
ubn_cities.head(10)
# + [markdown] id="hVCHzKRMMXxL"
# #### Unsatisfied Basic Needs (UBN) finding:
#
# This data is taken from DANE census in 2018, most recent stat about poverty in Colombia. NBI is UBN in spanish.
#
# Since this data is a snapshot, not a time series like the rest of the data, we can not compute correlations; but at first sight, out of the 32 states (Departamentos), none of the top 10 states in UBN appear among the top 10 states most affected by thefts.
#
# This finding might suggest that generallized poverty is not a driver to commit this crime. By the way, these high UBN areas are far away from main cities where crime levels are high.
#
# Also, in cities, compared to towns, there are much less levels of UBN but still there are poor and rich neighborhoods. There are no official data as snapshot nor as time series for digging further in order to confirm or reject hypothesis of inequality being a driver for thefts.
# + [markdown] id="aTYQKve-l2pm"
# ###Theft rate data
#
# States theft's levels main drivers are its capital cities as can be seen on plots above. Meaning that small towns are not big contributors to these crime levels.
#
# These analyses and plots are for population adjusted data with thefts levels per 100,000 habitants to ease comparison but bearing in mind that it is useful just for cities with more than 500,000 habitants in order to avoid small towns affect stats.
#
# This rate data is the official indicator for comparison purposes against internal and external / foreign cities across the world.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="z-KozRQ4mCgJ" outputId="f80d6619-7aaf-4c5a-dc90-c20e13f9d1df"
# Join theft counts with city population to express thefts per 100,000
# inhabitants, removing population size as a confounder.
cities_data = df_pivot_cities.merge(pop_cities, on='MUNICIPIO', how='inner')
cities_data['rate'] = (cities_data['CANTIDAD'] / cities_data['TOTAL'] * 100000).round(2)
cities_data.sort_values(by='rate', ascending=False, inplace=True)
# Keep only cities above 500,000 inhabitants so small towns in the MUNICIPIOS
# list do not produce false-positive rates.
cities_data = cities_data[cities_data['TOTAL'] > 500000]
cities_data.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="laQYENDs9VCr" outputId="8c677f60-63c7-4ee9-c82e-43c382437472"
# Graph for the top 5 Colombian cities by theft rate
fig = px.bar(cities_data.head(), x='MUNICIPIO', y='rate', title='Top 5 of Colombian cities with higher rate of thefts levels per 100,000 hab')
fig.show()
# + [markdown] id="BsbFWC3-P3M5"
# #### Theft rate finding:
#
# Bogota is still number 1 in both top 5's, rated and unrated theft levels.
#
# Barranquilla, being 4th biggest city in population is in both top 5's.
#
# Bucaramanga, which has around half of Barranquilla's population has a higher theft rate. Bucaramanga, Villavicencio and Ibague are considered mid size cities in Colombia and are 9th, 10th, 11th in population size as per latest census data in 2018.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="AEf83jzsmCXJ" outputId="8fca9f22-a447-4a03-f4c3-3d04599f031a"
# Join theft counts with state population to express thefts per 100,000
# inhabitants, removing population size as a confounder.
states_data = pd.merge(df_pivot_states, pop_states, on='DEPARTAMENTO', how='inner')
states_data['rate'] = (states_data['CANTIDAD'] / states_data['TOTAL'] * 100000).round(2)
states_data.sort_values(by='rate', ascending=False, inplace=True)
# Keep only states above 500,000 inhabitants so very small states in the
# DEPARTAMENTOS list do not produce false-positive rates.
states_data = states_data[states_data['TOTAL'] > 500000]
states_data.head(10)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="IuB-hGhN9Xpi" outputId="cc933ae8-8cf0-47f9-ee51-83fcef29626c"
# Graph for the top 5 Colombian states by theft rate
fig = px.bar(states_data.head(), x='DEPARTAMENTO', y='rate', title='Top 5 of Colombian states with higher rate of thefts levels per 100,000 hab')
fig.show()
# + [markdown] id="38aSTqNJsjQr"
# ### Arrests data
# + id="iQx1PnPFaKWm" colab={"base_uri": "https://localhost:8080/", "height": 443} outputId="e8b6ee28-99be-4ef8-9391-1463c336f74c"
# Same date feature engineering as for the theft dataframe: canonical date
# string, DateTime index, and calendar features.
arrests["fecha"] = pd.to_datetime(arrests["FECHA"]).dt.strftime('%Y/%m/%d')
# To convert the canonical string back to a datetime
arrests["DateTime"] = pd.to_datetime(arrests["fecha"], format="%Y/%m/%d")
# Set index as DateTime for plotting purposes
arrests = arrests.set_index(["DateTime"])
# Parse once; the original re-parsed the string column for every feature.
_arr_fecha = pd.to_datetime(arrests["fecha"])
arrests["month"] = _arr_fecha.dt.month
arrests["year"] = _arr_fecha.dt.year
arrests['month_year'] = _arr_fecha.dt.to_period('M')
arrests["day"] = _arr_fecha.dt.weekday
arrests.head()
# + id="12LItvvjslS2" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="948d49e4-cc19-43ef-b06a-e517e91ac7f0"
# Pivot arrests per crime type; .head() keeps the five largest categories.
# Renamed from arrests_pivot_year, which clashed with the genuine per-year
# pivot built in the next cell; the title previously said "Top 10" while
# only five categories are plotted.
arrests_pivot_crime = arrests.groupby(["DELITO"])["CANTIDAD"].sum().reset_index().sort_values(by="CANTIDAD", ascending=False).head()
fig = px.bar(arrests_pivot_crime, x='DELITO', y='CANTIDAD', title='Top 5 arrests per crimes in Colombia - Jan 01st 2010 upto Nov 30th 2020')
fig.show()
# + id="S85m07hMaJrS" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="75f255d9-b70a-44af-88fa-1e7a1ec11bdc"
# Total arrests (all crimes) per year, country level.
arrests_pivot_year = (
    arrests.groupby("year", as_index=False)["CANTIDAD"]
    .sum()
    .sort_values(by="year")
)
px.line(arrests_pivot_year, x="year", y="CANTIDAD",
        title="Arrests per Year in Colombia due to multiple crimes - Jan 01st 2010 upto Nov 30th 2020")
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="3eJDKk-qApHK" outputId="9d0786a9-be4d-4646-b6f4-4f867747d79c"
# Arrests per crime type and year, one line per crime.
arrests_pivot_year2 = (
    arrests.groupby(["year", "DELITO"], as_index=False)["CANTIDAD"]
    .sum()
    .sort_values(by="year")
)
px.line(arrests_pivot_year2, x="year", y="CANTIDAD", line_group="DELITO",
        title="Arrests per crime and year in Colombia - Jan 01st 2010 upto Nov 30th 2020")
# + [markdown] id="4R9p6fw7Tg0e"
# #### Arrest per crime finding:
#
# Colombia being one of main producers of illicit drugs worldwide, its police is focused on this crime and the war on drugs (ARTÍCULO 376. TRÁFICO, FABRICACIÓN O PORTE DE ESTUPEFACIENTES). Thus, no surprise.
#
# Second most important arrest per crime category is theft (ARTICULO 239, HURTO PERSONAS). This helps to explain the importance of this analysis.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="8sibDk15Ao_8" outputId="cb8881e4-108b-4a5f-dde7-ed57e48badc9"
# Isolate theft-related arrests (Art. 239) per year and rename the count
# column for the later merge with theft totals. The previous code called
# .rename(inplace=True) on a filtered slice, which raises
# SettingWithCopyWarning and is unreliable under pandas Copy-on-Write;
# reassigning the renamed frame is the supported pattern.
arrests_pivot_year3 = arrests_pivot_year2[arrests_pivot_year2['DELITO'] == 'ARTÍCULO 239. HURTO PERSONAS']
arrests_pivot_year3 = arrests_pivot_year3.rename(columns={'CANTIDAD': 'TOTAL'})
arrests_pivot_year3
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="QAhKgsCuAo1h" outputId="b41151c9-0a3b-4eda-ebd1-5ad201edc291"
# Arrests per year due to theft / robbery only
px.line(arrests_pivot_year3, x="year", y="TOTAL",
        title="Arrests per year in Colombia due to theft / robbery - Jan 01st 2010 upto Nov 30th 2020")
# + [markdown] id="-J7BdgyaV2xW"
# #### Arrests due to theft finding:
#
# There is a downward trend in arrests due to theft category since 2014.
#
# 2020 is an atypical year due to lockdown but previous trend is worrisome.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="yEiK3yMGAorn" outputId="a435f326-4fa8-4e8c-d893-528bdfe06a30"
# Merge yearly theft totals with yearly theft arrests to estimate the share
# of reported thefts that ends in an arrest.
eff = df_pivot_year.merge(arrests_pivot_year3, on='year', how='inner')
eff['%arrests'] = (eff['TOTAL'] / eff['CANTIDAD'] * 100).round(2)
eff
# + id="kXN71KLTIV1s"
#df.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="IW-OLcGnGAXh" outputId="0c6319c8-5715-4015-d8c7-9821e6c73e90"
# Plot the arrest percentage per year.
px.line(eff, x="year", y="%arrests",
        title="% Arrests per year in Colombia on theft / robbery - Jan 01st 2010 upto Nov 30th 2020")
# + [markdown] id="ESPm9IdtWf8N"
# #### % efficiency of theft arrest finding:
#
# Efficiency of police regarding theft category is decreasing since 2011. Interesting finding that might help to support why security perception within citizens is worsening.
# + [markdown] id="ETtD83h_8A-K"
# ### Prison data
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="I5L7nz9p7_oi" outputId="1188826c-08f2-427f-ce86-b74365094338"
# Recidivism totals per period (INPEC only publishes 2016-2020).
px.line(rcdv, x="Periodo", y="Reincidencia",
        title="Recidivism in Colombia - Jan 01st 2010 upto Nov 30th 2020")
# + [markdown] id="-_RtnP7AXVcY"
# #### Recidivism finding:
#
# There is an upward trend in recidivism since 2016 upto 2020 bearing in mind that in 2020 arrests decreased due to lockdown.
#
# Interesting finding: as time goes by, felons are more inclined to re-offend after being released from prison.
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="qY1u2iIW9iS_" outputId="9a26add9-c170-43c8-93e2-126708f47661"
# One line per detention-length bucket (in months) for unconvicted inmates;
# the buckets are columns of the INPEC dataframe.
fig = go.Figure()
for _bucket in ('0-5', '6-10', '11-15', '16-20', '21-25', '26-30', '31-35', '>36'):
    fig.add_trace(go.Scatter(x=dmu['Periodo'], y=dmu[_bucket],
                             name=f'{_bucket} months'))
fig.update_layout(
    title='Time Series per year of detention months of unconvicted inmates in Colombia',
    xaxis_title='Year', yaxis_title='# Population')
fig.update_traces(mode='lines')
fig.show()
# + [markdown] id="_kldx_wPYH7n"
# #### Detention months of unconvicted inmates finding:
#
# A sharp decrease in detention months of unconvicted inmates since 2018 within range of 0-5 and 6-10 months with no increase in other time range.
#
# This finding means a higher turn over in prison systems or an increase in speed of judicial systems for inmate conviction process.
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="P2KJVTNEA5B7" outputId="18619476-75a7-4b46-c41a-84b44e516cf0"
# One line per sentence-length bucket (in years) for convicted inmates.
fig = go.Figure()
for _bucket in ('0-5', '6-10', '11-15', '16-20', '21-25', '26-30', '31-35', '>36'):
    fig.add_trace(go.Scatter(x=dyc['Periodo'], y=dyc[_bucket],
                             name=f'{_bucket} years'))
fig.update_layout(
    title='Time Series of detention years of convicted inmates in Colombia',
    xaxis_title='Year', yaxis_title='# Population')
fig.update_traces(mode='lines')
fig.show()
# + [markdown] id="xDqISmXgZZTD"
# #### Detention years of convicted inmates finding:
#
# A similar behaviour to the detention months of unconvicted inmates: a decrease in the 0-5 and 6-10 year ranges with no increase in the other time ranges.
#
# This finding means a higher turn over in prison systems.
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="zpqYseA8A983" outputId="8f725837-ce11-4396-8049-b0c411f034b9"
# Prison capacity vs actual population, with the unconvicted/convicted split.
fig = go.Figure()
for _col, _label in (('Capacidad', 'Capacity'),
                     ('Poblacion', 'Population'),
                     ('Sindicados', 'Unconvicted'),
                     ('Condenados', 'Convicted')):
    fig.add_trace(go.Scatter(x=tpp['Periodo'], y=tpp[_col], name=_label))
fig.update_layout(
    title='Time Series of Capacity vs Population of inmates in Colombia',
    xaxis_title='Year', yaxis_title='# Population')
fig.update_traces(mode='lines')
fig.show()
# + [markdown] id="YO5YzfIlaNsB"
# #### Prison's Capacity vs Population finding:
#
# There is a gap between capacity and population meaning overcrowding in prisons.
#
# During the last period, from 2019 to 2020, there is a decrease in the prison population due to the previously analysed decreases in the unconvicted and convicted populations within Colombian jails.
#
# Capacity is almost flat with very small changes due to the fact that it is expensive to build new jails with latest security standards in an undeveloped country with other more urgent needs in its society and therefore with budget constraints.
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="gO0rm5KdA431" outputId="e11c189a-e067-4607-b2ff-c9e6058970eb"
# Overcrowding: prison population above capacity, as a percentage of capacity.
tpp['% Overcrowding'] = (tpp['Poblacion'] / tpp['Capacidad'] * 100 - 100).round(2)
#tpp['% Overcrowding']
px.line(tpp, x="Periodo", y="% Overcrowding",
        title="% Overcrowding in prisons of Colombia - Jan 01st 2010 upto Nov 30th 2020")
# + [markdown] id="mRnGLTZCbj4v"
# #### % Overcrowding finding:
#
# % overcrowding since 2014 upto 2019 was around 45% and 55% above capacity but in 2020 dropped to 20%.
#
# It is either a big coincidence or a deliberate attempt by INPEC to reduce overcrowding in the prison system during the coronavirus lockdown. In 2020 there were press reports of riots in Colombian prisons.
# + [markdown] id="Mi0iK01fIRz5"
# ## Top 5 cities most impacted by theft analysis
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="E35UaTYFGAUi" outputId="6361ac70-8e66-4f08-ca53-ae4120b55143"
# Thefts vs theft-related arrests per year for one city. The five top-5-city
# cells below were near-identical copy-pastes; the shared logic now lives in
# a single helper called once per city.
_THEFT_ARTICLE = 'ARTÍCULO 239. HURTO PERSONAS'


def _plot_thefts_vs_arrests(municipio, city_label):
    """Plot yearly thefts vs theft arrests for one municipality.

    municipio: exact MUNICIPIO value as spelled in both `df` and `arrests`
        (capital cities carry a ' (CT)' suffix in the police files).
    city_label: human-readable city name used in the chart title.
    """
    thefts = (df[df['MUNICIPIO'] == municipio]
              .groupby(["year"])["CANTIDAD"].sum()
              .reset_index().sort_values(by="year"))
    city_arrests = arrests[arrests['MUNICIPIO'] == municipio]
    city_arrests = city_arrests[city_arrests['DELITO'] == _THEFT_ARTICLE]
    city_arrests = (city_arrests.groupby(["year"])["CANTIDAD"].sum()
                    .reset_index().sort_values(by="year"))
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=thefts['year'], y=thefts['CANTIDAD'], name='Thefts'))
    fig.add_trace(go.Scatter(x=city_arrests['year'], y=city_arrests['CANTIDAD'], name='Arrests'))
    fig.update_layout(title=f'Time Series of Thefts vs Arrests in {city_label}',
                      xaxis_title='Year', yaxis_title='# Population')
    fig.update_traces(mode='lines')
    fig.show()


_plot_thefts_vs_arrests('BOGOTÁ D.C. (CT)', 'Bogota')
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="F82-uZ2IWIKt" outputId="603112a0-16ff-4837-a917-8a141ccbf8ee"
_plot_thefts_vs_arrests('CALI (CT)', 'Cali')
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Q_KYxaVpGARo" outputId="55e32220-4d9a-40f0-a301-9fb727a0935d"
_plot_thefts_vs_arrests('MEDELLÍN (CT)', 'Medellin')
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="aTxXw1cWGAM-" outputId="e509b923-8e3a-4482-fe17-7143023f1365"
_plot_thefts_vs_arrests('BARRANQUILLA (CT)', 'Barranquilla')
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="04sMj8YeGAJS" outputId="73e3975e-a800-44fd-8656-1e422a46b992"
_plot_thefts_vs_arrests('BUCARAMANGA (CT)', 'Bucaramanga')
# + [markdown] id="NRc6GXnqcnp9"
# #### Top 5 cities most affected by theft findings:
#
# In all 5 cases, the gap between thefts cases and arrest cases due to thefts are widening, meaning an increase in security issues for citizens of these 5 cities.
# + [markdown] id="KZ21El6iMVS3"
# ## Conclusions:
# + [markdown] id="5EtxAMVfdP_D"
# 1. There is an upward trend in thefts at the country level from 2010 up to 2019, with an accelerated rate of increase after 2015. 2020 was an atypical year due to lockdown.
#
# 2. Almost 50% of thefts are made without weapons (non violent thefts), the remaining 50% are violent with risk of harm or life threat.
#
# 3. The most affected states are driven by its main capital cities crimes as can be seen. Most thefts / robbery distribution is focused on cities (urban areas) instead of towns or counties (rural areas). This answers question # 1 partially.
#
# 3.1. Top 5 most impacted cities in Colombia by thefts are:
#
# Bogota
# Cali
# Medellin
# Barranquilla
# Bucaramanga
#
# 4. Bogota is still number 1 in both top 5's, rated and unrated theft levels. Barranquilla, being 4th biggest city in population is in both top 5's. Bucaramanga, which has around half of Barranquilla's population has a higher theft rate. This finding answers question # 2 which asks about how to compare rate among internal and external / foreign cities.
#
# 5. Since the UBN data is a snapshot, not a time series like the rest of the data, we cannot compute correlations; but at first sight, of the 32 states (Departamentos), none of the top 10 states by UBN appears among the top 10 states most affected by thefts. This finding suggests that generalized poverty is not a driver of this crime. Moreover, these high-UBN areas are far away from the main cities, where crime levels are high. Also, cities show much lower UBN levels than towns, yet they still contain both poor and rich neighborhoods. There is no official data, either as a snapshot or as a time series, for digging further in order to confirm or reject the hypothesis that inequality is a driver of thefts. Therefore, question # 1 cannot be answered completely due to the lack of data availability on inequality within Colombian cities.
#
# 6. Colombia being one of main producers of illicit drugs worldwide, its police is focused on the war on drugs (ARTÍCULO 376. TRÁFICO, FABRICACIÓN O PORTE DE ESTUPEFACIENTES). Second most important arrest per crime category is theft (ARTICULO 239, HURTO PERSONAS). This helps to explain the importance of this analysis.
#
# 7. There is a downward trend in arrests due to theft category since 2014 this trend is worrisome. Conclusions # 1 and # 7 answers question # 3.
#
# 8. Efficiency of police regarding theft category is a decreasing trend since 2011. Interesting finding that might help to support why security perception within citizens is worsening. Police efforts are focused on war on drugs and in a lesser way to reduce thefts as can be seen on plots based on official records. This finding answers question # 4.
#
# 9. There is an upward trend in recidivism from 2016 up to 2020. Interesting finding: as time goes by, felons are more inclined to re-offend after being released from prison. This finding partially answers question # 5.
#
# 10. A sharp decrease in detention months of unconvicted inmates since 2018 within range of 0-5 and 6-10 months, this finding means a higher turn over in prison systems or an increase in speed of judicial systems for inmate conviction process.
#
# 11. Similar to the detention months of unconvicted inmates, there is a decrease in the 0-5 and 6-10 year ranges for convicted inmates; this finding means a higher turnover in prison systems. This also helps to explain why citizens are concerned about the worsening perception of security on the streets.
#
# 12. There is a gap between capacity and population meaning overcrowding in prisons. During last period from 2019 to 2020 there is a decrease of prison's population due to previously analysed decreases in unconvited and convicted population within Colombian jails. Capacity is almost flat with very small changes due to the fact that it is expensive to build new jails with latest security standards in an undeveloped country with other more urgent needs in its society and therefore with budget constraints to try to solve this overcrowding issue.
#
# 13. % overcrowding since 2014 upto 2019 was around 45% and 55% above capacity but in 2020 dropped to 20%. It is either a big coincidence or an attempt to reduce overcrowding in prison systems from INPEC during coronavirus lockdown. In 2020 there are records of riots in Colombian prisons on press media. This finding answers partially question # 5 because there is correlation between recidivism and overcrowding documented on multiple studies: [Universidad EAFIT](https://poseidon01.ssrn.com/delivery.php?ID=797101088103070016103114090083084095061040041017050027018015064117008114007024116112101013061121056036119094117095019084068065043013049092040071082092021004088007005060044012072115081118027123030013011115092025123001094069067124108117102089027023072009&EXT=pdf&INDEX=TRUE), [University of Chicago](https://www.jstor.org/stable/1147497?seq=1), and [University of Geneve](https://www.frontiersin.org/articles/10.3389/fpsyt.2019.01015/full).
#
# 14. In all of the top 5 cases, the gap between thefts cases and arrest cases due to thefts are widening, meaning an increase in security issues for citizens of these 5 cities. This also helps to explains why citizens are concerned with security perception worsening on cities' streets. This confirms answers to question # 3 given on conclusions # 1 and 7.
| Thefts_arrests_analysis_Colombia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Dataframes
# ### Creating Dataframe
# + pycharm={"name": "#%%\n"}
import numpy as np
import pandas as pd
# + pycharm={"name": "#%%\n"}
# Fixed seed so the random demo data is reproducible across runs.
np.random.seed(101)
# 4x3 matrix of random integers in [0, 100].
mydata = np.random.randint(0, 101, (4, 3))
mydata
# + pycharm={"name": "#%%\n"}
# Build a DataFrame with explicit row labels (states) and column labels (months).
myindex = ['CA', 'NY', 'AZ', 'TX']
mycols = ['Jan', 'Feb', 'Mar']
df = pd.DataFrame(data=mydata, index=myindex, columns=mycols)
df
# + pycharm={"name": "#%%\n"}
# Without parentheses this displays the bound method object (deliberate demo);
# the next cell actually calls it.
df.info
# + pycharm={"name": "#%%\n"}
df.info()
# + pycharm={"name": "#%%\n"}
# NOTE: `pwd` is an IPython magic — this line only works inside a notebook/IPython shell.
pwd
# + pycharm={"name": "#%%\n"}
# ls
# + pycharm={"name": "#%%\n"}
# Replace the demo frame with the tips dataset (path relative to the notebook).
df = pd.read_csv('DATA/tips.csv')
df
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Basic properties
# + pycharm={"name": "#%%\n"}
df.columns
# + pycharm={"name": "#%%\n"}
df.index
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df.head(10)
# + pycharm={"name": "#%%\n"}
df.tail(10)
# + pycharm={"name": "#%%\n"}
df.info()
# + pycharm={"name": "#%%\n"}
# Transposed so each statistic is a column — easier to read with many columns.
df.describe().transpose()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Working with columns
# + pycharm={"name": "#%%\n"}
# Single bracket -> Series.
df['total_bill']
# + pycharm={"name": "#%%\n"}
type(df['total_bill'])
# + pycharm={"name": "#%%\n"}
# Double bracket (list of names) -> DataFrame.
df[['total_bill', 'tip']]
# + pycharm={"name": "#%%\n"}
# Derived column: tip as a percentage of the bill.
df['tip_percentage'] = 100 * df['tip'] / df['total_bill']
# + pycharm={"name": "#%%\n"}
df
# + pycharm={"name": "#%%\n"}
# Derived column: bill split evenly across the party size.
df['price_per_person'] = df['total_bill'] / df['size']
# + pycharm={"name": "#%%\n"}
df
# + pycharm={"name": "#%%\n"}
# Overwrite with a rounded version (2 decimal places).
df['price_per_person'] = np.round(df['total_bill'] / df['size'], 2)
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
# drop() returns a copy; df is unchanged until inplace=True (next cells).
df.drop('tip_percentage', axis=1)
# + pycharm={"name": "#%%\n"}
df
# + pycharm={"name": "#%%\n"}
df.drop('tip_percentage', axis=1, inplace=True)
df
# + pycharm={"name": "#%%\n"}
df.shape
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Working with rows
# + pycharm={"name": "#%%\n"}
df.index
# + pycharm={"name": "#%%\n"}
# set_index() returns a re-indexed copy; df itself is unchanged until reassigned.
df.set_index("Payment ID")
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df = df.set_index("Payment ID")
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df.reset_index()
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df.reset_index(inplace=True)
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df.set_index("Payment ID", inplace=True)
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
# Integer-position based lookup.
df.iloc[0]
# + pycharm={"name": "#%%\n"}
# Label based lookup.
df.loc['Sun2959']
# + pycharm={"name": "#%%\n"}
df.iloc[0:4]
# + pycharm={"name": "#%%\n"}
df.loc[['Sun2959', 'Sun4458']]
# + pycharm={"name": "#%%\n"}
df.drop('Sun2959', axis=0)
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df.drop('Sun2959', axis=0, inplace=True)
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
df = df.iloc[1:]
# + pycharm={"name": "#%%\n"}
df.head()
# + pycharm={"name": "#%%\n"}
one_row = df.iloc[0]
one_row
# + pycharm={"name": "#%%\n"}
# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
# pd.concat with the row promoted to a one-row frame is the supported equivalent.
pd.concat([df, one_row.to_frame().T])
# + pycharm={"name": "#%%\n"}
# Fix: `df.tail` without parentheses only displays the bound method — call it.
df.tail()
| 003_pandas_dataframes_pract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd

# Each row of the query set describes one authorship-attribution test case:
# a document, its true author, and the candidate author list.
df = pd.read_csv('queryset_CNN.csv')
print(df.shape)
print(df.dtypes)
preds = []
pred = []
# Run every test case; results are written to the remote DB, and already
# completed cases (checkOldML) are skipped so the loop is resumable.
for index, row in df.iterrows():
    doc_id = row.doc_id
    author_id = row.author_id
    import ast
    # authorList is stored as a stringified Python list in the CSV.
    authorList = ast.literal_eval(row.authorList)
    candidate = len(authorList)
    algo = "tfidf_svc"
    test = algo # change before run
    # NOTE(review): level/iterations/dropout/dimensions are set but never used below —
    # presumably leftovers from the neural-network variants of this driver.
    level = "word"
    iterations = 30
    dropout = 0.5
    samples = 3200
    dimensions = 200
    loc = authorList.index(author_id)
    printstate = (("doc_id = %s, candidate = %s, ") % (str(doc_id), str(candidate)))
    printstate += (("samples = %s, ") % (str(samples)))
    printstate += (("test = %s") % (str(test)))
    print("Current test: %s" % (str(printstate)))
    from sshtunnel import SSHTunnelForwarder
    # Tunnel to the remote MySQL (3306) via SSH, exposed locally on port 3300,
    # only for the duration of the duplicate-check query.
    # NOTE(review): credentials are hard-coded here — should come from config/env.
    with SSHTunnelForwarder(('192.168.127.12', 22),
                            ssh_username='ninadt',
                            ssh_password='<PASSWORD>',
                            remote_bind_address=('localhost', 3306),
                            local_bind_address=('localhost', 3300)):
        import UpdateDB as db
        case = db.checkOldML(doc_id = doc_id, candidate = candidate, samples = samples,
                             test = test, port = 3300)
    if case == False:
        print("Running: %12s" % (str(printstate)))
        import StyloML as Stylo
        # Train on the candidate set, then score the held-out document.
        (labels_index, train_acc, val_acc, samples) = Stylo.getResults(
                                                    algo,
                                                    doc_id = doc_id, authorList = authorList[:],
                                                    samples = samples)
        (labels_index, testY, predY, samples) = Stylo.getTestResults(
                                                    algo, labels_index = labels_index,
                                                    doc_id = doc_id, authorList = authorList[:],
                                                    samples = samples)
        loc = testY
        # Probability assigned to the true author, and 1/0 top-1 correctness.
        test_acc = predY[loc]
        test_bin = 0
        if(predY.tolist().index(max(predY)) == testY):
            test_bin = 1
        from sshtunnel import SSHTunnelForwarder
        # Second short-lived tunnel to persist this case's results.
        with SSHTunnelForwarder(('192.168.127.12', 22),
                                ssh_username='ninadt',
                                ssh_password='<PASSWORD>',
                                remote_bind_address=('localhost', 3306),
                                local_bind_address=('localhost', 3300)):
            import UpdateDB as db
            case = db.updateresultOldML(doc_id = doc_id, candidate = candidate, samples = samples,
                                        train_acc = train_acc, val_acc = val_acc,
                                        test_acc = test_acc, test_bin = test_bin,
                                        test = test, port = 3300)
        # Free the model module and give the remote side a breather between runs.
        del Stylo
        import time
        time.sleep(10)
        from IPython.display import clear_output
        clear_output()
    else:
        print("Skipped: %12s" % (str(printstate)))
# +
# import matplotlib.pyplot as plt
# # summarize history for accuracy
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# +
# # summarize history for loss
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# -
# %tb
| fyp/Test ML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
import json
import logging
import os
import click
import pydgraph
from pprint import pprint
# +
# Module-level logger and dgraph client shared by all helpers below.
logger = logging.getLogger("Dependency Graph Generation")
logger.setLevel(logging.DEBUG)
# Connects to a local dgraph alpha on its default gRPC port.
client_stub = pydgraph.DgraphClientStub('localhost:9080')
client = pydgraph.DgraphClient(client_stub)
# +
# Drop All - discard all data and start from a clean slate.
def drop_all(client):
    """Wipe every node and predicate from the dgraph instance."""
    wipe_op = pydgraph.Operation(drop_all=True)
    return client.alter(wipe_op)
# Set schema.
def set_schema(client):
    """Install the Package schema (indexed name, reverse `depends` edges)."""
    schema = """
    name: string @index(exact) .
    depends: [uid] @reverse .
    version: string .
    src: string .
    pkg_rel_date: string .
    number_dependents: int .
    type Package {
        name
        depends
        version
    }
    """
    schema_op = pydgraph.Operation(schema=schema)
    return client.alter(schema_op)


# Reset the database, then (re)apply the schema.
drop_all(client)
set_schema(client)
# +
def query_package(client, package_name, package_version):
    """Return the stored package node matching (name, version), or [] if absent."""
    query = """query all($name: string, $version: string) {
        all(func: eq(name, $name)) @filter(eq(version, $version)) {
            uid
            name
            version
            depends {
                name
                version
                version_specifier
            }
            number_dependents
        }
    }"""
    variables = {'$name': package_name, '$version': package_version}
    res = client.txn(read_only=True).query(query, variables=variables)
    # Single lookup of the "all" result list instead of two .get() calls.
    matches = json.loads(res.json).get("all")
    if matches:
        return matches[0]
    return []
def insert_package(package_json):
    """Insert every package from *package_json* into dgraph and record its edges.

    For each entry in ``dep_info``: if the (name, version) node already exists,
    its ``number_dependents`` counter is incremented; otherwise a new Package
    node is created. Dependency edges are then appended to ``dependencies.csv``
    (one row per edge) instead of being mutated into the graph directly.
    Uses the module-level ``client``.
    """
    txn = client.txn()
    package_uid = {}       # (name, version) -> node uid
    pkg_dependencies = {}  # node uid -> list of dependency dicts
    for package_name, package in package_json.get("dep_info", {}).items():
        version = package.get("ver")
        # Check if this package already exists in the graph db.
        # If it does, increment its number of dependents by 1;
        # otherwise insert it into the graph db.
        existing_package = query_package(client, package_name, version)
        if existing_package:
            existing_package['number_dependents'] = existing_package['number_dependents']+1
            txn.mutate(set_obj=existing_package)
            package_uid[(package_name, version)] = existing_package['uid']
            continue
        pkg_to_insert = {
            'uid': '_:{}'.format(package_name),
            'dgraph.type': 'Package',
            'name': package_name,
            'version': version,
            'pkg_rel_date': package['pkg_rel_date'][0],
            'src': package['src'][0],
            'number_dependents': 0
        }
        insert_data = txn.mutate(set_obj=pkg_to_insert)
        package_uid[(package_name, version)] = insert_data.uids[package_name]
        pkg_dependencies[insert_data.uids[package_name]] = package.get('dep')
    # Commit the nodes first so every dependency target has a known uid.
    txn.commit()
    # Fix: the original wrote a single row referencing an undefined `dependency`
    # (its enclosing loop had been commented out) and dumped the whole
    # `package_uid` dict as the first column. Write one row per dependency edge.
    with open(r'dependencies.csv', 'a') as f:
        writer = csv.writer(f)
        for package_uuid, dependencies in pkg_dependencies.items():
            for dependency in dependencies:
                writer.writerow([package_uuid,
                                 package_uid[(dependency['dep_name'], dependency['dep_ver'])],
                                 dependency['dep_name'], dependency['dep_ver'],
                                 dependency['dep_constraint']])
# +
@click.command()
@click.option('--datadir', help='Directory of the dependency JSON files.')
def generate_dep_graph(datadir):
    """Load every dependency JSON file found in *datadir* into dgraph."""
    for entry in os.listdir(datadir):
        with open(os.path.join(datadir, entry)) as file:
            json_data = json.load(file)
        root_pkg = json_data.get("root_pkg")
        logger.info("Inserting data for {}".format(root_pkg))
        try:
            insert_package(json_data)
        except Exception as e:
            # Keep going on a bad file; record which package failed and why.
            logger.warning("Failed to parse data for {} with exception {}".format(root_pkg, e))


if __name__ == '__main__':
    generate_dep_graph()
# +
# datadir = "test_merged_data"
# entries = os.listdir(datadir)
# for entry in entries:
# with open(os.path.join(datadir, entry)) as file:
# json_data = json.load(file)
# logger.info("Inserting data for {}".format(json_data.get("root_pkg")))
# try:
# insert_package(json_data)
# except Exception as e:
# logger.warning("Failed to parse data for {} with exception {}".format(json_data.get("root_pkg"), e))
# -
| load-data-to-draph.py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# <NAME>
# separate into functions and have more configurability
# function [] = cnn_training(trainlabels,trainimages,maxtrain,iter,eta,pool,trained_parameter_file)
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as lin
import matplotlib.patches as mpatches
import scipy as sypy
from scipy import signal
from scipy import io
# Testing the function
# maxtrain=6; #maximum training images
# iter = 30; #maximum iterations
# eta=0.01; # learning rate
# n_fl=1;\
# # # %%select the pooling
# # pool='maxpool';
# pool= 'avgpool';
# [trainlabels, trainimages, testlabels, testimages] = cnnload()
from ipynb.fs.full.cnn import cnnload
from ipynb.fs.full.avgpool import avgpool
from ipynb.fs.full.avgpool import maxpool
# function defintion here
def cnn_training(trainlabels, trainimages, maxtrain, iter, eta, pool, trained_parameter_file):
    """Train a small 1-conv-layer CNN (conv -> pool -> fully connected) with backprop.

    Parameters
    ----------
    trainlabels : integer class labels (0-9), one per image.
    trainimages : array of shape (n_images, h, w), pixel values in [0, 255].
    maxtrain : maximum number of training images to use.
    iter : number of training epochs (name shadows the builtin; kept for API compat).
    eta : learning rate.
    pool : 'maxpool' or 'avgpool' — subsampling used in layer 2.
    trained_parameter_file : path of the .mat file the learned parameters are saved to.

    Fixes vs. the original: the `def` line was missing its colon (SyntaxError);
    the fully-connected forward loop used an undefined `n_fl` (now all 10 outputs,
    matching A3/B3/the backward pass); the layer-3 weight update wrote W3[3] for
    every class instead of W3[cl].
    """
    fn = 4   # number of kernels for layer 1
    ks = 5   # size of kernel (ks x ks)
    [n, h, w] = np.shape(trainimages)
    n = min(n, maxtrain)
    # Normalize data to the [-1, 1] range.
    nitrain = (trainimages / 255) * 2 - 1
    # Feature-map sizes after 'valid' convolution and 2x2 subsampling.
    h1 = h - ks + 1
    w1 = w - ks + 1
    A1 = np.zeros((fn, h1, w1))
    h2 = int(h1 / 2)
    w2 = int(w1 / 2)
    I2 = np.zeros((fn, h2, w2))
    A2 = np.zeros((fn, h2, w2))
    A3 = np.zeros(10)
    # Kernels and bias for layer 1.
    W1 = np.random.randn(fn, ks, ks) * .01
    B1 = np.ones(fn)
    # Scale parameter and bias for layer 2.
    S2 = np.random.randn(1, fn) * .01
    B2 = np.ones(fn)
    # Weights and bias parameters for the fully-connected output layer.
    W3 = np.random.randn(10, fn, h2, w2) * .01
    B3 = np.ones(10)
    # True outputs: +1 for the correct class, -1 elsewhere (tanh output range).
    Y = np.eye(10) * 2 - 1
    for it in range(0, iter):
        err = 0
        for im in range(0, n):
            # ------------ FORWARD PROP ------------
            # Layer 1: convolution with bias followed by tanh activation.
            # (kernel is flipped, so convolve2d performs cross-correlation)
            for fm in range(0, fn):
                A1[fm, :, :] = sypy.signal.convolve2d(nitrain[im, :, :], W1[fm, ::-1, ::-1], 'valid') + B1[fm]
            Z1 = np.tanh(A1)
            # Layer 2: max or average subsampling with scaling and bias.
            for fm in range(0, fn):
                if pool == 'maxpool':
                    I2[fm, :, :] = maxpool(Z1[fm, :, :])
                elif pool == 'avgpool':
                    I2[fm, :, :] = avgpool(Z1[fm, :, :])
                A2[fm, :, :] = I2[fm, :, :] * S2[:, fm] + B2[fm]
            Z2 = np.tanh(A2)
            # Layer 3: fully connected — one output per digit class.
            for cl in range(0, 10):
                A3[cl] = sypy.signal.convolve(Z2, W3[cl, ::-1, ::-1, ::-1], 'valid') + B3[cl]
            Z3 = np.tanh(A3)
            err = err + 0.5 * lin.norm(Z3.T - Y[:, trainlabels[im]], 2) ** 2
            # ------------ BACK PROP ------------
            # Error at the output layer (tanh derivative = 1 - Z^2).
            Del3 = (1 - Z3 ** 2) * (Z3.T - Y[:, trainlabels[im]])
            # Error at layer 2.
            Del2 = np.zeros(np.shape(Z2))
            for cl in range(0, 10):
                Del2 = Del2 + Del3[cl] * W3[cl]
            Del2 = Del2 * (1 - Z2 ** 2)
            # Error at layer 1: upsample Del2 back through the 2x2 pooling.
            Del1 = np.zeros(np.shape(Z1))
            for fm in range(0, fn):
                Del1[fm, :, :] = (S2[:, fm] / 4) * (1 - Z1[fm, :, :] ** 2)
                for ih in range(0, h1):
                    for iw in range(0, w1):
                        Del1[fm, ih, iw] = Del1[fm, ih, iw] * Del2[fm, ih // 2, iw // 2]
            # Update bias at layer 3.
            DB3 = Del3
            B3 = B3 - eta * DB3
            # Update weights at layer 3 (was W3[3,...]: always clobbered kernel 3).
            for cl in range(0, 10):
                DW3 = DB3[cl] * Z2
                W3[cl, :, :, :] = W3[cl, :, :, :] - eta * DW3
            # Update scale and bias parameters at layer 2.
            for fm in range(0, fn):
                DS2 = sypy.signal.convolve(Del2[fm, :, :], I2[fm, ::-1, ::-1], 'valid')
                S2[:, fm] = S2[:, fm] - eta * DS2
                DB2 = sum(sum(Del2[fm, :, :]))
                B2[fm] = B2[fm] - eta * DB2
            # Update kernel weights and bias parameters at layer 1.
            for fm in range(0, fn):
                DW1 = sypy.signal.convolve(nitrain[im, :, :], Del1[fm, ::-1, ::-1], 'valid')
                W1[fm, :, :] = W1[fm, :, :] - eta * DW1
                DB1 = sum(sum(Del1[fm, :, :]))
                B1[fm] = B1[fm] - eta * DB1
        print(['Error: ' + str(err) + ' at iteration ' + str(it)])
    # Persist everything needed to reload/evaluate the trained network.
    sypy.io.savemat(trained_parameter_file, {'W1': W1, 'B1': B1, 'S2': S2, 'B2': B2, 'W3': W3, 'B3': B3, 'maxtrain': maxtrain, 'it': it, 'eta': eta, 'err': err})
# -
| ML_Notebook/hdr_om/.ipynb_checkpoints/cnn_training-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.9 64-bit
# name: python36964bit28dca1e930dc408e886c4b0f2278e3c5
# ---
import pandas as pd

# Read sheet index 5 (the sixth sheet) of the daily report workbook.
source_workbook = '20200512.xlsx'
raw = pd.read_excel(source_workbook, sheet_name=5)
| notebooks/hospitals-extractor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append('../')
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from keras import backend as K
from keras.models import load_model
from keras.models import Model
from keras.optimizers import Adam
from scipy.misc import imread
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd512_Siamese import ssd_512
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from data_generator.object_detection_2d_data_generator import DataGenerator
from eval_utils.average_precision_evaluator import Evaluator
# # %matplotlib inline
# Set a few configuration parameters.
img_height = 512  # SSD512 network input size
img_width = 512
# model_mode indicates the way the pretrained model was created.
# In training mode, Model_Build should == 'Load_Model'. decode_detections will be called in the Evaluator.
# However, decode_detections is run on CPU and is very slow.
# In inference mode, Model_Build should == 'New_Model_Load_Weights'.
# DecodeDetections will be called when building the model. DecodeDetections is written in tensorflow and runs on GPU.
# It seems that the result under inference mode is slightly better than that under training mode.
# Maybe DecodeDetections and decode_detections are not exactly the same.
model_mode = 'inference' # 'training'#
if model_mode == 'inference':
    Model_Build = 'New_Model_Load_Weights'
elif model_mode == 'training':
    Model_Build = 'Load_Model'
else:
    raise ValueError('Undefined model_mode. model_mode should be inference or training')
model_path = '../trained_weights/SIM10K_to_City/current/G100_D10_GD_weights0_001/epoch-307_loss-9.8738_val_loss-11.3486.h5'
# Low confidence threshold for mAP so the PR curve is densely sampled;
# higher one for visualization so only confident boxes are drawn.
evaluate_mode = 'MAP' # 'Visualize_detection' #
if evaluate_mode == 'Visualize_detection':
    confidence_thresh = 0.01
elif evaluate_mode == 'MAP':
    confidence_thresh = 0.001
else:
    raise ValueError('Undefined evaluate_mode.')
Optimizer_Type = 'Adam' # 'SGD' #
batch_size = 8
Build_trainset_for_val = False # True #
# Per-head loss weights; only the last entry is active here.
loss_weights = [0.0, 0.0, 0.0] + [1.0]
# 'City_to_foggy0_01_resize_600_1200' # 'City_to_foggy0_02_resize_600_1200' # 'SIM10K_to_VOC07'
# 'SIM10K' # 'Cityscapes_foggy_beta_0_01' # 'City_to_foggy0_02_resize_400_800' # 'SIM10K_to_City_resize_400_800' #
DatasetName = 'SIM10K_to_City_resize_400_800' #'SIM10K_to_VOC07_resize_400_800' # 'City_to_foggy0_01_resize_400_800' # 'SIM10K_to_VOC12_resize_400_800' #
processed_dataset_path = './processed_dataset_h5/' + DatasetName
# The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets
# scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05]
# The anchor box scaling factors used in the original SSD300 for the MS COCO datasets
scales_coco = [0.07, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05]
scales = scales_coco
top_k = 200              # max detections kept per image
nms_iou_threshold = 0.45 # IoU threshold for per-class non-maximum suppression
# Per-dataset paths and class lists for each domain-adaptation scenario
# (source domain -> target domain). Each branch defines the same set of names.
if DatasetName == 'SIM10K_to_VOC12_resize_400_800':
    resize_image_to = (400, 800)
    # The directories that contain the images.
    train_source_images_dir = '../../datasets/SIM10K/JPEGImages'
    train_target_images_dir = '../../datasets/VOCdevkit/VOC2012/JPEGImages'
    test_target_images_dir = '../../datasets/VOCdevkit/VOC2012/JPEGImages'
    # The directories that contain the annotations.
    train_annotation_dir = '../../datasets/SIM10K/Annotations'
    test_annotation_dir = '../../datasets/VOCdevkit/VOC2012/Annotations'
    # The paths to the image sets.
    train_source_image_set_filename = '../../datasets/SIM10K/ImageSets/Main/trainval10k.txt'
    # The trainset of VOC which has 'car' object is used as train_target. The trainset of VOC2012.
    train_target_image_set_filename = '../../datasets/VOCdevkit/VOC2012_CAR/ImageSets/Main/train_target.txt'
    # The valset of VOC which has 'car' object is used as test. The valset of VOC2012.
    test_target_image_set_filename = '../../datasets/VOCdevkit/VOC2012_CAR/ImageSets/Main/test.txt'
    classes = ['background', 'car'] # Our model will produce predictions for these classes.
    train_classes = ['background', 'car', 'motorbike', 'person'] # The train_source dataset contains these classes.
    train_include_classes = [train_classes.index(one_class) for one_class in classes[1:]]
    # The test_target dataset contains these classes.
    val_classes = ['background', 'car',
                   'aeroplane', 'bicycle', 'bird', 'boat',
                   'bottle', 'bus', 'cat',
                   'chair', 'cow', 'diningtable', 'dog',
                   'horse', 'motorbike', 'person', 'pottedplant',
                   'sheep', 'sofa', 'train', 'tvmonitor']
    val_include_classes = [val_classes.index(one_class) for one_class in classes[1:]]
    # Number of positive classes, 8 for domain Cityscapes, 20 for Pascal VOC, 80 for MS COCO, 1 for SIM10K
    n_classes = len(classes) - 1
elif DatasetName == 'SIM10K_to_VOC07_resize_400_800':
    resize_image_to = (400, 800)
    # The directories that contain the images.
    train_source_images_dir = '../../datasets/SIM10K/JPEGImages'
    train_target_images_dir = '../../datasets/VOCdevkit/VOC2007/JPEGImages'
    test_target_images_dir = '../../datasets/VOCdevkit/VOC2007/JPEGImages'
    # The directories that contain the annotations.
    train_annotation_dir = '../../datasets/SIM10K/Annotations'
    test_annotation_dir = '../../datasets/VOCdevkit/VOC2007/Annotations'
    # The paths to the image sets.
    train_source_image_set_filename = '../../datasets/SIM10K/ImageSets/Main/trainval10k.txt'
    # The trainset of VOC which has 'car' object is used as train_target.
    train_target_image_set_filename = '../../datasets/VOCdevkit/VOC2007_CAR/ImageSets/Main/train_target.txt'
    # The valset of VOC which has 'car' object is used as test.
    test_target_image_set_filename = '../../datasets/VOCdevkit/VOC2007_CAR/ImageSets/Main/test.txt'
    classes = ['background', 'car'] # Our model will produce predictions for these classes.
    train_classes = ['background', 'car', 'motorbike', 'person'] # The train_source dataset contains these classes.
    train_include_classes = [train_classes.index(one_class) for one_class in classes[1:]]
    # The test_target dataset contains these classes.
    val_classes = ['background', 'car',
                   'aeroplane', 'bicycle', 'bird', 'boat',
                   'bottle', 'bus', 'cat',
                   'chair', 'cow', 'diningtable', 'dog',
                   'horse', 'motorbike', 'person', 'pottedplant',
                   'sheep', 'sofa', 'train', 'tvmonitor']
    val_include_classes = [val_classes.index(one_class) for one_class in classes[1:]]
    # Number of positive classes, 8 for domain Cityscapes, 20 for Pascal VOC, 80 for MS COCO, 1 for SIM10K
    n_classes = len(classes) - 1
elif DatasetName == 'SIM10K_to_City_resize_400_800':
    resize_image_to = (400, 800)
    # The directories that contain the images.
    train_source_images_dir = '../../datasets/SIM10K/JPEGImages'
    train_target_images_dir = '../../datasets/Cityscapes/JPEGImages'
    test_target_images_dir = '../../datasets/val_data_for_SIM10K_to_cityscapes/JPEGImages'
    # The directories that contain the annotations.
    train_annotation_dir = '../../datasets/SIM10K/Annotations'
    test_annotation_dir = '../../datasets/val_data_for_SIM10K_to_cityscapes/Annotations'
    # The paths to the image sets.
    train_source_image_set_filename = '../../datasets/SIM10K/ImageSets/Main/trainval10k.txt'
    train_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_source.txt'
    test_target_image_set_filename = '../../datasets/val_data_for_SIM10K_to_cityscapes/ImageSets/Main/test.txt'
    classes = ['background', 'car'] # Our model will produce predictions for these classes.
    train_classes = ['background', 'car', 'motorbike', 'person'] # The train_source dataset contains these classes.
    train_include_classes = [train_classes.index(one_class) for one_class in classes[1:]]
    # The test_target dataset contains these classes.
    val_classes = ['background', 'car']
    val_include_classes = 'all'
    # Number of positive classes, 8 for domain Cityscapes, 20 for Pascal VOC, 80 for MS COCO, 1 for SIM10K
    n_classes = len(classes) - 1
elif DatasetName == 'City_to_foggy0_02_resize_400_800':
    resize_image_to = (400, 800)
    # Introduction of PascalVOC: https://arleyzhang.github.io/articles/1dc20586/
    # The directories that contain the images.
    train_source_images_dir = '../../datasets/Cityscapes/JPEGImages'
    train_target_images_dir = '../../datasets/Cityscapes/JPEGImages'
    test_target_images_dir = '../../datasets/Cityscapes/JPEGImages'
    # The directories that contain the annotations.
    train_annotation_dir = '../../datasets/Cityscapes/Annotations'
    test_annotation_dir = '../../datasets/Cityscapes/Annotations'
    # The paths to the image sets.
    train_source_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_source.txt'
    train_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_target.txt'
    test_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/test.txt'
    # Our model will produce predictions for these classes.
    classes = ['background',
               'person', 'rider', 'car', 'truck',
               'bus', 'train', 'motorcycle', 'bicycle']
    train_classes = classes
    train_include_classes = 'all'
    val_classes = classes
    val_include_classes = 'all'
    # Number of positive classes, 8 for domain Cityscapes, 20 for Pascal VOC, 80 for MS COCO, 1 for SIM10K
    n_classes = len(classes) - 1
elif DatasetName == 'City_to_foggy0_01_resize_400_800':
    resize_image_to = (400, 800)
    # Introduction of PascalVOC: https://arleyzhang.github.io/articles/1dc20586/
    # The directories that contain the images.
    train_source_images_dir = '../../datasets/Cityscapes/JPEGImages'
    train_target_images_dir = '../../datasets/CITYSCAPES_beta_0_01/JPEGImages'
    test_target_images_dir = '../../datasets/CITYSCAPES_beta_0_01/JPEGImages'
    # The directories that contain the annotations.
    train_annotation_dir = '../../datasets/Cityscapes/Annotations'
    test_annotation_dir = '../../datasets/Cityscapes/Annotations'
    # The paths to the image sets.
    train_source_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_source.txt'
    train_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/train_target.txt'
    test_target_image_set_filename = '../../datasets/Cityscapes/ImageSets/Main/test.txt'
    # Our model will produce predictions for these classes.
    classes = ['background',
               'person', 'rider', 'car', 'truck',
               'bus', 'train', 'motorcycle', 'bicycle']
    train_classes = classes
    train_include_classes = 'all'
    val_classes = classes
    val_include_classes = 'all'
    # Number of positive classes, 8 for domain Cityscapes, 20 for Pascal VOC, 80 for MS COCO, 1 for SIM10K
    n_classes = len(classes) - 1
else:
    raise ValueError('Undefined dataset name.')
# +
# Build the SSD512 network and load the pretrained weights.
if Model_Build == 'New_Model_Load_Weights':
    # 1: Build the Keras model
    K.clear_session() # Clear previous models from memory.
    # import tensorflow as tf
    # from keras.backend.tensorflow_backend import set_session
    #
    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
    # config.log_device_placement = True # to log device placement (on which device the operation ran)
    # # (nothing gets printed in Jupyter, only if you run it standalone)
    # sess = tf.Session(config=config)
    # set_session(sess) # set this TensorFlow session as the default session for Keras
    # model.output = `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
    # In inference mode, the predicted locations have been converted to absolute coordinates.
    # In addition, we have performed confidence thresholding, per-class non-maximum suppression, and top-k filtering.
    G_model = ssd_512(image_size=(img_height, img_width, 3),
                      n_classes=n_classes,
                      mode=model_mode,
                      l2_regularization=0.0005,
                      scales=scales,
                      aspect_ratios_per_layer= [[1.0, 2.0, 0.5],
                                                [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                                [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                                [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                                [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                                [1.0, 2.0, 0.5],
                                                [1.0, 2.0, 0.5]],
                      two_boxes_for_ar1=True,
                      steps=[8, 16, 32, 64, 128, 256, 512],
                      offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
                      clip_boxes=False,
                      variances=[0.1, 0.1, 0.2, 0.2],
                      normalize_coords=True,
                      subtract_mean=[123, 117, 104],
                      swap_channels=[2, 1, 0],
                      confidence_thresh=confidence_thresh,
                      iou_threshold=nms_iou_threshold,
                      top_k=top_k,
                      nms_max_output_size=400)
    # 2: Load the trained weights into the model
    # by_name=True matches layers by name, so extra/missing layers are tolerated.
    G_model.load_weights(model_path, by_name=True)
else:
    raise ValueError('Undefined Model_Build. Model_Build should be New_Model_Load_Weights or Load_Model')
# +
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
# Load dataset from the created h5 file.
# NOTE(review): the train generator takes both source- and target-domain image
# lists (Siamese setup); the val generator only reads the target test split.
train_dataset = DataGenerator(dataset='train',
                              load_images_into_memory=False,
                              hdf5_dataset_path=os.path.join(processed_dataset_path, 'dataset_train.h5'),
                              filenames=train_source_image_set_filename,
                              target_filenames=train_target_image_set_filename,
                              filenames_type='text',
                              images_dir=train_source_images_dir,
                              target_images_dir=train_target_images_dir)
val_dataset = DataGenerator(dataset='val',
                            load_images_into_memory=False,
                            hdf5_dataset_path=os.path.join(processed_dataset_path, 'dataset_test.h5'),
                            filenames=test_target_image_set_filename,
                            filenames_type='text',
                            images_dir=test_target_images_dir)
# +
if evaluate_mode == 'Visualize_detection':
# Make predictions:
# 1: Set the generator for the predictions.
# For the test generator:
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
# First convert the input image to 3 channels and size img_height X img_width
# Also, convert the groundtruth bounding box
# Remember, if you want to visualize the predicted box on the original image,
# you need to apply the corresponding reverse transformation.
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)
test_generator = train_dataset.generate(batch_size=batch_size,
generator_type='base',
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=None,
returns={'processed_images',
'filenames',
'inverse_transform',
'original_images',
'original_labels'},
keep_images_without_gt=False)
# test_dataset_size = test_generator.get_dataset_size()
# print("Number of images in the test dataset:\t{:>6}".format(test_dataset_size))
# 2: Generate samples.
# The order of these returned items are not determined by the keys in returns in the previous cell,
# but by the order defined in DataGenerator.generate()
batch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(test_generator)
i = 3 # Which batch item to look at
print("Image:", batch_filenames[i])
print()
print("Ground truth boxes:\n")
print(np.array(batch_original_labels[i]))
# 3: Make predictions.
y_pred = G_model.predict(batch_images)
# 4: Convert the predictions for the original image.
y_pred_decoded_inv = apply_inverse_transforms(y_pred, batch_inverse_transforms)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print(' class conf xmin ymin xmax ymax')
print(y_pred_decoded_inv[i])
# 5: Draw the predicted boxes onto the image
# Set the colors for the bounding boxes
colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()
plt.figure(figsize=(20,12))
plt.imshow(batch_original_images[i])
current_axis = plt.gca()
# for box in batch_original_labels[i]:
# xmin = box[1]
# ymin = box[2]
# xmax = box[3]
# ymax = box[4]
# label = '{}'.format(classes[int(box[0])])
# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))
# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha': 1.0})
good_prediction = [val for val in y_pred_decoded_inv[i] if val[1] > 0.3 ] # confidence_thresh
for box in good_prediction:
xmin = box[2]
ymin = box[3]
xmax = box[4]
ymax = box[5]
color = colors[int(box[0])]
label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor': color, 'alpha':1.0})
# for box in y_pred_decoded_inv[i]:
# xmin = box[2]
# ymin = box[3]
# xmax = box[4]
# ymax = box[5]
# color = colors[int(box[0])]
# label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
elif evaluate_mode == 'MAP':
evaluator = Evaluator(model=G_model,
n_classes=n_classes,
data_generator=val_dataset,
model_mode=model_mode)
results = evaluator(img_height=img_height,
img_width=img_width,
batch_size=batch_size,
data_generator_mode='resize',
round_confidences=False,
matching_iou_threshold=0.5,
border_pixels='include',
sorting_algorithm='quicksort',
average_precision_mode='sample',
num_recall_points=11,
ignore_neutral_boxes=True,
return_precisions=True,
return_recalls=True,
return_average_precisions=True,
verbose=True)
mean_average_precision, average_precisions, precisions, recalls = results
for i in range(1, len(average_precisions)):
print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 3)))
print()
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))
m = max((n_classes + 1) // 2, 2)
n = 2
fig, cells = plt.subplots(m, n, figsize=(n*8, m*8))
for i in range(m):
for j in range(n):
if n*i+j+1 > n_classes: break
cells[i, j].plot(recalls[n*i+j+1], precisions[n*i+j+1], color='blue', linewidth=1.0)
cells[i, j].set_xlabel('recall', fontsize=14)
cells[i, j].set_ylabel('precision', fontsize=14)
cells[i, j].grid(True)
cells[i, j].set_xticks(np.linspace(0, 1, 11))
cells[i, j].set_yticks(np.linspace(0, 1, 11))
cells[i, j].set_title("{}, AP: {:.3f}".format(classes[n*i+j+1], average_precisions[n*i+j+1]), fontsize=16)
else:
raise ValueError('Undefined evaluate_mode.')
# +
# Make predictions:
# 1: Set the generator for the predictions.
# For the test generator:
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
# First convert the input image to 3 channels and size img_height X img_width
# Also, convert the groundtruth bounding box
# Remember, if you want to visualize the predicted box on the original image,
# you need to apply the corresponding reverse transformation.
convert_to_3_channels = ConvertTo3Channels()
resize = Resize(height=img_height, width=img_width)
test_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[convert_to_3_channels,
resize],
label_encoder=None,
returns={'processed_images',
'filenames',
'inverse_transform',
'original_images',
'original_labels'},
keep_images_without_gt=False)
# test_dataset_size = test_generator.get_dataset_size()
# print("Number of images in the test dataset:\t{:>6}".format(test_dataset_size))
# 2: Generate samples.
# The order of these returned items are not determined by the keys in returns in the previous cell,
# but by the order defined in DataGenerator.generate()
batch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(test_generator)
i = 5 # Which batch item to look at
print("Image:", batch_filenames[i])
print()
print("Ground truth boxes:\n")
print(np.array(batch_original_labels[i]))
# 3: Make predictions.
y_pred = G_model.predict(batch_images)
# 4: Convert the predictions for the original image.
y_pred_decoded_inv = apply_inverse_transforms(y_pred, batch_inverse_transforms)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print(' class conf xmin ymin xmax ymax')
print(y_pred_decoded_inv[i])
# 5: Draw the predicted boxes onto the image
# Set the colors for the bounding boxes
colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()
plt.figure(figsize=(20,12))
plt.imshow(batch_original_images[i])
current_axis = plt.gca()
# for box in batch_original_labels[i]:
# xmin = box[1]
# ymin = box[2]
# xmax = box[3]
# ymax = box[4]
# label = '{}'.format(classes[int(box[0])])
# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))
# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha': 1.0})
good_prediction = [val for val in y_pred_decoded_inv[i] if val[1] > 0.3 ] # confidence_thresh
for box in good_prediction:
xmin = box[2]
ymin = box[3]
xmax = box[4]
ymax = box[5]
color = colors[int(box[0])]
label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor': color, 'alpha':1.0})
# for box in y_pred_decoded_inv[i]:
# xmin = box[2]
# ymin = box[3]
# xmax = box[4]
# ymax = box[5]
# color = colors[int(box[0])]
# label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
# current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
# current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
# -
y_pred_decoded_inv[i][-1][1]
| src/.ipynb_checkpoints/ssd512_siamese_evaluation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# Cleaning and resampling ISH temperature datasets
# +
# boilerplate includes
import sys
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
import datetime
import scipy.interpolate
# import re
from IPython.display import display, HTML
# %matplotlib notebook
plt.style.use('seaborn-notebook')
pd.set_option('display.max_columns', None)
# -
# ## Constants / Parameters
# +
# PARAMETERS (might be overridden by a calling script)
# This notebook can be run standalone or driven via %run from a batch notebook;
# SUBNOTEBOOK_FLAG tells us which mode we are in.
# if not calling from another script (batch), SUBNOTEBOOK_FLAG might not be defined
try:
SUBNOTEBOOK_FLAG
except NameError:
SUBNOTEBOOK_FLAG = False
# Not calling as a sub-script? define params here
if not SUBNOTEBOOK_FLAG:
# SET PARAMETER VARIABLES HERE UNLESS CALLING USING %run FROM ANOTHER NOTEBOOK
DATADIR = '../data/temperatures/ISD'
OUTDIR = '../data/temperatures'
FTPHOST = 'ftp.ncdc.noaa.gov'
FETCH_STATIONS_LIST_FILE = True
TEMP_COL = 'AT' # The label of the hourly temperature column we make/output
# Resampling and interpolation parameters
# spline order used for converting to on-the-hour and filling small gaps
BASE_INTERPOLATION_K = 1 # 1 for linear interpolation
# give special treatment to data gaps longer than...
POTENTIALLY_PROBLEMATIC_GAP_SIZE = pd.Timedelta('03:00:00')
# Time range to use for computing normals (30 year, just like NOAA uses)
NORM_IN_START_DATE = '1986-07-01'
NORM_IN_END_DATE = '2016-07-01'
# Time range of normals to output to use when running 'medfoes on normal temperature' (2 years, avoiding leapyears)
NORM_OUT_START_DATE = '2014-01-01'
NORM_OUT_END_DATE = '2015-12-31 23:59:59'
# NOTE(review): STATION_CALLSIGN (and SUPPRESS_FIGURE_DISPLAY, used below) are
# not defined in this cell; they appear to be supplied by the calling
# notebook/script -- running standalone as-is would raise NameError. Verify.
print("Cleaning temperature data for ",STATION_CALLSIGN)
# -
# Potentially turn interactive figure display off
if SUPPRESS_FIGURE_DISPLAY:
plt.ioff()
# # Interpolation and cleanup
# Load the data
# Reads the per-station hourly air-temperature table written by the fetch step;
# 'AT' is the temperature column (see TEMP_COL above).
fn = "{}_AT.h5".format(STATION_CALLSIGN)
ot = pd.read_hdf(os.path.join(DATADIR,fn), 'table')
# ### Deduplication
# More precisely, we can only have one value for each time,
# otherwise interpolation doesn't make much sense (or work)
t = ot.copy(deep=True) # not needed, just safety
# just showing the duplicates
tmp = t[t.index.duplicated(keep=False)].sort_index()
print(len(tmp), 'duplicates')
#display(tmp) # decomment to see the list of duplicates
# actually remove duplicates, just keeping the first
# @TCC could somehow try to identify the most reliable or take mean or such
t = t[~t.index.duplicated(keep='first')].sort_index()
# ## Outlier removal
# Using a deviation from running median/sigam threshold method
# +
# Iterative outlier removal: drop points whose deviation from a centered
# rolling median (optionally normalized by a centered rolling std "sigma")
# exceeds `thresh`; repeat until a pass removes nothing.
# fairly permissive settings
rolling_sigma_window = 24*5 # None or 0 to just use median instead of median/sigma
rolling_median_window = 5
thresh = 1.5 # deviation from median/sigma to trigger removal
multipass = True # cycle until no points removed, or False for not
tin = t # keep the pre-cleaning series for the comparison plot below
cum_num = 0 # cumulative count of removed points across passes
while multipass:
if rolling_sigma_window:
# local variability estimate used to normalize the deviation
sigma = t['AT'].rolling(window=rolling_sigma_window, center=True).std()
else:
sigma = 1
# deviation of each point from its local rolling median, in "sigma" units
diff = (t['AT']-t['AT'].rolling(window=rolling_median_window, center=True).median())/sigma
outlier_mask = diff.abs() > thresh
num = np.count_nonzero(outlier_mask)
cum_num += num
print("removing {} points".format(num))
if num == 0:
break
# plotting each step
# ax = t.plot(linestyle='-', marker='*')
# if np.count_nonzero(outlier_mask) > 0:
# t[outlier_mask].plot(ax=ax, linestyle='none', marker='o', color='red')
# diff.abs().plot(ax=ax)
# if np.count_nonzero(outlier_mask) > 0:
# diff.abs()[outlier_mask].plot(ax=ax, linestyle='none', marker='o', color='yellow')
t = t[~outlier_mask]
# -
# plot showing what is being removed
# (red markers = removed points; red line = original; blue = cleaned)
if cum_num > 0:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax = tin[~tin.index.isin(t.index)].plot(ax=ax, linestyle='none', marker='o', color='r', zorder=8)
ax = tin.plot(ax=ax, linestyle='-', linewidth=1, marker=None, color='red')
ax = t.plot(ax=ax, linestyle='-', marker='.', color='blue')
ax.set_ylabel('air temperature [$\degree$ C]')
ax.legend(['outlier', 'original', 'cleaned'])
ax.set_title(STATION_CALLSIGN)
# saving figure
# saving
# NOTE(review): indentation was lost in this extract -- fig/savefig are
# presumably inside the `if cum_num > 0` branch, else fig is undefined; verify.
fn = '{}_outlier.png'.format(STATION_CALLSIGN)
fig.savefig(os.path.join(OUTDIR,fn))
#mpld3.save_html(fig, '{}_outler.html'.format(STATION_CALLSIGN))
# Actually apply the outlier removal
ot = t
# ## "by-hand" fixes for particular datasets, hopefully minimal
def remove_spurious_temps(ot, query_op, date1, date2=None, plot=True, inplace=False):
    """Drop readings matching ``AT <query_op>`` within an inclusive date range.

    Parameters
    ----------
    ot : pandas.DataFrame
        Temperature data with a DatetimeIndex and an 'AT' column.
    query_op : str
        Comparison appended to 'AT' for ``DataFrame.query``, e.g. ``'< 0'``.
    date1, date2 : str or datetime-like
        Range to scan; ``date2`` defaults to ``date1`` (a single day).
    plot : bool
        If True, plot the range before and after removal.
        BUG FIX: this flag was previously ignored and plots were always drawn.
    inplace : bool
        If True, drop rows from ``ot`` itself; otherwise return a new frame.

    Returns
    -------
    pandas.DataFrame
        The cleaned data (the same object as ``ot`` when ``inplace=True``).
    """
    if date2 is None:
        date2 = date1
    window = ot.loc[date1:date2]
    ax = None
    if plot:
        ax = window.plot(ax=None, linestyle='-', marker='o')  # "before" view
    out_t = ot.drop(window.query('AT {}'.format(query_op)).index, inplace=inplace)
    if inplace:
        out_t = ot  # drop(inplace=True) returns None; the mutated frame is ot
    if plot:
        out_t.loc[date1:date2].plot(ax=ax, linestyle='-', marker='*')  # "after" view
        ax.set_title("Remove AT {}, range=[{}:{}]".format(query_op, date1, date2))
    return out_t
STATION_CALLSIGN
# +
# Station-specific manual corrections for known-bad stretches of data.
if STATION_CALLSIGN == 'KSNA': # KSNA (Orange County)
# 2016-08-14 to 2016-08-15 overnight has some < 0 values when they should be more like 19-20
remove_spurious_temps(ot, '< 0', '2016-08-14', '2016-08-15', inplace=True)
if STATION_CALLSIGN == 'KSFO':
remove_spurious_temps(ot, '< 0', '1976-07-16', '1976-07-17', inplace=True)
if STATION_CALLSIGN == 'KRIV':
remove_spurious_temps(ot, '< 0', '1995-11-15', '1995-11-15', inplace=True)
# -
# ### Identify bigger gaps which will get filled day-over-day interpolation
# Interpolate based on same hour-of-day across days.
# +
# flag the gaps in the original data that are possibly too long for the simple interpolation we did above
gaps_filename = os.path.join(OUTDIR, "{}_AT_gaps.tsv".format(STATION_CALLSIGN))
# gap length = time since the previous sample; index = the gap's end time
gaps = ot.index.to_series().diff()[1:]
idx = np.flatnonzero(gaps > POTENTIALLY_PROBLEMATIC_GAP_SIZE)
prob_gaps = gaps[idx]
# save to file for future reference
with open(gaps_filename,'w') as fh:
# output the gaps, biggest to smallest, to review
print('#', STATION_CALLSIGN, ot.index[0].isoformat(), ot.index[-1].isoformat(), sep='\t', file=fh)
print('# Potentially problematic gaps:', len(prob_gaps), file=fh)
tmp = prob_gaps.sort_values(ascending=False)
for i in range(len(tmp)):
# [gap start, gap end] reconstructed from the end timestamp and the duration
rng = [tmp.index[i]-tmp.iloc[i], tmp.index[i]]
print(rng[0], rng[1], rng[1]-rng[0], sep='\t', file=fh)
if not SUPPRESS_FIGURE_DISPLAY:
# go ahead and just print it here too
with open(gaps_filename) as fh:
for l in fh:
print(l, end='')
else:
print('# Potentially problematic gaps:', len(prob_gaps))
# -
# ### Interpolate to produce on-the-hour values
# Simple interpolation hour-to-hour
# +
# Interpolate to get on-the-hour values
# New index: whole days at 1-hour spacing spanning the observed range (UTC).
newidx = pd.date_range(start=ot.index[0].round('d')+pd.Timedelta('0h'),
end=ot.index[-1].round('d')-pd.Timedelta('1s'),
freq='1h', tz='UTC')
# Interpolation operates on the int64 nanosecond representation of the
# timestamps; points outside the observed range become NaN.
if True:
# Simple linear interpolation
at_interp_func = scipy.interpolate.interp1d(ot.index.astype('int64').values,
ot['AT'].values,
kind='linear',
fill_value=np.nan, #(0,1)
bounds_error=False)
else:
# Should be better method, but has some screwy thing using updated data
at_interp_func = scipy.interpolate.InterpolatedUnivariateSpline(
ot.index.astype('int64').values,
ot['AT'].values,
k=BASE_INTERPOLATION_K,
ext='const')
# nt: the hourly-resampled series evaluated on the new index
nt = pd.DataFrame({'AT':at_interp_func(newidx.astype('int64').values)},
index=newidx)
# -
# ### Fill the bigger gaps
# +
# Fill the big gaps using day-to-day (at same hour) interpolation:
# blank out the flagged gaps, reshape to one row per day (24 hourly columns),
# then interpolate each hour-of-day column down the days.
gap_pad = pd.Timedelta('-10m')  # contract the gaps a bit so we don't remove good/decent edge values
t = nt.copy(deep=True)  # operate on a copy so we can compare with nt
# fill the gap ranges with nan (replacing the default interpolation)
for i in range(len(prob_gaps)):
    # [gap start, gap end] from the gap's end timestamp and duration
    rng = [prob_gaps.index[i] - prob_gaps.iloc[i], prob_gaps.index[i]]
    t[rng[0] - gap_pad:rng[1] + gap_pad] = np.nan
# reshape so each row is a whole day's (24) data points
rows = int(t.shape[0] / 24)
foo = pd.DataFrame(t.iloc[:rows * 24].values.reshape((rows, 24)))
# simple linear interpolation down each column (same hour across days)
# BUG FIX: the keyword was misspelled 'metnod' (silently ignored or a
# TypeError depending on pandas version); 'linear' matches the stated intent.
foo.interpolate(method='linear', limit=24 * 60, limit_direction='both', inplace=True)
# # Alternative interpolation using running means
# # @TCC not great for very large gaps
# RUNNING_MEAN_WINDOW_SIZE = 3
# while True:
#     # interpolate each column (temp at hour x on each day)
#     # filling nans with values from a windowed running mean
#     foo.fillna(foo.rolling(window=RUNNING_MEAN_WINDOW_SIZE, min_periods=1, center=True).mean(), inplace=True)
#     if not foo.isnull().values.any():
#         break
# reshape back to a single hourly series
t = pd.DataFrame({'AT': foo.stack(dropna=False).values}, index=t.index[:rows * 24])
# -
# -
# # Check that it looks OK...
# ### Plot the temperature data
# +
# You can specify a specific range by setting r1 and r2, or None for full range
#r1, r2 = '1952-05-07', '1952-05-23'
r1, r2 = None, None
if r1 is None:
r1 = t.index[0]
if r2 is None:
r2 = t.index[-1]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# raw observations (dots), hourly interpolation (line), gap-filled (red)
ax.plot(ot.loc[r1:r2].index, ot.loc[r1:r2]['AT'], linestyle='none', marker='.', label='raw')
#ax.scatter(ot.loc[r1:r2].index, ot.loc[r1:r2]['AT'], marker='.', label='raw')
ax.plot(nt.loc[r1:r2].index, nt.loc[r1:r2]['AT'], linestyle='-', marker=None, lw=1, label='interpolated')
# ax.plot(t.loc[r1:r2].index, t.loc[r1:r2]['AT'], '-*', lw=1, label='filled')
# @TCC maybe make a single dataframe with the parts I don't want deleted or masked out
for i in range(len(prob_gaps)):
if i == 0: # only label first segment
label = 'filled'
else:
label = ''
# NOTE(review): this indexes `tmp` (gaps sorted largest-first) rather than
# `prob_gaps` (chronological); same set of gaps either way, only order differs.
rng = [tmp.index[i]-tmp.iloc[i], tmp.index[i]]
ax.plot(t.loc[rng[0]:rng[1]].index, t.loc[rng[0]:rng[1]]['AT'], '.-', lw=1, color='r', label=label)
# # mark the big gaps with vertical lines
# for i in range(len(prob_gaps)):
# ax.axvline(prob_gaps.index[i]-prob_gaps.iloc[i],
# c='k', ls=':', lw=0.5)
# ax.axvline(prob_gaps.index[i],
# c='k', ls=':', lw=0.5)
ax.set_xlim((r1,r2))
ax.set_xlabel('DateTime')
ax.set_ylabel('Temperature [$\degree$C]')
ax.set_title(STATION_CALLSIGN)
ax.legend()
# -
# saving
fig.savefig(os.path.join(OUTDIR, '{}_cleaning.png'.format(STATION_CALLSIGN)))
#mpld3.save_html(fig, '{}_cleaning.html'.format(STATION_CALLSIGN))
# ### Save final cleaned temperatures
# Save the cleaned hourly temperature series as compressed HDF5.
outfn = os.path.join(OUTDIR, "{}_AT_cleaned".format(STATION_CALLSIGN))
print("Saving cleaned temp data to:", outfn)
# BUG FIX: 'data_colums' was a typo for 'data_columns' -- depending on pandas
# version it was either silently ignored or raised a TypeError.
t.to_hdf(outfn + '.h5', 'table', mode='w',
         data_columns=True, complevel=5, complib='bzip2',
         dropna=False)
# # Compute the normals
# Need the normal (repeated so it covers 2 years) for running medfoes on the normals
#
# Not needed for this particular study
# +
# # Time range to use for computing normals (30 year, just like NOAA uses)
# NORM_IN_START_DATE = '1986-07-01'
# NORM_IN_END_DATE = '2016-07-01'
# # Time range or normals to output to use when running 'medfoes on normal temperature' (2 years, avoiding leapyears)
# NORM_OUT_START_DATE = '2014-01-01'
# NORM_OUT_END_DATE = '2015-12-31 23:59:59'
# # %run "Temperature functions.ipynb" # for compute_year_over_year_norm function
# tempnorm = compute_year_over_year_norm(ot,
# NORM_OUT_START_DATE, NORM_OUT_END_DATE,
# NORM_IN_START_DATE, NORM_IN_END_DATE,
# freq='hourly',
# interp_method='linear',
# norm_method='mean')
# # Save as csv for medfoes input
# outfn = os.path.join(OUTDIR, "{}_AT_cleaned_normalsX2.csv".format(STATION_CALLSIGN))
# print("Saving temp normals data to:",outfn)
# tempnorm.to_csv(outfn, index_label='datetime')
# tempnorm.plot()
# -
# Turn interactive display back on, if we turned it off earlier
if SUPPRESS_FIGURE_DISPLAY:
    plt.ion()  # BUG FIX: was plt.ioff(), which left interactive mode disabled
| code/Cleaning temperatures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # A Brief Review of Radiation
#
# This notebook is part of [The Climate Laboratory](https://brian-rose.github.io/ClimateLaboratoryBook) by [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section1'></a>
#
# ## 1. Emission temperature and lapse rates
# ____________
# + [markdown] slideshow={"slide_type": "slide"}
# Planetary energy balance is the foundation for all climate modeling. So far we have expressed this through a globally averaged budget
#
# $$C \frac{d T_s}{dt} = (1-\alpha) Q - OLR$$
#
# and we have written the OLR in terms of an emission temperature $T_e$ where by definition
#
# $$ OLR = \sigma T_e^4 $$
# + [markdown] slideshow={"slide_type": "slide"}
# Using values from the observed planetary energy budget, we found that $T_e = 255$ K
#
# The emission temperature of the planet is thus about 33 K colder than the mean surface temperature (288 K).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Where in the atmosphere do we find $T = T_e = 255$ K?
#
# That's about -18ºC.
#
# Let's plot **global, annual average observed air temperature** from NCEP reanalysis data.
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
# + slideshow={"slide_type": "slide"}
## The NOAA ESRL server is shutdown! January 2019
ncep_url = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.derived/"
ncep_air = xr.open_dataset( ncep_url + "pressure/air.mon.1981-2010.ltm.nc",
use_cftime=True)
#url = 'http://apdrc.soest.hawaii.edu:80/dods/public_data/Reanalysis_Data/NCEP/NCEP/clima/pressure/air'
#air = xr.open_dataset(url)
## The name of the vertical axis is different than the NOAA ESRL version..
#ncep_air = air.rename({'lev': 'level'})
print( ncep_air)
# + slideshow={"slide_type": "slide"}
# Take global, annual average and convert to Kelvin
# Area-weight by cos(latitude): lat/lon grid cells cover less area near the poles.
coslat = np.cos(np.deg2rad(ncep_air.lat))
weight = coslat / coslat.mean(dim='lat')
# NOTE(review): despite the comment above, no unit conversion is visible here --
# presumably the NCEP 'air' field is already in the units plotted later; verify.
Tglobal = (ncep_air.air * weight).mean(dim=('lat','lon','time'))
Tglobal
# + slideshow={"slide_type": "slide"}
# a "quick and dirty" visualization of the data
Tglobal.plot()
# + [markdown] slideshow={"slide_type": "slide"}
# Let's make a better plot.
#
# Here we're going to use a package called `metpy` to automate plotting this temperature profile in a way that's more familiar to meteorologists: a so-called *skew-T* plot.
# + slideshow={"slide_type": "slide"}
from metpy.plots import SkewT
# + slideshow={"slide_type": "slide"}
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
skew.plot(Tglobal.level, Tglobal, color='black', linestyle='-', linewidth=2, label='Observations')
skew.ax.set_ylim(1050, 10)
skew.ax.set_xlim(-75, 45)
# Add the relevant special lines
skew.plot_dry_adiabats(linewidth=0.5)
skew.plot_moist_adiabats(linewidth=0.5)
#skew.plot_mixing_lines()
skew.ax.legend()
skew.ax.set_title('Global, annual mean sounding from NCEP Reanalysis',
fontsize = 16)
# + [markdown] slideshow={"slide_type": "slide"}
# Note that surface temperature in global mean is indeed about 288 K or 15ºC as we keep saying.
#
# So where do we find temperature $T_e=255$ K or -18ºC?
#
# Actually in mid-troposphere, near 500 hPa or about 5 km height.
#
# We can infer that much of the outgoing longwave radiation actually originates far above the surface.
# + [markdown] slideshow={"slide_type": "slide"}
# Recall that our observed global energy budget diagram shows 217 out of 239 W m$^{-2}$ total OLR emitted by the atmosphere and clouds, only 22 W m$^{-2}$ directly from the surface.
#
# This is due to the **greenhouse effect**.
# + [markdown] slideshow={"slide_type": "slide"}
# So far we have dealt with the greenhouse in a very artificial way in our energy balance model by simply assuming
#
# $$ \text{OLR} = \tau \sigma T_s^4 $$
#
# i.e., the OLR is reduced by a constant factor from the value it would have if the Earth emitted as a blackbody at the surface temperature.
#
# Now it's time to start thinking a bit more about how the radiative transfer process actually occurs in the atmosphere, and how to model it.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section2'></a>
#
# ## 2. Solar Radiation
# ____________
#
# Let's plot a spectrum of solar radiation.
#
# For Python details, click to expand the code blocks (or see the code in the notebook).
# + slideshow={"slide_type": "skip"} tags=["hide_input"]
# Using pre-defined code for the Planck function from the climlab package
from climlab.utils.thermo import Planck_wavelength
# + slideshow={"slide_type": "skip"} tags=["hide_input"]
# approximate emission temperature of the sun in Kelvin
Tsun = 5780.
# boundaries of visible region in nanometers
UVbound = 390.
IRbound = 700.
# array of wavelengths
wavelength_nm = np.linspace(10., 3500., 400)
to_meters = 1E-9 # conversion factor
# + slideshow={"slide_type": "skip"} tags=["hide_input"]
label_size = 16
fig, ax = plt.subplots(figsize=(14,7))
ax.plot(wavelength_nm,
Planck_wavelength(wavelength_nm * to_meters, Tsun))
ax.grid()
ax.set_xlabel('Wavelength (nm)', fontsize=label_size)
ax.set_ylabel('Spectral radiance (W sr$^{-1}$ m$^{-3}$)', fontsize=label_size)
# Mask out points outside of this range
wavelength_vis = np.ma.masked_outside(wavelength_nm, UVbound, IRbound)
# Shade the visible region
ax.fill_between(wavelength_vis, Planck_wavelength(wavelength_vis * to_meters, Tsun))
title = 'Blackbody emission curve for the sun (T = {:.0f} K)'.format(Tsun)
ax.set_title(title, fontsize=label_size);
ax.text(280, 0.8E13, 'Ultraviolet', rotation='vertical', fontsize=12)
ax.text(500, 0.8E13, 'Visible', rotation='vertical', fontsize=16, color='w')
ax.text(800, 0.8E13, 'Infrared', rotation='vertical', fontsize=12);
# + [markdown] slideshow={"slide_type": "slide"}
# - Spectrum peaks in the visible range
# - most energy at these wavelengths.
# - No coincidence that our eyes are sensitive to this range of wavelengths!
# - Longer wavelengths called “infrared”, shorter wavelengths called “ultraviolet”.
#
# The shape of the spectrum is a fundamental characteristic of radiative emissions
# (think about the color of burning coals in a fire – cooler = red, hotter = white)
# + [markdown] slideshow={"slide_type": "slide"}
# Theory and experiments tell us that both the total flux of emitted radiation, and the wavelength of maximum emission, depend only on the temperature of the source!
#
# The theoretical spectrum was worked out by Max Planck and is therefore known as the “Planck” spectrum (or simply blackbody spectrum).
# + tags=["hide_input"]
fig, ax = plt.subplots(figsize=(14,7))
wavelength_um = wavelength_nm / 1000
for T in [24000,12000,6000,3000]:
ax.plot(wavelength_um,
(Planck_wavelength(wavelength_nm * to_meters, T) / T**4),
label=str(T) + ' K')
ax.legend(fontsize=label_size)
ax.set_xlabel('Wavelength (um)', fontsize=label_size)
ax.set_ylabel('Normalized spectral radiance (W sr$^{-1}$ m$^{-3}$ K$^{-4}$)', fontsize=label_size)
ax.set_title("Normalized blackbody emission spectra $T^{-4} B_{\lambda}$ for different temperatures");
# + [markdown] slideshow={"slide_type": "slide"}
# Going from cool to warm:
#
# - total emission increases
# - maximum emission occurs at shorter wavelengths.
#
# The **integral of these curves over all wavelengths** gives us our familiar $\sigma T^4$
# + [markdown] slideshow={"slide_type": "slide"}
# Mathematically it turns out that
#
# $$ λ_{max} T = \text{constant} $$
#
# (known as Wien’s displacement law).
# + [markdown] slideshow={"slide_type": "slide"}
# By fitting the observed solar emission to a blackbody curve, we can deduce that the emission temperature of the sun is about 6000 K.
#
# Knowing this, and knowing that the solar spectrum peaks at 0.6 micrometers, we can calculate the wavelength of maximum terrestrial radiation as
#
# $$ λ_{max}^{Earth} = 0.6 ~ \mu m \frac{6000}{255} = 14 ~ \mu m $$
#
# This is in the far-infrared part of the spectrum.
# + [markdown] slideshow={"slide_type": "slide"}
# ____________
# <a id='section3'></a>
#
# ## 3. Terrestrial Radiation and absorption spectra
# ____________
# -
# ## Terrestrial versus solar wavelengths
#
# Now let's look at normalized blackbody curves for Sun and Earth:
# + tags=["hide_input"]
fig, ax = plt.subplots(figsize=(14,7))
wavelength_um = np.linspace(0.1, 200, 10000)
wavelength_meters = wavelength_um / 1E6
for T in [6000, 255]:
ax.semilogx(wavelength_um,
(Planck_wavelength(wavelength_meters, T) / T**4 * wavelength_meters),
label=str(T) + ' K')
ax.legend(fontsize=label_size)
ax.set_xlabel('Wavelength (um)', fontsize=label_size)
ax.set_ylabel('Normalized spectral radiance (W sr$^{-1}$ m$^{-2}$ K$^{-4}$)', fontsize=label_size)
ax.set_title("Normalized blackbody emission spectra $T^{-4} \lambda B_{\lambda}$ for the sun ($T_e = 6000$ K) and Earth ($T_e = 255$ K)",
fontsize=label_size);
# + [markdown] slideshow={"slide_type": "slide"}
# There is essentially no overlap between the two spectra.
#
# **This is the fundamental reason we can discuss the solar “shortwave” and terrestrial “longwave” radiation as two distinct phenomena.**
#
# In reality all radiation exists on a continuum of different wavelengths. But in climate science we can get a long way by thinking in terms of a very simple “two-stream” approximation (short and longwave). We’ve already been doing this throughout the course so far!
# -
# ### Atmospheric absorption spectra
#
# Now look at the atmospheric **absorption spectra**.
#
# (fraction of radiation at each wavelength that is absorbed on a single vertical path through the atmosphere)
# <img src='../images/MarshallPlumbFig2.5.png'>
# + [markdown] slideshow={"slide_type": "skip"}
# *Figure reproduced from Marshall and Plumb (2008): Atmosphere, Ocean, and Climate Dynamics*
# + [markdown] slideshow={"slide_type": "slide"}
# - Atmosphere is almost completely transparent in the visible range, right at the peak of the solar spectrum
# - Atmosphere is very opaque in the UV
# - Opacity across the IR spectrum is highly variable!
# - Look at the gases associated with various absorption features:
# - Main players include H$_2$O, CO$_2$, N$_2$O, O$_2$.
# - Compare to major constituents of atmosphere, in decreasing order:
# - 78% N$_2$
# - 21% O$_2$
# - 1% Ar
# - H$_2$O (variable)
# + [markdown] slideshow={"slide_type": "slide"}
# - The dominant constituent gases N$_2$ and O$_2$ are nearly completely transparent across the entire spectrum (there are O$_2$ absorption features in far UV, but little energy at these wavelengths).
# - The greenhouse effect mostly involves trace constituents:
# - O$_3$ = 500 ppb
# - N$_2$O = 310 ppb
# - CO$_2$ = 400 ppm (but rapidly increasing!)
# - CH$_4$ = 1.7 ppm
# - Note that most of these are tri-atomic molecules! There are fundamental reasons for this: these molecules have modes of rotational and vibration that are easily excited at IR wavelengths. See courses in radiative transfer!
#
# + [markdown] slideshow={"slide_type": "skip"}
# ____________
#
# ## Credits
#
# This notebook is part of [The Climate Laboratory](https://brian-rose.github.io/ClimateLaboratoryBook), an open-source textbook developed and maintained by [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
#
# It is licensed for free and open consumption under the
# [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) license.
#
# Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to Brian Rose. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation.
# ____________
# -
| content/courseware/radiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Deploying and serving a Deep Learning model with TensorFlow Serving
# ## Tensorflow Extended and Tensorflow Serving
# TensorFlow Extended (TFX) is an end-to-end platform for deploying production ML pipelines.
#
#
# 
#
# **How it works**
#
# > When you’re ready to move your models from research to production, use TFX to create and manage a production pipeline
# >> https://www.tensorflow.org/tfx
#
# When you’re ready to go beyond training a single model, or ready to put your amazing model to work and move it to production, TFX is there to help you build a complete ML pipeline.
#
# A TFX pipeline is a sequence of components that implement an ML pipeline which is specifically designed for scalable, high-performance machine learning tasks. That includes modeling, training, serving inference, and managing deployments to online, native mobile, and JavaScript targets.
#
#
# There are many great components in the pipeline. But for this tutorial I will focus on **Tensorflow Serving**. One of the most important and interesting component of TFX.
#
# ### So, what is **Tensorflow Serving**?
#
#
# Machine Learning (ML) serving systems need to support **model versioning** (for model updates with a rollback option) and **multiple models** (for experimentation via A/B testing), while ensuring that concurrent models achieve high throughput on hardware accelerators (GPUs and TPUs) with low latency. TensorFlow Serving has proven performance handling tens of millions of inferences per second at Google.
#
# 
#
# #### Architecture
#
# TensorFlow Serving is a flexible, high-performance serving system for machine learning models, designed for production environments. TensorFlow Serving makes it easy to deploy new algorithms and experiments, while keeping the same server architecture and APIs. TensorFlow Serving provides out of the box integration with TensorFlow models, but can be easily extended to serve other types of models.
#
# #### Key concept:
#
# * Servables: Servables are the central abstraction in TensorFlow Serving. Servables are the underlying objects that clients use to perform computation (for example, a lookup or inference).
#
#
# * Loader: Loaders manage a servable's life cycle. The Loader API enables common infrastructure independent from specific learning algorithms, data or product use-cases involved. Specifically, Loaders standardize the APIs for loading and unloading a servable.
#
#
# * Source: Sources are plugin modules that find and provide servables. Each Source provides zero or more servable streams. For each servable stream, a Source supplies one Loader instance for each version it makes available to be loaded. (A Source is actually chained together with zero or more SourceAdapters, and the last item in the chain emits the Loaders.
#
#
#
# * Manager: Managers listen to Sources and track all versions. The Manager tries to fulfill Sources' requests, but may refuse to load an aspired version if, say, required resources aren't available.
#
# #### Tensorflow Serving vs Python Flask vs Django vs other
#
# What makes Tensorflow Serving is huge more advance than other web application framework like Python Flask or Django?
#
# When deploying a machine learning model to production, we go through these steps:
#
# 1. Build web application (Flask, Django, ..)
#
# 2. Create API endpoint to handle the request and communicate with backend.
#
# 3. Load pretrain model
#
# 4. Pre-processing, predict
#
# 5. Return results to client
#
# Example of a Python Flask ***app.py***:
#
#
#
#
#
#
#
# ## Example of Python Flask **app.py**, handle request and parse result from Text classification model
# +
import os
import random
import string
from flask import Flask, request, render_template
import torch
import torch.nn.functional as F
import csv
import pandas as pd
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy as np
# Flask application object and module-level state for the demo service.
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
IMAGES_FOLDER = "flask_images"
# Random alphanumeric string of length n (helper; not referenced by the handlers below).
rand_str = lambda n: "".join([random.choice(string.ascii_letters + string.digits) for _ in range(n)])
# Globals populated by the /show handler when the user uploads a model,
# then read by the /result handler.
model = None
# NOTE(review): /show assigns a global named `dictionary`, not `word2vec` —
# this default is never updated; confirm which name is intended.
word2vec = None
max_length_sentences = 0
max_length_word = 0
num_classes = 0
categories = None
@app.route("/")
def home():
    """Serve the landing page of the demo app."""
    template_name = "main.html"
    return render_template(template_name)
@app.route("/input")
def new_input():
    # Render the text-input page (input.html).
    return render_template("input.html")
@app.route("/show", methods=["POST"])
def show():
    """Load an uploaded model and vocabulary, caching them in module globals.

    Expects multipart form files:
      - "model": a serialized PyTorch model (loaded with torch.load)
      - "word2vec": space-separated embedding file; only the word column is kept
      - "classes" (optional): one label per line
    """
    global model, dictionary, max_length_word, max_length_sentences, num_classes, categories
    trained_model = request.files["model"]
    if torch.cuda.is_available():
        model = torch.load(trained_model)
    else:
        # Map tensors onto CPU when no GPU is available.
        model = torch.load(trained_model, map_location=lambda storage, loc: storage)
    # Only the first column (the word itself) of the embedding file is needed.
    dictionary = pd.read_csv(filepath_or_buffer=request.files["word2vec"], header=None, sep=" ", quoting=csv.QUOTE_NONE,
                             usecols=[0]).values
    dictionary = [word[0] for word in dictionary]
    # Sequence limits and output size are read off the loaded model object.
    max_length_sentences = model.max_sent_length
    max_length_word = model.max_word_length
    num_classes = list(model.modules())[-1].out_features
    if "classes" in request.files:
        df = pd.read_csv(request.files["classes"], header=None)
        categories = [item[0] for item in df.values]
    return render_template("input.html")
@app.route("/result", methods=["POST"])
def result():
    """Classify the submitted text with the cached hierarchical model.

    Encodes the text as a (sentences x words) index matrix, pads/truncates
    to the model's limits, runs one forward pass, and renders result.html
    with the winning class and its softmax probability.
    """
    global dictionary, model, max_length_sentences, max_length_word, categories
    text = request.form["message"]
    # Word index per token; -1 marks out-of-vocabulary words.
    document_encode = [
        [dictionary.index(word) if word in dictionary else -1 for word in word_tokenize(text=sentences)] for sentences
        in sent_tokenize(text=text)]
    # Pad every sentence to max_length_word ...
    for sentences in document_encode:
        if len(sentences) < max_length_word:
            extended_words = [-1 for _ in range(max_length_word - len(sentences))]
            sentences.extend(extended_words)
    # ... and pad the document to max_length_sentences.
    if len(document_encode) < max_length_sentences:
        extended_sentences = [[-1 for _ in range(max_length_word)] for _ in
                              range(max_length_sentences - len(document_encode))]
        document_encode.extend(extended_sentences)
    # Truncate anything longer than the model's limits.
    document_encode = [sentences[:max_length_word] for sentences in document_encode][
                      :max_length_sentences]
    document_encode = np.stack(arrays=document_encode, axis=0)
    # Shift indices up by one so OOV/padding (-1) maps to 0 — presumably the
    # embedding's padding index; confirm against the model.
    document_encode += 1
    # The hidden state is initialized for batch size 2, so pad the batch with
    # an all-zero dummy document; only row 0 of the output is read below.
    empty_array = np.zeros_like(document_encode, dtype=np.int64)
    input_array = np.stack([document_encode, empty_array], axis=0)
    feature = torch.from_numpy(input_array)
    if torch.cuda.is_available():
        feature = feature.cuda()
    model.eval()
    with torch.no_grad():
        model._init_hidden_state(2)
        prediction = model(feature)
    # Explicit dim over the class axis; implicit-dim softmax is deprecated
    # and resolves to the same axis for this 2-D output.
    prediction = F.softmax(prediction, dim=-1)
    max_prob, max_prob_index = torch.max(prediction, dim=-1)
    prob = "{:.2f} %".format(float(max_prob[0])*100)
    # PEP 8: identity comparison against None instead of `!= None`.
    if categories is not None:
        category = categories[int(max_prob_index[0])]
    else:
        category = int(max_prob_index[0]) + 1
    return render_template("result.html", text=text, value=prob, index=category)
if __name__ == "__main__":
    # Development-server entry point; debug=True is not for production use.
    app.secret_key = os.urandom(12)
    app.run(host="0.0.0.0", port=4555, debug=True)
# -
# ## Result
#
# 
#
#
# Flask is fine only if you are planning to demo your model on local machine, but when deploy your model to production, there will be some issues:
#
# 1. Loading and serving the model happen inside the backend codebase. Every time a client sends a request to the server, the pretrained model is reloaded. For a single model, reloading the pretrained model is acceptable, but it becomes impossible to load multiple complex models at the same time (e.g. object detection + image alignment + object tracking)
#
# 2. Model version: There is no information about model version. Anytime you want to update your model, you need to create a new API endpoint to process or overwrite the old version.
# ## Tensorflow Serving is the way to solve the Python Flask disadvantages
# ### Example with Tensorflow Serving
# +
## Example: Simple NN with Mnist datasaet
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
def make_model(input_shape=(28, 28, 1)):
    """Build a small CNN classifier for 28x28x1 MNIST images.

    Three conv/pool stages (16, 32, 64 filters) followed by batch
    normalization, a dense head and a 10-way softmax output.

    Fixes: the original referenced undefined names `tf_models` and `layers`
    (the imports above bind `Sequential`, `Conv2D`, ... directly), and used
    a mutable list as the default argument.
    """
    # InputLayer / BatchNormalization are not in the notebook's top imports.
    from keras.layers import InputLayer, BatchNormalization

    model = Sequential()
    model.add(InputLayer(input_shape=input_shape))
    for no_filter in [16, 32, 64]:
        model.add(Conv2D(
            no_filter,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            activation='relu',
        ))
        model.add(MaxPooling2D(
            pool_size=(2, 2),
            strides=(2, 2),
            padding='same',
        ))
    model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    return model
# Build the CNN and show its input/output tensors and parameter count.
model = make_model()
print(model.inputs, model.outputs, model.count_params())
# [<tf.Tensor 'input_1:0' shape=(?, 28, 28, 1) dtype=float32>]
# [<tf.Tensor 'dense_1/Softmax:0' shape=(?, 10) dtype=float32>]
# 156234
# -
# ## Training, save and load model
# +
from tensorflow.keras.models import load_model

# fit model and save weights (`...` are placeholders for the real arguments)
model.fit(...)
model.save(...)
# load pretrained model back from the HDF5 file
model = load_model('./temp_models/mnist_all.h5')
# -
# ## Set learning_phase = 0 to change to evaluation mode:
# +
# The export path contains the name and the version of the model;
# TF Serving reads the version from the last path component ("1").
tf.keras.backend.set_learning_phase(0) # Ignore dropout at inference
export_path = './temp_models/serving/1'
# -
# ## Convert h5 to Tensorflow Serving format saved_model.pb with method .simple_saved
#
# Export the live Keras session as a SavedModel with named input/output
# tensors — the signature TF Serving exposes to clients.
with tf.keras.backend.get_session() as sess:
    tf.saved_model.simple_save(
        sess,
        export_path,
        inputs={'input_image': model.input},
        outputs={'y_pred': model.output})
# ## Export model by SaveModelBuilder method with custom MetaGraphDef. Custom tag-set or define assets (external file for serving)
#
# +
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
from tensorflow.contrib.session_bundle import exporter

# Lower-level export path: SavedModelBuilder lets you name the signature
# yourself ('reid-predict' here) instead of the default serving signature.
# NOTE(review): `export_dir_path` must be defined by an earlier cell.
builder = saved_model_builder.SavedModelBuilder(export_dir_path)
signature = predict_signature_def(
    inputs={
        'input_image': model.inputs[0],
    },
    outputs={
        'y_pred': model.outputs[0]
    }
)
with K.get_session() as sess:
    builder.add_meta_graph_and_variables(
        sess=sess,
        tags=[tag_constants.SERVING],
        signature_def_map={'reid-predict': signature},
        # or
        # signature_def_map={signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature},
    )
builder.save()
# -
# ## Save model with ***checkpoint*** format .cpkt:
#
# +
import os
import tensorflow as tf

# Convert a training checkpoint (.ckpt) into a SavedModel for serving.
trained_checkpoint_prefix = './temp_models/model.ckpt-00001'
export_dir = os.path.join('./temp_models/serving', '1')

graph = tf.Graph()
with tf.compat.v1.Session(graph=graph) as sess:
    # Restore graph definition and weights from the checkpoint
    loader = tf.compat.v1.train.import_meta_graph(trained_checkpoint_prefix + '.meta')
    loader.restore(sess, trained_checkpoint_prefix)
    # Export checkpoint to SavedModel under both TRAINING and SERVING tags
    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
    builder.add_meta_graph_and_variables(sess,
                                         [tf.saved_model.TRAINING, tf.saved_model.SERVING],
                                         strip_default_attrs=True)
    builder.save()
# -
# * File **saved_model.pb** and **variables** folder will be created:
# - saved_model.pb: serialized model, stored graph info of the model and other metadata such as: signature, model inputs/outputs.
# - variables: store serialized variables of the graph (learned weight)
#
#
# * **Tensorflow Serving manage the model version control by folder name. I.e: version 1 is folder 1**
# >temp_models/serving/1
#
# >├── saved_model.pb
#
# >└── variables
#
# >├── variables.data-00000-of-00001
#
# >└── variables.index
#
# * Use <code>save_model_cli</code> to show <code> saved_model.pb </code> metadata
# ```bash saved_model_cli show --dir temp_models/serving/1 --tag_set serve --signature_def serving_default```
#
# * **Result**
#
# +
# Pasted `saved_model_cli` output — commented out so this cell is valid Python.
# MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
# signature_def['serving_default']:
#   The given SavedModel SignatureDef contains the following input(s):
#     inputs['input_image'] tensor_info:
#         dtype: DT_FLOAT
#         shape: (-1, 28, 28, 1)
#         name: input_1:0
#   The given SavedModel SignatureDef contains the following output(s):
#     outputs['pred'] tensor_info:
#         dtype: DT_FLOAT
#         shape: (-1, 10)
#         name: dense_1/Softmax:0
#   Method name is: tensorflow/serving/predict
# -
#
# * **Test output with 1 sample**
# ``` saved_model_cli run --dir temp_models/serving/1/ --tag_set serve --signature_def serving_default --input_exprs "input_image=np.zeros((1, 28, 28, 1))"```
#
# * **Output**
#
#
# +
# Pasted CLI output — commented out so this cell is valid Python.
# Result for output key y_pred:
# [[1.5933816e-01 1.6137624e-01 4.8642698e-05 8.6862819e-05 2.8394745e-05
#   1.3426773e-03 2.7080998e-03 6.2681846e-03 1.3640945e-02 6.5516180e-01]
# -
# ## gRPC (Google Remote Procedures Calls) vs RESTful (Representational State Transfer)
# * Tensorflow Serving support both **gRPC** and **http**. To make the request to tensorflow server via gRPC, we need to install <code>tensorflow_model_server</code> and lib <code>tensorflow-serving-api</code>
#
# ```bash
#
# # echo "deb [arch=amd64] http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | sudo tee /etc/apt/sources.list.d/tensorflow-serving.list && \
# # curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | sudo apt-key add -
#
# # step 2
# apt-get update && apt-get install tensorflow-model-server
# # or apt-get upgrade tensorflow-model-server
#
# # step 3
# pip install tensorflow-serving-api
# ```
#
# * **Run the server:**
# ```bash
# tensorflow_model_server --port=8500 --rest_api_port=8501 --model_name=mnist-serving --model_base_path=/home/thuc/project/tensorflow/temp_models/serving
# ```
#
#
# * **Saved_model folder structure, i.e. home/thuc/project/tensorflow/temp_models/serving** with 2 different versions
#
# <code>
# temp_models/serving
# ├── 1
# │ ├── saved_model.pb
# │ └── variables
# │ ├── variables.data-00000-of-00001
# │ └── variables.index
# └── 2
# ├── saved_model.pb
# └── variables
# ├── variables.data-00000-of-00001
# └── variables.index
# ...
# 4 directories, 6 files
#
# </code>
#
# ### Restful API example, default port=8500
# +
#Code to request Restful API
from sklearn.metrics import accuracy_score, f1_score

# x_test: MNIST test images loaded earlier in the notebook.
print(x_test.shape)
# (10000, 28, 28, 1)
def rest_infer(imgs,
               model_name='mnist-serving',
               host='localhost',
               port=8501,
               signature_name="serving_default"):
    """Classify MNIST images through the TF Serving RESTful endpoint.

    Returns the predicted class indices, or None when the server does not
    answer with HTTP 200.
    """
    # The REST API expects a batch; promote a single image to batch size 1.
    if imgs.ndim == 3:
        imgs = np.expand_dims(imgs, axis=0)
    endpoint = 'http://{}:{}/v1/models/{}:predict'.format(host, port, model_name)
    payload = json.dumps({
        "signature_name": signature_name,
        "instances": imgs.tolist()
    })
    response = requests.post(
        endpoint,
        data=payload,
        headers={"content-type": "application/json"}
    )
    if response.status_code != 200:
        return None
    scores = json.loads(response.text)['predictions']
    return np.argmax(scores, axis=-1)
# Score the REST endpoint on the full test set.
y_pred = rest_infer(x_test)
print(
    accuracy_score(np.argmax(y_test, axis=-1), y_pred),
    f1_score(np.argmax(y_test, axis=-1), y_pred, average="macro")
)
# result
# 0.9947 0.9946439344333233
# -
# ### gRPC example, default port=8500, require: model_name, signature_name,host, port, input_name, output_name
# +
# With gRPC, default port = 8500, code require: model_name, signature_name, host, port, input_name, output_name
import numpy as np
import copy
import tensorflow as tf
import cv2
import grpc
import matplotlib.pyplot as plt
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

# Channel, stub and request are built once at module level and reused by
# grpc_infer below.
channel = grpc.insecure_channel("localhost:8500")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
# model_name (must match --model_name given to tensorflow_model_server)
request.model_spec.name = "mnist-serving"
# signature name, default is `serving_default`
request.model_spec.signature_name = "serving_default"
def grpc_infer(imgs):
    """Classify MNIST images through the TF Serving gRPC endpoint.

    Returns the predicted class indices, or None if the RPC fails.
    """
    # The model expects a batch; promote a single image to batch size 1.
    batch = np.expand_dims(imgs, axis=0) if imgs.ndim == 3 else imgs
    tensor_proto = tf.contrib.util.make_tensor_proto(
        batch,
        dtype=np.float32,
        shape=batch.shape
    )
    request.inputs["input_image"].CopyFrom(tensor_proto)
    try:
        response = stub.Predict(request, 10.0)
        scores = np.array(response.outputs["y_pred"].float_val)
        return np.argmax(scores.reshape((-1, 10)), axis=-1)
    except Exception as e:
        print(e)
        return None
# Score the gRPC endpoint on the full test set (metrics match the REST run).
y_pred = grpc_infer(x_test)
print(
    accuracy_score(np.argmax(y_test, axis=-1), y_pred),
    f1_score(np.argmax(y_test, axis=-1), y_pred, average="macro")
)
# result
# 0.9947 0.9946439344333233
# -
# ## Benchmark
# * **Benchmark inference time between gRPC and RESTful API, with 1 request**
#
# +
# http — time a single-image request over the REST endpoint
start = time.time()
y_pred = rest_infer(x_test[0])
print("Inference time: {}".format(time.time() - start))
# >>> Inference time: 0.0028078556060791016

# gRPC — time the same single-image request over gRPC
start = time.time()
y_pred = grpc_infer(x_test[0])
print("Inference time: {}".format(time.time() - start))
# >>> Inference time: 0.0012249946594238281
# -
# * **Inference time with 10000 MNIST sample**
# +
# Benchmark the full 10000-image test set on both endpoints.
# (The pasted ">>> Inference time" output lines were bare statements that
# made the cell a SyntaxError; they are commented out like the cells above.)
start = time.time()
y_pred = rest_infer(x_test)
print(">>> Inference time: {}".format(time.time() - start))
# >>> Inference time: 6.681854248046875

start = time.time()
y_pred = grpc_infer(x_test)
print(">>> Inference time: {}".format(time.time() - start))
# >>> Inference time: 0.3771860599517822
# -
# * gRPC is almost 18 times faster for a 10000-sample MNIST request
# * With more complicated models, or models with multiple inputs/outputs, **gRPC** performs even faster than **http**
# ## Serve model with multiple inputs:
#
# * Example: Face verification system. We will have 2 images as inputs, the system will parse the result if 2 images are show the same person or not
# - Model: Siamese network
# - Input: 2 images as inputs
# - Output: 1 verification result
#
#
# 
# +
from tensorflow.keras.models import load_model

# Load the trained siamese (two-input, one-output) re-identification network.
model = load_model('sianet.h5')
print(model.inputs, model.outputs)
# output
# <tf.Tensor 'input_6:0' shape=(?, 64, 32, 3) dtype=float32>,
# <tf.Tensor 'input_7:0' shape=(?, 64, 32, 3) dtype=float32>],
# <tf.Tensor 'dense_2/Sigmoid:0' shape=(?, 1) dtype=float32>])
# +
#Convert to .pb format
import tensorflow.keras.backend as K
def export_pb(export_dir_path, model):
    """Export a two-input Keras siamese model in SavedModel (.pb) format."""
    # Name both image inputs and the single sigmoid output in the signature.
    signature = predict_signature_def(
        inputs={
            'img1': model.inputs[0],
            'img2': model.inputs[1]
        },
        outputs={
            'predict': model.outputs[0]
        }
    )
    builder = saved_model_builder.SavedModelBuilder(export_dir_path)
    with K.get_session() as sess:
        builder.add_meta_graph_and_variables(
            sess=sess,
            tags=[tag_constants.SERVING],
            signature_def_map={'signature-reid': signature}
        )
    builder.save()
# -
# !tensorflow_model_server --port=8500 --rest_api_port=8501 --model_name=siamese-reid --model_base_path=relative-path-to-model-version
# !curl localhost:8501/v1/models/siamese-reid
# #### Request gPRC server **2 inputs**, **1 output**
def _grpc_client_request(
    img1,
    img2,
    host='localhost',
    port=8500,
    img1_name='img1',
    img2_name='img2',
    model_spec_name='siamese-reid',
    model_sig_name='signature-reid',
    timeout=10
):
    """Send a pair of images to the siamese TF Serving model over gRPC.

    Returns the raw PredictResponse; the caller parses the outputs.
    """
    def as_batch_proto(img):
        # Add a leading batch axis and wrap the array in a TensorProto.
        arr = np.expand_dims(img, axis=0)
        proto = tf.contrib.util.make_tensor_proto(
            arr,
            dtype=np.float32,
            shape=[*arr.shape]
        )
        return arr, proto

    # Accept full URLs by stripping any scheme prefix.
    host = host.replace("http://", "").replace("https://", "")
    stub = prediction_service_pb2_grpc.PredictionServiceStub(
        grpc.insecure_channel("{}:{}".format(host, port))
    )

    # Create PredictRequest ProtoBuf from the image data.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_spec_name
    request.model_spec.signature_name = model_sig_name

    img_arr1, proto1 = as_batch_proto(img1)
    request.inputs[img1_name].CopyFrom(proto1)
    img_arr2, proto2 = as_batch_proto(img2)
    request.inputs[img2_name].CopyFrom(proto2)

    print(img_arr1.shape, img_arr2.shape)
    start = time.time()
    # Call the TFServing Predict API.
    predict_response = stub.Predict(request, timeout=timeout)
    print(">>> Inference time: {}'s".format(time.time() - start))
    return predict_response
# #### Parse result
# +
img_size = (64, 32)
img1_fp = 'path-to-img1'
img2_fp = 'path-to-img2'

# preprocess images (preprocess_reid is defined elsewhere in the project)
img1 = preprocess_reid(img1_fp, img_size)
img2 = preprocess_reid(img2_fp, img_size)

# send both images and threshold the sigmoid output at 0.5
result = _grpc_client_request(img1, img2)
pred = np.array(result.outputs['predict'].float_val)
pred = (pred >= 0.5).astype(int)
print(pred)
# [1]
# -
# 
# ## Serving with complex output
#
# * Object Detection and Image Segmentation are models with very complex outputs. Usually the model outputs flattened, normalized arrays containing many coordinates: bounding boxes, detection boxes, detection classes, detection scores, num_detections and a lot more information. First, I will go through Object Detection with the **ssd-mobilenet-v2** model:
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
#
# ```
# ssd_mobilenet_v2_coco_2018_03_29/
# ├── checkpoint
# ├── frozen_inference_graph.pb
# ├── model.ckpt.data-00000-of-00001
# ├── model.ckpt.index
# ├── model.ckpt.meta
# └── saved_model
# ├── saved_model.pb
# └── variables
#
# ```
# !saved_model_cli show --dir /home/thuc/Downloads/pretrained_models/ssd_mobilenet_v2_coco_2018_03_29/saved_model/1 --all
# +
# Pasted `saved_model_cli show --all` output — commented out so this
# cell is valid Python.
# output
# MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
# signature_def['serving_default']:
#   The given SavedModel SignatureDef contains the following input(s):
#     inputs['inputs'] tensor_info:
#         dtype: DT_UINT8
#         shape: (-1, -1, -1, 3)
#         name: image_tensor:0
#   The given SavedModel SignatureDef contains the following output(s):
#     outputs['detection_boxes'] tensor_info:
#         dtype: DT_FLOAT
#         shape: (-1, 100, 4)
#         name: detection_boxes:0
#     outputs['detection_classes'] tensor_info:
#         dtype: DT_FLOAT
#         shape: (-1, 100)
#         name: detection_classes:0
#     outputs['detection_scores'] tensor_info:
#         dtype: DT_FLOAT
#         shape: (-1, 100)
#         name: detection_scores:0
#     outputs['num_detections'] tensor_info:
#         dtype: DT_FLOAT
#         shape: (-1)
#         name: num_detections:0
#   Method name is: tensorflow/serving/predict
# -
# * Model outputs include:
# - Signature: signature_def['serving_default']
# - Model input: dtype: int8, 3 channels, undefined input dimension
# - Model output:
# - detection_boxes: shape: (-1, 100, 4)
# - detection_classes: shape: (-1, 100)
# - detection_scores: shape: (-1, 100)
# - num_detections: shape: (-1)
#start tensorflow_model_server
# !tensorflow_model_server --port=8500 --rest_api_port=8501 --model_name=ssd-mbv2-coco --model_base_path=/home/thuc/Downloads/ssd_mobilenet_v1_coco_2018_01_28/saved_model/
# #### Request gRPC
# +
test_img = "/home/thuc/Downloads/cat.jpg"
# Flip BGR (OpenCV order) to RGB — presumably what the detector expects.
img = cv2.imread(test_img)[:, :, ::-1]
img_arr = np.expand_dims(img, axis=0)

# init channel
channel = grpc.insecure_channel("localhost:8500")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = "ssd-mbv2-coco"
request.model_spec.signature_name = "serving_default"
# The SSD signature takes uint8 image batches of shape (-1, -1, -1, 3)
# (see the saved_model_cli output above).
request.inputs["inputs"].CopyFrom(
    tf.contrib.util.make_tensor_proto(
        img_arr,
        dtype=np.uint8,
        shape=img_arr.shape
    )
)
result = stub.Predict(request, 10.0)
# -
# #### Parse result
# * Use function provided by TF Object Dectection API: https://github.com/tensorflow/models/tree/master/research/object_detection/utils
# * File map label by TF API: https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt
# * Because the detection output of TensorFlow Serving has been flattened and normalized to [0,1], we need to convert it back to the original coordinate values in order to visualize them on the output image
# +
import copy
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util

# Flattened, [0,1]-normalized detection outputs returned by TF Serving.
boxes = result.outputs['detection_boxes'].float_val
classes = result.outputs['detection_classes'].float_val
scores = result.outputs['detection_scores'].float_val
no_dets = result.outputs['num_detections'].float_val
print(boxes)
# output (pasted, commented out so the cell stays valid Python):
# [0.05715984106063843, 0.4511566460132599, 0.9412486553192139, 0.9734638929367065, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 .........
# +
# Map numeric class ids to human-readable COCO labels.
label_map = label_map_util.load_labelmap("/home/thuc/Downloads/mscoco_label_map.pbtxt")
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=90, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# Draw on a deep copy so the original image stays untouched.
img_ = copy.deepcopy(img)
image_vis = vis_util.visualize_boxes_and_labels_on_image_array(
    img_,
    # The flat box list is reshaped back to (num_boxes, 4).
    np.reshape(boxes, [len(boxes) // 4,4]),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=2,
    max_boxes_to_draw=12,
    min_score_thresh=0.9,
    skip_scores=False,
    skip_labels=False,
    skip_track_ids=False
)
plt.imshow(image_vis)
# -
# #### Result:
# 
# ## Serving multiple model
#
# * Tensorflow Serving support serving multiple model and automatic reload the newest version of each model.
# * We need to create <code> serving.config </code> with the model_path are absolute path:
model_config_list {
config {
name: 'model-1'
base_path: 'path-to-model1'
model_platform: "tensorflow",
model_version_policy {
specific {
versions: 1
}
}
}
config {
name: 'model-2'
base_path: 'path-to-model2'
model_platform: "tensorflow",
model_version_policy {
specific {
versions: 1
}
}
}
config {
name: 'model-3'
base_path: 'path-to-model3'
model_platform: "tensorflow",
model_version_policy {
specific {
versions: 1
}
}
}
}
#start tensorflow_model_server
# !tensorflow_model_server --port=8500 --rest_api_port=8501 --model_config_file=./temp_models/serving.config
# ## Improve Inference time with Batching request
# #### Tensorflow Serving support inference batching
# * **Server side**
#
# **Model without Server-side batching**
# 
# * **Model with server-side Batching**
#
# 
# #### Enable server-side batching by <code>batching_parameter.txt</code>
# +
max_batch_size { value: 32 }
batch_timeout_micros { value: 5000 }
#with max_batch_size is number of batch-size, i.e batch-size=32
#batch_timeout_micros. Maximum timeout to create the batch-size 32
# -
# Add <code>batching_parameter.txt</code> to docker container
# !tensorflow_model_server --port=8500 --rest_api_port=8501 \
# --model_name=mnist-serving \
# --model_base_path=/home/thuc/phh_workspace/temp_models/serving \
# --enable_batching=true \
# --batching_parameters_file=/home/thuc/phh_workspace/temp_models/batching_parameters.txt
# ### Tensorflow Serving with Docker/Docker compose
# * Pull docker image and test
# +
# step 1
docker pull tensorflow/serving
# step 2
docker run --rm -p 8500:8500 -p 8501:8501 --mount type=bind,source=/home/thuc/phh_workspace/temp_models/serving,target=/models/mnist-serving -e MODEL_NAME=mnist-serving -t tensorflow/serving
# with config file
docker run --rm -p 8500:8500 -p 8501:8501 --mount type=bind,source=/home/thuc/phh_workspace/temp_models/serving,target=/models/mnist-serving --mount type=bind,source=/home/thuc/phh_workspace/temp_models/serving.config,target=/models/serving.config -t tensorflow/serving --model_config_file=/models/serving.config
# step 3 - testing with curl
curl localhost:8501/v1/models/mnist-serving
# output
# return OK
{
"model_version_status": [
{
"version": "1",
"state": "AVAILABLE",
"status": {
"error_code": "OK",
"error_message": ""
}
}
]
}
# -
# #### Build a web API with Python Flask, tensorflow-serving-api, docker/docker compose and preprocessing data using gRPC
# * **Preprocessing, gRPC request**
# +
import base64
import cv2
import numpy as np
import grpc
from protos.tensorflow_serving.apis import predict_pb2
from protos.tensorflow_serving.apis import prediction_service_pb2_grpc
from protos.tensorflow.core.framework import (
tensor_pb2,
tensor_shape_pb2,
types_pb2
)
def convert_image(encoded_img, to_rgb=False):
    """Decode a base64 string (or raw bytes) into a grayscale HxWx1 uint8 array.

    NOTE(review): the `to_rgb` flag is currently ignored — the image is
    always converted to grayscale; confirm the intent with callers before
    implementing it.
    """
    if isinstance(encoded_img, str):
        b64_decoded_image = base64.b64decode(encoded_img)
    else:
        b64_decoded_image = encoded_img
    # np.fromstring is deprecated for binary input; frombuffer is the
    # supported (zero-copy) equivalent.
    img_arr = np.frombuffer(b64_decoded_image, np.uint8)
    img = cv2.imdecode(img_arr, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = np.expand_dims(img, axis=-1)
    return img
def grpc_infer(img):
    """Run one MNIST prediction against the TF Serving container over gRPC.

    Returns the predicted class indices, or None if the RPC fails.
    """
    # 10.5.0.5 is the tf-serving service address in docker-compose.
    channel = grpc.insecure_channel("10.5.0.5:8500")
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = "mnist-serving"
    request.model_spec.signature_name = "serving_default"

    # Promote a single image to a batch of one.
    batch = np.expand_dims(img, axis=0) if img.ndim == 3 else img
    shape_proto = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=d) for d in batch.shape]
    )
    tensor = tensor_pb2.TensorProto(
        dtype=types_pb2.DT_FLOAT,
        tensor_shape=shape_proto,
        float_val=batch.reshape(-1))
    request.inputs['input_image'].CopyFrom(tensor)

    try:
        result = stub.Predict(request, 10.0)
        scores = np.array(result.outputs["y_pred"].float_val).reshape((-1, 10))
        return np.argmax(scores, axis=-1)
    except Exception as e:
        print(e)
        return None
# -
# * **API endpoint, handle request, parse result**
# +
import json
from flask import Flask, request
from utils import grpc_infer, convert_image
app = Flask(__name__)


@app.route('/api/mnist', methods=['POST'])
def hello():
    """Accept a base64-encoded image and return the model's prediction as JSON."""
    encoded_img = request.values['encoded_image']
    img = convert_image(encoded_img)
    result = grpc_infer(img)
    return json.dumps(
        {
            "code": 200,
            "result": result.tolist()
        }
    )


if __name__ == '__main__':
    # 10.5.0.4 matches the `web` service address in docker-compose.
    app.run(debug=True, host="10.5.0.4", port=5000)
# -
# * **Dockerfile**
# +
FROM ubuntu:16.04
RUN apt-get update
RUN apt-get install -y python3-pip python3-dev libglib2.0-0 libsm6 libxrender1 libxext6 \
&& cd /usr/local/bin \
&& ln -s /usr/bin/python3 python \
&& pip3 install --upgrade pip
RUN mkdir /code
WORKDIR /code
COPY requirements.txt /code/requirements.txt
RUN pip3 install -r requirements.txt
# -
# * **Docker compose**
# +
version: '2'
services:
web:
container_name: mnist_api
build: .
restart: always
volumes:
- .:/code
command: bash -c "python3 serve.py"
ports:
- "5000:5000"
networks:
mynet:
ipv4_address: 10.5.0.4
tf-serving:
image: tensorflow/serving
restart: always
ports:
- "8500:8500"
- "8501:8501"
volumes:
- ./serving:/models
- ./serving_docker.config:/models/serving_docker.config
command: --model_config_file=/models/serving_docker.config
networks:
mynet:
ipv4_address: 10.5.0.5
networks:
mynet:
driver: bridge
ipam:
config:
- subnet: 10.5.0.0/16
gateway: 10.5.0.1
# -
# * **Build image**
# +
# step 1
# !docker-compose build
# step 2
# !docker-compose up
# -
# * **Test API with Postman, data input as base64**
# 
| Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # This notebook is to demonstrate `flake8_nb` reporting
# The next cell should report `F401 'not_a_package' imported but unused`
# + tags=["raises-exception"]
import not_a_package
# -
# The next cell should report `E231 missing whitespace after ':'`
{"1":1}
# The next cell should not be reported, since it is valid syntax
def func():
return "foo"
# The next cell should not be reported, since it is valid syntax
class Bar:
def foo(self):
return "foo"
# The next cell is just for testing, the output of the shell command
# !flake8_nb notebook_with_out_flake8_tags.ipynb
# The next cell should be ignored in the generated intermediate `*.py` file since it is empty
| tests/data/notebooks/notebook_with_out_flake8_tags.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test data quality at scale with PyDeequ
#
# Authors: <NAME> (<EMAIL>@), <NAME> (cghyzel@), <NAME> (jaoanan@), <NAME> (<EMAIL>lerv@)
# You generally write unit tests for your code, but do you also test your data? Incoming data quality can make or break your machine learning application. Incorrect, missing or malformed data can have a large impact on production systems. Examples of data quality issues are:
#
# * Missing values can lead to failures in production system that require non-null values (NullPointerException).
# * Changes in the distribution of data can lead to unexpected outputs of machine learning models.
# * Aggregations of incorrect data can lead to wrong business decisions.
#
# In this blog post, we introduce PyDeequ, an open source Python wrapper over [Deequ](https://aws.amazon.com/blogs/big-data/test-data-quality-at-scale-with-deequ/) (an open source tool developed and used at Amazon). While Deequ is written in Scala, PyDeequ allows you to use its data quality and testing capabilities from Python and PySpark, the language of choice of many data scientists. PyDeequ democratizes and extends the power of Deequ by allowing you to use it alongside the many data science libraries that are available in that language. Furthermore, PyDeequ allows for fluid interface with [Pandas](https://pandas.pydata.org/) DataFrame as opposed to restricting within Spark DataFrames.
#
# Deequ allows you to calculate data quality metrics on your dataset, define and verify data quality constraints, and be informed about changes in the data distribution. Instead of implementing checks and verification algorithms on your own, you can focus on describing how your data should look. Deequ supports you by suggesting checks for you. Deequ is implemented on top of [Apache Spark](https://spark.apache.org/) and is designed to scale with large datasets (think billions of rows) that typically live in a distributed filesystem or a data warehouse. PyDeequ gives you access to this capability, but also allows you to use it from the familiar environment of your Python Jupyter notebook.
#
# ## Deequ at Amazon
#
# Deequ is being used internally at Amazon for verifying the quality of many large production datasets. Dataset producers can add and edit data quality constraints. The system computes data quality metrics on a regular basis (with every new version of a dataset), verifies constraints defined by dataset producers, and publishes datasets to consumers in case of success. In error cases, dataset publication can be stopped, and producers are notified to take action. Data quality issues do not propagate to consumer data pipelines, reducing their blast radius.
#
# Deequ is also used within [Amazon SageMaker Model Monitor](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor.html#model-monitor-how-it-works). Now with the availability of PyDeequ, it is finding its way into a broader set of environments - SageMaker Notebooks, AWS Glue, and more.
#
# ## Overview of PyDeequ
#
# Let’s look at PyDeequ’s main components, and how they relate to Deequ (shown in Figure 1).
#
# * Metrics Computation — Deequ computes data quality metrics, that is, statistics such as completeness, maximum, or correlation. Deequ uses Spark to read from sources such as Amazon S3, and to compute metrics through an optimized set of aggregation queries. You have direct access to the raw metrics computed on the data.
# * Constraint Verification — As a user, you focus on defining a set of data quality constraints to be verified. Deequ takes care of deriving the required set of metrics to be computed on the data. Deequ generates a data quality report, which contains the result of the constraint verification.
# * Constraint Suggestion — You can choose to define your own custom data quality constraints, or use the automated constraint suggestion methods that profile the data to infer useful constraints.
# * Python wrappers — You can call each of the Deequ functions using Python syntax. The wrappers translate the commands to the underlying Deequ calls, and return their response.
#
# 
#
# Figure 1. Overview of PyDeequ components.
#
# ## Example
#
# As a running example, we use [a customer review dataset provided by Amazon](https://s3.amazonaws.com/amazon-reviews-pds/readme.html) on Amazon S3. We have intentionally followed the example in the [Deequ blog](https://aws.amazon.com/blogs/big-data/test-data-quality-at-scale-with-deequ/), to show the similarity in functionality and execution. We begin the way many data science projects do: with initial data exploration and assessment in a Jupyter notebook.
#
# During the data exploration phase, you’d like to easily answer some basic questions about the data:
#
# * Are the fields that are supposed to contain unique values, really unique? Are there fields that are missing values?
# * How many distinct categories are there in the categorical fields?
# * Are there correlations between some key features?
# * If there are two supposedly similar datasets (different categories, or different time periods, say), are they really similar?
#
# Then, we’ll show you how to scale this approach to large-scale datasets, using the same code on an EMR cluster. This is how you’d likely do your ML training, and later as you move into a production setting.
#
# ### Setup: Start a PySpark Session in a SageMaker Notebook
# + language="bash"
#
# # install PyDeequ via pip
# pip install pydeequ
# +
from pyspark.sql import SparkSession, Row, DataFrame
import json
import pandas as pd
import sagemaker_pyspark
import pydeequ
# Build a colon-separated classpath from the SageMaker PySpark JARs so the
# Spark driver can locate them.
classpath = ":".join(sagemaker_pyspark.classpath_jars())
# Create (or reuse) a SparkSession with the Deequ Maven artifact on the jar
# path; pydeequ.f2j_maven_coord excludes a conflicting transitive dependency.
spark = (SparkSession
    .builder
    .config("spark.driver.extraClassPath", classpath)
    .config("spark.jars.packages", pydeequ.deequ_maven_coord)
    .config("spark.jars.excludes", pydeequ.f2j_maven_coord)
    .getOrCreate())
# -
# ### We will be using the Amazon Product Reviews dataset -- specifically the Electronics subset.
# +
# Read the Electronics partition of the public Amazon Customer Reviews
# dataset straight from S3 (Parquet; Spark loads it lazily).
df = spark.read.parquet("s3a://amazon-reviews-pds/parquet/product_category=Electronics/")
df.printSchema()
# -
# ## Data Analysis
#
# Before we define checks on the data, we want to calculate some statistics on the dataset; we call them metrics. As with Deequ, PyDeequ supports a rich set of metrics (they are described in this blog (https://aws.amazon.com/blogs/big-data/test-data-quality-at-scale-with-deequ/) and in this Deequ package (https://github.com/awslabs/deequ/tree/master/src/main/scala/com/amazon/deequ/analyzers)). In the following example, we show how to use the _AnalysisRunner (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/analyzers/runners/AnalysisRunner.scala)_ to capture the metrics you are interested in.
# +
from pydeequ.analyzers import *
# Compute a batch of dataset metrics in a single pass over `df`:
# row count, completeness/approximate distinct count of review_id, mean
# star rating, share of 4+ star reviews, and two column correlations.
analysisResult = AnalysisRunner(spark) \
    .onData(df) \
    .addAnalyzer(Size()) \
    .addAnalyzer(Completeness("review_id")) \
    .addAnalyzer(ApproxCountDistinct("review_id")) \
    .addAnalyzer(Mean("star_rating")) \
    .addAnalyzer(Compliance("top star_rating", "star_rating >= 4.0")) \
    .addAnalyzer(Correlation("total_votes", "star_rating")) \
    .addAnalyzer(Correlation("total_votes", "helpful_votes")) \
    .run()
# Collect the computed metrics into a Spark DataFrame and display them.
analysisResult_df = AnalyzerContext.successMetricsAsDataFrame(spark, analysisResult)
analysisResult_df.show()
# -
# ### You can also get that result in a Pandas Dataframe!
#
# Passing `pandas=True` in any call for getting metrics as DataFrames will return the dataframe in Pandas form! We'll see more of it down the line!
# Same metrics as above, materialized as a pandas DataFrame (pandas=True).
analysisResult_pd_df = AnalyzerContext.successMetricsAsDataFrame(spark, analysisResult, pandas=True)
analysisResult_pd_df
# From this, we learn that:
#
# * review_id has no missing values and approximately 3,010,972 unique values.
# * 74.9% of reviews have a star_rating of 4 or higher
# * total_votes and star_rating are not correlated.
# * helpful_votes and total_votes are strongly correlated
# * the average star_rating is 4.0
# * The dataset contains 3,120,938 reviews.
#
# ## Define and Run Tests for Data
#
# After analyzing and understanding the data, we want to verify that the properties we have derived also hold for new versions of the dataset. By defining assertions on the data distribution as part of a data pipeline, we can ensure that every processed dataset is of high quality, and that any application consuming the data can rely on it.
#
# For writing tests on data, we start with the _VerificationSuite (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/VerificationSuite.scala)_ and add _Checks (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/checks/Check.scala)_ on attributes of the data. In this example, we test for the following properties of our data:
#
# * There are at least 3 million rows in total.
# * review_id is never NULL.
# * review_id is unique.
# * star_rating has a minimum of 1.0 and maximum of 5.0.
# * marketplace only contains “US”, “UK”, “DE”, “JP”, or “FR”.
# * year does not contain negative values.
#
# This is the code that reflects the previous statements. For information about all available checks, see _this GitHub repository (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/checks/Check.scala)_. You can run this directly in the Spark shell as previously explained:
#
# +
from pydeequ.checks import *
from pydeequ.verification import *
# One Check holds all constraints; CheckLevel.Warning reports failures in the
# result instead of raising them as errors.
check = Check(spark, CheckLevel.Warning, "Amazon Electronic Products Reviews")
# Verify the constraints in a single pass over the data:
# size >= 3M rows, star_rating bounded by [1.0, 5.0], review_id complete and
# unique, marketplace limited to five country codes, year non-negative.
checkResult = VerificationSuite(spark) \
    .onData(df) \
    .addCheck(
        check.hasSize(lambda x: x >= 3000000) \
        .hasMin("star_rating", lambda x: x == 1.0) \
        .hasMax("star_rating", lambda x: x == 5.0) \
        .isComplete("review_id") \
        .isUnique("review_id") \
        .isComplete("marketplace") \
        .isContainedIn("marketplace", ["US", "UK", "DE", "JP", "FR"]) \
        .isNonNegative("year")) \
    .run()
print(f"Verification Run Status: {checkResult.status}")
# Per-constraint pass/fail details, returned as a pandas DataFrame.
checkResult_df = VerificationResult.checkResultsAsDataFrame(spark, checkResult, pandas=True)
checkResult_df
# -
# After calling run(), PyDeequ translates your test description into Deequ, which in its turn translates it into a series of Spark jobs which are executed to compute metrics on the data. Afterwards, it invokes your assertion functions (e.g., lambda x: x == 1.0 for the minimum star-rating check) on these metrics to see if the constraints hold on the data.
#
# Interestingly, the review_id column is not unique, which resulted in a failure of the check on uniqueness. We can also look at all the metrics that Deequ computed for this check by running:
# All metrics Deequ computed while evaluating the checks, in pandas form.
checkResult_df = VerificationResult.successMetricsAsDataFrame(spark, checkResult, pandas=True)
checkResult_df
# ## Automated Constraint Suggestion
#
# If you own a large number of datasets or if your dataset has many columns, it may be challenging for you to manually define appropriate constraints. Deequ can automatically suggest useful constraints based on the data distribution. Deequ first runs a data profiling method and then applies a set of rules on the result. For more information about how to run a data profiling method, see _this GitHub repository. (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/examples/data_profiling_example.md)_
# +
from pydeequ.suggestions import *
# Profile every column of `df` and let Deequ suggest constraints using its
# built-in DEFAULT rule set.
suggestionResult = ConstraintSuggestionRunner(spark) \
    .onData(df) \
    .addConstraintRule(DEFAULT()) \
    .run()
# Constraint suggestions in JSON format (descriptions + ready-to-use code).
print(json.dumps(suggestionResult, indent=2))
# -
# The above result contains a list of constraints with descriptions and Python code, so that you can directly apply it in your data quality checks.
# # Scaling to Production
#
# So far we’ve shown you how to use these capabilities in the context of data exploration using a Jupyter notebook running on a SageMaker Notebook instance. As your project matures, you’ll want to use the same capabilities on larger and larger datasets, and in a production environment. With PyDeequ, it’s easy to make that transition.
#
# 
#
# As seen in the diagram above, you can leverage both an AWS EMR cluster and/or AWS Glue for larger or production purposes.
# ## More Examples on GitHub
#
# You can find examples of more advanced features at _Deequ’s GitHub page (https://github.com/awslabs/deequ)_:
#
# * Deequ not only provides data quality checks with fixed thresholds. Learn how to use _anomaly detection on data quality metrics (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/examples/anomaly_detection_example.md)_ to apply tests on metrics that change over time.
# * Deequ offers support for storing and loading metrics. Learn how to use the _MetricsRepository (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/examples/metrics_repository_example.md)_ for this use case.
# * If your dataset grows over time or is partitioned, you can use Deequ’s _incremental metrics computation (https://github.com/awslabs/deequ/blob/master/src/main/scala/com/amazon/deequ/examples/algebraic_states_example.md)_ capability. For each partition, Deequ stores a state for each computed metric. To compute metrics for the union of partitions, Deequ can use these states to efficiently derive overall metrics without reloading the data.
#
# ## Additional Resources
#
# Learn more about the inner workings of Deequ in the VLDB 2018 paper “_Automating large-scale data quality verification. (http://www.vldb.org/pvldb/vol11/p1781-schelter.pdf)_”
#
# ## Conclusion
#
# This blog post showed you how to use PyDeequ for calculating data quality metrics, verifying data quality metrics, and profiling data to automate the configuration of data quality checks. PyDeequ is available for you now to build your own data quality management pipeline.
#
| tutorials/test_data_quality_at_scale.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %ls
import numpy as np
# Load pre-computed snapshots of a 2-D Van der Pol LES run:
#   ec_snap   -> closure (subgrid) term snapshots
#   usnap_les -> resolved LES state snapshots
# Both arrays are transposed so rows index time and columns index space.
data = np.load('closure_2d_vdp_ntsnap_6000_tot_60.npz')
closure = data['ec_snap'].transpose()
states = data['usnap_les'].transpose()
# +
## parameter: previous time step (not used in this snippet)
p = 0
## parameter: nu
nu = 2
## parameter: dt -- 6000 snapshots spanning 60 time units
dt = 60/6000.0
# -
## target: d delta / dt, approximated by a forward finite difference in time
target = (closure[1:,:] - closure[:-1,:])/dt
# +
## states/closure aligned with the finite-difference target (drop last snapshot)
eff_states = states[:-1,:]
eff_closure = closure[:-1,:]
## analytical prediction: nu*c - nu*u^2*c - u
## (presumably the model-derived closure dynamics -- verify against the derivation)
analytical_prediction = nu*eff_closure - nu*eff_states*eff_states*eff_closure - eff_states
## NOTE: this notebook targets a Python 2 kernel (print statements below).
print 'mean squared error on whole data =', np.square(analytical_prediction-target).mean()
# -
print analytical_prediction
print target
print eff_closure
print closure
| data/2d_vdp/data/simple_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Preparation: MCI Patient Selection
# ADNIMERGE patient selection according to Massi's R screening file.
# This notebook serves to get familiar with the ADNI dataset and the ADNIMERGE file, and to select the MCI patients of interest for our models.
#
# Massi used the RID variable to see which rows refer to the same subject. Initially he included only those subjects that at VISCODE == "bl" had a DX_bl of "EMCI" or "LMCI". From this time point he also took the variables used as predictors. Then, for these subjects, he considered their 3-year follow-ups (VISCODE in [bl, m03, m06, m12, m18, m24, m30, m36]) and checked whether at some point their DX became "Dementia". In that case they were coded as converters; otherwise they were coded as non-converters. If conversion happened at any time within the 3 years, they were considered converters.
# +
#importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import sys
from collections import Counter
# %matplotlib inline
# -
# Show the current working directory (ADNIMERGE.csv below is expected here).
os.getcwd()
# ## Loading the ADNIMERGE.csv dataset
# The ADNIMERGE dataset can be found in the folder DATA_15_12_2017/Study_info. The data in this folder are limited to study info, clinical, sociodemographic and neuropsychological data. The ADNIMERGE dataset is basically one large dataset containing the most important features of the ADNI study, and most/all can also be found in the separate datasets in the DATA folder. In case you would like to know what all the abbreviations are, you can find all study codes in the so-called 'dictionary files, and the file ADNIMERGE_DICT in the Study_info folder contains specifically the codes for the ADNIMERGE data.
#
# More info on the ADNI study in general (centers included, cohorts-ADNI1, ADNIGO, ADNI2, ADNI3, study design etc.) can be found on the website: http://adni.loni.usc.edu/
# +
#reading CSV file
# Read ADNIMERGE and keep only the identifier, visit, demographic, and
# diagnosis columns needed for the MCI screening below.
data = pd.read_csv("ADNIMERGE.csv")
selected_columns = ['RID', 'PTID', 'VISCODE', 'SITE', 'COLPROT', 'ORIGPROT', 'EXAMDATE',
                    'DX_bl', 'AGE', 'PTGENDER', 'DX']
adnimerge = data[selected_columns]
adnimerge.head(10)
# -
# get all column names of ADNIMERGE (the full file, not the selection)
data.columns.values
# ## Step 1: Including only MCI subjects
# +
#code from Massi
# code from Massi: RIDs of all subjects whose baseline diagnosis (DX_bl) is
# early or late MCI, then every ADNIMERGE row belonging to those subjects.
to_include_mci = np.unique(np.array(adnimerge["RID"].loc[adnimerge["DX_bl"].isin(["EMCI","LMCI"])]))
adnimerge2 = adnimerge.loc[adnimerge["RID"].isin(to_include_mci)]
adnimerge2.head()
# -
# number of unique MCI subjects selected
to_include_mci.shape
# baseline rows whose concurrent DX already reads Dementia (inconsistent cases)
adnimerge2[(adnimerge2["VISCODE"] == "bl") & (adnimerge2["DX"] == "Dementia")]
# selecting subjects with DX_bl of EMCI and LMCI (row filter, not subject filter)
adnimerge_MCI = adnimerge[adnimerge["DX_bl"].isin(["EMCI", "LMCI"])]
adnimerge_MCI.head()
# selecting RIDs of these subjects
MCI_patients = np.unique(adnimerge2["RID"])
MCI_patients
# doing the above in one go; Step 1 of Massi code:
# selecting subject RIDs with DX_bl of EMCI and LMCI
MCI_patients_RID = np.unique(adnimerge[adnimerge["DX_bl"].isin(["EMCI", "LMCI"])]["RID"])
MCI_patients_RID
# selecting subjects again from adnimerge based on RIDs in MCI_patients_RID; Step 2 of Massi code
adnimerge_MCI_patients = adnimerge[adnimerge["RID"].isin(MCI_patients_RID)]
adnimerge_MCI_patients.head(100)
# ### showing discrepancy when only selecting for DX_bl and not DX
# showing discrepancy between DX_bl and DX at VISCODE bl, selecting only the baseline visit
adnimerge_MCI_patients[(adnimerge_MCI_patients["VISCODE"] == "bl") & (adnimerge_MCI_patients["DX"] == "Dementia")]
# getting RIDs from these discrepancy cases
inconsequent_RID = np.unique(adnimerge_MCI_patients[(adnimerge_MCI_patients["VISCODE"] == "bl") & (adnimerge_MCI_patients["DX"] == "Dementia")]["RID"])
inconsequent_RID
# example of one discrepancy subject: full visit history of RID 332
adnimerge_MCI_patients[adnimerge_MCI_patients["RID"] == 332]
# ### selecting for DX_bl and DX
# selecting subject RIDs with DX_bl of EMCI or LMCI AND a concurrent DX of MCI
MCI_patients_RID2 = np.unique(adnimerge[(adnimerge["DX_bl"].isin(["EMCI", "LMCI"])) & (adnimerge["DX"] == "MCI")]["RID"])
MCI_patients_RID2
# all rows of the subjects selected above
adnimerge_MCI_patients2 = adnimerge[adnimerge["RID"].isin(MCI_patients_RID2)]
adnimerge_MCI_patients2.head()
# baseline rows still carrying a Dementia DX (checking the discrepancy is gone)
adnimerge_MCI_patients2[(adnimerge_MCI_patients2["VISCODE"] == "bl") & (adnimerge_MCI_patients2["DX"] == "Dementia")]
# check whether the earlier discrepancy case is removed
adnimerge_MCI_patients2[adnimerge_MCI_patients2["RID"] == 332]
# check out this particular case 1226
adnimerge_MCI_patients2[adnimerge_MCI_patients2["RID"] == 1226]
# ### selecting for DX_bl, DX and VISCODE bl
# selecting subject RIDs with DX_bl of EMCI/LMCI, DX of MCI, at the baseline visit code
MCI_patients_RID3 = np.unique(adnimerge[(adnimerge["DX_bl"].isin(["EMCI", "LMCI"])) & (adnimerge["DX"] == "MCI") & (adnimerge["VISCODE"] == "bl")]["RID"])
MCI_patients_RID3.shape
# all rows of the subjects selected above (RIDs screened at the baseline visit)
adnimerge_MCI_patients3 = adnimerge[adnimerge["RID"].isin(MCI_patients_RID3)]
adnimerge_MCI_patients3.head()
adnimerge_MCI_patients3[adnimerge_MCI_patients3["RID"] == 332]
# +
#showing discrepancy between DX_bl and DX at VICSODE bl, slecting only baseline visit
#also removed RIDs at baseline discrepancy
#adnimerge_MCI_patients3[adnimerge_MCI_patients3["DX"] == "Dementia"]
# -
# ### Comparing MCI selection of my protocol vs Massi
# my MCI selection (from MCI_patients_RID3)
adnimerge_MCI_patients3.head(10)
adnimerge_MCI_patients3.shape
# MCI selection of Massi (based on to_include_mci)
adnimerge2.head()
# difference of 35 records between the two selections
adnimerge2.shape
# find discrepancy RIDs as described above, and select these RIDs from the dataframe;
# comes to a total of 30 records, still 5 records difference
wrongRID_adnimerge2 = np.unique(adnimerge2[(adnimerge2["VISCODE"] == "bl") & (adnimerge2["DX"] == "Dementia")]["RID"])
adnimerge2[adnimerge2["RID"].isin(wrongRID_adnimerge2)]
# 6 RIDs are considered these discrepancy RIDs
wrongRID_adnimerge2
# find the RIDs missing from MCI_patients_RID3 (my selection) compared to
# to_include_mci (Massi's selection)
lostRID = [x for x in to_include_mci if x not in MCI_patients_RID3]
lostRID
# check the additional RIDs in the adnimerge2 dataframe:
# they carry NaN values for DX (which my approach does not select).
# Massi is probably eliminating these subjects in his second step, where he selects
# subjects with follow-up of m36 and higher, which these subjects don't have.
adnimerge2[adnimerge2["RID"].isin([2071, 2314, 4085, 4575, 4622])]
# ## Step 2 Selecting MCI subjects with follow-up available until at least m36
# (or m36 and higher?)
# VISCODE labels present in my selection
np.unique(adnimerge_MCI_patients3["VISCODE"].values)
# count rows per VISCODE label in my selection, to compare with Massi's counts
adnimerge_MCI_patients3["VISCODE"].groupby(adnimerge_MCI_patients3["VISCODE"].values).count()
# VISCODE labels present in Massi's selection
np.unique(adnimerge2["VISCODE"].values)
# count rows per VISCODE label in Massi's selection
adnimerge2["VISCODE"].groupby(adnimerge2["VISCODE"].values).count()
# code Massi
# INCLUDE ONLY SUBJECTS WITH INFO OF CONVERSION AT LEAST AT YEAR 3:
# any visit code of m36 or later counts as "followed up for 3 years"
to_include_time = ["m114","m126","m144","m102","m132","m120","m108","m90","m96","m84","m78","m66","m54","m42","m72","m60","m48","m36"]
to_include_subjects = np.unique(np.array(adnimerge2["RID"].loc[adnimerge2["VISCODE"].isin(to_include_time)]))
adnimerge3 = adnimerge2.loc[adnimerge2["RID"].isin(to_include_subjects)]
adnimerge3.head(50)
# OF THESE SUBJECTS, KEEP ONLY INFO OF CONVERSION BY YEAR 3
to_include_time_2 = ["bl","m03","m30","m36","m24","m18","m12","m06"]
adnimerge_selected_massi = adnimerge3.loc[adnimerge3["VISCODE"].isin(to_include_time_2)]
adnimerge_selected_massi.head(50)
# number of unique subjects remaining
np.count_nonzero(np.unique(adnimerge_selected_massi["RID"]))
# check what difference it makes to require m36 itself -- 25 subjects less
# (they have later visits but apparently no m36 record)
# INCLUDE ONLY SUBJECTS WITH VISCODE of m36 available
# (visitcode_required is declared here but not used in this cell)
visitcode_required = ["m114","m126","m144","m102","m132","m120","m108","m90","m96","m84","m78","m66","m54","m42","m72","m60","m48","m36"]
visitcode_m36 = ["m36"]
subjects_included = np.unique(adnimerge2[adnimerge2["VISCODE"].isin(visitcode_m36)]["RID"])
adnimerge4 = adnimerge2.loc[adnimerge2["RID"].isin(subjects_included)]
adnimerge4.head(50)
np.count_nonzero(np.unique(adnimerge4["RID"]))
# Now cleaning this code and applying this approach to the dataframe adnimerge_MCI_patients3
# INCLUDE ONLY SUBJECTS WITH INFO OF CONVERSION AT LEAST AT YEAR 3
visitcode_required = ["m114","m126","m144","m102","m132","m120","m108","m90","m96","m84","m78","m66","m54","m42","m72","m60","m48","m36"]
MCI_patients_RID4 = np.unique(adnimerge_MCI_patients3[adnimerge_MCI_patients3["VISCODE"].isin(visitcode_required)]["RID"])
adnimerge_MCI_patients4 = adnimerge_MCI_patients3.loc[adnimerge_MCI_patients3["RID"].isin(MCI_patients_RID4)]
adnimerge_MCI_patients4.head(20)
# OF THESE SUBJECTS, KEEP ONLY INFO OF CONVERSION BY YEAR 3
visitcode_selected = ["bl","m03","m30","m36","m24","m18","m12","m06"]
adnimerge_selected_nadine = adnimerge_MCI_patients4.loc[adnimerge_MCI_patients4["VISCODE"].isin(visitcode_selected)]
#adnimerge_selected_nadine.head(20)
np.count_nonzero(np.unique(adnimerge_selected_nadine["RID"]))
# RIDs kept by Massi's selection but dropped by mine
differ_RID = [x for x in to_include_subjects if x not in MCI_patients_RID4]
differ_RID
# check what difference it makes to require the m36 visit itself
# INCLUDE ONLY SUBJECTS WITH VISCODE of m36 available
# (visitcode_required is re-declared here but not used in this cell)
visitcode_required = ["m114","m126","m144","m102","m132","m120","m108","m90","m96","m84","m78","m66","m54","m42","m72","m60","m48","m36"]
visitcode_m36 = ["m36"]
MCI_patients_RID5 = np.unique(adnimerge_MCI_patients3[adnimerge_MCI_patients3["VISCODE"].isin(visitcode_m36)]["RID"])
adnimerge_MCI_patients5 = adnimerge_MCI_patients3.loc[adnimerge_MCI_patients3["RID"].isin(MCI_patients_RID5)]
adnimerge_MCI_patients5.head(50)
np.count_nonzero(np.unique(adnimerge_MCI_patients5["RID"]))
# OF THESE SUBJECTS, KEEP ONLY INFO OF CONVERSION BY YEAR 3
visitcode_selected = ["bl","m03","m30","m36","m24","m18","m12","m06"]
adnimerge_selected_nadine = adnimerge_MCI_patients5[adnimerge_MCI_patients5["VISCODE"].isin(visitcode_selected)]
adnimerge_selected_nadine.head(20)
#np.count_nonzero(np.unique(adnimerge_selected_nadine["RID"]))
# +
# Add a per-row "conversion" flag: "yes" on follow-up rows (m03..m36) where the
# diagnosis has become Dementia, "no" otherwise.
# The original cell was left as unfinished pseudo-code (a bare `for`, an `if`
# with no colon, and an unparenthesized `&` chain) and raised a SyntaxError;
# a vectorized boolean mask does the job without any loop.
visitcode_selected2 = ["m03","m30","m36","m24","m18","m12","m06"]
adnimerge_selected_massi["conversion"] = "no"
# NOTE: the comparisons must be parenthesized around `&`, which binds tighter
# than `==` in pandas boolean expressions.
converted_rows = (adnimerge_selected_massi["VISCODE"].isin(visitcode_selected2)
                  & (adnimerge_selected_massi["DX"] == "Dementia"))
adnimerge_selected_massi.loc[converted_rows, "conversion"] = "yes"
adnimerge_selected_massi.head()
# +
# code from Massi (cleaned up)
# RESHAPE ADNIMERGE AND ADD A CONVERSION VARIABLE AT THE END:
# reduce to one row per subject (the baseline visit) and label it
# CONVERSION_AT_3Y == "YES" if ANY of the subject's kept visits (bl..m36)
# carries a Dementia diagnosis, "NO" otherwise.
tempo = adnimerge_selected_massi.copy()
tempo.index = np.array(range(tempo.shape[0]))
adnimerge_selected_massi = adnimerge_selected_massi[adnimerge_selected_massi['VISCODE'] == "bl"]
adnimerge_selected_massi.index = np.array(range(adnimerge_selected_massi.shape[0]))
# Collect converter RIDs once (O(n)) instead of re-scanning tempo per subject,
# and assign via .loc: the original `df["col"].iloc[i] = ...` was a chained
# assignment, which pandas may apply to a temporary copy and silently drop.
converter_rids = set(tempo.loc[tempo["DX"] == "Dementia", "RID"])
adnimerge_selected_massi["CONVERSION_AT_3Y"] = "NO"
is_converter = adnimerge_selected_massi["RID"].isin(converter_rids)
adnimerge_selected_massi.loc[is_converter, "CONVERSION_AT_3Y"] = "YES"
adnimerge_selected_massi.head(50)
# -
| DATA_PREPARATION/PATIENT_SCREENING/data_preparation_patient_selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# [Table of contents](../toc.ipynb)
#
# # Conditions
#
# * Decisions during execution of a program are at some stage almost everywhere needed.
# * Conditions tell the computer: *do this if A happens, and do B if something else happens*.
# * This gives programmer control about program execution.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Control flow
#
# <img src="condition.png" alt="Condition" width="450">
# + [markdown] slideshow={"slide_type": "subslide"}
# * These conditional statements are supported:
# * `if`
# * `elif` (the Python replacement for else if in other languages)
# * `else`
# + slideshow={"slide_type": "subslide"}
# Python treats zero as False (any non-zero number is truthy)
condition = 0
if condition:
    print("Catched")
else:
    print("Not catched")
# + slideshow={"slide_type": "subslide"}
# Bools work of course perfectly
condition = True
if condition:
    print("Catched")
else:
    print("Not catched")
# + slideshow={"slide_type": "subslide"}
# Any arithmetic or comparison operator will work as well
signal = 5.5
if signal > 2.5:
    print("Signal overshoot")
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Exercise: Conditions (10 minutes)
#
# <img src="../_static/exercise.png" alt="Exercise" width="75" align="left">
#
# * Write a python function which checks if a number is positive, negative, or zero.
# * The script should print the state (pos, neg, or zero) and the number.
#
# **Hint**
# * The function syntax (we will cover them later) in Python is
#
# ```python
# def my_func(args):
# your code goes here
#
# ```
# * You might need `elif` for it.
# * The print command might be `print("Positive number", x)` and the like.
# +
#own solution
def my_func(number):
    """Classify *number* as positive, zero, or negative and print the verdict.

    The printed line is the label followed by the number itself,
    e.g. ``Positive number 4.2``.
    """
    if number == 0:
        label = 'Zero'
    elif number > 0:
        label = 'Positive number'
    else:
        label = 'Negative number'
    print(label, number)
# -
my_func(-15.3)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Solution
#
# Please find one possible solution in [`solution_condition.py`](solution_condition.py) file.
# + slideshow={"slide_type": "fragment"}
# Import the instructor's sample solution and exercise it on the three cases
# (zero, positive, negative).  The path append makes the module importable
# when the notebook is started from the repository root.
import sys
sys.path.append("01_basic-python")
from solution_condition import *
check_value(0)
# + slideshow={"slide_type": "fragment"}
check_value(11)
# + slideshow={"slide_type": "fragment"}
check_value(-3.14)
# + [markdown] slideshow={"slide_type": "slide"}
# # Loops
#
# * Loops are required to execute code multiple times.
# * The basic loop keywords are
# * `while`
# * `for`
# * Finer control inside loops is provided by
# * `break`
# * `continue`
# * `pass` statements
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Control flow
#
# <img src="loop.png" alt="Loop" width="450">
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Iterators
#
# * Some common code snippets to define loop iterators are
# * `in range(start, stop)`
# * `in`
# * `in enumerate` which enumerates many objects like lists and returns index and item
# + [markdown] slideshow={"slide_type": "subslide"}
# ## For loop
#
# * A for loop repeats a statement over a sequence.
# + slideshow={"slide_type": "fragment"}
# Iterate directly over a literal list
for i in [0, 2, 3]:
    print(i)
# + slideshow={"slide_type": "fragment"}
# range(start, stop) -- the stop value itself is excluded
for i in range(2, 5):
    print(i)
# + slideshow={"slide_type": "fragment"}
# enumerate yields (index, item) pairs
my_list = [1, 5, 22]
for idx, value in enumerate(my_list):
    print("Index=", idx, "Value=", value)
# + slideshow={"slide_type": "subslide"}
# Here a for loop with a break: leaves the loop early once i exceeds 5
for i in range(0, 99):
    print(i)
    if i > 5:
        print("Here the break")
        break
# + slideshow={"slide_type": "subslide"}
# You can also loop over dictionaries (here explicitly over the keys)
my_dict = {"power": 3.5, "speed": 120.3, "temperature": 23}
for field in my_dict.keys():
    print(field, "is adjusted to", my_dict[field])
# + slideshow={"slide_type": "subslide"}
# Also strings work well -- iteration is per character
my_string = "Hello World"
for letter in my_string:
    print(letter)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### For loop over two lists
#
# * You can use `zip` to loop over multiple lists.
# + slideshow={"slide_type": "fragment"}
list_one = [0, 3, 5]
list_two = [8, 7, -3]
# zip pairs up the i-th elements of both lists
for i, j in zip(list_one, list_two):
    print(i * j)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## While loop
#
# * Repeats a statement as long as condition is `True`.
# * `while` loops are used if you do not know how long the sequence should be repeated.
# * Condition is checked before code execution.
# * You have to make sure that your while loops do not continue to infinity.
# * `while` loops are barely used compared with `for` loops.
# + slideshow={"slide_type": "fragment"}
i = 0
while i < 4:
    i += 1  # do not forget to increment
    print(i)
# + slideshow={"slide_type": "subslide"}
"""Here an example for a while loop.
This loop will require a different number of runs
until the condition becomes True."""
import random
criterion = 9.5
sum_of_numbers = 0.0
idx = 0
# keep drawing uniform [0, 1) numbers until their running sum passes criterion
while sum_of_numbers < criterion:
    sum_of_numbers += random.random()
    idx += 1
print(idx, sum_of_numbers)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Exercise: For loop (5 minutes)
#
# <img src="../_static/exercise.png" alt="Exercise" width="75" align="left">
#
# * Write a `for` loop which iterates over the values in dictionary
#
# `{"force": [0, 10, 15, 30, 45], "distance": [2.5, 3.5, 6.0, -3.0, 8.1]}`
#
# and computes the product of the two fields.
# * Print the overall sum of these products.
#
# **Hint**
# * Think about if conversion of the dictionary makes sense.
# +
# own solution 1: index-based pairing of force and distance, accumulated in place
mydict = {"force": [0, 10, 15, 30, 45], "distance": [2.5, 3.5, 6.0, -3.0, 8.1]}
summe = 0
for position, force in enumerate(mydict['force']):
    summe += force * mydict['distance'][position]
print(summe)
# +
# own solution 2: lockstep iteration with zip, totalled via sum()
mydict = {"force": [0, 10, 15, 30, 45], "distance": [2.5, 3.5, 6.0, -3.0, 8.1]}
summe = sum(f * d for f, d in zip(mydict['force'], mydict['distance']))
print(summe)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Solution
#
# Please find one possible solution in [`solution_loop.py`](solution_loop.py) file.
# + slideshow={"slide_type": "fragment"}
# %run solution_loop
| 01_basic-python/03_conditions-and-loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="9S7BEa5tOfln" colab_type="text"
# #Reading the data
#
# + id="BVNrXu2CN7Q1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 650} executionInfo={"status": "ok", "timestamp": 1597819574359, "user_tz": -360, "elapsed": 1556, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="fe6dbe8e-f598-4f48-bfee-b7344e32c9b0"
# Read the census income dataset from the mounted Google Drive.
# NOTE(review): relies on pandas being imported as `pd` in an earlier cell.
data = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Income>=50k/census.csv')
data
# + [markdown] id="a0gUL97kOzC3" colab_type="text"
# # Data Exploration
#
# + id="--Mjx-erZ1L-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} executionInfo={"status": "ok", "timestamp": 1597801737870, "user_tz": -360, "elapsed": 2515, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="a83da51a-8708-4014-9eb0-d061b39aecfe"
# Summary statistics (count/mean/std/quartiles) for years of education.
data['education-num'].describe()
# + id="tQkJadJpOHO5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} executionInfo={"status": "ok", "timestamp": 1597765655239, "user_tz": -360, "elapsed": 2370, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="8a1a1544-73e8-45e5-d9a5-a008a49377d3"
# Class balance of the income target: absolute counts of each class and the
# share of ">50K" earners.  The original print messages were garbled
# ("earn more less than", "earn more more than") and are fixed here.
print("The shape of the data is: {}".format(data.income.shape))
a = data.income.value_counts()
# value_counts() sorts by frequency, so index 0 is the majority class (<=50K).
print("The number of people who earn less than 50K is: {}".format(a[0]))
print("The number of people who earn more than 50K is: {}".format(a[1]))
print("The percentage of people who earn more than 50K is: {}".format((a[1]*100)/(a[0]+a[1])))
# + id="YPb8RiqxQUwc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} executionInfo={"status": "ok", "timestamp": 1597765687254, "user_tz": -360, "elapsed": 2480, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="36acfaa0-dd37-4668-95ca-3510a802c48b"
# Column dtypes, non-null counts, and memory usage.
data.info()
# + id="mAuvAj3lQkbJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 293} executionInfo={"status": "ok", "timestamp": 1597765753459, "user_tz": -360, "elapsed": 2646, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="b5be63ab-cd1b-4c11-8853-138f1e5f8ce9"
# checking if there is any missing data (count of nulls per column)
data.isnull().sum()
# + [markdown] id="3mod2d56QrJg" colab_type="text"
# #Exploratory Data Analysis
# + id="HiXriSZQQqWI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} executionInfo={"status": "ok", "timestamp": 1597766681446, "user_tz": -360, "elapsed": 1365, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="b33f67a6-ab97-4bc5-93e3-3c398e5ae6f0"
# Summary statistics for age.
data['age'].describe()
# + id="TaPYulg3UN_i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} executionInfo={"status": "ok", "timestamp": 1597766708631, "user_tz": -360, "elapsed": 1310, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="3e7896bc-f480-43bf-b0ff-c9fcf5b7a7fd"
# Summary statistics for capital-gain.
data['capital-gain'].describe()
# + id="Kmu-VfBfUVO6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} executionInfo={"status": "ok", "timestamp": 1597766737795, "user_tz": -360, "elapsed": 1520, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="7a6d8176-de48-42df-b821-c89510e0d773"
# Summary statistics for capital-loss.
data['capital-loss'].describe()
# + id="pjf6sewzUeE8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 182} executionInfo={"status": "ok", "timestamp": 1597767360538, "user_tz": -360, "elapsed": 2036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="8590deb6-7e6e-4496-f3f2-8a35c33dcc09"
# Summary statistics for hours-per-week.
data['hours-per-week'].describe()
# + id="Mr-7uKZsXlRf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1597767822328, "user_tz": -360, "elapsed": 3180, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="d9e46371-9a16-4cb7-99d8-a68a2929b6d8"
# Distribution plot (histogram + KDE) of capital-gain.
# NOTE(review): the original comment mentioned capital loss too, but only
# capital-gain is plotted here.  sns.distplot is deprecated since seaborn
# 0.11 — sns.histplot(x, kde=True) is the modern equivalent.
import seaborn as sns
x = data['capital-gain']
sns.distplot(x)
# + id="A6_-4QnoYpw1" colab_type="code" colab={}
# Bin 'hours-per-week' into 8 ordinal buckets with pd.cut.
# NOTE(review): these bin edges (0..50000) look sized for capital-gain,
# not hours-per-week (whose values are at most ~99), so every row would
# land in the first bucket — confirm the intended column or edges.
bins = [0, 2500,5000,7500,10000,20000,30000,40000,50000]
labels =[1,2,3,4,5,6,7,8]
data['binned'] = pd.cut(data['hours-per-week'], bins,labels=labels)
# + id="6t_XKhrWoSrE" colab_type="code" colab={}
# Log-transform the heavily skewed monetary features so their
# distributions are more symmetric before scaling and modelling.
skewed = ['capital-gain', 'capital-loss']
# np.log1p computes log(1 + x) with better floating-point accuracy near
# zero than np.log(x + 1); both handle the many exact-zero entries.
data[skewed] = data[skewed].apply(np.log1p)
# + id="AaxFgHWNrNNA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 501} executionInfo={"status": "ok", "timestamp": 1597772888035, "user_tz": -360, "elapsed": 7194, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="192657ad-dcfe-41f4-98b7-7b9c82c54ab3"
# Histogram of age on a 10x7-inch figure.
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize =(10, 7))
ax.hist(data['age'])
# + id="gtwvS8xNpMg_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 501} executionInfo={"status": "ok", "timestamp": 1597772919568, "user_tz": -360, "elapsed": 1299, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="defe2ade-59e0-4588-d7a5-0c80853380bb"
# Histogram of education-num (years of education).
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize =(10, 7))
ax.hist(data['education-num'])
# + id="TC3tJzTPr-1e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 538} executionInfo={"status": "ok", "timestamp": 1597772939031, "user_tz": -360, "elapsed": 1300, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="9910ee17-17a1-4929-a371-dd1aa83e30dc"
# Histogram of capital-gain (after the log transform applied above).
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize =(10, 7))
ax.hist(data['capital-gain'])
# + id="aEIyU9LNsFLG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 501} executionInfo={"status": "ok", "timestamp": 1597772959651, "user_tz": -360, "elapsed": 1289, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="98224413-daa0-465c-8dbc-477d2023205f"
# Histogram of hours-per-week.
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize =(10, 7))
ax.hist(data['hours-per-week'])
# + id="rjDw5ViZsKEJ" colab_type="code" colab={}
# Min-max scale the numerical columns into [0, 1].
# NOTE(review): MinMaxScaler is not imported in this cell — presumably
# `from sklearn.preprocessing import MinMaxScaler` ran in an earlier cell;
# verify before re-running the notebook top to bottom.
scaler = MinMaxScaler()
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
# NOTE(review): pd.DataFrame(data=data) may share memory with `data`
# rather than deep-copy it — verify whether `data` is mutated as well.
features_log_minmax_transform = pd.DataFrame(data=data)
features_log_minmax_transform[numerical] = scaler.fit_transform(
    data[numerical])
# + id="poDCwgJ6uMBK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 650} executionInfo={"status": "ok", "timestamp": 1597797744514, "user_tz": -360, "elapsed": 908, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="b2c4d649-4b7b-43a8-e1b5-c546f4fa51a5"
# Display the transformed dataframe for a visual sanity check.
data
# + id="rJRO79UhLV0X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 164} executionInfo={"status": "ok", "timestamp": 1597797931422, "user_tz": -360, "elapsed": 1878, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="cf2cbc36-6c77-4254-9e67-0656dc4d0de5"
# Frequency of each workclass category (before label encoding).
data['workclass'].value_counts()
# + id="7v-KMQlqVjXo" colab_type="code" colab={}
# Integer-encode every categorical column, including the target 'income'.
# NOTE(review): a single LabelEncoder is re-fit for each column, so the
# fitted mapping of earlier columns is overwritten and not retained.
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for v in ['workclass', 'education_level','marital-status', 'occupation', 'relationship', 'race', 'sex','native-country','income']:
    data.loc[:,v] = le.fit_transform(data.loc[:,v])
# + id="IXH4tsFNL78i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 201} executionInfo={"status": "ok", "timestamp": 1597855636718, "user_tz": -360, "elapsed": 1289, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="51ea7158-bf75-4eaa-cd92-6885291c452e"
# Recover the category -> integer mapping from a fitted LabelEncoder.
# NOTE(review): 'department' is a column of the *employee* dataset loaded
# further below, not of this census dataframe — running this cell at this
# point would raise a KeyError; it likely belongs with the employee section.
le = LabelEncoder()
a = data['department']
le.fit(a)
integer_mapping = {l: i for i, l in enumerate(le.classes_)}
integer_mapping
# + id="zj2JXSeNsdy2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 513} executionInfo={"status": "ok", "timestamp": 1597774365008, "user_tz": -360, "elapsed": 1504, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="cc243824-b93e-417e-fd15-718ef6611bbf"
# Drop the already-encoded categorical columns from df and display it.
# NOTE(review): in top-to-bottom script order `df` is only assigned in the
# next cell (`df = data`), so this cell depends on out-of-order notebook
# execution.
df.drop(labels=['workclass', 'education_level','marital-status', 'occupation', 'relationship', 'race', 'sex'],axis=1,inplace=True)
df
# + id="lhlkkCJPUjBw" colab_type="code" colab={}
# Alias, not a copy: df and data refer to the same DataFrame object, so
# in-place changes to one are visible through the other.
df = data
# + id="yuZHrtF_x2NQ" colab_type="code" colab={}
# Separate the (label-encoded) target column 'income'.
y = df['income']
# + id="AZNzjuOmx7cu" colab_type="code" colab={}
# Drop the target from the feature matrix (in place, so `data` changes too).
df.drop(['income'],axis=1,inplace=True)
# + id="B-RxvM8CxqK-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} executionInfo={"status": "ok", "timestamp": 1597819640715, "user_tz": -360, "elapsed": 965, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="f1144a2a-0a36-4107-ad54-97fe4f4cdd84"
# 80/20 train/test split with a fixed seed for reproducibility.
# NOTE(review): train_test_split is only imported in a later cell — this
# cell relies on it already being in scope.
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size = 0.2, random_state = 0)
print("Training set has {} samples.".format(X_train.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
# + id="5Ue5Xad4yuRm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 274} executionInfo={"status": "ok", "timestamp": 1597783579600, "user_tz": -360, "elapsed": 5574, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="9c1d8f7c-43bf-4257-f4cb-9168058490d1"
# !pip install catboost
# + id="QTQRhGYPyPzo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1597784084356, "user_tz": -360, "elapsed": 18203, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="6e880e78-3a63-42ea-8bca-9ad95f2d9e4b"
# Train a CatBoost classifier with default hyper-parameters; the binned
# hours-per-week column is declared as categorical.
from catboost import CatBoostClassifier
model = CatBoostClassifier()
model.fit(X_train, y_train,cat_features=['binned'])
# + id="T6qZ2pdqzJ6n" colab_type="code" colab={}
# Predict labels for the held-out test set with the CatBoost model.
y_pred=model.predict(X_test)
# + id="FVamsVzwzTVG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597784097431, "user_tz": -360, "elapsed": 1126, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="2cd9c000-7e6f-40f7-ec2d-50c12d226eb4"
# Evaluate the CatBoost predictions on the held-out test set.
from sklearn.metrics import accuracy_score
# accuracy_score is symmetric in (y_true, y_pred); compute it once and
# reuse the value instead of calling it a second time inside print().
accuracy=accuracy_score(y_test, y_pred)
print('Catboost Model accuracy score: {0:0.4f}'.format(accuracy))
# + id="V4yUG9StzV3-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597852450485, "user_tz": -360, "elapsed": 2927, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="9c56f700-250e-44a2-b4ab-6e7a48bdd791"
# Train a LightGBM classifier with default hyper-parameters.
import lightgbm as lgb
clf = lgb.LGBMClassifier()
clf.fit(X_train, y_train)
# + id="rmorZLoszf8W" colab_type="code" colab={}
# Predict labels for the held-out test set with the LightGBM model.
y_pred=clf.predict(X_test)
# + id="lJiRefZRzjXm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597852460901, "user_tz": -360, "elapsed": 2718, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="735a7879-8b8a-4e78-d693-a7125ed7bf00"
# Evaluate the LightGBM predictions on the held-out test set.
from sklearn.metrics import accuracy_score
# Compute the score once and reuse it; the original recomputed it inside
# print() and mislabelled the model as "Catboost" even though `clf` is a
# LightGBM classifier.
accuracy=accuracy_score(y_test, y_pred)
print('LightGBM Model accuracy score: {0:0.4f}'.format(accuracy))
# + id="LQyKUM6OzmG2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 201} executionInfo={"status": "ok", "timestamp": 1597784385634, "user_tz": -360, "elapsed": 1255, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="655f8823-374f-40b7-9be3-7f56a56dd877"
# Train a small gradient-boosting classifier: 20 trees of depth 2,
# at most 2 features per split, learning rate 0.05, fixed seed.
from sklearn.ensemble import GradientBoostingClassifier
gbc = GradientBoostingClassifier(n_estimators=20, learning_rate=0.05, max_features=2, max_depth=2, random_state=0)
gbc.fit(X_train, y_train)
# + id="UBj26Ekw0aQK" colab_type="code" colab={}
# Predict labels for the held-out test set with the gradient-boosting model.
y_pred=gbc.predict(X_test)
# + id="knzlco0j0dbK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597784395918, "user_tz": -360, "elapsed": 1207, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="6aa5f73e-0aa3-47d9-b2a8-daa3f40d2060"
# Evaluate the gradient-boosting predictions on the held-out test set.
from sklearn.metrics import accuracy_score
# Compute the score once and reuse it; the original recomputed it inside
# print() and mislabelled the model as "Catboost" even though `gbc` is a
# scikit-learn GradientBoostingClassifier.
accuracy=accuracy_score(y_test, y_pred)
print('Gradient Boosting Model accuracy score: {0:0.4f}'.format(accuracy))
# + id="JHEllQiy0g60" colab_type="code" colab={}
# Serialize a trained model to disk with pickle.
# NOTE(review): the object dumped is `model` (CatBoost) while the filename
# 'clf.sav' suggests `clf` (LightGBM) — confirm which model should be saved.
# NOTE(review): the handle from open() is never closed explicitly; a
# `with open(filename, 'wb') as f:` block would be safer.
import pickle
filename = 'clf.sav'
pickle.dump(model, open(filename, 'wb'))
# + id="WKUWzkWN0Vrb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597778252431, "user_tz": -360, "elapsed": 4807, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="4b0f625b-0c4a-4351-f053-b5b69cf84050"
import os
# !ls
# + id="hvi0dXcRmtUH" colab_type="code" colab={}
# + id="dZ9JqzvYysrK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 441} executionInfo={"status": "ok", "timestamp": 1597784333944, "user_tz": -360, "elapsed": 1213, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="6037ae06-b5d2-4e55-af88-3d7e7f087cff"
# Display the final census feature matrix.
df
# + [markdown] id="qowhamJ1mtoc" colab_type="text"
# # Employee Data
#
# + id="3AA8cXOnyO8T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 633} executionInfo={"status": "ok", "timestamp": 1597855440096, "user_tz": -360, "elapsed": 1795, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="8eb6a6a4-1008-45a8-9e90-1b6a25ec4b2c"
# Load the bundled 'employee' dataset from pycaret and display it.
# Note: this rebinds `data`; `df` still references the census DataFrame.
from pycaret.datasets import get_data
data = get_data('employee')
data
# + id="YeBYVEUiaKK7" colab_type="code" colab={}
# Separate the target column 'left'.
y = data['left']
# + id="lkPW62aiaPzo" colab_type="code" colab={}
# Drop the target from the employee feature matrix (in place).
data.drop(['left'],axis=1,inplace=True)
# + id="LvhVPvP0nGA8" colab_type="code" colab={}
# Integer-encode the two categorical columns of the employee data.
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for v in ['department', 'salary']:
    data.loc[:,v] = le.fit_transform(data.loc[:,v])
# + id="jwEPCCIFasYX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} executionInfo={"status": "ok", "timestamp": 1597855562433, "user_tz": -360, "elapsed": 1355, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="5f0b71c9-762b-4cb0-e0f3-721230d59928"
# 80/20 train/test split of the employee data with a fixed seed.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size = 0.2, random_state = 0)
print("Training set has {} samples.".format(X_train.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
# + id="Pe6F5OFVavKO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597856193315, "user_tz": -360, "elapsed": 1354, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="94f0e1f6-9210-489b-9051-9c6aad325c1d"
# Train a LightGBM classifier with default hyper-parameters on the
# employee data.
import lightgbm as lgb
clf = lgb.LGBMClassifier()
clf.fit(X_train, y_train)
# + id="c6UNheIVnWGo" colab_type="code" colab={}
# Predict labels for the held-out employee test set.
y_pred=clf.predict(X_test)
# + id="tn2a7rp4nY34" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597855615056, "user_tz": -360, "elapsed": 1160, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="881df443-6fc1-41f0-c90d-0924363f1acb"
# Evaluate the LightGBM predictions on the held-out employee test set.
from sklearn.metrics import accuracy_score
# Compute the score once and reuse it; the original recomputed it inside
# print() and mislabelled the model as "Catboost" even though `clf` is a
# LightGBM classifier.
accuracy=accuracy_score(y_test, y_pred)
print('LightGBM Model accuracy score: {0:0.4f}'.format(accuracy))
# + id="HZcX6MhvncMe" colab_type="code" colab={}
# Persist the trained LightGBM model to Google Drive with pickle.
import pickle
with open('/content/drive/My Drive/Colab Notebooks/Income>=50k/model.pkl', 'wb') as f:
    pickle.dump(clf,f)
# + id="5cwSbBUh7QPJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1597877874918, "user_tz": -360, "elapsed": 2269, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQsCiZ9sTcl01oFBy4y41QBhNLIaNalyju4jEEFA=s64", "userId": "10283689024231170753"}} outputId="1c41f8ca-2c85-41af-fac8-62166425e37c"
# Print the installed LightGBM version (useful when unpickling the model
# in another environment).
import lightgbm as lgb
print(lgb.__version__)
| creating_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 전체 classification 결과를 환자 단위, 파트 별 단위로 묶어서 결과 만드는 코드
import os
import numpy as np
import pandas
# ## ENV SETTING
# +
# Experiment configuration: labelling schemes, learning-rate folder tag,
# and number of cross-validation folds.
label_type_3cls = '3classes'
label_type_5cls = '5classes'
learning_rate = '5e-5'
num_fold = 5
# Ground-truth label locations (the 3-class scheme is the reference).
true_dataset_root = 'E:/Thesis_research/Database/Medical/Dental_directory_dataset'
true_lbl_dir = os.path.join(true_dataset_root, 'ClassificationClass',label_type_3cls)
# Raw per-network prediction locations for both labelling schemes.
pred_root = f'E:/Thesis_research/results_materials/Dental/raw_prediction_results/{learning_rate}'
exp_dir_3cls = os.path.join(pred_root, label_type_3cls)
exp_dir_5cls = os.path.join(pred_root, label_type_5cls)
# -
# ## PREDICTION SETTING AND VOTING
#
# * 각 네트워크 별로 4개의 part에 대한 prediction 중 unique 병록번호에 해당하는 prediction들을 모아서 voting해서 true와 비교!
# +
# Patient-level voting evaluation: for each CV fold, gather the four
# tooth-part predictions (from both the 3-class network and the 5-class
# network collapsed onto 3 classes) per patient, majority-vote the eight
# predictions, and score against the true 3-class label.
from collections import Counter
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
part_list = [16, 26, 36, 46]
patient_wise_overall_acc_lst = []
confusion_matrix_metric_tot_lst = []
for i_fold_iter in range(num_fold):
    print()
    print(f'Current fold: {i_fold_iter +1 }')
    # ## TRUE LABEL SETTING
    true_imageset_path = os.path.join(true_dataset_root,'ImageSets','Classification','eval' + str(i_fold_iter+1) + '.txt')
    with open(true_imageset_path, 'r') as f:
        eval_img_list = f.read().split('\n')
    person_num_list =[]
    for i_eval_img in eval_img_list:
        if i_eval_img == '':
            continue
        eval_img_info = i_eval_img.split('_')
        # NOTE(review): eval_img_info[1] is read before the len()>1 guard
        # below, so a non-empty line without '_' would raise IndexError
        # here — confirm the input format guarantees an underscore.
        age_person_num = eval_img_info[0] + '_' + eval_img_info[1] # e.g. '20_2392392' because there are three miss labeled images file name
        if len(eval_img_info)>1: # skip blank line
            person_num_list.append(age_person_num)
    person_num_unique_list, unique_idx = np.unique(np.array(person_num_list), return_index=True)
    person_num_perdiction_all_list = []
    true_lbl_unique = []
    # Prediction directories of the two networks for this fold.
    pred_dir_3cls = os.path.join(pred_root,label_type_3cls, f'resnet152-TL_aug-{label_type_3cls}-fold{i_fold_iter}','eval_result_resnet152_cls_best_model', 'prediction_class')
    pred_result_list_3cls = sorted(os.listdir(pred_dir_3cls))
    pred_dir_5cls = os.path.join(pred_root,label_type_5cls, f'resnet152-TL_aug-{label_type_5cls}-fold{i_fold_iter}','eval_result_resnet152_cls_best_model', 'prediction_class')
    for i_iter, i_person_num_unique in enumerate(person_num_unique_list):
        pred_result_person_num = [s for s in pred_result_list_3cls if i_person_num_unique in s]
        # Fail fast if any of the four tooth parts is missing for a patient.
        if not len(pred_result_person_num) == 4 :
            print('Each person must have four teeth parts')
            raise AssertionError
        # True-label setting: all four part files must carry the same label.
        true_lbl = 0
        for i, i_pred in enumerate(pred_result_person_num):
            true_lbl_path = os.path.join(true_lbl_dir, i_pred)
            with open(true_lbl_path,'r') as f:
                lbl = int(f.read())
            if i==0:
                true_lbl = lbl
            else:
                if true_lbl != lbl: # check all patients label is the same each other
                    raise AssertionError
                else:
                    true_lbl = lbl
        true_lbl_unique.append(true_lbl)
        person_num_prediction = []
        for i_pred in pred_result_person_num:
            pred_txt_nameOnly = os.path.splitext(i_pred)[0]
            pred_name_info = pred_txt_nameOnly.split('_')
            part_num = int(pred_name_info[-1])
            pred_result_3cls_path = os.path.join(pred_dir_3cls, i_pred)
            with open(pred_result_3cls_path, 'r') as f:
                pred_lbl_3cls = int(f.read())
            person_num_prediction.append(pred_lbl_3cls)
            pred_result_5cls_path = os.path.join(pred_dir_5cls, i_pred)
            with open(pred_result_5cls_path, 'r') as f:
                pred_lbl_5cls = int(f.read())
            # Collapse the 5-class prediction onto the 3-class scheme:
            # classes 1-3 -> 1, class 4 -> 2 (class 0 stays 0).
            if pred_lbl_5cls in [1,2,3]:
                pred_lbl_5cls = 1
            elif pred_lbl_5cls == 4:
                pred_lbl_5cls = 2
            person_num_prediction.append(pred_lbl_5cls)
        person_num_perdiction_all_list.append(person_num_prediction)
    # Majority vote over the 8 predictions (4 parts x 2 networks) per patient.
    network_final_pred_list = []
    for i_person_num_pred in person_num_perdiction_all_list:
        most_common_pred, num_most_common_pred = Counter(i_person_num_pred).most_common(1)[0] # 4, 6 times
        network_final_pred_list.append(most_common_pred)
    # Score this fold and keep the metrics for cross-fold aggregation.
    confusion_matrix_metric = confusion_matrix(true_lbl_unique, network_final_pred_list)
    print('Confusion matrix: ')
    print(confusion_matrix_metric)
    confusion_matrix_metric_tot_lst.append(confusion_matrix_metric)
    overall_acc_metric = accuracy_score(true_lbl_unique, network_final_pred_list)
    print('Overall accuracy = ', overall_acc_metric)
    patient_wise_overall_acc_lst.append(overall_acc_metric)
# -
# # Patient wise cv 결과 정리
print('Confusion matrix: ')
# Average the per-fold confusion matrices element-wise.
# NOTE(review): np.array(...) assumes every fold's confusion matrix has
# the same shape (all classes present in each fold) — confirm.
confusion_matrix_metric_tot = np.array(confusion_matrix_metric_tot_lst)
confusion_matrix_metric_avg = np.mean(confusion_matrix_metric_tot, axis = 0)
print(confusion_matrix_metric_avg)
print()
print('Overall Accuracy: ')
# Mean and standard error of the patient-wise accuracy across folds.
patient_wise_avg_acc = np.mean(patient_wise_overall_acc_lst)
patient_wise_std_error= np.std(patient_wise_overall_acc_lst) / np.sqrt(len(patient_wise_overall_acc_lst))
print('acc: ',patient_wise_avg_acc)
print('std_error: ', patient_wise_std_error)
print()
print('Group-wise accuracy: ')
# Per-age-group recall: diagonal / row sum of each fold's confusion
# matrix, then mean and standard error over folds.
group_wise_acc_dict={}
for i_group in range(confusion_matrix_metric_tot.shape[1]):
    group_wise_acc_dict[i_group] = []
    for i_fold in range(confusion_matrix_metric_tot.shape[0]):
        confusion_matrix_cur = confusion_matrix_metric_tot[i_fold]
        group_wise_acc = confusion_matrix_cur[i_group, i_group] / np.sum(confusion_matrix_cur[i_group, :])
        group_wise_acc_dict[i_group].append(group_wise_acc)
    group_wise_acc_mean = np.mean(group_wise_acc_dict[i_group])
    group_wise_acc_std_error = np.std(group_wise_acc_dict[i_group]) / np.sqrt(len(group_wise_acc_dict[i_group]))
    print('Age group ' + str(i_group+1))
    print('acc: ',group_wise_acc_mean)
    print('std_error: ',group_wise_acc_std_error)
    print()
# # 3cls part-wise와 비교
# +
# Two-sample t-tests: is the (3cls + 5cls) voting accuracy significantly
# different from the plain 3-class results?
from scipy.stats import ttest_ind
print('====== patient-wise =====')
print('(3cls + 5cls) voting vs 3cls patient-wise acc')
patient_wise_acc_lst_3cls = np.load(os.path.join(exp_dir_3cls,'3cls_patient_wise_acc_lst.npy'))
ttest,pval = ttest_ind(patient_wise_overall_acc_lst,patient_wise_acc_lst_3cls)
print("p-value",pval)
print()
print('===== part-wise ======')
# Compare against each individual tooth part's per-fold accuracy list.
for i_part in part_list:
    print('(3cls + 5cls) voting vs 3cls part ' + str(i_part) + ' acc')
    part_wise_name = os.path.join(exp_dir_3cls, '3cls_part'+str(i_part)+'_acc_lst.npy')
    part_wise_acc_lst = np.load(part_wise_name)
    ttest,pval = ttest_ind(patient_wise_overall_acc_lst,part_wise_acc_lst)
    print("p-value",pval)
    print()
# -
# # 5cls part-wise와 비교 (p-value)
# +
# Two-sample t-tests against the 5-class results.
# NOTE(review): equal_var=False (Welch's t-test) is used here but not in
# the 3-class comparison above — confirm the inconsistency is intentional.
print('====== patient-wise =====')
print('(3cls + 5cls) voting vs 5cls patient-wise acc')
patient_wise_acc_lst_5cls = np.load(os.path.join(exp_dir_5cls, '5cls_patient_wise_acc_lst.npy'))
ttest,pval = ttest_ind(patient_wise_overall_acc_lst,patient_wise_acc_lst_5cls, equal_var=False)
print("p-value",pval)
print()
print('===== part-wise ======')
for i_part in part_list:
    print('(3cls + 5cls) voting vs 5cls part ' + str(i_part) + ' acc')
    part_wise_name = os.path.join(exp_dir_5cls, '5cls_part'+str(i_part)+'_acc_lst.npy')
    part_wise_acc_lst = np.load(part_wise_name)
    ttest,pval = ttest_ind(patient_wise_overall_acc_lst,part_wise_acc_lst, equal_var=False)
    print("p-value",pval)
    print()
# -
# ## Compare 3 cls and 5 cls
# Compare the 3-class and 5-class voting accuracies directly.
print('====== patient-wise =====')
print('3cls voting vs 5cls voting acc')
patient_wise_acc_lst_3cls = np.load(os.path.join(exp_dir_3cls, '3cls_patient_wise_acc_lst.npy'))
patient_wise_acc_lst_5cls = np.load(os.path.join(exp_dir_5cls, '5cls_patient_wise_acc_lst.npy'))
ttest,pval = ttest_ind(patient_wise_acc_lst_5cls,patient_wise_acc_lst_3cls)
print("p-value",pval)
print()
| jupyter_notebooks/[Dental]3class+5class voting evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Interactive Recommendation System with Word Embeddings using Word2Vec, Plotly, and NetworkX
# Welcome to this project! We will be exploring how to build a model that can understand words in a mathematical way, such that words with similar meanings that share certain characteristics belong close to each other in vector space. That's a fancy way of saying that the mathematical representations for words that have some similar meaning are close to each other, and words that don't share a lot of meaning are further apart. We will show that this model can be adapted to be used in a recommendation system, or as a discovery tool (i.e. we don't have this in stock, maybe you'd like this alternative?).
#
# ## Prerequisites
# - Basic-to-intermediate level understanding of Python (list comprehensions, dictionary structure, etc.).
# - Understanding of basic mathematical principles (what a vector is, for example).
# - Basic understanding of machine learning, although we will go over and introduce a lot at a beginner-level.
#
# ## Project Breakdown
# - Task 1: Introduction (you are here)
# - Task 2: Exploratory Data Analysis and Preprocessing
# - Task 3: Word2Vec with Gensim
# - Task 4: Exploring Results
# - Task 5: Building and Visualizing Interactive Network Graph
#
# ## Task 1: Introduction
# We will be using a recipe dataset, to train a model to learn the interactions between different kind of ingredients and available products in a supermarket. This model can then be implemented in a number of different ways, for example,
# - to recommend products based on items added to cart;
# - to offer alternative products based on stock;
# - to discover new products to create different recipes.
#
#
# ### Sample
# *<NAME> Tip Roast*
#
# - 1 1/4 tablespoons paprika
# - 1 tablespoon kosher salt
# - 1 teaspoon garlic powder
# - 1/2 teaspoon ground black pepper
# - 1/2 teaspoon onion powder
# - 1/2 teaspoon ground cayenne pepper
# - 1/2 teaspoon dried oregano
# - 1/2 teaspoon dried thyme
# - 2 tablespoons olive oil
# - 1 (3 pound) sirloin tip roast
#
# In a small bowl, mix the paprika, kosher salt, garlic powder, black pepper, onion powder, cayenne pepper, oregano, and thyme. Stir in the olive oil, and allow the mixture to sit about 15 minutes.
#
# Preheat oven to 350 degrees F (175 degrees C). Line a baking sheet with aluminum foil.
#
# Place the roast on the prepared baking sheet, and cover on all sides with the spice mixture.
#
# Roast 1 hour in the preheated oven, or to a minimum internal temperature of 145 degrees F (63 degrees C). Let sit 15 minutes before slicing.
#
# ### Let's take a look at a demo!
#
# [Click here!](Data/demo.html)
| interactive-word-embeddings-using-word2vec-and-plotly/Task1_Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="4mB_sFildiDh"
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_07_2_Keras_gan.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="303iHntmdiDj"
# # T81-558: Applications of Deep Neural Networks
# **Module 7: Generative Adversarial Networks**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# + [markdown] colab_type="text" id="d5n2iv9udiDk"
# # Module 7 Material
#
# * Part 7.1: Introduction to GANS for Image and Data Generation [[Video]](https://www.youtube.com/watch?v=0QnCH6tlZgc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_07_1_gan_intro.ipynb)
# * **Part 7.2: Implementing a GAN in Keras** [[Video]](https://www.youtube.com/watch?v=T-MCludVNn4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_07_2_Keras_gan.ipynb)
# * Part 7.3: Face Generation with StyleGAN and Python [[Video]](https://www.youtube.com/watch?v=Wwwyr7cOBlU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_07_3_style_gan.ipynb)
# * Part 7.4: GANS for Semi-Supervised Learning in Keras [[Video]](https://www.youtube.com/watch?v=ZPewmEu7644&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_07_4_gan_semi_supervised.ipynb)
# * Part 7.5: An Overview of GAN Research [[Video]](https://www.youtube.com/watch?v=cvCvZKvlvq4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_07_5_gan_research.ipynb)
#
# + colab={} colab_type="code" id="zgMWBuf61OmL"
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Render an elapsed duration in seconds as "H:MM:SS.ss"."""
    hours = int(sec_elapsed / 3600)
    minutes = int((sec_elapsed % 3600) / 60)
    seconds = sec_elapsed % 60
    return f"{hours}:{minutes:>02}:{seconds:>05.2f}"
# + [markdown] colab_type="text" id="1yqlUD4sdiDk"
# # Part 7.2: Implementing DCGANs in Keras
#
# Paper that described the type of DCGAN that we will create in this module. [[Cite:radford2015unsupervised]](https://arxiv.org/abs/1511.06434) This paper implements a DCGAN as follows:
#
# * No pre-processing was applied to training images besides scaling to the range of the tanh activation function [-1, 1].
# * All models were trained with mini-batch stochastic gradient descent (SGD) with a mini-batch size of 128.
# * All weights were initialized from a zero-centered Normal distribution with standard deviation 0.02.
# * In the LeakyReLU, the slope of the leak was set to 0.2 in all models.
# * we used the Adam optimizer(Kingma & Ba, 2014) with tuned hyperparameters. We found the suggested learning rate of 0.001, to be too high, using 0.0002 instead.
# * Additionally, we found leaving the momentum term $\beta{1}$ at the suggested value of 0.9 resulted in training oscillation and instability while reducing it to 0.5 helped stabilize training.
#
# The paper also provides the following architecture guidelines for stable Deep Convolutional GANs:
#
# * Replace any pooling layers with strided convolutions (discriminator) and fractional-strided convolutions (generator).
# * Use batchnorm in both the generator and the discriminator.
# * Remove fully connected hidden layers for deeper architectures.
# * Use ReLU activation in generator for all layers except for the output, which uses Tanh.
# * Use LeakyReLU activation in the discriminator for all layers.
#
# While creating the material for this module I used a number of Internet resources, some of the most helpful were:
#
# * [Deep Convolutional Generative Adversarial Network (TensorFlow 2.0 example code)](https://www.tensorflow.org/tutorials/generative/dcgan)
# * [Keep Calm and train a GAN. Pitfalls and Tips on training Generative Adversarial Networks](https://medium.com/@utk.is.here/keep-calm-and-train-a-gan-pitfalls-and-tips-on-training-generative-adversarial-networks-edd529764aa9)
# * [Collection of Keras implementations of Generative Adversarial Networks GANs](https://github.com/eriklindernoren/Keras-GAN)
# * [dcgan-facegenerator](https://github.com/platonovsimeon/dcgan-facegenerator), [Semi-Paywalled Article by GitHub Author](https://medium.com/datadriveninvestor/generating-human-faces-with-keras-3ccd54c17f16)
#
# The program created next will generate faces similar to these. While these faces are not perfect, they demonstrate how we can construct and train a GAN on or own. Later we will see how to import very advanced weights from nVidia to produce high resolution, realistic looking faces. Figure 7.GAN-GRID shows images from GAN training.
#
# **Figure 7.GAN-GRID: GAN Neural Network Training**
# 
#
# As discussed in the previous module, the GAN is made up of two different neural networks: the discriminator and the generator. The generator generates the images, while the discriminator detects if a face is real or was generated. These two neural networks work as shown in Figure 7.GAN-EVAL:
#
# **Figure 7.GAN-EVAL: GAN Evaluation**
# 
#
# The discriminator accepts an image as its input and produces a number that is the probability of the input image being real. The generator accepts a random seed vector and generates an image from that random vector seed. An unlimited number of new images can be created by providing additional seeds.
# + [markdown] colab_type="text" id="SpCjlQyEdiDo"
# I suggest running this code with a GPU, it will be very slow on a CPU alone. The following code mounts your Google drive for use with Google CoLab. If you are not using CoLab, the following code will not work.
# + colab={"base_uri": "https://localhost:8080/", "height": 156} colab_type="code" id="Y8_-1h5ddiDp" outputId="c623348e-940c-48b5-be88-c5998b0e8314"
# Mount Google Drive when running inside Google CoLab.  Catch only
# ImportError — google.colab is simply absent outside CoLab — because a
# bare `except:` would also hide real failures (e.g. an aborted Drive
# authorization) behind a misleading "not using CoLab" message.
try:
    from google.colab import drive
    drive.mount('/content/drive', force_remount=True)
    COLAB = True
    print("Note: using Google CoLab")
    # %tensorflow_version 2.x
except ImportError:
    print("Note: not using Google CoLab")
    COLAB = False
# + [markdown] colab_type="text" id="BeVAWGHOdiDl"
# The following packages will be used to implement a basic GAN system in Python/Keras.
# + colab={} colab_type="code" id="KubxTY1mdiDm"
import tensorflow as tf
from tensorflow.keras.layers import Input, Reshape, Dropout, Dense
from tensorflow.keras.layers import Flatten, BatchNormalization
from tensorflow.keras.layers import Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import Adam
import numpy as np
from PIL import Image
from tqdm import tqdm
import os
import time
import matplotlib.pyplot as plt
# + [markdown] colab_type="text" id="X9uwIRYJdiDr"
# These are the constants that define how the GANs will be created for this example. The higher the resolution, the more memory that will be needed. Higher resolution will also result in longer run times. For Google CoLab (with GPU) 128x128 resolution is as high as can be used (due to memory). Note that the resolution is specified as a multiple of 32. So **GENERATE_RES** of 1 is 32, 2 is 64, etc.
#
# To run this you will need training data. The training data can be any collection of images. I suggest using training data from the following two locations. Simply unzip and combine to a common directory. This directory should be uploaded to Google Drive (if you are using CoLab). The constant **DATA_PATH** defines where these images are stored.
#
# The source data (faces) used in this module can be found here:
#
# * [Kaggle Faces Data New](https://www.kaggle.com/gasgallo/faces-data-new)
# * [Kaggle Lag Dataset: Dataset of faces, from more than 1k different subjects](https://www.kaggle.com/gasgallo/lag-dataset)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="tb_XblE7diDr" outputId="78debcc4-f20c-4578-ebe6-91b40d8b6f3f"
# Generation resolution - Must be square
# Training data is also scaled to this.
# Note GENERATE_RES 4 or higher
# will blow Google CoLab's memory and have not
# been tested extensively.
GENERATE_RES = 3 # Generation resolution factor
# (1=32, 2=64, 3=96, 4=128, etc.)
GENERATE_SQUARE = 32 * GENERATE_RES # rows/cols (should be square)
IMAGE_CHANNELS = 3  # RGB

# Preview image grid: rows x cols of sample faces, separated by a
# PREVIEW_MARGIN-pixel gutter (see save_images below).
PREVIEW_ROWS = 4
PREVIEW_COLS = 7
PREVIEW_MARGIN = 16

# Size vector to generate images from
SEED_SIZE = 100

# Configuration
DATA_PATH = '/content/drive/My Drive/projects/faces'
EPOCHS = 50
BATCH_SIZE = 32
BUFFER_SIZE = 60000  # shuffle buffer size for the tf.data pipeline

print(f"Will generate {GENERATE_SQUARE}px square images.")
# + [markdown] colab_type="text" id="oDTfFQjTdiDu"
# Next we will load and preprocess the images. This can take awhile. Google CoLab took around an hour to process. Because of this we store the processed file as a binary. This way we can simply reload the processed training data and quickly use it. It is most efficient to only perform this operation once. The dimensions of the image are encoded into the filename of the binary file because we need to regenerate it if these change.
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="dJ69ALfSdiDv" outputId="59daec8c-7d81-4764-ff40-b9c4931011eb"
# Image set has 11,682 images. Can take over an hour
# for initial preprocessing.
# Because of this time needed, save a Numpy preprocessed file.
# Note, that file is large enough to cause problems for
# some versions of Pickle,
# so Numpy binary files are used.
training_binary_path = os.path.join(DATA_PATH,
        f'training_data_{GENERATE_SQUARE}_{GENERATE_SQUARE}.npy')

print(f"Looking for file: {training_binary_path}")

if not os.path.isfile(training_binary_path):
    start = time.time()
    print("Loading training images...")

    training_data = []
    faces_path = os.path.join(DATA_PATH,'face_images')

    for filename in tqdm(os.listdir(faces_path)):
        path = os.path.join(faces_path,filename)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer
        # Pillow needs Image.LANCZOS here — confirm the pinned version.
        image = Image.open(path).resize((GENERATE_SQUARE,
            GENERATE_SQUARE),Image.ANTIALIAS)
        training_data.append(np.asarray(image))

    # Stack into (N, H, W, C) and rescale pixels from [0, 255] to
    # [-1, 1] to match the generator's tanh output range.
    training_data = np.reshape(training_data,(-1,GENERATE_SQUARE,
        GENERATE_SQUARE,IMAGE_CHANNELS))
    training_data = training_data.astype(np.float32)
    training_data = training_data / 127.5 - 1.

    print("Saving training image binary...")
    np.save(training_binary_path,training_data)
    elapsed = time.time()-start
    # `hms_string` is assumed to be defined earlier in this notebook —
    # it is not visible in this chunk.
    print (f'Image preprocess time: {hms_string(elapsed)}')
else:
    print("Loading previous training pickle...")
    training_data = np.load(training_binary_path)
# + [markdown] colab_type="text" id="Y9kO_iSRyixQ"
# We will use a TensorFlow **Dataset** object to actually hold the images. This allows the data to be quickly shuffled and divided into the appropriate batch sizes for training.
# + colab={} colab_type="code" id="BXl0JohJBx69"
# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(training_data).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
# + [markdown] colab_type="text" id="2dATyXqQdiDw"
# The code below creates the generator and discriminator.
# + [markdown] colab_type="text" id="zB_aX4ChdiD0"
# Next we actually build the discriminator and the generator. Both will be trained with the Adam optimizer.
# + colab={} colab_type="code" id="Ulou-BZPybzT"
def build_generator(seed_size, channels):
    """Assemble the DCGAN generator.

    Maps a latent vector of length `seed_size` to an image with
    `channels` colour channels: a dense projection to a 4x4x256 feature
    map followed by repeated upsample + conv + batch-norm + ReLU stages,
    ending in a tanh so pixels land in [-1, 1].
    """
    # Project the seed and reshape it into the initial feature map.
    layers = [
        Dense(4 * 4 * 256, activation="relu", input_dim=seed_size),
        Reshape((4, 4, 256)),
    ]

    # Two fixed upsampling stages (4x4 -> 8x8 -> 16x16), 256 filters each.
    for _ in range(2):
        layers += [
            UpSampling2D(),
            Conv2D(256, kernel_size=3, padding="same"),
            BatchNormalization(momentum=0.8),
            Activation("relu"),
        ]

    # Output resolution, additional upsampling: 16x16 -> 32x32, 128 filters.
    layers += [
        UpSampling2D(),
        Conv2D(128, kernel_size=3, padding="same"),
        BatchNormalization(momentum=0.8),
        Activation("relu"),
    ]

    # Extra upsampling stage when generating more than 32x32 images.
    if GENERATE_RES > 1:
        layers += [
            UpSampling2D(size=(GENERATE_RES, GENERATE_RES)),
            Conv2D(128, kernel_size=3, padding="same"),
            BatchNormalization(momentum=0.8),
            Activation("relu"),
        ]

    # Final CNN layer: map the features down to the pixel channels.
    layers += [
        Conv2D(channels, kernel_size=3, padding="same"),
        Activation("tanh"),
    ]

    return Sequential(layers)
def build_discriminator(image_shape):
    """Assemble the DCGAN discriminator.

    Takes an image of `image_shape` and outputs a single sigmoid
    probability that the image is real.  Five conv blocks with
    leaky-ReLU and dropout; batch norm on every block except the first.
    """
    return Sequential([
        # Input block: downsample raw pixels, no batch norm here.
        Conv2D(32, kernel_size=3, strides=2, input_shape=image_shape,
               padding="same"),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),

        # Downsample again; zero-pad so subsequent shapes divide evenly.
        Conv2D(64, kernel_size=3, strides=2, padding="same"),
        ZeroPadding2D(padding=((0, 1), (0, 1))),
        BatchNormalization(momentum=0.8),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),

        Conv2D(128, kernel_size=3, strides=2, padding="same"),
        BatchNormalization(momentum=0.8),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),

        # Two stride-1 blocks deepen features without shrinking them.
        Conv2D(256, kernel_size=3, strides=1, padding="same"),
        BatchNormalization(momentum=0.8),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),

        Conv2D(512, kernel_size=3, strides=1, padding="same"),
        BatchNormalization(momentum=0.8),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),

        # Collapse to one real/fake probability.
        Flatten(),
        Dense(1, activation='sigmoid'),
    ])
# + [markdown] colab_type="text" id="2kxKk7uDOnPS"
# As we progress through training, images will be produced to show the progress. These images will contain a number of rendered faces that show how good the generator has become. These faces will be generated from a fixed set of random seeds, so the same latent points can be compared across epochs.
# + colab={} colab_type="code" id="UKnCeDut2cp0"
def save_images(cnt, noise):
    """Render a PREVIEW_ROWS x PREVIEW_COLS grid of generated faces.

    Parameters
    ----------
    cnt : int
        Sequence number (epoch) embedded in the output file name.
    noise : array
        Latent seed vectors, one per grid cell
        (PREVIEW_ROWS * PREVIEW_COLS rows).

    Writes ``train-{cnt}.png`` under ``DATA_PATH/output``.
    """
    # White canvas large enough for the grid plus margins between cells.
    image_array = np.full((
        PREVIEW_MARGIN + (PREVIEW_ROWS * (GENERATE_SQUARE+PREVIEW_MARGIN)),
        PREVIEW_MARGIN + (PREVIEW_COLS * (GENERATE_SQUARE+PREVIEW_MARGIN)), 3),
        255, dtype=np.uint8)

    generated_images = generator.predict(noise)
    # Map generator output from [-1, 1] back to [0, 1].
    generated_images = 0.5 * generated_images + 0.5

    image_count = 0
    for row in range(PREVIEW_ROWS):
        for col in range(PREVIEW_COLS):
            # FIX: the cell stride hard-coded 16 instead of PREVIEW_MARGIN
            # (they only happen to be equal today), and the slice
            # assignment below was broken across two statements — a
            # syntax error in the original.
            r = row * (GENERATE_SQUARE+PREVIEW_MARGIN) + PREVIEW_MARGIN
            c = col * (GENERATE_SQUARE+PREVIEW_MARGIN) + PREVIEW_MARGIN
            image_array[r:r+GENERATE_SQUARE, c:c+GENERATE_SQUARE] = \
                generated_images[image_count] * 255
            image_count += 1

    output_path = os.path.join(DATA_PATH, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    filename = os.path.join(output_path, f"train-{cnt}.png")
    im = Image.fromarray(image_array)
    im.save(filename)
# + [markdown] colab_type="text" id="XiUbj3W4Oo3U"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 285} colab_type="code" id="gL5byGhNzOzd" outputId="ad1df425-b190-4404-b716-5998f6ea48fb"
generator = build_generator(SEED_SIZE, IMAGE_CHANNELS)
noise = tf.random.normal([1, SEED_SIZE])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0])
# + [markdown] colab_type="text" id="nlxxEHDIOqjW"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="LOnTxIXnyeEQ" outputId="ed8e14da-88d9-4d02-9338-07bb71eadb7e"
image_shape = (GENERATE_SQUARE,GENERATE_SQUARE,IMAGE_CHANNELS)
discriminator = build_discriminator(image_shape)
decision = discriminator(generated_image)
print (decision)
# + [markdown] colab_type="text" id="-ChOo3D1OsVc"
# Loss functions must be developed that allow the generator and discriminator to be trained in an adversarial way. Because these two neural networks are being trained independently they must be trained in two separate passes. This requires two separate loss functions and also two separate updates to the gradients. When the discriminator's gradients are applied to decrease the discriminator's loss it is important that only the discriminator's weights are update. It is not fair, nor will it produce good results, to adversarially damage the weights of the generator to help the discriminator. A simple backpropagation would do this. It would simultaneously affect the weights of both generator and discriminator to lower whatever loss it was assigned to lower.
#
# Figure 7.TDIS shows how the discriminator is trained.
#
# **Figure 7.TDIS: Training the Discriminator**
# 
#
# Here a training set is generated with an equal number of real and fake images. The real images are randomly sampled (chosen) from the training data. An equal number of random images are generated from random seeds. For the discriminator training set, the $x$ contains the input images and the $y$ contains a value of 1 for real images and 0 for generated ones.
#
# Likewise, the Figure 7.TGEN shows how the generator is trained.
#
# **Figure 7.TGEN: Training the Generator**
# 
#
# For the generator training set, the $x$ contains the random seeds to generate images and the $y$ always contains the value of 1, because the optimal is for the generator to have generated such good images that the discriminiator was fooled into assigning them a probability near 1.
# + colab={} colab_type="code" id="gBaP98zAySJV"
# Binary cross-entropy used by both adversarial losses.
# FIX: the discriminator defined above ends in a sigmoid activation, so
# its outputs are already probabilities; from_logits must be False.  The
# original passed from_logits=True, which applies a second sigmoid inside
# the loss and distorts the gradient signal.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=False)

def discriminator_loss(real_output, fake_output):
    """Discriminator loss: real images should score 1, generated ones 0."""
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    """Generator loss: success means the discriminator scores fakes as 1."""
    return cross_entropy(tf.ones_like(fake_output), fake_output)
# + [markdown] colab_type="text" id="gIlZvHGAxbWf"
# Both the generator and discriminator use Adam and the same learning rate and momentum. This does not need to be the case. If you use a **GENERATE_RES** greater than 3 you may need to tune these learning rates, as well as other training and hyperparameters.
# + colab={} colab_type="code" id="79UDhOCa0R4h"
generator_optimizer = tf.keras.optimizers.Adam(1.5e-4,0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(1.5e-4,0.5)
# + [markdown] colab_type="text" id="frCpCNn8yRcM"
# The following function is where most of the training takes place for both the discriminator and the generator. This function was based on the GAN provided by the [TensorFlow Keras exmples](https://www.tensorflow.org/tutorials/generative/dcgan) documentation. The first thing you should notice about this function is that it is annotated with the **tf.function** annotation. This causes the function to be precompiled and improves performance.
#
# This function trains differently than the code we previously saw for training. This code makes use of **GradientTape** to allow the discriminator and generator to be trained together, yet separately.
#
#
# + colab={} colab_type="code" id="uzyh-LqU0j5d"
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
    """Run one adversarial training step on a batch of real images.

    Generates a batch of fake images from fresh random seeds, scores
    real and fake batches with the discriminator, then applies each
    network's gradients with its own optimizer.  Returns
    (generator loss, discriminator loss) for this batch.
    """
    seed = tf.random.normal([BATCH_SIZE, SEED_SIZE])

    # Two separate tapes: each loss is differentiated only with respect
    # to its own network's variables, so updating one network never
    # perturbs the other's weights.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(seed, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
    return gen_loss, disc_loss
# + colab={} colab_type="code" id="NjrRgDR10lSF"
def train(dataset, epochs):
    """Run the adversarial training loop.

    For each epoch, every batch in `dataset` goes through `train_step`;
    per-batch generator/discriminator losses are averaged and printed,
    and a preview grid is rendered from a fixed seed so the generator's
    progress is visually comparable across epochs.
    """
    # Fixed seed: the preview grid shows the same latent points each epoch.
    fixed_seed = np.random.normal(0, 1, (PREVIEW_ROWS * PREVIEW_COLS, SEED_SIZE))
    start = time.time()

    for epoch in range(epochs):
        epoch_start = time.time()

        gen_loss_list = []
        disc_loss_list = []

        for image_batch in dataset:
            t = train_step(image_batch)
            gen_loss_list.append(t[0])
            disc_loss_list.append(t[1])

        g_loss = sum(gen_loss_list) / len(gen_loss_list)
        d_loss = sum(disc_loss_list) / len(disc_loss_list)

        epoch_elapsed = time.time() - epoch_start
        # FIX: the elapsed-time part of this message was a plain string,
        # so "{hms_string(epoch_elapsed)}" printed literally; it must be
        # an f-string for the interpolation to happen.
        print(f'Epoch {epoch+1}, gen loss={g_loss},disc loss={d_loss},'
              f' {hms_string(epoch_elapsed)}')
        save_images(epoch, fixed_seed)

    elapsed = time.time() - start
    print(f'Training time: {hms_string(elapsed)}')
# + colab={"base_uri": "https://localhost:8080/", "height": 884} colab_type="code" id="vWmEHprD0t1V" outputId="7eaaf566-6cc0-4878-e26d-4362c58e0084"
train(train_dataset, EPOCHS)
# + [markdown] colab_type="text" id="Mvir4efcLlwi"
#
# + colab={} colab_type="code" id="15Hia_feD9sm"
generator.save(os.path.join(DATA_PATH,"face_generator.h5"))
| t81_558_class_07_2_Keras_gan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simple Binary Classification with defaults
#
# In this notebook we will use the Adult Census dataset. Download the data from [here](https://www.kaggle.com/wenruliu/adult-income-dataset/downloads/adult.csv/2).
# +
import numpy as np
import pandas as pd
import torch
from pytorch_widedeep.preprocessing import WidePreprocessor, TabPreprocessor
from pytorch_widedeep.training import Trainer
from pytorch_widedeep.models import Wide, TabMlp, TabResnet, TabTransformer, WideDeep
from pytorch_widedeep.metrics import Accuracy, Precision
# -
# Load the Adult Census data (pandas reads straight from the zip archive).
df = pd.read_csv('data/adult/adult.csv.zip')
df.head()

# For convenience, we'll replace '-' with '_' in the column names.
df.columns = [c.replace("-", "_") for c in df.columns]

# Binary target: 1 when income is above 50K, else 0; then drop the
# original string column so it cannot leak into the features.
df['income_label'] = (df["income"].apply(lambda x: ">50K" in x)).astype(int)
df.drop('income', axis=1, inplace=True)
df.head()
# ### Preparing the data
#
# Have a look at notebooks one and two if you want to get a good understanding of the next few lines of code (although there is no need to use the package)
# Columns that feed the linear ("wide") part: one-hot encoded, plus
# explicit crossed-column pairs.
wide_cols = ['education', 'relationship','workclass','occupation','native_country','gender']
crossed_cols = [('education', 'occupation'), ('native_country', 'occupation')]

# (column, embedding dim) pairs and numeric columns for the deeptabular part.
cat_embed_cols = [('education',16), ('relationship',8), ('workclass',16), ('occupation',16),('native_country',16)]
continuous_cols = ["age","hours_per_week"]
target_col = 'income_label'

# +
# TARGET
target = df[target_col].values

# wide: fit the one-hot / cross-product encoder and transform the dataframe
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)
X_wide = wide_preprocessor.fit_transform(df)

# deeptabular: label-encode the embedding columns, keep continuous ones
tab_preprocessor = TabPreprocessor(embed_cols=cat_embed_cols, continuous_cols=continuous_cols)
X_tab = tab_preprocessor.fit_transform(df)
# -

print(X_wide)
print(X_wide.shape)
print(X_tab)
print(X_tab.shape)
# ### Defining the model
wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
deeptabular = TabMlp(mlp_hidden_dims=[64,32],
column_idx=tab_preprocessor.column_idx,
embed_input=tab_preprocessor.embeddings_input,
continuous_cols=continuous_cols)
model = WideDeep(wide=wide, deeptabular=deeptabular)
model
# As you can see, the model is not particularly complex. In mathematical terms (Eq 3 in the [original paper](https://arxiv.org/pdf/1606.07792.pdf)):
#
# $$
# pred = \sigma(W^{T}_{wide}[x, \phi(x)] + W^{T}_{deep}a_{deep}^{(l_f)} + b)
# $$
#
#
# The architecture above will output the 1st and the second term in the parenthesis. `WideDeep` will then add them and apply an activation function (`sigmoid` in this case). For more details, please refer to the paper.
# ### Training
# Once the model is built, we just need to compile it and run it
trainer = Trainer(model, objective='binary', metrics=[Accuracy, Precision])
trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=5, batch_size=64, val_split=0.2)
# As you can see, you can run a wide and deep model in just a few lines of code.
#
# Using `TabResnet` as the `deeptabular` component
wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
deeptabular = TabResnet(blocks_dims=[64,32],
column_idx=tab_preprocessor.column_idx,
embed_input=tab_preprocessor.embeddings_input,
continuous_cols=continuous_cols)
model = WideDeep(wide=wide, deeptabular=deeptabular)
trainer = Trainer(model, objective='binary', metrics=[Accuracy, Precision])
trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=5, batch_size=64, val_split=0.2)
# Using `TabTransformer` as the `deeptabular` component
# for TabTransformer we only need the names of the columns
cat_embed_cols_for_transformer = [el[0] for el in cat_embed_cols]
cat_embed_cols_for_transformer
# deeptabular
tab_preprocessor = TabPreprocessor(embed_cols=cat_embed_cols_for_transformer,
continuous_cols=continuous_cols,
for_tabtransformer=True)
X_tab = tab_preprocessor.fit_transform(df)
wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
deeptabular = TabTransformer(column_idx=tab_preprocessor.column_idx,
embed_input=tab_preprocessor.embeddings_input,
continuous_cols=continuous_cols)
model = WideDeep(wide=wide, deeptabular=deeptabular)
trainer = Trainer(model, objective='binary', metrics=[Accuracy, Precision])
trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=2, batch_size=512, val_split=0.2)
# Also mentioning that one could build a model with the individual components independently. For example, a model comprised only by the `wide` component would be simply a linear model. This could be attained by just:
model = WideDeep(wide=wide)
trainer = Trainer(model, objective='binary', metrics=[Accuracy, Precision])
trainer.fit(X_wide=X_wide, target=target, n_epochs=5, batch_size=64, val_split=0.2)
# The only requisite is that the model component must be passed to `WideDeep` before "fed" to the `Trainer`. This is because the `Trainer` is coded so that it trains a model that has a parent called `model` and then children that correspond to the model components: `wide`, `deeptabular`, `deeptext` and `deepimage`. Also, `WideDeep` builds the last connection between the output of those components and the final, output neuron(s).
| examples/03_Binary_Classification_with_Defaults.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TEXT MINING for BEGINNER
# - 본 자료는 텍스트 마이닝을 활용한 연구 및 강의를 위한 목적으로 제작되었습니다.
# - 본 자료를 강의 목적으로 활용하고자 하시는 경우 꼭 아래 메일주소로 연락주세요.
# - 본 자료에 대한 허가되지 않은 배포를 금지합니다.
# - 강의, 저작권, 출판, 특허, 공동저자에 관련해서는 문의 바랍니다.
# - **Contact : ADMIN(<EMAIL>)**
#
# ---
# ## DAY 08. 동적 페이지 수집하기: 인스타그램
# - Python을 활용해 가상의 브라우저를 띄워 웹페이지에서 데이터를 크롤링하는 방법에 대해 다룹니다.
#
# ---
# > **\*\*\* 주의사항 \*\*\***
# 본 자료에서 설명하는 웹크롤링하는 방법은 해당 기법에 대한 이해를 돕고자하는 교육의 목적으로 사용되었으며,
# 이를 활용한 대량의 무단 크롤링은 범죄에 해당할 수 있음을 알려드립니다.
# ### 1. 가상의 브라우저 실행하기: Chrome Driver
#
# ---
# +
# Install the selenium package, which lets us control a virtual browser.
# Uncomment the line below and run the cell.
# Installation only needs to happen once; on re-install the message
# "Requirement already satisfied: ~" is printed instead.

# #!pip install selenium
# +
# Import selenium so a virtual browser can be driven from Python code.
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By

# When selenium drives a real browser, pages need loading time just like
# normal web surfing.  `time` is imported so the code can wait for loads.
import time

# Import BeautifulSoup / requests / urllib to request page data from Python.
from bs4 import BeautifulSoup
import requests
import urllib
# +
# Launch the Chrome driver.
chrome_options = webdriver.ChromeOptions()

# Set a custom User-Agent so the site cannot tell the session is being
# driven from Python code.
chrome_options.add_argument('--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"')

# Point at the Chrome Driver binary and start it.
# Chrome Driver can be downloaded from the link in the cell below.
# If chromedriver sits next to this notebook, the paths below work as-is.
# NOTE(review): `executable_path` and the `chrome_options` keyword are
# deprecated in Selenium 4 (use `service=` / `options=`) — confirm the
# installed selenium version.

# Windows
#driver = webdriver.Chrome(executable_path = "chromedriver", chrome_options=chrome_options)

# macOS / Linux
driver = webdriver.Chrome(executable_path = "./chromedriver", chrome_options=chrome_options)
# -
# Chrome Driver 다운로드 URL : http://chromedriver.chromium.org/downloads
# ### 2. 가상의 브라우저를 활용에 사이트 접속하기
#
# ---
# Navigate the browser to the page at URL.
URL = "https://www.instagram.com/"
driver.get(URL)

# Use sleep(SEC) to give the page time to actually load.
time.sleep(10)

# Logging in is required before Instagram posts can be accessed.
# Move to the Instagram login page.
# click() clicks the desired element (tag).
driver.find_element(By.CLASS_NAME, "izU2O").find_element(By.TAG_NAME, "a").click()
time.sleep(10)

# Enter the username (ID) and password (PW) on the login page.
# They may also be typed directly into the browser instead.
ID = "여기에 ID를 입력합니다."
PW = "<PASSWORD>."
driver.find_element(By.NAME, "username").send_keys(ID)
driver.find_element(By.NAME, "password").send_keys(PW)
time.sleep(2)

# +
# Click the login button in the browser to log in.
# When Selenium clicks the login button itself, a CAPTCHA must be solved.
# -

# If a notification-settings dialog pops up, choose "Not now".
# Do not run this cell if the dialog did not appear.
driver.find_element(By.CLASS_NAME, "mt3GC").find_elements(By.TAG_NAME, "button")[1].click()

# +
# Search for a hashtag in the Instagram search box.

# Enter the search keyword.
keyword = "#<PASSWORD>"
search_form = driver.find_element(By.CSS_SELECTOR, "#react-root > section > nav > div._8MQSO.Cx7Bp > div > div > div.LWmhU._0aCwM > input")
search_form.send_keys(keyword)
time.sleep(5)
# -

# Go to the top hashtag result among the search suggestions.
search_url = driver.find_element(By.CLASS_NAME, "fuqBx").find_element(By.TAG_NAME, "a").get_attribute("href")
driver.get(search_url)
time.sleep(5)
# ### 3. 인스타그램 게시물 본문 수집하기
#
# ---
# +
# Counter for the number of posts collected.
post_count = 0

# Open the file that will store the post bodies.
# "#" is not allowed in file names, so strip it from the keyword.
f = open("instagram_post_" + keyword.replace("#", "") + ".txt", "w", encoding = "utf-8")

# Instagram only reveals all posts as you keep scrolling down, so we
# decide here how many times to scroll while collecting posts.
# Increase the scroll count for larger result sets.
scroll_limit = 2

# Every post element collected so far; used to avoid collecting the same
# post twice across scroll passes.
total_post_list = []

for i in range(scroll_limit+1):
    # Show the current scroll count.
    print("Scroll 횟수 :", i, end="\r")
    time.sleep(5)

    # Grab every post element currently visible on screen.
    post_list = driver.find_elements(By.CLASS_NAME, "_9AhH0")

    # Keep only posts that have not been collected yet.
    post_list = [post for post in post_list if post not in total_post_list]

    # Remember them so the next scroll pass skips them.
    total_post_list += post_list

    # Repeat steps 1.-4. for each post.
    for post in post_list:
        try:
            # 1. Click the post thumbnail.
            post.click()
            time.sleep(7)

            # 2. Read the post body text.
            # FIX: bare `except:` also swallowed KeyboardInterrupt;
            # narrowed to Exception (intent: skip posts without a body).
            try:
                content = driver.find_element(By.CLASS_NAME, "C4VMK").text
                content = content.replace("\n", " ")
                f.write(content + "\n")
            except Exception:
                pass

            # 3. Bump the collected-post counter.
            post_count += 1

            # 4. Close the popup via the X at the top right.
            driver.find_element(By.CLASS_NAME, "ckWGn").click()
        except Exception:
            # Stale/obscured elements raise on click; move to the next post.
            continue

    # Scroll down so more posts load into the browser.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(5)

f.close()
# -

print("> 수집한 총 게시물 개수 :", post_count)

# Close the Chrome driver.
driver.close()

# Check the post contents saved in the file.
f = open("instagram_post_" + keyword.replace("#", "") + ".txt", encoding="utf-8")
for post in f.read().split("\n")[:10]:
    print(post.strip())
f.close()
| practice-note/08_text-mining-for-beginner_python-crawling-practice-3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Testing Grimoire 1.1</h1>
# +
#from pandas import DataFrame, Series
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
# +
# Import the main project code (EnginneringForest lives outside this tree).
import sys
sys.path.append('C:\\Users\\<NAME>\\Desktop\\Qualificação PPGCC\\abordagem\\RFNS')

# from grimoire.EnginneringForest import EnginneringForest
from grimoire.EnginneringForest import EnginneringForest
# +
df_heart = pd.read_csv('../datasets/heart.csv',
engine='c',
memory_map=True,
low_memory=True)
X=df_heart[['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg',
'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal']]
# Labels
y=df_heart['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30,
random_state=100,
shuffle=True,
stratify=y)
# -
model_eg = EnginneringForest(3, name_log='testing_grimoire_11.txt')
model_eg.fit(X_train, y_train)
y_pred = model_eg.predict(X_test)
# +
mac = accuracy_score(y_test, y_pred)
print("Accuracy:", mac)

# In the previous code I had achieved Accuracy: 0.8571428571428571
# On the first run of this code it gave Accuracy: 0.8681318681318682
# -

# Accuracy improvement between the two runs, in percentage points.
(0.8681318681318682 - 0.8571428571428571) * 100
print(y_pred)
print(list(y_test))
| jupyter/Testing Grimoire 1.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''conda-env'': conda)'
# language: python
# name: python37464bitcondaenvconda3f4d7783b740440a9c80fd58cd6c0e7e
# ---
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("My-App").getOrCreate()
sc = spark.sparkContext
words = ["""Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""]
wordsRdd = sc.parallelize(words)
wordsRdd.count()

# Lower-case the text and tokenise it: first split on spaces, then split
# any tokens that still contain embedded newlines.  (The text is already
# lower-cased by the first flatMap, so the second needs no .lower().)
wordsRddSplit = wordsRdd.flatMap(lambda x : x.lower().split(" ")).flatMap(lambda x : x.split("\n"))
wordsRddSplit.take(5)

# Pair every word with a count of 1.
wordsRddTuple = wordsRddSplit.map(lambda x: (x,1))

# PERF FIX: reduceByKey replaces the groupByKey + sum pattern.  It
# combines counts on each partition before the shuffle, so far less data
# crosses the network, while producing identical (word, count) pairs.
wordsRddCount = wordsRddTuple.reduceByKey(lambda a, b: a + b)
wordsRddCount.take(5)

# Total tokens / distinct tokens -> average occurrences per distinct word.
wordsTotalCount = (wordsRddCount.map(lambda x: x[1]).reduce(lambda x,y: x+y))
wordsUniqueCount = wordsRddSplit.distinct().count()
average_appearance = wordsTotalCount / wordsUniqueCount
print(average_appearance)
# ## Reference
#
# - <https://spark.apache.org/docs/2.1.1/programming-guide.html>
| spark-rdd-example09-flatMap/Spark-RDD-Example-09-flatMap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hand-crafted features for GTZAN
#
# > The goal of this notebook is to create several audio features descriptors for the GTZAN dataset, as proposed for many year as input for machine learning algorithms. We are going to use timbral texture based features and tempo based features for this. The main goal is to produce this features, classify and then compare with our proposed deep learning approach, using CNNs on the raw audio.
#
# > This script is from https://github.com/Hguimaraes/gtzan.keras adapted to my case of 2 specific sub-genres
import os
import librosa
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import kurtosis
from scipy.stats import skew
# +
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectFromModel
import lightgbm as lgbm
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# -
# Set the seed
np.random.seed(42)
gtzan_dir = '../Data/songs/full_test_set/'
# Parameters
song_samples = 22050*30
genres = {'black': 0, 'death': 1}
def get_features(y, sr, n_fft = 1024, hop_length = 512):
    """Compute aggregated hand-crafted audio features for one signal.

    Frame-wise timbral descriptors (spectral centroid, rolloff, onset
    flux, RMS energy, zero-crossing rate, contrast, bandwidth, flatness)
    and 13 MFCC bands are computed with librosa, then each vector is
    summarised by max/min/mean/std/kurtosis/skew; a single global tempo
    estimate is appended.

    Parameters
    ----------
    y : np.ndarray
        Mono audio time series.
    sr : int
        Sample rate of `y`.
    n_fft : int
        FFT window length for the spectral descriptors.
    hop_length : int
        Hop between successive analysis frames.

    Returns
    -------
    dict
        Flat mapping '<feature>_<statistic>' -> float, plus 'tempo' and
        'sample_silence'.
    """
    # Features to concatenate in the final dictionary
    features = {'centroid': None, 'roloff': None, 'flux': None, 'rmse': None,
                'zcr': None, 'contrast': None, 'bandwidth': None, 'flatness': None}

    # Count silence: samples trimmed off the ends by librosa's default
    # top_db threshold.
    if 0 < len(y):
        y_sound, _ = librosa.effects.trim(y, frame_length=n_fft, hop_length=hop_length)
        features['sample_silence'] = len(y) - len(y_sound)

    # Using librosa to calculate the features.
    # NOTE(review): passing `y` positionally is deprecated in newer
    # librosa releases (keyword-only `y=`) — confirm the pinned version.
    features['centroid'] = librosa.feature.spectral_centroid(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()
    features['roloff'] = librosa.feature.spectral_rolloff(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()
    features['zcr'] = librosa.feature.zero_crossing_rate(y, frame_length=n_fft, hop_length=hop_length).ravel()
    features['rmse'] = librosa.feature.rms(y, frame_length=n_fft, hop_length=hop_length).ravel()
    features['flux'] = librosa.onset.onset_strength(y=y, sr=sr).ravel()
    features['contrast'] = librosa.feature.spectral_contrast(y, sr=sr).ravel()
    features['bandwidth'] = librosa.feature.spectral_bandwidth(y, sr=sr, n_fft=n_fft, hop_length=hop_length).ravel()
    features['flatness'] = librosa.feature.spectral_flatness(y, n_fft=n_fft, hop_length=hop_length).ravel()

    # MFCC treatment: one flattened vector per MFCC band.
    mfcc = librosa.feature.mfcc(y, n_fft = n_fft, hop_length = hop_length, n_mfcc=13)
    for idx, v_mfcc in enumerate(mfcc):
        features['mfcc_{}'.format(idx)] = v_mfcc.ravel()

    # Get statistics from the vectors: six summary moments per descriptor.
    def get_moments(descriptors):
        result = {}
        for k, v in descriptors.items():
            result['{}_max'.format(k)] = np.max(v)
            result['{}_min'.format(k)] = np.min(v)
            result['{}_mean'.format(k)] = np.mean(v)
            result['{}_std'.format(k)] = np.std(v)
            result['{}_kurtosis'.format(k)] = kurtosis(v)
            result['{}_skew'.format(k)] = skew(v)
        return result

    dict_agg_features = get_moments(features)
    # Global tempo estimate (beats per minute).
    dict_agg_features['tempo'] = librosa.beat.tempo(y, sr=sr)[0]

    return dict_agg_features
def read_process_songs(src_dir, debug = True):
    """Walk each genre folder under `src_dir`, extract the feature dictionary
    for every audio file found, tag it with the integer class label, and
    return the list of dictionaries."""
    collected = []
    # One pass per genre; `genres` maps folder name -> class label.
    for genre_name, _ in genres.items():
        genre_dir = src_dir + genre_name
        for _, _, filenames in os.walk(genre_dir):
            for fname in filenames:
                # Load the audio at librosa's default sample rate.
                audio_path = genre_dir + "/" + fname
                signal, sr = librosa.load(audio_path)
                if debug:
                    print("Reading file: {}".format(audio_path))
                # Extract features and attach the label before collecting.
                song_feats = get_features(signal, sr)
                song_feats['genre'] = genres[genre_name]
                collected.append(song_feats)
    return collected
# +
# %%time
# Get list of dicts with features and convert to dataframe
features = read_process_songs(gtzan_dir, debug=True)
# -

df_features = pd.DataFrame(features)

df_features.shape

df_features.head()

# NOTE(review): path uses lowercase '../data' while loading used '../Data' --
# this silently writes a different directory on case-sensitive filesystems; confirm.
df_features.to_csv('../data/full_test_set_features.csv', index=False)

# Feature matrix / label vector for the classifiers below.
X = df_features.drop(['genre'], axis=1).values
y = df_features['genre'].values

# ## Visualization
#
# > Linear (and nonlinear) dimensionality reduction of the GTZAN features for visualization purposes

# Standardize the dataset (zero mean, unit variance per feature)
scale = StandardScaler()
x_scaled = scale.fit_transform(X)

# Use PCA only for visualization
pca = PCA(n_components=35, whiten=True)
x_pca = pca.fit_transform(x_scaled)
print("cumulative explained variance ratio = {:.4f}".format(np.sum(pca.explained_variance_ratio_)))

# Use LDA only for visualization (supervised projection)
lda = LDA()
x_lda = lda.fit_transform(x_scaled, y)

# Using tsne (nonlinear 2-D embedding)
tsne = TSNE(n_components=2, verbose=1, learning_rate=250)
x_tsne = tsne.fit_transform(x_scaled)

# +
# Side-by-side scatter plots of the three embeddings, colored by genre label.
plt.figure(figsize=(18, 4))
plt.subplot(131)
print(x_pca.shape)
plt.scatter(x_pca[:,0], x_pca[:,1], c=y)
plt.colorbar()
plt.title("Embedded space with PCA")
plt.subplot(132)
print(x_lda.shape)
plt.scatter(x_lda[:,0], x_lda[:,1], c=y)
plt.colorbar()
plt.title("Embedded space with LDA")
plt.subplot(133)
plt.scatter(x_tsne[:,0], x_tsne[:,1], c=y)
plt.colorbar()
plt.title("Embedded space with TSNE")
plt.show()
# -

# idea from https://github.com/rodriguezda/PCA/blob/master/pca%20pipeline.ipynb
import seaborn as sns

# Heatmap of PCA loadings: which original feature dominates each component.
df = pd.DataFrame(X)
df_component = pd.DataFrame(pca.components_, columns = df.columns)
plt.figure(figsize=(15,2))
sns.heatmap(df_component)

# Index of the strongest-loading feature per principal component.
max_vals = df_component.idxmax(1)
print(max_vals)

max_vals.hist(bins=133, figsize=(12,8))
# ## Classical Machine Learning
# Helper to plot confusion matrix -- from Scikit-learn website
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Render a confusion matrix as an annotated heatmap.

    @param cm: confusion matrix (n_classes x n_classes array of counts)
    @param classes: tick labels, in the same order as the matrix rows
    @param normalize: if True, convert counts to per-true-class rates
    @param title: title shown above the plot
    @param cmap: matplotlib colormap used for the heatmap
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    tick_positions = np.arange(len(classes))
    plt.xticks(tick_positions, classes, rotation=45)
    plt.yticks(tick_positions, classes)

    # Annotate every cell; switch text colour so it stays readable on dark cells.
    cell_fmt = '.2f' if normalize else 'd'
    brightness_cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, format(cm[row, col], cell_fmt),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > brightness_cutoff else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
# Stratified split keeps the class proportions identical in train and test.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
# ### Logistic Regression
# +
params = {
    "cls__penalty": ["l1", "l2"],   # penalty norms
    "cls__C": [0.5, 1, 2, 5],       # regularisation - smaller is stronger
    # FIX: scikit-learn's default solver ('lbfgs') does not support the l1
    # penalty, so those grid candidates would raise during fitting.
    # 'liblinear' supports both l1 and l2.
    "cls__solver": ["liblinear"],
    "cls__max_iter": [500]          # maximum number of iterations
}

# Pipeline: scale -> drop near-constant features -> keep features that a
# LightGBM model considers important -> logistic regression classifier.
pipe_lr = Pipeline([
    ('scale', StandardScaler()),
    ('var_tresh', VarianceThreshold(threshold=(.8 * (1 - .8)))),
    ('feature_selection', SelectFromModel(lgbm.LGBMClassifier())),
    ('cls', LogisticRegression())
])

# 5-fold cross-validated grid search over the parameter grid above.
grid_lr = GridSearchCV(pipe_lr, params, scoring='accuracy', n_jobs=6, cv=5)
grid_lr.fit(X_train, y_train)
# -

preds = grid_lr.predict(X_test)
print("best score on validation set (accuracy) = {:.4f}".format(grid_lr.best_score_))
print("best score on test set (accuracy) = {:.4f}".format(accuracy_score(y_test, preds)))
# ### ElasticNet

# +
params = {
    # NOTE(review): the 'log' loss was renamed 'log_loss' in scikit-learn >= 1.1
    # -- confirm against the installed version.
    "cls__loss": ['log'],
    "cls__penalty": ["elasticnet"],
    "cls__l1_ratio": [0.15, 0.25, 0.5, 0.75],  # mix between l1 and l2
}

# Same preprocessing as the other models: scale, drop near-constant features,
# then LightGBM-based feature selection before the SGD classifier.
pipe_en = Pipeline([
    ('scale', StandardScaler()),
    ('var_tresh', VarianceThreshold(threshold=(.8 * (1 - .8)))),
    ('feature_selection', SelectFromModel(lgbm.LGBMClassifier())),
    ('cls', SGDClassifier())
])

grid_en = GridSearchCV(pipe_en, params, scoring='accuracy', n_jobs=6, cv=5)
grid_en.fit(X_train, y_train)
# -

preds = grid_en.predict(X_test)
print("best score on validation set (accuracy) = {:.4f}".format(grid_en.best_score_))
print("best score on test set (accuracy) = {:.4f}".format(accuracy_score(y_test, preds)))

# ### Decision Tree

# +
params = {
    "cls__criterion": ["gini", "entropy"],
    "cls__splitter": ["best", "random"],
}

# No scaling here: decision trees are invariant to monotone feature scaling.
pipe_cart = Pipeline([
    ('var_tresh', VarianceThreshold(threshold=(.8 * (1 - .8)))),
    ('feature_selection', SelectFromModel(lgbm.LGBMClassifier())),
    ('cls', DecisionTreeClassifier())
])

grid_cart = GridSearchCV(pipe_cart, params, scoring='accuracy', n_jobs=6, cv=5)
grid_cart.fit(X_train, y_train)
# -

preds = grid_cart.predict(X_test)
print("best score on validation set (accuracy) = {:.4f}".format(grid_cart.best_score_))
print("best score on test set (accuracy) = {:.4f}".format(accuracy_score(y_test, preds)))

# ### Random Forest

# +
params = {
    "cls__n_estimators": [100, 250, 500, 1000],
    "cls__criterion": ["gini", "entropy"],
    "cls__max_depth": [5, 7, None]  # None = grow trees until pure leaves
}

pipe_rf = Pipeline([
    ('var_tresh', VarianceThreshold(threshold=(.8 * (1 - .8)))),
    ('feature_selection', SelectFromModel(lgbm.LGBMClassifier())),
    ('cls', RandomForestClassifier())
])

grid_rf = GridSearchCV(pipe_rf, params, scoring='accuracy', n_jobs=6, cv=5)
grid_rf.fit(X_train, y_train)
# -

preds = grid_rf.predict(X_test)
print("best score on validation set (accuracy) = {:.4f}".format(grid_rf.best_score_))
print("best score on test set (accuracy) = {:.4f}".format(accuracy_score(y_test, preds)))

# ### SVM

# +
params = {
    "cls__C": [0.5, 1, 2, 5],
    "cls__kernel": ['rbf', 'linear', 'sigmoid'],
}

# SVMs are scale-sensitive, so StandardScaler is back in the pipeline.
pipe_svm = Pipeline([
    ('scale', StandardScaler()),
    ('var_tresh', VarianceThreshold(threshold=(.8 * (1 - .8)))),
    ('feature_selection', SelectFromModel(lgbm.LGBMClassifier())),
    ('cls', SVC())
])

grid_svm = GridSearchCV(pipe_svm, params, scoring='accuracy', n_jobs=6, cv=5)
grid_svm.fit(X_train, y_train)
# -

preds = grid_svm.predict(X_test)
print("best score on validation set (accuracy) = {:.4f}".format(grid_svm.best_score_))
print("best score on test set (accuracy) = {:.4f}".format(accuracy_score(y_test, preds)))

# ## Results and save the model

# `preds` holds the SVM predictions at this point (last model evaluated above),
# so the confusion matrix below is for the SVM grid search.
cm = confusion_matrix(y_test, preds)
classes = ['black', 'death']
plt.figure(figsize=(2,2))
plot_confusion_matrix(cm, classes, normalize=True)

# Persist the whole fitted GridSearchCV (pipeline + best estimator) to disk.
import joblib
joblib.dump(grid_svm, "../Models/pipe_svm.joblib")
| Notebooks/.ipynb_checkpoints/1.0-handcrafted_features-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Libraries
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
import random as rng
import numpy as np
# %matplotlib inline
data = pd.read_csv("ocr_data/table_iterations/ocr_corrected.csv")
print data.head()
# +
# make individual dataframes for each year
df_2015 = data[['2015:Team','OPR Rank']]
df_2014 = data[['2014:Team','OPR Rank.1']]
df_2013 = data[['2013:Team','OPR Rank.2']]
df_2012 = data[['2012:Team','OPR Rank.3']]
df_2011 = data[['2011:Team','OPR Rank.4']]
df_2010 = data[['2010:Team','OPR Rank.5']]
df_2009 = data[['2009:Team','OPR Rank.6']]
df_2008 = data[['2008:Team','OPR Rank.7']]
# display head summaries of each individual dataframe
print df_2015.head()
print df_2014.head()
print df_2013.head()
print df_2012.head()
print df_2011.head()
print df_2010.head()
print df_2009.head()
print df_2008.head()
# extract teams
itterated_df = df_2015.itteritems("2015:Team")
# -
plt.boxplot(data["Unnamed: 3"][1:])
plt.show()
| OCR_RANKINGS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
from matplotlib import pyplot as plt
# ## Why need to use CLAHE (Contrast Limited Adaptive Histogram Equalization)
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# Single sample fundus image used to demonstrate the effect of CLAHE.
img_list = "../input/aptos2019-blindness-detection/train_images/000c1434d8d7.png"

# +
# Load as grayscale (flag 0), apply CLAHE, then show enhanced vs. raw image.
img = cv2.imread(img_list,0)
clahe = cv2.createCLAHE(clipLimit=3.5, tileGridSize=(8,8))
cl1 = clahe.apply(img)
plt.imshow(cl1 )
plt.show()
plt.imshow(img)
plt.show()
# -
# Accumulator for the CLAHE-enhanced images. Kept module-level so existing
# callers that read `image_new` after calling process() keep working.
image_new = []

def process(image_group):
    """Apply CLAHE to every image path in `image_group` (read as grayscale).

    Enhanced images are appended to the global `image_new` list; the list is
    also returned for convenience.
    @param image_group: iterable of image file paths
    return: the global `image_new` list
    """
    # Create the CLAHE operator once instead of once per image (loop-invariant).
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    for i, img in enumerate(image_group):
        print(img)
        img = cv2.imread(img,0)
        image_new.append(clahe.apply(img))
    return image_new
| clahe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# language: python
# name: python3
# ---
# ## COVID-19 World Vaccination Progress
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import itertools
import math
import pycaret.regression as caret
from pycaret.time_series import *
from sklearn.model_selection import TimeSeriesSplit
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
from statsmodels.tsa.arima.model import ARIMA
import statsmodels
# +
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
from fbprophet.diagnostics import performance_metrics
from fbprophet.plot import plot_cross_validation_metric
import itertools
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
import statsmodels.api as sm
import scipy.stats as stats
from sklearn.metrics import r2_score
import warnings
from typing import List
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation
from fbprophet.diagnostics import performance_metrics
from fbprophet.plot import plot_cross_validation_metric
import itertools
# -
from typing import List
import warnings
import datetime
from datetime import date , datetime , timedelta
from statsmodels.tsa.stattools import adfuller
from numpy import log
# ### EDA
# Country-level vaccination time series (one row per country per day).
df = pd.read_csv("/Users/luomingni/Desktop/MS/first term/5220_SML/Project/archive/country_vaccinations copy.csv")

df.head()

df.shape

# FIX: df.info is a method; without parentheses the expression only displayed
# the bound-method object instead of the dataframe summary.
df.info()

# +
# List the vaccine brands used by each country.
countries = df.country.unique()
for country in countries:
    print(country,end = ":\n")
    print(df[df.country == country]['vaccines'].unique()[0] , end = "\n"+"_"*20+"\n\n")

# +
# Final fully-vaccinated percentage per country = max of the cumulative column.
dict_vac_percentages = {}
iso_list = df.iso_code.unique()
for iso_code in iso_list:
    dict_vac_percentages[iso_code]=df[df.iso_code==iso_code]['people_fully_vaccinated_per_hundred'].max()

df_vac_percentages = pd.DataFrame()
df_vac_percentages['iso_code'] = dict_vac_percentages.keys()
df_vac_percentages['fully vaccinated percentage'] = dict_vac_percentages.values()
# assumes iso_list order matches the `countries` order -- TODO confirm
df_vac_percentages['country'] = countries

# +
# World choropleth of the fully-vaccinated percentage.
map_full_percentage = px.choropleth(df_vac_percentages, locations="iso_code" , color="fully vaccinated percentage"
                    , hover_name="country" , color_continuous_scale=px.colors.sequential.YlGn)
map_full_percentage.show()
# -

# Correlation heatmap of the numeric columns.
# NOTE(review): pandas >= 2.0 requires numeric_only=True here when the frame
# contains string columns -- confirm against the installed pandas version.
plt.subplots(figsize=(8, 8))
sns.heatmap(df.corr(), annot=True, square=True)
plt.show()
# ### Methods
class DataModeler:
    """Base class providing the `logger` decorator used to trace method calls."""

    def __init__(self):
        pass

    def _parametrized(dec):
        """Turn `dec` into a decorator factory that forwards its extra arguments."""
        def outer(*args, **kwargs):
            def bind(func):
                return dec(func, *args, **kwargs)
            return bind
        return outer

    @staticmethod
    @_parametrized
    def logger(f, job):
        """Decorator: print `job` before the wrapped method runs, 'Completed' after."""
        def wrapped(self, *args, **kwargs):
            print(job + " - ", end='\t')
            result = f(self, *args, **kwargs)
            print("Completed")
            return result
        return wrapped
# ### Preprocessing
class DataPreprocessor(DataModeler):
    "Wrap the operations of data preprocessing."

    def __init__(self):
        super(DataPreprocessor, self).__init__()

    @DataModeler.logger("Transforming feature type")
    def _feature_transform(self, df:pd.DataFrame) -> pd.DataFrame:
        """
        Transform data type of some columns (parse 'date' as datetime).
        @param df: raw data
        return: processed data (mutated in place and returned)
        """
        df['date'] = pd.to_datetime(df['date'],format="%Y-%m-%d")
        return df

    @DataModeler.logger("Counting missing rate")
    def missing_value_counter(self,df:pd.DataFrame, cols:List[str]) -> pd.DataFrame:
        """
        Count missing values in specified columns.
        @param df: dataframe
        @param cols: columns to be calculated
        return: summary (Feature, NA Count, NA Rate) for columns with NAs,
                sorted by NA Count descending
        """
        res = pd.DataFrame(cols, columns=['Feature'])
        na_cnts = [sum(df[col].isna()) for col in cols]
        res['NA Count'] = na_cnts
        res['NA Rate'] = res['NA Count'] / df.shape[0]
        res = res[res['NA Count'] != 0]
        res = res.sort_values(by='NA Count', ascending=False).reset_index(drop=True)
        return res

    @DataModeler.logger("Checking day interval")
    def check_day_interval(self,d0:date,d1:date) -> int:
        """
        Number of days in [d0, d1] inclusive; used to spot missing dates.
        @param d0: first date of the range
        @param d1: last date of the range
        return: day count (also printed)
        """
        delta = d1 - d0
        days = delta.days + 1
        print(days) #no missing value in 'date'! nice!
        return days

    @DataModeler.logger("Checking missing value")
    def missing_value(self,data):
        """Per-column count of NaNs."""
        return data.isna().sum()

    @DataModeler.logger("filling missing value using the day ahead")
    def fill_missing_value(self,data,target:str):
        """
        Fill each NaN in `target` with the previous row's value (forward fill).
        Assumes a default integer index so that i-1 addresses the previous row.
        @param data: dataframe (mutated in place)
        @param target: column to fill
        return: the same dataframe
        """
        for i in data[target][data[target].isna() == True].index:
            # FIX: use .loc instead of chained indexing (data[target][i] = ...),
            # which raises SettingWithCopyWarning and does not write through
            # under pandas copy-on-write.
            data.loc[i, target] = data.loc[i - 1, target]
        return data

    @DataModeler.logger("Filtering useful columns")
    def _filter_data(self, df:pd.DataFrame) -> pd.DataFrame:
        """
        Select useful variables for the model.
        @param df: raw data
        return: frame restricted to date and daily vaccinations
        """
        df_filtered = df[['date','daily_vaccinations']]
        return df_filtered

    @DataModeler.logger("Filling missing value")
    def _fill_missing_value(self, df:pd.DataFrame) -> pd.DataFrame:
        """
        Replace every remaining NaN with 0.0 (no vaccinations reported).
        @param df: dataframe
        return: processed dataframe
        """
        res = df.fillna(0.0)
        return res

    @DataModeler.logger("Sort data by date")
    def _sort_data(self, df:pd.DataFrame) -> pd.DataFrame:
        """
        Sort data chronologically.
        @param df: raw data
        return: processed data
        """
        df = df.sort_values(by='date')
        return df

    def preprocess(self, df:pd.DataFrame) -> pd.DataFrame:
        """
        Preprocess raw data and modify the fields to get required columns.
        @param df: raw data
        return: daily world totals with cumulative counts and the share of the
                world population fully vaccinated
        """
        df = self._feature_transform(df)
        df = self._filter_data(df)
        df = self._fill_missing_value(df)
        df = self._sort_data(df)
        # Aggregate all countries into one world-level daily series.
        df = df.groupby(by=['date']).sum().reset_index()
        df['total_vaccinations'] = df['daily_vaccinations'].cumsum()
        # 8,032,669,179 ~= world population; x2 doses per fully vaccinated person.
        df['percentage_people_vaccinated'] = (df['total_vaccinations']/(8032669179*2))*100
        return df
# ### Feature Engineering
# +
class FeatureEngineer(DataModeler):
    "Wrap the operations of feature engineering."
    def __init__(self):
        super(FeatureEngineer, self).__init__()
    @DataModeler.logger("Generating date features")
    def _gen_date_feats(self, data1:pd.DataFrame) -> pd.DataFrame:
        """
        Extract calendar features (year, month, day, ...) from the 'Date' column.
        @param data1: dataframe with a 'Date' column (mutated in place)
        return: dataframe with new features
        """
        data1['Date'] = pd.to_datetime(data1['Date'])
        # NOTE(review): 'Date' is re-formatted to '%d.%m.%Y' strings and then
        # re-parsed by DatetimeIndex below; for days <= 12 that parse can swap
        # day and month -- confirm this round-trip is intended.
        data1['Date'] = data1['Date'].dt.strftime('%d.%m.%Y')
        data1['year'] = pd.DatetimeIndex(data1['Date']).year
        data1['month'] = pd.DatetimeIndex(data1['Date']).month
        data1['day'] = pd.DatetimeIndex(data1['Date']).day
        data1['dayofyear'] = pd.DatetimeIndex(data1['Date']).dayofyear
        # NOTE(review): DatetimeIndex.weekofyear is deprecated in pandas >= 1.1
        # (use .isocalendar().week) -- confirm against the installed version.
        data1['weekofyear'] = pd.DatetimeIndex(data1['Date']).weekofyear
        data1['weekday'] = pd.DatetimeIndex(data1['Date']).weekday
        data1['quarter'] = pd.DatetimeIndex(data1['Date']).quarter
        data1['is_month_start'] = pd.DatetimeIndex(data1['Date']).is_month_start
        data1['is_month_end'] = pd.DatetimeIndex(data1['Date']).is_month_end
        print(data1.info())
        return data1
    @DataModeler.logger("Generating sliding window features")
    def gen_window(self,data1:pd.DataFrame,tar:str, width:int) -> pd.DataFrame:
        """
        Generate lag/rolling-window features for the target column.
        @param data1: dataframe (mutated in place, then NaN rows dropped)
        @param tar: name of the target column
        @param width: rolling window length in rows
        return: dataframe with 'Series', 'Shift1' and 'Window_mean' columns
        """
        # Row counter 1..n (simple time index feature)
        data1['Series'] = np.arange(1 , len(data1)+1)
        #define lag
        data1['Shift1'] = data1[tar].shift(1)
        # define Window = 7
        #window_len = 7
        # Rolling mean of the lagged target (avoids leaking the current value)
        data1['Window_mean'] = data1['Shift1'].rolling(window = width).mean()
        #remove missing value
        data1.dropna(inplace = True)
        data1.reset_index(drop = True , inplace=True)
        #df_X = data1[['Date', 'Series' , 'Window_mean' , 'Shift1' ]]
        #df_Y = data1[['Target']]
        return data1
# -
# ### Prophet model
class MLModeler(DataModeler):
    "Wrap the operations of Prophet model."
    def __init__(self):
        super(MLModeler, self).__init__()
    @DataModeler.logger("Transforming feature type")
    def _train_test_split(self, df:pd.DataFrame,target_variable):
        """
        Split data into training and validation dataset.
        Renames columns to Prophet's expected 'ds'/'y' and adds the cap/floor
        columns required by logistic growth. Split date is fixed at 2021-08-22.
        @param df: processed data
        @param target_variable: name of the column to forecast
        return: train and validation data
        """
        df = df.rename(columns={'date':'ds',target_variable:'y'})
        df['cap'] = 100
        df['floor'] = 0
        df_train = df[df['ds'] < datetime(2021,8,22)]
        df_val = df[df['ds'] >= datetime(2021,8,22)]
        return df_train,df_val
    @DataModeler.logger("Fit model on training data")
    def _fit_model(self, df:pd.DataFrame):
        """
        Fit a default Prophet model on the training data.
        @param df: training data in Prophet format ('ds'/'y')
        return: trained model
        """
        m = Prophet()
        m.fit(df)
        return m
    @DataModeler.logger("Predict results on test data")
    def _predict_test(self, m) -> pd.DataFrame:
        """
        Forecast 90 days past the end of the training data.
        @param m: trained Prophet model
        return: dataframe containing forecasts
        """
        future = m.make_future_dataframe(periods=90)
        forecast = m.predict(future)
        return forecast
    @DataModeler.logger("Plot predicted data")
    def _plot_forecast(self, m):
        """
        Plot predicted data.
        NOTE(review): `forecast` is not defined in this method or class -- it
        relies on a global produced by calling code; confirm or pass it in.
        @param m: model
        return: none
        """
        fig1 = m.plot(forecast)
    @DataModeler.logger("Plot components of predicted data")
    def _plot_components_forecast(self, m):
        """
        Plot trend/seasonality components of the forecast.
        NOTE(review): same reliance on a global `forecast` as _plot_forecast.
        @param m: model
        return: none
        """
        fig2 = m.plot_components(forecast)
    @DataModeler.logger("Plot cross validation metrics")
    def _plot_cross_validation_metrics(self, m):
        """
        Run Prophet cross-validation and plot the MAPE curve.
        @param m: trained model
        return: none
        """
        df_cv = cross_validation(m, initial='165 days', period='100 days', horizon = '65 days')
        df_p = performance_metrics(df_cv)
        fig3 = plot_cross_validation_metric(df_cv, metric='mape')
    @DataModeler.logger("Calculate RMSE, MAE, MAPE on test data")
    def _calculate_metrics(self, m):
        """
        Print best RMSE / MAE / MAPE over the cross-validation horizons.
        @param m: trained model
        return: none (metrics are printed)
        """
        df_cv = cross_validation(m, initial='165 days', period='100 days', horizon = '65 days')
        df_p = performance_metrics(df_cv)
        print('RMSE - ',df_p['rmse'].min())
        print('MAE - ',df_p['mae'].min())
        print('MAPE - ',df_p['mape'].min())
    @DataModeler.logger("Tuning hyperparameters")
    def _hyperparameter_tuning(self, m, df):
        """Exhaustive grid search over Prophet hyperparameters; returns the
        params of the run with the lowest mean cross-validated RMSE."""
        # Cartesian product of the grid values, one row per combination.
        def create_param_combinations(**param_dict):
            param_iter = itertools.product(*param_dict.values())
            params =[]
            for param in param_iter:
                params.append(param)
            params_df = pd.DataFrame(params, columns=list(param_dict.keys()))
            return params_df
        # Fit + cross-validate one parameter combination, return mean metrics.
        def single_cv_run(history_df, metrics, param_dict):
            m = Prophet(**param_dict)
            m.add_country_holidays(country_name='US')
            m.fit(history_df)
            df_cv = cross_validation(m, initial='165 days', period='100 days', horizon = '65 days')
            df_p = performance_metrics(df_cv).mean().to_frame().T
            df_p['params'] = str(param_dict)
            df_p = df_p.loc[:, metrics]
            return df_p
        param_grid = {
            'changepoint_prior_scale': [0.005, 0.05, 0.5, 5],
            'changepoint_range': [0.8, 0.9],
            'seasonality_prior_scale':[0.1, 1, 10.0],
            'holidays_prior_scale':[0.1, 1, 10.0],
            'seasonality_mode': ['multiplicative', 'additive'],
            'growth': ['linear', 'logistic'],
            'yearly_seasonality': [5, 10, 20]
        }
        metrics = ['horizon', 'rmse', 'mape', 'params']
        results = []
        params_df = create_param_combinations(**param_grid)
        for param in params_df.values:
            param_dict = dict(zip(params_df.keys(), param))
            cv_df = single_cv_run(df, metrics, param_dict)
            results.append(cv_df)
        results_df = pd.concat(results).reset_index(drop=True)
        # Best = lowest mean RMSE across all tried combinations.
        return results_df.loc[results_df['rmse'] == min(results_df['rmse']), ['params']]
# ### ARIMA model
class time_Series_Learner():
    """Univariate ARIMA workflow: stationarity test, split, fit, tune, plot."""
    def __init__(self):
        super(time_Series_Learner, self).__init__()

    @DataModeler.logger("Hypothesis testing")
    def Hypothesis_test(self,df):
        """Augmented Dickey-Fuller stationarity test; prints statistic and p-value."""
        result = adfuller(df.dropna())
        print('ADF Statistic: %f' % result[0])
        print('p-value: %f' % result[1])

    @DataModeler.logger("Transforming feature type")
    def split_dataset(self,X, y, train_ratio=0.8):
        """Chronological (unshuffled) split into train/validation parts.
        return: X_train, X_valid, y_train, y_valid"""
        X_len = len(X)
        train_data_len = int(X_len * train_ratio)
        X_train = X[:train_data_len]
        y_train = y[:train_data_len]
        X_valid = X[train_data_len:]
        y_valid = y[train_data_len:]
        return X_train, X_valid, y_train, y_valid

    @DataModeler.logger("Training")
    def Univariate_Arima(self, train_Y,parameters:tuple,Y_valid):
        """Fit ARIMA with the given (p, d, q) order and score on the validation set.
        @param train_Y: training series
        @param parameters: (p, d, q) order tuple
        @param Y_valid: validation series (its length sets the forecast horizon)
        return: (metrics dict with mae/rmse/r2, fitted model)"""
        model = ARIMA(train_Y, order=parameters) # p,d,q parameters
        model_fit = model.fit()
        y_pred = model_fit.forecast(len(Y_valid))
        # Calcuate metrics on the validation forecast
        metrics = {}
        metrics["mae"] = mean_absolute_error(Y_valid, y_pred)
        metrics["rmse"] = math.sqrt(mean_squared_error(Y_valid, y_pred))
        metrics["r2"] = r2_score(Y_valid, y_pred)
        return metrics, model_fit

    @DataModeler.logger("Tuning hyperparameters")
    def tune_parameters(self, parameters,y_train,y_valid):
        """Pick the (p, d, q) order with the lowest AIC.
        @param parameters: list of (p, d, q) tuples to evaluate
        @param y_train: training series
        @param y_valid: validation series
        return: best order tuple
        """
        # FIX: the original looped over the global `pdq` (shadowing this
        # method's `parameters` argument) and indexed back into the global;
        # iterate and index the argument itself instead.
        AIC = []
        for order in parameters:
            warnings.filterwarnings("ignore") # specify to ignore warning messages
            _, model_fit = self.Univariate_Arima(y_train, order, y_valid)
            AIC.append(model_fit.aic)
        final, index = min(AIC), AIC.index(min(AIC))
        parameter = parameters[index]
        print("suitable parameter:",parameter)
        print("result:",final)
        return parameter

    @DataModeler.logger("Predict results on test data")
    def valid_forcast(self, model_fit):
        """Forecast 66 steps ahead (the validation horizon used in this notebook)."""
        y_pred = model_fit.forecast(66)
        return y_pred

    @DataModeler.logger("Plot predicted data")
    def plot_predict_test(self, X_valid, y_pred, y_valid ):
        """Overlay predicted and ground-truth series over the validation dates."""
        fig = plt.figure(figsize=(15,4))
        sns.lineplot(x=X_valid.index, y=y_pred, color='blue', label='predicted') #navajowhite
        sns.lineplot(x=X_valid.index, y=y_valid, color='orange', label='Ground truth') #navajowhite
        plt.xlabel(xlabel='Date', fontsize=14)
        plt.ylabel(ylabel='Percentage Vaccinations', fontsize=14)
        plt.xticks(rotation=-60)
        plt.show()

    @DataModeler.logger("Model diagonostic")
    def Model_diagonostic(self, model_fit):
        """Standard statsmodels residual diagnostics panel."""
        model_fit.plot_diagnostics(figsize=(15, 12))
        plt.show()
# ### Regression model: preliminary result for choosing models
# +
class RF_Learner(DataModeler):
    "Wrap the operations of RF model."

    def __init__(self):
        super(RF_Learner, self).__init__()

    @DataModeler.logger("Transforming feature type")
    def split_dataset(self,X, y, train_ratio=0.8):
        """Chronological (unshuffled) split into train/validation parts.
        return: X_train, X_valid, y_train, y_valid"""
        cut = int(len(X) * train_ratio)
        return X[:cut], X[cut:], y[:cut], y[cut:]

    @DataModeler.logger("Transforming feature type_2")
    def trim(self, stamp:List[str], x_train, x_valid):
        """Drop the columns listed in `stamp` and return the remaining columns
        of both frames as raw value arrays."""
        keep = list(set(list(x_train.columns)) - set(stamp))
        return x_train[keep].values, x_valid[keep].values

    @DataModeler.logger("Fit model on training data")
    def RF_train(self,x_train, y_train,x_valid):
        """Fit a 200-tree random forest (fixed seed) and predict on x_valid."""
        forest = RandomForestRegressor(n_estimators=200, random_state=0)
        forest.fit(x_train, y_train)
        return forest.predict(x_valid)

    @DataModeler.logger("Predict results on test data")
    def predict(self,y_pred,y_valid):
        """Score predictions against ground truth.
        return: dict with 'mae', 'rmse' and 'r2'"""
        return {
            "mae": mean_absolute_error(y_valid, y_pred),
            "rmse": math.sqrt(mean_squared_error(y_valid, y_pred)),
            "r2": r2_score(y_valid, y_pred),
        }
# -
# #### ARIMA learner
# +
# loading data from univariate --
df_world = pd.read_csv("/Users/luomingni/Desktop/MS/first term/5220_SML/Project/world_filtered_data.csv")
# define: keep only the date index and the target series
df_world1 = pd.DataFrame(df_world,columns = ['date','percentage_people_vaccinated'])
df_world1.index = df_world1['date']
X = df_world1['date']
y = df_world1['percentage_people_vaccinated']
# -

# ARIMA leaner
ARIMA_leaner = time_Series_Learner()

# Stationarity check (ADF test) on the raw series.
ARIMA_leaner.Hypothesis_test(df_world1.percentage_people_vaccinated)

#grid search
# Define the p and q parameters to take any value between 0 and 3, d between 0 and 1
p = q = range(0, 4)
d = range(0,2)
# Generate all different combinations of p, d and q triplets
pdq = list(itertools.product(p, d, q))

X_train, X_valid, y_train, y_valid = ARIMA_leaner.split_dataset(X,y)

# AIC-based selection of the best (p, d, q) order over the grid.
parameter = ARIMA_leaner.tune_parameters(pdq,y_train,y_valid)

metrics, model_fit = ARIMA_leaner.Univariate_Arima(y_train,(2,1,2),y_valid)

metrics

y_pred = ARIMA_leaner.valid_forcast(model_fit)

ARIMA_leaner.plot_predict_test(X_valid,y_pred,y_valid)

ARIMA_leaner.Model_diagonostic(model_fit)
| EDA&3models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
def get_dist(x, y):
    """Return the Euclidean distance between the points x and y (array-likes)."""
    delta = np.asarray(x) - np.asarray(y)
    return np.sqrt((delta ** 2).sum())
# -
# Iteration 1: distance from each data point to the two initial centroids
# [2, 2] and [-2, -2].
get_dist([2, 2], [-1.88, 2.05])

get_dist([-2, -2], [-1.88, 2.05])

print(get_dist([2, 2], [-0.71, 0.42]))
print(get_dist([-2, -2], [-0.71, 0.42]))

print(get_dist([2, 2], [2.45, -0.67]))
print(get_dist([-2, -2], [2.45, -0.67]))

print(get_dist([2, 2], [1.85, -3.80]))
print(get_dist([-2, -2], [1.85, -3.80]))

print(get_dist([2, 2], [-3.69, -1.33]))
print(get_dist([-2, -2], [-3.69, -1.33]))

# Recompute each centroid as the mean of its assigned points (per coordinate).
print(np.mean([-1.88, 2.45]))
print(np.mean([2.05, -0.67]))

print(np.mean([-0.71, 1.85, -3.69]))
print(np.mean([0.42, -3.80, -1.33]))

# +
# Iteration 2: distances to the updated centroids.
print(get_dist([0.285, 0.69], [-1.88, 2.05]))
print(get_dist([-0.85, -1.57], [-1.88, 2.05]))

print(get_dist([0.285, 0.69], [-0.71, 0.42]))
print(get_dist([-0.85, -1.57], [-0.71, 0.42]))

print(get_dist([0.285, 0.69], [2.45, -0.67]))
print(get_dist([-0.85, -1.57], [2.45, -0.67]))

print(get_dist([0.285, 0.69], [1.85, -3.80]))
print(get_dist([-0.85, -1.57], [1.85, -3.80]))

print(get_dist([0.285, 0.69], [-3.69, -1.33]))
print(get_dist([-0.85, -1.57], [-3.69, -1.33]))
# -

# Second centroid update after the iteration-2 reassignment.
print(np.mean([-1.88, -0.71, 2.45]))
print(np.mean([2.05, 0.42, -0.67]))

print(np.mean([1.85, -3.69]))
print(np.mean([-3.80, -1.33]))

# +
# Iteration 3: distances to the second-update centroids.
print(get_dist([-0.0467, 0.6], [-1.88, 2.05]))
print(get_dist([-0.92, -2.565], [-1.88, 2.05]))

print(get_dist([-0.0467, 0.6], [-0.71, 0.42]))
print(get_dist([-0.92, -2.565], [-0.71, 0.42]))

print(get_dist([-0.0467, 0.6], [2.45, -0.67]))
print(get_dist([-0.92, -2.565], [2.45, -0.67]))

print(get_dist([-0.0467, 0.6], [1.85, -3.80]))
print(get_dist([-0.92, -2.565], [1.85, -3.80]))

print(get_dist([-0.0467, 0.6], [-3.69, -1.33]))
print(get_dist([-0.92, -2.565], [-3.69, -1.33]))
# -

# Quiz conclusions from the hand-computed iterations above.
print('The only data point to change cluster assignments was 2.')

print('The process took three iterations to assign all points such that cluster centers do not change.')
| Studying Materials/Course 4 Clustering and Retrieval/Week 3 KMeans/Quiz_Work.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Machine Learning LAB 1
# Academic Year 2021/22, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# The notebook contains some simple tasks to be performed about classification and regression. Complete **all** the required code sections and answer to **all** the questions.
# ### IMPORTANT 1: make sure to rerun all the code from the beginning to obtain the results for the final version of your notebook, since this is the way we will do it before evaluating your notebook!
#
# ### IMPORTANT 2: Place your name and ID number. Also recall to save the file as Surname_Name_LAB1.ipynb . Notebooks without name will be discarded.
#
# **Student name**: <NAME><br>
# **ID Number**: 2048654
#
# # 1) Classification of Music genre
#
# ### Dataset description
#
# A music genre is a conventional category that identifies pieces of music as belonging to a shared tradition or set of conventions. It is to be distinguished from musical form and musical style. The features extracted from these songs can help the machine to assing them to the two genres.
#
# This dataset is a subset of the dataset provided [here](https://www.kaggle.com/insiyeah/musicfeatures), containing only the data regarding the classical and metal genres.
#
# ### We consider 3 features for the classification
#
# 1) **tempo**, the speed at which a passage of music is played, i.e., the beats per minute of the musical piece<br>
# 2) **chroma_stft**, [mean chromagram activation on Short-Time Fourier Transform](https://librosa.org/doc/0.7.0/generated/librosa.feature.chroma_stft.html)<br>
# 3) **spectral_centroid**, Indicates where the "center of mass" of the spectrum is located, i.e., it is the weighted average of the frequency transform<br>
#
# We first import all the packages that are needed.
# +
# %matplotlib inline
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import sklearn as sl
from scipy import stats
from sklearn import datasets
from sklearn import linear_model
# -
# # Perceptron
# Firstly we will implement the perceptron algorithm and use it to learn a halfspace.
# **TO DO** Set the random seed, you can use your ID (matricola) or any other number! Try to make various tests changing the seed.
# YOUR_ID, try also to change the seed to
# see the impact of random initialization on the results
IDnumber = 2048654
#IDnumber = 10
np.random.seed(IDnumber)

# Load the dataset and then split in training set and test set (the training set is typically larger, you can use a 75% training 25% test split) after applying a random permutation to the dataset.

# A) Load dataset and perform permutation

# +
# Load the dataset
# NOTE(review): the file handle from open() is never closed -- a `with` block
# would be safer; left unchanged here.
filename = 'data/music.csv'
music = csv.reader(open(filename, newline='\n'), delimiter=',')
header = next(music) # skip first line
print(f"Header: {header}\n")

dataset = np.array(list(music))
print(f"Data shape: {dataset.shape}\n")
print( "Dataset Example:")
print( dataset[:10,...])

X = dataset[:,:-1].astype(float) # columns 0,1,2 contain the features
Y = dataset[:, -1].astype( int)  # last column contains the labels
# Map labels {0, 1} -> {-1, +1} as required by the perceptron
Y = 2*Y-1  # for the perceptron classical--> -1, metal-->1

m = dataset.shape[0]
print("\nNumber of samples loaded:", m)
permutation = np.random.permutation(m) # random permutation

# moved the permutation of the np.arrays X and Y to the next cell in order to include
# them in the control that makes sure there are at least 10 elements of each genre in
# the training set
# -
# We are going to classify class "1" (metal) vs class "-1" (classical)
# B) **TO DO** Divide the data into training set and test set (75% of the data in the first set, 25% in the second one)
# +
# Divide in training and test: make sure that the training set contains
# at least 10 elements from class 1 and at least 10 elements from class -1!
# If it does not, apply more random permutations (or the same permutation
# multiple times) until this happens.
# IMPORTANT: do not change the random seed.
# k stays False until a split with at least 10 elements per genre is found
k = False
while not k:
    # re-apply the permutation; applying the same permutation again still
    # changes the order, so the loop cannot get stuck on a single split
    X = X[permutation]
    Y = Y[permutation]
    # m_training is the number of samples in the training set (75%)
    m_training = int(0.75 * m)
    # m_test is the number of samples in the test set; using the complement
    # guarantees no sample is lost when m is not divisible by 4
    # (the original int(0.25 * m) could silently drop one sample)
    m_test = m - m_training
    # X_training / Y_training = instances and labels for the training set
    X_training = X[:m_training]
    Y_training = Y[:m_training]
    # X_test / Y_test = instances and labels for the test set
    X_test = X[-m_test:]
    Y_test = Y[-m_test:]
    # count the two classes in the training labels (labels are only -1 or 1)
    cont_p = int(np.sum(Y_training == 1))
    cont_n = int(np.sum(Y_training == -1))
    if cont_p >= 10 and cont_n >= 10:
        k = True
print(Y_training) # to make sure that Y_training contains both 1 and -1
print(m_test)
print("\nNumber of classical instances in test:", np.sum(Y_test==-1))
print("Number of metal instances in test:", np.sum(Y_test== 1))
print("Shape of training set: " + str(X_training.shape))
print("Shape of test set: " + str(X_test.shape))
# -
# We add a 1 in front of each sample so that we can use a vector in homogeneous coordinates to describe all the coefficients of the model. This can be done with the function $hstack$ in $numpy$.
# +
# Add a 1 to each sample (homogeneous coordinates), so that the bias term is
# absorbed into the weight vector and the model becomes a single dot product.
X_training = np.hstack((np.ones((m_training, 1)), X_training))
X_test = np.hstack((np.ones((m_test, 1)), X_test))
print("Training set in homogeneous coordinates:")
print(X_training[:10])
# -
# **TO DO** Now complete the function *perceptron*. Since the perceptron does not terminate if the data is not linearly separable, your implementation should return the desired output (see below) if it reached the termination condition seen in class or if a maximum number of iterations have already been run, where one iteration corresponds to one update of the perceptron weights. In case the termination is reached because the maximum number of iterations have been completed, the implementation should return **the best model** seen up to now.
#
# The input parameters to pass are:
# - $X$: the matrix of input features, one row for each sample
# - $Y$: the vector of labels for the input features matrix X
# - $max\_num\_iterations$: the maximum number of iterations for running the perceptron
#
# The output values are:
# - $best\_w$: the vector with the coefficients of the best model
# - $best\_error$: the *fraction* of misclassified samples for the best model
# +
# A template is provided, but feel free to build a different implementation
def perceptron_update(current_w, x, y):
    """Perceptron update rule: shift the weights by the signed sample y*x."""
    step = y * x
    return current_w + step
def perceptron(X, Y, max_num_iterations):
    """Train a perceptron on (X, Y) and return the best model seen.

    Parameters:
        X: (m, n) matrix of input features in homogeneous coordinates.
        Y: vector of m labels in {-1, +1}.
        max_num_iterations: maximum number of weight updates to run.

    Returns:
        best_w: weight vector of the best model found.
        best_error: fraction of misclassified samples for best_w.
        errors: per-iteration error fractions (for plotting).
        best_errors: per-iteration running-best error fractions.

    NOTE(review): the condition `num_iter < max_num_iterations` performs at
    most max_num_iterations - 1 updates — confirm whether this off-by-one
    is intended.
    """
    # init the algorithm with w = 0, use best_w to keep track of the best solution
    # m -> number of training examples, n -> number of features
    m, n = X.shape
    curr_w = np.zeros(n)
    best_w = curr_w
    num_samples = m
    # current error for the cycle; starting at 1 (100% misclassified) means
    # the first measured error always becomes the new best
    curr_error = 1
    best_error = curr_error
    # lists storing the errors for each iteration, in order to plot them later
    errors = []
    best_errors = []
    index_misclassified = -1 # will be overwritten
    num_misclassified = 0 # will be overwritten
    # main loop: continue until all samples are correctly classified or the
    # maximum number of iterations is reached
    num_iter = 1
    # instead of using the following condition for the while, a break stops
    # the loop when all samples are correctly classified
    #while ((index_misclassified != -1) and (num_iter < max_num_iterations)):
    while num_iter < max_num_iterations:
        # list to store the indices of the misclassified samples in this pass
        rand_index = []
        num_misclassified = 0
        # avoid working always on the same sample: shuffle the data so the
        # choice of the misclassified point to update on is randomized
        permutation = np.random.permutation(len(X))
        X = X[permutation]
        Y = Y[permutation]
        for i in range(num_samples):
            # a sample is misclassified when y * <w, x> <= 0
            if Y[i] * np.dot(curr_w, X[i]) <= 0:
                num_misclassified += 1
                rand_index.append(i)
        # update error count, keep track of best solution
        error = num_misclassified / num_samples
        if error < best_error:
            best_error = error
            best_w = curr_w
        # store the errors and the best errors (consumed by the plots below)
        errors.append(error)
        best_errors.append(best_error)
        # stop early if all samples are correctly classified
        if num_misclassified == 0:
            print("There are no misclassified points. \n\nn_iter = ", num_iter)
            break
        # update the weights using one randomly chosen misclassified sample
        index_misclassified = rand_index[np.random.randint(0, len(rand_index))]
        curr_w = perceptron_update(curr_w, X[index_misclassified],
                                   Y[index_misclassified])
        num_iter += 1
    return best_w, best_error, errors, best_errors
# -
# Now we use the implementation above of the perceptron to learn a model from the training data using 100 iterations and print the error of the best model we have found.
# now run the perceptron for 100 iterations and report the training error
w_found, error, errors, best_errors = perceptron(X_training, Y_training, 100)
print(w_found)  # best weight vector found
print("Training Error of perpceptron (100 iterations): " + str(error))
# error is a fraction; multiply by the training-set size to get a count
print("Misclassified points: ", int(error*m_training))
# **TO DO** use the best model $w\_found$ to predict the labels for the test dataset and print the fraction of misclassified samples in the test set (the test error that is an estimate of the true loss).
# +
# now use w_found to make predictions on the test dataset.
# A sample x with label y is misclassified when y * <w, x> <= 0, so the
# error count is a single vectorized comparison — equivalent to, but much
# faster than, the original per-sample Python loop.
num_errors = int(np.sum(Y_test * np.dot(X_test, w_found) <= 0))
true_loss_estimate = num_errors/m_test # error rate on the test set
#NOTE: you can avoid using num_errors if you prefer, as long as true_loss_estimate is correct
print("Test Error of perpceptron (100 iterations): " + str(true_loss_estimate))
print("Misclassified points: ", num_errors)
# -
# **TO DO** **[Answer the following]** What about the difference between the training error and the test error (in terms of fraction of misclassified samples)? Explain what you observe. [Write the answer in this cell]
#
# **ANSWER QUESTION 1**
# The two errors are comparable ($\sim 0.19$ vs $0.16$), in contrast with what we expected: since the model is trained by minimizing the loss on the training set, the training error should normally be the smaller of the two. This may be caused by the smallness of the data set and an unfortunate choice of the seed: indeed, repeating the procedure with different seeds yields a smaller error on the training set than on the test set. Furthermore, the small number of iterations makes the results very unstable. Moreover, the multiple randomization operations, not all governed by the seed, make the results different for every run. We can appreciate the error trend in the following plot:
#plt.scatter(np.arange(len(errors)), errors, label = 'errors', color = 'C0', marker = 'x', s = 1)
# per-iteration error fraction (dashed line)
plt.plot(np.arange(len(errors)), errors,
         label = 'errors',
         color = 'C0',
         linestyle = 'dashed',
         linewidth = 1)
plt.xlabel('#iteration')
plt.ylabel('error')
# running best error: monotonically non-increasing by construction
plt.plot(np.arange(len(best_errors)), best_errors,
         label = 'best error trend',
         color = 'red',
         linestyle = '-',
         linewidth = 2)
plt.legend()
plt.show()
# **TO DO** Copy the code from the last 2 cells above in the cell below and repeat the training with 4000 iterations. Then print the error in the training set and the estimate of the true loss obtained from the test set.
# +
# now run the perceptron for 4000 iterations here!
w_found, error, errors, best_errors = perceptron(X_training, Y_training, 4000)
print(w_found)  # best weight vector found
print("Training Error of perpceptron (4000 iterations): " + str(error))
print("Misclassified points: ", int(error*m_training))
num_errors = 0
# count the misclassified test samples: y * <w, x> <= 0 means misclassified
for i in range(m_test):
    if Y_test[i] * np.dot(w_found, X_test[i]) <= 0:
        num_errors += 1
true_loss_estimate = num_errors/m_test # error rate on the test set
print("Test Error of perpceptron (4000 iterations): " + str(true_loss_estimate))
print("Misclassified points: ", num_errors)
# -
# **TO DO** [Answer the following] What about the difference between the training error and the test error (in terms of fraction of misclassified samples) when running for a larger number of iterations? Explain what you observe and compare with the previous case. [Write the answer in this cell]
#
# **ANSWER QUESTION 2**
# In this case, as expected, the training error is almost always smaller than the test error: as said in the previous answer, the model is trained by minimizing the loss on the training set, so this behaviour is to be expected.
# While the training error is significantly smaller with 4000 iterations than in the previous case, the test error doesn't show noteworthy differences, probably due to the small size of the test set.
#plt.scatter(np.arange(len(errors)), errors, label = 'errors', color = 'C0', marker = 'x', s = 1)
# per-iteration error fraction (dashed line)
plt.plot(np.arange(len(errors)), errors,
         label = 'errors',
         color = 'C0',
         linestyle = 'dashed',
         linewidth = 0.5)
plt.xlabel('#iteration')
plt.ylabel('error')
# running best error: monotonically non-increasing by construction
plt.plot(np.arange(len(best_errors)), best_errors,
         label = 'best error trend',
         color = 'red',
         linestyle = '-',
         linewidth = 2)
plt.legend()
plt.show()
# # Logistic Regression
# Now we use logistic regression, exploiting the implementation in Scikit-learn, to predict labels. We will also plot the decision region of logistic regression.
#
# We first load the dataset again.
# +
# Load the dataset again (same procedure as the first part of the notebook)
filename = 'data/music.csv'
music = csv.reader(open(filename, newline='\n'), delimiter=',')
header = next(music) # skip first line (column names)
print(f"Header: {header}\n")
dataset = np.array(list(music))
print(f"Data shape: {dataset.shape}\n")
print("Dataset Example:")
print(dataset[:10,...])
X = dataset[:,:-1].astype(float) # columns 0,1,2 contain the features
Y = dataset[:,-1].astype(int) # last column contains the labels
# map labels {0, 1} to {-1, +1}: classical --> -1, metal --> 1
Y = 2*Y-1
m = dataset.shape[0]  # total number of samples
print("\nNumber of samples loaded:", m)
permutation = np.random.permutation(m) # random permutation of the sample indices
# -
# **TO DO** As for the previous part, divide the data into training and test (75%-25%) and add a 1 as first component to each sample.
# +
# Divide in training and test: make sure that the training set contains
# at least 10 elements from class 1 and at least 10 elements from class -1!
# If it does not, apply more random permutations until this happens.
# IMPORTANT: do not change the random seed.
# BUGFIX: the class counters were initialized once *outside* the while loop,
# so a second pass would keep accumulating counts from the previous pass and
# could accept a split that does not really satisfy the 10-per-class
# requirement. The counts are now computed fresh on every pass.
k = False
while not k:
    # re-apply the permutation (repeating it changes the order again)
    X = X[permutation]
    Y = Y[permutation]
    # sizes of the two sets (75% / 25%); the test size is the complement so
    # no sample is lost when m is not divisible by 4
    m_training = int(0.75 * m)
    m_test = m - m_training
    X_training = X[:m_training]
    Y_training = Y[:m_training]
    X_test = X[-m_test:]
    Y_test = Y[-m_test:]
    # count each class in the training labels (labels are only -1 or 1)
    cont_p = int(np.sum(Y_training == 1))
    cont_n = int(np.sum(Y_training == -1))
    if cont_p >= 10 and cont_n >= 10:
        k = True
print("Number of samples in the test set:", m_test)
print("\nNumber of classical instances in test:", np.sum(Y_test==-1))
print("Number of metal instances in test:", np.sum(Y_test== 1))
print("\nShape of training set: " + str(X_training.shape))
print("Shape of test set: " + str(X_test.shape))
# -
# To define a logistic regression model in Scikit-learn use the instruction
#
# $linear\_model.LogisticRegression(C=1e5)$
#
# ($C$ is a parameter related to *regularization*, a technique that
# we will see later in the course. Setting it to a high value is almost
# as ignoring regularization, so the instruction above corresponds to the
# logistic regression you have seen in class.)
#
# To learn the model you need to use the $fit(...)$ instruction and to predict you need to use the $predict(...)$ function. See the Scikit-learn documentation for how to use it.
#
# **TO DO** Define the logistic regression model, then learn the model using the training set and predict on the test set. Then print the fraction of samples misclassified in the training set and in the test set.
# +
# part on logistic regression for 2 classes
# a large C disables regularization
logreg = linear_model.LogisticRegression(C=1e5)
# learn from training set
logreg.fit(X_training, Y_training)
# predict on training set
Y_pred_training = logreg.predict(X_training)
# error rate = fraction of misclassified samples; np.mean of the boolean
# mismatch mask replaces the original per-sample Python counting loop
error_rate_training = np.mean(Y_training != Y_pred_training)
print("Error rate on training set: " + str(error_rate_training))
# predict on test set
Y_pred_test = logreg.predict(X_test)
# error rate = fraction of misclassified samples
error_rate_test = np.mean(Y_test != Y_pred_test)
print("Error rate on test set: " + str(error_rate_test))
# -
# **TO DO** Now pick two features and restrict the dataset to include only two features, whose indices are specified in the $feature$ vector below. Then split into training and test. Which features are you going to select ?
# +
# to make the plot we need to reduce the data to 2D, so we choose two features
features_list = ['tempo', 'chroma_stft', 'spectral_centroid']
index_feature1 = 2 # Select the best 2 features according to your experiments
index_feature2 = 1 # Select the best 2 features according to your experiments
features = [index_feature1, index_feature2]
# names of the selected features, used for the axis labels of the plots below
feature_name0 = features_list[features[0]]
feature_name1 = features_list[features[1]]
# keep only the two selected columns, then re-split into training/test
# using the same m_training / m_test boundaries as before
X_reduced = X[:, features]
X_training = X_reduced[:m_training]
X_test = X_reduced[-m_test:]
print(X_reduced.shape)
# -
# Now learn a model using the training data and measure the performances.
# +
# learning from training data (two selected features only)
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X_training, Y_training)
# predict on the test set.
# BUGFIX: the original predicted on X_training and compared against
# Y_training, yet divided the error count by m_test and reported the result
# as the test error — mixing the two sets. Predict on X_test / Y_test.
Y_pred_test = logreg.predict(X_test)
# error rate = fraction of misclassified test samples
error_rate_test = np.mean(Y_test != Y_pred_test)
print("Error rate on test set: " + str(error_rate_test))
# -
# **TO DO** [Answer the following] Which features did you select and why? Compare the performances with the ones of the case with all 3 features and comment on the results. [Write the answer in this cell]
#
# **ANSWER QUESTION 3**
# The features selected were 'spectral_centroid' and 'chroma_stft'. Honestly I can't see a strong reason to exclude one of the three features; 'tempo' simply seems to be the one that discriminates least between classical and metal music. The error rate on the test set is in this way lower than the ones obtained using combinations of the other features, meaning that the algorithm, when using all three features, assigns a small weight to 'tempo'.
# If everything is ok, the code below uses the model in $logreg$ to plot the decision region for the two features chosen above, with colors denoting the predicted value. It also plots the points (with correct labels) in the training set. It makes a similar plot for the test set.
#
# # ATTENTION!!
# I had to increase the step size from 0.02 to 0.0325 for the first feature selected, because the size of the array we are using with the spectral centroid feature causes an out-of-memory error on my laptop that kills the kernel (for the other I maintained 0.02). Be careful if there are RAM-hungry processes active; it can lead to freezing problems, at least for me (8 GB of RAM).
# +
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
# NOTICE: This visualization code has been developed for a "standard" solution of the notebook,
# it could be necessary to make some fixes to adapt to your implementation
h1 = .0325 # step size in the mesh (feature 0; coarser to limit memory use)
h2 = .02   # step size in the mesh (feature 1)
# mesh bounds: data range padded by 0.5 on each side
x_min, x_max = X_reduced[:, 0].min() - .5, X_reduced[:, 0].max() + .5
y_min, y_max = X_reduced[:, 1].min() - .5, X_reduced[:, 1].max() + .5
'''print(x_min)
print(x_max)
print(y_min)
print(y_max)
print('\n')
print(len(np.arange(x_min, x_max, h)))
print(len(np.arange(y_min, y_max, h)))
'''
xx, yy = np.meshgrid(np.arange(x_min, x_max, h1), np.arange(y_min, y_max, h2))
# predict the class for every point of the mesh (each mesh point is a sample)
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
# added shading = 'auto' to avoid a warning about deprecated behaviour
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, shading='auto')
# Plot also the training points (color = true label)
plt.scatter(X_training[:, 0], X_training[:, 1], c=Y_training, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel(feature_name0)
plt.ylabel(feature_name1)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title('Training set')
plt.show()
# Same decision regions, this time with the test points overlaid
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, shading='auto')
# Plot also the test points (squares, color = true label)
plt.scatter(X_test[:, 0], X_test[:, 1], c=Y_test, edgecolors='k', cmap=plt.cm.Paired, marker='s')
plt.xlabel(feature_name0)
plt.ylabel(feature_name1)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title('Test set')
plt.show()
# -
# # 2) Linear Regression on the Boston House Price dataset
#
# ### Dataset description:
#
# The Boston House Price Dataset involves the prediction of a house price in thousands of dollars given details about the house and its neighborhood.
#
# The dataset contains a total of 500 observations, which relate 13 input features to an output variable (house price).
#
# The variable names are as follows:
#
# CRIM: per capita crime rate by town.
#
# ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
#
# INDUS: proportion of nonretail business acres per town.
#
# CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
#
# NOX: nitric oxides concentration (parts per 10 million).
#
# RM: average number of rooms per dwelling.
#
# AGE: proportion of owner-occupied units built prior to 1940.
#
# DIS: weighted distances to five Boston employment centers.
#
# RAD: index of accessibility to radial highways.
#
# TAX: full-value property-tax rate per $10,000.
#
# PTRATIO: pupil-teacher ratio by town.
#
# B: 1000*(Bk – 0.63)2 where Bk is the proportion of blacks by town.
#
# LSTAT: % lower status of the population.
#
# MEDV: Median value of owner-occupied homes in $1000s.
#
# +
#needed if you get the IPython/javascript error on the in-line plots
# %matplotlib nbagg
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pandas as pd
from scipy import stats
# +
#Import Data: load the data from a .csv file (semicolon-separated, one header row)
filename = "data/house.csv"
Data = np.genfromtxt(filename, delimiter=';', skip_header=1)
#A quick overview of data; to inspect the data you can use the method describe()
dataDescription = stats.describe(Data)
#print(dataDescription)
print("Shape of data array: " + str(Data.shape))
#for a more interesting visualization: use Pandas!
pdData = pd.read_csv(filename, delimiter = ';')
pdData
# -
# # Split data in training and test sets
#
#
#
# Given $m$ total data, denote with $m_{t}$ the part used for training. Keep $m_t$ data as training data, and $m_{test}:= m-m_{t}$. For instance one can take $m_t=0.7m$ of the data as training and $m_{test}=0.3m$ as testing. Let us define
#
# $\bullet$ $S_{t}$ the training data set
#
# $\bullet$ $S_{test}$ the testing data set
#
#
# The reason for this splitting is as follows:
#
# TRAINING DATA: The training data are used to compute the empirical loss
# $$
# L_S(h) = \frac{1}{m_t} \sum_{z_i \in S_{t}} \ell(h,z_i)
# $$
# which is used to estimate $h$ in a given model class ${\cal H}$.
# i.e.
# $$
# \hat{h} = {\rm arg\; min}_{h \in {\cal H}} \, L_S(h)
# $$
#
# TESTING DATA: The test data set can be used to estimate the performance of the final estimated model
# $\hat h_{\hat d_j}$ using:
# $$
# L_{{\cal D}}(\hat h_{\hat d_j}) \simeq \frac{1}{m_{test}} \sum_{ z_i \in S_{test}} \ell(\hat h_{\hat d_j},z_i)
# $$
#
#
# **TO DO**: split the data in training and test sets (70%-30%)
# +
#get number of total samples
num_total_samples = Data.shape[0]
print("Total number of samples: ", num_total_samples)
# 70% of the data is used for training
m_t = int(num_total_samples*.7)
print("Cardinality of Training Set: ", m_t)
#shuffle the data in place before splitting
np.random.shuffle(Data)
#training data: all columns but the last are inputs, the last is the output (price)
X_training = Data[:m_t, :-1]
Y_training = Data[:m_t, -1]
print("Training input data size: ", X_training.shape)
print("Training output data size: ", Y_training.shape)
#test data, to be used to estimate the true loss of the final model(s)
X_test = Data[m_t:, :-1]
Y_test = Data[m_t:, -1]
print("Test input data size: ", X_test.shape)
print("Test output data size: ", Y_test.shape)
# -
# # Data Normalization
# It is common practice in Statistics and Machine Learning to scale the data (= each variable) so that it is centered (zero mean) and has standard deviation equal to 1. This helps in terms of numerical conditioning of the (inverse) problems of estimating the model (the coefficients of the linear regression in this case), as well as to give the same scale to all the coefficients.
# +
# scale the data: standardize the input matrix (zero mean, unit std per feature)
from sklearn import preprocessing
# the transformation is computed on the training data only and then applied
# to all the sets, so no information leaks from the test set
scaler = preprocessing.StandardScaler().fit(X_training)
np.set_printoptions(suppress=True) # sets to zero floating point numbers < min_float_eps
X_training = scaler.transform(X_training)
print("Mean of the training input data:", X_training.mean(axis=0))
print("Std of the training input data:", X_training.std(axis=0))
X_test = scaler.transform(X_test) # use the same transformation on test data
print("Mean of the test input data:", X_test.mean(axis=0))
print("Std of the test input data:", X_test.std(axis=0))
# -
# # Model Training
#
# The model is trained (= estimated) minimizing the empirical error
# $$
# L_S(h) := \frac{1}{m_t} \sum_{z_i \in S_{t}} \ell(h,z_i)
# $$
# When the loss function is the quadratic loss
# $$
# \ell(h,z) := (y - h(x))^2
# $$
# we define the Residual Sum of Squares (RSS) as
# $$
# RSS(h):= \sum_{z_i \in S_{t}} \ell(h,z_i) = \sum_{z_i \in S_{t}} (y_i - h(x_i))^2
# $$
# so that the training error becomes
# $$
# L_S(h) = \frac{RSS(h)}{m_t}
# $$
#
# We recall that, for linear models, we have $h(x) = <w,x>$ and the empirical error $L_S(h)$ can be written
# in terms of the vector of parameters $w$ in the form
# $$
# L_S(w) = \frac{1}{m_t} \|Y - X w\|^2
# $$
# where $Y$ and $X$ are the matrices whose $i-$th row are, respectively, the output data $y_i$ and the input vectors $x_i^\top$.
#
#
# **TO DO:** compute the linear regression coefficients using np.linalg.lstsq from NumPy
#
# +
#compute linear regression coefficients for training data
#add a 1 at the beginning of each sample for training and testing (homogeneous coordinates)
m_training = X_training.shape[0]
X_trainingH = np.hstack((np.ones((m_training,1)), X_training)) # H: in homogeneous coordinates
m_test = X_test.shape[0]
X_testH = np.hstack((np.ones((m_test,1)), X_test)) # H: in homogeneous coordinates
# Compute the least-squares coefficients using numpy's linalg.lstsq.
# NOTE(review): RSStr_np is the residual array returned by lstsq; it is empty
# when the input matrix is rank-deficient — confirm the data has full rank.
w_np, RSStr_np, rank_Xtr, sv_Xtr = np.linalg.lstsq(X_trainingH, Y_training, rcond=None)
print("LS coefficients with numpy lstsq:", w_np)
# compute the residual sum of squares "by hand" as a cross-check
RSStr_hand = np.sum((Y_training - np.dot(X_trainingH, w_np)) ** 2)
print("RSS with numpy lstsq:\t\t\t\t", RSStr_np)
print("Empirical risk with numpy lstsq:\t\t", RSStr_np/m_training)
print("RSS with 'by hand':\t\t\t\t", RSStr_hand)
print("Empirical risk 'by hand':\t\t\t", RSStr_hand/m_training)
# -
# ## Data prediction
#
# Compute the output predictions on both training and test set and compute the Residual Sum of Squares (RSS).
#
# **TO DO**: Compute these quantities on training and test sets.
# +
# compute predictions on training and test sets
prediction_training = np.dot(X_trainingH, w_np)
prediction_test = np.dot(X_testH, w_np)
# residual sums of squares; reuse the predictions computed just above
# instead of recomputing the same dot products (DRY, and slightly faster)
RSS_training = np.sum((Y_training - prediction_training) ** 2)
RSS_test = np.sum((Y_test - prediction_test) ** 2)
print("RSS on training data:\t\t\t", RSS_training)
print("Loss estimated from training data:\t", RSS_training/m_training)
print("RSS on test data:\t\t\t", RSS_test)
print("Loss estimated from test data:\t\t", RSS_test/m_test)
# -
# ### QUESTION 4: Comment on the results you get and on the difference between the train and test errors.
#
# The loss computed on the training set is significantly smaller than the one computed on the test set, as expected. However, changing the seed to some unlucky value could lead to results in contrast with the previous sentence, due to the smallness of the set.
# ## Ordinary Least-Squares using scikit-learn
# Another fast way to compute the LS estimate is through sklearn.linear_model (for this function homogeneous coordinates are not needed).
# +
from sklearn import linear_model
# build the LinearRegression() model and train it (no homogeneous coordinates
# needed here: sklearn fits the intercept separately)
LinReg = linear_model.LinearRegression()
LinReg.fit(X_training, Y_training)
print("Intercept:", LinReg.intercept_)
print("Least-Squares Coefficients:", LinReg.coef_)
# predict output values on training and test sets
Y_pred_test = LinReg.predict(X_test)
Y_pred_training = LinReg.predict(X_training)
# score() returns the coefficient of determination R^2, so 1 - score measures
# the fraction of variance left unexplained on the training data
print("Measure on training data:", 1-LinReg.score(X_training, Y_training))
| Linear_Predictors/Attar_Aidin_Lab1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pyprind
import pandas as pd
import os

# Read the 50,000 aclImdb review files into a DataFrame with columns
# ['review', 'sentiment'] (pos -> 1, neg -> 0), showing a progress bar.
pbar = pyprind.ProgBar(50000)
labels = {'pos': 1, 'neg': 0}
# BUGFIX: collect rows in a plain list and build the DataFrame once at the
# end. The original `df = df.append(...)` copied the whole frame on every
# file (quadratic), and DataFrame.append was removed in pandas >= 2.0.
rows = []
for s in ('test', 'train'):
    for l in ('pos', 'neg'):
        path = '../../imdb-dataset/aclImdb/%s/%s' % (s, l)
        for file in os.listdir(path):
            # the reviews are UTF-8; be explicit so decoding does not depend
            # on the platform's locale
            with open(os.path.join(path, file), 'r', encoding='utf-8') as infile:
                txt = infile.read()
            rows.append([txt, labels[l]])
            pbar.update()
df = pd.DataFrame(rows, columns=['review', 'sentiment'])
import numpy as np
# fixed seed so the shuffled row order is reproducible
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
df.to_csv('../../imdb-dataset/movie_data.csv', index=False)
| ch08/.ipynb_checkpoints/01-importing-imbd-dataset-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import jaxlib
# from jax_unirep import get_reps
import os
from time import time
from collections import defaultdict
# from UniRep.unirep_utils import get_UniReps
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn import model_selection
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import make_scorer, accuracy_score, roc_auc_score, roc_curve, auc, plot_confusion_matrix
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.preprocessing import label_binarize
from scipy import interp
from itertools import cycle
# from Bio.SeqUtils import molecular_weight
# from Bio.SeqUtils.IsoelectricPoint import IsoelectricPoint as IP
import warnings
warnings.filterwarnings('ignore')
# -
def train_opt_model(model_class, parameters, X_train, y_train, scoring, refit_sc):
    """Run a 5-fold cross-validated grid search and return the fitted search.

    Args:
        model_class: the sklearn estimator to tune.
        parameters: parameter-grid dictionary with the values to test.
        X_train: features of the training set.
        y_train: classes of the training set.
        scoring: sklearn scoring specification with the metrics to use.
        refit_sc: metric name used to refit the best estimator.

    Returns:
        The fitted GridSearchCV object (best estimator + CV results).
    """
    search = GridSearchCV(
        model_class,
        parameters,
        cv=5,
        scoring=scoring,
        refit=refit_sc,
        verbose=3,
        n_jobs=-1,
    )
    search.fit(X_train, y_train)
    return search
def plot_param_search_1d(trained_model, parameters, refit_sc):
    """Print and plot the results of a one-parameter grid search.

    Parameters:
        trained_model: a fitted GridSearchCV object.
        parameters: the (single-entry) parameter grid used for the search.
        refit_sc: name of the metric used to pick the best model
            ('AUC' or 'Accuracy'); also used as the y-axis label.

    Returns:
        None (shows a matplotlib figure as a side effect).
    """
    # convert the parameter-grid dictionary to lists for ease of use
    param_values = list(parameters.values())
    param_names = list(parameters.keys())
    # cross-validation results as a DataFrame
    cv_results = pd.DataFrame(trained_model.cv_results_)
    # print results of the best classifier; the AUC columns exist only when
    # the scoring specification included an 'AUC' entry
    if 'rank_test_AUC' in cv_results:
        print('For the model optimized by AUC:')
        print('\t the parameter is: {}'.format(cv_results.loc[cv_results['rank_test_AUC']==1,'params'].to_string(index=False)))
        print('\t the AUC is: {}'.format(cv_results.loc[cv_results['rank_test_AUC']==1,'mean_test_AUC'].to_string(index=False)))
        print('\t the accuracy is: {}'.format(cv_results.loc[cv_results['rank_test_AUC']==1,'mean_test_Accuracy'].to_string(index=False)))
    print('For the model optimized by Accuracy:')
    print('\t the parameter is: {}'.format(cv_results.loc[cv_results['rank_test_Accuracy']==1,'params'].to_string(index=False)))
    if 'rank_test_AUC' in cv_results:
        print('\t the AUC is: {}'.format(cv_results.loc[cv_results['rank_test_Accuracy']==1,'mean_test_AUC'].to_string(index=False)))
    print('\t the accuracy is: {}'.format(cv_results.loc[cv_results['rank_test_Accuracy']==1,'mean_test_Accuracy'].to_string(index=False)))
    print("Optimizing by", refit_sc, "so best parameters are: {}".format(trained_model.best_params_))
    # plot results.
    # NOTE(review): when AUC is present the plotted values are mean_test_AUC
    # even though the y-axis is labeled with refit_sc — confirm the intended
    # metric.
    if 'rank_test_AUC' in cv_results:
        acc_eval_scores = np.transpose(np.array(cv_results.mean_test_AUC))
    else:
        acc_eval_scores = np.transpose(np.array(cv_results.mean_test_Accuracy))
    # x axis: the tested values of the single parameter
    scores_dict = {param_names[0]: parameters[param_names[0]]}
    param_df = pd.DataFrame(scores_dict)  # (currently unused)
    sns.lineplot(parameters[param_names[0]], acc_eval_scores)
    plt.ylabel(refit_sc)
    plt.xlabel(param_names[0])
    # dashed grey line marks the selected (best) parameter value
    plt.axvline(trained_model.best_params_[param_names[0]], 0, 1, linestyle="dashed", color="grey")
    plt.tight_layout()
    # plt.savefig("model.png", dpi=600)
    plt.show()
    return None
def plot_param_search(trained_model, parameters, refit_sc):
    """Print and plot the results of a two-parameter grid search.

    Parameters:
        trained_model: a fitted GridSearchCV object.
        parameters: parameter grid used for the search; must contain exactly
            two entries, otherwise the 2-D plot below cannot be drawn.
        refit_sc: name of the metric used to pick the best model; also the
            y-axis label of the plot.

    Returns:
        None (shows a matplotlib figure as a side effect).
    """
    # cross-validation results as a DataFrame
    cv_results = pd.DataFrame(trained_model.cv_results_)
    # print results of the best classifier; the AUC columns exist only when
    # the scoring specification included an 'AUC' entry
    if 'rank_test_AUC' in cv_results:
        print('For the model optimized by AUC:')
        print('\t the parameters are: {}'.format(cv_results.loc[cv_results['rank_test_AUC']==1,'params'].to_string(index=False)))
        print('\t the AUC is: {}'.format(cv_results.loc[cv_results['rank_test_AUC']==1,'mean_test_AUC'].to_string(index=False)))
        print('\t the accuracy is: {}'.format(cv_results.loc[cv_results['rank_test_AUC']==1,'mean_test_Accuracy'].to_string(index=False)))
    print('For the model optimized by Accuracy:')
    print('\t the parameters are: {}'.format(cv_results.loc[cv_results['rank_test_Accuracy']==1,'params'].to_string(index=False)))
    if 'rank_test_AUC' in cv_results:
        print('\t the AUC is: {}'.format(cv_results.loc[cv_results['rank_test_Accuracy']==1,'mean_test_AUC'].to_string(index=False)))
    print('\t the accuracy is: {}'.format(cv_results.loc[cv_results['rank_test_Accuracy']==1,'mean_test_Accuracy'].to_string(index=False)))
    print("Optimizing by", refit_sc, "so best parameters are: {}".format(trained_model.best_params_))
    # gather the mean test scores together with the tested parameter values
    if 'rank_test_AUC' in cv_results:
        rlt = pd.concat([pd.DataFrame(trained_model.cv_results_["params"]),
                         pd.DataFrame(trained_model.cv_results_["mean_test_AUC"], columns=["AUC"])], axis=1)
    else:
        rlt = pd.concat([pd.DataFrame(trained_model.cv_results_["params"]),
                         pd.DataFrame(trained_model.cv_results_["mean_test_Accuracy"], columns=["Accuracy"])], axis=1)
    # the parameter with fewer values becomes the hue of the line plot
    param_values = list(parameters.values())
    param_names = list(parameters.keys())
    # select the parameter with more values as x axis (reverse if necessary)
    if len(param_values[0]) < len(param_values[1]):
        param_values.reverse()
        param_names.reverse()
    sns.lineplot(x=param_names[0], y=refit_sc, hue=param_names[1], data=rlt)
    # dashed grey line marks the selected (best) value of the x-axis parameter
    plt.axvline(trained_model.best_params_[param_names[0]], 0, 1, linestyle="dashed", color="grey")
    plt.tight_layout()
    # plt.savefig("model.png", dpi=600)
    plt.show()
    return None
def plot_roc_curve(trained_classifier, model_name, X_test, y_test, roc_col):
    """Plot the macro-averaged ROC curve for a fitted multi-class classifier.

    Parameters
    ----------
    trained_classifier : fitted estimator or fitted search object
        A plain ``SVC`` is used directly; anything else is assumed to be a
        grid-search object exposing ``best_estimator_``.
    model_name : str
        Label used in the plot legend.
    X_test, y_test : array-like
        Held-out features and string class labels.
    roc_col : str
        Matplotlib color for this model's curve.

    Returns
    -------
    tuple of float
        ``(test_accuracy, macro_auc)`` on the held-out data.
    """
    # adapted from https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
    # Binarize the output: one indicator column per class, fixed label order.
    y_labs = label_binarize(y_test, classes=['cytoplasm', 'membrane','secreted'])
    n_classes = y_labs.shape[1]
    # SVC models were refit manually (not wrapped in a search object),
    # hence the type-name check to pick the right predict interface.
    if str(trained_classifier)[:3] == 'SVC':
        pred_probs = trained_classifier.predict_proba(X_test)
        preds = trained_classifier.predict(X_test)
    else:
        pred_probs = trained_classifier.best_estimator_.predict_proba(X_test)
        preds = trained_classifier.best_estimator_.predict(X_test)
    test_accuracy = accuracy_score(y_test, preds)
    # Compute a one-vs-rest ROC curve and AUC for each class.
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_labs[:, i], pred_probs[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # First aggregate all false positive rates.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at these points.
    # np.interp replaces the deprecated `scipy.interp` (removed in SciPy
    # 1.8), which was itself just an alias for numpy.interp.
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute the macro AUC.
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    plt.plot(fpr["macro"], tpr["macro"],
             label=str(model_name)+', AUC: {:.3f}'.format(roc_auc["macro"]),
             color=roc_col, linestyle='-', linewidth=1)
    return test_accuracy, roc_auc["macro"]
# Import the data set with UniRep features.
# NOTE(review): despite the variable name, this loads the *E. coli*
# (escherichia) data set WITHOUT signal peptides ('noSP'), not human data.
human_unirep_all = pd.read_pickle('UniRep_datasets/combined_escherichia_UniRep_dataset_noSP.pkl')
# Build the feature matrix (stacked UniRep vectors) and the label vector,
# then hold out 20% of the data as a test set (fixed seed for reproducibility).
X = np.stack(human_unirep_all["UniRep"].to_numpy())
y = np.array(human_unirep_all['location'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# the dataset is unbalanced -- inspect the class counts
pd.Series(y).value_counts()
# +
# Display names, estimator instances and hyper-parameter grids are kept in
# three parallel lists; the training loop below zips them together.
model_names = [
    'kNN',
    'LogReg',
    'RF',
    'MLP',
    'AdaBoost',
    'RBF SVM',
    'lin SVM'
]
models = [
    KNeighborsClassifier(),
    LogisticRegression(solver='liblinear'),
    RandomForestClassifier(),
    MLPClassifier(max_iter=200),
    AdaBoostClassifier(DecisionTreeClassifier(max_depth=1)),
    SVC(kernel='rbf'),
    SVC(kernel='linear')
]
params = [
    # for kNN: neighbour counts centred on the sqrt(n_samples) heuristic
    {'n_neighbors':[int(np.sqrt(len(y_train))/16),
                    int(np.sqrt(len(y_train))/8),
                    int(np.sqrt(len(y_train))/4),
                    int(np.sqrt(len(y_train))/2),
                    int(np.sqrt(len(y_train))),
                    int(np.sqrt(len(y_train))*2),
                    int(np.sqrt(len(y_train))*4)],
     'weights': ['uniform', 'distance']},
    # for LogisticRegression
    {'C' : [1e-3,1e-1,1,10],
     'penalty' : ["l1","l2"]},
    # for RandomForest
    {'n_estimators':[1,5,10,20],
     'max_depth':[1,30,150]},
    # for MLP works fine but slow
    {'hidden_layer_sizes':[50,100],
     'alpha':[1e-4,1e-2,1e-1]},
    # for AdaBoost
    {'n_estimators':[10,50,100],
     'learning_rate':[0.1,1]},
    # for RBF SVM
    {'C':[1e-4, 1e-2, 1],
     'gamma':['scale','auto']},
    # for linear SVM
    {'C':[1e-4, 1e-2, 1]}
]
# -
# ovo: One versus One and averaging macro, not affected by class imbalance (https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score)
# Both scores are tracked during CV; `refit_sc` decides which one picks the
# final (refit) model.
scoring = {'Accuracy': make_scorer(accuracy_score),
           'AUC':'roc_auc_ovo'}
refit_sc = 'AUC'
# +
# Grid-search every model. SVC needs special handling: it is first tuned
# without probability estimates (much faster), then refit once with
# probability=True so that plot_roc_curve can call predict_proba later.
trained_models = []
for model, parameters, mod_name in zip(models,params, model_names):
    print("Started training model", mod_name)
    time_0 = time()
    if str(model)[:3] == 'SVC':
        # SVC without probability=True cannot produce ROC-AUC scores,
        # so optimize by accuracy only.
        trn_clf = train_opt_model(model, parameters, X_train, y_train,
                                  {'Accuracy':make_scorer(accuracy_score)}, 'Accuracy')
        print("Finished training model in", round(time()-time_0,2),"seconds\n")
        if len(parameters) == 2:
            plot_param_search(trn_clf, parameters, 'Accuracy')
        else:
            plot_param_search_1d(trn_clf, parameters, 'Accuracy')
        # retrain calculating probabilities
        best_parm = trn_clf.best_params_
        if len(best_parm) == 2: # for the rbf kernel (tunes both C and gamma)
            trn_clf = SVC(kernel='rbf', C=best_parm['C'], gamma=best_parm['gamma'], probability=True)
        else:
            trn_clf = SVC(kernel='linear', C=best_parm['C'], probability=True)
        trn_clf.fit(X_train, y_train)
    else:
        # All other models support predict_proba directly; optimize by AUC.
        trn_clf = train_opt_model(model, parameters, X_train, y_train, scoring, refit_sc)
        print("Finished training model in", round(time()-time_0,2),"seconds\n")
        # 2-D grids get a heat/line plot, 1-D grids a simple line plot.
        if len(parameters) == 2:
            plot_param_search(trn_clf, parameters, refit_sc)
        else:
            plot_param_search_1d(trn_clf, parameters, refit_sc)
    trained_models.append(trn_clf)
    print("---------------------------\n")
# +
# Overlay the macro-averaged ROC curve of every trained model on one plot
# and collect their held-out AUC / accuracy for the bar charts below.
compare_auc = []
compare_accuracy = []
roc_colors = ['navy','orange','green','peru','red','violet','cyan']
for trn_model, mod_name, roc_col in zip(trained_models, model_names, roc_colors):
    test_accuracy, test_auc_val = plot_roc_curve(trn_model, mod_name, X_test, y_test, roc_col)
    compare_auc.append(test_auc_val)
    compare_accuracy.append(test_accuracy)
# Diagonal = chance-level classifier, for reference.
plt.plot([0, 1], [0, 1], 'k--', lw=1)
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC) curve')
plt.legend(loc="lower right")
plt.show()
# -
# %matplotlib inline
# +
# Side-by-side bar charts of test AUC and accuracy per model.
# The y-axis starts at 0.90 to make the small differences visible.
fig, axs = plt.subplots(1,2, figsize=(15,5))
axs[0].bar(model_names, compare_auc)
axs[1].bar(model_names, compare_accuracy)
axs[0].set_ylabel('AUC')
axs[1].set_ylabel('Accuracy')
axs[0].set_ylim(0.90,1.0)
axs[1].set_ylim(0.90,1.0)
plt.show()
# -
# ### Select best model
# Chosen manually after inspecting the comparison plots above.
best_model_name = 'LogReg'
# just to confirm, the selected model is:
trained_models[model_names.index(best_model_name)].estimator
# get the best model and best parameters
print('The best parameters are {}'.format(trained_models[model_names.index(best_model_name)].best_params_))
# should now retrain with all data, but here we take the best estimator trained on training dataset
best_mod = trained_models[model_names.index(best_model_name)].best_estimator_
# get a confusion matrix of the test dataset for that model
# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and
# removed in 1.2; newer versions need ConfusionMatrixDisplay.from_estimator.
preds = best_mod.predict(X_test)
plot_confusion_matrix(best_mod, X_test, y_test,
                      display_labels=['cytoplasm', 'membrane','secreted'],
                      cmap=plt.cm.Blues)
plt.show()
# Persist the chosen model, then reload it to verify the round trip.
pickle.dump(best_mod, open("trained_models/escherichia_noSP_opt_logreg.pkl", 'wb'))
loaded_model = pickle.load(open("trained_models/escherichia_noSP_opt_logreg.pkl", 'rb'))
# if zero: all predictions match, the model was loaded correctly
len(preds) - (loaded_model.predict(X_test) == preds).sum()
| notebooks/sklearn_supervised_models_opt_ecoli.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// + [markdown] toc=true
// <h1>Table of Contents<span class="tocSkip"></span></h1>
// <div class="toc"><ul class="toc-item"><li><span><a href="#Aerospike-Hello-World!" data-toc-modified-id="Aerospike-Hello-World!-1"><span class="toc-item-num">1 </span>Aerospike Hello World!</a></span><ul class="toc-item"><li><span><a href="#Ensure-database-is-running" data-toc-modified-id="Ensure-database-is-running-1.1"><span class="toc-item-num">1.1 </span>Ensure database is running</a></span></li><li><span><a href="#Download-Aerospike-client-from-POM" data-toc-modified-id="Download-Aerospike-client-from-POM-1.2"><span class="toc-item-num">1.2 </span>Download Aerospike client from POM</a></span></li><li><span><a href="#Import-the-modules" data-toc-modified-id="Import-the-modules-1.3"><span class="toc-item-num">1.3 </span>Import the modules</a></span></li><li><span><a href="#Initialize-the-client" data-toc-modified-id="Initialize-the-client-1.4"><span class="toc-item-num">1.4 </span>Initialize the client</a></span></li><li><span><a href="#Understand-records-are-addressable-via-a-tuple-of-(namespace,-set,-userkey)" data-toc-modified-id="Understand-records-are-addressable-via-a-tuple-of-(namespace,-set,-userkey)-1.5"><span class="toc-item-num">1.5 </span>Understand records are addressable via a tuple of (namespace, set, userkey)</a></span></li><li><span><a href="#Write-a-record" data-toc-modified-id="Write-a-record-1.6"><span class="toc-item-num">1.6 </span>Write a record</a></span></li><li><span><a href="#Read-a-record" data-toc-modified-id="Read-a-record-1.7"><span class="toc-item-num">1.7 </span>Read a record</a></span></li><li><span><a href="#Display-result" data-toc-modified-id="Display-result-1.8"><span class="toc-item-num">1.8 </span>Display result</a></span></li><li><span><a href="#Clean-up" data-toc-modified-id="Clean-up-1.9"><span class="toc-item-num">1.9 </span>Clean up</a></span></li><li><span><a href="#All-code-in-Java-boilerplate" data-toc-modified-id="All-code-in-Java-boilerplate-1.10"><span class="toc-item-num">1.10 </span>All code in Java 
boilerplate</a></span></li><li><span><a href="#Next-steps" data-toc-modified-id="Next-steps-1.11"><span class="toc-item-num">1.11 </span>Next steps</a></span></li></ul></li></ul></div>
// -
// # Aerospike Hello World!
//
// Hello World! in Java with Aerospike.
// This notebook requires an Aerospike database running locally and that the Java kernel has been installed. Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) for additional details and the docker container.
// ## Ensure database is running
// This notebook requires that the Aerospike database is running.
import io.github.spencerpark.ijava.IJava;
import io.github.spencerpark.jupyter.kernel.magic.common.Shell;
IJava.getKernelInstance().getMagics().registerMagics(Shell.class);
// %sh asd
// + [markdown] hide_input=false
// ## Download Aerospike client from POM
// -
// %%loadFromPOM
<dependencies>
<dependency>
<groupId>com.aerospike</groupId>
<artifactId>aerospike-client</artifactId>
<version>5.0.0</version>
</dependency>
</dependencies>
// ## Import the modules
//
// Import the client library and other modules.
import com.aerospike.client.AerospikeClient;
import com.aerospike.client.policy.WritePolicy;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.Value;
System.out.println("Client modules imported.");
// ## Initialize the client
//
// Initialize the client and connect to the cluster. The configuration is for Aerospike database running on port 3000 of localhost which is the default. Modify config if your environment is different (Aerospike database running on a different host or different port).
//
AerospikeClient client = new AerospikeClient("localhost", 3000);
System.out.println("Initialized the client and connected to the cluster.");
// ## Understand records are addressable via a tuple of (namespace, set, userkey)
//
// The three components namespace, set, and userkey (with set being optional) form the Primary Key (PK) or simply key, of the record. The key serves as a handle to the record, and using it, a record can be read or written. By default userkey is not stored on server, only a hash (a byte array, the fourth component in the output below) which is the internal representation of the key is stored. For a detailed description of the data model see the [Data Model overview](https://www.aerospike.com/docs/architecture/data-model.html)
Key key = new Key("test", "demo", "foo");
System.out.println("Working with record key:");
System.out.println(key);
// ## Write a record
//
// Aerospike is schema-less and records may be written without any other setup. Here the bins or fields: name, age and greeting, are being written to a record with the key as defined above.
// +
Bin bin1 = new Bin("name", "<NAME>");
Bin bin2 = new Bin("age", 32);
Bin bin3 = new Bin("greeting", "Hello World!");
// Write a record
client.put(null, key, bin1, bin2, bin3);
System.out.println("Successfully written the record.");
// -
// ## Read a record
//
// The record can be retrieved using the same key.
// Read the record
Record record = client.get(null, key);
System.out.println("Read back the record.");
// ## Display result
//
// Print the record that was just retrieved. We are printing:
//
// 1. The metadata with the record's generation (or version) and expiration time.
// 1. The actual value of the record's bins.
System.out.println("Record values are:");
System.out.println(record);
// ## Clean up
// Finally close the client connection.
client.close();
System.out.println("Connection closed.");
// ## All code in Java boilerplate
// All the above code can also be written in the Java boilerplate format and run in a cell.
// +
import com.aerospike.client.AerospikeClient;
import com.aerospike.client.policy.WritePolicy;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.Value;
// Self-contained example: connect, write one record, read it back, print it.
public class Test{
    public static void putRecordGetRecord () {
        // Connect to the local cluster on the default port.
        AerospikeClient client = new AerospikeClient("localhost", 3000);
        // Record key: (namespace, set, userkey).
        Key key = new Key("test", "demo", "putgetkey");
        Bin bin1 = new Bin("bin1", "value1");
        Bin bin2 = new Bin("bin2", "value2");
        // Write a record
        client.put(null, key, bin1, bin2);
        // Read a record
        Record record = client.get(null, key);
        client.close();
        System.out.println("Record values are:");
        System.out.println(record);
    }
}
Test.putRecordGetRecord()
// -
// ## Next steps
//
// Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) to run additional Aerospike notebooks. To run a different notebook, download the notebook from the repo to your local machine, and then click on File->Open, and select Upload.
| notebooks/java/hello_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt

# Sample 10 points of sin(x) on [0, 10].
x = np.linspace(0, 10, 10)
y = np.sin(x)

# Interpolating spline through all the sampled points.
tck = interpolate.splrep(x, y)
x2 = np.linspace(0, 10, 200)
y2 = interpolate.splev(x2, tck)

# Spline with interior knots supplied explicitly (work in progress).
# knots = x[1:-1]  # it should be something like this
knots = np.array([x[1]])  # not working with the line above; just seeing what this line does
# Down-weight the interior samples so only the endpoints are fitted tightly.
weights = np.concatenate(([1], np.ones(x.shape[0] - 2) * .01, [1]))
tck = interpolate.splrep(x, y, t=knots, w=weights)
x3 = np.linspace(0, 10, 200)
# Evaluate the second spline on its own grid. (The original evaluated at x2,
# which happened to equal x3 but left x3 unused -- a latent grid mismatch.)
y3 = interpolate.splev(x3, tck)

# Plot: samples (green dots), interpolating spline (blue), knotted spline (red).
plt.plot(x, y, 'go', x2, y2, 'b', x3, y3, 'r')
plt.show()
# -
| Chapter08/Defining a B-spline for a given set of control points.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # NCAR Earth System Data Science WIP Talk
# __This presentation is based on work I did during the [NCAR Summer Internship in Parallel Computational Science (SIParCS) program](https://www2.cisl.ucar.edu/siparcs)__
# ### <NAME> -- Atmospheric Science PhD Candidate at UC Davis
# * [Twitter](https://twitter.com/lucassterzinger)
# * [GitHub](https://github.com/lsterzinger)
# * [Website](https://lucassterzinger.com)
#
#
# # Motivation:
# * NetCDF is not cloud optimized
# * Other formats, like Zarr, aim to make accessing and reading data from the cloud fast and painless
# * However, most geoscience datasets available in the cloud are still in their native NetCDF/HDF5, so a different access method is needed
# # What do I mean when I say "Cloud Optimized"?
# 
#
# In traditional scientific workflows, data is archived in a repository and downloaded to a separate computer for analysis (left). However, datasets are becoming much too large to fit on personal computers, and transferring full datasets from an archive to a separate machine can use lots of bandwidth.
#
# In a cloud environment, the data can live in object storage (e.g. AWS S3), and analysis can be done in adjacent compute instances, allowing for low-latency and high-bandwidth access to the dataset.
#
# ## Why NetCDF doesn't work well in this workflow
#
# NetCDF is probably the most common binary data format for atmospheric/earth sciences, and has a lot of official and community support. However, the NetCDF format/API requires either a) many small reads to access the metadata for a single file or b) use a serverside utility like THREDDS/OPeNDAP to extract metadata.
#
# 
#
# ## The Zarr Solution
# The [Zarr data format](https://zarr.readthedocs.io/en/stable/) alleviates this problem by storing the metadata and chunks in separate files that can be accessed as-needed and in parallel. Having consolidated metadata means that all the information about the dataset can be loaded and interpreted in a single read of a small plaintext file. With this metadata in-hand, a program can request exactly which chunks of data are needed for a given operation.
#
# 
#
# ## _However_
# While Zarr proves to be very good for this cloud-centric workflow, most cloud-available data is currently only available in NetCDF/HDF5/GRIB2 format. While it would be _wonderful_ if all this data converted to Zarr overnight, it would be great if in the meantime there was a way to use some of the Zarr spec, right?
# # Introducing `kerchunk`
# [Github page](https://github.com/intake/kerchunk)
#
# `kerchunk` works by doing all the heavy lifting of extracting the metadata, generating byte-ranges for each variable chunk, and creating a Zarr-spec metadata file. This file is plaintext and can opened and analyzed with xarray very quickly. When a user requests a certain chunk of data, the NetCDF4 API is bypassed entirely and the Zarr API is used to extract the specified byte-range.
#
# 
#
# ## How much of a difference does this make, really?
# Testing this method on 24 hours of 5-minute GOES-16 data and accessing via native NetCDF, Zarr, and NetCDF + ReferenceMaker:
#
# 
# ***
# # Let's try it out!
# ### Import `kerchunk` and make sure it's at the latest version (`0.0.3` at the time of writing)
import kerchunk
kerchunk.__version__
import xarray as xr
import matplotlib.pyplot as plt
from fsspec_reference_maker.hdf import SingleHdf5ToZarr
from fsspec_reference_maker.combine import MultiZarrToZarr
import fsspec
from glob import glob
# ## `fsspec` -- What is it?
# * Provides unified interface to different filesystem types
# * Local, cloud, http, dropbox, Google Drive, etc
# * All accessible with the same API
from fsspec.registry import known_implementations
known_implementations.keys()
# ### Open a new filesystem, of type `s3` (Amazon Web Services storage)
# This tells `fsspec` what type of storage system to use (AWS S3) and any authentication options (this is a public dataset, so use anonymous mode `anon=True`)
fs = fsspec.filesystem('s3', anon=True)
# Use `fs.glob()` to generate a list of files in a certain directory. Goes data is stored in `s3://noaa-goes16/<product>/<year>/<day_of_year>/<hour>/<datafile>.nc` format.
#
# This `glob()` returns all files in the 210th day of 2020 (July 28th, 2020)
flist = fs.glob("s3://noaa-goes16/ABI-L2-SSTF/2020/210/*/*.nc")
flist[0]
# ### Prepend `s3://` to the URLs
flist = ['s3://' + f for f in flist]
flist[:3]
# ### Start a dask cluster
# [Dask](https://dask.org/) is a python package that allows for easily parallelizing python code. This section starts a local client (using whatever processors are available on the current machine).
from dask.distributed import Client
client = Client()
client
# ## Define a function to return a reference dictionary for a given S3 file URL
#
# This function does the following:
# 1. `so` is a dictionary of options for `fsspec.open()`
# 2. Use `fsspec.open()` to open the file given by URL `f`
# 3. Using `kerchunk.SingleHdf5ToZarr()` and supplying the file object `infile` and URL `f`, generate reference with `.translate()`
def gen_ref(f):
    """Build a kerchunk reference dict for the HDF5/NetCDF file at URL *f*."""
    open_opts = {
        "mode": "rb",
        "anon": True,
        "default_fill_cache": False,
        "default_cache_type": "none",
    }
    # Scan the remote file's chunk layout and translate it to Zarr references.
    with fsspec.open(f, **open_opts) as source:
        chunker = SingleHdf5ToZarr(source, f, inline_threshold=300)
        return chunker.translate()
# + [markdown] tags=[]
# ### Map `gen_ref` to each member of `flist_bag` and compute
# Dask bag is a way to map a function to a set of inputs. This next couple blocks of code tell Dask to take all the files in `flist`, break them up into the same amount of partitions and map each partition to the `gen_ref()` function -- essentially mapping each file path to `gen_ref()`. Calling `bag.compute()` on this runs `gen_ref()` in parallel with as many workers as are available in Dask client.
#
# _Note: if running interactively on Binder, this will take a while since only one worker is available and the references will have to be generated in serial. See option for loading from jsons below_
# -
import dask.bag as db
bag = db.from_sequence(flist, npartitions=len(flist)).map(gen_ref)
bag.visualize()
# %time dicts = bag.compute()
# + [markdown] tags=[]
# ### _Save/load references to/from JSON files (optional)_
# The individual dictionaries can be saved as JSON files if desired
# +
# import ujson
# for d in dicts:
# # Generate name from corresponding URL:
# # Grab URL, strip everything but the filename,
# # and replace .nc with .json
# name = './example_jsons/individual/'+ d['templates']['u'].split('/')[-1].replace('.nc', '.json')
# with open(name, 'w') as outf:
# outf.write(ujson.dumps(d))
# -
# These generated jsons can then be loaded back in as a dict
# +
# import ujson
# dicts = []
# for f in sorted(glob('./example_jsons/individual/*.json')):
# with open(f,'r') as fin:
# dicts.append(ujson.load(fin))
# -
# ### Use `MultiZarrToZarr` to combine the 24 individual references into a single reference
# In this example we passed a list of reference dictionaries, but you can also give it a list of `.json` filepaths (commented out)
# Combine the per-file references into a single logical dataset.
# The xarray kwargs disable all decoding so that raw on-disk values are
# referenced unchanged; files are concatenated along the time dimension 't'.
mzz = MultiZarrToZarr(
    dicts,
    # sorted((glob('./example_jsons/individual/*.json'))),
    remote_protocol='s3',
    remote_options={'anon':True},
    xarray_open_kwargs={
        "decode_cf" : False,
        "mask_and_scale" : False,
        "decode_times" : False,
        "decode_timedelta" : False,
        "use_cftime" : False,
        "decode_coords" : False
    },
    xarray_concat_args={'dim' : 't'},
)
# References can be saved to a file (`combined.json`) or passed back as a dictionary (`mzz_dict`)
# %time mzz.translate('./combined.json')
# mzz_dict = mzz.translate()
| 01-Create_References.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # Introduction
# This workbook is an introduction to the basic concepts and designs relating to the paper
#
# **Fast estimation of sparse quantum noise** by *<NAME>*
#
# This workbook is going to go through the basic ideas behind experimental design for a trivial 3 qubit system, using qiskit to simulate it. It shows how to build the local circuits in qasm.
#
# This is only 3 qubits, to keep the simulation time nice and short. Of course 3 qubits is way too small for this system to be any use. We are looking at recovering Paulis in the order of $4^{\delta n}$, and $\delta$ is about 0.25 (up to a maximum of, say, 0.5). Here with n = 3, that's only about 4 values! We do in fact recover more (about 10), but the high-weight Paulis are 4*3 = 12, so really you wouldn't use this protocol here. (In fact, with the number of experiments we do, you could almost certainly use a more tomography-style approach to recover the 64 actual Paulis.)
#
#
# # Software needed
# For this introductory notebook, we need minimal software. All these packages should be available through the Julia package manager. However, we will need some to help with the simulation etc.
#
# If you get an error trying to "use" them the error message tells you how to load them.
using Hadamard
using PyPlot
# convenience (type /otimes<tab>) - <tab> is the "tab" key.
⊗ = kron
# type /oplus<tab>
⊕ = (x,y)->mod.(x+y,2)
# I love this add on, especially as some of the simulations take a noticeable time.
using ProgressMeter
# +
# We are going to need some more of my code
# You can get it by doing the following, in the main julia repl (hit ']' to get to the package manager)
# pkg> add https://github.com/rharper2/Juqst.jl
# Currently there is harmless warning re overloading Hadamard.
using Juqst
# -
# This is the code in this github that implements the various peeling algorithms for us.
include("peel.jl")
using Main.PEEL
include("./localPeelFunctions.jl")
# ### Finally we are going to need qiskit
#
# (Instructions to get under Julia)
#
# Need to install Conda, PyCall using the julia package manager.
#
# Easiest way is the repl (hit ], then add Conda, add PyCall)
# or...
#
# ```
# using Pkg
# Pkg.add("Conda")
# Pkg.add("PyCall")
# ```
#
# then use Conda to install pip
# ```
# using Conda
# Conda.add("pip")
# ```
#
# Once that is done, then we can use pip to install qiskit (in a place Julia can find it).
#
#
# ```
# using PyCall
# run(`$(PyCall.pyprogramname) -m pip install qiskit`)
# ```
#
# Then we can use qiskit from Julia!
using PyCall
qiskit = pyimport("qiskit")
# # Some preliminary information.
#
# ## This is replicated from "Scalable Estimation - Basic Concepts"
#
# # Set up some Pauli Errors to find
# # Choose our sub-sampling matrices
# One of the main ideas behind the papers is that we can use the protocols in [Efficient learning of quantum channels](https://arxiv.org/abs/1907.12976) and [Efficient learning of quantum noise](https://arxiv.org/abs/1907.13022) to learn the eigenvalues of an entire stabiliser group ($2^n$) entries at once to arbitrary precision. Whilst it might be quite difficult to learn the eigenvalues of an arbitrary group as this will require an arbitrary $n-$qubit Clifford gate (which can be a lot of primitive gates!) even today's noisy devices can quite easily create a 2-local Stabiliser group over $n-$ qubits.
#
#
# Finally we are simulating the recovery in a 6 qubit system. That means our bitstrings are 12 bits long.
#
# Our experiments will need two sub-sampling groups. The first subsampling group will be two of our potential MuBs (set out below) (selected randomly). The second subsampling group will have single MUBs (potentialSingles below) on the first and fourth qubit, and a potential (two qubit) MuB on qubits 2 and 3.
#
# This maximises the separation of Paulis using local stabiliser (two qubit) groups.
all2QlMuBs
(experiments,paulisAll,ds) = generateSensibleSubsamples([(2,1),(1,2)])
print("$experiments\n")
print("$paulisAll\n")
noOfQubits = 3
# Here there are two sets of experiments
# The tuple for each qubit, is the (number, experiment type),
# the numbers for each will add up to 4 (since we have 4 qubits)
experiments[1]
# # The Experiments
# ## So what are these experiments?
#
# Let's pull in the figure from the paper which shows all the experimental designs:
#
# 
#
# Okay that's a bit busy, lets break it down.
# Step 1 was to choose one MUB from the set for each pair of qubits, well that is what we did in PaulisAll, lets look at the first element of that
paulisAll[1]
# So because it was randomly chosen, you will have to look at that and convince yourself that indeed that each of the two elements shown above are one of these four sets of MuBs.
#
# So the first experiment (the top row of (2)) is just an experiment to extract those eigenvalues.
#
# How do we do that?
#
# Well we have the circuits we need in the appendix of the paper - they look like this:
#
# 
#
# Where what we are going to need on each two qubit pair is either (a) or (c), depending on the MUB randomly selected
# ## Setting up the circuits
# For the two qubit experiments, circuit 1 is (a) above and circuit (2) is (c) above.
#
# Effectively we take an input state in the computational basis, apply these circuits, do a Pauli twirl of varying gates, reverse the circuit and measure in the computational basis.
#
# Some code to set up the circuits on the ... err ... circuit
#
pX = [0 1;1 0]
pY = [0 -im;im 0]
pZ = [1 0;0 -1]
pI = [1 0;0 1]
paulis = [pI,pX,pY,pZ]
superPaulis = [makeSuper(x) for x in paulis];
experiments[1]
numberOfQubits = foldl((a,b)->a[1]+b[1],experiments[1])
# Sanity check on noisless sim
circuit = getCircuit(experiments[1],12)
qiskit.execute(circuit,qiskit.Aer.get_backend("qasm_simulator"),shots=100).result().get_counts()
circuit.draw()
# Creates a primitive noise ansatz
""" get_noise - get a primitive noise model reflecting the passed in parameters
    Parameters:
    - p_meas: the probability of measurement error.
    - p_gate: the probability of a single qubit gate error.
    - p_gate2: the probability of a cx/cz error
    Returns: the noise_model (a qiskit Aer NoiseModel, built via PyCall).
"""
function get_noise(p_meas,p_gate,p_gate2)
    # Bit-flip (X) channel with probability p_meas, applied at measurement.
    error_meas = qiskit.providers.aer.noise.errors.pauli_error([("X",p_meas), ("I", 1 - p_meas)])
    error_gate1 = qiskit.providers.aer.noise.errors.depolarizing_error(p_gate, 1)
    error_gate2 = qiskit.providers.aer.noise.errors.depolarizing_error(p_gate2,2)
    noise_model = qiskit.providers.aer.noise.NoiseModel()
    noise_model.add_all_qubit_quantum_error(error_meas, "measure") # measurement error is applied to measurements
    noise_model.add_all_qubit_quantum_error(error_gate1, ["x","h","s","z","u3","id"]) # single qubit depolarizing error on all listed one-qubit gates
    noise_model.add_all_qubit_quantum_error(error_gate2, ["cx"]) # two qubit gate error is applied to cx gates
    return noise_model
end
noise_model = get_noise(0.02,0.01,0.015)# from qiskit.providers.aer.noise import NoiseModel
lengths=[2,4,10,20,60,100,200]
@time res = doASeries(experiments[1],lengths,50,2000,noise_model)
cc = getCounts(res,lengths,noOfQubits)
eigs = extractEigenvalues(cc,lengths)
indices = generateEigenvalues(getStabilizerForExperiment(experiments[1]))
for (ix,e) in enumerate(eigs)
print("$(PEEL.fidelityLabels(indices[ix]-1,qubits=3)): $e\n")
end
recoveredEigenvalues = Dict()
indices = generateEigenvalues(getStabilizerForExperiment(experiments[1]))
for i in 1:2^noOfQubits
recoveredEigenvalues[indices[i]] = push!(get(recoveredEigenvalues,indices[i],[]),eigs[i])
end
# ### Note
#
# In the graph below, we can see the primitive noise model we employ. All multi qubit Paulis have the same eigenvalues (also known as fidelity). We get distinct bands depending on whether we have one, two or three Paulis. In reality the device would not have this clear distinction.
experiment1_SpamyEigenvalues = map(ifwht_natural,cc);
# Dont bother with the first element as its all 1.
for x = 2:2^noOfQubits
toPlot = [d[x] for d in experiment1_SpamyEigenvalues]
plot(lengths,toPlot,label=PEEL.fidelityLabels(indices[x]-1,qubits=3))
end
legend()
recoveredEigenvalues
experiments[2]
# +
# Cycle through experiments... to generate offsets.
shots = 2000
sequences = 40
experi = experiments[1]
# This is step 2 of the figure - we move through the different qubit groups.
@showprogress 1 "QubitGroups" for (ix,stabilizerSelect) in enumerate(experi)
    # If it is two qubits we have 5 stabilizer experiments/circuits
    range = 1:5
    if stabilizerSelect[1] == 1 # one qubit only so only 3 stabiliser experiments
        range = 1:3
    end
    @showprogress 1 "Experiment: " for experimentType in range
        newStab = stabilizerSelect
        # Check if we have already done this one.
        if newStab[2] != experimentType # New experiment needed.
            # Swap in the new stabiliser type for this qubit group only.
            experimentToDo = copy(experi)
            newStab = (newStab[1],experimentType)
            experimentToDo[ix]=newStab
            # Work out the Paulis for that experiment type (we need this later)
            paulisForExperiment = getStabilizerForExperiment(experimentToDo)
            # Run the experiment for the requisite sequences and shots.
            results = doASeries(experimentToDo,lengths,sequences,shots,noise_model)
            # Extract the estimated eigenvalues (2^noOfQubits)
            cc = getCounts(results,lengths,noOfQubits)
            eigs = extractEigenvalues(cc,lengths)
            # Work out what eigenvalues we have calculated
            indices = generateEigenvalues(paulisForExperiment)
            #print("We have experiment $experimentToDo\nWIth indices $indices\n")
            # Put them into our 'recoveredEigenvalue' oracle.
            for i in 1:2^noOfQubits
                recoveredEigenvalues[indices[i]] = push!(get(recoveredEigenvalues,indices[i],[]),eigs[i])
            end
        end
    end
end
# -
# Print the oracle so far: each Pauli label with its recovered estimates.
for k in sort(collect(keys(recoveredEigenvalues)))
    print("$(PEEL.fidelityLabels(k-1,qubits=3)): $(round.(recoveredEigenvalues[k],digits=5))\n")
end
experiments
# Now we need to repeat, but this time for experiment 2
res2 = doASeries(experiments[2],lengths,sequences,shots,noise_model)
cc2 = getCounts(res2,lengths,noOfQubits)
eigs2 = extractEigenvalues(cc2,lengths)
indices2 = generateEigenvalues(getStabilizerForExperiment(experiments[2]))
print("$indices2\n")
# Fold experiment 2's estimates into the oracle as well.
for i in 1:2^noOfQubits
    recoveredEigenvalues[indices2[i]] = push!(get(recoveredEigenvalues,indices2[i],[]),eigs2[i])
end
# And print the updated oracle.
for k in sort(collect(keys(recoveredEigenvalues)))
    print("$(PEEL.fidelityLabels(k-1,qubits=3)): $(round.(recoveredEigenvalues[k],digits=5))\n")
end
# +
# And then cycle through experiments for experiments[2] ... to generate offsets.
# NOTE(review): this cell duplicates the experiment-1 offsets cell above almost
# verbatim (only `experi` differs) — a candidate for extraction into a function.
experi = experiments[2]
# This is step 2 of the figure - we move through the different qubit groups.
@showprogress 1 "QubitGroups" for (ix,stabilizerSelect) in enumerate(experi)
    # If it is two qubits we have 5 stabilizer experiments/circuits
    range = 1:5
    if stabilizerSelect[1] == 1 # one qubit only so only 3 stabiliser experiments
        range = 1:3
    end
    @showprogress 1 "Experiment: " for experimentType in range
        newStab = stabilizerSelect
        # Check if we have already done this one.
        if newStab[2] != experimentType # New experiment needed.
            experimentToDo = copy(experi)
            newStab = (newStab[1],experimentType)
            experimentToDo[ix]=newStab
            # Work out the Paulis for that experiment type (we need this later)
            paulisForExperiment = getStabilizerForExperiment(experimentToDo)
            # Run the experiment for the requisite sequences and shots.
            results = doASeries(experimentToDo,lengths,sequences,shots,noise_model)
            # Extract the estimated eigenvalues (2^noOfQubits)
            cc = getCounts(results,lengths,noOfQubits)
            eigs = extractEigenvalues(cc,lengths)
            # Work out what eigenvalues we have calculated
            indices = generateEigenvalues(paulisForExperiment)
            # print("We have experiment $experimentToDo\nWIth indices $indices\n")
            # Put them into our 'recoveredEigenvalues' oracle.
            for i in 1:2^noOfQubits
                recoveredEigenvalues[indices[i]] = push!(get(recoveredEigenvalues,indices[i],[]),eigs[i])
            end
        end
    end
end
# -
# Final oracle dump: each Pauli label with all estimates gathered for it.
for k in sort(collect(keys(recoveredEigenvalues)))
    print("$(PEEL.fidelityLabels(k-1,qubits=3)): $(round.(recoveredEigenvalues[k],digits=5))\n")
end
using Statistics
fids= Float64[]
# Compared to the total number of Eigenvalues, which is
for i in sort(collect(keys(recoveredEigenvalues)))
    print("$(PEEL.fidelityLabels(i-1,qubits=3)) $(round(mean(recoveredEigenvalues[i]),digits=4))\n")
    push!(fids,round(mean(recoveredEigenvalues[i]),digits=4))
end
# Average them out: the oracle used for peeling holds one mean per index.
using Statistics
oracleToUse = Dict()
for k in keys(recoveredEigenvalues)
    oracleToUse[k]=mean(recoveredEigenvalues[k])
end
# Generate the samples directly
samples = []
for (ix,x) in enumerate(paulisAll)
    # Similarly if we reverse above (right hand least significant, then we reverse here)
    push!(samples,[[y for y in generateFromPVecSamples4N(reverse(x),d)] for d in ds])
end
# Walsh-Hadamard transform of the oracle values sampled at each pattern.
listOfX = [[fwht_natural([oracleToUse[x+1] for x in y]) for y in s] for s in samples];
# +
# Some functions to give us labels:
function probabilityLabels(x;qubits=2)
    # Render index x as base-4 digits (one per qubit) and translate each digit
    # to its Pauli letter, using the probability-domain ordering I, Y, X, Z.
    digitstring = string(x, base=4, pad=qubits)
    letters = ['I', 'Y', 'X', 'Z']
    return join(letters[parse(Int, c) + 1] for c in digitstring)
end
function fidelityLabels(x;qubits=2)
    # Same digit-to-Pauli rendering as probabilityLabels, but using the
    # fidelity-domain ordering I, X, Y, Z (X and Y swapped).
    digitstring = string(x, base=4, pad=qubits)
    letters = ['I', 'X', 'Y', 'Z']
    return join(letters[parse(Int, c) + 1] for c in digitstring)
end
# +
# Use the patterns to create the listOfPs
# What is this?? Well basically it tells us which Pauli error fell into which
# bin, given our choice of experiment.
listOfPs=[]
for p in paulisAll
    hMap = []
    # Because here we use right hand least significant - we just reverse the order we stored the experiments.
    for i in reverse(p)
        #print("Length $(length(i))\n")
        if length(i) == 2
            push!(hMap,twoPattern(i))
        elseif length(i) == 4
            push!(hMap,fourPattern([i])[1])
        else # Assume a binary bit pattern
            push!(hMap,[length(i)])
        end
    end
    push!(listOfPs,hMap)
end
# Display the bin patterns (notebook cell output).
listOfPs
# +
# Main peeling decoder: repeatedly scan every bucket, extract singleton bins,
# and peel their contribution back out, loosening the acceptance thresholds a
# little on every pass that finds nothing new.
using LinearAlgebra
maxPass = 200
# singletons is the 'noise' threshold below which we declare we have found a singleton
# It will be related to the measurement accuracy and the number of bins
# Here we base it off the shotsToDo variance, on the basis of our hoped for recovery
# We start that one low and then slowly increase it, meaning we are more likely to accept
# If you have a certain probability distribution and this ansatz is not working, set it
# so that you get a reasonable number of hits in the first round.
singletons = (0.002*.999)/30000
singletonsInc = singletons/2
# Zeros is set high - we don't want to accept bins with very low numbers as they are probably just noise
# If the (sum(mean - value)^2) for all the offsets is below this number we ignore it.
# But then we lower it, meaning we are less likely to think a bin has no value in it.
# Obviously it should never be negative.
zerosC = (0.03*.999)/20000*2*1.1
zerosDec = (zerosC*0.99)/maxPass
mappings=[]
prevFound = 0
qubitSize = 3
# Recompute the transformed oracle samples (same expression as the cell above).
listOfX = [[fwht_natural([oracleToUse[x+1] for x in y]) for y in s] for s in samples]
found = Dict()
# Build reverse mappings; `mappings` is empty here, so `rmappings` stays empty.
rmappings = []
for x in mappings
    if length(x) == 0
        push!(rmappings,x)
    else
        ralt = Dict()
        for i in keys(x)
            ralt[x[i]]= i
        end
        push!(rmappings,ralt)
    end
end
# NOTE: prevFound was already initialised above; this re-assignment is redundant.
prevFound = 0
for i in 1:maxPass
    for co = 1:length(listOfX)
        bucketSize = length(listOfX[co][1])
        for extractValue = 1:bucketSize
            extracted = [x[extractValue] for x in listOfX[co]]
            # Skip bins whose offsets look like pure noise.
            if !(PEEL.closeToZero(extracted,qubitSize*2,cutoff= zerosC))
                (isit,bits,val) = PEEL.checkAndExtractSingleton([extracted],qubitSize*2,cutoff=singletons)
                if isit
                    #print("$bits\n")
                    #pval = binaryArrayToNumber(j6*[x == '0' ? 0 : 1 for x in bits])
                    # vval only feeds the (commented-out) debug print below.
                    vval = parse(Int,bits,base=2)
                    #print("$bits, $vval $(round(dist[vval+1],digits=5)) and $(round(val,digits=5))\n")
                    PEEL.peelBack(listOfX,listOfPs,bits,val,found,ds,rmappings)
                end
            end
        end
    end
    # If this pass found nothing new, relax both thresholds for the next pass.
    if length(found) > prevFound
        prevFound = length(found)
    else
        singletons += singletonsInc
        zerosC -=zerosDec
        if (zerosC <= 0)
            break
        end
    end
    # Stop once the recovered probabilities essentially sum to 1.
    if length(found) > 0
        print("Pass $i, $(length(found)) $(sum([mean(found[x]) for x in keys(found)]))\n")
        if sum([mean(found[x]) for x in keys(found)]) >= 0.999995
            break
        end
    end
end
# -
# -
# Report every recovered Pauli-error probability, keyed by its bit pattern.
for k in keys(found)
    print("$(probabilityLabels(parse(Int,k,base=2),qubits=3)) -- $(round.(found[k],digits=4))\n")
end
# ## And that's all folks
#
# What do we see, well we recovered 10 values, which is more than advertised. They were all single Pauli errors, and whilst there is some variation - they are all in the right ballpark. 40 sequences of 2,000 shots is a relatively small number to recover to this precision - it only gets better with more qubits and more sequences.
#
| Scalable Estimation - Experimental Qiskit- Just 3 qubits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Exercice 15
#
# Le programme pour mémoire
#
# MOV R0, #255
# STR R0, 68
# HALT
# LDR R1, 72
# HALT
#
# Copier une copie d'écran de la mémoire qui met en valeur ce qui a changé dans la mémoire au premier `HALT`.
#
# > 
# >
# > La valeur décimale de 0x44 est 68.
#
# Expliquer pourquoi la valeur montrée est ce qu'elle est (*what*), et est où elle est (*where*).
#
# > *what*: La valeur du mot mémoire modifié, 0xff, correspond à 255 (dans R0) en décimal. Or nous avons expressément demandé d'enregistrer cette valeur en mémoire STR **R0**, 68.
# >
# > *where*: le mot mémoire modifié a pour adresse 0x44 soit 4\*16+4=68 en décimal qui est bien la deuxième opérande de `STR`. Remarquer qu'elle n'est pas préfixée par `#` car ce n'est pas une valeur au sens de «contenu d'un registre ou d'un mot mémoire».
#
# À présent, cliquer sur la position mémoire immédiatement à droite de celle qui a été modifiée, et y saisir une valeur. Relancer l'exécution (en appuyant sur Play) et montrer, avec une copie d'écran partielle, que la valeur saisie précédemment a été copiée ('loaded') dans `R1`.
#
# > Je saisis 0xff00 dans le mot mémoire suivant
# >
# > 
# >
# > L'instruction LDR R1, 72 a bien pour effet de charger le mot saisi dans le registre R1:
# >
# > 
#
# Pourquoi la deuxième opérande pour l'instruction `LDR` est 72 et pas 69? Que se passe-t-il si vous mettez 69 et que vous relancez l'exécution?
#
# > 69 est l'adresse du deuxième octet du mot mémoire d'adresse 68; les adresses des mots mémoires consécutifs augmentent de 4 en 4 et 72 = 8 + 4
# >
# > Si on met 69 à la place de 72, on obtient une erreur « *Unaligned access at line 4 LDR* » qui nous indique un problème d'alignement. Les opérations `STR` et `LDR` fonctionnent avec des **adresses de mot**.
#
# > **Complément**: Sachez cependant qu'il existe des opérations similaires `STRB` et `LDRB` (B pour *Byte* - octet) qui utilisent l'adresse d'un octet. Vous pourriez avoir une surprise en les utilisant car les processeurs ARM sont *petit boutistes* \[ *little endian* \] (on commence par le petit bout...). Par exemple:
# >
# > MOV R0,#0x0201
# > STR R0,68
# > LDRB R1,68
# > HALT
# >
# > Qu'aura-t-on dans R1, 0x1 ou 0x2? Quelle adresse faut-il mettre pour obtenir l'octet qui n'a pas été obtenu?
# ________
# #### Exercice 16
#
# Pour mémoire:
#
# LDR R0, xCoord
# ADD R0, R0, #6
# STR R0, xCoord
# LDR R0, yCoord
# ADD R0, R0, #2
# STR R0, yCoord
# HALT
# xCoord: 3
# yCoord: 4
#
# Quelles sont les adresses respectives de `xCoord` et `yCoord`?
#
# > xCoord correspond à l'adresse 0x1c et yCoord à 0x20.
#
# > Mémoire après avoir validé le programme mais avant de l'avoir exécuté:
# >
# > 
#
# > Mémoire après exécution:
# >
# > 
# ____
# #### Exercice 17
#
# Faites tourner ce programme. Lorsque le processeur se bloque en attente d'une donnée, saisir 1, 2 ou 3. Lorsque le programme se suspend, prendre une capture d'écran partielle montrant la console et montrant la valeur dans `R0` qui devrait être le nombre d'allumettes restantes.
#
# Pour rappel:
#
# ```
# //R0 - allumettes restantes
# //R1 - pour écrire des messages
# //R2 - nombre d'allumettes à enlever
# MOV R0, #15
# STR R0, .WriteUnsignedNum
# MOV R1, #msg1
# STR R1, .WriteString
# MOV R1, #msg2
# STR R1, .WriteString
# LDR R2, .InputNum
# SUB R0, R0, R2
# HALT
# msg1: .ASCIZ "restantes\n"
# msg2: .ASCIZ "Combien souhaitez-vous en enlever (1-3)?\n"
# ```
#
# > Je saisis 2 pour indiquer que je souhaite prendre 2 allumettes
# >
# > 
#
# ____
# #### Exercice 18
#
# Pour rappel:
# ```
# //R0 - allumettes restantes
# //R1 - pour écrire des messages
# //R2 - nombre d'allumettes à enlever
# MOV R0, #15
# loop: STR R0, .WriteUnsignedNum
# MOV R1, #msg1
# STR R1, .WriteString
# MOV R1, #msg2
# STR R1, .WriteString
# LDR R2, .InputNum
# SUB R0, R0, R2
# B loop
# HALT
# msg1: .ASCIZ "restantes\n"
# msg2: .ASCIZ "Combien souhaitez-vous en enlever (1-3)?\n"
# ```
#
#
# Pourquoi l'étiquette `loop:` n'a-t-elle pas été placée sur la première instruction (plutôt que la seconde)? Si vous n'êtes pas sûr, expérimenter ce changement.
#
# > Si on la place en première ligne, le compteur d'allumette est remis à 15 à chaque itération!
#
# Même en version mono-utilisateur de ce jeu, il y a deux sérieuses limitations dans ce code. Pourriez-vous les préciser?
#
# > 1. rien n'arrête le programme, le nombre d'allumettes fini par devenir négatif,
# > 2. On peut enlever autant d'allumettes qu'on veut et non 1, 2 ou 3.
# _____
# #### Exercice 19
# Pour rappel:
#
# ```
# //R0 - allumettes restantes
# //R1 - pour écrire des messages
# //R2 - nombre d'allumettes à enlever
# MOV R0, #15
# loop: STR R0, .WriteUnsignedNum
# MOV R1, #msg1
# STR R1, .WriteString
# MOV R1, #msg2
# STR R1, .WriteString
# input: LDR R2, .InputNum
# CMP R2, #3
# BGT input
# SUB R0, R0, R2
# B loop
# HALT
# msg1: .ASCIZ "restantes\n"
# msg2: .ASCIZ "Combien souhaitez-vous en enlever (1-3)?\n"
# ```
#
# À présent, en utilisant l'une des quatres formes possibles de branchement conditionnel présentées plus tôt, ajouter quelques instructions de manière à renforcer la règle que le nombre d'allumettes récupérées est au moins une.
#
# > ...
# > CMP R2, #3
# > BGT input
# > CMP R2, #1 // Vérifions que le nombre d'allumettes n'est pas inférieur à 1
# > BLT input // Si c'est le cas, on redemande ...
# > SUB ...
# >
#
# Essayer de saisir une valeur négative. Est-ce que le code empêche cela?
#
# > oui, si le nombre est négatif, il y a branchement à 'input'.
#
# Pouvez-vous trouver un moyen d'empêcher que le nombre total d'allumette puisse devenir négatif?
#
# > ...
# > BLT input
# > SUB R4, R0, R2 // utilisation d'un registre temporaire
# > CMP R4, #0
# > BLT input // l'utilisateur enlève plus d'allumettes qu'il ne peut
# > MOV R0, R4 // ok, plaçons le résultat temporaire dans le registre adéquat
# > BGT loop // S'il n'y a plus d'allumettes, le jeu est fini.
# > HALT
#
# > 
# ____
# #### Exercice 20
#
# Saisir et faire tourner le programme complet plus d'une fois.
#
# Prendre une capture d'écran partiel montrant la console à la fin du jeu dans chaque cas: celui où vous gagnez, celui où l'ordinateur gagne.
#
# Il y a en fait une stratégie très simple qui garantit la victoire si vous jouez en premier et qui vous l'assure probablement si vous jouez en second pourvu que l'autre joueur ne suive pas la même stratégie (comme dans ce cas où l'ordinateur joue au hasard).
#
# Pouvez-vous trouver cette stratégie gagnante?
#
# > Je gagne:
# >
# > 15 allumettes restantes.
# > 1 prises par l'ordinateur.
# > 14 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > 11 allumettes restantes.
# > 2 prises par l'ordinateur.
# > 9 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > 7 allumettes restantes.
# > 1 prises par l'ordinateur.
# > 6 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > 3 allumettes restantes.
# > 1 prises par l'ordinateur.
# > 2 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > Vous gagnez!
#
# > Je perds:
# >
# > 15 allumettes restantes.
# > 1 prises par l'ordinateur.
# > 14 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > 11 allumettes restantes.
# > 2 prises par l'ordinateur.
# > 9 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > 7 allumettes restantes.
# > 3 prises par l'ordinateur.
# > 4 allumettes restantes.
# > Combien d'allumettes voulez-vous prendre (1-3)?
# > 2 allumettes restantes.
# > 1 prises par l'ordinateur.
# > 1 allumettes restantes.
# > L'ordinateur gagne!
#
# > Stratégie gagnante à tous les coups si on commence:
# >
# >> Prendre autant d'allumettes qu'il faut pour qu'il en reste toujours un multiple de 1+4\*n soit 1, 5, 9, 13.
# ____
| 06_machine_physique/Initiation_au_langage_d_assemblage/Partie1/solutions/03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Machine Learning for Medicine**
# # **Bayesian Networks for Clinical Data Analysis.**
# * Student : <NAME>.
import pandas as pd
import numpy as np
import pyAgrum as gum
import pyAgrum.lib.notebook as gnb
import pyAgrum.lib.bn2graph as bnPlot
import pyAgrum.lib.dynamicBN as gdyn
# # 1 - Modeling With a Bayesian Network :
# > In this section we model with a Bayesian Network the problem of type 2 diabetes remission after a gastric by-pass surgery.
# Fast way to create the DAG (arc syntax: A<-B means B is a parent of A).
dr = gum.fastBN("Remission<-Insulin<-Glycemia->Other_drugs->Remission")
dr
# +
# Add the marginal for Glycemia and the CPTs for the other variables:
# Marginal for Glycemia (Glycemia > 6.5)
dr.cpt("Glycemia")[:] = [0.5, 0.5]
# CPT for Insulin, rows indexed by the parent state Glycemia = 0, 1
dr.cpt("Insulin")[:]=[[0.9,0.1],[0.1,0.9]]
# Equivalent to
#dr.cpt("Insulin")[{"Glycemia":0}] = [0.9,0.1]
#dr.cpt("Insulin")[{"Glycemia":1}] = [0.9,0.1]
# CPT for Other_drugs, rows indexed by Glycemia = 0, 1
dr.cpt("Other_drugs")[:]=[[0.7,0.3],[0.25,0.75]]
# CPT for Remission, one row per (Insulin, Other_drugs) parent combination
dr.cpt("Remission")[{"Insulin":1,"Other_drugs":1}] = [0.9, 0.1] # Insulin=1, Other_drugs=1
dr.cpt("Remission")[{"Insulin":0,"Other_drugs":1}] = [0.4, 0.6] # Insulin=0, Other_drugs=1
dr.cpt("Remission")[{"Insulin":1,"Other_drugs":0}] = [0.7, 0.3] # Insulin=1, Other_drugs=0
dr.cpt("Remission")[{"Insulin":0,"Other_drugs":0}] = [0.1, 0.9] # Insulin=0, Other_drugs=0
# -
# display the network with CPT tables
gnb.sideBySide(dr,
               dr.cpt("Glycemia"),
               dr.cpt("Insulin"),
               dr.cpt("Other_drugs"),
               dr.cpt("Remission"),
               captions=["the BN","the marginal for $Glycemia$","the CPT for $Insulin$","the CPT for $Other\_drugs$","the CPT for $Remission$"])
# ## 1 - 1 - Inference :
# define the inference engine (LazyPropagation : exact inference)
ie=gum.LazyPropagation(dr)
# run inference with no evidence set
ie.makeInference()
# display the prior posterior of Remission (notebook cell output)
ie.posterior("Remission")
# ## 1 - 2 - Answering Some Observational Queries :
# ### **What is the probability to get the remission if the glycemia level is less than 6.5 and no drugs are taken?**
# > We are interested in the distribution of the variable $Remission$ if we know that the variable $Glycemia$ is less than $6.5$ (No = $0$) and the no drugs were taken (the variable $Other\_drugs$ is equal to $0$). In other words :
# $$P(Remission | Glycemia = 0, Other\_drugs = 0)$$
# set the evidence: Glycemia below 6.5 (0) and no other drugs taken (0)
evs = {'Glycemia' : 0, 'Other_drugs' : 0}
ie.setEvidence(evs)
# infer P(Remission | evidence)
ie.makeInference()
# display results
gnb.sideBySide(ie.posterior("Remission"),gnb.getInference(dr, evs = evs),
               captions=["$P(Remission | Glycemia = 0, Other\_drugs = 0)$","Complete inference with evidence="+str(evs)])
# > If the $Glycemia$ level is less than $6.5$ and the no drugs were taken we have a remission probability $0.84$.
# ### **What is the probability to get the remission if the glycemia level is bigger than 6.5 and insulin is prescribed?**
# > We are interested in the distribution of the variable $Remission$ if we know that the variable $Glycemia$ is bigger than $6.5$ (No = $1$) and that insulin was prescribed (the variable $Other\_drugs$ is equal to $1$). In other words :
# $$P(Remission | Glycemia = 1, Other\_drugs = 1)$$
# set the evidence: Glycemia above 6.5 (1) and insulin/other drugs taken (1)
evs = {'Glycemia' : 1, 'Other_drugs' : 1}
ie.setEvidence(evs)
# infer P(Remission | evidence)
ie.makeInference()
# display results
gnb.sideBySide(ie.posterior("Remission"),gnb.getInference(dr, evs = evs),
               captions=["$P(Remission | Glycemia = 1, Other\_drugs = 1)$","Complete inference with evidence="+str(evs)])
# > If the $Glycemia$ is bigger than $6.5$ and insulin was prescribed we have a remission probability of $0.15$.
# > **REMARK :** In this section we answered some observational queries (consist of finding variables that are associated by collecting and analyzing raw data. It allows us to answer queries based on passive observation of data **If I observe variable $ X $, what can I say about variable $ Y $? (i.e $ P (Y \mid X) $)**). In order to answer interventional queries (consist of predicting the effect of a deliberate intervention,**What would $ X $ be, if I do $ Y $? (i.e. $P(X \mid do(Y))$)**, interventional queries can not be answered using only passively collected data) or counterfactual queries (consists of reasoning about hypothetical situations, **What would have happened if?**), the network needs to become a causal diagram ($A \rightarrow B$ means that $A$ causes $B$, which is not always the case).
# # 2 - Bayesian Network from Real Data :
# > In this section we create a network from real data (CPTs and Marginals are computed from the data). Two heuristic search approaches methods are tested for learning the structure of bayesian networks namely **Hill Climbing** and **Tabu Search** :
# > * Hill Climbing : a simple iterative technique.
# >* Tabu Search : a memory based approach that guides a local heuristic search procedure to explore the solution space beyond local optimality.
#
# Generally speaking, Tabu search yields better results because of amnesy of hill climbing, source :
# [Learning the structure of Bayesian Networks: Aquantitative assessment of the effect of differentalgorithmic schemes](https://arxiv.org/pdf/1704.08676.pdf)
# path to data: the SPLEX data directory, relative to the notebook
data_path = "./data/SPLEX/"
# utilitary function to discretize the data (BN can be constructed only from discrete data)
def discretize_dataset(dataframe, ignore = [], path_to_save = "./", name_to_save = "discr_data", num_bins = 5):
"""
Discretise a dataset (of continuous variables). This function is based on the numpy.digitize function. Given a fixed number of bins, this function
creates for each column (variable in the data set) that number of bins. Then, for every value of a variable of a data-point, it returns the index
of the bin to which this value belongs.
:param dataframe: the dataset (expected to be a pandas dataframe)
:param ignore: list of columns to ignore (no discretizing for these columns)
:param path_to_save: the path to where to store the resulting data set (saved as .csv), defaults current directory.
:param name_to_save: the name to give to the resulting dataset, defaults discr_data.csv.
:param num_bins: the number of discretisation bins to use (fixed for all variables), defaults 5.
"""
l=[]
for col in list(set(dataframe.columns.values) - set(ignore)):
bins = np.linspace(min(dataframe[col]), max(dataframe[col]), num_bins)
l.append(pd.DataFrame(np.digitize(dataframe[col], bins),columns=[col]))
# add the ignored columns to the dataframe
for c in ignore:
l.append(pd.DataFrame(dataframe[[c]]))
discr_data=pd.concat(l, join='outer', axis=1)
discr_data.to_csv(path_to_save+name_to_save+".csv",index=False)
# ## 2 - 1 - Network for Host Variables :
# SPLEX host dataset, load and display some information about the data
splex_host = pd.read_table(data_path+"SPLEX_host.txt", sep=" ")
print("Number of examples :", splex_host.shape[0])
print("Dimension of the problem :", splex_host.shape[1])
print("Variables : ", splex_host.columns.values)
print("Sample of the data :")
splex_host.head()
# discretize the dataset, save and then load the discretized dataset :
path_to_save = data_path
name_to_save = "SPLEX_host_disc"
discretize_dataset(splex_host, path_to_save = path_to_save, name_to_save = name_to_save, num_bins = 5)
discr_splex_host = pd.read_table(path_to_save+name_to_save+".csv", sep=",")
print("Sample of the discretised host data :")
discr_splex_host.head()
# +
# Learn the Bayesian Network for host variables, and save the obtained networks
# path to discretised dataset
data_where = "./data/SPLEX/SPLEX_host_disc.csv"
# declare learner (the same learner instance is reused for both algorithms)
learner=gum.BNLearner(data_where)
# learn the BN
# learn the BN with useLocalSearchWithTabuList
learner.useLocalSearchWithTabuList()
host_bn_local = learner.learnBN()
# learn the BN with useGreedyHillClimbing()
learner.useGreedyHillClimbing()
host_bn_greedy = learner.learnBN()
# -
# display both learned structures side by side
gnb.sideBySide(host_bn_local,
               host_bn_greedy,
               captions=["BN when using useLocalSearchWithTabuList","BN when using useGreedyHillClimbing"])
# example of a CPT table
gnb.sideBySide(host_bn_local.cpt("crevised_QUICKI_meta"),
               host_bn_greedy.cpt("crevised_QUICKI_meta"),
               captions=["CPT for $crevised\_QUICKI\_meta$ when using useLocalSearchWithTabuList","CPT for $crevised\_QUICKI\_meta$ when using useGreedyHillClimbing"])
# save BNs images as pdf
path_to_save = "./data/SPLEX/BN/"
bnPlot.dotize(host_bn_greedy, path_to_save+"host_bn_greedy", format='pdf')
bnPlot.dotize(host_bn_local, path_to_save+"host_bn_local", format='pdf')
# > Let's remark that we do not get the same Bayesian Network when using the useLocalSearchWithTabuList function and the useGreedyHillClimbing function.
# ## 2 - 2 - Network for Environmental Variables :
# SPLEX env dataset, load and display some information about the data
data_path = "./data/SPLEX/"
splex_env = pd.read_table(data_path+"SPLEX_env.txt", sep=" ")
print("Number of examples :", splex_env.shape[0])
print("Dimension of the problem :", splex_env.shape[1])
print("Variables : ", splex_env.columns.values)
print("Sample of the data :")
splex_env.head()
# discretize the dataset, save and then load the discretized dataset :
path_to_save = './data/SPLEX/'
name_to_save = "SPLEX_env_disc"
discretize_dataset(splex_env, path_to_save = path_to_save, name_to_save = name_to_save, num_bins = 5)
discr_splex_env = pd.read_table(path_to_save+name_to_save+".csv", sep=",")
print("Sample of the discretised enviromental data :")
discr_splex_env.head()
# +
# Learn the Bayesian Network for environmental variables, and save the obtained networks
# path to discretised dataset
data_where = "./data/SPLEX/SPLEX_env_disc.csv"
# declare learner
learner=gum.BNLearner(data_where)
# learn the BN
# learn the BN with useLocalSearchWithTabuList
learner.useLocalSearchWithTabuList()
env_bn_local = learner.learnBN()
# learn the BN with useGreedyHillClimbing()
learner.useGreedyHillClimbing()
env_bn_greedy = learner.learnBN()
# -
# display both learned structures side by side
gnb.sideBySide(env_bn_local,
               env_bn_greedy,
               captions=["BN when using useLocalSearchWithTabuList","BN when using useGreedyHillClimbing"])
# example of a CPT table
gnb.sideBySide(env_bn_local.cpt("Mg_alim"),
               env_bn_greedy.cpt("Mg_alim"),
               captions=["CPT for $Mg\_alim$ when using useLocalSearchWithTabuList","CPT for $Mg\_alim$ when using useGreedyHillClimbing"])
# save BN images as pdf
path_to_save = "./data/SPLEX/BN/"
bnPlot.dotize(env_bn_greedy, path_to_save+"env_bn_greedy", format='pdf')
bnPlot.dotize(env_bn_local, path_to_save+"env_bn_local", format='pdf')
# > Let's remark that we do not get the same Bayesian Network when using the useLocalSearchWithTabuList function and the useGreedyHillClimbing function.
# # 3 - Dynamic Bayesian Networks :
# > In this section we build a dynamic BN which allows to see the evolution of different variables across time steps.
# load the dynamic.txt data set and display some information
data_path = "./data/"
dynamic = pd.read_table(data_path+"dynamic.txt", sep=" ")
print("Number of examples :", dynamic.shape[0])
print("Dimension of the problem :", dynamic.shape[1])
print("Variables : ", dynamic.columns.values)
print("Sample of the data :")
dynamic.head()
# discretise the dataset; the Status columns are already discrete, so skip them
path_to_save = './data/'
name_to_save = "dynamic_disc"
discretize_dataset(dynamic, ignore=["Status0", "Status1", "Status2"] , path_to_save = path_to_save, name_to_save = name_to_save, num_bins = 5)
discr_dynamic = pd.read_table(path_to_save+name_to_save+".csv", sep=",")
print("Sample of the discretised dynamic dataset :")
discr_dynamic.head()
# +
# learn the dynamic BN : a variable with a name A is present at t=0 with the name A0 and at time t as At.
# path to discretised dataset
data_where = "./data/dynamic_disc.csv"
# declare learner
learner=gum.BNLearner(data_where)
# learn the BN
# learn the BN with useLocalSearchWithTabuList
learner.useLocalSearchWithTabuList()
dynamic_bn_local = learner.learnBN()
# learn the BN with useGreedyHillClimbing()
learner.useGreedyHillClimbing()
dynamic_bn_greedy = learner.learnBN()
# -
# display the learned dynamic BNs
gnb.sideBySide(dynamic_bn_local,
               dynamic_bn_greedy,
               captions=["BN when using useLocalSearchWithTabuList","BN when using useGreedyHillClimbing"])
# > Note that we do not get the same Bayesian Network when using the useLocalSearchWithTabuList function and the useGreedyHillClimbing function.
# ### **Visualize the Network with Time Slices :**
# visualize the network with time slices
gdyn.showTimeSlices(dynamic_bn_local)
# > We can see the interactions between the different variables across time.
# ### **Inference for Variable Status : remission,non-remission, or partial remission :**
# Inference: posterior of Status at each of the three time steps
for i in range(3):
    gnb.showPosterior(dynamic_bn_local,target="Status{}".format(i),evs={})
# > After recording the different variables and building the BN, we infer the variable status (across time steps) and see how it evolves (from 1, to approximately equal distribution to 2).
| Bayesian Networks/.ipynb_checkpoints/Bayesian_Netwroks_Modeling-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Optimization of an X-Gate for a Transmon Qubit
# + attributes={"classes": [], "id": "", "n": "1"}
# NBVAL_IGNORE_OUTPUT
# %load_ext watermark
import qutip
import numpy as np
import scipy
import matplotlib
import matplotlib.pylab as plt
import krotov
# %watermark -v --iversions
# -
# $\newcommand{tr}[0]{\operatorname{tr}}
# \newcommand{diag}[0]{\operatorname{diag}}
# \newcommand{abs}[0]{\operatorname{abs}}
# \newcommand{pop}[0]{\operatorname{pop}}
# \newcommand{aux}[0]{\text{aux}}
# \newcommand{opt}[0]{\text{opt}}
# \newcommand{tgt}[0]{\text{tgt}}
# \newcommand{init}[0]{\text{init}}
# \newcommand{lab}[0]{\text{lab}}
# \newcommand{rwa}[0]{\text{rwa}}
# \newcommand{bra}[1]{\langle#1\vert}
# \newcommand{ket}[1]{\vert#1\rangle}
# \newcommand{Bra}[1]{\left\langle#1\right\vert}
# \newcommand{Ket}[1]{\left\vert#1\right\rangle}
# \newcommand{Braket}[2]{\left\langle #1\vphantom{#2} \mid #2\vphantom{#1}\right\rangle}
# \newcommand{op}[1]{\hat{#1}}
# \newcommand{Op}[1]{\hat{#1}}
# \newcommand{dd}[0]{\,\text{d}}
# \newcommand{Liouville}[0]{\mathcal{L}}
# \newcommand{DynMap}[0]{\mathcal{E}}
# \newcommand{identity}[0]{\mathbf{1}}
# \newcommand{Norm}[1]{\lVert#1\rVert}
# \newcommand{Abs}[1]{\left\vert#1\right\vert}
# \newcommand{avg}[1]{\langle#1\rangle}
# \newcommand{Avg}[1]{\left\langle#1\right\rangle}
# \newcommand{AbsSq}[1]{\left\vert#1\right\vert^2}
# \newcommand{Re}[0]{\operatorname{Re}}
# \newcommand{Im}[0]{\operatorname{Im}}$
# ## Define the Hamiltonian
# The effective Hamiltonian of a single transmon depends on the capacitive energy $E_C=e^2/2C$ and the Josephson energy $E_J$, an energy due to the Josephson junction working as a nonlinear inductor periodic with the flux $\Phi$. In the so-called transmon limit the ratio between these two energies lie around $E_J / E_C \approx 45$. The time-independent Hamiltonian can be described then as
#
# \begin{equation*}
# \op{H}_{0} = 4 E_C (\hat{n}-n_g)^2 - E_J \cos(\hat{\Phi})
# \end{equation*}
#
# where $\hat{n}$ is the number operator, which count how many Cooper pairs cross the junction, and $n_g$ being the effective offset charge measured in Cooper pair charge units. The aforementioned equation can be written in a truncated charge basis defined by the number operator $\op{n} \ket{n} = n \ket{n}$ such that
#
# \begin{equation*}
# \op{H}_{0} = 4 E_C \sum_{j=-N} ^N (j-n_g)^2 |j \rangle \langle j| - \frac{E_J}{2} \sum_{j=-N} ^{N-1} ( |j+1\rangle\langle j| + |j\rangle\langle j+1|).
# \end{equation*}
#
# If we apply a potential $V(t)$ to the qubit the complete Hamiltonian is changed to
#
# \begin{equation*}
# \op{H} = \op{H}_{0} + V(t) \cdot \op{H}_{1}
# \end{equation*}
#
# The interaction Hamiltonian $\op{H}_1$ is then equivalent to the charge operator $\op{q}$, which in the truncated charge basis can be written as
#
# \begin{equation*}
# \op{H}_1 = \op{q} = \sum_{j=-N} ^N -2n \ket{n} \bra{n}.
# \end{equation*}
#
# Note that the -2 coefficient is just indicating that the charge carriers here are Cooper pairs, each with a charge of $-2e$.
#
# We define the logic states $\ket{0_l}$ and $\ket{1_l}$ (not to be confused with the charge states $\ket{n=0}$ and $\ket{n=1}$) as the eigenstates of the free Hamiltonian $\op{H}_0$ with the lowest energy. The problem to solve is to find a potential $V_{opt}(t)$ such that, after a given final time $T$, the logical states are exchanged, i.e. the pulse implements an X-gate.
#
# + attributes={"classes": [], "id": "", "n": "2"}
def transmon_ham_and_states(Ec=0.386, EjEc=45, nstates=8, ng=0.0, T=10.0, steps=1000):
    """Construct the driven transmon Hamiltonian and its two lowest eigenstates.

    Parameters
    ----------
    Ec : capacitive energy
    EjEc : ratio of Josephson to capacitive energy, Ej / Ec
    nstates : charge-basis truncation; the basis holds 2*nstates + 1 states
    ng : effective offset charge (in Cooper-pair units)
    T : total gate duration, used to center the guess-pulse envelope
    steps : unused here; kept for interface compatibility

    Returns ``([H0, [H1, eps0]], psi0, psi1)`` in QuTiP's time-dependent
    Hamiltonian format, with the two lowest-energy eigenstates as the
    logical qubit basis.
    """
    Ej = EjEc * Ec
    charge = np.arange(-nstates, nstates + 1)
    # Josephson coupling |j+1><j| + |j><j+1| in the truncated charge basis.
    lower = np.diag(np.ones(2 * nstates), k=-1)
    H0 = qutip.Qobj(np.diag(4 * Ec * (charge - ng) ** 2) - Ej * (lower + lower.T) / 2.0)
    # The drive couples through the charge operator q = -2n (Cooper pairs).
    H1 = qutip.Qobj(-2 * np.diag(charge))
    eigenvals, eigenvecs = scipy.linalg.eig(H0.full())
    order = np.argsort(eigenvals.real)
    energies = eigenvals[order].real
    states = eigenvecs[:, order]
    w01 = energies[1] - energies[0]  # 0 -> 1 transition energy
    psi0 = qutip.Qobj(states[:, 0])
    psi1 = qutip.Qobj(states[:, 1])
    # Gaussian envelope centered at T/2, modulating a fast carrier.
    profile = lambda t: np.exp(-40.0 * (t / T - 0.5) ** 2)
    eps0 = lambda t, args: 0.5 * profile(t) * np.cos(8 * np.pi * w01 * t)
    return ([H0, [H1, eps0]], psi0, psi1)
# + attributes={"classes": [], "id": "", "n": "3"}
# Build the time-dependent Hamiltonian [H0, [H1, eps0]] and the two
# lowest-energy eigenstates that serve as the logical qubit basis.
H, psi0, psi1 = transmon_ham_and_states()
# -
# We introduce the projectors $P_i = \ket{\psi _i}\bra{\psi _i}$ for the logic states $\ket{\psi _i} \in \{\ket{0_l}, \ket{1_l}\}$
# + attributes={"classes": [], "id": "", "n": "4"}
# Projectors onto the logic states; used below as expectation operators
# to track populations during the propagation.
proj0 = psi0 * psi0.dag()
proj1 = psi1 * psi1.dag()
# -
# ## Optimization target
# We choose our X-gate to be defined during a time interval starting at $t_{0} = 0$ and ending at $T = 10$, with a total of $nt = 1000$ time steps.
# + attributes={"classes": [], "id": "", "n": "5"}
# Time grid shared by the propagation and the pulse discretization.
tlist = np.linspace(0, 10, 1000)
# -
# We make use of the $\sigma _{x}$ operator included in QuTiP to define our objective:
# + attributes={"classes": [], "id": "", "n": "11"}
# One optimization objective per logical basis state: realize sigma_x
# (an X-gate) on the {psi0, psi1} subspace.
objectives = krotov.gate_objectives(
    basis_states=[psi0, psi1], gate=qutip.operators.sigmax(), H=H)
# -
# We define the desired shape of the pulse and the update factor $\lambda _a$
# + attributes={"classes": [], "id": "", "n": "16"}
def S(t):
    """Gaussian update shape: peaks at mid-interval, vanishes toward the edges."""
    # (len(tlist) - 1) * dt is the total duration T spanned by the time grid.
    duration = (len(tlist) - 1) * (tlist[1] - tlist[0])
    return np.exp(-40.0 * (t / duration - 0.5) ** 2)
# Krotov options for the control field stored at H[1][1] (the guess eps0):
# lambda_a scales the pulse update, S confines updates to the interior
# of the time interval so the pulse switches on/off smoothly.
pulse_options = {
    H[1][1]: krotov.PulseOptions(lambda_a=1, shape=S)
}
# -
# It may be useful to check the fidelity after each iteration. To achieve this, we define a simple function that will be used by the main routine
def print_fidelity(**kwargs):
    """Info hook: print and return the real-part fidelity of the current iteration."""
    fid = np.array(kwargs['tau_vals']).real.mean()
    print("Iteration %d: \tF = %f" % (kwargs['iteration'], fid))
    return fid
# ## Simulate dynamics of the guess pulse
# + attributes={"classes": [], "id": "", "n": "17"}
def plot_pulse(pulse, tlist):
    """Plot a control pulse over the given time grid.

    ``pulse`` may be an array of amplitudes or a callable ``pulse(t, args)``,
    in which case it is sampled on ``tlist`` first.
    """
    if callable(pulse):
        pulse = np.array([pulse(t, None) for t in tlist])
    fig, ax = plt.subplots()
    ax.plot(tlist, pulse)
    ax.set_xlabel('time')
    ax.set_ylabel('pulse amplitude')
    plt.show(fig)
# + attributes={"classes": [], "id": "", "n": "18"}
# Show the guess pulse (H[1][1] is the callable eps0).
plot_pulse(H[1][1], tlist)
# -
# Once we are sure to have obtained the desired guess pulse, the dynamics for the initial guess can be found easily
# + attributes={"classes": [], "id": "", "n": "19"}
# Propagate each objective under the guess pulse, recording the
# populations of both logic states.
guess_dynamics = [objectives[x].mesolve(tlist, e_ops=[proj0, proj1]) for x in [0,1]]
# using initial state psi0 = objectives[0].initial_state
# + attributes={"classes": [], "id": "", "n": "20"}
def plot_population(result):
    """Plot the populations of the two logic states from a propagation result."""
    fig, ax = plt.subplots()
    for label, population in zip(('0', '1'), result.expect):
        ax.plot(result.times, population, label=label)
    ax.legend()
    ax.set_xlabel('time')
    ax.set_ylabel('population')
    plt.show(fig)
# + attributes={"classes": [], "id": "", "n": "21"}
# Population dynamics starting from |0_l> and from |1_l>, respectively.
plot_population(guess_dynamics[0])
plot_population(guess_dynamics[1])
# -
# It is obvious that our initial guess is not even near the pulse that we are trying to achieve. However, we will still use it and see what results we can obtain.
# ## Optimize
# We now use all the information that we have gathered to initialize
# the optimization routine. That is:
#
# * The `objectives`: creating an X-gate in the given basis.
#
# * The `pulse_options`: initial pulses and their shapes restrictions.
#
# * The `tlist`: time grid used for the propagation.
#
# * The `propagator`: propagation method that will be used.
#
# * The `chi_constructor`: the optimization functional to use.
#
# * The `info_hook`: the subroutines to be called and data to be analyzed between iterations.
#
# * The `iter_stop`: the number of iterations to perform the optimization.
# + attributes={"classes": [], "id": "", "n": "23"}
# Run Krotov's method: matrix-exponential propagator, real-part overlap
# functional, printing the fidelity after each of (at most) 20 iterations.
oct_result = krotov.optimize_pulses(
    objectives, pulse_options, tlist,
    propagator=krotov.propagators.expm,
    chi_constructor=krotov.functionals.chis_re,
    info_hook=print_fidelity, iter_stop=20)
# -
# ## Simulate dynamics of the optimized pulse
# We want to see how much the results have improved after the optimization.
# + attributes={"classes": [], "id": "", "n": "27"}
# Optimized control field (discretized on tlist).
plot_pulse(oct_result.optimized_controls[0], tlist)
# + attributes={"classes": [], "id": "", "n": "28"}
# Dynamics under the optimized pulse, recording logic-state populations.
opt_dynamics = [oct_result.optimized_objectives[x].mesolve(
    tlist, e_ops=[proj0, proj1]) for x in [0,1]]
# -
# Same propagation but storing the full states (needed for the phase plot below).
opt_states = [oct_result.optimized_objectives[x].mesolve(tlist) for x in [0,1]]
# + attributes={"classes": [], "id": "", "n": "29"}
plot_population(opt_dynamics[0])
# -
plot_population(opt_dynamics[1])
# In this case we do not only care about the expected value for the states, but since we want to implement a gate it is necessary to check whether we are performing a coherent control. We are then interested in the phase difference that we obtain after propagating the states from the logic basis.
def plot_gate(result):
    """Plot the relative phase (in units of pi) between the two propagated
    logic states over time, and print its final value."""
    n_steps = len(result[0].states)
    phase0 = np.array([np.angle(result[0].states[k].overlap(psi1)) for k in range(n_steps)])
    phase1 = np.array([np.angle(result[1].states[k].overlap(psi0)) for k in range(n_steps)])
    rel_phase = (phase0 - phase1) % (2*np.pi)
    fig, ax = plt.subplots()
    ax.plot(result[0].times, rel_phase/np.pi)
    ax.set_xlabel('time')
    ax.set_ylabel('relative phase (π)')
    plt.show(fig)
    print('Final relative phase = %.2e' % rel_phase[-1])
plot_gate(opt_states)
# We may also propagate the optimization result using the same propagator that was
# used in the optimization (instead of `qutip.mesolve`). The main difference
# between the two propagations is that `mesolve` assumes piecewise constant pulses
# that switch between two points in `tlist`, whereas `propagate` assumes that
# pulses are constant on the intervals of `tlist`, and thus switches *on* the
# points in `tlist`.
# + attributes={"classes": [], "id": "", "n": "30"}
# Re-propagate with the same piecewise-constant propagator used in the
# optimization (expm) instead of qutip.mesolve.
opt_dynamics2 = [oct_result.optimized_objectives[x].propagate(
    tlist, e_ops=[proj0, proj1], propagator=krotov.propagators.expm) for x in [0,1]]
# -
# The difference between the two propagations gives an indication of the "time
# discretization error". If this error were unacceptably large, we would need a
# smaller time step.
# + attributes={"classes": [], "id": "", "n": "31"}
# Discrepancy in the final |1_l> population between the two propagation schemes.
"%.2e" % abs(opt_dynamics2[0].expect[1][-1] - opt_dynamics[0].expect[1][-1])
# -
| docs/notebooks/05_example_transmon_xgate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import os
# Paths to the raw and processed Titanic data sets.
data_path = os.path.join(os.path.pardir, 'data/raw/')
train_path = data_path + 'train.csv'
test_path = data_path + 'test.csv'
#train_df = pd.read_csv(train_path, index_col='PassengerId')
#test_df = pd.read_csv(test_path, index_col='PassengerId')
#test_df['Survived'] = -1
#df = pd.concat((train_df, test_df))
processed_data_path = os.path.join(os.path.pardir, 'data/processed/')
p_train_path = processed_data_path + 'train.csv'
p_test_path = processed_data_path + 'test.csv'
train_df = pd.read_csv(p_train_path, index_col='PassengerId')
test_df = pd.read_csv(p_test_path, index_col='PassengerId')
# Sentinel: -1 marks test rows, which have no known Survived label.
test_df['Survived'] = -1
df = pd.concat((train_df, test_df))
# -
import matplotlib.pyplot as plt
# %matplotlib inline
# Single histogram of passenger age.
plt.hist(df.Age)
# Two side-by-side histograms: fare and age.
f, (p1, p2) = plt.subplots(1,2, figsize=(14,3))
p1.hist(df.Fare)
p2.hist(df.Age)
| notebooks/05-Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Basic item assignment: adds a new key or overwrites an existing one.
d = {'k1': 1, 'k2': 2}
d['k3'] = 3
print(d)
d['k1'] = 100
print(d)
# +
# update() with another dict: existing keys are overwritten, new keys added.
d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
d1.update(d2)
print(d1)
# -
d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
d3 = {'k5': 5, 'k6': 6}
# +
# update() accepts at most one positional argument:
# d1.update(d2, d3)
# TypeError: update expected at most 1 arguments, got 2
# -
# ...but multiple dicts can be merged in by unpacking them as keyword args.
d1.update(**d2, **d3)
print(d1)
d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
d3 = {'k5': 5, 'k6': 6}
# +
# Unpacking two dicts that share a key raises:
# d3.update(**d1, **d2)
# TypeError: dict.update() got multiple values for keyword argument 'k1'
# -
# Merging into a NEW dict with literal unpacking or dict(**...).
d1 = {'k1': 1, 'k2': 2}
d2 = {'k3': 3, 'k4': 4}
print({**d1, **d2})
print(dict(**d1, **d2))
# With duplicate keys, {**d1, **d2} keeps the right-most value...
d1 = {'k1': 1, 'k2': 2}
d2 = {'k1': 100, 'k3': 3, 'k4': 4}
print({**d1, **d2})
# +
# ...whereas dict(**d1, **d2) raises on the duplicate:
# print(dict(**d1, **d2))
# TypeError: dict() got multiple values for keyword argument 'k1'
# +
# update() with keyword arguments.
d = {'k1': 1, 'k2': 2}
d.update(k1=100, k3=3, k4=4)
print(d)
# +
# update() with an iterable of (key, value) pairs.
d = {'k1': 1, 'k2': 2}
d.update([('k1', 100), ('k3', 3), ('k4', 4)])
print(d)
# +
# update() fed by zip(): pairs keys with values positionally.
# Fix: the keys list had been mangled to the placeholder '<KEY>'; restore the
# three keys that pair with values [100, 3, 4], matching the kwargs and
# list-of-pairs examples that update the same dict.
d = {'k1': 1, 'k2': 2}
keys = ['k1', 'k3', 'k4']
values = [100, 3, 4]
d.update(zip(keys, values))
print(d)
# +
d = {'k1': 1, 'k2': 2}
# A keyword argument cannot be repeated within a single call:
# d.update(k3=3, k3=300)
# SyntaxError: keyword argument repeated: k3
# +
# With an iterable of pairs, a repeated key is allowed; the last value wins.
d = {'k1': 1, 'k2': 2}
d.update([('k3', 3), ('k3', 300)])
print(d)
# +
# Same behavior when the pairs come from zip().
d = {'k1': 1, 'k2': 2}
keys = ['k3', 'k3']
values = [3, 300]
d.update(zip(keys, values))
print(d)
| notebook/dict_add_update.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/erivetna87/DS-Unit-2-Kaggle-Challenge/blob/master/EricR_DS7_Sprint_Challenge_6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="M3XH_XLsy_Bn"
# _Lambda School Data Science, Unit 2_
#
# # Sprint Challenge: Predict Steph Curry's shots 🏀
#
# For your Sprint Challenge, you'll use a dataset with all Steph Curry's NBA field goal attempts. (Regular season and playoff games, from October 28, 2009, through June 5, 2019.)
#
# You'll predict whether each shot was made, using information about the shot and the game. This is hard to predict! Try to get above 60% accuracy. The dataset was collected with the [nba_api](https://github.com/swar/nba_api) Python library.
# + colab_type="code" id="Nw3CL7TE7tNq" outputId="4d443982-6f5a-496b-e210-c6f7b75fd6ab" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install packages in Colab
# !pip install category_encoders==2.0.0
# !pip install pandas-profiling==2.3.0
# !pip install plotly==4.1.1
# + colab_type="code" id="-Nm24pCHy_Bo" colab={}
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor
# Read data: Steph Curry field-goal attempts, hosted on Google Drive.
url = 'https://drive.google.com/uc?export=download&id=1fL7KPyxgGYfQDsuJoBWHIWwCAf-HTFpX'
df = pd.read_csv(url)
# Check data shape: fail fast if the download did not yield the expected table.
assert df.shape == (13958, 20)
# + [markdown] colab_type="text" id="B8BvDKLFy_Bq"
# To demonstrate mastery on your Sprint Challenge, do all the required, numbered instructions in this notebook.
#
# To earn a score of "3", also do all the stretch goals.
#
# You are permitted and encouraged to do as much data exploration as you want.
#
# **1. Begin with baselines for classification.** Your target to predict is `shot_made_flag`. What is your baseline accuracy, if you guessed the majority class for every prediction?
#
# **2. Hold out your test set.** Use the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your test set has 1,709 observations.
#
# **3. Engineer new feature.** Engineer at least **1** new feature, from this list, or your own idea.
# - **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ?
# - **Opponent**: Who is the other team playing the Golden State Warriors?
# - **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period.
# - **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long.
# - **Made previous shot**: Was Stephen Curry's previous shot successful?
#
# **4. Decide how to validate** your model. Choose one of the following options. Any of these options are good. You are not graded on which you choose.
# - **Train/validate/test split: train on the 2009-10 season through 2016-17 season, validate with the 2017-18 season.** You'll know you've split the data correctly when your train set has 11,081 observations, and your validation set has 1,168 observations.
# - **Train/validate/test split: random 80/20%** train/validate split.
# - **Cross-validation** with independent test set. You may use any scikit-learn cross-validation method.
#
# **5.** Use a scikit-learn **pipeline** to **encode categoricals** and fit a **Decision Tree** or **Random Forest** model.
#
# **6.** Get your model's **validation accuracy.** (Multiple times if you try multiple iterations.)
#
# **7.** Get your model's **test accuracy.** (One time, at the end.)
#
#
# **8.** Given a **confusion matrix** for a hypothetical binary classification model, **calculate accuracy, precision, and recall.**
#
# ### Stretch Goals
# - Engineer 4+ new features total, either from the list above, or your own ideas.
# - Make 2+ visualizations to explore relationships between features and target.
# - Optimize 3+ hyperparameters by trying 10+ "candidates" (possible combinations of hyperparameters). You can use `RandomizedSearchCV` or do it manually.
# - Get and plot your model's feature importances.
#
#
# + [markdown] colab_type="text" id="t6Jt3qjQ-zig"
# ## 1. Begin with baselines for classification.
#
# >Your target to predict is `shot_made_flag`. What would your baseline accuracy be, if you guessed the majority class for every prediction?
# + colab_type="code" id="I0BDeNFG_Kee" colab={}
# 1. Begin with baselines for classification. Your target to predict is shot_made_flag.
#What is your baseline accuracy, if you guessed the majority class for every prediction?
# + id="SYiDZNQKgzFx" colab_type="code" outputId="b8173a96-66ba-4830-e578-ae4fd8b2b95b" colab={"base_uri": "https://localhost:8080/", "height": 117}
# Peek at one row to see the available columns.
df.head(1)
# + id="8PewwC85oW-W" colab_type="code" outputId="c6d9860f-e450-415c-bfd0-f02381a35fcb" colab={"base_uri": "https://localhost:8080/", "height": 317}
df.describe()
# + id="O1Gj_raCiIDL" colab_type="code" colab={}
# Parse game_date so it can be used for the season-based test split.
df['game_date'] = pd.to_datetime(df['game_date'], infer_datetime_format=True)
# + id="nG8LWC0OlCKW" colab_type="code" colab={}
# NOTE(review): the result of drop() is discarded (no assignment and no
# inplace=True), so df still contains game_id/game_event_id after this line.
df.drop(columns=['game_id','game_event_id'])
# + id="jG91iFwRrYPp" colab_type="code" colab={}
# NOTE(review): this only aliases df under two names; both are immediately
# overwritten by the train_test_split in the next cell.
train, test = df, df
# + id="l-HiEYoHl5DF" colab_type="code" outputId="48074d42-2796-4367-ded3-471582ac2f1b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Random 80/20 split, stratified on the target.
train, test = train_test_split(df, train_size=0.80, test_size=0.20,
                               stratify=train['shot_made_flag'])
train.shape, test.shape
# + id="-02R4HCziIKY" colab_type="code" colab={}
# Target is shot_made_flag
target = 'shot_made_flag'
#Dataframe dropping target
train_features = train.drop(columns=[target])
#obtain numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
#Get Cardinality (only low-cardinality categoricals are kept for one-hot encoding)
cardinality = train_features.select_dtypes(exclude='number').nunique()
categorical_features = cardinality[cardinality <= 15].index.tolist()
# print(categorical_features)
#combine feature lists
features = numeric_features + categorical_features
# + id="Bf6hHPhS6GVf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 617} outputId="84d40ddb-2a63-4c42-fd17-4b18e6f99c18"
# Shot chart: court location colored by make/miss.
import plotly.express as px
px.scatter(train, x='loc_x', y='loc_y', color='shot_made_flag')
# + id="e_VyJ_IgiIM5" colab_type="code" colab={}
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# + id="1PdBub2M6hg2" colab_type="code" colab={}
# NOTE(review): strategy='stratified' guesses randomly in proportion to class
# frequencies; the majority-class baseline described above would be
# strategy='most_frequent' — confirm which baseline is intended.
dummy = DummyClassifier(strategy='stratified', random_state=1)
# + id="MWfSJ_bJiIUr" colab_type="code" outputId="edb78315-20c5-4176-b742-3fb34fdbd6e7" colab={"base_uri": "https://localhost:8080/", "height": 34}
dummy.fit(X_train,y_train)
# + id="S96jqVufiISr" colab_type="code" outputId="f891c9c9-a7a3-45a4-ac21-66d1475f70bc" colab={"base_uri": "https://localhost:8080/", "height": 34}
#evaluating baseline accuracy for model
dummy.score(X_test,y_test)
# + [markdown] colab_type="text" id="Dz2QHBiVy_Br"
# ## 2. Hold out your test set.
#
# >Use the 2018-19 season to test. NBA seasons begin in October and end in June. You'll know you've split the data correctly when your test set has 1,709 observations.
# + colab_type="code" id="OPod6lBG_wTT" outputId="22690c05-be1a-438e-ba06-3d3e862d46e3" colab={"base_uri": "https://localhost:8080/", "height": 34}
#Creating Test Set within Data Constraints (the 2018-19 NBA season)
df_2018_2019 = df.loc[(df['game_date'] >= '10-1-2018') & (df['game_date'] <= '6-30-2019')]
df_2018_2019.shape
# + id="R1PfyaLp4TJx" colab_type="code" outputId="70777728-4c23-4f7f-e24f-9fd7aeb52f48" colab={"base_uri": "https://localhost:8080/", "height": 153}
#Engineering Home Advantage for Golden State Warriors
# NOTE(review): df_2018_2019 is a slice of df, so this assignment triggers
# pandas' SettingWithCopyWarning; consider adding .copy() to the slice above.
df_2018_2019['htm_adv'] = df_2018_2019['htm'].apply(lambda x: 1 if x == 'GSW' else 0)
# + [markdown] colab_type="text" id="P9Nihzk6y_CF"
# ## 3. Engineer new feature.
#
# >Engineer at least **1** new feature, from this list, or your own idea.
# >
# >- **Homecourt Advantage**: Is the home team (`htm`) the Golden State Warriors (`GSW`) ?
# >- **Opponent**: Who is the other team playing the Golden State Warriors?
# >- **Seconds remaining in the period**: Combine minutes remaining with seconds remaining, to get the total number of seconds remaining in the period.
# >- **Seconds remaining in the game**: Combine period, and seconds remaining in the period, to get the total number of seconds remaining in the game. A basketball game has 4 periods, each 12 minutes long.
# >- **Made previous shot**: Was Stephen Curry's previous shot successful?
#
#
# + colab_type="code" id="A0pxdFtWy_Bz" colab={}
#Engineering Home Advantage for Golden State Warriors
# Binary feature: 1 when the home team is GSW, else 0.
df['htm_adv'] = df['htm'].apply(lambda x: 1 if x == 'GSW' else 0)
# + [markdown] colab_type="text" id="eLs7pt7NFJLF"
# ## **4. Decide how to validate** your model.
#
# >Choose one of the following options. Any of these options are good. You are not graded on which you choose.
# >
# >- **Train/validate/test split: train on the 2009-10 season through 2016-17 season, validate with the 2017-18 season.** You'll know you've split the data correctly when your train set has 11,081 observations, and your validation set has 1,168 observations.
# >- **Train/validate/test split: random 80/20%** train/validate split.
# >- **Cross-validation** with independent test set. You may use any scikit-learn cross-validation method.
# + id="Au-MalR2vGFB" colab_type="code" colab={}
# Random 80/20 train/validation split, stratified on the target.
train, val = train_test_split(df, train_size=0.80, test_size=0.20,
                             stratify=df['shot_made_flag'])
# + colab_type="code" id="LJ58CceDISXR" colab={}
# Target is shot_made_flag
target = 'shot_made_flag'
#Dataframe dropping target
train_features = train.drop(columns=[target])
#obtain numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
#Get Cardinality
cardinality = train_features.select_dtypes(exclude='number').nunique()
categorical_features = cardinality[cardinality <= 15].index.tolist()
# print(categorical_features)
#combine feature lists
features = numeric_features + categorical_features
# + id="LsWmyRV61KEH" colab_type="code" colab={}
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
# The held-out 2018-19 season serves as the final test set.
X_test = df_2018_2019[features]
y_test = df_2018_2019[target]
# + [markdown] colab_type="text" id="oQ2lWlu7JPRt"
# ## 5. Use a scikit-learn pipeline to encode categoricals and fit a Decision Tree or Random Forest model.
# + colab_type="code" id="X2S8mUuJy_CB" outputId="229b0939-6fda-48c0-f95f-b3b03ede1d7b" colab={"base_uri": "https://localhost:8080/", "height": 408}
# Pipeline: one-hot encode categoricals, median-impute missing values,
# then fit a 300-tree random forest.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=300,n_jobs=-1)
)
pipeline.fit(X_train,y_train)
# + [markdown] colab_type="text" id="8kJXxFpty_CH"
# ## 6.Get your model's validation accuracy
#
# > (Multiple times if you try multiple iterations.)
# + colab_type="code" id="7560JKvxy_CJ" outputId="73f47d6d-15a2-4ac0-f802-bb64381930e6" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Accuracy on the held-out validation split.
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] colab_type="text" id="YvyYY9tfy_CL"
# ## 7. Get your model's test accuracy
#
# > (One time, at the end.)
# + colab_type="code" id="wjV2dfl6y_CL" outputId="ec1719cd-8704-4ea1-ef4c-7d0a1d9b0af3" colab={"base_uri": "https://localhost:8080/", "height": 34}
# encoder = pipeline.named_steps['onehotencoder']
# encoded = encoder.transform(X_test)
# Final, one-time evaluation on the 2018-19 season test set.
print('Test Accuracy', pipeline.score(X_test, y_test))
# + [markdown] id="xGL5stLvJCn1" colab_type="text"
# ## 8. Given a confusion matrix, calculate accuracy, precision, and recall.
#
# Imagine this is the confusion matrix for a binary classification model. Use the confusion matrix to calculate the model's accuracy, precision, and recall.
#
# <table>
# <tr>
# <td colspan="2" rowspan="2"></td>
# <td colspan="2">Predicted</td>
# </tr>
# <tr>
# <td>Negative</td>
# <td>Positive</td>
# </tr>
# <tr>
# <td rowspan="2">Actual</td>
# <td>Negative</td>
# <td style="border: solid">85</td>
# <td style="border: solid">58</td>
# </tr>
# <tr>
# <td>Positive</td>
# <td style="border: solid">8</td>
# <td style="border: solid"> 36</td>
# </tr>
# </table>
# + [markdown] id="nEvt7NkUJNao" colab_type="text"
# ### Calculate accuracy
# + id="FFszS2A5JJmv" colab_type="code" outputId="6352cf3e-72f7-4124-e954-2f571d4d4949" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Accuracy = (TP + TN) / total = (36 + 85) / 187.
accuracy = (36+85)/(36+8+58+85)
print(accuracy)
# + [markdown] id="XjHTmk8sJO4v" colab_type="text"
# ### Calculate precision
# + id="7qX1gbcMJQS_" colab_type="code" outputId="3509a297-8071-4af5-9a6b-406cb8d5d2ad" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Precision = TP / (TP + FP) = 36 / (36 + 58).
precision = (36)/(36+58)
print(precision)
# + [markdown] id="pFug3ZKaJQ7A" colab_type="text"
# ### Calculate recall
# + id="L0OKc3JxJR4r" colab_type="code" outputId="a054516a-0c0f-4a23-d0a2-be51846b8cb0" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Recall = TP / (TP + FN). From the confusion matrix: TP = 36, FN = 8.
# (Fix: the previous 100/(100+8) matched no cell of the given matrix.)
recall = (36)/(36+8)
print(recall)
| EricR_DS7_Sprint_Challenge_6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
import pandas as pd
import pickle
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
# import here in this order otherwise crashes the kernel,
# something wrong with shapely and pysal,
# shapely needs to be imported before pysal?
from mpl_toolkits.basemap import Basemap
from shapely.geometry import Point, Polygon
import sys
sys.path.append('../')
import scripts.outliers as outliers
import scripts.utils as utils
import scripts.interactive_plot as interactive_plot
# -
# ## Load data
# +
# Paths to the precomputed LDA features and the recording metadata.
# NOTE: this notebook runs on a Python 2 kernel, hence the statement-form prints.
DATA_FILE = '../data/lda_data_8.pickle'
METADATA_FILE = '../data/metadata.csv'
dataset, ddf, w_dict = outliers.load_data(DATA_FILE, METADATA_FILE)
X_list, Y, Yaudio = dataset
# Stack the per-feature-group matrices into one feature matrix per recording.
X = np.concatenate(X_list, axis=1)
# -
# ## Outliers at the recording level
# Recordings whose Mahalanobis distance exceeds the chi^2 0.999 threshold.
df_global, threshold, MD = outliers.get_outliers_df(X, Y, chi2thr=0.999)
outliers.print_most_least_outliers_topN(df_global, N=10)
tab_all = interactive_plot.plot_outliers_world_figure(MD, MD>threshold, ddf)
print "n outliers " + str(len(np.where(MD>threshold)[0]))
# ### Outliers for different sets of features
# outliers for features: repeat the analysis separately per feature group
feat = X_list
feat_labels = ['rhythm', 'melody', 'timbre', 'harmony']
tabs_feat = []
for i in range(len(feat)):
    print 'outliers', feat_labels[i]
    XX = feat[i]
    # threshold/MD are reused (overwritten) for each feature group.
    df_feat, threshold, MD = outliers.get_outliers_df(XX, Y, chi2thr=0.999)
    outliers.print_most_least_outliers_topN(df_feat, N=5)
    tabs_feat.append(interactive_plot.plot_outliers_world_figure(MD, MD>threshold, ddf))
# Output the interactive plot of music outliers in .html.
interactive_plot.plot_tabs(tab_all, tabs_feat, out_file="../demo/outliers.html")
# ### Outliers wrt spatial neighbourhoods
# Outliers relative to each country's spatial neighbours (weights in w_dict).
df_local = outliers.get_local_outliers_df(X, Y, w_dict)
outliers.print_most_least_outliers_topN(df_local, N=10)
# ## Outliers at the country level
# First, cluster recordings in K clusters (select best K based on silhouette score).
centroids, cl_pred = outliers.get_country_clusters(X, bestncl=None, min_ncl=10, max_ncl=30)
ddf['Clusters'] = cl_pred
print len(np.unique(cl_pred))
outliers.print_clusters_metadata(ddf, cl_pred)
# Get histogram of cluster mappings for each country.
cluster_freq = utils.get_cluster_freq_linear(X, Y, centroids)
cluster_freq.head()
| notebooks/results_outliers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Plotting word frequencies
# !conda install nltk --yes
# + tags=[]
import requests
from nltk import FreqDist
from nltk.corpus import stopwords
import seaborn as sns
# %matplotlib inline
# -
# Download the text of "Alice's Adventures in Wonderland" (Project Gutenberg ebook #11) from https://www.gutenberg.org/
help(requests)
# + tags=[]
# Fetch the plain-text ebook over HTTP.
url = "https://www.gutenberg.org/files/11/11-0.txt"
alice = requests.get(url)
print(alice.text)
# -
import nltk
help(nltk)
# Define a function to plot word frequencies
# + tags=[]
def plot_word_frequency(words, top_n=10):
    """Plot a bar chart of the ``top_n`` most frequent words.

    Parameters
    ----------
    words : list of str
        Tokens to count.
    top_n : int, optional
        Number of most common words to show (default 10).

    Returns the seaborn/matplotlib Axes holding the bar plot.
    """
    word_freq = FreqDist(words)
    # Compute the top-n list once instead of twice.
    most_common = word_freq.most_common(top_n)
    labels = [word for word, _ in most_common]
    counts = [count for _, count in most_common]
    # Pass the data as explicit x/y keyword arguments: seaborn >= 0.12
    # no longer accepts the data vectors positionally.
    plot = sns.barplot(x=labels, y=counts)
    return plot
# -
# Plot word frequencies present in the gutenberg corpus
# Frequency distribution over whitespace-separated tokens of the book.
ss = FreqDist(alice.text.split())
ss.most_common(10)[0][0]
ss.most_common(10)
ss
alice_words = alice.text.split()
plot_word_frequency(alice_words, 15)
# ## Stopwords
# Import stopwords from nltk
# + tags=[]
from nltk.corpus import stopwords
nltk.download('stopwords')
# -
# Look at the list of stopwords
help(stopwords.words)
print(stopwords.words('english'))
# Let's remove stopwords from the following piece of text.
# + tags=[]
sample_text = "the great aim of education is not knowledge but action"
# -
# Break text into words
sample_words = sample_text.split()
print(sample_words)
# Remove stopwords
# NOTE(review): stopwords.words('english') is re-evaluated on every iteration;
# hoisting it into a set once would avoid repeated linear membership scans.
sample_words = [word for word in sample_words if word not in stopwords.words('english')]
print(sample_words)
# Join words back to sentence
"sadSSDD".lower()
sample_text = " ".join(sample_words)
print(sample_text)
# ## Removing stopwords in the Alice corpus
# + tags=[]
no_stops = [word for word in alice_words if word.lower() not in stopwords.words("english")]
# -
plot_word_frequency(no_stops, 10)
# Some other things that can be done
# * Need to change tokens to lower case
# * Need to get rid of punctuations
#
# All the preprocessing steps will be covered while creating the classifier
| 9. NLP/1. Lexical Processing/2. Basic Lexical Processing/1. stopwords.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ### Loading common data formats
# [**<NAME>**](mailto:<EMAIL>)
# University of North Carolina, Chapel Hill
# + [markdown] slideshow={"slide_type": "skip"}
# It is possible to turn many sorts of data into a Pandas dataframe for subsequent analysis.
#
# The most basic method is reading comma-delimited text files, or csv files. This is accomplished with the `read_csv`.
# + slideshow={"slide_type": "slide"}
import pandas as pd
# + slideshow={"slide_type": "fragment"}
# Basic CSV load; pandas infers dtypes and creates a default integer index.
df = pd.read_csv("data/titanic.csv")
# + [markdown] slideshow={"slide_type": "skip"}
# After loading any new data, it is best practice to look at the table and basic descriptive statistics. This both ensures that the data loaded correctly and gives you an overview of the data.
#
# First, take a look at the top ten rows.
# + slideshow={"slide_type": "slide"}
df.head(10)
# + [markdown] slideshow={"slide_type": "skip"}
# Note that pandas has created an index which is shown furthest to the left. By default this is a sequential integer counting up from zero. Alternatively, if you have a column or columns that can be used to identify cases, you can assign them to be the index.
# + slideshow={"slide_type": "slide"}
# Same file, but using the PassengerId column as the index.
df_indexed = pd.read_csv("data/titanic.csv", index_col="PassengerId")
df_indexed.head(10)
# + [markdown] slideshow={"slide_type": "skip"}
# Meaningful indexes are particularly useful when you want to merge dataframes.
# + [markdown] slideshow={"slide_type": "skip"}
# You can look at the bottom few rows to make sure the data format is relatively consistent.
# + slideshow={"slide_type": "slide"}
df.tail(5)
# + [markdown] slideshow={"slide_type": "skip"}
# `info` details information about the dataframe such as number of rows and columns, along with information about each column, including the data type and the number of non-missing values.
# + slideshow={"slide_type": "slide"}
df.info()
# + [markdown] slideshow={"slide_type": "skip"}
# `describe` shows the distribution for numerical columns.
# + slideshow={"slide_type": "slide"}
df.describe()
# -
# Pandas assumes that the file is comma delimited. If that is not the case, the `sep` parameter can be set to whatever the delimiter is. Most common options would be `\t` for tab and `|` for pipe.
# + slideshow={"slide_type": "slide"}
# Tab-separated variant of the same data.
df_tab = pd.read_csv("data/titanic_tab.csv", sep="\t", index_col="PassengerId")
df_tab.head(5)
# + [markdown] slideshow={"slide_type": "skip"}
# Excel `xls` files can also be read in directly. You need to specify which sheet.
# + slideshow={"slide_type": "slide"}
df_xls = pd.read_excel("data/titanic.xls", sheet_name="Sheet1")
# + slideshow={"slide_type": "fragment"}
df_xls.head()
# + [markdown] slideshow={"slide_type": "skip"}
# There are similar methods for reading SAS XPORT or SAS7BDAT files with `pd.read_sas()` and Stata `.dta` files with `pd.read_stata()`. Note that `read_stata` may lag behind the most current Stata file format versions, which is also true of older versions of Stata itself.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Beyond the Basics
#
# JSON is increasingly common file format for social scientists to encounter. It is a human-readable text format where each record includes both the variable names, such as `Sex` or `Fare` along with values, like "male" or 7.25. When viewed in a text editor, the contents are often rendered in a fairly straightforward way.
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="images/titanic_json.png" width="800">
# + [markdown] slideshow={"slide_type": "notes"}
# Importing a JSON file as a Pandas dataframe is similar to the other import processes.
# + slideshow={"slide_type": "slide"}
# Load the JSON version of the same data set.
df_json = pd.read_json('data/titanic.json')
# + slideshow={"slide_type": "fragment"}
df_json.head()
# -
# If your data comes as an XML
| Notebooks/Uncategorized/Loading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import _pickle as pkl
# train_df=pd.read_csv('./complaints_train_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)
from gensim.parsing.preprocessing import STOPWORDS
def remove_stopwords(tokens):
    """Return *tokens* (a list of word strings) with gensim's STOPWORDS removed."""
    return [tok for tok in tokens if tok not in STOPWORDS]
train_df=pd.read_csv('./complaints_train_validation_data_clean.csv',usecols=["index","category_name","complaint_title","complaint_description",],na_filter=False)
# Characters to keep when normalizing complaint text (letters and spaces only).
whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')

def _tokenize_clean(texts):
    """Strip non-letter characters, lowercase, split into words, drop stopwords.

    `texts` is an iterable of raw complaint strings; returns a list of token
    lists.  Factored out because the same pipeline was repeated for the
    train, gold and test frames.
    """
    stripped = [''.join(filter(whitelist.__contains__, t)).lower() for t in texts]
    return [remove_stopwords(t.split()) for t in stripped]

# Train: tokenize title + description together.
train_df.insert(1, "complaint_text", _tokenize_clean(train_df['complaint_title'] + " " + train_df['complaint_description']))
val_df=pd.read_csv('./complaints_validation_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)
test_df=pd.read_csv('./complaints_test_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)
## gold labels
gold_df=pd.read_csv('./clean-gold-labels.tsv',sep='\t',usecols=["category_name","complaint_description",],na_filter=False)
# Gold data has no separate title; mirror the description so downstream code
# that reads 'complaint_title' keeps working.
gold_df['complaint_title'] = gold_df['complaint_description']
gold_df.insert(1, "complaint_text", _tokenize_clean(gold_df['complaint_description']))
######
test_df.insert(1, "complaint_text", _tokenize_clean(test_df['complaint_title'] + " " + test_df['complaint_description']))
gold_df.head()
# +
colsize = len(train_df['category_name'])  # number of training rows
# Encode category names as a pandas categorical; `cat.codes` yields integer
# labels in sorted-category order (same order as `class_names` built below).
train_df['category_name'] = train_df["category_name"].astype('category')
#train_df['true_label'] = pd.Series(np.zeros(colsize), index=train_df.index)
# train_df['predicted_label'] = pd.Series(np.zeros(colsize), index=train_df.index)
train_df['true_label'] = train_df['category_name'].cat.codes
# for i in range(colsize):
#     if(train_df['true_label'][i]==5):
#         print(train_df['true_label'][i],train_df['category_name'][i])
# -
# Print each class with its integer index.  Iterate the Index directly
# instead of the private `Index._data` attribute (internal pandas API,
# not guaranteed across versions).
for i, x in enumerate(train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero}).index):
    print(i, x)
# +
# Sorted list of the 42 class names (a groupby index is sorted); persisted
# to class_names.p for reuse by downstream notebooks.
class_names = train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero}).index.tolist()
pkl.dump(class_names,open("class_names.p","wb"))
noOfClasses = len(train_df.groupby("category_name").agg({"complaint_title": np.count_nonzero}).index)
print(class_names)
# -
# Hand-curated keyword lists per class, used by the keyword-based LFs and
# `class_words`; keywords are matched as whole tokens against the cleaned
# complaint token list.
# NOTE(review): 'Air Pollution' holds a single pipe-joined string
# ('dust|smoke|burn') unlike the plain word lists elsewhere; as a whole-token
# match it can never fire — confirm whether it should be ['dust','smoke','burn'].
keywords = {'Air Pollution':['dust|smoke|burn'],
'Autorickshaws and Taxis':['taxi','auto','autorickshaw'],
'BMTC - Driver or Conductor':['rude','behaviour'],
'BMTC - Need new Bus Route':['frequency'],
'BMTC - Others':[],
'Bad Roads':['road'],
'Broken Storm Water Drains':['overflow','drainage'],
'Cattle':['cows','buffaloes','goats','cow'],
'Clearing of Blockage of Under Ground Drainage Pipelines and Replacement of Damaged or Missing Manhole Cover':[],
'Desilting - Lakes':['lake'],
'Diseases':['malaria','dengue','cholera','fever','disease','hospital','epidemic'],
'Electricity':['power','current','power cut'],
'Flooding of Roads and Footpaths':['water','flood','floods'],
'Footpaths':['footpath'],
'Garbage':['waste','plastic','dirt'],
'Government Land Encroachment':['occupy','illegal'],
'Hawkers and Vendors':[],
'Hoardings':['advertise'],
'Illegal posters and Hoardings':['banner', 'ads ','advertise'],
'Lakes - Others':['lake'],
'Maintenance of Roads and Footpaths - Others':[],
'Manholes':['manhole','man hole'],
'Mosquitos':['mosquito','mosquitoe','mosquitoes','dengue','malaria'],
'Need New Streetlights':['streetlight','light','new streetlight'],
'Need New Toilets ':['toilet','urinal','urinate'],
'New Bus Shelters':['shelter'],
'No Sewage Drains':['drainage'],
'Noise Pollution':['siren','speakers','speakers','loud'],
'Others':[],
'Overflow of Storm Water Drains':['pipes'],
'Parking Violations':['parked','parker'],
'Parks and playgrounds':['park','play','playground'],
'Potholes':['holes','pothole'],
'Public Nuisance':[],
'Repair of streetlights':['streetlight','light','broken','damaged'],
'Sewage and Storm Water Drains - Others':['drainage'],
'Stray Dogs':['dog'],
'Traffic':['vehicles'],
'Trees, Parks and Playgrounds - Others':['tree'],
'Unauthorized Construction':['encroach','building','built'],
'Water Leakage':[],
'Water Supply ':[]}
# Regex alternation patterns per class, searched case-insensitively against
# the raw complaint title/description by the generated LF_*_Regex_* functions.
# Consumers collapse each value with ''.join(...), so every entry must be a
# single-element list holding one complete pattern.
regexPatterns = {'Air Pollution':['air.*pollution|pollution|dust'],
'Autorickshaws and Taxis':['autorickshaws|taxis|taxi|auto|autorickshaw'],
'BMTC - Driver or Conductor':['bmtc.*driver|bmtc.*conductor|bus.*driver|bus.*conductor'],
'BMTC - Need new Bus Route':['bus.*route'],
'BMTC - Others':['bmtc'],
'Bad Roads':['bad.*road|road.*bad'],
'Broken Storm Water Drains':['(broken|damage).*(drain)'],
'Cattle':['(cattle|cows|buffaloes|goats)'],
'Clearing of Blockage of Under Ground Drainage Pipelines and Replacement of Damaged or Missing Manhole Cover':['clearing|blockage|under|ground|drainage|pipelines|replacement|damaged|missing|manhole|cover'],
'Desilting - Lakes':['lake'],
'Diseases':['diseases|malaria|dengue|cholera'],
'Electricity':['electricity|power|current|power.*cut'],
'Flooding of Roads and Footpaths':['((water|flood|flow).*(roads|footpaths))|((roads|footpaths).*(water|flood|flow))'],
'Footpaths':['footpath'],
'Garbage':['garbage|waste|plastic|dirt'],
'Government Land Encroachment':['(government.*land).*(encroach|occupy|illegal)'],
'Hawkers and Vendors':['(hawkers|vendors)'],
'Hoardings':['(hoardings|advertisements)'],
'Illegal posters and Hoardings':['posters|hoardings|banner|ads|advertise'],
'Lakes - Others':['lake'],
'Maintenance of Roads and Footpaths - Others':['(maintenance).*(roads|footpaths)'],
'Manholes':['(manholes|manhole|man hole)'],
'Mosquitos':['mosquito|mosquitoe|mosquitoes|dengue|malaria'],
'Need New Streetlights':['(need|no|new).*(streetlight|light)'],
'Need New Toilets ':['toilets|toilet|urinal|urinate'],
'New Bus Shelters':['bus.*shelter|shelter.*bus'],
'No Sewage Drains':['drain'],
'Noise Pollution':['noise|noise.*pollution|siren|speakers|speakers|loud'],
'Others':['others'],
'Overflow of Storm Water Drains':['overflow.*(drains|pipes)'],
'Parking Violations':['parking|parked|parker'],
'Parks and playgrounds':['(parks|playgrounds|park|play|playground)'],
'Potholes':['(pot hole|holes|pothole)'],
'Public Nuisance':['(public.*nuisance|nuisance)'],
'Repair of streetlights':['((light).*(repair|broke|damage))|((repair|broke|damage).*(light))'],
'Sewage and Storm Water Drains - Others':['(sewage|storm|water|drains|drainage)'],
'Stray Dogs':['(stray|dogs|dog)'],
'Traffic':['(traffic|vehicles)'],
'Trees, Parks and Playgrounds - Others':['(trees|parks|playgrounds|tree)'],
# BUG FIX: was ['encroach','building','built']; ''.join() turned that into the
# never-matching pattern 'encroachbuildingbuilt'.  Use one alternation instead.
'Unauthorized Construction':['encroach|building|built'],
'Water Leakage':['water.*leak|leak.*water'],
'Water Supply ':['water.*supply|supply.*water']}
# +
# Phrase extracts per class (entity / complaint-word phrases) from p.tsv.
extracts_df=pd.read_csv('./p.tsv',sep='\t',usecols=["category_name","Entity","complaint words",],na_filter=False)
# Keep only rows whose class is one of the classes used in this notebook.
extracts_df = extracts_df[extracts_df['category_name'].isin(class_names)]
# extracts_df
def combine(x):
    """Merge a group's values into one lowercase, pipe-separated string.

    Duplicates are dropped; ordering follows set iteration order.
    """
    unique_values = set(x.tolist())
    return '|'.join(unique_values).lower()
# Collapse all extract phrases for each class into single pipe-joined patterns.
extracts_df = extracts_df.groupby("category_name").agg({"Entity": combine,"complaint words":combine })
extracts_df.to_csv("extracts.csv")
extracts_df
# -
import re
# Per-class word lists: the de-hyphenated, lowercased class-name words plus
# the hand-curated `keywords` for that class.
class_words = [ re.sub('-','',x).lower().split() + keywords[x] for x in class_names ]
print(class_words,len(class_words))
# +
########### discrete LFs ####
import os
import re
# Filler words/punctuation stripped from class names before keyword extraction.
stopwords_pattern = ' of| and| no| others| or| -|,|no '
def ltp(x):
    """Join a list of words into a single regex alternation group: "(w1|w2|...)"."""
    return "({})".format("|".join(x))
def create_LF_Based_On_Category_Name(debug=False):
    """Generate one labeling function per class into d_Category_Name_LFs.py.

    Each generated LF_Category_Name_<Class>(c) returns the 1-based class id
    when any word of the class name occurs in c['complaint_text'], else 0.
    """
    # "w" mode truncates any previous file (replacing the old remove-then-
    # append dance) and the context manager guarantees the handle is closed.
    with open("d_Category_Name_LFs.py", "w") as f:
        for i in range(len(class_names)):
            functionName = re.sub(r'( )+|-|,','',class_names[i])
            # Drop filler words from the class name, then comma-split into keywords.
            pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
            pattern= re.sub("( )+",",",pattern)
            pattern= re.sub(" $","",pattern)
            words = pattern.split(',')
            wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
            pattern = ltp(words)
            if(debug):
                print(pattern)
            f.write("\n")
            f.write(r'''def LF_Category_Name_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    if(len(set(c['complaint_text']).intersection(words))>0):
        return '''+str(i+1)+'''
    return 0''')
            f.write("\n")
def create_LF_Based_On_Keywords(debug=False):
    """Generate keyword labeling functions into d_KeyWord_Based_LFs.py.

    Like the category-name LFs, but each class's word list is extended with
    its hand-curated `keywords` entries before the token-match test.
    """
    with open("d_KeyWord_Based_LFs.py", "w") as f:  # "w" truncates the old file
        for i in range(len(class_names)):
            functionName = re.sub(r'( )+|-|,','',class_names[i])
            pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
            pattern= re.sub("( )+",",",pattern)
            words = pattern.split(',')
            ##### add keywords #####
            words = words+ keywords[class_names[i]]
            ####
            wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
            pattern = ltp(words)
            if(debug):
                print(pattern)
            f.write("\n")
            f.write(r'''def LF_KeyWord_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    if(len(set(c['complaint_text']).intersection(words))>0):
        return '''+str(i+1)+'''
    return 0''')
            f.write("\n")
def create_LF_Extracts_Phrases_Regex(debug=False):
    """Generate regex LFs from the aggregated phrase extracts into
    d_Regex_Based_Extracts_Phrases_LFs.py.

    Only classes present in extracts_df get an LF; each generated function
    regex-searches the class's pipe-joined phrases in the raw description.
    """
    with open("d_Regex_Based_Extracts_Phrases_LFs.py", "w") as f:  # "w" truncates
        for i in range(len(class_names)):
            if(class_names[i] in extracts_df.index.tolist()):
                functionName = re.sub(r'( )+|-|,','',class_names[i])
                pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
                pattern= re.sub("( )+",",",pattern)
                words = pattern.split(',')
                ##### add keywords #####
                words = words+ keywords[class_names[i]]
                ####
                wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
                if(debug):
                    print(pattern)
                f.write("\n")
                f.write(r'''def LF_Extract_Phrase_Regex_'''+functionName+'''(c):
    pattern = \''''+extracts_df.loc[class_names[i]]['complaint words']+'''\'
    if(re.search(pattern,c['complaint_description'],flags=re.I)):
        return '''+str(i+1)+'''
    return 0''')
                f.write("\n")
def create_LF_Description_Regex(debug=False):
    """Generate description-regex labeling functions into
    d_Regex_Based_Description_LFs.py.

    Each generated LF_Desc_Regex_<Class>(c) regex-searches the class's
    `regexPatterns` entry in the raw complaint description.
    """
    with open("d_Regex_Based_Description_LFs.py", "w") as f:  # "w" truncates
        for i in range(len(class_names)):
            functionName = re.sub(r'( )+|-|,','',class_names[i])
            pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
            pattern= re.sub("( )+",",",pattern)
            words = pattern.split(',')
            ##### add keywords #####
            words = words+ keywords[class_names[i]]
            ####
            wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
            if(debug):
                print(pattern)
            f.write("\n")
            f.write(r'''def LF_Desc_Regex_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    pattern = \''''+''.join(regexPatterns[class_names[i]])+'''\'
    if(re.search(pattern,c['complaint_description'],flags=re.I)):
        return '''+str(i+1)+'''
    return 0''')
            f.write("\n")
def create_LF_Title_Regex(debug=False):
    """Generate title-regex labeling functions into d_Regex_Based_Title_LFs.py.

    Each generated LF_Title_Regex_<Class>(c) regex-searches the class's
    `regexPatterns` entry in the raw complaint title.
    """
    with open("d_Regex_Based_Title_LFs.py", "w") as f:  # "w" truncates
        for i in range(len(class_names)):
            functionName = re.sub(r'( )+|-|,','',class_names[i])
            pattern = re.sub(stopwords_pattern , '', class_names[i].lower().strip())
            pattern= re.sub("( )+",",",pattern)
            words = pattern.split(',')
            ##### add keywords #####
            words = words+ keywords[class_names[i]]
            ####
            wordsStr = '['+','.join(['"'+x+'"' for x in words])+']'
            pattern = ltp(words)
            if(debug):
                print(pattern)
            f.write("\n")
            f.write(r'''def LF_Title_Regex_'''+functionName+'''(c):
    words = '''+wordsStr+'''
    pattern = \''''+ ''.join(regexPatterns[class_names[i]]) +'''\'
    if(re.search(pattern,c['complaint_title'],flags=re.I)):
        return '''+str(i+1)+'''
    return 0''')
            f.write("\n")
# +
LF_Names = []           # ordered names of all active labeling functions
LF_output_map = dict()  # LF name -> 0-based class index it votes for
# Generate the two active LF families (title-regex and description-regex).
create_LF_Title_Regex()
create_LF_Description_Regex()
# create_LF_Based_On_Keywords()
# create_LF_Extracts_Phrases_Regex()
# create_LF_Based_On_Category_Name()
# create_LF_Based_On_Embeddings()
# create_LF_Based_On_TFIDF()
# create_LF_Based_On_Embeddings_Title()
# create_LF_Based_On_Embeddings_Description()
# for i in range(len(class_names)):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_Category_Name_'+functionName)
# LF_output_map['LF_Category_Name_'+functionName]=i
# for i in range(len(class_names)):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_Embedding_'+functionName)
# LF_output_map['LF_Embedding_'+functionName]=i
# for i in range(len(class_names)):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_TFIDF_'+functionName)
# LF_output_map['LF_TFIDF_'+functionName]=i
LF_l=[]
# for i in range(len(class_names)):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_KeyWord_'+functionName)
# LF_output_map['LF_KeyWord_'+functionName]=i
# LF_l.append(i)
# Register the title-regex LFs, then the description-regex LFs, in class
# order — LF position therefore maps back to its class via LF_l.
for i in range(len(class_names)):
    functionName = re.sub(r'( )+|-|,','',class_names[i])
    LF_Names.append('LF_Title_Regex_'+functionName)
    LF_output_map['LF_Title_Regex_'+functionName]=i
    LF_l.append(i)
for i in range(len(class_names)):
    functionName = re.sub(r'( )+|-|,','',class_names[i])
    LF_Names.append('LF_Desc_Regex_'+functionName)
    LF_output_map['LF_Desc_Regex_'+functionName]=i
    LF_l.append(i)
# for i in range(len(class_names)):
# if(class_names[i] in extracts_df.index.tolist()):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_Extract_Phrase_Regex_'+functionName)
# LF_output_map['LF_Extract_Phrase_Regex_'+functionName]=i
# LF_l.append(i)
# for i in range(len(class_names)):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_Title_Embedding_'+functionName)
# LF_output_map['LF_Title_Embedding_'+functionName]=i
# for i in range(len(class_names)):
# functionName = re.sub(r'( )+|-|,','',class_names[i])
# LF_Names.append('LF_Description_Embedding_'+functionName)
# LF_output_map['LF_Description_Embedding_'+functionName]=i
print('['+','.join(LF_Names)+']')
# -
# %load d_KeyWord_Based_LFs.py
# %load d_Regex_Based_Title_LFs.py
# %load d_Regex_Based_Description_LFs.py
# %load d_Regex_Based_Extracts_Phrases_LFs.py
# All 84 generated labeling functions (42 title-regex + 42 description-regex)
# in class order.  This was one source line split mid-identifier
# ("...HawkersandVendor" / "s,..."), a syntax error — rejoined here.
LFs = [LF_Title_Regex_AirPollution,LF_Title_Regex_AutorickshawsandTaxis,LF_Title_Regex_BMTCDriverorConductor,LF_Title_Regex_BMTCNeednewBusRoute,LF_Title_Regex_BMTCOthers,LF_Title_Regex_BadRoads,LF_Title_Regex_BrokenStormWaterDrains,LF_Title_Regex_Cattle,LF_Title_Regex_ClearingofBlockageofUnderGroundDrainagePipelinesandReplacementofDamagedorMissingManholeCover,LF_Title_Regex_DesiltingLakes,LF_Title_Regex_Diseases,LF_Title_Regex_Electricity,LF_Title_Regex_FloodingofRoadsandFootpaths,LF_Title_Regex_Footpaths,LF_Title_Regex_Garbage,LF_Title_Regex_GovernmentLandEncroachment,LF_Title_Regex_HawkersandVendors,LF_Title_Regex_Hoardings,LF_Title_Regex_IllegalpostersandHoardings,LF_Title_Regex_LakesOthers,LF_Title_Regex_MaintenanceofRoadsandFootpathsOthers,LF_Title_Regex_Manholes,LF_Title_Regex_Mosquitos,LF_Title_Regex_NeedNewStreetlights,LF_Title_Regex_NeedNewToilets,LF_Title_Regex_NewBusShelters,LF_Title_Regex_NoSewageDrains,LF_Title_Regex_NoisePollution,LF_Title_Regex_Others,LF_Title_Regex_OverflowofStormWaterDrains,LF_Title_Regex_ParkingViolations,LF_Title_Regex_Parksandplaygrounds,LF_Title_Regex_Potholes,LF_Title_Regex_PublicNuisance,LF_Title_Regex_Repairofstreetlights,LF_Title_Regex_SewageandStormWaterDrainsOthers,LF_Title_Regex_StrayDogs,LF_Title_Regex_Traffic,LF_Title_Regex_TreesParksandPlaygroundsOthers,LF_Title_Regex_UnauthorizedConstruction,LF_Title_Regex_WaterLeakage,LF_Title_Regex_WaterSupply,LF_Desc_Regex_AirPollution,LF_Desc_Regex_AutorickshawsandTaxis,LF_Desc_Regex_BMTCDriverorConductor,LF_Desc_Regex_BMTCNeednewBusRoute,LF_Desc_Regex_BMTCOthers,LF_Desc_Regex_BadRoads,LF_Desc_Regex_BrokenStormWaterDrains,LF_Desc_Regex_Cattle,LF_Desc_Regex_ClearingofBlockageofUnderGroundDrainagePipelinesandReplacementofDamagedorMissingManholeCover,LF_Desc_Regex_DesiltingLakes,LF_Desc_Regex_Diseases,LF_Desc_Regex_Electricity,LF_Desc_Regex_FloodingofRoadsandFootpaths,LF_Desc_Regex_Footpaths,LF_Desc_Regex_Garbage,LF_Desc_Regex_GovernmentLandEncroachment,LF_Desc_Regex_HawkersandVendors,LF_Desc_Regex_Hoardings,LF_Desc_Regex_IllegalpostersandHoardings,LF_Desc_Regex_LakesOthers,LF_Desc_Regex_MaintenanceofRoadsandFootpathsOthers,LF_Desc_Regex_Manholes,LF_Desc_Regex_Mosquitos,LF_Desc_Regex_NeedNewStreetlights,LF_Desc_Regex_NeedNewToilets,LF_Desc_Regex_NewBusShelters,LF_Desc_Regex_NoSewageDrains,LF_Desc_Regex_NoisePollution,LF_Desc_Regex_Others,LF_Desc_Regex_OverflowofStormWaterDrains,LF_Desc_Regex_ParkingViolations,LF_Desc_Regex_Parksandplaygrounds,LF_Desc_Regex_Potholes,LF_Desc_Regex_PublicNuisance,LF_Desc_Regex_Repairofstreetlights,LF_Desc_Regex_SewageandStormWaterDrainsOthers,LF_Desc_Regex_StrayDogs,LF_Desc_Regex_Traffic,LF_Desc_Regex_TreesParksandPlaygroundsOthers,LF_Desc_Regex_UnauthorizedConstruction,LF_Desc_Regex_WaterLeakage,LF_Desc_Regex_WaterSupply]
pkl.dump(LF_Names,open("LF_Names.p","wb"))
pkl.dump(LF_output_map,open("LF_output_map.p","wb"))
pkl.dump(LF_l,open("LF_l.p","wb"))
print(len(LF_Names))
print(len(LF_output_map))
print(len(LF_l))
# +
# returns: NoOf samples by NoOf LFs list of lists
def get_L_S_Tensor(df,msg):
    """Apply every labeling function in `LFs` to every row of `df`.

    Returns a list of lists of shape (n_samples, n_LFs): each entry is an
    LF's class vote (1-based) or 0 for abstain.  Prints progress every
    500 rows using the module-level `start_time`.
    """
    L = []
    print('labelling ',msg,' data')
    for i in range(len(df.index)):
        row = df.iloc[i]
        # One vote per labeling function for this complaint.
        L.append([LF(row) for LF in LFs])
        if(i % 500 == 0 and i != 0):
            print(str(i)+'data points labelled in',(time.time() - start_time)/60,'mins')
    return L
import time
import datetime
start_time = time.time()
lt = time.localtime()
print("started at: {}-{}-{}, {}:{}:{}".format(lt.tm_mday,lt.tm_mon,lt.tm_year,lt.tm_hour,lt.tm_min,lt.tm_sec))
test_L_S = get_L_S_Tensor(test_df,'discrete test')
pkl.dump(test_L_S,open("test_L_S_regex84.p","wb"))
train_L_S = get_L_S_Tensor(train_df,'discrete train')
pkl.dump(train_L_S,open("train_L_S_regex84.p","wb"))
# gold_L_S = get_L_S_Tensor(gold_df,'discrete gold')
# print(np.array(gold_L_S).shape)
# pkl.dump(gold_L_S,open("gold_discrete.p","wb"))
# print()
print(str(datetime.timedelta(seconds=time.time() - start_time)))
# +
import scipy.sparse as sp
import _pickle as pkl
# L_train = pkl.load(open("train_L_S_discrete.p","rb"))
# L_train = sp.csr_matrix(L_train)
# L_gold = pkl.load(open("gold_discrete.p","rb"))
# print(np.array(L_gold).shape)
# L_gold = sp.csr_matrix(L_gold)
L_train = pkl.load(open("train_L_S_regex84.p","rb"))
print(np.array(L_train).shape)
L_train = sp.csr_matrix(L_train)
L_gold = pkl.load(open("test_L_S_regex84.p","rb"))
print(np.array(L_gold).shape)
L_gold = sp.csr_matrix(L_gold)
# +
import os
import numpy as np
from snorkel import SnorkelSession
session = SnorkelSession()
# -
from snorkel.learning import GenerativeModel
from sklearn.externals import joblib
gen_model = GenerativeModel()
# +
#training with 84 regex LFs
# Note: We pass cardinality explicitly here to be safe
# Can usually be inferred, except we have no labels with value=3
start_time = time.time()
lt = time.localtime()
print("started at: {}-{}-{}, {}:{}:{}".format(lt.tm_mday,lt.tm_mon,lt.tm_year,lt.tm_hour,lt.tm_min,lt.tm_sec))
gen_model.train(L_train, cardinality=42)
# gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6)
# Persist the trained generative model.  BUG FIX: this previously dumped the
# undefined name `clf` (NameError); the trained object is `gen_model`.
joblib.dump(gen_model, 'snorkel_gen_model.pkl')
print(str(datetime.timedelta(seconds=time.time() - start_time)))
# +
# rerun
# Note: We pass cardinality explicitly here to be safe
# Can usually be inferred, except we have no labels with value=3
start_time = time.time()
lt = time.localtime()
print("started at: {}-{}-{}, {}:{}:{}".format(lt.tm_mday,lt.tm_mon,lt.tm_year,lt.tm_hour,lt.tm_min,lt.tm_sec))
gen_model.train(L_train, cardinality=42)
# gen_model.train(L_train, epochs=100, decay=0.95, step_size=0.1 / L_train.shape[0], reg_param=1e-6)
# joblib.dump(gen_model, 'snorkel_gen_model.pkl')
print(str(datetime.timedelta(seconds=time.time() - start_time)))
# -
train_marginals = gen_model.marginals(L_train)
train_marginals = np.array(train_marginals)
train_GenLabels = np.argmax(train_marginals,axis=1)
print(train_GenLabels.shape)
train_GenLabels = [class_names[x] for x in train_GenLabels]
train_df["predicted_label"]=train_GenLabels
train_df.to_csv("ICMCwithSnorkelPredictedLabels.csv")
# +
gold_marginals = gen_model.marginals(L_gold)
gold_marginals = np.array(gold_marginals)
gold_GenLabels = np.argmax(gold_marginals,axis=1)
gold_GenLabels = [class_names[x] for x in gold_GenLabels]
gold_df_out=pd.read_csv('./clean-gold-labels.tsv',sep='\t',na_filter=False)
gold_df_out["predicted_label"]=gold_GenLabels
gold_df_out.to_csv("500ExampleswithSnorkelPredictedLabels.csv")
# -
# NOTE(review): this loads the *discrete* LF matrix from an earlier run
# ("test_L_S_discrete.p"), while the current run saved "test_L_S_regex84.p" —
# confirm which label matrix the evaluation below is meant to use.
L_test = pkl.load(open("test_L_S_discrete.p","rb"))
L_test = sp.csr_matrix(L_test)
# +
import numpy as np
# Marginal class probabilities per test row; argmax gives the 0-based label.
dev_marginals = gen_model.marginals(L_test)
dev_marginals = np.array(dev_marginals)
print(dev_marginals.shape)
GenLabels = np.argmax(dev_marginals,axis=1)
print(GenLabels.shape)
# +
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from collections import defaultdict
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
def report2dict(cr):
    """Parse the text output of sklearn's classification_report into a
    DataFrame indexed by class label, with one column per metric.

    BUG FIX: the original split each row on spaces and took row[0] as the
    label, so any label containing spaces (every multi-word class name,
    "macro avg", ...) shifted the columns and crashed on float().  The metric
    values are always the LAST len(measures) tokens, so parse from the right.
    """
    # Tokenize non-empty lines.
    rows = [[tok for tok in line.split(" ") if tok] for line in cr.split("\n")]
    rows = [r for r in rows if r]
    measures = rows[0]  # e.g. ['precision', 'recall', 'f1-score', 'support']
    n = len(measures)
    D_class_data = defaultdict(dict)
    for row in rows[1:]:
        # Label may span several tokens; values are the trailing n tokens.
        label = " ".join(row[:-n]) if len(row) > n else row[0]
        values = row[-n:]
        for m, v in zip(measures, values):
            try:
                D_class_data[label][m.strip()] = float(v.strip())
            except ValueError:
                pass  # non-numeric token (e.g. sklearn's short "accuracy" row)
    return pd.DataFrame(D_class_data).T
def predictAndPrint(true_labels,pl):
    """Print overall accuracy and return a per-class metrics DataFrame
    for predictions `pl` against `true_labels`."""
    print("acc", accuracy_score(true_labels, pl))
    # print(precision_recall_fscore_support(true_labels,pl,average='macro'))
    # draw2DArray(confusion_matrix(true_labels,pl))
    text_report = classification_report(true_labels, pl, target_names=class_names)
    return report2dict(text_report)
# +
#load true test labels
import pandas as pd
test_df=pd.read_csv('./complaints_test_data_clean.csv',usecols=["category_name","complaint_title","complaint_description",],na_filter=False)
colsize = len(test_df['category_name'])  # number of test rows
# Categorical codes give integer labels in sorted-category order, matching
# the index order of `class_names` used for the predictions.
test_df['category_name'] = test_df["category_name"].astype('category')
test_df['true_label'] = test_df['category_name'].cat.codes
true_labels = test_df['true_label'].tolist()
test_df
# -
#snorkel
Results = predictAndPrint(true_labels,GenLabels)
Results
#majority
import math
# NOTE(review): again loads the discrete-LF matrix from an earlier run —
# confirm it matches the 84-regex setup evaluated above.
L_test = pkl.load(open("test_L_S_discrete.p","rb"))
L_test = pd.DataFrame(L_test)
# 0 means "abstain": mask it out so it never wins the majority vote.
L_test = L_test.replace(0, np.NaN)
predicted_labels=L_test.mode(axis=1)[0].tolist()
# print(predicted_labels)
# Votes are 1-based class ids; shift to 0-based, -1 when every LF abstained.
predicted_labels = [ int(x)-1 if not math.isnan(x) else -1 for x in predicted_labels ]
MajLabels = predicted_labels
Results = predictAndPrint(true_labels,predicted_labels)
Results
| icmc/.ipynb_checkpoints/Create_Discrete_LFs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8T_6UJ3LJVp3" executionInfo={"status": "ok", "timestamp": 1620618274956, "user_tz": -330, "elapsed": 2137, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
#This function lets you activate matplotlib interactive support
# at any point during an IPython session.
# In [1]: %matplotlib inline
# In this case, where the matplotlib default is TkAgg:
from sklearn.tree import DecisionTreeRegressor
from pydotplus import graph_from_dot_data #Load graph as defined by data in DOT format.
from sklearn.tree import export_graphviz #Export a decision tree in DOT format.
# + id="VD0hm5fLMYap" executionInfo={"status": "ok", "timestamp": 1620618275699, "user_tz": -330, "elapsed": 2871, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}}
df = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/ML/Position_Salaries.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="CzJOc5HfN_Fk" executionInfo={"status": "ok", "timestamp": 1620618275702, "user_tz": -330, "elapsed": 2866, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="f479e069-5ccb-4d23-d06c-2395626f6747"
df.head()
# + [markdown] id="_90U5-lROGqy"
# ###**EDA**
# + colab={"base_uri": "https://localhost:8080/"} id="IHogDU5NOAvM" executionInfo={"status": "ok", "timestamp": 1620618275705, "user_tz": -330, "elapsed": 2858, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="ca83eac5-ed68-44cf-de72-66287c8715f7"
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="uMT04RamOQCF" executionInfo={"status": "ok", "timestamp": 1620618275706, "user_tz": -330, "elapsed": 2849, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="a47e400e-9863-4ebe-d579-8c8a280b80c5"
df.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="B2mFAIzBOXwb" executionInfo={"status": "ok", "timestamp": 1620618275707, "user_tz": -330, "elapsed": 2841, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="1babb8a4-94f3-4047-fdc0-6163b1b2b273"
df.columns
# + colab={"base_uri": "https://localhost:8080/"} id="5m1Q4WRnOjGT" executionInfo={"status": "ok", "timestamp": 1620618275709, "user_tz": -330, "elapsed": 2836, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="6f38615e-c215-4f82-f7c5-128844695fca"
df.isnull().sum()
# + [markdown] id="4y1UHaU_OoK9"
# ###**Model fit & training**
# + id="FL5E_x5COms0" executionInfo={"status": "ok", "timestamp": 1620618275711, "user_tz": -330, "elapsed": 2831, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}}
# Feature: position level (column 1, kept 2-D for sklearn); target: salary (column 2).
X = df.iloc[:, 1:2].values
y = df.iloc[:, 2].values
# + colab={"base_uri": "https://localhost:8080/"} id="ufAN0k_TPUPs" executionInfo={"status": "ok", "timestamp": 1620618275712, "user_tz": -330, "elapsed": 2826, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="1a03aebe-18b7-4400-afb7-7b080e389f44"
regressor = DecisionTreeRegressor()
regressor.fit(X, y)
# + [markdown] id="owE9yZmgT9ti"
# ###**Prediction**
# + colab={"base_uri": "https://localhost:8080/"} id="QelCgTmMT65_" executionInfo={"status": "ok", "timestamp": 1620618275713, "user_tz": -330, "elapsed": 2820, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="5d8c662d-6267-4c2f-db0c-f225bb7e0a2e"
# Predict the salary for level 11; reshape to the (n_samples, n_features)
# 2-D array the regressor expects.
y_pred = regressor.predict(np.reshape(np.array(11),(-1, 1))) #Predict class or regression value for X.
y_pred
# + [markdown] id="peCQBQ0vUUr6"
# ###**Saving Decision tree**
# + colab={"base_uri": "https://localhost:8080/"} id="BSMClomwUQ-8" executionInfo={"status": "ok", "timestamp": 1620618276104, "user_tz": -330, "elapsed": 3204, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="aa02078b-b4df-4705-9779-001e777c2cd1"
dot_data = export_graphviz(regressor, filled=True, rounded=True)
graph = graph_from_dot_data(dot_data)
graph.write_png("regressor_position.png")
# + colab={"base_uri": "https://localhost:8080/", "height": 601} id="Usx6zK4tX8ur" executionInfo={"status": "ok", "timestamp": 1620619110132, "user_tz": -330, "elapsed": 2643, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjNFd_aXbbrGY9DhDrUsGEO2RoR0SuxrzuyePup8A=s64", "userId": "03497878686001103417"}} outputId="02f44571-1cd8-4192-82d3-7f80368a29f5"
import cv2
img = cv2.imread("regressor_position.png")
plt.figure(figsize = (15, 15))
plt.imshow(img)
# + id="ppVGWyQ_a7pd"
| Decision_tree_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 字符与字符串
# 在我们进入这一部分最后的关卡,面对关底 Boss 之前,我们需要稍微做一点准备,补上关于 Python 字符和字符串的一些知识。
#
# 字符串可能是编程中使用最多的基本数据类型,Python 提供了强大的字符与字符串处理能力,我们经常用到的包括:
# * 一个名为 `string` 的模块(*module*),提供了常用的一些字符串(比如所有英文大写和小写字母);
# * 一个名为 `str` 的类,Python 里的字符串就是这个类的对象实例;
# * 一个名为 `re` 的模块,提供对**正则表达式**(*regular expression*)的支持,这是一个强大的字符串匹配查找和替换工具,我们以后会介绍。
#
# 这一章我们就来学习上面列出的前两个。
# ## 字符、ASCII 和 Unicode
# 字符(*character*)是我们人类文字的最小单元,字母、数字、标点和其他符号都是字符,中日韩的单字也是字符——中日韩语言的独特性是老外发明计算机和各种初期标准时没太多考虑的,然后一直痛到现在。
#
# 开始时还是挺简单的,字符在计算机里其实就是用整数表示的,一个整数对应一个字符,查表就知道哪个是哪个,这个表叫做 [ASCII](https://www.cs.cmu.edu/~pattis/15-1XX/common/handouts/ascii.html)(全称是 *American Standard Code for Information Interchange*),里面有不多不少 128 个字符,用 7 位二进制数(即 7 个比特——不是 7 个比特币)或者两位十六进制数就可以搞定,还空出了最高的一位(为 0)。
#
# ASCII 码完美解决了英语问题,在计算机发展的初级阶段英语是标准语言也是唯一可用的语言。后来非英语的欧美国家通过扩展 ASCII 码表引入了希腊语、俄语等语种字符,方法就是把一个字节的另外 128 个数用上,扩展出来的那些字符编码的最高位固定为 1,这样保持和 ASCII 兼容,称为 EASCII 或者 High ASCII。
# 真正颠覆性的变化是中日计算机领域的发展。中国仅收入《新华字典》的常用汉字就有 10000 多个,日本也有超过 2000 个常用汉字和约 100 个假名字符,远远超出 ASCII 或者一个字节能表示的范围,于是中日(还有韩国和中国港澳台地区)纷纷动手制定自己的字符集和编码标准。
#
# 很多人不知道,我国在 1980 年就有了第一个计算机字符编码标准 GB2312-1980 公布实施,这个标准影响力之大,直到今天仍然存在,也让汉字编码(以及输入)的问题在计算机普及之前基本上就圆满解决了,真的要感谢中国最早一代的信息化专家们。
#
# GB2312 包含 6000 多个常用汉字,还有 600 多包含日文假名在内的其他语言字符。GB2312 是双字节方案,用两个字节(16 个比特)表示一个字符,两个字节的最高位都是 1,这样不会和 ASCII 字符冲突,但会和 EASCII 字符冲突,所以如果不告诉计算机这是 GB2312 编码的文字,计算机会把一个汉字认成两个 EASCII 字符,显示一堆 Çéäæûá 给你看,这就是俗称的“乱码”。
# 整个 8、90 年代东亚地区的计算机都处于这样的窘境,不仅和欧美不兼容,彼此之间也不兼容,我国就有大陆的 GB2312 和港澳台地区的 Big5 两种互不兼容的编码体系,日本和韩国还有各自几个不一样的编码,那个年代的硬核玩家必然会装着动态切换文字编码的工具。
#
# 1991 年回过神来的行业领袖和国际标准组织终于开始着手统一工作,通过来自施乐(Xerox)和苹果(Apple)的专家和 ISO 组织一起推动,拿出了一个统一的字符集方案 UCS(Universal Coded Character Set),经过几十年的迭代,目前最新的 12.1 版本收录了超过 13 万个字符,涵盖了 150 多种现存和曾经存在的人类语言,还有好多空位,所以大家正在往里头使劲塞 Emoji 表情包。UCS 里每个字符都对应一个序号,这个序号采用四个字节表示,称为 UCS-4 编码。
# 这里要提醒大家,字符集和字符编码是两个不同的概念。字符集决定了可以被识别和处理的字符是哪些,而字符编码是计算机程序如何用一个数字来表示特定字符。就好像学校里就这么些学生,是固定的,但可以通过身份证也可以通过学号找到一个学生,那么身份证号和学号就是学生两种不同的编码方式。
#
# UCS 中最常用的字符都集中在靠前部分,也就是 UCS-4 编码中只用到低位两字节而高位两字节都是 0 的那些,这部分字符又被称为“基本多文本平面(*Basic Multilingual Plane, BMP*)”,其实用两字节就可以编码,这个两字节编码方式就叫 UCS-2。
#
# UCS 字符集和配套的 UCS-4、UCS-2 都是 ISO 国际标准,但大家很少直接用这两个编码,原因是:
# 1. UCS-4 编码要用四个字节,对于最经常使用的英文来说,ASCII 编码只需要一个字节,UCS-4 用四个字节而前面大部分都是 0,很浪费;即使中文,常用的一般也是两字节编码,用 UCS-4 也很浪费。
# 2. UCS-2 编码和东亚各国已经用了几十年的双字节编码不兼容,而且容量也不太够。
#
# 所以大家开始寻找变通方案,那就是把字符集和编码方式分开,字符集最大限度和 UCS 一致,但用不一样的办法来编码不同的字符。
# 我国颁布于 1993 年的 GB13000 就是字符集标准,和 UCS-2 兼容;而 2005 年颁布的 GB18030 则是编码标准,这个标准采用单、双和四字节的变长编码方案,单字节兼容 ASCII 码,双字节兼容 GB2312-1980 和 GBK 汉字编码,而四字节部分处理不太常用的汉字和少数民族文字。GB18030 是强制标准,所有在中国出售的软件系统和服务都必须支持。
#
# 另一个我们常用的编码方式是 UTF-8,同样采用变长方案,英文采用单字节编码,与 ASCII 兼容,西欧其他语言的字符采用双字节编码,常用汉字采用三字节编码,罕用字符采用四字节编码。UTF-8 对中文用户来说,汉字编码比 GB18030 多用一个字节,体积要增加 50%,不过仍然是支持最广泛的事实标准。
#
# 很多现代编辑器都提供转码功能,可以在这些主流编码之间方便的转换。从最大限度兼容的角度,一般都会使用 UTF-8 编码,无论是源代码还是文档。
#
# 在软件开发领域,不少编程语言和核心库都诞生在 Unicode 之前,原本都不支持 Unicode,后续陆陆续续添加了 Unicode 支持,到现在基本上都把 Unicode 作为缺省方案了。最近 20 年诞生的新编程语言则基本上从开始就建立在 Unicode 基础上,有些甚至允许你用 Emoji 表情做变量名——当然这不能算是个好的编码风格。
# 字符在计算机里就是用整数来表示的,英文字符毫无例外都是最古老的 ASCII 码,其他字符对应的整数看采用的编码格式。而 Python 的 `str` 类缺省采用 Unicode 编码格式。
#
# Python 提供两个函数 `ord()` 和 `chr()` 来做字符和它对应的整数值之间的转换。前者给出输入字符对应的整数(UCS 编码的十进制表示),后者则反过来,根据输入的整数返回对应字符。
# Code point of the character '码' (decimal 30721).
ord('码')
# And back from code point to character.
chr(30721)
chr(30722)
c = '\u7802' # \u introduces a 16-bit UCS-2 escape; hex 0x7802 == decimal 30722
print(c)
# Python 官网有一篇介绍 [Python 中 Unicode 支持](https://docs.python.org/3/howto/unicode.html) 的文章,可以作为延伸阅读材料。
# ## 字符串值
# 字符串基本上就是一个字符列表,在很多编程语言里就是按照字符列表(数组)来处理的,也允许程序员像操作数组一样操作字符串,不过因为字符串使用的实在是太频繁了(远比数字要多),对程序的基本性能有重大影响,所以大多数编程语言都会对字符串做些内部的特殊处理以提升性能,绝大部分情况下这些特殊处理对我们写程序来说是透明的,我们不需要去关注,只把字符串当做字符列表就可以了。
#
# Python 里字符串值(*string literal*)可以用单引号、双引号、三个单引号或三个双引号来表示,主要区别是:
# * 单引号里面可以包含双引号字符;
# * 双引号里面可以包含单引号字符;
# * 三个引号括起来的可以包含多行文字。
'用单引号括起来的字符串里不能直接包含单引号,但可以有"双引号"'
"用双引号括起来的字符串里不能直接包含双引号,但可以有'单引号'"
"中文的引号其实是另外的字符,总是可以包含的”“‘’"
'''三个单或者双引号可以包含多行文字
就像这样'''
'其实多行文字并没有什么特别\n像这样插入一个换行字符也能实现'
# 上面的代码里我们看到了一个奇怪的东西 `\n`,关于这个有几点知识:
#
# * 字符串值里反斜线 `\` 加一个字符这种东西叫“转义符(*escape character*)”,代表一个特殊字符或者特殊指令(告诉解释器后面的字符如何理解),Python 解释器会在处理时自动翻译或处理;Python 支持的转义符列表可以参考 [这里](http://python-ds.com/python-3-escape-sequences),所以不管我们用单引号还是双引号都可以在字符值里用 `\"` `\'` 来输入需要的引号;如果我们想输入 `\` 怎么办?聪明人立刻就能想到,用 `\\` 就可以了;
# * 转义符 `\n` 代表一个特殊字符,也就是 ASCII 里的换行(*LF*, *linefeed*)符;ASCII 里还有个回车(*CR*, *carriage return*)符,可以用 `\r` 表示;回车换行的概念来自老式打字机,打字机打完一行需要按两个功能键,一个是回车,即移动滑轨让打字位置回到行首,另一个是换行,即走纸一行让打字位置走到下一行空白处;在计算机时代我们敲击键盘的换行(↩︎, Return)键就能完成这两个工作;
# * 如果在字符串前面写一个小写的 `r`,意思是 *raw*,就是告诉 Python 解释器这个字符串不要做任何转义处理,里面所有的 `\` 就是 `\` 字符,没有特殊含义,这种特殊标记的字符串有时候会有用,比如处理文件路径,或者正则表达式,后面我们会遇到的。
#
# 关于回车换行(LF 和 CR),有另外一个坑需要说下。当我们打开一个文本文件,无论是源代码还是别的什么,里面都有很多的回车换行符,藏在每一行行尾,并不肉眼可见,编辑器看到这些字符就另起一行显示内容,如果把这些回车换行符都去掉,整个文件就会显示成长长长长长的一行,那就没法读了。
#
# 然而,因为历史原因,不同的操作系统里保存文本文件时用的回车换行符是不一样的:
# * Mac OS 9 以及更早的 Mac OS 用的是单回车,也就是 `\r`;
# * Unix、Linux、Mac OS X 及改名的 macOS 用的是单换行,也就是 `\n`;
# * Windows 用的是回车加换行,也就是 `\r\n`。
#
# 所以 Windows 下创建的文件拿到其他系统一般是没问题的,但是 Windows 打开其他系统创建的文件就可能识别不出新行而显示成一大串。不过好在这么多年了,这个坑已经为人所熟知,大部分编辑器都能很智能的识别,就算不能识别也会允许你设置好告诉它如何处理。
#
# 在 Visual Studio Code 中打开文件,右下角就会显示这个文件里使用的换行符是哪一种,点击可以修改:
# <img src="assets/crlf-in-vsc.png" width="800">
# ## 组装字符串:f-string
# 我们写程序的时候经常要干的一件事是动态的攒一个字符串出来,里面有固定的文字,但是特定位置上要出现某些我们算出来的值。比如我们计算了一个题目,最后输出一句话,通常想写成这样:
#
# `计算完毕,获胜概率为 x%,败北概率为 y%,平局概率为 (100-x-y)%。`
#
# 我们当然可以用很笨的拼装办法比如:
x = 62
y = 29
print('计算完毕,获胜概率为 ' + str(x) + '%,败北概率为 ' + str(y) + '%,平局概率为 ' + str(100-x-y) + '%。')
# 这样的代码又难看又难维护,所以很多编程语言都提供了“参数化字符串”的方案,在 Python 3 里是所谓的 *f-string*,就是在字符串前面加一个小写的 `f`,下面是例子:
x = 62
y = 29
print(f'计算完毕,获胜概率为 {x}%,败北概率为 {y}%,平局概率为 {100-x-y}%。')
# 这和上面的笨办法输出完全一样。所谓 *f-string* 就是一个字符串前面加个字母 f,也就是 `f'...'` 或者 `f"..."` 这样的。这样的字符串里可以用一对大括号括起来任何合法的表达式,这个表达式会被计算,并用结果替换 `{表达式}`。在上面的例子中,`{x}` 会用 `x` 的值 62 替换,`{y}` 会用 `y` 的值 29 替换,`{100-x-y}` 会用 `100-x-y` 的值 9 替换,最后就得到了我们想要的结果。
#
# 这个方案是不是漂亮多了呢?
#
# 顺便,在 *f-string* 中如果想输入大括号怎么办?使用 `{{` 和 `}}` 就可以了。
# ## 字符串处理
# 先看两个基本操作:字符串串接和字符串长度。
hello = 'hello'
world = "world"
# The + operator concatenates strings
hw = hello + ' ' + world + '!'
print(hw)
# The built-in len() returns the string's length; it also works on many other
# data types, so we will run into it often
len(hw)
# 除了上面两个基本操作以外,Python 的字符串类 `str` 还提供了[大量的方法](https://docs.python.org/3/library/stdtypes.html#string-methods)来对字符串做各种处理,一些比较常用的包括:
# * `capitalize()` 将字符串对象中每个句子的首字母大写;
# * `upper()` 和 `lower()` 将字符串对象变成全大写和全小写;
# * `rjust()` 将字符串扩展到指定长度,并向右对齐,也就是在原字符串左边补空格;
# * `ljust()` 和 `center()` 类似,只是分别让原字符串左对齐(右边补空格)和居中(左右对称补空格)。
#
# 注意这一类方法都不会改变原来的字符串对象,而是把处理结果作为一个新的字符串返回,我们可以将其赋值给另一个变量然后使用。
hw.capitalize()
hello.upper()
hello.rjust(8)
hello.center(9)
# `str` 类还提供了一组方法来判断字符串是不是具备特定特征,比如:
# * `isalpha()` 判断字符串里是不是都是字母;
# * `isdigit()` 判断字符串里是不是都是数字;
# * `islower()` `isupper()` 判断字符串是不是全小写或者全大写字母。
#
# 这一类方法返回的都是 `True` 或者 `False`。
hello.isalpha()
hello.isdigit()
'42'.isdigit()
# `str` 类还提供了一组方法来在字符串内查找特定子串:
# * 可以用操作符 `in` 来检查字符串是不是包含某个子串;
# * `find()` `index()` `rindex()` 可以查找子串并给出子串的位置,区别是:
# * 如果没找到 `find()` 会返回 `-1`,而后两个会抛出运行时异常 `ValueError`;
# * `index()` 返回从左向右找到的第一个匹配的位置,`rindex()` 则是从右向左找。
#
# 下面来看看例子。
'Py' in 'Python'
'py' in 'Python' # 大小写敏感
s = 'Perform a string formatting operation. The string on which this method is called can contain literal text...'
s.find('string')
s.find('abracadabra')
s.index('string')
s.rindex('string')
# +
# 下面这句会抛出 "ValueError: substring not found" 的异常
# s.rindex('abracadabra')
# -
# 能查找子串,自然也能把找到的子串替换成其他字符串,这个方法叫 `replace`,这会对找到的所有子串进行替换,如果只希望替换一部分,可以传入一个“替换次数”的参数,那就只替换从左到右找到的前 n 个子串。和前面的例子一样,这个方法也不会改变原来的字符串对象,而是生成一个新的返回:
hello.replace('l', '{ell}')
hello.replace('l', '{ell}', 1)
# 关于 Python 中的字符串操作就先介绍这些,其他的以后碰到再说,有兴趣也可以去看看[官方手册里的说明](https://docs.python.org/3.5/library/stdtypes.html#string-methods),可以自己打开一个 notebook,建立几个字符串,然后对照官方手册里的方法一个个试试,会是个有趣的体验。
# ## 常用字符串
# 下面是 Python 内置的一些常用字符串,可以在我们的程序中直接使用(使用前需要先 `import`):
import string
string.ascii_lowercase # lowercase ASCII letters
string.ascii_uppercase # uppercase ASCII letters
string.ascii_letters # lowercase plus uppercase ASCII letters
string.digits # decimal digits
string.hexdigits # hexadecimal digits
string.octdigits # octal digits
string.punctuation # (English) punctuation characters
string.whitespace # whitespace characters: space, tab, newline, carriage return, etc.
string.printable # all of the above combined
# ## 小结
# * 了解字符集和字符编码(*optional*);
# * 了解 Python 中字符串的表示方法以及转义符;
# * 了解如何使用 *f-string*;
# * 了解 Python 中常用的字符串操作;
# * 了解 `string` 模块中提供的常用字符串。
| p1-a-string.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Documentație modele
#
# <NAME>, grupa _10LF383_
# ### 1. [Ridge Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge)
# class `sklearn.linear_model.Ridge`<i>(alpha=1.0, *, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)</i>
# ## Descriere
#
# Modelul este folosit pentru estimări când datele au multe dimensiuni dar un număr restrâns de intrări pentru fiecare dimensiune.
#
# În general, modelul Ridge Regression are o performanță mai scăzută pe setul de date folosit pentru antrenare comparativ cu Linear Regression, dar o putere mai mare de predicție pentru setul de date de testare, iar acest lucru se datorează penalizării suplimentare pe care o aplică sumei pătratelor datorate erorilor.
# Prin aplicarea acestei penalizări modelul evită să fie mult prea fidel construit pe datele din setul de instruire (“overfitting”).
#
# <img style="height: 50px; width:325px" src="Images/RidgeFormula.png" >
#
# Hiperparametrul $\alpha$ controlează cantitatea de penalizare, ia valori egale sau mai mari ca 0, cu cât este mai mare penalizarea, cu atât coeficienții tind mai mult spre coliniaritate:
# <sup>
# <a href="https://scikit-learn.org/stable/modules/linear_model.html#ridge-regression-and-classification">[1]</a>
# </sup>
# </h3>
# <table>
# <tr>
# <td><img style="height: 250px; width:400px" src="Images/Ridge1.png" ></td>
# <td><img style="height: 250px; width:400px" src="Images/Ridge2.png"></td>
# <td><img style="height: 250px; width:400x" src="Images/Ridge3.png" ></td>
# </tr>
# <tr>
# <td><img style="height: 250px; width:400x" src="Images/Ridge4.png"></td>
# <td><img style="height: 250px; width:400x" src="Images/Ridge5.png" ></td>
# </tr>
# <tr>
# <td colspan="3"><p><center>Imagini preluate din tutorialul "Regularization Part 1: Ridge Regression"
# <sup>
# <a href="https://www.youtube.com/watch?v=Q81RR3yKn30">[2]</a>
# </sup>oferit de "StatQuest with Josh Starmer"</center></p>
# </td>
# </tr>
# </table>
# ### Hiperparametri
# - `alpha`: "Regularization strength", reduce eroarea estimărilor, valori mai mari înseamnă regularizări mai puternice.
# ### 2. [LassoLars Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLars)
# class `sklearn.linear_model.LassoLars`<i>(alpha=1.0, *, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, positive=False, jitter=None, random_state=None)<i>
# ### Despre regresor
#
# Modelul LassoLars este un algoritm de tipul Least Angle Regression (Lars) ce implementează o regresie Lasso.
# Este un algoritm folosit pentru seturile de date cu multe dimensiuni și a fost dezvoltat de <NAME>, <NAME>, <NAME> și <NAME><sup><a href="https://en.wikipedia.org/wiki/Least-angle_regression">[3]</a></sup>. Algoritmul este similar cu "Forward Stepwise Regression".
#
# "Forward Stepwise Regression" este o variantă îmbunătățită a algoritmului "Forward Selection". Acesta din urmă cauta să obtina o predictie adecvată adăugând la fiecare pas în model variabila cu corelația cea mai puternică pentru variabila prezisă, dar în acest fel sunt neglijați ceilalți predictori corelați cu variabila adăugată și ignorați pentru că nu cresc prea mult predicția modelului. "Forward Stepwise Regression" în schimb face modificări de pondere la fiecare pas variabilelor adăugate în model pe masură ce sunt adaugate și alte variabile.
#
# "Least Angle Regression" este în schimb mai rapid decât "Forward Stepwise Regression" întrucat alege să facă schimbări în direcția optimă așa încât corelațiile dintre variabilele din model să fie egale.
#
# <div style="text-align:center">
# <img src="Images/LassoLars.png" >
# <p><center>Least Angle Regression
# <sup>
# <a href="https://www.quora.com/What-is-Least-Angle-Regression-and-when-should-it-be-used">[4]</a>
# </sup>
# </center>
# </p>
# </div>
#
# ### Descriere funcționare algoritm:
#
# Modelul pornește din μ0. Se caută variabila care corelează cel mai puternic cu y, sau altfel spus variabila față de care y descrie cel mai mic unghi (linia verde), în cazul de față x1. Algoritmul se deplasează în direcția variabilei x1 până când unghiul lui y față de x2 devine egal cu unghiul dintre y si x1, sau altfel spus când se ajunge la corelații egale (μ1). Din acest punct algoritmul sa va deplasa în direcția cea mai potrivită pentru a conserva egalitatea corelațiilor. Pe parcurs vor fi adăugate și alte variabile x3 si algoritmul se va deplasa într-o directie care să aducă cele 3 corelații la egalitate. Acest proces va continua până cand sunt epuizate toate variabilele.<sup><a href="https://www.quora.com/What-is-Least-Angle-Regression-and-when-should-it-be-used">[5]</a></sup>
#
# ### Hiperparametri
# - `alpha`: constantă care se înmulțește cu termenul de penalizare, implicit 1.
#
#
#
# ### 3. [Stochastic Gradient Descent Regressor](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor)
# class `sklearn.linear_model.SGDRegressor`<i>(loss='squared_loss', *, penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000,tol=0.001, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling', eta0=0.01,power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False)</i>
# ### Descriere
#
# Este un model liniar de regresie ce folosește Stochastic Gradient Descent pentru normalizarea funcției de eroare.
# Poate să fie utilizat cu un anumit tip al funcției de eroare, implementată ca și hiperparametru în cadrul modelului.
#
# Spre exemplu, modelul poate fi folosit cu funcția de eroare liniară sau pătratică. Acest lucru determină algoritmul să se axeze mai mult sau mai puțin pe corectarea valorilor aberante (outliers).
# <sup>
# <a href="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor">[6]</a>
# </sup>
# <img src="Images/GradientDescent.png" >
# <p><center>Gradient Descent
# <sup>
# <a href="https://www.oreilly.com/library/view/hands-on-machine-learning/9781491962282/ch04.html">[4]</a>
# </sup>
# </center>
# </p>
# ### Hiperparametri
# - `loss`: funcția de loss, se poate alege între _squared_loss_, _huber_, _epsilon_insensitive_ sau _squared_epsilon_insensitive_
# - `l1_ratio`: utilizat numai dacă `penalty` este `elasticnet`, definește raportul dintre impactul fiecărui termen de penalizare asupra funcției obiectiv
# - `alpha`: constanta care multiplică termenul de regularizare.
| Laborator9/Documentație modele II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

# Load the review comments and inspect the average comment length.
df = pd.read_csv('./brazil/brazil_reviews.csv')
df
df2 = df['comments']
dfl = df2.str.len()
# Mean character length of a review comment.
dfl.mean(axis = 0)
from langdetect import detect
# Detect the language of every comment. langdetect raises on empty or
# undetectable text, so record a 'Nan' placeholder for those entries.
langs = []
for i in df2:
    try:
        langs.append(detect(i))
    except Exception:  # was a bare `except:` — don't swallow KeyboardInterrupt/SystemExit
        langs.append('Nan')
import collections
# Tally detected languages and turn the counts into a dataframe.
c = collections.Counter(langs)
freq = pd.DataFrame.from_dict(c, orient='index').reset_index()
len(df2)
# Convert raw counts (column 0) into fractions of all comments.
freq = freq.apply(lambda x: x/len(df2) if x.name == 0 else x)
freq
freq_sorted = freq.sort_values(by= [0], ascending=False)
freq_sorted
export = freq_sorted.reset_index(drop=True)
# NOTE(review): the output name says hong_kong but the input is the Brazil
# reviews file — confirm the intended filename.
export.to_csv(r'hong_kong.csv')
| Analyser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fit pure component molecular parameters for Hexane
#
# This notebook has te purpose of showing how to optimize the molecular parameters of a pure fluid in SGTPy.
#
# First it's needed to import the necessary modules
import numpy as np
from scipy.optimize import minimize
from sgtpy import component, saftvrmie
# Now the experimental equilibria data is read. For Hexane this include vapor and liquid density and saturation temperature and pressure.
# Experimental equilibria data obtained from NIST WebBook
Tsat = np.array([290., 300., 310., 320., 330., 340., 350., 360.]) # saturation temperature, K
Psat = np.array([ 14016., 21865., 32975., 48251., 68721., 95527., 129920., 173260.]) # saturation pressure, Pa
rhol = np.array([7683.6, 7577.4, 7469.6, 7360.1, 7248.7, 7135. , 7018.7, 6899.5]) # saturated liquid density, mol/m3
rhov = np.array([ 5.8845, 8.9152, 13.087, 18.683, 26.023, 35.466, 47.412, 62.314]) # saturated vapor density, mol/m3
# Then is necessary to create an objective function, as ```fobj```. This function can be modified according to the available experimental data and the parameters to be optimized.
#
# For this fluid, $m_s, \sigma, \epsilon, \lambda_r$ are optimized and $\lambda_a$ is fixed to 6. The objective function measures the error for the calculated saturation pressure, liquid density and vapor density (weighted).
# objective function to optimize molecular parameters
def fobj(inc):
    """Objective for fitting SAFT-VR-Mie molecular parameters.

    inc : array-like of (ms, sigma, eps, lambda_r); lambda_a is fixed at 6.
    Returns the summed mean absolute relative error of the computed
    saturation pressure, liquid density and (0.1-weighted) vapor density
    against the experimental arrays Tsat/Psat/rhol/rhov defined above.
    """
    ms, sigma, eps, lambda_r = inc
    fluid = component(ms = ms, sigma = sigma , eps = eps, lambda_r = lambda_r , lambda_a = 6.)
    eos = saftvrmie(fluid)
    # Saturation pressure and phase molar volumes at each experimental point;
    # the experimental pressure seeds the solver.
    P = np.zeros_like(Psat)
    vl = np.zeros_like(rhol)
    vv = np.zeros_like(rhov)
    for i, (T, P0) in enumerate(zip(Tsat, Psat)):
        P[i], vl[i], vv[i] = eos.psat(T, P0)
    # Densities are the reciprocals of the molar volumes.
    rho_liq = 1/vl
    rho_vap = 1/vv
    error = np.mean(np.abs(P/Psat - 1))
    error += np.mean(np.abs(rho_liq/rhol - 1))
    error += 0.1*np.mean(np.abs(rho_vap/rhov - 1))
    return error
# The objective function is minimized using SciPy's ```minimize``` function.
# Initial guess for ms, sigma, eps and lambda_r (lambda_a is fixed at 6 inside fobj)
inc0 = np.array([2.0, 4.52313581 , 378.98125026, 19.00195008])
# Nelder-Mead is a derivative-free simplex search — no gradient of fobj is available
minimize(fobj, inc0, method = 'Nelder-Mead')
| examples/SGTPy's paper notebooks/Fit Pure components parameters Hexane.ipynb |
# +
# PCA in 2d on digit images.
# Based on fig 14.23 of of "Elements of statistical learning". Code is from <NAME>'s site:
# Code modified from
# https://github.com/empathy87/The-Elements-of-Statistical-Learning-Python-Notebooks/blob/master/examples/ZIP%20Code.ipynb
try:
import pandas as pd
except ModuleNotFoundError:
# %pip install -qq pandas
import pandas as pd
from matplotlib import transforms, pyplot as plt
import numpy as np
try:
from sklearn.decomposition import PCA
except ModuleNotFoundError:
# %pip install -qq scikit-learn
from sklearn.decomposition import PCA
from sklearn import datasets
import requests
from io import BytesIO
try:
import probml_utils as pml
except ModuleNotFoundError:
# %pip install -qq git+https://github.com/probml/probml-utils.git
import probml_utils as pml
try:
from tensorflow import keras
except ModuleNotFoundError:
# %pip install -qq tensorflow
from tensorflow import keras
import tensorflow as tf
# define plots common properties and color constants
plt.rcParams["font.family"] = "Arial"
plt.rcParams["axes.linewidth"] = 0.5
GRAY1, GRAY4, GRAY7 = "#231F20", "#646369", "#929497"
# Toggle blocks (if 1 / if 0) select which digit dataset to load; only the
# MNIST branch is active.
if 1:
    mnist = keras.datasets.mnist
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    # Scale pixel values into [0, 1].
    train_images = train_images / 255.0
    test_images = test_images / 255.0
    # Keep only the first 1000 samples to keep PCA fast.
    ndx = np.arange(1000)
    nsamples, nrows, ncols = train_images.shape
    X_train = np.reshape(train_images, (nsamples, nrows * ncols))
    X_train = X_train[ndx, :]
    y_train = train_labels[ndx]
    w = 30  # tile width in pixels for one digit in the montage (28 image + 2 margin)
if 0:
    # Alternative: sklearn's small 8x8 digits dataset.
    digits = datasets.load_digits()
    X_train = digits.data
    y_train = digits.target # 0..9
    w = 10 # pixels for one digit, 8+2
if 0:
    # Alternative: load numpy array from a compressed file on GitHub.
    # NOTE(review): the URL says goog.npy but the comment below refers to
    # zip.npy.npz — confirm which dataset is intended.
    url = "https://github.com/probml/probml-data/blob/main/data/goog.npy?raw=true"
    response = requests.get(url)
    rawdata = BytesIO(response.content)
    arr = np.load(rawdata)["arr_0"] #'../data/zip.npy.npz'
    # do train-test split by the last column
    train, test = arr[arr[:, -1] == 0], arr[arr[:, -1] == 1]
    X_train, X_test = train[:, 1:-1], test[:, 1:-1]
    y_train, y_test = train[:, 0].astype(int), test[:, 0].astype(int)
    w = 20 # pixels for one digit, 16+4
n_samples, n_features = X_train.shape
print(X_train.shape)
# Images are square, so side length is sqrt of the feature count.
img_size = int(np.sqrt(n_features))
# idx_3 = np.where(y_train == 3)[0]
# Select all images of a single digit class (9) and project them to 2-D.
idx_3 = np.where(y_train == 9)[0]
X_train_3 = X_train[idx_3]
X_train_3_pca = PCA(n_components=2).fit_transform(X_train_3)
# 5x5 grid positions at the 5/25/50/75/95 percentiles of each PCA axis,
# with the middle grid line pinned to the origin.
x_grid = np.percentile(X_train_3_pca[:, 0], [5, 25, 50, 75, 95])
y_grid = np.percentile(X_train_3_pca[:, 1], [5, 25, 50, 75, 95])
x_grid[2], y_grid[2] = 0, 0
# Left panel: 2-D PCA scatter with percentile grid lines; right panel: a
# montage of the digit images closest to each grid intersection.
fig, axarr = plt.subplots(1, 2, figsize=(6.7, 3.8), dpi=150, gridspec_kw=dict(width_ratios=[3, 2]))
plt.subplots_adjust(wspace=0.1)
# The right panel shows an image only — hide its spines and ticks.
for s in axarr[1].spines.values():
    s.set_visible(False)
axarr[1].tick_params(bottom=False, labelbottom=False, left=False, labelleft=False)
ax = axarr[0]
ax.scatter(X_train_3_pca[:, 0], X_train_3_pca[:, 1], s=1, color="#02A4A3")
ax.set_xlabel("First Principal Component", color=GRAY4, fontsize=8)
ax.set_ylabel("Second Principal Component", color=GRAY4, fontsize=8)
for i in ax.get_yticklabels() + ax.get_xticklabels():
    i.set_fontsize(7)
# Solid axes through the origin; dashed lines at the other percentile grid
# positions (index 2 is the origin itself, so it is skipped).
ax.axhline(0, linewidth=0.5, color=GRAY1)
ax.axvline(0, linewidth=0.5, color=GRAY1)
for i in range(5):
    if i != 2:
        ax.axhline(y_grid[i], linewidth=0.5, color=GRAY4, linestyle="--")
        ax.axvline(x_grid[i], linewidth=0.5, color=GRAY4, linestyle="--")
# White canvas for the 5x5 digit montage (w pixels per tile + 4-pixel margin).
img = np.ones(shape=(4 + w * 5, 4 + w * 5))
for i in range(5):
    for j in range(5):
        # Find the sample whose PCA projection is nearest this grid point.
        v = X_train_3_pca - np.array([x_grid[i], y_grid[j]])
        v = np.sqrt(np.sum(v**2, axis=-1))
        idx = np.argmin(v)
        # Circle the chosen sample in the scatter plot.
        ax.scatter(
            X_train_3_pca[idx : idx + 1, 0],
            X_train_3_pca[idx : idx + 1, 1],
            s=14,
            facecolors="none",
            edgecolors="r",
            linewidth=1,
        )
        # Paste the (negated, for dark-on-light display) digit image into its tile.
        img[j * w + 4 : j * w + 4 + img_size, i * w + 4 : i * w + 4 + img_size] = -X_train_3[idx].reshape(
            (img_size, img_size)
        )
ax = axarr[1]
ax.imshow(img, cmap="gray")
ax.set_aspect("equal", "datalim")
plt.tight_layout()
pml.savefig("pca_digits.pdf")
plt.show()
| notebooks/book1/20/pca_digits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook is about analyzing the 'world happiness report' data from the year 2015-2021 to gain insights from the data and to understand which factors effects the 'happiness score and economy' i.e GDP of a country and how the countries have performed over the years.Further 'kmeans clustering' have been performed on the '2021 world happiness report' data based on several factors such as GDP per capita, life expectancy, corruption,social support etc to form clusters of countries according to these factors
# Third-party imports for the happiness-report analysis
# (the original imported plotly twice; the duplicate is removed and the
# imports are grouped conventionally).
import numpy as np
import pandas as pd
import plotly as py
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode, iplot, plot
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

# Render plotly figures inline in the notebook.
init_notebook_mode(connected=True)
# Loading data into the notebook
data_2k15 = pd.read_csv('2015.csv')
data_2k16 = pd.read_csv('2016.csv')
data_2k17 = pd.read_csv('2017.csv')
data_2k18 = pd.read_csv('2018.csv')
data_2k19 = pd.read_csv('2019.csv')
data_2k20 = pd.read_csv('2020.csv')
data_2k21 = pd.read_csv('world-happiness-report-2021.csv')
data_2k15.head()
data_2k15.info()
data_2k16.head()
data_2k16.info()
data_2k17.head()
data_2k17.info()
data_2k18.head()
data_2k18.info()
data_2k18['Perceptions of corruption'] = data_2k18['Perceptions of corruption'].fillna(0)
data_2k19.head()
data_2k19.info()
data_2k20.head()
data_2k20.info()
data_2k21.head()
data_2k21.info()
# The dataset is already cleaned and there was only one NaN value in the year 2018
# Plotting top 5 and bottom 5 countries on the basis of their happiness score
# Five happiest and five least happy countries in the 2021 report,
# combined into one frame for a single horizontal bar chart.
happy_top5 = data_2k21.groupby('Country name')['Ladder score'].max().sort_values(ascending=False).head(5).reset_index()
happy_lower5 = data_2k21.groupby('Country name')['Ladder score'].max().sort_values(ascending=False).tail(5).reset_index()
new_data = pd.concat([happy_top5,happy_lower5],axis=0)
new_data.columns = ['Country','Score']
new_data
plt.figure(figsize=(10,7))
sns.barplot(y = new_data["Country"], x = new_data["Score"],palette='twilight',orient = "h")
plt.title("Top 5 and bottom 5 countries on the basis of happiness score",fontsize=13)
plt.xlabel("Happiness Score", fontsize= 12)
plt.ylabel("Country", fontsize= 12)
plt.xticks(fontsize= 12)
plt.yticks(fontsize= 12)
plt.xlim(0,8)
# Plotting happiness score of top 30 countries from 2015-2021 through plotly
# +
# Top-30 countries (by 2015 ranking) and their happiness score in each
# yearly report. The original built seven near-identical Scatter traces by
# copy-paste; they are generated from a small table instead.
df2015 = data_2k15.iloc[:30,:]
df2016 = data_2k16.iloc[:30,:]
df2017 = data_2k17.iloc[:30,:]
df2018 = data_2k18.iloc[:30,:]
df2019 = data_2k19.iloc[:30,:]
df2020 = data_2k20.iloc[:30,:]
df2021 = data_2k21.iloc[:30,:]

# (year label, score series, line color); the score column name changed
# across report years, hence the explicit per-year column lookup.
yearly_scores = [
    ("2015", df2015['Happiness Score'], 'red'),
    ("2016", df2016['Happiness Score'], 'blue'),
    ("2017", df2017['Happiness.Score'], 'green'),
    ("2018", df2018['Score'], 'black'),
    ("2019", df2019['Score'], 'pink'),
    ("2020", df2020['Ladder score'], 'purple'),
    ("2021", df2021['Ladder score'], 'orange'),
]

# One lines+markers trace per year; the x axis and hover text use the 2015
# country order, as in the original.
data = [go.Scatter(x = df2015['Country'],
                   y = scores,
                   mode = "lines+markers",
                   name = year,
                   marker = dict(color = color),
                   text= df2015.Country)
        for year, scores, color in yearly_scores]

layout = dict(title = 'Happiness score of top 30 countries from 2015 to 2021',
              xaxis= dict(title= 'Countries'),
              yaxis= dict(title= 'Happiness Score'),
              hovermode="x unified"
             )
fig = dict(data = data, layout = layout)
iplot(fig)
# -
# Plotting happiness score of the countries on the world map
# World map of 2021 happiness scores. All fields must come from the SAME
# dataframe so country names, hover text and z values stay row-aligned —
# the original mixed the 2015 country order with the 2021 scores, coloring
# countries with the wrong values.
data = dict(
        type = 'choropleth',
        marker_line_width=1,
        locations = data_2k21['Country name'],
        locationmode = "country names",
        z = data_2k21['Ladder score'],
        text = data_2k21['Country name'],
        colorbar = {'title' : 'Happiness score'})
layout = dict(title = 'Happiness Map for the year 2021',
             geo = dict(projection = {'type':'mercator'})
             )
choromap = go.Figure(data = [data],layout = layout)
iplot(choromap)
# Plotting top 5 and bottom 5 countries on the basis of their GDP per capita
# Five highest- and five lowest-GDP countries in the 2021 report,
# combined into one frame for a single horizontal bar chart.
gdp_top5 = data_2k21.groupby('Country name')['Logged GDP per capita'].max().sort_values(ascending=False).head(5).reset_index()
gdp_lower5 = data_2k21.groupby('Country name')['Logged GDP per capita'].max().sort_values(ascending=False).tail(5).reset_index()
new_data = pd.concat([gdp_top5,gdp_lower5],axis=0)
new_data.columns = ['Country','GDP']
new_data
plt.figure(figsize=(10,7))
sns.barplot(y=new_data['Country'], x=new_data['GDP'], palette='twilight', orient='h')
plt.title('Top 5 and bottom 5 countries on the basis of GDP Per Capita', fontsize=15)
plt.xlabel('GDP', fontsize=12)
plt.ylabel('Country', fontsize=12)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xlim(0, 14)
data_2k15['Year']='2015'
data_2k16['Year']='2016'
data_2k17['Year']='2017'
data_2k18['Year']='2018'
data_2k19['Year']='2019'
data_2k20['Year']='2020'
data_2k21['Year']='2021'
# Plotting a line plot to visualize the GDP of various countries over the years
# +
# Harmonise the differing GDP/country column names across the yearly
# reports and stack Country / GDP per capita / Year into one long frame.
data_2k15.rename(columns={'Economy (GDP per Capita)':'GDP per capita'},inplace=True)
data1=data_2k15.filter(['Country','GDP per capita',"Year"],axis=1)
data_2k16.rename(columns={'Economy (GDP per Capita)':'GDP per capita'},inplace=True)
data2=data_2k16.filter(['Country','GDP per capita',"Year"],axis=1)
data_2k17.rename(columns={'Economy..GDP.per.Capita.':'GDP per capita'},inplace=True)
data3=data_2k17.filter(['Country','GDP per capita','Year'],axis=1)
data_2k18.rename(columns={'Country or region':'Country'},inplace=True)
data4=data_2k18.filter(['Country','GDP per capita',"Year"],axis=1)
data_2k19.rename(columns={'Country or region':'Country'},inplace=True)
data5=data_2k19.filter(['Country','GDP per capita','Year'],axis=1)
data_2k20.rename(columns={'Country name':'Country','Explained by: Log GDP per capita':'GDP per capita'},inplace=True)
data6=data_2k20.filter(['Country','GDP per capita','Year'],axis=1)
data_2k21.rename(columns={'Country name':'Country','Explained by: Log GDP per capita':'GDP per capita'},inplace=True)
data7=data_2k21.filter(['Country','GDP per capita','Year'],axis=1)
# DataFrame.append is deprecated (removed in pandas 2.0); pd.concat is the
# supported equivalent and, like append's default, keeps per-frame indices.
data1 = pd.concat([data1, data2, data3, data4, data5, data6, data7])
# +
plt.figure(figsize=(10,8))
# One seaborn line per country, drawn in the same order as the original
# copy-pasted stanzas; US/UK get their short legend labels.
countries = ['India', 'United States', 'Finland', 'United Kingdom', 'Canada',
             'Switzerland', 'United Arab Emirates', 'Pakistan', 'Afghanistan',
             'Luxembourg']
short_labels = {'United States': 'US', 'United Kingdom': 'UK'}
for country in countries:
    df = data1[data1['Country']==country]
    sns.lineplot(x="Year", y="GDP per capita", data=df,
                 label=short_labels.get(country, country))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title("GDP per capita 2015-2021")
# -
# Plotting heatmap of the data to show correlation between various features
# Drop the score-decomposition ("Explained by: ...") and confidence-interval
# columns so the heatmap shows only the raw factor variables.
dcr =data_2k21.drop(['Standard error of ladder score', 'Logged GDP per capita','upperwhisker', 'lowerwhisker','Ladder score in Dystopia',
       'Explained by: Social support',
       'Explained by: Healthy life expectancy',
       'Explained by: Freedom to make life choices',
       'Explained by: Generosity', 'Explained by: Perceptions of corruption',
       'Dystopia + residual'],axis=1)
cor = dcr.corr() # Calculate the pairwise correlation of the remaining variables
sns.heatmap(cor, square = True) # Plot the correlation matrix as a heat map
# This heatmap represents the correlation between various features of the data.It can be seen that the ladder score i.e the happiness score mostly depends on features like 'GDP per capita,social support,healthy life expectancy and freedom to make life choices'.It is least correlated with ''generosity' and 'Perception of corruption'.
# +
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(12, 6), sharey='row', sharex='row')
axes[0, 0].scatter(x=data_2k21['GDP per capita'], y=data_2k21['Ladder score'])
axes[0, 0].set_xlabel('Log GDP per capita')
axes[0, 0].set_ylabel('Ladder score')
axes[0, 1].scatter(x=data_2k21['Social support'], y=data_2k21['Ladder score'])
axes[0, 1].set_xlabel('Social Support')
axes[0, 1].set_ylabel('Ladder score')
axes[1, 0].scatter(x=data_2k21['Explained by: Healthy life expectancy'], y=data_2k21['Ladder score'])
axes[1, 0].set_xlabel('Healthy life expectancy')
axes[1, 0].set_ylabel('Ladder score')
axes[1, 1].scatter(x=data_2k21['Explained by: Freedom to make life choices'], y=data_2k21['Ladder score'])
axes[1, 1].set_xlabel('Freedom To Make Life Choices')
axes[1, 1].set_ylabel('Ladder Score')
axes[2, 0].scatter(x=data_2k21['Explained by: Perceptions of corruption'],y=data_2k21['Ladder score'])
axes[2, 0].set_xlabel('Perceptions of Corruption')
axes[2, 0].set_ylabel('Ladder score')
axes[2, 1].scatter(x=data_2k21['Explained by: Generosity'], y=data_2k21['Ladder score'])
axes[2, 1].set_xlabel('Generosity')
axes[2, 1].set_ylabel('Ladder score')
fig.tight_layout()
# -
# From the above scatter plots, we can see that, except for Perception of Corruption, every other factor — GDP per Capita, Social Support, Healthy Life Expectancy, Freedom to Make Life Choices and Generosity — makes people happier.
#
# Top Countries In The World By Happiness Factors are as:
#
# - Happiest Country - Finland
# - Highest GDP Per Capita (Economy) - Luxembourg
# - Highest Social Support - Turkmenistan and Iceland
# - High Healthy Life Expectancy Country - Singapore
# - Highest score in freedom to make life choices - Norway and Uzbekistan
# - Highest Perception of Corruption - Singapore
# - Highest Generosity - Indonesia
# +
# Number of countries in each region of the 2021 report. value_counts() on
# the region column yields one bar per region; the original paired the raw
# (per-row) region column with per-country counts of 1, mis-aligning bars.
df = data_2k21
region_counts = df['Regional indicator'].value_counts()
trace = go.Bar( x = region_counts.index,
                y = region_counts.values,
                marker = dict(color = 'yellow',
                             line=dict(color='black',width=1.5)),
                text = region_counts.values,
                textposition='inside')
data = [trace]
layout = go.Layout(barmode = "group",
                   title = 'Counts of Countries according to their Region - 2021',
                   xaxis= dict(title= 'Countries'),
                   yaxis= dict(title= 'Counts'))
fig = go.Figure(data = data, layout = layout)
iplot(fig)
# -
# ### Clustering of countries
# Here we consider only eight parameters namely,happiness score,GDP per capita,social support,life expectancy,freedom,generosity, corruption and Dystopia residual for clustering the countries.
# Eight features used for k-means clustering of the 2021 countries.
data = data_2k21[['Ladder score','GDP per capita', 'Explained by: Social support',
       'Explained by: Healthy life expectancy',
       'Explained by: Freedom to make life choices',
       'Explained by: Generosity', 'Explained by: Perceptions of corruption','Dystopia + residual']]
# Scaling data to get better results
# NOTE(review): the scaled array returned by fit_transform() is discarded,
# so the clustering below actually runs on the UNSCALED data — assign the
# result (e.g. scaled = ss.fit_transform(data)) if scaling is intended.
ss = StandardScaler()
ss.fit_transform(data)
# K means Clustering
# +
def Kmeans_clst(X, nclust=2):
    """Cluster the rows of X with k-means.

    Returns a tuple of (per-row cluster labels, cluster centres).
    """
    estimator = KMeans(nclust)
    estimator.fit(X)
    labels = estimator.predict(X)
    centres = estimator.cluster_centers_
    return labels, centres
# Partition the countries into two k-means clusters.
clust_labels, cent = Kmeans_clst(data, 2)
kmeans = pd.DataFrame(clust_labels)
# Append each country's cluster label as a new 'kmeans' column.
data.insert((data.shape[1]),'kmeans',kmeans)
# -
# Plotting scatter plots between various features as a result of clustering
#Plot the clusters obtained using k means
# Plot the clusters obtained using k-means: one scatter per feature pair,
# coloured by cluster label.  A helper replaces six near-identical copies
# of the same plotting code; the figures produced are unchanged.
def _plot_kmeans_scatter(x_col, y_col, x_label, y_label):
    """Scatter data[y_col] against data[x_col], coloured by cluster label."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    scatter = ax.scatter(data[x_col], data[y_col], c=kmeans[0])
    ax.set_title('K-Means Clustering')
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    plt.colorbar(scatter)

_plot_kmeans_scatter('GDP per capita', 'Ladder score',
                     'GDP per Capita', 'Happiness score')
_plot_kmeans_scatter('Explained by: Healthy life expectancy', 'Ladder score',
                     'Healthy life expectancy', 'Happiness score')
_plot_kmeans_scatter('Explained by: Freedom to make life choices', 'Ladder score',
                     'Freedom to decisions', 'Happiness score')
_plot_kmeans_scatter('Explained by: Social support', 'Ladder score',
                     'Social support', 'Happiness score')
_plot_kmeans_scatter('Explained by: Healthy life expectancy', 'GDP per capita',
                     'Healthy life expectancy', 'GDP per Capita')
_plot_kmeans_scatter('Explained by: Social support', 'GDP per capita',
                     'Social support', 'GDP per Capita')
# From the above plots,we can observe that the countries that have higher happiness score and GDP are clustered together.Higher social support,healthy life expectancy and Freedom to make life choices ultimately contributes to higher happiness score and overall economy of a country.
| Happiness score analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # List
# - Definition
# - Examples
# - Concatenation
# - Repetition
# - Indexing
# - Slicing
# - Available Methods (append,pop,remove,insert etc.)
# - Class exercise
# Lists: creation, length, heterogeneous contents and nesting.
a = []  # an empty list has length 0
print(a)
print(len(a))
a = [1,2,3,4,5]
print(a)
print(len(a))
# A list may mix ints, strings, floats, complex, booleans, None and lists.
b = [1,"a",'B',11.,12.45,1+5j,True,False,None,"class2",[1,2,3,4]]
print(b)
print(len(b))
c = [2,3]
d = [3,4,5,b]  # nesting: the whole list b becomes one element of d
print(d)
c = [2,3]
d = [3,4,5,b[:3]]  # a slice of b (its first three elements) as one element
print(d)
c = [2,3]
d = [3,4,5,b[3]]  # a single element of b (the float 11.0)
print(d)
test1 = "123"
# +
# who
# -
# ###### concatenation
# `+` joins two lists into a brand-new list; the operands are unchanged.
a = [1,2,3]
b = [4,5,6]
print(a+b)
print(a+b)
a = [1,2,3]
b = [1,5,6]
print(a+b)  # duplicates are kept; concatenation is not a set union
# ###### repetition
# `*` repeats the list's contents n times in a new list.
a = [1,2,3]
b = a*5
print(b)
# # Indexing and slicing
l = [0,1,2,3,4,5,6,7,8,9]
# print(l[:])
# print(l[::])
# print(l[::-1])
# print(l[-2:9])
# print(l[2:-5])
print(l[::2])  # every second element, starting at index 0
a = [1,2,3,4,5]
print(dir(a))  # all attributes and methods available on a list
print(a)
a.append(10)  # append adds one element at the end, in place
print(a)
b = [100,200]
a.append(b)  # appending a list nests it as a single element
print(a)
# # pop
print(a)
print(a.pop())  # pop() removes and returns the last element
print(a)
a.pop(-1)  # pop(-1) is equivalent: remove the last element
print(a)
# #### extend
c = [1,2,3]
a.extend(c)  # extend appends each element of c individually, in place
print(a)
a+c  # concatenation builds a new list; `a` itself is unchanged
a = [1,2,3,4,5]
print(a)
a.insert(-1,1000)  # insert just before the last element
print(a)
a.insert(0,1000)  # insert at the front
print(a)
a.index(5)  # index of the first occurrence of 5
# # Write one line program to filter even and odd numbers from the list (0-50)
# Even/odd filtering with slices: a[::2] are the evens, a[1::2] the odds.
range(50)  # bare range objects; evaluating them prints nothing in a script
range(2,50)
print(list(range(10,50,5)))
print(list(range(50)))
a = list(range(51))  # the integers 0..50 inclusive
evens = a[::2]
odds = a[1::2]
print("Even numbers are", evens)
print("Odd numbers are", odds)
| 03_List.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
# +
# +
# Load the game log and preview its size and first rows.
data = pd.read_csv('game.csv')
print("Dataset Shape: ", data.shape)
data.head(10)
# -
# Drop the sparse trailing columns selected by iloc[:,29:200]
# (NOTE(review): this passes a DataFrame, not labels, to drop() — confirm
# it removes the intended columns), then drop identifier/bookkeeping
# columns.  Some labels ('h_name', 'h_league', 'h_game_number') appear
# twice in the list below.
data.drop(data.iloc[:,29:200], inplace = True, axis = 1)
res = data.drop(['date','number_of_game','day_of_week','v_name','v_league','v_game_number','h_name',
                 'h_league','h_game_number','h_name','h_league','h_game_number',
                 'length_outs','day_night','completion','forefeit','protest','park_id','attendance',
                 'length_minutes','v_line_score','h_line_score'],axis=1)
# Winner = 1 when the home team outscored the visitors, else 0.
res['Winner'] = res['h_score'] - res['v_score']
res['Winner'] = res['Winner'].apply(lambda x: 1 if x > 0 else 0)
res.head(20)
# Drop the raw scores so the target cannot leak into the features.
Data_Final = res.drop(['v_score','h_score'],axis=1)
Data_Final.head()
# Normalise the various null spellings to real NaNs, report the share of
# missing values per column, then keep only complete rows.
df = Data_Final.replace([' ','NULL','na','(none)'],np.nan)
# isna() detects the actual NaN values.  (The original used
# df.isin(['NaN']), which looks for the literal string 'NaN' — a value
# that never occurs after the replace above, so it always reported 0.)
print(df.isna().mean())
# drop any NaNs
df_2 = df.dropna()
df_2.dtypes
# Silence library warnings so the notebook output stays readable.
import warnings
warnings.filterwarnings('ignore')
# +
from sklearn.svm import SVR, LinearSVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.linear_model import Lasso, Ridge, ElasticNet
from sklearn.model_selection import train_test_split as tts
from yellowbrick.datasets import load_concrete
from yellowbrick.regressor import ResidualsPlot
# +
# +
# Candidate visiting-team batting features and the match-outcome target.
# NOTE(review): the modelling below uses X/y built from ALL columns, not
# these two frames — confirm which feature set was intended.
features = df_2[[ 'v_at_bats', 'v_hits', 'v_doubles','v_triples','v_homeruns',
                  'v_rbi','v_sacrifice_hits','v_sacrifice_flies']]
target = df_2[['Winner']].values.ravel()
# -
X = df_2.iloc[:, :-1].values  # every column except the last ('Winner')
y = df_2.iloc[:,-1].values    # the 'Winner' column
X
y
# Regressors to compare; each runs with its default hyperparameters.
regressors = [
    SVR(),
    Lasso(),
    Ridge(),
    LinearSVR(),
    ElasticNet(),
    MLPRegressor(),
    KNeighborsRegressor(),
    DecisionTreeRegressor(),
    RandomForestRegressor(),
]
def score_model(X, y, estimator):
    """
    Split the data into train and test splits, fit the estimator on the
    train split and print its R^2 score on the held-out test split.
    """
    X_train, X_test, y_train, y_test = tts(X, y)
    estimator.fit(X_train, y_train)
    predictions = estimator.predict(X_test)
    model_name = estimator.__class__.__name__
    print("{}: {}".format(model_name, r2_score(y_test, predictions)))
# Print the R^2 of every candidate regressor on a fresh train/test split.
for regressor in regressors:
    score_model(X, y, regressor)
def visualize_model(X, y, estimator):
    """
    Visually evaluate the regressor's performance across the train and
    test data via a Yellowbrick residuals plot.
    """
    X_train, X_test, y_train, y_test = tts(X, y)
    residuals = ResidualsPlot(estimator)
    residuals.fit(X_train, y_train)
    residuals.score(X_test, y_test)
    residuals.show()
# Show a residuals plot for every candidate regressor.
for regressor in regressors:
    visualize_model(X, y, regressor)
| Capstone Project Part 2 (December 12, 2021).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies and Setup
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import sem
# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')
# File to Load (Remember to Change These)
mouse_drug_data_to_load = "mouse_drug_data.csv"
clinical_trial_data_to_load = "clinicaltrial_data.csv"
# Read the Mouse and Drug Data and the Clinical Trial Data
# NOTE(review): the *_to_load variables above are unused — the literal
# filenames are repeated here; consider reading from the variables.
mouse_drug_data = pd.read_csv("mouse_drug_data.csv")
clinical_trial_data = pd.read_csv("clinicaltrial_data.csv")
# Combine the data into a single dataset (pd.merge defaults to an inner
# join on the shared "Mouse ID" key)
combined_data = pd.merge(clinical_trial_data, mouse_drug_data,on="Mouse ID")
combined_df = pd.DataFrame(combined_data)  # merge already returns a DataFrame; this re-wrap is effectively redundant
# Display the data table for preview
combined_df.head()
# -
# ## Tumor Response to Treatment
# +
# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint
average_tumor_volume = combined_df.groupby(['Drug', 'Timepoint'],as_index=False).agg({'Tumor Volume (mm3)': "mean"})
# Preview DataFrame
average_tumor_volume.head()
# -
# +
# Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint
# ("sem" = standard error of the mean per drug/timepoint group)
standard_error_data = combined_df.groupby(['Drug', 'Timepoint'],as_index=False).agg({'Tumor Volume (mm3)': "sem"})
# Preview DataFrame
standard_error_data.head()
# -
# Minor Data Munging to Re-Format the Data Frames
# Pivot to one row per timepoint and one column per drug.
reformat_df = average_tumor_volume.pivot(index='Timepoint', columns='Drug', values='Tumor Volume (mm3)')
reformat_df_se = standard_error_data.pivot(index='Timepoint', columns='Drug', values='Tumor Volume (mm3)')
# Preview that Reformatting worked
reformat_df
df1 = reformat_df[['Capomulin','Infubinol','Ketapril','Placebo']]  # the four drugs to chart
df1
# +
# Generate graph: one line per drug, each with its own marker.
x1 = list(df1.index)
markers = [(i, j,0) for i in range(1, 11) for j in range(1, 3)]
# Plot each drug column exactly once.  (The original list comprehension
# re-plotted the whole frame 16 times, drawing 64 overlapping lines whose
# markers did not correspond to the legend entries.)
for i, drug in enumerate(df1.columns):
    plt.plot(x1, df1[drug], marker=markers[i], ms=10)
plt.xlabel('Time (Days)')
plt.ylabel('Tumor Volume (mm3)')
plt.title('Tumor Response to Treatment')
plt.legend(['Capomulin','Infubinol','Ketapril','Placebo'], loc='upper left')
# Save the Figure
plt.savefig('Tumor_Response_to_Treatment.png')
# -
# ## Metastatic Response to Treatment
# Store the Mean Met. Site Data Grouped by Drug and Timepoint
average_tumor_volume_site = combined_df.groupby(['Drug', 'Timepoint'],as_index=False).agg({'Metastatic Sites': "mean"})
# Preview DataFrame
average_tumor_volume_site.head()
# Pivot to one row per timepoint and one column per drug.
reformat_df_site = average_tumor_volume_site.pivot(index='Timepoint', columns='Drug', values='Metastatic Sites')
reformat_df_site
# Store the Standard Error associated with Met. Sites Grouped by Drug and Timepoint
standard_error_data_site = combined_df.groupby(['Drug', 'Timepoint'],as_index=False).agg({'Metastatic Sites': "sem"})
standard_error_data_site.head()
# Minor Data Munging to Re-Format the Data Frames
reformat_df_site_se = standard_error_data_site.pivot(index='Timepoint', columns='Drug', values='Metastatic Sites')
# Preview that Reformatting worked
df2 = reformat_df_site_se[['Capomulin','Infubinol','Ketapril','Placebo']]
df2
reformat_df_site
# Restrict the mean met-site table to the four drugs of interest.
dfObj = pd.DataFrame(reformat_df_site, columns = ['Capomulin','Infubinol','Ketapril','Placebo'])
dfObj
# +
reformat_df
x3 = list(dfObj.index)
# Create the graph: one line per drug, each with its own marker (the
# `markers` list is defined in the tumor-response cell above).  The
# original re-plotted the whole frame 16 times, overdrawing 64 lines.
for i, drug in enumerate(dfObj.columns):
    plt.plot(x3, dfObj[drug], marker=markers[i], ms=10)
plt.xlabel('Treatment Duration (Days)')
plt.ylabel('Met. Sites')
plt.title('Metatastic spread during Treatment')
plt.legend(['Capomulin','Infubinol','Ketapril','Placebo'], loc='upper left')
#Save the figure
plt.savefig('Metatastic_spread_during_Treatment.png')
# -
# ## Survival Rates
# Store the Count of Mice Grouped by Drug and Timepoint (we can pass any metric)
count_of_mice = combined_df.groupby(['Drug', 'Timepoint'],as_index=False).agg({'Mouse ID': "count"})
# Preview DataFrame
count_of_mice.head()
# Minor Data Munging to Re-Format the Data Frames
reformat_df_mice_count = count_of_mice.pivot(index='Timepoint', columns='Drug', values='Mouse ID')
# Preview the Data Frame
reformat_df_mice_count
#Calculate the percentages for the graph: mice remaining at each timepoint
# relative to the starting count.  Define the column list here — the
# original referenced `cols` before it was assigned (a later cell defines
# it), which raises NameError when the notebook is run top to bottom.
cols = reformat_df_mice_count.columns
Perc_calc_for_graph = 100*(reformat_df_mice_count[cols]/reformat_df_mice_count[cols].iloc[0])
df3 = Perc_calc_for_graph[['Capomulin','Infubinol','Ketapril','Placebo']]
df3
# +
# Generate the Plot (Accounting for percentages): one line per drug.
x3 = list(df3.index)
markers = [(i, j,0) for i in range(1, 11) for j in range(1, 3)]
# Plot each drug once; the original re-plotted the whole frame 16 times,
# drawing 64 overlapping lines mismatched with the legend.
for i, drug in enumerate(df3.columns):
    plt.plot(x3, df3[drug], marker=markers[i], ms=10)
plt.ylabel('Survival Rate (%)')
plt.xlabel('Time (Days)')
plt.title('Survival During Treatment')
plt.legend(['Capomulin','Infubinol','Ketapril','Placebo'], loc='lower left')
# Save the Figure
plt.savefig('Survival_During_Treatment.png')
# Show the Figure
plt.show()
# -
# ## Summary Bar Graph
# Calculate the percent changes for each drug - using the data from the average tumor growth
cols = reformat_df.columns
# Percent change in mean tumor volume from the first to the last timepoint.
# NOTE(review): the name `survival_rate` is misleading — this is tumor
# volume change, not survival.
survival_rate = (100*(reformat_df[cols].iloc[-1]/reformat_df[cols].iloc[0]-1)).rename('Percentage Change')
survival_rate_df = pd.DataFrame(survival_rate)
# Display the data to confirm
survival_rate_df
#Just some extra - check change from period to period
perc_changes = reformat_df_mice_count.pct_change()
print(perc_changes)
df4 = survival_rate[['Capomulin','Infubinol','Ketapril','Placebo']]
df4
survival_rate_df.index.values
# +
# Store all Relevant Percent Changes into a Tuple
y_pos = survival_rate
# Colour bars by sign (green = tumor shrank, red = tumor grew) and build
# formatted value labels.
colors = []
bar_tick_label = []
for value in y_pos:
    label = "{:.2f}".format(value)
    bar_tick_label.append(label)
    if value<0:
        colors.append('g')
    else:
        colors.append('r')
# Plot the graph
plt.bar(cols,y_pos, color=colors, width=1, align='center', alpha=0.5)
plt.xticks(rotation=90)
plt.ylabel('% Tumor Volume Change')
plt.title('Tumor Change over 45 Day Treatment')
# Orient widths. Add labels, tick marks, etc.
for x,y in zip(cols,y_pos):
    label = "{:.0f}%".format(y)
    plt.annotate(label, # this is the text
                 (x,y), # this is the point to label
                 textcoords="offset points", # how to position the text
                 xytext=(0,0), # distance from text to points (x,y)
                 ha='center') # horizontal alignment can be left, right or center
# Save the Figure
plt.savefig('Tumor_Growth_over_45_day_treatment.png')
# Show the Figure
# (The original called fig.show(), but no `fig` variable exists in this
# notebook — that raised NameError.  Use the pyplot interface instead.)
plt.show()
# -
# IGNORE Checking out seaborn functionality
import seaborn as sns; sns.set()
# NOTE(review): this draws tumor-volume SEM data but carries the
# metastatic-sites title and axis labels — the labels do not match the data.
ax = sns.lineplot(x='Timepoint', y='Tumor Volume (mm3)', hue='Drug', err_style="bars", data=standard_error_data).set_title('Metatastic spread during Treatment')
plt.xlabel('Treatment Duration (Days)')
plt.ylabel('Met. Sites')
plt.legend(['Capomulin','Infubinol','Ketapril','Placebo'], loc='upper left')
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (tensorflow)
# language: python
# name: tensorflow
# ---
# +
__author__ = 'Adrian_Radillo'
import os
import time
from tensorflow.examples.tutorials.mnist import input_data
# -
# Load MNIST dataset (downloaded to ./MNIST_data on first run); labels
# are returned as one-hot 10-vectors.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# # Assignment
# +
# Import Tensorflow and start a session (TF1-style interactive session,
# used implicitly by the .eval()/.run() calls below)
import tensorflow as tf
sess = tf.InteractiveSession()
def weight_variable(shape):
    '''
    Initialize weights
    :param shape: shape of weights, e.g. [w, h, Cin, Cout] where
        w: width of the filters
        h: height of the filters
        Cin: the number of channels of the filters
        Cout: the number of filters
    :return: a tensor variable for weights with initial values
    '''
    # Truncated normal keeps every initial weight within two standard
    # deviations of zero.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name="W")
def bias_variable(shape):
    '''
    Initialize biases
    :param shape: shape of biases, e.g. [Cout] where
        Cout: the number of filters
    :return: a tensor variable for biases with initial values
    '''
    # A small positive constant keeps ReLU units active at the start.
    return tf.Variable(tf.constant(0.1, shape=shape), name="b")
def conv2d(x, W):
    '''
    Perform 2-D convolution
    :param x: input tensor of size [N, W, H, Cin] where
        N: the number of images
        W: width of images
        H: height of images
        Cin: the number of channels of images
    :param W: weight tensor [w, h, Cin, Cout] where
        w/h: filter width/height
        Cin: filter channels (= image channels)
        Cout: the number of filters
    :return: the features extracted by the filters (convolution output)
    '''
    # Stride 1 in every dimension plus SAME padding preserves spatial size.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    '''
    Perform non-overlapping 2-D maxpooling on 2x2 regions in the input data
    :param x: input data
    :return: the results of maxpooling (max-marginalized + downsampling)
    '''
    # 2x2 window with stride 2 halves each spatial dimension.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# +
def main():
    """Build, train and periodically evaluate a small CNN on MNIST.

    Architecture: conv(5x5,32)+pool -> conv(5x5,64)+pool -> FC(1024) with
    dropout -> 10-way softmax.  TensorBoard summaries and checkpoints are
    written under result_dir.  Relies on the module-level `sess`, `mnist`
    and the layer helper functions defined above.
    """
    # Specify training parameters
    result_dir = './results/2b/1/' # directory where the results from the training are saved
    max_step = 5500 # the maximum iterations. After max_step iterations, the training will stop no matter what
    start_time = time.time() # start timing

    # placeholders for input data and input labels
    x = tf.placeholder(tf.float32, shape=[None, 784], name = "x")
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name = "labels")

    # reshape the flat 784-vector into a 28x28 single-channel image
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # first convolutional layer: 32 5x5 filters, then 2x2 max-pool
    with tf.name_scope("conv1"):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
        tf.summary.histogram("weights", W_conv1)
        tf.summary.histogram("biases", b_conv1)
        tf.summary.histogram("ReLu_activations", h_conv1)
        tf.summary.histogram("max-pool_activations", h_pool1)

    # second convolutional layer: 64 5x5 filters, then 2x2 max-pool
    with tf.name_scope("conv2"):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
        tf.summary.histogram("weights", W_conv2)
        tf.summary.histogram("biases", b_conv2)
        tf.summary.histogram("ReLu_activations", h_conv2)
        tf.summary.histogram("max-pool_activations", h_pool2)

    # densely connected layer over the flattened 7x7x64 feature maps
    with tf.name_scope("fc1"):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        tf.summary.histogram("weights", W_fc1)
        tf.summary.histogram("biases", b_fc1)
        tf.summary.histogram("ReLu_activations", h_fc1)

    # dropout (keep_prob is fed at run time: 0.5 in training, 1.0 in eval)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # softmax output logits
    with tf.name_scope("fc2"):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        tf.summary.histogram("weights", W_fc2)
        tf.summary.histogram("biases", b_fc2)

    # setup training
    with tf.name_scope("cross_entropy"):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Add a scalar summary for the snapshot loss.
    tf.summary.scalar(cross_entropy.op.name, cross_entropy)
    tf.summary.scalar('accuracy', accuracy)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    # Add the variable initializer Op.
    init = tf.global_variables_initializer()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(result_dir, sess.graph)

    # Run the Op to initialize the variables.
    sess.run(init)

    # run the training
    for i in range(max_step):
        batch = mnist.train.next_batch(50) # the data batch used in this training iteration; batch size is 50
        if i%100 == 0:
            # output the training accuracy every 100 iterations
            train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_:batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))

            # Update the events file which is used to monitor the training (in this case,
            # only the training loss is monitored)
            summary_str = sess.run(summary_op, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
            summary_writer.add_summary(summary_str, i)
            summary_writer.flush()

        # save the checkpoints every 1100 iterations and on the final step.
        # (The original condition tested `i == max_step`, which never occurs
        # because range(max_step) stops at max_step - 1, so the last-step
        # checkpoint was never written.)
        if i % 1100 == 0 or i == max_step - 1:
            checkpoint_file = os.path.join(result_dir, 'checkpoint')
            saver.save(sess, checkpoint_file, global_step=i)

            # print test error
            print("test accuracy %g"%accuracy.eval(feed_dict={
                x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
            # print validation error
            print("validation accuracy %g"%accuracy.eval(feed_dict={
                x: mnist.validation.images, y_: mnist.validation.labels, keep_prob: 1.0}))

        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) # run one train_step

    stop_time = time.time()
    print('The training takes %f second to finish'%(stop_time - start_time))

if __name__ == "__main__":
    main()
# -
| assignment1/DCN_TensorFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework: 7 Day Forecast
#
# ## The Problem
#
# For this assignment, use IPython interact so the user can input a location and then display the **current conditions** and the **7 day weather forecast** for that location.
#
# To accomplish this, you will need a Weather API and a Geocoding API. I suggest using the example from lesson 10. OpenWeatherMap is a free service. Their OneCall API should be exactly what you need: https://openweathermap.org/api/one-call-api The focus of this assignment is loading the 7-day forecast into a data frame and the manipulation of the data frame itself.
#
#
# ### Requirements:
#
# - Display all temperatures as integer values (I don't want to see 34.67) in degrees Fahrenheit (imperial units).
# - Make sure to show the weather icon for the current conditions: https://openweathermap.org/weather-conditions#How-to-get-icon-URL See example here, for code `10d`: http://openweathermap.org/img/wn/10d@2x.png
# - The 7 day forecast should display the Day of the week (Monday, Tuesday, etc...) plus the low and high temperatures for that day.
#
#
# ### Screenshot of an example run:
#
# 
#
#
# HINTS and ADVICE:
#
#
# - Use the **problem simplification approach** solve a simpler problem and add complexity as you figure things out.
# - Hard-code a location. e.g. Syracuse, NY
# - Start with the weather example from lesson 10. The OneCall API should be excatly what you need: https://openweathermap.org/api/one-call-api
# - For the `current conditions`, we only need the description weather icon and current temperature. Get that working next.
# - Next work on getting the 7-day to display in a data frame.
# - Next work on setting the 7-day to display EXACTLY like the output. There are several approaches:
# - You could iterate over the API output building another list of dict with exactly the values required and then displaying that in a data frame.
# - You could load the API output into a dataframe then use `to_records()` to iterate over the dataframe to build another data frame
# - You could try to manipulate the dataframe with the `apply()` method an a lambda (inline function). This is probably the least amount of code but the most difficult to accomplish.
# - Once everything above works, introduce IPython interact and allow for the input of any location. You are done!
#
# + [markdown] label="problem_analysis_cell"
# ## Part 1: Problem Analysis
#
# Inputs:
#
# ```
# TODO: Inputs
# ```
#
# Outputs:
#
# ```
# TODO: Outputs
# ```
#
# Algorithm (Steps in Program):
#
# ```
# TODO:Steps Here
#
# ```
# -
# ## Part 2: Code Solution
#
# You may write your code in several cells, but place the complete, final working copy of your code solution within this single cell below. Only the within this cell will be considered your solution. Any imports or user-defined functions should be copied into this cell.
# + label="code_solution_cell"
# Step 2: Write code here
# + [markdown] label="homework_questions_cell"
# ## Part 3: Questions
#
# 1.Pandas programs are different than typical Python programs. Explain the process you followed to achieve the solution? Which approach did you take to the dataframe?
#
# `--== Double-Click and Write Your Answer Below This Line ==--`
#
#
# 2. Did you use `json_normalize()` to flatten the dataframe?
#
# `--== Double-Click and Write Your Answer Below This Line ==--`
#
#
# 3. This solution requires little new code was far from easy. This shows that more code != complex code. Where did you spend most of your time on this assignment?
#
# `--== Double-Click and Write Your Answer Below This Line ==--`
#
#
#
# + [markdown] label="reflection_cell"
# ## Part 4: Reflection
#
# Reflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements?
#
# To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise.
#
# Keep your response to between 100 and 250 words.
#
# `--== Double-Click and Write Your Reflection Below Here ==--`
#
# -
# run this code to turn in your work! (course-provided submission helper)
from coursetools.submission import Submission
Submission().submit()
| lessons/12-pandas/HW-Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.action_chains import ActionChains
import urllib
from bs4 import BeautifulSoup
import time
import csv
import re
import sys
import os
import numpy as np
import pandas as pd
from IPython.core.debugger import set_trace
path = 'datasets/alltrails/'  # output directory for scraped CSVs
chrome_options = webdriver.chrome.options.Options()
chrome_options.add_argument("--start-maximized")
#chrome_options.add_argument('--headless')
#chrome_options.add_argument('--no-sandbox')
#chrome_options.add_argument('--disable-dev-shm-usage')
chrome_path = r'C:\Users\Sumit\Anaconda3\pkgs\python-chromedriver-binary-77.0.3865.40.0-py36_0\Lib\site-packages\chromedriver_binary\chromedriver.exe'
# Restore the trail list pickled by a previous scraping session.
# NOTE(review): only unpickle files you created yourself — pickle can
# execute arbitrary code when loading untrusted data.
import pickle
infile=open('AllTrailsScraper_notebook_env.db','rb')
df_trails = pickle.load(infile)
infile.close()
def login():
    """Log in to AllTrails in the module-level `driver` so that scraped
    pages include member-only content.

    The credential placeholders below must be replaced with real values
    before running.
    """
    driver.get("https://www.alltrails.com/login?ref=header#")
    # send_keys() returns None, so don't bind its result to variables
    # (the original assigned the None return values to unused names).
    driver.find_element_by_id("user_email").send_keys("<EMAIL>")
    driver.find_element_by_id("user_password").send_keys("<PASSWORD>")
    login_button = driver.find_element_by_class_name("login")
    login_button.click()
def scrape_trail(browser, hike_url):
    """Navigate `browser` to hike_url and return the page as parsed soup."""
    browser.get(hike_url)
    page_html = browser.page_source
    return BeautifulSoup(page_html, "lxml")
# +
# Below processing should already have been done:
# df_trails['trail'] = df_trails['trail'].str.split('?').str[0]
# df_trails['trail_url'] = df_trails['trail_url'].str.split('?').str[0]
# df_trails['trail_url'] = df_trails['trail_url'].str.replace('/explore/','/')
# +
# Visit every trail page and harvest its latitude/longitude from the
# page's <meta property="place:location:..."> tags, accumulating one row
# per trail into df_trail_latlon.
driver = webdriver.Chrome(chrome_path, options=chrome_options)
login()
time.sleep(1)
df_trail_latlon = pd.DataFrame()
for index, row in df_trails.iterrows():
    park = row['park']
    trail = row['trail']
    trail_url = row['trail_url']
    print(trail_url)
    soup = scrape_trail(driver, trail_url)
    #print(soup.getText())
    e_trail_lat = soup.find('meta', {'property': 'place:location:latitude'})
    e_trail_lon = soup.find('meta', {'property': 'place:location:longitude'})
    trail_lat = float(e_trail_lat['content'])
    trail_lon = float(e_trail_lon['content'])
    print(trail_lat)
    print(trail_lon)
    # NOTE(review): DataFrame.append is deprecated/removed in newer pandas;
    # consider collecting dicts in a list and calling pd.concat once.
    df_trail_latlon = df_trail_latlon.append({'park':park,'trail':trail,'trail_url':trail_url,'trail_lat':trail_lat,'trail_lon':trail_lon}, ignore_index=True)
    # e_rev = soup.findAll('div', {'itemprop': 'review'})
    # print('Reviews: '+str(len(e_rev)))
    # for review in e_rev:
    #     e_rev_date = review.find('meta', {'itemprop': 'datePublished'})
    #     e_rev_body = review.find('p', {'itemprop': 'reviewBody'})
    #     rev_date = e_rev_date['content']
    #     rev_body = e_rev_body.getText()
    #     # print(rev_date)
    #     # print(rev_body)
    #     df_trail_reviews = df_trail_reviews.append({'park':park,'trail':trail,'date':rev_date,'review':rev_body}, ignore_index=True)
    # df_trail_reviews.to_csv(path+'reviews_'+park+'_'+trail+'.csv')
driver.quit()
# -
# Persist and display the collected coordinates.
df_trail_latlon.to_csv(path+'traillatlon.csv')
df_trail_latlon
| models/AllTrailsScraper_TrailLatLon_Only.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
df = pd.read_csv('../datasets/SVM_Dataset1.csv', index_col=0)
# -
df
6 * .75  # stray scratch calculation; has no effect on the analysis
# +
import numpy as np
feature_cols = ['X1', 'X2']  # the two predictor columns
X_train = np.array(df[feature_cols])
X_train
# -
Y_train = df['y'].values  # class labels
Y_train
# Colour code the two classes for plotting: blue for label 1, red otherwise.
# (Comprehensions replace the original append loops; same resulting lists.)
color_ls = ['b' if k == 1 else 'r' for k in Y_train]
color_ls
# Text labels for each point: 'H' for the positive class, 'NH' otherwise.
label = ['H' if k == 1 else 'NH' for k in Y_train]
label
# +
# Scatter the training points with their class colours and text labels.
import matplotlib.pyplot as plt
for k, (i,j) in enumerate(X_train):
    plt.scatter(i, j, c = color_ls[k])
    plt.text(i+0.02, j+0.02, label[k])
# +
from sklearn import svm
# svm_classifier = svm.SVC(kernel='poly',C=1, degree=2)
# Linear SVM; the fairly large C penalises margin violations strongly.
svm_classifier = svm.SVC(kernel='linear', C=10)
svm_classifier.fit(X_train, Y_train)
# -
def plot_decision_boundary(clf, X, y):
    """Draw the classifier's decision boundary plus its +/-1 margin lines.

    Evaluates clf.decision_function on a unit-spaced grid covering the
    data (padded by 1 on every side) and contours the levels -1, 0, 1.
    The `y` argument is accepted for call-site symmetry but unused.
    """
    pad = 1
    xs = np.arange(X[:, 0].min() - pad, X[:, 0].max() + pad)
    ys = np.arange(X[:, 1].min() - pad, X[:, 1].max() + pad)
    grid_x, grid_y = np.meshgrid(xs, ys)
    flat_grid = np.array([grid_x.ravel(), grid_y.ravel()]).T
    scores = clf.decision_function(flat_grid).reshape(grid_x.shape)
    plt.contour(grid_x, grid_y, scores, colors='b', levels=[-1, 0, 1],
                alpha=0.4, linestyles=['--', '-', '--'])
    plt.xlim(grid_x.min(), grid_x.max())
    plt.ylim(grid_y.min(), grid_y.max())
# +
# Decision boundary and margins over the training data.
plot_decision_boundary(svm_classifier, X_train, Y_train)
for k, (i,j) in enumerate(X_train):
    plt.scatter(i, j, c = color_ls[k])
    plt.text(i+0.02, j+0.02, label[k])
# +
# Number of Support Vectors for each class:
svm_classifier.n_support_
# +
# What are those Support Vectors:
svm_classifier.support_vectors_
# -
# Separating-hyperplane parameters (w and b in w.x + b = 0).
weight=svm_classifier.coef_
intercept=svm_classifier.intercept_
print(weight)
print(intercept)
svm_classifier.predict([[3,6]])  # classify a new point
| notebooks/SVM Dataset 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/OscarAT1984/descensoGradiente/blob/main/Descenso_de_gradiente.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="NLeW7L6XTyFv"
from matplotlib import cm # Para manejar colores
import numpy as np
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="mvLhC2uqT4Vr" outputId="12fbaccb-3cb3-4034-8e01-648a77b10e38"
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
def f(x, y):
    """Paraboloid objective f(x, y) = x**2 + y**2; its minimum is at the origin."""
    return x**2 + y**2
res = 100  # grid resolution per axis
X = np.linspace(-4, 4, res)
Y = np.linspace(-4, 4, res)
X, Y = np.meshgrid(X, Y)
Z = f(X,Y)  # evaluate the objective over the whole grid
# Plot the surface
surf = ax.plot_surface(X, Y, Z, cmap=cm.cool,
                       linewidth=0, antialiased=False)
fig.colorbar(surf)  # colour scale for the surface heights
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="g7rRgj6UT89k" outputId="1ae64b72-c160-47a2-801a-bbd14748b332"
# Filled contour view of the same surface, with `res` evenly spaced levels.
level_map = np.linspace(np.min(Z), np.max(Z),res)
plt.contourf(X, Y, Z, levels=level_map,cmap=cm.cool)
plt.colorbar()
plt.title('Descenso de gradiente')
def derivate(_p,p):
return (f(_p[0],_p[1]) - f(p[0],p[1])) / h
p = np.random.rand(2) * 8 - 4 # generar dos valores aleatorios
plt.plot(p[0],p[1],'o', c='k')
lr = 0.01
h = 0.01
grad = np.zeros(2)
for i in range(10000):
for idx, val in enumerate(p):
_p = np.copy(p)
_p[idx] = _p[idx] + h;
dp = derivate(_p,p)
grad[idx] = dp
p = p - lr * grad
if(i % 10 == 0):
plt.plot(p[0],p[1],'o', c='r')
plt.plot(p[0],p[1],'o', c='w')
plt.show()
print("El punto mínimo esta en: ", p)
| Descenso_de_gradiente.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sankethks25/Deep-Learning/blob/main/Cat_Dog_Prediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="oqmR3RLGzqhI"
# ### Connecting to Kaggle
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} id="qk6LyZK3zttM" outputId="cbb172b5-8e49-4082-8bce-4ba6a0a8900a"
from google.colab import files
files.upload()
# ! mkdir ~/.kaggle
# ! cp kaggle.json ~/.kaggle/
# ! chmod 600 ~/.kaggle/kaggle.json
# + [markdown] id="rWopI2_pz1Os"
# ### Downloading the Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="ywfWuDtMz03q" outputId="c29c26a4-001e-4ded-e142-c0b7772d9500"
# !kaggle datasets download -d fushenggg/3-class-cat-dog-car-dataset
# + id="tQ1lZVYk0IM_"
# !unzip /content/3-class-cat-dog-car-dataset.zip
# + [markdown] id="s4h_mp7nzmRj"
# ### Importing Dependencies
# + id="fbx-KDFHx191"
import numpy as np
import pandas as pd
import os
from pathlib import Path
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Flatten, Conv2D, MaxPool2D, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from tqdm import tqdm
# + [markdown] id="DJjqg4MnMVc0"
# ### Clean Function
# + id="02RoKMiRMXpZ"
def clean_data(fname):
    """Load an image file, resize it to 150x150, and scale pixels to [0, 1]."""
    image = plt.imread(fname)
    image = cv2.resize(image, (150, 150))
    # Normalise raw 0-255 pixel values.
    return image / 255.0
# + [markdown] id="NOsG37kb6n8a"
# ### Taking Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="OCAi2DbN0rs8" outputId="20b9429d-535a-4a29-f430-23067f5adf80"
# Directory of images named "<class>.<id>.jpg"; the class prefix encodes the label.
data_dir = Path(r"/content/cats_dogs_cars/cats_dogs_cars/data")
labels = {"car": 0, "cat": 1, "dog": 2}
X = []
y = []
for fname in tqdm(os.listdir(data_dir)):
    img = clean_data(data_dir / fname)
    # append single items -- the original used extend([...]), which builds a
    # throwaway one-element list per image
    X.append(img)
    y.append(labels[fname.split(".")[0]])
X = np.array(X)
y = np.array(y)
# + [markdown] id="RCDG-Qe0-BTZ"
# ### Splitting Data into Train/Test/Val
# + id="prrGtFdt-KAH"
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y)
# X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=42, shuffle=True, stratify=y)
# + [markdown] id="bJL3JsUk9LoW"
# ### Model Building
# + id="qr2TYECU9FFk"
def build_model():
    """Build a small CNN for 3-class image classification.

    Reads the module-level `X_train` for the input shape; prints a summary
    and writes an architecture diagram to model.png as side effects.

    Returns
    -------
    keras.Model
        Uncompiled functional model with a 3-way softmax output.
    """
    inputs = Input(shape=X_train[0].shape, name="Input Layer")
    # Two conv -> pool -> batch-norm stages with shrinking filter counts.
    x = Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding="valid", activation="relu")(inputs)
    x = MaxPool2D(pool_size=(2,2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=128, kernel_size=(5, 5), strides=(1, 1), padding="valid", activation="relu")(x)
    x = MaxPool2D(pool_size=(2,2))(x)
    x = BatchNormalization()(x)
    # Flatten feature maps into a vector for the dense head.
    x = Flatten()(x)
    x = Dense(units=512, activation="relu")(x)
    # One probability per class (car / cat / dog).
    outputs = Dense(units=3, activation="softmax")(x)
    # Building Model
    model = Model(inputs=inputs, outputs=outputs)
    model.summary()
    tf.keras.utils.plot_model(model, "model.png", show_shapes=True, show_dtype=True, dpi=300, expand_nested=True,
                              show_layer_activations=True)
    return model
# + colab={"base_uri": "https://localhost:8080/"} id="VdNmzB76EXqa" outputId="fed2bcec-1c5b-4f2f-926a-f4d0433e7c9c"
model = build_model()
# + [markdown] id="le75ic2uGt1Y"
# ### Model Compiling
# + id="1jduaZmiEa1n"
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# + [markdown] id="OM0W1XFMHdfk"
# ### Callbacks
# + id="yHEYLYG2HcoT"
def callbacks():
    """Return the training callbacks: early stopping, checkpointing and LR decay."""
    # Stop once val_loss stalls for 5 epochs, restoring the best weights.
    stopper = EarlyStopping(monitor="val_loss", patience=5, mode="min", verbose=1, restore_best_weights=True)
    # Persist the best model (by val_loss) after each epoch.
    saver = ModelCheckpoint("model.hdf5", monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_freq='epoch')
    # Cut the learning rate by 10x when val_loss plateaus for 3 epochs.
    scheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, mode='min', min_delta=0.0001, cooldown=0, min_lr=0)
    return [stopper, saver, scheduler]
# + [markdown] id="JW2MaEbqJTdD"
# ### Model Training
# + colab={"base_uri": "https://localhost:8080/"} id="3JtWfnqpJSvv" outputId="52a85042-9f45-498e-d685-b2279aeed362"
model.fit(X_train, y_train, epochs=30, verbose=1, validation_data=(X_test, y_test),
batch_size=32, shuffle=True, validation_batch_size=32, callbacks=callbacks())
# + [markdown] id="mc8fAeXdKwh6"
# ### Model Prediction
# + id="grY6qygFKClU"
def predict(fname):
    """Classify one image file with the saved model and display the result."""
    classes = ["car", "cat", "dog"]
    image = clean_data(fname)
    trained = tf.keras.models.load_model("/content/model.hdf5")
    probabilities = trained.predict(np.array([image, ]))
    class_index = np.argmax(probabilities, axis=-1)[0]
    pred = classes[class_index]
    plt.title(f"Predicted Class is {pred}")
    plt.imshow(image)
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="wyDVBcDYPrAx" outputId="86c96c6c-caeb-4ca0-9296-37d5631e7e45"
predict("/content/cats_dogs_cars/cats_dogs_cars/test/10.jpg")
# + id="cCbcadNyQimx"
| Cat_Dog_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning API
# > ICDSS Machine Learning Workshop Series: Coding Models on `scikit-learn`, `keras` & `fbprophet`
# * [Pipeline](#pipeline)
# * [Preprocessing](#pipe:preprocessing)
# * [Estimation](#pipe:estimation)
# * [Supervised Learning](#pipe:supervised-learning)
# * [Unsupervised Learning](#pipe:unsupervised-learning)
# * [Evaluation](#pipe:evaluation)
# * [`scikit-learn`](#scikit-learn)
# * [Preprocessing](#sk:preprocessing)
# * [Estimation](#sk:estimation)
# * [Model Selection](#sk:model-selection)
# * [Hyperparameters](#sk:hyperparameters)
# * [`GridSearchCV`](#sk:GridSearchCV)
# * [Pipeline](#sk:pipeline)
# * [`keras`](#keras)
# * [Dense](#keras:dense)
# * [Iris](#keras:iris)
# * [CNN](#keras:cnn)
# * [Fashion MNIST](#keras:fashion-mnist)
# * [LSTM](#keras:lstm)
# * [President Trump Generator](#keras:president-trump-generator)
# * [`fbprophet`](#fbprophet)
# * [Bitcoin Capital Market](#fbprophet:bitcoin-capital-market)
# ## Pipeline <a class="anchor" id="pipeline"></a>
# <img src="assets/Pipeline.png" alt="Drawing" style="width: 750px;"/>
# +
# %reset -f
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-muted')
from sklearn import datasets
# -
# ### Preprocessing <a class="anchor" id="pipe:preprocessing"></a>
class Preprocessor:
    """Interface for `preprocessing` transformers (fit/transform protocol).

    Concrete subclasses must implement `__init__`, `fit` and `transform`;
    `fit_transform` is provided for free in terms of the other two.
    """

    def __init__(self, **hyperparameters):
        """Subclasses set their hyperparameters here."""
        raise NotImplementedError

    def fit(self, X_train):
        """Learn whatever state the transformation needs from `X_train`."""
        raise NotImplementedError

    def transform(self, X):
        """Map `X` through the learned transformation."""
        raise NotImplementedError

    def fit_transform(self, X):
        """Convenience: `fit` on `X`, then `transform` the same `X`."""
        self.fit(X)
        return self.transform(X)
# #### Principal Component Analysis (PCA)
class PCA:
    """Principal Component Analysis via the Gram-matrix ("snapshot") trick.

    Data is arranged column-wise: `X` has shape (D, N) with one observation
    per column.  Transformed scores are standardised to zero mean and unit
    variance per component, using statistics learned during `fit`.
    """

    def __init__(self, n_components=2):
        """Store hyperparameters; nothing is learned until `fit`.

        Parameters
        ----------
        n_components: int
            Number of principal components to keep.
        """
        self.n_comps = n_components
        self.mu = None        # per-feature mean, shape (D, 1)
        self.U = None         # projection matrix, shape (D, n_comps)
        self._fitted = False

    def fit(self, X):
        """Learn the projection from the sample second moments of `X`.

        Parameters
        ----------
        X: numpy.ndarray
            Features matrix, shape (D, N)

        Returns
        -------
        PCA
            `self`, to allow fit(...).transform(...) chaining.
        """
        self.D, n_samples = X.shape
        self.mu = X.mean(axis=1).reshape(-1, 1)
        centered = X - self.mu
        # N x N Gram matrix instead of the D x D covariance -- cheaper when N < D
        gram = (1 / n_samples) * np.dot(centered.T, centered)
        raw_vals, raw_vecs = np.linalg.eig(gram)
        raw_vals = np.real(raw_vals)
        raw_vecs = np.real(raw_vecs)
        # eigenvectors of the n_comps largest eigenvalues, in descending order
        order = np.argsort(raw_vals)[::-1]
        top_vecs = raw_vecs[:, order][:, :self.n_comps]
        # map Gram-space eigenvectors back to feature space, then normalise columns
        unnormalised = np.dot(centered, top_vecs)
        self.U = unnormalised / np.apply_along_axis(np.linalg.norm, 0, unnormalised)
        # per-component mean/std of the training scores, used to standardise later
        scores = np.dot(self.U.T, centered)
        self.W_mu = np.mean(scores, axis=1)
        self.W_std = np.std(scores, axis=1)
        self._fitted = True
        return self

    def transform(self, X):
        """Project `X` onto the learned components and standardise the scores.

        Parameters
        ----------
        X: numpy.ndarray
            Features matrix, shape (D, M)

        Returns
        -------
        numpy.ndarray
            Standardised scores, shape (n_comps, M)

        Raises
        ------
        AssertionError
            If called before `fit`.
        """
        if not self._fitted:
            raise AssertionError('Not fitted yet.')
        scores = np.dot(self.U.T, X - self.mu)
        return ((scores.T - self.W_mu) / self.W_std).T

    def fit_transform(self, X):
        """Fit on `X`, then return its transformed (standardised) scores."""
        return self.fit(X).transform(X)
# +
# preprocessor
pca = PCA(n_components=2)
# fetch data
iris = datasets.load_iris()
# supervised-learning data
X, y = iris.data, iris.target
# transformed data
X_transform = pca.fit_transform(X.T)
_, ax = plt.subplots(figsize=(20.0, 3.0))
for i in range(iris.target.max() + 1):
I = iris.target == i
ax.scatter(X_transform[0, I], X_transform[1, I], label=iris.target_names[i])
ax.set_title('Iris Dataset PCA')
ax.set_xlabel('Principle Component 1')
ax.set_ylabel('Principle Component 2')
ax.legend();
# -
# #### `challenge` QuadraticFeatures
#
# Code the transformation $\mathcal{G}$ such that:
#
# $$\mathcal{G}: \mathbb{R}^{2} \rightarrow \mathbb{R}^{6}$$
#
# according to the mapping:
#
# $$\begin{bmatrix}x_{1} & x_{2}\end{bmatrix} \mapsto \begin{bmatrix}1 & x_{1} & x_{2} & x_{1}x_{2} & x_{1}^{2} & x_{2}^{2}\end{bmatrix}$$
class QuadraticFeatures:
    """Generate quadratic (degree-2 polynomial) features.

    Maps each row [x1, x2] to [1, x1, x2, x1*x2, x1**2, x2**2].
    (The original challenge left the column assignments blank, which was a
    syntax error; they are filled in to satisfy the accompanying unit test.)
    """
    def fit(self, X_train):
        """Stateless transformer: nothing to learn."""
        return self
    def transform(self, X):
        """Apply the quadratic feature mapping.

        Parameters
        ----------
        X: numpy.ndarray
            Features matrix of shape (N, 2)

        Returns
        -------
        numpy.ndarray
            Expanded features matrix of shape (N, 6)
        """
        # get dimensions
        N, D = X.shape
        # check number of input features
        assert(D==2)
        # get x_{1} column
        x_1 = X[:, 0]
        # get x_{2} column
        x_2 = X[:, 1]
        # initialise output matrix
        out = np.empty(shape=(N, 6))
        # column 1: constant (bias term)
        out[:, 0] = 1.0
        # column 2: x_{1}
        out[:, 1] = x_1
        # column 3: x_{2}
        out[:, 2] = x_2
        # column 4: x_{1}x_{2} (interaction)
        out[:, 3] = x_1 * x_2
        # column 5: x_{1}^{2}
        out[:, 4] = x_1 ** 2
        # column 6: x_{2}^{2}
        out[:, 5] = x_2 ** 2
        return out
    def fit_transform(self, X):
        """Reset state (no-op) and apply the transformer."""
        self.fit(X)
        return self.transform(X)
# ##### Unit Test for `QuadraticFeatures`
# unit test function
assert (QuadraticFeatures().fit_transform(np.array([[1, 2],
[3, 4]])) == np.array([[ 1., 1., 2., 2., 1., 4.],
[ 1., 3., 4., 12., 9., 16.]])).all(), "Wrong implementation, try again!"
'Well Done!'
# ### Estimation <a class="anchor" id="pipe:estimation"></a>
class Estimator:
    """Generic `estimator` class.

    NOTE(review): this is interface pseudocode -- `predict` returns the
    undefined name `y_hat` and `_loss` calls the undefined name `L`, so both
    raise NameError if invoked.  Concrete estimators are expected to
    override `__init__`, `fit`, `predict` and `_loss`.
    """
    def __init__(self, **hyperparameters):
        """Initialise Hyperparameters"""
        raise NotImplementedError
    def fit(self, X, y):
        """Train model; returns `self` for chaining."""
        return self
    def predict(self, X):
        """Forward/Inference pass (placeholder: `y_hat` is undefined here)."""
        return y_hat
    def score(self, X, y):
        """Performance results: predict, then score against the truth."""
        y_hat = self.predict(X)
        return self._loss(y, y_hat)
    def _loss(self, y, y_hat):
        """Objective function for scoring (placeholder: `L` is undefined here)."""
        return L(y, y_hat)
# #### `challenge` Linear Regression
#
# Code the estimator $\mathcal{F}$ such that:
#
# $$\mathbf{y} = \mathbf{X} * \mathbf{w}_{MLE}$$
#
# for the Maximum Likelihood estimation weights parameters:
#
# $$\mathbf{w}_{MLE} = (\mathbf{X}^{T} \mathbf{X})^{-1} * \mathbf{X}^{T} * \mathbf{y}$$
class LinearRegression:
    """Linear Regression `estimator` class (maximum-likelihood / least squares).

    The original challenge left the assignments blank, which was a syntax
    error; the implementation below satisfies the accompanying unit test.
    """
    def __init__(self, dimensionality):
        """Initialise Hyperparameters.

        Parameters
        ----------
        dimensionality: int
            Number of input features (columns of the design matrix).
        """
        self.dimensionality = dimensionality
        # weights are unknown until `fit`; start at zero
        self.w_mle = np.zeros(dimensionality)
    def fit(self, X, y):
        """Train model: w_MLE = (X^T X)^{-1} X^T y.

        Implemented with `np.linalg.lstsq`, which solves the same normal
        equations in a numerically stable way (and tolerates rank-deficient X).
        """
        self.w_mle, *_ = np.linalg.lstsq(X, y, rcond=None)
        return self
    def predict(self, X):
        """Forward/Inference pass: y_hat = X w_MLE."""
        y_hat = np.dot(X, self.w_mle)
        return y_hat
    def score(self, X, y):
        """Performance results (mean squared error -- lower is better)."""
        y_hat = self.predict(X)
        return self._loss(y, y_hat)
    def _loss(self, y, y_hat):
        """Objective function for scoring: mean squared error."""
        return ((y - y_hat) ** 2).mean(axis=None)
# ##### Unit Test for `LinearRegression`
# +
# dummy data
X, y = np.array([[1., 2.], [1., 3.], [1., 7.]]), np.array([2., 3., 7.])
# input dimensions
N, D = X.shape
# estimator: init & fit
lr = LinearRegression(dimensionality=D).fit(X, y)
# unit test function
assert np.isclose(lr.predict(X), y).all(), 'Wrong implementation, try again!'
'Well Done!'
# -
# ## `scikit-learn` <a class="anchor" id="scikit-learn"></a>
# ### Preprocessing <a class="anchor" id="sk:preprocessing"></a>
# +
# %reset -f
import numpy as np
import pandas as pd
import sklearn
import sklearn.ensemble
import sklearn.neural_network
import sklearn.decomposition
import sklearn.pipeline
from sklearn import datasets
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-muted')
import warnings
warnings.filterwarnings('ignore')
np.random.seed(0)
# -
# #### Principal Component Analysis <a class="anchor" id="sk:pca"></a>
# +
# preprocessor
pca = sklearn.decomposition.PCA(n_components=2)
# fetch data
iris = datasets.load_iris()
# supervised-learning data
X, y = iris.data, iris.target
# transformed data
X_transform = pca.fit_transform(X)
_, ax = plt.subplots(figsize=(20.0, 3.0))
for i in range(iris.target.max() + 1):
I = iris.target == i
ax.scatter(X_transform[I, 0], X_transform[I, 1], label=iris.target_names[i])
ax.set_title('Iris Dataset PCA')
ax.set_xlabel('Principle Component 1')
ax.set_ylabel('Principle Component 2')
ax.legend();
# -
# ### Estimation <a class="anchor" id="sk:estimation"></a>
# <img src="assets/ml_map.png" alt="Drawing" style="width: 750px;"/>
# ### Problem Statement
#
# Let's try to model a continuous function:
#
# $$f(x) = x^{3} - 0.4x^{2} - x + 0.3 + \epsilon, \quad x \in [-1, 1] \text{ and } \epsilon \sim \mathcal{N}(0, 0.05)$$
# fetch data
x = np.linspace(-1, 1, 500)
# targets
y = (x**3 - 0.4*x**2 - x + 0.3) + np.random.normal(0, 0.01, len(x))
# features matrix
X = x.reshape(-1, 1)
# #### API
# +
# initialize figure
_, ax = plt.subplots(figsize=(20.0, 6.0))
# true data
ax.plot(x, y, label='Observations', lw=2)
ax.fill_between(x, y-0.025, y+0.025)
## (1) INIT - set hyperparameters
estimators = [('Gradient Boosting Tree Regressor', sklearn.ensemble.GradientBoostingRegressor(n_estimators=25)),
('Support Vector Machine Regressor', sklearn.svm.SVR(C=1.0, kernel='rbf')),
('Ridge Regressor', sklearn.linear_model.Ridge(alpha=0.5)),
('K-Nearest Neighbors Regressor', sklearn.neighbors.KNeighborsRegressor(n_neighbors=3)),
('Multi-Layer Perceptron Regressor', sklearn.neural_network.MLPRegressor(hidden_layer_sizes=(15,),
activation='relu'))
]
for name, model in estimators:
## (2) FIT - train model
model.fit(X, y)
## (3) PREDICT - make predictions
y_hat = model.predict(X)
## (4) SCORE - evaluate model
score = model.score(X, y)
print('[Score] %s: %.3f' % (name, score))
# figure settings
ax.plot(x, y_hat, label=name, lw=3)
ax.set_title('Estimators Comparison Matrix')
ax.legend();
# -
# #### Support Vector Machine Regressor
# ##### Linear Features
#
# $f(x)$ is a highly non-linear function (cubic) and therefore it cannot be adequately modelled by a linear estimator,
# nonetheless we will fit a `Linear SVM Regressor` and assess its performance (visually).
# +
# initialize figure
_, ax = plt.subplots(figsize=(20.0, 6.0))
# true data
ax.plot(x, y, lw=3, label='Observations')
## (1) INIT
svr = sklearn.svm.SVR(C=1.0, kernel='linear')
## (2) FIT
svr.fit(X, y)
## (3) PREDICT
y_hat = svr.predict(X)
## (4) SCORE
score = svr.score(X, y)
print('[Score] %s: %.3f' % ('Linear SVM', score))
# figure settings
ax.plot(x, y_hat, label='Model')
ax.set_title('Linear SVM Regressor')
ax.legend();
# -
# ##### Cubic Features
# +
# preprocessor
poly = sklearn.preprocessing.PolynomialFeatures(degree=3)
# generate cubic features
X_transform = poly.fit_transform(X)
##### REPEAT THE SAME, X -> X_transform #####
# initialize figure
_, ax = plt.subplots(figsize=(20.0, 6.0))
# true data
ax.plot(x, y, lw=3, label='Observations')
## (1) INIT
svr = sklearn.svm.SVR(C=1.0, kernel='linear')
## (2) FIT
svr.fit(X_transform, y) # X -> X_transform #
## (3) PREDICT
y_hat = svr.predict(X_transform)
## (4) SCORE
score = svr.score(X_transform, y)
print('[Score] %s: %.3f' % ('Linear SVM with Cubic Features', score))
# figure settings
ax.plot(x, y_hat, label='Model')
ax.set_title('Linear SVM Regressor with Cubic Features')
ax.legend();
# -
# #### `challenge` AdaBoost Regressor
#
# Model the continuous function using the Boosting Regressor `AdaBoostRegressor`:
#
# $$g(x) = -0.3x^{7} + 7x^{2} + \epsilon, \quad x \in [-0.5, 1] \text{ and } \epsilon \sim \mathcal{N}(0, 0.5)$$
# fetch data
x_a = np.linspace(-0.5, 1, 500)
# targets
y_a = (-0.3*x_a**7 + 7*x_a**2) + np.random.normal(0, 0.5, len(x_a))
# features matrix
X_a = x_a.reshape(-1, 1)
# +
# preprocessor
poly = sklearn.preprocessing.PolynomialFeatures(degree=7)
# generate 7th-order polynomial features (the earlier "cubic" comment was stale)
X_transform_a = poly.fit_transform(X_a)
# initialize figure
_, ax = plt.subplots(figsize=(20.0, 6.0))
# true data
ax.plot(x_a, y_a, label='Observations')
## (1) INIT
# The original challenge left these blank (a syntax error); filled in with the
# standard INIT/FIT/PREDICT/SCORE pipeline on the polynomial features.
ada = sklearn.ensemble.AdaBoostRegressor(n_estimators=100)
## (2) FIT
ada.fit(X_transform_a, y_a)
## (3) PREDICT
y_hat = ada.predict(X_transform_a)
## (4) SCORE
score = ada.score(X_transform_a, y_a)
print('[Score] %s: %.3f' % ('AdaBoostRegressor with 7th Order Polynomial Features', score))
# figure settings
ax.plot(x_a, y_hat, lw=3, label='Model')
ax.set_title('AdaBoostRegressor with 7th Order Polynomial Features')
ax.legend();
# -
# ##### Unit Test for `AdaBoostRegressor`
# unit test function
assert score > 0.9, 'Wrong implementation, low score!! Try again!'
'Well Done!'
# ### Model Selection <a class="anchor" id="sk:model-selection"></a>
# #### Hyperparameters <a class="anchor" id="sk:hyperparameters"></a>
# +
# initialize figure
_, ax = plt.subplots(figsize=(20.0, 6.0))
# true data
ax.plot(x, y, label='original', lw=3)
# parameters
n_estimators_params = [1, 2, 10, 25]
for n_estimators in n_estimators_params:
# initialize estimator
gbr = sklearn.ensemble.GradientBoostingRegressor(n_estimators=n_estimators).fit(X, y)
# prediction
y_hat = gbr.predict(X)
# figure settings
ax.plot(x, y_hat, label='n_estimators=%s' % n_estimators)
ax.set_title('Gradient Boosting Tree Regressor')
ax.legend();
# +
# preprocessor
poly = sklearn.preprocessing.PolynomialFeatures(degree=3)
# generate cubic features
X_transform = poly.fit_transform(X)
# initialize figure
_, ax = plt.subplots(figsize=(20.0, 6.0))
# true data
ax.plot(x, y, label='original', lw=3)
# parameters
c_params = [1.0, 0.001]
for c in c_params:
# initialize estimator
svr = sklearn.svm.SVR(C=c, kernel='linear').fit(X_transform, y)
# prediction
y_hat = svr.predict(X_transform)
# figure settings
ax.plot(x, y_hat, label='c=%s' % c)
ax.set_title('SVM Regressor')
ax.legend();
# -
# #### `GridSearchCV` <a class="anchor" id="sk:GridSearchCV"></a>
# +
# fetch data
digits = datasets.load_digits()
X_train_raw, X_test_raw, y_train, y_test = sklearn.model_selection.train_test_split(digits.data,
digits.target,
test_size=0.25)
# use PCA to reduce input dimensionality
pca = sklearn.decomposition.PCA(n_components=10)
X_train = pca.fit_transform(X_train_raw)
X_test = pca.transform(X_test_raw)
# estimator hyperparameters grid
param_grid = {'n_estimators': [1, 5, 10, 25, 50, 75, 100, 200],
'max_depth': [5, 7, 11, 15, 20, 45]
}
## (1) INIT - set hyperparameters **RANGES**, not single values
search = sklearn.model_selection.GridSearchCV(sklearn.ensemble.RandomForestClassifier(), param_grid)
## (2) FIT
search.fit(X_train, y_train)
## (**) REPORT - cross validation results
results = pd.DataFrame(search.cv_results_).set_index(['param_' + key for key in param_grid.keys()])
mean_test_score = results['mean_test_score'].unstack(0)
# figure settings
_, ax = plt.subplots(figsize=(20.0, 6.0))
sns.heatmap(mean_test_score, annot=True, cmap=plt.cm.Reds, ax=ax)
ax.set_title('Cross-Validation Accuracy')
## (**) SELECT - pick the best model
model = search.best_estimator_
print('[Select] Best parameters: %s' % search.best_params_)
## (3) PREDICT
y_hat = model.predict(X_test)
## (4) SCORE
score = model.score(X_test, y_test)
print('[Score] %s: %.3f' % ('Best Random Forest Classifier', score))
# confusion matrix
cm = sklearn.metrics.confusion_matrix(y_test, y_hat)
# figure settings
_, ax = plt.subplots(figsize=(20.0, 6.0))
sns.heatmap(cm, ax=ax, annot=True, cmap=plt.cm.Blues)
ax.set_title('Random Forest Classifier')
ax.set_xlabel('Predicted Label')
ax.set_ylabel('True Label');
# -
# ### Pipeline <a class="anchor" id="sk:pipeline"></a>
# `sklearn.pipeline.Pipeline` is a container that put all the pieces:
#
# 1. Preprocessing
# 1. Estimation
# 1. Model Selection
#
# together, using the common `fit`-`predict`-`score` API
# +
# fetch data
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(digits.data,
digits.target,
test_size=0.25)
# data flow
steps = [('pca', sklearn.decomposition.PCA(n_components=2)),
('rf', sklearn.ensemble.RandomForestClassifier())]
# estimator hyperparameters grid
# prepend the name of the step that the parameter corresponds to
# (i.e 'pca' or 'rf') followed by two underscores '__'
param_grid = {'pca__n_components': [5, 10, 20, 35, 45, 64],
'rf__n_estimators': [1, 25, 50, 75, 100, 200],
'rf__max_depth': [5, 11, 15, 20, 45]
}
## (1) INIT
# pipeline
pipe = sklearn.pipeline.Pipeline(steps=steps)
# grid-search
search = sklearn.model_selection.GridSearchCV(pipe, param_grid)
## (2) FIT
search.fit(X_train, y_train)
## (**) SELECT - pick the best model
model = search.best_estimator_
print('[Select] Best parameters: %s' % search.best_params_)
## (3) PREDICT
y_hat = model.predict(X_test)
## (4) SCORE
score = model.score(X_test, y_test)
print('[Score] %s: %.3f' % ('Best Random Forest Classifier', score))
# confusion matrix
cm = sklearn.metrics.confusion_matrix(y_test, y_hat)
# figure settings
_, ax = plt.subplots(figsize=(20.0, 6.0))
sns.heatmap(cm, ax=ax, annot=True, cmap=plt.cm.Blues)
ax.set_title('Pipeline of PCA and Random Forest Classifier')
ax.set_xlabel('Predicted Label')
ax.set_ylabel('True Label');
# -
# #### `challenge` Naive [`tpot`](https://github.com/EpistasisLab/tpot) Framework
#
# Code a `pipeline` that optimizes both the estimator type and its hyperparameters on the raw digits dataset.
# ## `keras` <a class="anchor" id="keras"></a>
# ### Iris Dataset <a class="anchor" id="keras:iris"></a>
# +
# %reset -f
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Activation
from keras.optimizers import RMSprop
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# -
# #### Data & Preprocessing
# +
# fetch data
iris = datasets.load_iris()
# split to train/test datasets
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
# one-hot encode categorical output
y_train_one_hot = to_categorical(y_train)
y_test_one_hot = to_categorical(y_test)
# matrix shapes
N_train, D = X_train.shape
N_test, _ = X_test.shape
_, M = y_train_one_hot.shape
# -
# #### Feedforward Neural Network
# +
# input layer
X = Input(shape=(D,), name="X")
# Convolution Layer
A1 = Dense(16, name="A1")(X)
# Non-Linearity
Z1 = Activation("relu", name="Z1")(A1)
# Affine Layer
A2 = Dense(M, name="A2")(Z1)
# Multi-Class Classification
Y = Activation("softmax", name="Y")(A2)
# Define Graph
model = Model(inputs=X, outputs=Y)
# Compile Graph
model.compile(optimizer=RMSprop(lr=0.04),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Computational Graph Summary
model.summary()
# -
# model training
history = model.fit(X_train, y_train_one_hot, epochs=100, validation_split=0.25, verbose=0)
# #### Evaluation
# +
y_hat_one_hot = model.predict(X_test)
# one-hot-encoded to raw data
y_hat = np.argmax(y_hat_one_hot, axis=1)
# confusion matrix
cm = confusion_matrix(y_test, y_hat)
cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
_, ax = plt.subplots(figsize=(7.0, 6.0))
sns.heatmap(cm_norm, annot=True, cmap=plt.cm.Blues, ax=ax)
ax.set_xticklabels(iris.target_names, rotation=45)
ax.set_yticklabels(iris.target_names, rotation=45)
ax.set_title('Iris Dataset Confusion Matrix')
ax.set_xlabel('Predicted Class')
ax.set_ylabel('True Class');
# -
# ### Fashion MNIST <a class="anchor" id="keras:fashion-mnist"></a>
# +
# %reset -f
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Dropout
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.optimizers import RMSprop
from keras.utils import to_categorical
from keras.datasets import fashion_mnist
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
np.random.seed(0)
# -
# #### Data & Preprocessing
# +
import pickle
# fetch data
(X_train_raw, y_train), (X_test_raw, y_test) = pickle.load(open('data/fashion-mnist/fashion-mnist.pkl', 'rb'))
## ORIGINAL IMPLEMENTATION
# (X_train_raw, y_train), (X_test_raw, y_test) = fashion_mnist.load_data()
# one-hot encode categorical output
y_train_one_hot = to_categorical(y_train)
y_test_one_hot = to_categorical(y_test)
# tensor shape
N_train, h, w = X_train_raw.shape
N_test, _, _ = X_test_raw.shape
_, M = y_train_one_hot.shape
# convert raw pixels to tensors
X_train = X_train_raw.reshape(-1, h, w, 1)
X_test = X_test_raw.reshape(-1, h, w, 1)
# -
# #### Convolutional Neural Network
# +
# input layer: shape=(height, width, number of channels)
X = Input(shape=(h, w, 1), name="X")
# Convolution Layer
CONV1 = Conv2D(filters=32, kernel_size=(3, 3), activation="relu", name="CONV1")(X)
# Max Pooling Layer
POOL1 = MaxPooling2D(pool_size=(2, 2), name="POOL1")(CONV1)
# Convolution Layer
CONV2 = Conv2D(filters=32, kernel_size=(3, 3), activation="relu", name="CONV2")(POOL1)
# Max Pooling Layer
POOL2 = MaxPooling2D(pool_size=(2, 2), name="POOL2")(CONV2)
# Convolution Layer
CONV3 = Conv2D(filters=64, kernel_size=(3, 3), activation="relu", name="CONV3")(POOL2)
# Max Pooling Layer
POOL3 = MaxPooling2D(pool_size=(2, 2), name="POOL3")(CONV3)
# Convert 3D feature map to 1D
FLAT = Flatten()(POOL3)
# Fully Connected Layer
FC1 = Dense(units=64, name="FC1")(FLAT)
# Dropout
DROP = Dropout(rate=0.5, name="DROP")(FC1)
# Multi-Class Classification Output Layer
Y = Dense(M, activation="softmax", name="Y")(DROP)
# Define Graph
model = Model(inputs=X, outputs=Y)
# Compile Graph
model.compile(optimizer=RMSprop(),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Computational Graph Summary
model.summary()
# -
# model training
history = model.fit(X_train, y_train_one_hot, batch_size=128, epochs=3, validation_split=0.25)
# #### Evaluation
# +
y_hat_one_hot = model.predict(X_test)
# one-hot-encoded to raw data
y_hat = np.argmax(y_hat_one_hot, axis=1)
# confusion matrix
cm = confusion_matrix(y_test, y_hat)
cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
_, ax = plt.subplots(figsize=(20.0, 6.0))
sns.heatmap(cm_norm, annot=True, cmap=plt.cm.Greens, ax=ax)
ax.set_title('Fashion MNIST Dataset Confusion Matrix')
ax.set_xlabel('Predicted Class')
ax.set_ylabel('True Class');
# -
# ### President Trump Generator <a class="anchor" id="keras:president-trump-generator"></a>
# +
# %reset -f
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.datasets import fashion_mnist
import numpy as np
np.random.seed(0)
# -
# #### Data & Preprocessing
# +
# fetch data
with open("data/trump/speeches.txt") as corpus_file:
corpus = corpus_file.read()
print("Loaded a corpus of {0} characters".format(len(corpus)))
corpus_length = len(corpus)
# Get a unique identifier for each char in the corpus
# then make some dicts to ease encoding and decoding
chars = sorted(list(set(corpus)))
num_chars = len(chars)
encoding = {c: i for i, c in enumerate(chars)}
decoding = {i: c for i, c in enumerate(chars)}
print("Our corpus contains {0} unique characters.".format(num_chars))
# it slices, it dices, it makes julienned datasets!
# chop up our data into X and y, slice into roughly (num_chars / skip) overlapping 'sentences'
# of length sentence_length, and encode the chars
sentence_length = 50
skip = 1
X_data = []
y_data = []
for i in range (0, len(corpus) - sentence_length, skip):
sentence = corpus[i:i + sentence_length]
next_char = corpus[i + sentence_length]
X_data.append([encoding[char] for char in sentence])
y_data.append(encoding[next_char])
num_sentences = len(X_data)
print("Sliced our corpus into {0} sentences of length {1}".format(num_sentences, sentence_length))
# Vectorize our data and labels. We want everything in one-hot
# because smart data encoding cultivates phronesis and virtue.
print("Vectorizing X and y...")
X = np.zeros((num_sentences, sentence_length, num_chars), dtype=np.bool)
y = np.zeros((num_sentences, num_chars), dtype=np.bool)
for i, sentence in enumerate(X_data):
for t, encoded_char in enumerate(sentence):
X[i, t, encoded_char] = 1
y[i, y_data[i]] = 1
# Double check our vectorized data before we sink hours into fitting a model
print("Sanity check y. Dimension: {0} # Sentences: {1} Characters in corpus: {2}".format(y.shape, num_sentences, len(chars)))
print("Sanity check X. Dimension: {0} Sentence length: {1}".format(X.shape, sentence_length))
# -
# #### Recurrent Neural Network
# +
model = Sequential()
model.add(LSTM(units=256, input_shape=(sentence_length, num_chars), name="LSTM"))
model.add(Dense(units=num_chars, activation="softmax", name="Y"))
# Compile Graph
model.compile(optimizer=Adam(),
loss='categorical_crossentropy')
# Computational Graph Summary
model.summary()
# -
# remove this line to train the model again
if None:
# Dump our model architecture to a file so we can load it elsewhere
architecture = model.to_yaml()
with open('models/trump/architecture.yaml', 'a') as model_file:
model_file.write(architecture)
# Set up checkpoints
file_path="models/trump/weights-{epoch:02d}-{loss:.3f}.hdf5"
checkpoint = ModelCheckpoint(file_path, monitor="loss", verbose=1, save_best_only=True, mode="min")
callbacks = [checkpoint]
# model training
history = model.fit(X, y, epochs=30, batch_size=256, callbacks=callbacks)
else:
# load weights from checkpoint
model.load_weights("models/trump/weights-03-2.152.hdf5")
# #### Helper Functions
# +
def generate(seed_pattern):
    """Generate 10 characters from the trained LSTM, seeded with `seed_pattern`.

    Relies on module globals: `model`, `sentence_length`, `num_chars`,
    `encoding` and `decoding`.
    """
    # np.bool was removed in modern NumPy; the builtin bool is equivalent here.
    X = np.zeros((1, sentence_length, num_chars), dtype=bool)
    for i, character in enumerate(seed_pattern):
        X[0, i, encoding[character]] = 1
    generated_text = ""
    for i in range(10):
        # greedy decoding: always take the most probable next character
        prediction = np.argmax(model.predict(X, verbose=0))
        generated_text += decoding[prediction]
        # slide the window one step and append the freshly predicted character
        activations = np.zeros((1, 1, num_chars), dtype=bool)
        activations[0, 0, prediction] = 1
        X = np.concatenate((X[:, 1:, :], activations), axis=1)
    return generated_text

def make_seed(seed_phrase=""):
    """Build a seed pattern of exactly `sentence_length` characters.

    A non-empty `seed_phrase` is repeated/truncated to fit; otherwise a
    random slice of the corpus is used (globals: `corpus`, `corpus_length`).
    """
    if seed_phrase:
        phrase_length = len(seed_phrase)
        pattern = ""
        for i in range (0, sentence_length):
            pattern += seed_phrase[i % phrase_length]
    else:
        # BUG FIX: `randint` was never imported, so this branch raised NameError.
        from random import randint
        seed = randint(0, corpus_length - sentence_length)
        pattern = corpus[seed:seed + sentence_length]
    return pattern
# -
# #### Evaluation
# seed letter
seed = "b"
# prediction
seed + generate(make_seed(seed))
# ## `fbprophet` <a class="anchor" id="fbprophet"></a>
# +
# %reset -f
from fbprophet import Prophet
import pandas_datareader.data as web
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
np.random.seed(0)
# -
# #### Data & Preprocessing
# +
import pickle

# Load the cached BTC price history. Using `with` closes the file handle,
# which the original bare `pickle.load(open(...))` call leaked.
with open('data/bitcoin/btc.pkl', 'rb') as btc_file:
    btc = pickle.load(btc_file)

## ORIGINAL IMPLEMENTATION (live download):
# btc = web.DataReader("BCHARTS/KRAKENUSD", data_source="quandl").dropna()

# Prophet expects a two-column frame: 'ds' (datestamp) and 'y' (value).
df = pd.DataFrame(columns=["ds", "y"])
df["ds"] = btc.index
# Model the traded market value: close price times BTC volume.
df["y"] = (btc["Close"] * btc["VolumeBTC"]).values
df.head()
# -
# #### MAP Optimization and Hamiltonian Monte Carlo Inference
# +
# define model; daily_seasonality=True adds a within-day seasonal component
model = Prophet(daily_seasonality=True)
# train model (fit on the ds/y frame built above)
model.fit(df);
# -
# #### Evaluation
# +
# Extend the timeline one year past the observed data, then forecast it.
future = model.make_future_dataframe(periods=365)
forecast = model.predict(future)
# generate plots: forecast with uncertainty band, then trend/seasonality components
model.plot(forecast);
model.plot_components(forecast);
# -
# ## Disclaimer
#
# Presentations are intended for educational purposes only and do not replace independent professional judgment.
# Statements of fact and opinions expressed are those of the participants individually and,
# unless expressly stated to the contrary, are not the opinion or position of the ICDSS, its cosponsors, or its committees.
# The ICDSS does not endorse or approve, and assumes no responsibility for, the content,
# accuracy or completeness of the information presented.
# Attendees should note that sessions are video-recorded and may be published in various media, including print,
# audio and video formats without further notice.
| notebooks/Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error,SCORERS
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
# Models
from sklearn.ensemble import RandomForestRegressor
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from xgboost import XGBRegressor
import xgboost as xgb
#To export the model
import pickle
# !ls
# -
np.random.seed(30)
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
df = pd.read_csv('/kaggle/input/nyc-rolling-data.csv')
# -
df.head()
df.columns
# # Data Cleaning/Validation
# +
# Building class category: keep the two leading digits and store as int.
df['BUILDING CLASS CATEGORY'] = df['BUILDING CLASS CATEGORY'].str[:2].astype(int)

# EASE-MENT is 100% NaN, APARTMENT NUMBER ~70% NaN, and ADDRESS is too
# specific to generalize — drop all three.
df.drop(columns=['EASE-MENT', 'APARTMENT NUMBER', 'ADDRESS'], inplace=True)

# Discard transfers made without cash consideration (recorded as price 0).
df = df[df['SALE PRICE'] != 0]

# Flag remaining missing values with a -1 sentinel.
df.fillna(-1, inplace=True)
# -
df.head()
df.info()
# # Random Forrest Regressor
print(df.columns)
cat_names=['SALE DATE','BUILDING CLASS AT TIME OF SALE','ZIP CODE','BUILDING CLASS AT PRESENT','NEIGHBORHOOD']
cont_names=['TAX CLASS AT TIME OF SALE','YEAR BUILT','GROSS SQUARE FEET','LAND SQUARE FEET',
'RESIDENTIAL UNITS', 'COMMERCIAL UNITS', 'TOTAL UNITS','BLOCK', 'LOT','TAX CLASS AT PRESENT','BOROUGH', 'BUILDING CLASS CATEGORY']
target='SALE PRICE'
# ## Baseline
# Baseline: drop the raw categorical columns (no encoding yet) and fit a
# default random forest.
# NOTE(review): train_test_split has no random_state here, so the split (and
# the MAE below) changes on every run — confirm whether that is intended.
X_train ,X_test, y_train, y_test = train_test_split(df.drop(columns=['SALE PRICE','SALE DATE','NEIGHBORHOOD','TAX CLASS AT PRESENT','BUILDING CLASS AT PRESENT',
                                                'BUILDING CLASS AT TIME OF SALE']),df['SALE PRICE'],test_size=0.2)
model_rf = RandomForestRegressor(random_state=0,n_jobs=4)
model_rf.fit(X_train,y_train)
predicted = model_rf.predict(X_test)
# Held-out mean absolute error of the baseline model.
mean_absolute_error(y_test,predicted)
# +
# Plotting feature importance of the numeric features
importances = model_rf.feature_importances_
# Std-dev of each feature's importance across the individual trees,
# used as error bars below.
std = np.std([est.feature_importances_ for est in model_rf.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

# Plot the impurity-based feature importances of the forest
plt.figure(figsize=(15, 7))
plt.title("Feature importance")
plt.bar(range(X_train.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), indices)
plt.xlim([-1, X_train.shape[1]])
plt.xlabel('Feature')
plt.ylabel('Importance')
# BUG FIX: save BEFORE show(). plt.show() releases the current figure, so
# the original savefig-after-show wrote an empty PNG.
plt.savefig('NumericalFeatures.png')
plt.show()
# -
#
# |Feature ranking|Feature number|Feature name|
# |--------------|--------------|------------|
# |1 |2 |Block |
# |2 |9 |Gross square feet|
# |3 |3 |Lot |
# |4 |4 |Zipcode |
# |5 |8 |Land square feet|
# |6 |10 |Year built|
# |7             |6             |Commercial units|
# |8 |7 |Total units|
# |9 |1 |Building Class Category|
# |10 |0 |Borough|
# |11 |11 |Tax class at time of sale|
# |12 |5 |Residential units|
# # Label Encoding
# +
# label encoder for each cat variable so 5
# One LabelEncoder per categorical column, kept as named module-level
# objects so the fitted mappings can be reused (e.g. inverse_transform)
# later — do not fold into a throwaway loop variable.
neighborhood_encoder=LabelEncoder()
tax_class_present_encoder=LabelEncoder()
building_class_encoder=LabelEncoder()
building_class_at_sale_encoder=LabelEncoder()
sale_date_encode=LabelEncoder()

df['NEIGHBORHOOD'] = neighborhood_encoder.fit_transform(df['NEIGHBORHOOD'])
df['BUILDING CLASS AT TIME OF SALE']=building_class_at_sale_encoder.fit_transform(df['BUILDING CLASS AT TIME OF SALE'])
# astype(str): these two columns mix strings with the -1 NaN sentinel from
# fillna, and LabelEncoder cannot sort mixed int/str values.
df['TAX CLASS AT PRESENT']=tax_class_present_encoder.fit_transform(df['TAX CLASS AT PRESENT'].astype(str))
df['BUILDING CLASS AT PRESENT']=building_class_encoder.fit_transform(df['BUILDING CLASS AT PRESENT'].astype(str))
# NOTE(review): label-encoding SALE DATE yields arbitrary ordinal codes, not
# a true time ordering unless the raw strings sort chronologically — verify.
df['SALE DATE']=sale_date_encode.fit_transform(df['SALE DATE'])
# +
X_train, X_test, y_train, y_test = train_test_split(df.drop(columns=['SALE PRICE',]), df['SALE PRICE'], test_size=0.2)
# -
model_rf = RandomForestRegressor(random_state=0,n_jobs=4)
model_rf.fit(X_train,y_train)
predicted = model_rf.predict(X_test)
mean_absolute_error(y_test,predicted)
# +
# Plotting feature importance of all (now label-encoded) features
importances = model_rf.feature_importances_
# Std-dev of each feature's importance across the individual trees,
# used as error bars below.
std = np.std([est.feature_importances_ for est in model_rf.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]

# Print the feature ranking
print("Feature ranking:")
for f in range(X_train.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

# Plot the impurity-based feature importances of the forest
plt.figure(figsize=(15, 7))
plt.title("Feature importance")
plt.bar(range(X_train.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), indices)
plt.xlim([-1, X_train.shape[1]])
plt.xlabel('Feature')
plt.ylabel('Importance')
# BUG FIX: save BEFORE show(). plt.show() releases the current figure, so
# the original savefig-after-show wrote an empty PNG.
plt.savefig('AllFeatures.png')
plt.show()
# -
X_train.info()
# |Feature ranking|Feature number|Feature name|
# |--------------|--------------|------------|
# |1 |4 |Block |
# |2 |12 |Gross square feet|
# |3 |5 |Lot |
# |4 |7 |Zipcode |
# |5 |16 |Sale Date|
# |6 |11 |Land Square Feet|
# |7 |13 |Year Built|
# |8 |10 |Total units|
# |9 |1 |Neighborhood|
# |10 |2 |Building Class Category|
# |11 |9 |Commercial units|
# |12 |15 |Building class at time of sale|
# |13 |6 |Building class at present|
# |14 | 0|Borough|
# |15 |8|Residential Units|
# |16 |14|Tax Class At Time of Sale|
# |17 |3| Tax Class At Present|
#
#
#
# # Using cross validation
# +
# Random-forest CV score as a function of the number of folds.
model_rf = RandomForestRegressor(random_state=0,n_jobs=4)

split = list(range(2, 10))
# Mean negated MAE per fold count (closer to 0 is better).
error = [np.mean(cross_val_score(model_rf, X_train, y_train, cv=k,
                                 scoring='neg_mean_absolute_error'))
         for k in split]
plt.plot(split, error)
# -
# ### Best cv at 7 folds gives roughly the same mae as the regular model
def smallest_error(scores):
    """Return the best score in *scores*.

    The cross-validation scores above use scikit-learn's
    'neg_mean_absolute_error', i.e. they are negated MAEs, so the maximum
    value corresponds to the smallest absolute error.

    Replaces a hand-rolled max loop whose parameter shadowed the built-in
    ``list`` and whose accumulator shadowed ``max``.
    """
    return max(scores)
smallest_error(error)
# # Ordinary least squares
x_sm = sm.add_constant(X_train)
model = sm.OLS(y_train,x_sm)
model.fit().summary()
# ### Model explains around 20% of the variation, which is poor
# # Linear regressor
# ## Baseline
model_lr= LinearRegression(n_jobs=4)
model_lr.fit(X_train, y_train)
prediction = model_lr.predict(X_test)
mean_absolute_error(y_test,prediction)
# ### MAE of 16.94 billion
# +
## Cross Validation
# -
model_lr= LinearRegression(n_jobs=4)
np.mean(cross_val_score(model_lr,X_train,y_train,cv=4,scoring='neg_mean_absolute_error'))
# ### MAE of 5.47 billion
# # Lasso Regressor
# ## Baseline
model_lasso=Lasso()
model_lasso.fit(X_train, y_train)
prediction = model_lasso.predict(X_test)
mean_absolute_error(y_test,prediction)
# ## Cross validation
model_lasso=Lasso()
model_lasso.fit(X_train, y_train)
prediction = model_lasso.predict(X_test)
np.mean(cross_val_score(model_lasso,X_train,y_train,cv=7,scoring='neg_mean_absolute_error'))
# ## Cross validation while changing alpha
# +
# Sweep the Lasso regularization strength and record the mean CV score for
# each alpha.
alpha = []
error = []
for i in range(1, 100, 25):
    a = i / 100
    alpha.append(a)
    model_las = Lasso(alpha=a)
    # BUG FIX: the original scored the default-alpha `model_lasso` here, so
    # the sweep never actually varied alpha — which is exactly why the curve
    # below looked flat.
    error.append(np.mean(cross_val_score(model_las, X_train, y_train, cv=3,
                                         scoring='neg_mean_absolute_error')))
plt.plot(alpha, error)
# +
### Alpha doesn't affect the mean cross-validation score; perhaps the model is not the right choice
# -
# # Ridge Regressor
# ## Baseline
# +
model_rdg = Ridge(alpha=1, normalize=True)
model_rdg.fit(X_train, y_train)
prediction = model_rdg.predict(X_test)
mean_absolute_error(y_test,prediction)
# -
# ## Cross validation
# +
# Ridge: mean CV score as a function of alpha.
# NOTE(review): Ridge(normalize=...) was removed in scikit-learn 1.2; kept
# here for parity with the original environment — confirm sklearn version.
error = []
alpha = []
for i in range(1, 200, 25):
    a = i / 100
    alpha.append(a)
    model_rdg = Ridge(alpha=a, normalize=True)
    error.append(np.mean(cross_val_score(model_rdg, X_train, y_train, cv=4,
                                         scoring='neg_mean_absolute_error')))
plt.plot(alpha, error)
# -
# # XGBoost
# ## Baseline
model_xgb = XGBRegressor(n_jobs=4)
model_xgb.fit(X_train, y_train)
prediction = model_xgb.predict(X_test)
mean_absolute_error(y_test,prediction)
# ## Tuning
# ### Learning Rate
error = []
lr=[]
for x in range(1,10):
model_xgb = XGBRegressor(n_jobs=4,learning_rate=(x/100))
model_xgb.fit(X_train, y_train,
early_stopping_rounds=5,
eval_set=[(X_test, y_test)],
verbose=True)
prediction =model_xgb.predict(X_test)
lr.append(x/100)
error.append(mean_absolute_error(y_test,prediction))
plt.plot(lr,error)
# ### Rounds
# +
model_xgb = XGBRegressor(n_estimators=1000,n_jobs=4,learning_rate=0.04)
model_xgb.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
verbose=False)
prediction =model_xgb.predict(X_test)
mean_absolute_error(y_test,prediction)
# +
# save best model to pickle
# -
# Persist the model for reuse elsewhere. `with` closes the handle, which the
# original `pickle.dump(..., open(...))` leaked.
# NOTE(review): this saves the random-forest model (model_rf), not the tuned
# XGBoost model above — confirm which one is meant to be the "best" model.
pickl = {'model': model_rf}
with open('model_file' + ".p", "wb") as model_out:
    pickle.dump(pickl, model_out)
| label-enc-nyc-rolling-sale-price-models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="VhY3NirhQcm8"
# # Design of Experiments
# -
# ## Bibliographic Notes
#
# BV, Section 7.5
#
# https://www.kaggle.com/datasets/ashydv/advertising-dataset?select=advertising.csv
#
#
# https://www.kaggle.com/code/malik4real/optimization-of-facebook-ad-campaign/data
#
# PICOS
#
# + [markdown] id="RZgGR1UKQ2HT" tags=[]
# ## Problem
#
# An established firm selling services in a particular urban market has been relying on television, radio and newspaper advertising to promote sales. The company has been recording weekly sales and expenses to monitor the performance of their advertising strategy.
#
# New leadership at the firm has suggested expanding their advertising strategy to include ad-word buys on internet platforms. But without prior experience with internet advertising in that particular market, the current staff would like perform experiments to determine if the proposed change would be a cost-effective means of increasing sales.
#
# What experiments should the firm perform?
# -
# ## Data Set
# + id="xEWb5oJ5Qa4t"
import pandas as pd
# Weekly sales vs. per-channel ad spend; all three channels are overlaid on
# one pair of axes (via ax=) so their relationships to Sales can be compared.
df = pd.read_csv("advertising.csv")
ax = df.plot(y="Sales", x="TV", kind="scatter", color='r', alpha=0.3)
df.plot(y="Sales", x="Radio", kind="scatter", color='g', alpha=0.3, ax=ax)
df.plot(y="Sales", x="Newspaper", kind="scatter", color='b', alpha=0.3, ax=ax)
# -
df
| notebooks/05/design-of-experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Here I am checking for how many datapoints prediction changes after switching the value of protected attribute (After Preprocessing)
import pandas as pd
import random,time,csv
import numpy as np
import math,copy,os
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import sklearn.metrics as metrics
import sys
sys.path.append(os.path.abspath('..'))
from Measure import measure_final_score,calculate_recall,calculate_far,calculate_precision,calculate_accuracy
# +
## Load dataset
from sklearn import preprocessing
dataset_orig = pd.read_csv('../Unbiased_Dataset/Heart_Health_Age.csv')
dataset_orig.columns
# +
## Divide into train,validation,test
# dataset_orig_train, dataset_orig_test = train_test_split(dataset_orig, test_size=0.2, random_state = 0, shuffle = True)
dataset_orig_train, dataset_orig_test = train_test_split(dataset_orig, test_size=0.2, shuffle = True)
X_train, y_train = dataset_orig_train.loc[:, dataset_orig_train.columns != 'Probability'], dataset_orig_train['Probability']
X_test , y_test = dataset_orig_test.loc[:, dataset_orig_test.columns != 'Probability'], dataset_orig_test['Probability']
# -
# Train LSR model
clf = LogisticRegression(C=1.0, penalty='l2', solver='liblinear', max_iter=100)
clf.fit(X_train, y_train)
# Counterfactual flip test: for every test row, predict once as-is and once
# with the protected attribute switched, then count how often the two
# predictions disagree.
same, not_same = 0, 0
for _, row in dataset_orig_test.iterrows():
    feats = [row.values[0:len(row.values) - 1]]  # all columns except the Probability label
    pred_original = clf.predict(feats)
    # Switch the protected attribute (Age is column 0) between 0 and 1.
    feats[0][0] = 1 if feats[0][0] == 0 else 0
    pred_flipped = clf.predict(feats)
    if pred_original[0] != pred_flipped[0]:
        not_same += 1
    else:
        same += 1
print(same , not_same)
| Verification/Heart_Health_After_Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="gKk5OZEd3oi_"
# # **Springboard Decision Tree Specialty Coffee Case Study - Tier 2**
# + [markdown] colab_type="text" id="4Ee1UIwg3ojC"
# # The Scenario
#
# Imagine you've just finished the Springboard Data Science Career Track course, and have been hired by a rising popular specialty coffee company - RR Diner Coffee - as a data scientist. Congratulations!
#
# RR Diner Coffee sells two types of thing:
# - specialty coffee beans, in bulk (by the kilogram only)
# - coffee equipment and merchandise (grinders, brewing equipment, mugs, books, t-shirts).
#
# RR Diner Coffee has three stores, two in Europe and one in the USA. The flagship store is in the USA, and everything is quality assessed there, before being shipped out. Customers further away from the USA flagship store have higher shipping charges.
#
# You've been taken on at RR Diner Coffee because the company are turning towards using data science and machine learning to systematically make decisions about which coffee farmers they should strike deals with.
#
# RR Diner Coffee typically buys coffee from farmers, processes it on site, brings it back to the USA, roasts it, packages it, markets it, and ships it (only in bulk, and after quality assurance) to customers internationally. These customers all own coffee shops in major cities like New York, Paris, London, Hong Kong, Tokyo, and Berlin.
#
# Now, RR Diner Coffee has a decision about whether to strike a deal with a legendary coffee farm (known as the **Hidden Farm**) in rural China: there are rumours their coffee tastes of lychee and dark chocolate, while also being as sweet as apple juice.
#
# It's a risky decision, as the deal will be expensive, and the coffee might not be bought by customers. The stakes are high: times are tough, stocks are low, farmers are reverting to old deals with the larger enterprises and the publicity of selling *Hidden Farm* coffee could save the RR Diner Coffee business.
#
# Your first job, then, is ***to build a decision tree to predict how many units of the Hidden Farm Chinese coffee will be purchased by RR Diner Coffee's most loyal customers.***
#
# To this end, you and your team have conducted a survey of 710 of the most loyal RR Diner Coffee customers, collecting data on the customers':
# - age
# - gender
# - salary
# - whether they have bought at least one RR Diner Coffee product online
# - their distance from the flagship store in the USA (standardized to a number between 0 and 11)
# - how much they spent on RR Diner Coffee products on the week of the survey
# - how much they spent on RR Diner Coffee products in the month preceding the survey
# - the number of RR Diner coffee bean shipments each customer has ordered over the preceding year.
#
# You also asked each customer participating in the survey whether they would buy the Hidden Farm coffee, and some (but not all) of the customers gave responses to that question.
#
# You sit back and think: if more than 70% of the interviewed customers are likely to buy the Hidden Farm coffee, you will strike the deal with the local Hidden Farm farmers and sell the coffee. Otherwise, you won't strike the deal and the Hidden Farm coffee will remain in legends only. There's some doubt in your mind about whether 70% is a reasonable threshold, but it'll do for the moment.
#
# To solve the problem, then, you will build a decision tree to implement a classification solution.
#
#
# -------------------------------
# As ever, this notebook is **tiered**, meaning you can elect that tier that is right for your confidence and skill level. There are 3 tiers, with tier 1 being the easiest and tier 3 being the hardest. This is ***tier 2***, so a moderate challenge.
#
# **1. Sourcing and loading**
# - Import packages
# - Load data
# - Explore the data
#
#
# **2. Cleaning, transforming and visualizing**
# - Cleaning the data
# - Train/test split
#
#
# **3. Modelling**
# - Model 1: Entropy model - no max_depth
# - Model 2: Gini impurity model - no max_depth
# - Model 3: Entropy model - max depth 3
# - Model 4: Gini impurity model - max depth 3
#
#
# **4. Evaluating and concluding**
# - How many customers will buy Hidden Farm coffee?
# - Decision
#
# **5. Random Forest**
# - Import necessary modules
# - Model
# - Revise conclusion
#
# + [markdown] colab_type="text" id="sAH5Xa-k3ojD"
# # 0. Overview
#
# This notebook uses decision trees to determine whether the factors of salary, gender, age, how much money the customer spent last week and during the preceding month on RR Diner Coffee products, how many kilogram coffee bags the customer bought over the last year, whether they have bought at least one RR Diner Coffee product online, and their distance from the flagship store in the USA, could predict whether customers would purchase the Hidden Farm coffee if a deal with its farmers were struck.
# + [markdown] colab_type="text" id="uhP1W8xj3ojE"
# # 1. Sourcing and loading
# ## 1a. Import Packages
# + colab={} colab_type="code" id="fCCdlo0S3ojF"
import pandas as pd
import numpy as np
from sklearn import tree, metrics
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
from io import StringIO
from IPython.display import Image
import pydotplus
# + [markdown] colab_type="text" id="V63O9EtT3ojJ"
# ## 1b. Load data
# + colab={} colab_type="code" id="9CyUA3Sr3ojK"
# Read in the data to a variable called coffeeData
coffeeData = pd.read_csv("data/RRDinerCoffeeData.csv")
# + [markdown] colab_type="text" id="1-vIm7gZ3ojM"
# ## 1c. Explore the data
# + [markdown] colab_type="text" id="yXgbM-se3ojN"
# As we've seen, exploration entails doing things like checking out the **initial appearance** of the data with head(), the **dimensions** of our data with .shape, the **data types** of the variables with .info(), the **number of non-null values**, how much **memory** is being used to store the data, and finally the major summary statistcs capturing **central tendancy, dispersion and the null-excluding shape of the dataset's distribution**.
#
# How much of this can you do yourself by this point in the course? Have a real go.
# + colab={} colab_type="code" id="Sc2zr3cZ3ojO"
# Call head() on your data
coffeeData.head(8)
# + colab={} colab_type="code" id="s6OUaOUw3ojQ"
# Call .shape on your data
coffeeData.shape
# + colab={} colab_type="code" id="g0c2mJwy3ojU"
# Call info() on your data
coffeeData.info()
# + colab={} colab_type="code" id="HlWI9x003ojX"
# Call describe() on your data with the parameter include = 'all' to get the relevant summary statistics for your data
coffeeData.describe(include='all')
# + [markdown] colab_type="text" id="IMeTiYhx3ojZ"
# # 2. Cleaning, transforming and visualizing
# ## 2a. Cleaning the data
# + [markdown] colab_type="text" id="D0U5JiV73oja"
# Some datasets don't require any cleaning, but almost all do. This one does. We need to replace '1.0' and '0.0' in the 'Decision' column by 'YES' and 'NO' respectively, clean up the values of the 'gender' column, and change the column names to words which maximize meaning and clarity.
# + [markdown] colab_type="text" id="F13t5Pkt3ojb"
# First, let's change the name of `spent_week`, `spent_month`, and `SlrAY` to `spent_last_week` and `spent_last_month` and `salary` respectively.
# + colab={} colab_type="code" id="ztEMAbSB3ojb"
# Call .columns on your data to check out the names of our data's columns
coffeeData.columns
# + colab={} colab_type="code" id="H0Lte6Xp3oje"
# Using .rename(), make the relevant name changes to spent_week and spent_per_week.
# Remember: you can either do a reassignment, or use inplace=True. Both will change the value of coffeeData
coffeeData.rename(columns = {"spent_month":"spent_last_month", "spent_week":"spent_last_week", "SlrAY":"Salary"},
inplace = True)
# + colab={} colab_type="code" id="KQYCRzlL3ojg"
# Check out the column names
coffeeData.columns
# + colab={} colab_type="code" id="fA14FS_i3oji"
# Let's have a closer look at the gender column. Its values need cleaning.
# Call describe() on the gender column
coffeeData['Gender'].describe()
# + colab={} colab_type="code" id="phuJGhTy3ojk"
# Call unique() on the gender column to see its unique values
coffeeData['Gender'].unique()
# + [markdown] colab_type="text" id="ooRCYoRs3ojm"
# We can see a bunch of inconsistency here.
#
# Use replace() to make the values of the `gender` column just `Female` and `Male`.
# + colab={} colab_type="code" id="75aIRhq93ojn"
# Use the function .replace() on the column "gender"; replace all alternate values with 'Female'
coffeeData["Gender"] = coffeeData["Gender"].replace(["female", "f ", "FEMALE", "F"], "Female")
# + colab={} colab_type="code" id="jeBibzWZ3ojp"
# Let's check the unique values of the column "gender"
coffeeData['Gender'].unique()
# + colab={} colab_type="code" id="dUTN_CDP3ojr"
# Use the function .replace() on the column "gender"; replace all alternate values with "Male"
coffeeData["Gender"] = coffeeData["Gender"].replace(['Male', 'MALE', 'male', 'M'], "Male")
# + colab={} colab_type="code" id="xX6nK9133ojt"
# Let's check the unique values of the column "gender"
coffeeData['Gender'].unique()
# + colab={} colab_type="code" id="2j0P3MRw3ojv"
# Check out the unique values of the column 'Decision':
coffeeData['Decision'].unique()
# + [markdown] colab_type="text" id="rq9cFtM_3ojx"
# We now want to replace `1.0` and `0.0` in the `Decision` column by `YES` and `NO` respectively.
# + colab={} colab_type="code" id="f1TIzvLi3ojy"
# Replace the numeric labels 1.0 and 0.0 in the Decision column with the
# categorical labels 'YES' and 'NO'. (The original comment stated this
# transformation backwards.)
coffeeData["Decision"] = coffeeData["Decision"].replace(1.0, "YES")
coffeeData["Decision"] = coffeeData["Decision"].replace(0.0, "NO")
coffeeData.info()
# + colab={} colab_type="code" id="M4OwWLxl3oj2"
# Check that our replacing those values with 'YES' and 'NO' worked, with unique()
coffeeData['Decision'].unique()
# + [markdown] colab_type="text" id="QN-9--sP3oj7"
# ## 2b. Train/test split
# To execute the train/test split properly, we need to do five things:
# 1. Drop all rows with a null value in the `Decision` column, and save the result as NOPrediction: a dataset that will contain all known values for the decision
# 2. Visualize the data using scatter and boxplots of several variables in the y-axis and the decision on the x-axis
# 3. Get the subset of coffeeData with null values in the `Decision` column, and save that subset as Prediction
# 4. Divide the NOPrediction subset into X and y, and then further divide those subsets into train and test subsets for X and y respectively
# 5. Create dummy variables to deal with categorical inputs
# + [markdown] colab_type="text" id="7KqIuw0u3oj8"
# ### 1. Drop all null values within the `Decision` column, and save the result as NoPrediction
# + colab={} colab_type="code" id="veB-qSlx3oj8"
# NoPrediction will contain all known values for the decision
# Call dropna() on coffeeData, and store the result in a variable NOPrediction
# Call describe() on the Decision column of NoPrediction after calling dropna() on coffeeData
NOPrediction = coffeeData.dropna()
NOPrediction["Decision"].describe()
# + [markdown] colab_type="text" id="uBV_Daz93oj-"
# ### 2. Visualize the data using scatter and boxplots of several variables in the y-axis and the decision on the x-axis
# + colab={} colab_type="code" id="bOtNleRB3oj_"
# Exploring our new NOPrediction dataset
# Call boxplot() on our Seaborn object sns, and plug y="spent_today", x= "Decision", data=NOPrediction
# Don't forget to call plt.show() after that
sns.boxplot(y="spent_last_week", x= "Decision", data=NOPrediction)
plt.show()
# + [markdown] colab_type="text" id="yBV33HAJ3okA"
# Can you admissibly conclude anything from this boxplot? Write your answer here:
#
#
# + colab={} colab_type="code" id="at0eOa1D3okB"
# Call scatterplot() on our Seaborn object sns, and plug in y="spent_last_month", x= "Distance", hue = "Decision", data =NOPrediction.
sns.scatterplot(y="spent_last_month", x= "Distance", hue = "Decision", data =NOPrediction)
plt.show()
# + [markdown] colab_type="text" id="DhqotDIT3okD"
# Can you admissibly conclude anything from this scatterplot? Remember: we are trying to build a tree to classify unseen examples. Write your answer here:
# + [markdown] colab_type="text" id="94eqkSR83okE"
# ### 3. Get the subset of coffeeData with null values in the Decision column, and save that subset as Prediction
# + colab={} colab_type="code" id="d9U83zuO3okF"
# Get just those rows whose value for the Decision column is null. There are lots of ways to do this.
# One way is to subset on pd.isnull(data['Decision']). Use square brackets, and plug that in as parameter.
# Store the result in a variable called Prediction
# Call a head() on the result to see it's worked out alright
Prediction = coffeeData[pd.isnull(coffeeData["Decision"])]
Prediction.head()
# + colab={} colab_type="code" id="6ROB3dbn3okH"
# Call describe() on Prediction
Prediction.describe()
# + [markdown] colab_type="text" id="X-rymzL93okI"
# ### 4. Divide the NOPrediction subset into X and y
# + colab={} colab_type="code" id="x8H3Agcc3okJ"
# First of all, let's check the names of the columns of NOPrediction
NOPrediction.columns
# + colab={} colab_type="code" id="A5efb_tg3okL"
# Let's do our feature selection.
# Make a variable called 'features', and a list containing the strings of every column except "Decision"; that is:
# ["age", "gender", "num_coffeeBags_per_year", "spent_last_week", "spent_last_month", "Salary", "Distance", "Online"]
features = ["Age", "Gender", "num_coffeeBags_per_year", "spent_last_week", "spent_last_month",
"Salary", "Distance", "Online"]
# Make an explanatory variable called X, and assign it: NoPrediction[features]
X = NOPrediction[features]
# Make a dependent variable called y, and assign it: NoPrediction.Decision
y = NOPrediction.Decision
# + [markdown] colab_type="text" id="_AhtxVje3okN"
# ### 4. Further divide those subsets into train and test subsets for X and y respectively: X_train, X_test, y_train, y_test
# + colab={} colab_type="code" id="E3el1RIy3okO"
# Call train_test_split on X, y, test_size = 0.25, and random_state = 246
# Make new variables called X_train, X_test, y_train, and y_test
X_train, X_test, y_train, y_test=train_test_split(X, y,
test_size = 0.25,
random_state = 246)
# + [markdown] colab_type="text" id="yHZsXNVd3okR"
# ### 5. Create dummy variables to deal with categorical inputs
# One-hot encoding replaces each unique value of a given column with a new column, and puts a 1 in the new column for a given row just if its initial value for the original column matches the new column. Check out [this resource](https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f) if you haven't seen one-hot-encoding before.
# + colab={} colab_type="code" id="NHeVzNYw3okR"
# One-hot encode the categorical features in both splits.
X_train = pd.get_dummies(X_train)
X_test = pd.get_dummies(X_test)
# BUG FIX: align the test matrix to the training columns. Encoding each
# split separately can yield different dummy-column sets when a category
# appears in only one split, which would break (or silently mis-align)
# prediction on X_test.
X_test = X_test.reindex(columns=X_train.columns, fill_value=0)
# + [markdown] colab_type="text" id="NyBZa7Xi3okT"
# # 3. Modelling
# It's useful to look at the scikit-learn documentation on decision trees https://scikit-learn.org/stable/modules/tree.html before launching into applying them. If you haven't seen them before, take a look at that link, in particular the section `1.10.5.`
# + [markdown] colab_type="text" id="KD6n81o93okU"
# ## Model 1: Entropy model - no max_depth
#
# We'll give you a little more guidance here, as the Python is hard to deduce, and scikitlearn takes some getting used to.
#
# Theoretically, let's remind ourselves of what's going on with a decision tree implementing an entropy model.
#
# <NAME>'s **ID3 Algorithm** was one of the first, and one of the most basic, to use entropy as a metric.
#
# **Entropy** is a measure of how uncertain we are about which category the data-points fall into at a given point in the tree. The **Information gain** of a specific feature with a threshold (such as 'spent_last_month <= 138.0') is the difference in entropy that exists before and after splitting on that feature; i.e., the information we gain about the categories of the data-points by splitting on that feature and that threshold.
#
# Naturally, we want to minimize entropy and maximize information gain. Quinlan's ID3 algorithm is designed to output a tree such that the features at each node, starting from the root, and going all the way down to the leaves, have maximal information gain. We want a tree whose leaves have elements that are *homogeneous*, that is, all of the same category.
#
# The first model will be the hardest. Persevere and you'll reap the rewards: you can use almost exactly the same code for the other models.
# + colab={} colab_type="code" id="FZCPaHJ93okV"
# Declare a variable called entr_model, and assign it: tree.DecisionTreeClassifier(criterion="entropy", random_state = 1234)
entr_model = tree.DecisionTreeClassifier(criterion="entropy", random_state = 1234)
# Call fit() on entr_model, and pass in X_train and y_train, in that order
entr_model.fit(X_train, y_train)
# Call predict() on entr_model with X_test passed to it, and assign the result to a variable y_pred
y_pred = entr_model.predict(X_test)
# Assign y_pred the following: pd.Series(y_pred)
y_pred = pd.Series(y_pred)
# Check out entr_model
entr_model
# + colab={} colab_type="code" id="U_lIDQLi3okX"
# Now we want to visualize the tree: export it as Graphviz DOT text into an
# in-memory buffer, then render the DOT to a PNG for inline display.
dot_data = StringIO()
# We can do so with export_graphviz
tree.export_graphviz(entr_model, out_file=dot_data,
                     filled=True, rounded=True,
                     special_characters=True, feature_names=X_train.columns,class_names = ["NO", "YES"])
# Alternatively for class_names use entr_model.classes_
# NOTE(review): the literal ["NO", "YES"] assumes the classes sort in that
# order — confirm against entr_model.classes_.
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# + [markdown] colab_type="text" id="3noeN8O33okZ"
# ## Model 1: Entropy model - no max_depth: Interpretation and evaluation
# + colab={} colab_type="code" id="Ej2wToUD3okZ"
# Run this block for model evaluation metrics.
# Balanced accuracy averages recall over both classes, so it is the fairer
# number if YES/NO are imbalanced; precision/recall are reported per class
# via pos_label.
print("Model Entropy - no max depth")
print("Accuracy:", metrics.accuracy_score(y_test,y_pred))
print("Balanced accuracy:", metrics.balanced_accuracy_score(y_test,y_pred))
print('Precision score for "Yes"' , metrics.precision_score(y_test,y_pred, pos_label = "YES"))
print('Precision score for "No"' , metrics.precision_score(y_test,y_pred, pos_label = "NO"))
print('Recall score for "Yes"' , metrics.recall_score(y_test,y_pred, pos_label = "YES"))
print('Recall score for "No"' , metrics.recall_score(y_test,y_pred, pos_label = "NO"))
# + [markdown] colab_type="text" id="lNAC7zQq3okb"
# What can you infer from these results? Write your conclusions here:
# + [markdown] colab_type="text" id="kK7AcA-h3okd"
# ## Model 2: Gini impurity model - no max_depth
#
# Gini impurity, like entropy, is a measure of how well a given feature (and threshold) splits the data into categories.
#
# Their equations are similar, but Gini impurity doesn't require logarithmic functions, which can be computationally expensive.
# + colab={} colab_type="code" id="Sf4GIAGZ3oke"
# Model 2: same unconstrained tree, but splitting on Gini impurity instead of
# entropy (cheaper to compute; usually gives similar trees).
# Make a variable called gini_model, and assign it exactly what you assigned entr_model with above, but with the
# criterion changed to 'gini'
gini_model = tree.DecisionTreeClassifier(criterion = "gini", random_state = 1234)
# Fit on the training split, as with entr_model
gini_model.fit(X_train, y_train)
# Predict on the held-out test split
y_pred = gini_model.predict(X_test)
# Turn y_pred into a series, as before
y_pred = pd.Series(y_pred)
# Check out gini_model
gini_model
# + colab={} colab_type="code" id="jQiwN0_M3okf"
# As before, but visualize gini_model: export DOT text and render it to PNG.
dot_data = StringIO()
tree.export_graphviz(gini_model, out_file=dot_data,
                     filled=True, rounded=True,
                     special_characters=True, feature_names=X_train.columns,class_names = ["NO", "YES"])
# Alternatively for class_names use gini_model.classes_
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# + colab={} colab_type="code" id="JTTncLkT3okh"
# Run this block for model evaluation
print("Model Gini impurity model")
print("Accuracy:", metrics.accuracy_score(y_test,y_pred))
print("Balanced accuracy:", metrics.balanced_accuracy_score(y_test,y_pred))
# NOTE(review): the labels below are generic, but precision is computed for
# the "YES" class and recall for the "NO" class — not directly comparable to
# Model 1's per-class listing.
print('Precision score' , metrics.precision_score(y_test,y_pred, pos_label = "YES"))
print('Recall score' , metrics.recall_score(y_test,y_pred, pos_label = "NO"))
# + [markdown] colab_type="text" id="QUv_MQoT3okj"
# How do the results here compare to the previous model? Write your judgements here:
# + [markdown] colab_type="text" id="zMKqeTAH3okk"
# ## Model 3: Entropy model - max depth 3
# We're going to try to limit the depth of our decision tree, using entropy first.
#
# As you know, we need to strike a balance with tree depth.
#
# Insufficiently deep, and we're not giving the tree the opportunity to spot the right patterns in the training data.
#
# Excessively deep, and we're probably going to make a tree that overfits to the training data, at the cost of very high error on the (hitherto unseen) test data.
#
# Sophisticated data scientists use methods like random search with cross-validation to systematically find a good depth for their tree. We'll start with picking 3, and see how that goes.
# + colab={} colab_type="code" id="u9OVRL4V3okl"
# Model 3: entropy tree again, but capped at depth 3 to regularize — a
# shallower tree trades training accuracy for better generalization.
# Make a model as before, but call it entr_model2, and make the max_depth parameter equal to 3.
# Execute the fitting, predicting, and Series operations as before
entr_model2 = tree.DecisionTreeClassifier(criterion="entropy", max_depth = 3, random_state = 1234)
entr_model2.fit(X_train, y_train)
y_pred = entr_model2.predict(X_test)
y_pred = pd.Series(y_pred)
entr_model2
# + colab={} colab_type="code" id="kU5pjxFE3okn"
# As before, we need to visualize the tree to grasp its nature
# (depth-3 cap keeps the rendered graph small enough to read).
dot_data = StringIO()
tree.export_graphviz(entr_model2, out_file=dot_data,
                     filled=True, rounded=True,
                     special_characters=True, feature_names=X_train.columns,class_names = ["NO", "YES"])
# Alternatively for class_names use entr_model2.classes_
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# + colab={} colab_type="code" id="ii6y1OP23okp"
# Run this block for model evaluation
print("Model Entropy model max depth 3")
print("Accuracy:", metrics.accuracy_score(y_test,y_pred))
print("Balanced accuracy:", metrics.balanced_accuracy_score(y_test,y_pred))
# Precision is reported for the "YES" class, recall for the "NO" class.
print('Precision score for "Yes"' , metrics.precision_score(y_test,y_pred, pos_label = "YES"))
print('Recall score for "No"' , metrics.recall_score(y_test,y_pred, pos_label = "NO"))
# + [markdown] colab_type="text" id="AreKiZ3g3okv"
# So our accuracy decreased, but is this certainly an inferior tree to the max depth original tree we did with Model 1? Write your conclusions here:
# + [markdown] colab_type="text" id="wDkzNio53okw"
# ## Model 4: Gini impurity model - max depth 3
# We're now going to try the same with the Gini impurity model.
# + colab={} colab_type="code" id="21bkFzrG3okw"
# As before, make a variable, but call it gini_model2, and ensure the max_depth parameter is set to 3
# BUG FIX: this cell is meant to build the *Gini* model (see the markdown
# header and the evaluation print below), but the original passed
# criterion="entropy", silently duplicating Model 3. Use "gini" as intended.
gini_model2 = tree.DecisionTreeClassifier(criterion="gini", max_depth = 3, random_state = 1234)
# Do the fit, predict, and series transformations as before.
gini_model2.fit(X_train, y_train)
y_pred = gini_model2.predict(X_test)
y_pred = pd.Series(y_pred)
gini_model2
# + colab={} colab_type="code" id="9NFpPds03oky"
# Visualize gini_model2: export DOT text and render it to PNG inline.
dot_data = StringIO()
tree.export_graphviz(gini_model2, out_file=dot_data,
                     filled=True, rounded=True,
                     special_characters=True, feature_names=X_train.columns,class_names = ["NO", "YES"])
# Alternatively for class_names use gini_model2.classes_
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# + colab={} colab_type="code" id="5Fb1shzJ3ok0"
# Evaluation for Model 4 (depth-3 Gini tree).
print("Gini impurity model - max depth 3")
print("Accuracy:", metrics.accuracy_score(y_test,y_pred))
print("Balanced accuracy:", metrics.balanced_accuracy_score(y_test,y_pred))
# Precision is reported for the "YES" class, recall for the "NO" class.
print('Precision score' , metrics.precision_score(y_test,y_pred, pos_label = "YES"))
print('Recall score' , metrics.recall_score(y_test,y_pred, pos_label = "NO"))
# + [markdown] colab_type="text" id="VfnIEBHY3ok2"
# Now this is an elegant tree. Its accuracy might not be the highest, but it's still the best model we've produced so far. Why is that? Write your answer here:
# + [markdown] colab_type="text" id="7voWsyRg3ok4"
# # 4. Evaluating and concluding
# ## 4a. How many customers will buy Hidden Farm coffee?
# Let's first ascertain how many loyal customers claimed, in the survey, that they will purchase the Hidden Farm coffee.
# + colab={} colab_type="code" id="T6DVKPRc3ok6"
# Call value_counts() on the 'Decision' column of the original coffeeData
# (counts of survey YES/NO answers among the loyal customers).
coffeeData["Decision"].value_counts()
# + [markdown] colab_type="text" id="JtmW_Z_53ok8"
# Let's now determine the number of people that, according to the model, will be willing to buy the Hidden Farm coffee.
# 1. First we subset the Prediction dataset into `new_X` considering all the variables except `Decision`
# 2. Use that dataset to predict a new variable called `potential_buyers`
# + colab={} colab_type="code" id="BhsjR0UL3ok9"
# Feature selection
# Make a variable called feature_cols, and assign it a list containing all the column names except 'Decision'
feature_cols = ['Age', 'Gender', 'num_coffeeBags_per_year', 'spent_last_week', 'spent_last_month', 'Salary', 'Distance', 'Online']
# Make a variable called new_X, and assign it the subset of Prediction, containing just the feature_cols
new_X = Prediction[feature_cols]
# + colab={} colab_type="code" id="irJxY7KK3ok_"
# Call get_dummies() on the Pandas object pd, with new_X plugged in, to one-hot encode all features in the training set
# NOTE(review): get_dummies here must produce the same columns (same names,
# same order) as the encoded X_train the model was fit on — TODO verify, or
# reindex new_X to X_train's columns.
new_X = pd.get_dummies(new_X)
# Make a variable called potential_buyers, and assign it the result of calling predict() on a model of your choice;
# don't forget to pass new_X to predict()
potential_buyers = gini_model2.predict(new_X)
# + colab={} colab_type="code" id="M3HyEFOs3olA"
# Let's get the numbers of YES's and NO's in the potential buyers
# Call unique() on np, and pass potential_buyers and return_counts=True
np.unique(potential_buyers, return_counts=True)
# + [markdown] colab_type="text" id="ZEtE2SgJ3olC"
# The total number of potential buyers is 303 + 183 = 486
# + colab={} colab_type="code" id="QVP3F14d3olC"
# Print the total number of surveyed people
# (Salary has no nulls here, so its count equals the number of rows.)
print("The total number of surveyed people was", coffeeData.Salary.count())
# + colab={} colab_type="code" id="J1S31cSJ3olE"
# Let's calculate the proportion of buyers
# NOTE(review): 486 and 702 are transcribed from the cell outputs above and
# will silently go stale if the data or model changes — recompute from
# potential_buyers instead of hard-coding.
486/702
# + colab={} colab_type="code" id="MnIvwNt23olJ"
# Print the percentage of people who want to buy the Hidden Farm coffee, by our model
print("Only ", round((486/702)*100, 2), "% of people want to buy the Hidden Farm coffee." )
# + [markdown] colab_type="text" id="Hs1yV09z3olM"
# ## 4b. Decision
# Remember how you thought at the start: if more than 70% of the interviewed customers are likely to buy the Hidden Farm coffee, you will strike the deal with the local Hidden Farm farmers and sell the coffee. Otherwise, you won't strike the deal and the Hidden Farm coffee will remain in legends only. Well now's crunch time. Are you going to go ahead with that idea? If so, you won't be striking the deal with the Chinese farmers.
#
# They're called `decision trees`, aren't they? So where's the decision? What should you do? (Cue existential cat emoji).
#
# Ultimately, though, we can't write an algorithm to actually *make the business decision* for us. This is because such decisions depend on our values, what risks we are willing to take, the stakes of our decisions, and how important it is for us to *know* that we will succeed. What are you going to do with the models you've made? Are you going to risk everything, strike the deal with the *Hidden Farm* farmers, and sell the coffee?
#
# The philosopher of language <NAME> once wrote that the number of doubts our evidence has to rule out in order for us to know a given proposition depends on our stakes: the higher our stakes, the more doubts our evidence has to rule out, and therefore the harder it is for us to know things. We can end up paralyzed in predicaments; sometimes, we can act to better our situation only if we already know certain things, which we can only if our stakes were lower and we'd *already* bettered our situation.
#
# Data science and machine learning can't solve such problems. But what it can do is help us make great use of our data to help *inform* our decisions.
#
# + [markdown] colab_type="text" id="crWWrpdox8i-"
# ## 5. Random Forest
# You might have noticed an important fact about decision trees. Each time we run a given decision tree algorithm to make a prediction (such as whether customers will buy the Hidden Farm coffee) we will actually get a slightly different result. This might seem weird, but it has a simple explanation: machine learning algorithms are by definition ***stochastic***, in that their output is at least partly determined by randomness.
#
# To account for this variability and ensure that we get the most accurate prediction, we might want to actually make lots of decision trees, and get a value that captures the centre or average of the outputs of those trees. Luckily, there's a method for this, known as the ***Random Forest***.
#
# Essentially, Random Forest involves making lots of trees with similar properties, and then performing summary statistics on the outputs of those trees to reach that central value. Random forests are hugely powerful classifiers, and they can improve predictive accuracy and control over-fitting.
#
# Why not try to inform your decision with random forest? You'll need to make use of the RandomForestClassifier function within the sklearn.ensemble module, found [here](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html).
# + [markdown] colab_type="text" id="fI4fiVWq0IH9"
# ### 5a. Import necessary modules
# + colab={} colab_type="code" id="vSRTnHnD0D-O"
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
# + [markdown] colab_type="text" id="jKmFvvjb0WB9"
# ### 5b. Model
# You'll use your X_train and y_train variables just as before.
#
# You'll then need to make a variable (call it firstRFModel) to store your new Random Forest model. You'll assign this variable the result of calling RandomForestClassifier().
#
# Then, just as before, you'll call fit() on that firstRFModel variable, and plug in X_train and y_train.
#
# Finally, you should make a variable called y_pred, and assign it the result of calling the predict() method on your new firstRFModel, with the X_test data passed to it.
# + colab={} colab_type="code" id="OQEeTiRG1aSm"
# Random forest: an ensemble of depth-3 trees; random_state makes the
# bootstrap sampling and feature subsampling reproducible.
# Plug in appropriate max_depth and random_state parameters
firstRFModel = RandomForestClassifier(max_depth = 3, random_state = 1234)
firstRFModel.fit(X_train, y_train)
y_pred = firstRFModel.predict(X_test)
# Wrap predictions in a Series, as with the single-tree models
y_pred = pd.Series(y_pred)
# Check out firstRFModel
firstRFModel
# -
# Evaluation, mirroring the single-tree metric blocks above.
print("firstRFModel impurity model - max depth 3")
print("Accuracy:", metrics.accuracy_score(y_test,y_pred))
print("Balanced accuracy:", metrics.balanced_accuracy_score(y_test,y_pred))
print('Precision score' , metrics.precision_score(y_test,y_pred, pos_label = "YES"))
print('Recall score' , metrics.recall_score(y_test,y_pred, pos_label = "NO"))
# + [markdown] colab_type="text" id="sCIt6pyn1zpb"
# ### 5c. Revise conclusion
#
# Has your conclusion changed? Or is the result of executing random forest the same as your best model reached by a single decision tree?
| Springboard Decision Tree Specialty Coffee Case Study - Tier 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
# Twitter API credentials.
# NOTE(review): secrets are hard-coded in the notebook — load them from
# environment variables or a config file before sharing/committing, and
# rotate any key that has already been published.
consumer_key = 'FIzSzQgeVFjKWVUOveOXQpWxA'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret = '<KEY>'
class StdOutListener(StreamListener):
    """Stream listener that appends every raw tweet payload to a local file.

    on_data receives the raw JSON string for each matched tweet; on_error
    receives the HTTP status code reported by the stream.
    """

    def __init__(self):
        # FIX: the original body was a bare `return`, which skipped the base
        # StreamListener initialisation entirely; delegate to it instead.
        super().__init__()

    def on_data(self, data):
        # Append the raw payload to the capture file, echo it, and return
        # True to keep the stream connection alive.
        with open('/home/jessy/project1/tweetdata.txt','a') as tf:
            tf.write(data)
        print(data)
        return True

    def on_error(self, status):
        # Log the status code (e.g. 420 = rate limited).
        print(status)
if __name__ == '__main__':
    # Wire up OAuth, then open a filtered streaming connection that feeds
    # every matching tweet to StdOutListener (blocks until interrupted).
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    stream = Stream(auth, l)
    # Track mental-health related keywords.
    stream.filter(track=['depression', 'anxiety', 'mental health', 'suicide', 'stress', 'sad','pain'])
| sample1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append('../../src')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pandas_profiling import ProfileReport
import visualization.visualize as vis
plt.rcParams.update({'font.size': 12})
pd.set_option('display.max_rows', 500)
# -
# Load the raw myocardial-infarction complications dataset.
filename = '../../data/raw/Myocardial infarction complications Database.csv'
data = pd.read_csv(filename)
data
data.info()
# + tags=[]
# Per-column summary statistics, augmented with a null count per column.
data_desc = data.describe().transpose()
data_desc['n_nulls'] = [data[idx].isnull().sum() for idx in data_desc.index]
data_desc
# -
# Columns where more than 10% of the rows are missing, worst first.
data_desc_null = data_desc[data_desc['n_nulls'] > 0.1*data.shape[0]].sort_values(by='n_nulls', ascending=False)
data_desc_null
# # Plotting some graphs:
INPUT_COLS = data.columns[0:112]
INPUT_COLS
INPUT_COLS_1stday = list(data.columns[[92, 99, 102]])
INPUT_COLS_1stday
INPUT_COLS_2ndday = list(data.columns[[93, 100, 103]])
INPUT_COLS_2ndday
INPUT_COLS_3rdday = list(data.columns[[94, 101, 104]])
INPUT_COLS_3rdday
OUTPUT_COLS = data.columns[112:]
OUTPUT_COLS
ID_COLS = ['ID']
TARGET_COLS = ['FIBR_PREDS', 'PREDS_TAH', 'JELUD_TAH', 'FIBR_JELUD', 'A_V_BLOK',
'OTEK_LANC', 'RAZRIV', 'DRESSLER', 'ZSN', 'REC_IM', 'P_IM_STEN', 'LET_IS']
TARGET_COL = 'LET_IS'
FEATURE_COLS = np.array(data.columns[~np.isin(data.columns, ID_COLS+TARGET_COLS)])
FEATURE_COLS
boxplot_cols = list(data_desc.sort_values(by='mean', ascending=False).index[~np.isin(data_desc.index, ID_COLS+TARGET_COLS)])
vis.plot_feature_boxplots(data, TARGET_COLS, figsize=(15, 3), subplot_layout=(1, 2))
vis.plot_feature_boxplots(data, boxplot_cols, figsize=(15, 25), subplot_layout=(8, 2))
# # Complications and lethal outcome:
# Human-readable names aligned index-for-index with the coded columns above.
COMPLICATIONS_COLS = ['FIBR_PREDS', 'PREDS_TAH', 'JELUD_TAH', 'FIBR_JELUD', 'A_V_BLOK',
                      'OTEK_LANC', 'RAZRIV', 'DRESSLER', 'ZSN', 'REC_IM', 'P_IM_STEN']
COMPLICATIONS_NAMES = ['Atrial fibrillation', 'Supraventricular tachycardia', 'Ventricular tachycardia', 'Ventricular fibrillation', 'Third-degree AV block',
                       'Pulmonary edema', 'Myocardial rupture', 'Dressler syndrome', 'Chronic heart failure', 'Relapse of the myocardial infarction', 'Post-infarction angina']
# Labels for the LET_IS outcome codes, in code order (0 = Alive, ... 7).
TARGET_NAMES = ['Alive', 'Cardiogenic shock', 'Pulmonary edema', 'Myocardial rupture',
                'Progress of congestive heart failure',
                'Thromboembolism', 'Asystole', 'Ventricular fibrillation']
# +
# Complications counts: each indicator column is 0/1, so the column sum is
# the number of patients that experienced that complication.
complications_counts = pd.DataFrame(data[COMPLICATIONS_COLS].sum(axis=0))
complications_counts.index = COMPLICATIONS_NAMES
complications_counts = complications_counts.sort_values(by=0, ascending=False)
complications_counts
plt.figure(figsize=(12, 8))
plt.title('Complications occurrences')
plt.bar(complications_counts.index, complications_counts[0])
plt.xticks(rotation=90)
plt.xlabel('Complications')
plt.ylabel('nº of occurrences')
plt.tight_layout()
plt.savefig('./imgs/Complications occurrences.png', dpi=100)
plt.show()
# -
# Lethal Outcome counts:
# BUG FIX: value_counts() orders by frequency (descending), not by outcome
# code, while TARGET_NAMES is ordered by code — so the original attached the
# wrong label to most bars. sort_index() aligns counts to the code order.
# NOTE(review): this presumes LET_IS codes are 0..7 matching TARGET_NAMES —
# confirm against the dataset documentation.
lethal_counts = data['LET_IS'].value_counts(dropna=False).sort_index()
plt.figure(figsize=(12, 8))
plt.title('Lethal Outcomes occurrences')
plt.bar(TARGET_NAMES, lethal_counts)
plt.xticks(rotation=90)
plt.xlabel('Lethal Outcome')
plt.ylabel('nº of occurrences')
plt.tight_layout()
plt.savefig('./imgs/Lethal Outcomes occurrences.png', dpi=100)
plt.show()
data['LET_IS'].value_counts(dropna=False)
# +
# Complication ratio per Lethal Outcome: for each outcome class, the fraction
# of its patients that experienced each complication (mean of 0/1 columns).
results = []
for y in data[TARGET_COL].unique():
    dftmp = data[data[TARGET_COL] == y]
    results.append(dftmp[COMPLICATIONS_COLS].mean(axis=0).to_dict())
    results[-1][TARGET_COL] = y
results = pd.DataFrame(results)
# Sort rows by outcome code so they line up with TARGET_NAMES.
results = results.sort_values(by=TARGET_COL).drop(columns=[TARGET_COL])
results.index = TARGET_NAMES
results.columns = COMPLICATIONS_NAMES
plt.figure(figsize=(16, 10))
plt.title('Complication ratio per Lethal Outcome')
sns.heatmap(results, cmap='coolwarm', annot=results.values.round(2))
plt.xlabel('Complications')
plt.ylabel('Lethal outcome')
plt.tight_layout()
plt.savefig('./imgs/Complication ratio per Lethal Outcome.png', dpi=100)
plt.show()
# -
# # Daily measurements:
# Null counts for the day-1/2/3 measurement columns.
data[INPUT_COLS_1stday+INPUT_COLS_2ndday+INPUT_COLS_3rdday].isnull().sum(axis=0)
# Outcome distribution among patients missing any measurement on each day.
data[TARGET_COL][data[INPUT_COLS_1stday].isnull().sum(axis=1).astype(bool)].value_counts()
data[TARGET_COL][data[INPUT_COLS_2ndday].isnull().sum(axis=1).astype(bool)].value_counts()
data[TARGET_COL][data[INPUT_COLS_3rdday].isnull().sum(axis=1).astype(bool)].value_counts()
# Rows missing at least one measurement on any of the three days, with outcome.
data[INPUT_COLS_1stday+INPUT_COLS_2ndday+INPUT_COLS_3rdday+[TARGET_COL]][data[INPUT_COLS_1stday+INPUT_COLS_2ndday+INPUT_COLS_3rdday].isnull().sum(axis=1).astype(bool)]
data[TARGET_COL][data[INPUT_COLS_1stday+INPUT_COLS_2ndday+INPUT_COLS_3rdday].isnull().sum(axis=1).astype(bool)].value_counts()
data[TARGET_COL][data[INPUT_COLS_1stday+INPUT_COLS_2ndday+INPUT_COLS_3rdday].isnull().sum(axis=1).astype(bool)].value_counts().sum()
# +
# Summary of model performance across experiments.
# NOTE(review): these metric values are transcribed by hand — regenerate them
# if the models change.
model_number = [0, 1, 2, 3]
acc_simple = [0.89, 0.89, 0.90, 0.92]
acc_balanced = [0.69, 0.68, 0.72, 0.76]
plt.figure(figsize=(8, 6))
plt.plot(model_number, acc_simple, label='Acc (simple)', alpha=0.75)
plt.plot(model_number, acc_balanced, label='Acc (balanced)', alpha=0.75)
plt.xticks(model_number)
plt.xlabel('Model number')
# BUG FIX: axis-label typo 'Metric valuer' -> 'Metric value'.
plt.ylabel('Metric value')
plt.legend(loc='best', prop={'size': 10})
plt.tight_layout()
plt.savefig('./imgs/models_summary.png', dpi=100)
plt.show()
# -
| notebooks/lucas/nbk00_data_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## NumPy Array Manipulation
# ### concatenate()
# +
import numpy as np

# Two 2x2 arrays with matching shapes, so they can be joined on either axis.
a = np.array([[1, 2], [3, 4]])
print('First array:')
print(a)

b = np.array([[5, 6], [7, 8]])
print('Second array:')
print(b)

# Stacking rows (axis 0) versus stacking columns (axis 1).
print('Joining the two arrays along axis 0:')
print(np.concatenate((a, b), axis=0))
print('Joining the two arrays along axis 1:')
print(np.concatenate((a, b), axis=1))
# -
# ### arange()
# np.arange(start, stop, step) - (interval including start, but excluding stop).
# With a single argument, start defaults to 0 and step to 1: array([0, 1, 2]).
np.arange(3)
# A float argument yields a float dtype: array([0., 1., 2.]).
np.arange(3.0)
# Explicit start and stop: array([3, 4, 5, 6]).
np.arange(3,7)
# Step of 2: array([3, 5]).
np.arange(3,7,2)
# ### reshape()
# ##### This function gives a new shape to an array without changing the data.
# numpy.reshape(arr, newshape, order)
# +
import numpy as np

# Build a flat vector of 8 consecutive integers, then view it as 4 rows of 2.
a = np.arange(8)
print('The original array:', a)

b = a.reshape(4, 2)
print('The modified array:')
print(b)
# -
# ## meshgrid()
# A Meshgrid is an ND-coordinate space generated by a set of arrays. Each point on the meshgrid corresponds to a combination of one value from each of the arrays.
# #### If you give np.meshgrid() two 1D arrays Array A and Array B such that <br/>
# A is [a1,a2,a3]
# and B is [b1,b2,b3],
# <br/>then running <b>np.meshgrid(A, B)</b> returns a list of two 2D arrays, which look like
#
# [[a1,a1,a1],[a2,a2,a2],[a3,a3,a3]]
# and
#
# [[b1,b1,b1],[b2,b2,b2],[b3,b3,b3]]
# <br/>for which each array here contains arrays with an array full of the first item, the next filled with all the next item in the original array, etc.<br/>
# By adding these two arrays together, we can create the 2D array containing, as its elements, every combination of sums between the numbers in the original elements.
# +
# declaring how many x and y points we want (3 and 2)
nx, ny = (3, 2)
# create a linear space with 3 points between 0 and 1 inclusive -> [0. 0.5 1.]
x = np.linspace(0, 1, nx)
print(x)
# -
# create a linear space with 2 points between 0 and 1 inclusive -> [0. 1.]
y = np.linspace(0, 1, ny)
print(y)
# Using the meshgrid function, which takes two or more vectors.
# It meshes the vectors together to get a grid. In this case,
# meshing x and y creates a Cartesian plane: xv repeats x along each of the
# ny rows, and yv repeats each y value across a row of nx columns.
xv, yv = np.meshgrid(x, y)
print(xv)
print(yv)
# +
# xv and yv are now kind of "meshed" versions of the original x, y.
# If you combine xv and yv into coordinates, you'll get:
# (0, 0), (0.5, 0), (1, 0)
# (0, 1), (0.5, 1), (1, 1)
# For every point in x, there are all the y points and vice versa.
# -
| Array_NumPy_Manipulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sympy import *
from sympy.abc import x
init_printing(use_unicode=True)
# +
# Symbolic statement of Newton's second law, F = m*a.
# (The trailing comma after `a` is harmless tuple-unpacking syntax.)
F, m, a, = symbols("F m a")
newtons_2_law = Eq(F, m*a)
display(newtons_2_law)
# -
# An unevaluated derivative df/dx of an abstract function f(x).
f = Function("f")(x)
f_ = Derivative(f, x)
f_
# BUG FIX: `y` was never defined before this cell (only x is imported from
# sympy.abc above), so Eq(...) raised a NameError unless some earlier,
# out-of-order execution happened to define it. Declare y alongside C.
C, y = symbols("C y")
# Implicit solution of a separable ODE: ln|y| = -x**2/2 + C.
Eq(ln(abs(y)), (-x**2/2+C))
# NOTE(review): `y` below must come from an earlier cell — it is not defined
# in this notebook's visible top-to-bottom flow. TODO confirm execution order.
f = Function("f")(x)
g = Function("g")(x)
y_ = Derivative(y, x)
eq1 = Eq(y_, f*g)
display(eq1)
# Derivative of 1/(-y) with respect to y.
(1/-y).diff(y)
# Exponential-decay ODE dy/dt = -lambda*y, solved symbolically by dsolve.
t, lam = symbols('t lambda')
y = Function('y')(t)
dydt = y.diff(t)
expr = Eq(dydt, -lam*y)
display(expr)
dsolve(expr)
# eq4 = Eq(Derivative(y), 4*x*y)
# display(eq4)
# dsolve(eq4)
# Simplest linear ODE: dy/dx = y, whose general solution is C1*exp(x).
y = Function('y')(x)
dydx = y.diff(x)
eq3 = Eq(dydx, y)
display(eq3)
dsolve(eq3)
# An unevaluated derivative of the (uncalled) function object y.
y = Function('y')
der = Derivative(y, x)
der
# Stray literal left in the notebook; evaluates to 0 and does nothing.
0
| differential_equations/lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. Introspection
# > Introspection refers to the ability to examine something to determine what it is, what it knows, and what it is capable of doing. Introspection gives programmers a great deal of flexibility and control.
#
# >Python's support for introspection runs deep and wide throughout the language. In fact, it would be hard to imagine Python without its introspection features.
#
# ## 1.1 Starting the Python interpreter in interactive mode
# Just go to the terminal and type <b>`python`</b> and you will see the Python promt (<b> `>>>` </b>) and the Python version that you are currently using:
#
# 
# ## 1.2 [help()](https://docs.python.org/3/library/functions.html#help)
# ### Starting the help Utility
#
# Let's start by asking for help. You may type <b>`help()`</b> without specifying an argument, the interactive help system starts on the interpreter console.
#
# For example:
# 
# ### Asking Python for help Keywords:
#
# help> keywords
# 
# When we typed <b>`help()`</b>, we were greeted with a message and some instructions, followed by the <b>`help`</b> prompt. At the prompt, we entered keywords and were shown a list of Python keywords.
#
# As you can see, Python's online help utility displays information on a variety of topics, or for a particular object. The help utility is quite useful, and does make use of Python's introspection capabilities.
#
# If we want to quit, just type <b>`'quit'`</b>, and we will be returned to the Python prompt.
#
# `help()` can also accept parameters. It can be any object (everything in Python is an object!), and it gives helpful information specific to that object.
# help() on any object prints that object's documentation — here, int's.
n = 1
help(n)
# If you want to know more about __`help()`__, just click [here](http://www.ibm.com/developerworks/library/l-pyint/index.html).
#
# ## 1.3 [dir()](https://docs.python.org/3/library/functions.html#dir)
# Python provides a way to examine the contents of modules (and other objects) using the built-in __`dir()`__ function.
#
# It returns a sorted list of attribute names for any object passed to it. If no object is specified, __`dir()`__ returns the names in the current scope.
#
# 
# dir() lists the attribute names of its argument — here, the str methods.
s = "string"
dir(s)
# If you want to know more about __`dir()`__, click [here](http://www.ibm.com/developerworks/library/l-pyint/index.html)
# If at any point you feel lost and need help, `help()` and `dir()` are at your disposal. Use them in the code boxes to find out more about what's happening in the examples.
| Beginner_1/1. Introspection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import base64
import numpy as np
from io import BytesIO
from PIL import Image
import logging
import sys
import json
import requests
import s3fs
import posixpath
import os
import joblib
# -
# Download window and query parameters for the EWD precipitation API.
data_start = "2019-06-09 07:12"
data_end = "2019-09-17"
location = "stockholm"
# Fill these placeholders in before running; never commit real credentials.
dest_folder = "<your destination s3 path>"
authorization = "<your EWD authorization token>"
# One timestamp per minute across the whole window.
data_range = pd.date_range(data_start, data_end, freq="1min")
def download_reftime(timestamp, location, dest_folder, authorization, overwrite=False):
    """Fetch one precipitation frame from the EWD API and store it on S3.

    The frame is saved as a 64x61 numpy array under
    <dest_folder>/<YYYYMM>/<YYYYMMDDHHMM>.npy.

    Returns a (timestamp, status) tuple where status is:
      * None  - file already existed and overwrite is False (skipped)
      * 200   - downloaded and written successfully
      * other - HTTP status code, or the stringified exception on failure
    """
    try:
        s3 = s3fs.S3FileSystem()
        dest_file = posixpath.join(dest_folder, timestamp.strftime("%Y%m"), "%s.npy" % timestamp.strftime("%Y%m%d%H%M"))
        if s3.exists(dest_file) and not overwrite:
            return (timestamp, None)
        method = "GET"
        url = "https://demo-apim.westeurope.cloudapp.azure.com/api_secure/PrecipitationAPI/3.0.0/weather/precipitation/at/%s?location=%s" % (timestamp.strftime("%Y%m%d%H%M"), location)
        headers = {"accept": "application/json",
                   "Authorization": authorization}
        # ROBUSTNESS FIX: the original call had no timeout, so a stalled
        # connection could hang a worker thread forever.
        response = requests.request(method=method, url=url, headers=headers, timeout=60)
        if response.status_code != 200:
            return (timestamp, response.status_code)
        resp = json.loads(response.content.decode("utf-8"))
        # The canvas is a base64-encoded PNG with a data-URI prefix; strip it.
        b64png = resp['canvas'][len("data:image/png;base64,"):]
        dec = base64.b64decode(b64png)
        image = Image.open(BytesIO(dec))
        image = image.resize((64, 61))
        img = np.array(image)
        # Flip vertically — presumably so row 0 matches the grid orientation
        # expected downstream; TODO confirm.
        img = np.flipud(img)
        with s3.open(dest_file, mode='wb') as f:
            np.save(f, img)
        return (timestamp, 200)
    except Exception as e:
        # Deliberate best-effort: report the failure per timestamp instead of
        # aborting the whole parallel download run.
        return (timestamp, str(e))
# Download every minute-frame with 4 worker threads; each call returns a
# (timestamp, status) tuple, so failures are collected rather than raised.
with joblib.parallel_backend("threading"):
    results = joblib.Parallel(n_jobs=4)(
        joblib.delayed(download_reftime)(timestamp, location, dest_folder, authorization)
        for timestamp in data_range)
| Solutions/Greenlytics/data-retrieval-ewd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# Yet another meaningful and interesting Kaggle competition, wherein this time the aim is to predict a patient’s severity of decline in lung function.
#
# In this notebook, we aim to perform an EDA of the given train.csv dataset.
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt
import seaborn as sns
import random
import pydicom
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Kaggle input layout for the OSIC pulmonary fibrosis competition.
root_dir = "/kaggle/input/osic-pulmonary-fibrosis-progression"
train_dir = os.path.join(root_dir,'train')
test_dir = os.path.join(root_dir,'test')
train_csv_path = os.path.join(root_dir,'train.csv')
test_csv_path = os.path.join(root_dir,'test.csv')
# -
df_train = pd.read_csv(train_csv_path)
df_test = pd.read_csv(test_csv_path)
# ### Lets check the sanity of the datasets: i.e. null values, shape etc
# Basic sanity checks: shapes and per-column null counts.
print(df_train.shape)
print(df_test.shape)
for col in df_train.columns:
    print("Number of null values in column {} is {}".format(col,sum(df_train[col].isnull())))
# ### Lets first try to find number of patients:
df_train.groupby(['Patient']).count()
# Each patient has multiple visit rows; count the distinct patient ids.
list_patient_ids = list(df_train['Patient'].unique())
print("Number of patients: {}".format(len(list_patient_ids)))
# So we have datapoints pertaining to 176 patients. To be specific, we know the below related to these 176 patients:
#
# A. ID => unique id for each patient - also the name of the DICOM folder in train
#
# B. Weeks => the relative number of weeks before/after the baseline CT was taken. This is the same baseline CT present in the DICOM folder
#
# C. FVC => the recorded lung capacity in ml
#
# D. Percent => a computed field which approximates the patient's FVC as a percent of the typical FVC for a person of similar characteristics
#
# E. Age => age of the patient
#
# F. Sex => gender of the patient
#
# G. Smoking Status => whether patient smokes or not
df_train.columns
# We will check in the dataset whether for a given patient, is there a change in values for:
#
# a. age - probably during the course, there could have been a change in person's age
#
# b. smokingstatus - checking whether there was any change in smoking status for the patient e.g whether the patient went to 'Never Smokes' to 'Currently Smokes' or 'Ex smoker' to 'Currently Smokes'
#
# c. sex - for data sanity
#
# This check wouldnt make sense if above patient attributes were only taken once. Since we do not know this, its best to check to eliminate any data discrepancies
# Sanity check: Age, SmokingStatus and Sex must each be constant per patient.
for pat_id in list_patient_ids:
    age_unique = len(df_train.loc[df_train['Patient']==pat_id,'Age'].unique())
    smoke_unique = len(df_train.loc[df_train['Patient']==pat_id,'SmokingStatus'].unique())
    sex_unique = len(df_train.loc[df_train['Patient']==pat_id,'Sex'].unique())
    # BUG FIX: the original .format(pat_id) calls had no {} placeholder, so
    # the offending patient id was silently dropped from every message.
    if sex_unique !=1:
        print("Please check sex column for patient id: {}".format(pat_id))
    if age_unique !=1:
        print("Please check age column for patient id: {}".format(pat_id))
    if smoke_unique !=1:
        print("Please check smoking_status column for patient id: {}".format(pat_id))
# We dont have any such discrepancies, so 'age', 'smokingstatus' and 'sex' are constant for a given patient id.
#
# So lets create a patient information dictionary, for future use
# Build a per-patient lookup of the (constant) demographic attributes.
# Each attribute was verified above to be unique per patient, so taking the
# first unique value is safe.
pat_info_dict = {}
for pat_id in list_patient_ids:
    # Filter once per patient instead of once per attribute, and drop the
    # leftover unused `smoke_unique` assignment from the earlier check cell.
    patient_rows = df_train.loc[df_train['Patient']==pat_id]
    pat_info_dict[pat_id] = {
        'age': patient_rows['Age'].unique()[0],
        'sex': patient_rows['Sex'].unique()[0],
        'smokingstatus': patient_rows['SmokingStatus'].unique()[0],
    }
# ### Now lets analyze the distribution
# One row per patient (demographics are constant per patient, checked above).
df_pat_analysis = df_train.loc[:,['Patient','Age','Sex','SmokingStatus']].drop_duplicates()
sns.countplot(df_pat_analysis['Sex'])
sns.distplot(df_pat_analysis['Age'])
sns.countplot(df_pat_analysis['SmokingStatus'])
# +
#ax = fig.add_subplot(111)
fig, ax =plt.subplots(nrows=2,ncols=1,figsize=(16,16),squeeze=False)
#ax[0,0].scatter(xdf['landmark_id'],xdf['count'],c=xdf['landmark_id'],s=50)
#ax[0,0].tick_params(axis='x',rotation=45)
#ax[0,0].title.set_text('scatter plot: landmark_id v/s count ')
sns.countplot(x="Age", hue="SmokingStatus", data=df_pat_analysis.loc[df_pat_analysis['Sex']=="Male"],ax=ax[0,0])
ax[0,0].title.set_text('Plot: Smoking status v/s Age distribution (Male)')
sns.countplot(x="Age", hue="SmokingStatus", data=df_pat_analysis.loc[df_pat_analysis['Sex']=="Female"],ax=ax[1,0])
ax[1,0].title.set_text('Plot: Smoking status v/s Age distribution (Female)')
fig.tight_layout(pad=3.0)
plt.show()
# -
# Distribution Inferences:
# 1. Distribution of age is following more or less a gaussian distribution with mean center around the age of 65-70
# 2. You have a greater count of male patients and "Ex-smoker" patients
# 3. You can see a dominance of "Ex-smoker" class within male patients, however "Never-Smoked" tends to edge out "Ex-smoker" in the female patients
# With a fair understanding of distribution of patient attributes, lets now incorporate the remaining attributes which shed light on the their visits and lung capacity i.e.
# "Weeks"
# "FVC", and
# "Percent"
df_train.columns
fig, ax = plt.subplots(nrows=1,ncols=1,figsize=(16,8),squeeze=False)
sns.distplot(df_train['Weeks'],ax =ax[0,0])
# # Patient Card
# +
# Pick one patient at random and plot their FVC trajectory across visits.
patient_id = random.choice(list_patient_ids)
df_pat = df_train.loc[df_train['Patient']==patient_id]
plt.figure(figsize=(20,10))
# Changing default values for parameters individually
plt.rc('lines', linewidth=2, linestyle='-', marker='.')
plt.rcParams['lines.markersize'] = 20
plt.rcParams['font.size'] = '20.0'
#Plot a line graph
plt.plot(df_pat['Weeks'],df_pat['FVC'])
typical_fvc=0
for x,y in zip(df_pat['Weeks'],df_pat['FVC']):
    # Annotate each visit with its recorded 'Percent' value, and derive the
    # implied "typical" FVC from it (y is percent_mark% of typical_fvc).
    percent_mark = round(list(df_pat.loc[df_pat['Weeks']==x].loc[df_pat['FVC']==y]['Percent'])[0],2)
    disp_str = "{} %".format(percent_mark)
    typical_fvc = round(y*100/percent_mark,2)
    #print("Typical FVC value is {}".format(typical_fvc))
    plt.text(x,y,disp_str)
# Lets plot the CT Scan week (week 0) as a vertical marker spanning the FVC range
ct_scan_line = range(int(min(df_pat['FVC'])), int(max(df_pat['FVC'])))
max_len = len(ct_scan_line)
plt.plot([0]*max_len,ct_scan_line, marker = '|')
# Add labels and title
# BUG FIX: the title previously used 'pat_id', a leftover loop variable from an
# earlier cell, so the card always described the LAST patient iterated rather
# than the randomly chosen one; use 'patient_id' throughout.
plt.title("Week wise analysis for \n Patient: {} \n Age: {}\n Sex: {}\n Smoking Status: {} \n Typical FVC Value: {}ml".format(patient_id,pat_info_dict[patient_id]['age'],pat_info_dict[patient_id]['sex'],pat_info_dict[patient_id]['smokingstatus'],typical_fvc))
plt.xlabel("Week#")
plt.ylabel("FVC")
plt.show()
| notebooks/initial-eda-patient-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Standard imports
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import seaborn as sns
import time
import tensorflow as tf
from tensorflow.keras.backend import get_value
# %matplotlib inline
# Insert mavenn at beginning of path
# (so the local development checkout shadows any installed mavenn package)
import sys
path_to_mavenn_local = '../../'
sys.path.insert(0, path_to_mavenn_local)
#Load mavenn and check path
import mavenn
print(mavenn.__path__)
# +
# Import dataset splitter from sklearn
from sklearn.model_selection import train_test_split
# Load dataset as a dataframe
data_df = mavenn.load_example_dataset('sortseq')
# Extract x and y as np.arrays
x = data_df['x'].values
y = data_df['y'].values
# ct presumably holds per-observation counts (passed as ct_n below) —
# confirm against the mavenn sortseq dataset docs.
ct = data_df['ct'].values
# Split into training and test sets
#x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
# -
data_df.head()
# +
# Define a model with an additive G-P map and MPA regression,
# and specify the training data.
# NOTE(review): the original comment claimed a "pairwise G-P map" and a
# "heteroskedastic Gaussian GE measurement process", which does not match the
# arguments actually passed below (gpmap_type='additive', regression_type='MPA').
mavenn.set_seed(0)
model = mavenn.Model(x=x,
                     y=y,
                     ct_n=ct,
                     gpmap_type='additive',
                     alphabet='dna',
                     regression_type='MPA')
# Fit model to training data (timed for the report below)
start_time = time.time()
model.fit(epochs=10,
          learning_rate=0.01,
          early_stopping=True,
          early_stopping_patience=20)
training_time = time.time()-start_time
print(f'training time: {training_time:.1f} seconds')
# -
# Save model
model.save('sortseq')
# Load model
# Round-trips the model through disk to verify save/load works.
model = mavenn.load('sortseq')
model.get_nn().summary()
# +
# Predict latent phenotype values (phi) on test data
phi = model.x_to_phi(x)
# Set phi lims and create grid in phi space
phi_lim = [min(phi)-.5, max(phi)+.5]
phi_grid = np.linspace(phi_lim[0], phi_lim[1], 1000)
# p(y | phi) evaluated at every point of the phi grid, shown as an image
mat = model.na_p_of_all_y_given_phi(phi_grid)
plt.imshow(mat, aspect='auto', interpolation='nearest')
# -
# Compute mask_dict from training data
mask_dict = mavenn.get_mask_dict(x, alphabet='dna')
mask_dict
# +
# Consensus (wild-type) sequence and the fitted additive G-P map parameters
wt_seq = mavenn.x_to_consensus(x)
theta_add_df = model.get_gpmap_parameters(which='additive')
theta_add_df.head()
# -
# Illustrate pairwise parameters
# NOTE(review): despite the wording above, this plots the *additive*
# parameters retrieved via which='additive'.
fig, ax = plt.subplots(1,1, figsize=[10,4])
ax, cb = mavenn.heatmap(theta_add_df,
                        ax=ax,
                        seq=wt_seq,
                        ccenter=0)
| mavenn/development/20.09.16_train_save_load_sortseq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/PacktPublishing/Hands-On-Computer-Vision-with-PyTorch/blob/master/Chapter02/Operations_on_tensors.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TqJn6S6MaXSJ" outputId="365578bb-f483-40f4-d08f-adb8c1982840" colab={"base_uri": "https://localhost:8080/", "height": 51}
import torch
# Elementwise multiplication by a scalar
x = torch.tensor([[1,2,3,4], [5,6,7,8]])
print(x * 10)
# + id="oPoA4yptaY2N" outputId="89d72f4b-71b8-45ae-9c50-4fb1e73ae6c0" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Elementwise addition of a scalar (out of place; x is unchanged)
x = torch.tensor([[1,2,3,4], [5,6,7,8]])
y = x.add(10)
print(y)
# + id="fHmRXqMcadet"
# Reshaping with view
y = torch.tensor([2, 3, 1, 0]) # y.shape == (4)
y = y.view(4,1)                # y.shape == (4, 1)
# + id="rr5Gs-QMaf-H" outputId="28504802-789e-4372-83f8-feff86ed66e0" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Removing a size-1 dimension
x = torch.randn(10,1,10)
z1 = torch.squeeze(x, 1) # similar to np.squeeze()
# The same operation can be directly performed on
# x by calling squeeze and the dimension to squeeze out
z2 = x.squeeze(1)
assert torch.all(z1 == z2) # all the elements in both tensors are equal
print('Squeeze:\n', x.shape, z1.shape)
# + id="jnIQNMH5ajlF" outputId="f53812a0-2391-4746-9af9-22b29eb8867a" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Adding a size-1 dimension
x = torch.randn(10,10)
print(x.shape)
# torch.size(10,10)
z1 = x.unsqueeze(0)
print(z1.shape)
# torch.size(1,10,10)
# The same can be achieved using [None] indexing
# Adding None will auto create a fake dim at the
# specified axis
x = torch.randn(10,10)
z2, z3, z4 = x[None], x[:,None], x[:,:,None]
print(z2.shape, z3.shape, z4.shape)
# + id="SWxKXdP6am9D" outputId="8cc993b3-d461-4aa6-9bf3-c0f56af01037" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Matrix multiplication: (2,4) @ (4,1) -> (2,1); y is the (4,1) tensor from above
x = torch.tensor([[1,2,3,4], [5,6,7,8]])
print(torch.matmul(x, y))
# + id="VtZmPZOEapyc" outputId="170d0165-e8b0-46a8-f01a-1bfa93a93e2c" colab={"base_uri": "https://localhost:8080/", "height": 51}
# @ is the operator form of matmul
print(x@y)
# + id="al6kKt4dasVv" outputId="d0761ad3-ddce-432a-a1be-8284ffef7276" colab={"base_uri": "https://localhost:8080/", "height": 51}
import torch
# Concatenation along different axes
x = torch.randn(10,10,10)
z = torch.cat([x,x], axis=0) # np.concatenate()
print('Cat axis 0:', x.shape, z.shape)
# Cat axis 0: torch.Size([10, 10, 10]) torch.Size([20, 10, 10])
z = torch.cat([x,x], axis=1) # np.concatenate()
print('Cat axis 1:', x.shape, z.shape)
# Cat axis 1: torch.Size([10, 10, 10]) torch.Size([10, 20, 10])
# + id="vv1DtZ2qb_qu" outputId="bafdaba7-c0b5-4b8d-a7bb-beb20b8cfd7e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Reductions: global max, then max per axis (returns values and indices)
x = torch.arange(25).reshape(5,5)
print('Max:', x.shape, x.max())
# + id="DO2nx2glcNPQ" outputId="1b7fbdb5-1f41-4bd7-8c77-3b494e90161b" colab={"base_uri": "https://localhost:8080/", "height": 34}
x.max(dim=0)
# + id="3O-_2LwQcOv6" outputId="5b84364e-3453-4265-8e10-59ede36033e9" colab={"base_uri": "https://localhost:8080/", "height": 51}
m, argm = x.max(dim=1)
print('Max in axis 1:\n', m, argm)
# + id="0qwAEb6BcQJB" outputId="d89be36a-3a97-4f3e-da1a-64cf3ff4cffa" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Reordering dimensions
x = torch.randn(10,20,30)
z = x.permute(2,0,1) # np.permute()
print('Permute dimensions:', x.shape, z.shape)
# + id="mCeCjaZo0arI" outputId="f8718838-01ed-4426-9ff1-49f346ffe131"
# Discoverability helpers
dir(torch.Tensor)
# + id="jhiL6isOcSJP" outputId="1bbe6e0f-b453-47d9-c687-a1526681aa8b"
help(torch.Tensor.view)
# + id="OYYOtiFn0arN"
| Chapter02/7_Operations_on_tensors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# +
# A transit-timing example:
# -
# Read in the transit-timing code:
include("../src/ttv.jl")
# +
# Specify the initial conditions:
# Mass of star:
mstar = 0.82
# First planet:
m_1 = 3.18e-4
e1 = 0.0069
om1 = 0.0
p1 = 228.774
t01 = 0.0
# Second planet:
m_2 = 3e-6
e2 = 0.0054
om2 = 0.0
p2 = 221.717
t02 = -p2/6 # We want mean anomaly to be +60 deg, so its
# transit should have about occurred 1/6 of an orbit
# prior to the initial time.
# Now, integration quantities:
t0 = 0.0 # initial time
h = 10.0 # time step
tmax = 9837.282 # Maximum time of integration
# Okay, set up array for orbital elements
# Row 1 holds the stellar mass; rows 2..n hold one planet each with
# columns [mass, period, t0, e*cos(om), e*sin(om), inclination, Omega].
n = 3
elements = zeros(n,7)
elements[1,1] = mstar
elements[2,:] = [m_1,p1,t01,e1*cos(om1),e1*sin(om1),pi/2,0.0]
elements[3,:] = [m_2,p2,t02,e2*cos(om2),e2*sin(om2),pi/2,0.0]
count = zeros(Int64,n)
tt = zeros(n,44)
rstar = 1e12
# Now, run the ttv function:
dq = ttv_elements!(n,t0,h,tmax,elements,tt,count,0.0,0,0,rstar)
# Print the times:
# tt row 2 holds the first planet's transit times; count[2] of them are valid.
t1 = tt[2,1:count[2]]
println(t1)
# +
# Plot these times with a mean ephemeris removed:
using Statistics
using PyPlot
# Three different lengths of transit timing sequences:
nplot = [8,22,43]
for iplot=1:3
    # Average period over the subsequence, then the O-C residuals vs. a
    # linear ephemeris anchored at the first transit.
    pavg = mean(t1[2:nplot[iplot]] - t1[1:nplot[iplot]-1])
    it = collect(0:1:nplot[iplot]-1)
    ttv1 = t1[1:nplot[iplot]] .- it .* pavg .- t1[1]
    if iplot == 2
        plot(it,ttv1,"o")
    else
        plot(it,ttv1,".")
    end
    println(pavg)
end
# -
| examples/.ipynb_checkpoints/ttv_example-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: local-venv
# language: python
# name: local-venv
# ---
# # Simple Bayesian network example
#
# Monty Hall Problem
#
# We are in a game show. We have 3 doors.
# Behind one door is a prize. Behind the others there's nothing.
#
# You have to select one door.
# Let's say you pick no 1.
# The host, who knows what's behind each door, opens another door, e.g. no 3, which has nothing.
# Now, you have one last chance before the game ends: do you want to change your mind and select door no 2?
# +
# We have 3 random variables in this setting:
# Contestant (C)
# Host (H)
# Prize (P)
# Structure:
# (P)   (C)
#   \   /
#    \ /
#    (H)
# Let's see the Conditional probability distributions:
# P(C):
# C 0 1 2
# 0.33 0.33 0.33
# P(P):
# P 0 1 2
# 0.33 0.33 0.33
# P(H | P,C):
# # +-----+-----------------+-----------------+-----------------+
# | C | 0 | 1 | 2 |
# # +-----+-----+-----+-----+-----+-----+-----+-----------------+
# | P | 0 | 1 | 2 | 0 | 1 | 2 | 0 | 1 | 2 |
# # +=====+=====+=====+=====+=====+=====+=====+=====+=====+=====+
# | H=0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 1 | 0.5 |
# # +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
# | H=1 | 0.5 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0.5 |
# # +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
# | H=2 | 0.5 | 1 | 0 | 1 | 0.5 | 0 | 0 | 0 | 0 |
# # +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
# -
# Import the libraries
from pgmpy.models import BayesianNetwork
from pgmpy.factors.discrete import TabularCPD
# Define the network structure
# Edges point from parents (C, P) to the child (H): the host's choice
# depends on both the contestant's pick and the prize location.
model = BayesianNetwork([('C', 'H'), ('P', 'H')])
model.to_daft(node_pos={'P':(0,0), 'C':(2,0), 'H':(1,-1)}).render()
# +
# Defining the CPDs:
# NOTE(review): columns use 0.33 (≈1/3) and so sum to 0.99, not exactly 1 —
# presumably within pgmpy's tolerance; confirm against the pgmpy TabularCPD docs.
cpd_c = TabularCPD('C', 3, [[0.33], [0.33], [0.33]])
cpd_p = TabularCPD('P', 3, [[0.33], [0.33], [0.33]])
cpd_h = TabularCPD('H', 3, [[0, 0, 0, 0, 0.5, 1, 0, 1, 0.5],
                            [0.5, 0, 1, 0, 0, 0, 1, 0, 0.5],
                            [0.5, 1, 0, 1, 0.5, 0, 0, 0, 0]],
                  evidence=['C', 'P'], evidence_card=[3, 3]) # variables that constitute evidence & cardinality
# Associating the CPDs with the network structure.
model.add_cpds(cpd_c, cpd_p, cpd_h)
model.get_cpds()
# -
# Sanity check
model.check_model()
# List independencies between variables
model.get_independencies()
# Get the markov blanket for a variable.
# The markov blanket is the set of node’s parents, its children and its children’s other parents.
model.get_markov_blanket('H')
# Let's print the CPDs to see what we have defined so far
print(cpd_c)
print('')
print(cpd_p)
print('')
print(cpd_h)
# +
# Get the active trails
# For any two variables A and B in a network, if any change in A influences the values of B,
# then we say that there is an active trail between A and B.
# So, in our example:
# a change in C affects H, but not P
# a change in P affects H, but not C
# a change in H affects C and P
print(model.active_trail_nodes('C'))
print(model.active_trail_nodes('P'))
print(model.active_trail_nodes('H'))
# +
# OK, now we have defined and explored the network.
# Let's continue with the game:
# This is where we initially choose to select door no. 0
# And the host chooses to open door no. 2
# So, the new evidence that will update our beliefs is:
# C = 0
# H = 2
# What is the posterior probability for prize?
# Infering the posterior probability
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
posterior_p = infer.query(['P'], evidence={'C': 0, 'H': 2})
print(posterior_p)
# +
# The new evidence updated our beliefs,
# so we have a better estimation of the probabilities now for where the prize could be.
# So, we should switch doors.
# -
| bayesian_net_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MNIST E2E on Kubeflow on AWS
#
# This example guides you through:
#
# 1. Taking an example TensorFlow model and modifying it to support distributed training
# 1. Serving the resulting model using TFServing
# 1. Deploying and using a web-app that uses the model
#
# ## Requirements
#
# * You must be running Kubeflow 1.0 on EKS
#
# ### Install AWS CLI
#
#
# Click `Kernel` -> `Restart` after you install new packages.
# !pip install boto3
# ### Create AWS secret in kubernetes and grant aws access to your notebook
#
# > Note: Once IAM for Service Account is merged in 1.0.1, we don't have to use credentials
#
# 1. Please create an AWS secret in current namespace.
#
# > Note: To get base64 string, try `echo -n $AWS_ACCESS_KEY_ID | base64`.
# > Make sure you have `AmazonEC2ContainerRegistryFullAccess` and `AmazonS3FullAccess` for this experiment. Pods will use credentials to talk to AWS services.
# + language="bash"
#
# # Replace placeholder with your own AWS credentials
# AWS_ACCESS_KEY_ID='<your_aws_access_key_id>'
# AWS_SECRET_ACCESS_KEY='<your_aws_secret_access_key>'
#
# kubectl create secret generic aws-secret --from-literal=AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} --from-literal=AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
# -
# 2. Attach `AmazonEC2ContainerRegistryFullAccess` and `AmazonS3FullAccess` to EKS node group role and grant AWS access to notebook.
# ### Verify you have access to AWS services
#
# * The cell below checks that this notebook was spawned with credentials to access AWS S3 and ECR
# +
import logging
import os
import uuid
from importlib import reload
import boto3
# Set REGION for s3 bucket and elastic container registry
AWS_REGION='us-west-2'
# Smoke-test the credentials: both calls raise if access is missing.
boto3.client('s3', region_name=AWS_REGION).list_buckets()
boto3.client('ecr', region_name=AWS_REGION).describe_repositories()
# -
# -
# ## Prepare model
#
# There is a delta between existing distributed mnist examples and what's needed to run well as a TFJob.
#
# Basically, we must:
#
# 1. Add options in order to make the model configurable.
# 1. Use `tf.estimator.train_and_evaluate` to enable model exporting and serving.
# 1. Define serving signatures for model serving.
#
# The resulting model is [model.py](model.py).
# ## Install Required Libraries
#
# Import the libraries required to train this model.
import notebook_setup
reload(notebook_setup)
notebook_setup.notebook_setup(platform='aws')
import k8s_util
# Force a reload of kubeflow; since kubeflow is a multi namespace module
# it looks like doing this in notebook_setup may not be sufficient
import kubeflow
reload(kubeflow)
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubeflow.tfjob.api import tf_job_client as tf_job_client_module
from IPython.core.display import display, HTML
import yaml
# ### Configure The Docker Registry For Kubeflow Fairing
#
# * In order to build docker images from your notebook we need a docker registry where the images will be stored
# * Below you set some variables specifying a [Amazon Elastic Container Registry](https://aws.amazon.com/ecr/)
# * Kubeflow Fairing provides a utility function to guess the name of your AWS account
# +
from kubernetes import client as k8s_client
from kubernetes.client import rest as k8s_rest
from kubeflow import fairing
from kubeflow.fairing import utils as fairing_utils
from kubeflow.fairing.builders import append
from kubeflow.fairing.deployers import job
from kubeflow.fairing.preprocessors import base as base_preprocessor
# Setting up AWS Elastic Container Registry (ECR) for storing output containers
# You can use any docker container registry instead of ECR
# FIX: the original assigned AWS_ACCOUNT_ID twice — first via
# fairing.cloud.aws.guess_account_id(), whose result was immediately
# overwritten (and which could fail needlessly). Keep only the
# authoritative STS lookup.
AWS_ACCOUNT_ID = boto3.client('sts').get_caller_identity().get('Account')
DOCKER_REGISTRY = '{}.dkr.ecr.{}.amazonaws.com'.format(AWS_ACCOUNT_ID, AWS_REGION)
namespace = fairing_utils.get_current_k8s_namespace()
logging.info(f"Running in aws region {AWS_REGION}, account {AWS_ACCOUNT_ID}")
logging.info(f"Running in namespace {namespace}")
logging.info(f"Using docker registry {DOCKER_REGISTRY}")
# -
# ## Use Kubeflow fairing to build the docker image
#
# * You will use kubeflow fairing's kaniko builder to build a docker image that includes all your dependencies
# * You use kaniko because you want to be able to run `pip` to install dependencies
# * Kaniko gives you the flexibility to build images from Dockerfiles
# TODO(https://github.com/kubeflow/fairing/issues/426): We should get rid of this once the default
# Kaniko image is updated to a newer image than 0.7.0.
from kubeflow.fairing import constants
constants.constants.KANIKO_IMAGE = "gcr.io/kaniko-project/executor:v0.14.0"
# +
from kubeflow.fairing.builders import cluster
# output_map is a map of extra files to add to the notebook.
# It is a map from source location to the location inside the context.
output_map = {
    "Dockerfile.model": "Dockerfile",
    "model.py": "model.py"
}
preprocessor = base_preprocessor.BasePreProcessor(
    command=["python"], # The base class will set this.
    input_files=[],
    path_prefix="/app", # irrelevant since we aren't preprocessing any files
    output_map=output_map)
preprocessor.preprocess()
# -
# Create a new ECR repository to host model image
# !aws ecr create-repository --repository-name mnist --region=$AWS_REGION
# Use a Tensorflow image as the base image
# We use a custom Dockerfile
# Builds the image in-cluster via kaniko, using S3 as the build-context store.
cluster_builder = cluster.cluster.ClusterBuilder(registry=DOCKER_REGISTRY,
                                                 base_image="", # base_image is set in the Dockerfile
                                                 preprocessor=preprocessor,
                                                 image_name="mnist",
                                                 dockerfile_path="Dockerfile",
                                                 pod_spec_mutators=[fairing.cloud.aws.add_aws_credentials_if_exists, fairing.cloud.aws.add_ecr_config],
                                                 context_source=cluster.s3_context.S3ContextSource(region=AWS_REGION))
cluster_builder.build()
logging.info(f"Built image {cluster_builder.image_tag}")
# ## Create a S3 Bucket
#
# * Create a S3 bucket to store our models and other results.
# * Since we are running in python we use the python client libraries but you could also use the `gsutil` command line
# +
import boto3
from botocore.exceptions import ClientError
bucket = f"{AWS_ACCOUNT_ID}-mnist"
def create_bucket(bucket_name, region=None):
    """Create an S3 bucket in a specified region.

    When ``region`` is omitted the bucket lands in the S3 default region
    (us-east-1); otherwise a LocationConstraint is supplied so the bucket
    is created in the requested region.

    :param bucket_name: Bucket to create
    :param region: String region to create bucket in, e.g., 'us-west-2'
    :return: True if bucket created, else False
    """
    try:
        if region is None:
            boto3.client('s3').create_bucket(Bucket=bucket_name)
        else:
            regional_client = boto3.client('s3', region_name=region)
            regional_client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': region})
    except ClientError as e:
        # Log-and-report rather than raise: callers treat failure as False.
        logging.error(e)
        return False
    return True
create_bucket(bucket, AWS_REGION)
# -
# ## Distributed training
#
# * We will train the model by using TFJob to run a distributed training job
# +
train_name = f"mnist-train-{uuid.uuid4().hex[:4]}"
num_ps = 1
num_workers = 2
model_dir = f"s3://{bucket}/mnist"
export_path = f"s3://{bucket}/mnist/export"
train_steps = 200
batch_size = 100
learning_rate = .01
image = cluster_builder.image_tag
train_spec = f"""apiVersion: kubeflow.org/v1
kind: TFJob
metadata:
name: {train_name}
spec:
tfReplicaSpecs:
Ps:
replicas: {num_ps}
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
serviceAccount: default-editor
containers:
- name: tensorflow
command:
- python
- /opt/model.py
- --tf-model-dir={model_dir}
- --tf-export-dir={export_path}
- --tf-train-steps={train_steps}
- --tf-batch-size={batch_size}
- --tf-learning-rate={learning_rate}
image: {image}
workingDir: /opt
env:
- name: AWS_REGION
value: {AWS_REGION}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_SECRET_ACCESS_KEY
restartPolicy: OnFailure
Chief:
replicas: 1
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
serviceAccount: default-editor
containers:
- name: tensorflow
command:
- python
- /opt/model.py
- --tf-model-dir={model_dir}
- --tf-export-dir={export_path}
- --tf-train-steps={train_steps}
- --tf-batch-size={batch_size}
- --tf-learning-rate={learning_rate}
image: {image}
workingDir: /opt
env:
- name: AWS_REGION
value: {AWS_REGION}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_SECRET_ACCESS_KEY
restartPolicy: OnFailure
Worker:
replicas: 1
template:
metadata:
annotations:
sidecar.istio.io/inject: "false"
spec:
serviceAccount: default-editor
containers:
- name: tensorflow
command:
- python
- /opt/model.py
- --tf-model-dir={model_dir}
- --tf-export-dir={export_path}
- --tf-train-steps={train_steps}
- --tf-batch-size={batch_size}
- --tf-learning-rate={learning_rate}
image: {image}
workingDir: /opt
env:
- name: AWS_REGION
value: {AWS_REGION}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_SECRET_ACCESS_KEY
restartPolicy: OnFailure
"""
# -
# ### Create the training job
#
# * You could write the spec to a YAML file and then do `kubectl apply -f {FILE}`
# * Since you are running in jupyter you will use the TFJob client
# * You will run the TFJob in a namespace created by a Kubeflow profile
# * The namespace will be the same namespace you are running the notebook in
# * Creating a profile ensures the namespace is provisioned with service accounts and other resources needed for Kubeflow
tf_job_client = tf_job_client_module.TFJobClient()
# +
# Parse the YAML manifest built above and submit it to the cluster.
tf_job_body = yaml.safe_load(train_spec)
tf_job = tf_job_client.create(tf_job_body, namespace=namespace)
logging.info(f"Created job {namespace}.{train_name}")
# -
# ### Check the job
#
# * Above you used the python SDK for TFJob to check the status
# * You can also use kubectl get the status of your job
# * The job conditions will tell you whether the job is running, succeeded or failed
# !kubectl get tfjobs -o yaml {train_name}
# ## Get The Logs
#
# * There are two ways to get the logs for the training job
#
# 1. Using kubectl to fetch the pod logs
# * These logs are ephemeral; they will be unavailable when the pod is garbage collected to free up resources
# 1. Using Fluentd-Cloud-Watch
# * Kubernetes data plane logs are not automatically available in AWS
# * You need to install fluentd-cloud-watch plugin to ship containers logs to Cloud Watch
#
# ## Deploy TensorBoard
#
# * You will create a Kubernetes Deployment to run TensorBoard
# * TensorBoard will be accessible behind the Kubeflow endpoint
# +
tb_name = "mnist-tensorboard"
tb_deploy = f"""apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: mnist-tensorboard
name: {tb_name}
namespace: {namespace}
spec:
selector:
matchLabels:
app: mnist-tensorboard
template:
metadata:
labels:
app: mnist-tensorboard
version: v1
spec:
serviceAccount: default-editor
containers:
- command:
- /usr/local/bin/tensorboard
- --logdir={model_dir}
- --port=80
image: tensorflow/tensorflow:1.15.2-py3
name: tensorboard
env:
- name: AWS_REGION
value: {AWS_REGION}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_SECRET_ACCESS_KEY
ports:
- containerPort: 80
"""
tb_service = f"""apiVersion: v1
kind: Service
metadata:
labels:
app: mnist-tensorboard
name: {tb_name}
namespace: {namespace}
spec:
ports:
- name: http-tb
port: 80
targetPort: 80
selector:
app: mnist-tensorboard
type: ClusterIP
"""
tb_virtual_service = f"""apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: {tb_name}
namespace: {namespace}
spec:
gateways:
- kubeflow/kubeflow-gateway
hosts:
- '*'
http:
- match:
- uri:
prefix: /mnist/{namespace}/tensorboard/
rewrite:
uri: /
route:
- destination:
host: {tb_name}.{namespace}.svc.cluster.local
port:
number: 80
timeout: 300s
"""
tb_specs = [tb_deploy, tb_service, tb_virtual_service]
# -
k8s_util.apply_k8s_specs(tb_specs, k8s_util.K8S_CREATE_OR_REPLACE)
# ### Access The TensorBoard UI
#
# > Note: By default, your namespace may not have access to `istio-system` namespace to get
# Resolve the cluster's ingress hostname and print a clickable TensorBoard link.
endpoint = k8s_util.get_ingress_endpoint()
if endpoint:
    vs = yaml.safe_load(tb_virtual_service)
    path= vs["spec"]["http"][0]["match"][0]["uri"]["prefix"]
    tb_endpoint = endpoint + path
    display(HTML(f"TensorBoard UI is at <a href='{tb_endpoint}'>{tb_endpoint}</a>"))
# ## Wait For the Training Job to finish
# * You can use the TFJob client to wait for it to finish.
# +
# Blocks until the TFJob reaches a terminal condition, then fails loudly if
# it did not succeed.
tf_job = tf_job_client.wait_for_condition(train_name, expected_condition=["Succeeded", "Failed"], namespace=namespace)
if tf_job_client.is_job_succeeded(train_name, namespace):
    logging.info(f"TFJob {namespace}.{train_name} succeeded")
else:
    raise ValueError(f"TFJob {namespace}.{train_name} failed")
# -
# ## Serve the model
# * Deploy the model using tensorflow serving
# * We need to create
# 1. A Kubernetes Deployment
# 1. A Kubernetes service
# 1. (Optional) Create a configmap containing the prometheus monitoring config
# +
import os
deploy_name = "mnist-model"
model_base_path = export_path
# The web ui defaults to mnist-service so if you change it you will
# need to change it in the UI as well to send predictions to the mode
model_service = "mnist-service"
deploy_spec = f"""apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: mnist
name: {deploy_name}
namespace: {namespace}
spec:
selector:
matchLabels:
app: mnist-model
template:
metadata:
# TODO(jlewi): Right now we disable the istio side car because otherwise ISTIO rbac will prevent the
# UI from sending RPCs to the server. We should create an appropriate ISTIO rbac authorization
# policy to allow traffic from the UI to the model servier.
# https://istio.io/docs/concepts/security/#target-selectors
annotations:
sidecar.istio.io/inject: "false"
labels:
app: mnist-model
version: v1
spec:
serviceAccount: default-editor
containers:
- args:
- --port=9000
- --rest_api_port=8500
- --model_name=mnist
- --model_base_path={model_base_path}
- --monitoring_config_file=/var/config/monitoring_config.txt
command:
- /usr/bin/tensorflow_model_server
env:
- name: modelBasePath
value: {model_base_path}
- name: AWS_REGION
value: {AWS_REGION}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: AWS_SECRET_ACCESS_KEY
image: tensorflow/serving:1.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 30
tcpSocket:
port: 9000
name: mnist
ports:
- containerPort: 9000
- containerPort: 8500
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: "1"
memory: 1Gi
volumeMounts:
- mountPath: /var/config/
name: model-config
volumes:
- configMap:
name: {deploy_name}
name: model-config
"""
service_spec = f"""apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/path: /monitoring/prometheus/metrics
prometheus.io/port: "8500"
prometheus.io/scrape: "true"
labels:
app: mnist-model
name: {model_service}
namespace: {namespace}
spec:
ports:
- name: grpc-tf-serving
port: 9000
targetPort: 9000
- name: http-tf-serving
port: 8500
targetPort: 8500
selector:
app: mnist-model
type: ClusterIP
"""
monitoring_config = f"""kind: ConfigMap
apiVersion: v1
metadata:
name: {deploy_name}
namespace: {namespace}
data:
monitoring_config.txt: |-
prometheus_config: {{
enable: true,
path: "/monitoring/prometheus/metrics"
}}
"""
model_specs = [deploy_spec, service_spec, monitoring_config]
# -
k8s_util.apply_k8s_specs(model_specs, k8s_util.K8S_CREATE_OR_REPLACE)
# ## Deploy the mnist UI
#
# * We will now deploy the UI to visual the mnist results
# * Note: This is using a prebuilt and public docker image for the UI
# +
ui_name = "mnist-ui"
ui_deploy = f"""apiVersion: apps/v1
kind: Deployment
metadata:
name: {ui_name}
namespace: {namespace}
spec:
replicas: 1
selector:
matchLabels:
app: mnist-web-ui
template:
metadata:
labels:
app: mnist-web-ui
spec:
containers:
- image: gcr.io/kubeflow-examples/mnist/web-ui:v20190112-v0.2-142-g3b38225
name: web-ui
ports:
- containerPort: 5000
serviceAccount: default-editor
"""
ui_service = f"""apiVersion: v1
kind: Service
metadata:
annotations:
name: {ui_name}
namespace: {namespace}
spec:
ports:
- name: http-mnist-ui
port: 80
targetPort: 5000
selector:
app: mnist-web-ui
type: ClusterIP
"""
ui_virtual_service = f"""apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: {ui_name}
namespace: {namespace}
spec:
gateways:
- kubeflow/kubeflow-gateway
hosts:
- '*'
http:
- match:
- uri:
prefix: /mnist/{namespace}/ui/
rewrite:
uri: /
route:
- destination:
host: {ui_name}.{namespace}.svc.cluster.local
port:
number: 80
timeout: 300s
"""
ui_specs = [ui_deploy, ui_service, ui_virtual_service]
# -
k8s_util.apply_k8s_specs(ui_specs, k8s_util.K8S_CREATE_OR_REPLACE)
# ## Access the web UI
#
# * A reverse proxy route is automatically added to the Kubeflow endpoint
# * The endpoint will be
#
# ```
# http://${KUBEFLOW_ENDPOINT}/mnist/${NAMESPACE}/ui/
# ```
# * You can get the KUBEFLOW_ENDPOINT
#
# ```
# KUBEFLOW_ENDPOINT=`kubectl -n istio-system get ingress istio-ingress -o jsonpath="{.status.loadBalancer.ingress[0].hostname}"`
# ```
#
# * You must run this command with sufficient RBAC permissions to get the ingress.
#
# * If you have sufficient privileges you can run the cell below to get the endpoint if you don't have sufficient privileges you can
# grant appropriate permissions by running the command
#
# ```
# kubectl create --namespace=istio-system rolebinding --clusterrole=kubeflow-view --serviceaccount=${NAMESPACE}:default-editor ${NAMESPACE}-istio-view
# ```
# Resolve the ingress endpoint (requires istio-system read access) and print
# a clickable link to the mnist UI.
endpoint = k8s_util.get_ingress_endpoint()
if endpoint:
    vs = yaml.safe_load(ui_virtual_service)
    path= vs["spec"]["http"][0]["match"][0]["uri"]["prefix"]
    ui_endpoint = endpoint + path
    display(HTML(f"mnist UI is at <a href='{ui_endpoint}'>{ui_endpoint}</a>"))
| mnist/mnist_aws.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dependencies
# Update sklearn to prevent version mismatches
# !pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
# !pip install joblib
import joblib
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# NOTE(review): keras.wrappers.scikit_learn was removed from recent Keras
# releases (the scikeras package replaces it) — confirm the pinned
# environment still provides this import.
from keras.wrappers.scikit_learn import KerasClassifier
# # Preprocess the raw data
# ### Read the CSV
# Load the Kepler Objects of Interest (KOI) table.
df = pd.read_csv("Resources/exoplanet_data.csv")
# ### Perform Basic Data Cleaning
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
# Peek at the cleaned table and its available columns.
df.head()
df.columns
# ### Select significant features (columns)
# Perform feature selection based upon physical characteristics of the exoplanet candidates. These will also be used as X values.<br>I will mainly select features related to the appearance and physical characteristics. They would be checked off in the following lists. Also, if there is a feature that might result from a linear combination of other features, then that would be left unchecked.
# KOI = Kepler Objects of Interest<br> It is a number used to identify and track a Kepler Object of Interest. A KOI is a target identified by the Kepler Project that displays at least one transit-like sequence within Kepler time-series photometry that appears to be of astrophysical origin and initially consistent with a planetary transit hypothesis.
# #### Project Disposition Columns
# Flags designate the most probable physical explanation of the KOI.
# - [x] koi_fpflag_nt: Not Transit-Like Flag - KOI whose light curve is not consistent with that of a transiting planet.
# - [x] koi_fpflag_ss: Stellar Eclipse Flag - A KOI that is observed to have a significant secondary event, transit shape, or out-of-eclipse variability.
# - [x] koi_fpflag_co: Centroid Offset Flag - The source of the signal is from a nearby star.
# - [x] koi_fpflag_ec: Ephemeris Match Indicates Contamination Flag - The KOI shares the same period and epoch as another object.
# #### Transit Properties
# Transit parameters delivered by the Kepler Project are typically best-fit parameters produced by a Mandel-Agol (2002) fit to a multi-quarter Kepler light curve, assuming a linear orbital ephemeris. Some of the parameters listed below are fit directly, other are derived from the best-fit parameters.
# - [x] koi_period: Orbital Period (days) - The interval between consecutive planetary transits.
# - [x] koi_period_err1: Orbital Period (days) - Uncertainties Column (positive +)
# - [ ] koi_period_err2: Orbital Period (days) - Uncertainties Column (negative -)
# - [x] koi_time0bk: Transit Epoch - The time corresponding to the center of the first detected transit in Barycentric Julian Day (BJD) minus a constant offset of 2,454,833.0 days.
# - [x] koi_time0bk_err1: Transit Epoch - Uncertainties Column (positive +)
# - [ ] koi_time0bk_err2: Transit Epoch - Uncertainties Column (negative -)
# - [x] koi_impact: Impact Parameter - The sky-projected distance between the center of the stellar disc and the center of the planet disc at conjunction, normalized by the stellar radius.
# - [x] koi_impact_err1: Impact Parameter - Uncertainties Column (positive +)
# - [ ] koi_impact_err2: Impact Parameter - Uncertainties Column (negative -)
# - [x] koi_duration: Transit Duration (hours) - The duration of the observed transits.
# - [x] koi_duration_err1: Transit Duration (hours) - Uncertainties Column (positive +)
# - [ ] koi_duration_err2: Transit Duration (hours) - Uncertainties Column (negative -)
# - [x] koi_depth: Transit Depth (parts per million) - The fraction of stellar flux lost at the minimum of the planetary transit.
# - [x] koi_depth_err1: Transit Depth (parts per million) - Uncertainties Column (positive +)
# - [ ] koi_depth_err2: Transit Depth (parts per million) - Uncertainties Column (negative -)
# - [x] koi_prad: Planetary Radius (Earth radii) - The radius of the planet. Planetary radius is the product of the planet star radius ratio and the stellar radius.
# - [x] koi_prad_err1: Planetary Radius (Earth radii) - Uncertainties Column (positive +)
# - [x] koi_prad_err2: Planetary Radius (Earth radii) - Uncertainties Column (negative -)
# - [x] koi_teq: Equilibrium Temperature (Kelvin) - Approximation for the temperature of the planet.
# - [x] koi_insol: Insolation Flux [Earth flux]
# - [x] koi_insol_err1: Insolation Flux [Earth flux] - Uncertainties Column (positive +)
# - [x] koi_insol_err2: Insolation Flux [Earth flux] - Uncertainties Column (negative -)
# #### Threshold-Crossing Event (TCE) Information
# The Transiting Planet Search (TPS) module of the Kepler data analysis pipeline performs a detection test for planet transits in the multi-quarter, gap-filled flux time series. The TPS module detrends each quarterly PDC light curve to remove edge effects around data gaps and then combines the data segments together, filling gaps with interpolated data so as to condition the flux time series for a matched filter.
# - [x] koi_model_snr: Transit Signal-to-Noise - Transit depth normalized by the mean uncertainty in the flux during the transits.
# - [x] koi_tce_plnt_num: TCE Planet Number - TCE Planet Number federated to the KOI.
# #### Stellar Parameters
# Stellar effective temperature, surface gravity, metallicity, radius, mass, and age should comprise a consistent set.
# - [x] koi_steff: Stellar Effective Temperature (Kelvin) - The photospheric temperature of the star.
# - [x] koi_steff_err1: Stellar Effective Temperature (Kelvin) - Uncertainties Column (positive +)
# - [x] koi_steff_err2: Stellar Effective Temperature (Kelvin) - Uncertainties Column (negative -)
# - [x] koi_slogg: Stellar Surface Gravity - The base-10 logarithm of the acceleration due to gravity at the surface of the star.
# - [x] koi_slogg_err1: Stellar Surface Gravity - Uncertainties Column (positive +)
# - [x] koi_slogg_err2: Stellar Surface Gravity - Uncertainties Column (negative -)
# - [x] koi_srad: Stellar Radius (solar radii) - The photospheric radius of the star.
# - [x] koi_srad_err1: Stellar Radius (solar radii) - Uncertainties Column (positive +)
# - [x] koi_srad_err2: Stellar Radius (solar radii) - Uncertainties Column (negative -)
# #### Kepler Input Catalog (KIC) Parameters
# - [x] ra: RA (deg) - KIC Right Ascension of the planetary system in decimal degrees
# - [x] dec: Dec (deg) - KIC Declination in decimal degrees
# - [x] koi_kepmag: Kepler-band (mag) - Kepler-band (mag), it is a magnitude computed according to a hierarchical scheme and depends on what pre-existing catalog source is available.
# Selected features
# 35 columns chosen via the checklists above: disposition flags, transit
# properties, TCE information, stellar parameters and KIC coordinates.
feature_names = ['koi_fpflag_nt',
                 'koi_fpflag_ss',
                 'koi_fpflag_co',
                 'koi_fpflag_ec',
                 'koi_period',
                 'koi_period_err1',
                 'koi_time0bk',
                 'koi_time0bk_err1',
                 'koi_impact',
                 'koi_impact_err1',
                 'koi_duration',
                 'koi_duration_err1',
                 'koi_depth',
                 'koi_depth_err1',
                 'koi_prad',
                 'koi_prad_err1',
                 'koi_prad_err2',
                 'koi_teq',
                 'koi_insol',
                 'koi_insol_err1',
                 'koi_insol_err2',
                 'koi_model_snr',
                 'koi_tce_plnt_num',
                 'koi_steff',
                 'koi_steff_err1',
                 'koi_steff_err2',
                 'koi_slogg',
                 'koi_slogg_err1',
                 'koi_slogg_err2',
                 'koi_srad',
                 'koi_srad_err1',
                 'koi_srad_err2',
                 'ra',
                 'dec',
                 'koi_kepmag'
                ]
# Feature matrix X and target vector y.
X = df[feature_names]
X.head()
# Use `koi_disposition` for the y values
y = df["koi_disposition"]
y.head()
# ### Label-encode y column
# LabelEncoder assigns integer labels 0..2 to the classes sorted
# alphabetically (CANDIDATE < CONFIRMED < FALSE POSITIVE).
label_encoder = LabelEncoder()
label_encoder.fit(y)
encoded_y = label_encoder.transform(y)
# NOTE(review): this prints one pair for every row of the dataset;
# slice the zip if the output gets too long.
for label, original_class in zip(encoded_y, y):
    print('Original Class: ' + str(original_class))
    print('Encoded Label: ' + str(label))
    print('-' * 12)
# decoded_y = label_encoder.inverse_transform(encoded_y)
# Show which class each integer label 0, 1, 2 maps back to.
decoded_y = label_encoder.inverse_transform([0,1,2])
decoded_y
# Note that each of the original labels has been replaced with an integer.
# ### Splitting the data into training and testing sets.
# split data into training and testing
X_train, X_test, y_train, y_test = train_test_split(X, encoded_y, random_state=42)
X_train.head()
# ### Applying One-Hot Encoding
# One-hot encode the integer labels (one column per class).
y_train_categorical = to_categorical(y_train)
y_test_categorical = to_categorical(y_test)
y_train_categorical
# ### Scaling the data
#
# Scaling the data using the MinMaxScaler
# Scale your data
# Fit the scaler on the training split only, then apply to both splits
# so no information leaks from the test set.
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
X.shape
y.shape
# # Original Model
# ### Create the model: for categorical data, we use a classifier model
number_inputs = 35  # one input per selected feature (len(feature_names) == 35)
number_hidden_nodes_level1 = 40  # units in first hidden layer
number_hidden_nodes_level2 = 50  # units in second hidden layer
number_classes = 3 # output nodes
# Function to create model, required for KerasClassifier
def create_DeepLearning_model(n_inputs=number_inputs,
                              n_hidden1=number_hidden_nodes_level1,
                              n_hidden2=number_hidden_nodes_level2,
                              n_classes=number_classes):
    """Build and compile a two-hidden-layer feed-forward classifier.

    Layer sizes default to the module-level constants above (captured once,
    at definition time), so the no-argument call required by
    ``KerasClassifier(build_fn=...)`` behaves exactly as before, while the
    topology is now parameterizable for experimentation.

    Returns:
        A compiled ``Sequential`` model with a softmax output layer and
        categorical-crossentropy loss.
    """
    # We first need to create a sequential model
    model = Sequential()
    # The first layer must also declare the input dimensionality.
    model.add(Dense(units=n_hidden1, activation='relu', input_dim=n_inputs))
    model.add(Dense(units=n_hidden2, activation='relu'))
    # Output layer: softmax yields one probability per class.
    model.add(Dense(units=n_classes, activation='softmax'))
    # Categorical crossentropy pairs with the one-hot encoded labels.
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# model = create_DeepLearning_model(number_inputs, number_hidden_nodes_level1, number_hidden_nodes_level2, number_classes)
model = create_DeepLearning_model()
model.summary()
# ### Train the Model
# Training consists of updating our weights using our optimizer and loss function. In this case, we choose 50 iterations (loops) of training that are called epochs. We also choose to shuffle our training data and increase the detail printed out during each training cycle.
model.fit(
    X_train_scaled,
    y_train_categorical,
    epochs=50,
    shuffle=True,
    verbose=2
)
# We use our testing data to validate our model. This is how we determine the validity of our model (i.e. the ability to predict new and previously unseen data points)
# Evaluate the original model using the testing data
model_loss, model_accuracy = model.evaluate(
    X_test_scaled, y_test_categorical, verbose=2)
# NOTE(review): crossentropy loss is not a percentage; the "%" formatting
# below is misleading (accuracy*100 is fine).
print(f"Loss: {round(model_loss*100,3)}%, Accuracy: {round(model_accuracy*100,3)}%")
# # Hyperparameter Tuning
# ### Create the `GridSearchCV` model to find best/tuned parameters
# +
# Wrap the Keras builder so scikit-learn's GridSearchCV can drive it.
model_keras = KerasClassifier(build_fn=create_DeepLearning_model, verbose=0)
# define the grid search parameters
batch_size = [5, 10, 20, 40, 60, 80, 100]
epochs = [10, 50, 100, 1000]
param_grid = dict(batch_size=batch_size, epochs=epochs)
# 7 x 4 = 28 parameter combinations, each fit cv=3 times (84 fits total).
grid_model = GridSearchCV(model_keras, param_grid, verbose=1, cv=3, n_jobs=-1)
# Train the model with GridSearch
_ = grid_model.fit(X_train_scaled, y_train_categorical)
# -
# ### Find Tuned parameters
print(f"Best Parameters: {grid_model.best_params_}")
print(f"Best Score: {round(grid_model.best_score_*100,3)}%")
# # Tuned Model
# ### Create the model
# Tuned model based upon best parameters previously found
# NOTE(review): this aliases the already-trained original model instead of
# building a fresh one, so the fit below continues training those weights;
# consider `tuned_model = create_DeepLearning_model()` for a clean retrain.
tuned_model = model
# ### Train the Model with specific parameters
# Fit and score the tuned model
# Re-fit using the best batch size / epoch count found by the grid search.
tuned_model.fit(
    X_train_scaled,
    y_train_categorical,
    batch_size=grid_model.best_params_['batch_size'],
    epochs=grid_model.best_params_['epochs'],
    shuffle=True,
    verbose=2
)
# Evaluate the tuned model using the testing data
model_loss, model_accuracy = tuned_model.evaluate(
    X_test_scaled, y_test_categorical, verbose=2)
print(f"Loss: {round(model_loss*100,3)}%, Accuracy: {round(model_accuracy*100,3)}%")
# ### Classification report
# +
# Make predictions with the hypertuned model
# (softmax output: one row of class probabilities per test sample)
predictions = tuned_model.predict(X_test_scaled)
# Transform categorical (one-hot style) arrays to numeric label representations
def uncategorize(values):
    """Return the index of the first entry of *values* that rounds to 1.

    Collapses a softmax / one-hot row back to its integer class label;
    falls back to 0 when no entry rounds to 1.
    (Renamed the parameter from ``list``, which shadowed the builtin.)
    """
    for pos, value in enumerate(values):
        if round(value, 0) == 1:
            return pos
    return 0
predictions_uncategorized = [uncategorize(row) for row in predictions]
# Calculate classification report.
# LabelEncoder orders classes alphabetically, so the integer labels are
# 0="CANDIDATE", 1="CONFIRMED", 2="FALSE POSITIVE"; target_names must be
# listed in that same order (the original list swapped the last two names,
# mislabeling the report rows).
print(classification_report(y_test, predictions_uncategorized,
                            target_names=["CANDIDATE", "CONFIRMED", "FALSE POSITIVE"]))
| DeepLearning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Topological Searches in pandapower
# This is an introduction to the pandapower module topology. The topology module provides topological searches and analyses for pandapower networks based on the NetworkX library. This tutorial will show you how to get started and demonstrate a few use cases of the module. For a full documentation of the topology functions, see the pandapower documentation.
#
# To demonstrate the usage of the topology module we will use the medium voltage open ring from the simple pandapower test networks:
import pandapower.networks as nw
# Example grid: medium-voltage open ring from the pandapower test networks.
net = nw.simple_mv_open_ring_net()
# ## Creating MultiGraphs
# The pandapower topology package provides a function to translate a pandapower network into an [networkx multigraph](https://networkx.github.io/documentation/networkx-1.9.1/reference/classes.multigraph.html):
import pandapower.topology as top
# Open switches are respected by default, so the ring appears open in the graph.
mg = top.create_nxgraph(net) # converts example network into a MultiGraph
# This picture visualises the conversion: On the left hand side you can see what our example network looks like, on the right hand side how it gets converted into a MultiGraph.
#
# <img src="pics/multigraph_example.png">
#
#
# ## Algorithms from the NetworkX package
#
# The bus numbers in the networkx graph are the same as the bus indices in pandapower. You can now use all [networkx algorithms](https://networkx.github.io/documentation/networkx-1.9.1/reference/algorithms.html) to perform graph searches on the network.
# ### shortest path
# To find the [shortest path](https://networkx.github.io/documentation/networkx-1.9.1/reference/algorithms.shortest_paths.html) between nodes bus0 and bus5:
#
# <img src="pics/nx_shortest_path.png">
import networkx as nx
# Bus indices in the graph are the same as the pandapower bus indices.
path = nx.shortest_path(mg, 0, 5)
path
# This algorithm will find the shortest path in terms of the number of visited buses. The length of the lines is encoded in the weight parameter of the edges. If we want to find the shortest path in terms of shortest line length, we have to pass the weight parameter to the search:
# Minimize total edge weight (line length) instead of hop count.
path = nx.shortest_path(mg, 0, 5, weight="weight")
path
# In this case the search of course yields the same path, since there is only one path from bus0 to bus5.
#
# Since the bus indices in the graph and in the pandapower network are the same, we can use the path to directly access buses in pandapower:
# Use the path's bus indices to index the pandapower element tables directly.
net.bus.loc[path]
# gives us all buses on the shortest path between bus0 and bus5. We can also use the bus indices to find branch elements directly in pandapower. For example, to find all lines on the path:
net.line[(net.line.from_bus.isin(path)) & (net.line.to_bus.isin(path))]
# or all transformers on this path:
net.trafo[(net.trafo.hv_bus.isin(path)) & (net.trafo.lv_bus.isin(path))]
# ### customizing graph conversion
# Now suppose we want to find the shortest path distance between bus2 and bus6 without going through the transformer substation, but allowing to go over open switches. The path we are looking for is therefore bus6 --> bus5 --> bus4 --> bus3 --> bus2.
#
# This is not a path in the graph above though, since there is no edge between bus4 and bus5. We translate the graph with respect_switches=False to include the line with an open switch in the graph:
# Include lines behind open switches in the graph as well.
mg = top.create_nxgraph(net, respect_switches=False)
# <img src="pics/multigraph_example_respect_switches.png">
# Now we still have the problem that the shortest path algorithm will find the path over the substation (bus1) as the shortest path:
nx.shortest_path(mg, 6, 2)
# To prevent this, we can specify bus1 as a nogobus in the conversion, which means it will not be translated to the networkx graph:
mg = top.create_nxgraph(net, respect_switches=False, nogobuses={1})
# Now we get the path that we were looking for:
nx.shortest_path(mg, 6, 2)
# ### cycles
#
# We can also use the [cycle algorithms](https://networkx.github.io/documentation/networkx-1.9.1/reference/algorithms.cycles.html) to find cycles in the network. Cycle algorithms only work on undirected graphs, which is why we need to specify multi=False in the graph conversion:
# multi=False yields a plain (non-multi) undirected graph, as the cycle
# algorithms require.
mg = top.create_nxgraph(net, multi=False)
nx.cycle_basis(mg)
# There are no cycles in the network, which confirms the radiality of the network. If we do not respect the switches, we will find the ring as a cycle:
mg = top.create_nxgraph(net, respect_switches=False, multi=False)
nx.cycle_basis(mg)
# ## Algorithms in the topology package
# Besides from using networkx algorithms, there are some custom algorithms in the pandapower.topology package. For a full list with explanation see the pandapower documentation. Here, we only cover the two most important ones: connected_component and connected_components.
# ### Connected component
# The connected component function returns all buses that are connected to a bus in the networkx graph. Suppose we want to find all buses that are on the same feeder as bus 2. We set bus1 as a nogobus and search for all buses connected to bus 2:
# Exclude the substation bus (bus1) and find everything reachable from bus2.
mg = top.create_nxgraph(net, nogobuses={1})
area = top.connected_component(mg, 2)
# This generator contains all buses connected to bus2:
set(area)
# We get the buses 2,3 and 4, but not bus1, since it was defined as a nogobus. If we want to get bus1 as connected to bus2, but still not go over bus1, we can define bus1 as a notravbus instead of a nogobus. This means that search algorithms will find the bus as connected, but not traverse it:
mg = top.create_nxgraph(net, notravbuses={1})
set(top.connected_component(mg, 2))
# ### Connected components
# If we don't want to find the area connected to one specific bus, but rather all areas that are connected, we can use the connected_components function:
# Enumerate all connected areas once the external grid / substation buses
# are excluded.
mg = top.create_nxgraph(net, nogobuses={0, 1})
for area in top.connected_components(mg):
    print(area)
# Once again, we can alternatively use notravbuses to get the substation bus in the areas:
mg = top.create_nxgraph(net, nogobuses={0}, notravbuses={1})
for area in top.connected_components(mg):
    print(area)
# If we want to avoid getting the notravbus as an own area, we can alternatively pass the notravbuses argument directly to the connected_components search instead of to the graph conversion:
mg = top.create_nxgraph(net, nogobuses={0})
for area in top.connected_components(mg, notravbuses={1}):
    print(area)
# For more examples of topological searches in pandapower, see the documentation of the topology package.
| tutorials/topology.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparative genomic analysis of *Arothron* species
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import pandas as pd
# %matplotlib inline
# -
# Per-sample metadata table (names, species labels, plotting attributes).
df = pd.read_csv("Aro_samples.csv")
df[['sample_name', 'sci_name', 'label']]
# ## Principal component analysis
# #### using [PCAngsd](https://github.com/Rosemeis/pcangsd) v0.95 ([<NAME> 2018](https://doi.org/10.1534/genetics.118.301336))
# +
sns.set_context('talk')
sns.set_style("whitegrid", {'grid.linestyle': '--'})
# sns.set_style("white")
# Covariance matrix estimated by PCAngsd; its eigendecomposition gives the PCs.
dfcov = pd.read_table("Aro_PCAngsd_v0.95.cov", header=None)
C = dfcov.values
eigVals, eigVecs = np.linalg.eigh(C)
# eigh returns eigenvalues in ascending order; re-sort descending so PC1
# is the component with the largest variance.
idx = eigVals.argsort()[::-1]
eigVals = eigVals[idx]
eigVecs = eigVecs[:,idx]
# Fraction of total variance captured by each of the first five PCs.
pc1 = eigVals[0]/eigVals.sum()
pc2 = eigVals[1]/eigVals.sum()
pc3 = eigVals[2]/eigVals.sum()
pc4 = eigVals[3]/eigVals.sum()
pc5 = eigVals[4]/eigVals.sum()
print("pc1:" + str(pc1))
print("pc2:" + str(pc2))
print("pc3:" + str(pc3))
print("pc4:" + str(pc4))
print("pc5:" + str(pc5))
# Store per-sample PC coordinates alongside the sample metadata.
df["PC1"] = eigVecs[:, 0]
df["PC2"] = eigVecs[:, 1]
df["PC3"] = eigVecs[:, 2]
df["PC4"] = eigVecs[:, 3]
df["PC5"] = eigVecs[:, 4]
def pca_plot(pcx, pcy):
    """Scatter-plot the samples on two principal components.

    ``pcx`` / ``pcy`` are column names in the global ``df``
    (e.g. 'PC1', 'PC2'). Marker style, color and label placement are read
    per sample from ``df``. Returns True.

    (Removed the unused locals numS1/numS2/numS3, ``sp`` and the unused
    ``plt_sc`` binding from the original.)
    """
    plt.figure(figsize=(5, 5))
    for idx, row in df.iterrows():
        # Italicized species label for the legend/annotation.
        label = "$\it{"+row['label']+"}$"
        plt.scatter(row[pcx], row[pcy],
                    color=row['color'],
                    marker=row['marker'],
                    s=row['markersize'],
                    linewidth=row['linewidth'],
                    alpha=row['alpha'],
                    label=label)
        # Offset each text label by the per-sample (xx, yy) nudge, in points.
        plt.annotate(label,
                     xy=(row[pcx], row[pcy]),
                     xytext=(row['xx'], row['yy']),
                     textcoords='offset points',
                     horizontalalignment=row['h_align'],
                     verticalalignment='bottom',
                     alpha=1.0,
                     fontsize=12)
    return True
# Plot and save the first three PC projections; axis labels carry the
# percentage of variance explained computed above.
# PC1, PC2
pca_plot('PC1', 'PC2')
plt.xlabel("PC1 (" + "{0:.1f}".format(pc1*100) + "%)", fontsize=16)
plt.ylabel("PC2 (" + "{0:.1f}".format(pc2*100) + "%)", fontsize=16)
plt.xlim(-0.35, 0.65)
plt.ylim(-0.4, 0.4)
plt.xticks([-0.2, 0.0, 0.2, 0.4, 0.6], fontsize=12)
plt.yticks([-0.4, -0.2, 0.0, 0.2, 0.4], fontsize=12)
plt.savefig("Aro_PCAngsd_PC1-PC2.pdf", bbox_inches='tight')
# PC2, PC3
pca_plot('PC2', 'PC3')
plt.xlabel("PC2 (" + "{0:.1f}".format(pc2*100) + "%)", fontsize=16)
plt.ylabel("PC3 (" + "{0:.1f}".format(pc3*100) + "%)", fontsize=16)
plt.xlim(-0.4, 0.4)
plt.ylim(-0.3, 1.1)
plt.xticks(fontsize=12)
# plt.yticks([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=12)
plt.yticks([0.0, 0.4, 0.8], fontsize=12)
plt.savefig("Aro_PCAngsd_PC2-PC3.pdf", bbox_inches='tight')
# PC3, PC4
pca_plot('PC3', 'PC4')
plt.xlabel("PC3 (" + "{0:.1f}".format(pc3*100) + "%)", fontsize=16)
plt.ylabel("PC4 (" + "{0:.1f}".format(pc4*100) + "%)", fontsize=16)
plt.xlim(-0.3, 1.1)
plt.ylim(-0.3, 0.75)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.savefig("Aro_PCAngsd_PC3-PC4.pdf", bbox_inches='tight')
# -
# ## Admixture analysis
# #### using [NGSAdmix](http://www.popgen.dk/software/index.php/NgsAdmix) v32 ([Skotte et al. 2013](https://doi.org/10.1534/genetics.113.154138)) and [ANGSD](http://www.popgen.dk/angsd/index.php/ANGSD) v0.918 ([Korneliussen et al. 2014](https://doi.org/10.1186/s12859-014-0356-4))
# +
sns.set_context("talk")
sns.set_style("white")
basename = "Aro_NGSadmix"
# Named colors from the seaborn palettes, reused across K values so the
# same ancestry component keeps a consistent color between panels.
sns_deep_blue = sns.color_palette('deep').as_hex()[0]
sns_deep_green = sns.color_palette('deep').as_hex()[2]
sns_deep_red = sns.color_palette('deep').as_hex()[3]
sns_deep_lightblue = sns.color_palette('deep').as_hex()[9]
sns_muted_yellow = sns.color_palette('muted').as_hex()[8]
# Fixed plotting order of the samples (bar positions, left to right).
sample_order=['AR0001',
              'AR0002',
              'AR0003',
              'AR0004',
              'AR0005',
              'AR0006',
              'AR0007',
              'AR0008',
              'AR0009',
              'AR0014',
              'AR0015',
              'AR0016',
              'AR0017',
              'AR0010',
              'AR0018',
              'AR0019',
              'AR0020',
              'AR0021',
              'AR0011',
              'AR0012',
              'AR0013']
# One stacked-bar panel per K: read the NGSadmix .qopt ancestry fractions,
# order samples by sample_order, and stack the K components per sample.
# The per-K branches hand-pick component order and colors so that matching
# components keep the same color across panels.
for i in [4, 5, 6]:
    plt.figure(figsize=(8, 2.5))
    df_k = pd.read_csv(basename+"_K"+str(i)+".qopt",
                       header=None,
                       delim_whitespace=True)
    df_k.columns = list(range(1, i+1))
    # Categorical with fixed order so sort_values follows sample_order.
    df_k['sample_name'] = pd.Categorical(df.sample_name, sample_order)
    df_k['label'] = df.label
    df_k_sort=df_k.sort_values(by='sample_name')
    r = np.arange(len(df_k_sort))
    if (i==3):
        plt.bar(r, list(df_k_sort[1]), 0.8, linewidth=0, color='orange', align='center')
        plt.bar(r, list(df_k_sort[2]), 0.8, linewidth=0, color=sns_deep_red,
                bottom=list(df_k_sort[1]))
        plt.bar(r, list(df_k_sort[3]), 0.8, linewidth=0, color=sns_deep_green,
                bottom=list(df_k_sort[1]+df_k_sort[2]))
    elif (i==4):
        plt.bar(r, list(df_k_sort[1]), 0.8, linewidth=0, color=sns_deep_blue, align='center')
        plt.bar(r, list(df_k_sort[2]), 0.8, linewidth=0, color='orange',
                bottom=list(df_k_sort[1]))
        plt.bar(r, list(df_k_sort[4]), 0.8, linewidth=0, color=sns_deep_red,
                bottom=list(df_k_sort[1]+df_k_sort[2]))
        plt.bar(r, list(df_k_sort[3]), 0.8, linewidth=0, color=sns_deep_green,
                bottom=list(df_k_sort[1]+df_k_sort[2]+df_k_sort[4]))
    elif (i==5):
        plt.bar(r, list(df_k_sort[3]), 0.8, linewidth=0, color='orange', align='center')
        plt.bar(r, list(df_k_sort[4]), 0.8, linewidth=0, color=sns_deep_lightblue,
                bottom=list(df_k_sort[3]))
        plt.bar(r, list(df_k_sort[2]), 0.8, linewidth=0, color=sns_deep_red,
                bottom=list(df_k_sort[3]+df_k_sort[4]))
        plt.bar(r, list(df_k_sort[1]), 0.8, linewidth=0, color=sns_deep_green,
                bottom=list(df_k_sort[2]+df_k_sort[3]+df_k_sort[4]))
        plt.bar(r, list(df_k_sort[5]), 0.8, linewidth=0, color=sns_deep_blue,
                bottom=list(df_k_sort[1]+df_k_sort[2]+df_k_sort[3]+df_k_sort[4]))
    elif (i==6):
        plt.bar(r, list(df_k_sort[1]), 0.8, linewidth=0, color=sns_muted_yellow, align='center')
        plt.bar(r, list(df_k_sort[6]), 0.8, linewidth=0, color='orange',
                bottom=list(df_k_sort[1]))
        plt.bar(r, list(df_k_sort[3]), 0.8, linewidth=0, color=sns_deep_lightblue,
                bottom=list(df_k_sort[1]+df_k_sort[6]))
        plt.bar(r, list(df_k_sort[4]), 0.8, linewidth=0, color=sns_deep_blue,
                bottom=list(df_k_sort[1]+df_k_sort[6]+df_k_sort[3]))
        plt.bar(r, list(df_k_sort[5]), 0.8, linewidth=0, color=sns_deep_red,
                bottom=list(df_k_sort[1]+df_k_sort[6]+df_k_sort[3]+df_k_sort[4]))
        plt.bar(r, list(df_k_sort[2]), 0.8, linewidth=0, color=sns_deep_green,
                bottom=list(df_k_sort[1]+df_k_sort[6]+df_k_sort[3]+df_k_sort[4]+df_k_sort[5]))
    else:
        # Generic fallback: stack components 1..K in numeric order.
        plt.bar(r, list(df_k_sort[1]), 0.8)
        bottom = df_k_sort[1]
        for j in np.arange(2, i+1):
            plt.bar(r, list(df_k_sort[j]), 0.8, bottom=list(bottom))
            bottom=bottom+df_k_sort[j]
    # Hide the x axis and the right/top/bottom spines for a clean panel.
    plt.tick_params(axis='x',
                    which='both',
                    bottom=False,
                    top=False,
                    labelbottom=False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['bottom'].set_visible(False)
    plt.xlim(-0.8, len(df_k_sort)-0.5)
    plt.ylabel("K="+str(i), fontsize=24)
    plt.yticks((0.0, 0.5, 1.0), fontsize=16)
    # plt.savefig("Aro_NGSadmix_K"+str(i)+".svg", bbox_inches='tight')
    plt.savefig("Aro_NGSadmix_K"+str(i)+".pdf", bbox_inches='tight', transparent = True)
# Optional: re-enable x labels (italic species names) on the last panel.
# plt.xticks(r, ["$\it{"+label+"}$" for label in df_k_sort['label']], fontsize=16, rotation=90)
# plt.tick_params(
#     axis='x',
#     labelbottom=True)
# plt.savefig("Aro_NGSadmix_K6.svg", bbox_inches='tight')
# plt.savefig("Aro_NGSadmix_K6.pdf", bbox_inches='tight')
# -
# -
# ## Hybrid index and heterozygosity
# Hybrid index vs. heterozygosity: the triangle marks the space expected
# between the two parental species (corners) and an F1 hybrid (apex).
df_het=pd.read_csv("Aro_Het.csv")
df_het
# +
sns.set_context('talk')
sns.set_style('whitegrid')
sns_deep_green=sns.color_palette('deep').as_hex()[2]
fig = plt.figure(figsize=(5, 5))
ax=fig.add_subplot(111)
# The three focal samples (rows 0-2 of df_het), drawn as crosses on top.
ax.scatter(df_het['r_ste'][0], df_het['het'][0],
           color='chocolate', marker='x', linewidth=4.0, s=200, zorder=10)
ax.scatter(df_het['r_ste'][1], df_het['het'][1],
           color='chocolate', marker='x', linewidth=4.0, s=200, zorder=10)
ax.scatter(df_het['r_ste'][2], df_het['het'][2],
           color='seagreen', marker='x', linewidth=4.0, s=200, zorder=10)
# Reference points for the two parental species at hybrid index 0 and 1.
ax.scatter([0.0], [0.0], color='orange', marker='o', s=250, zorder=10)
ax.scatter([1.0], [0.0], color=sns_deep_green, marker='s', s=250, zorder=10)
# Shaded triangle (0,0)-(0.5,1)-(1,0) behind the points.
t1=mpatches.Polygon([[0.0, 0.0], [0.5, 1.0], [1.0, 0.0]],
                    color='lightblue', linewidth=0, alpha=0.3, zorder=1)
ax.add_patch(t1)
plt.xlim(-0.1, 1.1)
plt.xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14)
plt.xlabel("Hybrid index", fontsize=18)
plt.ylim(-0.15, 1.15)
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14)
plt.ylabel("Heterozygosity", fontsize=18)
ax.set_aspect(0.9)
plt.savefig("Aro_Het.svg", bbox_inches='tight')
# -
| compgen/Aro_compgen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R 3.5.1
# language: R
# name: ir32
# ---
# Load the raw gene-count matrix (genes x cells) for GEO series GSE124731.
rawdata = read.table('GSE124731_single_cell_rnaseq_gene_counts.txt', header = 1, stringsAsFactors = FALSE, row.names = 1)
# Map Ensembl gene IDs to symbols; keep only mapped genes.
geneIDs1 <- ensembldb::select(EnsDb.Hsapiens.v86, keys= rownames(rawdata), keytype = "GENEID", columns = c("SYMBOL","GENEID"))
rawdata = rawdata[rownames(rawdata) %in% geneIDs1$GENEID,]
# Keep only the first gene ID per symbol so the symbol rownames are unique.
rawdata = rawdata[geneIDs1$GENEID[!duplicated(geneIDs1$SYMBOL)],]
geneIDs1 <- ensembldb::select(EnsDb.Hsapiens.v86, keys= rownames(rawdata), keytype = "GENEID", columns = c("SYMBOL","GENEID"))
rownames(rawdata) = geneIDs1$SYMBOL
# Project helpers (CreateSeuratObject wrappers, seuratProcess, etc.).
source("../../functions.R")
setwd('Figures/Figure 3')
# Build, normalize and run the project's standard Seurat pipeline.
seurat = CreateSeuratObject(rawdata)
seurat = NormalizeData(seurat)
seurat@meta.data$n_genes = seurat@meta.data$nGene
seurat = seuratProcess(seurat)
metadata = read.table('GSE124731_single_cell_rnaseq_meta_data.txt', header = 1, stringsAsFactors = FALSE, row.names = 1)
# NOTE(review): the `<EMAIL>` tokens below are an anonymization artifact in
# this copy (presumably a seurat@... slot assignment indexed by cell names);
# restore the original expressions before running.
<EMAIL> = metadata[<EMAIL>,]
DimPlot(seurat, 'umap', group.by = 'cell.type')
table(seurat@ident)
# +
# Marker genes for each lymphocyte subset versus all remaining cells,
# using the Wilcoxon rank-sum test; gd and ab pool the indicated idents.
inkt = FindMarkers(seurat, 'iNKT', test.use = 'wilcox')
nk = FindMarkers(seurat, 'NK', test.use = 'wilcox')
vd1 = FindMarkers(seurat, 'Vd1', test.use = 'wilcox')
vd2 = FindMarkers(seurat, 'Vd2', test.use = 'wilcox')
mait = FindMarkers(seurat, 'MAIT', test.use = 'wilcox')
gd = FindMarkers(seurat, c('Vd1', 'Vd2'), test.use = 'wilcox')
cd4 = FindMarkers(seurat, c('CD4'), test.use = 'wilcox')
cd8 = FindMarkers(seurat, c('CD8'), test.use = 'wilcox')
ab = FindMarkers(seurat, c('CD4', 'CD8'), test.use = 'wilcox')
head(inkt, 20)
# +
# Tag each marker table with its cell type and a gene column, then stack
# them into one long table and export it for the downstream figure panels.
inkt$type = 'iNKT'
nk$type = 'NK'
vd1$type = 'vd1'
vd2$type = 'vd2'
mait$type = 'MAIT'
gd$type = 'gd'
cd4$type = 'CD4'
cd8$type = 'CD8'
ab$type = 'ab'
inkt$gene = rownames(inkt)
nk$gene = rownames(nk)
vd1$gene = rownames(vd1)
vd2$gene = rownames(vd2)
mait$gene = rownames(mait)
gd$gene = rownames(gd)
cd4$gene = rownames(cd4)
cd8$gene = rownames(cd8)
ab$gene = rownames(ab)
df = rbind(inkt, nk, vd1, vd2, mait, gd, cd4, cd8, ab)
write.table(df, sep = ',', 'innate_lists.csv')
| Figures/Figure 3/InnateSubsets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !python -m pip install --upgrade pip
# !pip3 install opencv-python
# !python -m pip install --upgrade opencv-python
import cv2
print(cv2.__version__)
# Path to the input image; adjust per machine (alternate Linux path below).
imgpath = 'D:/Dataset/4.2.03.tiff'
#imgpath = '/home/pi/Dataset/4.2.03.tiff'
img = cv2.imread(imgpath)
type(img)
# Show the image in a window; blocks until any key is pressed.
cv2.imshow('Mandrill', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Same display, but with an explicitly created named window.
cv2.namedWindow('Mandrill', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Mandrill', img)
cv2.waitKey(0)
cv2.destroyWindow('Mandrill')
# Re-encode the image as JPEG on disk.
outpath = 'D:/Dataset/output.jpg'
cv2.imwrite(outpath, img)
# Reload in grayscale (flag 0) and display again.
img = cv2.imread(imgpath, 0)
cv2.imshow('Mandrill', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
# Draw primitives on a blank 512x512 canvas; colors are BGR tuples.
img1 = np.zeros((512, 512, 3), np.uint8)
cv2.line(img1, (0, 99), (99, 0), (255, 0, 0), 2)
cv2.rectangle(img1, (100, 60), (200, 170), (0, 255, 0), 2)
# thickness -1 fills the shape.
cv2.circle(img1, (60, 60), 50, (0, 0, 255), -1)
cv2.ellipse(img1, (100, 200), (50, 20), 0, 0, 360, (127, 127, 127), -1)
points = np.array([[80, 2], [125, 0],
                   [170, 0], [230, 5],
                   [30, 50]], np.int32)
# polylines expects point arrays shaped (N, 1, 2).
points = points.reshape((-1, 1, 2))
cv2.polylines(img1, [points], True, (0, 255, 255))
text1 = 'Test Text'
cv2.putText(img1, text1, (100, 100),
            cv2.FONT_HERSHEY_SIMPLEX,
            5, (255, 255, 0))
cv2.imshow('Shapes', img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| Section06/01_Intro_to_OpenCV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recursion
# - defining something in terms of itself usually at some smaller scale, perhaps multiple times, to achieve your objective
# - e.g., a human being is someone whose mother is a human being
# - directory is a structure that holds files and (smaller) directories, etc.
# - in programming, functions can generally <em>call themselves</em> to solve smaller subproblems
# - fractals is a drawing which also has self-similar structure, where it can be defined in terms of itself
#
# ## Definitions
# <strong>Recursion: </strong> The process of solving a problem by reducing it to smaller versions of itself is called recursion. <br>
# <strong>Recursive definition: </strong> a definition in which something is defined in terms of smaller version of itself. <br>
# <strong>Recursive algorithm:</strong> an algorithm that finds a solution to a given problem by reducing the problem to smaller versions of itself<br>
# <strong>Infinite recursion:</strong> never stops
#
# ### general construct of recursive algorithms:
# - recursive algorithms have base case(s) and general case(s)
# - base case(s) provides direct answer that makes the recursion stop
# - general case(s) recursively reduces to one of the base case(s)
# ### recursive countdown example
# Recursively print countdown from 10-1 and blast off!
# Run it as a script
import os
import time
def countDown(n):
    """Recursively print a countdown from n to 1, then 'Blast Off!'.

    Clears the terminal and sleeps one second between steps so the
    countdown is visible when run as a script.

    The base case uses n <= 0 (not n == 0) so a negative argument
    terminates immediately instead of recursing until RecursionError.
    """
    os.system('clear')
    if n <= 0:  # base case: stop the recursion
        print('Blast Off!')
        time.sleep(1)
        os.system('clear')
    else:  # general case: reduce to the smaller problem n-1
        print(n)
        time.sleep(1)
        countDown(n-1)  # tail recursion
countDown(10)
# ## Fibonacci numbers
# - Fibonacci sequence: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, ...
# - devised by Fibonacci (1170-1250), who used the sequence to model the breeding of (pairs) of rabbits
# - say in generation 7 you had 21 pairs in total, of which 13 were adults, then in next generation the adults will have bred new children, and the previous children will have grown up to become adults. So, in generation 8, you'll have 13+21=34 rabbits, of which 21 are adults.
# ### Fibonacci number definition
# <pre>
# fib(0) = 0 - base case 1
# fib(1) = 1 - base case 2
# fib(n) = fib(n-1) + fib(n-2) for n >= 2 - general case
# </pre>
# In Python:
#count = 0
def fib(n):
    """Return the n-th Fibonacci number: 0, 1, 1, 2, 3, 5, 8, ...

    Base cases are fib(0) = 0 and fib(1) = 1; the general case is
    fib(n-1) + fib(n-2).  This naive double recursion is exponential
    in n — fine for teaching, slow beyond n of about 30.

    The `global count` declaration is scaffolding for the exercise of
    counting how many times fib() is called (uncomment the count lines
    elsewhere in the notebook to use it); it has no effect on its own.
    """
    global count
    if n <= 1:
        return n
    return fib(n - 1) + fib(n - 2)
fib(10)
#print(count)
#assert fib(8) == 21
#assert fib(10) == 55
# ### visualize fib(4) using pythontutor.com
# - https://goo.gl/YNizhH
from IPython.display import IFrame
src = """
http://pythontutor.com/iframe-embed.html#code=%23%20In%20Python%3A%0Adef%20fib%28n%29%3A%0A%20%20%20%20if%20n%20%3C%3D%201%3A%0A%20%20%20%20%20%20%20%20return%20n%0A%20%20%20%20f%20%3D%20fib%28n-1%29%20%2B%20fib%28n-2%29%0A%20%20%20%20return%20f%0A%20%20%20%20%0Aprint%28fib%284%29%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=false&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false
"""
IFrame(src, width=900, height=300)
# ### how many times is fib() called for fib(4)?
# - Modify fib to count the number of times fib gets called.
# ### Factorial definition
# <pre>
# 0! = 1 - base case
# n! = n.(n-1)! for n >= 1 - general case
# </pre>
# ### exercise 1
# Write a recursive fact(n) function that takes a positive integer n and returns its factorial.
# <pre>
# Here are some test cases that the fact(n) should pass:
# assert fact(5) == 120
# assert fact(10) == 3628800
# assert fact(100) == math.factorial(100)
# </pre>
# ### exercise 2
# Write a recursive function -- gcd(a, b) -- that finds the greatest common divisor of two given positive integers, a and b.
# <pre>
# Here are some test cases that gcd(a, b) should pass:
# assert gcd(2, 100) == 2
# assert gcd(50, 10) == 10
# assert gcd(125, 75) == 25
# </pre>
# ### exercise 3
# Write a program that simulates the steps required to solve the "Tower of Hanoii" puzzle for some disks n.
# - https://www.mathsisfun.com/games/towerofhanoi.html
#
# - Recursive algorithm
# - If there are 1 or more disks to move:
# 1. Move the top n-1 disks from needle 1 (source) to needle 2 (helper), using needle 3 (dest) as the intermediate needle
# 2. Move disk number n from needle 1 to needle 3
# 3. Move the top n - 1 disks from needle 2 to needle 3, using needle 1 as the intermediate needle
def moveDisks(n, src, helper, dst):
    """Print the moves that solve the Tower of Hanoi for n disks.

    Moves disks 1..n from needle `src` to needle `dst`, using `helper`
    as the intermediate needle.  Disk n is the largest.
    """
    if n <= 0:  # no disks left to move
        return
    moveDisks(n - 1, src, dst, helper)   # park the n-1 smaller disks on the helper
    print('Move disk #{} from {} to {}'.format(n, src, dst))
    moveDisks(n - 1, helper, src, dst)   # stack them back on top of disk n
moveDisks(3, 'needle1', 'needle2', 'needle3')
| Ch18-Functions-4-Recursion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to C++
# ## Hello world
# +
# %%file hello.cpp
#include <iostream>
int main() {
std::cout << "Hello, world!" << std::endl;
}
# -
# ### Compilation
# + language="bash"
#
# g++ hello.cpp -o hello.exe
# -
# ### Execution
# + language="bash"
#
# ./hello.exe
# -
# ## Namespaces
#
# Just like Python, C++ has namespaces that allow us to build large libraries without worrying about name collisions. In the `Hello world` program, we used the explicit name `std::cout` indicating that `cout` is a member of the standard workspace. We can also use the `using` keyword to import selected functions or classes from a namespace.
#
# ```c++
# using std::cout;
#
# int main()
# {
# cout << "Hello, world!\n";
# }
# ```
#
# For small programs, we sometimes import the entire namespace for convenience, but this may cause namespace collisions in larger programs.
#
# ```c++
# using namespace std;
#
# int main()
# {
# cout << "Hello, world!\n";
# }
# ```
#
# You can easily create your own namespace.
#
# ```c++
# namespace sta_663 {
# const double pi=3.14159;
#
# void greet(string name) {
# cout << "\nTraditional first program\n";
# cout << "Hello, " << name << "\n";
# }
# }
#
# int main()
# {
# cout << "\nUsing namespaces\n";
# string name = "Tom";
# cout << sta_663::pi << "\n";
# sta_663::greet(name);
# }
# ```
# ## Types
# +
# %%file dtypes.cpp
#include <iostream>
#include <complex>
using std::cout;
int main() {
// Boolean
bool a = true, b = false;
cout << "and " << (a and b) << "\n";
cout << "&& " << (a && b) << "\n";
cout << "or " << (a or b) << "\n";
cout << "|| " << (a || b) << "\n";
cout << "not " << not (a or b) << "\n";
cout << "! " << !(a or b) << "\n";
// Integral numbers
cout << "char " << sizeof(char) << "\n";
cout << "short int " << sizeof(short int) << "\n";
cout << "int " << sizeof(int) << "\n";
cout << "long " << sizeof(long) << "\n";
// Floating point numbers
cout << "float " << sizeof(float) << "\n";
cout << "double " << sizeof(double) << "\n";
cout << "long double " << sizeof(long double) << "\n";
cout << "complex double " << sizeof(std::complex<double>) << "\n";
// Characters and strings
char c = 'a'; // Note single quotes
char word[] = "hello"; // C char arrays
std::string s = "hello"; // C++ string
cout << c << "\n";
cout << word << "\n";
cout << s << "\n";
}
# + language="bash"
#
# g++ dtypes.cpp -o dtypes.exe
# ./dtypes.exe
# -
# ## Type conversions
# +
# %%file type.cpp
#include <iostream>
using std::cout;
using std::string;
using std::stoi;
int main() {
char c = '3'; // A char is an integer type
string s = "3"; // A string is not an integer type
int i = 3;
float f = 3;
double d = 3;
cout << c << "\n";
cout << i << "\n";
cout << f << "\n";
cout << d << "\n";
cout << "c + i is " << c + i << "\n";
cout << "c + i is " << c - '0' + i << "\n";
cout << "s + i is " << stoi(s) + i << "\n"; // Use std::stod to convert to double
}
# + language="bash"
#
# g++ -o type.exe type.cpp -std=c++14
# + language="bash"
#
# ./type.exe
# -
# ## Command line inputs
# +
# %%file command_args.cpp
#include <iostream>
#include <exception>
using std::cout;
using std::stoi;
int main(int argc, char *argv[]) {
for (int i=0; i<argc; i++) {
cout << i << ": " << argv[i];
try {
stoi(argv[i]);
cout << " is an integer\n";
} catch (std::exception& e) {
cout << " is not an integer\n";
}
}
}
# + language="bash"
#
# g++ -o command_args.exe command_args.cpp -std=c++14
# + language="bash"
#
# ./command_args.exe 1 2 hello goodbye
# -
# ## Header, implementation and driver files
# ### Header file(s)
# +
# %%file func02.hpp
double add(double x, double y);
double mult(double x, double y);
# -
# ### Implementation file(s)
# +
# %%file func02.cpp
double add(double x, double y) {
return x + y;
}
double mult(double x, double y) {
return x * y;
}
# -
# ### Driver program
# +
# %%file test_func02.cpp
#include <iostream>
#include "func02.hpp"
int main() {
double a = 3;
double b = 4;
std::cout << add(a, b) << std::endl;
std::cout << mult(a, b) << std::endl;
}
# -
# ### Compilation
# + language="bash"
#
# g++ test_func02.cpp func02.cpp -o test_func02.exe
# -
# ### Execution
# + language="bash"
#
# ./test_func02.exe
# -
# ## Using `make`
# +
# %%file Makefile
test_func02.exe: test_func02.o func02.o
g++ -o test_func02.exe test_func02.o func02.o
test_func02.o: test_func02.cpp func02.hpp
g++ -c test_func02.cpp
func02.o: func02.cpp
g++ -c func02.cpp
# -
# ### Compilation
# + language="bash"
#
# make
# -
# ### Execution
# + language="bash"
#
# ./test_func02.exe
# -
# ## A more flexible Makefile
# +
# %%file Makefile2
CC=g++
CFLAGS=-Wall -std=c++14
test_func02.exe: test_func02.o func02.o
$(CC) $(CFLAGS) -o test_func02.exe test_func02.o func02.o
test_func02.o: test_func02.cpp func02.hpp
$(CC) $(CFLAGS) -c test_func02.cpp
func02.o: func02.cpp
$(CC) $(CFLAGS) -c func02.cpp
# -
# ### Compilation
# Note that no re-compilation occurs!
# + language="bash"
#
# make -f Makefile2
# -
# ### Execution
# + language="bash"
#
# ./test_func02.exe
# -
# ## Input and output
# %%file data.txt
9 6
# +
# %%file io.cpp
#include <fstream>
#include "func02.hpp"
int main() {
std::ifstream fin("data.txt");
std::ofstream fout("result.txt");
double a, b;
fin >> a >> b;
fin.close();
fout << add(a, b) << std::endl;
fout << mult(a, b) << std::endl;
fout.close();
}
# + language="bash"
#
# g++ io.cpp -o io.exe func02.cpp
# + language="bash"
#
# ./io.exe
# -
# ! cat result.txt
# ## Arrays
# +
# %%file array.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
int N = 3;
double counts[N];
counts[0] = 1;
counts[1] = 3;
counts[2] = 3;
double avg = (counts[0] + counts[1] + counts[2])/3;
cout << avg << endl;
}
# + language="bash"
#
# g++ -o array.exe array.cpp
# + language="bash"
#
# ./array.exe
# -
# ## Loops
# +
# %%file loop.cpp
#include <iostream>
using std::cout;
using std::endl;
using std::begin;
using std::end;
int main()
{
int x[] = {1, 2, 3, 4, 5};
cout << "\nTraditional for loop\n";
for (int i=0; i < sizeof(x)/sizeof(x[0]); i++) {
cout << i << endl;
}
cout << "\nUsing iterators\n";
for (auto it=begin(x); it != end(x); it++) {
cout << *it << endl;
}
cout << "\nRanged for loop\n\n";
for (auto &i : x) {
cout << i << endl;
}
}
# + language="bash"
#
# g++ -o loop.exe loop.cpp -std=c++14
# + language="bash"
#
# ./loop.exe
# -
# ## Function arguments
# +
# %%file func_arg.cpp
#include <iostream>
using std::cout;
using std::endl;
// Value parameter
void f1(int x) {
x *= 2;
cout << "In f1 : x=" << x << endl;
}
// Reference parameter
void f2(int &x) {
x *= 2;
cout << "In f2 : x=" << x << endl;
}
/* Note
If you want to avoid side effects
but still use references to avoid a copy operation
use a const reference like this to indicate that x cannot be changed
void f2(const int &x)
*/
/* Note
Raw pointers are prone to error and
generally avoided in modern C++
See unique_ptr and shared_ptr
*/
// Raw pointer parameter
void f3(int *x) {
*x *= 2;
cout << "In f3 : x=" << *x << endl;
}
int main() {
int x = 1;
cout << "Before f1: x=" << x << "\n";
f1(x);
cout << "After f1 : x=" << x << "\n";
cout << "Before f2: x=" << x << "\n";
f2(x);
cout << "After f2 : x=" << x << "\n";
cout << "Before f3: x=" << x << "\n";
f3(&x);
cout << "After f3 : x=" << x << "\n";
}
# + language="bash"
#
# c++ -o func_arg.exe func_arg.cpp --std=c++14
# + language="bash"
#
# ./func_arg.exe
# -
# ## Arrays, pointers and dynamic memory
#
# We generally avoid using raw pointers in C++, but this is standard in C and you should at least understand what is going on.
#
# In C++, we typically use smart pointers, STL containers or convenient array constructs provided by libraries such as Eigen and Armadillo.
# ### Pointers and addresses
# +
# %%file p01.cpp
#include <iostream>
using std::cout;
int main() {
int x = 23;
int *xp;
xp = &x;
cout << "x " << x << "\n";
cout << "Address of x " << &x << "\n";
cout << "Pointer to x " << xp << "\n";
cout << "Value at pointer to x " << *xp << "\n";
}
# + language="bash"
#
# g++ -o p01.exe p01.cpp -std=c++14
# ./p01.exe
# -
# ### Arrays
# +
# %%file p02.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
int xs[] = {1,2,3,4,5};
int ys[3];
for (int i=0; i<5; i++) {
ys[i] = i*i;
}
for (auto x=begin(xs); x!=end(xs); x++) {
cout << *x << " ";
}
cout << "\n";
for (auto x=begin(ys); x!=end(ys); x++) {
cout << *x << " ";
}
cout << "\n";
}
# + language="bash"
#
# g++ -o p02.exe p02.cpp -std=c++14
# ./p02.exe
# -
# ### Dynamic memory
# +
# %%file p03.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
// declare memory
int *z = new int; // single integer
*z = 23;
// Allocate on heap
int *zs = new int[3]; // array of 3 integers
for (int i=0; i<3; i++) {
zs[i] = 10*i;
}
cout << *z << "\n";
for (int i=0; i < 3; i++) {
cout << zs[i] << " ";
}
cout << "\n";
// need for manual management of dynamically assigned memory
delete z;
delete[] zs;
}
# + language="bash"
#
# g++ -o p03.exe p03.cpp -std=c++14
# ./p03.exe
# -
# ### Pointer arithmetic
# +
# %%file p04.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
int xs[] = {100,200,300,400,500,600,700,800,900,1000};
cout << xs << ": " << *xs << "\n";
cout << &xs << ": " << *xs << "\n";
cout << &xs[3] << ": " << xs[3] << "\n";
cout << xs+3 << ": " << *(xs+3) << "\n";
}
# + language="bash"
#
# g++ -o p04.exe p04.cpp
# ./p04.exe
# -
# ### C style dynamic memory for jagged array ("matrix")
# +
# %%file p05.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
int m = 3;
int n = 4;
int **xss = new int*[m]; // assign memory for m pointers to int
for (int i=0; i<m; i++) {
xss[i] = new int[n]; // assign memory for array of n ints
for (int j=0; j<n; j++) {
xss[i][j] = i*10 + j;
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
cout << xss[i][j] << "\t";
}
cout << "\n";
}
// Free memory
for (int i=0; i<m; i++) {
delete[] xss[i];
}
delete[] xss;
}
# + language="bash"
#
# g++ -o p05.exe p05.cpp
# ./p05.exe
# -
# ## Functions
# +
# %%file func01.cpp
#include <iostream>
double add(double x, double y) {
return x + y;
}
double mult(double x, double y) {
return x * y;
}
int main() {
double a = 3;
double b = 4;
std::cout << add(a, b) << std::endl;
std::cout << mult(a, b) << std::endl;
}
# + language="bash"
#
# g++ -o func01.exe func01.cpp -std=c++14
# ./func01.exe
# -
# ### Function parameters
# +
# %%file func02.cpp
#include <iostream>
double* add(double *x, double *y, int n) {
double *res = new double[n];
for (int i=0; i<n; i++) {
res[i] = x[i] + y[i];
}
return res;
}
int main() {
double a[] = {1,2,3};
double b[] = {4,5,6};
int n = 3;
double *c = add(a, b, n);
for (int i=0; i<n; i++) {
std::cout << c[i] << " ";
}
std::cout << "\n";
delete[] c; // Note difficulty of book-keeping when using raw pointers!
}
# + language="bash"
#
# g++ -o func02.exe func02.cpp -std=c++14
# ./func02.exe
# +
# %%file func03.cpp
#include <iostream>
using std::cout;
// Using value
void foo1(int x) {
x = x + 1;
}
// Using pointer
void foo2(int *x) {
*x = *x + 1;
}
// Using ref
void foo3(int &x) {
x = x + 1;
}
int main() {
int x = 0;
cout << x << "\n";
foo1(x);
cout << x << "\n";
foo2(&x);
cout << x << "\n";
foo3(x);
cout << x << "\n";
}
# + language="bash"
#
# g++ -o func03.exe func03.cpp -std=c++14
# ./func03.exe
# -
# ## Generic programming with templates
# +
# %%file template.cpp
#include <iostream>
template<typename T>
T add(T a, T b) {
return a + b;
}
int main() {
int m =2, n =3;
double u = 2.5, v = 4.5;
std::cout << add(m, n) << std::endl;
std::cout << add(u, v) << std::endl;
}
# + language="bash"
#
# g++ -o template.exe template.cpp
# + language="bash"
#
# ./template.exe
# -
# ## Anonymous functions
# +
# %%file lambda.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
int a = 3, b = 4;
int c = 0;
// Lambda function with no capture
auto add1 = [] (int a, int b) { return a + b; };
// Lambda function with value capture
auto add2 = [c] (int a, int b) { return c * (a + b); };
// Lambda function with reference capture
auto add3 = [&c] (int a, int b) { return c * (a + b); };
// Change value of c after function definition
c += 5;
cout << "Lambda function\n";
cout << add1(a, b) << endl;
cout << "Lambda function with value capture\n";
cout << add2(a, b) << endl;
cout << "Lambda function with reference capture\n";
cout << add3(a, b) << endl;
}
# + language="bash"
#
# c++ -o lambda.exe lambda.cpp --std=c++14
# + language="bash"
#
# ./lambda.exe
# -
# ## Function pointers
# +
# %%file func_pointer.cpp
#include <iostream>
#include <vector>
#include <functional>
using std::cout;
using std::endl;
using std::function;
using std::vector;
int main()
{
cout << "\nUsing generalized function pointers\n";
using func = function<double(double, double)>;
auto f1 = [](double x, double y) { return x + y; };
auto f2 = [](double x, double y) { return x * y; };
auto f3 = [](double x, double y) { return x + y*y; };
double x = 3, y = 4;
vector<func> funcs = {f1, f2, f3,};
for (auto& f : funcs) {
cout << f(x, y) << "\n";
}
}
# + language="bash"
#
# g++ -o func_pointer.exe func_pointer.cpp -std=c++14
# + language="bash"
#
# ./func_pointer.exe
# -
# ## Standard template library (STL)
# +
# %%file stl.cpp
#include <iostream>
#include <vector>
#include <map>
#include <unordered_map>
using std::vector;
using std::map;
using std::unordered_map;
using std::string;
using std::cout;
using std::endl;
struct Point{
int x;
int y;
Point(int x_, int y_) :
x(x_), y(y_) {};
};
int main() {
vector<int> v1 = {1,2,3};
v1.push_back(4);
v1.push_back(5);
cout << "Vecotr<int>" << endl;
for (auto n: v1) {
cout << n << endl;
}
cout << endl;
vector<Point> v2;
v2.push_back(Point(1, 2));
v2.emplace_back(3,4);
cout << "Vector<Point>" << endl;
for (auto p: v2) {
cout << "(" << p.x << ", " << p.y << ")" << endl;
}
cout << endl;
map<string, int> v3 = {{"foo", 1}, {"bar", 2}};
v3["hello"] = 3;
v3.insert({"goodbye", 4});
// Note that a C++ map is ordered
// Note using (traditional) iterators instead of ranged for loop
cout << "Map<string, int>" << endl;
for (auto iter=v3.begin(); iter != v3.end(); iter++) {
cout << iter->first << ": " << iter->second << endl;
}
cout << endl;
unordered_map<string, int> v4 = {{"foo", 1}, {"bar", 2}};
v4["hello"] = 3;
v4.insert({"goodbye", 4});
// Note that the unordered_map is similar to Python's dict.
// Note using ranged for loop with const ref to avoid copying or mutation
cout << "Unordered_map<string, int>" << endl;
for (const auto& i: v4) {
cout << i.first << ": " << i.second << endl;
}
cout << endl;
}
# + language="bash"
#
# g++ -o stl.exe stl.cpp -std=c++14
# + language="bash"
#
# ./stl.exe
# -
# ## STL algorithms
# +
# %%file stl_algorithm.cpp
#include <vector>
#include <iostream>
#include <numeric>
using std::cout;
using std::endl;
using std::vector;
using std::begin;
using std::end;
int main() {
vector<int> v(10);
// iota is somewhat like range
std::iota(v.begin(), v.end(), 1);
for (auto i: v) {
cout << i << " ";
}
cout << endl;
// C++ version of reduce
cout << std::accumulate(begin(v), end(v), 0) << endl;
// Accumulate with lambda
cout << std::accumulate(begin(v), end(v), 1, [](int a, int b){return a * b; }) << endl;
}
# + language="bash"
#
# g++ -o stl_algorithm.exe stl_algorithm.cpp -std=c++14
# + language="bash"
#
# ./stl_algorithm.exe
# -
# ## Random numbers
# +
# %%file random.cpp
#include <iostream>
#include <random>
#include <functional>
using std::cout;
using std::random_device;
using std::mt19937;
using std::default_random_engine;
using std::uniform_int_distribution;
using std::poisson_distribution;
using std::student_t_distribution;
using std::bind;
// start random number engine with fixed seed
// Note default_random_engine may give different values on different platforms
// default_random_engine re(1234);
// or
// Using a named engine will work the same on different platforms
// mt19937 re(1234);
// start random number generator with random seed
random_device rd;
mt19937 re(rd());
uniform_int_distribution<int> uniform(1,6); // lower and upper bounds
poisson_distribution<int> poisson(30); // rate
student_t_distribution<double> t(10); // degrees of freedom
int main()
{
cout << "\nGenerating random numbers\n";
auto runif = bind (uniform, re);
auto rpois = bind(poisson, re);
auto rt = bind(t, re);
for (int i=0; i<10; i++) {
cout << runif() << ", " << rpois() << ", " << rt() << "\n";
}
}
# + language="bash"
#
# g++ -o random.exe random.cpp -std=c++14
# + language="bash"
#
# ./random.exe
# -
# ## Statistics
#
# A nicer library for working with probability distributions. Show integration with Armadillo. Integration with Eigen is also possible.
# +
import os
if not os.path.exists('./stats'):
# ! git clone https://github.com/kthohr/stats.git
# +
# %%file stats.cpp
#define STATS_USE_ARMA
#include <iostream>
#include "stats.hpp"
using std::cout;
using std::endl;
// set seed for the random engine to 1776
std::mt19937_64 engine(1776);
int main() {
// evaluate the normal PDF at x = 1, mu = 0, sigma = 1
double dval_1 = stats::dnorm(1.0,0.0,1.0);
// evaluate the normal PDF at x = 1, mu = 0, sigma = 1, and return the log value
double dval_2 = stats::dnorm(1.0,0.0,1.0,true);
// evaluate the normal CDF at x = 1, mu = 0, sigma = 1
double pval = stats::pnorm(1.0,0.0,1.0);
// evaluate the Laplacian quantile at p = 0.1, mu = 0, sigma = 1
double qval = stats::qlaplace(0.1,0.0,1.0);
// draw from a t-distribution dof = 30
double rval = stats::rt(30);
// matrix output
arma::mat beta_rvs = stats::rbeta<arma::mat>(5,5,3.0,2.0);
// matrix input
arma::mat beta_cdf_vals = stats::pbeta(beta_rvs,3.0,2.0);
cout << "evaluate the normal PDF at x = 1, mu = 0, sigma = 1" << endl;
cout << dval_1 << endl;
cout << "evaluate the normal PDF at x = 1, mu = 0, sigma = 1, and return the log value" << endl;
cout << dval_2 << endl;
cout << "evaluate the normal CDF at x = 1, mu = 0, sigma = 1" << endl;
cout << pval << endl;
cout << "evaluate the Laplacian quantile at p = 0.1, mu = 0, sigma = 1" << endl;
cout << qval << endl;
cout << "draw from a t-distribution dof = 30" << endl;
cout << rval << endl;
cout << "draws from a beta distribuiotn to populate Armadillo matrix" << endl;
cout << beta_rvs << endl;
cout << "evaluaate CDF for beta draws" << endl;
cout << beta_cdf_vals << endl;
}
# + language="bash"
#
# g++ -std=c++11 -I./stats/include stats.cpp -o stats.exe
# + language="bash"
#
# ./stats.exe
# -
# ## Numerics
#
# ### Using Armadillo
# +
# %%file test_arma.cpp
#include <iostream>
#include <armadillo>
using std::cout;
using std::endl;
int main()
{
using namespace arma;
vec u = linspace<vec>(0,1,5);
vec v = ones<vec>(5);
mat A = randu<mat>(4,5); // uniform random deviates
mat B = randn<mat>(4,5); // normal random deviates
cout << "\nVecotrs in Armadillo\n";
cout << u << endl;
cout << v << endl;
cout << u.t() * v << endl;
cout << "\nRandom matrices in Armadillo\n";
cout << A << endl;
cout << B << endl;
cout << A * B.t() << endl;
cout << A * v << endl;
cout << "\nQR in Armadillo\n";
mat Q, R;
qr(Q, R, A.t() * A);
cout << Q << endl;
cout << R << endl;
}
# + language="bash"
#
# g++ -o test_arma.exe test_arma.cpp -std=c++14 -larmadillo
# + language="bash"
#
# ./test_arma.exe
# -
# ### Using Eigen
# +
# %%file test_eigen.cpp
#include <iostream>
#include <fstream>
#include <random>
#include <Eigen/Dense>
#include <functional>
using std::cout;
using std::endl;
using std::ofstream;
using std::default_random_engine;
using std::normal_distribution;
using std::bind;
// start random number engine with fixed seed
default_random_engine re{12345};
normal_distribution<double> norm(5,2); // mean and standard deviation
auto rnorm = bind(norm, re);
int main()
{
using namespace Eigen;
VectorXd x1(6);
x1 << 1, 2, 3, 4, 5, 6;
VectorXd x2 = VectorXd::LinSpaced(6, 1, 2);
VectorXd x3 = VectorXd::Zero(6);
VectorXd x4 = VectorXd::Ones(6);
VectorXd x5 = VectorXd::Constant(6, 3);
VectorXd x6 = VectorXd::Random(6);
double data[] = {6,5,4,3,2,1};
Map<VectorXd> x7(data, 6);
VectorXd x8 = x6 + x7;
MatrixXd A1(3,3);
A1 << 1 ,2, 3,
4, 5, 6,
7, 8, 9;
MatrixXd A2 = MatrixXd::Constant(3, 4, 1);
MatrixXd A3 = MatrixXd::Identity(3, 3);
Map<MatrixXd> A4(data, 3, 2);
MatrixXd A5 = A4.transpose() * A4;
MatrixXd A6 = x7 * x7.transpose();
MatrixXd A7 = A4.array() * A4.array();
MatrixXd A8 = A7.array().log();
MatrixXd A9 = A8.unaryExpr([](double x) { return exp(x); });
MatrixXd A10 = MatrixXd::Zero(3,4).unaryExpr([](double x) { return rnorm(); });
VectorXd x9 = A1.colwise().norm();
VectorXd x10 = A1.rowwise().sum();
MatrixXd A11(x1.size(), 3);
A11 << x1, x2, x3;
MatrixXd A12(3, x1.size());
A12 << x1.transpose(),
x2.transpose(),
x3.transpose();
JacobiSVD<MatrixXd> svd(A10, ComputeThinU | ComputeThinV);
cout << "x1: comman initializer\n" << x1.transpose() << "\n\n";
cout << "x2: linspace\n" << x2.transpose() << "\n\n";
cout << "x3: zeors\n" << x3.transpose() << "\n\n";
cout << "x4: ones\n" << x4.transpose() << "\n\n";
cout << "x5: constant\n" << x5.transpose() << "\n\n";
cout << "x6: rand\n" << x6.transpose() << "\n\n";
cout << "x7: mapping\n" << x7.transpose() << "\n\n";
cout << "x8: element-wise addition\n" << x8.transpose() << "\n\n";
cout << "max of A1\n";
cout << A1.maxCoeff() << "\n\n";
cout << "x9: norm of columns of A1\n" << x9.transpose() << "\n\n";
cout << "x10: sum of rows of A1\n" << x10.transpose() << "\n\n";
cout << "head\n";
cout << x1.head(3).transpose() << "\n\n";
cout << "tail\n";
cout << x1.tail(3).transpose() << "\n\n";
cout << "slice\n";
cout << x1.segment(2, 3).transpose() << "\n\n";
cout << "Reverse\n";
cout << x1.reverse().transpose() << "\n\n";
cout << "Indexing vector\n";
cout << x1(0);
cout << "\n\n";
cout << "A1: comma initilizer\n";
cout << A1 << "\n\n";
cout << "A2: constant\n";
cout << A2 << "\n\n";
cout << "A3: eye\n";
cout << A3 << "\n\n";
cout << "A4: mapping\n";
cout << A4 << "\n\n";
cout << "A5: matrix multiplication\n";
cout << A5 << "\n\n";
cout << "A6: outer product\n";
cout << A6 << "\n\n";
cout << "A7: element-wise multiplication\n";
cout << A7 << "\n\n";
cout << "A8: ufunc log\n";
cout << A8 << "\n\n";
cout << "A9: custom ufucn\n";
cout << A9 << "\n\n";
cout << "A10: custom ufunc for normal deviates\n";
cout << A10 << "\n\n";
cout << "A11: np.c_\n";
cout << A11 << "\n\n";
cout << "A12: np.r_\n";
cout << A12 << "\n\n";
cout << "2x2 block startign at (0,1)\n";
cout << A1.block(0,1,2,2) << "\n\n";
cout << "top 2 rows of A1\n";
cout << A1.topRows(2) << "\n\n";
cout << "bottom 2 rows of A1";
cout << A1.bottomRows(2) << "\n\n";
cout << "leftmost 2 cols of A1";
cout << A1.leftCols(2) << "\n\n";
cout << "rightmost 2 cols of A1";
cout << A1.rightCols(2) << "\n\n";
cout << "Diagonal elements of A1\n";
cout << A1.diagonal() << "\n\n";
A1.diagonal() = A1.diagonal().array().square();
cout << "Transforming diagonal eelemtns of A1\n";
cout << A1 << "\n\n";
cout << "Indexing matrix\n";
cout << A1(0,0) << "\n\n";
cout << "singular values\n";
cout << svd.singularValues() << "\n\n";
cout << "U\n";
cout << svd.matrixU() << "\n\n";
cout << "V\n";
cout << svd.matrixV() << "\n\n";
}
# -
import os
if not os.path.exists('./eigen'):
# ! hg clone https://bitbucket.org/eigen/eigen/
# + language="bash"
#
# g++ -o test_eigen.exe test_eigen.cpp -std=c++14 -I./eigen
# + language="bash"
#
# ./test_eigen.exe
# -
# ### Check SVD
# +
import numpy as np
# Reproduce the A10 matrix printed by the Eigen program and compute its
# thin SVD with NumPy so the two results can be compared side by side.
rows = [
    [5.17237, 3.73572, 6.29422, 6.55268],
    [5.33713, 3.88883, 1.93637, 4.39812],
    [8.22086, 6.94502, 6.36617, 6.5961],
]
A10 = np.array(rows)
# full_matrices=False gives the thin factorization, matching Eigen's
# ComputeThinU | ComputeThinV options.
U, s, Vt = np.linalg.svd(A10, full_matrices=False)
# -
s
U
Vt.T
| notebook/S12C_CPP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="U3Yg5MPQz7mF"
# Libraries we need for python
import asyncio
from bs4 import BeautifulSoup
# + id="RBMT3h9a0brW" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="116d9bb3-8692-4884-8fa8-a80a19b5962f"
# Install selenium to control our headless browser
# !pip install selenium
# + id="1zg6j1vI4_3j" colab={"base_uri": "https://localhost:8080/", "height": 717} outputId="393adcb3-0c06-43b4-ea1a-e6077b3da29d"
# Our chrome driver, default chrome downloaded by pyppeteer does not work as expected
# !apt install chromium-chromedriver
# + id="Lmr1l7_V1QMO"
from selenium import webdriver
# + id="FQZpyZ9c0SCX"
# The page we want to scrap for analysis
# Not a good example for a JS heavy page but useful
url = 'http://example.com'
# + id="EESE6m3FbWtv"
# Configure Chrome to run headless (no GUI) inside the notebook container.
# --no-sandbox and --disable-dev-shm-usage work around common crashes in
# containerized environments (restricted privileges, small /dev/shm).
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
# + id="rQUjpTQtbRYT"
wd = webdriver.Chrome('chromedriver', options=options)
wd.get(url)
# + id="p7bFxYfp1IWa"
# Our HTML parser, which we'll use below to extract and inspect page elements
soup = BeautifulSoup(str(wd.page_source), 'html.parser')
# + id="OZzZlSmu5lE8" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ccd42b36-4bb4-49de-9aa1-04522d921864"
h1 = soup("h1")  # every <h1> element in the parsed page source
print(h1)
# + id="ba4zH-Tw6hdn" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="222242dd-24ab-4d30-f9a0-95b060227860"
for h in h1:
    # Print each H1 heading and the length of its text
    print(f"{h} ({len(h.text)})")
# + id="drkCvTed6UQL" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="cd01d24f-6b6a-495f-f489-7502a65e8faf"
# Print every paragraph element together with the length of its text
paragraphs = soup("p")
for p in paragraphs:
    print(f"{p} ({len(p.text)})")
| seo/notebooks/Scraping_pages_with_Selenium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Preparation steps
#
# Install iotfunctions with
#
# `pip install git+https://github.com/ibm-watson-iot/functions@development`
#
# This project contains the code for the Analytics Service pipeline as well as the anomaly functions and should pull in most of this notebook's dependencies.
#
# The plotting library matplotlib is the exception, so you need to run
# `pip install matplotlib`
#
# +
# Real life data
import logging
import threading
import itertools
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import seaborn as seabornInstance
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions import base
from iotfunctions import bif
from iotfunctions import entity
from iotfunctions import metadata
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions.dbtables import FileModelStore
from iotfunctions.enginelog import EngineLogging
from iotfunctions import estimator
from iotfunctions.ui import (UISingle, UIMultiItem, UIFunctionOutSingle,
UISingleItem, UIFunctionOutMulti, UIMulti, UIExpression,
UIText, UIStatusFlag, UIParameters)
from mmfunctions.anomaly import (SaliencybasedGeneralizedAnomalyScore, SpectralAnomalyScore,
FFTbasedGeneralizedAnomalyScore, KMeansAnomalyScore,
SaliencybasedGeneralizedAnomalyScoreV2, FFTbasedGeneralizedAnomalyScoreV2,
KMeansAnomalyScoreV2, BayesRidgeRegressor, BayesRidgeRegressorExt)
import datetime as dt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
EngineLogging.configure_console_logging(logging.INFO)
# +
# setting to make life easier
# Column-name constants: the raw input metric plus one output column per
# anomaly detector, so later cells can reference scores without repeating
# string literals.
Temperature='MeanTemp'
kmeans0='TemperatureKmeansScore0'
kmeans='TemperatureKmeansScore'
fft0='FFTAnomalyScore0'
fft='FFTAnomalyScore'
spectral='TemperatureSpectralScore'
sal='SaliencyAnomalyScore'
gen='TemperatureGeneralizedScore'
kmeansA='kmeansAnomaly'
kmeansA0='kmeansAnomaly0'
spectralA='spectralAnomaly'
fftA0='fftAnomaly0'
fftA='fftAnomaly'
salA='salAnomaly'
genA='genAnomaly'

# Per-detector score thresholds above which a point is treated as anomalous.
kmeans_break=1.3
spectral_break = 100
fft_break = 100
sal_break = 100
gen_break = 30000
# -
#
# #### What will be shown
#
# General approach is straightforward
# * read raw data in
# * transform it so that it is compatible to the Monitoring pipeline
# * add yet another anomaly detector based on computer vision technology. The point here is to show how to run pipeline anomaly functions 'locally', an important concept for automated testing.
# * simplify the dataframe - we have only one entity, no need for an entity index
# * render input data and anomaly scores properly scaled
#
# <br>
#
# We start with Microsoft's anomaly test data found here
# https://github.com/microsoft/anomalydetector/blob/master/samples/sample.csv
#
# and then proceed to applying anomaly detection to real life pump data
#
#
# <br>
#
#
# #### Current inventory of anomaly detectors by type
#
# This is the list of functions to apply
#
#
# | Detector | ML Type | Type | How does it work |
# | ------- | ------------ | ------- | ---------------- |
# | KMeans | Unsupervised | Proximity | Clusters data points in centroid buckets, small buckets are outliers, score is distance to closest other bucket |
# | Generalized | Unsupervised | Linear Model | Covariance matrix over data point vectors serves to measure multi-dimensional deviation |
# | FFT | Unsupervised | Linear Model | Run FFT before applying Generalized |
# | Spectral | Unsupervised | Linear Model | Compute signal energy to reduce dimensions |
# | Saliency | Unsupervised | Linear Model | Apply saliency transform (from computer vision |
# | SimpleAnomaly | **Supervised** | Ensemble | Run Gradient boosting on training data, anomaly if prediction deviates from actual data |
# | --- | **Supervised** | LSTM | Train a stacked LSTM, anomaly if prediction deviates from actual data |
#
#
# set up a db object with a FileModelStore to support scaling
# Credentials come from a local JSON file; the FileModelStore persists
# trained models on the local filesystem.
with open('credentials_as_monitor_demo.json', encoding='utf-8') as F:
    credentials = json.loads(F.read())
db_schema=None
fm = FileModelStore()
db = Database(credentials=credentials, model_store=fm)
print (db)

# +
# Run on the good pump first
# Get stuff in
# Several columns hold mixed content, so read them as plain objects;
# 'Date' is parsed so it can serve as the pipeline timestamp.
df_i = pd.read_csv('./Weather.csv',
                   dtype={"Snowfall":object, "PoorWeather":object, "SNF": object, "TSHDSBRSGF":object},
                   index_col=False, parse_dates=['Date'])
df_i.rename(columns={'Date': 'timestamp'}, inplace=True)

# drastic filtering
# Keep a single weather station (STA 10001) so the frame holds exactly one entity.
df_i = df_i[df_i['STA'] == 10001]
df_i['entity'] = df_i['STA'].astype(str)

# and sort it by timestamp
df_i = df_i[['entity','timestamp','Precip','MaxTemp','MinTemp','MeanTemp']].sort_values(by=['entity','timestamp'])
# The (entity, timestamp) multi-index is the layout the pipeline functions expect.
df_i = df_i.set_index(['entity','timestamp']).dropna()

df_i.head(8)
# +
# Now run the anomaly functions as if they were executed in a pipeline
EngineLogging.configure_console_logging(logging.DEBUG)
jobsettings = { 'db': db,
               '_db_schema': 'public', 'save_trace_to_file' : True}

# Predict MaxTemp from MinTemp with Bayesian ridge regression.
spsi = BayesRidgeRegressor(['MinTemp'], ['MaxTemp'])
# Build a throwaway entity type so the function can run outside the service.
et = spsi._build_entity_type(columns = [Column(Temperature,Float())], **jobsettings)
spsi._entity_type = et
df_i = spsi.execute(df=df_i)
EngineLogging.configure_console_logging(logging.INFO)
df_i.describe()
# +
plots = 1
fig, ax = plt.subplots(plots, 1, figsize=(20,10), squeeze=False)
cnt = 0

# Observed min/max temperature, the prediction, and a +/- one-stddev band.
ax[cnt,0].plot(df_i.unstack(level=0).index, df_i['MinTemp'],linewidth=0.5,color='blue',label='MinTemp')
ax[cnt,0].plot(df_i.unstack(level=0).index, df_i['MaxTemp'],linewidth=0.5,color='green',label='MaxTemp')
ax[cnt,0].plot(df_i.unstack(level=0).index, df_i['predicted_MaxTemp'],linewidth=0.5,color='red',label='MaxTemp pred')
ax[cnt,0].fill_between(df_i.unstack(level=0).index, df_i['predicted_MaxTemp'] - df_i['stddev_MaxTemp'],
                       df_i['predicted_MaxTemp'] + df_i['stddev_MaxTemp'], color="pink", alpha=0.3, label="predict stddev")
ax[cnt,0].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt,0].set_ylabel('WWII Weather Data\n(predict max temp from min temp)',fontsize=12,weight="bold")
cnt = 1
# +
# Now run the anomaly functions as if they were executed in a pipeline
EngineLogging.configure_console_logging(logging.DEBUG)
jobsettings = { 'db': db,
               '_db_schema': 'public', 'save_trace_to_file' : True}

# Same exercise with the extended regressor (degree-3 polynomial features).
spsi = BayesRidgeRegressorExt(['MinTemp'], ['MaxTemp'], degree=3)
et = spsi._build_entity_type(columns = [Column(Temperature,Float())], **jobsettings)
spsi._entity_type = et
df_i = spsi.execute(df=df_i)
EngineLogging.configure_console_logging(logging.INFO)
df_i.describe()
# +
plots = 1
fig, ax = plt.subplots(plots, 1, figsize=(20,10), squeeze=False)
cnt = 0

ax[cnt,0].plot(df_i.unstack(level=0).index, df_i['MinTemp'],linewidth=0.5,color='blue',label='MinTemp')
ax[cnt,0].plot(df_i.unstack(level=0).index, df_i['MaxTemp'],linewidth=0.5,color='green',label='MaxTemp')
ax[cnt,0].plot(df_i.unstack(level=0).index, df_i['predicted_MaxTemp'],linewidth=0.5,color='red',label='MaxTemp pred')
ax[cnt,0].fill_between(df_i.unstack(level=0).index, df_i['predicted_MaxTemp'] - df_i['stddev_MaxTemp'],
                       df_i['predicted_MaxTemp'] + df_i['stddev_MaxTemp'], color="pink", alpha=0.3, label="predict stddev")
ax[cnt,0].legend(bbox_to_anchor=(1.1, 1.05))
ax[cnt,0].set_ylabel('WWII Weather Data\n(predict max temp from min temp)',fontsize=12,weight="bold")
cnt = 1
# -

# Scratch cells probing how dimension filter values are interpreted.
# NOTE(review): eval() on strings is tolerable only because the inputs are
# hard-coded literals here; never feed it external data.
dimension_value = '["1.016"]'
eval('isinstance(' + str(dimension_value) + ',str)')

# +
#evl = 'df_i["Precip"].isin(' + str(dimension_value) + ")"
# -

dimension_value = '"1.016"'
eval('isinstance(' + str(dimension_value) + ',str)')

df_i['Precip'].values

#np.zeros((10, ) + (3,)).shape
(10, ) + (3,)
| MonitorBayesianRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Modeling Questions
#
#
# * Calc AUC score to all models
# * Save all charts to a folder
#
# ### Also
# * SMOAT Upsample and try running all models on that data.
# * **train_test_split STRATIFIED before SMOTE, then only SMOTE the train set?**
# * do new .ipynb doing SMOTE and sampling!
# # MODEL CREATION
# +
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.stats as st
# import pymc3 as pm
import seaborn as sns
# enables inline plots, without it plots don't show up in the notebook
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
# # %config InlineBackend.figure_format = 'png'
# mpl.rcParams['figure.dpi']= 300
pd.set_option('display.max_columns', 300)
pd.set_option('display.max_rows', 60)
pd.set_option('display.precision', 3)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
import sklearn
from sklearn.preprocessing import StandardScaler, Binarizer, LabelBinarizer, MultiLabelBinarizer
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, cross_val_score, cross_validate \
,cross_val_predict, GridSearchCV, RandomizedSearchCV
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import confusion_matrix,recall_score,precision_score, f1_score
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.metrics import roc_curve, auc
import itertools
from sklearn.metrics import confusion_matrix
from sklearn.externals import joblib
from imblearn.over_sampling import SMOTE
# + [markdown] toc-hr-collapsed=true
# # Helper Functions
# +
# Class labels used by every confusion-matrix plot in this notebook.
target_names = ['No', 'Yes']

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Divide each row by its total so cells become per-class rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    # Heatmap with class labels on both axes.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate every cell; pick white text on the darker half for contrast.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
def confusion_matrices(y_pred):
    """Show raw and row-normalized confusion matrices for `y_pred`.

    Relies on the module-level `y_test` and `target_names`.
    """
    # One matrix computation feeds both renderings.
    cm_counts = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)

    # Absolute counts first ...
    plt.figure()
    plot_confusion_matrix(cm_counts, classes=target_names,
                          title='Confusion matrix, without normalization')

    # ... then per-class rates.
    plt.figure()
    plot_confusion_matrix(cm_counts, classes=target_names, normalize=True,
                          title='Confusion matrix, Normalized')
def plot_roc_curve(fit_model, title):
    """Plot the ROC curve of a fitted binary classifier on the test split.

    Uses the module-level `X_test`/`y_test`. The curve is drawn from the
    predicted probability of the positive class; a text box shows an AUC.

    NOTE(review): the annotated AUC is computed from hard predict() labels,
    while `roc_auc` (the probability-based curve AUC) is computed but never
    used -- confirm which value the author intended to display.
    """
    # Probability of the positive class drives the curve.
    y_score=fit_model.predict_proba(X_test)[:,1]
    fpr, tpr,_ = roc_curve(y_test, y_score)
    roc_auc = auc(fpr, tpr)

    plt.figure(figsize=(6,6))

    # Plotting the Baseline
    plt.plot([0,1],[0,1])

    plt.plot(fpr,tpr)
    plt.grid(which='major')
    plt.title(f"{title} ROC curve")
    # Annotation uses AUC of the hard class predictions (see NOTE above).
    s= 'AUC: ' + str(round(metrics.roc_auc_score(y_test, fit_model.predict(X_test)),3))
    plt.text(0.75, 0.25, s=s, ha='right', va='bottom', fontsize=14,
             bbox=dict(facecolor='grey', alpha=0.5))
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate');
def makecost(obs,prob,falsepos_cost,falseneg_cost):
    """Build a vectorized total-cost function of the decision cutoff.

    obs  : 0/1 array of observed labels.
    prob : predicted probabilities for the positive class.
    Returns a callable mapping a cutoff (scalar or array) to the summed
    misclassification cost at that cutoff.
    """
    def cost(cutoff):
        predicted = np.asarray(prob > cutoff).astype(int)
        false_pos = predicted * (1 - obs)        # predicted 1, actually 0
        false_neg = (1 - predicted) * obs        # predicted 0, actually 1
        return np.sum(false_pos * falsepos_cost + false_neg * falseneg_cost)
    return np.vectorize(cost)
# cut = np.linspace(0,1,100)
# cost = np.zeros_like(cut)
# from sklearn.model_selection import KFold, cross_val_predict
# obs = np.ravel(y)
# K = 20
# for j in range(K):
# folds = KFold(n_splits=5,shuffle=True)
# prob = cross_val_predict(logreg,X,np.ravel(y),cv=folds,method='predict_proba',n_jobs=5)[:,1]
# getcost = makecost(obs,prob,falsepos_cost=20,falseneg_cost=25)
# currentcost = getcost(cut)/X.shape[0]
# cost += currentcost
# plt.plot(cut, currentcost,c='C0',alpha=0.05)
# cost /= K
# plt.plot(cut,cost,c='C0')
# plt.xlabel('cutoff')
# plt.ylabel('Expected cost per data point');
def number_of_uniques(df):
    """Print '<column> : <count of distinct values>' for every column."""
    for col in df.columns:
        print(col, ":", len(df[col].unique()))
def number_of_NaN(df):
    """Print '<column> : <NaN count>' for every column that has any NaNs.

    Columns without missing values produce no output.
    """
    for col in df.columns:
        # Count once per column instead of twice (the original evaluated
        # df[col].isna().sum() in both the test and the print).
        nan_count = df[col].isna().sum()
        if nan_count != 0:
            print(col, ":", nan_count)
# -
# # Load Data
# Load the cleaned OkCupid dataset (HDF5, produced by the cleaning notebook).
cupid = pd.read_hdf('data/cupid_cleaned.hd5', key='df', mode='r')
#cupid = load_pd_pkl('data/cupid_cleaned')
# ### Only:
# * 14.65% of all single/available explicitly answered yes or no.
# * 21.54% of all single/available answered yes/maybe/no
# * 35.57% of all single/available explicitly stated if they had kids
# * 8.27% of all single/available state they have one or more kid
# ### ?
#
# +
# Create binary "wants_kids" YES vs NO from the more options
# Unrecognized answers map to NaN and are removed by dropna() below.
cupid['wants_kids_binary'] = ['yes' if x == 'yes' else 'no' if x == 'no' else np.nan for x in cupid['wants_kids']]
cupid['has_kids_options'] = ['one' if x == 'one' else 'multiple' if x == 'multiple' else 'no' if x == 'no' else np.nan for x in cupid['has_kids']]
cupid['status_available'] = ['yes' if x == 'available' else 'yes' if x == 'single' else np.nan for x in cupid['status']]
# +
target = ['wants_kids_binary']
continuous = ['age'] #, 'height']

# bi_categorical = ['sex', 'signs_fun', 'signs_unimportant', 'signs_important',
#                   'religion_unserious', 'religion_laughing', 'religion_somewhat',
#                   'religion_serious', ]

# mult_categorical = ['body_type', 'drinks', 'drugs', 'income', 'orientation', 'status',
#                     'diet_intensity', 'diet_choice', 'primary_ethnicity',
#                     'has_kids','likes_cats', 'likes_dogs', 'dislikes_cats', 'dislikes_dogs',
#                     'has_cats', 'has_dogs','english_fluent','english_poor','spanish_fluent',
#                     'spanish_not_poorly','religion_name','new_education',]

# remove from bi: 'signs_fun', 'signs_unimportant', 'signs_important', 'religion_unserious', 'religion_laughing', 'religion_somewhat',
# 'religion_serious',
bi_categorical = ['sex', 'signs_fun', 'signs_unimportant', 'signs_important',
                  'religion_unserious', 'religion_laughing', 'religion_somewhat', 'religion_serious']

# remove from multi: 'new_education','likes_cats', 'likes_dogs', 'dislikes_cats',
# 'dislikes_dogs', 'has_cats', 'has_dogs', 'primary_ethnicity', ,
# 'english_fluent','english_poor','spanish_fluent','spanish_not_poorly',
# 'diet_intensity', 'diet_choice', 'religion_name'
mult_categorical = ['orientation', 'status_available', 'has_kids_options',
                    'drinks', 'smokes', 'drugs', 'religion_name',]

# Assign feature groupings
columns = bi_categorical + mult_categorical + target

# Create DF of everything
df = cupid[target + continuous + bi_categorical + mult_categorical]

### Change data type of age
# NOTE(review): assigning into a column of this slice of `cupid` can raise
# pandas' SettingWithCopyWarning; behavior is left unchanged here.
df['age'] = df.age.astype(float)

df = df.dropna()

# Split DF of only standardized/scaled features
scaled_features = df.copy().loc[:, continuous]

# Tranform age and height (standardized features)
features = scaled_features[continuous]
features = StandardScaler().fit_transform(features.values)
scaled_features[continuous] = features

# Create dummy variables for original features, then combine back with scaled features
dummies = pd.get_dummies(df.loc[:, columns], columns=columns, drop_first=False)
df = scaled_features.join(dummies)

# Drop all "placeholder" columns
df.drop(df.filter(regex='placeholder'), axis=1, inplace=True)

# Drop unused binary column
df.drop('has_kids_options_no', axis=1, inplace=True)
# Drop one reference level per categorical to avoid the dummy-variable trap.
df.drop(columns=['status_available_yes', 'wants_kids_binary_no', 'sex_f','signs_important_matters a lot',
                 'religion_serious_very serious','orientation_straight','drinks_very often','smokes_yes',
                 'drugs_often','religion_name_other'], axis=1, inplace=True)

# SETUP MODELING DATA
X = df.drop('wants_kids_binary_yes', axis=1)
y = df['wants_kids_binary_yes']
# Stratify so both splits keep the original class balance.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
# +
print("Before OverSampling, counts of label '1': {}".format(sum(y_train==1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train==0)))

# Oversample the training split only; the test split keeps the true distribution.
sm = SMOTE(random_state=42)
# NOTE(review): fit_sample is the old imbalanced-learn API (renamed
# fit_resample in later releases); kept as-is for the installed version.
X_train, y_train = sm.fit_sample(X_train, y_train.ravel())

print('After OverSampling, the shape of train_X: {}'.format(X_train.shape))
print('After OverSampling, the shape of train_y: {} \n'.format(y_train.shape))

print("After OverSampling, counts of label '1': {}".format(sum(y_train==1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train==0)))
# -

# Correlation heatmap of the final feature matrix.
fig, ax = plt.subplots(figsize=(14,12))
sns.heatmap(df.corr(), xticklabels=True, cmap='BrBG');

# Output directory for persisted models.
# NOTE(review): `dir` shadows the Python builtin of the same name; later
# cells reference this variable, so it is left unchanged here.
dir = 'models'

# # K-Nearest Neighbors (KNN)

# +
knn = KNeighborsClassifier()

# define the parameter values that should be searched
#k_range = list(range(1, 7))
#weight_options = ['uniform', 'distance']
k_range = list(range(1,80))
# weight_options = ['uniform']
weight_options = ['uniform', 'distance']

# create a parameter grid: map the parameter names to the values that should be searched
param_grid = dict(n_neighbors=k_range, weights=weight_options)
# print(param_grid)

# instantiate the grid
# roc_auc scoring suits the (originally imbalanced) binary target.
grid_knn = RandomizedSearchCV(knn, param_grid, cv=10, scoring='roc_auc', n_jobs=-1, n_iter=100)

# fit the grid with data
grid_knn.fit(X_train, y_train)

# examine the best model
print(grid_knn.best_score_)
print(grid_knn.best_params_)
print(grid_knn.best_estimator_)
# +
# Keep only the refitted best estimator and score it on the held-out split.
grid_knn = grid_knn.best_estimator_
joblib.dump(grid_knn, f'{dir}/knn.joblib') # Save model to disk

y_pred_knn = grid_knn.predict(X_test)
print("test f1:", metrics.f1_score(y_test, y_pred_knn))
print("test roc_AUC:", metrics.roc_auc_score(y_test, y_pred_knn))
print("test accuracy:", metrics.accuracy_score(y_test, y_pred_knn))
# -

plot_roc_curve(grid_knn, 'KNN')

# # Logistic Regression

# +
logreg = LogisticRegression(max_iter=500, random_state=42)

# define the parameter values that should be searched
C_options = [0.5, 1, 2, 3]
duals = [False]
tol_options = [1e-3, 1e-2, 1e-1]
pen_options = ['l2']
solver_options = ['liblinear', 'sag', 'newton-cg', 'lbfgs']

# create a parameter grid: map the parameter names to the values that should be searched
param_grid = dict(C=C_options, dual=duals, tol=tol_options, penalty=pen_options, solver=solver_options)
# print(param_grid)

# instantiate the grid
grid_lr = RandomizedSearchCV(logreg, param_grid, cv=10, scoring='roc_auc', n_jobs=6, n_iter=60)

# fit the grid with data
grid_lr.fit(X_train, y_train);

# examine the best model
print(grid_lr.best_score_)
print(grid_lr.best_params_)
print(grid_lr.best_estimator_)
# +
grid_lr = grid_lr.best_estimator_
joblib.dump(grid_lr, f'{dir}/logreg.joblib') # Save model to disk

y_pred_logreg = grid_lr.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred_logreg))
print(metrics.classification_report(y_test, grid_lr.predict(X_test)))
print(metrics.roc_auc_score(y_test, y_pred_logreg))
# -

plot_roc_curve(grid_lr, "LogReg")

# + [markdown] toc-hr-collapsed=false
# # Support Vector Machine (SVM)
# +
svm = SVC(probability = True, random_state=42)

Cs=[0.5, 1, 1.5]
kernels = ['rbf', 'sigmoid', 'linear']
gammas = ['scale', 'auto']
tols = [1e-5, 1e-4, 1e-3, 1e-2]

# Cs=[.75, 1, 1.25, 1.5, 2]
# kernels = ['linear','rbf','sigmoid']
# gammas = ['scale', 'auto']
# tols = [.00001, .0001, .001, .01,]

param_grid = dict(C=Cs, tol=tols, gamma=gammas, kernel=kernels)
grid_svm = RandomizedSearchCV(svm, param_grid, cv=10, scoring='roc_auc', n_jobs=-1, n_iter=30)
grid_svm.fit(X_train, y_train)

print(grid_svm.best_score_)
print(grid_svm.best_params_)
print(grid_svm.best_estimator_)
# +
grid_svm = grid_svm.best_estimator_

y_pred_best_svm = grid_svm.predict(X_test)
print(metrics.roc_auc_score(y_test, y_pred_best_svm))
print(metrics.classification_report(y_test, y_pred_best_svm))
metrics.accuracy_score(y_test, y_pred_best_svm)

joblib.dump(grid_svm, f'{dir}/SVM.joblib') # Save model to disk
# -
plot_roc_curve(grid_svm, 'SVM')

# # Decision Trees

# +
dt = DecisionTreeClassifier(random_state=42)

criterions = ['gini', 'entropy']
param_grid = dict(criterion=criterions)

# Exhaustive search is cheap here: only two candidate criteria.
grid_dt = GridSearchCV(dt, param_grid, scoring='roc_auc', cv=10, n_jobs=-1)
grid_dt.fit(X_train, y_train)

print(grid_dt.best_score_)
print(grid_dt.best_params_)
print(grid_dt.best_estimator_)
# +
grid_dt = grid_dt.best_estimator_
joblib.dump(grid_dt, f'{dir}/DecisionTree.joblib') # Save model to disk

y_pred_dtree = grid_dt.predict(X_test)
print('roc_auc:', metrics.roc_auc_score(y_test, y_pred_dtree))
# -
plot_roc_curve(grid_dt, "Decision Tree")

# # Random Forrest

# +
rf = RandomForestClassifier(random_state=42)

criterions = ['gini', 'entropy']
n_ests = [100, 300]
param_grid = dict(criterion=criterions, n_estimators=n_ests)

grid_rf = GridSearchCV(rf, param_grid, scoring='roc_auc', cv=10, n_jobs=-1)
grid_rf.fit(X_train, y_train)

print(grid_rf.best_score_)
print(grid_rf.best_params_)
print(grid_rf.best_estimator_)
# +
grid_rf = grid_rf.best_estimator_
joblib.dump(grid_rf, f'{dir}/RandomForest.joblib') # Save model to disk

y_pred_rf = grid_rf.predict(X_test)
print('roc_auc:', metrics.roc_auc_score(y_test, y_pred_rf))
# -
plot_roc_curve(grid_rf, "Random Forest")

# # Gradient Boosting

# +
gb = GradientBoostingClassifier(random_state=42)

losses = ['deviance', 'exponential']
lrs = [.01, .05, .1, .5, 1]
n_ests = [50, 100, 200]
subsamples=[0.5, .75, 1]
maxd = [3,5]
tols = [1e-6, 1e-5, 1e-4, 1e-3]
param_grid = dict(loss=losses, learning_rate=lrs, n_estimators=n_ests, subsample=subsamples,
                  max_depth=maxd, tol=tols)

# Large grid, so sample 100 random combinations rather than exhaust it.
grid_gb = RandomizedSearchCV(gb, param_grid, scoring='roc_auc', cv=10, n_jobs=-1, n_iter=100)
grid_gb.fit(X_train, y_train)

print(grid_gb.best_score_)
print(grid_gb.best_params_)
print(grid_gb.best_estimator_)
# +
grid_gb = grid_gb.best_estimator_
grid_gb
joblib.dump(grid_gb, f'{dir}/GradientBoosting.joblib') # Save model to disk

y_pred_gb = grid_gb.predict(X_test)
print(metrics.roc_auc_score(y_test, y_pred_gb))
# -
plot_roc_curve(grid_gb, "Gradient Boosting")

# # Naive Bayes

# Bernoulli NB, fitted without any hyperparameter search.
bern = BernoulliNB()
bern.fit(X_train, y_train)
y_pred_bern = bern.predict(X_test)
metrics.roc_auc_score(y_test, y_pred_bern)

# +
nb = GaussianNB()
nb.fit(X_train, y_train)
joblib.dump(nb, f'{dir}/NaiveBayesGaussian.joblib') # Save model to disk

y_pred_nb = nb.predict(X_test)
# -
metrics.roc_auc_score(y_test, y_pred_nb)

plot_roc_curve(nb, "Gaussian Naive Bayes")
# # Now do analysis with people having kids as well for SVM

# NOTE(review): the preparation below appears identical to the earlier prep
# cell; the difference implied by the heading (retaining people who already
# have kids) is not visible in this code -- verify before trusting results.

# +
# Create binary "wants_kids" YES vs NO from the more options
cupid['wants_kids_binary'] = ['yes' if x == 'yes' else 'no' if x == 'no' else np.nan for x in cupid['wants_kids']]
cupid['has_kids_options'] = ['one' if x == 'one' else 'multiple' if x == 'multiple' else 'no' if x == 'no' else np.nan for x in cupid['has_kids']]
cupid['status_available'] = ['yes' if x == 'available' else 'yes' if x == 'single' else np.nan for x in cupid['status']]
# +
target = ['wants_kids_binary']
continuous = ['age'] #, 'height']

# bi_categorical = ['sex', 'signs_fun', 'signs_unimportant', 'signs_important',
#                   'religion_unserious', 'religion_laughing', 'religion_somewhat',
#                   'religion_serious', ]

# mult_categorical = ['body_type', 'drinks', 'drugs', 'income', 'orientation', 'status',
#                     'diet_intensity', 'diet_choice', 'primary_ethnicity',
#                     'has_kids','likes_cats', 'likes_dogs', 'dislikes_cats', 'dislikes_dogs',
#                     'has_cats', 'has_dogs','english_fluent','english_poor','spanish_fluent',
#                     'spanish_not_poorly','religion_name','new_education',]

# remove from bi: 'signs_fun', 'signs_unimportant', 'signs_important', 'religion_unserious', 'religion_laughing', 'religion_somewhat',
# 'religion_serious',
bi_categorical = ['sex', 'signs_fun', 'signs_unimportant', 'signs_important',
                  'religion_unserious', 'religion_laughing', 'religion_somewhat', 'religion_serious']

# remove from multi: 'new_education','likes_cats', 'likes_dogs', 'dislikes_cats',
# 'dislikes_dogs', 'has_cats', 'has_dogs', 'primary_ethnicity', ,
# 'english_fluent','english_poor','spanish_fluent','spanish_not_poorly',
# 'diet_intensity', 'diet_choice', 'religion_name'
mult_categorical = ['orientation', 'status_available', 'has_kids_options',
                    'drinks', 'smokes', 'drugs', 'religion_name',]

# Assign feature groupings
columns = bi_categorical + mult_categorical + target

# Create DF of everything
df = cupid[target + continuous + bi_categorical + mult_categorical]

### Change data type of age
df['age'] = df.age.astype(float)

df = df.dropna()

# Split DF of only standardized/scaled features
scaled_features = df.copy().loc[:, continuous]

# Tranform age and height (standardized features)
features = scaled_features[continuous]
features = StandardScaler().fit_transform(features.values)
scaled_features[continuous] = features

# Create dummy variables for original features, then combine back with scaled features
dummies = pd.get_dummies(df.loc[:, columns], columns=columns, drop_first=False)
df = scaled_features.join(dummies)

# Drop all "placeholder" columns
df.drop(df.filter(regex='placeholder'), axis=1, inplace=True)

# Drop unused binary column
df.drop('has_kids_options_no', axis=1, inplace=True)
df.drop(columns=['status_available_yes', 'wants_kids_binary_no', 'sex_f','signs_important_matters a lot',
                 'religion_serious_very serious','orientation_straight','drinks_very often','smokes_yes',
                 'drugs_often','religion_name_other'], axis=1, inplace=True)

# SETUP MODELING DATA
X = df.drop('wants_kids_binary_yes', axis=1)
y = df['wants_kids_binary_yes']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
# +
print("Before OverSampling, counts of label '1': {}".format(sum(y_train==1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train==0)))

# Oversample the training split only.
sm = SMOTE(random_state=42)
X_train, y_train = sm.fit_sample(X_train, y_train.ravel())

print('After OverSampling, the shape of train_X: {}'.format(X_train.shape))
print('After OverSampling, the shape of train_y: {} \n'.format(y_train.shape))

print("After OverSampling, counts of label '1': {}".format(sum(y_train==1)))
print("After OverSampling, counts of label '0': {}".format(sum(y_train==0)))
# +
svm = SVC(probability = True, random_state=42)

Cs=[0.5, 1, 1.5]
kernels = ['rbf', 'sigmoid', 'linear']
gammas = ['scale', 'auto']
tols = [1e-5, 1e-4, 1e-3, 1e-2]

# Cs=[.75, 1, 1.25, 1.5, 2]
# kernels = ['linear','rbf','sigmoid']
# gammas = ['scale', 'auto']
# tols = [.00001, .0001, .001, .01,]

param_grid = dict(C=Cs, tol=tols, gamma=gammas, kernel=kernels)
grid_svm = RandomizedSearchCV(svm, param_grid, cv=10, scoring='roc_auc', n_jobs=-1, n_iter=30)
grid_svm.fit(X_train, y_train)

print(grid_svm.best_score_)
print(grid_svm.best_params_)
print(grid_svm.best_estimator_)
# +
grid_svm = grid_svm.best_estimator_

y_pred_best_svm = grid_svm.predict(X_test)
print(metrics.roc_auc_score(y_test, y_pred_best_svm))
print(metrics.classification_report(y_test, y_pred_best_svm))
metrics.accuracy_score(y_test, y_pred_best_svm)

dir = 'models'
joblib.dump(grid_svm, f'{dir}/SVM_knowing_current_children.joblib') # Save model to disk
# -
plot_roc_curve(grid_svm, 'SVM')
| modeling_workbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bokeh Image URL Glyph

# +
from bokeh.plotting import figure, output_file, show
from bokeh.models import Range1d, ColumnDataSource
from bokeh.models.mappers import ColorMapper
import numpy as np
from bokeh.io import export_png

p = figure(plot_width=400, plot_height=400)

output_file("../../figures/image_url.html")

# Tile N copies of the PRISM logo along a vertically jittered diagonal.
# (The original also assigned N = 5, line_color and fill_color here; all
# three were dead -- N was overwritten below and the colors never used.)
url = 'https://github.com/prism-em/prism-em.github.io/raw/master/img/PRISM_transparent_512.png'
N = 12
source = ColumnDataSource(dict(
    url = [url]*N,
    x1 = 64*np.arange(N),
    y1 = np.arange(N)*64 + np.random.rand(N)*32,
    w1 = [64]*N,
    h1 = [64]*N,
))

p.image_url(url='url', x='x1', y='y1', w='w1', h='h1', source=source, anchor="center")

# NOTE(review): ranges step by 32 while glyphs sit on a 64-unit grid, so
# part of the layout falls outside the visible range -- confirm intent.
p.x_range = Range1d(-10, 10+32*N)
p.y_range = Range1d(10, 10+32*N)

show(p)
export_png(p, filename="../../figures/image_url.png");
| visualizations/bokeh/notebooks/glyphs/image_url.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Intro
# Deep learning uses Deep neural networks (DNNs) to solve problems. DNNs are big, multi-layer neural network.
# Neural network: ML algorithm learns
#
# Can a single Perceptron handle XOR? -> Are the ponits linearly separable?
#
# ## MiniFlow
# **Topological sort**: Flattening the graph (NN) in such a way where all the input dependencies for each node are resolved before trying to run its calculation.
#
# +
# miniflow.py
"""
You need to change the Add() class below.
"""
class Neuron:
    """Base node of the MiniFlow computation graph."""

    def __init__(self, inbound_neurons=None):
        """Wire this neuron into the graph.

        `inbound_neurons`: neurons whose values feed this one. Defaults to a
        fresh list -- a mutable default argument here would be shared across
        every Neuron created without explicit inputs (classic Python pitfall).
        """
        # Neurons from which this Node receives values
        self.inbound_neurons = inbound_neurons if inbound_neurons is not None else []
        # Neurons to which this Node passes values
        self.outbound_neurons = []
        # A calculated value
        self.value = None
        # Add this node as an outbound node on its inputs.
        for n in self.inbound_neurons:
            n.outbound_neurons.append(self)

    # These will be implemented in a subclass.
    def forward(self):
        """
        Forward propagation.

        Compute the output value based on `inbound_neurons` and
        store the result in self.value.
        """
        # The original `raise NotImplemented` raised a TypeError at call
        # time, because NotImplemented is not an exception class.
        raise NotImplementedError

    def backward(self):
        """
        Backward propagation.

        Compute the gradient of the current node with respect
        to the input neurons. The gradient of the loss with respect
        to the current neuron should already be computed in the `gradients`
        attribute of the output neurons.
        """
        raise NotImplementedError
class Input(Neuron):
    """Graph entry point; its value is set externally rather than computed."""

    def __init__(self):
        # an Input neuron has no inbound nodes,
        # so no need to pass anything to the Node instantiator
        Neuron.__init__(self)

    # NOTE: Input node is the only node where the value
    # is passed as an argument to forward().
    #
    # All other neuron implementations should get the value
    # of the previous neurons from self.inbound_neurons
    #
    # Example:
    # val0 = self.inbound_neurons[0].value
    def forward(self, value=None):
        # Overwrite the value only when one is actually supplied.
        # The original truthiness test (`if value:`) silently dropped
        # valid-but-falsy inputs such as 0 or 0.0.
        if value is not None:
            self.value = value
class Add(Neuron):
    """Neuron that outputs the sum of its two inbound neurons."""

    def __init__(self, x, y):
        Neuron.__init__(self, [x, y])

    def forward(self):
        """Store the sum of the two inbound values in self.value."""
        left, right = self.inbound_neurons
        self.value = left.value + right.value
"""
No need to change anything below here!
"""
def topological_sort(feed_dict):
    """
    Sort generic nodes in topological order using Kahn's Algorithm.

    `feed_dict`: A dictionary where the key is a `Input` node and the value
    is the respective value feed to that node.

    Returns a list of sorted nodes.
    """
    input_neurons = list(feed_dict.keys())

    # Build an explicit edge table: node -> {'in': predecessors, 'out': successors}.
    edges = {}
    pending = list(input_neurons)
    while pending:
        node = pending.pop(0)
        edges.setdefault(node, {'in': set(), 'out': set()})
        for successor in node.outbound_neurons:
            edges.setdefault(successor, {'in': set(), 'out': set()})
            edges[node]['out'].add(successor)
            edges[successor]['in'].add(node)
            pending.append(successor)

    # Kahn's algorithm: repeatedly emit a node with no unresolved inputs,
    # assigning each Input node its fed value as it is emitted.
    ordering = []
    ready = set(input_neurons)
    while ready:
        node = ready.pop()
        if isinstance(node, Input):
            node.value = feed_dict[node]
        ordering.append(node)
        for successor in node.outbound_neurons:
            edges[node]['out'].remove(successor)
            edges[successor]['in'].remove(node)
            # if no other incoming edges add to S
            if not edges[successor]['in']:
                ready.add(successor)
    return ordering
def forward_pass(output_neuron, sorted_neurons):
    """
    Run a forward pass over a topologically sorted list of neurons.

    Arguments:

        `output_neuron`: the graph's output node (has no outgoing edges).
        `sorted_neurons`: neurons in topological order.

    Returns the value computed at `output_neuron`.
    """
    for neuron in sorted_neurons:
        neuron.forward()
    return output_neuron.value
# +
# nn.py
"""
This script builds and runs a graph with miniflow.

There is no need to change anything to solve this quiz!

However, feel free to play with the network! Can you also
build a network that solves the equation below?

(x + y) + y
"""

from miniflow import *

# Two input nodes feeding one Add node: f = x + y.
x, y = Input(), Input()

f = Add(x, y)

feed_dict = {x: 10, y: 5}

# Resolve evaluation order, then evaluate the graph front to back.
sorted_neurons = topological_sort(feed_dict)
output = forward_pass(f, sorted_neurons)

# Expected: "10 + 5 = 15 (according to miniflow)"
print("{} + {} = {} (according to miniflow)".format(feed_dict[x], feed_dict[y], output))
# +
"""
Bonus Challenge!
Write your code in Add (scroll down).
"""
class Neuron:
    """Base node of the MiniFlow computation graph (labelled variant)."""

    def __init__(self, inbound_neurons=None, label=''):
        # An optional description of the neuron - most useful for outputs.
        self.label = label
        # Neurons from which this Node receives values.
        # Default to a fresh list: a mutable default argument ([]) would be
        # shared by every instance created without explicit inputs.
        self.inbound_neurons = inbound_neurons if inbound_neurons is not None else []
        # Neurons to which this Node passes values
        self.outbound_neurons = []
        # A calculated value
        self.value = None
        # Add this node as an outbound node on its inputs.
        for n in self.inbound_neurons:
            n.outbound_neurons.append(self)

    # These will be implemented in a subclass.
    def forward(self):
        """
        Forward propagation.

        Compute the output value based on `inbound_neurons` and
        store the result in self.value.
        """
        # The original `raise NotImplemented` raised a TypeError at call
        # time, because NotImplemented is not an exception class.
        raise NotImplementedError

    def backward(self):
        """
        Backward propagation.

        Compute the gradient of the current Neuron with respect
        to the input neurons. The gradient of the loss with respect
        to the current Neuron should already be computed in the `gradients`
        attribute of the output neurons.
        """
        raise NotImplementedError
class Input(Neuron):
    """Graph entry point; holds an externally supplied value."""

    def __init__(self):
        # An Input Neuron has no inbound neurons,
        # so no need to pass anything to the Neuron instantiator
        Neuron.__init__(self)

    # NOTE: Input Neuron is the only Neuron where the value
    # may be passed as an argument to forward().
    #
    # All other Neuron implementations should get the value
    # of the previous neurons from self.inbound_neurons
    #
    # Example:
    # val0 = self.inbound_neurons[0].value
    def forward(self, value=None):
        # Only treat `value=None` as "keep the current value": the original
        # truthiness test (`if value:`) dropped valid inputs like 0 and 0.0.
        if value is not None:
            self.value = value

    def backward(self):
        # An Input Neuron has no inputs so we refer to ourself
        # for the gradient
        self.gradients = {self: 0}
        for n in self.outbound_neurons:
            self.gradients[self] += n.gradients[self]
"""
Can you augment the Add class so that it accepts
any number of neurons as input?
Hint: this may be useful:
https://docs.python.org/3/tutorial/controlflow.html#unpacking-argument-lists
"""
class Add(Neuron):
    """Neuron that sums the values of all of its inbound neurons."""
    def __init__(self, *args):
        # Accept any number of input neurons and forward them to the base
        # class, which wires up the graph edges.
        Neuron.__init__(self, *args)

    def forward(self):
        """Set self.value to the sum of every inbound neuron's value."""
        self.value = sum(neuron.value for neuron in self.inbound_neurons)
def topological_sort(feed_dict):
    """
    Return the graph's neurons in topological order using Kahn's Algorithm.

    `feed_dict`: dict mapping each `Input` neuron to its initial value.

    As a side effect, each `Input` neuron's value is set from `feed_dict`.
    Returns a list of sorted neurons.
    """
    # Discover the whole graph from the inputs, recording in/out edge sets.
    graph = {}
    pending = list(feed_dict.keys())
    while pending:
        node = pending.pop(0)
        graph.setdefault(node, {'in': set(), 'out': set()})
        for child in node.outbound_neurons:
            graph.setdefault(child, {'in': set(), 'out': set()})
            graph[node]['out'].add(child)
            graph[child]['in'].add(node)
            pending.append(child)
    # Repeatedly emit a node that has no remaining incoming edges.
    ordering = []
    ready = set(feed_dict.keys())
    while ready:
        node = ready.pop()
        if isinstance(node, Input):
            node.value = feed_dict[node]
        ordering.append(node)
        for child in node.outbound_neurons:
            graph[node]['out'].remove(child)
            graph[child]['in'].remove(node)
            # If the child has no other incoming edges it is ready to emit.
            if not graph[child]['in']:
                ready.add(child)
    return ordering
def forward_pass(output_Neuron, sorted_neurons):
    """
    Run one forward pass over an already topologically sorted graph.

    Arguments:
        `output_Neuron`: the graph's output Neuron (has no outgoing edges).
        `sorted_neurons`: neurons in topological order.

    Returns the computed value of `output_Neuron`.
    """
    # Topological order guarantees every neuron's inputs are computed
    # before the neuron itself runs.
    for neuron in sorted_neurons:
        neuron.forward()
    return output_Neuron.value
# -
# ### 7. Learning and Loss
# +
# nn.py
from miniflow import *
x, y, z = Input(), Input(), Input()
inputs = [x, y, z]
weight_x, weight_y, weight_z = Input(), Input(), Input()
weights = [weight_x, weight_y, weight_z]
bias = Input()
f = Linear(inputs, weights, bias)
feed_dict = {
x: 6,
y: 14,
z: 3,
weight_x: 0.5,
weight_y: 0.25,
weight_z: 1.4,
bias: 2
}
graph = topological_sort(feed_dict)
output = forward_pass(f, graph)
print(output) # should be 12.7 with this example
# +
"""
Write the Linear#forward method below!
"""
import numpy as np
class Neuron:
    """
    Base node of the computation graph.

    Tracks inbound/outbound edges and a computed `value`; subclasses
    implement `forward`.
    """
    def __init__(self, inbound_neurons=None):
        # Neurons from which this Node receives values.
        # NOTE: default is None rather than [] - a mutable default list
        # would be shared (aliased) by every Neuron created without
        # explicit inputs.
        self.inbound_neurons = inbound_neurons if inbound_neurons is not None else []
        # Neurons to which this Node passes values
        self.outbound_neurons = []
        # A calculated value
        self.value = None
        # Add this node as an outbound node on its inputs.
        for n in self.inbound_neurons:
            n.outbound_neurons.append(self)

    # These will be implemented in a subclass.
    def forward(self):
        """
        Forward propagation.

        Compute the output value based on `inbound_neurons` and
        store the result in self.value.

        Raises NotImplementedError (the original `raise NotImplemented`
        raised a TypeError, since NotImplemented is not an exception).
        """
        raise NotImplementedError
class Input(Neuron):
    """
    Source node whose value is supplied externally, either via
    forward(value) or by topological_sort reading a feed_dict.
    """
    def __init__(self):
        # An Input Neuron has no inbound neurons,
        # so no need to pass anything to the Neuron instantiator
        Neuron.__init__(self)

    # NOTE: Input Neuron is the only Neuron where the value
    # may be passed as an argument to forward().
    #
    # All other Neuron implementations should get the value
    # of the previous neurons from self.inbound_neurons
    #
    # Example:
    # val0 = self.inbound_neurons[0].value
    def forward(self, value=None):
        # Overwrite the value if one is passed in. Compare against None
        # explicitly - the original truthiness test (`if value:`) silently
        # ignored legitimate falsy inputs such as 0 or 0.0.
        if value is not None:
            self.value = value
class Linear(Neuron):
    """
    Neuron computing a weighted sum of its inputs plus a bias:
    value = dot(weights, inputs) + bias.
    """
    def __init__(self, inputs, weights, bias):
        Neuron.__init__(self, inputs)
        # `weights` and `bias` are Input neurons (their `.value` holds the
        # actual numbers), not raw scalars.
        self.weights = weights
        self.bias = bias

    def forward(self):
        """
        Set self.value to the value of the linear function output.

        Leftover debug `print` calls removed; intermediate lists are plain
        locals instead of being stashed on the instance.
        """
        input_values = [neuron.value for neuron in self.inbound_neurons]
        weight_values = [weight.value for weight in self.weights]
        self.value = np.dot(weight_values, input_values) + self.bias.value
def topological_sort(feed_dict):
    """
    Return the graph's neurons in topological order using Kahn's Algorithm.

    `feed_dict`: dict mapping each `Input` neuron to its initial value.

    As a side effect, each `Input` neuron's value is set from `feed_dict`.
    Returns a list of sorted neurons.
    """
    # Discover the whole graph from the inputs, recording in/out edge sets.
    graph = {}
    pending = list(feed_dict.keys())
    while pending:
        node = pending.pop(0)
        graph.setdefault(node, {'in': set(), 'out': set()})
        for child in node.outbound_neurons:
            graph.setdefault(child, {'in': set(), 'out': set()})
            graph[node]['out'].add(child)
            graph[child]['in'].add(node)
            pending.append(child)
    # Repeatedly emit a node that has no remaining incoming edges.
    ordering = []
    ready = set(feed_dict.keys())
    while ready:
        node = ready.pop()
        if isinstance(node, Input):
            node.value = feed_dict[node]
        ordering.append(node)
        for child in node.outbound_neurons:
            graph[node]['out'].remove(child)
            graph[child]['in'].remove(node)
            # If the child has no other incoming edges it is ready to emit.
            if not graph[child]['in']:
                ready.add(child)
    return ordering
def forward_pass(output_Neuron, sorted_neurons):
    """
    Run one forward pass over an already topologically sorted graph.

    Arguments:
        `output_Neuron`: the graph's output Neuron (has no outgoing edges).
        `sorted_neurons`: neurons in topological order.

    Returns the computed value of `output_Neuron`.
    """
    # Topological order guarantees every neuron's inputs are computed
    # before the neuron itself runs.
    for neuron in sorted_neurons:
        neuron.forward()
    return output_Neuron.value
# +
# Much more elegant:
# NOTE(review): illustrative fragment of an alternative Linear.forward body;
# `self` is undefined at module scope, so this cell cannot run on its own.
self.value = self.bias.value
for w, x in zip(self.weights, self.inbound_neurons):
    self.value += w.value * x.value
# + active=""
# ('self.inbound_neuron_values', [6, 14, 3])
# ('self.weights', [<miniflow.Input instance at 0x7efd68457950>, <miniflow.Input instance at 0x7efd68457998>, <miniflow.Input instance at 0x7efd68457a28>])
# -
# ### From Neurons to Layers
# It's common to feed in multiple data examples in each forward pass rather than just 1. This is because the examples can be processed in parallel, resulting in big performance gains.
# Number of examples passed forward in each forward pass: batch size.
#
#
class Linear(Layer):
    """Layer performing the affine transform X.dot(W) + b on batched input."""
    def __init__(self, inbound_layer, weights, bias):
        # Ordering matters: forward() reads [input, weights, bias] by index
        # from self.inbound_layers.
        Layer.__init__(self, [inbound_layer, weights, bias])

    def forward(self):
        """
        Set the value of this layer to the linear transform output.
        """
        X, W, b = (layer.value for layer in self.inbound_layers)
        self.value = np.dot(X, W) + b
# ### Sigmoid
#
# Activation function used to categorise the output.
#
# Linear -> Sigmoid.
class Sigmoid(Layer):
    """Layer applying the element-wise logistic (sigmoid) activation."""
    def __init__(self, layer):
        Layer.__init__(self, [layer])

    def _sigmoid(self, x):
        """
        Element-wise sigmoid, kept separate from `forward` because the
        derivative used in `backward` reuses it.

        `x`: A numpy array-like object.
        """
        return 1. / (1. + np.exp(-x))  # the `1.` keeps the arithmetic in floats

    def forward(self):
        """Set self.value to the sigmoid of the single inbound layer's value."""
        self.value = self._sigmoid(self.inbound_layers[0].value)
# +
# Flattening matrices
# TODO: Create Anki card
# 2 by 2 matrices
w1 = np.array([[1, 2], [3, 4]])
w2 = np.array([[5, 6], [7, 8]])
# flatten
w1_flat = np.reshape(w1, -1)
w2_flat = np.reshape(w2, -1)
w = np.concatenate((w1_flat, w2_flat))
# array([1, 2, 3, 4, 5, 6, 7, 8])
# +
# Pretty sure MSE doesn't divide by two but it's nice for derivatives.
def MSE(computed_output, ideal_output, n_inputs):
    """
    Mean squared error of the output layer, with the conventional extra
    factor of 1/2 that keeps the derivative clean.

    `computed_output`: a numpy array
    `ideal_output`: a numpy array
    `n_inputs`: the number of inputs

    Returns the (halved) mean squared error as a float.
    """
    scale = 1. / (2. * n_inputs)
    residual_norm = np.linalg.norm(ideal_output - computed_output)
    return scale * np.square(residual_norm)
# -
# ### Gradient Descent
#
# Given that gradient points in the direction of steepest ascent, it follows that moving along the direction of the negative gradient creates the steepest descent. This fact forms the crux of gradient descent.
#
# To move in a negative direction, networks usually define a learning rate, a small negative number, to control the size of the step along the negative gradient that the cost function should move between each iteration through the network.
#
# When designing neural networks, you can tweak the learning rate as a parameter. The more negative the learning rate, the faster the network will change. Of course, bigger changes mean that your network may overshoot the minimum in the loss (and fail to ever land close to it!). The smaller the learning rate, the slower the network learns (and when neural networks take hours or days to run on cloud machines, time is money!).
#
# Learning in real neural networks strives to minimize the cost as much as possible. To see how, consider the cost function again.
#
# +
# Implementing Backprop
class Sigmoid(Layer):
    """
    Layer that applies the element-wise sigmoid activation, with backprop.
    """
    def __init__(self, layer):
        # The base class constructor wires this layer into the graph.
        Layer.__init__(self, [layer])

    def _sigmoid(self, x):
        """
        Element-wise sigmoid, kept separate from `forward` because the
        derivative used in `backward` reuses it.

        `x`: A numpy array-like object.
        """
        return 1. / (1. + np.exp(-x))

    def forward(self):
        """Apply the sigmoid to the single inbound layer's value."""
        self.value = self._sigmoid(self.inbound_layers[0].value)

    def backward(self):
        """
        Backpropagate through the sigmoid: d(sigmoid)/dx = s * (1 - s),
        chained with the cost gradient from each outbound layer.
        """
        # Start every inbound gradient at zero, then accumulate.
        self.gradients = {layer: np.zeros_like(layer.value)
                          for layer in self.inbound_layers}
        inbound = self.inbound_layers[0]
        for out_layer in self.outbound_layers:
            grad_cost = out_layer.gradients[self]
            # self.value already holds sigmoid(x) from the forward pass.
            self.gradients[inbound] += self.value * (1 - self.value) * grad_cost
| examples/deep-learning-notes-and-labs/06_MiniFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-mLd7YLEcexu" colab_type="text"
# # Introduction to NumPy
#
# This notebook is the first half of a special session on NumPy and PyTorch for CS 224U.
#
# Why should we care about NumPy?
# - It allows you to perform tons of operations on vectors and matrices.
# - It makes things run faster than naive for-loop implementations (a.k.a. vectorization).
# - We use it in our class (see files prefixed with `np_` in your cs224u directory).
# - It's used a ton in machine learning / AI.
# - Its arrays are often inputs into other important Python packages' functions.
#
# In Jupyter notebooks, NumPy documentation is two clicks away: Help -> NumPy reference.
# + id="Mp15vbfVcexx" colab_type="code" colab={}
__author__ = '<NAME>, <NAME>, and <NAME>'
# + id="ABGsaGW2cex0" colab_type="code" colab={}
import numpy as np
# + [markdown] id="_KnLhSpucex2" colab_type="text"
# # Vectors
# ## Vector Initialization
# + id="jQC6-YGocex3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bbc7d531-eb9f-4527-92bc-b05cfdd32db5"
np.zeros(5)
# + id="gyjdcjLLcex6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d4719fde-d344-4283-b928-ec426a7c3f3f"
np.ones(5)
# + id="o5c1YTvMcex8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e3ef56e0-5503-4698-dcea-5ca80feb6f8e"
# convert list to numpy array
np.array([1,2,3,4,5])
# + id="dB_Ad0Yccex-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3375f182-1059-4515-94cd-72e04cb129df"
# convert numpy array to list
np.ones(5).tolist()
# + id="k5w2ha8ZceyA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5bd20b62-d0dc-4f1b-cb26-19f8632c03f9"
# one float => all floats
np.array([1.0,2,3,4,5])
# + id="xqTqFOsWceyD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c5c3eb33-a45d-474e-d017-0fbfa0453356"
# same as above
np.array([1,2,3,4,5], dtype='float')
# + id="uAk2pRQlceyF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8a2b37f7-7ba9-459f-c2f6-875cb7ac2b60"
# spaced values in interval
np.array([x for x in range(20) if x % 2 == 0])
# + id="pv3mFg2MceyJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ff18ce28-f849-414d-fd65-8bd08feee592"
# same as above
np.arange(0,20,2)
# + id="th4v1HLfceyQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d8b49fd9-03e4-4afa-9f04-10dec05275b6"
# random floats in [0, 1)
np.random.random(10)
# + id="jzAywrIQceyS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="036d6384-d2f7-44e4-a016-e9f3504eae0c"
# random integers
np.random.randint(5, 15, size=10)
# + [markdown] id="TBhBW2e-ceyU" colab_type="text"
# ## Vector indexing
# + id="dbFyC_Z-ceyU" colab_type="code" colab={}
x = np.array([10,20,30,40,50])
# + id="NFhaacFSceyX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6e4866bf-cf03-4ccb-a8ed-ec6e7ffc9464"
x[0]
# + id="ot-nrKqdceya" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="80ef6068-79f4-495c-c405-2c2879c3a218"
# slice
x[0:2]
# + id="rT-_f9Ilceyc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d6e634bc-50b5-4f2a-b11d-15a6a5b77b27"
x[0:1000]
# + id="QJf5w_CZceye" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3681f26f-c491-4103-a797-f88a4da16a3e"
# last value
x[-1]
# + id="0eH8_p4nceyh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8b1c8612-5d5b-450b-8c2c-93db11dc3152"
# last value as array
x[[-1]]
# + id="qlhX0khXceyj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ed5c5f4c-e625-4b9f-96c4-ac311ee9d0d0"
# last 3 values
x[-3:]
# + id="XcDF1XJyceym" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="36048f6e-488e-4346-d631-f92032d1e204"
# pick indices
x[[0,2,4]]
# + [markdown] id="Rb37Nzokceyp" colab_type="text"
# ## Vector assignment
#
# Be careful when assigning arrays to new variables!
# + id="xFySMtrKceyq" colab_type="code" colab={}
#x2 = x # try this line instead
x2 = x.copy()
# + id="Zj0lJgNsceys" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1ad107c8-fd11-42d0-d73a-28ee3cf2a090"
x2[0] = 10
x2
# + id="VaubusA-ceyv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="56f0f74b-d905-41e3-e90b-b64398c7cdb8"
x2[[1,2]] = 10
x2
# + id="vIrrt_wjceyy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2c2430eb-9d4d-4459-ead5-0bda49981d7e"
x2[[3,4]] = [0, 1]
x2
# + id="qJpk26w_cey1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="69765172-15a8-4431-b31e-dede44d5829a"
# check if the original vector changed
x
# + [markdown] id="vNesIaGbcey3" colab_type="text"
# ## Vectorized operations
# + id="Za8vuwyNcey3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="145d3145-68d2-4d10-d230-bcf81f494f12"
x.sum()
# + id="K69RrJZ7cey5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="98235dfc-505f-4977-876a-627b9dff5456"
x.mean()
# + id="lkxpVnfecey7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5424a833-e37e-433c-c623-5bb4bba92d0f"
x.max()
# + id="mS0Jh3cjcey9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="69ab0e5d-3e14-46a2-dc50-b6ed1102bc89"
x.argmax()
# + id="-PaESH5Zcey_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="174376c0-731f-41b2-b137-524242e60658"
np.log(x)
# + id="ip7-nN86cezB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="eff370c6-8b17-44b3-8862-2836090bb2f2"
np.exp(x)
# + id="0ZFQkc-AcezD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bad122c8-c33e-4b01-f662-e7331b684ffa"
x + x # Try also with *, -, /, etc.
# + id="nZ5GxyIhcezF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="574592d1-05fd-400f-8443-cb817e470188"
x + 1
# + [markdown] id="zs6vU2akcezH" colab_type="text"
# ## Comparison with Python lists
#
# Vectorizing your mathematical expressions can lead to __huge__ performance gains. The following example is meant to give you a sense for this. It compares applying `np.log` to each element of a list with 10 million values with the same operation done on a vector.
# + id="4OdqZfmmcezH" colab_type="code" colab={}
# log every value as list, one by one
def listlog(vals):
    """Apply np.log to each element one at a time, returning a Python list.

    Deliberately un-vectorized, for timing comparison against np.log(vals).
    """
    out = []
    for v in vals:
        out.append(np.log(v))
    return out
# + id="n5WfclY5cezJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="471baf81-dbfd-4cec-fdab-d03d11804fd4"
# get random vector
samp = np.random.random_sample(int(1e7))+1
samp
# + id="_LPfYQmVcezL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="49049d30-d1cc-4649-c387-2c1587245a4c"
# %time _ = np.log(samp)
# + id="zqyua7TtcezN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1101efb4-8a0c-4e6b-9cae-3afc2d50b330"
# %time _ = listlog(samp)
# + [markdown] id="7N2Qb465cezP" colab_type="text"
# # Matrices
#
# The matrix is the core object of machine learning implementations.
# + [markdown] id="7hXDQndvcezQ" colab_type="text"
# ## Matrix initialization
# + id="PBdcUl19cezQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="035848f4-f037-4bbd-86a6-a1fcfc0173c4"
np.array([[1,2,3], [4,5,6]])
# + id="12HsP0cIcezT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="597b5cdc-7ebc-4293-c74d-d6a45d4db319"
np.array([[1,2,3], [4,5,6]], dtype='float')
# + id="m5UrV0_1cezV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="fdd8ee64-b413-46be-8294-b456b5ef1054"
np.zeros((3,5))
# + id="hT-rH_SecezW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="60ebfdef-0907-4a90-b22e-0f57990f2a92"
np.ones((3,5))
# + id="Q2_gUgi_cezZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="2a81a378-c23a-4df5-a9aa-d77a62a35542"
np.identity(3)
# + id="G7hIPa0-ceze" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="5cec1c59-0524-4864-9621-e787fa4468c8"
np.diag([1,2,3])
# + [markdown] id="lGTcCSKPcezf" colab_type="text"
# ## Matrix indexing
# + id="Zh6fdye0cezg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="12539d9f-647e-456b-b598-b017a4629b55"
X = np.array([[1,2,3], [4,5,6]])
X
# + id="2tOH4Ei9cezl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bef748dc-9526-4936-bd39-4260d30c531a"
X[0]
# + id="V34HdSaCcezn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ffbb02a3-5da1-4599-ee9b-4d100d2f60b6"
X[0,0]
# + id="fGoDA_N6cezp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b7c8ef7f-2f16-4bb1-ad2c-4482093256a1"
# get row
X[0, : ]
# + id="MIxtFslscezr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c7a49757-d8f9-4dd6-9454-7e2d078fdc6d"
# get column
X[ : , 0]
# + id="5VS7alaTcezs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="622d8bdf-c717-4123-c558-2ad53c796791"
# get multiple columns
X[ : , [0,2]]
# + [markdown] id="oCihzE5Bcezu" colab_type="text"
# ## Matrix assignment
# + id="45JWeyRjcezv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3143641d-2450-411d-8e62-7b7bde9576dc"
# X2 = X # try this line instead
X2 = X.copy()
X2
# + id="i42QDZJBcezx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="52b64911-a7c3-44ca-e20d-c69fcadefed7"
X2[0,0] = 20
X2
# + id="_8-9cP_4cezy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1d6df961-f387-4a02-ebb9-3baa113f73cb"
X2[0] = 3
X2
# + id="DKKLd4Ymcez0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3e8f4326-723b-45e8-f9c1-c8fe1ea84d84"
X2[: , -1] = [5, 6]
X2
# + id="oRvGIVTJcez1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="20979db2-20a2-4eab-9758-24dbb0c8e3f3"
# check if original matrix changed
X
# + [markdown] id="YipO754Qcez4" colab_type="text"
# ## Matrix reshaping
# + id="iveodOZucez5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="428c19dd-3378-4b93-d872-39cd93ede186"
z = np.arange(1, 7)
z
# + id="8rH5cURJcez6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="be3b87fa-a3dc-4c5a-9865-2ecd89b19127"
z.shape
# + id="FyNvV3VOcez8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="77535949-b45e-4629-f0ab-a6ed52f4cbcf"
Z = z.reshape(2,3)
Z
# + id="oEKLJc53cez9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="02dee30e-72ea-45c4-992d-764c1489935e"
Z.shape
# + id="Wicndzd-cez_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f267e055-4b65-4279-aa7a-7423a54e6441"
Z.reshape(6)
# + id="MTzvYxqfce0B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c66374b0-e413-4b75-9ca5-a54998d60011"
# same as above
Z.flatten()
# + id="jf_1Ok8lce0C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="a0522e1c-d560-450b-cf18-42708a2efbac"
# transpose
Z.T
# + [markdown] id="NTu6Kg3Rce0F" colab_type="text"
# ## Numeric operations
# + id="e_1wZNtvce0F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3556e618-bf9d-45e2-eb2e-18ac12a18bc2"
A = np.array(range(1,7), dtype='float').reshape(2,3)
A
# + id="PdUlxkK8ce0H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6d2b268a-edf8-4851-d947-2711cbb5eca1"
B = np.array([1, 2, 3])
B
# + id="PlTNK5J6ce0J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="3b166c21-b464-426b-a22b-2ec1878feadb"
# not the same as A.dot(B)
A * B
# + id="-jzL6fiXce0L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="68e50cd1-990f-410d-dcf8-af3c6c22782d"
A + B
# + id="-4WzsLu9ce0P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="fb07d2d9-a0ee-4a7d-b618-f8e2f4394591"
A / B
# + id="zdpn2B1tce0Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b8c967c2-04b8-4734-a729-9b3d6900bace"
# matrix multiplication
A.dot(B)
# + id="G62RIGMDce0S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="df99b3ca-9c0b-4d29-d09f-373164e5795a"
B.dot(A.T)
# + id="iWddUlRxce0T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="278b198c-c976-4c11-8439-7b5e15641beb"
A.dot(A.T)
# + id="6eu1e5trce0V" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="d8ec1620-2ddc-468a-8493-afec52fbe93a"
# outer product
# multiplying each element of first vector by each element of the second
np.outer(B, B)
# + [markdown] id="eZK8Bx_Uce0W" colab_type="text"
# The following is a practical example of numerical operations on NumPy matrices.
#
# In our class, we have a shallow neural network implemented in `np_shallow_neural_network.py`. See how the forward and backward passes use no for loops, and instead takes advantage of NumPy's ability to vectorize manipulations of data.
#
# ```python
# def forward_propagation(self, x):
# h = self.hidden_activation(x.dot(self.W_xh) + self.b_xh)
# y = softmax(h.dot(self.W_hy) + self.b_hy)
# return h, y
#
# def backward_propagation(self, h, predictions, x, labels):
# y_err = predictions.copy()
# y_err[np.argmax(labels)] -= 1 # backprop for cross-entropy error: -log(prediction-for-correct-label)
# d_b_hy = y_err
# h_err = y_err.dot(self.W_hy.T) * self.d_hidden_activation(h)
# d_W_hy = np.outer(h, y_err)
# d_W_xh = np.outer(x, h_err)
# d_b_xh = h_err
# return d_W_hy, d_b_hy, d_W_xh, d_b_xh
# ```
#
# The forward pass essentially computes the following:
# $$h = f(xW_{xh} + b_{xh})$$
# $$y = \text{softmax}(hW_{hy} + b_{hy}),$$
# where $f$ is `self.hidden_activation`.
#
# The backward pass propagates error by computing local gradients and chaining them. Feel free to learn more about backprop [here](http://cs231n.github.io/optimization-2/), though it is not necessary for our class. Also look at this [neural networks case study](http://cs231n.github.io/neural-networks-case-study/) to see another example of how NumPy can be used to implement forward and backward passes of a simple neural network.
# + [markdown] id="dsvkL5JJce0X" colab_type="text"
# ## Going beyond NumPy alone
#
# These are examples of how NumPy can be used with other Python packages.
# + [markdown] id="i7Yfl7f4ce0X" colab_type="text"
# ### Pandas
# We can convert numpy matrices to Pandas dataframes. In the following example, this is useful because it allows us to label each row. You may have noticed this being done in our first unit on distributed representations.
# + id="Is4vi3LOce0Y" colab_type="code" colab={}
import pandas as pd
# + id="x8inswuSce0b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="98f13a4f-afd0-49f8-d997-668f0723c090"
count_df = pd.DataFrame(
np.array([
[1,0,1,0,0,0],
[0,1,0,1,0,0],
[1,1,1,1,0,0],
[0,0,0,0,1,1],
[0,0,0,0,0,1]], dtype='float64'),
index=['gnarly', 'wicked', 'awesome', 'lame', 'terrible'])
count_df
# + [markdown] id="_XAkpulYce0d" colab_type="text"
# ### Scikit-learn
#
# In `sklearn`, NumPy matrices are the most common input and output and thus a key to how the library's numerous methods can work together. Many of the cs224u's model built by Chris operate just like `sklearn` ones, such as the classifiers we used for our sentiment analysis unit.
# + id="ikbRUHa3ce0e" colab_type="code" colab={}
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets
# + id="IFFKTVomce0f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="cc1b4785-99d0-4871-fa62-887098c3074f"
iris = datasets.load_iris()
X = iris.data
y = iris.target
print(type(X))
print("Dimensions of X:", X.shape)
print(type(y))
print("Dimensions of y:", y.shape)
# + id="zWcs_QNKce0h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="2f554a7d-dd02-4958-b58f-2835010569ec"
# split data into train/test
X_iris_train, X_iris_test, y_iris_train, y_iris_test = train_test_split(
X, y, train_size=0.7, test_size=0.3)
print("X_iris_train:", type(X_iris_train))
print("y_iris_train:", type(y_iris_train))
print()
# start up model
maxent = LogisticRegression(fit_intercept=True,
solver='liblinear',
multi_class='auto')
# train on train set
maxent.fit(X_iris_train, y_iris_train)
# predict on test set
iris_predictions = maxent.predict(X_iris_test)
fnames_iris = iris['feature_names']
tnames_iris = iris['target_names']
# how well did our model do?
print(classification_report(y_iris_test, iris_predictions, target_names=tnames_iris))
# + [markdown] id="o5L2lhPece0i" colab_type="text"
# ### SciPy
#
# SciPy contains what may seem like an endless treasure trove of operations for linear algebra, optimization, and more. It is built so that everything can work with NumPy arrays.
# + id="WOiYe1rEce0j" colab_type="code" colab={}
from scipy.spatial.distance import cosine
from scipy.stats import pearsonr
from scipy import linalg
# + id="XCMewWa0ce0k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7e1eb24a-993e-4def-802b-c785cf3205a9"
# cosine distance
a = np.random.random(10)
b = np.random.random(10)
cosine(a, b)
# + id="2CkLhVZ9ce0o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f0f413ca-1d11-459d-e1f7-453f10b54401"
# pearson correlation (coeff, p-value)
pearsonr(a, b)
# + id="zTUdqwddce0s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="898b1a22-c01c-4171-b64a-5466fc00e4c6"
# inverse of matrix
A = np.array([[1,3,5],[2,5,1],[2,3,8]])
linalg.inv(A)
# + [markdown] id="vcNJRrSUce0t" colab_type="text"
# To learn more about how NumPy can be combined with SciPy and Scikit-learn for machine learning, check out this [notebook tutorial](https://github.com/cgpotts/csli-summer/blob/master/advanced_python/intro_to_python_ml.ipynb) by <NAME> and <NAME>. (You may notice that over half of this current notebook is modified from theirs.) Their tutorial also has some interesting exercises in it!
# + [markdown] id="voXxtiAuce0u" colab_type="text"
# ### Matplotlib
# + id="iaNkvPxBce0w" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + id="gkcdxVCgce0x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="4aa17cb5-33ac-43d4-f6dd-1045f05bfab5"
a = np.sort(np.random.random(30))
b = a**2
c = np.log(a)
plt.plot(a, b, label='y = x^2')
plt.plot(a, c, label='y = log(x)')
plt.legend()
plt.title("Some functions")
plt.show()
# + id="IdFaNsWxce00" colab_type="code" colab={}
| numpy_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: qiim2-2018.11
# language: python
# name: qiim2-2018.11
# ---
# + [markdown] hideCode=false hidePrompt=false
# This notebook contains an example of using `redbiom` through its Python API to extract a subset of American Gut Project samples. These data are then loaded into QIIME 2 for a mini beta-diversity analysis using UniFrac. This assumes we're using a QIIME 2 2018.11 environment that additionally has `redbiom` 0.3.0 installed. The exact commands I ran to install it are:
#
# ```
# $ conda install nltk
# $ pip install https://github.com/biocore/redbiom/archive/0.3.0.zip
# ```
# + hideCode=false hidePrompt=false
import redbiom.summarize
import redbiom.search
import redbiom.fetch
import qiime2
import pandas as pd
import skbio
from biom import load_table
import argparse
import sys
from qiime2.plugins import feature_table, diversity, emperor
pd.options.display.max_colwidth = 1000  # show long metadata values (sample IDs, free text) without truncation
# -
# **DIRECTORY WHERE TO EXPORT ALL CLEANED UP DATA**
output_dir = 'deblur_biom_wflow_output_data/'  # note trailing slash: filenames are appended directly to this string below
# + [markdown] hideCode=false hidePrompt=false
# The first thing we're going to do is gather the `redbiom` contexts. A context is roughly a set of consistent technical parameters. For example, the specific sequenced gene, the variable region within the gene, the length of the read, and how the operational taxonomic units were assessed.
#
# The reason `redbiom` partitions data into contexts is because these technical details can lead to massive technical bias. The intention is to facilitate comparing "apples" to "apples".
#
# The context summarization returns a pandas `DataFrame` so it should be pretty friendly to manipulate.
# + hideCode=false hidePrompt=false
contexts = redbiom.summarize.contexts()  # pandas DataFrame describing every available redbiom context
# + hideCode=false hidePrompt=false
contexts.shape
# + [markdown] hideCode=false hidePrompt=false
# At the present time, determining the context to use is a bit manual and requires some string munging. Additional development is needed.
#
# Let's take a look at the larger contexts.
# + hideCode=false hidePrompt=false
contexts.sort_values('SamplesWithData', ascending=False).head()
# -
# ## Using Deblur Context
# + hideCode=false hidePrompt=false
ctx = contexts[contexts['ContextName'] == 'Deblur-Illumina-16S-V4-100nt-fbc5b2']['ContextName'].iloc[0]  # select the Deblur V4 / 100 nt context by exact name
# + hideCode=false hidePrompt=false
ctx
# -
# **For Daniel: the notes below talk about Greengenes, but we are using Deblur as he recommends instead**
# + [markdown] hideCode=false hidePrompt=false
# Breaking this name into its constituent pieces, this is a closed reference context meaning that operational taxonomic units were assessed against a reference database and sequences which did not recruit to the reference were discarded. The reference used is Greengenes, a common 16S reference database. The gene represented by the data is the 16S SSU rRNA gene, and specifically the V4 region of the gene. Finally, the fragments represented are truncated to 100 nucleotides. (Don't worry if this is all a lot of jargon. It is a lot of jargon. Please ask questions :)
#
# So cool, we have a "context". What can we do now? Let's search for some sample identifiers based off of the metadata (i.e., variables) associated with the samples. Specifically, let's get some skin, oral and fecal samples. Be forewarned, the metadata search uses Python's `ast` module behind the scenes, so malformed queries at present produce tracebacks.
# + hideCode=false hidePrompt=false
study_id = 10317 # the Qiita study ID of the American Gut Project is 10317
query = "where qiita_study_id==%d" % (study_id)
results = redbiom.search.metadata_full(query)  # all AGP sample IDs, any body site; NOTE: rebound to a dict below
# + hideCode=false hidePrompt=false
len(results)
# + hideCode=false hidePrompt=false
study_id = 10317 # the Qiita study ID of the American Gut Project is 10317
results = {}
for site in ['sebum', 'saliva', 'feces']:  # skin, oral, and gut samples respectively
    query = "where qiita_study_id==%d and env_material=='%s'" % (study_id, site)
    results[site] = redbiom.search.metadata_full(query)  # body site -> set of matching sample IDs
# + hideCode=false hidePrompt=false
for k, v in results.items():
    print(k, len(v))
# -
# ## Get the biom_table (OTU data) from deblur context
# the following takes a while to run (~30 minutes)
# + hideCode=false hidePrompt=false
# BUG FIX: `to_keep_all` was used below without ever being defined, which
# raises a NameError. Build it here as the union of the sample-ID sets
# returned by the three per-body-site metadata searches above.
to_keep_all = set()
for site_samples in results.values():
    to_keep_all |= set(site_samples)
# Fetch the feature (sOTU) table for those samples from the Deblur context.
# The second return value is the ambiguity map, which we do not need here.
biom_table, _ = redbiom.fetch.data_from_samples(ctx, to_keep_all)
# -
biom_table
print(biom_table.head(5))
from qiime2.plugins import feature_table, diversity, emperor  # re-import; already imported at the top, harmless
table_ar = qiime2.Artifact.import_data('FeatureTable[Frequency]', biom_table)  # wrap the biom table as a QIIME 2 artifact
table_ar
# ## Export full_otu table so we don't have to reload everything later
from biom import load_table
table_ar.export_data('full_otus')  # writes full_otus/feature-table.biom to disk
# !ls full_otus
btable = load_table('full_otus/feature-table.biom')  # sanity check: read the exported table back
btable
# +
#table_ar = qiime2.Artifact.import_data('FeatureTable[Frequency]', btable)
# -
# ## Before rarefaction we want to remove blooms
# +
import skbio
import biom
import argparse
import sys
__version__='1.0'  # NOTE(review): argparse/sys appear unused in this notebook; kept from the original bloom-filter script
def trim_seqs(seqs, seqlength=100):
    """Yield each sequence truncated to its first *seqlength* positions.

    Parameters
    ----------
    seqs: generator of skbio.Sequence objects
        Sequences to trim; each must be at least *seqlength* long.
    seqlength: int, optional
        Number of leading positions to keep (default 100).

    Returns
    -------
    generator of skbio.Sequence objects
        trimmed sequences

    Raises
    ------
    ValueError
        If a sequence shorter than *seqlength* is encountered.
    """
    for sequence in seqs:
        if len(sequence) >= seqlength:
            yield sequence[:seqlength]
        else:
            raise ValueError('sequence length is shorter than %d' % seqlength)
def remove_seqs(table, seqs):
"""
Parameters
----------
table : biom.Table
Input biom table
seqs : generator, skbio.Sequence
Iterator of sequence objects to be removed from the biom table.
Return
------
biom.Table
"""
filter_seqs = {str(s) for s in seqs}
_filter = lambda v, i, m: i not in filter_seqs
return table.filter(_filter, axis='observation', inplace=False)
# -
table = load_table('full_otus/feature-table.biom')
seqs_file = 'bloom/newbloom.all.fna'  # known "bloom" sequences (taxa that grow during room-temperature shipping)
table
seqs = skbio.read(seqs_file, format='fasta')
length = min(map(len, table.ids(axis='observation')))  # observation IDs are the sOTU DNA sequences themselves
seqs = trim_seqs(seqs, seqlength=length)  # trim bloom seqs to the table's fragment length so IDs can match exactly
outtable = remove_seqs(table, seqs)
outtable
# (15 sOTUs matched bloom filter and removed)
table_ar = qiime2.Artifact.import_data('FeatureTable[Frequency]', outtable)
# ## Rarification
sampling_depth=1000
# rarefy to 1000 sequences per sample (yes, it's arbitrary)
rare_ar, = feature_table.actions.rarefy(table=table_ar, sampling_depth=sampling_depth)
biom_table_rar = rare_ar.view(biom.Table)  # back to a biom.Table view for matrix access below
biom_table_rar
print(biom_table_rar.head(3))
# ## converting to sparse dataframe format
c = biom_table_rar.matrix_data  # scipy sparse matrix, observations x samples
c = c.transpose()  # -> samples x observations, one row per sample
c
cdf = pd.SparseDataFrame(c)  # NOTE(review): pd.SparseDataFrame was removed in pandas >= 1.0; fine for the pinned 2018.11 env only
print(cdf.shape)
cdf['sample_name'] = biom_table_rar.ids()  # default axis is 'sample', so this aligns with the transposed rows
cdf.head(3)
# **write biom sparse dataframe to pickle file**
cdf.to_pickle(output_dir + '2.21.rar1000.biom_data.pkl')
# ## create dna_seq lookup_id which matches the column in cdf
otu_ids = biom_table_rar.ids(axis='observation')
otu_id_df = pd.DataFrame(otu_ids, columns=['dna_seq'])
otu_id_df['lookup_id'] = range(len(otu_id_df))  # positional index == column position in cdf
otu_id_df.head()
# **write id lookup to csv file**
otu_id_df.to_csv(output_dir + '2.21.rar1000.biom_ids.csv', index=False)
| biom_data_ingest/deblur_full_biom_workflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .robot
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Robot Framework
# language: robotframework
# name: robotframework
# ---
# # Contribute
# ## Wanted
#
# - More test coverage
# - More documentation
# - Useful magics
# - JupyterLab MIME renderer for Robot `output.xml`
# ## Hack
#
# The Kernel and the JupyterLab extension are developed in tandem in the same environment.
#
# [Fork the repo](https://github.com/gtri/irobotframework), and clone it locally...
#
# git clone https://github.com/gtri/irobotframework
#
# Get [Miniconda](https://conda.io/miniconda.html).
#
# Get [anaconda-project](https://github.com/Anaconda-Platform/anaconda-project)...
#
# conda install -n base anaconda-project
#
# In the repo, run **bootstrap** to set up the `default` environment...
#
# anaconda-project run bootstrap
# anaconda-project run jlpm bootstrap
#
# Start the **TypeScript** watcher...
#
# anaconda-project run jlpm watch
#
# ...and in a separate terminal, start the **JupyterLab** watcher...
#
# anaconda-project run lab --watch
#
# Start **doing work**, and enjoy live rebuilding, available after refreshing your page. Kernel changes will require restarting your kernel.
#
# Keep the [quality](#Quality) high by running (and adding) tests, and ensuring your code meets the formatting standards.
#
# Make a [pull request](https://github.com/gtri/irobotframework)!
# ## Quality
#
# Code quality is assured in a separate `test` environment. Get it ready with **bootstrap**:
#
# anaconda-project run --env-spec test bootstrap
# anaconda-project run --env-spec test jlpm bootstrap
#
# Who wants ugly code? Squash all your syntax opinions...
#
# anaconda-project run test:lint
#
# Try to keep the coverage up by adding [unit tests](https://github.com/gtri/irobotframework/src/irobotframework/tests)...
#
# anaconda-project run test:py
#
# Make sure everything is on the up-and-up with [integration tests](https://github.com/gtri/irobotframework/src/atest)...
#
# anaconda-project run test:robot
#
# ## Document
# Documentation is pretty important, as is pretty documentation. Docs are built from `irobotframework` and IPython notebooks with [nbsphinx](https://github.com/spatialaudio/nbsphinx), and can be updated automatically on save with:
#
# anaconda-project run --env-spec docs bootstrap
# anaconda-project run build:docs:watch
#
# > This will tell you which port to visit in your browser.
#
# This contributes to [Quality](#Quality), as the notebooks are executed as part of the docs build, though some of them _anticipate_ failure, and proceed anyway.
#
# > Don't check in notebooks with outputs!
# ## Build
#
# This is mostly handled by CI, but you can **build** all of the output packages into `./dist`.
#
# anaconda-project run build:pip:sdist
# anaconda-project run build:pip:wheel
# anaconda-project run build:npm
# anaconda-project run build:conda
# anaconda-project run build:docs
#
# ## Release
# This is _also_ mostly handled by CI, but here is the rough outline.
#
# Clear out your repo (or do a fresh checkout)...
#
# git clean -dxf
#
# Preflight the release...
#
# anaconda-project run test:release
#
# If you had to make any changes, go ahead and fix them, and rebuild everything.
#
# anaconda-project run jlpm lerna publish
# twine upload dist/pip/*
#
# Wait for the bot to pick up the changes on `conda-forge`.
| docs/Contributing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import findspark
findspark.init('/home/ubuntu/spark-2.1.1-bin-hadoop2.7')  # point findspark at the local Spark install before importing pyspark
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('aggs').getOrCreate()  # getOrCreate reuses an existing session if one is running
df = spark.read.csv('sales_info.csv',inferSchema=True,header=True)  # infer column types; first row is the header
df.show()
df.printSchema()
df.groupBy('Company')  # returns a GroupedData object; nothing is computed until an aggregation is applied
df.groupBy('Company').mean().show()  # mean of every numeric column, per company
df.groupBy('Company').sum().show()
df.groupBy('Company').count().show()  # number of rows per company
df.groupBy('Company').max().show()
df.agg({'Sales':'sum'}).show()  # whole-table aggregate (no grouping)
df.agg({'Sales':'max'}).show()
group_data =df.groupBy('Company')
group_data.agg({'Sales' : 'max'}).show()  # like groupBy(...).max(), but restricted to the Sales column
from pyspark.sql.functions import countDistinct,avg,stddev
df.select(countDistinct('Sales')).show() #count the distinct number of sales
df.select(avg('Sales')).show()
df.select(stddev('Sales').alias("Std Dev")).show()  # alias renames the output column
from pyspark.sql.functions import format_number
SalesStd=df.select(stddev('Sales').alias("Std Dev"))
SalesStd.select(format_number('Std Dev',2).alias("Std Dev")).show()# number formatting: round to 2 decimal places
df.orderBy('Sales').show()  # ascending by default
df.orderBy(df['Sales'].desc()).show()  # descending order requires the Column form
| Spark_Dataframe/GroupBy&Agg.ipynb |