code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import warnings
warnings.filterwarnings("ignore")
import numpy as np, pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# -
df = pd.read_csv("data/preprocessed_train.csv")
df.head().T
# +
target = 'fare_amount'
features = ['passenger_count','Hour','Weekday','AMorPM']
# more feats
features.extend(['pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude'])
# numericalising
for col in features:
if df[col].dtype == 'object':
df[col] = df[col].astype("category").cat.codes
# +
df = df.dropna()
X = df[features].copy().values
y = df[target].copy().values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# +
# Baseline model: a shallow random forest to establish a reference score.
# NOTE: `min_impurity_split` was deprecated and removed in scikit-learn 1.0;
# it also took a float impurity threshold, so the integer value 3 indicates
# `min_samples_split` (minimum samples required to split a node) was intended.
model_baseline = RandomForestRegressor(n_estimators=400,
                                       max_depth=5,
                                       min_samples_split=3).fit(X_train, y_train)
# R^2 score on the training split
model_baseline.score(X_train, y_train)
# -
model_baseline.score(X_test, y_test)
preds_baseline = model_baseline.predict(X_test)
np.sqrt(mean_squared_error(y_test, preds_baseline))
| RandomForest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting Started with Pilot-Streaming on Stampede
#
# In the first step we need to import all required packages and modules into the Python Path
# Pilot-Streaming utilizes [SAGA-Python](http://saga-python.readthedocs.io/en/latest/tutorial/part3.html) to manage the Spark cluster environment. All attributes of the SAGA Job map 1-to-1 to the Pilot Compute Description.
#
# `resource`: URL of the Local Resource Manager. All SAGA adaptors are supported. Examples:
#
# * `slurm://localhost`: Submit to local SLURM resource manager, e.g. on master node of Wrangler or Stampede
# * `slurm+ssh://login1.wrangler.tacc.utexas.edu`: Submit to Wrangler master node SLURM via SSH (e.g. on node running a job)
#
# `type:` The `type` attributes specifies the cluster environment. It can be: `Spark`, `Dask` or `Kafka`.
#
#
# Note: This is not required anymore on Stampede 2
#
# Depending on the resource there might be other configurations necessary, e.g. to ensure that the correct subnet is used the Spark driver can be configured using various environment variables: os.environ["SPARK_LOCAL_IP"]='172.16.58.3'
# +
# System Libraries
import sys, os
sys.path.append("..")
import pandas as pd
## logging
import logging
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger("py4j").setLevel(logging.ERROR)
import sys, os
sys.path.append("..")
import pandas as pd
import datetime
# -
# System Libraries
import sys, os
sys.path.append("..")
import pandas as pd
## logging
import logging
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger("py4j").setLevel(logging.ERROR)
import datetime
import confluent_kafka
from confluent_kafka import TopicPartition
import pykafka
import pyspark
import time
import redis
import uuid
import os
import pickle
import math
# Dask
import dask.array as da
import dask.bag as db
from dask.delayed import delayed
import distributed
from distributed import Client
# Pilot-Streaming
import pilot.streaming
sys.modules['pilot.streaming']
RESOURCE_URL="slurm+ssh://login4.stampede2.tacc.utexas.edu"
WORKING_DIRECTORY=os.path.join(os.environ["HOME"], "work")
# # 1. Kafka
pilot_compute_description = {
"resource":RESOURCE_URL,
"working_directory": WORKING_DIRECTORY,
"number_of_nodes": 1,
"cores_per_node": 48,
"project": "TG-MCB090174",
"queue": "normal",
"config_name": "stampede",
"walltime": 59,
"type":"kafka"
}
# %%time
kafka_pilot = pilot.streaming.PilotComputeService.create_pilot(pilot_compute_description)
kafka_pilot.wait()
kafka_pilot.get_details()
kafka_pilot.cancel()
# # 2. Dask
# +
import distributed
pilot_compute_description = {
"resource":RESOURCE_URL,
"working_directory": WORKING_DIRECTORY,
"number_of_nodes": 1,
"cores_per_node": 48,
"dask_cores" : 24,
"project": "TG-MCB090174",
"queue": "normal",
"walltime": 359,
"type":"dask"
}
# -
# %%time
dask_pilot = pilot.streaming.PilotComputeService.create_pilot(pilot_compute_description)
dask_pilot.wait()
dask_pilot.get_details()
import distributed
dask_client = distributed.Client(dask_pilot.get_details()['master_url'])
dask_client.scheduler_info()
dask_client.gather(dask_client.map(lambda a: a*a, range(10)))
# # 3 Spark
# + run_control={"frozen": false, "read_only": false}
### Required Spark configuration that needs to be provided before pyspark is imported and JVM started
#os.environ["SPARK_LOCAL_IP"]='192.168.3.11' #must be done before pyspark is loaded
import os
import pyspark
pilot_compute_description = {
"resource":RESOURCE_URL,
"working_directory": WORKING_DIRECTORY,
"number_of_nodes": 1,
"cores_per_node": 48,
"project": "TG-MCB090174",
"queue": "normal",
"walltime": 359,
"type":"spark"
}
# -
# Start Spark Cluster and Wait for Startup Completion
# +
# %%time
spark_pilot = pilot.streaming.PilotComputeService.create_pilot(pilot_compute_description)
spark_pilot.wait()
# -
spark_pilot.get_details()
# +
#conf=pyspark.SparkConf()
#conf.set("spark.driver.bindAddress", "192.168.3.11")
#sc = pyspark.SparkContext(master="spark://172.16.17.32:7077", appName="dfas")
# -
#os.environ["SPARK_LOCAL_IP"]="192.168.3.11"
sc = spark_pilot.get_context()
rdd = sc.parallelize([1,2,3])
rdd.map(lambda a: a*a).collect()
spark_pilot.cancel()
| examples/Pilot-Streaming-GettingStarted-Stampede2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SokichiFujita/PyTorch-for-Deep-Learning-and-Computer-Vision/blob/master/Chapter3_2DimensionalTensors.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="A5daX-FuSBLM" colab_type="code" colab={}
import torch
# + id="9B7iWT6cSHVW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="676c0aad-c5af-4474-cd56-77bce99ca099"
one_d = torch.arange(0,9)
print(one_d)
# + id="LVxj3BJJSMm1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="dca7345c-6c5d-427f-a8a0-ce401dee09d9"
two_d = one_d.view(3,3)
print(two_d)
# + id="vg8czLb_Sk5A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="80dc550f-4696-45a4-8150-882b56813f66"
print(two_d.dim())
# + id="E1O0C1BRSpzC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="d80a019f-e614-4134-eb48-4cc06ca9b9cb"
print(two_d[0,0])
print(two_d[0,1])
print(two_d[1,0])
# + id="q1DBdWqQS4av" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="61d2673c-e96d-4223-fa29-4edf73ca9f22"
x = torch.arange(18).view(3,3,2)
print(x)
| Chapter3_2DimensionalTensors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# +
xlsx = pd.ExcelFile('Book2.xlsx')
df1 = xlsx.parse('Sheet1')
df2 = xlsx.parse('Sheet2')
df3 = xlsx.parse('Sheet3')
df4 = xlsx.parse('Sheet4')
# -
plt.title('Unit-1')
sns.distplot(df1['Power'])
plt.title('Unit-2')
sns.distplot(df2['Power'])
plt.title('Unit-3')
sns.distplot(df3['Power'])
plt.title('Unit-4')
sns.distplot(df4['Power'])
plt.title('Unit-1')
sns.scatterplot(x='Temperature',y='Power',data=df1)
plt.title('Unit-1')
sns.scatterplot(x='Temperature',y='Power',data=df2)
plt.title('Unit-3')
sns.scatterplot(x='Temperature',y='Power',data=df3)
plt.title('Unit-4')
sns.scatterplot(x='Temperature',y='Power',data=df4)
duty_cycle1 = (df1.mean()-df1.min())/(df1.max()-df1.min())
duty_cycle1
duty_cycle2 = (df2.mean()-df2.min())/(df2.max()-df2.min())
duty_cycle2
duty_cycle3 = (df3.mean()-df3.min())/(df3.max()-df3.min())
duty_cycle3
duty_cycle4 = (df4.mean()-df4.min())/(df4.max()-df4.min())
duty_cycle4
temp_range1 = [df1['Temperature'].max(),df1['Temperature'].min()]
temp_range1
temp_range2 = [df2['Temperature'].max(),df2['Temperature'].min()]
temp_range2
temp_range3 = [df3['Temperature'].max(),df3['Temperature'].min()]
temp_range3
temp_range4 = [df4['Temperature'].max(),df4['Temperature'].min()]
temp_range4
avg_e1 = df1['Power'].mean()*10
avg_e1
avg_e2 = df2['Power'].mean()*10
avg_e2
avg_e3 = df3['Power'].mean()*10
avg_e3
avg_e4 = df4['Power'].mean()*10
avg_e4
df1['Temperature'].mean()
df2['Temperature'].mean()
df3['Temperature'].mean()
df4['Temperature'].mean()
df1['Temperature'].std()
df2['Temperature'].std()
df3['Temperature'].std()
df4['Temperature'].std()
plt.title('Unit-1')
sns.distplot(df1['Temperature'])
plt.title('Unit-2')
sns.distplot(df2['Temperature'])
plt.title('Unit-3')
sns.distplot(df3['Temperature'])
plt.title('Unit-4')
sns.distplot(df4['Temperature'])
| zenatix_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import data_loader
import numpy as np
import pandas as pd
import pickle
import os
import nltk
import re
import timeit
from torch.autograd import Variable
import torch
from sklearn import preprocessing, svm, metrics
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
# `sklearn.externals.joblib` was removed in scikit-learn 0.23;
# joblib is now a standalone package with the same API.
import joblib
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.model_selection import cross_val_score
from util.classification.lstm_pos_tagger import LSTMPOSTagger
meta_list, data_list = data_loader.load_data(load_train=True, load_dev=True, load_test=True)
train_meta, train_meta_corrected, \
dev_meta, dev_meta_corrected, \
test_meta, test_meta_corrected = meta_list
train_data, train_data_corrected, \
dev_data, dev_data_corrected, \
test_data, test_data_corrected = data_list
# +
languages = train_meta["native_language"].unique()
print("# of Sentence: {}".format(len(train_meta)))
print("Sentence distribution:")
stats = []
for language in languages:
stats.append(len(train_meta[train_meta["native_language"]==language]))
stats_df = pd.DataFrame(stats, columns=["# of sentences"], index=languages)
print(stats_df)
print("Author distribution:")
stats = []
for language in languages:
stats.append(len(train_meta[train_meta["native_language"]==language]["doc_id"].unique()))
stats_df = pd.DataFrame(stats, columns=["# of authors"], index=languages)
print(stats_df)
stats = []
languages = train_meta["native_language"].unique()
print("Exam score stats:")
for language in languages:
stats.append(train_meta[train_meta["native_language"]==language]["score"].describe()[['count', 'mean', 'std', 'max', 'min']])
stats_df = pd.DataFrame(stats, index=languages)
print(stats_df)
# -
post_df = data_loader.load_post_metadata()
post_df["native_language"].value_counts()*2
post_df.head()
| stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorTrade - Renderers and Plotly Visualization Chart
# ## Data Loading Function
# +
import pandas as pd
def load_csv(filename):
    """Read an hourly OHLCV CSV from ``data/`` and return it sorted chronologically.

    The first file row (exchange banner) is skipped. Timestamps of the form
    "2019-10-17 09-AM" are expanded so pandas can parse them, and the column
    is renamed to 'datetime', which the chart renderer expects.
    """
    frame = pd.read_csv('data/' + filename, skiprows=1, parse_dates=['date'])
    frame.drop(columns=['symbol', 'volume_btc'], inplace=True)
    # Expand "2019-10-17 09-AM" into "2019-10-17 09-00-00 AM".
    frame['date'] = frame['date'].str[:14] + '00-00 ' + frame['date'].str[-2:]
    # Convert the string column to datetime for proper chronological sorting.
    frame['date'] = pd.to_datetime(frame['date'])
    # Make sure historical prices are sorted oldest-first.
    frame.sort_values(by='date', ascending=True, inplace=True)
    frame.reset_index(drop=True, inplace=True)
    # Format timestamps as required to appear on the chart buy/sell marks.
    frame['date'] = frame['date'].dt.strftime('%Y-%m-%d %I:%M %p')
    # The chart expects the timestamp column to be named 'datetime'.
    frame.rename(columns={'date': 'datetime'}, inplace=True)
    return frame
# -
df = load_csv('Coinbase_BTCUSD_1h.csv')
df.head()
# ## Data Preparation
# ### Create the dataset features
# +
import ta
from tensortrade.data import DataFeed, Module
dataset = ta.add_all_ta_features(df, 'open', 'high', 'low', 'close', 'volume', fillna=True)
dataset.head(3)
# -
# ### Create Chart Price History Data
# Note: It is recommended to create the chart data *after* creating and cleaning the dataset to ensure one-to-one mapping between the historical prices data and the dataset.
# +
price_history = dataset[['datetime', 'open', 'high', 'low', 'close', 'volume']] # chart data
display(price_history.head(3))
dataset.drop(columns=['datetime', 'open', 'high', 'low', 'close', 'volume'], inplace=True)
# -
# ## Setup Trading Environment
# ### Create Data Feeds
# +
from tensortrade.exchanges import Exchange
from tensortrade.exchanges.services.execution.simulated import execute_order
from tensortrade.data import Stream, DataFeed, Module
from tensortrade.instruments import USD, BTC
from tensortrade.wallets import Wallet, Portfolio
coinbase = Exchange("coinbase", service=execute_order)(
Stream("USD-BTC", price_history['close'].tolist())
)
portfolio = Portfolio(USD, [
Wallet(coinbase, 10000 * USD),
Wallet(coinbase, 10 * BTC),
])
with Module("coinbase") as coinbase_ns:
nodes = [Stream(name, dataset[name].tolist()) for name in dataset.columns]
feed = DataFeed([coinbase_ns])
feed.next()
# -
# ### Trading Environment Renderers
# A renderer is a channel for the trading environment to output its current state. One or more renderers can be attached to the environment at the same time. For example, you can let the environment draw a chart and log to a file at the same time.
#
# Notice that while all renderers can technically be used together, you need to select the best combination to avoid undesired results. For example, PlotlyTradingChart can work well with FileLogger but may not display well with ScreenLogger.
#
# Renderer can be set by name (string) or class, single or list. Available renderers are:
# * `'screenlog'` or `ScreenLogger`: Shows results on the screen.
# * `'filelog'` or `FileLogger`: Logs results to a file.
# * `'plotly'` or `PlotlyTradingChart`: A trading chart based on Plotly.
#
# #### Examples:
#
# * renderers = 'screenlog' (default)
# * renderers = ['screenlog', 'filelog']
# * renderers = ScreenLogger()
# * renderers = ['screenlog', FileLogger()]
# * renderers = [FileLogger(filename='example.log')]
#
# Renderers can also be created and configured first then attached to the environment as seen in a following example.
#
# ### Trading Environment with a Single Renderer
# +
from tensortrade.environments.render import ScreenLogger
from tensortrade.environments import TradingEnvironment
env = TradingEnvironment(
feed=feed,
portfolio=portfolio,
action_scheme='managed-risk',
reward_scheme='risk-adjusted',
window_size=20,
price_history=price_history,
renderers = 'screenlog' # ScreenLogger used with default settings
)
# +
from tensortrade.agents import DQNAgent
agent = DQNAgent(env)
agent.train(n_episodes=2, n_steps=200, render_interval=10)
# -
# ### Environment with Multiple Renderers
# Create PlotlyTradingChart and FileLogger renderers. Configuring renderers is optional as they can be used with their default settings.
# +
from tensortrade.environments.render import PlotlyTradingChart
from tensortrade.environments.render import FileLogger
chart_renderer = PlotlyTradingChart(
height = 800
)
file_logger = FileLogger(
filename='example.log', # omit or None for automatic file name
path='training_logs' # create a new directory if doesn't exist, None for no directory
)
# -
# ### Environment with Multiple Renderers
env = TradingEnvironment(
feed=feed,
portfolio=portfolio,
action_scheme='managed-risk',
reward_scheme='risk-adjusted',
window_size=20,
price_history=price_history,
renderers = [chart_renderer, file_logger]
)
# ## Setup and Train DQN Agent
# +
from tensortrade.agents import DQNAgent
agent = DQNAgent(env)
# Set render_interval to None to render at episode ends only
agent.train(n_episodes=2, n_steps=200, render_interval=10)
# -
# ## Direct Performance and Net Worth Plotting
# Alternatively, the final performance and net worth can be displayed using pandas via matplotlib.
# +
# %matplotlib inline
portfolio.performance.plot()
# -
portfolio.performance.net_worth.plot()
| examples/renderers_and_plotly_chart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup
import re
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import itertools
# +
def process_string(string):
    """Tokenize *string*, keeping only letters, digits, hyphens, slashes and spaces.

    Runs of any other character are collapsed into a single space before
    splitting, so punctuation never produces empty tokens.
    """
    # Raw string: '\-' and '\/' inside a normal literal are invalid escape
    # sequences (DeprecationWarning since Python 3.6, error in future versions).
    string = re.sub(r'[^A-Za-z0-9\-\/ ]+', ' ', string).split()
    return [y.strip() for y in string]
def to_title(string):
    """Title-case *string* only when it is entirely upper-case; otherwise return it unchanged."""
    return string.title() if string.isupper() else string
# -
def parse_raw(filename):
    """Parse an SGML-like annotation file into parallel token/label lists.

    ``soup.prettify()`` places each tag and each text run on its own line,
    so the file is processed line by line with a one-line lookbehind state.

    Returns a tuple ``(texts, labels)`` of equal length.
    """
    with open(filename, 'r') as fopen:
        entities = fopen.read()
    soup = BeautifulSoup(entities, 'html.parser')
    inside_tag = ''  # entity tag opened on the previous line, '' when none
    texts, labels = [], []
    for sentence in soup.prettify().split('\n'):
        if len(inside_tag):
            # Previous line opened a tag: label every token on this line with it.
            splitted = process_string(sentence)
            texts += splitted
            labels += [inside_tag] * len(splitted)
            inside_tag = ''
        else:
            # str.find returns 0 when the line STARTS with the substring, so
            # `not sentence.find('</')` is True exactly for closing-tag lines.
            if not sentence.find('</'):
                pass  # closing tag: nothing to record
            elif not sentence.find('<'):
                # opening tag line, e.g. '<person>' -> remember 'person'
                inside_tag = sentence.split('>')[0][1:]
            else:
                # plain text outside any tag: label tokens as OTHER
                splitted = process_string(sentence)
                texts += splitted
                labels += ['OTHER'] * len(splitted)
    assert (len(texts)==len(labels)), "length texts and labels are not same"
    print('len texts and labels: ', len(texts))
    return texts,labels
train_texts, train_labels = parse_raw('data_train.txt')
test_texts, test_labels = parse_raw('data_test.txt')
train_texts += test_texts
train_labels += test_labels
with open('entities-bm-normalize-v3.txt','r') as fopen:
entities_bm = fopen.read().split('\n')[:-1]
entities_bm = [i.split() for i in entities_bm]
entities_bm = [[i[0],'TIME' if i[0] in 'jam' else i[1]] for i in entities_bm]
# +
replace_by = {'organizaiton':'organization','orgnization':'organization',
'othoer': 'OTHER'}
with open('NER-part1.txt','r') as fopen:
nexts = fopen.read().split('\n')[:-1]
nexts = [i.split() for i in nexts]
for i in nexts:
if len(i) == 2:
label = i[1].lower()
if 'other' in label:
label = label.upper()
if label in replace_by:
label = replace_by[label]
train_labels.append(label)
train_texts.append(i[0])
# +
replace_by = {'LOC':'location','PRN':'person','NORP':'organization','ORG':'organization','LAW':'law',
'EVENT':'event','FAC':'organization','TIME':'time','O':'OTHER','ART':'person','DOC':'law'}
for i in entities_bm:
try:
string = process_string(i[0])
if len(string):
train_labels.append(replace_by[i[1]])
train_texts.append(process_string(i[0])[0])
except Exception as e:
print(e)
assert (len(train_texts)==len(train_labels)), "length texts and labels are not same"
# -
np.unique(train_labels,return_counts=True)
# +
def _pad_sequence(
sequence,
n,
pad_left = False,
pad_right = False,
left_pad_symbol = None,
right_pad_symbol = None,
):
sequence = iter(sequence)
if pad_left:
sequence = itertools.chain((left_pad_symbol,) * (n - 1), sequence)
if pad_right:
sequence = itertools.chain(sequence, (right_pad_symbol,) * (n - 1))
return sequence
def ngrams(
    sequence,
    n,
    pad_left = False,
    pad_right = False,
    left_pad_symbol = None,
    right_pad_symbol = None,
):
    """
    generate ngrams
    Parameters
    ----------
    sequence : list of str
        list of tokenize words
    n : int
        ngram size
    Returns
    -------
    ngram: list
    """
    # Optional padding with n-1 symbols on either side (inlined helper).
    it = iter(sequence)
    if pad_left:
        it = itertools.chain((left_pad_symbol,) * (n - 1), it)
    if pad_right:
        it = itertools.chain(it, (right_pad_symbol,) * (n - 1))
    # Slide a window of length n across the (possibly padded) stream,
    # yielding each full window as a tuple.
    window = []
    for item in it:
        window.append(item)
        if len(window) == n:
            yield tuple(window)
            del window[0]
# -
def get_ngrams(s, grams=(2,3,4)):
    """Return the character n-grams of *s*, joined into strings, for every size in *grams*."""
    result = []
    for size in grams:
        for gram in ngrams(s, size):
            result.append(''.join(gram))
    return result
# +
word2idx = {'PAD': 0,'NUM':1,'UNK':2}
tag2idx = {'PAD': 0}
char2idx = {'PAD': 0,'NUM':1,'UNK':2}
word_idx = 3
tag_idx = 1
char_idx = 3
def parse_XY(texts, labels):
    """Encode tokens and tags to integer ids, growing the module-level vocabularies.

    Side effects: mutates ``word2idx``, ``tag2idx``, ``char2idx`` and their
    id counters in place, so repeated calls share one vocabulary.

    Returns ``(X, Y)`` where X is a list of word ids and Y is an int array
    of the corresponding tag ids.
    """
    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
    X, Y = [], []
    for no, text in enumerate(texts):
        text = text.lower()
        if len(text) < 2:
            continue  # skip single-character tokens (their label is skipped too)
        tag = labels[no]
        # register every character n-gram of the token in the char vocabulary
        for c in get_ngrams(text):
            if c not in char2idx:
                char2idx[c] = char_idx
                char_idx += 1
        if tag not in tag2idx:
            tag2idx[tag] = tag_idx
            tag_idx += 1
        Y.append(tag2idx[tag])
        if text not in word2idx:
            word2idx[text] = word_idx
            word_idx += 1
        X.append(word2idx[text])
    return X, np.array(Y)
# -
X, Y = parse_XY(train_texts, train_labels)
idx2word={idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
# +
seq_len = 50
def iter_seq(x):
    """Stack every length-``seq_len`` sliding window (stride 1) of *x* into an array."""
    windows = []
    for start in range(0, len(x) - seq_len, 1):
        windows.append(x[start: start + seq_len])
    return np.array(windows)
def to_train_seq(*args):
    """Apply ``iter_seq`` to every positional argument, returning one array per argument."""
    return list(map(iter_seq, args))
def generate_char_seq(batch, maxlen):
    """Map a (batch, seq_len) array of word ids to per-word n-gram id sequences.

    Returns an int32 array of shape (batch, seq_len, maxlen) whose last axis
    holds ``char2idx`` ids of each word's n-grams, zero-padded to ``maxlen``.
    Assumes every word has at most ``maxlen`` n-grams — TODO confirm.
    """
    temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
    for i in range(batch.shape[0]):
        for k in range(batch.shape[1]):
            for no, c in enumerate(get_ngrams(idx2word[batch[i, k]])):
                temp[i, k, no] = char2idx[c]
    return temp
# -
X_seq, Y_seq = to_train_seq(X, Y)
X_char_seq = generate_char_seq(X_seq, seq_len * 2)
X_seq.shape
Y_seq.shape
# `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `train_test_split` now lives in `sklearn.model_selection`.
from sklearn.model_selection import train_test_split
# Hold out 20% of the windowed sequences for validation.
train_Y, test_Y, train_X, test_X = train_test_split(Y_seq, X_char_seq, test_size=0.2)
# +
class Attention:
    """Additive (Bahdanau-style) attention built from TF1 graph ops."""

    def __init__(self, hidden_size):
        self.hidden_size = hidden_size
        self.dense_layer = tf.layers.Dense(hidden_size)
        # learned scoring vector v, sampled once at construction time
        self.v = tf.random_normal([hidden_size], mean=0, stddev=1/np.sqrt(hidden_size))

    def score(self, hidden_tensor, encoder_outputs):
        """Return unnormalised attention energies, one per encoder time step."""
        energy = tf.nn.tanh(self.dense_layer(tf.concat([hidden_tensor, encoder_outputs], 2)))
        energy = tf.transpose(energy, [0, 2, 1])
        batch_size = tf.shape(encoder_outputs)[0]
        # tile v across the batch so it can be matmul'd against each energy matrix
        v = tf.expand_dims(tf.tile(tf.expand_dims(self.v, 0), [batch_size, 1]), 1)
        energy = tf.matmul(v, energy)
        return tf.squeeze(energy, 1)

    def __call__(self, hidden, encoder_outputs):
        """Return softmax attention weights with shape (batch, 1, seq_len)."""
        seq_len = tf.shape(encoder_outputs)[1]
        batch_size = tf.shape(encoder_outputs)[0]
        # broadcast the query vector across every encoder time step
        H = tf.tile(tf.expand_dims(hidden, 1), [1, seq_len, 1])
        attn_energies = self.score(H, encoder_outputs)
        return tf.expand_dims(tf.nn.softmax(attn_energies), 1)
class Model:
    """Gated dilated-convolution sequence tagger with attention and a CRF output layer (TF1 graph mode)."""

    def __init__(
        self,
        dict_size,
        size_layers,
        learning_rate,
        maxlen,
        num_blocks = 3,
        block_size = 128,
    ):
        # inputs: each word position is represented by up to maxlen*2 char-ngram ids
        self.word_ids = tf.placeholder(tf.int32, shape = [None, maxlen, maxlen * 2])
        self.labels = tf.placeholder(tf.int32, shape = [None, maxlen])
        # ngram embedding table; a word embedding is the mean of its ngram embeddings
        embeddings = tf.Variable(tf.random_uniform([dict_size, size_layers], -1, 1))
        embedded = tf.nn.embedding_lookup(embeddings, self.word_ids)
        embedded = tf.reduce_mean(embedded, axis=2)
        self.attention = Attention(size_layers)
        self.maxlen = tf.shape(self.word_ids)[1]
        # sequence lengths: count positions whose ngram-id sum is non-zero
        self.lengths = tf.count_nonzero(tf.reduce_sum(self.word_ids, axis=2), 1)

        def residual_block(x, size, rate, block):
            # One gated dilated-conv block; returns (residual output, skip connection).
            with tf.variable_scope(
                'block_%d_%d' % (block, rate), reuse = False
            ):
                # NOTE(review): the filter branch convolves the attention weights
                # rather than x itself — presumably intentional; confirm.
                attn_weights = self.attention(tf.reduce_sum(x, axis=1), x)
                conv_filter = tf.layers.conv1d(
                    attn_weights,
                    x.shape[2] // 4,
                    kernel_size = size,
                    strides = 1,
                    padding = 'same',
                    dilation_rate = rate,
                    activation = tf.nn.tanh,
                )
                conv_gate = tf.layers.conv1d(
                    x,
                    x.shape[2] // 4,
                    kernel_size = size,
                    strides = 1,
                    padding = 'same',
                    dilation_rate = rate,
                    activation = tf.nn.sigmoid,
                )
                # gated activation, then 1x1 conv back up to block_size channels
                out = tf.multiply(conv_filter, conv_gate)
                out = tf.layers.conv1d(
                    out,
                    block_size,
                    kernel_size = 1,
                    strides = 1,
                    padding = 'same',
                    activation = tf.nn.tanh,
                )
                return tf.add(x, out), out

        # project embeddings to block_size channels before the residual stack
        forward = tf.layers.conv1d(
            embedded, block_size, kernel_size = 1, strides = 1, padding = 'SAME'
        )
        zeros = tf.zeros_like(forward)
        # accumulate skip connections over num_blocks stacks of dilation rates 1..16
        for i in range(num_blocks):
            for r in [1, 2, 4, 8, 16]:
                forward, s = residual_block(
                    forward, size = 7, rate = r, block = i
                )
                zeros = tf.add(zeros, s)
        logits = tf.layers.conv1d(
            zeros, len(idx2tag), kernel_size = 1, strides = 1, padding = 'SAME'
        )
        # linear-chain CRF objective over the tag sequence
        log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
            logits, self.labels, self.lengths
        )
        self.cost = tf.reduce_mean(-log_likelihood)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate = learning_rate
        ).minimize(self.cost)
        # mask out padded positions when computing predictions/accuracy
        mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)
        # Viterbi decoding for the predicted tag sequence
        self.tags_seq, _ = tf.contrib.crf.crf_decode(
            logits, transition_params, self.lengths
        )
        self.prediction = tf.boolean_mask(self.tags_seq, mask)
        mask_label = tf.boolean_mask(self.labels, mask)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# +
tf.reset_default_graph()
sess = tf.InteractiveSession()
dim = 256
dropout = 1
learning_rate = 1e-3
batch_size = 32
model = Model(len(char2idx), dim, learning_rate, seq_len)
sess.run(tf.global_variables_initializer())
# +
import time

# Train for 2 epochs, alternating a training pass and a validation pass.
for e in range(2):
    lasttime = time.time()
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    pbar = tqdm(
        range(0, len(train_X), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        # clamp the slice end so the final (possibly short) batch is included
        batch_x = train_X[i : min(i + batch_size, train_X.shape[0])]
        batch_y = train_Y[i : min(i + batch_size, train_X.shape[0])]
        acc, cost, _ = sess.run(
            [model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.word_ids: batch_x,
                model.labels: batch_y
            },
        )
        assert not np.isnan(cost)  # abort early if the loss diverges
        train_loss += cost
        train_acc += acc
        pbar.set_postfix(cost = cost, accuracy = acc)
    # validation pass: optimizer op is NOT run, so weights are unchanged
    pbar = tqdm(
        range(0, len(test_X), batch_size), desc = 'test minibatch loop'
    )
    for i in pbar:
        batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]
        batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]
        acc, cost = sess.run(
            [model.accuracy, model.cost],
            feed_dict = {
                model.word_ids: batch_x,
                model.labels: batch_y
            },
        )
        assert not np.isnan(cost)
        test_loss += cost
        test_acc += acc
        pbar.set_postfix(cost = cost, accuracy = acc)
    # convert summed batch metrics into per-batch averages
    train_loss /= len(train_X) / batch_size
    train_acc /= len(train_X) / batch_size
    test_loss /= len(test_X) / batch_size
    test_acc /= len(test_X) / batch_size
    print('time taken:', time.time() - lasttime)
    print(
        'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
        % (e, train_loss, train_acc, test_loss, test_acc)
    )
# -
| entity-tagging/5.residual-char-ngrams.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# DataFrame的创建和访问
import pandas as pd
import numpy as np
from IPython.display import display
data = pd.DataFrame(np.arange(1,10).reshape(3,3),index=['r1','r2','r3'],columns=['c1','c2','c3'])
print(data)
display(data)
# -
# 内容打印 series类型
a = data['c1']
print(a)
print(type(a))
# 内容打印 DataFrame类型
b = data[['c1']]
print(b)
print(type(b))
# 访问多个key
c = data[['c1','c3']]
print(c)
print(type(c))
# Index access: a plain slice selects ROWS (the first two rows here).
d = data[:2]
display(d)
print(str(type(d)))  # fix: original was missing a closing parenthesis (SyntaxError)
# iloc:先行后列
e = data.iloc[1:3]
display(e)
# loc
f = data.loc[['r2','r3']]
display(f)
print(type(f))
# head() :浏览数据
display(data.head())
display(data.head(2))
# 行列同时筛选
a = data[['c1','c3']][0:2]
b = data.iloc[0:2][['c1','c3']]
c = data.loc[['r1','r2'],['c1','c3']]
e = data.iloc[0:2,[0,2]]
f = data.iloc[0:2,0:1]
display(a)
display(b)
display(c)
display(d)
f
# 数据运算
data['c4'] = data['c3'] - data['c1']
data.head()
# 条件过滤,通过判断条件筛选
a = data[data['c1'] > 1]
b = data['c1']>1
display(a)
display(b)
display(type(b))
# sort_values()排序函数,无by报错
# 以C2降序排序
a = data.sort_values(by='c2',ascending=False)
display(a)
# 升序
b = data.sort_values(by='c2')
display(b)
# 数据删除
display(data)
# 单列
a = data.drop(columns='c1')
display(a)
# 多列
b = data.drop(columns=['c1','c3'])
display(b)
# 多行
c = data.drop(index=['r1','r3'])
display(c)
# +
# 可视化视图
# -
# matlab工具 和 seaborn工具
import matplotlib.pyplot as plt
import seaborn as sns
# +
# 曲线和线 plot
x1 = np.array([1,2,3])
y1 = x1 + 1
plt.plot(x1,y1);
y2 = x1*2
plt.plot(x1,y2);
# -
# 散点图 scatter
x = np.random.rand(10)
y = np.random.rand(10)
plt.scatter(x,y);
# 直方图 hist
# 注意,实际上datafream中有这个方法
data = np.random.randn(10000)
# bins:颗粒度,即为直方图的长条形数目
# edgecolor:顾名思义,边框颜色
plt.hist(data,bins=40,edgecolor='black')
# 使用dataframe内置函数hist绘制直方图
df = pd.DataFrame(data)
df.hist(bins=40,edgecolor='black')
# plot绘图
df.plot()
df.plot(kind='hist',bins=100,edgecolor='blue',color='green')
# 中文标签
df3 = pd.DataFrame([[8000,6000],[7000,5000],[6500,4000]],
columns=['人均收入','人均支出'],
index=['北京','上海','广州'])
display(df3)
# +
import pandas as pd
import numpy as np
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
# 中文标签绘图
# 显示中文标签
plt.rcParams['font.sans-serif'] = ['SimHei']
#解决符号‘-’显示为方块的问题
plt.rcParams['axes.unicode_minus'] = False
#绘图,line为折线,bar为柱状
df3['人均收入'].plot(kind='line')
df3['人均收入'].plot(kind='bar');
print(type(df3))
# -
# 为图形添加文字说明
x = [1,2,3]
y = [2,4,6]
plt.plot(x,y)
plt.title('test')
plt.xlabel('X')
plt.ylabel('Y')
# +
# 使用plt.legend()添加图例,使用不同的线型绘图
x1 = np.array([1,2,3])
y1 = x1+1
plt.plot(x1,y1,label='y=x+1')
y2 = x1*2
plt.plot(x1,y2,color='red',linestyle='--',label='y=x*2')
plt.title('this is test2')
# 添加图例,表示左上角
plt.legend(loc='upper left')
# -
# 绘图改图例位置
x1 = np.array([10,20,30])
y1 = x1
plt.plot(x1,y1,color='red',linestyle='--',label='y=x')
plt.legend(loc='upper left')
# 使用双坐标轴
plt.twinx()
y2=x1 ** 2
plt.plot(x1,y2,label='y=x^2')
plt.legend(loc='upper right')
# 多子图subplot ,参数位1表示行数,参数位2表示列数,参数位3表示编号
# 1.折线图
ax1 = plt.subplot(221)
plt.plot([1,2,3],[2,4,6])
# 2.柱状图
ax1 = plt.subplot(222)
plt.bar([1,2,3],[2,4,6])
# 3.散点图
ax1 = plt.subplot(223)
plt.scatter([1,2,3],[2,4,6])
# 4.直方图
ax1 = plt.subplot(224)
plt.hist([2,2,2,3,4])
# 多图的另外表现形式
# 同时,在绘制过程中也可以为每个图定制标签
fig,axes = plt.subplots(2,2,figsize=(10,8))
ax1,ax2,ax3,ax4 = axes.flatten()
ax1.plot([1,2,3],[2,4,6])
# ax1标签
ax1.set_title('the first title')
ax1.set_xlabel('sub1_X')
ax1.set_ylabel('sub1_Y')
ax2.bar([1,2,3],[2,4,6])
ax3.scatter([1,2,3],[2,4,6])
ax4.hist([2,2,2,3,4])
# +
# seaborn
# +
# 箱线图
# 准备类型为numpy,共4组,每组10个随机数的数据
data = np.random.normal(size=(10,4))
print(data)
print(type(data))
lables = ['A','B','C','D']
# 使用mat绘制
plt.boxplot(data,labels=lables)
plt.show()
# 使用sea绘制
df = pd.DataFrame(data,columns=lables)
sns.boxplot(data=df)
plt.show()
# -
# 饼图
nums = [25,33,37]
labels=['ADC','APC','TK']
plt.pie(x=nums,labels=labels)
# +
# 热力图:反应两个变量之间的关系
np.random.seed(33)
data = np.random.rand(3,3)
print(data)
print(type(data))
heatmap = sns.heatmap(data)
plt.show()
# +
# 获取示例数据集
flights = sns.load_dataset("flights")
sns.jointplot(x="year",y="passengers",data=flights,kind="scatter")
sns.jointplot(x="year",y="passengers",data=flights,kind="kde")
sns.jointplot(x="year",y="passengers",data=flights,kind="hex")
# -
| python/jupyternotebook/learning20208/.ipynb_checkpoints/pra825-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Examples of downloading files with ahh.pre: wget_fi, gen_fi_list and wget_list.
from ahh import pre
fi = pre.wget_fi('ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.1948.nc') # most simple case
fi # returns file name
fi = pre.wget_fi('ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.1948.nc',
                 out_dir='data') # output to another directory named data; if it doesn't exist, will automatically create
fi # returns file path if out_dir is not None
pre.wget_fi('ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.1948.nc',
            out_dir='data', user=None, pwd=None, quiet=True) # can input user/pwd and turn off output in terminal
pre.wget_fi('ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.194*.nc') # can utilize wild card
fmt = 'ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.{dt:%Y}.nc' # must use "dt" somewhere
start = '1948' # start at 1948
end = '1951' # finish at 1950 (the 1951 endpoint itself is not downloaded)
freq = '1A' # increase annually
fi_list = pre.gen_fi_list(fmt, start, end, freq=freq) # gain a bit more control
pre.wget_list(fi_list, out_dir='data') # note wget_list instead of wget_fi
fmt = 'ftp://ftp.cdc.noaa.gov/Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.{dt:%Y}.nc'
start = '1948' # start at 1948
end = '1951' # finish at 1950 (the 1951 endpoint itself is not downloaded)
freq = '1A' # increase annually
fi_list = pre.gen_fi_list(fmt, start, end, freq=freq)
pre.wget_list(fi_list, out_dir='data', nthreads=3) # download all three files simultaneously (useful for small files)
# NOTE: user/pwd inputs will not work with wget_list when nthreads > 1
| examples/pre/wget_fi_AND_gen_fi_list_AND_wget_list.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ##### Created by <NAME>
# ##### Last Updated 4/10/2020
# ### About:
# The overall objective of this project is to project revenue for several Countries, 1 Quarter into the future. Exponential Smoothing, ARIMA modeling, and Linear Regression will be utilized to do so dynamically.
#
# This notebook will be used to explore the different models listed above on one specific country. Upon completion, a dynamic version of the final model will be created in a .py file, also located in this github folder.
#
# The data source is a currency file found on the Kaggle notebook below, with modifications made to format the data in a way that was useful for me to use in my career as a Data Scientist at IBM. These values in No Way represent true financial data at IBM.
#
# Referenced - Topic 9 Part 1. Time series analysis in Python. found on Kaggle
# https://www.kaggle.com/kashnitsky/topic-9-part-1-time-series-analysis-in-python/data
# +
# Import relevant Packages
import warnings # do not disturb
warnings.filterwarnings('ignore')
# used for accuracy metrics
from sklearn.metrics import r2_score, median_absolute_error, mean_absolute_error
from sklearn.metrics import median_absolute_error, mean_squared_error, mean_squared_log_error
import numpy as np # vectors and matrices
import pandas as pd # tables and data manipulation
import matplotlib.pyplot as plt # plots
import seaborn as sns # additional plots
from math import sqrt
from math import isnan
from dateutil.relativedelta import relativedelta # working with dates and style
from scipy.optimize import minimize # for function minimization
import statsmodels.formula.api as smf # statistics and econometrics
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import scipy.stats as scs
from itertools import product # useful functions
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
# %matplotlib inline
# -
# ### Store Data
# #### Lets look at Revenue from Germany
# Load the revenue time series and keep only the German series.
rev_df = pd.read_csv('currency.csv')
# select Germany and drop additional columns
rev_df = rev_df[rev_df['Country'] == "Germany"].drop(columns='Country') # select Germany and drop country column
rev_df.tail()
# Quick look at the raw monthly revenue before modeling.
plt.figure(figsize=(25, 7))
plt.plot(rev_df.Revenue)
plt.title('Monthly Revenue - Germany')
plt.grid(True)
plt.show()
# ## Forecasting Quality Metrics
# ### These are common metrics used to measure quality of the forecasts we create.
# ### Most of the measurements can be created using the sklearn package
# ##### R squared:
# coefficient of determination (in econometrics, this can be interpreted as the percentage of variance explained by the model), (−∞,1]
# NOTE: the bare names below are the ones bound by the from-imports at the top of
# the notebook; "sklearn.metrics.r2_score" would raise NameError because the
# package name "sklearn" itself is never imported.
r2_score
# ##### Mean Absolute Error:
# this is an easy to interpret metric, it uses the same unit of measurement as the initial series, [0,+∞)
mean_absolute_error
# ##### Mean Squared Error:
# very commonly used metric, gives a higher penalty to large errors and vice versa
mean_squared_error  # was median_absolute_error, a different metric than the one described here
# ##### Mean Squared Logarithmic Error:
# similar to MSE but more weight is given to small mistakes as well.
# Typically used when exponential trends exist. [0,+∞)
mean_squared_log_error
# ##### Mean Absolute Percentage Error:
# This is MAE calculated as a percentage.
# This is easy for management to interpret, and ultimately used in the final .py file
# to determine which model to use.
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the Mean Absolute Percentage Error (MAPE), in percent.

    :param y_true: array-like of observed values; must not contain zeros,
        since each error is scaled by the true value
    :param y_pred: array-like of predicted values, same length as y_true
    :return: float, mean of ``|(y_true - y_pred) / y_true|`` * 100
    """
    # asarray generalizes the function to plain Python lists as well as
    # numpy arrays / pandas Series (the original required array inputs).
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
# ### Moving Average
# Starting with the hypothesis that tomorrow will be the same as today(very unlikely), we begin to analyze by determining what it's moving average is. Moving average is the average of the previous n observations. We use this to predict 1 observation into the future, and iterate.
#
# As we continue to look into the future, our prediction quickly becomes unreliable as the averages become dependent upon the initial averages. We can use the moving average in smoothing the original time series data, which helps to identify trends.
# +
def moving_average(series, n):
    """Predict the next observation as the plain average of the last *n* values.

    :param series: ordered sequence of observations (list, numpy array or
        pandas Series)
    :param n: window size, i.e. how many trailing observations to average
    :return: mean of the final n entries; serves as a one-step-ahead forecast
    """
    window = series[-n:]
    return np.mean(window)
moving_average(rev_df.Revenue, 6)
# -
# In Pandas, we have the DataFrame.rolling(window).mean() where the window represents the number of observations to include. the wider the window, the smoother the trend. Consider taking the average of every observation in a dataset. We would have a straight line, as the average plotted would not change. Identifying trends with the smoothing technique can help detect common patterns, especially in noisy data.
#
# Lets look at a plot of the Moving Average.
# +
def plotMovingAverage(series, window, plot_intervals=False, scale=1.96, plot_anomalies=False):
    """
    :define: smooth the original time series to identify trends. Helps detect common patterns for noisy data
    :param series: pandas Series with a time index
    :param window: rolling window size - the number of observations used for calculating the statistic
    :param plot_intervals: show confidence intervals around the smoothed trend
    :param plot_anomalies: highlight observations falling outside the confidence band
    :param scale: width of the confidence band in standard deviations (1.96 ~ 95%)
    :return: None; plots the time series with the moving-average trend
    """
    rolling_mean = series.rolling(
        window=window).mean()  # smoothes the original series to identify trends. Same as moving_average function defined

    plt.figure(figsize=(15, 5))
    # was "/n", which printed literally instead of breaking the line
    plt.title("Moving average\n window size = {}".format(window))
    plt.plot(rolling_mean, "g", label="Rolling mean trend")

    # The confidence band is needed both for drawing intervals and for flagging
    # anomalies; computing it for either flag fixes a NameError the original
    # raised when plot_anomalies=True was used without plot_intervals.
    if plot_intervals or plot_anomalies:
        mae = mean_absolute_error(series[window:], rolling_mean[window:])
        deviation = np.std(series[window:] - rolling_mean[window:])
        lower_bond = rolling_mean - (mae + scale * deviation)
        upper_bond = rolling_mean + (mae + scale * deviation)
        if plot_intervals:
            plt.plot(upper_bond, "r--", label="Upper Bond / Lower Bond")
            plt.plot(lower_bond, "r--")

        # Having the intervals, find abnormal values
        if plot_anomalies:
            # Use a float Series aligned to the data's index (the original built
            # a DataFrame whose *columns* were the data values, which cannot be
            # assigned through the boolean masks below).
            anomalies = pd.Series(index=series.index, dtype='float64')
            anomalies[series < lower_bond] = series[series < lower_bond]
            anomalies[series > upper_bond] = series[series > upper_bond]
            plt.plot(anomalies, "ro", markersize=10)

    plt.plot(series[window:], label="Actual values")
    plt.legend(loc="upper left")
    plt.grid(True)
plotMovingAverage(rev_df.Revenue, 20) # Notice the flat line when we approach the number of observations
# -
plotMovingAverage(rev_df.Revenue, 3) # Smooth by the previous 3 months(Quarter)
# Analyzing the rolling mean trend with a quarterly window. Note that we show no anomalies within our data, but there does seem to be a quarterly seasonal trend. If the seasonal trend is stronger for other countries, we risk marking peaks in those trends as anomalies.
plotMovingAverage(rev_df.Revenue, 3, plot_intervals=True, plot_anomalies=True)
# ### Weighted Average
# Weighted average is a modification to the moving average. The weights sum up to 1 with larger weights assigned to more recent observations. It adds importance of the observations that are closer within the window n, n being the number of weights listed in the function below.
#
# +
def weighted_average(series, weights):
    """Weighted modification of the moving average over the last len(weights)
    observations.

    :param series: observations exposing ``.iloc`` (e.g. a pandas Series)
    :param weights: list of weights that should sum to 1; the *last* listed
        weight is applied to the most recent observation (matching the
        original reverse-then-iterate behaviour)
    :return: float, the weighted average; predicts one observation in the future
    """
    # Iterate over a reversed *copy*: the original called weights.reverse()
    # in place, silently mutating the caller's list so that repeated calls
    # with the same list produced different results.
    result = 0.0
    for n, weight in enumerate(reversed(weights)):
        result += series.iloc[-n - 1] * weight
    return float(result)
weighted_average(rev_df.Revenue, [0.9, 0.6, 0.3, 0.1])
# -
# ### Exponential Smoothing
# Exponential smoothing is the process of weighting all available observations while exponentially decreasing the weights as we move further back in time.
# The model value is a weighted average between the current true value and the previous model values.
#
# *
#
# alpha = Weight, or smoothing factor, and defines how quickly we will "forget" the last available true observation.
#
# *
#
# The smaller α is, the more influence the previous observations have and the smoother the series is. Exponentiality is hidden in the recursiveness of the function -- we multiply by (1−α) each time, which already contains a multiplication by (1−α) of previous model values.
# +
def exponential_smoothing(series, alpha):
    """Exponentially smooth *series*, weighting all past observations with
    exponentially decaying weights.

    The recursion is ``y_hat[0] = series[0]`` and
    ``y_hat[t] = alpha * series[t] + (1 - alpha) * y_hat[t - 1]``; the
    exponential decay is hidden in the repeated multiplication by (1 - alpha).

    :param series: sequence of observations supporting integer indexing
    :param alpha: float [0.0, 1.0] smoothing parameter; the smaller alpha is,
        the more influence the previous observations have and the smoother
        the result
    :return: list of smoothed values, one per observation; the last entry
        doubles as a one-step-ahead prediction
    """
    smoothed = [series[0]]  # seed the recursion with the first observation
    for step in range(1, len(series)):
        previous = smoothed[-1]
        smoothed.append(alpha * series[step] + (1 - alpha) * previous)
    return smoothed
# exponential_smoothing(rev_df.Revenue,.3)
# +
def plotExponentialSmoothing(series, alphas):
    """Plot *series* together with its exponentially smoothed versions.

    :param series: pandas Series of observations
    :param alphas: iterable of smoothing parameters; smaller alpha means the
        previous observations have more influence and the curve is smoother
    :return: None; draws one smoothed line per alpha plus the actual values
    """
    with plt.style.context('seaborn-white'):
        plt.figure(figsize=(17, 7))
        # One smoothed curve per requested alpha, labelled accordingly.
        for smoothing_factor in alphas:
            smoothed = exponential_smoothing(series, smoothing_factor)
            plt.plot(smoothed, label="Alpha {}".format(smoothing_factor))
        plt.plot(series.values, "c", label="Actual")
        plt.legend(loc="best")
        plt.axis('tight')
        plt.title("Exponential Smoothing")
        plt.grid(True)
plotExponentialSmoothing(rev_df.Revenue, [0.1, 0.5, 0.8])
# -
# ### Double Exponential Smoothing
# The moving_average, weighted_average, and Exponential Smoothing functions defined above only produce a prediction for one single future point. The prior functions have learned to predict intercept, or expected series value. We can add Series decomposition to improve our scope, by adding the slope component, or trend, b.
#
# By applyting the same exponential smoothing to the trend, we can use the previous weighted changes of the slope to produce a slope forecast. As a result, we get the following set of functions:
#
# *
#
# ℓx=αyx+(1−α)(ℓx−1+bx−1)
#
# bx=β(ℓx−ℓx−1)+(1−β)bx−1
#
# ŷ x+1=ℓx+bx
#
# *
#
# The first formula describes the intercept, which depends on the current value of the series. The second term is now split into previous values of the level and of the slope(trend). The second function describes the slope(trend), which depends on the level changes at the current step and on the previous value of the trend. In this case, the β, or Beta,
# coefficient is a weight for exponential smoothing. The final prediction is the sum of the model values of the intercept and trend.
# +
def double_exponential_smoothing(series, alpha, beta):
    """Holt's linear method: exponential smoothing applied to both the level
    (intercept) and the trend (slope) of the series.

    The larger alpha and beta are, the more weight the most recent
    observations carry and the less smoothed the model series is.

    :param series: sequence of observations supporting integer indexing
    :param alpha: float [0.0, 1.0], smoothing parameter for the level;
        responsible for smoothing the series around the trend
    :param beta: float [0.0, 1.0], smoothing parameter for the trend itself
    :return: list of level+trend model values; one extra entry at the end is
        a genuine forecast two observations into the future
    """
    smoothed = [series[0]]
    level = trend = None
    for step in range(1, len(series) + 1):
        if step == 1:
            # Initialise: level is the first point, trend the first difference.
            level = series[0]
            trend = series[1] - series[0]
        # Past the end of the data, feed the model its own last output.
        value = smoothed[-1] if step >= len(series) else series[step]
        previous_level = level
        level = alpha * value + (1 - alpha) * (level + trend)
        trend = beta * (level - previous_level) + (1 - beta) * trend
        smoothed.append(level + trend)
    return smoothed
# -
| TimeSeries_Finance/TimeSeries_Exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Teoría y Landscape de Visualización
# + [markdown] slideshow={"slide_type": "slide"}
# Los objetivos de esta clase son:
#
# * Comprender la importancia de las visualizaciones.
# * Conocer las librerías de visualización en Python.
# -
# Aprender sobre visualizaciones es importante ya que:
#
# * Un resultado no sirve si no puede comunicarse correctamente.
# * Una buena visualización dista de ser una tarea trivial.
# * Un ingeniero necesita producir excelentes gráficos (pero nadie enseña cómo).
# + [markdown] slideshow={"slide_type": "slide"}
# ## ¿Exageración?
# -
# Lamentablemente no es una exageración: en ocasiones se cometen errores que lo único que logran es confundir, o que inclusive podrían ser errores intencionales para influir en la percepción del observador.
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Primeras visualizaciones
# + [markdown] slideshow={"slide_type": "slide"}
# * Campaña de Napoleón a Moscú (<NAME>, 1889)
#
# 
# * Mapa del cólera (<NAME>, 1855)
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## ¿Por qué utilizamos gráficos para representar datos?
#
# * El 70 % de los receptores sensoriales del cuerpo humano está dedicado a la visión.
# * Cerebro ha sido entrenado evolutivamente para interpretar la información visual de manera masiva.
#
# _“The eye and the visual cortex of the brain form a massively
# parallel processor that provides the highest bandwidth channel
# into human cognitive centers”
# — <NAME>, Information Visualization, 2004._
# + [markdown] slideshow={"slide_type": "slide"}
# ## Ejemplo: Cuarteto de ANSCOMBE
#
# Considere los siguientes 4 conjuntos de datos.
#
# ¿Qué puede decir de los datos?
# +
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Load the Anscombe quartet (four x/y column pairs) shipped with the course data.
df = pd.read_csv(os.path.join("..", "data","anscombe.csv"))
df
df.describe()  # summary statistics look nearly identical across the four groups
# ¿Por qué es un ejemplo clásico?
# For each of the four Anscombe groups, fit a least-squares line and report its
# slope and intercept: numerically the groups look essentially the same.
for i in range(1, 4 + 1):
    x = df.loc[:, f"x{i}"].values
    y = df.loc[:, f"y{i}"].values
    slope, intercept = np.polyfit(x, y, 1)  # degree-1 fit -> (slope, intercept)
    print(f"Grupo {i}:\n\tTiene pendiente {slope:.2f} e intercepto {intercept:.2f}.\n")
# +
# Plot the four groups side by side with their fitted lines: despite identical
# statistics, the scatter patterns are radically different.
groups = range(1, 4 + 1)
x_columns = [col for col in df if "x" in col]
# Common x range for the fitted lines, padded one unit beyond the data.
x_aux = np.arange(
    df.loc[:, x_columns].values.min() - 1,
    df.loc[:, x_columns].values.max() + 2
)
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(16, 8), sharex=True, sharey=True)
fig.suptitle("Cuarteto de Anscombe")
for i, ax in zip(groups, axs.ravel()):
    x = df.loc[:, f"x{i}"].values
    y = df.loc[:, f"y{i}"].values
    m, b = np.polyfit(x, y, 1)  # least-squares slope and intercept
    ax.plot(x, y, 'o')
    ax.plot(x_aux, m * x_aux + b, 'r', lw=2.0)
    ax.set_title(f"Grupo {i}")
# -
# ## Teoría
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sistema visual humano
#
# * Buenas noticias
# - Gráficos entregan información que la estadística podría no revelar.
# - Despliegue visual es esencial para comprensión.
# * Malas noticias
# - La atención es selectiva y puede ser fácilmente engañada.
# + [markdown] slideshow={"slide_type": "slide"}
# Ejemplos de que la atención es selectiva y que puede ser fácilmente engañada.
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Consejos generales
#
# <NAME>, en su charla "Cuatro pilares de la visualización" ([es](https://www.youtube.com/watch?v=nC92wIzpQFE), [en](https://www.youtube.com/watch?v=3eZ15VplE3o)), presenta buenos consejos sobre cómo realizar una correcta visualización:
# * Propósito
# * Información/Contenido
# * Codificación/Estructura
# * Formato
#
# Es altamente aconsejable ver el video, pero en resumen:
#
# * **Propósito** o público tiene que ver con para quién se está preparando la viz y que utilidad se le dará. Es muy diferente preparar un gráfico orientado a información y toma de decisiones.
# * **Información/Contenido** se refiere a contar con la información que se desea mostrar, en el formato necesario para su procesamiento.
# * **Codificación/Estructura** tiene que ver con la selección correcta de la codificación y estructura de la información.
# * **Formato** tiene que ver con la elección de fuentes, colores, tamaños relativos, etc.
#
# Lo anterior indica que una visualización no es el resultado de unos datos. Una visualización se diseña, se piensa, y luego se buscan fuentes de información apropiadas.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Elementos para la creación de una buena visualización
#
# 1. ***Honestidad***: representaciones visuales no deben engañar al observador.
# 2. ***Priorización***: dato más importante debe utilizar elemento de mejor percepción.
# 3. ***Expresividad***: datos deben utilizar elementos con atribuciones adecuadas.
# 4. ***Consistencia***: codificación visual debe permitir reproducir datos.
#
# El principio básico a respetar es que a partir del gráfico uno debe poder reobtener fácilmente los datos originales.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Honestidad
# El ojo humano no tiene la misma precisión al estimar distintas atribuciones:
# * **Largo**: Bien estimado y sin sesgo, con un factor multiplicativo de 0.9 a 1.1.
# * **Área**: Subestimado y con sesgo, con un factor multiplicativo de 0.6 a 0.9.
# * **Volumen**: Muy subestimado y con sesgo, con un factor multiplicativo de 0.5 a 0.8.
# + [markdown] slideshow={"slide_type": "slide"}
# Resulta inadecuado realizar gráficos de datos utilizando áreas o volúmenes buscando inducir a errores.
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Resulta inadecuado realizar gráficos de datos utilizando áreas o volúmenes si no queda claro la atribución utilizada.
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Una pseudo-excepción la constituyen los _pie-chart_ o gráficos circulares,
# porque el ojo humano distingue bien ángulos y segmentos de círculo,
# y porque es posible indicar los porcentajes respectivos.
# +
## Example from https://matplotlib.org/3.1.1/gallery/pie_and_polar_charts/pie_features.html#sphx-glr-gallery-pie-and-polar-charts-pie-features-py
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]  # slice sizes; autopct below prints each as a % of the total
explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice (i.e. 'Hogs')

fig1, ax1 = plt.subplots(figsize=(8, 8))
ax1.pie(
    sizes,
    explode=explode,
    labels=labels,
    autopct='%1.1f%%',  # one-decimal percentage label on every slice
    shadow=True,
    startangle=90
)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Priorización
# Dato más importante debe utilizar elemento de mejor percepción.
# +
# The same two noisy upward-trending series drawn with four marker/colour
# combinations, showing how the encoding choice prioritises one series.
np.random.seed(42)
N = 31
x = np.arange(N)
y1 = 80 + 20 *x / N + 5 * np.random.rand(N)
y2 = 75 + 25 *x / N + 5 * np.random.rand(N)
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(16,8))
# Top-left: same colour (black), shapes differ (circle vs square).
axs[0][0].plot(x, y1, 'ok')
axs[0][0].plot(x, y2, 'sk')
# Top-right: same shape, colours differ (blue vs red).
axs[0][1].plot(x, y1, 'ob')
axs[0][1].plot(x, y2, 'or')
# Bottom-left: blue circles vs black stars — colour singles out y1.
axs[1][0].plot(x, y1, 'ob')
axs[1][0].plot(x, y2, '*k')
# Bottom-right: encoding swapped (red squares vs blue circles).
axs[1][1].plot(x, y1, 'sr')
axs[1][1].plot(x, y2, 'ob')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# No todos los elementos tienen la misma percepción a nivel del sistema visual.
#
# En particular, el color y la forma son elementos preatentivos: un color distinto o una forma distinta se reconocen de manera no conciente.
#
# Ejemplos de elementos pre-atentivos.
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ¿En que orden creen que el sistema visual humano puede estimar los siguientes atributos visuales:
# * Color
# * Pendiente
# * Largo
# * Ángulo
# * Posición
# * Área
# * Volumen
# + [markdown] slideshow={"slide_type": "slide"}
# El sistema visual humano puede estimar con precisión siguientes atributos visuales:
# 1. Posición
# 2. Largo
# 3. Pendiente
# 4. Ángulo
# 5. Área
# 6. Volumen
# 7. Color
#
# Utilice el atributo que se estima con mayor precisión cuando sea posible.
# + [markdown] slideshow={"slide_type": "slide"}
# Puesto que la percepción del color tiene muy baja precisión, resulta ***inadecuado*** tratar de representar un valor numérico con colores.
# * ¿Qué diferencia numérica existe entre el verde y el rojo?
# * ¿Que asociación preexistente posee el color rojo, el amarillo y el verde?
# * ¿Con cuánta precisión podemos distinguir valores en una escala de grises?
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# Algunos ejemplos de colormaps
# -
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
# Build a grid and evaluate the (scaled) difference of two 2-D Gaussian
# densities, producing a smooth field with a positive and a negative lobe.
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
    pos,
    mean=[-1.0, -1.0],
    cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
    pos,
    mean=[1.0, 1.0],
    cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
# +
# Render the same field under four colormaps to compare their readability.
fig, axs = plt.subplots(2, 2, figsize=(16, 8), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
    im = ax.imshow(z, interpolation='bilinear', origin='lower',cmap=cmaps[i], extent=(-3, 3, -2, 2))
    fig.colorbar(im, ax=ax)
fig.show()
# + [markdown] slideshow={"slide_type": "slide"}
# Consejo: evite mientras pueda los colormaps. Por ejemplo, utilizando contour plots.
# +
# Contour plots of the same field: labelled isolines can be read precisely,
# with or without colour.
fig, axs = plt.subplots(2, 2, figsize=(20, 12), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
    {"cmap": cm.rainbow},
    {"cmap": cm.rainbow},
    {"colors": "k", "linestyles": "solid"},
    {"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
    cs = ax.contour(x, y, z, 11, **countour_styles[i])  # 11 contour levels
    # Label the isolines on every panel except the first.
    if i > 0:
        ax.clabel(cs, fontsize=9, inline=1)
    if i == 3:
        ax.grid(alpha=0.5)
fig.show()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Sobre la Expresividad
# Mostrar los datos y sólo los datos.
#
# Los datos deben utilizar elementos con atribuciones adecuadas: _Not all data is born equal_.
#
# Clasificación de datos:
# * ***Datos Cuantitativos***: Cuantificación absoluta.
# * Cantidad de azúcar en fruta: 50 [gr/kg]
# * Operaciones =, $\neq$, <, >, +, −, * , /
# * ***Datos Posicionales***: Cuantificación relativa.
# * Fecha de cosecha: 1 Agosto 2014, 2 Agosto 2014.
# * Operaciones =, $\neq$, <, >, +, −
# * ***Datos Ordinales***: Orden sin cuantificación.
# * Calidad de la Fruta: baja, media, alta, exportación.
# * Operaciones =, $\neq$, <, >
# * ***Datos Nominales***: Nombres o clasificaciones
# * Frutas: manzana, pera, kiwi, ...
# * Operaciones $=$, $\neq$
# + [markdown] slideshow={"slide_type": "slide"}
# Ejemplo: Terremotos. ¿Que tipos de datos tenemos?
# * Ciudad más próxima
# * Año
# * Magnitud en escala Richter
# * Magnitud en escala Mercalli
# * Latitud
# * Longitud
# + [markdown] slideshow={"slide_type": "slide"}
# Contraejemplo: Compañías de computadores.
#
# | Companía | Procedencia |
# |----------|-------------|
# | MSI | Taiwan |
# | Asus | Taiwan |
# | Acer | Taiwan |
# | HP | EEUU |
# | Dell | EEUU |
# | Apple | EEUU |
# | Sony | Japon |
# | Toshiba | Japon |
# | Lenovo | Hong Kong |
# | Samsung | Corea del Sur |
#
# -
# Counter-example: nominal data (country of origin) encoded as bar heights
# through arbitrary numeric codes — heights carry no real meaning.
brands = {
    "MSI": "Taiwan",
    "Asus": "Taiwan",
    "Acer": "Taiwan",
    "HP": "EEUU",
    "Dell": "EEUU",
    "Apple": "EEUU",
    "Sony": "Japon",
    "Toshiba": "Japon",
    "Lenovo": "Hong Kong",
    "Samsung": "Corea del Sur"
}
# Arbitrary numeric code per country, used only to position the bars.
C2N = {"Taiwan": 1, "EEUU": 2, "Japon": 3, "Hong Kong": 4, "Corea del Sur": 7}
x = np.arange(len(brands.keys()))
y = np.array([C2N[val] for val in brands.values()])
width = 0.35 # the width of the bars
fig, ax = plt.subplots(figsize=(16, 8))
rects1 = ax.bar(x, y, width, color='r')
# add some text for labels, title and axes ticks
ax.set_xticks(x + 0.5*width)
ax.set_xticklabels(brands.keys(), rotation="90")
ax.set_yticks(list(C2N.values()))
ax.set_yticklabels(C2N.keys())
plt.xlim([-1,len(x)+1])
plt.ylim([-1,y.max()+1])
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# Clasificación de datos:
# * ***Datos Cuantitativos***: Cuantificación absoluta.
# * Cantidad de azúcar en fruta: 50 [gr/kg]
# * Operaciones =, $\neq$, <, >, +, −, * , /
# * **Utilizar posición, largo, pendiente o ángulo**
# * ***Datos Posicionales***: Cuantificación relativa.
# * Fecha de cosecha: 1 Agosto 2014, 2 Agosto 2014.
# * Operaciones =, $\neq$, <, >, +, −
# * **Utilizar posición, largo, pendiente o ángulo**
# * ***Datos Ordinales***: Orden sin cuantificación.
# * Calidad de la Fruta: baja, media, alta, exportación.
# * Operaciones =, $\neq$, <, >
# * **Utilizar marcadores diferenciados en forma o tamaño, o mapa de colores apropiado**
# * ***Datos Nominales***: Nombres o clasificaciones
# * Frutas: manzana, pera, kiwi, ...
# * Operaciones $=$, $\neq$
# * **Utilizar forma o color**
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Consistencia
#
# La codificación visual debe permitir reproducir datos. Para ello debemos:
# * Graficar datos que sean comparables.
# * Utilizar ejes escalados adecuadamente.
# * Utilizar la misma codificación visual entre gráficos similares.
#
# Utilizar ejes escalados adecuadamente.
# +
# Same monthly series with an auto-scaled y-axis (left) versus a zero-anchored
# y-axis (right): axis scaling changes the visual impression drastically.
x = list(range(1, 13))
y = 80 + 20 * np.random.rand(12)
x_ticks = list("EFMAMJJASOND")  # Spanish month initials (Enero..Diciembre)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 8))
ax1.plot(x, y, 'o-')
ax1.set_xticks(x)
ax1.set_xticklabels(x_ticks)
ax1.grid(alpha=0.5)
ax2.plot(x, y,'o-')
ax2.set_xticks(x)
ax2.set_xticklabels(x_ticks)
ax2.set_ylim([0, 110])  # anchor the axis at zero for an honest comparison
ax2.grid(alpha=0.5)
fig.show()
# + [markdown] slideshow={"slide_type": "slide"}
# Utilizar la misma codificación visual entre gráficos similares
# +
# Two panels that swap colour/line-style between the series: reusing a visual
# encoding inconsistently between similar charts misleads the reader.
x = np.linspace(0, 1, 50)
f1 = x ** 2 + .2 * np.random.rand(50)
g1 = x + .2 * np.random.rand(50)
f2 = 0.5 - 0.2 * x + .2 * np.random.rand(50)
g2 = x ** 3 + .2 * np.random.rand(50)
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 12), sharex=True)
ax1.set_title("Antes de MAT281")
ax1.plot(x, f1, 'b', label='Chile', lw=2.0)
ax1.plot(x, g1, 'g:', label='OECD', lw=2.0)
ax1.legend(loc="upper left")
ax2.set_title("Despues de MAT281")
# Styles are swapped relative to the first panel: Chile is now the dotted
# green line and OECD the solid blue one.
ax2.plot(x, f2, 'g:', label='Chile', lw=2.0)
ax2.plot(x, g2, 'b', label='OECD', lw=2.0)
ax2.legend()
fig.show()
# -
# ## Python Viz Landscape
#
# Para empezar, [PyViz](https://pyviz.org/) es un sitio web que se dedica a ayudar a los usuarios a decidir dentro de las mejores herramientas de visualización open-source implementadas en Python, dependiendo de sus necesidades y objetivos. Mucho de lo que se menciona en esta sección está en detalle en la página web del proyecto PyViz.
#
# Algunas de las librerías de visualización de Python más conocidas son:
#
# 
#
# Este esquema es una adaptación de uno presentado en la charla [_The Python Visualization Landscape_](https://us.pycon.org/2017/schedule/presentation/616/) realizada por [<NAME>](http://vanderplas.com/) en la PyCon 2017.
#
# Cada una de estas librerías fue creada para satisfacer diferentes necesidades, algunas han ganado más adeptos que otras por uno u otro motivo. Tal como avanza la tecnología, estas librerías se actualizan o se crean nuevas, la importancia no recae en ser un experto en una, si no en saber adaptarse a las situaciones, tomar la mejor decicisión y escoger según nuestras necesidades y preferencias. Por ejemplo, `matplotlib` nació como una solución para imitar los gráficos de `MATLAB` (puedes ver la historia completa [aquí](https://matplotlib.org/users/history.html)), manteniendo una sintaxis similar y con ello poder crear gráficos __estáticos__ de muy buen nivel.
#
# Debido al éxito de `matplotlib` en la comunidad, nacen librerías basadas ella. Algunos ejemplos son:
#
# - `seaborn` se basa en `matpĺotlib` pero su nicho corresponde a las visualizaciones estadísticas.
# - `ggpy` una suerte de copia a `ggplot2` perteneciente al lenguaje de programación `R`.
# - `networkx` visualizaciones de grafos.
# - `pandas` no es una librería de visualización propiamente tal, pero utiliza a `matplotlib` como _backend_ en los métodos con tal de crear gráficos de manera muy rápida, e.g. `pandas.DataFrame.plot.bar()`
#
# Por otro lado, con tal de crear visualizaciones __interactivas__ aparecen librerías basadas en `javascript`, algunas de las más conocidas en Python son:
#
# - `bokeh` tiene como objetivo proporcionar gráficos versátiles, elegantes e incluso interactivos, teniendo una gran performance con grandes datasets o incluso streaming de datos.
# - `plotly` visualizaciones interactivas que en conjunto a `Dash` (de la misma empresa) permite crear aplicaciones webs, similar a `shiny` de `R`.
#
# `D3.js` a pesar de estar basado en `javascript` se ha ganado un lugar en el corazón de toda la comunidad, debido a la ilimitada cantidad de visualizaciones que son posibles de hacer, por ejemplo, la [malla interactiva](https://mallas.labcomp.cl/) que hizo un estudiante de la UTFSM está hecha en `D3.js`.
#
# De las librerías más recientes está `Altair`, que consiste en visualizaciones declarativas (ya lo veremos en el próximo laboratorio). Construída sobre `Vega-Lite`, a su vez que esté está sobre `Vega` y este finalmente sobre `D3.js`. `Altair` permite crear visualizaciones estáticas e interactivas con pocas líneas de código, sin embargo, al ser relativamente nueva, aún existen funcionalidades en desarrollo o que simplemente aún no existen en esta librería pero en otras si.
#
# __Clasificación__
#
# En lo concierne a nosotros, una de las principales clasificaciones para estas librerías es si crean visualizaciones __estática__ y/o __interactivas__. La interactividad es un plus que permite adentrarse en los datos en distintos niveles, si agregamos que ciertas librerías permiten crear _widgets_ (algo así como complementos a las visualizaciones) su potencial aumenta. Por ejemplo, un widget podría ser un filtro que permita escoger un país; en una librería estática tendrías que crear un gráfico por cada país (o combinación de países) lo cual no se hace escalable y cómodo para trabajar.
#
# __Spoilers__
#
# Las próximas clases se centrarán en `matplotlib` y `Altair`, dado que son buenos exponentes de visualización imperativa y declarativa, respectivamente.
#
# Finalmente, siempre hay que tener en consideración la manera en que se compartirán las visualizaciones, por ejemplo, si es para un artículo científico bastaría que fuese de buena calidad y estático. Si es para una plataforma web es necesario que sea interactivo, aquí es donde entran en juego los dashboards, que permiten la exploración de datos de manera interactiva. En Python existen librerías como `Dash` o `Panel`, sin embargo, en el mundo empresarial se suele utilizar software dedicado a esto, como `Power BI` o `Tableau`.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Resumen
# Elementos para la creación de una buena visualización
# * ***Honestidad***: representaciones visuales no deben engañar al observador.
# * ***Priorización***: dato más importante debe utilizar elemento de mejor percepción.
# * ***Expresividad***: datos deben utilizar elementos con atribuciones adecuadas.
# * ***Consistencia***: codificación visual debe permitir reproducir datos.
#
# El principio básico a respetar es que a partir del gráfico uno debe poder re-obtener fácilmente los datos originales.
| lessons/M3L01_theory_and_landscape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:analysis3-20.01]
# language: python
# name: conda-env-analysis3-20.01-py
# ---
# # How To Use The COSIMA Cookbook
#
# This notebook is designed to help new users get to grips with the COSIMA Cookbook. It assumes that:
# * You have access to the COSIMA cookbook.
# * We recommend using the latest version of the cookbook available through the `conda/analysis3-unstable` module on NCI.
# * You can fire up a Jupyter notebook!
# **Before starting,** load in some standard libraries that you are likely to need:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import pandas as pd
import IPython.display
import cmocean as cm
import cartopy.crs as ccrs
# -
# In addition, you **always** need to load the cosima_cookbook module. This provides a bunch of functions that you will use:
import cosima_cookbook as cc
# ## 1. The Cookbook Philosophy
# The COSIMA Cookbook is a framework for analysing ocean-sea ice model output.
# It is designed to:
# * Provide examples of commonly used diagnostics;
# * Write efficient, well-documented, openly accessible code;
# * Encourage community input to the code;
# * Ensure diagnostic results are reproducible;
# * Process diagnostics directly from the model output, minimising creation of intermediate files;
# * Find methods to deal with the memory limitations of analysing high-resolution model output.
#
#
# ### 1.1 A database of experiments
# The COSIMA Cookbook relies on a database of experiments in order to load model output. This database effectively holds metadata for each experiment, as well as variable names, data ranges and so on.
#
# There are two different ways for you to access the database:
# 1. You can use the default database, which is periodically refreshed automatically. This database sits in `/g/data3/hh5/tmp/cosima/database/access-om2.db` and should be readable for all users. It includes all experiments stored in the COSIMA data directory under project `hh5` on NCI. The examples in this tutorial use this database.
# 2. Otherwise, you can make your own database, which is stored in your own path and includes only the experiments you are interested in. Please refer to the `Make_Your_Own_Database` tutorial for instructions on how to create this database.
#
# To access the default database, you need to start a database session each time you fire up a notebook:
# open a session on the shared default experiment database (read-only use)
session = cc.database.create_session()
# ### 1.2 Inbuilt Database Functions
#
# We have constructed a few functions to help you operate the cookbook and to access the datasets. These functions all sit in the `cosima_cookbook` directory. For example, `netcdf_index.py` contains the above `build_index` function as well as a series of functions that are built to query the SQL database.
# `get_experiments` lists all of the experiments that are catalogued in the database.
# list every experiment catalogued in the database
cc.querying.get_experiments(session)
# Internally, an experiment is a set of netCDF4 files as shown in the above table.
# `get_ncfiles` provides a list of all the netcdf filenames saved for a given experiment along with the time stamp for when that file was added to the cookbook database. Note that each of these filenames are present in some or all of the output directories -- **but the cookbook philosophy is that you don't need to know about the directories in which these files are stored**. To see the relevant files:
# list the netCDF filenames (and ingest timestamps) for one experiment
cc.querying.get_ncfiles(session, '025deg_jra55v13_ryf8485_gmredi6')
# More usefully, `get_variables` provides a list of all the variables available in a specific experiment.
# list the variables saved at monthly frequency for this experiment
cc.querying.get_variables(session, experiment='025deg_jra55v13_ryf8485_gmredi6', frequency='1 monthly')
# Omitting the `frequency` would give variables at all temporal frequencies. To determine what frequencies are in a given experient, we can use `get_frequencies`. Leaving off the `experiment` gives all possible frequencies.
# show which output frequencies exist for this experiment
cc.querying.get_frequencies(session, experiment='025deg_jra55v13_ryf8485_gmredi6')
# ### 1.3 Loading data from a netcdf file
#
# Python has many ways of reading in data from a netcdf file ... so we thought we would add another way. This is achieved in the `querying.getvar()` function, which is the most commonly used function in the cookbook. This function queries the database to find a specific variable, and loads some or all of that file. We will now take a little while to get to know this function. In it's simplest form, you need just three arguments: expt, variable and database.
#
# You can see all the available options using the inbuilt help function, which brings up the function documentation.
# print the full signature and docs of the main data-loading function
help(cc.querying.getvar)
# You may like to note a few things about this function:
# 1. The data is returned as an xarray DataArray, which includes the coordinate and attribute information from the netcdf file (more on xarray later).
# 2. The variable time does not start at zero - and if you don't like it you can introduce an offset to alter the time axis.
# 3. By default, we load the whole dataset, but we can load a subset of the times (see below).
# 4. Other customisable options include setting the variable chunking and incorporating a function to operate on the data.
# load the full globally-averaged temperature timeseries as an xarray DataArray
expt = '025deg_jra55v13_ryf8485_gmredi6'
variable = 'temp_global_ave'
darray = cc.querying.getvar(expt,variable,session)
darray
# You can see that this operation loads the globally averaged potential temperature from the model output. The time axis runs from 1900 to 2198. For some variables (particularly 3D variables that might use a lot of memory) you may prefer to restrict yourself to a smaller time window:
# restrict the load to a time window; note whole files that overlap the
# range are read, so the result may extend slightly beyond these dates
darray = cc.querying.getvar(expt,variable,session,
                          start_time='2000-01-01',
                          end_time='2050-12-31')
darray
# You will see that the time boundaries are not exact here. `cc.querying.getvar` loads all files that include any dates within the specified range. You can use `.sel()` to refine this selection if required (see below).
# ### 1.4 Exercises
# OK, this is a tutorial, so now you have to do some work. Your tasks are to:
# * Find and load SSH from an experiment (an experiment ... perhaps a 1° configuration would be best).
# * Just load the last 10 files from an experiment (any variable you like).
# * Load potential temperature from an experiment (again, 1° would be quickest). Can you chunk the data differently from the default?
# ## 2. How to manipulate and plot variables with xarray
# We use the python package `xarray` (which is built on `dask`, `pandas`, `matplotlib` and `numpy`) for many of our diagnostics. `xarray` has a a lot of nice features, some of which we will try to demonstrate for you.
#
# ### 2.1 Plotting
# `xarray`'s `.plot()` method does its best to figure out what you are trying to plot, and plotting it for you. Let's start by loading a 1-dimensional variable and plotting.
# 1-D example: xarray infers a timeseries plot from the coordinates
expt = '025deg_jra55v13_ryf8485_gmredi6'
variable = 'temp_global_ave'
darray = cc.querying.getvar(expt,variable,session)
darray.plot()
darray
# You should see that `xarray` has figured out that this data is a timeseries, that the x-axis is representing time and that the y-axis is `temp_global_ave`. You can always modify aspects of your plot if you are unhappy with the default xarray behaviour:
# same plot, using matplotlib to override xarray's default labels
darray.plot()
plt.xlabel('Year')
plt.ylabel('Temperature (°C)')
plt.title('Globally Averaged Temperature')
# Because `xarray` knows about dimensions, it has plotting routines which can figure out what it should plot. By way of example, let's load a single time slice of `surface_temp` and see how `.plot()` handles it:
# 2-D example: load only the last output file (n=-1) and let xarray pick a map plot
expt = '025deg_jra55v13_ryf8485_gmredi6'
variable = 'surface_temp'
darray = cc.querying.getvar(expt,variable,session,n=-1)
darray.mean('time').plot()
# Again, you can customise this plot as you see fit:
darray = darray - 273.15 # convert from Kelvin to Celsius
# filled contours with a fixed level range and a perceptual colormap
darray.mean('time').plot.contourf(levels=np.arange(-2,32,2),cmap=cm.cm.thermal)
plt.ylabel('latitude')
plt.xlabel('longitude')
# ### 2.2 Slicing and dicing
#
# There are two different ways of subselecting from a DataArray: `isel` and `sel`. The first of these is probably what you are used to -- you specify the value of the index of the array. In the second case you specify the value of the coordinate you want to select. These two methods are demonstrated in the following example:
# isel selects by positional index; sel selects by coordinate value
darray = cc.querying.getvar('1deg_jra55v13_iaf_spinup1_B1','pot_rho_2',session)
density = darray.isel(time=200).sel(st_ocean=1000,method='nearest')
density.plot()
# In the above example, a 300-year dataset is loaded. We then use `isel` to select the 201st year (timeindex of 200) and use `sel` to select a z level that is about 1000m deep. The `sel` method is very flexible, allowing us to use similar code in differing model resolutions or grids. In addition, both methods allow you to slice a range of values:
# slice out a regional box at ~50m depth; .load() forces the deferred computation
darray = cc.querying.getvar('1deg_jra55v13_iaf_spinup1_B1','v',session)
v = darray.isel(time=100).sel(st_ocean=50,method='nearest')\
            .sel(yu_ocean=slice(-50,-20)).sel(xu_ocean=slice(-230,-180)).load()
v.plot()
# Here we have taken meridional velocity, and sliced out a small region of interest for our plot. Note the `load()` method, which tells `xarray` to do the calculation (otherwise `xarray` aims to defer calculations until the variable is needed).
# ### 2.3 Averaging along dimensions
#
# We often perform operations such as averaging on dataarrays. Again, knowledge of the coordinates can be a big help here, as you can instruct the `mean()` method to operate along given coordinates. The case below takes a temporal and zonal average of potential density. (To be precise, it is actually a mean in the i grid direction, which is only zonal outside the tripolar region in the Arctic, i.e. south of 65N in the ACCESS-OM2 models.)
# time- and (approximately) zonal-mean potential density from the last 10 output files
darray = cc.querying.getvar('1deg_jra55v13_iaf_spinup1_B1','pot_rho_2',session,n=-10)
darray.mean('time').mean('xt_ocean').plot(cmap=cm.cm.haline)
plt.gca().invert_yaxis()  # depth increases downward
# ### 2.4 Resampling
#
# `xarray` uses `datetime` conventions to allow for operations such as resampling in time. This resampling is simple and powerful. Here is an example of re-plotting the figure from 2.1 with annual averaging:
# resample the timeseries to annual ('A') means before plotting
darray = cc.querying.getvar('025deg_jra55v13_iaf_gmredi6','temp_global_ave',session)
meandata = darray.resample(time='A').mean(dim='time')
meandata.plot()
# ### 2.5 Exercises
#
# * Pick an experiment and plot a map of the temperature of the upper 100m of the ocean for one year.
# * Now, take the same experiment and construct a timeseries of spatially averaged (regional or global) upper 700m temperature, resampled every 3 years.
# ## 3. More Advanced Stuff
#
# ### 3.1 Making a map with cartopy
# project a ~50m temperature field onto a Robinson map with cartopy
darray = cc.querying.getvar('025deg_jra55v13_iaf_gmredi6','temp',session,n=-1)
temp = darray.mean('time').sel(st_ocean=50,method='nearest') - 273.15
plt.figure(figsize=(8,4))
ax = plt.axes(projection=ccrs.Robinson())
temp.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree(),x='xt_ocean', y='yt_ocean',cmap=cm.cm.thermal,vmin=-2,vmax=30)
#ax.coastlines()
# ### 3.2 Distributed computing
#
# Many of our scripts use multiple cores for their calculations, usually via the following . It sets up a local cluster on your node for distributed computation.
# +
from dask.distributed import Client
# start a local dask cluster; the returned client object reports a dashboard URL
client = Client()
client
# -
# The dashboard link should allow you to access information on how your work is distributed between the cores on your local cluster.
| Tutorials/COSIMA_CookBook_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Wrangling GBI Alignments into Workflow
#
# GBI has graciously provided alignments for the ESV, KJV, and NIV.
# Unfortunately those alignments are closed-source, so this notebook
# will only provide code used for interpreting the aligned JSON file
# +
import collections
import re
import pandas as pd
import json
import unicodedata as unicode
from pathlib import Path
from pprint import pprint
import Levenshtein
# import BHSA data with TF to look at alignment possibilities
from tf.app import use
# organize pathways
# project root and the (private, not redistributable) GBI alignment data
PROJ_DIR = Path.home().joinpath('github/CambridgeSemiticsLab/translation_traditions_HB')
GBI_DATA_DIR = PROJ_DIR.joinpath('data/_private_/GBI_alignment')
# load BHSA data
bhsa = use('bhsa')
api = bhsa.api
# standard Text-Fabric shorthands: features, edges, text, locality, feature-lookup
F, E, T, L, Fs, = api.F, api.E, api.T, api.L, api.Fs
# -
# # Explore GBI Data
# +
# Load every OT alignment JSON in the private GBI directory,
# keyed by file stem (e.g. 'niv84.ot.alignment').
file2data = {
    path.stem: json.loads(path.read_text())
    for path in GBI_DATA_DIR.glob('*.json')
    if 'ot' in path.name
}

print('keys:', file2data.keys())
# +
# let's check out the NIV OT alignment
# pull the NIV84 Old Testament alignment out of the loaded files
niv_data = file2data['niv84.ot.alignment']
# number of alignment units (roughly, but not exactly, verses)
len(niv_data)
# +
# grab one entry to inspect its structure
ex_verse = niv_data[1]
# parts of individual entry
ex_verse.keys()
# +
#ex_verse['links'] # look at the alignment list
# +
# experiment with multiple word link: "was hovering"
def get_trans_text(manu, trans, verse):
    """Return (hebrew_text, english_text) for one alignment link.

    manu and trans are lists of indices into the verse's manuscript
    and translation word arrays respectively.
    """
    source_words = verse['manuscript']['words']
    target_words = verse['translation']['words']
    heb_txt = ' '.join(source_words[h]['text'] for h in manu)
    heb_txt = heb_txt.strip('\u200e')  # drop right-to-left marks at the edges
    eng_txt = ' '.join(target_words[e]['text'] for e in sorted(trans))
    return (heb_txt, eng_txt)
# print only the links whose English side spans more than one word
for manu, trans in ex_verse['links']:
    if len(trans) > 1:
        trans = sorted(trans)  # NB: redundant, get_trans_text sorts internally
        heb_txt, eng_txt = get_trans_text(manu, trans, ex_verse)
        print(f'{heb_txt} -> {eng_txt}')
# +
# experiment with collecting verbs in HB: gather a (hebrew, english)
# pair for every link whose first Hebrew word is tagged as a verb
verb_dataset = [
    get_trans_text(manu, trans, verse)
    for verse in niv_data
    for manu, trans in verse['links']
    if verse['manuscript']['words'][manu[0]]['pos'] == 'verb'
]

verb_dataset[:5]
# -
verb_dataset[:10]
# +
#ex_verse['translation']['words'][:2]
# +
#ex_verse['manuscript']['words'][:2]
# +
# ex_verse['manuscript']['words']
# -
# # Alignment with BHSA
# We will seek to align the Hebrew texts on the basis of the consonantal text. For this, we take
# some cues from <NAME>'s alignment efforts between BHSA and OSM:
# https://github.com/ETCBC/bridging/blob/master/programs/BHSAbridgeOSM.ipynb
#
# The essential issue that makes alignment difficult is word divisions, which differ between various
# texts. There is an added difficulty with the GBI Hebrew data, which is based on the Westminster Leningrad
# Codex (WLC), since that text sometimes goes with the ketiv and other times with the qere.
#
# ## Alignment Strategy
#
# One option to align the texts would be to iterate word-by-word while keeping track of the current
# position in the text. If BHSA were the reference point, we'd iterate over all word nodes in BHSA
# and attempt to match with the next WLC word in the set; this would require us to advance the position
# until a match is composed in cases where BHSA word is longer than WLC, while vice versa when BHSA is
# shorter than WLC. <NAME> has followed this strategy in the BHSA // OSM alignment.
#
# Another option, which we shall follow here, is to use verse identity to map positions in
# both BHSA and WLC to a common reference string. For example, consider the following string:
#
# > "A cat jumped up"
#
# Let there be textA and textB, which each index this string differently as follows:
#
# ```
# 0 1 2
# textA = ["A cat", "jumped", "up"]
#
# 0 1 2 3
# textB = ["A", "cat", "jumped", "up"]
# ```
#
# The underlying string for both texts is the same however, and can be joined in such a
# way as to produce an identical string with its own indices:
#
# ```
# 0 1 2 3 4 5 6 7 8 9 10 11
# string = "A c a t j u m p e d u p"
#
# ```
#
# We can use this identity property as a common reference point to which the text positions can
# be mapped and translated:
#
# ```
# [ 0 ] -> [0, 1, 2, 3] <- [0, 1]
# textA string textB
#
# [1] -> [4, 5, 6, 7, 8, 9] <- [2]
# textA string textB
#
# [2] -> [10, 11] <- [3]
# textA string textB
# ```
#
# Using these mappings, we can produce an alignment:
#
# ```
# [
# [[0], [0, 1]],
# [[1], [2]],
# [[2], [3]]
# ]
# ```
#
# Since verses are identical between the two sources, we need only ensure that verse strings
# are likewise identical wherever possible. This is done by converting the text in both
# sources to its consonantal form, stripping it of characters unique to each source, stripping
# spaces, and joining the text on nothing to create an indexable string.
# ## Preprocess verse strings
#
# The first step here is to build the verse strings which can be used for indexing.
# This is also necessary to demonstrate that our strategy will work, and also to
# catch those exceptional cases where it won't.
#
# We need to do a few things in this preprocessing stage:
#
# 1. recognize verse id tags from GBI dataset and convert to TF reference tuples
# 2. GBI alignments are organized into lists, which mostly correspond with verses,
# but not always. To aid the alignment work, we should fix this discrepancy by
# following the Hebrew versification and utilizing word id links rather than
# indices.
# 3. prepare strings and indices for alignment
#
#
# ### Exploratory analysis of word ids
#
# We look at the length of GBI word ids to write a regex pattern that can
# recognize each of the parts. The word ids contain versification information.
# +
# reference data stored at word level in 'id' key
# I expect 'id' length to vary by 1, thus 11 or 12
# tally how many digits each GBI word id has; expect 11 or 12 depending
# on whether the book number needed one or two digits
id_lengths = collections.Counter(
    len(str(word['id']))
    for verse in niv_data
    for word in verse['manuscript']['words']
)

id_lengths.most_common()
# -
# Some book ID integers have 11 for single-digit books. We'll correct for this in `id2etcbc` by
# adding a 0 padding to normalize this difference for the regex matching.
# ### 1. Function for converting ids to TF reference tuples
# GBI uses English book order, so we need to make an int 2 book mapping using that order.
# +
# eng book order
# eng book order
# GBI word ids number the books in English (Protestant) canonical order,
# so the int -> book mapping must follow that order rather than the Hebrew canon's
eng_book_list = '''
Genesis
Exodus
Leviticus
Numbers
Deuteronomy
Joshua
Judges
Ruth
1 Samuel
2 Samuel
1 Kings
2 Kings
1 Chronicles
2 Chronicles
Ezra
Nehemiah
Esther
Job
Psalms
Proverbs
Ecclesiastes
Song of songs
Isaiah
Jeremiah
Lamentations
Ezekiel
Daniel
Hosea
Joel
Amos
Obadiah
Jonah
Micah
Nahum
Habakkuk
Zephaniah
Haggai
Zechariah
Malachi
'''.strip().replace(' ', '_').split()

# map 1-based book numbers to underscore-joined book names
int2book = {n: book for n, book in enumerate(eng_book_list, start=1)}

# regex pattern for matching word ID info to its parts
# e.g. ('01', '001', '001', '001', '1')
# i.e. (bookN, chapterN, verseN, wordN, partN)
ref_id_re = re.compile('([0-9]{2})([0-9]{3})([0-9]{3})([0-9]{3})([1-9])')

def id2ref(id_int):
    """Convert GBI ID ref tag to a TF (book, chapter, verse) tuple.

    Args:
        id_int: 11- or 12-digit int packing book/chapter/verse/word/part
            into fixed-width digit fields; single-digit book numbers lose
            their leading zero when stored as an int.

    Returns:
        (book_name, chapter_int, verse_int); the word and part fields
        are parsed but intentionally discarded.
    """
    id_str = str(id_int)

    # fix ambiguity with lack of book padding in single-digit books
    if len(id_str) == 11:
        id_str = '0' + id_str

    # word/part groups are unused for versification, hence underscore names
    bookN, chapterN, verseN, _wordN, _partN = ref_id_re.match(id_str).groups()
    book = int2book.get(int(bookN))
    return (book, int(chapterN), int(verseN))
# -
# Now let's do a sanity check for book order. I'll do this by looking at the
# verse counts for each book. If we've got the book order wrong, we'll see some
# anomalous counts.
#
# [postscript: there was an anomaly when I first tried with Heb. book order, with
# Jonah showing a verse count of 2,523 😂, now it is fixed with Eng order]
# +
verse_data = []

# compile a dataset for easy dataframe statistics
for verse in niv_data:
    first_word = verse['manuscript']['words'][0]
    # NB: unpack into a fresh name; re-using `verse` here would clobber
    # the loop variable mid-iteration (the original only worked because
    # the loop re-binds `verse` on the next pass)
    book, chapter, verse_num = id2ref(first_word['id'])
    verse_data.append({'book': book, 'chapter': chapter, 'verse': verse_num})

verse_data_df = pd.DataFrame(verse_data)
# -
# preview the versification table
verse_data_df.head()
# +
# count n verses by book
verse_data_df.book.value_counts()
# -
len(L.d(T.nodeFromSection(('Job',)), 'verse')) # sanity check Job verse length
# Everything seems to be in order.
# ### 2. Preprocess GBI data and convert to dict of word ids with links paired to each word
# +
# Flatten GBI's per-"verse" alignment lists into one dict keyed by word id,
# attaching to each Hebrew word its TF reference, its linked English words,
# and the span of Hebrew peers covered by the same translation string.
gbi_words = {}
for pseudo_verse in niv_data:
    # unpack hebrew, english, and alignment data
    hebrew_words = pseudo_verse['manuscript']['words']
    english_words = pseudo_verse['translation']['words']
    links = pseudo_verse['links']
    # enter all hebrew words into gbi words
    # words not included in the translation are not included in the links
    # e.g. את in Gen 1:1 has no entry in links
    # so we must initialize all words with an empty links item
    for hw in hebrew_words:
        hw_id = hw['id']
        hw['tf_ref'] = id2ref(hw_id) # store TF tuple ref
        hw['links'] = [] # compile linked english word data here
        hw['trans_span'] = [] # see below
        gbi_words[hw_id] = hw
    # add alignment to words that have it
    for link in links:
        heb_indices, eng_indices = link
        # we need to keep track of those cases where
        # multiple Hebrew words are covered by the same
        # english translation string; these are cases where
        # the Hebrew side of the link contains more than 1 element
        # thus we build a list of all ids in the Heb side that will be
        # added as a key of the word's dictionary, trans_span
        heb_ids = [hebrew_words[i]['id'] for i in heb_indices]
        # store links and verse id data under word dict
        # and prepare word dict to be stored in gbi_words
        for hi in heb_indices:
            hword_id = hebrew_words[hi]['id']
            hword = gbi_words[hword_id]
            hword['trans_span'] = sorted(heb_ids) # list of peer Hebrew words in translation
            # collect linked english word data
            # NB: every Hebrew word in the link receives the full set of
            # English words on the other side of that link
            for ei in eng_indices:
                eword = english_words[ei]
                hword['links'].append(eword)
            # sort the english links
            # preprocess english gloss tag
            # put it in the dict
            hword['links'] = sorted(hword['links'], key=lambda k: k['id'])
# +
# # print example with multiple links in translation
# for w, wdat in gbi_words.items():
# if len(wdat['links']) > 2:
# pprint(wdat)
# break
# +
# gbi_words[10010010041] # NB empty links
# -
# ### 3. Build normalized verse strings with maps to ids
#
# We will also do this for BHSA, with maps to word node number.
# +
# define patterns and functions which are used to
# normalize both Hebrew texts to a plain consonantal
# version without punctuation or spacing
# pattern matches only Heb consonants for filtering
# define patterns and functions which are used to
# normalize both Hebrew texts to a plain consonantal
# version without punctuation or spacing

# pattern matches only Heb consonants for filtering
heb_cons = re.compile('[\u05D0-\u05EA]')

# map final-form Hebrew letters to their ordinary (medial) counterparts
final_letters = {
    'ך': 'כ',
    'ם': 'מ',
    'ן': 'נ',
    'ף': 'פ',
    'ץ': 'צ',
}

def unFinal(s):
    """Replace final Heb letters with non-final version.
    Credit <NAME>
    """
    # NB: original had a stray fourth quote that leaked a '"' into the docstring
    return ''.join(final_letters.get(c, c) for c in s)

def normalize_string(string):
    """Normalize BHSA/WLC strings to make them comparable.

    Decomposes the text (NFD), keeps only Hebrew consonant characters,
    collapses final letter forms, and strips RL marks and spaces so the
    two sources reduce to identical consonantal strings where possible.
    """
    string = unicode.normalize('NFD', string) # split base chars from diacritics
    string = ''.join(heb_cons.findall(string)) # strip vowels/points/other chars
    string = unFinal(string) # disambiguate final letters
    string = string.replace('\u200e', '') # remove RL character (GBI)
    string = string.replace(' ', '') # remove any latent spaces
    return string
# -
# For BHSA, we need to make two verse strings: one for ketiv, one for qere. Thus,
# we will plan to have multiple strings and associated indices for each one for both
# bhsa and gbi verses. The algorithm will then pick out any matching pair and use that as
# the basis for the alignment. Allowing multiple strings for GBI/WLC allows for the
# possibility of solving exceptions with manually added strings.
def build_string_data(word_ids, string_instructs):
    """Build strings from words with index maps.

    Uses the data in string_instructs to convert each word
    into a string and add that string to a large string.
    Maps each word id to a span of character indices for the larger
    string, telling which indices correspond with a given word.

    Args:
        word_ids: list of word ids unique to BHSA or WLC
        string_instructs: list of paired string names / string-making functions
            where string-maker takes a word id and converts to string

    Returns:
        list of three-tuples of name, string, and index mappings for words.
    """
    string_data = []

    # build data for each string type
    # (fix: dropped an unused `indices` defaultdict the original allocated)
    for name, stringifier in string_instructs:
        string = ''
        index = -1
        # word id -> list of character positions it occupies in `string`
        mapping = collections.defaultdict(list)
        for word in word_ids:
            for c in stringifier(word):
                string += c
                index += 1
                mapping[word].append(index)
        string_data.append((name, string, mapping))

    return string_data
# +
# here we define a bunch of stringifier functions;
# the variant strings produced by these
# functions will be compared pairwise with the
# gbi strings to look for any pairwise match;
# having numerous variants allows for a more robust
# matching process
# source name -> {ref_tuple: [(variant_name, string, index_map), ...]}
verse_strings = collections.defaultdict(dict)
# -- 1. BHSA --
def bhsa_qere(word):
    """Generate qere strings from BHSA word (falls back to ketiv text)"""
    string = F.qere_utf8.v(word) or F.g_word_utf8.v(word)
    string = normalize_string(string)
    return string
def bhsa_ketiv(word):
    """Generate ketiv strings from BHSA word"""
    string = F.g_word_utf8.v(word)
    string = normalize_string(string)
    return string
def bhsa_qere_art(word):
    """Generate qere with article made explicit.

    The article lexeme 'H' can normalize to an empty string when it is
    realized only as vocalization; restore a bare ה in that case.
    """
    string = bhsa_qere(word)
    if not string and F.lex.v(word) == 'H':
        string = 'ה'
    return string
def bhsa_ketiv_art(word):
    """Generate ketiv with article made explicit (see bhsa_qere_art)"""
    string = bhsa_ketiv(word)
    if not string and F.lex.v(word) == 'H':
        string = 'ה'
    return string
# iterate through all BHSA verses and build strings/indices
for verse in F.otype.s('verse'):
    ref_tuple = T.sectionFromNode(verse)
    verse_words = L.d(verse, 'word')
    string_instructs = [
        ('qere', bhsa_qere),
        ('ketiv', bhsa_ketiv),
        ('qere+art', bhsa_qere_art),
        ('ketiv+art', bhsa_ketiv_art),
    ]
    string_data = build_string_data(verse_words, string_instructs)
    verse_strings['bhsa'][ref_tuple] = string_data
# -- 2. WLC --
def gbi_string(word):
    """Generate normalized consonantal string for a GBI word id"""
    string = gbi_words[word]['text']
    string = normalize_string(string)
    return string
def gbi_string_art(word):
    """Build string that has vocalized article
    Vocalization of articles can differ between
    the two sources.
    """
    string = gbi_string(word)
    if not string and gbi_words[word]['lemma'] == 'הַ':
        string = 'ה'
    return string
# 34 words do not match with either ketiv or
# qere of BHSA; we have automatically generated
# edits for these cases to align the word with its
# BHSA equivalent; these edits do not alter the underlying
# text of the word but are temporary placeholders to enable alignment
# those changes are implemented through another stringifier function
# NB: words that are mapped to empty strings will not be matched to
# BHSA since they have no corresponding word
wlc_edits = {
    10140020171: {'original': 'צבויימ', 'mod': 'צביימ'},
    90250180131: {'original': 'עשוית', 'mod': 'עשויות'},
    100180080051: {'original': 'נפצת', 'mod': 'נפצית'},
    100180120062: {'original': 'לוא', 'mod': 'לא'},
    120160060152: {'original': 'אדומימ', 'mod': 'אדמימ'},
    120190230072: {'original': 'רב', 'mod': 'רכב'},
    230300050031: {'original': 'הביש', 'mod': 'הבאיש'},
    230420240042: {'original': 'משסה', 'mod': 'משוסה'},
    240050060221: {'original': 'משובותי', 'mod': 'משבותי'},
    240150110061: {'original': 'שריתי', 'mod': 'שרית'},
    240180160051: {'original': 'שריקות', 'mod': 'שריקת'},
    240420200031: {'original': 'התעיתמ', 'mod': 'התעתמ'},
    240480050042: {'original': 'לוחית', 'mod': 'לחית'},
    260320320052: {'original': 'י', 'mod': 'ו'},
    260400310013: {'original': 'ו', 'mod': 'יו'},
    260430110122: {'original': 'ו', 'mod': 'יו'},
    260440240091: {'original': 'ישפטו', 'mod': 'ישפט'},
    191400100062: {'original': 'מו', 'mod': 'ומו'},
    180060290072: {'original': 'שובו', 'mod': 'שבו'},
    180260140131: {'original': 'גבורותי', 'mod': 'גבורתי'},
    180380120071: {'original': 'ה', 'mod': ''},
    200130200021: {'original': 'הולכ', 'mod': 'הלכ'},
    200190070131: {'original': 'ל', 'mod': ''},
    200190070132: {'original': 'ו', 'mod': ''},
    80030140031: {'original': 'מרגלותי', 'mod': 'מרגלתי'},
    210050170201: {'original': 'חיי', 'mod': 'חי'},
    270020380041: {'original': 'דירינ', 'mod': 'דארינ'},
    270020410062: {'original': 'הנ', 'mod': 'הינ'},
    270030180093: {'original': 'כ', 'mod': 'יכ'},
    270040290172: {'original': 'כ', 'mod': 'יכ'},
    270050070263: {'original': 'א', 'mod': 'ה'},
    270050160071: {'original': 'תיכול', 'mod': 'תכול'},
    270050160233: {'original': 'א', 'mod': 'ה'},
    270100190102: {'original': 'ב', 'mod': 'כ'},
    160050090022: {'original': 'אומר', 'mod': 'אמר'}
}
def gbi_edited_string(word):
    """Remap word surface forms to facilitate matching.

    Falls back to the plain normalized string for words without an edit.
    """
    remapping = wlc_edits.get(word, None)
    if remapping:
        return remapping['mod']
    else:
        return gbi_string(word)
# cluster GBI/WLC words into verses
gbi_verses = collections.defaultdict(list)
for word, word_data in gbi_words.items():
    gbi_verses[word_data['tf_ref']].append(word_data['id'])
# iterate through verses and build strings/indices
for ref_tuple, words in gbi_verses.items():
    string_instructs = [
        ('string', gbi_string),
        ('string+art', gbi_string_art),
        ('edited_str', gbi_edited_string),
    ]
    string_data = build_string_data(words, string_instructs)
    verse_strings['gbi'][ref_tuple] = string_data
# -
# Look at a sample of the mappings / strings:
# show the variant strings and word -> index maps for one verse, both sources
verse_strings['bhsa'][('Genesis', 1, 1)]
verse_strings['gbi'][('Genesis', 1, 1)]
# Demonstration of string match:
verse_strings['bhsa'][('Genesis', 1, 1)][0][1] == verse_strings['gbi'][('Genesis', 1, 1)][0][1]
# ## Alignment Algorithm
#
# Now that the strings are ready, it is only a matter of iterating over the verses,
# matching up the strings, and cross-referencing the indices.
# +
# Align BHSA word nodes to WLC word ids verse-by-verse via the shared
# consonantal strings; verses with no matching string variant go to no_match.
bhsa2wlc = []
no_match = []
for ref_tuple, string_data in verse_strings['bhsa'].items():
    gbi_strings = verse_strings['gbi'][ref_tuple]
    # look for matching strings with pairwise iteration
    # if match, save indices for alignment
    bhsa_indices = None
    gbi_indices = None
    for b_str_name, b_str, b_indices in string_data:
        for g_str_name, g_str, g_indices in gbi_strings:
            if b_str == g_str:
                bhsa_indices = b_indices # set indices
                gbi_indices = g_indices
                break
        if bhsa_indices: # break double loop
            break
    # -- no match: record a null match --
    if not bhsa_indices:
        no_match.append([ref_tuple, string_data, gbi_strings])
        continue
    # -- match! continue on to alignment maneuver --
    # remap gbi string indices to be the keys for easy selection
    gbi_str2word = {
        str_index:word_id for word_id, indices in gbi_indices.items()
        for str_index in indices
    }
    # finally, make the matches by iterating through
    # string indices matched to wordnode, add to set
    # to avoid duplicates
    for wordnode, str_indices in bhsa_indices.items():
        aligned_gbi = set()
        for si in str_indices:
            gbi_word = gbi_str2word[si]
            aligned_gbi.add(gbi_word)
        # done! save result
        # check to see if WLC word has already been matched
        # with previous BHSA word; if so, expand BHSA side
        # of the alignment instead
        if bhsa2wlc and aligned_gbi.issubset(set(bhsa2wlc[-1][1])):
            bhsa2wlc[-1][0].append(wordnode)
        else:
            bhsa2wlc.append([[wordnode], sorted(aligned_gbi)])
print(len(bhsa2wlc), 'matches made')
print(len(no_match), 'matches missed')
# +
# Bridge missing links
def find_word_from_index(index_set, index_dict):
    """Return the first word whose string indices contain index_set.

    Returns None when no word's index span covers the requested indices.
    """
    return next(
        (word for word, spans in index_dict.items()
         if index_set <= set(spans)),
        None,
    )
def build_edits(stringset1, stringset2, debug=False):
    """Iterate through 2 stringsets and look for the differences
    Needed to manually correct unlinked verses

    Args:
        stringset1: iterable of (name, verse_string, index_dict) triples;
            the side whose words get corrected (GBI in this notebook).
        stringset2: iterable of (name, verse_string, index_dict) triples;
            the side treated as the correction target (BHSA).
        debug: when True, print each edit op and the compared strings.

    Returns:
        dict mapping word id -> {'original': text, 'mod': corrected text}.
    """
    # find closest pairwise match with edit distance
    # (sorting tuples puts the smallest Levenshtein distance first)
    scores = []
    for namei, stri, indi in stringset1:
        for namej, strj, indj in stringset2:
            scores.append((Levenshtein.distance(stri, strj), (namei, stri, indi), (namej, strj, indj)))
    closest_set = sorted(scores)[0]

    # calculate edits necessary
    source_set, dest_set = closest_set[1:]
    source_str, dest_str = source_set[1], dest_set[1]
    source_inds, dest_inds = source_set[2], dest_set[2]
    edit_ops = Levenshtein.editops(source_str, dest_str)

    # use edit instructions to find and fix
    # offending words, note "i" refers to index
    alterations = {}
    for op, source_i, dest_i in edit_ops:
        offending_word = find_word_from_index({source_i}, source_inds)
        ow_text = normalize_string(gbi_words[offending_word]['text'])

        # edit op indices are relative to the whole verse string
        # in order to relate it to a single word, must
        # first find out which index that word starts with in verse string
        # and adjust with the difference accordingly
        orig_source_i = source_i  # keep copy for debug
        first_i = source_inds[offending_word][0]
        source_i = source_i - first_i

        # apply corrections using indices
        if op == 'delete':
            mod_text = ow_text[:source_i] + ow_text[source_i+1:]
        elif op == 'insert':
            ins_char = dest_str[dest_i]
            mod_text = ow_text[:source_i] + ins_char + ow_text[source_i:]
        elif op == 'replace':
            repl_char = dest_str[dest_i]
            mod_text = ow_text[:source_i] + repl_char + ow_text[source_i+1:]

        # save corrections
        # NOTE(review): a later edit op on the same word overwrites an
        # earlier one here — presumably at most one op per word; confirm.
        alterations[offending_word] = {'original': ow_text, 'mod': mod_text}

        # provide printout of activity
        if debug:
            print(op)
            print(source_set[0], source_str)
            print(dest_set[0], dest_str)

    return alterations
# store edits here
gbi_word_alts = {}

debug = False  # flip to True to trace proposed edits without saving them
for nm in no_match:
    verse, bhsa_str, gbi_str = nm
    # note argument order: GBI is the source being corrected toward BHSA
    alterations = build_edits(gbi_str, bhsa_str, debug)
    if debug:
        print(verse)
        print(alterations)
        print()
        print('-'*60)
    else:
        gbi_word_alts.update(alterations)

if not debug:
    print(len(gbi_word_alts), 'alterations prepared')
# -

# Inspect the accumulated corrections.
gbi_word_alts
# # Examine Alignments
#
# We construct a datatable for exploring the aligned data and ensuring that
# everything has been aligned properly.
# Show up to 200 rows when eyeballing the alignment table.
pd.set_option('display.max_rows', 200)

# +
# Build a side-by-side table of every aligned BHSA/GBI word group
# for manual inspection.
dataset = []
for bhsa_words, gbi_wds in bhsa2wlc:
    # join multi-word groups with '|' so each row stays a single line
    bhsa_dat = lambda feat: '|'.join(Fs(feat).v(w) for w in bhsa_words)
    gbi_dat = lambda feat: '|'.join(gbi_words[w][feat] for w in gbi_wds)
    ref_tuple = T.sectionFromNode(bhsa_words[0])
    ref = '{} {}:{}'.format(*ref_tuple)
    book = ref_tuple[0]
    len_bhsa = len(bhsa_words)
    len_gbi = len(gbi_wds)
    dataset.append({
        'bhsa_nodes': tuple(bhsa_words),
        'gbi_wds': tuple(gbi_wds),
        'ref': ref,
        'book': book,
        'bhsa_nwords': len_bhsa,
        'gbi_nwords': len_gbi,
        'bhsa_text': bhsa_dat('g_word_utf8'),
        'gbi_text': gbi_dat('text').replace('\u200e', ''),  # strip LEFT-TO-RIGHT MARK chars
        'bhsa_lexs': bhsa_dat('voc_lex_utf8'),
        'gbi_lexs': gbi_dat('lemma'),
        'bhsa_glosses': bhsa_dat('gloss'),
        'gbi_glosses': gbi_dat('gloss'),
        'bhsa_pos': bhsa_dat('pdp'),
        'gbi_pos': gbi_dat('pos'),
    })

align_df = pd.DataFrame(dataset)
align_df.head()
# -
align_df.iloc[4215:4235] # random selection

# ## Statistics

# ### Number of words for each aligned portion per source

# +
# Cross-tabulate alignment sizes: rows = BHSA words per group,
# columns = GBI words per group; cell = count of such alignments.
nwords_counts = pd.pivot_table(
    align_df,
    index='bhsa_nwords',
    columns='gbi_nwords',
    aggfunc='size',
    fill_value=0,
)

nwords_counts
# -

# Here we see that vast majority of words are 1-to-1 alignments: 1 BHSA word for 1 WLC word.
# There are many cases of 1-to-2 alignments; the majority of these cases are due to WLC dividing
# pronominal suffixes into separate words, while BHSA stores this data as a feature of a single word.
# There are a few extreme cases; let's have a look.

align_df[(align_df.bhsa_nwords == 4) & (align_df.gbi_nwords == 1)]

# This proper name is treated as 4 separate words in BHSA; but 1 word in GBI.

align_df[(align_df.bhsa_nwords == 1) & (align_df.gbi_nwords == 3)]

align_df[(align_df.bhsa_nwords == 2) & (align_df.gbi_nwords == 1)].head(10)
# ### n-words by verbs
#
# This project is especially interested at the moment in BH verbs. We expect much
# less diversity in the number of words per alignment for verbs.
# +
# Same cross-tab as above, restricted to alignments whose BHSA side is a verb.
verb_df = align_df[align_df.bhsa_pos == 'verb']

nword_verbs = pd.pivot_table(
    verb_df,
    index='bhsa_nwords',
    columns='gbi_nwords',
    aggfunc='size',
    fill_value=0,
)

nword_verbs
# -

verb_df[(verb_df.bhsa_nwords == 1) & (verb_df.gbi_nwords == 3)]

# This is a case of both energic nun and a suffix.

verb_df[(verb_df.bhsa_nwords == 1) & (verb_df.gbi_nwords == 2)].head(10)
# The 1-to-2 words are mostly suffixed verbs.
# # Export Alignment
# +
# Output paths inside the GBI data directory.
GBI_WORDS_FILE = GBI_DATA_DIR.joinpath('gbi_words.json')
GBI_ALIGN_FILE = GBI_DATA_DIR.joinpath('bhsa2gbi.json')

# comment out for now; this may not need to be a separate file from the existing .json files
#with open(GBI_WORDS_FILE, 'w') as outfile:
#    json.dump(gbi_words, outfile)

# Persist the BHSA -> GBI word alignment pairs.
with open(GBI_ALIGN_FILE, 'w') as outfile:
    json.dump(bhsa2wlc, outfile)
# -
# ## Export Other Files
| workflow/notebooks/old_pipeline/gbi/GBI_alignment_wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.6 64-bit
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from pandas_datareader import data
def load_financial_data(start_date, end_date, output_file, ticker):
    """Load OHLC data for *ticker*, preferring a local pickle cache.

    On a cache miss the series is downloaded from Yahoo via
    pandas-datareader and pickled to *output_file* for next time.
    """
    try:
        return pd.read_pickle(output_file)
    except FileNotFoundError:
        frame = data.DataReader(ticker, 'yahoo', start_date, end_date)
        frame.to_pickle(output_file)
        return frame
# -
ticker = 'GOOG'
# NOTE(review): this rebinds the name `data`, shadowing the
# pandas_datareader `data` module imported above. It is safe only because
# load_financial_data (which uses the module) is not called again afterwards.
data = load_financial_data(start_date='2001-01-01', end_date='2018-01-01', output_file= ticker.lower() + '_data_large.pkl', ticker=ticker)
def create_classification_trading_condition(df):
    """Build features and a binary up/down target from OHLC data.

    Features:
        Open-Close: open minus close for the same bar
        High-Low:   intraday range
    Target Y: +1 when the next bar's close is above the current close,
    otherwise -1 (the final row compares against NaN, yielding -1).

    Returns:
        (X, Y): feature DataFrame and numpy array of +1/-1 labels.
    """
    df['Open-Close'] = df.Open - df.Close
    df['High-Low'] = df.High - df.Low
    df = df.dropna()
    # BUG FIX: the original indexed df[['Open-Close'], ['High-Low']] — a
    # tuple of two lists, which is an invalid DataFrame key and raises.
    # Select both feature columns with a single list of labels.
    X = df[['Open-Close', 'High-Low']]
    Y = np.where(df['Close'].shift(-1) > df['Close'], 1, -1)
    return (X, Y)
def create_regression_trading_condition(df):
    """Build features and a next-bar price-change target from OHLC data.

    Adds Open-Close, High-Low and Target columns, then drops rows with
    missing values (the final row always loses its Target to the shift).

    Returns:
        (df, X, Y): the cleaned frame, the feature matrix and the target.
    """
    df['Open-Close'] = df.Open - df.Close
    df['High-Low'] = df.High - df.Low
    df['Target'] = df['Close'].shift(-1) - df['Close']
    df = df.dropna()

    feature_cols = ['Open-Close', 'High-Low']
    X = df[feature_cols]
    Y = df[['Target']]
    return (df, X, Y)
# +
from sklearn.model_selection import train_test_split
def create_train_split_group(X, Y, split_ratio=0.8):
    """Chronological train/test split: the first *split_ratio* of rows train.

    shuffle=False preserves time order, avoiding look-ahead leakage.
    """
    return train_test_split(X, Y, train_size=split_ratio, shuffle=False)
| algo-trading/sample-ml.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# # Solving Stiff Equations
# ### <NAME>
#
# This tutorial is for getting into the extra features for solving stiff ordinary
# differential equations in an efficient manner. Solving stiff ordinary
# differential equations requires specializing the linear solver on properties of
# the Jacobian in order to cut down on the O(n^3) linear solve and the O(n^2)
# back-solves. Note that these same functions and controls also extend to stiff
# SDEs, DDEs, DAEs, etc.
#
# ## Code Optimization for Differential Equations
#
# ### Writing Efficient Code
#
# For a detailed tutorial on how to optimize one's DifferentialEquations.jl code,
# please see the
# [Optimizing DiffEq Code tutorial](http://tutorials.juliadiffeq.org/html/introduction/03-optimizing_diffeq_code.html).
#
# ### Choosing a Good Solver
#
# Choosing a good solver is required for getting top notch speed. General
# recommendations can be found on the solver page (for example, the
# [ODE Solver Recommendations](https://docs.juliadiffeq.org/latest/solvers/ode_solve)).
# The current recommendations can be simplified to a Rosenbrock method
# (`Rosenbrock23` or `Rodas5`) for smaller (<50 ODEs) problems, ESDIRK methods
# for slightly larger (`TRBDF2` or `KenCarp4` for <2000 ODEs), and Sundials
# `CVODE_BDF` for even larger problems. `lsoda` from
# [LSODA.jl](https://github.com/rveltz/LSODA.jl) is generally worth a try.
#
# More details on the solver to choose can be found by benchmarking. See the
# [DiffEqBenchmarks](https://github.com/JuliaDiffEq/DiffEqBenchmarks.jl) to
# compare many solvers on many problems.
#
# ### Check Out the Speed FAQ
#
# See [this FAQ](http://docs.juliadiffeq.org/latest/basics/faq.html#Performance-1)
# for information on common pitfalls and how to improve performance.
#
# ### Setting Up Your Julia Installation for Speed
#
# Julia uses an underlying BLAS implementation for its matrix multiplications
# and factorizations. This library is automatically multithreaded and accelerates
# the internal linear algebra of DifferentialEquations.jl. However, for optimality,
# you should make sure that the number of BLAS threads that you are using matches
# the number of physical cores and not the number of logical cores. See
# [this issue for more details](https://github.com/JuliaLang/julia/issues/33409).
#
# To check the number of BLAS threads, use:
ccall((:openblas_get_num_threads64_, Base.libblas_name), Cint, ())
# If I want to set this directly to 4 threads, I would use:
using LinearAlgebra
LinearAlgebra.BLAS.set_num_threads(4)
# Additionally, in some cases Intel's MKL might be a faster BLAS than the standard
# BLAS that ships with Julia (OpenBLAS). To switch your BLAS implementation, you
# can use [MKL.jl](https://github.com/JuliaComputing/MKL.jl) which will accelerate
# the linear algebra routines. Please see the package for the limitations.
#
# ### Use Accelerator Hardware
#
# When possible, use GPUs. If your ODE system is small and you need to solve it
# with very many different parameters, see the
# [ensembles interface](https://docs.juliadiffeq.org/latest/features/ensemble)
# and [DiffEqGPU.jl](https://github.com/JuliaDiffEq/DiffEqGPU.jl). If your problem
# is large, consider using a [CuArray](https://github.com/JuliaGPU/CuArrays.jl)
# for the state to allow for GPU-parallelism of the internal linear algebra.
#
# ## Speeding Up Jacobian Calculations
#
# When one is using an implicit or semi-implicit differential equation solver,
# the Jacobian must be built at many iterations and this can be one of the most
# expensive steps. There are two pieces that must be optimized in order to reach
# maximal efficiency when solving stiff equations: the sparsity pattern and the
# construction of the Jacobian. The construction is filling the matrix
# `J` with values, while the sparsity pattern determines the structure of `J`.
#
# The sparsity pattern is given by a prototype matrix, the `jac_prototype`, which
# will be copied to be used as `J`. The default is for `J` to be a `Matrix`,
# i.e. a dense matrix. However, if you know the sparsity of your problem, then
# you can pass a different matrix type. For example, a `SparseMatrixCSC` will
# give a sparse matrix. Additionally, structured matrix types like `Tridiagonal`,
# `BandedMatrix` (from
# [BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl)),
# `BlockBandedMatrix` (from
# [BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl)),
# and more can be given. DifferentialEquations.jl will internally use this matrix
# type, making the factorizations faster by utilizing the specialized forms.
#
# For the construction, there are 3 ways to fill `J`:
#
# - The default, which uses normal finite/automatic differentiation
# - A function `jac(J,u,p,t)` which directly computes the values of `J`
# - A `colorvec` which defines a sparse differentiation scheme.
#
# We will now showcase how to make use of this functionality with growing complexity.
#
# ### Declaring Jacobian Functions
#
# Let's solve the Rosenbrock equations:
#
# $$\begin{align}
# dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\
# dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\
# dy_3 &= 3*10^7 y_{2}^2 \\
# \end{align}$$
#
# In order to reduce the Jacobian construction cost, one can describe a Jacobian
# function by using the `jac` argument for the `ODEFunction`. First, let's do
# a standard `ODEProblem`:
# +
using DifferentialEquations
function rober(du,u,p,t)
    # Robertson chemical kinetics (stiff): u = concentrations, p = rate constants.
    y₁,y₂,y₃ = u
    k₁,k₂,k₃ = p
    du[1] = -k₁*y₁+k₃*y₂*y₃
    du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
    du[3] = k₂*y₂^2
    nothing  # in-place RHS returns nothing by convention
end
prob = ODEProblem(rober,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
sol = solve(prob,Rosenbrock23())
using Plots
plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))
# -
using BenchmarkTools
@btime solve(prob)
# Now we want to add the Jacobian. First we have to derive the Jacobian
# $\frac{df_i}{du_j}$ which is `J[i,j]`. From this we get:
# +
function rober_jac(J,u,p,t)
    # Analytical Jacobian J[i,j] = ∂f_i/∂u_j of the Robertson RHS above.
    y₁,y₂,y₃ = u
    k₁,k₂,k₃ = p
    J[1,1] = k₁ * -1
    J[2,1] = k₁
    J[3,1] = 0
    J[1,2] = y₃ * k₃
    J[2,2] = y₂ * k₂ * -2 + y₃ * k₃ * -1
    J[3,2] = y₂ * 2 * k₂
    J[1,3] = k₃ * y₂
    J[2,3] = k₃ * y₂ * -1
    J[3,3] = 0
    nothing
end
f = ODEFunction(rober, jac=rober_jac)
prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
@btime solve(prob_jac)
# -
# ### Automatic Derivation of Jacobian Functions
#
# But that was hard! If you want to take the symbolic Jacobian of numerical
# code, we can make use of [ModelingToolkit.jl](https://github.com/JuliaDiffEq/ModelingToolkit.jl)
# to symbolicify the numerical code and do the symbolic calculation and return
# the Julia code for this.
using ModelingToolkit
de = modelingtoolkitize(prob)
ModelingToolkit.generate_jacobian(de...)[2] # Second is in-place
# which outputs:
:((##MTIIPVar#376, u, p, t)->begin
#= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:65 =#
#= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:66 =#
let (x₁, x₂, x₃, α₁, α₂, α₃) = (u[1], u[2], u[3], p[1], p[2], p[3])
##MTIIPVar#376[1] = α₁ * -1
##MTIIPVar#376[2] = α₁
##MTIIPVar#376[3] = 0
##MTIIPVar#376[4] = x₃ * α₃
##MTIIPVar#376[5] = x₂ * α₂ * -2 + x₃ * α₃ * -1
##MTIIPVar#376[6] = x₂ * 2 * α₂
##MTIIPVar#376[7] = α₃ * x₂
##MTIIPVar#376[8] = α₃ * x₂ * -1
##MTIIPVar#376[9] = 0
end
#= C:\Users\accou\.julia\packages\ModelingToolkit\czHtj\src\utils.jl:67 =#
nothing
end)
# Now let's use that to give the analytical solution Jacobian:
jac = eval(ModelingToolkit.generate_jacobian(de...)[2])
f = ODEFunction(rober, jac=jac)
prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
# ### Declaring a Sparse Jacobian
#
# Jacobian sparsity is declared by the `jac_prototype` argument in the `ODEFunction`.
# Note that you should only do this if the sparsity is high, for example, 0.1%
# of the matrix is non-zeros, otherwise the overhead of sparse matrices can be higher
# than the gains from sparse differentiation!
#
# But as a demonstration, let's build a sparse matrix for the Rober problem. We
# can do this by gathering the `I` and `J` pairs for the non-zero components, like:
I = [1,2,1,2,3,1,2]
J = [1,1,2,2,2,3,3]
using SparseArrays
jac_prototype = sparse(I,J,1.0)
# Now this is the sparse matrix prototype that we want to use in our solver, which
# we then pass like:
f = ODEFunction(rober, jac=jac, jac_prototype=jac_prototype)
prob_jac = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
# ### Automatic Sparsity Detection
#
# One of the useful companion tools for DifferentialEquations.jl is
# [SparsityDetection.jl](https://github.com/JuliaDiffEq/SparsityDetection.jl).
# This allows for automatic declaration of Jacobian sparsity types. To see this
# in action, let's look at the 2-dimensional Brusselator equation:
const N = 32  # grid points per spatial dimension
const xyd_brusselator = range(0,stop=1,length=N)
# Forcing term: 5.0 inside the disc of radius 0.1 centred at (0.3, 0.6),
# switched on only for t >= 1.1; zero elsewhere.
brusselator_f(x, y, t) = (((x-0.3)^2 + (y-0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.
# Periodic boundary helper: wraps index 0 -> N and N+1 -> 1.
limit(a, N) = a == N+1 ? 1 : a == 0 ? N : a
function brusselator_2d_loop(du, u, p, t)
    # 2D Brusselator reaction-diffusion RHS on the N×N periodic grid.
    # u[:,:,1] and u[:,:,2] are the two species; p = (A, B, alpha, dx).
    A, B, alpha, dx = p
    alpha = alpha/dx^2  # fold grid spacing into the diffusion coefficient
    @inbounds for I in CartesianIndices((N, N))
        i, j = Tuple(I)
        x, y = xyd_brusselator[I[1]], xyd_brusselator[I[2]]
        # periodic neighbours via limit()
        ip1, im1, jp1, jm1 = limit(i+1, N), limit(i-1, N), limit(j+1, N), limit(j-1, N)
        # 5-point Laplacian + reaction terms (+ localized forcing on species 1)
        du[i,j,1] = alpha*(u[im1,j,1] + u[ip1,j,1] + u[i,jp1,1] + u[i,jm1,1] - 4u[i,j,1]) +
                    B + u[i,j,1]^2*u[i,j,2] - (A + 1)*u[i,j,1] + brusselator_f(x, y, t)
        du[i,j,2] = alpha*(u[im1,j,2] + u[ip1,j,2] + u[i,jp1,2] + u[i,jm1,2] - 4u[i,j,2]) +
                    A*u[i,j,1] - u[i,j,1]^2*u[i,j,2]
    end
end
p = (3.4, 1., 10., step(xyd_brusselator))
# Given this setup, we can give and example `input` and `output` and call `sparsity!`
# on our function with the example arguments and it will kick out a sparse matrix
# with our pattern, that we can turn into our `jac_prototype`.
using SparsityDetection, SparseArrays
input = rand(32,32,2)
output = similar(input)
sparsity_pattern = sparsity!(brusselator_2d_loop,output,input,p,0.0)
jac_sparsity = Float64.(sparse(sparsity_pattern))
# Let's double check what our sparsity pattern looks like:
using Plots
spy(jac_sparsity,markersize=1,colorbar=false,color=:deep)
# That's neat, and would be tedious to build by hand! Now we just pass it to the
# `ODEFunction` like as before:
f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity)
# Build the `ODEProblem`:
# +
function init_brusselator_2d(xyd)
    # Smooth bump initial condition for both species on the xyd × xyd grid.
    n = length(xyd)
    u = zeros(n, n, 2)
    for j in 1:n, i in 1:n
        x, y = xyd[i], xyd[j]
        u[i, j, 1] = 22 * (y * (1 - y))^(3 / 2)
        u[i, j, 2] = 27 * (x * (1 - x))^(3 / 2)
    end
    return u
end
u0 = init_brusselator_2d(xyd_brusselator)
prob_ode_brusselator_2d = ODEProblem(brusselator_2d_loop,
u0,(0.,11.5),p)
prob_ode_brusselator_2d_sparse = ODEProblem(f,
u0,(0.,11.5),p)
# -
# Now let's see how the version with sparsity compares to the version without:
@btime solve(prob_ode_brusselator_2d,save_everystep=false)
@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)
# ### Declaring Color Vectors for Fast Construction
#
# If you cannot directly define a Jacobian function, you can use the `colorvec`
# to speed up the Jacobian construction. What the `colorvec` does is allows for
# calculating multiple columns of a Jacobian simultaneously by using the sparsity
# pattern. An explanation of matrix coloring can be found in the
# [MIT 18.337 Lecture Notes](https://mitmath.github.io/18337/lecture9/stiff_odes).
#
# To perform general matrix coloring, we can use
# [SparseDiffTools.jl](https://github.com/JuliaDiffEq/SparseDiffTools.jl). For
# example, for the Brusselator equation:
using SparseDiffTools
colorvec = matrix_colors(jac_sparsity)
@show maximum(colorvec)
# This means that we can now calculate the Jacobian in 12 function calls. This is
# a nice reduction from 2048 using only automated tooling! To now make use of this
# inside of the ODE solver, you simply need to declare the colorvec:
f = ODEFunction(brusselator_2d_loop;jac_prototype=jac_sparsity,
colorvec=colorvec)
prob_ode_brusselator_2d_sparse = ODEProblem(f,
init_brusselator_2d(xyd_brusselator),
(0.,11.5),p)
@btime solve(prob_ode_brusselator_2d_sparse,save_everystep=false)
# Notice the massive speed enhancement!
#
# ## Defining Linear Solver Routines and Jacobian-Free Newton-Krylov
#
# A completely different way to optimize the linear solvers for large sparse
# matrices is to use a Krylov subspace method. This requires choosing a linear
# solver for changing to a Krylov method. Optionally, one can use a Jacobian-free
# operator to reduce the memory requirements.
#
# ### Declaring a Jacobian-Free Newton-Krylov Implementation
#
# To swap the linear solver out, we use the `linsolve` command and choose the
# GMRES linear solver.
@btime solve(prob_ode_brusselator_2d,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
@btime solve(prob_ode_brusselator_2d_sparse,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
# For more information on linear solver choices, see the
# [linear solver documentation](https://docs.juliadiffeq.org/latest/features/linear_nonlinear).
#
# On this problem, handling the sparsity correctly seemed to give much more of a
# speedup than going to a Krylov approach, but that can be dependent on the problem
# (and whether a good preconditioner is found).
#
# We can also enhance this by using a Jacobian-Free implementation of `f'(x)*v`.
# To define the Jacobian-Free operator, we can use
# [DiffEqOperators.jl](https://github.com/JuliaDiffEq/DiffEqOperators.jl) to generate
# an operator `JacVecOperator` such that `Jv*v` performs `f'(x)*v` without building
# the Jacobian matrix.
using DiffEqOperators
Jv = JacVecOperator(brusselator_2d_loop,u0,p,0.0)
# and then we can use this by making it our `jac_prototype`:
f = ODEFunction(brusselator_2d_loop;jac_prototype=Jv)
prob_ode_brusselator_2d_jacfree = ODEProblem(f,u0,(0.,11.5),p)
@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES()),save_everystep=false)
# ### Adding a Preconditioner
#
# The [linear solver documentation](https://docs.juliadiffeq.org/latest/features/linear_nonlinear/#iterativesolvers-jl-1)
# shows how you can add a preconditioner to the GMRES. For example, you can
# use packages like [AlgebraicMultigrid.jl](https://github.com/JuliaLinearAlgebra/AlgebraicMultigrid.jl)
# to add an algebraic multigrid (AMG) or [IncompleteLU.jl](https://github.com/haampie/IncompleteLU.jl)
# for an incomplete LU-factorization (iLU).
using AlgebraicMultigrid
pc = aspreconditioner(ruge_stuben(jac_sparsity))
@btime solve(prob_ode_brusselator_2d_jacfree,TRBDF2(linsolve=LinSolveGMRES(Pl=pc)),save_everystep=false)
# ## Using Structured Matrix Types
#
# If your sparsity pattern follows a specific structure, for example a banded
# matrix, then you can declare `jac_prototype` to be of that structure and then
# additional optimizations will come for free. Note that in this case, it is
# not necessary to provide a `colorvec` since the color vector will be analytically
# derived from the structure of the matrix.
#
# The matrices which are allowed are those which satisfy the
# [ArrayInterface.jl](https://github.com/JuliaDiffEq/ArrayInterface.jl) interface
# for automatically-colorable matrices. These include:
#
# - Bidiagonal
# - Tridiagonal
# - SymTridiagonal
# - BandedMatrix ([BandedMatrices.jl](https://github.com/JuliaMatrices/BandedMatrices.jl))
# - BlockBandedMatrix ([BlockBandedMatrices.jl](https://github.com/JuliaMatrices/BlockBandedMatrices.jl))
#
# Matrices which do not satisfy this interface can still be used, but the matrix
# coloring will not be automatic, and an appropriate linear solver may need to
# be given (otherwise it will default to attempting an LU-decomposition).
#
# ## Sundials-Specific Handling
#
# While much of the setup makes the transition to using Sundials automatic, there
# are some differences between the pure Julia implementations and the Sundials
# implementations which must be taken note of. These are all detailed in the
# [Sundials solver documentation](https://docs.juliadiffeq.org/latest/solvers/ode_solve/#ode_solve_sundials-1),
# but here we will highlight the main details which one should make note of.
#
# Defining a sparse matrix and a Jacobian for Sundials works just like any other
# package. The core difference is in the choice of the linear solver. With Sundials,
# the linear solver choice is done with a Symbol in the `linear_solver` from a
# preset list. Particular choices of note are `:Band` for a banded matrix and
# `:GMRES` for using GMRES. If you are using Sundials, `:GMRES` will not require
# defining the JacVecOperator, and instead will always make use of a Jacobian-Free
# Newton Krylov (with numerical differentiation). Thus on this problem we could do:
using Sundials
# Sparse Version
@btime solve(prob_ode_brusselator_2d_sparse,CVODE_BDF(),save_everystep=false)
# GMRES Version: Doesn't require any extra stuff!
@btime solve(prob_ode_brusselator_2d,CVODE_BDF(linear_solver=:GMRES),save_everystep=false)
# Details for setting up a preconditioner with Sundials can be found at the
# [Sundials solver page](https://docs.juliadiffeq.org/latest/solvers/ode_solve/#ode_solve_sundials-1).
#
# ## Handling Mass Matrices
#
# Instead of just defining an ODE as $u' = f(u,p,t)$, it can be common to express
# the differential equation in the form with a mass matrix:
#
# $$Mu' = f(u,p,t)$$
#
# where $M$ is known as the mass matrix. Let's solve the Robertson equation.
# At the top we wrote this equation as:
#
# $$\begin{align}
# dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\
# dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\
# dy_3 &= 3*10^7 y_{2}^2 \\
# \end{align}$$
#
# But we can instead write this with a conservation relation:
#
# $$\begin{align}
# dy_1 &= -0.04y₁ + 10^4 y_2 y_3 \\
# dy_2 &= 0.04 y_1 - 10^4 y_2 y_3 - 3*10^7 y_{2}^2 \\
# 1 &= y_{1} + y_{2} + y_{3} \\
# \end{align}$$
#
# In this form, we can write this as a mass matrix ODE where $M$ is singular
# (this is another form of a differential-algebraic equation (DAE)). Here, the
# last row of `M` is just zero. We can implement this form as:
# +
using DifferentialEquations
function rober(du,u,p,t)
    # Robertson system in mass-matrix (DAE) form: the third equation is the
    # algebraic conservation residual y₁ + y₂ + y₃ - 1 = 0, paired with a
    # zero row in the mass matrix M below.
    y₁,y₂,y₃ = u
    k₁,k₂,k₃ = p
    du[1] = -k₁*y₁+k₃*y₂*y₃
    du[2] = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
    du[3] = y₁ + y₂ + y₃ - 1
    nothing
end
M = [1. 0 0
0 1. 0
0 0 0]
f = ODEFunction(rober,mass_matrix=M)
prob_mm = ODEProblem(f,[1.0,0.0,0.0],(0.0,1e5),(0.04,3e7,1e4))
sol = solve(prob_mm,Rodas5())
plot(sol, xscale=:log10, tspan=(1e-6, 1e5), layout=(3,1))
# -
# Note that if your mass matrix is singular, i.e. your system is a DAE, then you
# need to make sure you choose
# [a solver that is compatible with DAEs](https://docs.juliadiffeq.org/latest/solvers/dae_solve/#dae_solve_full-1)
| notebook/advanced/02-advanced_ODE_solving.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Counterfactual with Reinforcement Learning (CFRL) on MNIST
# This method is described in [Model-agnostic and Scalable Counterfactual Explanations via Reinforcement Learning](https://arxiv.org/abs/2106.02597) and can generate counterfactual instances for any black-box model. The usual optimization procedure is transformed into a learnable process allowing to generate batches of counterfactual instances in a single forward pass even for high dimensional data. The training pipeline is model-agnostic and relies only on prediction feedback by querying the black-box model. Furthermore, the method allows target and feature conditioning.
#
# **We exemplify the use case for the TensorFlow backend. This means that all models: the autoencoder, the actor and the critic are TensorFlow models. Our implementation supports PyTorch backend as well.**
#
# CFRL uses [Deep Deterministic Policy Gradient (DDPG)](https://arxiv.org/abs/1509.02971) by interleaving a state-action function approximator called critic, with learning an approximator called actor to predict the optimal action. The method assumes that the critic is differentiable with respect to the action argument, thus allowing to optimize the actor's parameters efficiently through gradient-based methods.
#
# The DDPG algorithm requires two separate networks, an actor $\mu$ and a critic $Q$. Given the encoded representation of the input instance $z = enc(x)$, the model prediction $y_M$, the target prediction
# $y_T$ and the conditioning vector $c$, the actor outputs the counterfactual’s latent representation $z_{CF} = \mu(z, y_M, y_T, c)$. The decoder then projects the embedding $z_{CF}$ back to the original input space,
# followed by optional post-processing.
#
# The training step consists of simultaneously optimizing the actor and critic networks. The critic regresses on the reward $R$ determined by the model prediction, while the actor maximizes the critic’s output for the given instance through $L_{max}$. The actor also minimizes two objectives to encourage the generation of sparse, in-distribution counterfactuals. The sparsity loss $L_{sparsity}$ operates on the decoded counterfactual $x_{CF}$ and combines the $L_1$ loss over the standardized numerical features and the $L_0$ loss over the categorical ones. The consistency loss $L_{consist}$ aims to encode the counterfactual $x_{CF}$ back to the same latent representation where it was decoded from and helps to produce in-distribution counterfactual instances. Formally, the actor's loss can be written as:
# $L_{actor} = L_{max} + \lambda_{1}L_{sparsity} + \lambda_{2}L_{consistency}$
# +
import os
import numpy as np
import matplotlib.pyplot as plt
from typing import Dict
import tensorflow as tf
import tensorflow.keras as keras
from alibi.explainers import CounterfactualRL
from alibi.models.tensorflow.autoencoder import AE
from alibi.models.tensorflow.actor_critic import Actor, Critic
from alibi.models.tensorflow.cfrl_models import MNISTEncoder, MNISTDecoder, MNISTClassifier
from alibi.explainers.cfrl_base import Callback
# -
# ## Load MNIST dataset
# +
# Define constants.
BATCH_SIZE = 64
BUFFER_SIZE = 1024
# Load MNIST dataset.
(X_train, Y_train), (X_test, Y_test) = tf.keras.datasets.mnist.load_data()
# Expand dimensions and normalize.
X_train = np.expand_dims(X_train, axis=-1).astype(np.float32) / 255.
X_test = np.expand_dims(X_test, axis=-1).astype(np.float32) / 255.
# Define trainset.
trainset_classifier = tf.data.Dataset.from_tensor_slices((X_train, Y_train))
trainset_classifier = trainset_classifier.shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE)
# Define testset.
testset_classifier = tf.data.Dataset.from_tensor_slices((X_test, Y_test))
testset_classifier = testset_classifier.shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE)
# -
# ### Define and train CNN classifier
# +
# Number of classes.
NUM_CLASSES = 10
EPOCHS = 5

# Define classifier path and create dir if it doesn't exist.
classifier_path = os.path.join("tensorflow", "MNIST_classifier")
if not os.path.exists(classifier_path):
    os.makedirs(classifier_path)

# Construct classifier. This is the classifier used in the paper experiments.
classifier = MNISTClassifier(output_dim=NUM_CLASSES)

# Define optimizer and loss function.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# from_logits=True: the loss expects raw (unnormalized) classifier outputs.
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Compile the model.
classifier.compile(optimizer=optimizer,
                   loss=loss,
                   metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# Train from scratch only when no saved model exists yet.
if len(os.listdir(classifier_path)) == 0:
    # Fit and save the classifier.
    classifier.fit(trainset_classifier, epochs=EPOCHS)
    classifier.save(classifier_path)
else:
    # Load the classifier if already fitted.
    classifier = keras.models.load_model(classifier_path)
# -
# Evaluate the classifier: returns (loss, accuracy) on the held-out test set.
loss, accuracy = classifier.evaluate(testset_classifier)
# ### Define the predictor (black-box)
# Now that we've trained the CNN classifier, we can define the black-box model. Note that the output of the black-box is a distribution which can be either a soft-label distribution (probabilities/logits for each class) or a hard-label distribution (one-hot encoding). Internally, CFRL takes the `argmax`. Moreover the output **DOES NOT HAVE TO BE DIFFERENTIABLE**.
# Define predictor function (black-box) used to train the CFRL
def predictor(X: np.ndarray):
    """Black-box prediction function: raw classifier outputs as a numpy array."""
    return classifier(X).numpy()
# ### Define and train autoencoder
# Instead of directly modeling the perturbation vector in the potentially high-dimensional input space, we first train an autoencoder. The weights of the encoder are frozen and the actor applies the
# counterfactual perturbations in the latent space of the encoder. The pre-trained decoder maps the counterfactual embedding back to the input feature space.
#
# The autoencoder follows a standard design. The model is composed from two submodules, the encoder and the decoder. The forward pass consists of passing the input to the encoder, obtain the input embedding and pass the embedding through the decoder.
#
# ```python
# class AE(keras.Model):
# def __init__(self, encoder: keras.Model, decoder: keras.Model, **kwargs) -> None:
# super().__init__(**kwargs)
# self.encoder = encoder
# self.decoder = decoder
#
# def call(self, x: tf.Tensor, **kwargs):
# z = self.encoder(x)
# x_hat = self.decoder(z)
# return x_hat
# ```
# +
# Autoencoder pipelines: each input reconstructs itself, i.e. pairs (x, x).
trainset_ae = (
    tf.data.Dataset.from_tensor_slices(X_train)
    .map(lambda x: (x, x))
    .shuffle(buffer_size=BUFFER_SIZE)
    .batch(BATCH_SIZE)
)

# Define autoencoder testset.
testset_ae = (
    tf.data.Dataset.from_tensor_slices(X_test)
    .map(lambda x: (x, x))
    .shuffle(buffer_size=BUFFER_SIZE)
    .batch(BATCH_SIZE)
)
# +
# Define autoencoder path and create dir if it doesn't exist.
ae_path = os.path.join("tensorflow", "MNIST_autoencoder")
if not os.path.exists(ae_path):
    os.makedirs(ae_path)

# Latent dimension of the encoder; CFRL later acts in this latent space.
LATENT_DIM = 64
EPOCHS = 50  # NOTE: rebinds the EPOCHS used earlier for the classifier.

# Define autoencoder (encoder + decoder submodules, see AE sketch above).
ae = AE(encoder=MNISTEncoder(latent_dim=LATENT_DIM),
        decoder=MNISTDecoder())

# Define optimizer and loss function. `from_logits=False` because the decoder
# output is compared directly against pixel values in [0, 1].
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss = keras.losses.BinaryCrossentropy(from_logits=False)

# Compile autoencoder.
ae.compile(optimizer=optimizer, loss=loss)

# Train only if no saved model exists yet; otherwise reuse the saved weights.
if len(os.listdir(ae_path)) == 0:
    # Fit and save autoencoder.
    ae.fit(trainset_ae, epochs=EPOCHS)
    ae.save(ae_path)
else:
    # Load the model.
    ae = keras.models.load_model(ae_path)
# -
# ### Test the autoencoder
# +
# Number of test digits to visualise.
NUM_SAMPLES = 5

# Draw reproducible random test indices and gather them into one batch.
np.random.seed(0)
indices = np.random.choice(X_test.shape[0], NUM_SAMPLES)
inputs = np.stack([X_test[i].reshape(28, 28, 1) for i in indices], axis=0)

# Reconstruct the sampled batch with the autoencoder.
inputs_hat = ae(inputs).numpy()
# +
# Plot inputs (top row) against their reconstructions (bottom row).
plt.rcParams.update({'font.size': 22})
fig, ax = plt.subplots(2, NUM_SAMPLES, figsize=(25, 10))
for i in range(NUM_SAMPLES):
    ax[0][i].imshow(inputs[i], cmap='gray')
    ax[1][i].imshow(inputs_hat[i], cmap='gray')
# Row labels: x = original, x_hat = autoencoder reconstruction.
text1 = ax[0][0].set_ylabel("x")
text2 = ax[1][0].set_ylabel("x_hat")
# -
# ### Counterfactual with Reinforcement Learning
# Define constants
COEFF_SPARSITY = 7.5               # sparsity coefficient
COEFF_CONSISTENCY = 0              # consistency coefficient -> no consistency term used
TRAIN_STEPS = 50000                # number of training steps -> consider increasing the number of steps
BATCH_SIZE = 100                   # batch size (NOTE: rebinds the earlier data-pipeline BATCH_SIZE)

# #### Define and fit the explainer

# Define explainer around the frozen encoder/decoder and the black-box predictor.
explainer = CounterfactualRL(predictor=predictor,
                             encoder=ae.encoder,
                             decoder=ae.decoder,
                             latent_dim=LATENT_DIM,
                             coeff_sparsity=COEFF_SPARSITY,
                             coeff_consistency=COEFF_CONSISTENCY,
                             train_steps=TRAIN_STEPS,
                             batch_size=BATCH_SIZE,
                             backend="tensorflow")

# Fit the explainer on the training images (no labels required here).
explainer = explainer.fit(X=X_train)
# #### Test explainer
# Generate counterfactuals for some test instances, all targeted at class 2.
explanation = explainer.explain(X_test[0:200], Y_t=np.array([2]), batch_size=100)

# +
# Top row: original digits; bottom row: their counterfactuals toward class 2.
fig, ax = plt.subplots(2, NUM_SAMPLES, figsize=(25, 10))
for i in range(NUM_SAMPLES):
    ax[0][i].imshow(explanation.data['orig']['X'][i], cmap='gray')
    ax[1][i].imshow(explanation.data['cf']['X'][i], cmap='gray')
    ax[0][i].set_xlabel("Label: " + str(explanation.data['orig']['class'][i]))
    ax[1][i].set_xlabel("Label: " + str(explanation.data['cf']['class'][i]))
text1 = ax[0][0].set_ylabel("X")
text2 = ax[1][0].set_ylabel("X_hat")
# -
# ### Logging
# Logging is clearly important when dealing with deep learning models. Thus, we provide an interface to write custom callbacks for logging purposes after each training step which we defined [here](../api/alibi.explainers.cfrl_base.rst#alibi.explainers.cfrl_base.Callback). In the following cells we provide some example to log in **Weights and Biases**.
# #### Logging reward callback
class RewardCallback(Callback):
    """Logs the mean counterfactual reward to Weights & Biases every 100 steps."""

    def __call__(self,
                 step: int,
                 update: int,
                 model: CounterfactualRL,
                 sample: Dict[str, np.ndarray],
                 losses: Dict[str, float]):
        # Only log once every 100 training steps.
        if step % 100 != 0:
            return

        # Score the generated counterfactuals against their target labels.
        Y_m_cf = predictor(sample["X_cf"])
        reward = np.mean(model.params["reward_func"](Y_m_cf, sample["Y_t"]))
        wandb.log({"reward": reward})
# #### Logging images callback
class ImagesCallback(Callback):
    """Logs a grid of inputs, counterfactuals and their differences to W&B."""

    def __call__(self,
                 step: int,
                 update: int,
                 model: CounterfactualRL,
                 sample: Dict[str, np.ndarray],
                 losses: Dict[str, float]):
        # Log every 100 steps.
        if step % 100 != 0:
            return

        # Define number of samples to be displayed.
        NUM_SAMPLES = 10

        X = sample["X"][:NUM_SAMPLES]          # input instances
        X_cf = sample["X_cf"][:NUM_SAMPLES]    # counterfactuals
        diff = np.abs(X - X_cf)                # pixel-wise differences

        # BUG FIX: take the argmax over the class distributions *before* any
        # integer cast. The previous code cast the raw model outputs to int,
        # which truncates probabilities/logits and can change the argmax
        # (e.g. logits 1.9 and 1.2 both truncate to 1).
        Y_m = np.argmax(sample["Y_m"][:NUM_SAMPLES], axis=1).astype(int)  # input labels
        Y_t = np.argmax(sample["Y_t"][:NUM_SAMPLES], axis=1).astype(int)  # target labels
        Y_m_cf = np.argmax(predictor(X_cf), axis=1).astype(int)           # counterfactual labels

        # Concatenate the images of each group horizontally into one row.
        X = np.concatenate(X, axis=1)
        X_cf = np.concatenate(X_cf, axis=1)
        diff = np.concatenate(diff, axis=1)

        # Stack the three rows into a single image.
        img = np.concatenate([X, X_cf, diff], axis=0)

        # Construct a caption listing the label of each column.
        caption = ""
        caption += "Input:\t%s\n" % str(list(Y_m))
        caption += "Target:\t%s\n" % str(list(Y_t))
        caption += "Predicted:\t%s\n" % str(list(Y_m_cf))

        # Log image.
        wandb.log({"samples": wandb.Image(img, caption=caption)})
# #### Logging losses callback
class LossCallback(Callback):
    """Logs the training losses to Weights & Biases every 100 update ticks."""

    def __call__(self,
                 step: int,
                 update: int,
                 model: CounterfactualRL,
                 sample: Dict[str, np.ndarray],
                 losses: Dict[str, float]):
        # Skip everything except every 100th (step + update) tick.
        if (step + update) % 100 != 0:
            return
        wandb.log(losses)
# Having defined the callbacks, we can define a new explainer that will include logging.
#
# ```python
# import wandb
#
# # Initialize wandb.
# wandb_project = "MNIST Counterfactual with Reinforcement Learning"
# wandb.init(project=wandb_project)
#
# # Define explainer as before and include callbacks.
# explainer = CounterfactualRL(...,
# callbacks=[RewardCallback(), ImagesCallback()])
#
# # Fit the explainer.
# explainer.fit(X=X_train)
#
# # Close wandb.
# wandb.finish()
# ```
| examples/cfrl_mnist.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xeus-cling-cpp14
// ---
// + nbgrader={"grade": false, "grade_id": "cell-9783944a2d477680", "locked": true, "schema_version": 1, "solution": false}
#include <iostream>
// + nbgrader={"grade": false, "grade_id": "cell-784db4648f7e4753", "locked": true, "schema_version": 1, "solution": false}
using namespace std;
// + nbgrader={"grade": false, "grade_id": "cell-537b715dc2dffb7e", "locked": true, "schema_version": 1, "solution": false}
cout << "Hello World" << endl;
// + nbgrader={"grade": true, "grade_id": "cell-91d68551d51b9f38", "locked": false, "points": 5, "schema_version": 1, "solution": true}
// BEGIN SOLUTION HERE
// Graded cell (5 points): print the greeting, matching the example above.
cout << "Hello World" << endl;
// END SOLUTION HERE
// -
| source/Test-1/Hello World.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:env_multilingual_class]
# language: python
# name: conda-env-env_multilingual_class-py
# ---
# + Collapsed="false"
from google.cloud import translate
import os
# + Collapsed="false"
def batch_translate_text(
    input_uri="gs://"+os.environ['BUCKET_NAME']+"/en_small.txt",
    output_uri="gs://"+os.environ['BUCKET_NAME_TRANSLATION'],
    project_id=os.environ['PROJECT_ID']
):
    """Translates a batch of texts on GCS and stores the result in a GCS location.

    Args:
        input_uri: GCS URI of the plain-text source file to translate.
        output_uri: GCS URI prefix where the translated files are written.
        project_id: GCP project that owns the translation job.
    """
    client = translate.TranslationServiceClient()

    # Batch translation is only available in the us-central1 location.
    location = "us-central1"

    # Supported file types: https://cloud.google.com/translate/docs/supported-formats
    gcs_source = {"input_uri": input_uri}
    input_configs_element = {
        "gcs_source": gcs_source,
        "mime_type": "text/plain"  # Can be "text/plain" or "text/html".
    }
    gcs_destination = {"output_uri_prefix": output_uri}
    output_config = {"gcs_destination": gcs_destination}

    # FIX: build the parent resource name directly. `client.location_path`
    # was removed in google-cloud-translate >= 2.0, while this plain string
    # form works with every client version.
    parent = f"projects/{project_id}/locations/{location}"

    # Supported language codes: https://cloud.google.com/translate/docs/language
    operation = client.batch_translate_text(
        parent=parent,
        source_language_code="en",
        target_language_codes=["fr", "de"],  # Up to 10 language codes here.
        input_configs=[input_configs_element],
        output_config=output_config)

    print(u"Waiting for operation to complete...")
    # Block for up to 180 seconds; raises if the job is not done in time.
    response = operation.result(180)

    print(u"Total Characters: {}".format(response.total_characters))
    print(u"Translated Characters: {}".format(response.translated_characters))
# + Collapsed="false"
# Kick off the batch translation job using the default environment-based URIs.
batch_translate_text()
| notebook/00-Test/translation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Create a NumPy ndarray Object
# + active=""
# NumPy is used to work with arrays. The array object in NumPy is called ndarray.
#
# We can create a NumPy ndarray object by using the array() function.
# +
import numpy as np

# Any sequence type works as the source; here a plain Python list.
values = [1, 2, 3, 4, 5]
arr = np.array(values)

print(arr)
print(type(arr))
# + active=""
# To create an ndarray, we can pass a list, tuple or any array-like object into the array() method, and it will be converted into an ndarray:
# -
# Use a tuple to create a NumPy array:
# +
import numpy as np

# A tuple is converted to an ndarray exactly like a list.
source = (1, 2, 3, 4, 5)
arr = np.array(source)

print(arr)
# -
# # Dimensions in Arrays
# + active=""
# A dimension in arrays is one level of array depth.
#
# i.e (nested arrays).
# + active=""
# nested array: are arrays that have arrays as their elements.
# -
# # 0-D Arrays
# + active=""
# 0-D arrays, or Scalars, are the elements in an array. Each value in an array is a 0-D array.
# -
# Create a 0-D array with value 42
# +
import numpy as np

# A Python scalar becomes a 0-D array: a single value with zero axes.
arr = np.asarray(42)

print(arr)
# -
# # 1-D Arrays
# + active=""
# An array that has 0-D arrays as its elements is called uni-dimensional or 1-D array.
#
# These are the most common and basic arrays.
# -
# Create a 1-D array containing the values 1,2,3,4,5:
# +
import numpy as np

# A flat sequence of scalars yields a 1-D array; arange(1, 6) gives 1..5.
arr = np.arange(1, 6)

print(arr)
# -
# # 2-D Arrays
# + active=""
# An array that has 1-D arrays as its elements is called a 2-D array.
# + active=""
# These are often used to represent matrix or 2nd order tensors.
# -
# Create a 2-D array containing two arrays with the values 1,2,3 and 4,5,6:
# +
import numpy as np

# Two 1-D rows stacked into a single 2-D array (a 2x3 matrix).
rows = [[1, 2, 3], [4, 5, 6]]
arr = np.array(rows)

print(arr)
# -
# # 3-D arrays
# + active=""
# An array that has 2-D arrays (matrices) as its elements is called 3-D array.
# + active=""
# These are often used to represent a 3rd order tensor.
# -
# Create a 3-D array with two 2-D arrays, both containing two arrays with the values 1,2,3 and 4,5,6:
# +
import numpy as np

# Two identical 2-D matrices stacked into a 3-D array.
matrix = [[1, 2, 3], [4, 5, 6]]
arr = np.array([matrix, matrix])

print(arr)
# + active=""
# NOTE:NumPy has a whole sub module dedicated towards matrix operations called numpy.mat
# -
# # Check Number of Dimensions?
# + active=""
# By ndim attribute
# -
# Check how many dimensions the arrays have:
# +
import numpy as np

# One array per dimensionality: scalar, vector, matrix, 3-D tensor.
a = np.array(42)
b = np.array([1, 2, 3, 4, 5])
c = np.array([[1, 2, 3], [4, 5, 6]])
d = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])

# ndim reports the number of axes of each array.
for array in (a, b, c, d):
    print(array.ndim)
# -
# # Higher Dimensional Arrays
# + active=""
# An array can have any number of dimensions.
# + active=""
# When the array is created, you can define the number of dimensions by using the ndmin argument.
# -
# Create an array with 5 dimensions and verify that it has 5 dimensions:
# +
import numpy as np

# ndmin left-pads the shape with length-1 axes until the array has 5 dims.
arr = np.array([1, 2, 3, 4], ndmin=5)

print(arr)
print('number of dimensions :', arr.ndim)
| 2. NumPy Creating Arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# 
#
# ---
# # Cookbook 6: Basic MNIST in TensorFlow
#
# **Author list:** <NAME>
#
# **References / Sources:**
# * https://www.tensorflow.org/versions/r0.9/tutorials/mnist/beginners/index.html
#
# **License Agreement:** Feel free to do whatever you want with this code
#
# ___
# *In this notebook we will train a neural network to recognize handwritten digits*
# ## Welcome!
#
# In this tutorial, you will be build and train a basic digit recognition network in TensorFlow. This doesn't quite count as "deep learning" yet, as we will not have any hidden layers in between the input and output layers. However, this model can be easily generalized to handle more complex neural network architectures.
# First, we need to import the data we are going to work with. TensorFlow has some sample data ready for us.
# Download (if necessary) and load the MNIST splits, with one-hot labels.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# This sets "mnist" to an object that holds our data. We will see how to access this data later. Each training set holds an input and an output vector. The input vector is a flattened 28 x 28 image. The output vector is a "one_hot" vector (hence the one_hot setting above) that has ten entries, each one representing a digit. A one_hot vector is a vector common in classification that has a 1 in the correct entry, and 0s everywhere else. In our case, the one_hot vector representing 4 would be [0,0,0,0,1,0,0,0,0,0].
#
# Now, let's import the actual tensorflow module.
import tensorflow as tf
# Next, we are going to initialize a variable for to represent the input. Each row is of length 28 * 28 = 728, the total number of pixels. Since we don't know how many rows we are going to have, we can leave the first dimension with size None. Note that this is mathematically fine since "x" is the leftmost matrix in any multiplication, thus leaving the number of rows invariant.
# Placeholder for a batch of flattened 28x28 images (784 pixels each).
x = tf.placeholder(tf.float32, [None, 784])
# TensorFlow works by first setting up the network architecture, then feeding in training data for training later. This "x" has no value, it simply represents where the inputs will go later. The "tf.float32" tells TensorFlow the promised type of the later inputs.
#
# Now, let's create the weight and bias variables. The weight matrix will multiply "x" on the right and output a vector with ten entries, so it has dimensions 784 x 10. We want to add the bias vector to every row of this output. Fortunately, TensorFlow does this automatically for us if we simply use the additive notation, as we'll see later, so we can just give it size 1 x 10.
# Trainable weights (784 -> 10) and per-class biases, initialised to zero.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# A "variable" type in TensorFlow is something that TensorFlow will adjust the value of during training. We initialize all the values initially to zero with "tf.zeros."
#
# Finally, we perform the multiplication, addition, and apply the softmax function to the output vector, and store the result in "y." You can look up the exact definition of the softmax function, but it is essentially a normalized logistic sigmoid function, and will "squash" the entries of the vector to add up to 1. This allows us to look at the output vector as a probability distribution over the possible digits 0-9.
# Linear layer followed by softmax: each row of y is a distribution over digits.
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Note here that "xW" will give us a matrix of dimensions None x 10. Adding a vector of dimension 1 x 10 to this should make no sense, but TensorFlow automatically expands our "b" to be of size None x 10, thus adding "b" to every row.
#
# Now, we'll create a placeholder object for the target output (which will hold the actual value when we are training). It gets dimensions None x 10, to match "y."
# Placeholder for the one-hot target labels, matching y's shape.
y_ = tf.placeholder(tf.float32, [None, 10])
# Now, we will define our loss function. We will be using cross entropy. This may be slightly different from cross entropy function you are familiar with. We first take the log of each element in our predicted vectors, then do a term by term multiplication of each entry with our target vectors. Then, we add up the entries of each vector using "reduce_mean." The "reduction_indices=[1]" tells TensorFlow to reduce "across" the vector instead "down" it. Then, we will negate it, since all the entries are between 0 and 1 and log will have produced very large negative numbers for entries farther from 1. Finally, we will average over the errors calculated for each vector to get our loss value.
# Cross-entropy loss: sum over classes per example, then mean over the batch.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# You may notice that since "y_" will be a one hot vector, we will only be accounting for the error of one term, as everything else gets zeroed out. That means that we only care about minimizing the error of one entry: the entry in the correct digit position. At first, this may seem strange, but remember that we are using the softmax function, which normalizes all of our inputs to add up to 1. Making the correct value closer to 1 will, by extension, push the incorrect values closer to 0.
#
# Now we define a training step. Each call to the training step should do one step of optimization. We will simply be doing gradient descent.
# One gradient-descent step (learning rate 0.5) on the cross-entropy loss.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# 0.5 is our learning rate, and we place the loss function we want to minimize in the minimize() function.
#
# Next, let's establish a way for us to more practically measure how well our model does. Obviously, we want our model to be correct more often than not, so let's look at the percentage of predictions we are getting right.
# Fraction of samples whose most-likely predicted digit matches the label.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# "tf.argmax" returns the position of the entry in a vector with the maximum value. Thus, argmax on [0.1, 0.6, 0.3] would return 1, as that is the index of 0.6. Calling argmax on our predicted vector basically gives us the entry with the "highest likelihood" of being the correct digit, if we view the vector as a probability distribution over the digits. "tf.equal" returns True if the entries are equal and False otherwise. Hence, "correct_prediction" gives us a list of booleans. To see what percentage we got correct, we simply cast the bools to floats (True = 1, False = 0), and average over the resulting vector.
#
# Finally, we are ready to start training our network. We need to initialize all of the variables and start the session. This is mostly just TensorFlow convention.
# NOTE(review): tf.initialize_all_variables() is the TF r0.x spelling; it was
# deprecated in favour of tf.global_variables_initializer() in later TF 1.x.
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Now, we run our "train_step" 1000 times on some data. We get this data by calling mnist.train.next_batch(100), which gives us 100 randomly selected data points, split respectively into our x values and our y values.
# Run 1000 training steps, each on a fresh random mini-batch of 100 examples.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Note how we run train_step. We don't just call train_step(), for instance. We need to feed data into the session. This is done through the feed dictionary, which will give the associated placholders the value you give them in the dictionary. Running a function in TensorFlow typically takes the form sess.run(function, feed_dict{placeholderName: actual_data}).
#
# Lastly, we print our final accuracy. Note again the way we call the accuracy function.
# Report accuracy on the held-out test set.
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
| 06b-tools-tensorflow/spring2017/basic_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [Plot] Subplots
# * 將塞太多組別的圖,拆成多張檢視
# # [教學目標]
# - 以下程式碼將示範如何將多張圖形, 使用 Subplot 與其參數排定顯示相對位置
# # [範例重點]
# - 傳統的 subplot 三碼 (row,column,idx) 繪製法 (In[6], Out[6])
# - subplot index 超過 10 以上的繪圖法 (In[7], Out[7])
# +
# Load the required packages.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns  # another plotting/style package

# Render plots inline and suppress warning messages.
# %matplotlib inline
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')

# Set the data path.
dir_data = '../data/'
# -
# Read the application_train.csv file.
f_app = os.path.join(dir_data, 'application_train.csv')
print('Path of read in data: %s' % (f_app))
app_train = pd.read_csv(f_app)
app_train.head()

# Data clean-up: take the absolute value of every 'DAYS_BIRTH' entry.
app_train['DAYS_BIRTH'] = abs(app_train['DAYS_BIRTH'])

# +
# Split customers into age groups (age interval vs. repayment status).
age_data = app_train[['TARGET', 'DAYS_BIRTH']] # subset
age_data['YEARS_BIRTH'] = age_data['DAYS_BIRTH'] / 365 # day-age to year-age

# Discretise the continuous ages into bins.
age_data['YEARS_BINNED'] = pd.cut(age_data['YEARS_BIRTH'],
                                  bins = np.linspace(20, 70, num = 11)) # 20 to 70 years, 11 cut points (10 groups)
print(age_data['YEARS_BINNED'].value_counts())
age_data.head()
# +
# Sort the bin categories so the KDE curves are drawn in age order.
year_group_sorted = np.sort(age_data['YEARS_BINNED'].unique())
age_data.head()

# Draw one KDE curve per age bin and repayment status (TARGET 0 / 1).
plt.figure(figsize=(8,6))
for i in range(len(year_group_sorted)):
    sns.distplot(age_data.loc[(age_data['YEARS_BINNED'] == year_group_sorted[i]) & \
                              (age_data['TARGET'] == 0), 'YEARS_BIRTH'], label = str(year_group_sorted[i]))

    sns.distplot(age_data.loc[(age_data['YEARS_BINNED'] == year_group_sorted[i]) & \
                              (age_data['TARGET'] == 1), 'YEARS_BIRTH'], label = str(year_group_sorted[i]))
plt.title('KDE with Age groups')
plt.show()
# -
# -
# ## Subplot
# plt.subplot(row,column,idx)
# +
# Each demo figure is 8x8 inches.
plt.figure(figsize=(8,8))

# plt.subplot takes (total rows, total columns, panel index); the index is
# 1-based and runs row by row across the 3x2 grid.
demo_lines = [
    ([0, 1], [0, 1]),
    ([0, 1], [1, 0]),
    ([1, 0], [0, 1]),
    ([1, 0], [1, 0]),
    ([0, 1], [0.5, 0.5]),
    ([0.5, 0.5], [0, 1]),
]
for idx, (xs, ys) in enumerate(demo_lines, start=1):
    plt.subplot(3, 2, idx)
    plt.plot(xs, ys, label='I am subplot%d' % idx)
    plt.legend()

plt.show()
# +
# When the subplot index would exceed 9, use the (nrows, ncols, idx) call form.
nrows = 5
ncols = 2
plt.figure(figsize=(10,30))
for i in range(len(year_group_sorted)):
    plt.subplot(nrows, ncols, i+1)
    sns.distplot(age_data.loc[(age_data['YEARS_BINNED'] == year_group_sorted[i]) & \
                              (age_data['TARGET'] == 0), 'YEARS_BIRTH'],
                 label = "TARGET = 0", hist = False)

    sns.distplot(age_data.loc[(age_data['YEARS_BINNED'] == year_group_sorted[i]) & \
                              (age_data['TARGET'] == 1), 'YEARS_BIRTH'],
                 label = "TARGET = 1", hist = False)
    plt.title(str(year_group_sorted[i]))
plt.show()
# -
# ## 作業
# ### 請使用 application_train.csv, 根據不同的 HOUSETYPE_MODE 對 AMT_CREDIT 繪製 Histogram
| D19_Subplots/Day_019_EDA_subplots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Data Augmentation Part 1: Downsampling
# Downsampling of majority classes to balance classes in dataset. Downsample "normal" and "opacity".
# ### Resolved Issues
# - randomly delete images with only opacity to downsample to 500 and drop corresponding row
# - np.random selected same file 23 times
# - replace=False did not work
# - rand_im = [np.random.choice(images, replace=False) for i in range(171)]
# - used random_sample instead
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import random
plt.rcParams["axes.grid"] = False
plt.style.use('dark_background')
import warnings
warnings.filterwarnings('ignore')
# -
# Read the preprocessed label file.
df = pd.read_csv('train_new.csv')
df.head()

# Check the dataframe dimensions.
df.shape

# Check the class distribution before downsampling.
df.labels.value_counts()
# Function for downsampling the instances
def downsample(cat, instances,
               directory=r'D:\data\Projects\notebooks\RetinaAI\Preprocessing\train_downsampled'):
    """Randomly delete `instances` images of class `cat` from `directory`.

    Args:
        cat: label value to downsample (e.g. 'normal' or 'opacity').
        instances: number of images of that class to delete.
        directory: folder holding the image files; defaults to the original
            hard-coded training folder (Windows path).

    Returns:
        The list of deleted filenames, so the corresponding rows can be
        dropped from the dataframe afterwards.
    """
    images = df.loc[(df.labels == cat), 'filename'].values.tolist()
    # random.sample guarantees `instances` *distinct* files (np.random.choice
    # with replace=False was unreliable here, per the notebook notes above).
    rand_im = random.sample(images, instances)
    print(len(rand_im))
    for image in rand_im:
        # os.path.join builds the separator instead of string concatenation.
        os.remove(os.path.join(directory, image))
    return rand_im
deleted_normal = downsample('normal', 25)
deleted_opacity = downsample('opacity', 171)

# Create a new dataframe that only contains images not deleted.
# BUG FIX: the second filter previously built its boolean mask from the
# original `df` instead of the already-filtered `df_new`, relying on pandas'
# index alignment of an over-long mask; filter consistently on `df_new`.
df_new = df[~df.filename.isin(deleted_normal)]
df_new = df_new[~df_new.filename.isin(deleted_opacity)]

# Check the new shape.
df_new.shape

# Check the class distribution after downsampling.
df_new.labels.value_counts()
# +
#df_new.to_csv('downsampled.csv', sep=',', encoding='utf-8', index=False)
| 01_Exploration and Preprocessing/3. Downsampling of the majority classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Modules
import warnings
warnings.filterwarnings('ignore')
# +
from src import detect_faces, show_bboxes
from PIL import Image
import torch
from torchvision import transforms, datasets
import numpy as np
import os
# -
# # Path Definitions
# +
# Input root of the raw EmotiW images and output root for face coordinates.
dataset_path = '../Dataset/emotiw/'
processed_dataset_path = '../Dataset/FaceCoordinates/'
# -
# # Load Test Dataset
# Sorted list of test image names with their file extensions stripped.
test = sorted(os.listdir(dataset_path + 'test_shared/test/'))
test_filelist = [x.split('.')[0] for x in test]
print(test_filelist[:10])
print(len(test_filelist))
# # Extract Faces from Image using MTCNN
# Run MTCNN on every test image and save bounding boxes + landmarks as .npz.
for i in range(len(test_filelist)):
    print(test_filelist[i])

    # FIX: skip already-processed images *before* opening/decoding the image.
    # Previously the image was opened first, wasting a decode per skipped file
    # (and crashing if the source image was missing even though its output
    # already existed).
    if os.path.isfile(processed_dataset_path + 'test/' + test_filelist[i] + '.npz'):
        print(test_filelist[i] + ' Already present')
        continue

    img_name = os.path.join(dataset_path, 'test_shared/test/', test_filelist[i] + '.jpg')
    image = Image.open(img_name)
    try:
        bounding_boxes, landmarks = detect_faces(image)
        bounding_boxes = np.asarray(bounding_boxes)
        if bounding_boxes.size == 0:
            print('MTCNN model handling empty face condition at ' + test_filelist[i])
        np.savez(processed_dataset_path + 'test/' + test_filelist[i], a=bounding_boxes, b=landmarks)
    except ValueError:
        # Record placeholder arrays so the image is not retried on the next run.
        print('No faces detected for ' + test_filelist[i] + ". Also MTCNN failed.")
        np.savez(processed_dataset_path + 'test/' + test_filelist[i], a=np.zeros(1), b=np.zeros(1))
| MTCNN/.ipynb_checkpoints/Face_Extractor_BB_Landmarks_Test-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/anitashar/DS-Unit-2-Linear-Models/blob/master/anita_sharma_Copy_of_LS_DS_213_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="oj0P0YMjRp0Y" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 1, Module 3*
#
# ---
# + [markdown] colab_type="text" id="7IXUfiQ2UKj6"
# # Ridge Regression
#
# ## Assignment
#
# We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices.
#
# But not just for condos in Tribeca...
#
# - [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.
# - [ ] Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.
# - [ ] Do one-hot encoding of categorical features.
# - [ ] Do feature selection with `SelectKBest`.
# - [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand — use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set)
# - [ ] Get mean absolute error for the test set.
# - [ ] As always, commit your notebook to your fork of the GitHub repo.
#
# The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.
#
#
# ## Stretch Goals
#
# Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.
#
# - [ ] Add your own stretch goal(s) !
# - [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥
# - [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).
# - [ ] Learn more about feature selection:
# - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
# - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
# - [mlxtend](http://rasbt.github.io/mlxtend/) library
# - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
# - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
# - [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you’re interested in more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
# - [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
# + colab_type="code" id="o9eSnDYhUGD7" colab={}
# %%capture
import sys

# Point DATA_PATH at the hosted data on Colab, or the local copy otherwise.
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'

# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + colab_type="code" id="QJBD4ruICm1m" colab={}
import pandas as pd
import pandas_profiling

# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv')

# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]

# SALE_PRICE was read as strings.
# Strip currency symbols/separators literally, then convert to integer.
# FIX: regex=False makes the single-character patterns explicit literals;
# otherwise '$' (and '-') are regex metacharacters and pandas only kept this
# working via a deprecated single-character special case.
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$', '', regex=False)
    .str.replace('-', '', regex=False)
    .str.replace(',', '', regex=False)
    .astype(int)
)
# + id="0B9N1tWpRp0m" colab_type="code" colab={}
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)
# + id="WfOKCxNlRp0p" colab_type="code" colab={}
# Reduce cardinality for NEIGHBORHOOD feature
# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# + id="3c0bdmNcRp0u" colab_type="code" outputId="17aa844b-5578-41a9-a42e-15b77b594475" colab={"base_uri": "https://localhost:8080/", "height": 462}
print(df.shape)
df.head()
# + id="WetPk6VIdfq1" colab_type="code" outputId="0e31a614-6e26-4896-85f0-09e4f236a7f3" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Just to check zero values for sale price
df['SALE_PRICE'].value_counts()
# + id="sy3x0cpUdf3P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="9ebea582-f81a-4eb3-9035-ed56f7d8c3e0"
df.dtypes
# + [markdown] id="axcL_xaqePjZ" colab_type="text"
# ## **Use a subset of the data where BUILDING_CLASS_CATEGORY == '01 ONE FAMILY DWELLINGS' and the sale price was more than 100 thousand and less than 2 million.**
# + id="8E8vVWqUdf--" colab_type="code" colab={}
df_subset = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS')
& (df['SALE_PRICE'] > 100000)
& (df['SALE_PRICE'] < 2000000)]
df_subset = df_subset.drop(['EASE-MENT', 'APARTMENT_NUMBER'], axis = 1)
# + id="6I5PjLY6dgDm" colab_type="code" outputId="166d036e-d03b-4178-fcf8-b8fe45898160" colab={"base_uri": "https://localhost:8080/", "height": 326}
# check the shape & have a look at the data
print(df_subset.shape)
df_subset.head()
# + id="unBHFNCvVD1f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="d1926be7-a821-431d-8c56-98cb2c2142b0"
# check for zero again
df_subset['SALE_PRICE'].value_counts(ascending = True)
# + id="wkxSxPEJVkAI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="6cff6946-04f3-4c8a-dec3-02bb45cf6d52"
df_subset.isnull().sum()
# + [markdown] id="g449Y5X-mIFC" colab_type="text"
# ## **Do train/test split. Use data from January — March 2019 to train. Use data from April 2019 to test.**
# + id="lSqrLE1KdgU0" colab_type="code" outputId="979f66a7-7952-4b1c-fbdf-d5974f38f90f" colab={"base_uri": "https://localhost:8080/", "height": 436}
# check the data for date column
print(df_subset['SALE_DATE'].dtypes)
df_subset[['SALE_DATE']]
# + id="rcL_Nx1Jdgbk" colab_type="code" outputId="aa300bc7-cf53-482f-d679-2a12f286448f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# change the date column string to datetype
df_subset['SALE_DATE']=pd.to_datetime(df_subset['SALE_DATE'])
df_subset['SALE_DATE'].dtypes
# + id="lEvAG6r4rQc7" colab_type="code" outputId="828f89f0-418b-4e70-8735-d9f9c7ad8a72" colab={"base_uri": "https://localhost:8080/", "height": 204}
# check sale_date data
df_subset[['SALE_DATE']].head()
# + id="tXj3LYElu81s" colab_type="code" outputId="d2d5975e-4c6a-4073-dfa8-b23eb41f2b1f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# check the years in date column
pd.unique(df_subset['SALE_DATE'].dt.year)
# + id="7eBgco4lvj50" colab_type="code" outputId="92590e4f-ae28-48b0-aafb-1b7826b8a315" colab={"base_uri": "https://localhost:8080/", "height": 34}
#We only have one year 2019, so will only extract the month.
# df_subset['MONTH'] = df_subset['SALE_DATE'].dt.month
# df_subset[['SALE_DATE','MONTH']].sample(5)
pd.unique(df_subset['SALE_DATE'].dt.month)
# + id="ksNUxe_xvkB6" colab_type="code" colab={}
# + id="10a6rI--vkIh" colab_type="code" colab={}
# + [markdown] id="cp_j4vk76dbA" colab_type="text"
#
# + id="YjZAuhk5vj_d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="e1fd9fdd-46a7-4419-b42e-98f899cd6de6"
# create train & test data for df_subset
##Use the month from January to March to get train data
#Use the month from April to get test data
cutoff = pd.to_datetime('2019-04-01')
train = df_subset[df_subset.SALE_DATE < cutoff]
test = df_subset[df_subset.SALE_DATE >= cutoff]
print('Train Dataset:',train.shape)
print('Test Dataset:',test.shape)
# + id="CW8fZ6fkdgij" colab_type="code" outputId="cf1407d1-07c4-496d-ad03-07599780646b" colab={"base_uri": "https://localhost:8080/", "height": 34}
#what are the shapes
train.shape, test.shape
# + id="LXr-zW3Jdgur" colab_type="code" colab={}
# + [markdown] id="1OBfflPm6xHo" colab_type="text"
# ## **Do one-hot encoding of categorical features.**
# + id="8RrVnFUEdgr2" colab_type="code" outputId="5abfea26-143c-4277-c7fc-3b735545d453" colab={"base_uri": "https://localhost:8080/", "height": 359}
# Do one-hot encoding of categorical features.
#numeric
train.select_dtypes(include = 'number').describe().T
# + id="_hHl-rz5dgpe" colab_type="code" outputId="87894e0e-28c8-442c-b084-e9f2e6bb1398" colab={"base_uri": "https://localhost:8080/", "height": 328}
#non-numeric
train.select_dtypes(exclude = 'number').describe().T.sort_values(by= 'unique')
# + id="Hr-rM-49fJqH" colab_type="code" colab={}
# + id="8UAht2vqdgn7" colab_type="code" colab={}
# # TODO---some columns have high cardinality. Lets excude them from our features for now.
target = 'SALE_PRICE'
high_cardinality = ['SALE_DATE','LAND_SQUARE_FEET','ADDRESS']
no_variance = ['TAX_CLASS_AT_TIME_OF_SALE','BUILDING_CLASS_CATEGORY']
features = train.columns.drop([target] + high_cardinality+no_variance)
X_train = train[features]
y_train = train[target]
X_test = test[features]
y_test = test[target]
# + id="eMVtPIvimdSw" colab_type="code" colab={}
# + id="Is6zNWkpdgg0" colab_type="code" outputId="75197a3c-75fb-49e1-a9c9-1e49bef04cd0" colab={"base_uri": "https://localhost:8080/", "height": 439}
#what does the X-train look like before encoding
X_train
# + id="V0qQw01Vm78Q" colab_type="code" colab={}
# + id="IbqA9ge7dgaH" colab_type="code" colab={}
# TODO--enconding
import category_encoders as ce
encoder = ce.OneHotEncoder(use_cat_names =True)
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)
# + id="WWI7XiOtdgTS" colab_type="code" outputId="b1755874-0492-4b3f-b136-7ab425feca31" colab={"base_uri": "https://localhost:8080/", "height": 456}
# after encoding how does it look like
X_train
# + id="kb6jpv63dgKl" colab_type="code" outputId="fb73897a-17a2-466a-aa11-74daec9e8995" colab={"base_uri": "https://localhost:8080/", "height": 456}
# X_test after encoding
X_test
# + id="DDOuLpgudgI_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 918} outputId="75902b4e-6671-4a9f-dd0b-8d009d96dbb8"
X_train.std(axis=0)
# + [markdown] id="_DEOe5Fa7nDg" colab_type="text"
# ## **Do feature selection with SelectKBest.**
# + id="vQiKTExzdf8U" colab_type="code" outputId="da4ce4ee-dd96-4aa6-aa5f-00b383ebccf3" colab={"base_uri": "https://localhost:8080/", "height": 34}
# How many features do we have currently?
features = X_train.columns
n = len(features)
n
# + id="Me60i6lZdf0g" colab_type="code" outputId="bde50cff-0a3f-46de-c3a8-ca5f4a7566a6" colab={"base_uri": "https://localhost:8080/", "height": 34}
# How many ways to choose 1 to n features?
from math import factorial
def n_choose_k(n, k):
    """Return the binomial coefficient C(n, k) as an exact integer.

    Uses floor division so the result stays an int. The previous true
    division converted the huge factorials to float, silently losing
    precision once C(n, k) exceeded 2**53 — easily reached with the
    ~40 one-hot features here.
    """
    return factorial(n) // (factorial(k) * factorial(n - k))
# Total number of non-empty feature subsets: sum_{k=1..n} C(n, k) == 2**n - 1,
# far too many to try exhaustively — hence SelectKBest below.
combinations = sum(n_choose_k(n,k) for k in range(1,n+1))
print(f'{combinations:,.0f}')
# + id="CHFanzyNdfyq" colab_type="code" outputId="b5f2aaba-7886-430d-9843-540310457391" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#How many features should be selected?
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.feature_selection import f_regression, SelectKBest
for k in range (1,len(X_train.columns)+1):
print(f'{k} features')
selector = SelectKBest(score_func = f_regression, k=k)
X_train_selected = selector.fit_transform(X_train,y_train)
X_test_selected = selector.transform(X_test)
model = LinearRegression()
model.fit(X_train_selected,y_train)
y_pred = model.predict(X_test_selected)
mae = mean_absolute_error(y_test,y_pred)
print(f'Test MAE : ${mae:,.0f} \n')
# + id="mBP283RddfxN" colab_type="code" colab={}
# + [markdown] id="MOcFDgNMuCai" colab_type="text"
# ## **Fit a ridge regression model with multiple features. Use the normalize=True parameter (or do feature scaling beforehand — use the scaler's fit_transform method with the train set, and the scaler's transform method with the test set)**
# + id="ILW52NeCuF_t" colab_type="code" colab={}
# %matplotlib inline
from IPython.display import display,HTML
from ipywidgets import interact
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler
# + id="QspRH73PuGFZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="816688d0-cdf2-498e-9848-b41c8a20abf9"
# Try a range of alpha parameters for Ridge Regression.
for alpha in [10**1,10**2,10**3,10**4,10**5,10**6]:
# Scale data before doing Ridge Regression
scalar = StandardScaler()
X_train_scaled = scalar.fit_transform(X_train)
X_test_scaled = scalar.transform(X_test)
# Fit Ridge Regression model
display(HTML(f'Ridge Regression,with alpha={alpha}'))
model = Ridge(alpha=alpha)
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
# Get test MAE
mae = mean_absolute_error(y_test,y_pred)
display(HTML(f'Test Mean Absolute Error: ${mae:,.0f}'))
# Plot coefficients
coefficients = pd.Series(model.coef_, X_train.columns)
plt.figure(figsize=(16,8))
coefficients.sort_values().plot.barh(color='grey')
plt.xlim(-400,700)
plt.show()
# + id="IQtz3403uGT3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8b3b10f2-2c97-47e0-92de-458b93c74686"
# + id="0BrL6NBHuGb2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 483} outputId="623ae2f1-4805-4a99-90f1-f4834036e295"
# + id="yQ-GxSRLuGkd" colab_type="code" colab={}
# + id="B6ZkXuv5uGsC" colab_type="code" colab={}
# + id="UcxwxCLHuG1A" colab_type="code" colab={}
# + id="CpZV9svQuG9A" colab_type="code" colab={}
# + id="6rNqdnzjuHJD" colab_type="code" colab={}
# + id="M1Ggn3aLuHF0" colab_type="code" colab={}
# + id="HAsKOS-AuHDM" colab_type="code" colab={}
# + id="JMDvb6HquG61" colab_type="code" colab={}
# + id="dcrEgw2kuGyb" colab_type="code" colab={}
# + id="by8qXsq9uGoh" colab_type="code" colab={}
# + id="WJODWdjxuGhU" colab_type="code" colab={}
# + id="5oGlCKCRuGYb" colab_type="code" colab={}
# + id="-mLvFFmcuGQJ" colab_type="code" colab={}
# + id="BL61JujiuGM4" colab_type="code" colab={}
# + id="TdLownsDuGK9" colab_type="code" colab={}
| anita_sharma_Copy_of_LS_DS_213_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Okfr_uhwhS1X" colab_type="text"
# # Lambda School Data Science - Making Data-backed Assertions
#
# This is, for many, the main point of data science - to create and support reasoned arguments based on evidence. It's not a topic to master in a day, but it is worth some focused time thinking about and structuring your approach to it.
# + [markdown] id="9dtJETFRhnOG" colab_type="text"
# ## Lecture - generating a confounding variable
#
# The prewatch material told a story about a hypothetical health condition where both the drug usage and overall health outcome were related to gender - thus making gender a confounding variable, obfuscating the possible relationship between the drug and the outcome.
#
# Let's use Python to generate data that actually behaves in this fashion!
# + id="WiBkgmPJhmhE" colab_type="code" outputId="4e8ced64-58d9-46f4-a13f-c5483ff542d1" colab={"base_uri": "https://localhost:8080/", "height": 1102}
import random
dir(random) # Reminding ourselves what we can do here
# + id="Ks5qFtpnq-q5" colab_type="code" outputId="4d8e1064-bf60-4a45-81c1-d6dcd3ecfe0a" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Let's think of another scenario:
# We work for a company that sells accessories for mobile phones.
# They have an ecommerce site, and we are supposed to analyze logs
# to determine what sort of usage is related to purchases, and thus guide
# website development to encourage higher conversion.
# The hypothesis - users who spend longer on the site tend
# to spend more. Seems reasonable, no?
# But there's a confounding variable! If they're on a phone, they:
# a) Spend less time on the site, but
# b) Are more likely to be interested in the actual products!
# Let's use namedtuple to represent our data
from collections import namedtuple
# purchased and mobile are bools, time_on_site in seconds
User = namedtuple('User', ['purchased','time_on_site', 'mobile'])
example_user = User(False, 12, False)
print(example_user)
# + id="lfPiHNG_sefL" colab_type="code" outputId="903da34a-fa19-4d4a-8cc7-45dd14f022cf" colab={"base_uri": "https://localhost:8080/", "height": 55}
# And now let's generate 1000 example users
# 750 mobile, 250 not (i.e. desktop)
# A desktop user has a base conversion likelihood of 10%
# And it goes up by 1% for each 15 seconds they spend on the site
# And they spend anywhere from 10 seconds to 10 minutes on the site (uniform)
# Mobile users spend on average half as much time on the site as desktop
# But have three times as much base likelihood of buying something
users = []
for _ in range(250):
# Desktop users
time_on_site = random.uniform(10, 600)
purchased = random.random() < 0.1 + (time_on_site / 1500)
users.append(User(purchased, time_on_site, False))
for _ in range(750):
# Mobile users
time_on_site = random.uniform(5, 300)
purchased = random.random() < 0.3 + (time_on_site / 1500)
users.append(User(purchased, time_on_site, True))
random.shuffle(users)
print(users[:10])
# + id="9gDYb5qGuRzy" colab_type="code" outputId="bc3cb99f-c2d2-4fca-c5a0-0efb421313fb" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Let's put this in a dataframe so we can look at it more easily
import pandas as pd
user_data = pd.DataFrame(users)
user_data.head()
# + id="sr6IJv77ulVl" colab_type="code" outputId="07eace5e-1308-4f04-d3a9-6093a31f24ed" colab={"base_uri": "https://localhost:8080/", "height": 193}
# Let's use crosstabulation to try to see what's going on
pd.crosstab(user_data['purchased'], user_data['time_on_site'])
# + id="hvAv6J3EwA9s" colab_type="code" outputId="7b45d8c5-2fda-4df6-fecc-a14129ed76fb" colab={"base_uri": "https://localhost:8080/", "height": 161}
# OK, that's not quite what we want
# Time is continuous! We need to put it in discrete buckets
# Pandas calls these bins, and pandas.cut helps make them
time_bins = pd.cut(user_data['time_on_site'], 5) # 5 equal-sized bins
pd.crosstab(user_data['purchased'], time_bins)
# + id="pjcXnJw0wfaj" colab_type="code" outputId="e8f6d2d9-7f66-46b4-d213-04145889e0df" colab={"base_uri": "https://localhost:8080/", "height": 161}
# We can make this a bit clearer by normalizing (getting %)
pd.crosstab(user_data['purchased'], time_bins, normalize='columns')
# + id="C3GzvDxlvZMa" colab_type="code" outputId="d07bf197-350b-4bf2-a5d8-8897b8fc8446" colab={"base_uri": "https://localhost:8080/", "height": 143}
# That seems counter to our hypothesis
# More time on the site can actually have fewer purchases
# But we know why, since we generated the data!
# Let's look at mobile and purchased
pd.crosstab(user_data['purchased'], user_data['mobile'], normalize='columns')
# + id="KQb-wU60xCum" colab_type="code" colab={}
# Yep, mobile users are more likely to buy things
# But we're still not seeing the *whole* story until we look at all 3 at once
# Live/stretch goal - how can we do that?
# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - what's going on here?
#
# Consider the data in `persons.csv` (already prepared for you, in the repo for the week). It has four columns - a unique id, followed by age (in years), weight (in lbs), and exercise time (in minutes/week) of 1200 (hypothetical) people.
#
# Try to figure out which variables are possibly related to each other, and which may be confounding relationships.
# + id="TGUS79cOhPWj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="ec1dc410-9e4e-4300-a8d4-4410637d9895"
import pandas as pd
# TODO - your code here
# Use what we did live in lecture as an example
persons = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module4-databackedassertions/persons.csv')
# HINT - you can find the raw URL on GitHub and potentially use that
# to load the data with read_csv, or you can upload it yourself
persons.head()
# + [markdown] id="9Onoi6VitWYM" colab_type="text"
# I would guess that the initial data should show some effect of exercise time on weight, presumably it makes weight go down. I also guess that age is the confounding factor. Maybe as age rises, the effect of exercise on weight decreases?
# + id="Q8jcpJIJq7Uj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 349} outputId="c636e34c-e306-4ca9-d003-b4e8fcee3c7a"
print(pd.crosstab(persons['weight'],persons['exercise_time']).shape)
#pd.crosstab(persons['weight'],persons['exercise_time']) Not good, lets bin both
time_bins = pd.cut(persons['exercise_time'],5)
weight_bins = pd.cut(persons['weight'],10)
age_bins = pd.cut(persons['age'],5)
pd.crosstab(weight_bins, time_bins)
#it appears that when you weigh more, you exercise less
#but that can't be all
#let's try one of age vs exercise
pd.crosstab(age_bins, time_bins)
# a little more interesting, short exercise actually increases with age
# 2 hours stays the same, more than that has a sudden drop around retirement
#ok now to look at all three
pd.crosstab(time_bins, [age_bins, weight_bins])
#too many columns. What I want is a grid with an x and a y of age and weight
#and then a z of exercise time.
# + id="BzPHflfF8Jl-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 377} outputId="31d20828-1d47-4a65-eda7-0536b1b5c42d"
#lets do ggplot cause that's pretty easy and intuitive
# !pip install ggplot
# + id="irnL_FC78S9y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 529} outputId="a3851d56-308c-4541-ddac-02aab413888d"
from ggplot import *
#I wonder what that star actually means
ggplot(aes(x='age', y='weight',color='exercise_time'), data = persons) +\
geom_point() +\
scale_color_gradient(low='red', high='blue')
#OH BABY, look at this
#I initially thought of a 3d plot with same layout except instead of colors,
#it would of had bars illustrating exercise time. This is great though.
#Some of the points are blurred, but it's a small sacrifice for this
#overall effect
# + [markdown] id="dbLB3NcI-lEl" colab_type="text"
# What does this graph say? At both extremes of weight and age, exercise time drops to pretty much zero. One interesting thing to look at is that it drops faster with weight than age. Going horizontal from 220-240 and there's just red. Looking at the 70-80 column, there's still a lot of purple until you pass 220. I also like the visual that vigorous exercise stop around 180-200 and age 60-70. Really cool.
#
# I wish that I could zoom in on this plot because I think some datapoints are overlapping, but I'm not sure. Maybe a bokeh plot would have that functionality.
# + id="FY81X6FNXCDi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 529} outputId="a9f592e4-44c6-45b7-96ca-78e7cd4600e4"
#Let's switch weight and exercise time to see any correlation
ggplot(aes(x='age', y='exercise_time',color='weight'), data = persons) +\
geom_point() +\
scale_color_gradient(low='blue', high='red')
# + [markdown] id="oMvm8XGYXbjR" colab_type="text"
# I like this format less. However, it may be better to treat weight as the variable dependant on both ex_time and age. It does show some interesting results. Long exercise times seems to abruptly stop which is definitely worth looking at.
# + id="7A8EaAyXSuDl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 529} outputId="822b46bf-7bf1-4443-a56f-ddb51599818a"
#How about weight vs age. Is weight dependant on age??? Let's find out.
ggplot(aes(x='age', y='weight'), data = persons) +\
geom_point()
# + [markdown] id="1ewtbc_pYrrs" colab_type="text"
# Nothing that I can see. A density plot was clearer but I need to look up more about that before I can use it and make assertions.
# + id="7OYG8xADVBvI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 529} outputId="4450b9cc-3df4-4214-ffc8-434651c72cd3"
#Does exercise time influence weight?
ggplot(aes(x = 'exercise_time', y = 'weight'),data = persons) +\
geom_point()
# + [markdown] id="58-9GRHnVoGK" colab_type="text"
# Wow, just look at that. Looks really good as a scatter plot.
# + id="iHr5VD7nY7fh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 529} outputId="3cbed012-d7f9-4d22-e3bb-11cac63a89ee"
#Does age influence exercise time?
ggplot(aes(x = 'age', y = 'exercise_time'),data = persons) +\
geom_point()
# + [markdown] id="33Vq8AE5ZNDw" colab_type="text"
# The general shape looks very similar to the second colored point chart I did.
# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. What are the variable types in the data?
# 2. What are the relationships between the variables?
# 3. Which relationships are "real", and which spurious?
#
# + [markdown] id="cx_HP98RR1yT" colab_type="text"
# 1. The variables are age, weight and exercise time. They are all continuous. Well at least that's what I want to say but looking back at the data they each seem to be discrete; all of them are integers.
#
# 2. The relationship that I saw was that exercise time went down as weight or age increased.
#
# 3. I'm not really sure what to say to this. I feel like all three factors are inter-related. Age is independent, but I feel it influences at least exercise time.
# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub.
#
# - [Spurious Correlations](http://tylervigen.com/spurious-correlations)
# - [NIH on controlling for confounding variables](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4017459/)
#
# Stretch goals:
#
# - Produce your own plot inspired by the Spurious Correlation visualizations (and consider writing a blog post about it - both the content and how you made it)
# - Pick one of the techniques that NIH highlights for confounding variables - we'll be going into many of them later, but see if you can find which Python modules may help (hint - check scikit-learn)
| module4-databackedassertions/LS_DS_114_Making_Data_backed_Assertions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# General outline:
# I want to take in an array (later image) representation of a scrabble board/hand tiles, and output the best play.
#
# Naive best play: the highest scoring play
#
# Goal best play: the play that maximizes your winning chances (takes into account the opponent's potential score)
# * You want to maximize how you're using the tiles in your hand.
# * This doesn't just mean getting the highest score possible. Save your high tiles for the multipliers.
# * As well as minimizing your opponent's reply score
#
# 0. Constants:
# * Multiplier boards
#
# * make functions that define the area that the word is in on the board (given a letter?)
# * allow another function to tell what the letters are in that area.
#
#
# Main steps needed:
# 1. Take in an array of the board and your hand letters **check**
# 2. Tell the available permutations and their locations on the board **check**
# * return a dictionary with the word as a key and the location as the value **check**
# 3. See what crossing words those permutations are going to make **check**
# * this will return a dictionary of the main word and its position as a key and the crossing words as the values **check**
# * could use a nested dictionary for this **check**
# 4. Check to see if the permutations and their crossing words are in the dictionary **check**
#     * take in the dictionary from above? **check**
# 5. Do the same for the edge words.
# 6. Score it.
# For
#
# Step 2:
# 1. Need to get permutations for each row
# * Options:
# 1. Get permutations around each group of letters like I've been doing
# 2. Look at it on a row-wide basis **Went with this one**
# * Need to make sure all the sample letters stay together
#
# TODO: it would probably be best to check the crossing words fewer times. Perhaps I could make a set of them, find the intersection of them and the dictionary, then loop over that intersection with the individual groups.
# %reload_ext nb_black
import numpy as np
import csv
import itertools
import re
from collections import defaultdict
# +
# 15x15 Scrabble board fixture: "" marks an empty square, a lowercase
# letter marks a placed tile. The trailing numeric comments are row
# indices, to make it easier to eyeball positions.
sample_board = [
    ["", "", "", "", "", "", "", "", "", "", "d", "e", "a", "d", ""],  # 0
    ["", "", "", "", "", "", "", "", "", "z", "o", "n", "a", "", "w"],  # 1
    ["", "", "", "", "", "", "", "", "", "", "", "a", "", "", "i"],  # 2
    ["", "", "", "", "", "", "", "", "", "r", "i", "m", "a", "", "l"],  # 3
    ["", "", "", "", "", "", "", "", "", "", "", "o", "b", "e", "y"],  # 4
    ["", "", "", "", "", "", "", "", "", "j", "u", "r", "a", "l", ""],
    ["", "", "", "", "", "", "", "b", "e", "e", "p", "", "", "", ""],  # 6
    ["", "", "", "f", "", "", "y", "e", "w", "s", "", "t", "", "", ""],
    ["", "", "", "o", "", "", "", "l", "e", "t", "c", "h", "", "", ""],  # 8
    ["", "", "", "c", "h", "u", "r", "l", "", "", "", "i", "", "r", ""],
    ["", "", "g", "i", "", "", "", "e", "", "", "", "g", "", "e", ""],  # 10
    ["", "", "i", "", "", "", "", "n", "", "", "", "h", "u", "e", "d"],
    ["", "", "n", "", "", "", "", "d", "", "", "", "", "", "f", ""],  # 12
    ["", "", "", "", "", "", "", "", "", "v", "e", "e", "p", "s", ""],
    ["", "", "", "", "", "", "", "", "n", "u", "n", "", "", "", ""],  # 14
]
# The player's rack (hand) for this example; duplicates are allowed.
sample_board_letters = ["o", "a", "s", "r", "l",'r','t']
# numpy of the board
board_array = np.array(sample_board)
# making the empty strings spaces (purely for readable printing)
neat_sample_array = np.copy(board_array)
neat_sample_array[neat_sample_array == ""] = " "
# -
class Scrabbler:
#https://raw.githubusercontent.com/jmlewis/valett/master/scrabble/sowpods.txt
data_path = "../sowpods.txt" #from above
with open(data_path, newline="") as f:
reader = csv.reader(f)
allwords = list(reader)
WORDLIST = list(itertools.chain.from_iterable(allwords))
WORDLIST = [x.lower() for x in WORDLIST]
WORDSET = set(WORDLIST)
BOARD_LENGTH = 15
MAX_I = 14
row_mainword_scores = {}
column_mainword_scores = {}
LETTER_MULTIPLIERS = [
[1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1], # 0
[1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1], # 1
[1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1], # 2
[1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1], # 3
[1, 1, 2, 1, 1, 1, 2, 1, 2, 1, 1, 1, 2, 1, 1], # 4
[1, 1, 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 1, 1],
[3, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3], # 6
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[3, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 3], # 8
[1, 1, 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 1, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 2, 1, 1, 1, 2, 1, 1], # 10
[1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1],
[1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1], # 12
[1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1],
[1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1], # 14
]
LETTER_MULTIPLIERS_ARRAY = np.array(LETTER_MULTIPLIERS)
WORD_MULTIPLIERS = [
[1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1], # 0
[1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1], # 1
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 2
[3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 3], # 3
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 4
[1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 6
[1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 8
[1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 10
[3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 12
[1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1], # 14
]
WORD_MULTIPLIERS_ARRAY = np.array(WORD_MULTIPLIERS)
TILE_SCORES = {
"a": 1,
"b": 4,
"c": 4,
"d": 2,
"e": 1,
"f": 4,
"g": 3,
"h": 3,
"i": 1,
"j": 10,
"k": 5,
"l": 2,
"m": 4,
"n": 2,
"o": 1,
"p": 4,
"q": 10,
"r": 1,
"s": 1,
"t": 1,
"u": 2,
"v": 5,
"w": 4,
"x": 8,
"y": 3,
"z": 10,
}
def __init__(self, board, hand_letters):
self.board = board
self.hand_letters = hand_letters
self.T_board = np.copy(board).transpose() # TODO: should this be here?
self.board_mask = (np.copy(board) != "").astype(int)
# Navigator functions
def check_yx(self, y, x):
if x > -1 and x < 15 and y > -1 and y < 15:
return bool(self.board[y, x])
else:
return "out"
def check_right(self, y, x):
if x < self.BOARD_LENGTH - 1:
return bool(self.board[y, x + 1]), self.board[y, x + 1]
else:
return "out"
def check_left(self, y, x):
if x > 0:
return bool(self.board[y, x - 1]), self.board[y, x - 1]
else:
return "out"
def check_above(self, y, x):
if y > 0:
return bool(self.board[y - 1, x]), self.board[y - 1, x]
else:
return "out"
def check_below(self, y, x):
if y < self.BOARD_LENGTH - 1:
return bool(self.board[y + 1, x]), self.board[y + 1, x]
else:
return "out"
# For potential words of a given length
# start at the left, travel right (keep track of empties) till you hit a letter
def hand_letter_permutations(self, length):
# this returns a list of permutations of the hand letters of a given length
letter_permutations = list(itertools.permutations(self.hand_letters, length))
return letter_permutations
def row_permutations(self, row):
y = row
board_row = self.board[row]
# no_ means "number of" here
no_hand_letters = len(self.hand_letters)
no_pre_filled = sum(list(map(bool, board_row)))
row_perms_dict = {}
if no_pre_filled > 0:
for perm_len in range(1, no_hand_letters + 1):
hand_perms = self.hand_letter_permutations(perm_len)
# for x in range(0, BOARD_LENGTH-(len(self.hand_letters)-1)-no_pre_filled):
# x=0
no_tiles_to_right = no_pre_filled
for hand_perm in hand_perms:
# while x + no_tiles_to_right+perm_len<self.BOARD_LENGTH:
x = 0
if (
self.check_yx(y, x) == False
): # and sum(list(map(bool,board_row[x:x+no_hand_letters])))
# for hand_perm in hand_perms:
while True:
# TODO check this
if (
any(
board_row[max(0, x - 1) : min(x + perm_len + 1, 14)]
)
== True
):
complete_word, word_y, word_x = self.complete_word_y_x(
row, x, hand_perm
)
if complete_word == []:
break
elif len(complete_word) > perm_len:
# TODO: could leave out the part after the and below and delete duplicates later
# TODO: could indicate the length of the original perm
if (
complete_word in row_perms_dict
and [word_y, word_x]
not in row_perms_dict[complete_word]
):
row_perms_dict[complete_word].append(
[word_y, word_x]
)
else:
row_perms_dict[complete_word] = [
[word_y, word_x]
]
x = x + 1
if x == self.BOARD_LENGTH:
break
# no_tiles_to_right=sum(list(map(bool,board_row[x+1:])))#TODO: +1 here?
return row_perms_dict
# fill the next len(hand_perm) spaces with hand tiles and find the connecting words
# row=y
    def complete_word_y_x(self, row, x, hand_perm):
        """Lay ``hand_perm`` into ``row`` starting at column ``x`` and read
        off the complete word it forms.

        Empty cells consume letters of ``hand_perm`` left to right; occupied
        cells contribute their existing letter.  The result is then extended
        with any contiguous letters immediately left and right of the span.

        Returns:
            tuple: ``(word, y, word_x)`` -- the word as a string, its row,
            and the column of its first letter -- or ``([], [], [])`` when
            the placement runs off the right edge of the board.
        """
        hand_perm = list(hand_perm)
        perm_len = len(hand_perm)
        X = x  # remember the requested start column
        board_row = self.board[row]
        y = row
        no_filled = 0
        filled_tiles = []
        left_letters = []
        right_letters = []
        # Walk left from the start to pick up an adjoining prefix.
        while self.check_left(y, x)[0] == True:
            x = x - 1
            left_letters = left_letters + [board_row[x]]
        left_letters = left_letters[::-1]  # collected right-to-left; reverse
        x = X
        # Consume the permutation, passing over cells already occupied.
        while no_filled < perm_len:
            if self.check_yx(y, x) == True:
                filled_tiles = filled_tiles + [board_row[x]]
            elif self.check_yx(y, x) == "out":
                # Ran past the right edge before placing every tile.
                return [], [], []
            elif self.check_yx(y, x) == False:
                new_tile = hand_perm.pop(0)
                filled_tiles = filled_tiles + [new_tile]
                no_filled = no_filled + 1
            x = x + 1
        x = x - 1  # step back onto the last visited cell
        # Walk right to pick up an adjoining suffix.
        while self.check_right(y, x)[0] == True:
            x = x + 1
            right_letters = right_letters + [board_row[x]]
        complete_word_as_list = left_letters + filled_tiles + right_letters
        word_x = X - len(left_letters)
        complete_word = "".join(complete_word_as_list)
        return complete_word, y, word_x
# Find the crossing words for a given complete_word_y_x info
    def find_crosses(self, complete_word, y, x):
        """Collect the perpendicular words created by playing
        ``complete_word`` horizontally starting at (y, x).

        A crossing word is gathered for each column where the played letter
        is new (differs from what the board already holds) and the cell has
        a neighbour above or below.  Coordinates in the result refer to the
        *transposed* board (see ``crossing_words_at_tile``).

        Returns:
            set: ``{(word, y, x), ...}``; the sentinel ``{("", -1, -1)}``
            when the play creates no crossing words at all.
        """
        crossing_words = set()
        count = 0
        for tile in complete_word:
            # TODO: don't think I need this if part, only the last part and len(crossing_word)>1
            if (
                self.check_above(y, x)[0] == True or self.check_below(y, x)[0] == True
            ) and self.board[y, x] != tile:
                crossing_word, T_y, T_x = self.crossing_words_at_tile(tile, y, x)
                # Single letters are not words; keep only real crossings.
                if len(crossing_word) > 1:
                    crossing_words.add((crossing_word, T_y, T_x))
            # TODO: not sure if I need all this stuff about edge_letter
            # if self.board[y, x] != tile:
            #     count = count + 1
            #     edge_letter = tile
            #     edge_x = x
            #     edge_y = y
            x = x + 1
        # TODO should I use/fix this?
        # if count == 1 and len(crossing_words) == 0:
        #     crossing_words.add((edge_letter, edge_y, edge_x))
        # Sentinel marking "no crossings" so callers can distinguish it.
        if len(crossing_words) == 0:
            crossing_words.add(("", -1, -1))
        return crossing_words
# Enter a tile and its location and return the crosses that it makes
def crossing_words_at_tile(self, tile, y, x):
# We will be using the transpose board so y and x are switched
self.board = self.board.transpose()
y, x = x, y
crossing_word, T_y, T_x = self.complete_word_y_x(y, x, (tile))
self.board = self.board.transpose()
return crossing_word, T_y, T_x
# insert the "hook letter" into a shadow board and find the output connected to that
# assume the board has already been transposed and you are operating with the right y and x
    def edge_perms(self, hook_letter, y, x):
        """Find candidate words built through a hypothetical hook tile.

        Temporarily writes ``hook_letter`` onto the board at (y, x) (and
        removes it from the hand), enumerates that row's permutations, and
        keeps only the words whose span covers column ``x``.  Board and hand
        are restored before returning.

        Assumes the board has already been transposed, i.e. (y, x) are in
        the transposed frame.
        """
        self.board[y, x] = hook_letter
        self.hand_letters.remove(hook_letter)
        shadow_row_perms = self.row_permutations(y)
        # Keep only words that actually pass through the hook column.
        edge_perms_dict = {
            k: v
            for (k, v) in shadow_row_perms.items()
            if v[0][1] <= x
            and v[0][1] + len(k)
            >= x  # TODO: see if I really need to put out a nested list here
        }
        # NOTE(review): remove()+append() restores membership but may reorder
        # the hand; harmless only if hand order is irrelevant -- confirm.
        self.hand_letters.append(hook_letter)
        self.board[y, x] = ""
        return edge_perms_dict
def find_valid_perms(self):
crossing_set = set()
board_perms_dict = {}
for row in range(0, self.BOARD_LENGTH):
row_perms = self.row_permutations(row)
for complete_word in row_perms:
if complete_word in board_perms_dict:
board_perms_dict[complete_word] = (
board_perms_dict[complete_word] + row_perms[complete_word]
)
else:
board_perms_dict[complete_word] = row_perms[complete_word]
perms_set = set(board_perms_dict.keys())
valid_perms = self.WORDSET.intersection(perms_set)
valid_perms_dict = {
k: v for (k, v) in board_perms_dict.items() if k in valid_perms
}
return valid_perms_dict
# use the perms to find crossings and check if they are all valid
# TODO: ask to see if I can somehow check all of these sets at once so I only have to iter through once
def check_crosses(self, valid_perm, y, x):
crosses_with_position = self.find_crosses(valid_perm, y, x)
crossing_words = set([i for i in zip(*crosses_with_position)][0])
crosses_valid = crossing_words.issubset(
self.WORDSET
) or crosses_with_position == {("", -1, -1)}
return crosses_valid, crosses_with_position
# make a dictionary of valid plays for one direction as a dictionary of words with a nested crossing dict
# in the first entry of a list that are the values of the dictionary
    def valid_plays(self, valid_perms):
        """Filter candidate words down to plays whose crossings all check out.

        For every word/position pair in ``valid_perms``, the play is kept
        only when :meth:`check_crosses` approves every perpendicular word it
        creates.

        Returns:
            defaultdict: ``plays[word][(y, x)][cross_word] -> [(y, x), ...]``
            where crossing coordinates refer to the transposed board.
        """
        # valid_perms = self.find_valid_perms() #TODO is this right to move to a parameter?
        valid_plays = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
        for valid_perm in valid_perms:
            potential_positions = valid_perms[valid_perm]
            for potential_position in potential_positions:
                y = potential_position[0]
                x = potential_position[1]
                crosses_valid, crosses_with_position = self.check_crosses(
                    valid_perm, y, x
                )
                if crosses_valid == True:
                    # TODO: I need to be able to add new positionf of the same cross here
                    # probably need to make another default dict here
                    # Record each crossing word's (y, x) under this play.
                    for cross_with_position in crosses_with_position:
                        valid_plays[valid_perm][tuple(potential_position)][
                            cross_with_position[0]
                        ].append((cross_with_position[1:3]))
        return valid_plays
# remember the crosses returned above have the x and y switched, as well as nested default dicts
# Find which tiles of a word originated from the hand
def new_tiles(self, valid_word, y, x):
new_tiles = []
for letter in valid_word:
if self.check_yx(y, x) == False:
new_tiles.append(
[letter, y, x]
) # should this really be a dictionary list?
x = x + 1
return new_tiles
# Find which tiles could be used to build a crossing word
def find_edge_hooks(self, valid_plays):
edge_hooks = []
for valid_word in valid_plays:
for position in valid_plays[valid_word]:
y = position[0]
x = position[1]
new_tiles = self.new_tiles(valid_word, y, x)
if len(new_tiles) == 1:
edge_hooks = edge_hooks + new_tiles
return edge_hooks
# create a dictionary of the valid edge words
# could probably adapt find valid perms above for this
    def valid_edge_words(self, edge_hooks):
        """Build the dictionary-valid words reachable through edge hooks.

        Each hook is ``[letter, y, x]`` with y/x transposed relative to the
        main-word frame (hence the swapped indexing below).  Gathers
        :meth:`edge_perms` for every hook, merges the position lists, and
        keeps only real dictionary words.
        """
        edge_perms_dict = {}
        for hook in edge_hooks:
            hook_letter = hook[0]
            # x and y are transposed
            y = hook[2]
            x = hook[1]
            edge_perms = self.edge_perms(hook_letter, y, x)
            # Merge this hook's words into the running dictionary, avoiding
            # duplicate positions.
            for complete_word in edge_perms:
                if complete_word not in edge_perms_dict:
                    edge_perms_dict[complete_word] = edge_perms[complete_word]
                elif edge_perms[complete_word][0] not in edge_perms_dict[complete_word]:
                    edge_perms_dict[complete_word] = (
                        edge_perms_dict[complete_word] + edge_perms[complete_word]
                    )
        # copied from above, checking if perm in the dictionary
        edge_perms_set = set(edge_perms_dict.keys())
        valid_edge_perms = self.WORDSET.intersection(edge_perms_set)
        valid_edge_perms_dict = {
            k: v for (k, v) in edge_perms_dict.items() if k in valid_edge_perms
        }
        return valid_edge_perms_dict
# Flattens the row and column dictionaries, making them more intuitive/combinable
    def unpack_dictionary(self, dict1):
        """Flatten the nested play dictionary into ``{(word, pos): crosses}``.

        Input shape:  ``{word: {pos: {cross_word: [cross_pos, ...]}}}``.
        Output shape: ``{(word, pos): ((cross_word, cross_pos), ...)}``.
        """
        new_dict = {}
        for word in dict1:
            location_crosses = dict1[word]
            for location in location_crosses:
                crosses_dict = location_crosses[location]
                last_word_location = []
                for crossing_word in crosses_dict:
                    crossing_word_locations = crosses_dict[crossing_word]
                    for crossing_location in crossing_word_locations:
                        # Accumulate crossings for this (word, location) by
                        # appending onto the previously stored flat tuple.
                        if len(last_word_location) > 0:
                            new_dict[(word, location)] = new_dict[
                                last_word_location
                            ] + (crossing_word, crossing_location)
                        else:
                            new_dict[(word, location)] = (
                                crossing_word,
                                crossing_location,
                            )
                        last_word_location = (word, location)
        new_new_dict = {}
        # The accumulated tuples are flat (w1, p1, w2, p2, ...); re-pair
        # them into ((w1, p1), (w2, p2), ...).
        for key in new_dict:
            crossers_list = new_dict[key]
            new_new_dict[key] = tuple(zip(crossers_list, crossers_list[1:]))[::2]
        return new_new_dict
# Returns 2 dictionaries of all the valid words and their crosses
    def all_board_words(self):
        """Compute every valid play on the board, row-wise and column-wise.

        Combines direct plays with "edge" plays (words built through a
        single hook tile found while scanning the perpendicular direction).
        The board is transposed while column plays are computed and restored
        afterwards.

        Returns:
            tuple[dict, dict]: ``(all_row_words, all_column_words)``, each of
            shape ``{(word, (y, x)): ((cross_word, (y, x)), ...)}`` -- the
            crossing coordinates refer to the transposed board.
        """
        # find the valid row plays
        valid_row_perms = self.find_valid_perms()
        valid_row_plays = self.valid_plays(valid_row_perms)
        # find the edge hooks for the column-wise edge words
        edge_hooks_for_columns = self.find_edge_hooks(valid_row_plays)
        # transpose
        self.board = self.board.transpose()
        # find the valid column plays
        valid_column_perms = self.find_valid_perms()
        valid_column_plays = self.valid_plays(valid_column_perms)
        # find the valid column edge plays
        valid_column_edge_perms = self.valid_edge_words(edge_hooks_for_columns)
        valid_column_edge_plays = self.valid_plays(valid_column_edge_perms)
        # find the edge hooks for the row-wise edge words
        edge_hooks_for_rows = self.find_edge_hooks(valid_column_plays)
        # transpose
        self.board = self.board.transpose()
        # find the valid row edge plays
        valid_row_edge_perms = self.valid_edge_words(edge_hooks_for_rows)
        valid_row_edge_plays = self.valid_plays(valid_row_edge_perms)
        # transform them all into a more readable format
        valid_row_plays = self.unpack_dictionary(valid_row_plays)
        valid_row_edge_plays = self.unpack_dictionary(valid_row_edge_plays)
        valid_column_plays = self.unpack_dictionary(valid_column_plays)
        valid_column_edge_plays = self.unpack_dictionary(valid_column_edge_plays)
        # collapse the row and column dictionaries
        all_row_words = {**valid_row_plays, **valid_row_edge_plays}
        all_column_words = {**valid_column_plays, **valid_column_edge_plays}
        # now I have two dictionaries: row and column in the format of:
        # {(mainword, y, x): ((crossingword, y, x), (crossingword, y, x), etc)
        #  (mainword, y, x): ((crossingword, y, x), etc)
        #  ...
        # }
        # remember that the y and x of the crossing words refer to a transposed board vis-a-vis the main word
        return all_row_words, all_column_words
# calculate the score of a group of tiles (not the whole play)
def calculate_word_score(
self, word, y, x, letter_multipliers_array, word_multipliers_array
):
letter_multipliers = np.copy(letter_multipliers_array[y, x : x + len(word)])
word_multipliers = np.copy(word_multipliers_array[y, x : x + len(word)])
scores = [self.TILE_SCORES[l] for l in word]
word_score = np.dot(letter_multipliers, scores)
word_score = word_score * np.prod(word_multipliers)
return word_score
# calculating the total value of a main word and its crosses
    def total_score(self, main_word_and_location, crossing_words_and_locations):
        """Score a full play: the main word plus every crossing word.

        Args:
            main_word_and_location: ``(word, (y, x))`` in the current board
                frame.
            crossing_words_and_locations: iterable of ``(word, (y, x))``
                whose coordinates refer to the *transposed* board.

        Returns:
            Combined score; single-letter "crossings" contribute nothing.
        """
        letter_multipliers_array = np.copy(self.LETTER_MULTIPLIERS_ARRAY)
        word_multipliers_array = np.copy(self.WORD_MULTIPLIERS_ARRAY)
        # make the multipliers==1 if there are pieces on them on the board
        # TODO: how permanent is the below?
        bool_mask = (self.board != "").astype(int)
        letter_multipliers_array = np.where(bool_mask == 1, 1, letter_multipliers_array)
        word_multipliers_array = np.where(bool_mask == 1, 1, word_multipliers_array)
        main_word = main_word_and_location[0]
        y = main_word_and_location[1][0]
        x = main_word_and_location[1][1]
        main_score = self.calculate_word_score(
            main_word, y, x, letter_multipliers_array, word_multipliers_array
        )
        # TODO: ask if I should have transpose inside or outside the copy function here
        # transpose the multiplier arrays to use to score the crossing words
        T_word_multipliers = np.copy(word_multipliers_array.transpose())
        T_letter_multipliers = np.copy(letter_multipliers_array.transpose())
        # calculating the crossing words' total score
        crossers_score = 0
        for crossing_word_and_location in crossing_words_and_locations:
            # Only multi-letter crossings score; single letters (or the
            # sentinel "") are worth nothing.
            if len(crossing_word_and_location[0]) > 1:
                y = crossing_word_and_location[1][0]
                x = crossing_word_and_location[1][1]
                crossing_word = crossing_word_and_location[0]
                crosser_score = self.calculate_word_score(
                    crossing_word, y, x, T_letter_multipliers, T_word_multipliers
                )
            else:
                crosser_score = 0
            crossers_score = crossers_score + crosser_score
        total_score = main_score + crossers_score
        return total_score
# Create a dictionary of all valid main words and their scores
def valid_score_choices(self):
board = self.board
hand_letters = self.hand_letters
letter_multiplier_array = self.LETTER_MULTIPLIERS_ARRAY
word_multiplier_array = self.WORD_MULTIPLIERS_ARRAY
# make a transpose board
transpose_board = np.copy(board)
transpose_board = transpose_board.transpose()
# calculate the words/placement allowed by the rules of the game
all_row_words, all_column_words = self.all_board_words()
# create a dictionary of each of these row words along with their score
row_score_dict = {}
column_score_dict = {}
for main_word_and_location in all_row_words:
crossing_words_and_locations = all_row_words[main_word_and_location]
score = self.total_score(
main_word_and_location, crossing_words_and_locations
)
row_score_dict[main_word_and_location] = score
# transpose the board
self.board = self.board.transpose()
# same for the column words
for main_word_and_location in all_column_words:
crossing_words_and_locations = all_column_words[main_word_and_location]
score = self.total_score(
main_word_and_location, crossing_words_and_locations
)
column_score_dict[main_word_and_location] = score
self.row_mainword_scores = row_score_dict
self.column_mainword_scores = column_score_dict
return row_score_dict, column_score_dict
# Return the highest scoring k words for rows and columns, if you have already run total_score
def top_k_words(self, k):
top_k_rows = {
k: v
for k, v in sorted(
self.row_mainword_scores.items(), key=lambda item: item[1], reverse=True
)[:k]
}
top_k_columns = {
k: v
for k, v in sorted(
self.column_mainword_scores.items(),
key=lambda item: item[1],
reverse=True,
)[:k]
}
return top_k_rows, top_k_columns
# +
# #adapting total score
# def total_score(self,main_word_and_location, crossing_words_and_locations):
# letter_multipliers_array=np.copy(self.LETTER_MULTIPLIERS_ARRAY)
# word_multipliers_array=np.copy(self.WORD_MULTIPLIERS_ARRAY)
# # make the multipliers==1 if there are pieces on them on the board;
# # TODO: how permanent is the below?
# bool_mask = (self.board != " ").astype(int)
# letter_multipliers_array = np.where(bool_mask == 1, 1, letter_multipliers_array)
# word_multipliers_array = np.where(bool_mask == 1, 1, word_multipliers_array)
# main_word=main_word_and_location[0]
# y=main_word_and_location[1][0]
# x=main_word_and_location[1][1]
# main_score = self.calculate_word_score(
# main_word, y, x, letter_multipliers_array, word_multipliers_array
# )
# # TODO: ask if I should have transpose inside or outside the copy function here
# # transpose the multiplier arrays to use to score the crossing words
# T_word_multipliers = np.copy(word_multipliers_array.transpose())
# T_letter_multipliers = np.copy(letter_multipliers_array.transpose())
# # calculating the crossing words' total score
# # need to switch the x and y here so the words don't go off the board
# crossers_score = 0
# for crosser in scored_crossing_words:
# if len(crosser[0]) > 1:
# crosser = list(crosser)
# crosser[1], crosser[2] = crosser[2], crosser[1]
# crosser_score = calculate_word_score(
# crosser, T_letter_multipliers, T_word_multipliers
# )
# else:
# crosser_score = 0
# crossers_score = crossers_score + crosser_score
# total_score = main_score + crossers_score
# return total_score
# def valid_score_choices(
# board, hand_letters, letter_multiplier_array, word_multiplier_array
# ):
# board=self.board
# hand_letters=self.hand_letters
# # make a transpose board
# transpose_board = np.copy(board)
# transpose_board = transpose_board.transpose()
# # calculate the words/placement allowed by the rules of the game
# valid_across_words_crosses, valid_column_words_crosses = valid_words_whole2(
# board, transpose_board, hand_letters
# )
# # create a dictionary of each of these words along with their score
# across_score_dict = {}
# column_score_dict = {}
# for word_crosses in valid_across_words_crosses:
# word = word_crosses[0]
# score = total_score(
# board, word_crosses, letter_multiplier_array, word_multiplier_array
# )
# across_score_dict[tuple(word)] = score
# for word_crosses in valid_column_words_crosses:
# column_word = word_crosses[0]
# column_score = total_score(
# transpose_board,
# word_crosses,
# letter_multiplier_array,
# word_multiplier_array,
# )
# column_score_dict[tuple(column_word)] = column_score
# return across_score_dict, column_score_dict
# +
# def valid_score_choices(
# board, hand_letters, letter_multiplier_array, word_multiplier_array
# ):
# # make a transpose board
# transpose_board = np.copy(board)
# transpose_board = transpose_board.transpose()
# # calculate the words/placement allowed by the rules of the game
# valid_across_words_crosses, valid_column_words_crosses = valid_words_whole2(
# board, transpose_board, hand_letters
# )
# # create a dictionary of each of these words along with their score
# across_score_dict = {}
# column_score_dict = {}
# for word_crosses in valid_across_words_crosses:
# word = word_crosses[0]
# score = total_score(
# board, word_crosses, letter_multiplier_array, word_multiplier_array
# )
# across_score_dict[tuple(word)] = score
# for word_crosses in valid_column_words_crosses:
# column_word = word_crosses[0]
# column_score = total_score(
# transpose_board,
# word_crosses,
# letter_multiplier_array,
# word_multiplier_array,
# )
# column_score_dict[tuple(column_word)] = column_score
# return across_score_dict, column_score_dict
# def calculate_word_score(self, word, y, x, letter_multipliers_array, word_multipliers_array):
# letter_multipliers = np.copy(
# self.LETTER_MULTIPLIERS_ARRAY[y, x : x + len(word)]
# )
# word_multipliers = np.copy(self.WORD_MULTIPLIERS_ARRAY[y, x : x + len(word)])
# scores = [self.TILE_SCORES[l] for l in word]
# word_score = np.dot(letter_multipliers, scores)
# word_score = word_score * np.prod(word_multipliers)
# return word_score
# +
# for edge row, I could find permutations of the surrounding area including spaces
# make a check surrroundings function for both axes
# -
# Notebook scratch cells: exercise the Scrabbler class interactively.
# ``board_array`` and ``sample_board_letters`` are defined in earlier cells.
sam = Scrabbler(board_array, sample_board_letters)
cw, _, _ = sam.complete_word_y_x(1, 1, ("a"))
samcross = sam.find_crosses("abcdfe", 12, 9)
# sam.edge_perms("s", 10, 7)
# {i for i in zip(*samcross)}
# valid_plays = {}
# for i in samcross:
#     valid_plays["a"] = 1 # ["b"][["c", "d"][0]] = ["e", "f"][1:3]
samcross
# +
# Full pipeline smoke test: candidate words -> plays -> flattened dict.
sam = Scrabbler(board_array, sample_board_letters)
# samperms = sam.find_valid_perms()
# sam.find_edge_hooks(sam.valid_plays(sam.find_valid_perms()))
# todo edge hooks off by 1
# dict(sam.valid_plays(samperms))
# [i for i in sam.valid_plays()]
# [i for i in sam.hand_letter_permutations(1)][0]
# edge_hooks = sam.find_edge_hooks(sam.valid_plays(sam.find_valid_perms()))
# sam.board = sam.T_board
# edge_words = sam.valid_edge_words(edge_hooks)
# edge_plays = sam.valid_plays(edge_words)
# sam.unpack_dictionary(edge_plays)
valid_plays = sam.valid_plays(sam.find_valid_perms())
sam.unpack_dictionary(valid_plays)
# +
# [i for i in sam.valid_plays()]
# [i for i in sam.valid_plays()["ea"]]
# sam.valid_plays()["ea"]
# sam.valid_plays()["ea"]
# -
# Edge-word smoke test with hand-picked hook tiles.
sam = Scrabbler(board_array, sample_board_letters)
# sam.valid_plays()
# sam.board_mask
# the x's and y's are transposed here in the hook letters like they are automatically
# in the valid edge words method
vew = sam.valid_edge_words([["e", 4, 10], ["d", 9, 0], ["s", 9, 8]])
sam.unpack_dictionary(sam.valid_plays(vew))
sam.all_board_words()
# +
#current structure:
# {'mainword1': {
#     (main1_y1,main1_x1): {
#         'cross_word1': [(cross1_x1, cross1_y1), (cross1_x2, cross1_y2)],
#         'cross_word2': [(cross2_x1, cross2_y1),...]},
#     (main1_y2,main1_x2): {...}},
#  'main_word2':{similar to before}}
#hoped for:
# {['mainword1', main1_y1,main1_x1]:(['cross_word1', cross1_x1, cross1_y1],
#   ['cross_word1', cross1_x2, cross1_y22],
#   ['cross_word2', cross2_x1, cross2_y1]),
#  ['mainword2', main2_y1,main2_x1]: similar to above
# }
# NOTE(review): commented out -- these illustrative literals reference
# undefined names (and the "hoped for" version uses lists as dict keys),
# so executing this cell raised NameError/TypeError at runtime.
# +
# Step-by-step walk through the all_board_words pipeline for debugging.
sam = Scrabbler(board_array, sample_board_letters)
valid_row_perms = sam.find_valid_perms()
valid_row_plays = sam.valid_plays(valid_row_perms)
# find the edge hooks
edge_hooks_for_columns = sam.find_edge_hooks(valid_row_plays)
# transpose
sam.board = sam.board.transpose()
# find the valid words (column words)
valid_column_perms = sam.find_valid_perms()
valid_column_plays = sam.valid_plays(valid_column_perms)
# find which edge words are valid from above
valid_column_edge_perms = sam.valid_edge_words(edge_hooks_for_columns)
valid_column_edge_plays = sam.valid_plays(valid_column_edge_perms)
# find the edge hooks for the rows
# edge_hooks_for_rows=
sam.unpack_dictionary(valid_row_plays)
valid_row_plays
# +
# edge plays appears to be what's broken
# but it seems mostly right here. A few repeated crossing words. Could get fixed with a set
# Perhaps I'm not doing transpose at the right time in the main function
# sam = Scrabbler(board_array, sample_board_letters)
# samperms = sam.find_valid_perms()
# sam.find_edge_hooks(sam.valid_plays(sam.find_valid_perms()))
# todo edge hooks off by 1
# dict(sam.valid_plays(samperms))
# [i for i in sam.valid_plays()]
# [i for i in sam.hand_letter_permutations(1)][0]
# sam.board = sam.board.transpose()
# edge_hooks = sam.find_edge_hooks(sam.valid_plays(sam.find_valid_perms()))
# sam.board = sam.board.transpose()
# edge_words = sam.valid_edge_words(edge_hooks)
# edge_plays = sam.valid_plays(edge_words)
# sam.unpack_dictionary(edge_plays)
# -
# Scoring smoke test plus assorted dict/defaultdict experiments.
sam = Scrabbler(board_array, sample_board_letters)
# row_words,column_words=sam.all_board_words()
sam.valid_score_choices()
sam.top_k_words(5)
sam.total_score(("sen", (0, 10)), (("na", (12, 0)), ("so", (10, 0))))
# Pretty-print the board (and its transpose) with spaces for empty cells.
print(np.where(board_array == "", " ", board_array))
print(
    np.where(
        np.transpose(board_array.copy()) == "", " ", np.transpose(board_array.copy())
    )
)
sam.complete_word_y_x(11, 6, ["a", "b", "c"])
a = sam.row_permutations(7)
# output:
# {'dfdf':[[3,4]], 'dfkdf':[[5,7]],...}
# l = [[4, 3]]
# l.append(a["daosyews"])
# l
# [i for i in a.values()]
a["yewsatosd"]
"et" in sam.WORDSET
# +
# Dict-nesting experiments (scratch).
a = {"a": 1, "b": 2, "c": 3}
a["a"] = {a["a"]: 2}
a
b = defaultdict(dict)
b["a"]["b"] = [1, 2]
b["a"]["c"] = [1, 2]
# b["r"] = {1: 2}
dict(b)
# +
# Merging two dicts into lists of values per key (scratch).
d1 = {1: 2, 3: 4}
d2 = {1: 6, 3: 7}
dd = defaultdict(list)
for d in (d1, d2):  # you can list as many input dicts as you want here
    for key, value in d.items():
        dd[key].append(value)
print(dict(dd).values())
# +
class Vividict(dict):
    """A dict that transparently creates nested Vividicts on missing keys."""

    def __missing__(self, key):
        # Auto-vivify: insert a fresh empty instance and hand it back so
        # chained lookups like d[a][b] = v work without pre-declaring d[a].
        child = type(self)()
        self[key] = child
        return child
# Try out the auto-vivifying dict defined above.
myDict = Vividict()
myDict[2000]["hello"] = [2, 3]
myDict
# +
# Same nesting behaviour using stdlib defaultdicts.
mydict2 = defaultdict(lambda: defaultdict(dict))
mydict2["a"][3, 2]["b"] = [2, 3]
mydict2["a"][3, 2]["c"] = [4, 3]
mydict2
# +
# Membership experiments on the word->positions dict shape.
dicitonary = {
    "agios": [[10, 1], [10, 1]],
    "taos": [[8, 1], [10, 5]],
    "ratos": [[8, 0], [10, 5]],
    "rotos": [[8, 0]],
    "roost": [[8, 1]],
    "roosa": [[8, 1]],
    "taros": [[8, 0], [10, 5]],
    "toros": [[8, 0]],
}
"agios" in dicitonary
("rotos", [[8, 0]]) in dicitonary.items()
[10, 1] in [[10, 1]]
# -
# First, update the hook letters:
# NOTE(review): orphaned fragment copied out of row_permutations -- it
# references names (complete_word, perm_len, hand_perm, x, y, self) that do
# not exist at module level, so executing this cell failed.  Kept here,
# commented out, as a reminder to fold hook collection back into the class:
# if (
#     len(complete_word) == perm_len + 1
# ):  # TODO: could also say len(hand_perm)==1
#     # I'm transposing x and y here
#     hook_with_position = [hand_perm[0], x, y]
#     self.hook_letters_global.append(
#         hook_with_position
#     )
#debugging
def filter_all_crosses(self, valid_perms):
    """Debug helper: return every dictionary-valid crossing word over all plays.

    For each candidate placement in ``valid_perms`` (``{word: [[y, x], ...]}``),
    gathers the crossing words reported by ``find_crosses`` and intersects
    them with ``self.WORDSET``.
    """
    all_crosses = set()
    for valid_perm in valid_perms:
        for potential_position in valid_perms[valid_perm]:
            y = potential_position[0]
            x = potential_position[1]
            crosses_with_position = self.find_crosses(valid_perm, y, x)
            # find_crosses returns a *set* of (word, y, x) triples; the old
            # code indexed it with [0], which raises TypeError on a set.
            # Collect the word component of every triple instead.
            for cross in crosses_with_position:
                all_crosses.add(cross[0])
    return self.WORDSET.intersection(all_crosses)
rowj=[' ', 'z', ' ', 'r', ' ', 'j', 'e', 's', 't', ' ', ' ', ' ', ' ', 'v', 'u']
# Pseudocode sketch (not runnable Python -- kept as a comment so the cell parses):
# for i in range(0, 7):
#     l = number of letters before hitting 7 spaces
#     row_slice = rowj[x:x + i + l]
sliced=['r', ' ', 'j', 'e', 's', 't', ' ', ' ', ' ', ' ']
joinedslice="".join(sliced)
joinedslice
| WWF_Project_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # World Marriage Network
# This network consists of all marriages and shows how dynasties tend to cluster. This network is fairly large with almost 10000 nodes, some cells such as the Eigenvector and Betweeness calculations for the centrality stats can take a while to run.
from pymongo import MongoClient
import pandas as pd
import datetime
# ### Get all members of Irish Dynasties
# Join every character to its dynasty and pull out spouse references.
client = MongoClient()
characters = client.ck2.characters
pipeline = [
    {
        "$lookup" :
        {
            "from" : "dynasties",
            "localField" : "dnt",
            "foreignField" : "_id",
            "as" : "dynasty"
        }
    },
    {
        "$unwind" : "$dynasty"
    },
    #{
    #    "$match" : { "$or" : [{"dynasty.culture" : "irish"}, {"cul" : "irish"}], "spouse" : {"$exists" : True }}
    #},
    {
        "$project" : {"_id": 1, "spouse" : 1}
    }
]
chars = characters.aggregate(pipeline)
# +
# Accumulate every character id plus every spouse id into one set.
spouses = set() #set of all Irish dynastic members and their spouses
for char in chars:
    spouses.add(char["_id"])
    if 'spouse' in char.keys():
        for spouse in char['spouse']:
            spouses.add(spouse)
# -
# ### Return Edge List of all Irish Dynastic Characters and their spouses
# Build the marriage edge list: each row pairs a character's dynasty with
# the spouse's dynasty (both resolved via $lookup into the dynasties
# collection).
pipeline = [
    {
        "$lookup" :
        {
            "from" : "dynasties",
            "localField" : "dnt",
            "foreignField" : "_id",
            "as" : "dynasty"
        }
    },
    {
        "$unwind" : "$dynasty"
    },
    {
        "$match" : {"_id" : {"$in" : list(spouses)}}
    },
    {
        "$unwind" : "$spouse"
    },
    {
        "$lookup" :
        {
            "from" : "characters",
            "localField" : "spouse",
            "foreignField" : "_id",
            "as" : "spouse_data"
        }
    },
    {
        "$unwind" : "$spouse_data"
    },
    {
        "$lookup" :
        {
            "from" : "dynasties",
            "localField" : "spouse_data.dnt",
            "foreignField" : "_id",
            "as" : "spouse_dyn"
        }
    },
    {
        "$unwind" : "$spouse_dyn"
    },
    {
        "$project" : {"_id": 1, "dynasty" : "$dynasty._id", "name" : "$dynasty.name", "culture" : "$dynasty.culture", "religion" : "$dynasty.religion",
                      "spouse_id" : "$spouse_data._id", "spouse_dynasty" : "$spouse_dyn._id", "spouse_dynasty_name" : "$spouse_dyn.name",
                      "spouse_dyn_cul" : "$spouse_dyn.culture" }
    }
]
chars = characters.aggregate(pipeline)
chars_df = pd.DataFrame(list(chars))
# ## Get all Dynasties involved
# Union of all dynasty ids appearing on either side of a marriage.
total_dyns = set(chars_df['dynasty'].unique())
total_dyns = total_dyns.union(set(chars_df['spouse_dynasty'].unique()))
total_dyns_as_ints = [int(i) for i in list(total_dyns)]
# +
# Fetch name/culture/religion for just those dynasties, sorted by name.
dynasties = client.ck2.dynasties
pipeline = [
    {
        "$match" : {"_id" : {"$in" : total_dyns_as_ints}}
    },
    {
        "$project" : {"name" : 1, "culture" : 1, "religion" : 1}
    },
    {
        "$sort" : {"name" : 1}
    }
]
# -
dyns = dynasties.aggregate(pipeline)
# # Build a Network Graph
import networkx as nx
import matplotlib.pyplot as plt
# +
# One node per dynasty, attributed with name/culture/religion.
G = nx.Graph()
for dyn in dyns:
    if "name" in dyn.keys() and "culture" in dyn.keys() and "religion" in dyn.keys():
        G.add_node(dyn["_id"], name = dyn['name'], culture = dyn['culture'], religion = dyn['religion'])
# +
# One weighted edge per marriage between two dynasties; complete_set tracks
# (character, spouse) pairs so each marriage is only counted once.
complete_set = set()
for i in range(len(chars_df)):
    #if(chars_df.loc[i, "culture"] == "irish" or chars_df.loc[i, "spouse_dyn_cul"] == "irish"): #One of the cultures is irish
    if( (chars_df.loc[i, "_id"], chars_df.loc[i, "spouse_id"]) not in complete_set): #if it hasn't be set already
        if G.has_edge(chars_df.loc[i, "dynasty"], chars_df.loc[i, "spouse_dynasty"]):
            # NOTE(review): G.edge[...] is the networkx 1.x API; in 2.x this
            # would be G[u][v]["weight"] -- confirm the pinned nx version.
            G.edge[chars_df.loc[i, "dynasty"]][chars_df.loc[i, "spouse_dynasty"]]["weight"] +=1
        else:
            G.add_edge(chars_df.loc[i, "dynasty"], chars_df.loc[i, "spouse_dynasty"], weight = 1)
        complete_set.add( (chars_df.loc[i, "spouse_id"], chars_df.loc[i, "_id"]) )
G.remove_nodes_from(nx.isolates(G)) #drop unconnected nodes
# -
# Export the largest connected component for visualisation in Gephi.
# NOTE(review): connected_component_subgraphs was removed in networkx 2.4;
# if upgrading, use G.subgraph(max(nx.connected_components(G), key=len)).
nx.write_graphml(max(nx.connected_component_subgraphs(G), key=len), "ck2-World-Marrige-Network.graphml")
# The graphml file in the code above was opened in Gephi and the picture below was generated. The nodes are colored by culture. India is in the top right, the light blue nodes are Greek and in the bottom left is Europe. While it would be expected that dynasties would marry in a way that would cluster around culture and religion it is interesting to see how Italy, the dark purple color on the left, in between the Greek blue and Saxon pink, have managed to form a cluster of their own while the rest of Europe is closer together.
from IPython.display import Image
from IPython.core.display import HTML
# Render the pre-generated Gephi visualisation of the marriage network.
Image(url= "http://www.anquantarbuile.com/static/images/ck2/WorldMarriageNetworkCulture.png")
# # Centrality Measures
# Return stats of graph (degree, centrality etc)
def get_graph_stats(graph, by_col = ''):
    """Build a per-node statistics table for *graph*.

    Computes degree plus five centrality measures (degree, closeness,
    betweenness, eigenvector, PageRank) and merges them into a single
    DataFrame keyed by node id (column ``Name``).

    :param graph: a networkx graph
    :param by_col: optional column name to sort the result by (descending)
    :return: pandas DataFrame, one row per node, one column per measure
    """
    def _metric_frame(metric_dict, col_name):
        # One two-column frame per metric: node id ('Name') and its value.
        frame = pd.DataFrame.from_dict(metric_dict, orient = 'index').reset_index()
        frame.rename(columns={'index': 'Name', 0: col_name}, inplace=True)
        return frame

    # (metric mapping, column label) pairs, in the original column order.
    # NOTE(review): `graph.degree(graph)` returns a dict in NetworkX 1.x
    # (iterating the graph yields its nodes); 2.x returns a DegreeView —
    # confirm the pinned version before upgrading.
    metrics = [
        (graph.degree(graph), 'Degree'),
        (nx.degree_centrality(graph), 'Deg Cent'),
        (nx.closeness_centrality(graph), 'Close Cent'),
        (nx.betweenness_centrality(graph), 'Betw Cent'),
        (nx.eigenvector_centrality(graph), 'Eigenvector'),
        (nx.pagerank(graph), 'PageRank'),
    ]
    stats_df = _metric_frame(*metrics[0])
    for metric_dict, col_name in metrics[1:]:
        # Inner merge on the node id, exactly as the original chained merges did.
        stats_df = pd.merge(stats_df, _metric_frame(metric_dict, col_name), on = 'Name')
    if by_col != '':
        stats_df = stats_df.sort_values(by = by_col, ascending = False).reset_index(drop = True)
    return stats_df
# MongoDB projection: id, name, culture and religion for every dynasty.
pipeline = [
    {
        "$project" : {"_id" : "$_id", "name" : "$name", "culture" : "$culture", "religion" : "$religion"}
    }
]
dynasties = client.ck2.dynasties  # NOTE(review): `client` is the pymongo client created earlier in the notebook
dyn_list = dynasties.aggregate(pipeline)
dyn_df = pd.DataFrame(list(dyn_list))
# Join the dynasty metadata onto the per-node graph statistics.
stats = get_graph_stats(G)
comb_stats = dyn_df.merge(stats, left_on='_id', right_on='Name', how='outer')
comb_stats = comb_stats.dropna(axis=0, how='any')  # keep only dynasties present in both tables
comb_stats = comb_stats.drop(["Name"], axis = 1)  # 'Name' duplicates '_id' after the merge
comb_stats.sort_values(by=["PageRank"], ascending=False).head(10)
comb_stats.to_csv('CK2-Marrige-Network-stats.csv', index=False)
| notebooks/5. World Marriage Network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ryanmaullon/Numerical-Methods-58012/blob/main/Control_and_Loop_Statement.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="fcXA5oWMq7wK"
# ##If statement
#
# + colab={"base_uri": "https://localhost:8080/"} id="Xml9rE0Tqt_j" outputId="a385a4e6-7391-4590-f71a-b5abcf81ad43"
# b > a, so the message prints.
a=12
b=100
if b>a:
    print("b is greater than a")
# + [markdown] id="HmHNSBVHq-U0"
# ##If.. Elif Statement
#
# + colab={"base_uri": "https://localhost:8080/"} id="sjhPsCArrAVR" outputId="7ade7ec6-92b5-4362-fcdb-ac4fe6d2717f"
a = 100
b = 10
if b>a:
    print ("b is greater than a")
elif a==b:
    print("a and b are equal")
else:
    print("a is greater than b")
# + [markdown] id="BSLOk6G8rIK6"
# ##Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="8XloLyYHrJgs" outputId="b8516d7f-dd2f-4886-9631-88fa181456f6"
a = 100
b = 10
if b>a:
    print ("b is greater than a")
elif a==b:
    print ("a and b are equal")
else:
    # Falls through here: a (100) > b (10).
    print ("a is greater than b")
# + [markdown] id="6Sl0aM0NrniZ"
# ##Shorthand If statement
#
# + colab={"base_uri": "https://localhost:8080/"} id="C0_f0Yacrph5" outputId="f1271228-0ec9-46d6-9ae3-c527bbdf0beb"
# Single-statement body may share the line with the condition.
if a>b: print("a is greater than b")
# + [markdown] id="zuCok0V1r3Yr"
# ##Short Hand If.. Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="5_N4wNhVr2yK" outputId="f3631d8f-fe8a-4942-a741-c2b15c77577d"
# Conditional (ternary) expression form.
print("b is greater than a") if b>a else print ("a is greater than b")
# + [markdown] id="22V3K0RfsIzX"
# ##And - if both conditions are true
# + colab={"base_uri": "https://localhost:8080/"} id="KbtZt-B1sZ4n" outputId="af632b06-a459-46da-8e47-e2896a3acb0e"
a = 100
b = 10
c = 45
if a>b and c>b:
    print("Both conditions are true")
# + [markdown] id="IXAQ0zMWsJTF"
# ##Or - if one of the conditions is true
# + colab={"base_uri": "https://localhost:8080/"} id="hybpaUrZstph" outputId="a7ba568c-814f-4ef5-810e-c5631d6e83cf"
if a>b or b>c:
    print("One condition is true")
# + [markdown] id="3fqkd2BRtAOU"
# ##Nested If
# + colab={"base_uri": "https://localhost:8080/"} id="ODZNoW0LtBsV" outputId="0ecc5483-a52d-4638-c402-d0998c51471b"
# Each inner `if` only runs once the outer threshold has passed.
x=int(input())
if x>10:
    print("x is above 10")
    if x>20:
        print("x is above 20")
        if x>30:
            print("x is above 30")
            if x>40:
                print("x is above 40")
            else:
                print("x is below 40")
else:
    print("x is below 10")
# + [markdown] id="cPK0vkWCuuKw"
# ##Application 1
# + colab={"base_uri": "https://localhost:8080/"} id="CgA2qDgduvvm" outputId="96664bb4-5a5b-4f40-902e-2d9d2ce2f8e2"
# Voting-age check.
age = int(input())
if age>=18:
    print("You are qualified to vote")
else:
    print("You are not qualified to vote")
# + [markdown] id="Z0WFFmXxvLQg"
# ##Application 2
#
# + colab={"base_uri": "https://localhost:8080/"} id="7KruYIsKvMhO" outputId="4eb01699-0e70-443a-98d4-ead12fb7e531"
# Sign of an integer.
num = int(input())
if num==0:
    print ("ZERO")
elif num>0:
    print ("POSITIVE")
elif num<0:
    print("NEGATIVE")
# + [markdown] id="4fMvu5piv4C4"
# ##Application 3 - Write a program to determine if the grades are:
# grade >= 70 "Passed"
#
# grade = 64 to 69 "Remedial"
#
# grade <64 "Failed"
# + colab={"base_uri": "https://localhost:8080/"} id="i6Rz-K6Qv5ed" outputId="b29aa616-ac3c-48b9-e152-88fd3e91563f"
grade = float(input())
if grade >= 70:
    print ("Passed")
elif grade >= 64 and grade <= 69:
    print ("Remedial")
else:
    print ("Failed")
# + [markdown] id="42qRMfFaw6LD"
# ##For loop
# + colab={"base_uri": "https://localhost:8080/"} id="1q79fISkxASi" outputId="1763224f-d60f-4895-8461-ddb2e28e1225"
week=["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
for x in week:
    print(x)
# + [markdown] id="nYUFY5QOxZwY"
# ##The break statement
# + colab={"base_uri": "https://localhost:8080/"} id="Lwbzi_gtxbHl" outputId="d942a18e-139e-4aef-d4b8-13133533d0d5"
# Prints through "Friday" inclusive, then stops.
for x in week:
    print (x)
    if x=="Friday":
        break
# + [markdown] id="TMxOO8rTxu2a"
# ##Looping through a string
# + colab={"base_uri": "https://localhost:8080/"} id="M0uQIwkKxwz5" outputId="56e17db2-7a04-4950-8939-7c8a757e70a5"
# A string iterates character by character.
for x in "Week":
    print(x)
# + [markdown] id="7ADfj10zyVnC"
# ##Range() function
# + colab={"base_uri": "https://localhost:8080/"} id="97olSIzGyXdz" outputId="aed76e95-9c33-43f2-a532-e2c708be27c4"
for x in range(6):
    print (x)
for x in range(2,6):
    print(x)
# + [markdown] id="KywWxhNSzAyt"
# ##While loop
# + colab={"base_uri": "https://localhost:8080/"} id="lGlng6LIzB_g" outputId="fd3e44da-9771-403a-ee23-f5e64a532ee8"
i = 1
while i<6:
    print(i)
    i+= 1 #same as i = i+1
# + colab={"base_uri": "https://localhost:8080/"} id="Zp8i9ZeE0Koy" outputId="8b78d32f-2c69-4037-9397-57d6e30682fb"
# break leaves the loop as soon as i reaches 3 (prints 1, 2, 3).
i =1
while i<6:
    print(i)
    if i ==3:
        break
    i+=1
# + [markdown] id="cb3-a4cWz46k"
# ##Continue Statement
# + colab={"base_uri": "https://localhost:8080/"} id="SiSq4H4Zz6jW" outputId="c62dbed5-d572-44b4-81e1-943a85e65581"
# continue skips the print for i == 3 (prints 2, 4, 5, 6).
i =1
while i<6:
    i+=1
    if i==3:
        continue
    print(i)
| Control_and_Loop_Statement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TrishaCueno/OOP-1-1/blob/main/GUI.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="u98p5Y_fs37S"
# Students' Grade in OOP Subject
# + colab={"base_uri": "https://localhost:8080/"} id="9BjiArGNsyDT" outputId="87e56360-ef55-4fdb-dc5a-54316cf014b4"
#@title Students' Grade in OOP
# Colab form fields (#@param) supply the values below.
student_name = "<NAME>" #@param {type: "string"}
prelim = 95#@param {type: "integer"}
midterm = 98#@param {type: "integer"}
final = 100#@param {type: "integer"}
# Semestral grade is the plain average of the three term grades.
semestral_grade = ((prelim + midterm + final)/3)
print("Students' Grade in OOP")
print("Student:", student_name)
print("Prelim Grade:", prelim)
print("Midterm Grade:", midterm)
print("Final Grade:", final)
print("Average:", round(semestral_grade, 2))
# + colab={"base_uri": "https://localhost:8080/"} id="VXohHXJYviDr" outputId="82d36084-11e3-4b99-adb8-8308903cc524"
# Dropdown and date form fields.
gender = "Female" #@param ["Male", "Female"] {type: "raw"}
birthdate = "2003-04-02" #@param {type: "date"}
print("Gender:", gender)
print("Birthdate:", birthdate)
| GUI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NanoEvents tutorial
#
# This is a rendered copy of [nanoevents.ipynb](https://github.com/CoffeaTeam/coffea/blob/master/binder/nanoevents.ipynb). You can optionally run it interactively on [binder at this link](https://mybinder.org/v2/gh/coffeateam/coffea/master?filepath=binder%2Fnanoevents.ipynb)
#
# NanoEvents is a Coffea utility to wrap flat nTuple structures (such as the CMS [NanoAOD](https://www.epj-conferences.org/articles/epjconf/pdf/2019/19/epjconf_chep2018_06021.pdf) format) into a single awkward array with appropriate object methods (such as [Lorentz vector methods](https://coffeateam.github.io/coffea/modules/coffea.nanoevents.methods.vector.html)), cross references, and nested objects, all lazily accessed from the source ROOT TTree via uproot. The interpretation of the TTree data is configurable via schema objects, which are community-supplied interpretations for various source file types. These schema objects allow a richer interpretation of the file contents than the [uproot.lazy](https://uproot4.readthedocs.io/en/latest/uproot4.behaviors.TBranch.lazy.html) method.
#
# In this demo, we will use NanoEvents to read a small CMS NanoAOD sample. The events object can be instantiated as follows:
# +
import awkward1 as ak
from coffea.nanoevents import NanoEventsFactory, NanoAODSchema

# Small CMS NanoAOD Drell-Yan test sample shipped with the coffea repository.
fname = "https://raw.githubusercontent.com/CoffeaTeam/coffea/master/tests/samples/nano_dy.root"
# Open lazily via uproot; NanoAODSchema groups flat branches into collections.
events = NanoEventsFactory.from_file(fname, schemaclass=NanoAODSchema).events()
# -
# Consider looking at the [from_file](https://coffeateam.github.io/coffea/api/coffea.nanoevents.NanoEventsFactory.html#coffea.nanoevents.NanoEventsFactory.from_file) class method to see the optional arguments.
#
# The `events` object is an awkward array, which at its top level is a record array with one record for each "collection", where a collection is a grouping of fields (TBranches) based on the naming conventions of [NanoAODSchema](https://coffeateam.github.io/coffea/api/coffea.nanoevents.NanoAODSchema.html). For example, in the file we opened, the branches:
# ```
# Generator_binvar
# Generator_scalePDF
# Generator_weight
# Generator_x1
# Generator_x2
# Generator_xpdf1
# Generator_xpdf2
# Generator_id1
# Generator_id2
# ```
# are grouped into one sub-record named `Generator` which can be accessed using either getitem or getattr syntax, i.e. `events["Generator"]` or `events.Generator`. e.g.
# Getattr access to one field of the Generator collection.
events.Generator.id1
# all names can be listed with:
events.Generator.fields
# In CMS NanoAOD, each TBranch has a help string, which is carried into the NanoEvents, e.g. executing the following cell should produce a help pop-up:
# ```
# Type: Array
# String form: [1, -1, -1, 21, 21, 4, 2, -2, 2, 1, 3, 1, ... -1, -1, 1, -2, 2, 1, 2, -2, -1, 2, 1]
# Length: 40
# File: ~/src/awkward-1.0/awkward1/highlevel.py
# Docstring: id of first parton
# Class docstring: ...
# ```
# where the `Docstring` shows information about the content of this array.
# +
# events.Generator.id1?
# -
# Based on a collection's name or contents, some collections acquire additional _methods_, which are extra features exposed by the code in the mixin classes of the `coffea.nanoevents.methods` modules. For example, although `events.GenJet` has the fields:
events.GenJet.fields
# we can access additional attributes associated to each generated jet by virtue of the fact that they can be interpreted as [Lorentz vectors](https://coffeateam.github.io/coffea/api/coffea.nanoevents.methods.vector.LorentzVector.html#coffea.nanoevents.methods.vector.LorentzVector):
events.GenJet.energy
# We can call more complex methods, like computing the distance $\Delta R = \sqrt{\Delta \eta^2 + \Delta \phi ^2}$ between two LorentzVector objects:
# find distance between leading jet and all electrons in each event
dr = events.Jet[:, 0].delta_r(events.Electron)
dr
# find minimum distance
ak.min(dr, axis=1)
# The assignment of methods classes to collections is done inside the schema object during the initial creation of the array, governed by the awkward array's `__record__` parameter and the associated behavior. See [ak.behavior](https://awkward-array.readthedocs.io/en/latest/ak.behavior.html) for a more detailed explanation of array behaviors.
#
# Additional methods provide convenience functions for interpreting some branches, e.g. CMS NanoAOD packs several jet identification flag bits into a single integer, `jetId`. By implementing the bit-twiddling in the [Jet mixin](https://github.com/CoffeaTeam/coffea/blob/7045c06b9448d2be4315e65d432e6d8bd117d6d7/coffea/nanoevents/methods/nanoaod.py#L279-L282), the analysis code becomes more clear:
print(events.Jet.jetId)
print(events.Jet.isTight)
# We can also define convenience functions to unpack and apply some mask to a set of flags, e.g. for generated particles:
print(f"Raw status flags: {events.GenPart.statusFlags}")
events.GenPart.hasFlags(['isPrompt', 'isLastCopy'])
# CMS NanoAOD also contains pre-computed cross-references for some types of collections. For example, there is a TBranch `Electron_genPartIdx` which indexes the `GenPart` collection per event to give the matched generated particle, and `-1` if no match is found. NanoEvents transforms these indices into an awkward _indexed array_ pointing to the collection, so that one can directly access the matched particle using getattr syntax:
events.Electron.matched_gen.pdgId
events.Muon.matched_jet.pt
# For generated particles, the parent index is similarly mapped:
events.GenPart.parent.pdgId
# In addition, using the parent index, a helper method computes the inverse mapping, namely, `children`. As such, one can find particle siblings with:
events.GenPart.parent.children.pdgId
# notice this is a doubly-jagged array
# Since often one wants to shortcut repeated particles in a decay sequence, a helper method `distinctParent` is also available. Here we use it to find the parent particle ID for all prompt electrons:
events.GenPart[
    (abs(events.GenPart.pdgId) == 11)
    & events.GenPart.hasFlags(['isPrompt', 'isLastCopy'])
].distinctParent.pdgId
# Events can be filtered like any other awkward array using boolean fancy-indexing
mmevents = events[ak.num(events.Muon) == 2]
# Dimuon invariant mass via LorentzVector addition.
zmm = mmevents.Muon[:, 0] + mmevents.Muon[:, 1]
zmm.mass
# One can assign new variables to the arrays, with some caveats:
#
# * Assignment must use setitem (`events["path", "to", "name"] = value`)
# * Assignment to a sliced `events` won't be accessible from the original variable
# * New variables are not visible from cross-references
mmevents["Electron", "myvar2"] = mmevents.Electron.pt + zmm.mass
mmevents.Electron.myvar2
# ## Using NanoEvents with a processor
#
# NanoEvents can also be used inside a coffea [processor](https://coffeateam.github.io/coffea/api/coffea.processor.ProcessorABC.html#coffea.processor.ProcessorABC), as shown in this simple Z peak sketch below. To use NanoEvents with `run_uproot_job`, pass the appropriate schema as an executor argument, e.g. `"schema": NanoAODSchema` for this example. The dataset name is included in the `events` object under the `metadata` attribute.
# +
from coffea import processor, hist

class MyZPeak(processor.ProcessorABC):
    """Minimal coffea processor sketching a Z->mumu invariant-mass histogram."""

    def __init__(self):
        # One histogram, categorized by dataset name, binned in dimuon mass.
        self._histo = hist.Hist(
            "Events",
            hist.Cat("dataset", "Dataset"),
            hist.Bin("mass", "Z mass", 60, 60, 120),
        )

    @property
    def accumulator(self):
        # The accumulator the executor merges across chunks.
        return self._histo

    # we will receive a NanoEvents instead of a coffea DataFrame
    def process(self, events):
        out = self.accumulator.identity()
        # Keep exactly-two-muon events with net charge zero.
        mmevents = events[
            (ak.num(events.Muon) == 2)
            & (ak.sum(events.Muon.charge, axis=1) == 0)
        ]
        # Dimuon four-vector sum -> candidate Z mass.
        zmm = mmevents.Muon[:, 0] + mmevents.Muon[:, 1]
        out.fill(
            dataset=events.metadata["dataset"],
            mass=zmm.mass,
        )
        return out

    def postprocess(self, accumulator):
        # Nothing to finalize; the merged histogram is the result.
        return accumulator
# +
# Map dataset name -> list of input files.
samples = {
    "DrellYan": [fname]
}

result = processor.run_uproot_job(
    samples,
    "Events",
    MyZPeak(),
    processor.iterative_executor,
    {"schema": NanoAODSchema},  # tells the executor to hand NanoEvents to process()
)
# +
# %matplotlib inline
hist.plot1d(result)
| binder/nanoevents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2

# Load the image, preserving any alpha channel.
img = cv2.imread('./examples/image01.png', cv2.IMREAD_UNCHANGED)
img.shape

scale_percent = 35 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)

# resize image (INTER_AREA is the recommended interpolation when shrinking)
resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
resized.shape

# BUG FIX: the original called plt.imshow("Resized image", resized), which
# matches neither the matplotlib signature (imshow(image)) nor OpenCV's, and
# `plt` was never imported. Use OpenCV's (window_title, image) display instead.
cv2.imshow("Resized image", resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# ## AND Perceptron
# +
import pandas as pd

# TODO: Set weight1, weight2, and bias
weight1 = 0.5
weight2 = 1.5
bias = -1.9

# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []

# Generate and check output
for (in_a, in_b), expected in zip(test_inputs, correct_outputs):
    # Weighted sum feeds a step activation: fire iff the sum is non-negative.
    linear_combination = weight1 * in_a + weight2 * in_b + bias
    output = 1 if linear_combination >= 0 else 0
    is_correct_string = 'Yes' if output == expected else 'No'
    outputs.append([in_a, in_b, linear_combination, output, is_correct_string])

# Print output
num_wrong = sum(1 for row in outputs if row[4] == 'No')
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if num_wrong:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
else:
    print('Nice! You got it all correct.\n')
print(output_frame.to_string(index=False))
# -
# ## NOT Perceptron
# +
import pandas as pd

# TODO: Set weight1, weight2, and bias
weight1 = 0.0
weight2 = -2
bias = 1

# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [True, False, True, False]
outputs = []

# Generate and check output
for (in_a, in_b), expected in zip(test_inputs, correct_outputs):
    # NOT on the second input: the first input is ignored (weight 0).
    linear_combination = weight1 * in_a + weight2 * in_b + bias
    output = 1 if linear_combination >= 0 else 0
    is_correct_string = 'Yes' if output == expected else 'No'
    outputs.append([in_a, in_b, linear_combination, output, is_correct_string])

# Print output
num_wrong = sum(1 for row in outputs if row[4] == 'No')
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if num_wrong:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
else:
    print('Nice! You got it all correct.\n')
print(output_frame.to_string(index=False))
# -
# ## Perceptron Algorithm
# +
import numpy as np

# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)

def stepFunction(t):
    """Heaviside step activation: 1 for t >= 0, else 0."""
    return 1 if t >= 0 else 0

def prediction(X, W, b):
    """Classify one point X with weight column W and bias b."""
    return stepFunction((np.matmul(X, W) + b)[0])

# TODO: Fill in the code below to implement the perceptron trick.
# The function should receive as inputs the data X, the labels y,
# the weights W (as an array), and the bias b,
# update the weights and bias W, b, according to the perceptron algorithm,
# and return W and b.
def perceptronStep(X, y, W, b, learn_rate = 0.01):
    """One full pass of the perceptron trick over the dataset."""
    for idx in range(len(X)):
        error = y[idx] - prediction(X[idx], W, b)
        if error == 1:
            # False negative: move the boundary toward the point.
            W[0] += X[idx][0] * learn_rate
            W[1] += X[idx][1] * learn_rate
            b += learn_rate
        elif error == -1:
            # False positive: move the boundary away from the point.
            W[0] -= X[idx][0] * learn_rate
            W[1] -= X[idx][1] * learn_rate
            b -= learn_rate
    return W, b

# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25):
    x_min, x_max = min(X.T[0]), max(X.T[0])
    y_min, y_max = min(X.T[1]), max(X.T[1])
    W = np.array(np.random.rand(2, 1))
    b = np.random.rand(1)[0] + x_max
    # These are the solution lines that get plotted below.
    boundary_lines = []
    for _ in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        boundary_lines.append((-W[0] / W[1], -b / W[1]))
    return boundary_lines
# -
# ## TensorFlow
# +
# NOTE(review): this whole notebook uses the TensorFlow 1.x graph/session API
# (tf.Session, tf.placeholder, tf.truncated_normal); it will not run on TF 2.x
# without tf.compat.v1 — confirm the pinned TensorFlow version.
import tensorflow as tf

# Create TensorFlow object called tensor
hello_constant = tf.constant('Hello World!')

with tf.Session() as sess:
    # Run the tf.constant operation in the session
    output = sess.run(hello_constant)
    print(output)
# -
hello_constant = tf.constant('Hello World!')
# A is a 0-dimensional int32 tensor
A = tf.constant(1234)
# B is a 1-dimensional int32 tensor
B = tf.constant([123,456,789])
# C is a 2-dimensional int32 tensor
C = tf.constant([ [123,456,789], [222,333,444] ])
with tf.Session() as sess:
    output = sess.run(hello_constant)
    print(output)
# ## # session
# +
# Placeholders are fed concrete values at run time via feed_dict.
x = tf.placeholder(tf.string)

with tf.Session() as sess:
    output = sess.run(x, feed_dict={x: 'Hello World'})

x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)

with tf.Session() as sess:
    output = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67})
# -
# ## # three formats
# NOTE(review): the three calls below are illustrative signatures only — as
# written (with no arguments) they would raise TypeError if executed.
A = tf.constant()
x = tf.Variable()
x = tf.placeholder()
## Initialize the global variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
# ## tf.truncated_normal()
n_features = 120
n_labels = 5
weights = tf.Variable(tf.truncated_normal((n_features, n_labels)))
# ## tf.zeros()
n_labels = 5
bias = tf.Variable(tf.zeros(n_labels))
# ## Quiz
# +
# Quiz Solution
import tensorflow as tf

def get_weights(n_features, n_labels):
    """
    Return TensorFlow weights
    :param n_features: Number of features
    :param n_labels: Number of labels
    :return: TensorFlow weights
    """
    # TODO: Return weights
    return tf.Variable(tf.truncated_normal((n_features, n_labels)))

def get_biases(n_labels):
    """
    Return TensorFlow bias
    :param n_labels: Number of labels
    :return: TensorFlow bias
    """
    # TODO: Return biases
    return tf.Variable(tf.zeros(n_labels))

def linear(input, w, b):
    """
    Return linear function in TensorFlow
    :param input: TensorFlow input
    :param w: TensorFlow weights
    :param b: TensorFlow biases
    :return: TensorFlow linear function
    """
    # TODO: Linear Function (xW + b)
    return tf.add(tf.matmul(input, w), b)
# +
from tensorflow.examples.tutorials.mnist import input_data

def mnist_features_labels(n_labels):
    """
    Gets the first <n> labels from the MNIST dataset
    :param n_labels: Number of labels to use
    :return: Tuple of feature list and label list
    """
    mnist_features = []
    mnist_labels = []
    mnist = input_data.read_data_sets('/datasets/ud730/mnist', one_hot=True)
    # In order to make quizzes run faster, we're only looking at 10000 images
    for mnist_feature, mnist_label in zip(*mnist.train.next_batch(10000)):
        # Add features and labels if it's for the first <n>th labels
        if mnist_label[:n_labels].any():
            mnist_features.append(mnist_feature)
            mnist_labels.append(mnist_label[:n_labels])
    return mnist_features, mnist_labels

# Number of features (28*28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3

# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)

# Weights and Biases
w = get_weights(n_features, n_labels)
b = get_biases(n_labels)

# Linear Function xW + b
logits = linear(features, w, b)

# Training data
train_features, train_labels = mnist_features_labels(n_labels)
# -
import numpy as np

# Broadcasting demo: the 1-D vector u is added to every row of the (4, 3)
# matrix t, producing another (4, 3) matrix.
t = np.arange(1, 13).reshape(4, 3)
u = np.array([1, 2, 3])
print(t + u)
# ## Softmax
import numpy as np  # needed for the quick check below (the cell re-imports it)

np.exp([2,3])  # sanity check: element-wise exponential
# +
import numpy as np

def softmax(x):
    """Compute softmax values for each sets of scores in x.

    Improvements over the naive exp(x)/sum(exp(x)) form:
    - the scores are shifted by their maximum before exponentiating, which is
      mathematically identical but avoids float overflow for large scores;
    - the exponentials are computed once and reused instead of twice.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted, axis=0)

logits = [3.0, 1.0, 0.2]
print(softmax(logits))
# -
# ## Softmax in TF
# Note: without a session run this prints the symbolic tensor, not its values.
x = tf.nn.softmax([3.0, 1.0, 0.2])
print(x)
# +
# Quiz Solution
import tensorflow as tf

def run():
    """Feed fixed logits through tf.nn.softmax and return the probabilities."""
    output = None
    logit_data = [2.0, 1.0, 0.1]
    logits = tf.placeholder(tf.float32)
    softmax = tf.nn.softmax(logits)

    with tf.Session() as sess:
        output = sess.run(softmax, feed_dict={logits: logit_data})

    return output
# -
# ## Mini batching
# +
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# Import MNIST data
mnist = input_data.read_data_sets('/datasets/ud730/mnist', one_hot=True)

# The features are already scaled and the data is shuffled
train_features = mnist.train.images
test_features = mnist.test.images

train_labels = mnist.train.labels.astype(np.float32)
test_labels = mnist.test.labels.astype(np.float32)

# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))
# -
# ## ReLU
# +
# Quiz Solution: one hidden layer with ReLU activation, linear output layer.
import tensorflow as tf

output = None
hidden_layer_weights = [
    [0.1, 0.2, 0.4],
    [0.4, 0.6, 0.6],
    [0.5, 0.9, 0.1],
    [0.8, 0.2, 0.8]]
out_weights = [
    [0.1, 0.6],
    [0.2, 0.1],
    [0.7, 0.9]]

# Weights and biases
weights = [
    tf.Variable(hidden_layer_weights),
    tf.Variable(out_weights)]
biases = [
    tf.Variable(tf.zeros(3)),
    tf.Variable(tf.zeros(2))]

# Input
features = tf.Variable([[1.0, 2.0, 3.0, 4.0], [-1.0, -2.0, -3.0, -4.0], [11.0, 12.0, 13.0, 14.0]])

# TODO: Create Model
hidden_layer = tf.add(tf.matmul(features, weights[0]), biases[0])
hidden_layer = tf.nn.relu(hidden_layer)
logits = tf.add(tf.matmul(hidden_layer, weights[1]), biases[1])

# TODO: save and print session results on variable output
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits)
    print(output)
# -
# ## Deep Neural Network in TensorFlow
# +
# Single-hidden-layer MLP on MNIST, trained with plain gradient descent.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)

import tensorflow as tf

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

n_hidden_layer = 256 # layer number of features

# Store layers weight & bias
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))}

# tf Graph input
x = tf.placeholder("float", [None, 28, 28, 1])
y = tf.placeholder("float", [None, n_classes])

# Flatten the 28x28x1 image into a 784-vector for the dense layer.
x_flat = tf.reshape(x, [-1, n_input])

# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']),\
    biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])

# Define loss and optimizer
cost = tf.reduce_mean(\
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
    .minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
# -
# ## Saving Variables
# +
import tensorflow as tf

# The file path to save the data
save_file = './model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()

with tf.Session() as sess:
    # Initialize all the Variables
    sess.run(tf.global_variables_initializer())

    # Show the values of weights and bias
    print('Weights:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))

    # Save the model
    saver.save(sess, save_file)
# -
# ## Loading Variables
# +
# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

# Class used to save and/or restore Tensor Variables
saver = tf.train.Saver()

### First start with a model
with tf.Session() as sess:
    # Load the weights and bias
    saver.restore(sess, save_file)

    # Show the values of weights and bias
    print('Weight:')
    print(sess.run(weights))
    print('Bias:')
    print(sess.run(bias))
# -
# ## Save a Trained Model
# ### First start with a model
# +
# Remove previous Tensors and Operations
tf.reset_default_graph()

from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

learning_rate = 0.001
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# Import MNIST data
mnist = input_data.read_data_sets('.', one_hot=True)

# Features and Labels
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])

# Weights & bias
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))

# Logits - xW + b
logits = tf.add(tf.matmul(features, weights), bias)

# Define loss and optimizer
cost = tf.reduce_mean(\
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
    .minimize(cost)

# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# -
# ### Let's train that model, then save the weights:
# +
import math

save_file = './train_model.ckpt'
batch_size = 128
n_epochs = 100

saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(n_epochs):
        total_batch = math.ceil(mnist.train.num_examples / batch_size)

        # Loop over all batches
        for i in range(total_batch):
            batch_features, batch_labels = mnist.train.next_batch(batch_size)
            sess.run(
                optimizer,
                feed_dict={features: batch_features, labels: batch_labels})

        # Print status for every 10 epochs
        if epoch % 10 == 0:
            valid_accuracy = sess.run(
                accuracy,
                feed_dict={
                    features: mnist.validation.images,
                    labels: mnist.validation.labels})
            print('Epoch {:<3} - Validation Accuracy: {}'.format(
                epoch,
                valid_accuracy))

    # Save the model
    saver.save(sess, save_file)
    print('Trained Model Saved.')
# -
# ### Load a Trained Model
# +
saver = tf.train.Saver()

# Launch the graph
with tf.Session() as sess:
    saver.restore(sess, save_file)

    test_accuracy = sess.run(
        accuracy,
        feed_dict={features: mnist.test.images, labels: mnist.test.labels})

print('Test Accuracy: {}'.format(test_accuracy))
# -
# ## FineTuning
# ### Loading the Weights and Biases into a New Model
# ### problem!!
# +
# Deliberate error demo: variables are re-declared in a DIFFERENT order without
# explicit names, so the auto-generated names no longer match the checkpoint
# and saver.restore fails.
import tensorflow as tf

# Remove the previous weights and bias
tf.reset_default_graph()

save_file = 'model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]))
bias = tf.Variable(tf.truncated_normal([3]))

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Save Weights: {}'.format(weights.name))
print('Save Bias: {}'.format(bias.name))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, save_file)

# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias
bias = tf.Variable(tf.truncated_normal([3]))
weights = tf.Variable(tf.truncated_normal([2, 3]))

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Load Weights: {}'.format(weights.name))
print('Load Bias: {}'.format(bias.name))

with tf.Session() as sess:
    # Load the weights and bias - ERROR
    saver.restore(sess, save_file)
# -
# ### Good example
# +
# Fix: give each variable an explicit name so declaration order is irrelevant.
import tensorflow as tf

tf.reset_default_graph()

save_file = 'model.ckpt'

# Two Tensor Variables: weights and bias
weights = tf.Variable(tf.truncated_normal([2, 3]), name='weights_0')
bias = tf.Variable(tf.truncated_normal([3]), name='bias_0')

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Save Weights: {}'.format(weights.name))
print('Save Bias: {}'.format(bias.name))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, save_file)

# Remove the previous weights and bias
tf.reset_default_graph()

# Two Variables: weights and bias
bias = tf.Variable(tf.truncated_normal([3]), name='bias_0')
weights = tf.Variable(tf.truncated_normal([2, 3]) ,name='weights_0')

saver = tf.train.Saver()

# Print the name of Weights and Bias
print('Load Weights: {}'.format(weights.name))
print('Load Bias: {}'.format(bias.name))

with tf.Session() as sess:
    # Load the weights and bias - No Error
    saver.restore(sess, save_file)

print('Loaded Weights and Bias successfully.')
# +
# NOTE: demo cell wiring up raw conv2d ops. `input` shadows the builtin but is
# kept because later cells in this notebook reuse the name.
input = tf.placeholder(tf.float32, (None, 32, 32, 3))
filter_weights = tf.Variable(tf.truncated_normal((8, 8, 3, 20)))  # (height, width, input_depth, output_depth)
# Output depth
k_output = 64
# Image Properties
image_width = 10
image_height = 10
color_channels = 3
# Convolution filter
filter_size_width = 5
filter_size_height = 5
# Input/Image (placeholder re-defined for the 10x10x3 example)
input = tf.placeholder(
    tf.float32,
    shape=[None, image_height, image_width, color_channels])
# Weight and bias
weight = tf.Variable(tf.truncated_normal(
    [filter_size_height, filter_size_width, color_channels, k_output]))
bias = tf.Variable(tf.zeros(k_output))
# Apply Convolution (stride 2 in both spatial dimensions, SAME padding)
conv_layer = tf.nn.conv2d(input, weight, strides=[1, 2, 2, 1], padding='SAME')
# Add bias
conv_layer = tf.nn.bias_add(conv_layer, bias)
# Apply activation function (fixed: original line had an unbalanced ')')
conv_layer = tf.nn.relu(conv_layer)
filter_bias = tf.Variable(tf.zeros(20))
strides = [1, 2, 2, 1] # (batch, height, width, depth)
padding = 'SAME'
# Shorthand form: conv + bias via broadcasting add
conv = tf.nn.conv2d(input, filter_weights, strides, padding) + filter_bias
# -
...
# Conv -> bias -> ReLU, then 2x2 max pooling (stride 2) halves height and width
conv_layer = tf.nn.conv2d(input, weight, strides=[1, 2, 2, 1], padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
# Apply Max Pooling
conv_layer = tf.nn.max_pool(
    conv_layer,
    ksize=[1, 2, 2, 1],
    strides=[1, 2, 2, 1],
    padding='SAME')
# Standalone pooling example: 4x4x5 input, 2x2 window, stride 2, VALID padding
input = tf.placeholder(tf.float32, (None, 4, 4, 5))
filter_shape = [1, 2, 2, 1]
strides = [1, 2, 2, 1]
padding = 'VALID'
pool = tf.nn.max_pool(input, filter_shape, strides, padding)
def conv2d(x, W, b, strides=1):
    """SAME-padded 2-D convolution followed by bias add and ReLU."""
    stride_spec = [1, strides, strides, 1]  # (batch, height, width, depth)
    conv = tf.nn.conv2d(x, W, strides=stride_spec, padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(conv, b))
def maxpool2d(x, k=2):
    """k x k max pooling with stride k and SAME padding."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def conv_net(x, weights, biases, dropout):
    """Two conv/pool stages, one dropout-regularized dense layer, then logits."""
    # Layer 1 - 28*28*1 to 14*14*32 (conv + 2x2 max-pool)
    layer1 = maxpool2d(conv2d(x, weights['wc1'], biases['bc1']), k=2)
    # Layer 2 - 14*14*32 to 7*7*64
    layer2 = maxpool2d(conv2d(layer1, weights['wc2'], biases['bc2']), k=2)
    # Flatten to match the dense weight matrix's input dimension (7*7*64)
    flat_dim = weights['wd1'].get_shape().as_list()[0]
    dense = tf.reshape(layer2, [-1, flat_dim])
    dense = tf.nn.relu(tf.add(tf.matmul(dense, weights['wd1']), biases['bd1']))
    dense = tf.nn.dropout(dense, dropout)
    # Output Layer - class prediction - 1024 to 10 (raw logits, no activation)
    return tf.add(tf.matmul(dense, weights['out']), biases['out'])
# +
# tf Graph input
# NOTE(review): assumes weights/biases dicts, n_classes, learning_rate, epochs,
# batch_size, dropout and test_valid_size are defined in earlier cells -- confirm.
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
# Model
logits = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(\
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\
    .minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables (fixed: stray space after `tf.`)
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        for batch in range(mnist.train.num_examples//batch_size):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            sess.run(optimizer, feed_dict={
                x: batch_x,
                y: batch_y,
                keep_prob: dropout})
            # Calculate batch loss and accuracy (dropout disabled for evaluation)
            loss = sess.run(cost, feed_dict={
                x: batch_x,
                y: batch_y,
                keep_prob: 1.})
            valid_acc = sess.run(accuracy, feed_dict={
                x: mnist.validation.images[:test_valid_size],
                y: mnist.validation.labels[:test_valid_size],
                keep_prob: 1.})
            # Fixed: added missing space so output reads "... - Loss:" not "-Loss:"
            print('Epoch {:>2}, Batch {:>3} - '
                  'Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format(
                epoch + 1,
                batch + 1,
                loss,
                valid_acc))
    # Calculate Test Accuracy
    test_acc = sess.run(accuracy, feed_dict={
        x: mnist.test.images[:test_valid_size],
        y: mnist.test.labels[:test_valid_size],
        keep_prob: 1.})
    print('Testing Accuracy: {}'.format(test_acc))
| Learning Process.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/a-forty-two/COG_GN22CDBDS001_MARCH_22/blob/main/NLP_Pipeline_Demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="mDZx51M7grWQ"
import spacy
# + colab={"base_uri": "https://localhost:8080/"} id="txMEliG4hAP9" outputId="1c9d8b3f-0381-4961-830f-ae6e4f6a5c32"
phrase = 'mary had a little lamb. The lamb had a cat.'
# Naive sentence split on '.'; note it leaves a trailing empty string
sentences = phrase.split('.')
sentences
# + id="0qemTAw6heR7"
# dictionary, reverse dictionary
# central dictionary
# Load spaCy's small English pipeline; calling it on text returns an annotated Doc
dictionary = spacy.load('en_core_web_sm')
# + id="-OhKp32Zh13t"
tokens = dictionary(phrase)
# + colab={"base_uri": "https://localhost:8080/"} id="VfC6YXUeh7pb" outputId="8fa30e33-81ea-4b82-df0f-7c6aa0d5156a"
print(len(phrase))  # character count of the raw string
len(tokens) # word length (token count, punctuation included)
# + colab={"base_uri": "https://localhost:8080/"} id="YUUHwZVmh9FT" outputId="9c081343-5c9a-49ea-ae30-1f22b7df0844"
tokens[-2]
# + colab={"base_uri": "https://localhost:8080/"} id="3WFvjzd6iSXp" outputId="592c40e8-fc43-4669-ef14-27cf6b230d92"
# !ls
# + colab={"base_uri": "https://localhost:8080/"} id="hY22r04IixMs" outputId="9c1808dc-858b-47f5-c1d6-0c304ec5020a"
# Tokenize a text file with the same pipeline
# NOTE(review): file handle is never closed; a `with` block would be safer
f = open('nlpcontent.txt','rt')
content = f.read()
tokens = dictionary(content)
len(tokens)
# + colab={"base_uri": "https://localhost:8080/"} id="RVuIhtQVi6zI" outputId="65430212-006b-42bb-ba28-3c03cfb652d5"
print(dir(tokens[0]))
# + colab={"base_uri": "https://localhost:8080/"} id="jVc-9PT4kL9F" outputId="adfe0689-ca25-4f61-e431-deeed51f61cf"
len(tokens[0].vector)  # embedding dimensionality of a single token
# + colab={"base_uri": "https://localhost:8080/"} id="Ti9gfVrvkX_0" outputId="dff84c79-4a53-435a-b023-e6ec2647b775"
tokens[0].vector
# + colab={"base_uri": "https://localhost:8080/"} id="bgZgCm49krf8" outputId="3aab23c0-5718-4320-fb00-f3500389c46f"
# grammar tags (part-of-speech) for a slice of tokens
for i in range(15,20,1):
    print(tokens[i].text)
    print(tokens[i].tag) # id of tag-> for programming
    print(tokens[i].tag_) # string value of tag-> for display
    print('*******')
# + colab={"base_uri": "https://localhost:8080/"} id="8Wca9xENlDV9" outputId="9b85754e-8c79-4a11-aab3-7eea6db50e3f"
# stop words -> "pollutants": usually add little value to the sentence
# (not always -- removal can change meaning)
for i in range(20):
    if(tokens[i].is_stop == True):
        print(tokens[i].text)
# + colab={"base_uri": "https://localhost:8080/"} id="Ac4_u8u3l4OU" outputId="78c2d1af-ac0d-46df-cb95-37e50112e430"
# entire stop words list:
stopwords = spacy.lang.en.STOP_WORDS
print(stopwords)
# + id="WenSNTUZmZAK"
# remove or cleanse my text - stop words
# mytokens keeps Token objects, mywords keeps their plain-string text
mytokens = []
mywords = []
for token in tokens:
    if(token.is_stop == False):
        mytokens.append(token)
        mywords.append(token.text)
# + colab={"base_uri": "https://localhost:8080/"} id="rb4etIFnnBgb" outputId="b9b56b73-6e47-43e8-a4bc-81fdb53b5f0f"
# First 100 characters of the stop-word-free text
print((" ".join(mywords))[:100])
# + id="WL_Vh72pnJsB"
# Step by step activity of polishing your input to get -> sentiment, phrases, most important phrases
# + id="otlSa01Qnboi"
# Levers-> 1,2,3
# fulcrum, effort, load
# fulcrum-> centre of motion
# NLP-> machine
# verbs-> fulcrum
# pikachu -> effort
# load -> Pineapple
# Pikachu is eating a pineapple
from spacy import displacy
# + id="u_oOuvefpaK_"
sentence = 'Pikachu is eating a pineapple'
tokens = dictionary(sentence)
# + colab={"base_uri": "https://localhost:8080/", "height": 258} id="ozS_f6x7phxb" outputId="5af30ce9-1f26-45b8-87e2-1d021a979519"
# Render the dependency parse inline in the notebook
displacy.render(tokens, style='dep', jupyter=True, options={'distance':100})
# + colab={"base_uri": "https://localhost:8080/"} id="6Je-9z5bpuRx" outputId="489fc920-1246-46ef-f22c-b4d5285d8d43"
# Re-tokenize the file content (overwrites `tokens` from the demo sentence)
f = open('nlpcontent.txt','rt')
content = f.read()
tokens = dictionary(content)
len(tokens)
# + colab={"base_uri": "https://localhost:8080/"} id="VjdOg7Z1qN4J" outputId="e12546a7-8ad9-44b7-ea43-e3161680ba02"
# common nouns-> tag NNS
for i in range(50):
    if(tokens[i].tag_ == 'NNS'): # simple, plural nouns
        print(tokens[i].text)
# + id="wNptOVS3qcGk"
# + colab={"base_uri": "https://localhost:8080/"} id="Qn2VAssCtcy4" outputId="0b2b16ff-a17d-4f3b-c83a-bd10c2de29bd"
# censor/redact
# confidential-> 124*****232
# !python -m spacy download en_core_web_lg
# + colab={"base_uri": "https://localhost:8080/", "height": 452} id="fIp4lz-UuSsc" outputId="984b1402-5e19-4ec7-bde1-9589852638dd"
import en_core_web_lg
# Large English pipeline (downloaded in the cell above); used for NER redaction
nlp = en_core_web_lg.load()
def censor(token):
    """Return a redaction placeholder for PERSON tokens, else the token's text.

    NOTE(review): `token.string` is the spaCy v2 API (removed in v3, where
    `token.text_with_ws` is the equivalent) -- confirm the pinned spaCy version.
    """
    is_person = token.ent_iob != 0 and token.ent_type_ == 'PERSON'
    return "<censored>" if is_person else token.string
# + id="BGgqoRG9vS-o"
def scrubbing(text):
    """Parse `text` with the loaded pipeline and censor PERSON entities.

    NOTE(review): `Span.merge()` is the spaCy v2 API (v3 uses
    `doc.retokenize()`) -- confirm the pinned spaCy version.
    """
    doc = nlp(text)
    # Collapse each multi-token entity into one token so a single
    # placeholder replaces a whole name.
    for ent in doc.ents:
        ent.merge()
    return " ".join(censor(tok) for tok in doc)
# + id="qpxy6qQ_v0ja"
# Parse the full document once with the large pipeline
# (fixed: close the file handle via a `with` block)
with open('nlpcontent.txt', 'rt') as f:
    content = f.read()
words = nlp(content)
len(words)
# + colab={"base_uri": "https://localhost:8080/"} id="kBG0GkSRwPR8" outputId="09c77cd1-7463-4ced-b2f0-e9ea4c8472fe"
# Show a censored slice of the document
print(scrubbing(content)[750:1000])
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="CC8MwIcVwXVR" outputId="f2872b13-e4f8-4f23-ee8a-a4c5acddfa8f"
# !pip install textacy
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="u4KPPBKSxNTN" outputId="f14414f2-768b-482c-a932-d9afa6e04b4e"
import textacy
# Fixed: the parsed Doc is `words`; `doc` was never defined at module level
# in this notebook (it is local to scrubbing), so the original raised NameError.
statements = textacy.extract.semistructured_statements(words, 'shrimp')
# + id="TlLrTOHSxjZK"
| notebooks/NLP_Pipeline_Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# # Curve Fitting Basics
# + [markdown] deletable=true editable=true
# One of the core principles of machine learning is to try and fit the data into a model. There are several ways of defining a model and there are multiple ways to fit the data in the model.
#
# But fitting data into equations is not a new concept. Curve fitting has existed for a long time.
# In this notebook, I wish to explore basics of curve fitting and hopefully develop this idea to be used in machine learning.
#
# Let's define the objectives for this notebook and slowly mature the objectives.
#
# 1. Cost Function
# 2. Fitting data <br>
# a. Linear equation <br>
# b. Quadratic equation <br>
# c. Polynomial <br>
# d. Lagrange's method <br>
# e. Splines <br>
# 3. Residuals
# 4. Automation
# + [markdown] deletable=true editable=true
# ## 1. Cost Function
#
# Let's start by importing all the required libraries
# + deletable=true editable=true
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] deletable=true editable=true
# Let's generate data from an equation and add some randomness to it so that it looks real
#
# $$y = 2(x-4)^3 + 20 + noise$$
# + deletable=true editable=true
x = np.arange(0,10,0.5)
# Cubic signal plus uniform noise in [0, 100)
y = 2*(x-4)**3 + 20 + 100*np.random.rand(x.size)
plt.scatter(x,y)
plt.show()
# + [markdown] deletable=true editable=true
# Now if we model this equation using a linear equation $y_{pred} = ax + b$ then modelling would mean to find the values of $a$ and $b$.
#
# For now, let us assume that $a = 30$ and $b = 10$
# + deletable=true editable=true
a = 30
b = 10
y_pred = a*x + b
plt.scatter(x,y)
plt.plot(x,y_pred)
plt.show()
# + [markdown] deletable=true editable=true
# Let us see how badly our model has performed w.r.t. the data. <br>
# We can plot the distance of each point from the line to get a better idea of this.
# + deletable=true editable=true
from matplotlib.gridspec import GridSpec
# Absolute residuals between the data and the linear guess
y_diff = np.abs(y - y_pred)
plt.subplots(figsize=(7,5))
plt.scatter(x,y)
plt.plot(x,y_pred)
plt.show()
plt.subplots(figsize=(7,1))
plt.bar(x,y_diff,width=0.1, align='center', color='red')
plt.show()
# + [markdown] deletable=true editable=true
# There are multiple ways to define the cost.
# 1. Maximum difference between data and model
# 2. Root mean square error
#
# Let's check how our model did in terms of both these measures.
# + deletable=true editable=true
# Worst-case (L-infinity) error, and root of the SUM of squares
# (the notebook calls the latter RMSE, though it is not divided by n)
cost1 = y_diff.max()
cost2 = np.sqrt(np.sum(y_diff**2))
print(cost1)
print(cost2)
# + [markdown] deletable=true editable=true
# Let's try with a different model and see what happens to the cost.
# + deletable=true editable=true
a = 40
b = 20
y_pred = a*x + b
y_diff = np.abs(y - y_pred)
cost1 = y_diff.max()
cost2 = np.sqrt(np.sum(y_diff**2))
print(cost1)
print(cost2)
# + [markdown] deletable=true editable=true
# In fact, since we would want to test our model again and again for different values of $a$ and $b$, it would be beneficial to define a function for the same.
# + deletable=true editable=true
def model(coeff, x):
    """Evaluate a polynomial with ascending coefficients at the points x.

    coeff[i] multiplies x**i, so coeff = [b, a] is the line a*x + b.
    Returns a float array the same length as x.
    """
    total = np.zeros(x.size)
    for power, c in enumerate(coeff):
        total = total + c * x ** power
    return total
# + deletable=true editable=true
def objective(y, y_pred):
    """Return the two cost measures as [max abs error, sqrt of sum of squares]."""
    residual = np.abs(y - y_pred)
    return np.array([residual.max(), np.sqrt(np.sum(residual ** 2))])
# + [markdown] deletable=true editable=true
# Now lets see for different values of $a$ and $b$, what happens to the cost
# + deletable=true editable=true
# Coarse grid over slope a and intercept b; print both cost measures per pair
a = np.arange(0,100,25)
b = np.arange(0,100,25)
index = 1;
print("Sr.No\ta\tb\tcost[0]\tcost[1]")
for i in range(0,a.size):
    for j in range(0,b.size):
        # model() takes ascending coefficients: [intercept, slope]
        y_pred = model([b[j], a[i]], x)
        cost = objective(y, y_pred)
        print("%2d\t%2d\t%2d\t%4.2d\t%4.2d" % (index, a[i], b[j], cost[0], cost[1]))
        index += 1
# + [markdown] deletable=true editable=true
# ## 2. Fitting Data
#
# As said earlier, fitting data means training the model to learn its coefficients.
# So if we trying to fit data into a linear model, then we are learning the coefficients of slope ($a$) and intercept ($b$).
#
# Let's explore this idea further
# + [markdown] deletable=true editable=true
# ### a. Linear Equation
#
# As used earlier while making the cost function, the general form of a linear equation is
# $$y = ax + b$$
# Now the idea is to minimize the cost. If we consider the root mean square cost, then we have to minimize
# $$f(a,b) = \sum_{k=1}^n (ax_k + b - y_k)^2$$
#
# Since $f(a,b)$ is a function of two variables, we have to equate $\dfrac{\partial f}{\partial a} = 0$ and $\dfrac{\partial f}{\partial b} = 0$.
#
# This gives us,
# $$
# \begin{eqnarray}
# \dfrac{\partial f}{\partial a} &= 2 \sum_{k=1}^n (ax_k + b - y_k) x_k &= 0 \\
# \dfrac{\partial f}{\partial b} &= 2 \sum_{k=1}^n (ax_k + b - y_k) &= 0
# \end{eqnarray}
# $$
#
# Simplifying these equations further,
# $$
# \begin{eqnarray}
# a\sum_{k=1}^n x_k^2 + b \sum_{k=1}^n x_k &=& \sum_{k=1}^n x_k y_k \\
# a\sum_{k=1}^n x_k + b n &=& \sum_{k=1}^n y_k
# \end{eqnarray}
# $$
#
# If we write these equations in matrix form, we get a more concise representation,
# $$
# \begin{bmatrix}
# \sum x_k^2 & \sum x_k\\
# \sum x_k & n
# \end{bmatrix}
# \begin{bmatrix}
# a\\
# b
# \end{bmatrix} =
# \begin{bmatrix}
# \sum x_k y_k\\
# \sum y_k
# \end{bmatrix}
# $$
#
# This is of the form $Ax = B$ and can be solve as $x = A^{-1}B$.
#
# Enough of theory, let's try this out in code.
# + deletable=true editable=true
# Normal equations for the least-squares line: A @ [slope, intercept] = B
A = np.zeros((2,2))
B = np.zeros((2,1))
A[0,0] = np.sum(x**2)
A[0,1] = np.sum(x)
A[1,0] = A[0,1]  # A is symmetric
A[1,1] = x.size
B[0] = np.sum(x*y)
B[1] = np.sum(y)
# Solve via explicit inverse (fine for 2x2; np.linalg.solve is preferable in general)
X = np.matmul(np.linalg.inv(A), B)
print('A')
print(A)
print('-'*20)
print('B')
print(B)
print('-'*20)
print('X')
print(X)
print('-'*20)
# + [markdown] deletable=true editable=true
# Let's check the plot and cost with this model
# + deletable=true editable=true
# model() expects ascending coefficients, so pass [intercept, slope] = [X[1], X[0]]
y_pred = model([X[1], X[0]], x)
cost = objective(y, y_pred)
y_diff = np.abs(y - y_pred)
plt.subplots(figsize=(7,5))
plt.scatter(x,y)
plt.plot(x,y_pred)
plt.show()
plt.subplots(figsize=(7,1))
plt.bar(x,y_diff,width=0.1, align='center', color='red')
plt.show()
print('Root Mean Square cost = %d' % cost[1])
# + [markdown] deletable=true editable=true
# As we can see, this curve fits better than our estimate, and the RMSE improves from 345 to 254.
# + [markdown] deletable=true editable=true
# ### b. Quadratic Equation
# + [markdown] deletable=true editable=true
# On the same lines as a linear model, the general form of the quadratic equation is
# $$y = ax^2 + bx + c$$
# The cost function we have to minimize is
# $$f(a,b, c) = \sum_{k=1}^n (ax_k^2 + bx_k + c - y_k)^2$$
#
# Since $f(a,b,c)$ is a function of three variables, we have to equate $\dfrac{\partial f}{\partial a} = 0$, $\dfrac{\partial f}{\partial b} = 0$ and $\dfrac{\partial f}{\partial c} = 0$.
#
# This gives us,
# $$
# \begin{eqnarray}
# \dfrac{\partial f}{\partial a} &= 2 \sum_{k=1}^n (ax_k^2 + bx_k + c - y_k) x_k^2 &= 0 \\
# \dfrac{\partial f}{\partial b} &= 2 \sum_{k=1}^n (ax_k^2 + bx_k + c - y_k) x_k &= 0 \\
# \dfrac{\partial f}{\partial c} &= 2 \sum_{k=1}^n (ax_k^2 + bx_k + c - y_k) &= 0
# \end{eqnarray}
# $$
#
# Simplifying these equations further,
# $$
# \begin{eqnarray}
# a\sum_{k=1}^n x_k^4 + b \sum_{k=1}^n x_k^3 + c \sum_{k=1}^n x_k^2 &=& \sum_{k=1}^n x_k^2 y_k \\
# a\sum_{k=1}^n x_k^3 + b \sum_{k=1}^n x_k^2 + c \sum_{k=1}^n x_k &=& \sum_{k=1}^n x_k y_k \\
# a\sum_{k=1}^n x_k^2 + b \sum_{k=1}^n x_k + c n &=& \sum_{k=1}^n y_k
# \end{eqnarray}
# $$
#
# In matrix form, we get,
# $$
# \begin{bmatrix}
# \sum x_k^4 & \sum x_k^3 & \sum x_k^2\\
# \sum x_k^3 & \sum x_k^2 & \sum x_k\\
# \sum x_k^2 & \sum x_k & n
# \end{bmatrix}
# \begin{bmatrix}
# a\\
# b\\
# c
# \end{bmatrix} =
# \begin{bmatrix}
# \sum x_k^2 y_k\\
# \sum x_k y_k\\
# \sum y_k
# \end{bmatrix}
# $$
#
#
# + deletable=true editable=true
# Normal equations for the least-squares parabola: A @ [a, b, c] = B
A = np.zeros((3,3))
B = np.zeros((3,1))
A[0,0] = np.sum(x**4)
A[0,1] = np.sum(x**3)
A[0,2] = np.sum(x**2)
A[1,0] = np.sum(x**3)
A[1,1] = np.sum(x**2)
A[1,2] = np.sum(x)
A[2,0] = np.sum(x**2)
A[2,1] = np.sum(x)
A[2,2] = x.size
B[0] = np.sum(x**2 *y)
B[1] = np.sum(x*y)
B[2] = np.sum(y)
# Solve via explicit inverse (np.linalg.solve is preferable in general)
X = np.matmul(np.linalg.inv(A), B)
print('A')
print(A)
print('-'*20)
print('B')
print(B)
print('-'*20)
print('X')
print(X)
print('-'*20)
# + [markdown] deletable=true editable=true
# Let's check our model
# + deletable=true editable=true
# model() expects ascending coefficients: [c, b, a] = [X[2], X[1], X[0]]
y_pred = model([X[2], X[1], X[0]], x)
cost = objective(y, y_pred)
y_diff = np.abs(y - y_pred)
plt.subplots(figsize=(7,5))
plt.scatter(x,y)
plt.plot(x,y_pred)
plt.show()
plt.subplots(figsize=(7,1))
plt.bar(x,y_diff,width=0.1, align='center', color='red')
plt.show()
print('Root Mean Square cost = %d' % cost[1])
# + [markdown] deletable=true editable=true
# We can see that our model fits the data very well.
# + [markdown] deletable=true editable=true
# ### 3. Polynomial model
#
# Let's take it to the next step with building a generic solution for any order of equation.
# That's right. We want to build a algorithm to fit data with the order of equation we supply to it.
#
# As you must have seen with the linear and quadratic model, it is very easy to see the pattern and directly come up with the matrix equation that can be used to fit a $n^{th}$ order polynomial.
#
# $$
# \begin{bmatrix}
# \sum x_k^{2n} & \sum x_k^{2n-1} & \ldots & \sum x_k^{n}\\
# \sum x_k^{2n-1} & \sum x_k^{2n-2} & \ldots & \sum x_k^{n-1}\\
# \vdots & \vdots & \ddots & \vdots\\
# \sum x_k^n & \sum x_k^{n-1} & \ldots & n
# \end{bmatrix}
# \begin{bmatrix}
# a_1\\
# a_2\\
# \vdots \\
# a_{n+1}
# \end{bmatrix} =
# \begin{bmatrix}
# \sum x_k^{n} y_k\\
# \sum x_k^{n-1} y_k\\
# \vdots \\
# \sum y_k
# \end{bmatrix}
# $$
# + deletable=true editable=true
def PolyFit(x, y, n):
    """Least-squares fit of an n-th order polynomial via the normal equations.

    Parameters: x, y are 1-D arrays of sample points; n is the polynomial order.
    Returns an (n+1, 1) column vector of coefficients in ASCENDING order
    (constant first), ready to be passed to model().
    """
    # Power sums sum(x**i) for i = 0..2n fill the (Hankel-structured) matrix A.
    AllSums = np.zeros(2*n+1)
    for i in range(0, 2*n + 1):
        AllSums[i] = np.sum(x**i)
    A = np.zeros((n+1, n+1))
    for i in range(0,n+1):
        for j in range(0,n+1):
            A[i,j] = AllSums[2*n-i-j]
    # Right-hand side: sum(x**(n-i) * y).
    B = np.zeros((n+1,1))
    for i in range(0,n+1):
        B[i] = np.sum(x**(n-i) *y)
    # Fixed: solve A X = B directly instead of forming inv(A) and multiplying;
    # np.linalg.solve is faster and numerically more accurate.
    X = np.linalg.solve(A, B)
    # Reverse from descending (highest power first) to ascending order.
    X = X[::-1]
    return X
# -
# Fit a 6th-order polynomial (generous for the underlying cubic data)
X = PolyFit(x,y,6)
# + deletable=true editable=true
y_pred = model(X, x)
cost = objective(y, y_pred)
y_diff = np.abs(y - y_pred)
plt.subplots(figsize=(7,5))
plt.scatter(x,y)
plt.plot(x,y_pred)
plt.show()
plt.subplots(figsize=(7,1))
plt.bar(x,y_diff,width=0.1, align='center', color='red')
plt.show()
print('Root Mean Square cost = %d' % cost[1])
# + [markdown] deletable=true editable=true
# As can be seen, the polynomial fit works well and gives a very good fit.
| CurveFitting.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.2
# language: julia
# name: julia-0.6
# ---
# +
using DataFrames
using Plots
using JLD
using Distributions
using StatsBase
using Iterators
using EmpiricalBayes
using StatPlots
using LaTeXStrings
# -
# Simulation grid: 104 parameter settings, 12 batches x 20 replications each.
settings = 104
k_max=12
nreps = 20
nreps_total = k_max*nreps
nmethods=4
methods = ["CEB"; "CEBSCOOP"; "G0"; "G"]
# Two ground-truth priors: a two-component normal mixture and the ASH flat-top.
true_dists = [MixtureModel([ Normal(-0.3,.2), Normal(0,0.9)],[0.8, 0.2]), EmpiricalBayes.ash_flattop ];
# BradDeconvolveR results act as scalars: indexing returns the object itself
# and they contribute no worst-case bias; DonohoCI carries its own max bias.
Base.getindex(t::EmpiricalBayes.BradDeconvolveR, ::Int64) = t
maxbias(t::EmpiricalBayes.BradDeconvolveR) = 0.0;
maxbias(t::DonohoCI) = t.max_bias;
# Discretization grids for the marginal density and the prior.
marginal_grid = collect(linspace(-6.5,6.5,1001));
prior_grid = collect(linspace(-3,3,121));
marginal_h = marginal_grid[2]-marginal_grid[1];
# Precompute the normal-convolution problems for both true priors.
d_true1 = NormalConvolutionProblem(true_dists[1], marginal_grid)
d_true2 = NormalConvolutionProblem(true_dists[2], marginal_grid)
# +
# Build an empty results table: one summary row per (setting, method) pair.
res_df = DataFrame(true_dist = Distribution[],
                   truetheta=Float64[],
                   x=Float64[], m=Int64[],
                   σ=Float64[],
                   bias=Float64[],
                   coverage=Float64[],
                   width=Float64[],
                   se=Float64[],
                   maxbias=Float64[],
                   lowerband=Float64[],
                   upperband=Float64[],
                   method = String[]
                   )
for comb=1:settings
    @show comb
    # Only the first two methods (CEB, CEBSCOOP) are aggregated here.
    for nmethod=1:2
        @show nmethod
        cnt = one(Int)
        # Pre-allocate per-replication accumulators (k_max batches x nreps each).
        point_est = Vector{Float64}(nreps_total)
        bias_calib = Vector{Float64}(nreps_total)
        coverage_calib = Vector{Bool}(nreps_total)
        width_calib = Vector{Float64}(nreps_total)
        maxbias_calib = Vector{Float64}(nreps_total)
        se_calib = Vector{Float64}(nreps_total)
        lower_band_vec = Vector{Float64}(nreps_total)
        upper_band_vec = Vector{Float64}(nreps_total)
        # Defaults; overwritten by each loaded simulation batch below.
        x=0.0
        m=10
        σ=1.0
        true_θ = 0.0
        true_dist = Normal(0,1)
        d_true = d_true1
        method_name = methods[nmethod]
        for sim_batch=1:k_max
            sim = load("May22/May22/mysim_$(comb)_$(sim_batch).jld")["res"]
            # sim[2] holds this batch's simulation parameters.
            x = sim[2][:x]
            m = sim[2][:m]
            σ = sim[2][:σ]
            true_dist = sim[2][:dist]
            # Map the stored distribution to its precomputed convolution problem.
            if Symbol(true_dist) == Symbol(true_dists[1])
                d_true = d_true1
            else
                d_true = d_true2
            end
            sim = sim[3]
            target = PosteriorTarget(LFSRNumerator(x))
            true_num, true_denom, true_θ = posterior_stats(d_true, target)
            for k=1:nreps
                donoho_res = sim[k][nmethod][1]
                l,r = confint(donoho_res, target)
                point_est[cnt] = estimate(donoho_res, target)
                bias_calib[cnt] = estimate(donoho_res, target) - true_θ
                coverage_calib[cnt] = r >= true_θ >= l
                width_calib[cnt] = r-l
                maxbias_calib[cnt] = maxbias(donoho_res)
                lower_band_vec[cnt] = l
                upper_band_vec[cnt] = r
                cnt +=1
            end
        end
        # Collapse replications to summary statistics for this setting/method.
        bias_calib = mean(bias_calib)
        coverage_calib = mean(coverage_calib)
        width_calib = mean(width_calib)
        se_res = std(point_est)
        maxb = mean(maxbias_calib)
        lower_band = mean(lower_band_vec)
        upper_band = mean(upper_band_vec)
        push!(res_df, (true_dist, true_θ, x, m, σ, bias_calib, coverage_calib, width_calib, se_res,
                       maxb, lower_band, upper_band, method_name))
    end
end
#res_df[:σ] = string.(res_df[:σ]);
head(res_df)
# -
# Persist the aggregated table for downstream plotting notebooks.
save("res_df.jld", "res_df", res_df)
| notebooks/May22.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/dwightchurchill/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/LS_DS_121_Join_and_Reshape_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="pmU5YUal1eTZ"
# _Lambda School Data Science_
#
# # Join and Reshape datasets
#
# Objectives
# - concatenate data with pandas
# - merge data with pandas
# - understand tidy data formatting
# - melt and pivot data with pandas
#
# Links
# - [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
# - [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
# - Combine Data Sets: Standard Joins
# - Tidy Data
# - Reshaping Data
# - Python Data Science Handbook
# - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append
# - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join
# - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
# - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
#
# Reference
# - Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
# - Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
# + [markdown] colab_type="text" id="Mmi3J5fXrwZ3"
# ## Download data
#
# We’ll work with a dataset of [3 Million Instacart Orders, Open Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)!
# + colab={"base_uri": "https://localhost:8080/", "height": 202} colab_type="code" id="K2kcrJVybjrW" outputId="d6483326-62c0-41ae-db9c-8a366ee60495"
# !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" id="kqX40b2kdgAb" outputId="4516a4b1-3873-456e-a0e0-bdf31c1bc2e9"
# !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="YbCvZZCBfHCI" outputId="42278f71-aba5-4720-d3c5-18293d768ce8"
# %cd instacart_2017_05_01
# + colab={"base_uri": "https://localhost:8080/", "height": 118} colab_type="code" id="etshR5kpvWOj" outputId="f0154d6d-02e0-4763-df72-371d4ba69ffb"
# !ls -lh
# + [markdown] colab_type="text" id="RcCu3Tlgv6J2"
# # Join Datasets
# + [markdown] colab_type="text" id="RsA14wiKr03j"
# ## Goal: Reproduce this example
#
# The first two orders for user id 1:
# + colab={"base_uri": "https://localhost:8080/", "height": 313} colab_type="code" id="vLqOTMcfjprg" outputId="6f9c662b-86c2-4bca-afad-9031b36de8ba"
from IPython.display import display, Image
# Render the target table (first two orders for user_id 1) from the blog post
url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'
example = Image(url=url, width=600)
display(example)
# + [markdown] colab_type="text" id="nPwG8aM_txl4"
# ## Load data
#
# Here's a list of all six CSV filenames
# + colab={"base_uri": "https://localhost:8080/", "height": 121} colab_type="code" id="Ksah0cOrfdJQ" outputId="90510aa4-82d6-43fe-dbd8-176123c03602"
# !ls -lh *.csv
# + [markdown] colab_type="text" id="AHT7fKuxvPgV"
# For each CSV
# - Load it with pandas
# - Look at the dataframe's shape
# - Look at its head (first rows)
# - `display(example)`
# - Which columns does it have in common with the example we want to reproduce?
# + [markdown] colab_type="text" id="cB_5T6TprcUH"
# ### aisles
# + colab={} colab_type="code" id="JB3bvwSDK6v3" outputId="a85018cf-3dc1-4e0e-900e-d293d1715cb7"
# !head aisles.csv
# + colab={} colab_type="code" id="TDUioPczmFT4" outputId="1c4509b3-ccdf-4799-d0d4-02dcf363fd94"
# !wc aisles.csv
# + colab={} colab_type="code" id="wEp5WptXmFT6" outputId="98cafe4a-998f-4f19-fe48-1d9fd339a4d4"
# Load the aisle lookup table (aisle_id -> aisle name).
import pandas as pd
aisles = pd.read_csv('aisles.csv')
# Bare expression so the notebook displays (rows, columns).
aisles.shape
# + colab={} colab_type="code" id="9f6cxRvzmFT9" outputId="4097a1cb-8490-4bac-823e-b0cf372375f1"
# First five aisle rows.
aisles.head()
# + colab={} colab_type="code" id="CMpBpOYImFUA" outputId="fc8763d3-1e18-48a5-f94d-bbc1b2236330"
# Show the full aisles dataframe (pandas truncates the middle rows).
display(aisles)
# + [markdown] colab_type="text" id="9-GrkqM6rfXr"
# ### departments
# + colab={} colab_type="code" id="yxFd5n20yOVn"
# Load the department lookup table (department_id -> department name).
departments = pd.read_csv('departments.csv')
# + colab={} colab_type="code" id="EF0zf-oNmFUH" outputId="859e996c-824f-4be2-8fd2-c59c1de94cab"
# (rows, columns) of the departments table.
departments.shape
# + colab={} colab_type="code" id="39OWd9gFmFUK" outputId="12a68d09-1756-4610-fd5c-619f63ec82b0"
# First five department rows.
departments.head()
# + colab={} colab_type="code" id="kP3phmmJmFUM" outputId="a7b3088f-42bf-400d-e9d4-cc905f49854b"
# Show the full departments dataframe.
display(departments)
# + [markdown] colab_type="text" id="VhhVcn9kK-nG"
# ### order_products__prior
# + colab={} colab_type="code" id="86rIMNFSzKaG" outputId="01b92b0d-534d-4691-d198-4e81ee5f3536"
# Line items for "prior" orders: one row per (order_id, product_id)
# with the cart position.
order_products_prior = pd.read_csv('order_products__prior.csv')
order_products_prior.shape
# + colab={} colab_type="code" id="I0M6Cor5mFUV" outputId="9ddc3065-3a25-47d2-c4cc-76d9483a5238"
# First five prior line items.
order_products_prior.head()
# + colab={} colab_type="code" id="AGkD0DYZmFUY" outputId="f3bf438b-a945-4962-881a-9a85045ea853"
# Show the full prior line-items dataframe.
display(order_products_prior)
# + colab={} colab_type="code" id="8Z0hbAmrmFUb" outputId="3c8857fd-6351-4b71-bfaa-ffdf0036e421"
# Re-show the target screenshot for column comparison.
display(example)
# + [markdown] colab_type="text" id="HVYJEKJcLBut"
# ### order_products__train
# + colab={} colab_type="code" id="xgwSUCBk6Ciy" outputId="b67084df-02aa-4dfa-fb8d-443d5a38888c"
# Line items for "train" orders (same schema as the prior file).
order_products_train = pd.read_csv('order_products__train.csv')
order_products_train.shape
# + colab={} colab_type="code" id="W4H5epvlmFUi" outputId="c73a5b8d-4c05-4b28-d801-8086e83fc72c"
# First five train line items.
order_products_train.head()
# + colab={} colab_type="code" id="v4WFaP3BmFUk" outputId="d2b923c6-b09b-4ba7-fed8-be88e40f657d"
# Show the full train line-items dataframe.
display(order_products_train)
# + [markdown] colab_type="text" id="LYPrWUJnrp7G"
# ### orders
# + colab={} colab_type="code" id="UfPRTW5w128P" outputId="d07447d9-9b26-47e1-888c-448726a16498"
# Order headers: one row per order with user, sequence number, day/hour.
orders = pd.read_csv('orders.csv')
orders.shape
# + colab={} colab_type="code" id="8t5A77n2mFUr" outputId="f440abd7-ed9f-46cd-9353-942c3b375816"
# Show the full orders dataframe.
display(orders)
# + colab={} colab_type="code" id="kOiazTDOmFUv" outputId="b395aa7d-515f-4965-8360-d77c37d68af6"
# First five order headers.
orders.head()
# + colab={} colab_type="code" id="8EL9-i6SmFUx" outputId="324ca2b5-62f2-4cb7-af69-932c89ef2cc1"
# Re-show the target screenshot for column comparison.
display(example)
# + [markdown] colab_type="text" id="nIX3SYXersao"
# ### products
# + colab={} colab_type="code" id="3BKG5dxy2IOA" outputId="1d575547-aa15-484f-f432-8f75e331f606"
# Product catalog: product_id -> product_name (plus aisle/department ids).
products = pd.read_csv('products.csv')
products.shape
# + colab={} colab_type="code" id="fwMzMLkwmFU1" outputId="2023477f-5826-430b-9676-082b15c64b13"
# Show the full products dataframe.
display(products)
# + colab={} colab_type="code" id="dHX-4QqZmFU3" outputId="e1b3a9f7-8377-414f-fb40-854996b5ad74"
# First five products.
products.head()
# + colab={} colab_type="code" id="1frgRMXtmFU5" outputId="ad92f13c-7309-466d-8ed9-7360b795315b"
# Re-show the target screenshot for column comparison.
display(example)
# + [markdown] colab_type="text" id="cbHumXOiJfy2"
# ## Concatenate order_products__prior and order_products__train
# + colab={} colab_type="code" id="TJ23kqpAY8Vv" outputId="99d8c995-6ab8-439a-a4d7-c16268aae28e"
# Stack prior + train line items into one long dataframe (row counts add).
order_products = pd.concat([order_products_prior, order_products_train])
order_products.shape
# + [markdown] colab_type="text" id="Z1YRw5ypJuv2"
# ## Get a subset of orders — the first two orders for user id 1
# + [markdown] colab_type="text" id="eJ9EixWs6K64"
# From `orders` dataframe:
# - user_id
# - order_id
# - order_number
# - order_dow
# - order_hour_of_day
# + colab={} colab_type="code" id="VI8F3v1dmFVC" outputId="23477fdc-75ab-43f8-b2ad-c5221f91afa6"
# Sanity-check the orders row count before subsetting.
orders.shape
# + colab={} colab_type="code" id="pEfR0NkqmFVE" outputId="d0d79325-c363-4eb9-cee2-6fbaa10532e9"
# All order headers for user 1.
orders[orders['user_id']==1]
# + colab={} colab_type="code" id="42jN3-7SmFVG" outputId="760b1f36-c927-4a98-a21c-e387f680ccae"
# Keep only user 1's first two orders, restricted to the columns the
# example image shows.
columns = ['user_id',
           'order_id',
           'order_number',
           'order_dow',
           'order_hour_of_day']
is_user_1 = orders['user_id'] == 1
is_first_two = orders['order_number'] <= 2
condition = is_user_1 & is_first_two
subset = orders.loc[condition, columns]
subset
# + [markdown] colab_type="text" id="3K1p0QHuKPnt"
# ## Merge dataframes
# + [markdown] colab_type="text" id="4MVZ9vb1BuO0"
# Merge the subset from `orders` with columns from `order_products`
# + colab={} colab_type="code" id="myWjIuxnmFVM" outputId="5ab0675d-cdd0-45a1-8975-e20ecac50133"
# First five combined line items (to pick merge columns).
order_products.head()
# + colab={} colab_type="code" id="3lajwEE86iKc" outputId="56834658-e641-4239-9b9e-40c7e208bb25"
# Attach each order's line items (cart position + product id) to the
# subset via an inner join on order_id.
columns = ['order_id', 'add_to_cart_order', 'product_id']
merge = subset.merge(order_products[columns], how='inner', on='order_id')
merge
# + [markdown] colab_type="text" id="i1uLO1bxByfz"
# Merge with columns from `products`
# + colab={} colab_type="code" id="D3Hfo2dkJlmh" outputId="126e8c20-f31a-4c2b-985c-05ae5e066f4a"
# Add product names, then order rows the way the example image shows
# them: by order sequence, then cart position.
name_lookup = products[['product_id', 'product_name']]
final = (merge
         .merge(name_lookup, how='inner', on='product_id')
         .sort_values(by=['order_number', 'add_to_cart_order']))
final
# + colab={} colab_type="code" id="pJQZWiLxmFVY" outputId="e740225d-a5a4-41de-8f78-7d0dc4c71126"
# Show pd.merge's docstring (how/on/suffixes parameters).
help(pd.merge)
# + [markdown] colab_type="text" id="dDfzKXJdwApV"
# # Reshape Datasets
# + [markdown] colab_type="text" id="4stCppWhwIx0"
# ## Why reshape data?
#
# #### Some libraries prefer data in different formats
#
# For example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always).
#
# > "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format is alternately called “long-form” or “tidy” data and is described in detail by <NAME>. The rules can be simply stated:
#
# > - Each variable is a column
# - Each observation is a row
#
# > A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
#
# #### Data science is often about putting square pegs in round holes
#
# Here's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling!
# + [markdown] colab_type="text" id="79KITszBwXp7"
# ## <NAME>'s Examples
#
# From his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)
# + colab={} colab_type="code" id="Jna5sk5FwYHr"
# %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
# Wickham's Table 1 (wide layout: one row per person, one column per
# treatment) and its transpose, Table 2.  The NaN forces both columns to
# float64, matching the original row-wise construction.
_rows = np.array([[np.nan, 2.0],
                  [16.0, 11.0],
                  [3.0, 1.0]])
table1 = pd.DataFrame(_rows,
                      index=['<NAME>', '<NAME>', '<NAME>'],
                      columns=['treatmenta', 'treatmentb'])
table2 = table1.T
# + [markdown] colab_type="text" id="eWe5rpI9wdvT"
# "Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild.
#
# The table has two columns and three rows, and both rows and columns are labelled."
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="SdUp5LbcwgNK" outputId="176dd9b1-a8f4-49ea-9b0f-2ae4421d935f"
# Display Table 1 (wide layout).
table1
# + [markdown] colab_type="text" id="SaEcDmZhwmon"
# "There are many ways to structure the same underlying data.
#
# Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different."
# + colab={"base_uri": "https://localhost:8080/", "height": 106} colab_type="code" id="SwDVoCj5woAn" outputId="8390f63c-c5a0-433e-9f5e-fdb928ff30a9"
# Display Table 2 (transposed layout).
table2
# + [markdown] colab_type="text" id="k3ratDNbwsyN"
# "Table 3 reorganises Table 1 to make the values, variables and observations more clear.
#
# Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable."
#
# | name | trt | result |
# |--------------|-----|--------|
# | <NAME> | a | - |
# | <NAME> | a | 16 |
# | <NAME> | a | 3 |
# | <NAME> | b | 2 |
# | <NAME> | b | 11 |
# | <NAME> | b | 1 |
# + [markdown] colab_type="text" id="WsvD1I3TwwnI"
# ## Table 1 --> Tidy
#
# We can use the pandas `melt` function to reshape Table 1 into Tidy format.
# + colab={} colab_type="code" id="S48tKmC46veF" outputId="d586a743-31d8-4bbb-8eeb-65af86011a64"
# The columns that melt will stack into a 'variable' column.
table1.columns.tolist()
# + colab={} colab_type="code" id="eFyiPWNWmFVp" outputId="ff036cf5-6ccb-4e6a-8da5-a7084e505d23"
# The index labels that become the id_vars after reset_index.
table1.index.tolist()
# + colab={} colab_type="code" id="otjOZwLdmFVr" outputId="b9c1b4fc-ce08-4e7e-bff2-d824154583de"
# Melt Table 1 into tidy (name, trt, result) rows: reset_index turns the
# person names into a column, melt stacks the two treatment columns.
column_map = {'index': 'name', 'variable': 'trt', 'value': 'result'}
tidy = (table1.reset_index()
        .melt(id_vars='index')
        .rename(columns=column_map))
# Keep only the treatment letter (a/b).
tidy['trt'] = tidy['trt'].str.replace('treatment', '')
# Display with the person as the row label.
tidy.set_index('name')
# + [markdown] colab_type="text" id="Ck15sXaJxPrd"
# ## Table 2 --> Tidy
# + colab={} colab_type="code" id="k2Qn94RIxQhV" outputId="da1ab632-8aa5-4815-9df0-cb2eb14828b5"
# Melt Table 2 the same way; here the index holds treatments, not names.
tidy2 = table2.reset_index().melt(id_vars='index')
tidy2
# + colab={} colab_type="code" id="My4jNpbUmFVv" outputId="36ffb2d2-d9cd-4ffa-8926-a91be5ff9fdb"
# Rename melted columns to the tidy schema (index was the treatment this
# time), strip the shared 'treatment' prefix, and put columns in
# name/trt/result order.
new_names = {'index': 'trt', 'variable': 'name', 'value': 'result'}
tidy2 = tidy2.rename(columns=new_names)
tidy2['trt'] = tidy2['trt'].str.replace('treatment', '')
tidy2.set_index('name')  # NOTE: result not assigned, so this has no lasting effect
tidy2 = tidy2[['name', 'trt', 'result']]
tidy2
# + [markdown] colab_type="text" id="As0W7PWLxea3"
# ## Tidy --> Table 1
#
# The `pivot_table` function is the inverse of `melt`.
# + colab={} colab_type="code" id="CdZZiLYoxfJC" outputId="b14933f3-707a-4e71-e039-ad3b7513ebdf"
# pivot_table inverts melt: one row per 'trt', cells aggregated with the
# default mean over the remaining numeric column(s).
pivot_tidy = tidy.pivot_table(index='trt')
pivot_tidy
# + [markdown] colab_type="text" id="3GeAKoSZxoPS"
# ## Tidy --> Table 2
# + colab={} colab_type="code" id="W2jjciN2xk9r"
##### LEAVE BLANK --an assignment exercise #####
# + [markdown] colab_type="text" id="jr0jQy6Oxqi7"
# # Seaborn example
#
# The rules can be simply stated:
#
# - Each variable is a column
# - Each observation is a row
#
# A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="kWo3FIP9xuKo" outputId="25a90cb8-2bfc-4858-851f-d2fec3aa652d"
# Tidy data plugs straight into seaborn: each plot role (x, y, facet
# column) maps to exactly one dataframe column.
sns.catplot(x='trt', y='result', col='name',
            kind='bar', data=tidy, height=2)
# + [markdown] colab_type="text" id="cIgT41Rxx4oj"
# ## Now with Instacart data
# + colab={} colab_type="code" id="Oydw0VvGxyDJ"
# Reload the Instacart tables fresh for the reshape section.
products = pd.read_csv('products.csv')
order_products = pd.concat([pd.read_csv('order_products__prior.csv'),
                            pd.read_csv('order_products__train.csv')])
orders = pd.read_csv('orders.csv')
# + [markdown] colab_type="text" id="6p-IsG0jyXQj"
# ## Goal: Reproduce part of this example
#
# Instead of a plot with 50 products, we'll just do two — the first products from each list
# - Half And Half Ultra Pasteurized
# - Half Baked F<NAME>
# + colab={"base_uri": "https://localhost:8080/", "height": 383} colab_type="code" id="Rs-_n9yjyZ15" outputId="87a7427c-41e5-48af-e5c4-e6fb99c81519"
# Show the Instacart blog chart we aim to partially reproduce
# (order share by hour of day for two products).
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'
example = Image(url=url, width=600)
display(example)
# + [markdown] colab_type="text" id="Vj5GR7I4ydBg"
# So, given a `product_name` we need to calculate its `order_hour_of_day` pattern.
# + [markdown] colab_type="text" id="Vc9_s7-LyhBI"
# ## Subset and Merge
#
# One challenge of performing a merge on this data is that the `products` and `orders` datasets do not have any common columns that we can merge on. Due to this we will have to use the `order_products` dataset to provide the columns that we will use to perform the merge.
# + colab={} colab_type="code" id="W1yHMS-OyUTH"
# + [markdown] colab_type="text" id="UvhcadjFzx0Q"
# ## 4 ways to reshape and plot
# + [markdown] colab_type="text" id="aEE_nCWjzz7f"
# ### 1. value_counts
# + colab={} colab_type="code" id="vTL3Cko87VL-"
# + [markdown] colab_type="text" id="tMSd6YDj0BjE"
# ### 2. crosstab
# + colab={} colab_type="code" id="Slu2bWYK0CZD"
# + [markdown] colab_type="text" id="ICjPVqO70Hv8"
# ### 3. Pivot Table
# + colab={} colab_type="code" id="LQtMNVa10I_S"
# + [markdown] colab_type="text" id="7A9jfBVv0M7e"
# ### 4. melt
# + colab={} colab_type="code" id="2AmbAKm20PAg"
# + [markdown] colab_type="text" id="kAMtvSQWPUcj"
# # Assignment
#
# ## Join Data Section
#
# These are the top 10 most frequently ordered products. How many times was each ordered?
#
# 1. Banana
# 2. Bag of Organic Bananas
# 3. Organic Strawberries
# 4. Organic Baby Spinach
# 5. Organic Hass Avocado
# 6. Organic Avocado
# 7. Large Lemon
# 8. Strawberries
# 9. Limes
# 10. Organic Whole Milk
#
# First, write down which columns you need and which dataframes have them.
#
# Next, merge these into a single dataframe.
#
# Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products.
#
# ## Reshape Data Section
#
# - Replicate the lesson code
# - Complete the code cells we skipped near the beginning of the notebook
# - Table 2 --> Tidy
# - Tidy --> Table 2
# - Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.
# + colab={} colab_type="code" id="fgxulJQq0uLw"
# Seaborn's built-in flights dataset (year, month, passengers) for the
# pivot-table exercise.
flights = sns.load_dataset('flights')
# + colab={} colab_type="code" id="1qKc88WI0up-"
##### YOUR CODE HERE #####
# + [markdown] colab_type="text" id="mnOuqL9K0dqh"
# ## Join Data Stretch Challenge
#
# The [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of "**Popular products** purchased earliest in the day (green) and latest in the day (red)."
#
# The post says,
#
# > "We can also see the time of day that users purchase specific products.
#
# > Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening.
#
# > **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**"
#
# Your challenge is to reproduce the list of the top 25 latest ordered popular products.
#
# We'll define "popular products" as products with more than 2,900 orders.
#
# ## Reshape Data Stretch Challenge
#
# _Try whatever sounds most interesting to you!_
#
# - Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"
# - Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases"
# - Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)
# - Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
| module1-join-and-reshape-data/LS_DS_121_Join_and_Reshape_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:python2]
# language: python
# name: conda-env-python2-py
# ---
import pandas as pd
import time
import numpy as np
import requests
import psycopg2
import json
import simplejson
import urllib
import config
import ast
import bs4
import pprint
import progressbar
from pymongo import MongoClient
from geopy.geocoders import Nominatim
from bs4 import BeautifulSoup as BS
from operator import itemgetter
from sklearn.cluster import KMeans
from sqlalchemy import create_engine
# !pip install --upgrade progressbar2
# Connect to the local travel_with_friends Postgres DB and pull every
# distinct (city, state) pair that already has POI detail rows.
conn_str = "dbname='travel_with_friends' user='Gon' host='localhost'"
# conn_str = "dbname='travel_with_friends' user='Zoesh' host='localhost'"
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
# cur.execute("select index, name, coord0, coord1 from poi_detail_table where city !='%s' and state = '%s';" %(current_city, current_state))
cur.execute("select distinct city, state from poi_detail_table;" )
all_cities = cur.fetchall()
# Peek at the first city name.
all_cities[0][0]
# Load the headerless city-coordinate CSV, drop the area code, and keep
# one de-duplicated row per (city, state, nation, lat, long).
raw_coords = pd.read_csv('cities_coords.csv', header=None,
                         names=['area_code', 'city', 'state', 'nation', 'coord0', 'coord1'])
cities_coords = (raw_coords[['city', 'state', 'nation', 'coord0', 'coord1']]
                 .drop_duplicates()
                 .reset_index(drop=True))
# +
geolocator = Nominatim()
for items in all_cities:
if cities_coords[cities_coords['state'] == items[1]][cities_coords.city == items[0]].shape[0] == 0:
location_name = ', '.join([items[0], items[1]])
try:
location = geolocator.geocode(location_name)
cities_coords.loc[len(cities_coords)] = [items[0], items[1], 'US', location.latitude, location.longitude]
except:
"error, rest"
time.sleep(20)
print" start again"
# print cities_coords.loc(len(cities_coords))
# -
# Persist the augmented coordinate table to CSV and to Postgres
# (replacing the existing all_cities_coords table).
cities_coords.to_csv('all_cities_coords.csv')
engine = create_engine('postgresql://Gon@localhost:5432/travel_with_friends')
cities_coords.to_sql('all_cities_coords',engine, if_exists = "replace")
# Local module mapping state abbreviations to full names (e.g. 'CA' ->
# 'California'); quick lookup to sanity-check it.
import us_state_abbrevation as abb
state_abb_dict = abb.abb2state
state_abb_dict['CA']
# print state_abb_dict.keys()[state_abb_dict.values().index('CA')]
# Google Places client used by find_geo_location below.
# NOTE: API key value was redacted ('<KEY>') before this file was shared.
from googleplaces import GooglePlaces, types, lang
YOUR_API_KEY = '<KEY>'
google_places = GooglePlaces(YOUR_API_KEY)
# Several Google geocoding API keys to rotate through quota limits
# (values mostly redacted as '<KEY>'); quick smoke test that a geocode
# call succeeds with key 5.
import geocoder
api_key1 = '<KEY>'
api_key2 = '<KEY>'
api_key3 = 'AIzaSyA25LW2CRcD9mSmiAWBYSPOSoiKP_m2plQ'
api_key4 = '<KEY>'
api_key5 = '<KEY>'
add = ' 497 lakeside drive'
g = geocoder.google(add, key = api_key5)
# True when the geocode request succeeded.
g.ok
# Load the rotating API-key list from a JSON config file.
import json
with open('api_key_list.config') as api_key_list_file:
    api_key_list = json.load(api_key_list_file)
api_key_list['api_key_list']
# Display the whole parsed config.
api_key_list
# Spot-check a 100-row sample of previously scraped POI details.
df_tmp = pd.read_csv('test_poi_detail_df_100.csv', index_col = 0)
# df_tmp.to_csv('test_poi_detail_df_100.csv', index_col=None)
df_tmp.head()
# Scratch: probe the visit-length text node.
# NOTE(review): `s` is only assigned in a later cell -- running this
# notebook top-to-bottom raises NameError here.
s.find(text ="Recommended length of visit:")
# visit_length = s.find(text ="Recommended length of visit:").parent.next_sibling
# Connect to local MongoDB and count the cached state-park pages.
client = MongoClient()
db = client.zoeshrm
db.TripAdvisor_state_park.count()
# Parse every cached state-park page with the project's scraping helper;
# returns one dataframe of POI details and one of per-page error flags.
from web_scraping_tripadvisor import state_park_web as web
state_park_pages = db.TripAdvisor_state_park.find()
poi_detail_state_park_df, error_message_df = web(state_park_pages)
# +
# Scratch cell: parse ONE TripAdvisor attraction page end-to-end to
# validate each field extraction before running the full scraping loop.
import re
page = db.TripAdvisor.find_one({'city': 'San Francisco, California'})
search_visit_length = re.compile('Recommended length of visit:')
s = BS(page['html'], "html.parser")
#index
#name
input_list, error_message = [],[]
# Per-field error flags (1 = extraction failed for that field).
state_abb_error, state_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error = 0,0,0,0,0,0,0,0
latitude, longitude, geo_content = None, None, None
# print name
url = page['url']
name = s.find('h1', attrs = {'class':'heading_name'}).text.strip()
#street_address
street_address = s.find('span', attrs = {'class':'street-address'}).text.strip()
#city
city = s.find('span', attrs = {'property':'addressLocality'}).text.strip()
#state
state_abb = s.find('span', attrs = {'property':'addressRegion'}).text.strip()
if state_abb:
    try:
        # state = state_abb_dict.keys()[state_abb_dict.values().index(state_abb)]
        # NOTE(review): abb2state_dict is not defined in this notebook
        # (state_abb_dict is) -- confirm which mapping is intended.
        state = abb2state_dict[state_abb]
    except:
        state_abb_error = 1
        state = state_abb
else:
    state_error =1
    state_abb = None
    state = None
#postal_code
postal_code = s.find('span', attrs = {'property':'postalCode'}).text.strip()
#country
if s.find('span', attrs = {'property':'addressCountry'}).get('content'):
    country = s.find('span',{'property':'addressCountry'}).get('content')
elif s.find('span',{'property':'addressCountry'}).get('content') == None:
    country = s.find('span',{'property':'addressCountry'}).text.strip()
else:
    country = 'United States'
#address
# Build "street, city, state, zip5, country"; drop the state segment when
# it could not be parsed.
if state:
    full_address = street_address+', '+city+', '+state+', '+postal_code[:5]+', '+country
else:
    address_error =1
    full_address = street_address+', '+city+', '+postal_code[:5]+', '+country
# if (name in name_lst) and (full_address in full_address_lst):
#     continue
# else:
#     name_lst.append(name)
#     full_address_lst.append(full_address)
#coord
try:
    latitude, longitude, geo_content = find_latlng(full_address, name)
except:
    geo_error =1
    latitude, longitude, geo_content = None, None, None
#num_reviews
try:
    num_reviews = s.find('div', attrs = {'class': 'rs rating'}).find('a').get('content')
    if num_reviews == None:
        num_reviews = s.find('a', {'property': "reviewCount"}).get('content')
except:
    num_reviews = 0
    review_error=1
#review_score
try:
    review_score = s.find('div', attrs = {'class': 'heading_rating separator'}).find('img').get('content')
    if review_score == None:
        review_score = s.find('a', {'property': "ratingValue"}).get('content')
except:
    review_score = 0
    score_error =1
#ranking
try:
    ranking = s.find('b', attrs = {'class':'rank_text wrap'}).text.strip().replace('#',"")
except:
    ranking = 999
    ranking_error=1
#tag
try:
    tags = ", ".join(label.text.strip() for label in s.select('div.detail > a') + s.select('span.collapse.hidden > a'))
except:
    tags = None
    tag_error =1
#visit_length
if s.find('b', text =search_visit_length):
    raw_visit_length = s.find('b', text =search_visit_length).next_sibling.strip()
else:
    raw_visit_length = None
#fee
if s.find(text= "Fee:"):
    fee = s.find(text= "Fee:").parent.next_sibling.upper()
else:
    fee = 'NO'
#description
if s.find('div', attrs = {'class': "listing_details"}):
    description = s.find('div', attrs = {'class': "listing_details"}).text.strip()
else:
    description = None
# error_message = [len(poi_detail_state_park_df), name, url,state_abb_error, state_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error]
# error_message_df.loc[len(poi_detail_state_park_df)] =error_message
# input_list = [len(poi_detail_state_park_df), name, street_address, city, state_abb, state, postal_code, country, full_address, latitude, longitude, num_reviews, review_score, ranking, tags, visit_length, fee, description, url, geo_content]
# poi_detail_state_park_df.loc[len(poi_detail_state_park_df)] = input_list
# -
# Scratch check of the "Recommended length of visit" extraction on the
# currently parsed page `s`.
import re
search_visit_length = re.compile('Recommended length of visit:')
test = s.find('b', text=search_visit_length).next_sibling.strip()
# FIX: display the extracted value (the original displayed `tests`,
# an undefined name -- NameError typo).
test
# Re-load the API-key config (duplicate of the earlier cell).
import json
with open('api_key_list.config') as f:
    data = json.load(f)
data['api_key_list']
# Load POI details and prepare the fee-text matcher.
# NOTE(review): df_poi is immediately reassigned on the third line, so
# the first read is dead -- confirm which file is intended.
df_poi = pd.read_csv('poi_detail_df.csv', index_col = 0)
search_fee = re.compile('Fee:')
df_poi = pd.read_csv('test_poi_detail_df.csv', index_col= 0)
# Extract the fee text from every cached TripAdvisor page; progress is
# printed every 100 pages.  (Python 2 print statements -- py2 kernel.)
poi_pages = db.TripAdvisor.find()
fee_lst = []
cnt = 0
for page in poi_pages:
    s = BS(page['html'], "html.parser")
    if s.find('b', text= search_fee):
        fee = s.find('b',text= search_fee).next_sibling.strip()
    else:
        fee = 'Unknown'
    fee_lst.append(fee)
    cnt+=1
    if cnt%100 ==0 :
        print '#items in fee lst: ',len(fee_lst)
# Display the collected fee strings.
fee_lst
# Persist scrape results: CSVs for both frames, the geo_content blobs to
# Mongo, and the trimmed POI frame to Postgres.
# NOTE(review): this cell uses `poi_detail_state_park` (no `_df`), which
# is only defined in later cells -- cell-execution order matters here.
error_message_df.to_csv('error_message.csv', encoding=('utf-8'))
poi_detail_state_park_df.to_csv("poi_detail_state_park.csv", encoding=('utf-8'))
try:
    # Split out the verbose geocoder payload before writing to SQL.
    poi_additional_detail = poi_detail_state_park[['index','name','url','address','geo_content']]
    geo_content_detail=poi_detail_state_park.pop('geo_content')
except:
    None
db.geo_content.drop()
db.geo_content.insert_many(poi_additional_detail.to_dict('records'))
poi_detail_state_park.to_sql('poi_detail_state_park_table',engine, if_exists = "replace")
print poi_detail_state_park_df.shape, error_message_df.shape
error_message_df.columns
# +
# # !pip install geocoder
# -
def find_latlng(full_address, name):
    # Geocode a POI via Google: try the full street address first, then
    # fall back to the POI name.  Returns (lat, lng, raw geocoder payload)
    # on success, (None, None, None) when both lookups fail.
    for query in (full_address, name):
        result = geocoder.google(query)
        if result.ok:
            return result.lat, result.lng, result.content
    return None, None, None
def find_geo_location(full_address, name):
    # Look up a POI via Google Places nearby search: bias by the full
    # address, match on the POI name.  Returns (lat, lng, matched name)
    # from the first candidate, or (None, None, None) when nothing
    # matches.  Uses the module-level `google_places` client.
    query_result = google_places.nearby_search(location= full_address, keyword=name)
    if len(query_result.places) >0:
        best_result = query_result.places[0]
        latitude = best_result.geo_location["lat"]
        longitude = best_result.geo_location["lng"]
        google_result_name = best_result.name
        return latitude, longitude, google_result_name
    else:
        print name, "google API cant find here."
        return None, None, None
# Accumulator frames for the scraping loop below: one row per parsed
# page, plus a parallel frame of per-field error flags.
poi_detail_state_park=pd.DataFrame(columns=['index','name','street_address','city','state_abb','state','postal_code','country','address','coord_lat','coord_long','num_reviews','review_score','ranking','tag','visit_length','fee','description','url',"geo_content"])
error_message_df = pd.DataFrame(columns=['index','name','url','state_abb_error','address_error','geo_error','review_error','score_error','ranking_error','tag_error'])
# +
# poi_detail_state_park2=pd.DataFrame(columns=['index','name','street_address','city','state_abb','state','postal_code','country','address','coord_lat','coord_long','num_reviews','review_score','ranking','tag','visit_length','fee','description'])
# -
# Full scraping loop over cached state-park pages.  Resumable: slicing
# the cursor by len(poi_detail_state_park) skips already-parsed pages.
# Each field extraction mirrors the single-page scratch cell above.
state_park_pages = db.TripAdvisor_state_park.find()
index = 0
for page in state_park_pages[len(poi_detail_state_park):]:
    s = BS(page['html'], "html.parser")
    #index
    #name
    error_message = []
    # Per-field error flags (1 = extraction failed for that field).
    state_abb_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error = 0,0,0,0,0,0,0
    input_list = []
    # print name
    url = page['url']
    name = s.find('h1', attrs = {'class':'heading_name'}).text.strip()
    #street_address
    street_address = s.find('span', attrs = {'class':'street-address'}).text.strip()
    #city
    city = s.find('span', attrs = {'property':'addressLocality'}).text.strip()
    #state
    state_abb = s.find('span', attrs = {'property':'addressRegion'}).text.strip()
    if state_abb:
        try:
            state = state_abb_dict[state_abb]
        except:
            state_abb_error = 1
            state = state_abb
    else:
        state_abb = None
        state = None
    #postal_code
    postal_code = s.find('span', attrs = {'property':'postalCode'}).text.strip()
    #country
    if s.find('span', attrs = {'property':'addressCountry'}).get('content'):
        country = s.find('span',{'property':'addressCountry'}).get('content')
    elif s.find('span',{'property':'addressCountry'}).get('content') == None:
        country = s.find('span',{'property':'addressCountry'}).text.strip()
    else:
        country = 'United States'
    #address
    # "street, city, state_abb, zip5, country"; drop the state segment
    # when it could not be parsed.
    if state_abb:
        full_address = street_address+', '+city+', '+state_abb+', '+postal_code[:5]+', '+country
    else:
        address_error =1
        full_address = street_address+', '+city+', '+postal_code[:5]+', '+country
    #coord
    try:
        latitude, longitude, geo_content = find_latlng(full_address, name)
    except:
        geo_error =1
        latitude, longitude, geo_content = None, None, None
    # break
    #num_reviews
    try:
        num_reviews = s.find('div', attrs = {'class': 'rs rating'}).find('a').get('content')
        if num_reviews == None:
            num_reviews = s.find('a', {'property': "reviewCount"}).get('content')
    except:
        num_reviews = 0
        review_error=1
    #review_score
    try:
        review_score = s.find('div', attrs = {'class': 'heading_rating separator'}).find('img').get('content')
        if review_score == None:
            review_score = s.find('a', {'property': "ratingValue"}).get('content')
    except:
        review_score = 0
        score_error =1
    #ranking
    try:
        ranking = s.find('b', attrs = {'class':'rank_text wrap'}).text.strip().replace('#',"")
    except:
        ranking = 999
        ranking_error=1
    #tag
    try:
        tags = ", ".join(label.text.strip() for label in s.select('div.detail > a') + s.select('span.collapse.hidden > a'))
    except:
        tags = None
        tag_error =1
    #visit_length
    if s.find(text ="Recommended length of visit:"):
        visit_length = s.find(text ="Recommended length of visit:").parent.next_sibling
    else:
        visit_length = None
    #fee
    if s.find(text= "Fee:"):
        fee = s.find(text= "Fee:").parent.next_sibling.upper()
    else:
        fee = 'NO'
    #description
    if s.find('div', attrs = {'class': "listing_details"}):
        description = s.find('div', attrs = {'class': "listing_details"}).text.strip()
    else:
        description = None
    # Append this page's row plus its error-flag row.
    input_list = [index, name, street_address, city, state_abb, state, postal_code, country, full_address, latitude, longitude, num_reviews, review_score, ranking, tags, visit_length, fee, description, url, geo_content]
    poi_detail_state_park.loc[len(poi_detail_state_park)] = input_list
    error_message = [index, name, url,state_abb_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error]
    error_message_df.loc[len(poi_detail_state_park)] =error_message
    index += 1
    # time.sleep(1)
# Row counts after the loop.
poi_detail_state_park.shape
# NOTE(review): url_df is not defined anywhere in this notebook -- this
# line raises NameError unless it came from another session.
url_df.shape
# Persist the loop's results (CSV, Mongo geo blobs, Postgres table).
# NOTE(review): this import rebinds `web` (previously the
# state_park_web function) to the whole module.
import web_scraping_tripadvisor as web
error_message_df.to_csv('error_message.csv', encoding=('utf-8'))
poi_detail_state_park.to_csv("poi_detail_state_park.csv", encoding=('utf-8'))
try:
    # Split out the verbose geocoder payload before writing to SQL.
    poi_additional_detail = poi_detail_state_park[['index','name','url','address','geo_content']]
    geo_content_detail=poi_detail_state_park.pop('geo_content')
except:
    None
# +
db.geo_content.insert_many(poi_additional_detail.to_dict('records'))
poi_detail_state_park.to_sql('poi_detail_state_park_table',engine, if_exists = "replace")
# +
# poi_detail_state_park[poi_detail_state_park['name']== '<NAME> Memorial State Park']
# +
# poi_detail_state_park.loc[2065]
# +
# poi_detail_state_park.drop(poi_detail_state_park.index[2065:], inplace = True)
# -
# Round-trip the state-park frame through CSV and load an earlier
# coordinate sample for inspection.
poi_detail_state_park.to_csv("poi_detail_state_park.csv", encoding=('utf-8'))
poi_detail_state_park = pd.read_csv('poi_detail_state_park.csv')
poi_detail_df = pd.read_csv('poi_detail_coords_1000.csv', index_col = 0)
# np.isnan(poi_detail_df.coord_lat[0])
poi_detail_df.coord_lat[:100]
update_idx = poi_detail_state_park[poi_detail_state_park.coord_long == incorrect_long].index.values
for index in update_idx:
full_address = poi_detail_state_park.loc[index].address
name = poi_detail_state_park.loc[index].name
try:
print 'start index: ', index
latitude, longitude, geo_content = find_latlng(full_address, name)
poi_detail_state_park.set_value(index, 'coord_long', longitude)
poi_detail_state_park.set_value(index, 'coord_lat', latitude)
poi_detail_state_park.set_value(index, 'geo_content', geo_content)
print poi_detail_state_park.loc[index][['coord_long','coord_lat','geo_content']]
except:
print 'why', index
break
# Persist the corrected frame (v2 CSV), push the geo blobs to Mongo, and
# refresh the Postgres table.  pop+reassign keeps geo_content as the
# last column.
poi_detail_state_park.to_csv('poi_detail_state_park_v2.csv', index=False)
poi_additional_detail = poi_detail_state_park[['index','name','url','address','geo_content']]
geo_content_detail=poi_detail_state_park.pop('geo_content')
poi_detail_state_park['geo_content'] = geo_content_detail
db.geo_content.insert_many(poi_additional_detail.to_dict('records'))
poi_detail_state_park.to_sql('poi_detail_state_park_table',engine, if_exists = "replace")
# +
# Scratch: fetch one live attraction page (only the LAST htmlurl
# assignment is used) and time a tag-extraction strategy.
htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g35805-d1134861-Reviews-Cloud_Gate-Chicago_Illinois.html'
htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g60713-d127854-Reviews-San_Francisco_Zoo-San_Francisco_California.html'
htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g60750-d104122-Reviews-San_Diego_Zoo-San_Diego_California.html'
htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g60713-d102523-Reviews-Alcatraz_Island-San_Francisco_California.html'
# htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g32474-d4236729-Reviews-Harmony_Headlands_State_Park-Harmony_San_Luis_Obispo_County_California.html'
# htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g42926-d142814-Reviews-Cannon_Valley_Trail-Cannon_Falls_Minnesota.html'
# htmlurl = 'https://www.tripadvisor.com/Attraction_Review-g42891-d126627-Reviews-Paul_Bunyan_State_Trail-Brainerd_Minnesota.html'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
r=requests.get(htmlurl,headers=headers)
s = BS(r.text, 'html.parser')
# for div in s.find('div', attrs = {'class' : "separator" }):
#     for tag in div.:
#         if tag.name == 'div' and tag.get('class', '') == ['detail']:
#             print tag.text
#     for item in div.contents:
#         # print item
#         if type(item)== 'bs4.element.Tag' and item.name == "detail":
#             print 1234567890
st = time.time()
# Walk each "separator" div's direct children, keeping the text of
# class="detail" tags; NOTE only the last match survives in `tags`.
for div in s.findAll("div", {"class": "separator"}):
    for tag in div.contents:
        if isinstance(tag, bs4.element.Tag) and tag.get('class',"") == ['detail'] :
            tags = tag.text.encode('utf8').strip()
print time.time() - st
tags
# -
# -
# s.find('span',{'property':'addressCountry'}).get('content')
# s.select('span[property="addressCountry"]').get('content')
# +
# Prototype extraction of one TripAdvisor attraction page (`s` is the parsed
# soup from the request above).  Pulls the address, ranking, tags and
# description fields that later feed poi_detail_*.  NOTE(review): every
# `find(...)` assumes the element exists -- a missing span raises
# AttributeError; the production loop below wraps these in try/except.
#index
#name
name = s.find('h1', attrs = {'class':'heading_name'}).text.strip()
#city
city = s.find('span', attrs = {'property':'addressLocality'}).text.strip()
street_address = s.find('span', attrs = {'class':'street-address'}).text.strip()
#state
state_abb = s.find('span', attrs = {'property':'addressRegion'}).text.strip()
# state = state_abb_dict.keys()[state_abb_dict.values().index(state_abb)]
postal_code = s.find('span', attrs = {'property':'postalCode'}).text.strip()
#country
country = s.find('span',{'property':'addressCountry'}).get('content')
#address -- comma-joined human-readable address used later for geocoding
full_address = street_address+', '+city+', '+state_abb+', '+postal_code+', '+country
# from geopy.geocoders import Nominatim
# geolocator = Nominatim()
# location =geolocator.geocode(street_address+', '+city+', '+state_abb+', '+country)
# #coord_lat
# coord_lat = location.latitude
# #coord_long
# coord_long =location.longitude
#num_reviews
# num_reviews = s.find('div', attrs = {'class': 'rs rating'}).find('a').get('content')
#review_score
# review_score = s.find('div', attrs = {'class': 'heading_rating separator'}).find('img').get('content')
#ranking -- e.g. "#3 of 120 things to do" with the '#' stripped
ranking = s.find('b', attrs = {'class':'rank_text wrap'}).text.strip().replace('#',"")
#tag -- visible category labels plus the ones hidden behind "more"
tags = ", ".join(label.text for label in s.select('div.detail > a') + s.select('span[class="collapse hidden"] > a'))
#visit_length
# visit_length = s.find(text ="Recommended length of visit:").parent.next_sibling
# #fee
# fee = s.find(text= "Fee:").parent.next_sibling
#description
description = s.find('div', attrs = {'class': "listing_details"}).text.strip()
# -
st =time.time()
d =", ".join(label.text.strip() for label in s.select('div.listing_details'))
# print d
ed = time.time() -st
print ed
st =time.time()
s.find('div', attrs = {'class': "listing_details"}).text.strip()
ed = time.time() -st
print ed
# s.select('span.hidden.collapse > a')
postal_code = s.find('span', attrs = {'property':'postalCode'}).text.strip()
print postal_code[:5]
# num_reviews = s.find('div', attrs = {'class': 'rs rating'}).find('a').get('content')
t1 = time.time()
s.select('a[property="reviewCount"]')[0].get("content")
t2 = time.time()
s.find('a', {'property': "reviewCount"}).get('content')
et = time.time()
print et -t1, et-t2
# !pip install python-google-places
# +
from googleplaces import GooglePlaces, types, lang
YOUR_API_KEY = '<KEY>'
google_places = GooglePlaces(YOUR_API_KEY)
print name, full_address
address1 = "393 County Road 174, Grove Hill, AL, 35975, United States"
query_result = google_places.nearby_search(location = address1, keyword=name)
query_result
# -
name, full_address
# +
# s.select('div[class="detail"] > a')
# +
# <span class="collapse hidden">, <a href="/Attractions-g60713-Activities-c57-t68-San_Francisco_California.html">Nature & Wildlife Areas</a></span>
# +
# detail = {}
# addition_info = s.find('div', attrs = {'class':'details_wrapper'}).text.strip('\n').replace("\n\n","\n").split('\n')
# # if addition_info[0] == 'Description':
# # print addition_info[1]
# addition_info
# for info in addition_info:
# info_list = info.split(':')
# if info_list[0]=="Fee":
# details["Fee"] = info_list[1]
# else:
# details["length of visit"] = info_list[1]
# details
# -
# fee = s.find('div', {'class':'details_wrapper'})
# fee
# length_visit = s.find(text ="Recommended length of visit:").parent.next_sibling
# length_visit
# fee = s.find(text= "Fee:").parent.next_sibling
# fee
# +
# description = s.find('div', attrs = {'class': "listing_details"}).text.strip()
# print description
# -
len(query_result.places)
# +
## different api try
# try:
# YOUR_API_KEY = '<KEY>'
# google_places = GooglePlaces(YOUR_API_KEY)
# latitude, longitude, google_result_name = find_geo_location(full_address, name)
# except:
# print "API error, try different key"
# time.sleep(20)
# try:
# YOUR_API_KEY = '<KEY>'
# google_places = GooglePlaces(YOUR_API_KEY)
# latitude, longitude, google_result_name = find_geo_location(full_address, name)
# except:
# print "both Key dont work"
# print" location not found: ", name, "address : ", full_address
# break
# if location:
# #coord_lat
# poi_detail_state_park['coord_lat'] = location.latitude
# #coord_long
# poi_detail_state_park['coord_long'] =location.longitude
# else:
# print" location not found: ", name, "address : ", full_address
# -
state_abb_error_ix = error_message_df[error_message_df['state_abb_error']==1]['index']
address_error_ix = error_message_df[error_message_df['address_error']==1]['index']
# poi_detail_state_park_df.ix[state_abb_error_ix][['state_abb','state','country']]
poi_detail_state_park_df.ix[address_error_ix][['address','country']]
error_message_df.columns
poi_detail_state_park_df.columns
# +
# poi_detail_state_park.fee[poi_detail_state_park.fee == 'NO']
# -
poi_detail_state_park.shape
err = error_message_df[error_message_df.review_error == 1].index
for i, link in enumerate(poi_detail_state_park_df.ix[err][['name','url']].url):
print i, link
error_message_df.tail()
poi_detail_state_park_df.drop_duplicates('coord_lat').shape
# +
# htmlurl = 'https://en.wikipedia.org/wiki/List_of_areas_in_the_United_States_National_Park_System'
htmlurl= 'https://en.wikipedia.org/wiki/List_of_national_parks_of_the_United_States'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
r=requests.get(htmlurl,headers=headers)
s = BS(r.text, 'html.parser')
# +
import re
# Parse the Wikipedia "List of national parks of the United States" table
# (soup `s` from the request above) into a (name, state, description) frame.
name, state =None, None
table = s.find('table', {"class" : "wikitable"})
# col_name = [x.text for x in table.findAll("th",{"scope":"col"})]
# num_col = len(col_name)
# wiki_table= pd.DataFrame(columns=col_name)
national_park_US_df2 = pd.DataFrame(columns = ["name","state","description"])
for row in table.findAll("tr")[1:]:  # skip the header row
    # The park name lives in the row-scoped <th>; its link's title attribute
    # carries the clean article name.
    if row.find('th', {'scope':"row"}) != None:
        name = row.find('th', {'scope':"row"}).next_element.get('title')
    cells = row.findAll("td")
    #For each "tr", assign each "td" to a variable.
    # Only full data rows have 6 cells; col 1 = state, col 5 = description.
    if len(cells) == 6:
        state = cells[1].find(text=True)
        des = str("".join(cells[5].findAll(text=True)).encode('utf8'))
        # Strip footnote markers like "[12]" from the description text.
        description = re.sub(r"\[\d+\]","",des)
        national_park_US_df2.loc[len(national_park_US_df2)] = [name, state, description]
# -
# "".join(national_park_US_df2.desciption[0])
for index in national_park_US_df.index:
keyword = national_park_US_df.name[index].replace(' ','+')+"+"+national_park_US_df.state[index].replace(' ','+')
# keyword = national_park_US_df.name[index].replace(' ','+')
trip_url = "https://www.tripadvisor.com/Search?q=" +keyword+"&queryParsed=true&searchSessionId"
# headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# r=requests.get(trip_url,headers=headers)
# test_s = BS(r.text, 'html.parser')
# print index, trip_url
import wikipedia
wiki = wikipedia.page('List_of_national_parks_of_the_United_States')
# Example TripAdvisor search URLs (kept for reference; commented out so the
# file parses):
# https://www.tripadvisor.com/Search?q=Acadia+National+Park&geo=28940&queryParsed=true&searchSessionId=F658A1719FACDE7E30D13912D3D1B3381492826820567ssid
# https://www.tripadvisor.com/Search?q=Pinnacles+national+park&queryParsed=true&searchSessionId
# https://www.tripadvisor.com/Search?q=Acadia+National+Park&queryParsed=true&searchSessionId
test =national_park_US_df.name[0].replace(" ", "+")
trip_url = "https://www.tripadvisor.com/Search?q=" +test+"&queryParsed=true&searchSessionId"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
r=requests.get(trip_url,headers=headers)
test_s = BS(r.text, 'html.parser')
# +
# trip_url_30 = "https://www.tripadvisor.com/Search?geo=191&redirect&q=national+parks&uiOrigin=MASTHEAD&ssrc=A&returnTo=__2F__Tourism__2D__g143030__2D__Great__5F__Basin__5F__National__5F__Park__5F__Nevada__2D__Vacations__2E__html&pid=3825&startTime=1492837392267&searchSessionId=F658A1719FACDE7E30D13912D3D1B3381492834657203ssid#&ssrc=g&o=0"
# trip_url_60 = "https://www.tripadvisor.com/Search?geo=191&redirect&q=national+parks&uiOrigin=MASTHEAD&ssrc=A&returnTo=__2F__Tourism__2D__g143030__2D__Great__5F__Basin__5F__National__5F__Park__5F__Nevada__2D__Vacations__2E__html&pid=3825&startTime=1492837392267&searchSessionId=F658A1719FACDE7E30D13912D3D1B3381492834657203ssid#&ssrc=g&o=30"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
r=requests.get(trip_url_30,headers=headers)
# trip_30 = BS(r.text, 'html.parser')
# r=requests.get(trip_url_60,headers=headers)
# trip_60 = BS(r.text, 'html.parser')
import urllib
trip_30_html = urllib.urlopen("trip_30.html").read()
trip_60_html = urllib.urlopen("trip_60.html").read()
trip_30 = BS(trip_30_html, 'html.parser')
trip_60 = BS(trip_60_html, 'html.parser')
# +
import re
to_do = re.compile("Things to do")
# trip_search_result = pd.DataFrame(columns=["name","url"])
for poi in trip_60.findAll('div', {"class": "title"}):
name = poi.text
for child in poi.next_siblings:
if child.find(text=to_do) != None:
url = child.find(text=to_do).parent.get('href')
trip_search_result.loc[len(trip_search_result)] = [name, url]
# for link in trip_30.findAll(text = to_do):
# print link.parent.get('href')
# name = poi.text
# url = poi.get('onclick').replace("ta.setEvtCookie('Search_Results_Page', 'POI_Name', '', 0, '", "").replace("')","")
# +
# trip_search_result
# +
# # for url in trip_search_result.url:
# url = trip_search_result.url[0]
# headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# r=requests.get(url,headers=headers)
# s = BS(r.text, 'html.parser')
# -
def request_s(url):
    """Fetch *url* and return it parsed as a BeautifulSoup document."""
    # TripAdvisor rejects non-browser clients, so spoof a desktop Chrome UA.
    browser_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    response = requests.get(url, headers=browser_headers)
    return BS(response.text, 'html.parser')
def thing_to_do(s):
    """Scrape one park's TripAdvisor "Things to Do" listing page.

    Parameters
    ----------
    s : BeautifulSoup
        Parsed "Things to Do in <park>" page.

    Returns
    -------
    pandas.DataFrame with one row per attraction and columns
    national_park_name, activate_name, url, num_reviews, score, ranking, tags.
    """
    # NOTE: the local DataFrame shadows the function's own name (kept as-is).
    thing_to_do = pd.DataFrame(columns=["national_park_name","activate_name","url","num_reviews","score","ranking","tags"])
    # Heading reads "Things to Do in <park name>"; strip the prefix.
    national_park_name = s.find('h1', {"id": "HEADING"}).text.strip('\n').replace("Things to Do in ","")
    print "park name: ",national_park_name
    for activate in s.findAll('div', {"class":"listing_title"}):
        activate_name = activate.text.strip()
        # hrefs on the page are relative; prepend the site root.
        url ="https://www.tripadvisor.com"+ activate.find('a').get("href")
        # The rating block is optional; default score/review count to 0.
        if activate.find_next('div', {"class":"rs rating"}) ==None:
            score, num_reviews = 0, 0
        else:
            score = activate.find_next('div', {"class":"rs rating"}).find('span').get('alt').replace(" of 5 bubbles","")
            num_reviews = activate.find_next('div', {"class":"rs rating"}).find('span', {'class': "more"}).text.strip().replace("reviews","")
        # NOTE(review): the trailing [0] keeps only the first character of the
        # ranking text, so ranks >= 10 are truncated to one digit -- confirm
        # whether that is intended.
        ranking = activate.find_next('div', {'class':"popRanking wrap"}).text.strip().replace("#","")[0]
        if activate.find_next('div',{'class':"tag_line"}).find('span') == None:
            tags = None
        else:
            tags = activate.find_next('div',{'class':"tag_line"}).find('span').text
        list_thing = [national_park_name, activate_name, url, num_reviews, score, ranking, tags]
        thing_to_do.loc[len(thing_to_do)] = list_thing
    return thing_to_do
# Crawl every park's "Things to Do" page and accumulate rows, then persist.
thing_to_do_national_park_df = pd.DataFrame(columns=["national_park_name","activate_name","url","num_reviews","score","ranking","tags"])
for url in national_park_US_df.url:
    thing_to_do_page = request_s(url)
    result = (thing_to_do(thing_to_do_page))
    thing_to_do_national_park_df = thing_to_do_national_park_df.append(result, ignore_index=True)
    # Throttle requests so the crawl is polite / less likely to be blocked.
    time.sleep(2)
thing_to_do_national_park_df.to_csv('poi_detail_national_park_todo_df.csv',encoding=('utf-8'))
name_list = set(thing_to_do_national_park_df.national_park_name)
['index','name','street_address','city','state_abb','state','postal_code','country','address','coord_lat','coord_long','num_reviews','review_score','ranking','tag','raw_visit_length','fee','description','url',"geo_content"]
# national_park_US_df["tags"] = "National Park"
# national_park_US_df["url"] = None
# national_park_US_df.rename(columns={'desciption': 'description'}, inplace=True)
# national_park_US_df["index"] = national_park_US_df.index
national_park_US_df.columns
national_park_US_df = national_park_US_df[['index','name','street_address','city','state_abb','state','postal_code','country','address','coord_lat','coord_long','num_reviews','review_score','ranking','tag','raw_visit_length','fee','description','url',"geo_content", "check"]]
small_national_park = national_park_US_df[national_park_US_df.check == 0]
national_park_US_df.to_csv("poi_detail_national_park.csv", encoding=('utf-8'))
small_national_park.ix[1].url = "https://www.tripadvisor.com/Attractions-g143045-Activities-National_Park_of_American_Samoa_Tutuila.html"
should_be_big = small_national_park.ix[[12, 14, 30, 45]]
# small_national_park
# 1,49,52
# Scrape the detail page of each "small" national park into
# small_national_park_df, recording per-field failures in error_message_df
# (one 0/1 flag per field) so problem rows can be re-inspected later.
small_national_park_df=pd.DataFrame(columns=['index','name','street_address','city','state_abb','state','postal_code','country','address','coord_lat','coord_long','num_reviews','review_score','ranking','tag','raw_visit_length','fee','description','url',"geo_content"])
error_message_df = pd.DataFrame(columns=['index','name','url','state_abb_error', 'state_error','address_error','geo_error','review_error','score_error','ranking_error','tag_error'])
search_visit_length = re.compile('Recommended length of visit:')
search_fee = re.compile('Fee:')
cnt = 0
name_lst = []            # names seen so far (duplicate suppression)
full_address_lst = []    # addresses seen so far (duplicate suppression)
api_i = 0                # counts geocoding-API retries
for url in small_nation_park.url:
    s = request_s(url)
    input_list, error_message = [],[]
    state_abb_error, state_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error = 0,0,0,0,0,0,0,0
    latitude, longitude, geo_content = None, None, None
    name = s.find('h1', attrs = {'class':'heading_name'}).text.strip()
    #street_address
    street_address = s.find('span', attrs = {'class':'street-address'}).text.strip()
    #city
    city = s.find('span', attrs = {'property':'addressLocality'}).text.strip()
    #state -- expand the abbreviation via abb2state_dict; fall back to the
    # abbreviation itself (and flag it) if the lookup fails.
    state_abb = s.find('span', attrs = {'property':'addressRegion'}).text.strip()
    if state_abb:
        try:
            # state = state_abb_dict.keys()[state_abb_dict.values().index(state_abb)]
            state = abb2state_dict[state_abb]
        except:
            state_abb_error = 1
            state = state_abb
    else:
        state_error =1
        state_abb = None
        state = None
    #postal_code
    postal_code = s.find('span', attrs = {'property':'postalCode'}).text.strip()
    #country -- prefer the content attribute, then the span text, else assume US.
    if s.find('span', attrs = {'property':'addressCountry'}).get('content'):
        country = s.find('span',{'property':'addressCountry'}).get('content')
    elif s.find('span',{'property':'addressCountry'}).get('content') == None:
        country = s.find('span',{'property':'addressCountry'}).text.strip()
    else:
        country = 'United States'
    #address -- only the first 5 digits of the ZIP are kept.
    if state:
        full_address = street_address+', '+city+', '+state+', '+postal_code[:5]+', '+country
    else:
        address_error =1
        full_address = street_address+', '+city+', '+postal_code[:5]+', '+country
    # Skip exact (name, address) duplicates already processed.
    if (name in name_lst) and (full_address in full_address_lst):
        continue
    else:
        name_lst.append(name)
        full_address_lst.append(full_address)
    # Geocode; find_latlng returning False signals an API failure, so retry.
    try:
        # latitude, longitude, geo_content = find_latlng(full_address, name, 1)
        result_longlat = find_latlng(full_address, name, 1)
        while result_longlat == False:
            api_i+=1
            result_longlat = find_latlng(full_address, name, 1)
    except:
        geo_error =1
        latitude, longitude, geo_content = None, None, None
    # NOTE(review): if the except branch above ran, result_longlat may be
    # unset (first iteration) or stale from the previous park -- confirm.
    [latitude, longitude, geo_content] = result_longlat
    #num_reviews -- try the legacy markup first, then the schema.org property.
    try:
        num_reviews = s.find('div', attrs = {'class': 'rs rating'}).find('a').get('content')
    except:
        try:
            num_reviews = s.find('a', {'property': "reviewCount"}).get('content')
        except:
            num_reviews = 0
            review_error=1
    #review_score -- same two-layout fallback as num_reviews.
    try:
        review_score = s.find('div', attrs = {'class': 'heading_rating separator'}).find('img').get('content')
    except:
        try:
            review_score = s.find('span', {'property': "ratingValue"}).get('content')
        except:
            review_score = 0
            score_error =1
    #ranking -- 999 is the "unknown" sentinel.
    try:
        ranking = s.find('b', attrs = {'class':'rank_text wrap'}).text.strip().replace('#',"")
    except:
        ranking = 999
        ranking_error=1
    #tag -- visible labels plus the collapsed ones behind "more".
    try:
        tags = ", ".join(label.text.strip() for label in s.select('div.detail > a') + s.select('span.collapse.hidden > a'))
    except:
        tags = None
        tag_error =1
    #visit_length -- the text node following the "Recommended length" label.
    if s.find('b', text =search_visit_length):
        raw_visit_length = s.find('b', text =search_visit_length).next_sibling.strip()
    else:
        raw_visit_length = None
    #fee
    if s.find('b', text= search_fee):
        fee = s.find('b',text= search_fee).next_sibling.strip()
    else:
        fee = 'Unknown'
    #description
    if s.find('div', attrs = {'class': "listing_details"}):
        description = s.find('div', attrs = {'class': "listing_details"}).text.strip()
    else:
        description = None
    # Append the error flags and the scraped row under the same index.
    error_message = [len(small_national_park_df), name, url,state_abb_error, state_error, address_error, geo_error, review_error, score_error, ranking_error, tag_error]
    error_message_df.loc[len(small_national_park_df)] =error_message
    input_list = [len(small_national_park_df), name, street_address, city, state_abb, state, postal_code, country, full_address, latitude, longitude, num_reviews, review_score, ranking, tags, raw_visit_length, fee, description, url, geo_content]
    small_national_park_df.loc[len(small_national_park_df)] = input_list
for u in small_national_park_df.url:
print u
small_national_park_df.shape
national_park_US_df =national_park_US_df.drop("check",1)
national_park_US_df.to_csv("poi_detail_national_park.csv", encoding=('utf-8'))
# national_park_US_df["url"][national_park_US_df.index == 52] = "https://www.tripadvisor.com/Attractions-g147411-Activities-Virgin_Islands_National_Park_St_John_U_S_Virgin_Islands.html"
name_list=[]
for name in national_park_US_df["name"]:
name_list.append(name.split(",",1)[0])
national_park_US_df["name"]=name_list
national_park_US_df[national_park_US_df.check ==0]
#small 12 14 30 45
small_nation_park = national_park_US_df.ix[[12 ,14, 30, 45]]
national_park_US_df = national_park_US_df.drop(national_park_US_df.index[[12,14,30,45]])
for address in national_park_US_df.address:
for address2 in trip_search_result.address:
if address == address2:
national_park_US_df.name[national_park_US_df.address == address] = trip_search_result.name[trip_search_result.address ==address2].values[0]
# national_park_US_df.url[national_park_US_df.address == address] = trip_search_result.url[trip_search_result.address ==address2].values[0]
trip_search_result.name[trip_search_result.address =="Arches National Park, Utah, USA"].values[0]
# trip_search_result[(trip_search_result.address =="Arches National Park, Utah, USA")].index
# +
# address, lat, lng = [],[],[]
# park name: Acadia National Park
# park name: National Park of American Samoa
list1,list2 =[],[]
# street_address, city, state_abb, postal_code, country,geo_content = [],[],[],[],[],[]
g = geocoder.google("National Park of American Samoa")
name = "National Park of American Samoa"
city = g.city
street_address = g.street_long
state_abb = g.state
state = g.state_long
postal_code=g.postal
country=g.country_long
geo_content=g.content
full_address = g.address
latitude =g.lat
longitude = g.lng
num_reviews, review_score, ranking = None, None ,None
tags = "National Park"
raw_visit_length, fee = None, None
# url = "https://www.tripadvisor.com/Attractions-g143010-Activities-Acadia_National_Park_Mount_Desert_Island_Maine.html"
url ="https://www.tripadvisor.com/Attractions-g143045-Activities-National_Park_of_American_Samoa_Tutuila.html"
# -
national_park_US_df
# list2 = [1, name, street_address, city, state_abb, state, postal_code, country, full_address, latitude, longitude, num_reviews, review_score, ranking, tags, raw_visit_length, fee, description, url, geo_content, 1]
# national_park_US_df["address"] = address
# national_park_US_df["lat"] = lat
# national_park_US_df["lng"] = lng
national_park_US_df["city"] = city
national_park_US_df["street_address"] = street_address
national_park_US_df["state_abb"] = state_abb
national_park_US_df["postal_code"] = postal_code
national_park_US_df["country"] = country
national_park_US_df["geo_content"] = geo_content
# +
# national_park_US_df
# -
address_match = []
for address in national_park_US_df.address:
for address2 in trip_search_result.address:
if address == address2:
address_match.append(address)
check_list = []
for add in trip_search_result.address:
if add in address_match:
check_list.append(1)
else:
check_list.append(0)
trip_search_result['check'] = check_list
# +
# national_park_US_df[national_park_US_df.check ==0]
# -
# trip_search_result[trip_search_result.check == 0]
# +
# trip_search_result.name
# for link in trip_search_result.ix[[52,54,58,53,45,57,56]].url:
# print link
# +
# pop_list = [52,54,58,53,45,57,56]
# trip_search_result = trip_search_result.drop(pop_list).sort()
# -
national_park_US_df
new_poi_df= pd.read_csv("new_poi_df.csv", index_col=0)
new_poi_df.poi_type
national_park_US_df.to_csv("poi_detail_national_park.csv", encoding = ('utf-8'))
national_park_US_df = pd.read_csv("poi_detail_national_park.csv", encoding = ('utf-8'), index_col=0)
state_park_df2.to_csv("poi_detail_national_park.csv", encoding = ('utf-8'))
state_park_df = pd.read_csv("poi_detail_national_park.csv", encoding = ('utf-8'), index_col=0)
US_park= pd.concat([state_park_df, national_park_US_df])
US_park.to_csv("poi_detail_us_park.csv", encoding = ('utf-8'))
import web_scraping_tripadvisor as web
client = MongoClient()
db = client.zoeshrm
db_html = db.TripAdvisor_state_park.find()
print db_html.count()
state_park_df2, error_state_park_df2 = web.state_park_web(db_html)
state_park_df2.shape
sorted(state_park_df2.area)
area_list=[]
for x in state_park_df2.area:
if x<34:
area_list.append(60)
elif x<500:
area_list.append(120)
elif x<2000:
area_list.append(180)
else:
area_list.append(240)
for x in [60,120,180,240]:
print x,": ", area_list.count(x)
state_park_df2["time_base_on_area"] = area_list
# +
# state_park_df2["adjusted_visit_length"][state_park_df2.adjusted_visit_length ==0] =state_park_df2["time_base_on_area"][state_park_df2.adjusted_visit_length ==0]
# state_park_df2["poi_type"] = "StatePark"
# state_park_df2.drop("time_base_on_area", 1, inplace=True)
# state_park_df2.drop("area", 1, inplace=True)
# state_park_df2.drop("index", 1, inplace=True)
state_park_df2.columns
# +
import psycopg2
import simplejson
import numpy as np
from distance import *
conn_str = "dbname='travel_with_friends' user='Gon' host='localhost'"
my_key = '<KEY>'
def find_county(state, city):
    '''
    Look up the county for a U.S. city/state pair in poi_detail_table.

    Only valid within the U.S.  Underscores in *city* are treated as spaces
    (URL-style names).  Returns the county name, or None when no row matches.
    '''
    conn = psycopg2.connect(conn_str)
    try:
        cur = conn.cursor()
        city = city.replace('_',' ')
        # Parameterized query: the previous %-interpolated SQL was open to
        # SQL injection and broke on names containing quotes (e.g. O'Brien).
        cur.execute(
            "select county from poi_detail_table where city = %s and state = %s;",
            (city.title(), state.title()),
        )
        county = cur.fetchone()
    finally:
        # Close the connection even if the query raises (was leaked before).
        conn.close()
    if county:
        return county[0]
    else:
        return None
# Backfill a 'county' column on national_park_US_df: special-case NYC,
# otherwise read administrative_area_level_2 out of the stored Google
# geocoding payload, falling back to a DB lookup by city/state.
county_list= []
county = None
for index in national_park_US_df.index:
    try:
        if (national_park_US_df.city[index] == 'New York City') and (national_park_US_df.state[index] == 'New York'):
            county = 'NEW YORK'
        elif "administrative_area_level_2" in national_park_US_df.geo_content[index]:
            # geo_content is a stringified dict; literal_eval it back.
            geo_content = ast.literal_eval(national_park_US_df.geo_content[index])['results']
            for info in geo_content[0]['address_components']:
                if info['types'][0] == "administrative_area_level_2":
                    county = info['short_name'].replace(' County', '').upper().encode('utf-8').strip()
        else:
            county = find_county(national_park_US_df.state[index], national_park_US_df.city[index])
    except:
        # NOTE(review): bare except leaves `county` as whatever the previous
        # iteration set unless the assignment below runs -- it resets to None.
        county = None
    county_list.append(county)
national_park_US_df['county'] = county_list
# -
for x in range(len(state_park_df)):
if state_park_df.tag[x].find("State Parks")==-1:
print state_park_df.tag[x]
import matplotlib.pyplot as plt
plt.figure()
# %matplotlib inline
state_park_df2[state_park_df2.area >34]["area"].plot.hist(bins=1000,xlim=[35,2000])
import geocoder
g= geocoder.google("yellowstone national park")
box = g.bbox
g.bbox
find_area(box)
from math import cos, radians
def find_area(box):
    # to make thing simple, we use the flat-earth approximation:
    # Latitude: 1 deg = 110.574 km
    # Longitude: 1 deg = 111.320*cos(latitude) km
    # if we need more accuracy, use a different approach,
    # ex. using Shapely to calculate polygon / WGS84 formula.
    """Approximate the area (km^2) of a lat/lng bounding box.

    *box* is a dict with 'southwest' and 'northeast' entries, each a
    [lat, lng] pair (the shape of geocoder.google(...).bbox).
    """
    sw_lat, sw_lng = box["southwest"]
    ne_lat, ne_lng = box["northeast"]
    # Height of the box in km.
    height_km = (ne_lat - sw_lat) * 110.574
    # Width of the box in km: the longitude *span* scaled by cos(mean lat).
    # BUG FIX: the old code computed 111.320*cos(delta_lng) -- it took the
    # cosine of the longitude difference and never multiplied by the span,
    # yielding a meaningless width.
    mean_lat = radians((sw_lat + ne_lat) / 2.0)
    width_km = (ne_lng - sw_lng) * 111.320 * cos(mean_lat)
    return abs(height_km * width_km)
# +
# Haversine great-circle distance demo between two fixed coordinate pairs,
# checked against a known expected value (~278.546 km).
from math import sin, cos, sqrt, atan2, radians
# approximate radius of earth in km
R = 6373.0
lat1 = radians(52.2296756)
lon1 = radians(21.0122287)
lat2 = radians(52.406374)
lon2 = radians(16.9251681)
dlon = lon2 - lon1
dlat = lat2 - lat1
# Haversine formula: a is the squared half-chord, c the angular distance.
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
print("Result:", distance)
print("Should be:", 278.546, "km")
# -
def raw_to_adjust_time(raw):
    """Map a TripAdvisor 'length of visit' label to minutes.

    Unrecognized labels (including None) map to 0.
    """
    duration_minutes = {
        "<1 hour": 60,
        "1-2 hours": 120,
        "2-3 hours": 180,
        "More than 3 hours": 360,
    }
    return duration_minutes.get(raw, 0)
# +
# Fetch (index, coord0, coord1, city, poi_rank) rows for a fixed list of
# event ids from the local travel_with_friends database.
conn_str = "dbname='travel_with_friends' user='Gon' host='localhost'"
event_ids = [353, 355, 354, 360, 972, 361, 356, 357, 352, 359]
conn = psycopg2.connect(conn_str)
cur = conn.cursor()
# points = np.zeros((len(event_ids), 5))
points = []
for i,v in enumerate(event_ids):
    # print i, v
    # NOTE(review): ids are interpolated with %i -- safe here only because
    # event_ids are hard-coded ints; prefer parameterized queries otherwise.
    cur.execute("select index, coord0, coord1, city , poi_rank from poi_detail_table where index = %i;"%(float(v)))
    a = cur.fetchone()
    points.append(list(a))
    # points[i] = cur.fetchone()
conn.close()
# points = np.array(points)
# print points
def check_NO_1(poi_list, city_name):
    """Promote the #1-ranked POI of *city_name* to the front.

    Each row of *poi_list* looks like [index, coord0, coord1, city, poi_rank].
    When a row has city == city_name and poi_rank == 1, it is popped from
    poi_list (mutating it) and the rows are returned as a numpy array with
    that row stacked first; otherwise poi_list is returned unchanged.
    """
    top_idx = next(
        (pos for pos, poi in enumerate(poi_list)
         if poi[3] == city_name and poi[4] == 1),
        None,
    )
    if top_idx is None:
        return poi_list
    number_one = poi_list.pop(top_idx)
    return np.vstack((np.array(number_one), np.array(poi_list)))
# -
new_points= check_NO_1(points, "Detroit")
print new_points
us_park_df = pd.read_csv("poi_detail_us_park.csv",encoding = ('utf-8'), index_col=0)
poi_detail_v2 = pd.read_csv("new_poi_df_v2.csv" ,encoding = ('utf-8'), index_col=0)
poi_detail_v2.shape, us_park_df.shape
poi_detail_table_final_v1= pd.concat([poi_detail_v2, us_park_df])
poi_detail_table_final_v1.to_csv("poi_detail_table_final_v1.csv", encoding = ('utf-8'))
import os
os.getcwd()
user = pd.read_csv("auth_user.csv")
# +
from sqlalchemy import create_engine
engine = create_engine('postgresql://{}@localhost:5432/travel_with_friends'.format("Gon"))
user.to_sql('auth_user',engine, if_exists = "replace")
# -
poi = pd.read_csv("poi_detail_table_final_v1.csv", encoding = ('utf-8'), index_col =0)
# poi[["num_reviews","review_score"]].fillna(0)
poi[["num_reviews","review_score"]] = poi[["num_reviews","review_score"]].fillna(0)
poi2.to_csv("poi_detail_table_final_v1.csv", encoding = ('utf-8'))
print "github go back to 53294e143b1a3565eedf3854f25b5b6ecee7b813"
poi.shape
poi.columns
poi2 = poi.drop_duplicates(subset = ["name","address"])
poi2.shape
poi2.shape
new_index = poi2.index.tolist()
type(new_index)
poi3= poi[~poi.index.isin(new_index)]
poi3.shape
problem_name_list=[]
problem_index_list =[]
for i in poi3.index.tolist():
for j in poi3.index.tolist():
if poi3.name[i] == poi3.name[j]:
if (poi3.url[i]!=poi3.url[j]) :
problem_name_list.append(poi3.name[i])
problem_index_list.append(i)
# +
# poi3.ix[problem_index_list]
# -
a = list(set(problem_name_list))
len(a)
# TODO: addresses still need fixing
# TODO: add column icon_img_url after pushing the images to S3
import os
import time
img_list = os.listdir("img_file/")
# +
def save_img(path, img_url):
    """Download *img_url* and write the raw bytes to *path*.

    Uses a with-block so the file handle is closed even when the download
    raises (the original leaked the handle on error).
    """
    with open(path, 'wb') as img_file:
        img_file.write(urllib.urlopen(img_url).read())
# Resume-able image crawl: count files already in img_file/ and continue
# downloading each POI's header image from where the last run stopped.
item_done = len(os.listdir("img_file/"))-1
if item_done < 0:
    item_done = 0
for i, link in enumerate(poi2.url[item_done:]):
    try:
        s = request_s(link)
        try:
            img_url = s.select('img[class="centeredImg"]')[0]["src"]
        except IndexError:
            # NOTE(review): on a page without the image, img_url keeps its
            # value from the previous iteration (or is unset) -- the next
            # save_img call may store the wrong picture; confirm.
            print link
            pass
        path = 'img_file/'+str(item_done+i)+".jpg"
        save_img(path, img_url)
        time.sleep(5)
    except:
        # Bare except treated as a rate-limit/ban signal: back off 5 minutes
        # and retry the same link once.
        time.sleep(300)
        s = request_s(link)
        try:
            img_url = s.select('img[class="centeredImg"]')[0]["src"]
        except IndexError:
            print link
            pass
        path = 'img_file/'+str(item_done+i)+".jpg"
        save_img(path, img_url)
        time.sleep(10)
# -
# img_url = s.findAll("img", class_="centeredImg")
img_url = s.select('img[class="centeredImg"]')
img_id =[]
for img_name in img_list[1:]:
img_id.append(int(img_name.replace(".jpg","")))
# img_id
# +
# import urllib
# f = open('img_file/00000001.jpg','wb')
# f.write(urllib.urlopen(img_url).read())
# f.close()
# -
import cv2
import numpy as np
# Flag downloaded images that are suspiciously small in either dimension
# (< 10 px), i.e. likely placeholders or failed downloads.
wrong_img = []
for img_name in img_list[1:]:  # [1:] presumably skips a hidden file (e.g. .DS_Store) -- verify
    pic = cv2.imread("img_file/"+img_name)
    # NOTE(review): cv2.imread returns None for unreadable files, which would
    # make pic.shape raise AttributeError here -- confirm inputs are valid.
    if (pic.shape[0] < 10 )or (pic.shape[1]< 10):
        wrong_img.append(img_name)
# +
import os
import time
import numpy as np
from matplotlib import pyplot as plt
# Re-scan img_file/ with matplotlib and collect images under 10 px in either
# dimension (placeholders / failed downloads).
img_list = os.listdir("img_file/")
wrong_img = []
for img_name in img_list[1:]:  # [1:] presumably skips a hidden file (e.g. .DS_Store) -- verify
    path = "img_file/"+img_name
    pic = plt.imread(path, 0)
    # BUG FIX: the original tested `type(pic) == "NoneType"` -- comparing a
    # type object to a string is always False, so unreadable images fell
    # through to the shape check. Compare against None and skip such files.
    if pic is None:
        print(img_name)
        continue
    if (pic.shape[0] < 10 )or (pic.shape[1]< 10):
        wrong_img.append(img_name)
print(wrong_img)
# -
len(wrong_img)
for i in poi2[poi2.index.isin([1000,10017])].url:
print i
wrong_page = []
with open ("wrong_page.txt", "r") as f:
for line in f.readlines():
wrong_page.append(line.replace("\n",""))
for i in poi2[poi2.url.isin(wrong_page)].index:
wrong_img.append(i)
len(wrong_img)
wrong_img_id = []
for i in wrong_img:
wrong_img_id.append(str(i).replace(".jpg",""))
with open ("wrong_img_id.txt", "w") as f:
for i in wrong_img_id:
f.write(i+"\n")
| Data Wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# This is a literate code document for experimenting which helps with
# exploring the machine learning aspects before putting essential portions into scripts and final report
# Created on: 2021-11-24
# Updated on: 2021-11-25
# Import library
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import altair as alt
from docopt import docopt
from sklearn import datasets
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Ridge
from sklearn.model_selection import (
RandomizedSearchCV,
cross_val_score,
cross_validate,
train_test_split,
)
# Reads already-split train and test dfs (80-20%), and creates train and split parts
train_df = pd.read_csv("../data/processed/train_df.csv")
test_df = pd.read_csv("../data/processed/test_df.csv")
X_train = train_df.drop(columns=["total_cup_points"])
X_test = test_df.drop(columns=["total_cup_points"])
y_train = train_df["total_cup_points"]
y_test = test_df["total_cup_points"]
# Create a preprocessor for feature transformations
numeric_features = [
"moisture",
"quakers",
"altitude_mean_meters"
]
categorical_features = [
"country_of_origin",
"harvest_year",
"variety",
"processing_method",
"category_one_defects",
"color",
"category_two_defects",
"region"
]
preprocessor = make_column_transformer(
(StandardScaler(), numeric_features),
(OneHotEncoder(handle_unknown="ignore", sparse=False), categorical_features)
)
# +
# A function to get cross-validated scores on the models
def mean_std_cross_val_scores(model, X_train, y_train, **kwargs):
    """
    Return the mean and standard deviation of cross-validation scores.

    Parameters
    ----------
    model :
        scikit-learn estimator or pipeline to evaluate
    X_train : numpy array or pandas DataFrame
        X in the training data
    y_train :
        y in the training data
    **kwargs :
        extra keyword arguments forwarded to
        sklearn.model_selection.cross_validate
        (e.g. cv, scoring, return_train_score)

    Returns
    ----------
    pandas Series of "mean (+/- std)" strings, indexed by metric name
    """
    # Build the scores DataFrame once instead of twice.
    scores = pd.DataFrame(cross_validate(model, X_train, y_train, **kwargs))
    mean_scores = scores.mean()
    std_scores = scores.std()
    # Pair means with stds directly; positional Series indexing
    # (mean_scores[i]) is deprecated in recent pandas. The original also
    # used an f-string prefix on a %-format template, which was a no-op.
    out_col = [
        "%0.3f (+/- %0.3f)" % (mean, std)
        for mean, std in zip(mean_scores, std_scores)
    ]
    return pd.Series(data=out_col, index=mean_scores.index)
# -
# ### Regression
# +
# Building regression models
# Fixed random_state makes the cross-validation results reproducible.
models = {
    "Ridge": Ridge(random_state=123),
    "RForest_Regressor": RandomForestRegressor(random_state=123)
}
results_dict = {}
for k, v in models.items():
    # Wrap each model in the preprocessing pipeline so scaling/encoding is
    # fit inside each CV fold (avoids train/validation leakage).
    pipe_multi = make_pipeline(preprocessor, v)
    results_dict[k] = mean_std_cross_val_scores(pipe_multi, X_train, y_train,
                                               cv=5, return_train_score=True)
# --> FIRST OUTPUT: Table of results of 2 models from CV
results_dict = pd.DataFrame(results_dict)
results_dict
# ==> RESULT: We select RF Regressor with better CV performance: 0.177 for R squared
# ==> NEXT: We try hyperparameter optimization on RF
# +
# Hyperparameter Optimization for RF Regressor Model
# Parameter names are prefixed with the pipeline step name
# ("randomforestregressor__") as required by sklearn pipelines.
param_dist = {'randomforestregressor__max_depth': np.arange(1, 20),
              'randomforestregressor__max_features': np.arange(1, 124),
              'randomforestregressor__n_estimators': np.arange(100, 1000, 100)
             }
pipe = make_pipeline(preprocessor, RandomForestRegressor(random_state=123))
# 10 random parameter draws, 5-fold CV each; n_jobs=-1 uses all cores.
random_search = RandomizedSearchCV(
    pipe, param_distributions=param_dist, n_jobs=-1,
    n_iter=10, cv=5, random_state=123,
    return_train_score=True)
random_search.fit(X_train, y_train)
random_search.best_score_
# ==> RESULT: Hyperparameter optimization ends up with better score of ~ 0.25
# ==> NEXT: We view the feature importance using parameters by this best estimator
# +
# Inspect important features from best RF model
X_transformed = preprocessor.fit_transform(X_train)
# Recover human-readable column names: numeric columns pass through the
# scaler unchanged, then the expanded one-hot column names are appended.
column_names = (
    numeric_features +
    preprocessor.named_transformers_["onehotencoder"].get_feature_names_out().tolist()
)
# Top 5 features (the slice below keeps the 5 highest importances)
importances = random_search.best_estimator_['randomforestregressor'].feature_importances_
feat_df = pd.DataFrame({'features': column_names, 'importances': importances})
feat_df = feat_df.sort_values('importances', ascending=False)[:5]
# --> SECOND OUTPUT: Barplot of feature importances
sns.barplot(x="importances", y="features", data=feat_df, color="salmon").set_title('Feature Importance from RF Regression', weight='bold')
plt.xlabel("Importances")
plt.ylabel("Features")
plt.savefig("../results/fig_feature_importance_rfr.png")
# +
# Performance on test set
random_search.score(X_test, y_test)
# + [markdown] tags=[]
# ### Classification
# +
# train_df = pd.read_csv("../data/processed/train_df.csv")
# test_df = pd.read_csv("../data/processed/test_df.csv")
# Derive a binary classification target from the raw score:
# total_cup_points > 82 -> "Good", otherwise "Poor".
train_df['total_cup_grade'] = train_df['total_cup_points'].apply(lambda x: 'Good' if x>82 else 'Poor')
test_df['total_cup_grade'] = test_df['total_cup_points'].apply(lambda x: 'Good' if x>82 else 'Poor')
# Drop both the raw score and the derived grade from the feature matrix so
# neither leaks the label.
X_train_new = train_df.drop(columns=["total_cup_points", "total_cup_grade"])
X_test_new = test_df.drop(columns=["total_cup_points", "total_cup_grade"])
y_train_new = train_df["total_cup_grade"]
y_test_new = test_df["total_cup_grade"]
# +
pipe = make_pipeline(preprocessor, RandomForestClassifier(random_state=123))
# 10-fold CV scored with ROC AUC.
# NOTE(review): results_dict is a DataFrame at this point (reassigned in the
# regression section), so this adds a column whose cells come from another
# DataFrame — verify the displayed table looks as intended.
results_dict['RForest_Classifier'] = pd.DataFrame(mean_std_cross_val_scores(pipe, X_train_new, y_train_new, cv=10,
                                                             return_train_score=True, scoring='roc_auc'))
results_dict
# +
# Inspect important features from best RF model
pipe.fit(X_train_new, y_train_new)
pipe['randomforestclassifier'].feature_importances_
# Numeric columns keep their names; one-hot columns are expanded.
column_names = (
    numeric_features +
    preprocessor.named_transformers_["onehotencoder"].get_feature_names_out().tolist()
)
# Top 5 features
importances = pipe['randomforestclassifier'].feature_importances_
feat_df = pd.DataFrame({'features': column_names, 'importances': importances})
feat_df = feat_df.sort_values('importances', ascending=False)[:5]
# --> SECOND OUTPUT: Barplot of feature importances
sns.barplot(x="importances", y="features", data=feat_df, color="salmon").set_title('Feature Importance from RF Classifier', weight='bold')
plt.xlabel("Importances")
plt.ylabel("Features")
plt.savefig("../results/feature_importance_rfc.png")
# -
pipe.score(X_test_new, y_test_new)
# + [markdown] tags=[]
# # References
# https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html <br>
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
#
| notebooks/ML_experimentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/application_model_zoo/Example%20-%20BDD100K%20dataset%20with%20TensorRT%20optimization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Table of contents
#
#
# ## 1. Installation Instructions
#
#
#
# ## 2. Use trained model to detect vehicles on road
#
#
#
# ## 3. How to train using Tensorflow object detection API wrapper and BDD100K dataset
#
#
#
# ## 4. Model optimization using TensorRT
#
#
#
# ## 5. Inferecing on unoptimized and optimized models
# ## More examples on
# - Tensorflow object detection API 1.0 - https://github.com/Tessellate-Imaging/Monk_Object_Detection/tree/master/example_notebooks/12_tf_obj_1
# - Tensorflow object detection API 2.0 - https://github.com/Tessellate-Imaging/Monk_Object_Detection/tree/master/example_notebooks/13_tf_obj_2
# # Installation
#
# - Run these commands
#
# - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
#
# - cd Monk_Object_Detection/13_tf_obj_2/installation
#
# - Select the right file and run
#
# - chmod +x install_cuda10.sh && ./install_cuda10.sh
# ! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
# +
# For colab use the command below
# ! cd Monk_Object_Detection/13_tf_obj_2/installation && chmod +x install_colab.sh && ./install_colab.sh
# Restart colab runtime now
# For Local systems and cloud select the right CUDA version
# # ! cd Monk_Object_Detection/13_tf_obj_2/installation && chmod +x install_cuda10.sh && ./install_cuda10.sh
# -
# Check TF version
import tensorflow as tf
print(tf.__version__)
# # Use already trained model for demo
import os
import sys
sys.path.append("Monk_Object_Detection/13_tf_obj_2/lib/")
from infer_detector import Infer
gtf = Infer();
# +
# Download trained model
# -
# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1ukYu1AVfQ5SAmKeiXXYM9vvZJN6ZaV4Q' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1ukYu1AVfQ5SAmKeiXXYM9vvZJN6ZaV4Q" -O obj_bdd_trained.zip && rm -rf /tmp/cookies.txt
# ! unzip -qq obj_bdd_trained.zip
# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1nEkL_0yx0jEbS08oMOg-5qSIqsz8Bj4x' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1nEkL_0yx0jEbS08oMOg-5qSIqsz8Bj4x" -O labelmap.txt && rm -rf /tmp/cookies.txt
# +
# For unoptimized model uncomment the following
#gtf.set_model_params(exported_model_dir = 'obj_bdd_trained/export_dir')
# For tensorRT optimized model uncomment the following
gtf.set_model_params(exported_model_dir = 'obj_bdd_trained/trt_dir_int')
# -
scores, bboxes, labels = gtf.infer_on_image('obj_bdd_trained/test/1.jpg', thresh=0.5);
from IPython.display import Image
Image(filename='output.jpg')
scores, bboxes, labels = gtf.infer_on_image('obj_bdd_trained/test/2.jpg', thresh=0.3);
from IPython.display import Image
Image(filename='output.jpg')
scores, bboxes, labels = gtf.infer_on_image('obj_bdd_trained/test/3.jpg', thresh=0.4);
from IPython.display import Image
Image(filename='output.jpg')
# # Dataset download
# - Dataset credits: - https://www.kaggle.com/solesensei/solesensei_bdd100k
# ! pip install kaggle
# ! kaggle datasets download solesensei/solesensei_bdd100k
# ! unzip -qq solesensei_bdd100k.zip
# ls bdd100k/bdd100k
# ls bdd100k_labels_release/bdd100k/labels
# +
# ! mkdir dataset
# ! mkdir dataset/train/
# ! mkdir dataset/train/annos/
# ! mkdir dataset/val/
# ! mkdir dataset/val/annos/
# -
# ! find bdd100k/bdd100k/images/100k/train/trainA/ -name '*.jpg*' -exec mv {} bdd100k/bdd100k/images/100k/train/ \;
# ! find bdd100k/bdd100k/images/100k/train/trainB/ -name '*.jpg*' -exec mv {} bdd100k/bdd100k/images/100k/train/ \;
# ! find bdd100k/bdd100k/images/100k/train/testA/ -name '*.jpg*' -exec mv {} bdd100k/bdd100k/images/100k/train/ \;
# ! find bdd100k/bdd100k/images/100k/train/testB/ -name '*.jpg*' -exec mv {} bdd100k/bdd100k/images/100k/train/ \;
# ! pip install pascal-voc-writer
img_dir = "bdd100k/bdd100k/images/100k/train/";
anno_file = "bdd100k_labels_release/bdd100k/labels/bdd100k_labels_images_train.json";
output_dir = "dataset/train/annos/";
import json
with open(anno_file) as json_file:
data = json.load(json_file)
len(data)
data[0].keys()
from pascal_voc_writer import Writer
# +
import os
from tqdm import tqdm
import cv2

# Convert each BDD100K *training* annotation (JSON) into a Pascal-VOC XML
# file next to the images, one XML per image.
for i in tqdm(range(len(data))):
    name = data[i]["name"]
    labels = data[i]["labels"]
    img = cv2.imread(img_dir + "/" + name)
    if img is None:
        # cv2.imread returns None for missing or unreadable files; skip
        # those instead of crashing on img.shape below.
        continue
    h, w, c = img.shape
    writer = Writer(name, w, h)
    for j in range(len(labels)):
        obj = labels[j]
        # Only object annotations carry a 2D box; lane / drivable-area
        # entries have no "box2d" key and are skipped.
        if "box2d" in obj.keys():
            category = obj["category"]
            x1 = int(obj["box2d"]["x1"])
            y1 = int(obj["box2d"]["y1"])
            x2 = int(obj["box2d"]["x2"])
            y2 = int(obj["box2d"]["y2"])
            writer.addObject(category, x1, y1, x2, y2)
    writer.save(output_dir + "/" + name.split(".")[0] + ".xml")
# -
img_dir = "bdd100k/bdd100k/images/100k/val/";
anno_file = "bdd100k_labels_release/bdd100k/labels/bdd100k_labels_images_val.json";
output_dir = "dataset/val/annos/";
import json
with open(anno_file) as json_file:
data = json.load(json_file)
len(data)
from pascal_voc_writer import Writer
# +
import os
from tqdm import tqdm
import cv2

# Convert each BDD100K *validation* annotation (JSON) into a Pascal-VOC XML
# file, one XML per image.
for i in tqdm(range(len(data))):
    name = data[i]["name"]
    labels = data[i]["labels"]
    img = cv2.imread(img_dir + "/" + name)
    if img is None:
        # cv2.imread returns None for missing or unreadable files; skip
        # those instead of crashing on img.shape below.
        continue
    h, w, c = img.shape
    writer = Writer(name, w, h)
    for j in range(len(labels)):
        obj = labels[j]
        # Only object annotations carry a 2D box; lane / drivable-area
        # entries have no "box2d" key and are skipped.
        if "box2d" in obj.keys():
            category = obj["category"]
            x1 = int(obj["box2d"]["x1"])
            y1 = int(obj["box2d"]["y1"])
            x2 = int(obj["box2d"]["x2"])
            y2 = int(obj["box2d"]["y2"])
            writer.addObject(category, x1, y1, x2, y2)
    writer.save(output_dir + "/" + name.split(".")[0] + ".xml")
# -
# +
# Get classes
# -
anno_file = "bdd100k_labels_release/bdd100k/labels/bdd100k_labels_images_train.json";
import json
with open(anno_file) as json_file:
data = json.load(json_file)
# +
import os
from tqdm import tqdm
import cv2
# Collect the unique object categories across all training annotations.
# A list (not a set) is used so first-seen order is preserved — the order
# of classes.txt becomes the label index mapping for the detector.
classes = [];
for i in tqdm(range(len(data))):
    name = data[i]["name"];
    labels = data[i]["labels"];
    for j in range(len(labels)):
        obj = labels[j];
        # Only box annotations contribute categories.
        if("box2d" in obj.keys()):
            category = obj["category"];
            if(category not in classes):
                classes.append(category)
# -
classes
# Persist the discovered class labels to classes.txt, one per line.
# The context manager guarantees the file is closed (and flushed) even if
# a write fails, unlike the manual open()/close() pair it replaces.
with open("classes.txt", 'w') as f:
    for label in classes:
        f.write(label + "\n")
# # Training your own network
import os
import sys
sys.path.append("Monk_Object_Detection/13_tf_obj_2/lib/")
from train_detector import Detector
gtf = Detector();
gtf.list_models();
# +
# Dataset details
# +
train_img_dir = "bdd100k/bdd100k/images/100k/train/";
train_anno_dir = "dataset/train/annos/";
class_list_file = "classes.txt";
gtf.set_train_dataset(train_img_dir, train_anno_dir, class_list_file, batch_size=4)
# +
val_img_dir = "bdd100k/bdd100k/images/100k/val/";
val_anno_dir = "dataset/val/annos/";
gtf.set_val_dataset(val_img_dir, val_anno_dir)
# -
# +
# Create tf record
# -
gtf.create_tfrecord(data_output_dir="data_tfrecord")
# +
# Model and hyper params
# -
gtf.set_model_params(model_name="ssd_resnet50_v1_fpn_640")
gtf.set_hyper_params(num_train_steps=100000, lr=0.03)
# +
# Directory to store inference graph
# -
gtf.export_params(output_directory="export_dir");
# +
# (Optional) Optimize using TensorRT - Feature Not tested on colab
# -
gtf.TensorRT_Optimization_Params(conversion_type="INT8", trt_dir="trt_dir_int")
# +
# training
# tf.app.run() executes the sys.exit() function, hence it cannot be run in a jupyter notebook directly
# Run in a terminal - python Monk_Object_Detection/13_tf_obj_2/lib/train.py
# or
# Run the following command from notebook
# -
# %run Monk_Object_Detection/13_tf_obj_2/lib/train.py
# +
# exporting trained model
# tf.app.run() executes the sys.exit() function, hence it cannot be run in a jupyter notebook directly
# Run in a terminal - python Monk_Object_Detection/13_tf_obj_2/lib/export.py
# or
# Run the following command from notebook
# -
# %run Monk_Object_Detection/13_tf_obj_2/lib/export.py
# +
# Optimizing For TensorRT - Feature Not tested on colab
# This requires TensorRT 6.0.1 to be installed
# Go to https://developer.nvidia.com/tensorrt
# Download
# - nv-tensorrt-repo-ubuntu1804-cuda10.1-trt6.0.1.5-ga-20190913_1-1_amd64.deb (For Ubuntu18.04)
# - nv-tensorrt-repo-ubuntu1604-cuda10.1-trt6.0.1.5-ga-20190913_1-1_amd64.deb (For Ubuntu16.04)
# Run the following commands to install trt (in a terminal)
# $ sudo dpkg -i nv-tensorrt-repo-ubuntu1804-cuda10.1-trt6.0.1.5-ga-20190913_1-1_amd64.deb
# $ sudo apt-key add /var/nv-tensorrt-repo-cuda10.1-trt6.0.1.5-ga-20190913/7fa2af80.pub
# $ sudo apt-get update
# $ sudo apt-get install tensorrt
# $ sudo apt-get install uff-converter-tf
# $ sudo apt-get install python3-libnvinfer-dev
# -
# %run Monk_Object_Detection/13_tf_obj_2/lib/optimize.py
# # Inference on exported model (Unoptimized)
import os
import sys
sys.path.append("Monk_Object_Detection/13_tf_obj_2/lib/")
from infer_detector import Infer
gtf = Infer();
gtf.set_model_params(exported_model_dir = 'export_dir')
import os
img_list = os.listdir("bdd100k/bdd100k/images/100k/test/")
len(img_list)
scores, bboxes, labels = gtf.infer_on_image('bdd100k/bdd100k/images/100k/test/' + img_list[0], thresh=0.5);
from IPython.display import Image
Image(filename='output.jpg')
scores, bboxes, labels = gtf.infer_on_image('bdd100k/bdd100k/images/100k/test/' + img_list[10], thresh=0.3);
from IPython.display import Image
Image(filename='output.png')
gtf.benchmark_for_speed('bdd100k/bdd100k/images/100k/test/' + img_list[10])
# # Inference on TensorRT optimized model
import os
import sys
sys.path.append("Monk_Object_Detection/13_tf_obj_2/lib/")
from infer_detector import Infer
gtf = Infer();
gtf.set_model_params(exported_model_dir = 'trt_dir_int')
import os
img_list = os.listdir("bdd100k/bdd100k/images/100k/test/")
len(img_list)
scores, bboxes, labels = gtf.infer_on_image('bdd100k/bdd100k/images/100k/test/' + img_list[0], thresh=0.5);
scores, bboxes, labels = gtf.infer_on_image('bdd100k/bdd100k/images/100k/test/' + img_list[10], thresh=0.3);
gtf.benchmark_for_speed('bdd100k/bdd100k/images/100k/test/' + img_list[10])
| application_model_zoo/Example - BDD100K dataset with TensorRT optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="BcM0TCZ4ZIzP"
# # Installing required packages
# Needed for environments not Databricks
# + id="of5OX82BY79l"
from IPython.display import clear_output
# !pip install --upgrade pip
# !pip install findspark
# !pip install pyspark
clear_output(wait=False)
# + [markdown] id="LQe75YzWZT_J"
# # Importing objects
# + id="4AMjQVfgZRkT"
import findspark, pyspark
from pyspark.sql import SparkSession
from pyspark import SparkFiles
# + [markdown] id="pq-Ul0yBZ49y"
# # Global Settings
# Needed for environments not Databricks
# + id="zdDPkeklZ7_b"
findspark.init()
spark = SparkSession.builder.getOrCreate()
# + [markdown] id="p8bruL_vZlLl"
# # Reading data source
# + id="2j-GIh0QZhvf"
url = 'https://raw.githubusercontent.com/edsonlourenco/public_datasets/main/iris_libsvm.txt'
spark.sparkContext.addFile(url)
libsvm_file = SparkFiles.get("iris_libsvm.txt")
df_data = spark.read.format("libsvm").option("numFeatures", "4").load(libsvm_file)
# + [markdown] id="jEBynZePalkF"
# ### Checking **data**
# + colab={"base_uri": "https://localhost:8080/"} id="bsUYWVosabyc" outputId="d078bfd4-e881-4153-9d4d-7ccda670a6d4"
df_data.show(truncate=False)
| 2_libsvm_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, \
    MaxPooling2D, Conv2D, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.datasets import mnist
# NOTE(review): SGD is imported but unused — the model is compiled with 'adam'.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixel intensities from [0, 255] to [0, 1].
x_train = x_train / 255.0
x_test = x_test / 255.0
# One-hot encode the digit labels by indexing rows of a 10x10 identity matrix.
y_train = np.eye(10)[y_train]
y_test = np.eye(10)[y_test]
# Add the trailing channel dimension expected by Conv2D: (N, 28, 28, 1).
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
# Two conv+pool stages, then a dense head with dropout for regularization.
model = Sequential([
    Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'),
    MaxPooling2D(pool_size=(2,2)),
    Conv2D(64, (5, 5), activation='relu'),
    MaxPooling2D(pool_size=(2,2)),
    Flatten(),
    Dense(1024, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=5, batch_size=75,
                    verbose=1)
# -
# Evaluate on the held-out test set; score = [loss, accuracy].
score = model.evaluate(x_test, y_test, verbose=1)
print('Accuracy =', score[1])
# +
# %matplotlib inline
import matplotlib.pyplot as plt

# Predict the first 10 test digits in one batched call (instead of one
# model.predict call per image inside the loop) and display each image
# with its true answer vs. the model's prediction.
predictions = np.argmax(model.predict(x_test[:10]), axis=1)
answers = np.argmax(y_test[:10], axis=1)
for i in range(10):
    plt.figure(figsize=(1,1))
    plt.title('Answer:' + str(answers[i]) + ' Predicted:' + str(predictions[i]))
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    plt.xticks(())
    plt.yticks(())
    plt.show()
# -
| artificial-intelligence-with-python-ja-master/Chapter 16/cnn_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab={} colab_type="code" id="n3lnWjvI83ix"
# # Filtado de mensajes spam
# -
# ## Descripción del problema real
# La recepción de publicidad no deseada a traves mensajes de texto usando SMS (Short Message Service) es un problema que afecta a muchos usuarios de teléfonos móviles. El problema radica en que los usuarios deben pagar por los mesajes recibidos, y por este motivo resulta muy importante que las compañías prestadoras del servicio puedan filtrar mensajes indeseados antes de enviarlos a su destinatario final. Los mensajes tienen una longitud máxima de 160 caracteres, por lo que el texto resulta poco para realizar la clasificación, en comparación con textos más largos (como los emails). Adicionalmente, los errores de digitación dificultan el proceso de detección automática.
# ## Descripción del problema en términos de los datos
# Se tiene una muestra que contiene 5574 mensajes en inglés, no codificados y clasificados como legítimos (ham) o spam (http://www.dt.fee.unicamp.br/~tiago/smsspamcollection/). La información está almacenada en el archivo `datos/spam-sms.zip`. El problema en términos de los datos consiste en clasificar si un mensaje SMS es legítimo o spam, a partir del análisis de las palabras que contiene, partiendo del supuesto de que ciertas palabras son más frecuentes dependiendo del tipo de mensaje. Esto implica que en la fase de preparación de los datos se deben extraer las palabras que contiene cada mensaje para poder realizar el análisis.
# ## Aproximaciones posibles
# En este caso, se desea comparar los resultados de un modelo de redes neuronales artificiales y otras técnicas estadísticas para realizar la clasificación.
# ## Requerimientos
# Usted debe:
#
# * Preprocesar los datos para representarlos usando bag-of-words.
#
#
# * Construir un modelo de regresión logística como punto base para la comparación con otros modelos más complejos.
#
#
# * Construir un modelo de redes neuronales artificiales. Asimismo, debe determinar el número de neuronas en la capa o capas ocultas.
#
#
# * Utiizar una técnica como crossvalidation u otra similar para establecer la robustez del modelo.
#
#
# * Presentar métricas de desempeño para establecer las bondades y falencias de cada clasificador.
# ## Lectura de Datos
# Se hace una lectura simple de la base de datos proporcionada por el profesor para el desarrollo del ejercicio.
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
df = pd.read_csv(
#"https://raw.githubusercontent.com/jdvelasq/datalabs/master/datasets/sms-spam.csv",
'datos/sms-spam.csv',
sep = ',',
thousands = None,
decimal = '.',
encoding='latin-1')
df.head()
# -
# ## Stemmer
# Con esta funcion eliminan los afijos morfológicos de las palabras, dejando solo la raíz de la palabra..
# +
from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()
# Reduce every word to its Porter stem so morphological variants
# ("loved", "loving", "loves") collapse to one token before vectorization.
df['stemmed'] = df.text.apply(lambda x: ' '.join([stemmer.stem(w) for w in x.split() ]))
df.head(10)
# -
# ## Matriz de Terminos de Documento
# Luego generamos una matriz, donde cada columna va a representar la presencia de la palabra en el mensaje, donde cada columna representa una palabra de todas las palabras distintas que hay en el set de datos.
# +
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(
analyzer='word', # a nivel de palabra
lowercase=True, # convierte a minúsculas
stop_words='english', # stop_words en inglés
binary=True, # Los valores distintos de cero son fijados en 1
min_df=5 # ignora palabras con baja freq
)
##
## Aplica la función al texto
##
dtm = count_vect.fit_transform(df.stemmed)
##
## Las filas contienen los mensajes
## y las clomunas los términos
##
dtm.shape
# +
##
## Palabras aprendidas de los mensajes de texto
##
vocabulary = count_vect.get_feature_names()
len(vocabulary)
# +
##
## Recupera los mensajes de la dtm
##
def dtm2words(dtm, vocabulary, index):
    """Recover, for each requested document, the vocabulary words present
    in its row of the document-term matrix.

    Parameters
    ----------
    dtm : scipy sparse matrix, shape (n_documents, n_terms)
    vocabulary : list of str, one name per dtm column
    index : list of int, document (row) positions to recover

    Returns
    -------
    list of list of str : one word list per requested document
    """
    rows = dtm[index, :].toarray()
    # Iterate the sliced rows positionally. The original code re-indexed
    # the slice with the *original* row numbers (as_list[i] for i in index),
    # which only worked when index happened to be 0..n-1 and raised
    # IndexError otherwise.
    docs = []
    for row in rows:
        docs.append([vocabulary[iword] for iword, ifreq in enumerate(row) if ifreq > 0])
    return docs
# Show each original message next to its stemmed bag-of-words
# reconstruction for the first 11 documents.
for i, x in enumerate(dtm2words(dtm, vocabulary, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])):
    print('Org: ', df.text[i])
    print('Mod: ', ' '.join(x))
    print('')
# -
# ## Conjunto de Datos
# Encode the target: spam -> 1, ham -> 0.
# Use .loc instead of chained indexing (df['type'][mask] = ...) — the
# chained form writes through a possibly-temporary object, raises
# SettingWithCopyWarning, and can silently fail on recent pandas.
df.loc[df['type'] == 'spam', 'type'] = 1
df.loc[df['type'] == 'ham', 'type'] = 0
X = dtm
y = np.array(df.type, dtype='int64')
print("Tamaño X:", X.shape)
print("Tamaño y:", y.shape)
y
# ## Naive Bayes
# Este ejemplo fue tomado de la pagina de cursos del profesor
# +
## Entrena el modelo
##
##
## Se importa la libreria
##
import numpy as np
from sklearn.naive_bayes import BernoulliNB
##
## Se crea un clasificador Naive Bayes (NB)
##
clf = BernoulliNB()
##
## Se entrena el clasificador
##
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, X, y, cv=5,scoring = 'precision')
print('Precision Promedio:',np.mean(scores))
# -
# ## Regresión Logística
# ### Ajuste de Parametros
# Para la busqueda de los mejores parametros se utiliza la funcion GridSearchCV de Sklearn, la cual dado un dominio de los parametros hace una busqueda exhaustiva de la combinación de parametros que me genere el mejor valor de una metrica escogida usando validación cruzada, la cual en este caso es $Precisión$
# +
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
import sklearn.neural_network
parametros = {"penalty": ['l1', 'l2'],
"C": [0.001,0.01,0.1,1,10,100],
#"solver": ['lbfgs', 'liblinear', 'sag', 'saga']
}
regresion = LogisticRegression()
clf = GridSearchCV(regresion, parametros,cv=5,scoring='precision')
clf.fit(X,y)
print("Mejores Parametros Encontrados")
print(clf.best_params_)
# -
# ## Entrenamiento y Validación
# Para la validación de este modelo se utiliza la metrica de $Precisión$, ya que en este caso los $Falsos Positivos$ son criticos, ya que podriamos estar filtrando mensajes que no son spam, y esto es una perdida de información delicada.
# +
from sklearn import linear_model
import numpy as np
regresion = LogisticRegression(C=clf.best_params_['C'],penalty =clf.best_params_['penalty'] )
from sklearn.model_selection import cross_val_score
scores = cross_val_score(regresion, X, y, cv=5,scoring = 'precision')
print("Precision: ",np.mean(scores))
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
regresion= regresion.fit(X_train,y_train)
y_predict = regresion.predict(X_test)
print(confusion_matrix(y_test,y_predict))
# -
# ## Redes Neuronales
# ### Busqueda de Arquitectura
# Por cuestiones de computo se procede primero a realizar la busqueda de la arquitectura de red neuronal que mejor se comporta, para luego encontrarle los mejores parametros a la arquitectura encontrada.
from sklearn.model_selection import GridSearchCV
import sklearn.neural_network
parametros = {"hidden_layer_sizes": [(6,),(7,),(8,),(10,)]}
red = sklearn.neural_network.MLPRegressor()
clf = GridSearchCV(red, parametros,cv=5)
clf.fit(X,y)
print("Mejores Parametros Encontrados")
print(clf.best_params_)
# ### Busqueda de Parametros
# Con base en la arquitectura encontrada en el paso anterior se procede a hacer la busqueda de los mejores parametros para dicha arquitectura.
# +
from sklearn.model_selection import GridSearchCV
import sklearn.neural_network
parametros = {"hidden_layer_sizes": [(8,)],
"activation": ["identity", "logistic", "tanh", "relu"],
"solver": ["lbfgs", "sgd", "adam"],
"alpha": [0.00005,0.0005,0.1],
'learning_rate':['constant', 'invscaling', 'adaptive'],
#'max_iter':[200,1000],
#'tol':[0.00001,0.0001,0.001,0.01,0.1,1,10],
#'momentum':[0.1,0.2,0.4,0.6,0.8,1]}
}
red = sklearn.neural_network.MLPClassifier()
clf = GridSearchCV(red, parametros,cv=5,scoring='precision')
clf.fit(X,y)
print("Mejores Parametros Encontrados")
print(clf.best_params_)
# -
# ### Entrenamiento y Validación
# Para la validación de este modelo se utiliza la metrica de $Precisión$, ya que en este caso los $Falsos Positivos$ son criticos, ya que podriamos estar filtrando mensajes que no son spam, y esto es una perdida de información delicada.
# +
import sklearn.neural_network
import numpy as np
neural_net = sklearn.neural_network.MLPClassifier(
# hidden_layer_sizes = clf.best_params_['hidden_layer_sizes'], # Una capa oculta con una neurona
#activation = clf.best_params_['activation'], # {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}
# solver = clf.best_params_['solver'], # {‘lbfgs’, ‘sgd’, ‘adam’}
# alpha = clf.best_params_['alpha'], #
# learning_rate = clf.best_params_['learning_rate'],
hidden_layer_sizes = (8,), # Una capa oculta con una neurona
activation = 'logistic', # {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}
solver = 'adam', # {‘lbfgs’, ‘sgd’, ‘adam’}
alpha = 0.1, #
learning_rate = 'constant', # La tasa no se adapta automáticamente
)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(neural_net, X, y, cv=5,scoring = 'precision')
print("Precision: ",np.mean(scores))
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
neural_net = neural_net.fit(X_train,y_train)
y_predict = neural_net.predict(X_test)
print(confusion_matrix(y_test,y_predict))
# -
# # Conclusiones
# En este ejercicio como metrica de validación se escogio la $Precisión$ en donde podemos ver que la regresión logística tuvo una precision exacta, y donde la arquitectura de la red neuronal tuvo una precision del 0.99
#
# $$ Precisión = \frac{Verdaderos Positivos}{Verdaderos Positivos + Falsos Positivos}$$
#
# $$Precisión_{Logistic Regression} = 1 $$
#
# $$Precision_{Neural Net} = 0.99 $$
#
#
# Esto nos diría que la regresion logistica tiende a comportarse mejor que la red neuronal, pero esto solo nos dice que se comporta mejor para la detección de etiquetas positivas, osea los mensajes no considerados como spam, pero si miramos otras metricas de validación con base en la matriz de confusión.
#
#
# $$Neural Net $$
#
#
# \begin{equation*}
# \begin{bmatrix}
# Verdaderos Positivos & Falsos Positivos\\
# 1195 & 3\\
# 26 & 170 \\
# Verdaderos Negativos & Falsos Negativos\\
# \end{bmatrix}
# \end{equation*}
#
#
# $$Logistic Regression $$
#
#
# \begin{equation*}
# \begin{bmatrix}
# Positivos & Falsos Positivos\\
# 1198 & 0\\
# 147 & 49 \\
# Negativos & Falsos Negativos\\
# \end{bmatrix}
# \end{equation*}
#
#
# Como la exactitud
#
# $$ Exactitud = \frac{Verdaderos}{Total de Predicciones} $$
#
#
#
#
# $$ Exactitud_{Logistic Regression} = 0.89 $$
#
# $$ Exactitud_{Neural Net} = 0.97 $$
#
#
# En donde podemos ver que en terminos generales la red neuronal se comporta mucho mejor que la regresion logistica, pero para terminos de deteccion de positivos, y no ir a incurrir en una perdida de información delicada, es mejor utilizar una regresión logística para la predicción.
#
#
| 03-spam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Concatenating, Merging and Joining
#
# In this lecture, we will look at 3 common ways of combining different pandas DataFrames.
# +
# Import the libraries:
import pandas as pd
import numpy as np
# -
# ## Concatenating:
#
# Concatenating means stitching two dataframes together. A user has the option of concatenating the dataframe vertically or horizontly.
#
# Let's create some sample datasets to begin with:
# +
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']}, index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},index=[8, 9, 10, 11])
# -
df1
df2
df3
# The function `pd.concat()` can concatenate DataFrames horizontally as well as vertically (vertical is the default). To make the DataFrames stack horizontally, you have to specify the keyword argument axis=1 or axis='columns'.
#
result = pd.concat( [df1 , df2, df3 ])
result
result2 = pd.concat( [df1 , df2, df3 ] , axis = 1)
result2
# You can keep the concatenated dataset separated by keys as shown below:
pd.concat( [df1 , df2, df3 ], keys=['x', 'y', 'z'])
# ## Merging
#
# Let's now explore techniques for merging, and learn about left joins, right joins, inner joins, and outer joins.
#
# +
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
# -
left
right
merged_data = pd.merge( left , right , on = ['key1'])
merged_data
merged_data2 = pd.merge(left , right , on = ['key1' , 'key2'])
merged_data2
merged_data_left = pd.merge(left , right , on = ['key1' , 'key2'] , how = "left" )
merged_data_left
# ## Joining
#
# `df.join()` is a convenient method for combining the columns of two potentially differently-indexed DataFrames into a single result DataFrame. Here is a very basic example:
# +
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
# -
left
right
res1 = left.join(right)
res1
# Use the how method
res2 = left.join(right , how="outer")
res2
# #### Get help:
#
# See the official documentation for more complex cases and examples.
# https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#database-style-dataframe-or-named-series-joining-merging
# ** End **
| Section 4/05 Concatenating, Merging and Joining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import mahotas as mh
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
# Sanity check: load one sample image from the data folder and display it.
tmp1 = Image.open("./datas/125507.png")
plt.figure(figsize=(5,5))
plt.imshow(tmp1)
plt.show()
# +
from glob import glob
# All PNG images in the data folder; order is filesystem-dependent.
images = glob('./datas/*.png')
features = []
labels = []
for im in images:
    # NOTE(review): this slices a fixed-length suffix off the path, so every file
    # whose name has the same length yields the identical label ('./datas').
    # Presumably the label was meant to be derived from the file name itself —
    # confirm against the actual file naming scheme.
    labels.append(im[:-len('./125457.png')])
    im = mh.imread(im)
    # Haralick texture features are computed on an 8-bit grayscale image.
    im = mh.colors.rgb2gray(im, dtype=np.uint8)
    features.append(mh.features.haralick(im).ravel())
features = np.array(features)
labels = np.array(labels)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# Standardize the Haralick features, then fit a logistic-regression classifier.
clf = Pipeline([('preproc', StandardScaler()), ('classifier', LogisticRegression())])
# from sklearn import cross_validation
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import cross_val_score
# Leave-one-out cross-validation: one fold per image.
loo = LeaveOneOut()
scores = cross_val_score(clf, features, labels, cv=loo)
print('Accuracy: {:.2%}'.format(scores.mean()))
# -
# Re-standardize the features for the distance computation below.
sc = StandardScaler()
features = sc.fit_transform(features)
from scipy.spatial import distance
# Pairwise Euclidean distances between feature vectors, as a square matrix.
dists = distance.squareform(distance.pdist(features))
dists[0][0:29]
# +
def selectImage(n, m, dists, images):
    """Return the m-th closest image to image n by feature distance.

    `dists` is the square pairwise-distance matrix and `images` the list of
    file paths; m=0 selects image n itself (distance zero).
    """
    ranked = dists[n].argsort()  # indices sorted by increasing distance to n
    return mh.imread(images[ranked[m]])
def plotImages(n):
    """Show image n next to its three most similar images (by feature distance).

    Relies on the module-level `dists` distance matrix and `images` path list.
    """
    # Position 0 in the distance ranking is the query image itself, so the
    # first panel is the original and the next three are its nearest neighbours.
    # (Replaces four copy-pasted subplot blocks; also fixes the "simular" typo
    # that appeared in the displayed titles.)
    titles = ['Original', '1st similar one', '2nd similar one', '3rd similar one']
    plt.figure(figsize=(15, 5))
    for m, title in enumerate(titles):
        plt.subplot(1, 4, m + 1)
        plt.imshow(selectImage(n, m, dists, images))
        plt.title(title)
        plt.xticks([])  # hide ticks; only the pictures matter
        plt.yticks([])
    plt.show()
# -
plotImages(2)
| workspace_yena/mahotas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test the bnk module with a PCB accelerometer
# Here we test the amplitude and frequency calibration of the BnK device using a [PCB accelerometer](https://www.pcb.com/spec_sheet.asp?model=352C650) attached to a [PCB calibrated shaker](https://www.pcb.com/spec_sheet.asp?model=394C06&item_id=2198).
# +
import time
from bnk.bnk import OpenWav, WavHeader, Instrument
# -
# # Gather some data from a PCB accelerometer
# The accelerometer is screwed onto the top of the shaker, and is connected to channel 4 of the BnK device.
# IP address of the BnK acquisition unit on the local network.
bnk_ip = "10.1.1.66"
ADAC = Instrument(bnk_ip)
# Start from a clean state: disable every channel before configuring one.
ADAC.disable_all()
# Sample rate in Hz; 16384 comfortably covers the 159.2 Hz calibration tone.
ADAC.set_samplerate(16384)
ADAC.set_name("PCB calibrated shaker")
# Configure the accelerometer input on channel 4.
ADAC.set_channel(
    channel=4,
    name='PCB 394C06 SN 2706 1 g at 159.2 Hz',
    c_filter='7.0 Hz',
    c_range='10 Vpeak',
    sensitivity=0.0991,  # presumably V per g from the calibration sheet — TODO confirm
    unit='g',
    powered=True,  # supply power to the sensor's internal amplifier
    serialNumber='LW207441',
    transducerType='PCB352C65',
)
ADAC.powerup()
print("Waiting for amplifier in PCB to stabilise ...")
# Let the sensor's amplifier settle after power-up before recording.
time.sleep(20)
print("Recording")
# Record for 2 seconds; keep the id so the file can be fetched and then deleted.
recording_id = ADAC.record(2)
ADAC.powerdown()
ADAC.list_recordings(start = -5)
# Download the recording as a WAV file into the samples directory.
WAV_file = ADAC.get_wav(directory='../samples', recording_id=recording_id)
print(WAV_file)
# Remove the recording from the device once it has been saved locally.
ADAC.delete_recording(recording_id=recording_id)
# ## Load and display the data
# Re-load a previously saved recording from disk (fixed example file name).
WAV_file = '../samples/PCB_calibrated_shaker_20190805205924.wav'
wav_data, metadata, json_data = OpenWav(WAV_file)
# +
# #!conda install -c conda-forge bokeh -y
# +
from ipywidgets import interact
import numpy as np
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
# Render bokeh plots inline in the notebook.
output_notebook()
# +
plt = figure(
    title = metadata['Label'],
    width = 800,
    height = 400,
    tools = ['xwheel_zoom','xpan','save','reset']
)
# Build the time axis from the sample rate stored in the WAV metadata.
sr = metadata['SampleRate']
N = len(wav_data)
delta_t = 1.0/sr
t = np.arange(0,N*delta_t,delta_t)
# Reference-sine parameters; the shaker drives 1 g at 159.2 Hz.
phase = 4.4  # initial guess; adjusted interactively via the slider below
amp = 1
freq = 159.2
n = 1000  # number of samples to display
c = 0 # Channel
# Measured accelerometer signal in blue.
plt.line(t[:n],
         wav_data[:n,c],
         line_color='blue',
         legend=metadata['ChannelNames'][c]
        )
# Reference sine in red; keep the renderer handle `r` so update() can redraw it.
r = plt.line(t[:n],
             amp*np.sin(2*np.pi*freq*t[:n] + phase),
             line_color='red',
             legend=f"{amp} sin({freq} * 2πt + {phase})",
            )
plt.legend.click_policy="hide"
def update(phase=0):
    """Redraw the reference sine with a new phase (radians) and push to the notebook."""
    r.data_source.data['y'] = amp*np.sin(2*np.pi*freq*t[:n] + phase)
    push_notebook()
show(plt, notebook_handle=True)
# -
# Slider over one full cycle of phase, in steps of 0.1 rad.
interact(update, phase=(0, 2*np.pi, 0.1))
| examples/PCB_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Scalar factor of the Universe
import numpy as np
from pylab import *
from scipy.integrate import odeint
# In this notebook, we solve for the scale factor of the Universe based on the Standard Model of Cosmology, often called $\Lambda$CDM model. We take numerical values from the following:
#
# [1] <NAME>., & <NAME>. (2009). The primordial density perturbation: Cosmology, inflation and the origin of structure. Cambridge University Press.
#
# The Friedmann equation is given by
# \begin{equation}
# H(a)^2 = H_0^2 \left( \Omega_{r0} a^{-4} + \Omega_{m0} a^{-3} + \Omega_{\Lambda 0} \right)
# \end{equation}
# where $H$ is the Hubble parameter, and $\Omega_{r0}$, $\Omega_{m0}$, and $\Omega_{\Lambda 0}$ are the radiation, matter, and the vacuum (cosmological constant) energy densities, respectively, today. We refer to the following values tabulated in appendix B of Ref. [1]:
# \begin{eqnarray}
# \Omega_{r0} &=& 8.47 \times 10^{-5} \\
# \Omega_{m0} &=& 0.276 \\
# \Omega_{\Lambda 0} &=& 1 - \Omega_{r0} - \Omega_{m0} \\
# H_0 &=& 70 \ \text{km} / \text{s} / \text{Mpc} .
# \end{eqnarray}
#
# Noting that the Hubble parameter $H$ is related to the scale factor $a$ as
# \begin{equation}
# H = \frac{\dot{a}}{a}
# \end{equation}
# where an overdot denotes derivative with respect to the comoving time $t$, then the Friedmann equation can be written down as
# \begin{equation}
# \dot{a} = a H_0 \sqrt{\left( \Omega_{r0} a^{-4} + \Omega_{m0} a^{-3} + \Omega_{\Lambda 0} \right)} .
# \end{equation}
# This is the expression that we input into $odeint$. In the following code, we input this differential equation.
# +
# Cosmological density parameters today (Lyth & Liddle, appendix B), with the
# vacuum term fixed by flatness: Omega_r + Omega_m + Omega_Lambda = 1.
omega_r = 8.47e-5
omega_m = 0.276
omega_vac = 1. - omega_r - omega_m
H_0 = 1.  # Hubble constant rescaled to unity, so time is measured in units of 1/H_0
a_0 = 1.  # scale factor today; the integration starts from the present
def f(y, t):
    """Right-hand side of the Friedmann equation: da/dt as a function of a = y."""
    density = omega_r*(y**(-4.)) + omega_m*(y**(-3.)) + omega_vac
    return y*H_0*np.sqrt(density)
# Integrate backwards in time, from today (t = 1) towards the Big Bang.
time_points = np.linspace(1., 0.01, 100)
# -
# Note that by setting $H_0$ to unity, we work in units where time is measured in $H_0^{-1} \sim 14$ billion years. Also, we are integrating backwards in time, starting from the present.
#
# With this said, we obtain the scale factor $a(t)$ of the Universe as follows.
# +
rc('xtick', labelsize = 20) # for the tick marks
rc('ytick', labelsize = 20)
# Solve da/dt = f(a, t) starting from a_0 = 1 (today), backwards along time_points.
a_lcdm = odeint(f, a_0, time_points) # odeint does its job
plot(time_points, a_lcdm, 'r-', linewidth = 3.0)
ylim(0.01, 1) # aesthetics
xlim(0.01, 1)
# Time is in units of 1/H_0 (~14 billion years), per the rescaling above.
xlabel(r'time (14 byr)', fontsize = 20)
ylabel('scale factor', fontsize = 20)
show()
# -
# So, yeah. This is the scale factor $a(t)$ of the Universe. From this, one could think of the size of the universe as $V(t) \sim a(t)^3$.
#
# The expansion history can be divided into three eras (1) radiation (2) matter and (3) dark energy era, depending on the Universe's energy content. The first era, which comes just right after the Big Bang and primordial inflation, is radiation domination, where $a(t) \sim t^{1/2}$. Then comes matter era, as radiation cools down much faster than matter, during which $a(t) \sim t^{2/3}$. Finally, and today, after both radiation and matter domination, comes dark energy era, where the Universe is dominated by an invisible, negative pressure fluid that sources the observed cosmic acceleration.
| integration_cosmo_dynamics/scale_factor_universe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## SQL - Major SQL commands
# ### BIOINF 575 - Fall 2020
# ##### RESOURCES
# https://sqlite.org/index.html
# https://www.sqlite.org/fullsql.html
# https://docs.python.org/3/library/sqlite3.html
# https://www.sqlite.org/lang_aggfunc.html
# https://www.sqlitetutorial.net/sqlite-create-table/
# https://www.sqlite.org/syntaxdiagrams.html
# https://www.tutorialspoint.com/python_network_programming/python_databases_and_sql.htm
# https://www.tutorialspoint.com/python/python_database_access.htm
# https://www.python-course.eu/sql_python.php
# https://www.sqlalchemy.org/library.html#reference
# https://docs.sqlalchemy.org/en/13/orm/
# https://docs.sqlalchemy.org/en/14/orm/tutorial.html#version-check
# https://towardsdatascience.com/sql-in-python-for-beginners-b9a4f9293ecf
#
# #### What is a database?
#
# * Is an organized collection of data (files)
# * A way to store and retrieve that information
# * A relational database is structured to recognize relations between the data elements
#
#
# * A collection of data
#
# * Dictionary
# {"EGFR":6.8, "MYC": 4.5, "WNT1":11.7}
#
# * Tab-separated text file, or pd.DataFrame
#
#
# | GeneID | GeneSymbol | ExpressionValue |
# |---------|-------------|------------------|
# | 7471 | WNT1 | 11.7 |
# | 4609 | MYC | 4.5 |
# | 1956 | EGFR | 6.8 |
#
#
# Entity-Relationship Diagram - shows the relations between tables in a relational database
# - tables are connected by fields (columns) that are common - called keys
#
#
# https://www.researchgate.net/profile/Adam_Richards3/publication/282134102/figure/fig3/AS:289128232046602@1445944950296/Database-entity-diagram-Data-collected-from-NCBI-the-Gene-Ontology-and-UniProt-are.png
#
# <img src = "https://www.researchgate.net/profile/Adam_Richards3/publication/282134102/figure/fig3/AS:289128232046602@1445944950296/Database-entity-diagram-Data-collected-from-NCBI-the-Gene-Ontology-and-UniProt-are.png" width = "700"/>
#
#
# #### Relational Database Management Systems (RDBMS)
# * Software programs such as Oracle, MySQL, SQLServer, DB2, postgreSQL, SQLite
# * They handle the data storage, indexing, logging, tracking and security (access)
# * They have a very fine-grained way of granting permissions to users at the level of commands that may be used
# * Create a database
# * Create a table
# * Update or insert data
# * View certain tables ... and many more
# * An important part of learning databases is to understand the type of data which is stored in columns and rows.
# * Likewise when we get to the database design section, it is critically important to know what type of data you will be modeling and storing (and roughly how much, in traditional systems)
# * Exactly which types are available depends on the database system
#
#
#
# #### Why use databases and Relational Database Management Systems?
# * Easy, efficient, secure, collaborative management of data that maintains data integrity
#
# #### What is the Structured Query Language (SQL) ?
# * SQL is the standard language for relational database management systems
# * SQL is used to communicate with a database
#
# #### Why SQLite?
# SQLite is a C library that provides a lightweight disk-based database that doesn’t require a separate server process and allows accessing the database using a nonstandard variant of the SQL query language. Some applications can use SQLite for internal data storage.
# * SQLite is often the technology of choice for small applications, particularly those of embedded systems and devices like phones and tablets, smart appliances, and instruments.
# * It’s also possible to prototype an application using SQLite and then port the code to a larger database such as PostgreSQL or Oracle.
#
# #### sqlite3
# The sqlite3 module in the Python standard library provides a SQL interface to communicate with databases.<br>
# https://docs.python.org/3/library/sqlite3.html
#
# Once you have a `Connection`, you can create a `Cursor` object and call its execute() method to perform SQL commands.
#
# `Cursor` objects represent a database cursor, which is used to manage the context of a fetch/retrieval operation.
# A call to the `Cursor`'s execute() method is used to perform SQL commands.
#
# #### SQLite uses a greatly simplified set of data types:
# * INTEGER - numeric
# * REAL - numeric
# * TEXT – text of any length
# * Dates are held as text
# * BLOB – binary large objects
# * Such as images
# +
from sqlite3 import connect
# Open the example database (the file is created if it does not yet exist)
# and obtain a cursor for executing SQL statements.
connection = connect('DatabaseExample.sqlite')
cursor = connection.cursor()
# -
type(connection)
type(cursor)
sql = '''SELECT type, name FROM sqlite_master LIMIT 5;'''
cursor.execute(sql)
cursor.rowcount
for row in cursor:
print(row)
cursor.description
sql = '''SELECT * FROM sqlite_master LIMIT 5;'''
cursor.execute(sql)
cursor.description
# +
from sqlite3 import connect
'''
Establish a connection to the database.
This statement creates the file at the given path if it does not exist and that will be an empty database.
The file was provided in this case so the statement should just establish the connection.
'''
connection = connect('org.Hs.eg.sqlite')
cursor = connection.cursor()
# -
type(connection)
dir(connection)
# +
# check what the connection object can do
for elem in dir(connection):
if not elem.startswith("__"):
print(elem)
# -
type(cursor)
for elem in dir(cursor):
if not elem.startswith("__"):
print(elem)
# #### Major SQL commands: SELECT, INSERT, DELETE, UPDATE
# #### SELECT - Retrieves data from one or more tables and doesn’t change the data at all
#
# * SELECT * (means all columns), or the comma separated names of the columns of data you wish to return
# * Returns columns (left to right) in the order received.
# * '*' selects ALL rows and ALL columns and returns them by column order and row_id
# * FROM is the table source or sources (comma separated)
# * WHERE (optional) is the predicate clause: conditions for the query
# * Evaluates to True or False for each row
# * This clause almost always includes Column-Value pairs.
# * Omitting the Where clause returns ALL the records in that table.
# * Note: the match is case sensitive
# * ORDER BY (optional) indicates a sort order for the output data
# * default is row_id, which can be very non-intuitive
# * ASCending or DESCending can be appended to change the sort order. (ASC is default)
# * GROUP BY (optional) groups by a column and creates summary data for a different column
# * HAVING (optional) allows restrictions on the rows selected
# * a GROUP BY clause is required before HAVING
# * LIMIT (optional) reduces the number of rows retrieved to the number provided after this clause
# * In most SQL clients, the ";" indicates the end of a statement and requests execution
#
# +
# In every SQLite database, there is a special table: sqlite_master
# sqlite_master - describes the contents of the database
sql = '''SELECT type, name FROM sqlite_master LIMIT 5;'''
cursor.execute(sql)
# -
sql = '''
SELECT name,type FROM sqlite_master;
'''
cursor.execute(sql)
for row in cursor:
print(row)
cursor.rowcount
# +
from sqlite3 import connect
'''
Establish a connection to the database.
This statement creates the file at the given path if it does not exist and that will be an empty database.
The file was provided in this case so the statement should just establish the connection.
'''
connection = connect('org.Hs.eg.sqlite')
cursor = connection.cursor()
sql = '''
SELECT name,type FROM sqlite_master;
'''
cursor.execute(sql)
# -
result = cursor.fetchall()
type(result)
len(result)
result
# +
# See the result header
cursor.description
# -
def get_header(cursor):
    '''
    Build a tab-delimited header row from the cursor description.

    Arguments:
        cursor: a cursor on which a SELECT query has been executed
    Returns:
        string: the column names joined by tabs, with no trailing newline
    '''
    # Each entry of cursor.description is a 7-item sequence; item 0 is the name.
    column_names = (column[0] for column in cursor.description)
    return '\t'.join(column_names)
# #### Different ways to retrieve results - observe the different data structures displayed
# +
# See the result
cursor.execute(sql)
print("Iterate through the cursor:")
for row in cursor:
print(row)
print()
cursor.execute(sql)
print("Use the Cursor fetchall() method:")
cursor.fetchall()
# -
def get_results(cursor):
    '''
    Render the fetched rows of a query as a tab-delimited table.

    Arguments:
        cursor: a cursor on which a SELECT query has been executed
    Returns:
        string: one line per row, fields separated by tabs, rows by newlines
    '''
    # Stringify every field so numeric and text columns format uniformly.
    lines = ['\t'.join(str(field) for field in row) for row in cursor.fetchall()]
    return '\n'.join(lines)
sql
# +
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# WHERE clause example (-- denotes comment)
# more examples later
sql = '''
SELECT name
FROM sqlite_master
WHERE type = "table"; -- condition that allows the selection of specific rows
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# Selects all columns (*) of the gene ontology biological process table (go_bp)
# retrieves only 10 rows due to the LIMIT clause
# The first column is the gene id
sql = '''
SELECT *
FROM go_bp
Limit 6;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
# Aliasing column names makes them easier to understand: add the new name after the column name (quoting it if it contains spaces); the AS keyword can also be used
# - e.g.: column_name AS "Alias name"
# - e.g.: column_name "Alias name"
# - e.g.: column_name Alias_name
sql = '''
SELECT _id "Gene ID", symbol Symbol, gene_name Name
FROM gene_info LIMIT 5;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
sql = '''
SELECT _id "Gene ID", go_id "OntologyTerm ID", evidence
FROM go_bp
Limit 6;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
#select the first 50 rows from the genes table - all columns
sql = '''
SELECT *
FROM genes
LIMIT 50;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
# +
# COUNT returns a single number, which is the count of all rows in the table
sql = '''
SELECT count(*) FROM genes;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
sql = '''
SELECT count(_id) AS 'Number of genes'
FROM genes;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# DISTINCT selects non-duplicated elements (rows)
sql = '''
SELECT _id Identifier FROM go_bp LIMIT 5;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
print()
sql = '''
SELECT DISTINCT _id Identifier FROM go_bp LIMIT 5;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# Other aggregate functions are available for numerical columns
# https://www.sqlite.org/lang_aggfunc.html
# comments are added using -- in front of the test to comment or using /* comment */
sql = '''
SELECT MIN(DISTINCT _id) MIN_ID
FROM go_bp; -- comment LIMIT 5; /* comment */
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# count the number of rows in go_bp
sql = '''
SELECT count(*)
FROM go_bp;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
# +
sql = '''
SELECT *
FROM go_bp
LIMIT 2;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# count the number of distinct genes in go_bp
sql = '''
SELECT count(DISTINCT _id)
FROM go_bp;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
sql = '''
SELECT count(_id)
FROM go_bp;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### WHERE clause operators
# https://www.sqlite.org/lang_expr.html
#
# <> , != inequality <br>
# < less than <br>
# <= less than or equal <br>
# = equal <br>
# '> greater than <br>
# '>= greater than or equal <br>
# BETWEEN v1 AND v2 tests that a value to lies in a given range <br>
# EXISTS test for existence of rows matching query <br>
# IN tests if a value falls within a given set or query <br>
# IS [ NOT ] NULL is or is not null <br>
# [ NOT ] LIKE tests value to see if like or not like another <br>
#
# % is the wildcard in SQL, used in conjunction with LIKE
#
sql = '''
SELECT * FROM go_bp
WHERE _id = '1';
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
sql = '''
SELECT * FROM go_bp
WHERE _id IN (1,5,7);
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# The evidence column in the go_bp table describes the type of evidence that was used for the link between the gene and the biological process.
# http://geneontology.org/docs/guide-go-evidence-codes/
# * Inferred from Experiment (EXP)
# * Inferred from Direct Assay (IDA)
# * Inferred from Physical Interaction (IPI)
# * Inferred from Mutant Phenotype (IMP)
# * Inferred from Genetic Interaction (IGI)
# * Inferred from Expression Pattern (IEP)
# * Inferred from High Throughput Experiment (HTP)
# * Inferred from High Throughput Direct Assay (HDA)
# * Inferred from High Throughput Mutant Phenotype (HMP)
# * Inferred from High Throughput Genetic Interaction (HGI)
# * Inferred from High Throughput Expression Pattern (HEP)
# * Inferred from Biological aspect of Ancestor (IBA)
# * Inferred from Biological aspect of Descendant (IBD)
# * Inferred from Key Residues (IKR)
# * Inferred from Rapid Divergence (IRD)
# * Inferred from Sequence or structural Similarity (ISS)
# * Inferred from Sequence Orthology (ISO)
# * Inferred from Sequence Alignment (ISA)
# * Inferred from Sequence Model (ISM)
# * Inferred from Genomic Context (IGC)
# * Inferred from Reviewed Computational Analysis (RCA)
# * Traceable Author Statement (TAS)
# * Non-traceable Author Statement (NAS)
# * Inferred by Curator (IC)
# * No biological Data available (ND)
# * Inferred from Electronic Annotation (IEA)
#
sql = '''
SELECT * FROM go_bp
WHERE evidence = 'ND' AND _id BETWEEN 20 AND 2000
LIMIT 10;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
sql = '''
SELECT *
FROM go_bp
WHERE go_id LIKE '%0081%'
LIMIT 10;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# Retrieve rows from go_bp where the go_id is GO:0008104 and evidence is IEA or IDA
# -
# +
# ORDER BY (optional) - indicates a sort order given by a column in the the output data and the sort order: ASC or DESC
sql = '''
SELECT *
FROM go_bp
WHERE evidence="EXP"
ORDER BY _id ASC
LIMIT 20;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
# ____
# Sqlite3 also has some PRAGMA methods <br>
# This is an SQL extension specific to SQLite that is used to modify the operation of the SQLite library or to query the SQLite library for internal (non-table) data <br>
# https://www.sqlite.org/pragma.html <br>
# The code below shows how to get the schema (columns and columns information)
sql = 'PRAGMA table_info("go_bp")'
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
sql = '''SELECT * FROM pragma_table_info("go_bp") '''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# ___
# +
# SUB-QUERY - we can have a query in a query
sql = '''
SELECT _id, symbol, gene_name
FROM gene_info
WHERE _id IN
(SELECT DISTINCT _id
FROM go_bp
WHERE go_id == 'GO:0008104');
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# GROUP BY groups by a column and creates summary data for a different column
# count entries for each GO term
sql = '''
SELECT go_id, count(*)
FROM go_bp
GROUP BY go_id
LIMIT 10;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# specify column in aggregate function and alias the name of the columns
sql = '''
SELECT go_id as "GO Term ID", count(_id) as "Gene Number"
FROM go_bp
GROUP BY go_id
LIMIT 10;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# HAVING allows restrictions on the rows used or selected
# a GROUP BY clause is required before HAVING
sql = '''
SELECT go_id, count(_id) as gene_no
FROM go_bp
GROUP BY go_id
HAVING gene_no>500;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# +
# Select gene ids with more than 100 biological processes associated
# -
# #### A `PRIMARY KEY` is a very important concept to understand.
# * It is the designation for a column or a set of columns from a table.
# * It is recommended to be a serial value and not something related to the business needs of the data in the table.
#
# * A primary key is used to uniquely identify a row of data; combined with a column name, uniquely locates a data entry
# * A primary key by definition must be `UNIQUE` and `NOT NULL`
# * The primary key of a table, should be a (sequential) non-repeating and not null value
# * Primary keys are generally identified at time of table creation
# * A common method for generating a primary key, is to set the datatype to `INTEGER` and declare `AUTOINCREMENT` which will function when data is inserted into the table
# * Primary keys can be a composite of 2 or more columns that uniquely identify the data in the table
#
#
# #### A `FOREIGN KEY` is a column(s) that points to the `PRIMARY KEY` of another table
#
# * The purpose of the foreign key is to ensure referential integrity of the data.
# In other words, only values that are supposed to appear in the database are permitted.<br>
# Only the values that exist in the `PRIMARY KEY` column are allowed to be present in the FOREIGN KEY column.
# Example: the `genes` table has the `PRIMARY KEY` `_id`; in `go_bp`, the `_id` column is a `FOREIGN KEY` referencing it, so every GO-term association must point to an existing gene.
#
# They are also the underpinning of how tables are joined and relationships portrayed in the database
#
# #### JOIN tables
#
# * Multiple tables contain different data that we want to retrieve from a single query
# * In order to assemble data as part of a query, a JOIN between tables is needed
# * This is a very common practice, since it’s rare for all the data you want to be in a single table
#
#
# * INNER JOIN - return only those rows where there is matching content in BOTH tables (is the default when JOIN is used)
# * OUTER JOIN - returns all rows from both tables even if one of the tables is blank
# * SELF JOIN - can be used to join a table to itself (through aliasing), to compare data internal to the table
#
# ```sql
# SELECT ... FROM table1 [INNER] JOIN table2 ON conditional_expression
# ```
#
sql = '''
SELECT symbol,go_id, evidence
FROM gene_info AS gi
INNER JOIN go_bp AS go
ON gi._id == go._id
WHERE evidence = "ND"
LIMIT 5;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### See the create table statement
# +
# sql column in the sqlite_master table
sql = '''
SELECT sql
FROM sqlite_master
WHERE type= "table" and name == "go_bp"
LIMIT 2;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# -
# ### CREATE TABLE - statement
# https://www.sqlitetutorial.net/sqlite-create-table/
#
# ```sql
# CREATE TABLE [IF NOT EXISTS] [schema_name].table_name (
# column_1 data_type PRIMARY KEY,
# column_2 data_type NOT NULL,
# column_3 data_type DEFAULT 0,
# table_constraints
# ) [WITHOUT ROWID];
# ```
#
# In this syntax:
#
# * First, specify the name of the table that you want to create after the CREATE TABLE keywords. The name of the table cannot start with sqlite_ because it is reserved for the internal use of SQLite.
# * Second, use `IF NOT EXISTS` option to create a new table if it does not exist. Attempting to create a table that already exists without using the IF NOT EXISTS option will result in an error.
# * Third, optionally specify the schema_name to which the new table belongs. The schema can be the main database, temp database or any attached database.
# * Fourth, specify the column list of the table. Each column has a name, data type, and the column constraint. SQLite supports `PRIMARY KEY, UNIQUE, NOT NULL`, and `CHECK` column constraints.
# * Fifth, specify the table constraints such as PRIMARY KEY, FOREIGN KEY, UNIQUE, and CHECK constraints.
# * Finally, optionally use the `WITHOUT ROWID` option. By default, a row in a table has an implicit column, which is referred to as the rowid, oid or _rowid_ column. The rowid column stores a 64-bit signed integer key that uniquely identifies the row inside the table. If you don't want SQLite to create the rowid column, specify the WITHOUT ROWID option. A table that contains the rowid column is known as a rowid table. Note that the WITHOUT ROWID option is only available in SQLite 3.8.2 or later.
# https://www.sqlite.org/syntaxdiagrams.html#create-table-stmt
#
# <img src = "https://www.sqlite.org/images/syntax/create-table-stmt.gif" width="800"/>
# Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes:
# https://www.sqlite.org/datatype3.html
# * `NULL`. The value is a NULL value.
# * `INTEGER`. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.
# * `REAL`. The value is a floating point value, stored as an 8-byte IEEE floating point number.
# * `TEXT`. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).
# * `BLOB`. The value is a blob of data, stored exactly as it was input.
# #### A `PRIMARY KEY` is a very important concept to understand.
# * It is the designation for a column or a set of columns from a table.
# * It is recommended to be a serial value and not something related to the business needs of the data in the table.
#
# * A primary key is used to uniquely identify a row of data; combined with a column name, uniquely locates a data entry
# * A primary key by definition must be `UNIQUE` and `NOT NULL`
# * The primary key of a table, should be a (sequential) non-repeating and not null value
# * Primary keys are generally identified at time of table creation
# * A common method for generating a primary key, is to set the datatype to `INTEGER` and declare `AUTOINCREMENT` which will function when data is inserted into the table
# * Primary keys can be a composite of 2 or more columns that uniquely identify the data in the table
#
#
# #### A `FOREIGN KEY` is a column(s) that points to the `PRIMARY KEY` of another table
#
# * The purpose of the foreign key is to ensure referential integrity of the data.
# In other words, only values that are supposed to appear in the database are permitted.<br>
# Only the values that exist in the `PRIMARY KEY` column are allowed to be present in the FOREIGN KEY column.
# Example: the `genes` table has the `PRIMARY KEY` `_id`; in `go_bp`, the `_id` column is a `FOREIGN KEY` referencing it, so every GO-term association must point to an existing gene.
#
# They are also the underpinning of how tables are joined and relationships portrayed in the database
#
# The `sqlite_master` has the following create statement:
# ```sql
# CREATE TABLE sqlite_master ( type TEXT, name TEXT, tbl_name TEXT, rootpage INTEGER, sql TEXT );
# ```
# ##### The `connection` object methods can be used to save or revert/reset the changes after a command that makes changes to the database
# ##### `COMMIT` - save the changes
# ##### `ROLLBACK` - revert the changes
#
# +
# Check for tables with a name like go_bp
sql = '''
SELECT name
FROM sqlite_master
WHERE name LIKE "go_bp%"
LIMIT 4;
'''
cursor.execute(sql)
print(cursor.fetchall())
# +
# select the sql statement for the go_bp table
sql = '''
SELECT sql
FROM sqlite_master
WHERE name = "go_bp";
'''
cursor.execute(sql)
print(cursor.fetchall()[0][0])
# -
# #### We create the table `go_bp_ALT` with the columns: `ggo_id`, `gene_id`, `go_id` and `evidence`
# +
# Write and run a create table statement for an alternative go_bp_ALT table
sql='''
CREATE TABLE IF NOT EXISTS go_bp_ALT (
ggo_id INTEGER PRIMARY KEY AUTOINCREMENT,
gene_id INTEGER NOT NULL, -- REFERENCES genes _id
go_id CHAR(10) NOT NULL, -- GO ID
evidence CHAR(30) NOT NULL, -- GO evidence information
FOREIGN KEY (gene_id) REFERENCES genes (_id)
);
'''
try:
    # DDL changes the database, so commit on success or roll back on failure.
    cursor.execute(sql)
except connection.DatabaseError:
    # sqlite3 exposes the DB-API exception classes as connection attributes.
    print("Creating the go_bp_ALT table resulted in a database error!")
    connection.rollback()
    raise
else:
    connection.commit()
finally:
    print("done!")
# -
# ##### Similar error handling, as seen above, can be used when executing any statement that changes the database.
# ##### Check if the new table appears in the `sqlite_master` table
# go_bp_ALT should now be listed alongside the original go_bp table
sql = '''
SELECT name
FROM sqlite_master
WHERE name LIKE "go_bp%"
LIMIT 4;
'''
cursor.execute(sql)
print(cursor.fetchall())
#
# <br><br>
# The `sqlite_sequence` table is created and initialized automatically whenever a regular table is created if it has a column with the `AUTOINCREMENT` option set.<br>
# https://www.sqlite.org/autoinc.html
#
# ##### Check if the new table appears in the `sqlite_master` table
# ### INDEXING
#
# Indexes are lookup table, like the index of a book.
# They are usually created for columns that have unique or less redundant values and provide a way to quickly search
# the values.<br>
# Indexing creates a copy of the indexed columns together with a link to the location of the additional information.<br>
# The index data is stored in a data structure that allows for fast searching and sorting. <br>
# E.g.: a balanced tree (every leaf is at most n nodes away from the root). <br>
# All queries (statements) regarding an indexed table are applied to the index
#
#
# * One important function in Relational Databases is to be able to create indexes on columns in tables
# * These indexes are pre-calculated and stored in the database
# * Indexes should be created on columns that are used in queries and joins
# * They will rapidly speed up query return rate and improve query performance
#
# To create an index use the following command:
#
# ```sql
# CREATE INDEX indexName ON tableName (columnName)
# ```
# NOTE(review): ggo_id is the INTEGER PRIMARY KEY (an alias for SQLite's
# rowid), so this index duplicates the built-in rowid lookup — it is shown
# here for demonstration of the CREATE INDEX syntax.
sql = '''
CREATE INDEX gene_go_idx
ON go_bp_ALT (ggo_id)
'''
cursor.execute(sql)
connection.commit()
# ##### Check if the new index appears in the `sqlite_master` table
# Indexes appear in sqlite_master with type = "index"
sql = '''
SELECT name, sql
FROM sqlite_master
WHERE type= "index" AND
name = "gene_go_idx";
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### Remove the index
sql = '''
DROP INDEX gene_go_idx
'''
cursor.execute(sql)
connection.commit()
# ##### Check if the index was removed from the `sqlite_master` table
# The same query should now return no rows
sql = '''
SELECT name, sql
FROM sqlite_master
WHERE type= "index" AND
name = "gene_go_idx";
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# ### INSERT - statement
#
# Makes changes to the database table<br>
# Adds new data to a table (if the constraints are met)
# Constraint examples:
# * For one designated column or a group of columns that are designated as Primary Key the values are unique
# * The value inserted in a column that has a Foreign Key constraint should exist in the column that it refers to
#
# ```sql
# INSERT INTO <tablename> (<column1>, <column2>, <column3>) VALUES (value1, value2, value3);
# ```
#
# ##### One simple INSERT command adds 1 row of data at a time into an existing table
#
# ##### Connection object allows us to:
# * ##### COMMIT - save the changes
# * ##### ROLLBACK - reverts/discards the changes
# <br>
#
# ##### Let's see what is in the table (it should be nothing):
sql = '''
SELECT *
FROM go_bp_ALT;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# <br>
#
# ##### Let's try an insert:
# ```sql
# INSERT INTO <tablename> (<column1>, <column2>, <column3>) VALUES (value1, value2, value3);
# ```
# +
# Use "?" placeholders and pass the values separately: the driver handles
# quoting/escaping and this guards against SQL injection.
values_list = [1234,"GO:1234","CM_EV"]
sql = '''
INSERT INTO go_bp_ALT (gene_id, go_id, evidence)
VALUES (?,?,?);
'''
cursor.execute(sql,values_list)
connection.commit()
# +
# This command retrieves the identifier of the last row from the most current query
# The gene_go_id
# (i.e. the AUTOINCREMENT-generated ggo_id of the row just inserted)
id_value = cursor.lastrowid
id_value
# -
# <br>
#
#
# ##### We have a row in the table!!! And the gene_go_id was automatically generated.
sql = '''
SELECT *
FROM go_bp_ALT ;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### You can have a Python "table" structure (list of lists) of insert values and get them all inserted in one command, each sublist having the correct number of values.
#
# +
# executemany() runs the INSERT once per sublist, binding each sublist to the
# three "?" placeholders in turn.
values_tbl = [[1235,"GO:1235","CM_EV"], [1236,"GO:1236","CM_EV"], [1236,"GO:1237","CM_EV"]]
sql = '''
INSERT INTO go_bp_ALT (gene_id, go_id, evidence)
VALUES (?,?,?);
'''
cursor.executemany(sql,values_tbl)
connection.commit()
# -
sql = '''
SELECT *
FROM go_bp_ALT ;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### UPDATE - statement - changes the table rows
#
#
# MODIFIES DATA (already in a table) in all rows matching the WHERE clause
#
# ```sql
# UPDATE table_name
# SET column1 = value1, column2 = value2...., columnN = valueN
# WHERE [condition];
# ```
#
# Update is generally a single row command, but use of the where clause can cause data to be updated in multiple rows <br>
# (whether you intended to or not !!!!)
#
# The following statement updates the evidence for all entries of the gene that is associated with the 2 biological processes
# Gene 1236 was inserted twice above (GO:1236 and GO:1237), so this UPDATE
# changes the evidence on both of those rows.
sql = '''
UPDATE go_bp_ALT
SET evidence = "EXP"
WHERE gene_id = 1236;
'''
cursor.execute(sql)
connection.commit()
sql = '''
SELECT *
FROM go_bp_ALT ;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### DELETE - statement - deletes table rows
#
# * MAKES CHANGES TO THE DATA
# * Row level deletion – can’t delete less than this.
#
# ```sql
# DELETE FROM <tablename> WHERE <column> = <value>
# ```
#
# * The WHERE predicate is the same as for the SELECT statement, that is, it determines which rows will be deleted
#
#
# Removes the rows for the two listed GO ids (GO:1235 and GO:1237 remain)
sql = '''
DELETE FROM go_bp_ALT
WHERE go_id IN ("GO:1234","GO:1236");
'''
cursor.execute(sql)
connection.commit()
sql = '''
SELECT *
FROM go_bp_ALT ;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# ```sql
# DELETE FROM <tablename>;
# ```
#
# * This would delete all rows of data from a table.
# * Preserves table structure (table still exists)
# * Optimized for speed in SQLite, no row-by-row execution.
# * EXISTS <table_name> still evaluates to True
#
# +
# Delete all data from the table - but keep the table
# (a DELETE without a WHERE clause removes every row)
sql = '''
DELETE FROM go_bp_ALT;
'''
cursor.execute(sql)
connection.commit()
# -
# The table still exists but the SELECT now returns no rows
sql = '''
SELECT *
FROM go_bp_ALT ;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# <br>
#
# #### `DROP TABLE` - statement - removes a table (permanently)
sql = '''
DROP TABLE IF EXISTS go_bp_ALT;
'''
cursor.execute(sql)
connection.commit()
# After the DROP only the original go_bp table should match the pattern
sql = '''
SELECT name AS "TABLE NAME"
FROM sqlite_master
WHERE name LIKE "go_bp%";
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# #### VIEW in a database
#
# * A view is a virtual table which can be created from a query on existing tables
# * Views are created to give a more human readable version of the normalized data / tables
# * http://www.sqlitetutorial.net/sqlite-create-view/
# * An SQLite view is read only
# ```sql
# CREATE [TEMP] VIEW [IF NOT EXISTS] view_name(column-name-list) AS
# select-statement;
# ```
# gene go information for easy access
# The view joins gene_info to go_bp on the shared _id key and keeps only
# experimentally supported annotations.
# NOTE: "==" is nonstandard SQL; SQLite accepts it as equality ("=").
sql = '''
CREATE VIEW IF NOT EXISTS gene_go_info (symbol, go_id, evidence) AS
SELECT symbol, go_id, evidence
FROM gene_info AS gi
INNER JOIN go_bp AS go
ON gi._id == go._id
WHERE evidence IN ("EXP","IDA") ;
'''
cursor.execute(sql)
connection.commit()
# gene go information
# A view is queried exactly like a table
sql = '''
SELECT *
FROM gene_go_info
LIMIT 10;
'''
cursor.execute(sql)
print(get_header(cursor))
print(get_results(cursor))
# ```sql
# DROP VIEW [IF EXISTS] view_name;
# ```
# gene go information for easy access
sql = '''
DROP VIEW IF EXISTS gene_go_info;
'''
cursor.execute(sql)
connection.commit()
# +
# And close()
# Release the cursor and the database connection when done
cursor.close()
connection.close()
# -
# #### To remove the database, delete the .sqlite file.
| completed_notebooks/SQL_commands_continued_MoWe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Data Exploration</h1>
# <p>In this notebook we will perform a broad data exploration on the <code>Hitters</code> data set. Note that the aim of this exploration is not to be completely thorough; instead we would like to gain quick insights to help develop a first prototype. Upon analyzing the output of the prototype, we can analyze the data further to gain more insight.</p>
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %run ../../customModules/DataQualityReports.ipynb
# https://stackoverflow.com/questions/34398054/ipython-notebook-cell-multiple-outputs
# "all" makes Jupyter display every bare expression in a cell, not just the last one
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# -
# <p>We first read the comma-separated values (csv) <code>Hitters</code> file into a pandas DataFrame. To get a feeling for the data we display the top five rows of the DataFrame using the <code>head()</code> method and we show how many rows and columns the DataFrame has by using the <code>shape</code> attribute. We also show the <code>dtypes</code> attribute, which returns a pandas Series with the data type of each column.</p>
# Load the Hitters data, using the first CSV column (player name) as the index
df = pd.read_csv("Hitters.csv", index_col = 0)
df.head()
df.shape
df.dtypes
# <p>Is appears that all the columns have the data type we would expect. We can perform another check to see if any values are missing in the DataFrame using its <code>isnull</code> method.</p>
# Show the rows containing at least one missing value.
# NOTE(review): reset_index() is computed twice here; binding it once to a
# variable would avoid the duplicated work.
df.reset_index()[df.reset_index().isnull().any(axis=1)]
df[df.isnull().any(axis=1)].shape
# <p>This shows that there are $59$ missing values in total that seem pretty randomly distributed accross the $322$ total rows. So the next step to be able to produce the data quality reports with our custom <code>createDataQualityReports</code> function is to organize our DataFrame by quantitative and categorical variables using hierarchical indexing.</p>
# Tag every column as quantitative or categorical by building a two-level
# (hierarchical) column index, then group the columns by that first level.
# League, Division and NewLeague are the only categorical variables; all
# other columns are quantitative.
_categorical_cols = {'League', 'Division', 'NewLeague'}
df.columns = pd.MultiIndex.from_tuples(
    [('categorical' if col in _categorical_cols else 'quantitative', col)
     for col in df.columns],
    names=['type of variable', 'variable'])
df.sort_index(axis=1, level='type of variable', inplace=True)
df.head()
# <p>We are now in the position to use our own <code>createDataQualityReports</code> function to create a data quality report for both the categorical and the quantitative variables.</p>
# createDataQualityReports comes from the %run of DataQualityReports.ipynb above
df_qr_quantitative, df_qr_categorical = createDataQualityReports(df)
df_qr_quantitative.name + ':'
df_qr_quantitative.round(2)
df_qr_categorical.name + ':'
df_qr_categorical.round(2)
# <p>To further gain insight into the data, we use the <code>plotQuantitativeVariables</code> and <code>plotCategoricalVariables</code> functions the produce the frequency plots for each quantitative and categorical variable.</p>
# xs() selects one level of the hierarchical column index built above
plotQuantitativeVariables(df.xs('quantitative', axis=1), height=3, width=7)
plotCategoricalVariables(df.xs('categorical', axis=1), height=3, width=7)
# <p>We also compute the correlation matrix of the variables.</p>
# NOTE(review): Styler.set_precision is deprecated in pandas >= 1.3
# (use .format(precision=2)); also newer pandas requires numeric_only=True
# for corr() on mixed dtypes — confirm the pinned pandas version.
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm').set_precision(2)
| DataSets/Hitters/Exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.7 64-bit ('owls')
# metadata:
# interpreter:
# hash: 50046ff9dcf6bc9c7f602b83e29ed8025a3cbb4f44bb186dc7be364a7b0f9523
# name: python3
# ---
# # Feature Descriptions
# # Absolute Features
# Track statistics that depend only on a single track.
#
# A number of the below statistics include an autocorrelation calculation with a specific time lag. High autocorrelation values mean that there is some a repeated pattern to the time series being investigated. Low values mean there is little or no periodicity at the time lag investigated.
#
# If you are running feature generation using the `features` step as part a `HELM_pipeline` call, invalid feature values (e.g., np.inf) will be substituted with a reasonable number. This is to prevent the classifier step from having to handle NaNs. See `features.py` for the dictionary of substitutions.
#
# ## Speed
# Speed statistics depend on spatial distance traveled (in pixels by default) per inter-frame interval.
#
# ### `speed_mean`
# Speed of the particle over time
#
# $ \bar{\textbf{s}} = \frac{1}{N} \sum_{i=1}^N \|{\vec{x}_i - \vec{x}_{(i-1)}}\| $
# ### `speed_max`
# Maximum speed of the particle over time.
#
# $ \max{(\textbf{s})} $
# where
# $ s_i = \|\vec{v}_i\| $
#
# ### `speed_stdev`
# Standard deviation of the particle's speed over time
#
# ### `speed_autoCorr_lag1`
# Autocorrelation of the particle's speed at 15 frames (1 second if sampling @ 15 FPS)
# ### `speed_autoCorr_lag2`
# Autocorrelation of the particle's speed at 30 frames (2 seconds if sampling @ 15 FPS)
#
#
# ## Acceleration
# Acceleration statistics depend on the change in speed (measured in pixels by default) per inter-frame interval.
#
# ### `accel_mean`
# $ \bar{\textbf{a}} = \frac{1}{N} \sum_{i=1}^N \|{\vec{v}_i - \vec{v}_{(i-1)}}\| $
#
# ### `accel_max`
# Maximum acceleration of the particle over time
#
# $ \max{(\textbf{a})} $
# where
# $ a_i = \|\vec{a}_i\| $
# ### `accel_stdev`
# Standard deviation of the acceleration
#
# ### `accel_autoCorr_lag1`
# Autocorrelation of the particle's acceleration at 15 frames (1 second if sampling @ 15 FPS)
# ### `accel_autoCorr_lag2`
# Autocorrelation of the particle's acceleration at 30 frames (2 seconds if sampling @ 15 FPS)
#
#
#
# ## Step Angle
# The step angle measures how much a particle turns at each time point. It gives the angle that a particle deviated from a straight path per inter-frame interval (in radians). Passively drifting particles should not vary much in their direction from frame to frame, while highly motile particles that swerve and turn regularly will show large changes in step angle.
#
# ### `step_angle_mean`
# Mean angle of deviation from point to point along the entire particle's path. Strictly positive.
#
# ### `step_angle_max`
# Maximum angle of deviation from one point to the next along the entire particle's path
#
# ### `step_angle_stdev`
# Standard deviation of the step angle along the entire particle's path
#
# ### `step_angle_autoCorr_lag1`
# Autocorrelation of the particle's step angle at 15 frames (1 second if sampling @ 15 FPS)
#
# ### `step_angle_autoCorr_lag2`
# Autocorrelation of the particle's step angle at 30 frames (2 seconds if sampling @ 15 FPS)
#
#
#
# ## Displacement
# The displacement metrics measure track characteristics focusing more on holistic movement (as opposed to inter-frame changes). Where applicable, matrix coordinates are used (In 2D, positive direction of 0th coordinate is downward and positive direction of first coordinate is rightward.)
#
# ### `track_length`
# Length traveled by the particle calculated by adding sum of interframe distances (in pixels by default).
#
# ### `track_lifetime`
# Number of frames the track was present for.
#
# ### `disp_e2e_h`
# Horizontal displacement from the particle's start point to finishing point (in pixels by default). Rightward is positive.
#
# ### `disp_e2e_v`
# Vertical displacement from the particle's start point to finishing point (in pixels by default). Uses matrix coords, so positive is downward.
#
# ### `disp_e2e_norm`
# Distance between start and stop of track.
#
# ### `disp_mean_h`
# Average horizontal distance traveled by the particle (in pixels by default) per inter-frame interval. Rightward is positive.
#
# ### `disp_mean_v`
# Average vertical distance traveled by the particle (in pixels by default) per inter-frame interval. Uses matrix coords, so downward is positive.
#
# ### `disp_angle_e2e`
# The angle of the vector (in radians) from a track's start to its finish. The angle is zero pointing rightwards on the horizontal axis and increases going counter-clockwise.
#
# ### `sinuosity`
# Measure of movement inefficiency defined as total path length traveled over end-to-end distance. For a straight path, `sinuosity` will equal 1. For a serpentine or circular movement pattern, `sinuosity` will be much greater than 1.
#
# ### `msd_slope`
# Slope of the regression line fitting the measured Mean Squared Displacement (MSD). MSD and its slope provide information about the Euclidean distance traveled in a certain amount of time and they often used to delineate Brownian motion from other types of motion. MSD is defined as
#
# $ MSD(\tau) = \langle\|x(t+\tau) - x(t)\|\rangle $
#
# The slope is a regression fit to the $ \tau $ vs. MSD line.
#
# ## Size
# The size of each particle is estimated by counting the area (in pixels by default) of the bounding box containing all pixels from the clustering step.
#
# ### `bbox_area_mean`
# Mean size of the particle bounding box over time
#
# ### `bbox_area_median`
# Median size of the particle bounding box over time
#
# ### `bbox_area_max`
# Maximum size of the particle bounding box over time
#
# ### `bbox_area_min`
# Minimum size of the particle bounding box over time
#
# # Relative Features
# Relative features consist of metrics that characterize a track relative to all the other tracks in an experiment.
#
# Currently, relative features are not normalized by track length when calculating the global track statistics.
#
# If you are running feature generation using the `features` step as part a `HELM_pipeline` call, invalid feature values (e.g., np.inf) will be substituted with a reasonable number. This is to prevent the classifier step from having to handle NaNs. See `features.py` for the dictionary of substitutions.
#
# ## Speed
# ### `rel_speed`
# Ratio of a track's mean speed to the mean speed of all other tracks. A value of 1 means the particle's speed matched the mean speed of the rest of the particles. A value of 2 means it traveled two times faster than the average particle and 0.5 means it traveled half as fast.
#
# ## Step Angle
# ### `rel_disp_cosine_similarity`
# Cosine similarity between the mean step angle of the track of interest and the mean step angle of all other tracks. Cosine similarity is a measure of alignment between two vectors. A value of 1 means the track of interest is perfectly aligned with all other vectors while -1 means it points directly in the opposite direction.
#
# *Special cases*: If either the track of interest or the mean of all other tracks have/has zero length, the function will return 0. If both the track of interest and the mean of all other tracks have zero length, the function will return 1 (since they are similar in that neither resulted in net displacement).
#
# ### `rel_step_angle`
# Difference between the mean step angle of the track of interest and the mean step angle of all other tracks (in radians). Strictly positive.
| src/helm_dhm/features/feature_descriptions.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # dudeney
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/dudeney.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/dudeney.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010 <NAME> (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ortools.constraint_solver import pywrapcp
def dudeney(n):
    """Enumerate and print the n-digit Dudeney numbers.

    A Dudeney number is a positive integer that equals the cube of its own
    digit sum (e.g. 512 = (5 + 1 + 2) ** 3). The search is modeled as a
    constraint program: one 0-9 variable per digit, a variable for the
    number itself and one for the digit sum, linked by three constraints.
    Prints each solution, then the solver's failure count and wall time.
    """
    solver = pywrapcp.Solver('Dudeney')

    # One decision variable per digit, most significant first.
    digits = [solver.IntVar(list(range(10)), 'x' + str(i)) for i in range(n)]
    # The candidate number and its digit sum (at most 9 per digit).
    number = solver.IntVar(list(range(3, 10**n)), 'nb')
    digit_sum = solver.IntVar(list(range(1, 9 * n + 1)), 's')

    # number is the cube of its digit sum ...
    solver.Add(number == digit_sum * digit_sum * digit_sum)
    # ... the digits spell out number in base 10 ...
    solver.Add(sum([10**(n - i - 1) * digits[i] for i in range(n)]) == number)
    # ... and the digits add up to digit_sum.
    solver.Add(sum([digits[i] for i in range(n)]) == digit_sum)

    # Collect the value of `number` for every solution found.
    solution = solver.Assignment()
    solution.Add(number)
    collector = solver.AllSolutionCollector(solution)
    solver.Solve(
        solver.Phase(digits, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT),
        [collector])
    for i in range(collector.SolutionCount()):
        print(collector.Value(i, number))
    print('#fails:', solver.Failures())
    print('time:', solver.WallTime(), 'ms')
| examples/notebook/contrib/dudeney.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id='logbook'></a>
# # Logbook
# +
# # %load ../imports.py
"""
These is the standard setup for the notebooks.
"""
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
#from jupyterthemes import jtplot
#jtplot.style(theme='onedork', context='notebook', ticks=True, grid=False)
import pandas as pd
pd.options.display.max_rows = 999
# NOTE(review): the next line sets max_columns to 999 and is immediately
# overridden by set_option(..., None) below (None = show all columns);
# one of the two is redundant.
pd.options.display.max_columns = 999
pd.set_option("display.max_columns", None)
import numpy as np
import os
import matplotlib.pyplot as plt
#plt.style.use('paper')
#import data
import copy
from rolldecay.bis_system import BisSystem
from rolldecay import database
from mdldb.tables import Run
from sklearn.pipeline import Pipeline
from rolldecayestimators.transformers import CutTransformer, LowpassFilterDerivatorTransformer, ScaleFactorTransformer, OffsetTransformer
from rolldecayestimators.direct_estimator_cubic import EstimatorQuadraticB, EstimatorCubic
from rolldecayestimators.ikeda_estimator import IkedaQuadraticEstimator
import rolldecayestimators.equations as equations
import rolldecayestimators.lambdas as lambdas
from rolldecayestimators.substitute_dynamic_symbols import lambdify
import rolldecayestimators.symbols as symbols
import sympy as sp
from sklearn.metrics import r2_score
# -
# ## Nomenclature
# | Variable | Explain |
# |---|---|
# |$\pi$| example |
# Here is a cell link: [Logbook](#logbook)
# ## 2020-09-22
# What have I done today?
# ## References
# <div class="cite2c-biblio"></div>
| research_jupyter_templates/logbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cord
# language: python
# name: cord
# ---
# ## Similar Papers
# +
# %load_ext autoreload
# %autoreload 2
from cord.core import similar_papers, document_vectors
from cord import ResearchPapers
# -
# Look up papers similar to the paper with this CORD-19 uid.
# NOTE(review): presumably nearest neighbours in document_vectors space —
# confirm against cord.core.similar_papers.
ids = similar_papers('0jhfn824')
ids
def get_index(cord_uid):
    # Look up the document-vector row for a paper by its CORD-19 uid.
    # NOTE(review): assumes document_vectors is indexed by cord_uid;
    # .loc raises KeyError for unknown ids.
    return document_vectors.loc[cord_uid]
import numpy as np
# Find the positional index of a uid in the document-vector index.
# NOTE(review): '4x1ckri' here vs '4x1ck2i' below — one of the two uids
# looks like a typo; verify which one exists in the dataset.
np.where(document_vectors.index == '4x1ckri')
# +
# np.where?
# -
papers = ResearchPapers.load()
papers.similar_to('ivwn4nhl')
np.where(document_vectors.index=='le0ogx1s')
document_vectors
row_match = np.where(document_vectors.index == '4x1ck2i')
row_match
papers.covid_related()
# Papers support positional lookup as well
papers[12214]
query = """
Efforts to identify the underlying drivers of fear, anxiety and stigma that fuel misinformation and rumor,
particularly social media
"""
papers.search_2d(query)
| similar-papers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## This python notebook has examples of the Set-Coverage Problem and Partial Set-Coverage Problem
# ### load libraries
import geopandas as gpd
import pandas as pd
import os, sys, time
sys.path.append(r'/home/vagrant/repos/GOST_PublicGoods/GOSTNets/GOSTNets')
import GOSTnet as gn
import importlib
import networkx as nx
import osmnx as ox
from shapely.ops import unary_union
from shapely.wkt import loads
from shapely.geometry import LineString, MultiLineString, Point
# ### read back in processed graph
# Largest connected subgraph of the Nouakchott road network, built earlier
G = nx.read_gpickle('./sampleData/nouakchott/biggest_subg.pickle')
gn.example_edge(G)
# ## load origins and destinations
origins = pd.read_csv('./sampleData/nouakchott/origins_test1.csv')
# Build shapely Points from the Lon/Lat columns
origins['geometry'] = list(zip(origins['Lon'],origins['Lat']))
origins['geometry'] = origins['geometry'].apply(Point)
# NOTE(review): the {'init': 'epsg:4326'} crs style is deprecated in
# pyproj >= 2 — 'epsg:4326' is the modern spelling; confirm pinned versions.
origins_gdf = gpd.GeoDataFrame(origins, crs = {'init':'epsg:4326'}, geometry = 'geometry')
# Snap each origin to its nearest network node (stored in column NN)
origins_gdf = gn.pandana_snap(G, origins_gdf, target_crs = 'epsg:32628', add_dist_to_node_col = True)
origins = list(origins_gdf.NN)
# Deduplicate snapped nodes (set() drops the original ordering)
origins = list(set(origins))
destinations = pd.read_csv('./sampleData/nouakchott/destinations_test1.csv')
destinations['geometry'] = list(zip(destinations['Lon'],destinations['Lat']))
destinations['geometry'] = destinations['geometry'].apply(Point)
destinations_gdf = gpd.GeoDataFrame(destinations, crs = {'init':'epsg:4326'}, geometry = 'geometry')
destinations_gdf = gn.pandana_snap(G, destinations_gdf, target_crs = 'epsg:32628', add_dist_to_node_col = True)
#destinations_gdf.NN is the nearest node of the road network
destinations = list(destinations_gdf.NN)
destinations = list(set(destinations))
# ## calculate OD Matrix
# %time OD = gn.calculate_OD(G, origins, destinations, fail_value = 9999999999999)
# NOTE(review): OD is assigned only by the %time magic line above — if this
# script is run outside Jupyter that line is a comment and OD is undefined.
OD_df = pd.DataFrame(OD, columns = destinations, index = origins)
import importlib
importlib.reload(gn)
# ## Set-Coverage Problem
# ### Objective: Determine the minimum number of facilities and their locations in order to cover all demands within a pre-specified maximum distance (or time) coverage
# ### inputs include a pre-specified maximum distance coverage of 3000 seconds
set_coverage_result = gn.optimize_set_coverage(OD_df,max_coverage = 3000)
set_coverage_result
# ## Partial Set-Coverage Problem
# ### Objective: Determine the minimum number of facilities and their locations in order to cover a given fraction of the population within a pre-specified maximum distance (or time) coverage
# ### we need to produce a series that has each origin and its respective population
# Map each origin's snapped network node to its demand (population)
origins_w_demands_series = pd.Series(origins_gdf.demand.values,index=origins_gdf.NN)
# ### inputs include covering 90 percent of the population, a pre-specified maximum distance coverage of 2000 seconds, and a series of origins with their population
partial_set_coverage_result = gn.optimize_partial_set_coverage(OD_df, pop_coverage = .9, max_coverage = 2000, origins_pop_series = origins_w_demands_series, existing_facilities = None)
partial_set_coverage_result
| GOSTNets/Notebooks/.ipynb_checkpoints/optimization_set_coverage_problem_importing_OD_csv-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis
# https://towardsdatascience.com/exploratory-data-analysis-8fc1cb20fd15
# ### Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# ### Read data
# Load the data set to explore
file = 'test.csv'
df = pd.read_csv(file)
df_columns = df.columns.tolist()
# ### Simple basic analysis
df.head()
df.isnull().sum()
df.describe()
df.info()
# FIX: the original `print(df, 5)` printed the entire DataFrame followed by a
# stray literal 5; showing the first five rows matches the intent of this
# "basic analysis" section.
print(df.head(5))
# ### Graphical analysis
# Pairplot
# FIX: the original line had an unbalanced trailing ")" which made the whole
# file a SyntaxError.
sns.pairplot(df, hue='y_column', palette='Set1')
# Heatmap
sns.heatmap(df.corr())
# NOTE(review): df[1] selects the column literally labelled 1 — presumably a
# placeholder like "column_name" below; distplot is deprecated in modern
# seaborn (use histplot/displot) — confirm the pinned seaborn version.
sns.distplot(df[1])
# sns.distplot(df["column_name"])
# Histogram
sns.set_style('whitegrid')
df['column'].hist(bins=30)
plt.xlabel('column')
# Jointplot
sns.jointplot(x='column1',y='column2',data=df)
# Jointplot with KDE
sns.jointplot(x='column1',y='column2',data=df,color='red',kind='kde');
| eda/eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # This is an example of pseudo-vectorization using lapply function on a list.
#
# The following function applies a function to each element of a list and returns the results of the function application as a list.
# l is a one-element list holding the vector -5..5; lapply applies the
# predicate to that single element and returns the results in a list.
l <- list(seq.int(-5, 5, 1))
message("List is ", l)
message("List positives are ", lapply(l, (function(x) x > 0)))
# # This is an example of pseudo-vectorization using vapply function on a list.
#
# The following function applies a function to each element of a list and returns the results of the function application as a vector or an array.
#
# vapply additionally checks each result against the template logical(11):
# the predicate must return exactly 11 logical values per list element.
l <- list(seq.int(-5, 5, 1))
message("List is ", l)
message("List positives are ", list(vapply(l, (function(x) x > 0), logical(11))))
# # This is an example of pseudo-vectorization using sapply function on a list.
#
# The following function applies a function to each element of a list and returns the results of the function application as a vector or an array.
#
# sapply is lapply plus simplification of the result where possible.
l <- list(seq.int(-5, 5, 1))
message("List is ", l)
message("List positives are ", list(sapply(l, (function(x) x > 0))))
# # This is an example of pseudo-vectorization using apply function on an array.
#
# The following function applies a function to each element of an array's row or column and returns the results of the function application as a vector.
#
# MARGIN = 1 applies sum over rows of the 4x4 array
m <- array(1:16, dim = c(4,4))
message("Array row sum is ", list(apply(m, 1, sum)))
# MARGIN = 2 applies sum over columns
m <- array(1:16, dim = c(4,4))
message("Array column sum is ", list(apply(m, 2, sum)))
# # This is an example of split-apply-combine using tapply function on a dataframe.
#
# The following function splits a dataframe, applies a function to the split groups and then collects the function application results into a list. The split-apply-combine approach is executed using tapply function.
#
# tapply splits r1 by the grouping values in r2 and takes each group's mean
# (here every r2 value is distinct, so each group is a single r1 value)
df <- data.frame(r1=1:16, r2=17:32)
message("Split-apply-combine on df gives ", list(with(df, tapply(r1, r2, mean))))
| notebooks/R-Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="VwK5-9FIB-lu"
# # Natural Language Processing
# + [markdown] id="X1kiO9kACE6s"
# ## Importing the libraries
# + id="zlF1pT0TDWcT"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] id="wTfaCIzdCLPA"
# ## Importing the dataset
# + id="QWvJ6iL9EIvI"
# quoting = 3 is csv.QUOTE_NONE: keep double quotes inside the review text verbatim
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
# + [markdown] id="Qekztq71CixT"
# ## Cleaning the texts
# -
import regex
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# Hoist the loop invariants: the original rebuilt PorterStemmer() on every
# review and set(all_stopwords) on every single word, which is O(reviews x
# words x stopwords). Build them once up front — identical output, much faster.
ps = PorterStemmer()
all_stopwords = stopwords.words('english')
all_stopwords.remove('not')  # keep negations: "not good" flips sentiment
stopword_set = set(all_stopwords)

corpus = []
for i in range(0, len(dataset)):
    # Replace every non-letter with a space (\p{L} also covers accented
    # letters); raw string avoids the invalid "\p" escape warning.
    review = regex.sub(r'[^\p{L}]', ' ', dataset['Review'][i])
    review = review.lower()
    review = review.split()
    # Stem each remaining token, dropping stop words
    review = [ps.stem(word) for word in review if not word in stopword_set]
    review = ' '.join(review)
    corpus.append(review)
print(corpus)
# + [markdown] id="CLqmAkANCp1-"
# ## Creating the Bag of Words model
# -
from sklearn.feature_extraction.text import CountVectorizer
# Cap the vocabulary at the 1500 most frequent tokens (value chosen after
# inspecting the size of the full vocabulary in a previous run).
cv = CountVectorizer(max_features = 1500) # set after getting number of all words
X = cv.fit_transform(corpus).toarray()
# Last column of the dataset holds the 0/1 sentiment label.
y = dataset.iloc[:, -1].values
# Sanity check: number of features per review (== max_features).
len(X[0])
# + [markdown] id="DH_VjgPzC2cd"
# ## Splitting the dataset into the Training set and Test set
# -
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# + [markdown] id="VkIq23vEDIPt"
# ## Training the Kernel Support Vector Machine model on the Training set
# -
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)
# + [markdown] id="1JaRM7zXDWUy"
# ## Predicting the Test set results
# -
y_pred = classifier.predict(X_test)
# evaluate performance by comparing the predicted review and the ground truth
# (column 0 = prediction, column 1 = true label, one row per test review)
print(np.concatenate(
    (
        y_pred.reshape(len(y_pred), 1),
        y_test.reshape(len(y_test), 1)
    ),
    axis=1))
# + [markdown] id="xoMltea5Dir1"
# ## Making the Confusion Matrix
# -
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
print('Accuracy: {0:.2g}'.format(accuracy_score(y_test, y_pred)))
print('Precision: {0:.2g}'.format(precision_score(y_test, y_pred)))
print('Recall: {0:.2g}'.format(recall_score(y_test, y_pred)))
print('F1 Score: {0:.2g}'.format(f1_score(y_test, y_pred)))
| nlp/bag-of-words/my_natural_language_processing_kernel_svm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Background
# Variant calling is the process used for identifying variants from sequence data. In order to achieve this one has to align the sequence files (in the FASTQ format) against a genome reference in order to generate a BAM format file. This BAM file can be used with different variant callers in order to identify what positions differ between the sequence data and the genome reference. This process of variant calling is not perfect, and the algorithms used for the analysis can call variants that are spurious and not real, this is why it is advidsable to apply some filters on the variant call set in order to get rid of these fake variants.
#
# This document describes how to do the variant filtering of a VCF format file generated using BCFTools [[1]](https://samtools.github.io/bcftools/). The approach followed consists of using a supervised machine learning method for the filtering; more specifically, we will use a logistic regression classifier and assess its performance on a test dataset in order to decide whether the classifier is working well.
# # Data used
# In this document we have used a VCF file generated for sample NA12878 using the sequencing data generated for the 1000 Genomes Project. The callset in the VCF file was generated using BCFTools.
#
# # Training the model
# We are going to use the sites in our callset for chr20 that are also found by GIAB NA12878 [[2]](https://github.com/genome-in-a-bottle). GIAB sequenced NA12878 using 13 different sequencing technologies and analysis methods, so the GIAB callset is considered as the gold standard callset and this is why we considered the variants identified by GIAB and us as true sites that are useful to train the model. We will train the classifier independently for the SNPs and the INDELs
# ## Annotations used for the filtering exercise
# BCFTools annotates each of the identified variants with a set of features used as predictors for our model. The variant annotations used are:
#
# + active=""
# ##INFO=<ID=IDV,Number=1,Type=Integer,Description="Maximum number of reads supporting an indel">
# ##INFO=<ID=IMF,Number=1,Type=Float,Description="Maximum fraction of reads supporting an indel">
# ##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw read depth">
# ##INFO=<ID=RPB,Number=1,Type=Float,Description="Mann-Whitney U test of Read Position Bias (bigger is better)">
# ##INFO=<ID=MQB,Number=1,Type=Float,Description="Mann-Whitney U test of Mapping Quality Bias (bigger is better)">
# ##INFO=<ID=BQB,Number=1,Type=Float,Description="Mann-Whitney U test of Base Quality Bias (bigger is better)">
# ##INFO=<ID=MQSB,Number=1,Type=Float,Description="Mann-Whitney U test of Mapping Quality vs Strand Bias (bigger is better)">
# ##INFO=<ID=VDB,Number=1,Type=Float,Description="Variant Distance Bias for filtering splice-site artefacts in RNA-seq data (bigger is better)",Version="3">
# ##INFO=<ID=SGB,Number=1,Type=Float,Description="Segregation based metric.">
# ##INFO=<ID=MQ0F,Number=1,Type=Float,Description="Fraction of MQ0 reads (smaller is better)">
# ##INFO=<ID=ICB,Number=1,Type=Float,Description="Inbreeding Coefficient Binomial test (bigger is better)">
# ##INFO=<ID=HOB,Number=1,Type=Float,Description="Bias in the number of HOMs number (smaller is better)">
# ##INFO=<ID=MQ,Number=1,Type=Integer,Description="Average mapping quality">
# -
# BCFTools annotates each site with a different annotation set depending whether the variant is a SNP or an INDEL.
# For SNPs:
# DP,RPB,MQB,BQB,MQSB,SGB,MQ0F,ICB,HOB,MQ
# For INDELs:
# DP,IDV,IMF,VDB,SGB,MQSB,MQ0F,ICB,HOB,MQ
# ## SNPs
# We are going to use two different SNP VCFs, one has all the sites identified both by GIAB and us (`TP.highconf.vcf.gz`) and the other was identified by us and not by GIAB and are considered False Positive sites (`FP.highconf.vcf.gz`). Only the sites falling in the high-confidence regions as defined by GIAB are considered.
# Then we extract the annotations for each of the files by doing:
# + active=""
# bcftools query -H -f '%CHROM\t%POS\t%INFO/DP\t%INFO/RPB\t%INFO/MQB\t%INFO/BQB\t%INFO/MQSB\t%INFO/SGB\t%INFO/MQ0F\t%INFO/ICB\t%INFO/HOB\t%INFO/MQ\n' TP.highconf.vcf.gz > TP_annotations.tsv
#
# bcftools query -H -f '%CHROM\t%POS\t%INFO/DP\t%INFO/RPB\t%INFO/MQB\t%INFO/BQB\t%INFO/MQSB\t%INFO/SGB\t%INFO/MQ0F\t%INFO/ICB\t%INFO/HOB\t%INFO/MQ\n' FP.highconf.vcf.gz > FP_annotations.tsv
# -
# Now, we read-in the annotations in each of the files into a Pandas dataframe
# +
import pandas as pd
import numpy as np
# %matplotlib inline
# bcftools writes '.' for missing annotation values, so map it to NaN on read.
# NOTE(review): hard-coded absolute paths — this only runs on the author's machine.
DF_TP=pd.read_csv('/Users/ernesto/SCRATCH/VARIANT_FILTERING/SNPS/TP_annotations_snps.tsv',sep="\t",na_values=['.'])
DF_FP=pd.read_csv('/Users/ernesto/SCRATCH/VARIANT_FILTERING/SNPS/FP_annotations_snps.tsv',sep="\t",na_values=['.'])
# -
# Now we will add a new column named `is_valid` to our two dataframes that will be 1 if the variant is real and will be 0 when the variant is a false positive. This new column will be the dependent binary variable in our classifier
DF_TP=DF_TP.assign(is_valid=1)
DF_FP=DF_FP.assign(is_valid=0)
# And now we combine the two dataframes into a single dataframe:
frames = [DF_TP,DF_FP]
DF = pd.concat(frames)
# Now, let's identify the columns with NA values:
DF.isna().any()
# We see several columns with NA values, and the question is how do we deal with them?
# Well, there are 2 possibilities, to remove them by using the function `dropna()` or to impute them by using an estimator (mean, median). I will impute the missing values using the median calculated on each feature:
# Drop the chromosome column (non-numeric) so the remaining frame is all-numeric
# and can be fed to the imputer below.
DF_num = DF.drop("# [1]CHROM", axis=1)
# +
# sklearn.preprocessing.Imputer was deprecated in scikit-learn 0.20 and
# removed in 0.22; SimpleImputer is its drop-in replacement.
from sklearn.impute import SimpleImputer

# Impute the missing annotation values with the per-column median.
imputer = SimpleImputer(strategy="median")
imputer.fit(DF_num)
X = imputer.transform(DF_num)
# -
# The Numpy array (X) is transformed back to a dataframe:
DF_tr = pd.DataFrame(X, columns=DF.columns.drop(['# [1]CHROM']))
# ### Normalization of the different features
# Some of the machine learning algorithms require that the different predictors are in the same range and are comparable (i.e. having the same units). This is why it is better to do a previous step of data normalization (standardization), for this I am going to use `preprocessing` from `sklearn`
feature_names=DF_tr.columns.drop(['[2]POS','is_valid'])
# +
from sklearn import preprocessing

# Fit the scaler on the imputed dataframe DF_tr. (The original code referenced
# DF_noNA, which is never defined anywhere in this notebook and raised a
# NameError at this point.)
std_scale = preprocessing.StandardScaler().fit(DF_tr[feature_names])
std_array = std_scale.transform(DF_tr[feature_names])
# -
# `preprocessing` returns a NumPy array, so we need to transform to a Pandas data frame:
aDF_std=pd.DataFrame(data=std_array,columns=feature_names)
# Now, let's add the column with the status of the variant (is_valid=0 or is_valid=1) to the normalized data frame
aDF_std.insert(loc=0, column='is_valid', value=DF_tr['is_valid'].values)
# ### Initial exploration of the data
# First, let's examine the number of records having `is_valid=1` (i.e. True positive) and `is_valid=0` (i.e. False Positive)
# Column 0 is `is_valid` because it was inserted at loc=0 above.
aDF_std.iloc[:,0].value_counts()
# Let's examine the distribution of the different predictors depending on the `is_valid` variable.
# #### DP (Raw read depth)
import seaborn as sns
# Column names such as "[3]DP" come straight from the `bcftools query -H` header.
ax = sns.boxplot(x="is_valid", y="[3]DP", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[3]DP': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[3]DP': 'median'})
# Judging by the above boxplots and by the median and mean above, we can see that DP is bigger in real sites but that the false positive set present a higher number of DP outliers
# #### RPB (Mann-Whitney U test of Read Position Bias (bigger is better))
ax = sns.boxplot(x="is_valid", y="[4]RPB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[4]RPB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[4]RPB': 'median'})
# #### MQB (Mann-Whitney U test of Mapping Quality Bias (bigger is better))
ax = sns.boxplot(x="is_valid", y="[5]MQB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[5]MQB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[5]MQB': 'median'})
# #### BQB (Mann-Whitney U test of Base Quality Bias (bigger is better))
ax = sns.boxplot(x="is_valid", y="[6]BQB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[6]BQB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[6]BQB': 'median'})
# #### MQSB-Mann-Whitney U test of Mapping Quality vs Strand Bias (bigger is better)
ax = sns.boxplot(x="is_valid", y="[7]MQSB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[7]MQSB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[7]MQSB': 'median'})
# #### SGB-Segregation based metric
ax = sns.boxplot(x="is_valid", y="[8]SGB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[8]SGB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[8]SGB': 'median'})
# #### MQ0F-Fraction of MQ0 reads (smaller is better)
ax = sns.boxplot(x="is_valid", y="[9]MQ0F", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[9]MQ0F': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[9]MQ0F': 'median'})
# #### ICB- Inbreeding Coefficient Binomial test (bigger is better)
ax = sns.boxplot(x="is_valid", y="[10]ICB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[10]ICB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[10]ICB': 'median'})
# #### HOB - Bias in the number of HOMs number (smaller is better)
ax = sns.boxplot(x="is_valid", y="[11]HOB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[11]HOB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[11]HOB': 'median'})
# #### MQ - Average mapping quality
ax = sns.boxplot(x="is_valid", y="[12]MQ", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[12]MQ': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[12]MQ': 'median'})
# ### ML model
# Let's separate the predictors from the binary output
predictors=aDF_std[feature_names]
# Now, let's create a dataframe with the outcome
outcome=aDF_std[['is_valid']]
# Now, let's split the initial dataset into a training set that will be used to train the model and a test set, which will be used to test the performance of the fitted model
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(predictors, outcome, test_size=0.25, random_state=0)
# By setting the `random_state` option, we ensure that the results are repeatable
# Now, we use a Logistic Regression model to analyse our data:
# +
from sklearn.linear_model import LogisticRegression
logisticRegr = LogisticRegression(verbose=1)
# .values.ravel() flattens the single-column outcome dataframe into the 1-D
# array fit() expects, silencing sklearn's DataConversionWarning.
logisticRegr.fit(x_train, y_train.values.ravel())
# -
# Now, we can check the accuracy of our fitted model by using the `x_test` and comparing with the true outcome in `y_test`
predictions = logisticRegr.predict(x_test)
# We can also estimate the probability of each prediction:
predictions_probs = logisticRegr.predict_proba(x_test)
# The returned probabilities are ordered by the label of classes, this can be checked by doing:
print(logisticRegr.classes_)
score = logisticRegr.score(x_test, y_test)
print(score)
# Now, we can create a confusion matrix in order to assess the accuracy of our predictions:
# +
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
cm = metrics.confusion_matrix(y_test, predictions)
# -
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".0f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score: {0}'.format(score)
plt.title(all_sample_title, size = 15);
# * Conclusion
# It seems that this Logistic Regression classifier has a good performance (score=0.99)
# ### Model persistence
# After checking that the model works, we can save it to a file so we can use it later. For this we will use `Pickle`:
# +
import pickle
filename = 'finalized_model_snps.sav'
# Use a context manager so the file handle is closed (and the pickle fully
# flushed to disk) even if dump() raises; the original left the handle open.
with open(filename, 'wb') as fh:
    pickle.dump(logisticRegr, fh)
# -
# And later, we can reload the model from the file:
with open(filename, 'rb') as fh:
    loaded_model = pickle.load(fh)
# ## INDELs
# We are going to use two different INDELs VCFs, one has all the sites identified both by GIAB and us (`TP.highconf.vcf.gz`) and the other was identified by us and not by GIAB and are considered False Positive sites (`FP.highconf.vcf.gz`). Only the sites falling in the high-confidence regions as defined by GIAB are considered.
# Then we extract the annotations for each of the files by doing:
# + active=""
# bcftools query -H -f '%CHROM\t%POS\t%INFO/DP\t%INFO/IDV\t%INFO/IMF\t%INFO/VDB\t%INFO/SGB\t%INFO/MQSB\t%INFO/MQ0F\t%INFO/ICB\t%INFO/HOB\t%INFO/MQ\n' TP.highconf.vcf.gz > TP_annotations.tsv
#
# bcftools query -H -f '%CHROM\t%POS\t%INFO/DP\t%INFO/IDV\t%INFO/IMF\t%INFO/VDB\t%INFO/SGB\t%INFO/MQSB\t%INFO/MQ0F\t%INFO/ICB\t%INFO/HOB\t%INFO/MQ\n' FP.highconf.vcf.gz > FP_annotations.tsv
# -
# Now, we read-in the annotations in each of the files into a Pandas dataframe
# +
import pandas as pd
import numpy as np
# %matplotlib inline
# bcftools writes '.' for missing annotation values, so map it to NaN on read.
DF_TP=pd.read_csv('/Users/ernesto/SCRATCH/VARIANT_FILTERING/INDELS/TP_annotations.tsv',sep="\t",na_values=['.'])
DF_FP=pd.read_csv('/Users/ernesto/SCRATCH/VARIANT_FILTERING/INDELS/FP_annotations.tsv',sep="\t",na_values=['.'])
# -
# Now we will add a new column named `is_valid` to our two dataframes that will be 1 if the variant is real and will be 0 when the variant is a false positive. This new column will be the dependent binary variable in our classifier
DF_TP=DF_TP.assign(is_valid=1)
DF_FP=DF_FP.assign(is_valid=0)
# And now we combine the two dataframes into a single dataframe:
frames = [DF_TP,DF_FP]
DF = pd.concat(frames)
# Now, let's identify the columns with NA values:
DF.isna().any()
# We see several columns with NA values, and the question is how do we deal with them?
# Well, there are 2 possibilities, to remove them by using the function `dropna()` or to impute them by using an estimator (mean, median). I will impute the missing values using the median calculated on each feature:
# Drop the non-numeric chromosome column before imputation.
DF_num = DF.drop("# [1]CHROM", axis=1)
# +
# sklearn.preprocessing.Imputer was deprecated in scikit-learn 0.20 and
# removed in 0.22; SimpleImputer is its drop-in replacement.
from sklearn.impute import SimpleImputer

# Impute the missing annotation values with the per-column median.
imputer = SimpleImputer(strategy="median")
imputer.fit(DF_num)
X = imputer.transform(DF_num)
# -
# The Numpy array (X) is transformed back to a dataframe:
DF_tr = pd.DataFrame(X, columns=DF.columns.drop(['# [1]CHROM']))
# ### Normalization of the different features
# Some of the machine learning algorithms require that the different predictors are in the same range and are comparable (i.e. having the same units). This is why it is better to do a previous step of data normalization (standardization), for this I am going to use `preprocessing` from `sklearn`
feature_names=DF_tr.columns.drop(['[2]POS','is_valid'])
# +
from sklearn import preprocessing

std_scale = preprocessing.StandardScaler().fit(DF_tr[feature_names])
std_array = std_scale.transform(DF_tr[feature_names])
# -
# `preprocessing` returns a NumPy array, so we need to transform to a Pandas data frame:
aDF_std=pd.DataFrame(data=std_array,columns=feature_names)
# Now, let's add the column with the status of the variant (is_valid=0 or is_valid=1) to the normalized data frame
# Take the labels from DF_tr, the imputed dataframe actually defined above.
# (The original referenced DF_noNA, which does not exist -> NameError.)
aDF_std.insert(loc=0, column='is_valid', value=DF_tr['is_valid'].values)
# ### Initial exploration of the data
# First, let's examine the number of records having `is_valid=1` (i.e. True positive) and `is_valid=0` (i.e. False Positive)
# Column 0 is `is_valid` because it was inserted at loc=0 above.
aDF_std.iloc[:,0].value_counts()
# Let's examine the distribution of the different predictors depending on the `is_valid` variable.
# #### DP (Raw read depth)
import seaborn as sns
# Column names such as "[3]DP" come straight from the `bcftools query -H` header.
ax = sns.boxplot(x="is_valid", y="[3]DP", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[3]DP': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[3]DP': 'median'})
# Judging by the above boxplots and by the median and mean above, we can see that DP is bigger in real sites but that the false positive set present a higher number of DP outliers
# #### IDV (Maximum number of reads supporting an indel)
ax = sns.boxplot(x="is_valid", y="[4]IDV", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[4]IDV': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[4]IDV': 'median'})
# #### IMF (Maximum fraction of reads supporting an indel)
ax = sns.boxplot(x="is_valid", y="[5]IMF", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[5]IMF': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[5]IMF': 'median'})
# #### VDB Variant Distance Bias for filtering splice-site artefacts in RNA-seq data (bigger is better)
ax = sns.boxplot(x="is_valid", y="[6]VDB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[6]VDB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[6]VDB': 'median'})
# #### MQSB-Mann-Whitney U test of Mapping Quality vs Strand Bias (bigger is better)
# Note: in the INDEL query order SGB is field [7] and MQSB is field [8].
ax = sns.boxplot(x="is_valid", y="[8]MQSB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[8]MQSB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[8]MQSB': 'median'})
# #### SGB-Segregation based metric
ax = sns.boxplot(x="is_valid", y="[7]SGB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[7]SGB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[7]SGB': 'median'})
# #### MQ0F-Fraction of MQ0 reads (smaller is better)
ax = sns.boxplot(x="is_valid", y="[9]MQ0F", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[9]MQ0F': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[9]MQ0F': 'median'})
# #### ICB- Inbreeding Coefficient Binomial test (bigger is better)
ax = sns.boxplot(x="is_valid", y="[10]ICB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[10]ICB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[10]ICB': 'median'})
# #### HOB - Bias in the number of HOMs number (smaller is better)
ax = sns.boxplot(x="is_valid", y="[11]HOB", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[11]HOB': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[11]HOB': 'median'})
# #### MQ - Average mapping quality
ax = sns.boxplot(x="is_valid", y="[12]MQ", data=aDF_std)
# * Mean for each category
aDF_std.groupby("is_valid").agg({'[12]MQ': 'mean'})
# * Median for each category
aDF_std.groupby("is_valid").agg({'[12]MQ': 'median'})
# ### ML model
# Let's separate the predictors from the binary output
predictors=aDF_std[feature_names]
# Now, let's create a dataframe with the outcome
outcome=aDF_std[['is_valid']]
# Now, let's split the initial dataset into a training set that will be used to train the model and a test set, which will be used to test the performance of the fitted model
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(predictors, outcome, test_size=0.25, random_state=0)
# By setting the `random_state` option, we ensure that the results are repeatable
# Now, we use a Logistic Regression model to analyse our data:
# +
from sklearn.linear_model import LogisticRegression
logisticRegr = LogisticRegression(verbose=1)
# .values.ravel() flattens the single-column outcome dataframe into the 1-D
# array fit() expects, silencing sklearn's DataConversionWarning.
logisticRegr.fit(x_train, y_train.values.ravel())
# Scale the coefficients by each feature's standard deviation so their
# magnitudes are comparable as rough feature importances:
#print(logisticRegr.coef_)
print(np.std(x_train.values, 0)*logisticRegr.coef_)
# -
# Now, we can check the accuracy of our fitted model by using the `x_test` and comparing with the true outcome in `y_test`
predictions = logisticRegr.predict(x_test)
score = logisticRegr.score(x_test, y_test)
print(score)
# Now, we can create a confusion matrix in order to assess the accuracy of our predictions:
# +
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
cm = metrics.confusion_matrix(y_test, predictions)
# -
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".0f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score: {0}'.format(score)
plt.title(all_sample_title, size = 15);
# * Conclusion
# It seems that this Logistic Regression classifier has a good performance (score=0.93)
# ### Model persistence
# After checking that the model works, we can save it to a file so we can use it later. For this we will use `Pickle`:
# +
import pickle
filename = 'finalized_model_indels.sav'
# Use a context manager so the file handle is closed (and the pickle fully
# flushed to disk) even if dump() raises; the original left the handle open.
with open(filename, 'wb') as fh:
    pickle.dump(logisticRegr, fh)
# -
# And later, we can reload the model from the file:
with open(filename, 'rb') as fh:
    loaded_model = pickle.load(fh)
| notebooks/Variant Filtering of a BCFTools VCF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3-datasci
# language: python
# name: python3-datasci
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# %matplotlib inline
# Tutorial developed from Medium article
# ["Introduction to Supervised Learning with Python: Scikit-Learn Tutorial](https://medium.com/@mwitiderrick/introduction-to-supervised-learning-with-python-scikit-learn-tutorial-966457dd33b9)
# # Loading the Data
#
# Using USA Housing dataset available at
# [Kaggle](https://www.kaggle.com/vedavyasv/usa-housing/data#USA_Housing.csv). Download the data set, unzip
# it, and put it into your data directory (or change path appropriately to read
# the csv file).
# load the data
df = pd.read_csv('../../data/USA_Housing.csv')
display(df.info())
display(df.columns)
display(df.describe())
display(df.head())
sns.pairplot(df);
# Restrict the correlation matrix to numeric columns: 'Address' is free text,
# and pandas >= 2.0 raises a TypeError when corr() meets non-numeric data
# (older pandas silently dropped it). select_dtypes works on every version.
sns.heatmap(df.select_dtypes(include=np.number).corr(), annot=True);
# # Selection of Features
#
# - drop Price, because that is actually the target we wish to predict or model.
# - drop Address, because this is a textual field. If we want to use this
# information in a machine model, we will need to transform it into
# numerical information (we could turn it into a gps coordinate, for example,
# but would this really give us any better information than the existing
# features?)
# selection of features
# Price is the regression target; Address is free text and is dropped rather
# than encoded (see markdown discussion above).
X = df.drop(['Price', 'Address'], axis=1)
display(X.columns)
# the targets for our models will be in y
y = df['Price']
display(y.describe())
# # Create Training and Test Sets
#
# An example of using utility functions from `sklearn` for a simple train/test
# split of the data.
# +
from sklearn.model_selection import train_test_split

# use 30% for the test data, and 70% for the training
# set random_state seed, so we can recreate this exact same train/test
# split if needed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
display(X_train.shape)
display(y_train.shape)
display(X_test.shape)
display(y_test.shape)
# -
# # Create and Train a Linear Regression Model
#
# An example of creating a simple linear regression model using the `sklearn`
# framework.
#
# All basic models in `sklearn` are objects that have the following member
# functions:
#
# - fit() Train model with input data (X) to fit the training data (y)
# - predict() Given a trained model, predict the target value on (possibly new
# and unseen) data.
# +
from sklearn.linear_model import LinearRegression

# create our model instance object
lm = LinearRegression()
# train the model to predict using the training data
lm.fit(X_train, y_train)
# -
# # Linear Regression Model Evaluation
#
# Evaluate how well this model performs.
#
# The coefficients are the learned theta parameters of the linear
# regression model.
# +
display(lm.coef_)
# trick to display the coefficient associated with each feature
# put back into a DataFrame, and specify the column names from X
coeff = pd.DataFrame(lm.coef_, X.columns, columns=['coefficients'])
display(coeff)
# -
# Now we make predictions, using the predict() function. Here we make predictions
# of the held back test data.
# +
predictions = lm.predict(X_test)
# visualize the predictions, perfect predictions would show as
# a perfectly straight line, where each prediction exactly matched
# the true value of y_test.
# x/y must be passed as keywords: seaborn deprecated positional data
# arguments in 0.11 and rejects them in newer releases.
sns.jointplot(x=y_test, y=predictions);
# -
# As we discussed a bit in previous lectures, residuals are important information
# in evaluating the performance of a model. If the residuals are not
# normally distributed, this probably indicates that the model (a linear
# model in this case) is not appropriate for this data.
# histplot(..., kde=True) reproduces distplot's histogram + density overlay;
# sns.distplot was deprecated in seaborn 0.11 and removed in 0.14.
sns.histplot(y_test - predictions, bins=30, kde=True);
# Use evaluation metrics to evaluate the linear regression model.
# +
from sklearn import metrics

print('Mean Absolute Error (MAE): ', metrics.mean_absolute_error(y_test, predictions))
print('Mean Squared Error (MSE): ', metrics.mean_squared_error(y_test, predictions))
print('Root Mean Squared Error (RMSE): ', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# -
# # Create and Train a Logistic Regression Model
#
# The previous dataset was a regression problem (predicting a real valued
# price). So we will need a different dataset for a logistic regression
# example. But, as you will see, the framework of creating, fitting and
# evaluating the model remains the same.
#
# The kyphosis dataset is also available from kaggle. You will need to
# download it and unzip it to the correct location once again in order
# to use it for the next examples.
#
# The kyphosis column in the data set is a binary category (present or absent)
# that is what we want to model and predict for this data.
# +
# read in the data set
kyphosis = pd.read_csv('../../data/kyphosis.csv')
display(kyphosis.info())
display(kyphosis.columns)
display(kyphosis.describe())
display(kyphosis.head())
# +
# select the features for training
# and do a train/test split
# (Kyphosis is the binary target column: present / absent)
X = kyphosis.drop(['Kyphosis'], axis=1)
y = kyphosis['Kyphosis']
# NOTE(review): no random_state here, so this split — and all downstream
# metrics — change on every run; pass random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
display(X_train.shape)
display(y_train.shape)
display(X_test.shape)
display(y_test.shape)
# +
# train a logistic regression model
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
# -
# after fitting model, use predict to predict the labels of test data
k_pred = lr.predict(X_test)
display(k_pred)
# +
# evaluate the model
from sklearn.metrics import classification_report, confusion_matrix
display(confusion_matrix(y_test, k_pred))
# -
print(classification_report(y_test, k_pred))
# # Decision Tree and Random Forest Models
# +
# train and fit a decision tree model
# (uses the same kyphosis train/test split created above)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
# +
# evaluate the model
dt_pred = dt.predict(X_test)
display(confusion_matrix(y_test, dt_pred))
print(classification_report(y_test, dt_pred))
# +
# train and fit a random forest
# n_estimators=100 bags 100 decision trees
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)
# +
# evaluate the model
rfc_pred = rfc.predict(X_test)
display(confusion_matrix(y_test, rfc_pred))
print(classification_report(y_test, rfc_pred))
# +
import sys
sys.path.append("../../src") # add our class modules to the system PYTHON_PATH
from ml_python_class.custom_funcs import version_information
version_information()
| lectures/archive/Lecture-scikit-learn-ml-framework-archive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Seminar cells contrasting Python list operations with NumPy array
# operations. Some cells are deliberate counter-examples that raise.
import numpy as np
a = np.array([1, 2, 3])
b = np.array([3, 4, 5])
a_list = [1, 2, 3]
b_list = [3, 4, 5]
# element-wise arithmetic after converting lists to arrays
np.array(a_list)*2 + np.array(b_list)
# the pure-Python equivalent (a, b here are comprehension-local names)
[2*a+b for a,b in zip(a_list, b_list)]
# counter-example: raises TypeError — sum() starts from 0 and 0 + list fails
sum([a_list, a_list, b_list])
# a list + an array broadcasts element-wise into an array
list(map(lambda x: x*2, a_list)) + np.array(b_list)
# list + list is concatenation, not element-wise addition
a_list + b_list
np.concatenate([a_list, b_list])
# counter-example: raises — the second positional argument is `axis`,
# so the two sequences must be wrapped in one list/tuple (next line)
np.concatenate(a_list, b_list)
np.concatenate((a_list, b_list))
list(zip(a_list, b_list))
# stacks the two lists as rows of a 2x3 array
np.stack([a_list, b_list])
| module_3/Module_suppl_notebooks/math1_seminar_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class DoublyLinkedListNode():
    """A node of a doubly linked list.

    Holds a payload plus references to the neighbouring nodes; both links
    start as None until the caller wires nodes together.
    """

    def __init__(self, value):
        self.value = value      # payload stored in this node
        self.next_node = None   # following node (None => this is the tail)
        self.prev_node = None   # preceding node (None => this is the head)

    def __repr__(self):
        # Added for debuggability: printing a node previously showed only the
        # uninformative default object repr.
        return f"DoublyLinkedListNode({self.value!r})"
# Build a three-node chain a <-> b <-> c by wiring the links by hand.
a = DoublyLinkedListNode(1)
b = DoublyLinkedListNode(2)
c = DoublyLinkedListNode(3)
a.next_node = b
b.prev_node = a
b.next_node = c
c.prev_node = b
# a is the head, so its prev link is still None — this prints "None"
print(a.prev_node)
# traverse forward from a: value of b
a.next_node.value
# traverse backward from b: value of a
b.prev_node.value
| 82. Doubly Linked List Implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice notebook for regression analysis with NHANES
#
# This notebook will give you the opportunity to perform some
# regression analyses with the NHANES data that are similar to
# the analyses done in the week 2 case study notebook.
#
# You can enter your code into the cells that say "enter your code here",
# and you can type responses to the questions into the cells that say "Type Markdown and Latex".
#
# Note that most of the code that you will need to write below is very similar
# to code that appears in the case study notebook. You will need
# to edit code from that notebook in small ways to adapt it to the
# prompts below.
#
# To get started, we will use the same module imports and
# read the data in the same way as we did in the case study:
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import statsmodels.api as sm
import numpy as np
# Load the merged NHANES 2015-2016 data directly from GitHub.
url = "https://raw.githubusercontent.com/kshedden/statswpy/master/NHANES/merged/nhanes_2015_2016.csv"
da = pd.read_csv(url)
# Drop unused columns, drop rows with any missing values.
# NOTE(review): `vars` shadows the built-in vars(); consider renaming (e.g. keep_cols).
vars = ["BPXSY1", "RIDAGEYR", "RIAGENDR", "RIDRETH1", "DMDEDUC2", "BMXBMI", "SMQ020"]
da = da[vars].dropna()
# -
# ## Question 1:
#
# Use linear regression to relate the expected body mass index (BMI) to a person's age.
# +
# enter your code here
# -
# __Q1a.__ According to your fitted model, do older people tend to have higher or lower BMI than younger people?
#
# __Q1b.__ Based on your analysis, are you confident that there is a relationship between BMI and age in the population that NHANES represents?
#
# __Q1c.__ By how much does the average BMI of a 40 year old differ from the average BMI of a 20 year old?
#
# __Q1d.__ What fraction of the variation of BMI in this population is explained by age?
#
# ## Question 2:
#
# Add gender and ethnicity as additional control variables to your linear model relating BMI to age. You will need to recode the ethnic groups based
# on the values in the codebook entry for [RIDRETH1](https://wwwn.cdc.gov/Nchs/Nhanes/2015-2016/DEMO_I.htm#RIDRETH1).
# +
# enter your code here
# -
# __Q2a.__ How did the mean relationship between BMI and age change when you added additional covariates to the model?
#
# __Q2b.__ How did the standard error for the regression parameter for age change when you added additional covariates to the model?
#
# __Q2c.__ How much additional variation in BMI is explained by age, gender, and ethnicity that is not explained by age alone?
#
# __Q2d.__ What reference level did the software select for the ethnicity variable?
#
# __Q2e.__ What is the expected difference between the BMI of a 40 year-old non-Hispanic black man and a 30 year-old non-Hispanic black man?
#
# __Q2f.__ What is the expected difference between the BMI of a 50 year-old Mexican American woman and a 50 year-old non-Hispanic black man?
#
# ## Question 3:
#
# Randomly sample 25% of the NHANES data, then fit the same model you used in question 2 to this data set.
# +
# enter your code here
# -
# __Q3a.__ How do the estimated regression coefficients and their standard errors compare between these two models? Do you see any systematic relationship between the two sets of results?
#
# ## Question 4:
#
# Generate a scatterplot of the residuals against the fitted values for the model you fit in question 2.
# +
# enter your code here
# -
# __Q4a.__ What mean/variance relationship do you see?
#
# ## Question 5:
#
# Generate a plot showing the fitted mean BMI as a function of age for Mexican American men. Include a 95% simultaneous confidence band on your graph.
# +
# enter your code here
# -
# __Q5a.__ According to your graph, what is the longest interval starting at year 30 following which the mean BMI could be constant? *Hint:* What is the longest horizontal line starting at age 30 that remains within the confidence band?
#
# __Q5b.__ Add an additional line and confidence band to the same plot, showing the relationship between age and BMI for Mexican American women. At what ages do these intervals not overlap?
#
# ## Question 6:
#
# Use an added variable plot to assess the linearity of the relationship between BMI and age (when controlling for gender and ethnicity).
# +
# enter your code here
# -
# __Q6a.__ What is your interpretation of the added variable plot?
#
# ## Question 7:
#
# Generate a binary variable reflecting whether a person has had at least 12 drinks in their lifetime, based on the [ALQ110](https://wwwn.cdc.gov/Nchs/Nhanes/2015-2016/ALQ_I.htm#ALQ110) variable in NHANES. Calculate the marginal probability, odds, and log odds of this variable for women and for men. Then calculate the odds ratio for females relative to males.
# +
# enter your code here
# -
# __Q7a.__ Based on the log odds alone, do more than 50% of women drink alcohol?
#
# __Q7b.__ Does there appear to be an important difference between the alcohol use rate of women and men?
#
# ## Question 8:
#
# Use logistic regression to express the log odds that a person drinks (based on the binary drinking variable that you constructed above) in terms of gender.
# +
# enter your code here
# -
# __Q8a.__ Is there statistical evidence that the drinking rate differs between women and men? If so, in what direction is there a difference?
#
# __Q8b.__ Confirm that the log odds ratio between drinking and gender calculated using the logistic regression model matches the log odds ratio calculated directly in question 7.
#
# ## Question 9:
#
# Use logistic regression to relate drinking to age, gender, and education.
# +
# enter your code here
# -
# __Q9a.__ Which of these predictor variables shows a statistically significant association with drinking?
#
# __Q9b.__ What is the odds of a college educated, 50 year old woman drinking?
#
# __Q9c.__ What is the odds ratio between the drinking status for college graduates and high school graduates (with no college), holding gender and age fixed?
#
# __Q9d.__ Did the regression parameter for gender change to a meaningful degree when age and education were added to the model?
#
# ## Question 10:
#
# Construct a CERES plot for the relationship between drinking and age (using the model that controls for gender and educational attainment).
# +
# enter your code here
# -
# __Q10a.__ Does the plot indicate any major non-linearity in the relationship between age and the log odds for drinking?
| Fitting_Statistical_Models_to_Data_with_Python/week2/week2_nhanes_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] hide_input=false
# # igv variant classifier
# + hide_input=false
from fastai.vision import *
from fastai import *
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# -
path = Path('/projects/da_workspace/szspace/data/giab/image2')
classes = ['positives','negatives']
# +
# for c in classes:
# print(c)
# verify_images(path/c, delete=True, max_size=293) # max_size will change the image size on disk
# -
# ## View data
doc(get_transforms)
# somehow, the preprocessing gives 500x425 images
# this is resolved by setting dpi=1, and compensating for lost pixels to generate 293x293 pixel images
# # figure out my own stats
# imagenet_stats
# normalize?
#
# I think it is not appropriate to use imagenet_stats to normalize this hand-made images
# I can just leave the normalize() parameter empty so that it will grab a batch of random pictures and decide the mean and std for normalization
# +
# # no need to figure out mean and std myself, if i dont use pretrained model.
# train = Path('/projects/da_workspace/szspace/data/giab/image2/positives')
# images = (plt.imread(str(i)) for i in list(train.iterdir()) if plt.imread(str(i)).shape == (293, 293, 3))
# # # for some reason some image end up with different size
# # # print(images)
# images = np.stack(images) # this takes time
# means = np.mean(images, axis=(0, 1, 2))
# stds = np.std(images, axis=(0, 1, 2))
# means, stds
# -
il = ImageImageList.from_folder(path)
il.items[0]
il[0]
path
# +
# get_transforms?
# -
np.random.seed(42)
data = ImageDataBunch.from_folder(path,
train=".",
valid_pct=0.2,
ds_tfms=get_transforms(do_flip=False,
flip_vert=False,
max_rotate=0,
max_zoom=0,
max_warp=0,
max_lighting=0,
p_affine=0,
p_lighting=0 ),
size=293,
bs=32,
num_workers=4).normalize(imagenet_stats)
# +
# normalized to this stats calculated by randomly grab a batch
data.stats
# -
data
# ### add test data set
# +
fd = '/projects/da_workspace/szspace/data/muts/positives'
# fd = '/projects/da_workspace/szspace/data/muts/no'
tl = ImageList.from_folder(fd)
data.add_test(tl)
# -
data
# Good! Let's take a look at some of our pictures then.
data.classes
data.c
# +
# data.show_batch?
# -
# ### due to cropping, lost many information surrounding the mutation, don't have much control over the screen shots. better reconstruct the image myself by pulling out alignment information using samtools.
data.show_batch(rows=3, ds_type=DatasetType.Train, figsize=(7,8))
# plt.savefig('test.png')
data.classes, data.c, len(data.train_ds), len(data.valid_ds)
plt.imshow(image2np(data.train_ds[0][0].data))
image2np(data.train_ds[0][0].data).shape
# one channel, (293, 293)
plt.imshow(image2np(data.train_ds[0][0].data)[:, :, 0])
plt.imshow(image2np(data.train_ds[0][0].data)[:, :, 1])
plt.imshow(image2np(data.train_ds[0][0].data)[:, :, 2])
# ## Train model
# +
# cnn_learner?
# -
learn = cnn_learner(data, models.resnet18, metrics=accuracy, pretrained=False)
learn.fit_one_cycle(4, max_lr=0.003)
learn.save('stage-1')
learn.unfreeze()
# !nvidia-smi
# # !kill 18136
learn.lr_find()
learn.recorder.plot()
# +
# If the plot is not showing try to give a start and end learning rate# learn.lr_find(start_lr=1e-5, end_lr=1e-1)learn.recorder.plot()
# -
learn.fit_one_cycle(2, max_lr=slice(1e-6,5e-6))
learn.save('stage-2')
# ## Interpretation
learn.load('stage-2');
data.test_ds
doc(learn.pred_batch)
# +
# learn.pred_batch?
# -
data.classes
data.train_ds
data.test_ds
doc(learn.pred_batch)
learn.data.test_ds[18][0]; learn.data.test_ds[19][0]
# +
fd = '/projects/da_workspace/szspace/data/muts/positives'
fd = '/projects/da_workspace/szspace/data/muts/negatives'
tl = ImageList.from_folder(fd)
data.add_test(tl)
# -
# specify which data set you want to predict
preds = learn.pred_batch(ds_type=DatasetType.Test)
# learn.pred_batch(ds_type=DatasetType.Valid)
# learn.pred_batch() # default is valid data set
preds.shape
idx = torch.tensor([0])
# number of wrong predicitons for the positive images
(torch.index_select(preds, 1, idx) > torch.index_select(preds, 1, torch.tensor([1]))).sum()
# +
# learn.predict?
# -
data
for i in range(21):
item = learn.data.test_ds[i][0]
batch = learn.data.one_item(item)
pred = learn.pred_batch(batch=batch)
print(pred)
path
learn.export('/projects/da_workspace/szspace/data/giab/images/models/stage-2.pkl')
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
of = '/home/szong/giab_validation_confusion_matrix.png'
plt.savefig(of, bbox_inches='tight', dpi=150)
interp.plot_top_losses(2, heatmap=True) # heatmap show which part of the image contribute to the prediction
# # predict
ipath = Path('/projects/da_workspace/szspace/data/muts/yes')
# img = open_image(ipath/'A78434_11_48894652_48894852.png')
# img = open_image(ipath/'A78437_19_41608695_41608895.png')
pred_class,pred_idx,outputs = learn.predict(img)
pred_class, pred_class.data
pred_class.data
f = '/projects/da_workspace/szspace/data/muts/negatives/negs_imgs.txt'
ipath = Path('/projects/da_workspace/szspace/data/muts/negatives/')
imgs = pd.read_csv(f, header=None)[0].tolist()
# Classify every image listed in the file and tally the predicted classes.
pc = 0  # count of images predicted as class 1
nc = 0  # count of images predicted as class 0
for img in imgs:
    img = open_image(ipath/img)
    pred_class, pred_idx, outputs = learn.predict(img)
    # print(pred_class.data)
    if pred_class.data == 1: pc += 1
    elif pred_class.data == 0: nc += 1
    else: print('Error!')
pc
nc
# can i create a databunch here?
path = Path('/projects/da_workspace/szspace/data/muts/no')
# data = ImageDataBunch.from_folder(path,
# train=".",
# valid_pct=0.2,
# ds_tfms=get_transforms(do_flip=False,max_rotate=None, max_zoom=1., max_warp=None,
# max_lighting=None,p_affine=0,p_lighting=0 ),
# size=512, bs=32,
# num_workers=4).normalize(imagenet_stats)
path = Path('/projects/da_workspace/szspace/data/muts')
data = ImageDataBunch.from_folder(path, test='.')
# +
# load_learner?
# -
load_learner('/projects/da_workspace/szspace/data/giab/images/models', 'stage-2.pkl', test)
# # train with 256x256 size image
learn.data
learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4))
import PIL
f = '/projects/da_workspace/szspace/data/giab/images/negatives/chr9_86,282,732_86,283,032.png'
PIL.Image.open(f)
# ## Putting your model in production
# First thing first, let's export the content of our `Learner` object for production:
learn.export()
# This will create a file named 'export.pkl' in the directory where we were working that contains everything we need to deploy our model (the model, the weights but also some metadata like the classes or the transforms/normalization used).
# You probably want to use CPU for inference, except at massive scale (and you almost certainly don't need to train in real-time). If you don't have a GPU that happens automatically. You can test your model on CPU like so:
defaults.device = torch.device('cpu')
# +
# img = open_image(path/'black'/'00000021.jpg')
# img
# +
learn = load_learner(path)
pred_class,pred_idx,outputs = learn.predict(img)
pred_class
| nbs/dl1/igvReader_20190822.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# 화면 출력
# + slideshow={"slide_type": "-"}
print(1)
print('A')
print('Number:', 1)
# -
# 대소문자 구분
A = '변수1'
a = '변수2'
print('A:', A)
print('a:', a)
# + [markdown] slideshow={"slide_type": "slide"}
# 들여쓰기(indentation)에 민감
# + slideshow={"slide_type": "-"}
print(1)
print(2)
# + slideshow={"slide_type": "-"}
print(1)
print(2)
# + [markdown] slideshow={"slide_type": "slide"}
# 줄 바꿈
# + slideshow={"slide_type": "-"}
B = '문장을 만들어볼까요?'
b = '긴 문장을 ' + \
'한줄로 만들어볼까요?'
print(B)
print(b)
# + [markdown] slideshow={"slide_type": "slide"}
# 주석 (설명)
# -
# 이것은 한 줄 주석 입니다.
p = 1 # 이 줄에서는 여기서부터가 주석입니다.
'''
삼중 따옴표 사이에 들어와도 다 주석입니다.
여러 줄 주석을 입력할 때 사용합니다.
'''
"""
p = 100
주석은 파이썬에 입력되지 않기 때문에 출력되지 않습니다.
"""
print(p) # p 값을 출력하는 부분은 주석 앞에 있습니다.
# + [markdown] slideshow={"slide_type": "slide"}
# 여러가지 변수형
# + slideshow={"slide_type": "-"}
i = 1
type(i)
# -
f = 1.0
type(f)
i = float(i) # 정수형을 소수형으로 변환
type(i)
# + [markdown] slideshow={"slide_type": "slide"}
# 문자형 변수
# + slideshow={"slide_type": "-"}
s = '나는 문자'
s2 = '''나도
문자
'''
type(s)
# -
print(s2)
# + [markdown] slideshow={"slide_type": "slide"}
# 연산
# + slideshow={"slide_type": "-"}
x = 3
print(x)
# -
x + x
x * 2
x ** 2 # x*x 도 같은 표현입니다.
x ** 3 # 거듭제곱은 엑셀에서는 x^3 로 표현됩니다.
# + [markdown] slideshow={"slide_type": "slide"}
# 문자열 다루기
# + slideshow={"slide_type": "-"}
# 문자끼리 더하면 어떻게 될까?
w1 = 'Py'
w2 = 'thon'
word = w1 + w2 # w1과 w2를 합하기
word
# -
word[0:2] # 0부터 2까지
word[1:4] # 1부터 4까지
# + slideshow={"slide_type": "-"}
word[:4] # 처음부터 4까지
# -
word[4:] # 4부터 끝까지
# + [markdown] slideshow={"slide_type": "slide"}
# 리스트(list) 자료형
# + slideshow={"slide_type": "-"}
prices = [100, 200, 300, 400, 500]
prices
# -
# 리스트 일부분 잘라내기(slice)
prices[0:2]
prices[3] # 첫 값은 0이므로 3을 지정하면 네번째 값이 나옵니다.
# + slideshow={"slide_type": "slide"}
# 리스트에 값 추가
prices.append(600) # append 할 때는 []이 아니고 ()를 사용합니다.
prices
# -
# 리스트 내 값의 갯수
len(prices)
# 리스트에서 값 제거
prices.remove(400) # remove 할 때는 []이 아니고 ()를 사용합니다.
prices
len(prices)
# + slideshow={"slide_type": "slide"}
# 리스트에 여러개 값 추가
prices.extend([700, 200]) # extend는 () 안에 []를 넣어줍니다.
prices
# -
len(prices)
# + [markdown] slideshow={"slide_type": "slide"}
# 집합(set) 자료형
# + slideshow={"slide_type": "-"}
A = {1, 2, 3, 4, 5}
B = {4, 5, 6, 7, 8}
type(A)
# -
A | B # 합집합. A.union(B) 도 가능
A & B # 교집합. A.intersection(B) 도 가능
A - B # 차집합
# + [markdown] slideshow={"slide_type": "slide"}
# 튜플(tuple) 자료형
# + slideshow={"slide_type": "-"}
t = (1, 2, 3)
type(t)
# + [markdown] slideshow={"slide_type": "slide"}
# 딕셔너리(dictionary) 자료형
# + slideshow={"slide_type": "-"}
d = {'Kim':123, 'Lee':456}
type(d)
# -
d['Choi'] = 789 # 딕셔너리에 데이터 추가
d
del d['Lee'] # 데이터 삭제
d
d['Park'] = 134
len(d)
# + slideshow={"slide_type": "slide"}
d.items()
# -
d.keys()
d.values()
# + [markdown] slideshow={"slide_type": "slide"}
# 순환문 for
# + slideshow={"slide_type": "-"}
prices # prices에 뭐가 담겼는지 확인 한번 해보고
# -
for p in prices: # prices 에 담긴 내용을 변수 p에 할당해서 루프를 돌림
print('가격은 ', p, ' 입니다.')
# + [markdown] slideshow={"slide_type": "slide"}
# 딕셔너리 내용을 순환문으로 출력해보자
# -
for keys, values in d.items():
print(keys, values)
# + [markdown] slideshow={"slide_type": "slide"}
# 조건문 if
# + slideshow={"slide_type": "-"}
p = int(input())
'''
input()으로 값을 입력받음
int는 입력받은 값을 정수형으로 변환
값을 정수형으로 변환해야 숫자로 인식해서 if문을 통해 대소 비교가 가능
'''
if p < 10000: # 10000원보다 낮으면
print('매수')
elif p > 10000: # 10000원보다 높으면
print('매도')
else: # 위 조건이 모두 틀리다면
print('중립')
# + [markdown] slideshow={"slide_type": "slide"}
# 함수(definition)
# + slideshow={"slide_type": "-"}
def f(x):
    """Return the successor of x, i.e. x + 1."""
    # x is the argument passed in when the function is called
    return x + 1
# -
f(1) # 함수를 호출하고 1을 인자로 전달해 결과값을 달라고 요청한다
f(f(1)) # f°f
# + [markdown] slideshow={"slide_type": "slide"}
# 외부모듈 사용
# -
# 외부 라이브러리 호출
import finterstellar
import finterstellar as fs
from finterstellar import LoadData
# + [markdown] slideshow={"slide_type": "slide"}
# 외부 라이브러리의 함수를 내 함수처럼 사용하기
# -
data = fs.LoadData() # 라이브러리를 data라고 지정
df = data.read_investing_price('./data/', 'KOSPI 200')
df.head()
coin = fs.CoinPrice()
coin.bithumb_current_price('BTC')
| w1/w1-02 Python basics.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
#
# ## A Gentle Introduction to use Auto Encoders for Anomaly Detection
# #### Author <NAME>
# Date: 1/9/2019
#
# #### Citation Info
# If you are using this for your research, please use the following for citation.
#
# Amruthnath, Nagdev, and <NAME>. "A research study on unsupervised machine learning algorithms for early fault detection in predictive maintenance." In 2018 5th International Conference on Industrial Engineering and Applications (ICIEA), pp. 355-361. IEEE, 2018.
#
# #### Disclaimer
# This is a tutorial for performing fault detection using machine learning. You this code at your own risk. I do not gurantee that this would work as shown below. If you have any suggestions please branch this project.
#
# ### Load Libraries
# +
options(warn=-1)
# load libraries
library(dplyr)
library(h2o)
# -
#
# ### Load data
# Here we are using data from a bench press. There are total of four different states in this machine and they are split into four different csv files. We need to load the data first. In the data time represents the time between samples, ax is the acceleration on x axis, ay is the acceleration on y axis, az is the acceleration on z axis and at is the G's. The data was collected at sample rate of 100hz.
#
# Four different states of the machine were collected
#
# 1. Nothing attached to drill press
# 2. Wooden base attached to drill press
# 3. Imbalance created by adding weight to one end of wooden base
# 4. Imbalance created by adding weight to two ends of wooden base.
setwd("/home/")
#read csv files
file1 = read.csv("dry run.csv", sep=",", header =T)
file2 = read.csv("base.csv", sep=",", header =T)
file3 = read.csv("imbalance 1.csv", sep=",", header =T)
file4 = read.csv("imbalance 2.csv", sep=",", header =T)
head(file1)
# We can look at the summary of each file using summary function in R. Below, we can observe that 66 seconds long data is available. We also have min, max and mean for each of the variables.
# summary of each file
summary(file2)
# ### Data Aggregration and feature extraction
# Here, the data is aggregated into 1-second windows (the time stamp is rounded to the nearest second) and summary features are extracted. Feature extraction reduces the dimension of the data while storing only a representative summary of each window.
# +
# Bucket every sample into a 1-second window by rounding its time stamp;
# the window id becomes the grouping factor for feature extraction.
file1$group = as.factor(round(file1$time))
file2$group = as.factor(round(file2$time))
file3$group = as.factor(round(file3$time))
file4$group = as.factor(round(file4$time))
#(file1,20)
#list of all files
files = list(file1, file2, file3, file4)
#loop through all files and combine
# For each machine state, compute per-window summary statistics
# (mean / sd / min / max / median) of every accelerometer channel,
# then stack the four states row-wise into one feature table.
# NOTE(review): columns ay_may, az_maz and aT_maT hold max() values; the
# names look like typos for ay_max / az_max / aT_max — confirm before renaming.
features = NULL
for (i in 1:4){
  res = files[[i]] %>%
    group_by(group) %>%
    summarize(ax_mean = mean(ax),
              ax_sd = sd(ax),
              ax_min = min(ax),
              ax_max = max(ax),
              ax_median = median(ax),
              ay_mean = mean(ay),
              ay_sd = sd(ay),
              ay_min = min(ay),
              ay_may = max(ay),
              ay_median = median(ay),
              az_mean = mean(az),
              az_sd = sd(az),
              az_min = min(az),
              az_maz = max(az),
              az_median = median(az),
              aT_mean = mean(aT),
              aT_sd = sd(aT),
              aT_min = min(aT),
              aT_maT = max(aT),
              aT_median = median(aT)
              )
  features = rbind(features, res)  # grows by copy each pass; fine for 4 files
}
#view all features
head(features)
# -
# ### Create Train and Test Set
# To build an anomaly detection model, a train and test set is required. Here, the normal condition of the data is used for training and remaining is used for testing.
# create train and test set
train = features[1:67,2:ncol(features)]
test = features[68:nrow(features),2:ncol(features)]
# ### Auto Encoders
# Autoencoders is an unsupervised version of neural network that is used for data encoding. This technique is mainly used to learn the representation of data that can be used for dimensionality reduction by training network to ignore noise. Autoencoders play an important role in unsupervised learning and deep architectures mainly for transfer learning (<NAME>, 2012). When autoencoders are decoded, they are simple linear circuits that transforms inputs to outputs with least distortion. Autoencoders were first introduced in 1980’s to address the issue of back propagation without training and rather use input as a teacher (Rumelhart et al., 1986). Since then, autoencoders have taken a phase change to the form on Restricted Boltzman Machine. Today, autoencoders are used in various applications such as predicting sentiment distributions in Natural Language Processing (NLP) (Socher et al., 2011a) (Socher et al., 2011b), feature extraction (Masci et al., 2011), anomaly detection (Sakurada et al., 2014), facial recognition (Gao et al., 2015), clustering (Dilokthanakul et al., 2016), image classification (Geng et al., 2015) and many other application.
#
# #### Auto Encoders using H2O package
# Use the h2o.init() method to initialize H2O. This method accepts the following options. Note that in most cases, simply using h2o.init() is all that a user is required to do.
# initialize h2o cluser
h2o.init()
# The R object to be converted to an H2O object should be named so that it can be used in subsequent analysis. Also note that the R object is converted to a parsed H2O data object, and will be treated as a data frame by H2O in subsequent analysis.
# convert train and test to h2o object
train_h2o = as.h2o(train)
test_h2o = as.h2o(test)
# The h2o.deeplearning function fits H2O's Deep Learning models from within R. While H2O Deep Learning has many parameters, it was designed to be just as easy to use as the other supervised training methods in H2O. Early stopping, automatic data standardization and handling of categorical variables and missing values and adaptive learning rates (per weight) reduce the amount of parameters the user has to specify. Often, it's just the number and sizes of hidden layers, the number of epochs and the activation function and maybe some regularization techniques.
# +
# build auto encoder model with 3 layers
model_unsup = h2o.deeplearning(x = 2:ncol(features)
, training_frame = train_h2o
, model_id = "Test01"
, autoencoder = TRUE
, reproducible = TRUE
, ignore_const_cols = FALSE
, seed = 42
, hidden = c(50,10,50,100,100)
, epochs = 100
, activation ="Tanh")
# -
# view the model
model_unsup
# Detect anomalies in an H2O dataset using an H2O deep learning model with auto-encoding trained previously.
# +
# now we need to calculate MSE or anomaly score
anmlt = h2o.anomaly(model_unsup
, train_h2o
, per_feature = FALSE) %>% as.data.frame()
# create a label for healthy data
anmlt$y = 0
# view top data
head(anmlt)
# -
# Calulate the threshold value for train anomaly scores. Various methods can be used such as calculating the quantiles, max, median, min etc. It all depends on the use case. Here we will use quantile with probability of 99.9%.
# calculate thresholds from train data
threshold = quantile(anmlt$Reconstruction.MSE, probs = 0.999)
# Now, we have anomaly score for train and its thresholds, we can predict the new anomaly scores for test data and plot it to see how it differs from train data.
# +
# calculate anomaly scores for test data
test_anmlt = h2o.anomaly(model_unsup
, test_h2o
, per_feature = FALSE) %>% as.data.frame()
# create a label for healthy data
test_anmlt$y = 1
# -
# combine the train and test anomaly scores for visualization
results = data.frame(rbind(anmlt,test_anmlt), threshold)
head(results)
# The results are plotted below. The x axis is the observations and y axis is the anomaly score. The green points are the trained data and red are test data. We can note that all the data that was trained except one lied below the anomaly limit. Its also interesting to note the increasing trend pattern for the anomaly scores for other state of the machine.
# Adjust plot sizes
options(repr.plot.width = 15, repr.plot.height = 6)
plot(results$Reconstruction.MSE, type = 'n', xlab='observations', ylab='Reconstruction.MSE', main = "Anomaly Detection Results")
points(results$Reconstruction.MSE, pch=19, col=ifelse(results$Reconstruction.MSE < threshold, "green", "red"))
abline(h=threshold, col='red', lwd=2)
# ### Session info
# Below is the session info for the the packages and their versions used in this analysis.
sessionInfo()
| Anomaly Detection/Predicve Maintenance - Auto Encoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# language: python
# name: python38364bitbasecondadbbbc0d6687a4c64a122e31974242c3a
# ---
# %matplotlib inline
# +
from __future__ import division
import networkx as nx
import random
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pickle as pl
import sys
from pandas import Series, DataFrame
import pandas as pd
# cimport numpy as np
# +
##### Parameters
G_i = 2 # 1 for Friendster, 2 for Les miserables network
if G_i == 1:
tour_N = 25 #5
tour_c = 0
deg_def = [50]
flag_fn = 1 # 1 for g(v) = d(v) > 25, 4 for avg clustering coeff
if G_i == 2:
tour_N = 10 #5
tour_c = 0
deg_def = [10,4]
flag_fn = 2 # 1 for g(v) = d(v) > 10, 2 for g(v) = d(v) < 4, 3 for avg degree, 4 for avg clustering coeff
asy_var_MH_lesmis = [4.4154,14.93508,1.204e+03]
asy_var_RDS_lesmis = [1.1084,3.3240,272.7649]
max_B = 10000 # maximum budget
no_runs = 100 # no. of runs to average
MH_do = 1 # Flag for doing MH simulation
RDS_do = 1 # Flag for doing RDS simulation
RDSRR_do = 1
MHRR_do = 1
SHOW_VAR_CONVG = 0
ABS_ERROR = 1
# -
def node_fn(node):
    """Evaluate the node function g(v) selected by the module-level ``flag_fn``.

    flag_fn == 1: indicator of degree > deg_def[0]
    flag_fn == 2: indicator of degree < deg_def[1]
    flag_fn == 3: the node's degree (for average-degree estimation)
    flag_fn == 4: the node's local clustering coefficient

    Relies on the module-level globals ``G``, ``flag_fn`` and ``deg_def``.
    Returns an int or float; aborts the process on an unknown ``flag_fn``.
    """
    deg = G.degree(node)  # every branch needs the degree; compute it once
    if flag_fn == 1:
        return int(deg > deg_def[0])
    if flag_fn == 2:
        return int(deg < deg_def[1])
    if flag_fn == 3:
        return deg
    if flag_fn == 4:
        if deg < 2:
            # Fewer than two neighbours: clustering coefficient is defined as 0.
            return 0
        return 2 * nx.triangles(G, node) / (deg * (deg - 1))
    # Unknown selector: abort with a clear message and a NON-ZERO exit status.
    # (The original printed a garbled message and exited with status 0, which
    # signals success to the shell.)
    sys.exit(f"node_fn: undefined function flag flag_fn={flag_fn}")
def MH_sampling(G,B):
    """Metropolis-Hastings random-walk estimator.

    Performs a B-step Metropolis-Hastings walk on G whose stationary
    distribution is uniform over the nodes (proposal: uniform random
    neighbour; acceptance probability d(current)/d(proposal)), and returns
    the running estimates of the population mean of node_fn.

    Returns a length-B numpy array; entry t is the estimate after t+1 steps.
    """
    est_MH= []        # running estimates, one per step
    est_MH_t = 0      # running sum of node_fn over visited samples
    sample = np.random.choice(G.nodes())  # uniform initial node
    est_MH_t += node_fn(sample)
    est_MH.append(est_MH_t)
    for ii in range(2,B+1):
        # print "MH_sample: ",ii
        neighbors = list(nx.neighbors(G,sample))
        sample_t = np.random.choice(neighbors)
        # MH acceptance for a uniform target: move with prob min(1, d(u)/d(v)).
        if np.random.rand() <= (G.degree(sample)/G.degree(sample_t)):
            sample = sample_t
        # On rejection the walk stays put and the current node is counted again.
        est_MH_t += node_fn(sample)
        est_MH.append(est_MH_t/ii)
    return np.array(est_MH)
def RDS_sampling(G,B):
    """Respondent-driven-sampling (simple random walk) estimator.

    Runs a B-step simple random walk and corrects for the degree-biased
    stationary distribution with importance weights 1/d(v), i.e. the
    self-normalised estimate sum(f(v)/d(v)) / sum(1/d(v)).

    Returns a length-B numpy array of running estimates.
    """
    est_RW = []       # running estimates
    est_RW_t1 = 0     # running sum of node_fn(v)/d(v)
    est_RW_t2 = 0     # running sum of 1/d(v) (normaliser)
    sample = np.random.choice(G.nodes())  # uniform initial node
    deg_pr_sent = G.degree(sample)
    est_RW_t1 += node_fn(sample)/deg_pr_sent
    est_RW_t2 += 1/deg_pr_sent
    est_RW.append(est_RW_t1/est_RW_t2)
    for ii in range(2,B+1):
        # print "rds_sample: ",ii
        neighbors = list(nx.neighbors(G,sample))
        sample = random.choice(neighbors)  # unweighted random-walk step
        deg_pr_sent = G.degree(sample)
        est_RW_t1 += node_fn(sample)/deg_pr_sent
        est_RW_t2 += 1/deg_pr_sent
        est_RW.append(est_RW_t1/est_RW_t2)
    return np.array(est_RW)
def RDSRR_sampling(G,B):
    """Random walk with restarts + RDS (importance-weighted) estimator.

    Identical to RDS_sampling except that at the perfect-square step indices
    below sqrt(B)^2 (steps 1, 4, 9, ...) the walk restarts from a uniformly
    chosen node instead of moving to a neighbour.

    Returns a length-B numpy array of running estimates.
    """
    M = np.sqrt(B)
    # Restart at the perfect-square step indices. A set gives O(1) membership
    # tests instead of scanning a numpy array on every iteration.
    restart_ind = set(np.square(np.arange(1, M)).tolist())
    est_RW = []       # running estimates
    est_RW_t1 = 0     # running sum of node_fn(v)/d(v)
    est_RW_t2 = 0     # running sum of 1/d(v) (normaliser)
    sample = np.random.choice(G.nodes())  # uniform initial node
    deg_pr_sent = G.degree(sample)
    est_RW_t1 += node_fn(sample)/deg_pr_sent
    est_RW_t2 += 1/deg_pr_sent
    est_RW.append(est_RW_t1/est_RW_t2)
    for ii in range(2,B+1):
        if ii in restart_ind:
            # Restart: jump to a uniform node. The original passed the unbound
            # G.nodes attribute to np.random.choice (every other call site uses
            # G.nodes()); materialise the node list so np.random.choice gets a
            # 1-D array-like on networkx >= 2.
            sample = np.random.choice(list(G.nodes()))
        else:
            neighbors = list(nx.neighbors(G,sample))
            sample = random.choice(neighbors)
        deg_pr_sent = G.degree(sample)
        est_RW_t1 += node_fn(sample)/deg_pr_sent
        est_RW_t2 += 1/deg_pr_sent
        est_RW.append(est_RW_t1/est_RW_t2)
    return np.array(est_RW)
def MHRR_sampling(G,B):
    """Metropolis-Hastings walk with random restarts.

    Identical to MH_sampling except that at the perfect-square step indices
    (steps 1, 4, 9, ...) the proposal is drawn uniformly from all nodes (a
    restart) instead of from the current node's neighbours; the MH acceptance
    test is applied to restart proposals as well.

    Returns a length-B numpy array of running estimates.
    """
    M = np.sqrt(B)
    # Set of perfect-square restart steps: O(1) membership test per iteration.
    restart_ind = set(np.square(np.arange(1, M)).tolist())
    est_MH= []      # running estimates
    est_MH_t = 0    # running sum of node_fn over visited samples
    sample = np.random.choice(G.nodes())  # uniform initial node
    est_MH_t += node_fn(sample)
    est_MH.append(est_MH_t)
    for ii in range(2,B+1):
        if ii in restart_ind:
            # Restart proposal drawn uniformly from all nodes. The original
            # passed the unbound G.nodes attribute (every other call site uses
            # G.nodes()); materialise the node list so np.random.choice gets a
            # 1-D array-like on networkx >= 2.
            sample_t = np.random.choice(list(G.nodes()))
        else:
            neighbors = list(nx.neighbors(G,sample))
            sample_t = np.random.choice(neighbors)
        if np.random.rand() <= (G.degree(sample)/G.degree(sample_t)):
            sample = sample_t
        est_MH_t += node_fn(sample)
        est_MH.append(est_MH_t/ii)
    return np.array(est_MH)
if G_i == 1:
G = nx.read_edgelist("friendster_community1_trimmed.edgelist",nodetype = int)
elif G_i == 2:
G = nx.les_miserables_graph()
G = nx.convert_node_labels_to_integers(G, first_label=0, ordering='default', label_attribute=None)
G_no_edges=G.number_of_edges()
G_no_nodes=G.number_of_nodes()
# +
if G_i == 1:
##########################CHECK
if flag_fn == 1:
F_org = 0.265712074303 #sum([1 for i in G.nodes() if G.degree(i) ])/G_no_nodes
elif flag_fn == 4:
F_org = 0.4491010966748313
else:
sys.exit("Invalid function")
elif G_i == 2:
if (flag_fn == 1) or (flag_fn == 2):
F_org = sum([node_fn(i) for i in G.nodes()])/G_no_nodes
elif flag_fn == 3:
F_org = 2*G_no_edges/G_no_nodes
elif flag_fn == 4:
F_org = nx.average_clustering(G)
# -
# For each enabled sampler, average the squared estimation error over
# no_runs independent runs, then convert to a normalised RMSE
# (sqrt(MSE)/F_org) per budget value. Despite the MSE_* names, the final
# arrays therefore hold NRMSE curves, which is what gets plotted below.
if MH_do:
    MSE_MH_t = 0
    for ii in range(1,no_runs+1):
        MSE_MH_t += (MH_sampling(G,max_B)-F_org)**2
    MSE_MH = MSE_MH_t/(no_runs)
    MSE_MH = np.sqrt(MSE_MH)/F_org
if RDS_do:
    MSE_rds_t = 0
    for ii in range(1,no_runs+1):
        MSE_rds_t += (RDS_sampling(G,max_B)-F_org)**2
    MSE_rds = MSE_rds_t/(no_runs)
    MSE_rds = np.sqrt(MSE_rds)/F_org
if RDSRR_do:
    MSE_rdsrr_t = 0
    for ii in range(1,no_runs+1):
        MSE_rdsrr_t += (RDSRR_sampling(G,max_B)-F_org)**2
    MSE_rdsrr = MSE_rdsrr_t/(no_runs)
    MSE_rdsrr = np.sqrt(MSE_rdsrr)/F_org
if MHRR_do:
    MSE_mhrr_t = 0
    for ii in range(1,no_runs+1):
        MSE_mhrr_t += (MHRR_sampling(G,max_B)-F_org)**2
    MSE_mhrr = MSE_mhrr_t/(no_runs)
    MSE_mhrr = np.sqrt(MSE_mhrr)/F_org
plt.figure(1,figsize=(16,12))
plt.plot(np.array(list(range(len(MSE_MH)))),MSE_MH,color='red',linewidth=2.3)
plt.plot(np.array(list(range(len(MSE_mhrr)))),MSE_mhrr,color='blue',linewidth=2.3)
plt.plot(np.array(list(range(len(MSE_rds)))),MSE_rds,color='gray',linewidth=2.5)
plt.plot(np.array(list(range(len(MSE_rdsrr)))),MSE_rdsrr,color='purple',linewidth=2.5)
plt.grid()
| experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
df = pd.read_csv('diabetes.csv')
df
df.head()
df.tail()
df.shape
from sklearn.model_selection import train_test_split
# Split features/target: everything except Outcome predicts the binary label.
x = df.drop(['Outcome'], axis=1)
y = df['Outcome']
# Stratified 80/20 split keeps the class balance identical in both sets.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, stratify=y, random_state=0)
from keras.models import Sequential
from keras.layers import Dense
# Single-hidden-layer binary classifier: 8 inputs -> 128 ReLU units -> 1 sigmoid.
model = Sequential()
model.add(Dense(128, activation='relu', input_dim=8))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# NOTE(review): validation_data reuses the test set, so the reported test
# metrics are not a fully held-out evaluation — confirm this is intended.
hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100, batch_size=100)
model.evaluate(x_test, y_test)
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()
acc = hist.history['accuracy']
val = hist.history['val_accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, '-', label='Training accuracy')
plt.plot(epochs, val, ':', label='Validation accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.plot()
| diabetesProject/DiabetesProjectTensorflowFinal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 2
# Add the specified code for each code cell, running the cells _in order_.
# Define a function **`count_evens()`** that takes in a _list_ of integers and returns the number that are even (divisible by two). You can use the modulo operator **`%`** to get the remainder when dividing by 2 (e.g., `5%2 == 1`).
# - This problem is adapted from [CodingBat](http://codingbat.com/prob/p189616), and you can also check your work there!
# +
def count_evens(a_list):
    """Return how many integers in *a_list* are even (divisible by two)."""
    return sum(1 for value in a_list if value % 2 == 0)

count_evens([1,2,3,4,5,6,7,8])
# -
# Define a function **`get_even_indices()`** that takes in a _list_ of integers and returns a _list_ containing the indices of the elements with even values.
# +
def get_even_indices(a_list):
    """Return the indices of the elements of *a_list* whose VALUES are even.

    Bug fix: the original tested ``index % 2 == 0`` (even positions) instead
    of the element values, and printed every matching index as a leftover
    debugging side effect.  Per the exercise spec, [1,2,3,4,5] -> [1, 3].
    """
    return [index for index, value in enumerate(a_list) if value % 2 == 0]

get_even_indices([1,2,3,4,5])
# -
# Define a function **`rotate_left()`** that takes in a _list_ of elements and "rotates" it by moving each element in that list 1 spot to the left, with the first element becoming the last:
# ```python
# my_list = [1,2,3,4]
# rotate_left(my_list)
# print(my_list) # [2,3,4,1]
# ```
# For practice, try to do this using a loop (it's more complicated, but good loop/index practice)
# +
def rotate_left(a_list):
    """Rotate *a_list* one position to the left, in place.

    Every element moves one spot toward index 0 and the first element wraps
    around to become the last, e.g. [1,2,3,4] -> [2,3,4,1] (the behaviour the
    exercise's markdown specifies).  The mutated list is also returned so the
    call below can be printed directly.

    Bug fix: the original called ``a_list(index)`` -- a TypeError, since
    lists are not callable -- and never rotated anything.
    """
    if not a_list:  # nothing to rotate for an empty list
        return a_list
    first = a_list[0]
    # Shift every remaining element one slot to the left...
    for index in range(1, len(a_list)):
        a_list[index - 1] = a_list[index]
    # ...then wrap the saved first element around to the end.
    a_list[-1] = first
    return a_list

my_list = [1,2,3,4]
print(rotate_left(my_list))
# -
# _Challenge_ A [Caesarean Cipher](https://en.wikipedia.org/wiki/Caesar_cipher) is a simple and ancient encryption technique. It works by taking the a string of text and "rotating" each letter a fixed number of places down the alphabet. Thus if the "rotation" number is "3", then a `A` (the 1st letter) would become a `D` (the 4th), a `B` would become a `E`, and a `Z` would wrap around to become a `C`.
#
# Define a method `rotate_text()` that takes in two arguments: a string to encrypt and a number of places to shift each letter. The method should return an "encrypted" version of the string, with each letter shifted the appropriate amount. Note that this method can also be used to "decrypt" text by shifting in the opposite direction (e.g., a negative amount).
# - You can convert the argument to uppercase for ease of processing, and to a list of letters if you wish to mutate it.
# - You can create use a string of all the letters (which is itself a sequence with indices) to look up the shifted rotated letters. The [find()](https://docs.python.org/3/library/stdtypes.html#str.find) string method could help.
| exercise-2/exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 7. Pandas objects
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Python has a series of data containers (lists, dicts etc.) and Numpy offers multi-dimensional arrays; however none of these structures offers a simple way to handle tabular data, or to easily do standard database operations. This is why Pandas exists: it offers a complete ecosystem of structures and functions dedicated to handling large tables with inhomogeneous contents.
#
# In this first chapter, we are going to learn about the two main structures of Pandas: Series and Dataframes.
# ## 7.1 Series
# ### 7.1.1 Simple series
# Series are the Pandas version of 1-D Numpy arrays. We are rarely going to use them directly, but they often appear implicitly when handling data from the more general Dataframe structure. We therefore only cover the basics here.
#
# To understand Series' specificities, let's create one. Usually Pandas structures (Series and Dataframes) are created from other simpler structures like Numpy arrays or dictionaries:
numpy_array = np.array([4,8,38,1,6])
# The function ```pd.Series()``` allows us to convert objects into Series:
pd_series = pd.Series(numpy_array)
pd_series
# The underlying Numpy array can be recovered with the ```.values``` attribute:
pd_series.values
# Otherwise, indexing works as for regular arrays:
pd_series[1]
# ### 7.1.2 Indexing
# On top of accessing values in a series by regular (positional) indexing, one can create custom indices for each element in the series:
pd_series2 = pd.Series(numpy_array, index=['a', 'b', 'c', 'd','e'])
pd_series2
# Now a given element can be accessed either by using its regular index:
pd_series2[1]
# or its chosen index:
pd_series2['b']
# A more direct way to create specific indices is to transform a dictionary into a Series (keys become the index):
composer_birth = {'Mahler': 1860, 'Beethoven': 1770, 'Puccini': 1858, 'Shostakovich': 1906}
pd_composer_birth = pd.Series(composer_birth)
pd_composer_birth
pd_composer_birth['Puccini']
# ## 7.2 Dataframes
# In most cases, one has to deal with more than just one variable, e.g. one has the birth year and the death year of a list of composers. Also one might have different types of information, e.g. in addition to numerical variables (year) one might have string variables like the city of birth. The Pandas structure that allows one to deal with such complex data is called a Dataframe, which can somehow be seen as an aggregation of Series with a common index.
# ### 7.2.1 Creating a Dataframe
# To see how to construct such a Dataframe, let's create some more information about composers:
composer_death = pd.Series({'Mahler': 1911, 'Beethoven': 1827, 'Puccini': 1924, 'Shostakovich': 1975})
composer_city_birth = pd.Series({'Mahler': 'Kaliste', 'Beethoven': 'Bonn', 'Puccini': 'Lucques', 'Shostakovich': 'Saint-Petersburg'})
# Now we can combine multiple series into a Dataframe by specifying a column name for each series. Note that all our series need to have the same indices (here the composers' names):
composers_df = pd.DataFrame({'birth': pd_composer_birth, 'death': composer_death, 'city': composer_city_birth})
composers_df
# A more common way of creating a Dataframe is to construct it directly from a dictionary of lists, where each element of the dictionary turns into a column:
dict_of_list = {'birth': [1860, 1770, 1858, 1906], 'death':[1911, 1827, 1924, 1975],
               'city':['Kaliste', 'Bonn', 'Lucques', 'Saint-Petersburg']}
pd.DataFrame(dict_of_list)
# However we have now lost the composers' names. We can restore them by providing, as we did before for the Series, a list of indices:
pd.DataFrame(dict_of_list, index=['Mahler', 'Beethoven', 'Puccini', 'Shostakovich'])
# ### 7.2.2 Accessing values
# There are multiple ways of accessing values or series of values in a Dataframe. Unlike in Series, a simple bracket gives access to a column and not an index, for example:
composers_df['city']
# returns a Series. Alternatively one can also use the *attribute* syntax and access columns by using:
composers_df.city
# The attribute syntax has some limitations, so in case something does not work as expected, revert to the brackets notation.
#
# When specifying multiple columns, a DataFrame is returned:
composers_df[['city', 'birth']]
# One of the important differences with a regular Numpy array is that here, regular (row, column) indexing doesn't work:
# +
#composers_df[0,0]
# -
# Instead one has to use either the ```.iloc[]``` or the ```.loc[]``` method. ```.iloc[]``` can be used to recover the regular positional indexing:
composers_df.iloc[0,1]
# While ```.loc[]``` allows one to recover elements by using the **explicit** index, in our case the composers' name:
composers_df.loc['Mahler','death']
# **Remember that ```loc``` and ```iloc``` use brackets [] and not parentheses ().**
#
# Numpy-style slice indexing works here too:
composers_df.iloc[1:3,:]
# If you are working with a large table, it might be useful to sometimes have a list of all the columns. This is given by the ```.keys()``` attribute:
composers_df.keys()
# ### 7.2.3 Adding columns
# It is very simple to add a column to a Dataframe. One can e.g. just create a column and give it a default value that we can change later:
composers_df['country'] = 'default'
composers_df
# Or one can use an existing list (must match the number of rows):
country = ['Austria','Germany','Italy','Russia']
composers_df['country2'] = country
composers_df
| 07-DA_Pandas_structures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
class Calculator:
    """Simple two-operand calculator over the values given at construction.

    Cleanups: the redundant class-level ``num1``/``num2`` defaults were
    removed (every instance sets both in ``__init__``), and locals that
    shadowed builtins (``sum``) are gone.
    """

    def __init__(self, num1, num2):
        self.num1 = num1
        self.num2 = num2

    def add(self):
        """Return num1 + num2."""
        return self.num1 + self.num2

    def subtract(self):
        """Return num1 - num2."""
        return self.num1 - self.num2

    def multiply(self):
        """Return num1 * num2."""
        return self.num1 * self.num2

    def divide(self):
        """Return num1 / num2 using true division.

        Bug fix: the original used floor division (``//``), silently
        discarding the remainder (e.g. 7 // 2 == 3) even though the menu
        advertises plain division.  Still raises ZeroDivisionError when
        num2 is 0, as before.
        """
        return self.num1 / self.num2
# Interactive driver: read two integers and an operation code, then print
# the result computed by the Calculator class above.
print("Enter the first number: ")
n = int(input())
print("Enter the second number: ")
m = int(input())
print("Enter the operation \n1 for add\n2 for subtract\n3 for multiply\n4 for divide: ")
r = int(input())
value = Calculator(n,m)
# Dispatch on the menu choice; anything outside 1-4 is rejected.
if(r == 1):
    print(value.add())
elif(r == 2):
    print(value.subtract())
elif(r == 3):
    print(value.multiply())
elif(r == 4):
    print(value.divide())
else:
    print("Please enter a valid operation")
    # NOTE(review): this extra read looks vestigial -- the value is never
    # used afterwards.  Confirm whether a retry loop was intended here.
    r = int(input())
| calculator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, average_precision_score
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier,
BaggingClassifier, VotingClassifier, GradientBoostingClassifier)
# Candidate models keyed by display name.  The 'Vote' entry is a
# soft-voting ensemble: it averages predicted class probabilities across
# its five sub-estimators.
methods = {'Logistic regression': LogisticRegression(),
           'Naive Bayes': GaussianNB(),
           'K-NN': KNeighborsClassifier(),
           'Decision Tree': DecisionTreeClassifier(),
           'Random Forest': RandomForestClassifier(),
           'Extra Trees': ExtraTreesClassifier(),
           'Vote': VotingClassifier(estimators=[('lr', LogisticRegressionCV(cv=5, max_iter=1000)),
                                                ('nb', GaussianNB()),
                                                ('dt', DecisionTreeClassifier()),
                                                ('et', ExtraTreesClassifier(n_estimators=100, bootstrap=True)),
                                                ('rf', RandomForestClassifier(n_estimators=150))],
                                    voting='soft')}
def classifier(X, y, methods, test_size=0.25, save=None):
    """Fit every estimator in *methods* on one train/test split and report
    its confusion matrix, accuracy and ROC-AUC, plotting each ROC curve.

    Parameters
    ----------
    X, y : feature matrix and binary labels (forwarded to train_test_split).
    methods : dict mapping display name -> unfitted sklearn-style estimator.
    test_size : fraction of the data held out for evaluation.
    save : optional basename; when truthy, the text report is written to
        "<save>.txt".
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    file_res = ''
    for name, clf in methods.items():
        res = ''
        clf.fit(X_train, y_train)
        prediction = clf.predict(X_test)
        res += '\t' + name + '\n'
        res += 'Confusion matrix:\n{}\n'.format(confusion_matrix(y_test, prediction))
        res += 'Accuracy: {}\n'.format(clf.score(X_test, y_test))
        res += 'AUC score: {}\n'.format(roc_auc_score(y_test, prediction))
        print(res)
        # roc_curve returns (fpr, tpr, thresholds) -- the original unpacked
        # them into swapped names; the plotted curve itself is unchanged.
        fpr, tpr, thrh = roc_curve(y_test, prediction)
        plt.plot(fpr, tpr, label=name)
        plt.legend(loc='best')
        file_res += res
    if save:
        # Bug fix: the original opened the literal file "{}.txt" because the
        # .format(save) call was missing.
        with open('{}.txt'.format(save), 'w') as f:
            print(file_res, file=f)
def get_name(obj):
    """Return the bare class name from repr text like "<class 'pkg.Name'>"."""
    text = str(obj)
    tail = text.rsplit('.', 1)[-1]
    return tail[:-2]
def boosted_classifier(X, y, boost_method, methods, test_size=0.25, save=None):
    """Wrap every estimator in *methods* with the ensemble *boost_method*
    (e.g. BaggingClassifier, AdaBoostClassifier) and evaluate it like
    `classifier` (confusion matrix, accuracy, ROC-AUC, ROC plot).

    Estimators the wrapper rejects are skipped -- e.g. AdaBoostClassifier
    raises ValueError for base estimators without sample_weight support
    such as K-NN.

    Parameters mirror `classifier`, with `boost_method` being a callable
    taking a base estimator and returning the boosted/bagged wrapper.
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    file_res = ''
    for name, clf in methods.items():
        res = ''
        boosted_clf = boost_method(clf)
        try:
            boosted_clf.fit(X_train, y_train)
        except ValueError:
            # because AdaBoostClassifier doesn't support K-NN
            continue
        prediction = boosted_clf.predict(X_test)
        res += '\t{} + {}\n'.format(get_name(boost_method), name)
        res += 'Confusion matrix:\n{}\n'.format(confusion_matrix(y_test, prediction))
        res += 'Accuracy: {}\n'.format(boosted_clf.score(X_test, y_test))
        res += 'AUC score: {}\n'.format(roc_auc_score(y_test, prediction))
        print(res)
        # roc_curve returns (fpr, tpr, thresholds); the original unpacked
        # them into swapped names -- fixed, plotted curve unchanged.
        fpr, tpr, thrh = roc_curve(y_test, prediction)
        plt.plot(fpr, tpr, label=name)
        plt.legend(loc='best')
        file_res += res
    if save:
        # Bug fix: missing .format(save) made this write to the literal
        # file "{}.txt".
        with open('{}.txt'.format(save), 'w') as f:
            print(file_res, file=f)
# ### Working with 10s time windows
# Evaluate all candidate models on the 10-second-window feature set.
data = pd.read_csv('../Parser/human-bot-10000-data.csv', index_col=[0])
len(data[data['label'] == 1]), len(data[data['label'] == 0])
# Very balanced data
X = data.drop('label', axis=1)
y = data['label']
X.describe()
classifier(X, y, methods=methods)
# ### As we can see, the accuracy is not high enough. Let's try 20s time windows
data = pd.read_csv('../Parser/human-bot-20000-data.csv', index_col=[0])
len(data[data['label'] == 1]), len(data[data['label'] == 0])
# Still very balanced
X = data.drop('label', axis=1)
y = data['label']
X.describe()
classifier(X, y, methods=methods)
# #### Accuracy improved noticeably
# ## Boosting
# We will work with 20s time windows as they have shown better accuracy
data = pd.read_csv('../Parser/human-bot-20000-data.csv', index_col=[0])
X = data.drop('label', axis=1)
y = data['label']
# ### Bagging
boosted_classifier(X, y, BaggingClassifier, methods)
# ### Ada boost
boosted_classifier(X, y, AdaBoostClassifier, methods)
# ### Gradient Boosting
classifier(X, y, {'GradientBoosting': GradientBoostingClassifier()})
# ### Small research / feature selection
data = pd.read_csv('../Parser/human-bot-20000-data.csv', index_col=[0])
X = data.drop('label', axis=1)
y = data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y)
X.info()
import eli5
from eli5.sklearn import PermutationImportance
# eli5 provides a way to compute feature importances for any black-box estimator by measuring how score decreases when a feature is not available; the method is also known as "permutation importance" or "Mean Decrease Accuracy (MDA)".
RF_clf = RandomForestClassifier().fit(X_train, y_train)
DT_clf = DecisionTreeClassifier().fit(X_train, y_train)
# NOTE(review): V_clf is built here but never fitted or used below in this
# chunk -- possibly a leftover; confirm before removing.
V_clf = VotingClassifier(estimators=[('lr', LogisticRegressionCV(cv=5, max_iter=1000)),
                                     ('nb', GaussianNB()),
                                     ('dt', DecisionTreeClassifier()),
                                     ('et', ExtraTreesClassifier(n_estimators=100, bootstrap=True)),
                                     ('rf', RandomForestClassifier(n_estimators=150))],
                         voting='soft')
# #### As RandomForest and DecisionTree showed best accuracy before, we'll use them to estimate feature importances
perm_rf = PermutationImportance(RF_clf).fit(X_test, y_test)
perm_dt = PermutationImportance(DT_clf).fit(X_test, y_test)
eli5.show_weights(perm_rf)
eli5.show_weights(perm_dt)
# #### Making a conclusion, that we should use features ['gpm', 'tbc', 'epm', 'aht'] ('dev'?)
X_red = X[['gpm', 'tbc', 'epm', 'aht']]
classifier(X_red, y, methods)
boosted_classifier(X_red, y, BaggingClassifier, methods)
boosted_classifier(X_red, y, AdaBoostClassifier, methods)
classifier(X_red, y, {'GradientBoosting': GradientBoostingClassifier()})
# ### Let's try adding 'dev'
# NOTE(review): despite the heading, this selects ['tbc', 'aht', 'tln'] and
# includes no 'dev' column -- confirm which feature set was intended.
X_red_wdev = X[['tbc', 'aht', 'tln']]
classifier(X_red_wdev, y, methods)
boosted_classifier(X_red, y, BaggingClassifier, methods)
boosted_classifier(X_red, y, AdaBoostClassifier, methods)
# ### We can see that on the smallest feature set `Random Forest` clearly shows the best accuracy, because those features were selected with this classifier in mind.
#
# ### The classification results showed that adding the 'dev' feature decreases accuracy in all cases except human-vs-human classification (which probably happened because of the different play styles of individual players). In the other cases we concluded that the 'dev' feature makes our data noisier, so we won't use it.
| Classifier/classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="SUXreZxtQZy8" colab_type="text"
# # Linear Regression - First Solution
# + [markdown] id="qldvj6CzAvFr" colab_type="text"
# ## Importing the Libraries
# + [markdown] id="p2W8Tj9OAtPP" colab_type="text"
#
# The following script imports the necessary libraries:
# + id="axCbc5ikAakO" colab_type="code" colab={}
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
import seaborn as sns
import statsmodels.formula.api as smf
import pandas as pd
import requests
import numpy as np
import matplotlib.pyplot as plt
import datetime
# %matplotlib inline
# + [markdown] id="EiapYtK9BGIe" colab_type="text"
# ## Dataset
# + [markdown] id="aJelLAaXBJeN" colab_type="text"
# File has been downloaded and kept in my google drive. Follow code will help us to read the data into pandas dataframe.
# + id="Z3sgYyNC6JQQ" colab_type="code" outputId="8beedb88-a890-45d4-d060-173eb4fba49c" colab={"base_uri": "https://localhost:8080/", "height": 50}
from google.colab import drive
drive.mount('/content/gdrive')
currentDT = datetime.datetime.now()
print("Current time:", currentDT)
filepath='/content/gdrive/My Drive/petrol_consumption.csv'
# + id="lNQXWE4fBGcp" colab_type="code" colab={}
# url="https://drive.google.com/open?id=1mVmGNx6cbfvRHCDvF12ZL3wGLSHD9f"
# s=requests.get(url).content
# dataset=pd.read_csv(s)
# + [markdown] id="f_BGu3fjNKT_" colab_type="text"
# Let's take a look at what our dataset actually looks like. Execute the head() command:
# + id="rYwGfWsvMiG0" colab_type="code" outputId="3a1d5216-57d5-4039-8c18-6deee6aa9da5" colab={"base_uri": "https://localhost:8080/", "height": 195}
dataset = pd.read_csv('/content/gdrive/My Drive/petrol_consumption.csv')
dataset.head()
# + [markdown] id="9T8ZLcCRNUsJ" colab_type="text"
# To see statistical details of the dataset, we'll use the describe() command again:
# + id="_OhSURfaNWBY" colab_type="code" outputId="0ad4c020-1dc7-42cd-b8ea-b66cae320f18" colab={"base_uri": "https://localhost:8080/", "height": 284}
dataset.describe()
# + [markdown] id="66laT84SNfZ6" colab_type="text"
# ## Preparing the Data
# + [markdown] id="eWRqvgt5NiDk" colab_type="text"
# The next step is to divide the data into attributes and labels as we did previously. However, unlike last time, this time around we are going to use column names for creating an attribute set and label.
# + id="MooYYO1mNloX" colab_type="code" colab={}
X = dataset[['Petrol_tax', 'Average_income', 'Paved_Highways',
'Population_Driver_licence(%)']]
y = dataset['Petrol_Consumption']
# + [markdown] id="AwFk3BgiNvpC" colab_type="text"
# Execute the following code to divide our data into training and test sets:
# + id="RtC38MgVNylK" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# + [markdown] id="2XVsmX4fN5W7" colab_type="text"
# ## Training the Algorithm
# + [markdown] id="xEq_5tAYN-wF" colab_type="text"
# And finally, to train the algorithm we execute the same code as before, using the fit() method of the LinearRegression class:
# + id="kYrzui78OIKK" colab_type="code" outputId="d532428d-82ef-411f-a44f-a603a8bb3167" colab={"base_uri": "https://localhost:8080/", "height": 34}
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# + [markdown] id="XpqWrExeOMxI" colab_type="text"
# in case of multivariable linear regression, the regression model has to find the most optimal coefficients for all the attributes. To see what coefficients our regression model has chosen, execute the following script:
# + id="Tg5mIrSHONtB" colab_type="code" outputId="b555ecff-bff9-4d49-c8c6-dfbab231cdc3" colab={"base_uri": "https://localhost:8080/", "height": 166}
coeff_df = pd.DataFrame(regressor.coef_, X.columns, columns=['Coefficient'])
coeff_df
# + [markdown] id="k2OUEeeGOZdH" colab_type="text"
# * **This means that for a unit increase in "petroltax", there is a decrease of 40.016 million gallons in gas consumption.**
# * **Similarly, a unit increase in proportion of population with a drivers license results in an increase of 1.341 billion gallons of gas consumption.**
#
# **We can see that "Averageincome" and "Paved_Highways" have a very little effect on the gas consumption.**
# + [markdown] id="6iSWbJUbPEIU" colab_type="text"
# ## Making Predictions
# + [markdown] id="N3AY1JMsPI3l" colab_type="text"
# To make predictions on the test data, execute the following script:
# + id="oqtRpH_uPHFo" colab_type="code" colab={}
y_pred = regressor.predict(X_test)
# + [markdown] id="aJugWUjkPUDl" colab_type="text"
# To compare the actual output values for X_test with the predicted values, execute the following script:
# + id="PCy6Dq7MPU2h" colab_type="code" outputId="f71c9278-0468-4a54-9a00-8e77d8829194" colab={"base_uri": "https://localhost:8080/", "height": 343}
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df
# + [markdown] id="BsyEkXs9PZ88" colab_type="text"
# ## Evaluating the Algorithm
# + [markdown] id="nU2LqhEMPeFL" colab_type="text"
# The final step is to evaluate the performance of algorithm. We'll do this by finding the values for MAE, MSE and RMSE. Execute the following script:
# + id="CiesR06mPlLd" colab_type="code" outputId="f4123dcc-5894-4d03-a266-81d2900cc632" colab={"base_uri": "https://localhost:8080/", "height": 67}
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# + [markdown] id="aXjdz9IFPs5I" colab_type="text"
# **You can see that the value of root mean squared error is 68.31, which is slightly greater than 10% of the mean value of the gas consumption in all states. This means that our algorithm was not very accurate but can still make reasonably good predictions.**
# + [markdown] id="7AuIHHPNP5sC" colab_type="text"
# ## Observations
# + [markdown] id="fQhZyZtfP8Ba" colab_type="text"
# **There are many factors that may have contributed to this inaccuracy, a few of which are listed here:**
#
# * Need more data: Only one year worth of data isn't that much, whereas having multiple years worth could have helped us improve the accuracy quite a bit.
# * Bad assumptions: We made the assumption that this data has a linear relationship, but that might not be the case. Visualizing the data may help you determine that.
# * Poor features: The features we used may not have had a high enough correlation to the values we were trying to predict.
# + [markdown] id="YdFjkXsVQnvy" colab_type="text"
# # Linear Regression -Second Solution
# + [markdown] id="X-wxqXiIT4Ob" colab_type="text"
# ## Advertising Data
# + [markdown] id="iXkjSKklT80A" colab_type="text"
# Let's take a look at some data, ask some questions about that data, and then use linear regression to answer those questions!
# + id="XCntz6YQV9j6" colab_type="code" colab={}
import seaborn as sns
import statsmodels.formula.api as smf
# + id="SuHj5KyFVej_" colab_type="code" outputId="7ab73ae8-32a2-4178-db6d-73a59671a211" colab={"base_uri": "https://localhost:8080/", "height": 50}
from google.colab import drive
drive.mount('/content/gdrive')
currentDT = datetime.datetime.now()
print("Current time:", currentDT)
filepath='/content/gdrive/My Drive/Advertising.csv'
# + id="lph-fuPeT7AD" colab_type="code" outputId="bdb7a699-2b92-4d58-ba3e-07aa34d935ae" colab={"base_uri": "https://localhost:8080/", "height": 195}
# read data into a DataFrame
data = pd.read_csv('/content/gdrive/My Drive/Advertising.csv')
data.head()
# + id="YntBYPBsVs7K" colab_type="code" outputId="0b0c48ef-7eb8-4c2c-c1f8-da05c756ade5" colab={"base_uri": "https://localhost:8080/", "height": 34}
# shape of the DataFrame
data.shape
# + [markdown] id="OtNYPFChVxxW" colab_type="text"
# visualize the relationship between the features and the response using scatterplots
# + id="OUeDtcZ2Vysa" colab_type="code" outputId="546eb5fd-09c1-4e50-8245-68851a686c67" colab={"base_uri": "https://localhost:8080/", "height": 583}
sns.pairplot(data, x_vars=['TV','Radio','Newspaper'], y_vars='Sales', size=7, aspect=0.7)
# + [markdown] id="0diEguK3WKm5" colab_type="text"
# ## Questions About the Advertising Data
# + [markdown] id="Eym38DqtWMC6" colab_type="text"
# This general question might lead you to more specific questions:
# * Is there a relationship between ads and sales?
# * How strong is that relationship?
# * Which ad types contribute to sales?
# * What is the effect of each ad type on sales?
# * Given ad spending in a particular market, can sales be predicted?
# + [markdown] id="6KECcsX6WVK8" colab_type="text"
# ## Simple Linear Regression
# + [markdown] id="EMsl89dvWW51" colab_type="text"
# * Simple linear regression is an approach for predicting a quantitative response using a single feature (or "predictor" or "input variable")
# * It takes the following form:
# * y=β0+β1x
#
# What does each term represent?
#
# * y is the response
# * x is the feature
# * β0 is the intercept
# * β1 is the coefficient for x
#
# * β0 and β1 are called the model coefficients
# + [markdown] id="n5CX3cILWorw" colab_type="text"
# **To create your model, you must "learn" the values of these coefficients. Once we've learned these coefficients, we can use the model to predict Sales.**
# + [markdown] id="Mti6yUxaWwFW" colab_type="text"
# ## Estimating ("Learning") Model Coefficients
# + [markdown] id="5FzkSndxWz0o" colab_type="text"
# * Coefficients are estimated using the least squares criterion
# * In other words, we find the line (mathematically) which minimizes the sum of squared residuals (or "sum of squared errors"):
# + [markdown] id="rI3yFwi-XG4e" colab_type="text"
# Let's estimate the model coefficients for the advertising data
# + id="7NAvYbyEXIrp" colab_type="code" outputId="93569851-497a-40d0-d983-b13fb2bd4ce1" colab={"base_uri": "https://localhost:8080/", "height": 67}
### STATSMODELS ###
# create a fitted model
lm1 = smf.ols(formula='Sales ~ TV', data=data).fit()
# print the coefficients
lm1.params
# + id="MKArPunIXWV4" colab_type="code" outputId="62218011-613c-427b-9143-5befb3e355b4" colab={"base_uri": "https://localhost:8080/", "height": 50}
### SCIKIT-LEARN ###
# create X and y
feature_cols = ['TV']
X = data[feature_cols]
y = data.Sales
# instantiate and fit
lm2 = LinearRegression()
lm2.fit(X, y)
# print the coefficients
print(lm2.intercept_)
print(lm2.coef_)
# + [markdown] id="gZm7iiYmXcL-" colab_type="text"
# ## Interpreting Model Coefficients
# + [markdown] id="O5jZwiDSXiKx" colab_type="text"
# **Interpreting the TV coefficient ( β1 )**
#
# * A "unit" increase in TV ad spending is associated with a 0.047537 "unit" increase in Sales
# * Or more clearly: An additional $1,000 spent on TV ads is associated with an increase in sales of 47.537 widgets
# * Note here that the coefficients represent associations, not causations
# + [markdown] id="mNZNmPcVXwvx" colab_type="text"
# ## Using the Model for Prediction
# + [markdown] id="Csk3r6YpXyM1" colab_type="text"
# Let's say that there was a new market where the TV advertising spend was $50,000. What would we predict for the Sales in that market?
#
# y=β0+β1x
#
# **y=7.032594+0.047537×50**
#
# We would use 50 instead of 50,000 because the original data consists of examples that are divided by 1000
# + [markdown] id="1HfxPkvVYBwv" colab_type="text"
# ### Manual Prediction
# + id="CjxJbUftYC86" colab_type="code" outputId="4f5b6e40-4c3e-46fc-da31-1ca48f10eece" colab={"base_uri": "https://localhost:8080/", "height": 34}
# manually calculate the prediction
7.032594 + 0.047537*50
# + [markdown] id="Z-DSluAUYF_H" colab_type="text"
# ### Statsmodels Prediction
# + id="ssa0PAkyYI68" colab_type="code" outputId="f8779b8e-613d-4832-a5e4-80e28eef11de" colab={"base_uri": "https://localhost:8080/", "height": 50}
### STATSMODELS ###
# you have to create a DataFrame since the Statsmodels formula interface expects it
X_new = pd.DataFrame({'TV': [50]})
# predict for a new observation
lm1.predict(X_new)
# + id="mZjR56U1YNEQ" colab_type="code" outputId="579711b8-603b-4415-d86d-bf6eef8d60e3" colab={"base_uri": "https://localhost:8080/", "height": 34}
### SCIKIT-LEARN ###
# predict for a new observation
lm2.predict(X_new)
# + [markdown] id="NiEQfbIuYuwT" colab_type="text"
# ## Plotting the Least Squares Line
# + id="wiF0CYUKYv6O" colab_type="code" outputId="aa12c605-0240-44ef-f611-874270b06c67" colab={"base_uri": "https://localhost:8080/", "height": 583}
sns.pairplot(data, x_vars=['TV','Radio','Newspaper'], y_vars='Sales', size=7, aspect=0.7, kind='reg')
# + [markdown] id="wSCdGl1ZZd_Y" colab_type="text"
# ## Hypothesis Testing and p-values
# + [markdown] id="7RWJnK2cZgBi" colab_type="text"
# **Steps for Hypothesis Testing**
#
# * Start with a null hypothesis and an alternative hypothesis (that is opposite the null)
# * Then, you check whether the data supports rejecting the null hypothesis or failing to reject the null hypothesis
# * "failing to reject" the null is not the same as "accepting" the null hypothesis
# * The alternative hypothesis may indeed be true, except that you just don't have enough data to show that
#
# **Conventional hypothesis test**
#
# * null hypothesis:
# There is no relationship between TV ads and Sales
# β1 equals zero
# * alternative hypothesis:
# There is a relationship between TV ads and Sales
# β1 is not equal to zero
#
# **Testing hypothesis**
#
# * Reject the null
# * There is a relationship
# * If the 95% confidence interval does not include zero
# * Fail to reject the null
# * There is no relationship
# * If the 95% confidence interval includes zero
# + id="1hjzu3cBZfGg" colab_type="code" outputId="6338e9ce-92a9-4232-87b3-be19bb8b8050" colab={"base_uri": "https://localhost:8080/", "height": 67}
### STATSMODELS ###
# print the p-values for the model coefficients
lm1.pvalues
# + [markdown] id="94ah55k_aIxn" colab_type="text"
# **p-value**
#
# Represents the probability that the coefficient is actually zero
#
# **Interpreting p-values**
#
# * If the 95% confidence interval does not include zero
# * p-value will be less than 0.05
# * Reject the null
# * There is a relationship
# * If the 95% confidence interval includes zero
# * p-value for that coefficient will be greater than 0.05
# * Fail to reject the null
# * There is no relationship
# + [markdown] id="9c0_GUlFajwT" colab_type="text"
# * In this case, the p-value for TV is far less than 0.05
# * We generally ignore the p-value for the intercept
# * Believe that there is a relationship between TV ads and Sales
# + [markdown] id="Eflo2zcQauH6" colab_type="text"
# ## How Well Does the Model Fit the data?
# + [markdown] id="m5dXxfFkav2F" colab_type="text"
# To evaluate the overall fit of a linear model, we use the R-squared value
#
# * R-squared is the proportion of variance explained
# * It is the proportion of variance in the observed data that is explained by the model, or the reduction in error over the null model
# * The null model just predicts the mean of the observed response, and thus it has an intercept and no slope
# * R-squared is between 0 and 1
# * Higher values are better because it means that more variance is explained by the model.
# + id="7-TOiQUvavLw" colab_type="code" outputId="95bded70-6c68-4047-a40a-61915feb6eae" colab={"base_uri": "https://localhost:8080/", "height": 34}
### STATSMODELS ###
# print the R-squared value for the model
lm1.rsquared
# + id="raGQpDlJa_1a" colab_type="code" outputId="5b63d085-cdfe-413e-cc6a-7d6d3ddd8eb0" colab={"base_uri": "https://localhost:8080/", "height": 34}
### SCIKIT-LEARN ###
# print the R-squared value for the model
lm2.score(X, y)
# + [markdown] id="Yzr3K859bDER" colab_type="text"
# **Is that a "good" R-squared value?**
#
# * It's hard to say
# * The threshold for a good R-squared value depends widely on the domain
# * Therefore, it's most useful as a tool for comparing different models
# + [markdown] id="xqUfjCbCScz1" colab_type="text"
# # Linear Regression -Third Solution
# + id="1TMlrE-HQyk1" colab_type="code" outputId="f84cd18a-4300-4cf7-a6ae-fa20b503e373" colab={"base_uri": "https://localhost:8080/", "height": 359}
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature (column index 2); np.newaxis keeps the 2-D
# (n_samples, 1) shape that sklearn estimators expect
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
# (the last 20 samples are held out for testing)
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients (single slope, since only one feature is used)
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs: scatter of the held-out points plus the fitted line
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
# Hide tick labels -- the standardised feature values aren't meaningful here
plt.xticks(())
plt.yticks(())
plt.show()
# + [markdown] id="j5tjkBZSQxu-" colab_type="text"
#
| GL_AIML_Session_Linear_Regression_13_07_2019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Configuration
# _Initial steps to get the notebook ready to play nice with our repository. Do not delete this section._
# Code formatting with [black](https://pypi.org/project/nb-black/).
# %load_ext lab_black
# +
import os
import pytz
import glob
import pathlib
this_dir = pathlib.Path(os.path.abspath(""))
data_dir = this_dir / "data"
# -
import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import json
from datetime import datetime
# ## Download
# Retrieve the page
# +
#url = "https://www.placer.ca.gov/DocumentCenter/View/46267/dashboard"
# -
url = "https://itwebservices.placer.ca.gov/coviddashboard"
page = requests.get(url)
# ## Parse
soup = BeautifulSoup(page.content, "html.parser")
# Grab the inline script payload mentioning "Foresthill" (the dashboard data
# blob); the loop keeps the last matching text node.
for node in soup(text=re.compile(r"Foresthill")):
    script_content = node
data = json.loads(script_content)
# The zip-code breakdown sits at a fixed position in the widget call args.
zip_cities_list = data["x"]["calls"][1]["args"][-3]
TAG_RE = re.compile(r"<[^>]+>")
dict_list = []
for entry in zip_cities_list:
    parts = entry.split("<br/>")
    # Strip HTML tags from the "ZIP - City" label.
    area = TAG_RE.sub("", str(parts[0]))
    zip_code, city = area.split(" - ")
    clean_cases = re.sub("Number of cases: ".lower(), "", str(parts[1]).lower())
    dict_list.append(
        {"area": area, "city": city, "zip_code": zip_code, "confirmed_cases": clean_cases}
    )
df = pd.DataFrame(dict_list)
# Get timestamp
time_div = soup.find("div", id="dashboard-data-last-updated")
timestamp = time_div.find("span", class_="value-output").get_text()
latest_date = pd.to_datetime(timestamp).date()
df["county_date"] = latest_date
df.insert(0, "county", "Placer")
# Clean up
df["area"] = df["area"].str.replace(" - ", ": ")
df = df[["county", "area", "confirmed_cases", "county_date", "zip_code"]].rename(
    columns={"zip_code": "zip"}
)
# ## Vet
try:
    assert len(df) >= 32
except AssertionError:
    raise AssertionError("Placer County's scraper is missing rows")
try:
    assert len(df) <= 32
except AssertionError:
    raise AssertionError("Placer County's scraper has more rows than before")
# ## Export
# Set date
tz = pytz.timezone("America/Los_Angeles")
today = datetime.now(tz).date()
slug = "placer"
df
df.to_csv(data_dir / slug / f"{today}.csv", index=False)
# ## Combine
# Every daily CSV except the rolled-up timeseries file.
csv_list = [
    path
    for path in glob.glob(str(data_dir / slug / "*.csv"))
    if not str(path).endswith("timeseries.csv")
]
df_list = []
for csv_path in csv_list:
    if "manual" in csv_path:
        frame = pd.read_csv(csv_path, parse_dates=["date"])
    else:
        # Daily exports are named <date>.csv; recover the date from the name.
        file_date = csv_path.split("/")[-1].replace(".csv", "")
        frame = pd.read_csv(csv_path, parse_dates=["county_date"])
        frame["date"] = file_date
    df_list.append(frame)
df = pd.concat(df_list).sort_values(["date", "area"])
df.to_csv(data_dir / slug / "timeseries.csv", index=False)
| places/placer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Number of Messages Sent Factorial Analysis (High Density Scenario)
# +
import os
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from itertools import product, chain, combinations
from scipy import stats
from IPython.display import display, HTML
# %matplotlib inline
def parse_if_number(s):
    """Parse a CSV cell: float if numeric, booleans for 'true'/'false',
    the raw string otherwise, and None for empty/missing values."""
    try:
        return float(s)
    except (ValueError, TypeError):
        # Narrowed from a bare `except:` so genuine bugs are not swallowed.
        # Not numeric: map OMNeT++ boolean literals, keep other non-empty
        # strings as-is, normalise empty cells to None.
        if s == "true":
            return True
        if s == "false":
            return False
        return s if s else None
def parse_ndarray(s):
    """Parse a space-separated vector string into a float ndarray (None if empty).

    Uses str.split + np.array instead of np.fromstring, which is deprecated
    for text-mode parsing.
    """
    return np.array(s.split(), dtype=float) if s else None
def get_file_name(name):
    """Return *name* with every ':' replaced by '-' (filesystem-safe)."""
    return name.translate(str.maketrans(':', '-'))
# -
# ## Config
# +
# Exported OMNeT++ results file, read from exported_data/ below.
inputFile = 'messages.csv'
repetitionsCount = -1 # -1 = auto-detect
# Experimental factors of the 2^k-style design; per the notes at the bottom
# of this notebook, R is the broadcast radius, T and m are protocol knobs.
factors = ['R', 'T', 'm']
# Confidence level for the Student-t intervals computed in the
# full-factorial section.
tIntervalAlpha = 0.95
plotSize = (10, 10)
plotStyle = 'seaborn-whitegrid'
saveFigures = False
# Filter scalars
scalarsFilter = ['Floorplan.userCount', 'Floorplan.msgsPerSlot:sum']
# Filter vectors
vectorsFilter = []
# Percentiles
percentiles = [0.25, 0.5, 0.75, 0.9, 0.95]
# Performance indexes
perfIndexes = [
    ('Floorplan.msgsPerSlot:sum', 'total number of messages sent'),
]
# Transformations
transformations = [
]
intPercentiles = [int(i*100) for i in percentiles]
# Vector-derived performance indexes are disabled for this analysis
# (the commented loop below would enable broadcast-time percentiles).
vecPerfIndexes = []
#for intPercentile in intPercentiles:
    #vecPerfIndexes.append(('broadcastTime' + str(intPercentile), 'Broadcast time needed to reach the ' + str(intPercentile) + 'th percentile of the coverage'))
for v in vecPerfIndexes:
    perfIndexes.append(v)
    #transformations.append((v[0], lambda x: math.log(x)))
# -
# ## Load scalars
# Load the exported result rows; vector/histogram cells are parsed into
# numpy arrays and generic attribute values are coerced by type.
df = pd.read_csv('exported_data/' + inputFile, converters = {
    'attrvalue': parse_if_number,
    'binedges': parse_ndarray,
    'binvalues': parse_ndarray,
    'vectime': parse_ndarray,
    'vecvalue': parse_ndarray,
})
# +
if repetitionsCount <= 0: # auto-detect
    # Repetition numbers are 0-based, so max()+1 is the repetition count.
    repetitionsCount = int(df[df.attrname == 'repetition']['attrvalue'].max()) + 1
print('Repetitions:', repetitionsCount)
# Computed
factorsCount = len(factors)
if saveFigures:
    os.makedirs('figures', exist_ok=True)
# +
# Keep scalar results plus the run metadata (itervars, the userCount
# parameter, and the repetition number) needed to identify each run.
scalars = df[(df.type == 'scalar') | ((df.type == 'itervar') & (df.attrname != 'TO')) | ((df.type == 'param') & (df.attrname == 'Floorplan.userCount')) | ((df.type == 'runattr') & (df.attrname == 'repetition'))]
scalars = scalars.assign(qname = scalars.attrname.combine_first(scalars.module + '.' + scalars.name))
for index, row in scalars[scalars.type == 'itervar'].iterrows():
    val = scalars.loc[index, 'attrvalue']
    if isinstance(val, str) and not all(c.isdigit() for c in val):
        # NOTE(review): eval() on values read from the CSV — acceptable for
        # locally generated exports, unsafe on untrusted input.
        scalars.loc[index, 'attrvalue'] = eval(val)
scalars.value = scalars.value.combine_first(scalars.attrvalue.astype('float64'))
# Pivot to one row per run, one column per scalar/metadata name.
scalars_wide = scalars.pivot_table(index=['run'], columns='qname', values='value')
scalars_wide.sort_values([*factors, 'repetition'], inplace=True)
# After sorting, consecutive blocks of `repetitionsCount` rows share one
# factor combination; number those blocks as `config`.
count = 0
for index in scalars_wide.index:
    config = count // repetitionsCount
    scalars_wide.loc[index, 'config'] = config
    count += 1
scalars_wide = scalars_wide[['config', 'repetition', *factors, *scalarsFilter]]
configsCount = int(scalars_wide['config'].max()) + 1
totalSims = configsCount*repetitionsCount
display(HTML("<style>div.output_scroll { height: auto; max-height: 48em; }</style>"))
pd.set_option('display.max_rows', totalSims)
pd.set_option('display.max_columns', 100)
# coverage
#scalars_wide['coveredUsersPercent'] = scalars_wide['Floorplan.coveredUsers:sum'] / (scalars_wide['Floorplan.userCount'] - 1)
# -
# ## Load vectors
# Tag each vector row with the config/repetition of its run, then keep only
# the vectors selected in vectorsFilter (empty here, so the result is empty).
vectors = df[df.type == 'vector']
vectors = vectors.assign(qname = vectors.module + '.' + vectors.name)
for index in scalars_wide.index:
    # scalars_wide is indexed by run id, so `index` is the run identifier.
    r = index
    cfg = scalars_wide.loc[index, 'config']
    rep = scalars_wide.loc[index, 'repetition']
    vectors.loc[vectors.run == r, 'config'] = cfg
    vectors.loc[vectors.run == r, 'repetition'] = rep
vectors = vectors[vectors.qname.isin(vectorsFilter)]
vectors.sort_values(['config', 'repetition', 'qname'], inplace=True)
vectors = vectors[['config', 'repetition', 'qname', 'vectime', 'vecvalue']]
# ## Compute scalars from vectors
# +
def get_percentile(percentile, vectime, vecvalue, totalvalue):
    """Return the first time at which the running sum of *vecvalue* reaches
    *percentile* * *totalvalue*.

    Parameters
    ----------
    percentile : float
        Target fraction (0-1) of *totalvalue*.
    vectime, vecvalue : numpy.ndarray
        Parallel arrays of sample times and sample values.
    totalvalue : number
        Total the percentile refers to.

    Returns
    -------
    The matching entry of *vectime*, or math.inf if the threshold is never
    reached.
    """
    tofind = percentile * totalvalue
    # enumerate() over the cumulative sum replaces the original manual
    # index counter.
    for idx, value in enumerate(vecvalue.cumsum()):
        if value >= tofind:
            return vectime[idx]
    return math.inf
# Derive per-percentile scalar metrics from each recorded vector.
# NOTE: zip() pairs vecPerfIndexes with percentiles positionally — the two
# config lists must stay in the same order and length. With the current
# config both `vectors` and `vecPerfIndexes` are empty, so this is a no-op.
for index, row in vectors.iterrows():
    for vecPerf, percentile in zip(vecPerfIndexes, percentiles):
        vecPerfIndex = vecPerf[0]
        cfg = row['config']
        rep = row['repetition']
        if vecPerfIndex.startswith('broadcastTime'):
            # Coverage percentile is over the other users (sender excluded).
            total = scalars_wide[(scalars_wide['config'] == cfg) & (scalars_wide['repetition'] == rep)]['Floorplan.userCount'].values[0] - 1
        else:
            raise Exception('Need to specify total for ' + vecPerfIndex + '. (coding required)')
        value = get_percentile(percentile, row['vectime'], row['vecvalue'], total)
        scalars_wide.loc[(scalars_wide['config'] == cfg) & (scalars_wide['repetition'] == rep), vecPerfIndex] = value
# -
# ## Apply transformations
# Apply the optional per-column transformations declared in the config cell
# (empty by default); the second positional arg is na_action='ignore'.
for col, transform in transformations:
    scalars_wide[col] = scalars_wide[col].map(transform, 'ignore')
# ## Full factorial
# +
# Per-configuration statistics: mean, variance and a Student-t confidence
# interval half-width across the `repetitionsCount` repetitions.
for cfg in range(0, configsCount):
    for perfIndex, _ in perfIndexes:
        mean = scalars_wide[scalars_wide['config'] == cfg][perfIndex].mean()
        variance = scalars_wide[scalars_wide['config'] == cfg][perfIndex].var()
        # Half-width of the CI: t_{alpha, n-1} * sqrt(s^2 / n).
        _, positiveInterval = tuple(v*math.sqrt(variance/repetitionsCount) for v in stats.t.interval(tIntervalAlpha, repetitionsCount - 1))
        negerr = positiveInterval
        poserr = positiveInterval
        # Clamp the upper error bar to the metric's apparent upper bound
        # (1.0 for a percentage; userCount for msgsPerSlot — TODO confirm
        # that bound is intended).
        if perfIndex == 'coveredUsersPercent':
            poserr = min(1 - mean, positiveInterval)
        if perfIndex == 'Floorplan.msgsPerSlot:sum':
            userCount = scalars_wide['Floorplan.userCount'].max()
            poserr = min(userCount - mean, positiveInterval)
        scalars_wide.loc[scalars_wide['config'] == cfg, perfIndex + 'Mean'] = mean
        scalars_wide.loc[scalars_wide['config'] == cfg, perfIndex + 'Variance'] = variance
        scalars_wide.loc[scalars_wide['config'] == cfg, perfIndex + 'Negerr'] = negerr
        scalars_wide.loc[scalars_wide['config'] == cfg, perfIndex + 'Poserr'] = poserr
# Collapse to one row per configuration and drop per-repetition columns.
scalars_wide = scalars_wide[scalars_wide['repetition'] == 0]
for perfIndex, _ in perfIndexes:
    del scalars_wide[perfIndex]
del scalars_wide['repetition']
del scalars_wide['Floorplan.userCount']
#del scalars_wide['Floorplan.coveredUsers:sum']
del scalars_wide['config']
scalars_wide
# -
# Full-factorial plots: one figure per (x-axis factor, performance index);
# one error-bar curve per combination of the remaining factors.
for xFactor in factors:
    print('Plotting with', xFactor, 'on the x axis...')
    otherFactors = [fac for fac in factors if fac != xFactor]
    current = scalars_wide.sort_values([xFactor, *otherFactors])
    # Re-number rows per x value: rows sharing the same xFactor value get
    # consecutive indices, so a given index identifies one combination of
    # the other factors across all x values (one curve).
    count = 0
    lastVal = None
    for index, row in current.iterrows():
        # Fixed `!= None` -> `is not None` (identity check for None).
        if lastVal is not None and lastVal != row[xFactor]:
            count = 0
        current.loc[index, 'config'] = count
        count += 1
        lastVal = row[xFactor]
    x = current[xFactor].unique().tolist()
    for perfIndex, perfIndexDesc in perfIndexes:
        plt.figure(figsize=plotSize)
        plt.style.use(plotStyle)
        for cfg in range(0, int(current['config'].max()) + 1):
            y = current[current['config'] == cfg][perfIndex + 'Mean'].tolist()
            poserr = current[current['config'] == cfg][perfIndex + 'Poserr'].tolist()
            negerr = current[current['config'] == cfg][perfIndex + 'Negerr'].tolist()
            # Drop points whose mean is infinite (threshold never reached):
            # errorbar cannot draw them. zip replaces the manual counter.
            realx, realy, realne, realpe = [], [], [], []
            for xval, yval, ne, pe in zip(x, y, negerr, poserr):
                if not math.isinf(yval):
                    realx.append(xval)
                    realy.append(yval)
                    realne.append(ne)
                    realpe.append(pe)
            y = realy
            negerr = realne
            poserr = realpe
            err = [negerr, poserr]
            # Legend label: the fixed values of the non-x factors.
            lbl = ', '.join(fac + '=' + str(current[current['config'] == cfg][fac].tolist()[0])
                            for fac in otherFactors)
            plt.errorbar(x=np.array(realx), y=np.array(y), yerr=np.array(err), capsize=3, linestyle='-', marker='.', markersize=10, label=lbl)
        plt.title('Full factorial plot for ' + perfIndexDesc)
        plt.ylabel(perfIndex)
        plt.xlabel(xFactor)
        plt.legend()
        if saveFigures:
            fig = plt.gcf()
            fig.savefig('figures/' + get_file_name(perfIndex) + '-' + xFactor + '-ffplot.png')
        plt.show()
    print('########################################')
    print()
# ## Observations
#
# Of course increasing the broadcast radius is the best way to decrease the total number of messages sent, but since we want to optimize the energy efficiency of the network, this is not a viable way.
#
# We get a very large variance in the experiments with `R=8m; T=3s; m=5`. This is probably due to some experiments where the messages completely failed to reach more than just a bunch of users because of the low broadcast radius.
#
# If we want to avoid to increase `R`, we must reduce `m` and increase `T` to improve the energy efficiency. Probably it is best to avoid values lower than 9m or 10m for `R`, to avoid the huge increase in variance.
#
# For every value of `R`, except `R=9m`, the best config is with `T=5s` and `m=2`. In any case, with `R=9`, the config with `T=5s` and `m=2` is the second best config for the number of messages sent. Meaning that the combination of factors `T` and `m` are very important (contrarily to what our 2^kr analysis has found).
# ### Rerun this notebook
#
# To rerun this notebook, you can:
# - just rerun the simulations with the corresponding configuration: `./simulate.sh -s HighDensity -c HighDensityMessages -o messages` (you will get slightly different results)
# - download our datasets from `https://drive.google.com/file/d/1ZFRV2DecoTvax9lngEsuPPw8Cz1DXvLc/view?usp=sharing` (login with UNIPI institutional account)
# - use our seed to rerun the simulations. Add `seed-set = ${runnumber}24020` to the configuration
| analysis/HighDensity/messages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/KurtEmprese/OOP-1-2/blob/main/GUI_application.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="29yTXBAEywle" outputId="4edfe7ae-775a-407f-bb5e-2d7af58f01c7"
#@title Students Grade in OOP
# Colab form cell: the "#@title"/"#@param" comments are functional form
# annotations — leave them exactly as written.
Student_Name1 = "Enter your student name" #@param{type: "string"}
prelim = 100 #@param {type:"number"}
midterm = 100 #@param {type:"number"}
finals = 100 #@param {type:"number"}
# Semestral grade = unweighted mean of the three term grades.
semestral_grade = (prelim + midterm + finals)/3
# NOTE(review): Student_Name1 is collected but never printed; the messages
# hard-code "student 1".
print("The prelim grade of student 1 is " + str(prelim))
print("The midterm grade of student 1 is " + str(midterm))
print("The final grade of student 1 is " + str(finals))
print("The semestral grade of student 1 is " + str(semestral_grade))
#@title Gender
Gender = "Male"#@param ["Male", "Female"]
print("His gender is " + Gender)
Birthdate = '2022-04-07'#@param {type: "date"}
| GUI_application.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Ji et al. 2015 Processing
#
# I'm going to investigate the data from [Ji et al. 2015](http://www.sciencedirect.com/science/article/pii/S1934590915005056).
# I'd like to understand it better and parse it to make it more useful.
# I'm going to focus on the naive stem cells.
# +
import os
import subprocess
import cdpybio as cpb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import pybedtools as pbt
import seaborn as sns
import vcf as pyvcf
import cardipspy as cpy
import ciepy
# %matplotlib inline
# Name used for all of this notebook's output directories.
dy_name = 'ji_et_al_2015_processing'
import socket
# On the fl-hn1/fl-hn2 hosts, point pybedtools' temp files at a sandbox dir.
if socket.gethostname() == 'fl-hn1' or socket.gethostname() == 'fl-hn2':
    dy = os.path.join(ciepy.root, 'sandbox', 'tmp', dy_name)
    cpy.makedir(dy)
    pbt.set_tempdir(dy)
outdir = os.path.join(ciepy.root, 'output', dy_name)
cpy.makedir(outdir)
# Private output for downloaded supplementary tables (not shared publicly).
private_outdir = os.path.join(ciepy.root, 'private_output', dy_name)
cpy.makedir(private_outdir)
# -
# Gencode v19 reference annotations (BED files + transcript->gene mapping).
promoters = pbt.BedTool('/publicdata/gencode_v19_20151104/promoters_by_gene.bed')
exons = pbt.BedTool('/publicdata/gencode_v19_20151104/exons.bed')
genes = pbt.BedTool('/publicdata/gencode_v19_20151104/genes.bed')
transcript_to_gene = pd.read_table('/publicdata/gencode_v19_20151104/transcript_to_gene.tsv',
                                   index_col=0, squeeze=True, header=None)
# +
# Download supplementary tables
# Each table is fetched once via an IPython "!curl" shell magic (rendered
# here as a "# !curl" comment by jupytext) and cached in private_outdir.
peaks_fn = os.path.join(private_outdir, 'mmc3.xlsx')
if not os.path.exists(peaks_fn):
    url = ('http://www.sciencedirect.com/science/MiamiMultiMediaURL/'
           '1-s2.0-S1934590915005056/1-s2.0-S1934590915005056-mmc3.xlsx/'
           '274143/html/S1934590915005056/bf6bad434c4c4022025ca74898ee2614/mmc3.xlsx')
    # !curl {url} > {peaks_fn}
interactions_fn = os.path.join(private_outdir, 'mmc4.xlsx')
if not os.path.exists(interactions_fn):
    url = ('http://www.sciencedirect.com/science/MiamiMultiMediaURL/'
           '1-s2.0-S1934590915005056/1-s2.0-S1934590915005056-mmc4.xlsx/'
           '274143/html/S1934590915005056/8db92b9dbebd5aaf235d3a1d9d256eb2/mmc4.xlsx')
    # !curl {url} > {interactions_fn}
super_enhancers_fn = os.path.join(private_outdir, 'mmc5.xlsx')
if not os.path.exists(super_enhancers_fn):
    url = ('http://www.sciencedirect.com/science/MiamiMultiMediaURL/'
           '1-s2.0-S1934590915005056/1-s2.0-S1934590915005056-mmc5.xlsx/'
           '274143/html/S1934590915005056/2da741445f7de78804be668d429b287f/mmc5.xlsx')
    # !curl {url} > {super_enhancers_fn}
# +
# CTCF peaks
ctcf = pd.read_excel(peaks_fn, skiprows=3, sheetname='CTCF ChIP-seq peaks')
ctcf = ctcf[['chr', 'start', 'end']]
ctcf = ctcf.dropna()
ctcf.columns = ['chrom', 'start', 'end']
ctcf['start'] = ctcf.start.astype(int)
ctcf['end'] = ctcf.end.astype(int)
# Convert to a sorted, merged BedTool and save as BED.
s = '\n'.join(ctcf.chrom + '\t' + ctcf.start.astype(str) +
              '\t' + ctcf.end.astype(str)) + '\n'
ctcf_bt = pbt.BedTool(s, from_string=True)
ctcf_bt = ctcf_bt.sort()
ctcf_bt = ctcf_bt.merge()
ctcf_bt.saveas(os.path.join(outdir, 'ctcf_peaks.bed'))
# H3K27ac peaks
enh = pd.read_excel(peaks_fn, skiprows=3, sheetname='H3K27ac ChIP-seq peaks')
enh = enh[['chr', 'start', 'end']]
enh = enh.dropna()
enh.columns = ['chrom', 'start', 'end']
enh['start'] = enh.start.astype(int)
enh['end'] = enh.end.astype(int)
s = '\n'.join(enh.chrom + '\t' + enh.start.astype(str) +
              '\t' + enh.end.astype(str)) + '\n'
enh_bt = pbt.BedTool(s, from_string=True)
enh_bt = enh_bt.sort()
enh_bt = enh_bt.merge()
enh_bt.saveas(os.path.join(outdir, 'h3k27ac_peaks.bed'))
# Interactions
# Columns 8-10 hold 'T'/'F' flags; convert them to booleans on load.
interactions = pd.read_excel(interactions_fn, sheetname='Naive', skiprows=2,
                             converters={8:lambda x: {'T': True, 'F':False}[x],
                                         9:lambda x: {'T': True, 'F':False}[x],
                                         10:lambda x: {'T': True, 'F':False}[x]})
# Super-enhancers
# Keep only super-enhancers identified in the naive cells ('.' = absent).
super_enh = pd.read_excel(super_enhancers_fn, skiprows=2)
super_enh = super_enh[super_enh['SE_ID in naive'] != '.']
tdf = pd.DataFrame([cpb.general.parse_region(x) for x in super_enh.ID])
tdf[3] = super_enh.ID.values
s = '\n'.join(tdf.astype(str).apply(lambda x: '\t'.join(x), axis=1)) + '\n'
super_enh_bt = pbt.BedTool(s, from_string=True).sort()
super_enh_bt.saveas(os.path.join(outdir, 'super_enhancers.bed'));
# -
# ## CTCF, H3K27ac, super-enhancers
#
# The CTCF and H3K27ac peaks seem to simply just be the peaks. I've filtered the
# super-enhancers to only keep super-enhancers identified in the naive stem cells.
#
# ## Interactions
#
# Let's figure out what the interaction table has. I think the first six columns define
# interactions between the region `Chr_1:Start_1-End_1` and `Chr_2:Start_2-End_2`.
# The seventh column is just an ID. The paper says
#
# The naive hESC dataset contained ∼88 million unique paired-end tags (PETs) that identified 35,286 high-confidence cohesin-associated intra-chromosomal interactions (Table S3)...
#
# which corresponds to the length of this table (note that we are looking at the naive
# cells):
interactions.shape
# I'm not sure what the score in the eighth column is. Let's look a bit.
se = interactions.ix[:, 7]
print(se.min(), se.max())
np.log10(se).hist()
plt.ylabel('Number of interactions')
plt.xlabel('$\log_{10}$ score');
# I'm guessing the score is high for more confident interactions. I can look through the
# methods to verify this.
#
# From the paper:
#
# There were 12,987 CTCF-CTCF loops in naive hESCs, encompassing 37% of the genome and 33% of protein-coding genes (Table S3).
interactions[interactions.columns[-2]].sum()
# We can see that the second to last column identifies things that are loops in naive hESCs
# but are not at enhancers or promoters. We can see that the columns
#
#     CTCF-CTCF loops (not at enhancers or promoters)
#     CTCF-CTCF loops (putative insulated neighborhoods)
#
# are subsets of
#
#     CTCF-CTCF loops
# Subset checks: if the specialized loop flags are subsets of the CTCF-CTCF
# loop flag, both of these selections should be empty.
interactions[(interactions[interactions.columns[-3]] == False) &
             (interactions[interactions.columns[-2]] == True)].shape
interactions[(interactions[interactions.columns[-3]] == False) &
             (interactions[interactions.columns[-1]] == True)].shape
# However the last column is not a subset of the second to last column:
interactions[(interactions['CTCF-CTCF loops (not at enhancers or promoters)'] == True) &
             (interactions['CTCF-CTCF loops (putative insulated neighborhoods)'] == True)].shape
interactions[interactions.columns[-3:]].sum()
# So now the question is what to the last three columns mean. I believe
# the `CTCF-CTCF loops` column is `True` for any interaction where there are
# CTCF peaks and motifs in each interaction peak and the CTCF motifs
# are arranged in the expected convergent orientation. These are loops.
#
# I think the `CTCF-CTCF loops (not at enhancers or promoters)` is `False`
# for any interaction where at least one of the interaction peaks overlaps either
# a promoter ("promoter regions that are defined as +/- 2 kb of the Refseq TSS")
# or an enhancer (defined by the H3k27ac peaks) based on this from the methods:
#
# Operationally, an interaction was defined as associated with the regulatory element if one of the two PET peaks of the interaction overlapped with the regulatory element by at least 1 base pair.
#
# The insulated neighborhoods were defined according to a computational method
# but the basic idea is that interactions upstream or downstream of the loop
# should not interact with regions inside of the loop. We can see that of the 12,987
# CTCF loops that are not promoter/enhancer loops, 6,183 are insulated neighborhoods.
#
# Let's look at some of the insulated neighborhoods from one peak to the other.
# Intra-chromosomal insulated neighborhoods: build the "inner" span of each
# loop (from the leftmost peak's end to the rightmost peak's start).
t = interactions[interactions['CTCF-CTCF loops (putative insulated neighborhoods)']]
t = t[t.Chr_1 == t.Chr_2]
# BUGFIX: the original computed min(x['End_1'], x['End_1']) — always End_1.
# The notebook itself notes later that peak 1 is not always the upstream
# peak, so the inner boundary must compare End_1 against End_2.
mi = t.apply(lambda x: min(x['End_1'], x['End_2']), axis=1)
ma = t.apply(lambda x: max(x['Start_1'], x['Start_2']), axis=1)
s = '\n'.join(t.Chr_1 + '\t' + mi.astype(str) + '\t' + ma.astype(str)) + '\n'
bt = pbt.BedTool(s, from_string=True)
bt = bt.sort()
bt_merged = bt.merge()
bt.head()
res = bt_merged.intersect(bt, sorted=True, wo=True)
# Count neighborhoods lying strictly inside a merged span — i.e. contained
# within (the union of) other neighborhoods.
count = 0
for r in res:
    if int(r[1]) < int(r[4]) and int(r[2]) > int(r[5]):
        count += 1
print(count)
# We can see that insulated neighborhoods can share one end but differ on the other end:
#
# chr1 3408807 3531141
# chr1 3408807 3534941
#
# There are also some insulated neighborhoods which are completely contained within other
# insulated neighborhoods.
#
# ### Interaction sizes
#
# Let's look at the interactions a bit more. Here are the sizes
# of the first and second peaks:
# Width distribution of each interaction's first and second anchor peak.
se = interactions.End_1 - interactions.Start_1
np.log10(se.abs()).hist()
plt.ylabel('Number of first peaks')
plt.xlabel('$log_{10}$ size in bp');
se = interactions.End_2 - interactions.Start_2
np.log10(se.abs()).hist()
plt.ylabel('Number of second peaks')
plt.xlabel('$log_{10}$ size in bp');
# The paper says:
#
# There were 12,987 CTCF-CTCF loops in naive hESCs, encompassing 37% of the genome and 33% of protein-coding genes (Table S3). These CTCF-CTCF loops ranged from 4 to >800 kb and contained 0–24 protein-coding genes, with a median of 200 kb and one protein-coding gene per loop.
#
# After talking to <NAME>, it seems that the 200 kb size comes from the insulated neighborhoods.
#
# Here's the plot including the peaks:
# Loop sizes INCLUDING the anchor peaks (outer span: min start to max end)
# for CTCF-CTCF loops not at enhancers or promoters.
mi = interactions[interactions['CTCF-CTCF loops (not at enhancers or promoters)']].apply(
    lambda x: min(x['Start_1'], x['Start_2']), axis=1)
ma = interactions[interactions['CTCF-CTCF loops (not at enhancers or promoters)']].apply(
    lambda x: max(x['End_1'], x['End_2']), axis=1)
se = ma - mi
np.log10(se.abs()).hist(bins=50)
plt.ylabel('Number of loops')
plt.xlabel('$log_{10}$ size in bp')
plt.title('Median: {}'.format(se.median()));
# Here's the plot not including the peaks:
# Loop sizes EXCLUDING the anchor peaks (inner span: min end to max start).
# BUGFIX: min(x['End_1'], x['End_1']) -> min(x['End_1'], x['End_2']);
# peak 1 is not guaranteed to be the upstream peak (noted later in this
# notebook), so the original always used End_1.
mi = interactions[interactions['CTCF-CTCF loops (not at enhancers or promoters)']].apply(
    lambda x: min(x['End_1'], x['End_2']), axis=1)
ma = interactions[interactions['CTCF-CTCF loops (not at enhancers or promoters)']].apply(
    lambda x: max(x['Start_1'], x['Start_2']), axis=1)
se = ma - mi
np.log10(se.abs()).hist(bins=50)
plt.ylabel('Number of loops')
plt.xlabel('$log_{10}$ size in bp')
plt.title('Median: {}'.format(se.median()));
# These medians are closer to 100kb than 200kb.
# Insulated-neighborhood sizes INCLUDING the anchor peaks (outer span).
mi = interactions[interactions['CTCF-CTCF loops (putative insulated neighborhoods)']].apply(
    lambda x: min(x['Start_1'], x['Start_2']), axis=1)
ma = interactions[interactions['CTCF-CTCF loops (putative insulated neighborhoods)']].apply(
    lambda x: max(x['End_1'], x['End_2']), axis=1)
se = ma - mi
np.log10(se.abs()).hist(bins=50)
plt.ylabel('Number of loops')
plt.xlabel('$log_{10}$ size in bp')
plt.title('Median: {}'.format(se.median()));
# Insulated-neighborhood sizes EXCLUDING the anchor peaks (inner span).
# BUGFIX: min(x['End_1'], x['End_1']) -> min(x['End_1'], x['End_2']);
# peak order is not guaranteed, so the original always used End_1.
mi = interactions[interactions['CTCF-CTCF loops (putative insulated neighborhoods)']].apply(
    lambda x: min(x['End_1'], x['End_2']), axis=1)
ma = interactions[interactions['CTCF-CTCF loops (putative insulated neighborhoods)']].apply(
    lambda x: max(x['Start_1'], x['Start_2']), axis=1)
se = ma - mi
np.log10(se.abs()).hist(bins=50)
plt.ylabel('Number of loops')
plt.xlabel('$log_{10}$ size in bp')
plt.title('Median: {}'.format(se.median()));
# The median is closer to 200kb for insulated neighborhoods.
# The last column `CTCF-CTCF loops (putative insulated neighborhoods)` identifies
# loops that are "insulated neighborhoods." From the paper:
#
# If the CTCF-CTCF loops identified in hESC function as insulated neighborhoods, we expect that most cohesin-associated interactions with an endpoint inside the loop have their other endpoint within the loop.
#
# So an insulated neighborhood basically means that interactions that have a peak within a loop have
# their other peak in the loop as well.
# ## Interactions parsing and annotation
#
# I'm going to annotate these interactions to make them more useful.
# I'm going to annotate
#
# * whether the interaction is interchromosomal
# * whether each peak overlaps CTCF, H3K27ac, promoter (for which genes(s)), super-enhancer
# * genes contained within loop
#
# Let's start by renaming the columns to be more friendly and annotating inter- vs. intra- chromosomal.
# Friendlier column names: nep = "not at enhancers or promoters",
# pin = "putative insulated neighborhood".
interactions.columns = ['chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2', 'ID',
                        'score', 'ctcf_loop', 'nep_loop', 'pin_loop']
# Flag intra- vs inter-chromosomal interactions.
interactions['intra'] = True
interactions.ix[interactions.chrom1 != interactions.chrom2, 'intra'] = False
# I'll add in names (chr:start-end)
# for the peaks and the entire loop as well as the loop coordinates (`start1` is not always less
# than `start2` etc.) I'll also make a bed file for the loops. Some of this will be limited
# to intrachromosomal interactions.
# +
# "chr:start-end" names for each anchor peak.
interactions['peak1'] = (interactions.chrom1 + ':' +
                         interactions.start1.astype(str) + '-' +
                         interactions.end1.astype(str))
interactions['peak2'] = (interactions.chrom2 + ':' +
                         interactions.start2.astype(str) + '-' +
                         interactions.end2.astype(str))
# Loop coordinates only make sense for intra-chromosomal interactions.
# min/max over both peaks because peak 1 is not always upstream of peak 2.
tdf = interactions[interactions.intra]
loops = (tdf.chrom1 + ':' + tdf.apply(lambda x: x[['start1', 'start2']].min(), axis=1).astype(str) +
         '-' + tdf.apply(lambda x: x[['end1', 'end2']].max(), axis=1).astype(str))
loops_inner = (tdf.chrom1 + ':' + tdf.apply(lambda x: x[['end1', 'end2']].min(), axis=1).astype(str) +
               '-' + tdf.apply(lambda x: x[['start1', 'start2']].max(), axis=1).astype(str))
s = '\n'.join(loops.apply(lambda x: x.replace(':', '\t').replace('-', '\t'))) + '\n'
loops_bt = pbt.BedTool(s, from_string=True).sort()
loops_bt.saveas(os.path.join(outdir, 'loops.bt'))
s = '\n'.join(loops_inner.apply(lambda x: x.replace(':', '\t').replace('-', '\t'))) + '\n'
loops_inner_bt = pbt.BedTool(s, from_string=True).sort()
loops_inner_bt.saveas(os.path.join(outdir, 'loops_inner.bt'))
# Record loop names back on the table (NaN for inter-chromosomal rows).
interactions['loop'] = np.nan
interactions.ix[tdf.index, 'loop'] = loops
interactions['loop_inner'] = np.nan
interactions.ix[tdf.index, 'loop_inner'] = loops_inner
# -
# I'll make a bed file for the ChIA-PET peaks so I can work with those.
# All ChIA-PET anchor peaks (both ends of every interaction) as a sorted,
# merged, non-redundant BED file.
s = '\n'.join(interactions.chrom1 + '\t' + interactions.start1.astype(str) +
              '\t' + interactions.end1.astype(str)) + '\n'
s += '\n'.join(interactions.chrom2 + '\t' + interactions.start2.astype(str) +
               '\t' + interactions.end2.astype(str)) + '\n'
chia_peaks = pbt.BedTool(s, from_string=True).sort().merge()
chia_peaks.saveas(os.path.join(outdir, 'chia_peaks.bed'))
# Now let's annotate the ChIA-PET peaks with CTCF and H3K27ac. We have some
# information about this from the last column, but we don't know which exact
# peaks overlap these features.
def annotate_chia_peaks(bt, name, use_name_col=False):
    """Annotate ChIA-PET peaks with their overlaps against *bt*.

    Adds boolean columns '<name>1'/'<name>2' to the global ``interactions``
    table (True when the corresponding anchor peak overlaps a feature in
    *bt*) and pickles a ChIA-peak -> overlapping-features mapping to
    ``outdir``/chia_to_<name>.pickle.

    Parameters
    ----------
    bt : pybedtools.BedTool
        Sorted features to intersect with the merged ``chia_peaks``.
    name : str
        Column prefix and pickle-file suffix.
    use_name_col : bool
        If True, record the feature's name field (intersect output field 7)
        in the mapping instead of its coordinates. Defaults to False, which
        preserves the original behavior; added for consistency with the
        redefinition of this function further down in the notebook.

    Side effects: temporarily re-indexes the global ``interactions`` by
    peak name and writes a pickle into ``outdir``.
    """
    res = chia_peaks.intersect(bt, sorted=True, wo=True)
    tdf = res.to_dataframe()
    tdf.index = (tdf.chrom + ':' + tdf.start.astype(str) +
                 '-' + tdf.end.astype(str))
    tdf[name] = (tdf.name + ':' + tdf.score.astype(str) +
                 '-' + tdf.strand.astype(str))
    interactions.index = interactions.peak1
    interactions['{}1'.format(name)] = False
    ind = set(tdf.index) & set(interactions.index)
    interactions.ix[ind, '{}1'.format(name)] = True
    interactions.index = interactions.peak2
    interactions['{}2'.format(name)] = False
    ind = set(tdf.index) & set(interactions.index)
    interactions.ix[ind, '{}2'.format(name)] = True
    annot = {}
    for r in res:
        i = '{}:{}-{}'.format(r.chrom, r.start, r.end)
        if use_name_col:
            v = r.fields[6]
        else:
            v = '{}:{}-{}'.format(r.name, r.score, r.strand)
        annot[i] = annot.get(i, set()) | set([v])
    annot = pd.Series(annot)
    annot.to_pickle(os.path.join(outdir, 'chia_to_{}.pickle'.format(name)))
    interactions.index = range(interactions.shape[0])
    return interactions
# + active=""
# # s = '\n'.join(interactions.chrom1 + '\t' + interactions.start1.astype(str) +
# # '\t' + interactions.end1.astype(str)) + '\n'
# # p1 = pbt.BedTool(s, from_string=True).sort().merge()
# # s = '\n'.join(interactions.chrom2 + '\t' + interactions.start2.astype(str) +
# # '\t' + interactions.end2.astype(str)) + '\n'
# # p2 = pbt.BedTool(s, from_string=True).sort().merge()
#
# # These will be boolean columns indicating that a given ChIA-PET peak
# # overlaps an annotation.
# interactions['enh1'] = False
# interactions['enh2'] = False
# interactions['ctcf1'] = False
# interactions['ctcf2'] = False
#
# # Pairs of ChIA-PET/CTCF peaks that overlap.
# chia_ctcf = {}
# # Pairs of ChIA-PET/H3K27ac peaks that overlap.
# chia_enh = {}
#
# # CTCF
# interactions.index = interactions.peak1
# res = p1.intersect(ctcf_bt, sorted=True, wo=True)
# tdf = res.to_dataframe()
# tdf.index = (tdf.chrom + ':' + tdf.start.astype(str) +
# '-' + tdf.end.astype(str))
# tdf['ctcf'] = (tdf.name + ':' + tdf.score.astype(str) +
# '-' + tdf.strand.astype(str))
# interactions.ix[set(tdf.index), 'ctcf1'] = True
# for r in res:
# i = '{}:{}-{}'.format(r.chrom, r.start, r.end)
# v = '{}:{}-{}'.format(r.name, r.score, r.strand)
# chia_ctcf[i] = chia_ctcf.get(i, set()) | set([v])
#
# interactions.index = interactions.peak2
# res = p2.intersect(ctcf_bt, sorted=True, wo=True)
# tdf = res.to_dataframe()
# tdf.index = (tdf.chrom + ':' + tdf.start.astype(str) +
# '-' + tdf.end.astype(str))
# tdf['ctcf'] = (tdf.name + ':' + tdf.score.astype(str) +
# '-' + tdf.strand.astype(str))
# interactions.ix[set(tdf.index), 'ctcf2'] = True
# for r in res:
# i = '{}:{}-{}'.format(r.chrom, r.start, r.end)
# v = '{}:{}-{}'.format(r.name, r.score, r.strand)
# chia_ctcf[i] = chia_ctcf.get(i, set()) | set([v])
# chia_ctcf = pd.Series(chia_ctcf)
# chia_ctcf.to_pickle(os.path.join(outdir, 'chia_to_ctcf.pickle'))
#
# # H3K27ac
# interactions.index = interactions.peak1
# res = p1.intersect(enh_bt, sorted=True, wo=True)
# tdf = res.to_dataframe()
# tdf.index = (tdf.chrom + ':' + tdf.start.astype(str) +
# '-' + tdf.end.astype(str))
# tdf['enh'] = (tdf.name + ':' + tdf.score.astype(str) +
# '-' + tdf.strand.astype(str))
# interactions.ix[set(tdf.index), 'enh1'] = True
# for r in res:
# i = '{}:{}-{}'.format(r.chrom, r.start, r.end)
# v = '{}:{}-{}'.format(r.name, r.score, r.strand)
# chia_enh[i] = chia_enh.get(i, set()) | set([v])
#
# interactions.index = interactions.peak2
# res = p2.intersect(enh_bt, sorted=True, wo=True)
# tdf = res.to_dataframe()
# tdf.index = (tdf.chrom + ':' + tdf.start.astype(str) +
# '-' + tdf.end.astype(str))
# tdf['enh'] = (tdf.name + ':' + tdf.score.astype(str) +
# '-' + tdf.strand.astype(str))
# interactions.ix[set(tdf.index), 'enh2'] = True
# for r in res:
# i = '{}:{}-{}'.format(r.chrom, r.start, r.end)
# v = '{}:{}-{}'.format(r.name, r.score, r.strand)
# chia_enh[i] = chia_enh.get(i, set()) | set([v])
# chia_enh = pd.Series(chia_enh)
# chia_enh.to_pickle(os.path.join(outdir, 'chia_to_h3k27ac.pickle'))
#
# interactions.index = range(interactions.shape[0])
# -
# Flag anchor peaks that overlap CTCF peaks and H3K27ac (enhancer) peaks.
interactions = annotate_chia_peaks(ctcf_bt, 'ctcf')
interactions = annotate_chia_peaks(enh_bt, 'h3k27ac')
# Now let's annotate whether ChIA-PET peaks overlap promoters, exons,
# or gene bodies. We'll also annotate which genes are contained in a loop.
def annotate_chia_peaks(bt, name, use_name_col=False):
res = chia_peaks.intersect(bt, sorted=True, wo=True)
tdf = res.to_dataframe()
tdf.index = (tdf.chrom + ':' + tdf.start.astype(str) +
'-' + tdf.end.astype(str))
tdf[name] = (tdf.name + ':' + tdf.score.astype(str) +
'-' + tdf.strand.astype(str))
interactions.index = interactions.peak1
interactions['{}1'.format(name)] = False
ind = set(tdf.index) & set(interactions.index)
interactions.ix[ind, '{}1'.format(name)] = True
interactions.index = interactions.peak2
interactions['{}2'.format(name)] = False
ind = set(tdf.index) & set(interactions.index)
interactions.ix[ind, '{}2'.format(name)] = True
annot = {}
for r in res:
i = '{}:{}-{}'.format(r.chrom, r.start, r.end)
if use_name_col:
v = r.fields[6]
else:
v = '{}:{}-{}'.format(r.name, r.score, r.strand)
annot[i] = annot.get(i, set()) | set([v])
annot = pd.Series(annot)
annot.to_pickle(os.path.join(outdir, 'chia_to_{}.pickle'.format(name)))
interactions.index = range(interactions.shape[0])
return interactions
# +
# Annotate overlaps with promoters, exons, and whole gene bodies.
interactions = annotate_chia_peaks(promoters, 'promoter', use_name_col=True)
interactions = annotate_chia_peaks(exons, 'exon', use_name_col=True)
interactions = annotate_chia_peaks(genes, 'gene', use_name_col=True)
# Re-key the peak->promoter and peak->exon mappings so the values are gene IDs.
t = pd.read_pickle(os.path.join(outdir, 'chia_to_promoter.pickle'))
t = t.apply(lambda ids: {feat.split('_')[0] for feat in ids})
t.to_pickle(os.path.join(outdir, 'chia_to_promoter_gene.pickle'))
t = pd.read_pickle(os.path.join(outdir, 'chia_to_exon.pickle'))
t = t.apply(lambda ids: {transcript_to_gene[feat.split('_')[0]] for feat in ids})
t.to_pickle(os.path.join(outdir, 'chia_to_exon_gene.pickle'))
# -
# Now I'll find which genes are contained within loops. I'll go ahead and do this
# for all interactions but it's probably only meaningful for loops.
# Build a BED of intra-chromosomal loop spans and intersect with genes to
# find which genes fall fully (or only partially) inside each loop.
s = '\n'.join(interactions[interactions.intra].loop.apply(
    lambda x: '\t'.join(cpb.general.parse_region(x)))) + '\n'
bt = pbt.BedTool(s, from_string=True)
bt = bt.sort()
res = bt.intersect(genes, wo=True, sorted=True)
interaction_contains = {}
interaction_partially_contains = {}
for r in res:
    loop_key = '{}:{}-{}'.format(r.chrom, r.start, r.end)
    gene_id = r.fields[6]
    # Fully contained when the overlap length equals the gene's own length.
    if int(r.fields[5]) - int(r.fields[4]) == int(r.fields[-1]):
        interaction_contains.setdefault(loop_key, []).append(gene_id)
    else:
        interaction_partially_contains.setdefault(loop_key, []).append(gene_id)
interaction_contains = pd.Series(interaction_contains)
interaction_contains.to_pickle(os.path.join(outdir, 'interaction_contains_gene.pickle'))
interaction_partially_contains = pd.Series(interaction_partially_contains)
interaction_partially_contains.to_pickle(os.path.join(outdir, 'interaction_partially_contains_gene.pickle'))
# Invert the mapping: gene -> set of loops that fully contain it.
gene_to_containing_interactions = {}
for loop_key in interaction_contains.index:
    for gene_id in interaction_contains[loop_key]:
        gene_to_containing_interactions.setdefault(gene_id, set()).add(loop_key)
gene_to_containing_interactions = pd.Series(gene_to_containing_interactions)
gene_to_containing_interactions.to_pickle(
    os.path.join(outdir, 'gene_to_containing_interactions.pickle'))
interactions.to_csv(os.path.join(outdir, 'interactions.tsv'), sep='\t', index=None)
# ## [Tang et al. 2015](http://www.sciencedirect.com/science/article/pii/S0092867415015044)
# +
# NOTE(review): `url` is reassigned four times below, so only the last
# binding (HeLa_RNAPII) survives; the earlier URLs are kept for reference.
# GM12878_RNAPII
# http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSM1872887
url = ('http://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM1872887&format=file&file='
       'GSM1872887%5FGM12878%5FRNAPII%5FPET%5Fclusters%2Etxt%2Egz')
#s = cpb.general.read_gzipped_text_url(url)
# GM12878_CTCF
# http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSM1872886
url = ('http://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM1872886&format=file&file='
       'GSM1872886%5FGM12878%5FCTCF%5FPET%5Fclusters%2Etxt%2Egz')
# HeLa_CTCF
# http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSM1872888
url = ('http://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM1872888&format=file&file='
       'GSM1872888%5FHeLa%5FCTCF%5FPET%5Fclusters%2Etxt%2Egz')
# HeLa_RNAPII
# http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSM1872889
url = ('http://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM1872889&format=file&file='
       'GSM1872889%5FHeLa%5FRNAPII%5FPET%5Fclusters%2Etxt%2Egz')
| notebooks/Ji et al. 2015 Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prediction using normal score for wall street columns using the same data clusters.
# Here we will test how the prediction between using mixed receptive fields in time compares with non-time mixed receptive fields where the clusters are the same for each of the times.
# First as usual we load everything that we need.
# +
import numpy as np
from sklearn import svm, cross_validation
import h5py
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import sys
sys.path.append("../")
# -
# ## Without Spaces
# #### Load the code vectors and the features
# +
# Number of samples used for classification.
Ndata = 10000
# HDF5 database holding the nexa runs.
file_location = '../results_database/text_wall_street_columns_indp.hdf5'
# Load the letter sequence so each code vector can be paired with its letter.
text_directory = '../data/wall_street_letters_spaces.npy'
letters_sequence = np.load(text_directory)
Nletters = len(letters_sequence)
symbols = set(letters_sequence)
# Ten consecutive samples share the same letter, hence index // 10.
targets = np.array([letters_sequence[sample // 10] for sample in range(Ndata)])
# -
# #### Do the loop and calculate the predictions
# +
# Calculate the predictions
# Sweep the number of data (time) clusters from 10 to 36 in steps of 3.
Ntime_clusters_set = np.arange(10, 37, 3)
# Test accuracies (percent) collected per cluster count for each scheme.
scores_mixed = []
scores_indp = []
# Nexa parameters (fixed for the whole sweep).
Nspatial_clusters = 3
Nembedding = 3
# -
def _classification_score(run_name, Ntime_clusters):
    """Train a linear SVM on the softmax code vectors of one nexa run.

    Reads the run stored under `run_name` with the given number of data
    (time) clusters from `file_location`, fits an SVM on a 90/10
    train/test split, and returns the test accuracy in percent.
    """
    parameters_string = '/' + str(Nspatial_clusters)
    parameters_string += '-' + str(Ntime_clusters)
    parameters_string += '-' + str(Nembedding)
    # FIX: the original opened the HDF5 file twice per iteration and never
    # closed it; the context manager releases the handle deterministically.
    with h5py.File(file_location, 'r') as f:
        nexa = f[run_name + parameters_string]
        code_vectors_softmax = np.array(nexa['code-vectors-softmax'])
    # Now we need to classify.
    X = code_vectors_softmax[:Ndata]
    y = targets
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.10)
    clf_linear = svm.SVC(C=1.0, kernel='linear')
    clf_linear.fit(X_train, y_train)
    return clf_linear.score(X_test, y_test) * 100.0


for Ntime_clusters in Ntime_clusters_set:
    print(Ntime_clusters)
    # Mixed receptive fields ('/test') vs. independent clusters ('/indep').
    scores_mixed.append(_classification_score('/test', Ntime_clusters))
    scores_indp.append(_classification_score('/indep', Ntime_clusters))
# +
# Plot accuracy against the number of data clusters for both schemes.
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
ax.plot(Ntime_clusters_set, scores_indp, 'o-', label='independent', lw=2, markersize=10)
ax.plot(Ntime_clusters_set, scores_mixed, 'o-', label='mixed', lw=2, markersize=10)
ax.set_ylim(0, 105)
ax.set_ylabel('Accuracy')
ax.set_xlabel('Number of Data Clusters')
# FIX: corrected the typo "Sapces" -> "Spaces" in the displayed title.
ax.set_title('Accuracy vs Number of Data Clusters for different features (Without Spaces)')
ax.legend()
# -
targets[0:20]
| presentations/2016-03-02(Prediction using normal score wall street columns same data clusters).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quantum Chemistry I: Obtaining the Qubit Hamiltonian for H2 and LiH
#
# Lectures 22 – 27 introduce the techniques used to solve quantum chemistry problems using gate-based quantum computers, with emphasis on superconducting qubits. The first three lectures describe the structure of quantum chemistry problems and discuss methods for converting these problems into ones that can be mapped onto a gate-based quantum computer, while the next three lectures focus on extracting the best performance from the quantum hardware to solve quantum chemistry problems by implementing variational quantum eigensolvers.
#
# - Download the lecture notes [here](https://github.com/qiskit-community/intro-to-quantum-computing-and-quantum-hardware/blob/master/lectures/introqcqh-lecture-notes-8.pdf?raw=true)
# - Download the lab notebook (and solutions) for the last three lectures [here](https://github.com/qiskit-community/intro-to-quantum-computing-and-quantum-hardware/blob/master/labs/introqcqh-lab-8.zip?raw=true)
#
# <div class="youtube-wrapper"><iframe src="https://www.youtube.com/embed/2XEjrwWhr88" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe></div>
#
# <div class="youtube-wrapper"><iframe src="https://www.youtube.com/embed/DWOfMWPKHDU" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe></div>
#
# <div class="youtube-wrapper"><iframe src="https://www.youtube.com/embed/AZQDCWX_aqA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe></div>
#
#
| notebooks/summer-school/2020/lec-07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # What is Pytorch?
# A replacement for NumPy that uses the power of GPUs
#
from __future__ import print_function
import torch
# Uninitialized 5x3 tensor: values are whatever the allocated memory held.
x=torch.empty(5,3)
x
# Uniform random values in [0, 1).
x=torch.rand(5,3)
x
torch.zeros(5,3,dtype=torch.long)
torch.tensor([5.5,3])
# +
x= x.new_ones(5,3,dtype=torch.double) #new methods take in sizes
print(x)
s=torch.randn_like(x,dtype=torch.float) #override the datatype
print(s)
# -
print(x.size())
# torch.Size is in fact a tuple, so it supports all tuple operations.
y=torch.rand(5,3)
# NOTE(review): x is float64 here while y is float32 — recent torch
# promotes mixed dtypes, but older versions raise; confirm target version.
print(x+y)
# .view() reshapes without copying; -1 infers that dimension from the rest.
x=torch.randn(4,4)
y=x.view(16)
z=x.view(-1,8)
print(x.size() ,y.size(),z.size())
print(x)
x=torch.randn(1) # Only one element tensors can be converted to python scalars
print(x.item())
# # Numpy Bridge
# Converting a Torch Tensor to NumPy arrays and vice versa.
# Both will share their underlying memory locations.
a=torch.ones(5)
print(a)
# Tensor -> NumPy array; both views share the same memory buffer.
b=a.numpy()
print(b)
# In-place add on the tensor is visible through the NumPy view as well.
a.add_(1)
print(a)
print(b)
# Converting numpy to torch
import numpy as np
a=np.ones(5)
b=torch.from_numpy(a)
# Mutating the array in place updates the tensor too (shared buffer).
np.add(a,1,out=a)
print(a)
print(b)
# # Cuda Tensors
# Tensors can be moved from one device to another
# Run this cell only when CUDA is available; torch.device objects are used
# to move tensors in and out of the GPU.
if torch.cuda.is_available():
    device=torch.device("cuda") # CUDA device object
    # Create y directly on the GPU; move the existing x there with .to().
    y=torch.ones_like(x,device=device)
    x=x.to(device)
    z=x+y
    print(z)
    # .to() can change device and dtype at the same time.
    print(z.to("cpu",torch.double))
# # AutoGrad :Automation Differentiation
# Pytorch is autograd package.
#
# The autograd package provides automatic differentiation for all operations on tensors.
# # Tensor
#
# torch.Tensor is the central class of the package. If you set its attribute .requires_grad as True, it starts to track all operations on it. When you finish your computation you can call .backward() and have all the gradients computed automatically. The gradient for this tensor will be accumulated into .grad attribute.
#
# To stop a tensor from tracking history, you can call .detach() to detach it from the computation history, and to prevent future computation from being tracked.
#
# To prevent tracking history (and using memory), you can also wrap the code block in with torch.no_grad():. This can be particularly helpful when evaluating a model because the model may have trainable parameters with requires_grad=True, but for which we don’t need the gradients.
#
# There’s one more class which is very important for autograd implementation - a Function.
#
# Tensor and Function are interconnected and build up an acyclic graph, that encodes a complete history of computation. Each tensor has a .grad_fn attribute that references a Function that has created the Tensor (except for Tensors created by the user - their grad_fn is None).
#
# If you want to compute the derivatives, you can call .backward() on a Tensor. If Tensor is a scalar (i.e. it holds a one element data), you don’t need to specify any arguments to backward(), however if it has more elements, you need to specify a gradient argument that is a tensor of matching shape.
import torch
# Create a tensor with requires_grad=True so autograd tracks operations on it.
x=torch.ones(2,2,requires_grad=True)
print(x)
y=x+2
print(y)
# y was produced by an operation, so it has a grad_fn (AddBackward).
print(y.grad_fn)
# +
z = y * y * 3
out = z.mean()
print(z, out)
# -
# requires_grad_() toggles tracking in place on an existing tensor.
a=torch.randn(2,2)
a=((a*3)/(a-1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b=(a*a).sum()
print(b.grad_fn)
# # Gradients
# Let's backprop now. Because out contains a single scalar, out.backward()
# needs no gradient argument; x.grad then holds d(out)/dx.
out.backward()
print(x.grad)
# +
# Repeatedly double y until its norm exceeds 1000.
x=torch.randn(3,requires_grad=True)
y=x*2
while y.data.norm() < 1000:
    y=y*2
print(y)
# -
# y is non-scalar, so backward() needs an explicit gradient vector v.
v=torch.tensor([0.1,1.0,0.0001],dtype=torch.float)
y.backward(v)
print(x.grad)
# +
# Stop tracking history: inside torch.no_grad() results do not require grad.
print(x.requires_grad)
print((x**2).requires_grad)
with torch.no_grad():
    print((x**2).requires_grad)
# -
# # Neural Networks
# Using package torch.nn
#
# nn.Module contains layers, and a method forward(input) that returns the output
# # A typical training procedure for a neural network is as follows:
#
# -> Define the neural network that has some learnable parameters (or weights)
# -> Iterate over a dataset of inputs
# -> Process input through the network
# -> Compute the loss (how far is the output from being correct)
# -> Propagate gradients back into the network’s parameters
# -> Update the weights of the network, typically using a simple update rule:
#
# weight = weight - learning_rate * gradient
#
# # Define a Simple NN
import torch
import torch.nn as nn
import torch.nn.functional as F
# +
class Net(nn.Module):
    """LeNet-style CNN: 1x32x32 input image -> 10 output logits."""

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1 -> 6 -> 16 channels, 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: flattened 16*5*5 features -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass; x has shape (N, 1, 32, 32)."""
        # Conv + ReLU + 2x2 max-pool, twice.
        pooled = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        pooled = F.max_pool2d(F.relu(self.conv2(pooled)), 2)
        # Flatten every sample and run the fully connected head.
        flat = pooled.view(-1, self.num_flat_features(pooled))
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Number of elements per sample (all dimensions except batch)."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count


net = Net()
print(net)
# -
# You just have to define the forward function, and the backward function (where gradients are computed) is automatically defined for you using autograd. You can use any of the Tensor operations in the forward function.
#
# The learnable parameters of a model are returned by net.parameters()
# All learnable parameters (weights and biases) of the network.
para =list(net.parameters())
print(len(para))
print(para[0].size()) # conv1 weight
# NOTE(review): `input` shadows the builtin of the same name.
# LeNet expects 32x32 inputs; this is a batch of one single-channel image.
input = torch.randn(1,1,32,32)
out = net(input)
print(out)
# Recap:
#
# -> torch.Tensor - A multi-dimensional array with support for autograd operations like backward(). Also holds the gradient w.r.t. the tensor.
#
# -> nn.Module - Neural network module. Convenient way of encapsulating parameters, with helpers for moving them to GPU, exporting, loading, etc.
#
# -> nn.Parameter - A kind of Tensor, that is automatically registered as a parameter when assigned as an attribute to a Module.
#
# -> autograd.Function - Implements forward and backward definitions of an autograd operation. Every Tensor operation creates at least a single Function node that connects to functions that created a Tensor and encodes its history.
#
#
# # Loss Function
# A loss function takes the pair of inputs and computes a value that estimates how far away the output is from the target.
# +
output=net(input)
target = torch.randn(10) #A dummy target,
target = target.view(1,-1)  # reshape to (1, 10) to match output's shape
criterion = nn.MSELoss()
loss = criterion(output,target)
print(loss)
# -
# # Follow the loss in backward direction, using .grad_fn
# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
#
# -> view -> linear -> relu -> linear -> relu -> linear
#
# -> MSELoss
#
# -> loss
# Walk the autograd graph backwards from the loss node.
print(loss.grad_fn) #MSELoss
print(loss.grad_fn.next_functions[0][0]) #Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) #Relu
# # Backprop
# To backpropagate the error all we have to do is call loss.backward().
# Existing gradients must be cleared first, or they accumulate.
# +
net.zero_grad() #zeros the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
# -
# # Update the weights
# weight = weight - learning rate *gradient
learning_rate=0.01
# Manual SGD step: w <- w - lr * grad for every parameter.
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)
# However, as you use neural networks, you want to use various different update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc. To enable this, we built a small package: torch.optim that implements all these methods. Using it is very simple:
# +
import torch.optim as optim
#create your optimizer
optimizer = optim.SGD(net.parameters(),lr=0.01)
#in your training loop:
optimizer.zero_grad() #zero the gradient buffers
output = net(input)
loss = criterion(output,target)
loss.backward()
optimizer.step() #Does the update
# -
| Pytorch Work/Pytorch Tutorials.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kumardesappan/colab-notebooks/blob/main/create_onnx_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="JcCl1oOBxEFh" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="181a3a60-59a9-4a24-c0d3-7afed17d7415"
# #!pip install onnx
import torch
import torch.onnx
import onnx
import onnx.utils
class LoopModel(torch.nn.Module):
    """BatchNorm followed by four bilinear resizes of the same input.

    forward() returns four tensors resized by: the configured scale,
    the identity size, a fixed 16x16, and an equivalent scale_factor.
    """

    def __init__(self, num_ch, scale, size):
        super().__init__()
        self.batch_norm = torch.nn.BatchNorm2d(num_ch)
        # Defined but never called in forward(); kept for graph parity.
        self.conv = torch.nn.Conv2d(3, num_ch, kernel_size=1, stride=2)
        self.scale = scale
        self.size = size

    def forward(self, x):
        normed = self.batch_norm(x)
        in_h, in_w = normed.shape[2:]
        up_h, up_w = in_h * self.scale, in_w * self.scale
        resize = torch.nn.functional.interpolate
        a = resize(normed, size=(up_h, up_w), mode='bilinear', align_corners=True)
        b = resize(normed, size=(in_h, in_w), mode='bilinear', align_corners=True)
        c = resize(normed, size=(16, 16), mode='bilinear', align_corners=True)
        d = resize(normed, scale_factor=(up_h // in_h, up_w // in_w),
                   mode='bilinear', align_corners=True)
        # Each branch is normalized again (same order as before: a, b, c, d).
        return (self.batch_norm(a), self.batch_norm(b),
                self.batch_norm(c), self.batch_norm(d))
num_ch = 3
scale = 2
opset_version = 11
model = LoopModel(num_ch, scale, 16.0)
dummy_input = torch.ones(1, 3, 128, 256, dtype=torch.float)
# Output filenames encode the scale and opset version.
name = 'resize_'+str(scale)+'x_v'+str(opset_version)+'.onnx'
name_shape = 'resize_'+str(scale)+'x_v'+str(opset_version)+'_shape.onnx'
# Trace the model with the dummy input and export it to ONNX.
torch.onnx.export(model, dummy_input, name, verbose=False,opset_version=opset_version, do_constant_folding =True)
onnx_model = onnx.load(name) # load onnx model
# Run ONNX shape inference and save the annotated graph separately.
infer_onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
onnx.save(infer_onnx_model,name_shape)
print('Before shape inference, the shape info of Y is:\n{}'.format(onnx_model.graph.value_info))
print('After shape inference, the shape info of Y is:\n{}'.format(infer_onnx_model.graph.value_info))
| create_onnx_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.020595, "end_time": "2022-03-31T14:04:34.631874", "exception": false, "start_time": "2022-03-31T14:04:34.611279", "status": "completed"} tags=[]
# # Feature Tokenizer Transformer
# Featured in the paper [Revisiting Deep Learning Models for Tabular Data (2021, June)](https://arxiv.org/abs/2106.11959) Feature Tokenizer Transformer is a simple adaptation of the Transformer architecture for the tabular domain. In a nutshell, Feature Tokenizer Transformer transforms all features (categorical and numerical) to embeddings and applies a stack of Transformer layers to the embeddings. Thus, every Transformer layer operates on the feature level of one object.
#
# In this notebook we will be implementing Feature Tokenizer Transformer using TensorFlow 2 from scratch.
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 6.907577, "end_time": "2022-03-31T14:04:41.559509", "exception": false, "start_time": "2022-03-31T14:04:34.651932", "status": "completed"} tags=[]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers as L
from tensorflow_addons.activations import sparsemax
from tensorflow.data import Dataset
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
import joblib
pd.options.display.max_columns = 300
# + [markdown] papermill={"duration": 0.018805, "end_time": "2022-03-31T14:04:41.597609", "exception": false, "start_time": "2022-03-31T14:04:41.578804", "status": "completed"} tags=[]
# # Data
# Loading the train and test csv files into `pandas.DataFrame` and splitting the columns as features and target.
#
# We will be using Stratified K folds as our local cross validation.
# + papermill={"duration": 32.15078, "end_time": "2022-03-31T14:05:13.766160", "exception": false, "start_time": "2022-03-31T14:04:41.615380", "status": "completed"} tags=[]
data = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
# Drop rows duplicated across all feature columns (column 0 is row_id).
data = data.drop_duplicates(subset=data.columns[1:]).reset_index(drop=True)
print(data.shape)
data.head()
# + papermill={"duration": 14.715904, "end_time": "2022-03-31T14:05:28.504193", "exception": false, "start_time": "2022-03-31T14:05:13.788289", "status": "completed"} tags=[]
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
print(test.shape)
X_test = test.drop(['row_id'], axis=1)
# + papermill={"duration": 0.163677, "end_time": "2022-03-31T14:05:28.690210", "exception": false, "start_time": "2022-03-31T14:05:28.713140", "status": "completed"} tags=[]
X = data.drop(['row_id', 'target'], axis=1)
# One-hot encode the string target for the softmax output head.
y = pd.get_dummies(data['target'])
# + papermill={"duration": 0.031217, "end_time": "2022-03-31T14:05:28.744357", "exception": false, "start_time": "2022-03-31T14:05:28.713140", "status": "completed"} tags=[]
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
# + [markdown] papermill={"duration": 0.022543, "end_time": "2022-03-31T14:05:28.793311", "exception": false, "start_time": "2022-03-31T14:05:28.770768", "status": "completed"} tags=[]
# # Model
# **Creating some data utility classes**:
#
# `DataConfig` helps to segregate the features into numeric features and categorical features and maintain a vocabulary for the categorical ones.
#
# `DataLoader` class creates `tf.data.Dataset` objects from `pandas.DataFrame` to ensure efficiency in the input pipeline to the model.
# + papermill={"duration": 0.035478, "end_time": "2022-03-31T14:05:28.851093", "exception": false, "start_time": "2022-03-31T14:05:28.815615", "status": "completed"} tags=[]
class DataConfig:
    """Describes the feature columns: numeric names plus categorical vocabularies."""

    def __init__(self, numeric_feature_names, categorical_features_with_vocabulary):
        self.NUMERIC_FEATURE_NAMES = numeric_feature_names
        self.CATEGORICAL_FEATURES_WITH_VOCABULARY = categorical_features_with_vocabulary
        # Derived views: categorical names are the vocabulary mapping's keys;
        # the full feature list is numeric followed by categorical.
        self.CATEGORICAL_FEATURE_NAMES = list(categorical_features_with_vocabulary)
        self.FEATURE_NAMES = self.NUMERIC_FEATURE_NAMES + self.CATEGORICAL_FEATURE_NAMES
class DataLoader:
    """Builds batched tf.data.Dataset objects from pandas DataFrames."""

    @classmethod
    def from_df(cls, X, y=None, batch_size=1024):
        """Return a Dataset of (features, labels) pairs, or features only when y is None."""
        features = {col: X[col].values.tolist() for col in X.columns}
        if y is None:
            ds = Dataset.from_tensor_slices(features)
        else:
            ds = Dataset.from_tensor_slices((features, y.values.tolist()))
        return ds.batch(batch_size)
# + [markdown] papermill={"duration": 0.022767, "end_time": "2022-03-31T14:05:28.898555", "exception": false, "start_time": "2022-03-31T14:05:28.875788", "status": "completed"} tags=[]
# **Creating Input Layers and Feature Encoding Layers**
#
# `get_inputs` returns a dictionary of Input Layers based on the data types of the feature columns mentioned in the `DataConfig` object.
#
# `encode_inputs` applies StringLookup and Embedding Layer to the categorical features and Reshapes the Numeric Features in order to encode the inputs.
# + papermill={"duration": 0.039, "end_time": "2022-03-31T14:05:28.960429", "exception": false, "start_time": "2022-03-31T14:05:28.921429", "status": "completed"} tags=[]
def get_inputs(config):
    """Create one Keras Input per feature: numeric -> float32, categorical -> string."""
    inputs = {}
    for feature_name in config.FEATURE_NAMES:
        is_numeric = feature_name in config.NUMERIC_FEATURE_NAMES
        inputs[feature_name] = L.Input(
            name=feature_name,
            shape=(),
            dtype=tf.float32 if is_numeric else tf.string,
        )
    return inputs
def encode_inputs(inputs, config, use_embeddings=False, embedding_dim=32, prefix="", concat_features=False):
    """Encode raw Keras inputs into model-ready feature tensors.

    Categorical inputs pass through a StringLookup (and optionally an
    Embedding); numeric inputs are reshaped to (batch, 1).  Returns either
    one concatenated tensor or the pair (cat_features, num_features).
    """
    cat_features = []
    num_features = []
    for feature_name in inputs:
        if feature_name in config.CATEGORICAL_FEATURE_NAMES:
            vocabulary = config.CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # "int" output feeds an Embedding; "binary" yields a multi-hot vector.
            lookup = L.StringLookup(
                vocabulary=vocabulary,
                mask_token=None,
                num_oov_indices=0,
                output_mode="int" if use_embeddings else "binary",
                name=f"{prefix}{feature_name}_lookup",
            )
            if use_embeddings:
                encoded_feature = lookup(inputs[feature_name])
                embedding = L.Embedding(
                    input_dim=len(vocabulary),
                    output_dim=embedding_dim,
                    name=f"{prefix}{feature_name}_embeddings",
                )
                encoded_feature = embedding(encoded_feature)
            else:
                encoded_feature = lookup(
                    L.Reshape((1,), name=f"{prefix}{feature_name}_reshape")(inputs[feature_name])
                )
            cat_features.append(encoded_feature)
        else:
            # Numeric feature: just give it an explicit (batch, 1) shape.
            encoded_feature = L.Reshape((1,), name=f"{prefix}{feature_name}_reshape")(inputs[feature_name])
            num_features.append(encoded_feature)
    features = (
        L.Concatenate(name=f"{prefix}inputs_concatenate")(cat_features + num_features)
        if concat_features
        else (cat_features, num_features)
    )
    return features
# + [markdown] papermill={"duration": 0.022304, "end_time": "2022-03-31T14:05:29.005787", "exception": false, "start_time": "2022-03-31T14:05:28.983483", "status": "completed"} tags=[]
# **Defining Model Configurations**
# * Number of Outputs
# * Activation of the Output Layer
# * Number of Transformer Blocks
# * Number of heads in the Transformer Blocks
# * Embedding Dimension for the features
# * Dimesion of the Dense Projections in the transfomer blocks
# + papermill={"duration": 0.032786, "end_time": "2022-03-31T14:05:29.061345", "exception": false, "start_time": "2022-03-31T14:05:29.028559", "status": "completed"} tags=[]
class FeatureTokenizerTransformerConfig:
    """Hyper-parameters for the Feature Tokenizer Transformer model."""

    def __init__(self, num_outputs, out_activation, num_transformer_blocks=2,
                 num_heads=8, embedding_dim=32, dense_dim=16):
        # Output head.
        self.NUM_OUT = num_outputs
        self.OUT_ACTIVATION = out_activation
        # Transformer stack.
        self.NUM_TRANSFORMER_BLOCKS = num_transformer_blocks
        self.NUM_HEADS = num_heads
        # Per-feature token width and feed-forward width.
        self.EMBEDDING_DIM = embedding_dim
        self.DENSE_DIM = dense_dim
# + [markdown] papermill={"duration": 0.022108, "end_time": "2022-03-31T14:05:29.108372", "exception": false, "start_time": "2022-03-31T14:05:29.086264", "status": "completed"} tags=[]
# **Defining a standard Transformer Block**
# + papermill={"duration": 0.036378, "end_time": "2022-03-31T14:05:29.167099", "exception": false, "start_time": "2022-03-31T14:05:29.130721", "status": "completed"} tags=[]
class TransformerBlock(L.Layer):
    """Post-norm Transformer encoder block: self-attention + feed-forward,
    each wrapped in a residual connection and LayerNormalization."""

    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention = L.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.dense_proj = tf.keras.Sequential([L.Dense(dense_dim, activation="relu"), L.Dense(embed_dim)])
        self.layernorm1 = L.LayerNormalization()
        self.layernorm2 = L.LayerNormalization()

    def call(self, inputs, mask=None):
        if mask is not None:
            # FIX: the original wrote mask[: tf.newaxis, :], which parses as
            # mask[:None, :] — a no-op slice.  Insert the broadcast axis so
            # the mask has shape (batch, 1, seq) as MultiHeadAttention's
            # attention_mask expects.
            mask = mask[:, tf.newaxis, :]
        attention_output = self.attention(inputs, inputs, attention_mask=mask)
        # Residual + norm around attention, then around the feed-forward.
        proj_input = self.layernorm1(inputs + attention_output)
        proj_output = self.dense_proj(proj_input)
        return self.layernorm2(proj_input + proj_output)
# + [markdown] papermill={"duration": 0.023095, "end_time": "2022-03-31T14:05:29.212849", "exception": false, "start_time": "2022-03-31T14:05:29.189754", "status": "completed"} tags=[]
# **Defining the Model**
# The model takes Inputs Layers and then encodes the features from the functions defined above, the numerical features are then passed through a Dense layer of the same dimensions as the embeddings of the categorical features.
#
# All the feature embeddings are then stacked and then passed through a series of Transformer Blocks followed by the Global Average Pooling and Final Output Layer
# + papermill={"duration": 0.037916, "end_time": "2022-03-31T14:05:29.273653", "exception": false, "start_time": "2022-03-31T14:05:29.235737", "status": "completed"} tags=[]
class FeatureTokenizerTransformer:
    """Factory for the Feature Tokenizer Transformer Keras model."""

    @classmethod
    def from_config(cls, data_config, model_config, name):
        """Build and return an uncompiled keras.Model from the two configs."""
        inputs = get_inputs(data_config)
        cat_features, num_features = encode_inputs(
            inputs,
            data_config,
            use_embeddings=True,
            embedding_dim=model_config.EMBEDDING_DIM,
            prefix="",
            concat_features=False,
        )
        # "Feature tokenizer": project each numeric scalar into the same
        # embedding space as the categorical embeddings.
        num_features = [
            L.Dense(model_config.EMBEDDING_DIM, name=f"{feature_name}_embeddings")(feat)
            for feat, feature_name in zip(num_features, data_config.NUMERIC_FEATURE_NAMES)
        ]
        # Stack per-feature embeddings into (batch, n_features, embedding_dim).
        # FIX: the reshape previously hard-coded 32; use EMBEDDING_DIM so any
        # configured embedding size works.
        features = L.Concatenate(axis=1, name="feature_embeddings_stack")(
            [
                L.Reshape((1, model_config.EMBEDDING_DIM), name=f"{feat_name}_reshape_2")(feat)
                for feat, feat_name in zip(
                    (num_features + cat_features), data_config.FEATURE_NAMES
                )
            ]
        )
        for _ in range(model_config.NUM_TRANSFORMER_BLOCKS):
            features = TransformerBlock(
                embed_dim=model_config.EMBEDDING_DIM,
                dense_dim=model_config.DENSE_DIM,
                num_heads=model_config.NUM_HEADS,
            )(features)
        # NOTE(review): pooling is max over the token axis, though the
        # surrounding text mentions average pooling — confirm which is intended.
        features = L.GlobalMaxPooling1D()(features)
        outputs = L.Dense(
            units=model_config.NUM_OUT,
            activation=model_config.OUT_ACTIVATION,
            name="outputs",
        )(features)
        return keras.Model(inputs=inputs, outputs=outputs, name=name)
# + [markdown] papermill={"duration": 0.022378, "end_time": "2022-03-31T14:05:29.318065", "exception": false, "start_time": "2022-03-31T14:05:29.295687", "status": "completed"} tags=[]
# **Creating instances of the various classes defined so far**
# + papermill={"duration": 0.030625, "end_time": "2022-03-31T14:05:29.371645", "exception": false, "start_time": "2022-03-31T14:05:29.341020", "status": "completed"} tags=[]
# All columns in this dataset are numeric; no categorical vocabularies needed.
data_config = DataConfig(
    numeric_feature_names=X.columns.tolist(), categorical_features_with_vocabulary={}
)
# One softmax output per one-hot target column (multi-class classification).
model_config = FeatureTokenizerTransformerConfig(num_outputs=len(y.columns), out_activation='softmax')
# + papermill={"duration": 10.291903, "end_time": "2022-03-31T14:05:39.686006", "exception": false, "start_time": "2022-03-31T14:05:29.394103", "status": "completed"} tags=[]
# Build a throwaway model once to sanity-check the architecture and shapes.
blank_model = FeatureTokenizerTransformer.from_config(data_config, model_config, name='ftt')
blank_model.summary()
# + papermill={"duration": 0.042795, "end_time": "2022-03-31T14:05:39.763510", "exception": false, "start_time": "2022-03-31T14:05:39.720715", "status": "completed"} tags=[]
# Upper bound on epochs; EarlyStopping below usually stops training sooner.
MAX_EPOCHS = 50


def get_callbacks():
    """Return fresh callback instances (one set per fold/model).

    A factory is used instead of a shared list because Keras callbacks are
    stateful and must not be reused across fits. (Converted from a lambda
    assigned to a name, which PEP 8 / E731 discourages.)
    """
    return [
        keras.callbacks.EarlyStopping(min_delta=1e-4, patience=3, verbose=1, restore_best_weights=True),
        keras.callbacks.ReduceLROnPlateau(patience=2, verbose=1),
    ]
# + [markdown] papermill={"duration": 0.029339, "end_time": "2022-03-31T14:05:39.822871", "exception": false, "start_time": "2022-03-31T14:05:39.793532", "status": "completed"} tags=[]
# # Training Loop
# + papermill={"duration": 31061.028116, "end_time": "2022-03-31T22:43:20.879702", "exception": false, "start_time": "2022-03-31T14:05:39.851586", "status": "completed"} tags=[]
preds = []
# Stratified K-fold cross-validation: one model per fold; test-set predictions
# from every fold are collected and averaged later for the submission.
for fold, (train_index, valid_index) in enumerate(skf.split(X, data['target'])):
    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]
    y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
    # Fit the scaler on the training fold only (no leakage into valid/test).
    scaler = StandardScaler().fit(X_train)
    X_train = pd.DataFrame(scaler.transform(X_train), columns=X.columns)
    X_valid = pd.DataFrame(scaler.transform(X_valid), columns=X.columns)
    x_test = pd.DataFrame(scaler.transform(X_test), columns=X.columns)
    data_train = DataLoader.from_df(X_train, y_train, batch_size=512)
    data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=512)
    data_test = DataLoader.from_df(x_test, batch_size=512)
    # Fresh, untrained model for every fold.
    model = FeatureTokenizerTransformer.from_config(data_config, model_config, name=f'ftt_fold_{fold}')
    model.compile(
        loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy']
    )
    model.fit(
        data_train, validation_data=data_valid, callbacks=get_callbacks(),
        epochs=MAX_EPOCHS
    )
    # Per-fold class probabilities on the test set.
    preds.append(model.predict(data_test))
# + [markdown] papermill={"duration": 15.733372, "end_time": "2022-03-31T22:43:52.092011", "exception": false, "start_time": "2022-03-31T22:43:36.358639", "status": "completed"} tags=[]
# # Submission
# + papermill={"duration": 16.863791, "end_time": "2022-03-31T22:44:24.411495", "exception": false, "start_time": "2022-03-31T22:44:07.547704", "status": "completed"} tags=[]
# Average the per-fold softmax outputs, then take the arg-max class label.
submissions = pd.read_csv('../input/tabular-playground-series-feb-2022/sample_submission.csv')
# np.array(preds) stacks the fold predictions into (n_folds, n_rows, n_classes);
# the redundant `[arr for arr in preds]` copy was removed, and Series.tolist()
# replaces the equivalent `.values.tolist()`.
submissions['target'] = pd.DataFrame(
    np.array(preds).mean(axis=0), columns=y.columns
).idxmax(axis=1).tolist()
submissions.to_csv('preds.csv', index=False)
| Feature-Tokenizer-Transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.1
# ---
using GenericLinearAlgebra
using LinearAlgebra
using SpecialFunctions
using Dates
using MAT
using Roots
using FFTW
using PyPlot
using PolynomialRoots
using Printf
function Remez(targetf, parity, degree, xapp, lef = -1, rig = 1, maxiter = 10, sample_size = 20, eps = 1e-12)
    #--------------------------------------------------------------------------------------------------------------
    # Input:
    # targetf: target function
    # parity: the parity of approximation function, 0 -- even, 1 -- odd, 2 -- no constraint
    # degree: the degree of approximation polynomial
    # xapp: initial points to approximate, if provide a single zero, will choose roots of Chebyshev polynomials
    # lef, rig: left and right endpoints of approximation interval
    # maxiter: Max iteration
    # sample_size: sample size used in Remez algorithm
    # eps: accuracy of approximation
    #
    # Output:
    # Expansion of approximation polynomial under Chebyshev basis
    #
    # We note that our implementation does not strictly follow the reference. The algorithm may crash when
    # the degree is very large or the problem is ill-conditioned.
    #
    #------------------------------------------------------------------------------------------------------------
    #
    # Reference:
    # <NAME>.
    # Introduction to approximation theory
    #
    # Author: <NAME>
    # Version 1.0 .... 02/2020
    #
    #------------------------------------------------------------------------------------------------------------
    # Default initial reference: Chebyshev points mapped onto [0, 1].
    if(xapp==0)
        xapp = big.(cos.(collect(range(pi/2,stop=0,length=degree+1))))
    end
    iter = 0
    eps2 = 1e-18
    # comp holds the alternating-sign column (+1, -1, +1, ...) multiplying the
    # leveled-error unknown in the equioscillation linear system below.
    comp = ones(degree+1,1)
    comp[2:2:end] .= -1
    while(true)
        iter += 1
        # Step1: Find the best approximation on n+1 given points by solving a linear equation
        A = zeros(BigFloat,degree+1,degree+1)
        b = zeros(BigFloat,degree+1,1)
        for i=1:degree
            # Map column index to the Chebyshev degree permitted by the parity constraint.
            if(parity==0)
                deg = 2*(i-1)
            elseif(parity==1)
                deg = 2*i-1
            else
                deg = i-1
            end
            for j=1:degree+1
                A[j,i] = chebyshev(xapp[j],deg)
            end
        end
        # Last column carries the signed leveled error E, so sol = [coeffs...; E].
        A[:,end] = comp
        for i=1:degree+1
            b[i] = targetf(xapp[i])
        end
        sol = A\b
        # Step2: Find roots of residual function
        eps3 = findmin([eps2,1e-4*abs(sol[end])])[1]
        xroot = zeros(BigFloat,degree,1)
        for i=1:degree
            rootl = xapp[i]
            rootr = xapp[i+1]
            pm = (-1)^(i)*sign(sol[end])
            froot(x) = chebyshevfunc(x,sol[1:end-1],parity,-targetf(x))
            rootiter = 0
            xroot[i] = brent(rootl,rootr,froot,eps3,eps3)
            # brent returns Inf on failure; abort (caller receives `nothing`).
            if(xroot[i]==Inf)
                return
            end
        end
        # Step3: In each pair of adjacent roots, find a point x such that the absolute value of
        # residual function is maximized. In addition, values of residual function alternate
        # in sign.
        xappnew = copy(xapp)
        maxtot = abs(sol[end])
        r_max = -Inf
        y_max = -Inf
        for i=1:degree+1
            maxapp = abs(sol[end])
            pm = (-1)^(i)*sign(sol[end])
            if(i==1)
                lend = lef
            else
                lend = xroot[i-1]
            end
            if(i==degree+1)
                rend = rig
            else
                rend = xroot[i]
            end
            # Dense sampling between adjacent roots to locate the local extremum.
            exh = collect(range(lend,stop=rend,length=sample_size))
            for j=1:length(exh)
                fval = chebyshevfunc(exh[j],sol[1:end-1],parity,-targetf(exh[j]))
                if(abs(fval)>r_max)
                    r_max = abs(fval)
                end
                if(fval*pm>y_max)
                    y_max = fval*pm
                end
                if(fval*pm>maxapp)
                    maxapp = fval*pm
                    maxtot = maximum([maxtot,maxapp])
                    xappnew[i] = exh[j]
                end
            end
        end
        xapp = copy(xappnew)
        # The signed and unsigned maxima should coincide if the new reference alternates.
        if(abs(r_max-y_max)>1e-12)
            @printf("Warning: the interpolation points maybe incorrect\n")
        end
        @printf("The %3d-th itertion: previous error is %5.4e, L_inf approximation error is %5.4e\n",iter,sol[end],maxtot)
        # Stop when target accuracy is reached, iterations are exhausted, or
        # the leveled error has (nearly) converged to the true max error.
        if(maxtot<eps||iter>=maxiter||abs((sol[end]-maxtot)/sol[end])<1e-4)
            return sol[1:end-1]
        end
    end
end
# +
function chebyshev(x,n) # T_n(x)
    # Chebyshev polynomial of the first kind, extended beyond [-1, 1]
    # via the hyperbolic identities (guard clauses instead of abs()).
    if x > 1
        return cosh(n*acosh(x))
    elseif x < -1
        return (-1)^n*cosh(n*acosh(-x))
    else
        return cos(n*acos(x))
    end
end
function chebyshevfunc(x,sol,parity,init)
    # Evaluate init + sum_i sol[i] * T_deg(i)(x), where the degree schedule
    # follows the parity flag (0: even degrees, 1: odd degrees, else: all).
    total = init
    for (i, coef) in enumerate(sol)
        deg = parity==0 ? 2*(i-1) : (parity==1 ? 2*i-1 : i-1)
        total += coef*chebyshev(x,deg)
    end
    return total
end
# -
function brent(a,b,f,tol1,tol2)
    # brent method for finding roots on a given interval [a,b]
    # f(a)f(b) should be less than 0, tol1, tol2 are stopping criteria
    # (tol1 bounds |f(s)|, tol2 bounds the bracket width).
    # Returns Inf on failure (bad bracket or iteration limit exceeded).
    fa = f(a)
    fb = f(b)
    if(f(a)*f(b)>0)
        println("Error, f(a)f(b)>0")
        return Inf
    end
    # Ensure |f(b)| <= |f(a)| so that b is the better of the two estimates.
    if(abs(f(a))<abs(f(b)))
        tmp = a
        tmp2 = fa
        a = b
        fa = fb
        b = tmp
        fb = tmp2
    end
    c = a
    fc = fa
    s = b
    d = 1e-10
    iter = 0
    mflag = true
    while(true)
        iter += 1
        if(iter>1000)
            println("Brent method: reaches max iteration.")
            return Inf
        end
        if(abs(b-a)<tol2||abs(f(s))<tol1)
            return s
        end
        # Inverse quadratic interpolation when the three residuals are
        # distinct; otherwise fall back to the secant step.
        if(fa!=fc&&fb!=fc)
            s = a*fb*fc/((fa-fb)*(fa-fc))+b*fa*fc/((fb-fa)*(fb-fc))+c*fa*fb/((fc-fa)*(fc-fb))
        else
            s = b-fb*(b-a)/(fb-fa)
        end
        # Reject the interpolated step (use bisection instead) when it falls
        # outside the safe sub-interval or fails Brent's progress conditions.
        if((s>=b)||(s<=(3*a+b)/4)||((abs(s-b)*2>abs(b-c))&&mflag)||((abs(s-b)*2>abs(d-c))&&(!mflag))||(tol1>abs(b-c)&&mflag)||((tol1>abs(d-c))&&(!mflag)))
            s = (a+b)/2
            mflag = true
        else
            mflag = false
        end
        fs = f(s)
        d = c
        c = b
        fc = fb
        # Keep the sign change bracketed between a and b.
        if(fa*fs<0)
            b = s
            fb = fs
        else
            a = s
            fa = fs
        end
        # Maintain the invariant |f(b)| <= |f(a)|.
        if(abs(f(a))<abs(f(b)))
            tmp = a
            tmp2 = fa
            a = b
            fa = fb
            b = tmp
            fb = tmp2
        end
    end
end
# +
# Test case: Matrix inversion
#
# We would like to approximate 1/x over [1/kappa,1] by a polynomial (even or odd)
#
# You may change the target function to approximate desired function, and you can
# save coefficients of approximation polynomail in .mat file and solve for corresponding
# phase factors via optimization method.
#
# parameters
# kappa: parameters of polynomial approximation
# degree: degree of freedom of approximation polynomial (not the degree)
# stop_eps: desired accuracy
# parity: parity of the approximation polynomial
# R_high: number of bits used in high-precision arithmetic
# save_mat: whether to save the coefficient as a file (.mat)
# where_save: path to save the data file
# save_name: name of the data file
kappa = 10
degree = 60
stop_eps = 1e-6
parity = 0
R_high = 512
save_mat = true
where_save = ""
save_name = ""
#------------------------------------------------------------------
function inversex(x) #1/x divided by a constant factor
    # Scaling by 1/(4*kappa) keeps the target bounded on [1/kappa, 1].
    return big.(1)/(big.(4)*kappa*x)
end
# Run the whole approximation in R_high-bit BigFloat arithmetic.
setprecision(BigFloat,R_high)
# Chebyshev points mapped from [0, 1] onto the interval [1/kappa, 1].
xapp = big.(cos.(collect(range(pi/2,stop=0,length=degree+1)))*(kappa-1)/kappa.+(1/kappa))
solu = Remez(inversex, parity, degree, xapp, big.(1)/kappa, 1.0, 20, 20, stop_eps)
solu = Float64.(solu)
if(save_mat)
    # NOTE(review): "Data\\" is a Windows-style path separator -- this will not
    # address the intended sub-directory on Linux/macOS; confirm target OS.
    matpath = where_save*"Data\\"
    if(save_name!="")
        mattest = matopen(matpath * save_name * ".mat","w")
    else
        mattest = matopen(matpath * "coef_x_" * string(kappa) * "_" * string(ceil(Int,-log10(stop_eps))) * ".mat","w")
    end
    write(mattest,"coef",Float64.(solu))
    write(mattest,"parity",parity)
    close(mattest)
end
# -
| Solvers/Remez.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Execute this code block to install dependencies when running on colab
try:
    import torch
except:
    # Running on Colab without torch installed: detect the CUDA runtime and
    # pip-install a matching wheel (the `!` lines are notebook shell magics).
    from os.path import exists
    from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
    platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
    # cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
    # NOTE(review): `cuda_output` is only defined by the shell magic above when
    # run as a notebook cell; as a plain script this branch raises NameError.
    accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
    # !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-1.0.0-{platform}-linux_x86_64.whl torchvision
# +
# %matplotlib inline
import torchvision
import torchvision.transforms as transforms

batch_size = 256
image_dim = 784 # flattened 28x28 FashionMNIST image

# dataset construction: convert each image to a tensor, then flatten it so
# the dense encoder/decoder below can consume it directly.
transform = transforms.Compose([
    transforms.ToTensor(), # convert to tensor
    transforms.Lambda(lambda x: x.view(image_dim)) # flatten into vector
])

train_set = torchvision.datasets.FashionMNIST(
    root='./data/FashionMNIST'
    ,train=True
    ,download=True
    ,transform=transform
)

train_loader = torch.utils.data.DataLoader(
    train_set, batch_size=batch_size
)
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm.autonotebook import tqdm
from itertools import chain
import numpy as np
class Encoder(nn.Module):
    """VAE encoder: a single ReLU hidden layer followed by two linear heads
    producing the diagonal-Gaussian parameters (mean, log-variance)."""

    def __init__(self, image_dim, hidden_dim, output_dim):
        super().__init__()
        self.fc_hidden = nn.Linear(image_dim, hidden_dim)
        self.fc_mean = nn.Linear(hidden_dim, output_dim)
        self.fc_sigma = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        hidden = F.relu(self.fc_hidden(x))
        # Two independent projections: the mean and the log of the variance.
        return self.fc_mean(hidden), self.fc_sigma(hidden)
class Decoder(nn.Module):
    """VAE decoder: a single ReLU hidden layer followed by an output layer
    whose sigmoid squashes values into [0, 1] (pixel intensities)."""

    def __init__(self, image_dim, hidden_dim, output_dim):
        super().__init__()
        self.fc_hidden = nn.Linear(image_dim, hidden_dim)
        self.fc_out = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        hidden = F.relu(self.fc_hidden(x))
        return torch.sigmoid(self.fc_out(hidden))
# +
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
# Sampling function (using the reparameterisation trick)
def sample(mu, log_sigma2):
    """Draw z ~ N(mu, diag(exp(log_sigma2))) differentiably.

    Reparameterisation trick: z = mu + sigma * eps with eps ~ N(0, I), so
    gradients flow through mu and log_sigma2.
    """
    # randn_like inherits mu's shape, dtype and device; the original
    # torch.randn(mu.shape[0], mu.shape[1]) always allocated eps on the CPU,
    # which breaks when mu lives on a GPU.
    eps = torch.randn_like(mu)
    return mu + torch.exp(log_sigma2 / 2) * eps
#parameters
batch_size = 256
embedding_dim = 2      # 2-D latent space
enc_hidden_units = 512
dec_hidden_units = 512
nEpoch = 10

# construct the encoder, decoder and optimiser
enc = Encoder(image_dim, enc_hidden_units, embedding_dim)
dec = Decoder(embedding_dim, dec_hidden_units, image_dim)
# One optimiser over both modules so a single step updates the whole VAE.
optimizer = optim.Adam(chain(enc.parameters(), dec.parameters()), lr=1e-3)
# training loop
for epoch in range(nEpoch):
    losses = []
    trainloader = tqdm(train_loader)
    for i, data in enumerate(trainloader, 0):
        inputs, _ = data
        optimizer.zero_grad()
        # Encode -> sample latent z -> decode (full VAE forward pass).
        mu, log_sigma2 = enc(inputs)
        z = sample(mu, log_sigma2)
        outputs = dec(z)
        # E[log P(X|z)] - as images are binary it makes most sense to use binary cross entropy
        # we need to be a little careful - by default torch averages over every observation
        # (e.g. each pixel in each image of each batch), whereas we want the average over entire
        # images instead
        recon = F.binary_cross_entropy(outputs, inputs, reduction='sum') / inputs.shape[0]
        kl = 0
        # kl = D_KL(Q(z|X) || P(z|X)) - calculate in closed form
        # Compute the term kl which is then added to the total loss
        # YOUR CODE HERE
        # Closed-form KL between diagonal Gaussian q(z|x) and N(0, I):
        # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2).
        kl = - 0.5 * torch.sum(1 - torch.exp(log_sigma2) - mu**2 + log_sigma2)
        # NOTE(review): dividing by batch_size * image_dim (not just the batch
        # size) strongly down-weights the KL term relative to `recon` --
        # confirm this beta-VAE-style scaling is intended.
        kl /= batch_size * image_dim
        loss = recon + kl
        loss.backward()
        optimizer.step()
        # keep track of the loss and update the stats
        losses.append(loss.item())
        trainloader.set_postfix(loss=np.mean(losses), epoch=epoch)

## Please display some of the generated images in the submitted Notebook
# YOUR CODE HERE
# Reconstruct one batch without tracking gradients and show 4 random
# (input, reconstruction) pairs in a 2x4 grid.
with torch.no_grad():
    inputs, _ = next(iter(trainloader))
    mu, log_sigma2 = enc(inputs)
    z = sample(mu, log_sigma2)
    outputs = dec(z)
    inputs = inputs.view(inputs.shape[0], 28, 28)
    outputs = outputs.view(outputs.shape[0], 28, 28)
    randidx = torch.randint(outputs.shape[0], (4,))
    for i in range(8):
        # int(str(24)+str(i+1)) encodes plt.subplot(2, 4, i+1).
        plt.subplot(int(str(24)+str(i+1)))
        if i < 4:
            plt.title("Input")
            plt.imshow(inputs[randidx[i]], cmap=plt.get_cmap('gray'))
        else:
            plt.title("Output")
            plt.imshow(outputs[randidx[i-4]], cmap=plt.get_cmap('gray'))
    plt.show()
# -
| 08_autoencoders/8_3_VAE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Evaluating classifiers
#
# ### Concepts
# - Accuracy
# - Confusion Matrix
# - Precision
# - Recall (also called Sensitivity)
# - F1-Score
# - ROC-Curve
# - AUC (Area under Curve)
# ---
# * **Learning objective**:
# * Understand the main tools available to judge your model
# * **sklearn.metrics** is your friend!
# * **Content to be covered**:
# * True & False / Positives & Negatives:
# * Accuracy
# * Precision vs Recall
# * ROC and AUC
# * Confusion Matrix
# * **First the theory, then the applied coding**
# ---
# ## True & False / Positives & Negatives:
# | Scenario | Predicted: NO | Predicted: YES |
# | --- | ----- | --- |
# | **Actual: NO** | True Negative | False Positive |
# | **Actual: YES** | False Negative | True Positive |
# - True means model and data agree
#
# - False means model and data disagree
#
# - Positive means event is triggered - 1 value
#
# - Negative means an event is not triggered - 0 value
#
# **True Positive** - We predicted the major would survive and he did so we have a True positive (1) outcome.
#
# **True Negative** - We predicted the person would die and she did so we got a True negative (0) outcome.
#
# **False Positive** - We predicted someone would survive and little chimney sweep Bertie died so we have a False Positive prediction that he would survive.
#
# **False Negative** - incorrectly predict a negative outcome - We predicted big fat Count von Marx would die but he survived therefore our negative (0) prediction of death was FALSE - False Negative!
# ## Examples
#
# #### Scenario 1: Gold Diggers!
# * We build a scanner which scans earth samples from different regions for gold. If we find it we excavate that area.
# * Important to consider: Excavating earth is expensive! So if we don't find gold we lose lots of money!
# * Input: 100 earth samples. The model says: 70 contain gold, 30 do not. Actually, 90 contain gold, 10 do not
# * How good is our model?
# * Want to minimise missed opportunities
# #### Scenario 2: Test for Breast Cancer
# * We build a tool which tests mammograms for the presence of breast cancer
# * Input: 100 mammograms. The model says: 70 clear, 30 cancerous. Actually, 90 clear, 10 cancerous
# * How good is our model?
# * We want to minimise risk!
# * **Let's work through a practical application of scenario 2**
# Here, we've included a lot of False Positives!
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
# #### X data is a matrix (more than 1 dimension), y is a vector(only 1 dimension)!
# 30 numeric features per sample, binary target.
X, y = load_breast_cancer(return_X_y = True)
print(X.shape, y.shape)
y[:50] # array of 1s and 0s telling you who does and doesn't
# have cancer
X[0] # feature values for the first sample
from sklearn.linear_model import LogisticRegression  # (already imported above)
m = LogisticRegression() #creating a model
m.fit(X, y) # training the model
m.score(X, y) # calculating the score == accuracy score
# ---
# ## Accuracy:
# * The percentage of correct guesses
# * A nice simple way of assessing the model
# * Fails with class imbalance
# A common metric in classification, it fails when we have highly imbalanced classes. In those cases, F1 is more appropriate!
# How to do an accuracy score without m.score:
from sklearn.metrics import accuracy_score #import
ypred = m.predict(X) #generating predictive results
accuracy_score(y, ypred) # compare the results with our actual results
# ---
# ### Precision vs Recall
# * A trade off between exactness and totalness
# * Precision = **exactness**
# * High Precision = more relevant than irrelevant results returned (at the expense of missing some relevant ones)
# * Also called False Positive Rate
# * High when false positives are low
#
# **Precision is the ability of a classifier to not label a true negative as positive!**
#
# \begin{align}
# \frac{True Positive}{True Positive + False Positive}
# \end{align}
#
# * Recall = **totalness**.
# * High Recall = most of the relevant results returned (at the expense of including bad results)
# * Also called True Positive Rate
# * High when false negatives are low
# * Remember the two scenarios! Which one suits which measure?
#
# Recall is all about real positives!
#
#
# **Recall is the ability of the classifier to find positive examples. If we wanted to be certain to find all positive examples, we could maximise recall!**
#
# \begin{align}
# \ True Postive Rate (TPR) = \frac{True Positives}{True Positives + False Negatives}
# \end{align}
#
#
# i.e. Precision - exact - If it's exact, it means it doesn't make many mistakes eg in gold example it's good to have high precision even if you miss some.
#
# Recall is making sure False negatives are low (ie predicts it's false but actually True) - better to incorrectly tell people they have breast cancer and then correct it then tell someone they don't have it and they do!
from sklearn.metrics import precision_score, recall_score
precision_score(y, ypred)
# So here we have few false positives (i.e. when your model tells someone they have breast cancer but they don't) so precision is high: 4% chance we tell someone they have it but they don't.
recall_score(y, ypred)
# This is very high! Here we have few false negatives - ie your model tells someone they DON'T have breast cancer but your model is wrong and they do - 2.5% chance we tell someone they don't have breast cancer but they do! This is what we want to avoid!
# ---
# ## Confusion matrix
# 
# * Lets plot the confusion matrix of scenario two, and then prettify it with seaborn
# * Refer above to understand the structure
from sklearn.metrics import confusion_matrix
confusion = confusion_matrix(y, ypred)
confusion
# **Remember:**
#
# | Scenario | Predicted: NO | Predicted: YES |
# | --- | ----- | --- |
# | **Actual: NO** |True Negative | False Positive |
# | **Actual: YES** | False Negative | True Positive |
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
# %matplotlib inline
# +
plt.figure(figsize = (5,5))
sns.heatmap(confusion, #confusion matrix from above
xticklabels = np.unique(y), # just labels x-axis
yticklabels = np.unique(y), # just labels y-axis (unique - removes duplicates)
cmap = 'Oranges',
annot = True, #includes the annotations of the numbers on coloured squares
fmt = 'g') # gives us integers on those coloured squares instead of
# scientific notation!
plt.xlabel("Predicted")
plt.ylabel("Actual")
# -
# - Top left corner says NO breast cancer in actuality, and no breast cancer in the model therefore a TRUE NEGATIVE.
#
# - Top right corner says in actuality NO breast cancer, but our model predicted they do therefore a FALSE POSITIVE.
#
# - Bottom left says they DO have breast cancer in actuality, but our model predicted they DON'T so we have a FALSE NEGATIVE.
#
# - Bottom right says in actuality they DO have breast cancer, and our model agrees therefore we have a TRUE POSITIVE.
# ---
# ## ROC & AUC
#
# ### ROC - Receiver Operating Characteristic
#
# * A bit like the confusion matrix, but it still works when there is class imbalance
# * Receiver Operating Characteristics Curve
# * ROC shows the false positive rate (x-axis) versus the true positive rate (y-axis) for a number of different candidate threshold values between 0.0 and 1.0.
# * Put another way, plot recall on the x-axis, and precision on the y-axis
#
# 
#
# ### AUC - Area Under Curve
#
# * The AUC function takes both the true outcomes (0, 1) from the test set and the predicted probabilities for the 1 class. It returns the AUC score between 0.0 and 1.0
# * We basically want as much under the curve as possible! So the higher the AUC the better!
#
# 
X.shape, y.shape
# +
df = pd.DataFrame(X)
df.plot.scatter(0,1, c=y, cmap='jet')
plt.xlabel('x1')
plt.ylabel('x2')
# -
df.plot.scatter(0, 1, c=ypred, cmap='jet')
from sklearn.metrics import roc_curve, auc, roc_auc_score
roc_auc_score(y, ypred)
roc_curve(y, ypred)
# **fpr** : Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i].
#
# **tpr** : Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i].
#
# **thresholds** : array, shape = [n_thresholds]. Decreasing thresholds on the decision function used to compute fpr and tpr. thresholds[0] represents no instances being predicted and is arbitrarily set to max(y_score) + 1.
fpr, tpr, thresholds = roc_curve(y, ypred)
fpr
plt.plot(fpr, tpr)
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.show()
print(f'AUC: {round(auc(fpr, tpr)*100, 2)}%')
# ### Exercises
#
# 1. When is the accuracy not a good metric?
#
# 2. Evaluate your model
#
# **easy**: calculate an accuracy. **medium**: calculate precision and recall. **hard**: draw an ROC curve or precision-recall plot
#
# 3. Decision Boundary
#
# Plot the decision boundary for two scalar features from the Titanic dataset. Use this code example:
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
# Loading some example data to play with
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# -
X.shape, y.shape
# Train a classification model
m = DecisionTreeClassifier(max_depth=4)
m.fit(X, y)
# What about other models? Try them here as well.
m.score(X, y)
# +
#step size in the mesh
h = 0.01

#Plotting decision regions
#For that, we will assign a color to each point in the mesh [x_min, x_max] * [y_min, y_max]
# Pad the feature ranges by 1 so points don't sit on the plot edge.
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

f, ax = plt.subplots(1, 1, sharex='col', sharey='row', figsize=(10, 8))
# Flatten the grid into an (n_points, 2) array the classifier can score.
Xgrid = np.c_[xx.ravel(), yy.ravel()]
Z = m.predict(Xgrid)

# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)

#plot the training points
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor='k')
ax.set_title(f'Decision Tree (depth=4), Training Score: {round(m.score(X, y), 2)}')
plt.xlabel('Sepal Length (cm)')
plt.ylabel('Petal Length (cm)')
plt.show()
# -
# ---
| 03_Evaluating Classifiers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Description:
#
# * Demultiplexing libraries from MiSeq run
# ## Setting variables (user input)
# +
import os
seqdir = '/home/backup_files/raw_reads/hempmicrobiome.Sam.Ali.SmartLab.2018/'
workdir = '/home/sam/notebooks/hemp_microbiome/data/OTU_pipeline_output/'
# metadata file location(s)
metadata = 'metadata_16S.txt'
index_read1_file = 'index1.16S.fq.gz'
index_read2_file = 'index2.16S.fq.gz'
read12_screed = 'pear_merged-2018-03-30.assembled.fastq_screed'
index_read1_screed = 'index1.16S.fq_screed'
index_read2_screed = 'index1.16S.fq_screed'
# -
# ## Init
import os
import json
import screed
import pandas as pd
from collections import Counter
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import scipy.stats as stats
import screed
from glob import glob
# %cd $workdir
# %load_ext rpy2.ipython
# #%load_ext pushnote
# + language="R"
# library(ggplot2)
# library(dplyr)
# library(tidyr)
# -
# ## Loading metadata tables
# loading metadata sheet
df_meta = pd.read_csv(workdir+metadata, delimiter="\t")
print(df_meta.head(n=3))
# # Uncompress index files and create screed db
# +
def uncomp(fileName, directory):
    """Decompress `directory/fileName` (a .gz file) into the working dir.

    The actual decompression is performed by the notebook shell magic below
    (pigz). Returns the decompressed name with its final extension stripped
    (e.g. 'index1.16S.fq.gz' -> 'index1.16S'); callers re-append '.fq'.
    """
    output = fileName.replace(".gz", "")
    fileName = directory+fileName
    # !cd $workdir; \
    # pigz -k -d -p 24 -c -f $fileName > $output
    return os.path.splitext(output)[0]

index_read1_file = uncomp(index_read1_file, seqdir)
index_read2_file = uncomp(index_read2_file, seqdir)
# +
os.chdir(workdir)
screed.read_fastq_sequences(index_read1_file+".fq")
screed.read_fastq_sequences(index_read2_file+".fq")
index_read1_screed = index_read1_file + '.fq_screed'
index_read2_screed = index_read2_file + '.fq_screed'
# -
# # Demultiplex
# +
# rev-comp functions
def complement(seq):
    """Return the base-wise DNA complement of `seq` (case preserved, N -> N)."""
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    pairs.update({base.lower(): comp.lower() for base, comp in pairs.items()})
    return ''.join(pairs[base] for base in seq)

def revComp(seq):
    """Return the reverse complement of `seq`."""
    return complement(seq[::-1])
# +
# loading/editing metadata
df_meta.columns = ['Sample'] + list((df_meta.columns)[1:])
df_meta['Barcode_FR'] = [(revComp(y) + x).lower() for x,y \
in zip(df_meta.Index1,df_meta.Index2)]
## format of barcode=>sample dict: [revBarcode=read1 + fwdBarcode=read2] : sampleID
map_d = {x:y for x,y in zip(df_meta.Barcode_FR, df_meta.Sample)}
# -
# resulting dict of sample => barcode_FR
n_print = 96
[(list(map_d.keys())[i], map_d[list(map_d.keys())[i]]) for i in range(n_print)]
# loading screed databases
ir1db = screed.ScreedDB(index_read1_screed)
ir2db = screed.ScreedDB(index_read2_screed)
amp_db = screed.ScreedDB(read12_screed)
# +
# demultiplexing: naming reads by sample
outFile = re.sub('fastq_screed', 'dmult.fastq', read12_screed)
counter = 0
unassigned = 0
cnt = Counter()  # per-sample read tally
outFile = os.path.join(workdir, os.path.split(outFile)[1])
c = 0  # (unused)
with open(outFile, "w") as out:
    for rec in amp_db.itervalues():
        # The concatenated pair of index reads is the lookup key into map_d.
        index_concat = ir1db[rec["name"]]["sequence"] + ir2db[rec["name"]]["sequence"]
        try:
            new_name = map_d[index_concat.lower()] + "_" + str(counter) #case sensitive
        except KeyError:
            # Barcode pair not in the metadata sheet: count and skip the read.
            unassigned += 1
            continue
        counter += 1
        cnt[map_d[index_concat.lower()]] += 1 #case sensitive
        s, q = rec["sequence"], rec["quality"]
        # Re-emit the read in FASTQ format under its sample-based name.
        out.write("@%s orig_name=%s\n%s\n+\n%s\n"%(new_name,rec["name"],s,q))
print ("Unable to assign %s reads to samples"%unassigned)
# -
# number of sequences
# n_lines = !wc -l $outFile
n_lines = int(re.sub(' .+', '', n_lines[0]))
print('Number of sequences: {}'.format(n_lines/4))
# # Stats on sequences
# ## Number of sequences per sample
# +
# counting sequences for each sample
re_seqName = re.compile(r'_\d+ orig_name.+')
seq_cnt = dict()
with open(outFile, 'r') as fn:
    # Stream line by line instead of fn.readlines(), which loaded the entire
    # demultiplexed FASTQ into memory at once.
    for line in fn:
        # Only FASTQ header lines contain '_<counter> orig_name=...'.
        if re_seqName.search(line):
            # Strip the counter suffix and the leading '@' to recover the sample name.
            sampleName = re_seqName.sub('', line).rstrip().lstrip('@')
            # dict.get replaces the try/except KeyError increment.
            seq_cnt[sampleName] = seq_cnt.get(sampleName, 0) + 1
# -
# converting to dataframe
df_seq_cnt = pd.DataFrame.from_dict(seq_cnt, orient='index')
df_seq_cnt.index.name = 'Sample'
df_seq_cnt.reset_index(inplace=True)
df_seq_cnt.columns = ['Sample', 'seq_count']
df_seq_cnt
# + magic_args="-i df_seq_cnt -w 900 -h 350" language="R"
#
# df_seq_cnt$Sample = reorder(df_seq_cnt$Sample, desc(df_seq_cnt$seq_count))
#
# ggplot(df_seq_cnt, aes(Sample, seq_count)) +
# geom_bar(stat='identity') +
# theme_bw() +
# theme(
# text = element_text(size=16),
# axis.text.x = element_text(angle=45, hjust=1)
# )
# + language="R"
# df_seq_cnt[df_seq_cnt$seq_count < 10000,]
# -
| 16S_sequence_processing/2_demultiplex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/EnsiyehRaoufi/ML_Models_Linear_treebased_XGBoost__AutoGluon/blob/main/Random_Forest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="5ummOQT5c97E" outputId="09d1e2c8-bad4-4c0c-8689-bde7279caf3f"
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# Toy dataset: two numeric features and a binary target.
df = pd.DataFrame(
    [[1, 2, 0], [3, 4, 1], [5, 6, 0], [7, 8, 1]],
    columns=["num", "amount", "target"]
)
clf = RandomForestClassifier().fit(df[["num", "amount"]], df["target"])
# Training accuracy (the forest easily memorises 4 rows).
print("Score : ", clf.score(df[["num", "amount"]], df["target"]))
print("predict", clf.predict(pd.DataFrame(
    [[1, 2]],
    columns=["num", "amount"])))
# + colab={"base_uri": "https://localhost:8080/"} id="c-WXYeIag6Cq" outputId="77ced052-3a8f-4387-9983-a18dd25fa205"
# Predicting from a bare list also works, though sklearn may warn about
# missing feature names since the model was fitted on a DataFrame.
clf.predict([[1, 2]])
| Random_Forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Accessing satellite data from AWS
# This example notebook shows how to obtain Sentinel-2 imagery and additional data from [AWS S3 storage buckets](https://aws.amazon.com/s3/). The data at AWS is the same as original S-2 data provided by ESA.
#
# The ```sentinelhub``` package supports obtaining data by specifying products or by specifying tiles. It can download data either to the same file structure as it is at AWS or it can download data into original ```.SAFE``` file structure [introduced by ESA](https://sentinel.esa.int/web/sentinel/user-guides/sentinel-2-msi/data-formats).
#
# Before testing any of the examples below please check [Configuration paragraph](https://sentinelhub-py.readthedocs.io/en/latest/configure.html#amazon-s3-capabilities) for details about configuring AWS credentials and information about charges.
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Note: `matplotlib` is not a dependency of `sentinelhub` and is used in these examples for visualizations.
# ## Searching for available data
# For this functionality Sentinel Hub instance ID has to be configured according to [Configuration paragraph](https://sentinelhub-py.readthedocs.io/en/latest/configure.html#sentinel-hub-capabilities).
# +
from sentinelhub import WebFeatureService, BBox, CRS, DataCollection, SHConfig
config = SHConfig()
# WFS needs a Sentinel Hub instance id; warn early if it was never configured.
if config.instance_id == '':
    print("Warning! To use WFS functionality, please configure the `instance_id`.")
# -
#
# The archive of Sentinel-2 data at AWS consists of two buckets, one containing L1C and the other containing L2A data. There are multiple ways to search the archive for specific tiles and products:
#
# - Manual search using [aws_cli](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html), e.g.:
#
# ```bash
# aws s3 ls s3://sentinel-s2-l2a/tiles/33/U/WR/ --request-payer
# ```
#
# - Manual search using service available at https://roda.sentinel-hub.com/<bucket name>/, which does not require authentication, e.g.:
#
# https://roda.sentinel-hub.com/sentinel-s2-l1c/tiles/1/C/CV/2017/1/14/0/
#
# - Automatic search by a tile id or by location and time interval using Sentinel Hub Catalog API. More examples are available in [this notebook](./data_search.ipynb).
#
# - Automatic search by location and time interval using Sentinel Hub Web Feature Service (WFS):
# +
# Area of interest as a WGS84 (lon/lat) bounding box plus an ISO-8601 window.
search_bbox = BBox(bbox=[46.16, -16.15, 46.51, -15.58], crs=CRS.WGS84)
search_time_interval = ('2017-12-01T00:00:00', '2017-12-15T23:59:59')
wfs_iterator = WebFeatureService(
    search_bbox,
    search_time_interval,
    data_collection=DataCollection.SENTINEL2_L1C,
    maxcc=1.0,  # maximum cloud coverage; 1.0 means no cloud filtering
    config=config
)
for tile_info in wfs_iterator:
    print(tile_info)
# -
# From obtained WFS iterator we can extract info which uniquely defines each tile.
wfs_iterator.get_tiles()
# - Automatic search with functions from [sentinelhub.opensearch](https://sentinelhub-py.readthedocs.io/en/latest/opensearch.html) module (no authentication required):
# +
from sentinelhub import get_area_info
for tile_info in get_area_info(search_bbox, search_time_interval, maxcc=0.5):
    print(tile_info)
# -
# ## Download data
#
# Once we have found correct tiles or products we can download them and explore the data. Note that in order to do that, you have to provide AWS credentials to the config. Please see also [documentation](https://sentinelhub-py.readthedocs.io/en/latest/configure.html#amazon-s3-capabilities).
#
# ### Aws Tile
#
# Sentinel-2 tile can be uniquely defined either with ESA tile ID (e.g. `L1C_T01WCV_A012011_20171010T003615`) or with tile name (e.g. `T38TML` or `38TML`), sensing time and AWS index. The AWS index is the last number in tile AWS path (e.g. https://roda.sentinel-hub.com/sentinel-s2-l1c/tiles/1/C/CV/2017/1/14/0/ → `0`).
#
# The package works with the second tile definition. To transform tile ID to `(tile_name, time, aws_index)` do the following:
# +
from sentinelhub import AwsTile
# Convert an ESA tile ID into the (tile name, sensing time, AWS index)
# triple that this package uses to address tiles on S3.
tile_id = 'S2A_OPER_MSI_L1C_TL_MTI__20151219T100121_A002563_T38TML_N02.01'
tile_name, time, aws_index = AwsTile.tile_id_to_tile(tile_id)
tile_name, time, aws_index
# -
# Now we are ready to download the data. Let's download only bands `B8A` and `B10`, meta data files `tileInfo.json`, `preview.jp2` and pre-calculated cloud mask `qi/MSK_CLOUDS_B00`. We will save everything into folder `./AwsData`.
# +
from sentinelhub import AwsTileRequest
bands = ['B8A', 'B10']
metafiles = ['tileInfo', 'preview', 'qi/MSK_CLOUDS_B00']
data_folder = './AwsData'
request = AwsTileRequest(
    tile=tile_name,
    time=time,
    aws_index=aws_index,
    bands=bands,
    metafiles=metafiles,
    data_folder=data_folder,
    data_collection=DataCollection.SENTINEL2_L1C
)
request.save_data()  # This is where the download is triggered
# -
# Note that upon calling this method again the data won't be re-downloaded unless we set the parameter `redownload=True`.
#
# To obtain downloaded data we can simply do:
# +
data_list = request.get_data()  # This will not redownload anything because data is already stored on disk
# unpacking order matches the order of `bands` followed by `metafiles`
b8a, b10, tile_info, preview, cloud_mask = data_list
# -
# Download and reading could also be done in a single call `request.get_data(save_data=True)`.
plt.imshow(preview);
plt.imshow(b8a);
# ### Aws Product
#
# Sentinel-2 product is uniquely defined by ESA product ID. We can obtain data for the whole product
# +
from sentinelhub import AwsProductRequest
# A whole product is addressed by its ESA product ID.
product_id = 'S2A_MSIL1C_20171010T003621_N0205_R002_T01WCV_20171010T003615'
request = AwsProductRequest(product_id=product_id, data_folder=data_folder)
# Uncomment the following line to download the data:
# data_list = request.get_data(save_data=True)
# -
# If `bands` parameter is not defined all bands will be downloaded. If `metafiles` parameter is not defined no additional metadata files will be downloaded.
#
#
# ### Data into .SAFE structure
#
# The data can also be downloaded into .SAFE structure by specifying `safe_format=True`. The following code will download data from upper example again because now data will be stored in different folder structure.
# +
tile_request = AwsTileRequest(
    tile=tile_name,
    time=time,
    aws_index=aws_index,
    data_collection=DataCollection.SENTINEL2_L1C,
    bands=bands,
    metafiles=metafiles,
    data_folder=data_folder,
    safe_format=True
)
# Uncomment the following line to download the data:
# tile_request.save_data()
# +
product_id = 'S2A_OPER_PRD_MSIL1C_PDMC_20160121T043931_R069_V20160103T171947_20160103T171947'
product_request = AwsProductRequest(
    product_id=product_id,
    bands=['B01'],
    data_folder=data_folder,
    safe_format=True
)
# Uncomment the following line to download the data:
# product_request.save_data()
# -
# Older products contain multiple tiles. In case you would like to download only some tiles it is also possible to specify a list of tiles to download.
# +
product_request = AwsProductRequest(
    product_id=product_id,
    tile_list=['T14PNA', 'T13PHT'],
    data_folder=data_folder,
    safe_format=True
)
# Uncomment the following line to download the data:
# product_request.save_data()
| examples/aws_request.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.distributions import MultivariateNormal

import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO
from pyro.optim import Adam

from utils.custom_mlp import MLP, Exp
from utils.mnist_cached import MNISTCached, mkdir_p
# -
# Global Pyro setup: validate model/guide inputs, fix the RNG seed for
# reproducibility, and start from an empty parameter store.
pyro.enable_validation(True)
pyro.distributions.enable_validation(False)
pyro.set_rng_seed(0)
pyro.clear_param_store()
# Enable smoke test - run the notebook cells on CI.
smoke_test = 'CI' in os.environ
# for loading and batching MNIST dataset
def setup_data_loaders(batch_size=128, use_cuda=False):
    """Build MNIST train/test DataLoaders, downloading the data if needed.

    :param batch_size: minibatch size for both loaders
    :param use_cuda: pin host memory when batches will be moved to GPU
    :return: (train_loader, test_loader)
    """
    to_tensor = transforms.ToTensor()
    train_set = dset.MNIST(root='./data', train=True, transform=to_tensor,
                           download=True)
    test_set = dset.MNIST(root='./data', train=False, transform=to_tensor)
    loader_kwargs = {'num_workers': 1, 'pin_memory': use_cuda}
    # shuffle the training stream only; evaluation order stays fixed
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True, **loader_kwargs)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=batch_size, shuffle=False, **loader_kwargs)
    return train_loader, test_loader
def CustomLinear(last_layer_size, layer_size, use_cuda):
    """Build an nn.Linear with small-variance Gaussian init.

    :param last_layer_size: input feature count
    :param layer_size: output feature count
    :param use_cuda: wrap the layer in DataParallel for multi-GPU use
    :return: the (possibly DataParallel-wrapped) linear layer
    """
    layer = nn.Linear(last_layer_size, layer_size)
    # tiny init keeps early activations near zero for numerical stability
    layer.weight.data.normal_(0, 0.001)
    layer.bias.data.normal_(0, 0.001)
    return nn.DataParallel(layer) if use_cuda else layer
class Memory():
    """Kanerva-style memory: a Gaussian over an (n_address x n_memory_vec) matrix.

    R is the posterior mean, U the covariance between rows, V between columns.
    fc_y1/fc_y2 map a key vector y to an address code b_t, and the pointer
    matrix A projects b_t onto soft addressing weights w_t.
    """
    def __init__(self, n_address, n_memory_vec, n_address_vec, m_value = 1.0):
        self.y_dim = n_memory_vec
        self.z_dim = n_memory_vec
        self.n_address = n_address
        self.n_memory_vec = n_memory_vec
        self.n_address_vec = n_address_vec
        # BUG FIX: was hard-coded `self.m_value = 1.0`, silently ignoring the
        # constructor argument.
        self.m_value = m_value
        self.R = torch.rand(self.n_address, self.n_memory_vec)
        self.U = torch.eye(self.n_address)
        self.V = torch.eye(self.n_memory_vec)
        self.M = None
        self.A = nn.Parameter(torch.eye(self.n_address_vec, self.n_address))
        self.fc_y1 = nn.Linear(self.y_dim, self.n_address_vec//2)
        self.fc_y2 = nn.Linear(self.n_address_vec//2, self.n_address_vec)
    def _address(self, bt):
        # A is an nn.Parameter (a plain tensor), not a module, so the original
        # `self.A(bt)` raised TypeError; project b_t with a matrix product
        # instead: (..., n_address_vec) @ (n_address_vec, n_address).
        return torch.matmul(bt, self.A)
    def forward(self, _input, batch_size):
        """Map keys `_input` to addressing weights of shape (batch, 1, n_address)."""
        _y = self.fc_y1(_input)
        # small Gaussian noise regularizes the address code
        bt = self.fc_y2(_y + 0.08*torch.randn(_y.size()))
        wt = self._address(bt)
        return wt.view(batch_size, 1, -1)
    def write(self, y_list, Z, v_sigma):
        """Bayesian update of (R, U) given keys `y_list` and observations `Z`.

        :param y_list: list of key vectors (one per observation)
        :param Z: (len(y_list), n_memory_vec) observed memory codes
        :param v_sigma: observation noise standard deviation
        """
        len_t = len(y_list)
        rows = []
        for _y in y_list:
            bt = self.fc_y2(self.fc_y1(_y))
            rows.append(self._address(bt).view(1, self.n_address))
        W = torch.cat(rows, 0)
        dd = Z - torch.matmul(W, self.R)
        sigma_c = torch.matmul(W, self.U)
        sigma_gusai = torch.eye(len_t)
        sigma_z = torch.matmul(torch.matmul(W, self.U), torch.t(W)) + sigma_gusai * (v_sigma*v_sigma)
        # BUG FIX: the posterior update was computed into locals `R`/`U` and
        # discarded; store it on the instance (cf. KanervaMachine.write_inference).
        self.R = self.R + torch.matmul(torch.matmul(torch.t(sigma_c), torch.inverse(sigma_z)), dd)
        self.U = self.U - torch.matmul(torch.matmul(torch.t(sigma_c), torch.inverse(sigma_z)), sigma_c)
# +
class Encoder1(nn.Module):
    '''
    Two-headed encoder: a shared trunk feeding separate heads that
    produce q(y|x) and q(z|x).
    '''
    def __init__(self, D_in, H, C):
        super(Encoder1, self).__init__()
        # shared trunk
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, 2*C)
        # one head per latent
        self.linear3y = torch.nn.Linear(2*C, C)
        self.linear3z = torch.nn.Linear(2*C, C)
    def forward(self, x):
        hidden = F.relu(self.linear2(F.relu(self.linear1(x))))
        return F.relu(self.linear3y(hidden)), F.relu(self.linear3z(hidden))
class Encoder2(nn.Module):
    '''
    return q(z|x, y, M) from 2 C-size vector(q(z'|x) and p(z|y, M))
    '''
    def __init__(self, C, H):
        # BUG FIX: was `super(Encoder1, self).__init__()`, which raises
        # TypeError because Encoder1 is not in Encoder2's MRO.
        super(Encoder2, self).__init__()
        self.linear1 = torch.nn.Linear(2*C, H)
        self.linear2 = torch.nn.Linear(H, C)
    def forward(self, x1, x2):
        """Fuse the two C-dim codes (concatenated on dim 1) into one z code."""
        x = torch.cat((x1, x2), 1)
        x = F.relu(self.linear1(x))
        z = F.relu(self.linear2(x))
        return z
class Decoder(nn.Module):
    '''
    Map a latent code back to pixel space, i.e. q(x|z), with outputs
    squashed into [0, 1] by a sigmoid.
    '''
    def __init__(self, D_in, H, D_out):
        super(Decoder, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        hidden = F.relu(self.linear1(x))
        return self.sigmoid(self.linear2(hidden))
# -
class KanervaMachine(nn.Module):
    '''
    A Kanerva Machine: a VAE whose image latent ``z`` is read from (and
    written to) a learnable Gaussian memory with mean ``R`` and row/column
    covariances ``U``/``V``, addressed through a second latent key ``y``.
    Trained with Pyro SVI via the model/guide pair below.
    '''
    def __init__(
        self,
        n_address=40,
        n_memory_vec=50,
        n_address_vec=25,
        n_input=784,
        n_hidden=100,
        batch_size=10,
        aux_loss_multiplier=None,
        use_cuda=False
    ):
        super(KanervaMachine, self).__init__()
        if use_cuda:
            self.cuda()
        self.use_cuda = use_cuda
        self.n_address = n_address
        self.n_memory_vec = n_memory_vec
        self.n_address_vec = n_address_vec
        self.n_latent_vec = n_memory_vec # the same size as one of the memory-cols
        self.batch_size = batch_size
        self.aux_loss_multiplier = aux_loss_multiplier
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.A = CustomLinear(self.n_address_vec, self.n_address, use_cuda) ## Pointer Matrix
        self.R = None ## (n_address × n_memory_vec) matrix as the mean of M
        self.U = None ## (n_address × n_address matrix) that provides the covariance between rows of M
        self.V = None ## (n_memory_vec × n_memory_vec) matrix that provides the covariance between cols of M
        self.encoder1 = None
        self.encoder2 = None
        self.mlp_memory = None
        self.decoder = None
        self.allow_broadcast = False
        self.setup_networks()
    def setup_networks(self):
        # encoder1: x -> (y_loc, z_loc, y_scale, z_scale); Exp keeps scales positive
        self.encoder1 = MLP(
            [self.n_input] + [self.n_hidden, ] + [[self.n_latent_vec, self.n_latent_vec, self.n_latent_vec, self.n_latent_vec]],
            activation=nn.Softplus,
            output_activation=[None, None, Exp, Exp],
            allow_broadcast=self.allow_broadcast,
            use_cuda=self.use_cuda
        )
        # encoder2: fuses q(z'|x) with the memory read p(z|y, M) -> (z_loc, z_scale)
        self.encoder2 = MLP(
            [self.n_latent_vec + self.n_latent_vec] + [self.n_hidden, ] + [[self.n_latent_vec, self.n_latent_vec]],
            activation=nn.Softplus,
            output_activation=[None, Exp],
            allow_broadcast=self.allow_broadcast,
            use_cuda=self.use_cuda
        )
        # decoder: z -> Bernoulli pixel probabilities
        self.decoder = MLP(
            [self.n_latent_vec] + [self.n_hidden, ] + [self.n_input],
            activation=nn.Softplus,
            output_activation=nn.Sigmoid,
            allow_broadcast=self.allow_broadcast,
            use_cuda=self.use_cuda
        )
        # mlp_memory: key y -> address code b, later projected by the pointer matrix A
        self.mlp_memory = MLP(
            [self.n_latent_vec] + [self.n_address_vec//2, ] + [self.n_address_vec],
            activation=nn.Softplus,
            output_activation=nn.Softplus,
            allow_broadcast=self.allow_broadcast,
            use_cuda=self.use_cuda
        )
        # memory prior: zero mean, identity row/column covariances
        self.R = torch.zeros(self.n_address, self.n_memory_vec, requires_grad=True)
        self.U = torch.eye(self.n_address, requires_grad=True)
        self.V = torch.eye(self.n_memory_vec, requires_grad=True)
    def model(self, xs):
        """
        Generative process (per batch element):
          y ~ Normal(0, I)            # memory-addressing key
          z ~ Normal(w(y) @ R, I)     # latent read from memory at address w(y)
          x ~ Bernoulli(decoder(z))   # observed image pixels
        :param xs: a batch of scaled vectors of pixels from an image
        :return: the Bernoulli means x_loc produced by the decoder
        """
        # register this pytorch module and all of its sub-modules with pyro
        # print("model")
        pyro.module("kanerva_machine", self)
        batch_size = xs.size(0)
        with pyro.iarange("data", xs.size(0)):
            # sample y, the latent vector as the key of the memory, from the constant prior distribution
            y_prior_loc = xs.new_zeros([batch_size, self.n_latent_vec])
            y_prior_scale = xs.new_ones([batch_size, self.n_latent_vec])
            ys = pyro.sample("y", dist.Normal(y_prior_loc, y_prior_scale).independent(1))
            # sample z, the latent vector to generate an image, from the memory with "y".
            b = self.mlp_memory.forward(ys)
            w = self.A(b)
            z_loc = torch.matmul(w, self.R) # (batch_size, n_memory_vec)
            z_scale = torch.ones(batch_size, self.n_memory_vec)
            zs = pyro.sample("z", dist.Normal(z_loc, z_scale).independent(1))
            # sample x, the target images, with "z".
            x_loc = self.decoder.forward(zs)
            pyro.sample("x", dist.Bernoulli(x_loc).independent(1), obs=xs)
        return x_loc
    def guide(self, xs, len_t=30):
        """
        Inference network. Write phase: encoder1 produces q(y|x) and q(z'|x)
        and the memory posterior (R, U) is updated with the batch. Read
        phase: encoder2 fuses q(z'|x) with the memory read at address w(y)
        to give q(z|x, y, M).
        :param xs: a batch of scaled vectors of pixels from an image
        :param len_t: unused here; kept for interface compatibility
        :return: None
        """
        # Writing Phase:
        # sample y, z from q(y|x) and q(z|x)
        # print("guide")
        with pyro.iarange("data", xs.size(0)):
            y_loc_w, z_loc_w, y_scale_w, z_scale_w = self.encoder1.forward(xs)
            ys = pyro.sample("y", dist.Normal(y_loc_w, y_scale_w).independent(1))
            b = self.mlp_memory.forward(ys)
            w = self.A(b)
            # NOTE(review): write_inference's second argument is named Z
            # (observed codes) but receives z_scale_w here — confirm intended.
            self.write_inference(w, z_scale_w)
            #Reading Phase:
            # inform Pyro that the variables in the batch of xs, ys are conditionally independent
            batch_size = xs.size(0)
            b = self.mlp_memory.forward(ys)
            w = self.A(b)
            pre_z_loc = torch.matmul(w, self.R) # (batch_size, n_memory_vec)
            z_loc, z_scale = self.encoder2([z_loc_w, pre_z_loc])
            zs = pyro.sample("z", dist.Normal(z_loc, z_scale).independent(1))
    def write_inference(self, W, Z, v_sigma=1):
        '''
        Bayesian linear-Gaussian update of the memory posterior (R, U) given
        addressing weights W and observations Z, with observation noise
        standard deviation v_sigma.
        '''
        dd = Z - torch.matmul(W, self.R)
        sigma_c = torch.matmul(W, self.U)
        sigma_gusai = torch.eye(W.size(0))
        sigma_z = torch.matmul(torch.matmul(W, self.U), torch.t(W)) + sigma_gusai * (v_sigma*v_sigma)
        self.R = self.R + torch.matmul(torch.matmul(torch.t(sigma_c),torch.inverse(sigma_z)), dd)
        self.U = self.U - torch.matmul(torch.matmul(torch.t(sigma_c),torch.inverse(sigma_z)), sigma_c)
    def init_memory(self):
        # reset the memory posterior back to its prior
        self.R = torch.zeros(self.n_address, self.n_memory_vec, requires_grad=True)
        self.U = torch.eye(self.n_address, requires_grad=True)
        self.V = torch.eye(self.n_memory_vec, requires_grad=True)
def train(svi, train_loader, use_cuda=False):
    """Run one training epoch; return the average ELBO loss per example."""
    running_loss = 0.
    for x, _ in train_loader:
        if use_cuda:
            x = x.cuda()
        # flatten each image to a (batch, pixels) matrix before the SVI step
        flat = x.view(x.size(0), x.size(-1) * x.size(-1))
        running_loss += svi.step(flat)
        print(running_loss)
    # normalize by the total number of training examples
    return running_loss / len(train_loader.dataset)
def evaluate(svi, test_loader, use_cuda=False):
    """Compute the average ELBO loss per example over the whole test set."""
    total = 0.
    for x, _ in test_loader:
        if use_cuda:
            x = x.cuda()
        # flatten images to (batch, pixels) exactly as in training
        flat = x.view(x.size(0), x.size(-1) * x.size(-1))
        total += svi.evaluate_loss(flat)
    return total / len(test_loader.dataset)
# +
# Build the model and wire it to Pyro's stochastic variational inference.
km = KanervaMachine()
adam_params = {"lr": 0.00042, "betas": (0.9, 0.999)}
optimizer = Adam(adam_params)
svi = SVI(km.model, km.guide, optimizer, loss=TraceEnum_ELBO(max_iarange_nesting=1), num_particles=7)
# -
USE_CUDA = False
NUM_EPOCHS = 100
TEST_FREQUENCY = 5
train_loader, test_loader = setup_data_loaders(batch_size=256, use_cuda=USE_CUDA)
train_elbo = []
test_elbo = []
# training loop
for epoch in range(NUM_EPOCHS):
    total_epoch_loss_train = train(svi, train_loader, use_cuda=USE_CUDA)
    train_elbo.append(-total_epoch_loss_train)
    print("[epoch %03d] average training loss: %.4f" % (epoch, total_epoch_loss_train))
    if epoch % TEST_FREQUENCY == 0:
        # report test diagnostics
        total_epoch_loss_test = evaluate(svi, test_loader, use_cuda=USE_CUDA)
        test_elbo.append(-total_epoch_loss_test)
        print("[epoch %03d] average test loss: %.4f" % (epoch, total_epoch_loss_test))
# NOTE: IPython-only introspection syntax (shows SVI.step's source); this
# line is not valid plain Python and only runs inside a notebook/IPython.
??SVI.step()
| notebook/.ipynb_checkpoints/kanerva_machine_pyro-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework: Basic Artificial Neural Networks
# The goal of this homework is simple, yet an actual implementation may take some time :). We are going to write an Artificial Neural Network (almost) from scratch. The software design was heavily inspired by [PyTorch](http://pytorch.org) which is the main framework of our course
# This homework requires sending **multiple** files, please do not forget to include all the files when sending to TA. The list of files:
# - This notebook
# - homework_modules.ipynb with all blocks implemented (except maybe `Conv2d` and `MaxPool2d` layers implementation which are part of 'advanced' version of this homework)
# %matplotlib inline
from time import time, sleep
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
# # Framework
# Implement everything in `Modules.ipynb`. Read all the comments thoughtfully to ease the pain. Please try not to change the prototypes.
#
# Do not forget, that each module should return **AND** store `output` and `gradInput`.
#
# The typical assumption is that `module.backward` is always executed after `module.forward`,
# so `output` is stored, this would be useful for `SoftMax`.
# ### Tech note
# Prefer using `np.multiply`, `np.add`, `np.divide`, `np.subtract` instead of `*`,`+`,`/`,`-` for better memory handling.
#
# Example: suppose you allocated a variable
#
# ```
# a = np.zeros(...)
# ```
# So, instead of
# ```
# a = b + c # will be reallocated, GC needed to free
# ```
# You can use:
# ```
# np.add(b,c,out = a) # puts result in `a`
# ```
# (re-)load layers
# %run homework_modules.ipynb
# # Toy example
# Use this example to debug your code, start with logistic regression and then test other layers. You do not need to change anything here. This code is provided for you to test the layers. Also it is easy to use this code in MNIST task.
# +
# Generate some data
N = 500
# Two 2-D Gaussian blobs centred at (2, 2) and (-2, -2) — linearly separable.
X1 = np.random.randn(N,2) + np.array([2,2])
X2 = np.random.randn(N,2) + np.array([-2,-2])
# Labels as a two-column one-hot matrix: [1, 0] for X1, [0, 1] for X2.
Y = np.concatenate([np.ones(N),np.zeros(N)])[:,None]
Y = np.hstack([Y, 1-Y])
X = np.vstack([X1,X2])
plt.scatter(X[:,0],X[:,1], c = Y[:,0], edgecolors= 'none')
# -
# Define a **logistic regression** for debugging.
# +
# Logistic regression = one linear layer followed by log-softmax,
# trained with the negative log-likelihood criterion.
net = Sequential()
net.add(Linear(2, 2))
net.add(LogSoftMax())
criterion = ClassNLLCriterion()
print(net)
# Test something like that then
# net = Sequential()
# net.add(Linear(2, 4))
# net.add(ReLU())
# net.add(Linear(4, 2))
# net.add(LogSoftMax())
# print(net)
# -
# Start with batch_size = 1000 to make sure every step lowers the loss, then try stochastic version.
# +
# Optimizer params
optimizer_config = {'learning_rate' : 1e-1, 'momentum': 0.9}
optimizer_state = {}
# Looping params
n_epoch = 20
batch_size = 128
# -
# batch generator
def get_batches(dataset, batch_size):
    """Yield (X, Y) minibatches in a fresh random order each call.

    The last batch may be smaller when the sample count is not a
    multiple of batch_size.
    """
    X, Y = dataset
    order = np.random.permutation(X.shape[0])
    for start in range(0, len(order), batch_size):
        # numpy slicing past the end just truncates — no min() needed
        chosen = order[start:start + batch_size]
        yield X[chosen], Y[chosen]
# ### Train
# Basic training loop. Examine it.
# +
# Minibatch SGD-with-momentum training loop with live loss plotting.
loss_history = []
for i in range(n_epoch):
    for x_batch, y_batch in get_batches((X, Y), batch_size):
        net.zeroGradParameters()
        # Forward
        predictions = net.forward(x_batch)
        loss = criterion.forward(predictions, y_batch)
        # Backward
        dp = criterion.backward(predictions, y_batch)
        net.backward(x_batch, dp)
        # Update weights
        sgd_momentum(net.getParameters(),
                     net.getGradParameters(),
                     optimizer_config,
                     optimizer_state)
        loss_history.append(loss)
    # Visualize
    display.clear_output(wait=True)
    plt.figure(figsize=(8, 6))
    plt.title("Training loss")
    plt.xlabel("#iteration")
    plt.ylabel("loss")
    plt.plot(loss_history, 'b')
    plt.show()
    print('Current loss: %f' % loss)
# -
# # Digit classification
# We are using old good [MNIST](http://yann.lecun.com/exdb/mnist/) as our dataset.
import mnist
# Pre-split train/val/test arrays — presumably 28x28 digit images with int
# labels (later code flattens to 784 and one-hot encodes 10 classes);
# verify against mnist.load_dataset's implementation.
X_train, y_train, X_val, y_val, X_test, y_test = mnist.load_dataset()
# One-hot encode the labels first.
# +
# Your code goes here. ################################################
def one_hot(labels, n_classes=10):
    """One-hot encode integer labels.

    :param labels: array-like of ints in [0, n_classes); flattened first
    :param n_classes: number of classes (default 10, for MNIST digits)
    :return: (len(labels), n_classes) float array with a single 1 per row
    """
    # indexing the identity matrix by label picks out the one-hot rows
    return np.eye(n_classes)[np.asarray(labels).reshape(-1)]
# One-hot encode every split's labels for the NLL criterion.
one_hot_y_train = one_hot(y_train)
one_hot_y_val = one_hot(y_val)
one_hot_y_test = one_hot(y_test)
# Flatten each image into a 1-D row for the fully-connected network.
X_train = X_train.reshape([len(X_train),-1])
X_val = X_val.reshape([len(X_val),-1])
X_test = X_test.reshape([len(X_test),-1])
def accuracy(prediction, label):
    """Fraction of samples whose argmax prediction matches the label.

    :param prediction: (n, k) array of per-class scores
    :param label: (n,) array of integer class labels
    :return: accuracy in [0, 1]
    """
    pred = np.argmax(prediction, axis=-1)
    # mean of the boolean match vector, replacing the original
    # 1 - len(nonzero(label - pred)) / len(label) round-trip
    return float(np.mean(pred == label))
# -
# - **Compare** `ReLU`, `ELU`, `LeakyReLU`, `SoftPlus` activation functions.
# You would better pick the best optimizer params for each of them, but it is overkill for now. Use an architecture of your choice for the comparison.
# - **Try** inserting `BatchNormalization` (followed by `ChannelwiseScaling`) between `Linear` module and activation functions.
# - Plot the losses both from activation functions comparison and `BatchNormalization` comparison on one plot. Please find a scale (log?) when the lines are distinguishable, do not forget about naming the axes, the plot should be goodlooking.
# - Plot the losses for two networks: one trained by momentum_sgd, another one trained by Adam. Which one performs better?
# - Hint: good logloss for MNIST should be around 0.5.
# Your code goes here. ################################################
acts = ["relu", "elu", "leaky_relu", "soft_plus"]
opts = ["momentum", "adam"]
def net(act="relu", bnorm=False, opt="adam", alpha=0.9, drop=False):
    """Build a 784-200-10 MLP and train it for 30 epochs on the module-level
    MNIST splits (X_train, one_hot_y_train, ...).

    :param act: hidden activation: "relu", "elu", "leaky_relu" or "soft_plus"
    :param bnorm: insert BatchNormalization + ChannelwiseScaling after each Linear
    :param opt: "momentum" (sgd_momentum) or "adam" (adam_optimizer)
    :param alpha: moving-average coefficient for batch normalization
    :param drop: insert Dropout(0.9) after the first Linear
    :return: (net, train_loss, train_acc, val_loss, val_acc, test_loss,
              test_acc, total_step_time_seconds)

    NOTE(review): the local `net` shadows this function's own name, and the
    bnorm branch needs `torch` imported before the call (it is imported
    later in this notebook) — confirm cell execution order.
    """
    net = Sequential()
    net.add(Linear(28*28, 200))
    if bnorm:
        # seed the custom BN layer's running stats from a torch BatchNorm1d
        torch_layer = torch.nn.BatchNorm1d(200, eps=BatchNormalization(alpha).EPS, momentum=1.-alpha, affine=False)
        bn = BatchNormalization(alpha)
        bn.moving_mean = torch_layer.running_mean.numpy().copy()
        bn.moving_variance = torch_layer.running_var.numpy().copy()
        net.add(bn)
        net.add(ChannelwiseScaling(200))
    if drop:
        net.add(Dropout(0.9))
    if act == "relu":
        net.add(ReLU())
    elif act == "elu":
        net.add(ELU())
    elif act == "leaky_relu":
        net.add(LeakyReLU())
    elif act == "soft_plus":
        net.add(SoftPlus())
    net.add(Linear(200,10))
    if bnorm:
        torch_layer = torch.nn.BatchNorm1d(10, eps=BatchNormalization(alpha).EPS, momentum=1.-alpha, affine=False)
        bn = BatchNormalization(alpha)
        bn.moving_mean = torch_layer.running_mean.numpy().copy()
        bn.moving_variance = torch_layer.running_var.numpy().copy()
        net.add(bn)
        net.add(ChannelwiseScaling(10))
    net.add(LogSoftMax())
    criterion = ClassNLLCriterion()
    n_epoch = 30
    batch_size = 1000
    optimizer_config = {'learning_rate' : 1e-1, 'momentum': 0.9}
    optimizer_state = {}
    t = 0
    val_acc = []
    val_loss = []
    train_acc = []
    train_loss = []
    test_acc = []
    test_loss = []
    for i in range(n_epoch):
        for x_batch, y_batch in get_batches((X_train, one_hot_y_train), batch_size):
            net.zeroGradParameters()
            time_start = time()
            predictions = net.forward(x_batch)
            loss = criterion.forward(predictions, y_batch)
            dp = criterion.backward(predictions, y_batch)
            net.backward(x_batch, dp)
            if opt == "momentum":
                sgd_momentum(net.getParameters(),
                             net.getGradParameters(),
                             optimizer_config,
                             optimizer_state)
            elif opt == "adam":
                adam_optimizer(net.getParameters(), net.getGradParameters(), {'learning_rate': 1e-2, 'beta1':0.9, 'beta2':0.99, 'epsilon': 1e-8}, {})
            time_finish = time()
            # accumulate pure optimization time (forward/backward/update)
            t += (time_finish - time_start)
        # full-dataset metrics once per epoch
        pred_train = net.forward(X_train)
        train_loss.append(criterion.forward(pred_train, one_hot_y_train))
        train_acc.append(accuracy(pred_train, np.argmax(one_hot_y_train, axis=-1)))
        pred_val = net.forward(X_val)
        val_loss.append(criterion.forward(pred_val, one_hot_y_val))
        val_acc.append(accuracy(pred_val, np.argmax(one_hot_y_val, axis=-1)))
        pred_test = net.forward(X_test)
        test_loss.append(criterion.forward(pred_test, one_hot_y_test))
        test_acc.append(accuracy(pred_test, np.argmax(one_hot_y_test, axis=-1)))
    return net, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc, t
# +
# Train four otherwise-identical networks that differ only in the hidden
# activation, then compare loss/accuracy curves (log scale) across splits.
net_relu, train_loss_relu, train_acc_relu, val_loss_relu, val_acc_relu, test_loss_relu, test_acc_relu, time_relu = net()
net_elu, train_loss_elu, train_acc_elu, val_loss_elu, val_acc_elu, test_loss_elu, test_acc_elu, time_elu = net(act="elu")
net_leakrelu, train_loss_leakrelu, train_acc_leakrelu, val_loss_leakrelu, val_acc_leakrelu, test_loss_leakrelu, test_acc_leakrelu, time_leakrelu = net(act="leaky_relu")
net_soft, train_loss_soft, train_acc_soft, val_loss_soft, val_acc_soft, test_loss_soft, test_acc_soft, time_soft = net(act="soft_plus")
plt.figure(figsize=(10, 20))
plt.subplot(6,1,1)
plt.title("Training loss", fontsize = 25)
plt.xlabel("epoch", fontsize = 15)
plt.ylabel("loss", fontsize = 15)
plt.semilogy(train_loss_relu, lw = 2, label = 'ReLU')
plt.semilogy(train_loss_elu, lw = 2, label = 'ELU')
plt.semilogy(train_loss_leakrelu, lw = 2, label = 'LeakyReLU')
plt.semilogy(train_loss_soft, lw = 2, label = 'SoftPlus')
plt.grid(True)
plt.legend()
plt.subplot(6,1,2)
plt.title("Training accuracy", fontsize = 25)
plt.xlabel("epoch", fontsize = 15)
plt.ylabel("accuracy", fontsize = 15)
plt.plot(train_acc_relu, lw = 2, label = 'ReLU')
plt.plot(train_acc_elu, lw = 2, label = 'ELU')
plt.plot(train_acc_leakrelu, lw = 2, label = 'LeakyReLU')
plt.plot(train_acc_soft, lw = 2, label = 'SoftPlus')
plt.grid(True)
plt.legend()
plt.subplot(6,1,3)
plt.title("Validation loss", fontsize = 25)
plt.xlabel("epoch", fontsize = 15)
plt.ylabel("loss", fontsize = 15)
plt.semilogy(val_loss_relu, lw = 2, label = 'ReLU')
plt.semilogy(val_loss_elu, lw = 2, label = 'ELU')
plt.semilogy(val_loss_leakrelu, lw = 2, label = 'LeakyReLU')
plt.semilogy(val_loss_soft, lw = 2, label = 'SoftPlus')
plt.grid(True)
plt.legend()
plt.subplot(6,1,4)
plt.title("Validation accuracy", fontsize = 25)
plt.xlabel("epoch", fontsize = 15)
plt.ylabel("accuracy", fontsize = 15)
plt.semilogy(val_acc_relu, lw = 2, label = 'ReLU')
plt.semilogy(val_acc_elu, lw = 2, label = 'ELU')
plt.semilogy(val_acc_leakrelu, lw = 2, label = 'LeakyReLU')
plt.semilogy(val_acc_soft, lw = 2, label = 'SoftPlus')
plt.grid(True)
plt.legend()
plt.subplot(6,1,5)
plt.title("Test loss", fontsize = 25)
plt.xlabel("epoch", fontsize = 15)
plt.ylabel("loss", fontsize = 15)
plt.semilogy(test_loss_relu, lw = 2, label = 'ReLU')
plt.semilogy(test_loss_elu, lw = 2, label = 'ELU')
plt.semilogy(test_loss_leakrelu, lw = 2, label = 'LeakyReLU')
plt.semilogy(test_loss_soft, lw = 2, label = 'SoftPlus')
plt.grid(True)
plt.legend()
plt.subplot(6,1,6)
plt.title("Test accuracy", fontsize = 25)
plt.xlabel("epoch", fontsize = 15)
plt.ylabel("loss", fontsize = 15)
plt.semilogy(test_acc_relu, lw = 2, label = 'ReLU')
plt.semilogy(test_acc_elu, lw = 2, label = 'ELU')
plt.semilogy(test_acc_leakrelu, lw = 2, label = 'LeakyReLU')
plt.semilogy(test_acc_soft, lw = 2, label = 'SoftPlus')
plt.grid(True)
plt.legend()
plt.show()
print(f'Resulting duration of training: \n for ReLU: {time_relu}, \n for ELU: {time_elu} s, \n for LeakyReLU: {time_leakrelu} s, \n for SoftPlus: {time_soft}')
# -
import torch
# +
# Your answer goes here. ################################################
net_bn, train_loss_bn, train_acc_bn, val_loss_bn, val_acc_bn, test_loss_bn, test_acc_bn, time_bn = net(bnorm=True)
net_, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc, time_ = net()

# Three stacked panels (train / validation / test loss), each comparing the
# ReLU net with and without batch normalization on a log y-scale.
_bn_panels = [
    ("Training loss", train_loss_bn, train_loss),
    ("Validation loss", val_loss_bn, val_loss),
    ("Test loss", test_loss_bn, test_loss),
]
plt.figure(figsize=(10, 20))
for _row, (_caption, _bn_curve, _plain_curve) in enumerate(_bn_panels, start=1):
    plt.subplot(3, 1, _row)
    plt.title(_caption, fontsize = 20)
    plt.xlabel("epoch", fontsize = 15)
    plt.ylabel("loss", fontsize = 15)
    plt.semilogy(_bn_curve, lw = 2, color = 'r', label = 'ReLU with normalization')
    plt.semilogy(_plain_curve, lw = 2, color = 'b', label = 'ReLU no normalization')
    plt.grid(True)
    plt.legend()
plt.show()
print(f'Resulting duration of training: \n for ReLU without normalization: {time_}, \n for ReLU with normalization: {time_bn} ')
# +
net_mom, train_loss_mom, train_acc_mom, val_loss_mom, val_acc_mom, test_loss_mom, test_acc_mom, time_mom = net(opt="momentum")
net_adam, train_loss_adam, train_acc_adam, val_loss_adam, val_acc_adam, test_loss_adam, test_acc_adam, time_adam = net(opt="adam")

# One panel per data split, comparing SGD-with-momentum against Adam on a
# log loss scale (the 4-row grid leaves the last row unused, as before).
_opt_panels = [
    ("Training loss", train_loss_mom, train_loss_adam),
    ("Validation loss", val_loss_mom, val_loss_adam),
    ("Test loss", test_loss_mom, test_loss_adam),
]
plt.figure(figsize=(14, 30))
for _row, (_caption, _sgd_curve, _adam_curve) in enumerate(_opt_panels, start=1):
    plt.subplot(4, 1, _row)
    plt.title(_caption, fontsize = 20)
    plt.xlabel("epoch", fontsize = 15)
    plt.ylabel("loss", fontsize = 15)
    plt.semilogy(_sgd_curve, lw = 2, color='r', label = 'RELU SGD optimizer')
    plt.semilogy(_adam_curve, lw = 2, color='b', label = 'RELU Adam optimizer')
    plt.grid(True)
    plt.legend()
plt.show()
print(f'Resulting duration of training: \n for RELU SGD optimizer: {time_mom}, \n for RELU Adam optimizer: {time_adam}')
# +
net_drop, train_loss_drop, train_acc_drop, val_loss_drop, val_acc_drop, test_loss_drop, test_acc_drop, time_drop = net(drop=True)

# Two panels (loss, accuracy), each showing dropout vs no-dropout on both the
# training and validation splits, log y-scale.
_drop_panels = [
    ("Loss", "loss", (train_loss_drop, train_loss, val_loss_drop, val_loss)),
    ("Accuracy", "accuracy", (train_acc_drop, train_acc, val_acc_drop, val_acc)),
]
_curve_labels = ('ReLU with Dropout train',
                 'ReLU without Dropout train',
                 'ReLU with Dropout validation',
                 'ReLU without Dropout validation')
plt.figure(figsize=(14, 30))
for _row, (_caption, _ylab, _curves) in enumerate(_drop_panels, start=1):
    plt.subplot(3, 1, _row)
    plt.title(_caption, fontsize = 25)
    plt.xlabel("epoch", fontsize = 15)
    plt.ylabel(_ylab, fontsize = 15)
    for _curve, _lab in zip(_curves, _curve_labels):
        plt.semilogy(_curve, lw = 2, label = _lab)
    plt.grid(True)
    plt.legend()
plt.show()
print(f'Resulting duration of training: \n for Net with dropout optimizer: {time_drop}, \n for net without dropout: {time_}')
# -
# Write your personal opinion on the activation functions, think about computation times too. Does `BatchNormalization` help?
| homework01/homework_main-basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Objective:
# • Employ SVM from scikit learn for binary classification.
# • Impact of preprocessing data and hyperparameter search using grid search.
# Questions:
# 1. Load the data from “college.csv” that has attributes collected about private and public colleges for a particular year. We will try to predict the private/public status of the college from other attributes.
# 2. Use LabelEncoder to encode the target variable in to numerical form and split the data such that 20% of the data is set aside for testing.
# 3. Fit a linear svm from scikit learn and observe the accuracy.
# [Hint: Use Linear SVC]
# 4. Preprocess the data using StandardScaler and fit the same model again and observe the change in accuracy.
# [Hint: Refer to scikitlearn’s preprocessing methods]
# 5. Use scikit learn’s gridsearch to select the best hyperparameter for a non-linear SVM, identify the model with best score and its parameters.
# [Hint: Refer to model_selection module of Scikit learn]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

dfcollege = pd.read_csv('College.csv')
dfcollege.sample(5)
dfcollege.shape
dfcollege.info()

from sklearn.preprocessing import LabelEncoder

# Encode the Yes/No "Private" target into numeric labels (0/1) for the SVM.
labencoder = LabelEncoder()
dfcollege['Private'] = labencoder.fit_transform(dfcollege['Private'].astype('str'))

# Features are every column except the target.
X = dfcollege.drop('Private', axis = 1)
y = dfcollege.Private

from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Bug fix: the exercise brief asks for 20% of the data to be held out for
# testing, but the original code used test_size=0.3.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)

from sklearn.svm import SVC

# NOTE: gamma has no effect with kernel='linear'; it is kept only to preserve
# the original call shape.
svcmodel = SVC(C=1, kernel='linear', gamma=1)
svcmodel.fit(X_train, y_train)
prediction = svcmodel.predict(X_test)
accuracy_score(y_test, prediction)
# 4. Preprocess the data using StandardScaler and fit the same model again and observe the change in accuracy.
# [Hint: Refer to scikitlearn’s preprocessing methods]
# 5. Use scikit learn’s gridsearch to select the best hyperparameter for a non-linear SVM, identify the model with best score and its parameters.
# [Hint: Refer to model_selection module of Scikit learn]
| Cls6-Supervised Learning - II/SupervisedLearning-II-Casestudy-3-solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Structuring code
#
# Python statements (code in general) are usually written in [files which are stored in folders](https://stephensugden.com/crash_into_python/CodeOrganization.html).
#
# * A [file](https://en.wikipedia.org/wiki/Computer_file) containing Python code is a module (or a [script](https://en.wikipedia.org/wiki/Scripting_language), if we invoke the module directly from the shell). Modules contain [classes](https://en.wikipedia.org/wiki/Class_%28computer_programming%29), [functions](https://en.wikipedia.org/wiki/Subroutine), [variables](https://en.wikipedia.org/wiki/Variable_%28computer_science%29) and [executable statements](https://en.wikipedia.org/wiki/Statement_%28computer_science%29).
#
# * A [directory (folder)](https://en.wikipedia.org/wiki/Directory_%28computing%29) containing modules and other directories, is a package.
#
# Lets see how to organize our code and how to use it.
# ## Contents
# 1. [Modules and scripts](#Modules).
# 2. [Packages and ... scripts](#Packages).
# 3. [The `PYTHONPATH` environment variable](#PYTHONPATH).
# <a id='Modules'></a>
# ## 1. [Modules](https://docs.python.org/3/tutorial/modules.html) and [scripts](https://www.python-course.eu/python3_execute_script.php)
#
# * Modules/scripts are files which store Python code and have the extension `.py`. Example:
# !cat my_python_module.py
# From Python, modules are invoked using `import` (notice that when a module is imported, the code of the module is run):
# Importing a module runs its top-level code (once per interpreter session).
import my_python_module
# However, this happens only the first time we invoke a module from another module (this notebook):
import my_python_module # This import should not generate any output
# Module structures are accesible from the invoker code using the notation: `<module_name>.<structure>`:
my_python_module.a
# The module name of a module is accesible through the global variable `__name__`:
my_python_module.__name__
# * Any module that is invoked using the interpreter directly, will be called as a script:
# !python my_python_module.py
# * Listening the names of a (in this case) a module:
dir(my_python_module)
# <a id='Packages'></a>
# ## 2. [Packages](https://docs.python.org/3/tutorial/modules.html#packages) ... and [scripts](https://stackoverflow.com/questions/4042905/what-is-main-py)
#
# * Any folder which stores at least one module and a (even empty) `__init__.py` file is a package.
# !tree my_package
# !cat my_package/__init__.py
# !cat my_package/my_module.py
# * Invoking `my_package` as a module:
import my_package
dir(my_package)
my_package.my_module.a
# * It is possible to avoid using the package namespace for the objects defined inside:
# The next line raises NameError on purpose: `my_module` is not bound until
# the explicit `from my_package import my_module` that follows.
my_module.a # This should fail
from my_package import my_module
my_module.a # This should work
# * To run a package as a script, it must have a `__main__.py` file with the first code to be executed:
# !cat my_package/__main__.py
# !python my_package
# <a id='PYTHONPATH'></a>
# ## Avoiding to reference the path of the packages/modules
# By default, modules and packages are imported from:
import sys
sys.path
# (Note: the previous output depends on the interpreter, the host, the configuration of Python, etc.)
# To add a new directory, we can modify the `PYTHONPATH` environment variable (from the shell):
# !python -c 'import sys; print(sys.path)'
!(export PYTHONPATH=$PYTHONPATH:'/my/new/dir'; python -c 'import sys; print(sys.path)')
# (... or from Python itself):
sys.path
# Appending to sys.path only affects the current interpreter session.
sys.path.append("/my/new/dir")
sys.path
| 04-structuring_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bayes Theorem
# Bayes Theorem is still a kind of "non intuitive" concept to grasp. But to clarify its usefulness, it's best to explain it through examples.
# We will first introduce the **Tree diagrams** concept, to help better visualize the problems.
# ## Tree Diagrams
# **Tree diagram** is a tool to better organize **conditional probabilities**. Here we take the example 2.52 (page 96) registering the midterm and final grades of a class. It shows the proportion of students getting an A in their finals considering if they got an A in their midterms.
from IPython.display import Image
Image(filename="img/midterm_final.png")
# **Question:** Picking up a random final exam knowing it got an A, what's the probability this student had an A in his midterm?
# In mathematical notation the goal is to find the conditional probability : $P(midterm = A \space | \space final = A) $
# which translates to : $P(midterm = A \space | \space final = A)$ = $ \frac{P(midterm = A \space and \space final = A)}{P(final=A)}$
# we can now find each part in the tree diagram
# * $P(midterm = A \space and \space final = A)= 0.0611$
from IPython.display import Image
Image(filename="img/mf_A.png")
# * $P(final=A) = P(midterm = other \space and \space final = A) \space + \space P(midterm = A \space and \space final = A) = 0.0611 + 0.0957 = 0.1568$
# <p>
# marginal probability</p>
from IPython.display import Image
Image(filename="img/mf_final.png")
# * $P(midterm = A \space | \space final = A)$ = $ \frac{P(midterm = A \space and \space final = A)}{P(final=A)}$ = $\frac{0.0611}{0.1568} = 0.3897$
# The probability the student who got an A in his finals also got an A in his midterm is $0.39$
# ## Bayes Theorem
# In some cases, given the probability $P(A|B)$, we would like to find the inverted conditional probability $P(B|A)$
# Based on the **exercise 2.54** (page 98) of the probability a woman can develop cancer depending on the mammogram
from IPython.display import Image
Image(filename="img/breast_cancer.png")
# Tree diagram showing the result of a mammogram depending on whether the patient has cancer or not. $P(Mammogram | Cancer)$
# following this tree diagram data, we would like to answer the following **question:** if we tested a woman over 40 for breast cancer with a mammogram, and it returned positive, what is the probability the patient actually has breast cancer?
# Answering this question means finding the opposite of $P(Mammogram | Cancer)$; which means $P(Cancer | Mammogram)$
# Let's set the following notation :
# * **P(C) :** Probability of having cancer
# * **P(7C) :** Probability of not having cancer
# * **P(M+) :** Probability the mammogram is positive
# * **P(M-) :** Probability the mammogram is negative
# Which means we'll have to solve the following :
# <br>
# <br>
#
# $P(C|M+) = \frac{P(C \space and \space M+)}{P(M+)}$
# - **Finding $P(C \space and \space M+)$ : **
from IPython.display import Image
Image(filename="img/C_and_M+.png")
# $P(C \space and \space M+) = P(M+ \space | \space C) . P(C) = 0.00312$
# * **Finding $P(M+)$ : **
from IPython.display import Image
Image(filename="img/M+.png")
# $ P(M+) = P(M+ and \space C) + P(M+ and \space 7C)$<br>
# $ = P(C).P(M+|\space C) + P(7C).P(M+|\space 7C)$<br>
# $ = 0.0035 \space X \space 0.89 + 0.9965 \space X \space 0.07$<br>
# $= 0.07288$
# * Which gives :
# $P(C \space|\space M+) = \frac{P(C\space and \space M+ )}{P(M+)} = \frac{0.00312}{0.07288} = 0.0428$
# The result is : even if a patient has a positive mammogram screening, the probability that she has breast cancer is 4%
# ### Direct Application of Bayes Rule
# $ P(C \space|\space M+) = \frac{P(M+|\space C).P(C)}{P(M+|\space 7C).P(7C)+P(M+|\space C).P(C)} $
# <hr></hr>
# * **Guided Practice 2.56 : **
# Based on the Tree Diagram of the Parking lot exercise
from IPython.display import Image
Image(filename="img/parking_lot.png")
# we have :
# * $P(A_1) = 0.20 : $ Probability of a Sporting event
# * $P(A_2) = 0.35 : $ Probability of an Academic event
# * $P(A_3) = 0.45: $ Probability of No events
#
# And :
# * $P(B|A_1) = 0.70 :$ Probability of finding a **full garage** given a Sporting event
# * $P(B|A_2) = 0.25 :$ Probability of finding a **full garage** given an Academic event
# * $P(B|A_3) = 0.05 :$ Probability of finding a **full garage** given no events
# **Question :** if a random person comes to a campus and finds the **garage full**, what is the probability that there is a **sporting** event ?
# To answer this question we need to solve : $ P(A1|B) $
# Using the Bayes rule formula we get :
# <h2>$ = \frac{P(B|A_1).P(A_1)}{P(B|A_1).P(A_1)+P(B|A_2).P(A_2)+P(B|A_3).P(A_3)}$</h2>
# <h2>$ = \frac{(0.7)(0.2)}{(0.7)(0.2)+(0.25)(0.35)+(0.05)(0.45)}$</h2>
# <h4>$ = 0.56$</h4>
| Chapter 2/Chap2_Bayes_Theorem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Functions for Preprocessing Data
# Load Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.stats.outliers_influence import variance_inflation_factor
from scipy.stats import probplot
import warnings
warnings.filterwarnings("ignore")
from scipy.stats import shapiro
from scipy.stats import normaltest
from scipy.stats import kurtosis
from scipy.stats import skew
from sklearn import preprocessing
import math
# Lets get some data
from sklearn import datasets

# Load the iris measurements and give the columns human-readable names.
_iris_columns = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width']
iris = datasets.load_iris()
data = pd.DataFrame(iris['data'])
data.columns = _iris_columns
print(data.head())
# Function(s)
def processData(test_data, plots):
    """Clean, visualize, summarize, normalize, and de-correlate a numeric DataFrame.

    Pipeline:
      1. Fill NaN "holes" with the mean of the neighboring rows (findFix).
      2. Optionally show histograms and Q-Q plots when ``plots == 1``.
      3. Print descriptive/normality statistics before and after min-max scaling.
      4. Drop features with high multicollinearity via iterative VIF pruning.

    Parameters
    ----------
    test_data : pandas.DataFrame
        Input data; assumed to contain only numeric columns with a default
        RangeIndex (the hole-filling step uses row labels positionally —
        TODO confirm for non-default indexes).
    plots : int
        1 to display the diagnostic plots, anything else to skip them.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        (min-max scaled data, VIF-filtered subset of the scaled data).
    """
    #Find and Fix Holes
    def findFix(dat):
        # Replace each NaN with the mean of the value directly above and below
        # it in the same column. Prints the NaN counts before and after.
        # NOTE(review): assumes no NaN in the first or last row (j-1 / j+1
        # would wrap or go out of bounds) — TODO confirm upstream guarantees.
        data = dat.copy()
        print(' '),print('Holes at Start...'),print(' '),print(data.isna().sum())
        # Audit table of every filled hole; built but not returned — review.
        holes = pd.DataFrame(index=['Before','Nan','After','Mean for column','Mean of B and A','IndexOfHole','Column'])
        count = 0
        for i in list(data.columns):
            for j in list(data.loc[pd.isna(data[i]), :].index):
                holes[count] = [data[i].iloc[j-1],data[i].iloc[j],data[i].iloc[j+1],data[i].mean(),(data[i].iloc[j-1]+data[i].iloc[j+1])/2,j,i]
                count = count + 1
                # NOTE(review): chained indexing assignment — may emit
                # SettingWithCopyWarning and may not write through on newer
                # pandas; verify the fill actually lands in `data`.
                data[i].iloc[j] = (data[i].iloc[j-1]+data[i].iloc[j+1])/2
        print(' '),print('Holes at End...'),print(' '),print(data.isna().sum())
        return data
    #The holes have been removed and filled using the mean on the item before and after the hole.
    ndata = findFix(test_data)
    #Histograms
    def histPlot(a,c):
        # c == 0: one histogram per column on a 2-row subplot grid.
        # c == 1: all columns stacked into a single histogram.
        if c == 0:
            # Half the column count, rounded up, gives the grid width.
            x = math.ceil(len(list(a.columns))/2)
            if x < 5:
                # NOTE(review): `xx` is assigned but never used — dead code.
                xx = 5
            fig, axs = plt.subplots(2,x,figsize=(8,8)) #sharex=True, sharey=True <- can be added to regularize the graphs
            count = 0
            countn = 0
            for i in a:
                # First x columns fill the top row, the rest fill the bottom.
                if count < x:
                    axs[0,count].hist(list(a[i]), bins = int(len(a)**.5), stacked=True,color='tab:orange')#colors and labels
                    axs[0,count].set_title(i,size=20)
                    axs[0,count].legend(i,fontsize=20)
                    axs[0,count].grid(True)
                    count = count + 1
                else:
                    axs[1,countn].hist(list(a[i]), bins = int(len(a)**.5), stacked=True,color='tab:orange')#colors and labels
                    axs[1,countn].set_title(i,size=20)
                    axs[1,countn].legend(i,fontsize=20)
                    axs[1,countn].grid(True)
                    countn = countn + 1
            plt.tight_layout()
            plt.show()
        elif c == 1:
            b = [list(a[i]) for i in a]
            plt.hist(b, bins = int(len(a)**.5), stacked=True)#colors and labels
            plt.legend(list(a.columns),fontsize=32)
            plt.title(list(a.columns),size=32)
            plt.grid(True)
            plt.show()
        return
    if plots == 1:
        histPlot(ndata,0)
    #QQ plot
    def qqPlot(a):
        # One normal-probability (Q-Q) plot per column on a 2-row grid.
        def qq(x, axes = None):
            if axes is None:
                fig = plt.figure()
                ax1 = fig.add_subplot(1, 1, 1)
            else:
                ax1 = axes
            p = probplot(x, plot = ax1)
            ax1.set_title(x.name,fontsize=20)
            return p
        count = 1
        fig = plt.figure(figsize=(8,8))
        x = math.ceil(len(list(a.columns))/2)
        for i in a.columns:
            ax1 = fig.add_subplot(2,x, count)
            p1 = qq(a[i],ax1)
            count = count + 1
        fig.tight_layout()
        # NOTE(review): fig.show() (vs plt.show()) warns/does nothing on
        # non-interactive backends — confirm intended.
        fig.show()
        return
    if plots == 1:
        qqPlot(ndata)
    # Summary statistics + normality tests (Shapiro, D'Agostino) per column,
    # computed on the cleaned data...
    result = pd.DataFrame(index=['Mean','Count','Std','Kurtosis','Skew','Shapiro','NormalTest'])
    for i in list(ndata.columns):
        f = [ndata[i].mean(), ndata[i].describe()['count'],ndata[i].describe()['std'],kurtosis(ndata[i]),skew(ndata[i]),shapiro(ndata[i])[1],normaltest(ndata[i])[1]]
        result[i] = [round(num,3) for num in f]
    # ...then rescale every column to [0, 1] with min-max normalization...
    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
    nndata = pd.DataFrame()
    for i in list(ndata.columns):
        nndata[i] = sum(min_max_scaler.fit_transform(np.array(ndata[i]).reshape(-1,1)).tolist(),[])
    # ...and recompute the same statistics on the scaled data for comparison.
    resultn = pd.DataFrame(index=['Mean','Count','Std','Kurtosis','Skew','Shapiro','NormalTest'])
    for i in list(nndata.columns):
        f = [nndata[i].mean(), nndata[i].describe()['count'],nndata[i].describe()['std'],kurtosis(nndata[i]),skew(nndata[i]),shapiro(nndata[i])[1],normaltest(nndata[i])[1]]
        resultn[i] = [round(num,3) for num in f]
    print(' '),print(result),print(''),print(resultn),print(' ')
    def calculate_vif_(X, thresh):
        # Iteratively drop the feature with the highest variance inflation
        # factor until every remaining VIF is <= thresh.
        variables = list(range(X.shape[1]))
        dropped = True
        while dropped:
            dropped = False
            vif = [variance_inflation_factor(X.iloc[:, variables].values, ix)
                   for ix in range(X.iloc[:, variables].shape[1])]
            maxloc = vif.index(max(vif))
            if max(vif) > thresh:
                print('dropping \'' + X.iloc[:, variables].columns[maxloc] +
                      '\' at index: ' + str(maxloc))
                del variables[maxloc]
                dropped = True
        print(' '),print('Recommended Remaining variables:')
        print(list(X.columns[variables]))
        return X.iloc[:, variables]
    vif = calculate_vif_(nndata, thresh=10.0) #Threshold value should be adjusted per preference.
    return nndata, vif
# Run the full preprocessing pipeline (plots enabled). The function returns
# the processed data and the output of the multicollinearity analysis.
scaled_df, vif_df = processData(data, 1)
pre = (scaled_df, vif_df)
print(pre[0].head())
print(pre[1].head())
| Processing Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + cellView="form" id="ur8xi4C7S06n"
# @title Copyright & License (click to expand)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="fsv4jGuU89rX"
# # Vertex AI Model Monitoring with Explainable AI Feature Attributions
#
# <table align="left">
# <td>
# <a href="https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name=Model%20Monitoring&download_url=https%3A%2F%2Fraw.githubusercontent.com%2FGoogleCloudPlatform%2Fvertex-ai-samples%2Fmaster%2Fnotebooks%2Fcommunity%2Fmodel_monitoring%2Fmodel_monitoring_feature_attribs.ipynb">
# <img src="https://www.gstatic.com/cloud/images/navigation/vertex-ai.svg" alt="Google Cloud Notebooks">Open in Cloud Notebook
# </a>
# </td>
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/model_monitoring/model_monitoring_feature_attribs.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Open in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/model_monitoring/model_monitoring_feature_attribs.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] id="lA32H1oKGgpf"
# ## Overview
# + [markdown] id="t6Cd51FkG09E"
# ### What is Model Monitoring?
#
# Modern applications rely on a well established set of capabilities to monitor the health of their services. Examples include:
#
# * software versioning
# * rigorous deployment processes
# * event logging
# * alerting/notification of situations requiring intervention
# * on-demand and automated diagnostic tracing
# * automated performance and functional testing
#
# You should be able to manage your ML services with the same degree of power and flexibility with which you can manage your applications. That's what MLOps is all about - managing ML services with the best practices Google and the broader computing industry have learned from generations of experience deploying well engineered, reliable, and scalable services.
#
# Model monitoring is only one piece of the ML Ops puzzle - it helps answer the following questions:
#
# * How well do recent service requests match the training data used to build your model? This is called **training-serving skew**.
# * How significantly are service requests evolving over time? This is called **drift detection**.
#
# [Vertex Explainable AI](https://cloud.google.com/vertex-ai/docs/explainable-ai/overview) adds another facet to model monitoring, which we call feature attribution monitoring. Explainable AI enables you to understand the relative contribution of each feature to a resulting prediction. In essence, it assesses the magnitude of each feature's influence.
#
# If production traffic differs from training data, or varies substantially over time, **either in terms of model predictions or feature attributions**, that's likely to impact the quality of the answers your model produces. When that happens, you'd like to be alerted automatically and responsively, so that **you can anticipate problems before they affect your customer experiences or your revenue streams**.
# + [markdown] id="yG7FcXWKHOhC"
# ### Objective
#
# In this notebook, you will learn how to...
#
# * deploy a pre-trained model
# * configure model monitoring
# * generate some artificial traffic
# * understand how to interpret the statistics, visualizations, other data reported by the model monitoring feature
# + [markdown] id="8yVpQt-JHKPF"
# ### Costs
#
# This tutorial uses billable components of Google Cloud:
#
# * Vertex AI
# * BigQuery
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="tvgnzT1CKxrO"
# ### The example model
#
# The model you'll use in this notebook is based on [this blog post](https://cloud.google.com/blog/topics/developers-practitioners/churn-prediction-game-developers-using-google-analytics-4-ga4-and-bigquery-ml). The idea behind this model is that your company has extensive log data describing how your game users have interacted with the site. The raw data contains the following categories of information:
#
# - identity - unique player identitity numbers
# - demographic features - information about the player, such as the geographic region in which a player is located
# - behavioral features - counts of the number of times a player has triggered certain game events, such as reaching a new level
# - churn propensity - this is the label or target feature, it provides an estimated probability that this player will churn, i.e. stop being an active player.
#
# The blog article referenced above explains how to use BigQuery to store the raw data, pre-process it for use in machine learning, and train a model. Because this notebook focuses on model monitoring, rather than training models, you're going to reuse a pre-trained version of this model, which has been exported to Google Cloud Storage. In the next section, you will setup your environment and import this model into your own project.
# + [markdown] id="ze4-nDLfK4pw"
# ## Before you begin
# + [markdown] id="i7EUnXsZhAGF"
# ### Setup your dependencies
# + id="020040f91150"
import os

# Google Cloud Notebooks expose this marker file; its presence tells us the
# code is running on the managed notebook product.
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")

# Google Cloud Notebook requires dependencies to be installed with '--user'.
USER_FLAG = "--user" if IS_GOOGLE_CLOUD_NOTEBOOK else ""
# + id="UcwUlnMnfo3i"
import os
import pprint as pp
import sys
import IPython

# Fail fast on Python 2 — everything below assumes Python 3.
assert sys.version_info.major == 3, "This notebook requires Python 3."

# Install Python package dependencies.
print("Installing TensorFlow and TensorFlow Data Validation (TFDV)")
# ! pip3 install {USER_FLAG} --quiet --upgrade tensorflow tensorflow_data_validation[visualization]
# ! rm -f /opt/conda/lib/python3.7/site-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so
# ! pip3 install {USER_FLAG} --quiet --upgrade google-api-python-client google-auth-oauthlib google-auth-httplib2 oauth2client requests
# ! pip3 install {USER_FLAG} --quiet --upgrade google-cloud-aiplatform
# ! pip3 install {USER_FLAG} --quiet --upgrade explainable_ai_sdk
# ! pip3 install {USER_FLAG} --quiet --upgrade google-cloud-storage==1.32.0

# Automatically restart kernel after installing new packages.
if not os.getenv("IS_TESTING"):
    print("Restarting kernel...")
    # Restart the Jupyter kernel so freshly installed packages become
    # importable without re-opening the notebook.
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
    print("Done.")
# + id="8rv5wnmA6BTd"
# Import required packages.
import json
import os
import random
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
# + [markdown] id="BF1j6f9HApxa"
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
#
# 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
#
# 1. You'll use the *gcloud* command throughout this notebook. In the following cell, enter your project name and run the cell to authenticate yourself with the Google Cloud and initialize your *gcloud* configuration settings.
#
# **For this lab, we're going to use region us-central1 for all our resources (BigQuery training data, Cloud Storage bucket, model and endpoint locations, etc.). Those resources can be deployed in other regions, as long as they're consistently co-located, but we're going to use one fixed region to keep things as simple and error free as possible.**
# + cellView="form" id="wxiE6dEWOFm3"
# Project / region configuration. All resources in this notebook (training
# data, model, endpoint, monitoring job) are assumed to live in the same
# region (us-central1).
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
REGION = "us-central1"
SUFFIX = "aiplatform.googleapis.com"
# Regional control-plane and prediction-serving API hostnames.
API_ENDPOINT = f"{REGION}-{SUFFIX}"
PREDICT_API_ENDPOINT = f"{REGION}-prediction-{SUFFIX}"
if os.getenv("IS_TESTING"):
    # In automated testing, point the gcloud config at this project/region
    # (the shell magics below run only inside a notebook).
    # !gcloud --quiet components install beta
    # !gcloud --quiet components update
    # !gcloud config set project $PROJECT_ID
    # !gcloud config set ai/region $REGION
    os.environ["GOOGLE_CLOUD_PROJECT"] = PROJECT_ID
# + [markdown] id="sECqTau7Oh6M"
# ### Login to your Google Cloud account and enable AI services
# + id="C6H1vZYjvT6w"
# NOTE(review): as a plain .py file this cell is not runnable — the elif
# branch below contains only a commented-out %env magic. It is valid only as
# a notebook cell where the magic is live.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
    if "google.colab" in sys.modules:
        # Interactive OAuth flow when running inside Colab.
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        # %env GOOGLE_APPLICATION_CREDENTIALS ''
# !gcloud services enable aiplatform.googleapis.com
# + [markdown] id="btZeLzqQ7pXc"
# ### Define some helper functions and data structures
#
# Run the following cell to define some utility functions used throughout this notebook. Although these functions are not critical to understand the main concepts, feel free to expand the cell if you're curious or want to dive deeper into how some of your API requests are made.
# + cellView="form" id="yhDFSB2YDvfT"
# @title Utility functions
import copy
import os
from explainable_ai_sdk.metadata.tf.v2 import SavedModelMetadataBuilder
from google.cloud.aiplatform_v1.services.endpoint_service import \
EndpointServiceClient
from google.cloud.aiplatform_v1.services.job_service import JobServiceClient
from google.cloud.aiplatform_v1.services.prediction_service import \
PredictionServiceClient
from google.cloud.aiplatform_v1.types.io import BigQuerySource
from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import (
ModelDeploymentMonitoringJob, ModelDeploymentMonitoringObjectiveConfig,
ModelDeploymentMonitoringScheduleConfig)
from google.cloud.aiplatform_v1.types.model_monitoring import (
ModelMonitoringAlertConfig, ModelMonitoringObjectiveConfig,
SamplingStrategy, ThresholdConfig)
from google.cloud.aiplatform_v1.types.prediction_service import (
ExplainRequest, PredictRequest)
from google.protobuf import json_format
from google.protobuf.duration_pb2 import Duration
from google.protobuf.struct_pb2 import Value
DEFAULT_THRESHOLD_VALUE = 0.001
def create_monitoring_job(objective_configs):
    """Create and start a model-deployment monitoring job for the endpoint.

    Builds the sampling, schedule, and alerting configurations from the
    module-level settings (LOG_SAMPLE_RATE, MONITOR_INTERVAL, USER_EMAIL,
    JOB_NAME, PROJECT_ID, REGION, ENDPOINT_ID), submits the job through
    JobServiceClient, prints the API response, and returns it.
    """
    # How frequently prediction requests/responses are logged.
    sampling = SamplingStrategy(
        random_sample_config=SamplingStrategy.RandomSampleConfig(
            sample_rate=LOG_SAMPLE_RATE
        )
    )
    # Length of each analysis window between monitoring runs.
    schedule = ModelDeploymentMonitoringScheduleConfig(
        monitor_interval=Duration(seconds=MONITOR_INTERVAL)
    )
    # Email notifications for detected anomalies.
    alerting = ModelMonitoringAlertConfig(
        email_alert_config=ModelMonitoringAlertConfig.EmailAlertConfig(
            user_emails=[USER_EMAIL]
        )
    )
    # Assemble the job. Schema URIs are left empty, matching the original cell.
    monitoring_job = ModelDeploymentMonitoringJob(
        display_name=JOB_NAME,
        endpoint=f"projects/{PROJECT_ID}/locations/{REGION}/endpoints/{ENDPOINT_ID}",
        model_deployment_monitoring_objective_configs=objective_configs,
        logging_sampling_strategy=sampling,
        model_deployment_monitoring_schedule_config=schedule,
        model_monitoring_alert_config=alerting,
        predict_instance_schema_uri="",
        analysis_instance_schema_uri="",
    )
    api_client = JobServiceClient(client_options={"api_endpoint": API_ENDPOINT})
    response = api_client.create_model_deployment_monitoring_job(
        parent=f"projects/{PROJECT_ID}/locations/{REGION}",
        model_deployment_monitoring_job=monitoring_job,
    )
    print("Created monitoring job:")
    print(response)
    return response
def get_thresholds(default_thresholds, custom_thresholds):
    """Parse threshold specs into a {feature_name: ThresholdConfig} mapping.

    Args:
        default_thresholds: comma-separated feature names that should use
            DEFAULT_THRESHOLD_VALUE.
        custom_thresholds: comma-separated "feature:value" pairs giving an
            explicit per-feature threshold.

    Returns:
        dict mapping feature name to ThresholdConfig.

    Raises:
        ValueError: if a custom threshold entry is not a "feature:value" pair.
    """
    thresholds = {}
    default_threshold = ThresholdConfig(value=DEFAULT_THRESHOLD_VALUE)
    for feature in default_thresholds.split(","):
        thresholds[feature.strip()] = default_threshold
    for custom_threshold in custom_thresholds.split(","):
        pair = custom_threshold.split(":")
        if len(pair) != 2:
            # Previously this printed a warning and returned None, which made
            # the downstream objective-config construction fail obscurely.
            # Fail fast with a clear error instead.
            raise ValueError(f"Invalid custom skew threshold: {custom_threshold}")
        feature, value = pair
        # Strip whitespace for consistency with the default-threshold parsing.
        thresholds[feature.strip()] = ThresholdConfig(value=float(value))
    return thresholds
def get_deployed_model_ids(endpoint_id):
    """Return the IDs of every model deployed to the given endpoint."""
    client = EndpointServiceClient(
        client_options=dict(api_endpoint=API_ENDPOINT)
    )
    endpoint_name = (
        f"projects/{PROJECT_ID}/locations/{REGION}/endpoints/{endpoint_id}"
    )
    endpoint = client.get_endpoint(name=endpoint_name)
    return [deployed.id for deployed in endpoint.deployed_models]
def set_objectives(model_ids, objective_template):
    """Return one monitoring objective config per deployed model ID.

    Every entry is an independent deep copy of ``objective_template`` with
    its ``deployed_model_id`` filled in, so all models share identical
    monitoring objectives without sharing mutable state.
    """

    def _for_model(model_id):
        # Deep copy so later mutations of one config never leak into another.
        config = copy.deepcopy(objective_template)
        config.deployed_model_id = model_id
        return config

    return [_for_model(model_id) for model_id in model_ids]
def send_predict_request(endpoint, input, type="predict"):
    """Send a predict or explain request; return the response, or None on error.

    NOTE(review): ``input`` and ``type`` shadow builtins, but these parameter
    names are part of the signature used by callers (e.g. type="explain"),
    so they are kept unchanged.
    """
    client = PredictionServiceClient(
        client_options={"api_endpoint": PREDICT_API_ENDPOINT}
    )
    # Dispatch table: request class and client method per request type.
    dispatch = {
        "predict": (PredictRequest, client.predict),
        "explain": (ExplainRequest, client.explain),
    }
    if type not in dispatch:
        raise Exception("unsupported request type:" + type)
    request_cls, method = dispatch[type]
    request = request_cls(
        endpoint=endpoint,
        parameters=json_format.ParseDict({}, Value()),
    )
    request.instances.extend([json_format.ParseDict(input, Value())])
    response = None
    try:
        response = method(request)
    except Exception as ex:
        # Best-effort: report the failure and return None to the caller.
        print(ex)
    return response
def list_monitoring_jobs():
    """Print all model-deployment monitoring jobs in the project/region."""
    client_options = dict(api_endpoint=API_ENDPOINT)
    # Bug fix: the location was hard-coded to "us-central1" while every other
    # helper in this module builds the parent path from REGION.
    parent = f"projects/{PROJECT_ID}/locations/{REGION}"
    client = JobServiceClient(client_options=client_options)
    response = client.list_model_deployment_monitoring_jobs(parent=parent)
    print(response)
def pause_monitoring_job(job):
    """Pause the monitoring job identified by its full resource name."""
    client = JobServiceClient(client_options={"api_endpoint": API_ENDPOINT})
    result = client.pause_model_deployment_monitoring_job(name=job)
    print(result)
def delete_monitoring_job(job):
    """Delete the monitoring job identified by its full resource name."""
    client = JobServiceClient(client_options={"api_endpoint": API_ENDPOINT})
    result = client.delete_model_deployment_monitoring_job(name=job)
    print(result)
# Sampling distributions for categorical features...
# NOTE(review): these weights look like observed frequency counts from the
# training set — confirm. They are used as `weights=` for random.choices
# in monitoring_test below.
DAYOFWEEK = {1: 1040, 2: 1223, 3: 1352, 4: 1217, 5: 1078, 6: 1011, 7: 1110}
LANGUAGE = {
    "en-us": 4807,
    "en-gb": 678,
    "ja-jp": 419,
    "en-au": 310,
    "en-ca": 299,
    "de-de": 147,
    "en-in": 130,
    "en": 127,
    "fr-fr": 94,
    "pt-br": 81,
    "es-us": 65,
    "zh-tw": 64,
    "zh-hans-cn": 55,
    "es-mx": 53,
    "nl-nl": 37,
    "fr-ca": 34,
    "en-za": 29,
    "vi-vn": 29,
    "en-nz": 29,
    "es-es": 25,
}
OS = {"IOS": 3980, "ANDROID": 3798, "null": 253}
MONTH = {6: 3125, 7: 1838, 8: 1276, 9: 1718, 10: 74}
COUNTRY = {
    "United States": 4395,
    "India": 486,
    "Japan": 450,
    "Canada": 354,
    "Australia": 327,
    "United Kingdom": 303,
    "Germany": 144,
    "Mexico": 102,
    "France": 97,
    "Brazil": 93,
    "Taiwan": 72,
    "China": 65,
    "Saudi Arabia": 49,
    "Pakistan": 48,
    "Egypt": 46,
    "Netherlands": 45,
    "Vietnam": 42,
    "Philippines": 39,
    "South Africa": 38,
}
# Means and standard deviations for numerical features...
# Used by monitoring_test to draw gaussian samples per numeric feature.
MEAN_SD = {
    "julianday": (204.6, 34.7),
    "cnt_user_engagement": (30.8, 53.2),
    "cnt_level_start_quickplay": (7.8, 28.9),
    "cnt_level_end_quickplay": (5.0, 16.4),
    "cnt_level_complete_quickplay": (2.1, 9.9),
    "cnt_level_reset_quickplay": (2.0, 19.6),
    "cnt_post_score": (4.9, 13.8),
    "cnt_spend_virtual_currency": (0.4, 1.8),
    "cnt_ad_reward": (0.1, 0.6),
    "cnt_challenge_a_friend": (0.0, 0.3),
    "cnt_completed_5_levels": (0.1, 0.4),
    "cnt_use_extra_steps": (0.4, 1.7),
}
# Baseline prediction instance; monitoring_test copies and perturbs it,
# and the test cells below send it to the endpoint verbatim.
DEFAULT_INPUT = {
    "cnt_ad_reward": 0,
    "cnt_challenge_a_friend": 0,
    "cnt_completed_5_levels": 1,
    "cnt_level_complete_quickplay": 3,
    "cnt_level_end_quickplay": 5,
    "cnt_level_reset_quickplay": 2,
    "cnt_level_start_quickplay": 6,
    "cnt_post_score": 34,
    "cnt_spend_virtual_currency": 0,
    "cnt_use_extra_steps": 0,
    "cnt_user_engagement": 120,
    "country": "Denmark",
    "dayofweek": 3,
    "julianday": 254,
    "language": "da-dk",
    "month": 9,
    "operating_system": "IOS",
    "user_pseudo_id": "104B0770BAE16E8B53DF330C95881893",
}
# + [markdown] id="1mhT_d_Bi-Kf"
# ### Generate model metadata for explainable AI
#
# Run the following cell to extract metadata from the exported model, which is needed for generating the prediction explanations.
# + id="d1a984c57356"
# Build explanation metadata from the exported SavedModel; attributions are
# computed for the "churned_probs" output tensor.
builder = SavedModelMetadataBuilder(
    "gs://mco-mm/churn", outputs_to_explain=["churned_probs"]
)
builder.save_metadata(".")
md = builder.get_metadata()
# NOTE(review): presumably these keys are not accepted by the model-upload
# request that embeds `md` below — confirm before changing.
del md["tags"]
del md["framework"]
# + [markdown] id="lAOk8UqvCL0S"
# ## Import your model
#
# The churn propensity model you'll be using in this notebook has been trained in BigQuery ML and exported to a Google Cloud Storage bucket. This illustrates how you can easily export a trained model and move a model from one cloud service to another.
#
# Run the next cell to import this model into your project. **If you've already imported your model, you can skip this step.**
# + id="50c05f40f1dc"
MODEL_NAME = "churn"
# Prebuilt TF2 (CPU) serving image that will host the exported model.
IMAGE = "us-docker.pkg.dev/cloud-aiplatform/prediction/tf2-cpu.2-5:latest"
ENDPOINT = "us-central1-aiplatform.googleapis.com"
churn_model_path = "gs://mco-mm/churn"
# Model-upload request body; embeds the explanation metadata `md` built above.
request_data = {
    "model": {
        "displayName": "churn",
        "artifactUri": churn_model_path,
        "containerSpec": {"imageUri": IMAGE},
        "explanationSpec": {
            "parameters": {"sampledShapleyAttribution": {"pathCount": 5}},
            "metadata": md,
        },
    }
}
with open("request_data.json", "w") as outfile:
    json.dump(request_data, outfile)
# output = !curl -X POST \
# -H "Authorization: Bearer $(gcloud auth print-access-token)" \
# -H "Content-Type: application/json" \
# https://{ENDPOINT}/v1/projects/{PROJECT_ID}/locations/{REGION}/models:upload \
# -d @request_data.json 2>/dev/null
# print(output)
# NOTE(review): `output` is produced by the !curl cell magic above; this parse
# assumes the model resource name appears on its second line — brittle if the
# API response format changes.
MODEL_ID = output[1].split()[1].split("/")[5]
print(f"Model {MODEL_NAME}/{MODEL_ID} created.")
# + [markdown] id="e2030b028cef"
# This request will return immediately but it spawns an asynchronous task that takes several minutes. Periodically check the Vertex Models page on the Cloud Console and don't continue with this lab until you see your newly created model there. It should look something like this:
# <br>
# <br>
# <img src="https://storage.googleapis.com/mco-general/img/mm0.png" />
# <br>
# + [markdown] id="d7cbb0fb73cc"
# ## Deploy your endpoint
#
# Now that you've imported your model into your project, you need to create an endpoint to serve your model. An endpoint can be thought of as a channel through which your model provides prediction services. Once established, you'll be able to make prediction requests on your model via the public internet. Your endpoint is also serverless, in the sense that Google ensures high availability by reducing single points of failure, and scalability by dynamically allocating resources to meet the demand for your service. In this way, you are able to focus on your model quality, and freed from administrative and infrastructure concerns.
#
# Run the next cell to deploy your model to an endpoint. **This will take about ten minutes to complete.**
# + id="M4nHQTVh7PIi"
ENDPOINT_NAME = "churn"
# output = !gcloud --quiet beta ai endpoints create --display-name=$ENDPOINT_NAME --format="value(name)"
# print("endpoint output: ", output)
# `output` comes from the gcloud cell magic above; its last line is expected
# to be the endpoint resource name ("projects/.../endpoints/<id>").
ENDPOINT = output[-1]
ENDPOINT_ID = ENDPOINT.split("/")[-1]
# output = !gcloud --quiet beta ai endpoints deploy-model $ENDPOINT_ID --display-name=$ENDPOINT_NAME --model=$MODEL_ID --traffic-split="0=100"
print(f"Model deployed to Endpoint {ENDPOINT_NAME}/{ENDPOINT_ID}.")
# + [markdown] id="NKsA_lfl9Ryw"
# ## Run a prediction test
#
# Now that you have imported a model and deployed that model to an endpoint, you are ready to verify that it's working. Run the next cell to send a test prediction request. If everything works as expected, you should receive a response encoded in a text representation called JSON, along with a pie chart summarizing the results.
#
# **Try this now by running the next cell and examine the results.**
# + id="QNEb7fDJ9NXc"
# print(ENDPOINT)
# pp.pprint(DEFAULT_INPUT)
try:
    resp = send_predict_request(ENDPOINT, DEFAULT_INPUT)
    # Each prediction carries parallel lists of class values and probabilities.
    for i in resp.predictions:
        vals = i["churned_values"]
        probs = i["churned_probs"]
        # NOTE(review): the inner loop reuses `i`, shadowing the outer
        # prediction variable; harmless here but easy to trip over.
        for i in range(len(vals)):
            print(vals[i], probs[i])
        plt.pie(probs, labels=vals)
        plt.show()
    pp.pprint(resp)
except Exception as ex:
    print("prediction request failed", ex)
# + [markdown] id="a1eb4131bb5e"
# Taking a closer look at the results, we see the following elements:
#
# - **churned_values** - a set of possible values (0 and 1) for the target field
# - **churned_probs** - a corresponding set of probabilities for each possible target field value (5x10^-40 and 1.0, respectively)
# - **predicted_churn** - based on the probabilities, the predicted value of the target field (1)
#
# This response encodes the model's prediction in a format that is readily digestible by software, which makes this service ideal for automated use by an application.
# + [markdown] id="be7830c55cdc"
# ## Run an explanation test
#
# We can also run a test of explainable AI on this endpoint. Run the next cell to send a test explanation request. If everything works as expected, you should receive a response encoding the feature importance of this prediction in a text representation called JSON, along with a bar chart summarizing the results.
#
# **Try this now by running the next cell and examine the results.**
# + id="dcbd4e755931"
# print(ENDPOINT)
# pp.pprint(DEFAULT_INPUT)
try:
    features = []
    scores = []
    resp = send_predict_request(ENDPOINT, DEFAULT_INPUT, type="explain")
    # Flatten the nested explanation structure into parallel
    # feature-name / attribution-score lists.
    for i in resp.explanations:
        for j in i.attributions:
            for k in j.feature_attributions:
                features.append(k)
                scores.append(j.feature_attributions[k])
    # Sort feature names by ascending attribution score for the bar chart.
    features = [x for _, x in sorted(zip(scores, features))]
    scores = sorted(scores)
    fig, ax = plt.subplots()
    fig.set_size_inches(9, 9)
    ax.barh(features, scores)
    fig.show()
# pp.pprint(resp)
except Exception as ex:
    print("explanation request failed", ex)
# + [markdown] id="8rF5iLuXCT7i"
# ## Start your monitoring job
#
# Now that you've created an endpoint to serve prediction requests on your model, you're ready to start a monitoring job to keep an eye on model quality and to alert you if and when input begins to deviate in a way that may impact your model's prediction quality.
#
# In this section, you will configure and create a model monitoring job based on the churn propensity model you imported from BigQuery ML.
# + [markdown] id="wW2gLBQ3Zkhq"
# ### Configure the following fields:
#
# 1. Log sample rate - Your prediction requests and responses are logged to BigQuery tables, which are automatically created when you create a monitoring job. This parameter specifies the desired logging frequency for those tables.
# 1. Monitor interval - time window over which to analyze your data and report anomalies. The minimum window is one hour (3600 seconds)
# 1. Target field - prediction target column name in training dataset
# 1. Skew detection threshold - skew threshold for each feature you want to monitor
# 1. Prediction drift threshold - drift threshold for each feature you want to monitor
# 1. Attribution Skew detection threshold - feature importance skew threshold
# 1. Attribution Prediction drift threshold - feature importance drift threshold
# + id="plpASmM2YIVO"
# Email address that receives monitoring alert notifications (required —
# used by create_monitoring_job's alerting config).
USER_EMAIL = "" # @param {type:"string"}
JOB_NAME = "churn"
# Sampling rate (optional, default=.8)
LOG_SAMPLE_RATE = 0.8 # @param {type:"number"}
# Monitoring Interval in seconds (optional, default=3600).
MONITOR_INTERVAL = 3600 # @param {type:"number"}
# URI to training dataset.
DATASET_BQ_URI = "bq://mco-mm.bqmlga4.train" # @param {type:"string"}
# Prediction target column name in training dataset.
TARGET = "churned"
# Skew and drift thresholds.
# Comma-separated feature lists use DEFAULT_THRESHOLD_VALUE; "feature:value"
# entries set a custom threshold (both parsed by get_thresholds).
SKEW_DEFAULT_THRESHOLDS = "country,cnt_user_engagement" # @param {type:"string"}
SKEW_CUSTOM_THRESHOLDS = "cnt_level_start_quickplay:.01" # @param {type:"string"}
DRIFT_DEFAULT_THRESHOLDS = "country,cnt_user_engagement" # @param {type:"string"}
DRIFT_CUSTOM_THRESHOLDS = "cnt_level_start_quickplay:.01" # @param {type:"string"}
ATTRIB_SKEW_DEFAULT_THRESHOLDS = "country,cnt_user_engagement" # @param {type:"string"}
ATTRIB_SKEW_CUSTOM_THRESHOLDS = (
    "cnt_level_start_quickplay:.01" # @param {type:"string"}
)
ATTRIB_DRIFT_DEFAULT_THRESHOLDS = (
    "country,cnt_user_engagement" # @param {type:"string"}
)
ATTRIB_DRIFT_CUSTOM_THRESHOLDS = (
    "cnt_level_start_quickplay:.01" # @param {type:"string"}
)
# + [markdown] id="mjVSViZR-dP2"
# ### Create your monitoring job
#
# The following code uses the Google Python client library to translate your configuration settings into a programmatic request to start a model monitoring job. Instantiating a monitoring job can take some time. If everything looks good with your request, you'll get a successful API response. Then, you'll need to check your email to receive a notification that the job is running.
# + id="-62TYm2iYv3K"
# Parse the threshold strings above into {feature: ThresholdConfig} maps.
skew_thresholds = get_thresholds(SKEW_DEFAULT_THRESHOLDS, SKEW_CUSTOM_THRESHOLDS)
drift_thresholds = get_thresholds(DRIFT_DEFAULT_THRESHOLDS, DRIFT_CUSTOM_THRESHOLDS)
attrib_skew_thresholds = get_thresholds(
    ATTRIB_SKEW_DEFAULT_THRESHOLDS, ATTRIB_SKEW_CUSTOM_THRESHOLDS
)
attrib_drift_thresholds = get_thresholds(
    ATTRIB_DRIFT_DEFAULT_THRESHOLDS, ATTRIB_DRIFT_CUSTOM_THRESHOLDS
)
# Training-serving skew detection (feature values and attribution scores).
skew_config = ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
    skew_thresholds=skew_thresholds,
    attribution_score_skew_thresholds=attrib_skew_thresholds,
)
# Prediction drift detection (feature values and attribution scores).
drift_config = ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig(
    drift_thresholds=drift_thresholds,
    attribution_score_drift_thresholds=attrib_drift_thresholds,
)
# Enable feature-attribution (explainability) monitoring.
explanation_config = ModelMonitoringObjectiveConfig.ExplanationConfig(
    enable_feature_attributes=True
)
# Skew baseline: the BigQuery training dataset and its target column.
training_dataset = ModelMonitoringObjectiveConfig.TrainingDataset(target_field=TARGET)
training_dataset.bigquery_source = BigQuerySource(input_uri=DATASET_BQ_URI)
objective_config = ModelMonitoringObjectiveConfig(
    training_dataset=training_dataset,
    training_prediction_skew_detection_config=skew_config,
    prediction_drift_detection_config=drift_config,
    explanation_config=explanation_config,
)
# Apply the same objective config to every model deployed on the endpoint,
# then create and start the monitoring job.
model_ids = get_deployed_model_ids(ENDPOINT_ID)
objective_template = ModelDeploymentMonitoringObjectiveConfig(
    objective_config=objective_config
)
objective_configs = set_objectives(model_ids, objective_template)
monitoring_job = create_monitoring_job(objective_configs)
# + id="OiwOVR4D_xhl"
# Run a prediction request to generate schema, if necessary.
# (send_predict_request already catches and prints request errors internally,
# so this outer guard mainly covers unexpected failures.)
try:
    _ = send_predict_request(ENDPOINT, DEFAULT_INPUT)
    print("prediction succeeded")
except Exception:
    print("prediction failed")
# + [markdown] id="SaXYVFFslRru"
# After a minute or two, you should receive email at the address you configured above for USER_EMAIL. This email confirms successful deployment of your monitoring job. Here's a sample of what this email might look like:
# <br>
# <br>
# <img src="https://storage.googleapis.com/mco-general/img/mm6.png" />
# <br>
# As your monitoring job collects data, measurements are stored in Google Cloud Storage and you are free to examine your data at any time. The circled path in the image above specifies the location of your measurements in Google Cloud Storage. Run the following cell to see an example of the layout of these measurements in Cloud Storage. If you substitute the Cloud Storage URL in your job creation email, you can view the structure and content of the data files for your own monitoring job.
# + id="XV-vru2Pm1oX"
# !gsutil ls gs://cloud-ai-platform-fdfb4810-148b-4c86-903c-dbdff879f6e1/*/*
# + [markdown] id="XgUwU0sDpUUD"
# You will notice the following components in these Cloud Storage paths:
#
# - **cloud-ai-platform-..** - This is a bucket created for you and assigned to capture your service's prediction data. Each monitoring job you create will trigger creation of a new folder in this bucket.
# - **[model_monitoring|instance_schemas]/job-..** - This is your unique monitoring job number, which you can see above in both the response to your job creation request and the email notification.
# - **instance_schemas/job-../analysis** - This is the monitoring job's understanding and encoding of your training data's schema (field names, types, etc.).
# - **instance_schemas/job-../predict** - This is the first prediction made to your model after the current monitoring job was enabled.
# - **model_monitoring/job-../serving** - This folder is used to record data relevant to drift calculations. It contains measurement summaries for every hour your model serves traffic.
# - **model_monitoring/job-../training** - This folder is used to record data relevant to training-serving skew calculations. It contains an ongoing summary of prediction data relative to training data.
# - **model_monitoring/job-../feature_attribution_score** - This folder is used to record data relevant to feature attribution calculations. It contains an ongoing summary of feature attribution scores relative to training data.
# + [markdown] id="8V2zo7-MMd7G"
# ### You can create monitoring jobs with other user interfaces
#
# In the previous cells, you created a monitoring job using the Python client library. You can also use the *gcloud* command line tool to create a model monitoring job and, in the near future, you will be able to use the Cloud Console, as well for this function.
#
# + [markdown] id="Q106INuFCXKX"
# ## Generate test data to trigger alerting
#
# Now you are ready to test the monitoring function. Run the following cell, which will generate fabricated test predictions designed to trigger the thresholds you specified above. This cell runs two five minute tests, one minute apart, so it should take roughly eleven minutes to complete the test.
#
# The first test sends 300 fabricated requests (one per second for five minutes) while perturbing two features of interest (cnt_level_start_quickplay and country) by a factor of two. The second test does the same thing but perturbs the selected feature distributions by a factor of three. By perturbing data in two experiments, we're able to trigger both skew and drift alerts.
#
# After running this test, it takes at least an hour to assess and report skew and drift alerts so feel free to proceed with the notebook now and you'll see how to examine the resulting alerts later.
# + id="obZYLLAuKmG8"
def random_uid():
    """Return a random 32-character uppercase-hex pseudo user id."""
    return "".join(random.choices("0123456789ABCDEF", k=32))
def monitoring_test(count, sleep, perturb_num=None, perturb_cat=None):
    """Send `count` fabricated prediction requests, `sleep` seconds apart.

    Samples categorical features from the frequency tables (COUNTRY,
    DAYOFWEEK, LANGUAGE, OS, MONTH) and numerical features from gaussians
    parameterized by MEAN_SD, optionally perturbing selected features to
    trigger skew/drift alerts.

    Args:
        count: number of prediction requests to send.
        sleep: seconds to wait between requests.
        perturb_num: optional {feature: (mean_fn, sd_fn)} transforms applied
            to that feature's mean/sd before sampling.
        perturb_cat: optional {country: weight} overrides for the country
            sampling distribution.
    """
    # Bug fix: the defaults were mutable dicts ({}), which Python evaluates
    # once at definition time; use None sentinels instead.
    perturb_num = {} if perturb_num is None else perturb_num
    perturb_cat = {} if perturb_cat is None else perturb_cat
    # Use random sampling and mean/sd with gaussian distribution to model
    # training data. Then modify sampling distros for two categorical features
    # and mean/sd for two numerical features.
    mean_sd = MEAN_SD.copy()
    country = COUNTRY.copy()
    for k, (mean_fn, sd_fn) in perturb_num.items():
        orig_mean, orig_sd = MEAN_SD[k]
        mean_sd[k] = (mean_fn(orig_mean), sd_fn(orig_sd))
    for k, v in perturb_cat.items():
        country[k] = v
    for i in range(0, count):
        # NOTE(review): `input` shadows the builtin; kept to minimize churn.
        input = DEFAULT_INPUT.copy()
        input["user_pseudo_id"] = str(random_uid())
        # random.choices([*d], list(d.values())) samples keys weighted by counts.
        input["country"] = random.choices([*country], list(country.values()))[0]
        input["dayofweek"] = random.choices([*DAYOFWEEK], list(DAYOFWEEK.values()))[0]
        input["language"] = str(random.choices([*LANGUAGE], list(LANGUAGE.values()))[0])
        input["operating_system"] = str(random.choices([*OS], list(OS.values()))[0])
        input["month"] = random.choices([*MONTH], list(MONTH.values()))[0]
        for key, (mean, sd) in mean_sd.items():
            # Clamp gaussian samples at zero: these are count features.
            sample_val = round(float(np.random.normal(mean, sd, 1)))
            val = max(sample_val, 0)
            input[key] = val
        print(f"Sending prediction {i}")
        try:
            send_predict_request(ENDPOINT, input)
        except Exception:
            print("prediction request failed")
        time.sleep(sleep)
    print("Test Completed.")
# Run two five-minute experiments with perturbation multipliers 2 and 3,
# pausing a minute between them, so both skew and drift alerts can trigger.
start = 2
end = 3
for multiplier in range(start, end + 1):
    # 300 requests at one per second = five minutes of traffic.
    test_time = 300
    tests_per_sec = 1
    sleep_time = 1 / tests_per_sec
    iterations = test_time * tests_per_sec
    # Scale the feature's mean up and its sd down by the multiplier.
    perturb_num = {
        "cnt_level_start_quickplay": (
            lambda x: x * multiplier,
            lambda x: x / multiplier,
        )
    }
    # Over-weight "Japan" so the country distribution visibly shifts.
    perturb_cat = {"Japan": max(COUNTRY.values()) * multiplier}
    monitoring_test(iterations, sleep_time, perturb_num, perturb_cat)
    if multiplier < end:
        print("sleeping...")
        time.sleep(60)
# + [markdown] id="bQohDTJgLQlW"
# ## Interpret your results
#
# While waiting for your results, which, as noted, may take up to an hour, you can read ahead to get sense of the alerting experience.
# + [markdown] id="uGPI92qbOFUR"
# ### Here's what a sample email alert looks like...
#
# <img src="https://storage.googleapis.com/mco-general/img/mm7.png" />
#
# + [markdown] id="HoaqsxpaRs1m"
# This email is warning you that the *cnt_level_start_quickplay*, *cnt_user_engagement*, and *country* feature values seen in production have skewed above your threshold between training and serving your model. It's also telling you that the *cnt_user_engagement* and *country* feature attribution values are skewed relative to your training data, again, as per your threshold specification.
# + [markdown] id="w4jVIVq4VzB_"
# ### Monitoring results in the Cloud Console
#
# You can examine your model monitoring data from the Cloud Console. Below is a screenshot of those capabilities.
# + [markdown] id="2OdIMVBAPZi_"
# #### Monitoring Status
#
# You can verify that a given endpoint has an active model monitoring job via the Endpoint summary page:
#
# <img src="https://storage.googleapis.com/mco-general/img/mm1.png" />
#
# #### Monitoring Alerts
#
# You can examine the alert details by clicking into the endpoint of interest, and selecting the alerts panel:
#
# <img src="https://storage.googleapis.com/mco-general/img/mm2.png" />
#
# #### Feature Value Distributions
#
# You can also examine the recorded training and production feature distributions by drilling down into a given feature, like this:
#
# <img src="https://storage.googleapis.com/mco-general/img/mm9.png" />
#
# which yields graphical representations of the feature distribution during both training and production, like this:
#
# <img src="https://storage.googleapis.com/mco-general/img/mm8.png" />
# + [markdown] id="TpV-iwP9qw9c"
# ## Clean up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
# + id="TPP_ImwJDFJf"
# Delete endpoint resource
# !gcloud ai endpoints delete $ENDPOINT_NAME --quiet
# Delete model resource
# !gcloud ai models delete $MODEL_NAME --quiet
# + [markdown] id="j3Dh15h3-NoO"
# ## Learn more about model monitoring
#
# **Congratulations!** You've now learned what model monitoring is, how to configure and enable it, and how to find and interpret the results. Check out the following resources to learn more about model monitoring and ML Ops.
#
# - [TensorFlow Data Validation](https://www.tensorflow.org/tfx/guide/tfdv)
# - [Data Understanding, Validation, and Monitoring At Scale](https://blog.tensorflow.org/2018/09/introducing-tensorflow-data-validation.html)
# - [Vertex Product Documentation](https://cloud.google.com/vertex-ai)
# - [Model Monitoring Reference Docs](https://cloud.google.com/vertex-ai/docs/reference)
# - [Model Monitoring blog article](https://cloud.google.com/blog/topics/developers-practitioners/monitor-models-training-serving-skew-vertex-ai)
# - [Explainable AI Whitepaper](https://storage.googleapis.com/cloud-ai-whitepapers/AI%20Explainability%20Whitepaper.pdf)
| notebooks/community/model_monitoring/model_monitoring_feature_attribs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-6_dbDdTee8-" colab_type="text"
# In this challenge we'll be doing a linear fit with two input variables and one output, so our model looks like y ~ a + b*x_1 + c*x_2. First, let's import the libraries we'll need.
# + id="OiUQbJcPewzu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import numpy as np
import pandas as pd
import requests
import io
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# + [markdown] id="snGZ1Y9we0WX" colab_type="text"
# Now let's grab the data. This is a fictional data set made for the purpose of this exercise.
# + id="vP-tW7gKexOM" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Fetch the CSV over HTTP and parse the decoded bytes from memory.
url="https://raw.githubusercontent.com/mathewphilipc/LambdaDataSets/master/bivariate_linear_data.csv"
s=requests.get(url).content
data=pd.read_csv(io.StringIO(s.decode('utf-8')))
# + [markdown] id="hEaIEqGzioMA" colab_type="text"
# We're going to find the optimal [a, b, c] values using the *normal equations*. The first step is to add a new column to the left of the x1 column and set all of its values to 1. Think of this as the x0 column. Then introduce a matrix X consisting of the x0, x1, x2 columns and a vector Y with just the y column. Do any reshaping you need to make later steps work.
# + id="Cwxjiqfee4Y7" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Add the intercept column x0 = 1 so y ~ a + b*x1 + c*x2 can be written as a
# single matrix product. len(data) replaces the hard-coded row count of 200,
# so the cell works for any dataset size.
data['x0'] = np.ones(len(data))
# Fix: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported equivalent and returns the same ndarray.
X = data[['x0', 'x1', 'x2']].to_numpy()
Y = data['y'].to_numpy().reshape(-1, 1)
# + [markdown] id="LkDRXnQhkH6t" colab_type="text"
# To make sure this worked, verify that X has length 200 and width 3.
# + id="1JCPhP3IjpFL" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} outputId="5b708c32-b238-409d-8535-e40624312c90" executionInfo={"status": "ok", "timestamp": 1525794430534, "user_tz": 420, "elapsed": 348, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-BMlr5I5Dhow/AAAAAAAAAAI/AAAAAAAAABc/XW4PF5A8K2Q/s50-c-k-no/photo.jpg", "userId": "116545933704048584401"}}
# Expect (200, 3): 200 observations, three columns (x0, x1, x2).
X.shape
# + [markdown] id="eREV4V2Tm3i-" colab_type="text"
# And just for sanity's sake, print out the first row of X and compare it with the contents of the csv file we read from.
# + id="3VsYTGcWl4a0" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 119} outputId="c62f20f4-c115-41c4-bf17-44c367caa4ec" executionInfo={"status": "ok", "timestamp": 1525794431158, "user_tz": 420, "elapsed": 351, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-BMlr5I5Dhow/AAAAAAAAAAI/AAAAAAAAABc/XW4PF5A8K2Q/s50-c-k-no/photo.jpg", "userId": "116545933704048584401"}}
# Cross-check the first matrix row against the first DataFrame row.
print(X[0])
data.iloc[0]
# + [markdown] id="4TBuSnI-nAWs" colab_type="text"
# Now here's the most important part. As it turns out, the optimal vector [a, b, c] can be calculated in a single step (assuming you have a matrix algebra library and don't have to do the operations yourself) in terms of X and Y. Here's the solution:
#
# $(X^T X)^{-1} X^T Y$
#
# Calculate [a, b, c] and print the values of a, b, and c.
# + id="SFXu_C6cl7Fh" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Normal equations, written with the @ matrix-multiplication operator:
#   M = (X^T X)^{-1} (X^T Y)
gram_inverse = np.linalg.inv(X.T @ X)
moment = X.T @ Y
M = gram_inverse @ moment
# + id="bXS3ouh_XsN9" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 68} outputId="fabb22dd-29b4-498e-d69d-88faca18eac1" executionInfo={"status": "ok", "timestamp": 1525794432540, "user_tz": 420, "elapsed": 347, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-BMlr5I5Dhow/AAAAAAAAAAI/AAAAAAAAABc/XW4PF5A8K2Q/s50-c-k-no/photo.jpg", "userId": "116545933704048584401"}}
# Display the fitted coefficient vector [a, b, c].
M
# + [markdown] id="nYaqwc2sHI_v" colab_type="text"
# Finally, plot the points defined by x,y and the plane that you calculated using the above matrix transforms.
# + id="RJTx0yXjZSvr" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Build a grid over the observed x1/x2 range and evaluate the fitted plane.
x1, x2 = np.meshgrid(np.linspace(data['x1'].min(), data['x1'].max()),
                     np.linspace(data['x2'].min(), data['x2'].max()))
# Plane: y = a + b*x1 + c*x2 with coefficients M = [a, b, c].
Z = x1*M[1] + x2*M[2] + M[0]
# + id="JUSi543Imc_c" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 330} outputId="b8cd3865-5ab7-4087-e67e-500019fe0737" executionInfo={"status": "ok", "timestamp": 1525794435517, "user_tz": 420, "elapsed": 1524, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-BMlr5I5Dhow/AAAAAAAAAAI/AAAAAAAAABc/XW4PF5A8K2Q/s50-c-k-no/photo.jpg", "userId": "116545933704048584401"}}
# Scatter the raw observations and overlay the fitted regression plane.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(data['x1'], data['x2'], data['y'])
ax.plot_surface(x1, x2, Z)
ax.set(xlabel='x1', ylabel='x2', zlabel='y');
| Week 06 Modeling/Code Challenges/Day 2 Linear Regression Normal Equation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter Notebook and NumPy introduction
#
# Jupyter notebook is often used by data scientists who work in Python. It is loosely based on Mathematica and combines code, text and visual output in one page.
#
# ## Basic Jupyter Notebook commands
#
# Some relevant short cuts:
# * ```SHIFT + ENTER``` executes 1 block of code called a cell
# * Tab-completion is omnipresent after the import of a package has been executed
# * ```SHIFT + TAB``` gives you extra information on what parameters a function takes
# * Repeating ```SHIFT + TAB``` multiple times gives you even more information
#
# To get used to these short cuts try them out on the cell below.
print('Hello world!')
# In Python 3, range() is a lazy sequence object, so this prints
# "range(0, 5)" rather than a list of numbers.
print(range(5))
# ## Imports
#
# In Python you need to import tools to be able to use them. In this workshop we will mainly use the numpy toolbox and you can import it like this:
import numpy as np
# ## Parts to be implemented
#
# In cells like the following example you are expected to implement some code. The remainder of the tutorial won't work if you skip these.
#
# Sometimes assertions are added as a check.
# + deletable=false nbgrader={"checksum": "48c589851e38dc1c67e45d788269853f", "grade": false, "grade_id": "example_impl", "locked": false, "schema_version": 1, "solution": true}
# To proceed, implement the missing code, and remove the 'raise NotImplementedError()'
##### Implement this part of the code #####
# Intentional nbgrader placeholder: the student replaces the raise with an
# assignment (e.g. three = 3) so the assert below passes. Do not "fix" this
# in the template — grading relies on the cell being incomplete.
raise NotImplementedError()
# three = ?
assert three == 3
# ## Numpy arrays
#
# We'll be working often with numpy arrays so here's a short introduction.
# [`np.array()`](https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.array.html) create a new array.
# +
import numpy as np
# This is a two-dimensional numpy array:
arr = np.array([[1,2,3,4],[5,6,7,8]])
print(arr)
# The shape is a tuple describing the size of each dimension
print("shape=" + str(arr.shape))
# Elements are selected by specifying two indices, counting from 0
print("arr[1][3] = %d" % arr[1][3])
print()
# This is a three-dimensional numpy array
arr3 = np.array([[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]])
print(arr3)
print()
print("shape=" + str(arr3.shape))
# Elements in a three dimensional array are selected by specifying three indices, counting from 0
# (plane 1, row 0, column 2 → 11)
print(arr3[1][0][2])
# +
# The numpy reshape method allows one to change the shape of an array, while keeping the underlying data.
# One can leave one dimension unspecified by passing -1, it will be determined from the size of the data.
print("Original array:")
print(arr)
print()
print("As 4x2 matrix")
print(np.reshape(arr, (4,2)))
print()
print("As 8x1 matrix")
# -1 lets numpy infer that dimension from the data size (8 elements → 8 rows)
print(np.reshape(arr, (-1,1)))
print()
print("As 2x2x2 array")
print(np.reshape(arr, (2,2,-1)))
# +
# the numpy sum, mean min and max can be used to calculate aggregates across any axis
table = np.array([[10.9, 12.1, 15.2, 7.3], [3.9, 1.2, 34.6, 8.3], [1.9, 23.3, 1.2, 3.7]])
print(table)
# Calculating the maximum across the first axis (=0).
# axis=0 collapses the rows, giving one maximum per column.
max0 = np.max(table, axis=0)
# Calculating the maximum across the second axis (=1).
# axis=1 collapses the columns, giving one maximum per row.
max1 = np.max(table, axis=1)
# Calculating the overall maximum.
max_overall = np.max(table)
print("Maximum over the rows of the table = " + str(max0))
print("Maximum over the columns of the table = " + str(max1))
print("Overall maximum of the table = " + str(max_overall))
# -
# Basic arithmetical operations on arrays of the same shape are done elementwise:
# +
x = np.array([1.,2.,3.])
y = np.array([4.,5.,6.])
# Each operator is applied elementwise, producing a new array of the same shape:
print(x + y)
print(x - y)
print(x * y)
print(x / y)
# -
# ## Data plotting
# We use matplotlib.pyplot to make various types of plots
import matplotlib.pyplot as plt
# A basic plot a list of values:
# A single argument is treated as Y-values, plotted against their index:
plt.plot([7.3, 8.2, 1.2, 3.2, 9.1, 1.5])
plt.show()
# Plotting a list of X and Y values:
plt.plot([100.0, 110.0, 120.0, 130.0, 140.0, 150.0],[2.3, 8.1, 9.3, 9.7, 9.8, 20.0])
plt.show()
# Using mathematical functions and plotting more than one line on a graph
# +
x = np.arange(0.0, 10.0, 0.1)
# Most numpy functions work on arrays as well, applying the function to each element in turn
y_cos = np.cos(x)
y_sin = np.sin(x)
# -
# plot(x1, y1, x2, y2) draws both lines in a single call:
plt.plot(x, y_cos, x, y_sin)
plt.show()
# ## Data import and inspection (optional)
#
# [Pandas](http://pandas.pydata.org/) is a popular library for data wrangling, we'll use it to load and inspect a csv file that contains the historical web request and cpu usage of a web server:
# +
import pandas as pd

# `DataFrame.from_csv` was deprecated in pandas 0.21 and removed in 1.0.
# `read_csv` with index_col=0 and parse_dates=True reproduces its defaults
# (first column as a parsed datetime index).
data = pd.read_csv("data/request_rate_vs_CPU.csv", index_col=0, parse_dates=True)
# -
# The head command allows one to quickly see the structure of the loaded data:
data.head()
# We can select the CPU column and plot the data:
data.plot(figsize=(13,8), y="CPU")
# Now to show the plot we need to import `matplotlib.pyplot` and execute the `show()` function.
plt.show()
# Next we plot the request rates, leaving out the CPU column as it has another unit:
# NOTE: the positional `axis` argument to DataFrame.drop was removed in
# pandas 2.0 — pass it by keyword.
data.drop('CPU', axis=1).plot(figsize=(13,8))
plt.show()
# Now to continue and start to model the data, we'll work with basic numpy arrays. By doing this we also drop the time-information as shown in the plots above.
#
# We extract the column labels as the request_names for later reference:
# NOTE: `drop('CPU', 1)` (positional axis) was removed in pandas 2.0;
# the keyword form below is equivalent and forward-compatible.
request_names = data.drop('CPU', axis=1).columns.values
request_names
# We extract the request rates as a 2-dimensional numpy array:
request_rates = data.drop('CPU', axis=1).values
request_rates
# and the cpu usage as a one-dimensional numpy array
cpu = data['CPU'].values
cpu
| release/1/notebook_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pull Request Analysis
# +
import psycopg2
import pandas as pd
# from sqlalchemy.types import Integer, Text, String, DateTime
import sqlalchemy as s
import matplotlib
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import json

# Load database credentials from an untracked local config file.
with open("config.json") as config_file:
    config = json.load(config_file)

# NOTE: the 'postgres://' dialect name was removed in SQLAlchemy 1.4;
# 'postgresql+psycopg2://' is the supported spelling.
database_connection_string = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(
    config['user'], config['password'], config['host'], config['port'], config['database'])

# Default all unqualified table names to the augur_data schema.
dbschema='augur_data'
engine = s.create_engine(
    database_connection_string,
    connect_args={'options': '-csearch_path={}'.format(dbschema)})
# -
# List every repository Augur knows about (group id, repo id, name).
# The previous `repo_id_list = pd.DataFrame()` assignment was dead code
# (immediately overwritten by read_sql_query) and the f-string prefix
# interpolated nothing, so both were dropped.
myQuery = """
    SELECT repo_group_id, repo_id, repo_name from repo;
"""
repo_id_list = pd.read_sql_query(myQuery, con=engine)
print(repo_id_list)
# ## Pull Request Filter
## List of repository IDs for the report
# NOTE(review): despite the name, this is a Python *set* of repo IDs, not a
# dict — presumably used for membership tests later in the notebook; confirm
# before renaming, since cells beyond this chunk may reference `repo_dict`.
repo_dict = {26214, 26215,26216, 26220, 26219, 25158, 26217, 26218}
| code/percentage-affiliation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''rdkit'': conda)'
# language: python
# name: python361064bitrdkitconda2231f7cd07614fa9b5f8257698e6a5f8
# ---
# # Dataset Cleaning and Exploration
#
# To prepare for our ultimate task of using molecular embeddings to predict abundances and detectability, we need to put our dataset into working order. This is typically referred to as "cleaning", where we're making sure that the data will be valid (free of missing data, for example) and duplicate entries are removed. We will also need to inspect the data to make sure that entries we expect to be there are present, as well as observe some general trends where we can.
#
# Since we're looking at such a large dataset, we need to be able to inspect it from a microscopic and a macroscopic level. Combining both perspectives gives you an overview of what the dataset looks like, and in turn may give you insight into why/how a machine learning model behaves the way it does.
#
# The first part of this notebook will be combining the three datasets: QM9, ZINC15, and KIDA. The latter is actually obtained by scraping their website, i.e. extracting the relevant information from tables in websites.
# + tags=[]
from pathlib import Path
from tempfile import NamedTemporaryFile
import fileinput
import os
import numpy as np
import pandas as pd
from mol2vec import features
from rdkit import Chem
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem import Descriptors
from gensim.models import word2vec
# -
# ## Loading and combining SMILES from the two datasets
# QM9 property table; pandas transparently decompresses the .tar.gz.
qm9 = pd.read_csv("../data/gdb9_prop_smiles.csv.tar.gz")
smi_list = qm9["smiles"].dropna().to_list()
# + tags=[]
# Append SMILES from every .smi file under ../data/ (presumably the ZINC15
# downloads mentioned above — confirm). Each file carries a header line
# (skipped via [1:]) and "SMILES <id>" pairs on the remaining lines.
for smi_file in Path("../data/").rglob("*/*.smi"):
    temp = smi_file.read_text().split("\n")[1:]
    for line in temp:
        if len(line) != 0:
            smi_list.append(line.split(" ")[0])
# + tags=[]
print(f"Number of molecules: {len(smi_list)}")
# -
# ## Extracting SMILES from KIDA
#
# Since our database contains mostly terrestrial/stable molecules, we need to augment this set with astronomically relevant molecules. KIDA is one of the biggest reaction networks used in astrochemistry, and is therefore a nice collection of molecules that may or may not be found in space (or are at least of interest).
#
# To perform this, we'll scrape the KIDA website below.
import requests
import datetime
from bs4 import BeautifulSoup
# Fetch the KIDA species listing; the response is parsed below.
url = requests.get("http://kida.astrophy.u-bordeaux.fr/species.html")
print(f"""Last retrieved: {url.headers["Date"]}""")
# Flip to True to re-scrape and re-archive the KIDA page; False reuses the
# CSV snapshot written on a previous run (see the else-branch further down).
RERUN = False
if RERUN:
    date = url.headers["Date"]
    # cut the date, replace spaces with underscores for naming
    date = date[5:16].replace(" ", "_")
    # save the webpage for reference. If KIDA decides to go bottom up we
    # will always have a copy of this data
    with open(f"../data/kida-site_{date}.html", "w+") as write_file:
        write_file.write(str(url.content))
soup = BeautifulSoup(url.content)
# the first and only table on the page corresponds to the molecules
molecule_table = soup.find_all("table", "table")[0]
if RERUN:
    # Map formula -> InChI scraped from the KIDA species table.
    map_dict = dict()
    for row in soup.find_all("tr"):
        # some InChI are missing, this sets a default value
        inchi = None
        for index, column in enumerate(row.find_all("td")):
            # loop over columns in each row, and grab the second and
            # third columns which are formulae and InChI
            if index == 1:
                # strip twice because the first header is parsed funnily
                name = column.text.strip().strip()
            if index == 2:
                inchi = column.text.strip()
            # NOTE(review): this assignment runs once per <td>, re-writing the
            # same key each time, and would NameError if a row had cells before
            # any index-1 cell. It evidently works on KIDA's layout — confirm
            # before restructuring.
            map_dict[name] = inchi
    # Just for reference, dump the KIDA mapping as a dataframe
    kida_df = pd.DataFrame.from_dict(map_dict, orient="index").reset_index()
    kida_df.columns = ["Formula", "InChI"]
    kida_df.to_csv(f"../data/kida-molecules_{date}.csv", index=False)
else:
    # Reuse the archived snapshot so the notebook is reproducible offline.
    kida_df = pd.read_csv("../data/kida-molecules_05_Jul_2020.csv")
def inchi_to_smiles(inchi: str) -> str:
    """Convert an InChI identifier into a canonical SMILES string.

    Parameters
    ----------
    inchi : str
        InChI identifier; non-string values (e.g. NaN from pandas) are
        coerced via ``str()`` first.

    Returns
    -------
    str
        Canonical SMILES, or ``""`` when the input is empty or RDKit
        cannot parse it. The original implementation implicitly returned
        None for empty input but "" for parse failures; a single ""
        sentinel keeps the downstream ``.str.len() != 0`` filter simple.
    """
    inchi = str(inchi)
    if not inchi:
        return ""
    # sanitize=False / removeHs=False: KIDA contains exotic species that
    # fail RDKit's default valence checks.
    mol = Chem.MolFromInchi(inchi, sanitize=False, removeHs=False)
    if mol is None:
        return ""
    return Chem.MolToSmiles(mol, canonical=True)
# Now we convert all the InChI codes from KIDA into SMILES through RDKit. Initially I was most worried about this because KIDA has strange molecules, and as we see below RDKit has plenty to complain about. The attitude we're taking here is to ignore the ones that don't play by the rules, and we'll worry about them some other time.
# This applies our filtering function we defined above
# This applies our filtering function we defined above
kida_df["SMILES"] = kida_df["InChI"].apply(inchi_to_smiles)
# Extract only those with SMILES strings
# (entries where conversion failed or yielded None are filtered/dropped here)
kida_smiles = kida_df.loc[(kida_df["SMILES"].str.len() != 0.)].dropna()
# append all the KIDA entries to our full list
smi_list.extend(kida_smiles["SMILES"].to_list())
print(f"Number of molecules with KIDA: {len(smi_list)}")
# ## Adding extra SMILES from Jacqueline's analysis
#
# Turns out we're missing some molecules (no surprise) that are known in TMC-1, but not included in our list. The code below takes data directly from the Google Sheets that Jacqueline has set up.
# !pip install xlrd
# Spreadsheets exported from the shared Google Sheets (requires xlrd).
jac_df = pd.read_excel("../data/ChemicalCollection3.xlsx")
jac_df2 = pd.read_excel("../data/ChemicalCollection4.xlsx")
combined_jac = pd.concat([jac_df, jac_df2])
# Missing molecules
# NOTE(review): `jac_missing` is computed but not added to smi_list in this
# chunk — presumably consumed later in the notebook; confirm.
missing = combined_jac.loc[~combined_jac["Notation"].isin(smi_list)]
jac_missing = missing["Notation"].to_list()
combined_jac.to_csv("../data/jacqueline_tmc1_combined.csv", index=False)
# ## Microscopic inspection
#
# In this section, we're going to put certain aspects of the dataset under the microscope: for example, we want to check that certain molecules are contained in the set. Here, we'll be using our chemical intuition; the idea is to pick out a few molecules, and check if: (a) they are contained in our list, and (b) what their most similar molecules are.
# Single-column DataFrame of every raw SMILES string collected so far.
mol_df = pd.DataFrame(data=smi_list)
mol_df.columns = ["Raw"]
def canonicize_smi(smi: str):
    """Return the canonical SMILES form of *smi*.

    Canonicalizing ensures every later string comparison is made against a
    single, consistent SMILES representation of each molecule, if one exists.
    Parsing uses sanitize=False, matching how the rest of this pipeline
    tolerates exotic species.
    """
    parsed = Chem.MolFromSmiles(smi, sanitize=False)
    return Chem.MolToSmiles(parsed, canonical=True)
# +
# Sanity-check molecules we expect to find in the combined set.
checklist = [
    "CC=O", # acetaldehyde
    "c1ccccc1", # benzene
    "c1ccc(cc1)C#N", # benzonitrile
    "N#CC=C", # vinyl cyanide
    "CC#N", # methyl cyanide
    # cyanopolyyne-type chains of increasing length, plus hydrogen cyanide
    "C#CC#CC#N",
    "C#N",
    "C#CC#CC#CC#N",
    "C#CC#CC#CC#CC#N",
]
# Canonicalize so string equality against mol_df["Raw"] is meaningful.
checklist = [canonicize_smi(smi) for smi in checklist]
# -
# Flag rows whose canonical SMILES appears in the checklist, then show them.
mol_df.loc[:, "Check"] = mol_df["Raw"].isin(checklist)
# "Check" is already boolean, so it can be used directly as the row mask
# (the previous `== True` comparison was redundant).
mol_df.loc[mol_df["Check"]]
molecular_weights = list()
for smi in smi_list:
    mol = Chem.MolFromSmiles(smi, sanitize=False)
    # UpdatePropertyCache(strict=False) fills in valence information skipped
    # by sanitize=False so ExactMolWt can be computed — TODO confirm.
    # NOTE(review): assumes every SMILES parses (mol is not None); an
    # unparseable entry would raise AttributeError here.
    mol.UpdatePropertyCache(strict=False)
    molecular_weights.append(Descriptors.ExactMolWt(mol))
# ## Calculate descriptors
# Attach the exact molecular weights computed above (same row order).
mol_df["MW"] = molecular_weights
# ## Drop duplicate entries, and save the data to disk
#
# Our dataset actually contains a lot of duplicate entries. This step removes them, which would otherwise just waste our computation time.
mol_df.drop_duplicates("Raw", inplace=True)
# bz2-compressed pickle; reload with pd.read_pickle.
mol_df.to_pickle("../data/combined_smiles.pkl.bz2")
# ## Tasks for data exploration
#
# ### Distribution of molecular weight
#
# Plot a histogram of the molecular weight in our dataset.
# ### Counting functional group examples
# For example, number of carbonyls
# Substring match: counts any SMILES containing "C=O" anywhere, so esters,
# acids, etc. are included — not only ketones/aldehydes.
mol_df["Raw"].str.contains("C=O").sum()
# ## Dumping the SMILES data for mol2vec use
#
# This only takes the SMILES column, and dumps it into a list of SMILES formatted and ready for `mol2vec` usage. Every SMILES is separated by a new line, and we don't include a header.
# NOTE(review): with a single column, `sep` never appears in the output;
# each value lands on its own line via the line terminator — confirm
# before removing sep="\n".
mol_df["Raw"].to_csv("./collected_smiles.smi", sep="\n", index=False, header=None)
| notebooks/B1-exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: uw_ml
# language: python
# name: uw_ml
# ---
# ## Regularized Regression 應用
#
# 我們透過Scikit Learn來完成Ridge和Lasso regression的應用例子。
# - 什麼是Ridge reression?
# - Ridge reression是regression加上L2 norm的regularization
# - 什麼是Lasso reression?
# - 用L1 norm來做regularization。
# - 而跟L2 norm的不同是一個是平方而一個是用絕對值,而L1 norm的效果是讓一些不重要或是影響較小的變數係數為0,如此一來就可以同步達到篩選特徵的效果。
#
#
#
#
# implementation of linear regression
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
# NOTE(review): `load_boston` was deprecated in scikit-learn 1.0 and removed
# in 1.2; this cell requires an older scikit-learn, or switching to e.g.
# `fetch_california_housing` (which would change the results below).
boston = datasets.load_boston()
X_train = boston['data']
y_train = boston['target']
# +
from sklearn.linear_model import Ridge, Lasso

# Regularization strength: larger alpha = stronger shrinkage of coefficients.
alpha = 1
# Ridge (L2 penalty)
ridge_model = Ridge(alpha = alpha)
ridge_model.fit(X_train, y_train)
# Lasso (L1 penalty; trailing semicolon suppresses the notebook echo)
lasso_model = Lasso(alpha = alpha)
lasso_model.fit(X_train, y_train);
# +
from sklearn.linear_model import RidgeCV, LassoCV

# Candidate regularization strengths searched by cross-validation.
alphas = [0.01, 1, 100]
# Ridge
ridgeCV_model = RidgeCV(alphas = alphas)
ridgeCV_model.fit(X_train, y_train)
# Lasso
lassoCV_model = LassoCV(alphas = alphas)
lassoCV_model.fit(X_train, y_train);
# -
# `alpha_` holds the best value found by cross-validation for each model.
print('Ridge alpha:', ridgeCV_model.alpha_)
print('Lasso alpha:', lassoCV_model.alpha_)
| Regularization/regularized_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Video tutorial
# [How to write a Crypto Exchange API using Python (Authentication)](https://youtu.be/KSD7s6I2J10)
import requests
import json
import time
import hashlib
import hmac
import base64
# # Global Variables
# Base URL for all KuCoin REST calls.
kchost = "https://api.kucoin.com"
# key
#
# secret
#
# passphrase
# The key file stores the API key, secret, and passphrase as three
# whitespace-separated tokens (kept outside the repository).
with open("../../../keys/kc-key") as f:
    api_key, api_secret, api_passphrase = f.read().split()
# # Util function
def authentication(endpoint, method):
    """Build the signed headers required by KuCoin's v2 REST API.

    The signature is an HMAC-SHA256 over "<timestamp><METHOD><endpoint>"
    keyed with the API secret, then base64-encoded. For API key version 2
    the passphrase must likewise be HMAC-signed with the secret.
    """
    timestamp = str(int(time.time() * 1000))
    prehash = timestamp + method.upper() + endpoint
    signature = base64.b64encode(
        hmac.new(api_secret.encode('utf-8'), prehash.encode('utf-8'), hashlib.sha256).digest())
    signed_passphrase = base64.b64encode(
        hmac.new(api_secret.encode('utf-8'), api_passphrase.encode('utf-8'), hashlib.sha256).digest())
    # NOTE(review): the signature/passphrase header values are bytes; requests
    # accepts bytes, but KuCoin's own examples call .decode() — confirm before
    # changing.
    return {
        "KC-API-SIGN": signature,
        "KC-API-TIMESTAMP": timestamp,
        "KC-API-KEY": api_key,
        "KC-API-PASSPHRASE": signed_passphrase,
        "KC-API-KEY-VERSION": "2"
    }
def paramURL(params):
    """Join a dict of query parameters into a "k=v&k=v" string.

    Note: values are not URL-encoded here; callers must supply safe values.
    """
    return "&".join(f"{key}={value}" for key, value in params.items())
def getPrivate(endpoint, params):
    """GET an authenticated (private) KuCoin endpoint and return parsed JSON.

    Parameters
    ----------
    endpoint : str
        API path beginning with '/', e.g. "/api/v1/accounts".
    params : dict
        Query parameters, serialized by paramURL() and appended as
        '?k=v&...' only when non-empty.

    Returns
    -------
    dict
        Decoded JSON response body.
    """
    parmurl = paramURL(params)
    if len(parmurl) > 0:
        parmurl = "?" + parmurl
    url = "%s%s%s" % (kchost, endpoint, parmurl)
    # KuCoin requires the signature to cover the full request path *including*
    # the query string; the previous code signed only the bare endpoint, which
    # fails with "Invalid KC-API-SIGN" whenever params is non-empty.
    headers = authentication(endpoint + parmurl, "get")
    # 5-second timeout guards against the call hanging the notebook.
    return requests.get(url, headers=headers, timeout=5).json()
# # get the balances
# KuCoin account-balances endpoint (requires authentication).
endpoint = "/api/v1/accounts"
getPrivate(endpoint, {})
| exgapi/kucoin/kucoin-exchange-api-private-authentication.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8asMiXnG7n0V"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + id="kkvUoa8V703r"
# Heart-disease dataset: one row per patient with a binary `target` label.
data = pd.read_csv("https://raw.githubusercontent.com/dphi-official/Datasets/master/heart_disease.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="YmP32G8n73iF" outputId="121f92fa-13a8-49d5-9578-13e9e4c24cc3"
data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="iY9ix4hq74k2" outputId="fd09b1aa-4615-4774-ba4a-62320e8e86f9"
data.shape
# + id="z9mclalO77NH"
X = data.drop('target', axis=1) # Input Variable / Attributes
Y = data['target'] # Output Variable / Label
# + id="9gsaaelb8SQv"
# Split the data into test and train (80/20, seeded for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=2)
# + colab={"base_uri": "https://localhost:8080/"} id="oLmfbZ058x54" outputId="505d8d11-c915-4a41-bcc7-e3c5a1fea754"
print(X.shape, X_train.shape, X_test.shape)
# + id="e59QU6Lp83xT"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense
# + id="E6x2xVxi9FIn"
# Define the model architecture: a small fully-connected binary classifier.
model = Sequential()
# First layer's input width matches the number of feature columns.
model.add(Dense(32, activation="relu", input_shape=(X_train.shape[1],)))
model.add(Dense(16, activation="relu"))
model.add(Dense(8, activation="relu"))
# Single sigmoid unit — outputs the probability of the positive class.
model.add(Dense(1, activation="sigmoid"))
# + id="12ZjNTYA98fD"
# Compiling the model
from tensorflow.keras.optimizers import RMSprop
optimizer = RMSprop(0.001) # Here, we have set our learning rate as 0.001
# binary_crossentropy pairs with the sigmoid output layer above.
model.compile(loss='binary_crossentropy', optimizer= optimizer , metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="iEEmTR69-Ad7" outputId="3f48d334-b8fb-4bf9-c998-aa6b96ac6eb6"
# Print the summary of the model
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 466} id="BbVL42kn-Ewt" outputId="db9165ff-bdba-49c8-8ec7-60e5b441fdb9"
# Plot the model
from tensorflow.keras.utils import plot_model
plot_model(model)
# + colab={"base_uri": "https://localhost:8080/"} id="Q6iT96Ik-HYI" outputId="c0730b37-d499-4859-d819-560e70203ab2"
# Train the model; 20% of the training split is held out for validation
# each epoch, and the History object is kept for later inspection.
training = model.fit(X_train, Y_train, validation_split=0.2, epochs=100, batch_size=10, verbose=1)
# + colab={"base_uri": "https://localhost:8080/"} id="hrohaS9b-TA5" outputId="8ffc3ba1-9a87-4ddb-c3f5-dc58d94bcd1f"
# Evaluate the model
model.evaluate(X_test, Y_test)
# + id="yTtkExtO-nWB"
| Binary_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # First Steps in Python
# ### Prepared for the [QuantEcon](http://quantecon.org/) Workshop on Computational Methods in HCMC
#
# #### <NAME>, Nov 2016
#
# ### Basic Maths
# Bare expressions: a notebook echoes the value of the last expression in each cell.
10 + 10
2 * 10
10**2
x = 10
y = 2
x * y
x**y
z = x - y
z
# ### Maths with NumPy
# Let's import NumPy, which is a library for numerical work in Python.
import numpy as np
# #### Elementary functions
#
# With NumPy we can access standard functions like $\exp$, $\sin$, $\cos$, etc.
x = 0
np.exp(x)
np.cos(x)
np.sin(x)
# #### Arrays
#
# We can make an "array" of evenly spaced numbers:
# linspace(start, stop, num) includes both endpoints.
x = np.linspace(-3, 3, 5)
x
# The functions listed above work directly on arrays:
np.exp(x)
np.sin(x)
# ### Plotting
# The next line says "show all figures inside the browser"
# %matplotlib inline
# Now let's import the main Python plotting library, called Matplotlib.
import matplotlib.pyplot as plt
# #### Our first plot
# +
fig, ax = plt.subplots()
x = np.linspace(-np.pi, np.pi, 100)
y = np.sin(x)
ax.plot(x, y)
# -
# A plot with two lines and a legend:
# +
fig, ax = plt.subplots()
y1 = np.sin(x)
y2 = np.cos(x)
# The `label` strings feed the legend drawn below.
ax.plot(x, y1, label='sine')
ax.plot(x, y2, label='cosine')
ax.legend()
# -
# ### DYI Functions
# We can make our own functions.
def f(x):
    """Double the input: return 2 * x (works for numbers and numpy arrays)."""
    return x * 2
f(3)
f(10)
def g(x):
    """Evaluate x * exp(-0.5 * x): a curve that rises from 0, then decays."""
    decay = 0.5
    return np.exp(-decay * x) * x
g(1)
g(10)
# +
fig, ax = plt.subplots()
x = np.linspace(0, 10, 100)
# g is evaluated elementwise over the grid (numpy broadcasting inside g).
y = g(x)
ax.plot(x, y)
# +
def h(x):
    """Return |sin(x)|: the sine wave rectified to be non-negative."""
    return np.absolute(np.sin(x))
fig, ax = plt.subplots()
x = np.linspace(0, 10, 100)
# h is evaluated elementwise over the grid.
y = h(x)
ax.plot(x, y)
# -
# ### Exercises
# Plot the function
#
# $$ f(x) = \sin(2x) - 2 \sin(x) $$
#
# on the interval $[-10, 10]$.
| first_steps.ipynb |