code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Working with files
# !cat hello.py
print('Hello, world!')
# !cat use.py
# !python use.py
# !cat add.py
# !python add.py
# !python add.py 2
# # Reading files in custom format
# !cat custom.txt
# Open the file for reading and iterate over each line.
# Each line looks like: "123 = bread\n" -> (id, name) pair.
with open('./custom.txt') as f:
    for line in f:
        # Tuple unpacking of "key = value"; `entry_id` avoids shadowing
        # the builtin `id()` (the original used the name `id`).
        entry_id, name = line.strip().split(' = ')
        print(entry_id, name)
# ## JSON files
# !cat original.json
# +
"""Read and write JSON files"""
import json
with open('./original.json') as original, \
open('./output.json', 'w') as output:
config = json.load(original)
config["133"] = config["134"]
del config["134"]
json.dump(config, output)
# -
# !cat output.json
# # CSV files
# !cat original.csv
# !cat csv-simple.py
# !cat output.csv
| 2019-10-29.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Load a pre-trained gensim word2vec model from disk.
from gensim.models import Word2Vec
model = Word2Vec.load('model.pkl')

# Project-local helpers: per-word association weights and context windows
# for the target word.
import utils
word = 'горшок'
weights = utils.load_weights('../corpora/ad-nouns/cdict/', word)
contexts = utils.load_contexts('../corpora/ad-nouns-contexts-100k', word)
# Peek at the first context and the total number of contexts loaded.
print(contexts[0])
print(len(contexts))
from collections import Counter
import numpy as np

# Keep a context word only if it occurs at least `min_count` times across
# all contexts AND its association weight exceeds `min_weight`.
min_count = 20
min_weight = 1.5
all_words = [
    w for w, cnt in Counter(w for ctx in contexts for w in ctx).items()
    # FIX: the weight threshold was hard-coded as the literal 1.5; use the
    # `min_weight` constant defined above so changing it actually works.
    if cnt >= min_count and weights.get(w, 0) > min_weight]
print(len(all_words))
# Embedding matrix for the surviving words the model has vectors for.
w2v_vecs = np.array([model[w] for w in all_words if w in model])
print(len(w2v_vecs))
import kmeans  # project-local k-means implementation

# Cluster word vectors into `n_senses` groups with cosine distance; each
# cluster is interpreted as one sense of the target word.
n_senses = 6
km = kmeans.KMeans(w2v_vecs, k=n_senses, metric='cosine')
all_words = np.array(all_words)
# NOTE(review): this assumes every word in `all_words` was present in the
# model — otherwise `w2v_vecs` is shorter than `all_words` and the boolean
# masks below misalign. Confirm with the `w in model` filter above.
for sense in range(n_senses):
    # Mask of words assigned to this cluster and their centroid distances.
    indices = km.Xtocentre == sense
    distances = km.distances[indices]
    sense_words = all_words[indices]
    # Ten words closest to the centroid, then re-ranked by association
    # weight so the strongest collocates print first.
    min_indices = np.argsort(distances)[:10]
    min_words = list(sense_words[min_indices])
    min_words.sort(key=lambda w: weights.get(w, 0), reverse=True)
    print(sense, ' '.join(min_words[:5]))
| w2v_ctx_words.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# orphan: true
# ---
# + tags=["remove-input", "active-ipynb", "remove-output"]
# try:
# import openmdao.api as om
# except ImportError:
# !python -m pip install openmdao[notebooks]
# import openmdao.api as om
# -
# # DotProductComp
#
# `DotProductComp` performs a dot product between two compatible inputs. It may be vectorized to provide the result at one or more points simultaneously.
#
# $$
# c_i = \bar{a}_i \cdot \bar{b}_i
# $$
#
# ## DotProductComp Options
#
# The default `vec_size` is 1, providing the dot product of $a$ and $b$ at a single
# point. The lengths of $a$ and $b$ are provided by option `length`.
#
# Other options for DotProductComp allow the user to rename the input variables $a$ and $b$ and the output $c$, as well as specifying their units.
# + tags=["remove-input"]
om.show_options_table("openmdao.components.dot_product_comp.DotProductComp")
# -
# ## DotProductComp Constructor
#
# The call signature for the `DotProductComp` constructor is:
#
# ```{eval-rst}
# .. automethod:: openmdao.components.dot_product_comp.DotProductComp.__init__
# :noindex:
# ```
#
# ## DotProductComp Usage
#
# There are often situations when numerous products need to be computed, essentially in parallel.
# You can reduce the number of components required by having one `DotProductComp` perform multiple operations.
# This is also convenient when the different operations have common inputs.
#
# The ``add_product`` method is used to create additional products after instantiation.
#
# ```{eval-rst}
# .. automethod:: openmdao.components.dot_product_comp.DotProductComp.add_product
# :noindex:
# ```
#
# ## DotProductComp Example
#
# In the following example DotProductComp is used to compute instantaneous power as the
# dot product of force and velocity at 100 points simultaneously. Note the use of
# `a_name`, `b_name`, and `c_name` to assign names to the inputs and outputs.
# Units are assigned using `a_units`, `b_units`, and `c_units`.
# Note that no internal checks are performed to ensure that `c_units` are consistent
# with `a_units` and `b_units`.
#
# + tags=["allow-assert"]
import numpy as np

# Number of points at which the dot product is evaluated simultaneously.
n = 24

p = om.Problem()

# Dot product of force F [N] and velocity v [m/s] -> power P [W] at n points.
# NOTE: no internal check ensures c_units is consistent with a/b units.
dp_comp = om.DotProductComp(vec_size=n, length=3, a_name='F', b_name='v', c_name='P',
                            a_units='N', b_units='m/s', c_units='W')

p.model.add_subsystem(name='dot_prod_comp', subsys=dp_comp,
                      promotes_inputs=[('F', 'force'), ('v', 'vel')])

p.setup()

p.set_val('force', np.random.rand(n, 3))
p.set_val('vel', np.random.rand(n, 3))

p.run_model()

# Verify the results against numpy.dot in a for loop.
expected = []
for i in range(n):
    a_i = p.get_val('force')[i, :]
    b_i = p.get_val('vel')[i, :]
    expected.append(np.dot(a_i, b_i))
    actual_i = p.get_val('dot_prod_comp.P')[i]
    # Inputs are drawn from [0, 1), so actual_i is positive and safe to
    # divide by.
    rel_error = np.abs(expected[i] - actual_i)/actual_i
    assert rel_error < 1e-9, f"Relative error: {rel_error}"
print(p.get_val('dot_prod_comp.P', units='kW'))
# + tags=["remove-input", "remove-output"]
from openmdao.utils.assert_utils import assert_near_equal
# Expected values were computed in W; compare in kW.
assert_near_equal(p.get_val('dot_prod_comp.P', units='kW'), np.array(expected)/1000.)
# -
# ## DotProductComp Example with Multiple Products
#
# When defining multiple products:
#
# - An input name in one call to `add_product` may not be an output name in another call, and vice-versa.
# - The units and shape of variables used across multiple products must be the same in each one.
# + tags=["allow-assert"]
n = 24

p = om.Problem()

# First product: work W [J] = F [N] . d [m].
dp_comp = om.DotProductComp(vec_size=n, length=3,
                            a_name='F', b_name='d', c_name='W',
                            a_units='N', b_units='m', c_units='J')
# Second product on the same component: power P [W] = F [N] . v [m/s].
# The shared input F must keep the same units/shape in both products.
dp_comp.add_product(vec_size=n, length=3,
                    a_name='F', b_name='v', c_name='P',
                    a_units='N', b_units='m/s', c_units='W')

p.model.add_subsystem(name='dot_prod_comp', subsys=dp_comp,
                      promotes_inputs=[('F', 'force'), ('d', 'disp'), ('v', 'vel')])

p.setup()

p.set_val('force', np.random.rand(n, 3))
p.set_val('disp', np.random.rand(n, 3))
p.set_val('vel', np.random.rand(n, 3))

p.run_model()

# Verify the results against numpy.dot in a for loop.
expected_P = []
expected_W = []
for i in range(n):
    a_i = p.get_val('force')[i, :]

    # Work check: F . d
    b_i = p.get_val('disp')[i, :]
    expected_W.append(np.dot(a_i, b_i))
    actual_i = p.get_val('dot_prod_comp.W')[i]
    rel_error = np.abs(actual_i - expected_W[i])/actual_i
    assert rel_error < 1e-9, f"Relative error: {rel_error}"

    # Power check: F . v
    b_i = p.get_val('vel')[i, :]
    expected_P.append(np.dot(a_i, b_i))
    actual_i = p.get_val('dot_prod_comp.P')[i]
    rel_error = np.abs(expected_P[i] - actual_i)/actual_i
    assert rel_error < 1e-9, f"Relative error: {rel_error}"

print(p.get_val('dot_prod_comp.W', units='kJ'))
# -
print(p.get_val('dot_prod_comp.P', units='kW'))
# + tags=["remove-input", "remove-output"]
# Expected values were computed in J and W; compare in kJ and kW.
assert_near_equal(p.get_val('dot_prod_comp.W', units='kJ'), np.array(expected_W)/1000.)
assert_near_equal(p.get_val('dot_prod_comp.P', units='kW'), np.array(expected_P)/1000.)
| openmdao/docs/openmdao_book/features/building_blocks/components/dot_product_comp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import networkx as nx
import warnings
import gc
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
import xgboost as xgb
import datetime
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_style("dark")
# -
def plot_real_feature(df, fname):
    """Plot feature `fname`: overall train histogram, class-conditional
    histograms (duplicate vs non-duplicate), and box/violin plots split
    by `is_duplicate`.

    Parameters:
        df: DataFrame with columns 'id', 'is_duplicate' and `fname`.
            Rows with id >= 0 are train; test rows (id == -1) are skipped.
        fname: name of the real-valued feature column to plot.
    """
    # Row positions for the train portion and the two duplicate classes.
    # (The original also computed an unused test index; removed.)
    ix_train = np.where(df['id'] >= 0)[0]
    ix_is_dup = np.where(df['is_duplicate'] == 1)[0]
    ix_not_dup = np.where(df['is_duplicate'] == 0)[0]

    plt.figure(figsize=(16, 12))
    ax1 = plt.subplot2grid((3, 2), (0, 0), colspan=2)
    ax2 = plt.subplot2grid((3, 2), (1, 0), colspan=2)
    ax3 = plt.subplot2grid((3, 2), (2, 0))
    ax4 = plt.subplot2grid((3, 2), (2, 1))

    ax1.set_title('Distribution of %s' % fname, fontsize=20)
    sns.distplot(df.loc[ix_train][fname], bins=50, ax=ax1)
    # Overlaid class-conditional distributions on a shared axis.
    sns.distplot(df.loc[ix_is_dup][fname], bins=50, ax=ax2, label='is dup')
    sns.distplot(df.loc[ix_not_dup][fname], bins=50, ax=ax2, label='not dup')
    ax2.legend(loc='upper right', prop={'size': 18})
    sns.boxplot(y=fname, x='is_duplicate', data=df.loc[ix_train], ax=ax3)
    sns.violinplot(y=fname, x='is_duplicate', data=df.loc[ix_train], ax=ax4)
    plt.show()
# +
src = '/media/w/1c392724-ecf3-4615-8f3c-79368ec36380/DS Projects/Kaggle/Quora/scripts/features/'
# Alternative (unstemmed) feature files, kept for reference:
# trdf = pd.read_csv(src + 'df_train_spacylemmat_fullclean.csv').iloc[:, :-1]
# tedf = pd.read_csv(src + 'df_test_spacylemmat_fullclean.csv').iloc[:, 4:]
trdf = pd.read_csv(src + 'df_train_lemmatfullcleanSTEMMED.csv').iloc[:, :-1]
tedf = pd.read_csv(src + 'df_test_lemmatfullcleanSTEMMED.csv').iloc[:, 4:]
#trdf = pd.read_csv('input/train.csv').iloc[:, :-1]
#tedf = pd.read_csv('input/test.csv')
# Stack train and test so the question graph covers both sets.
tr = pd.concat([trdf, tedf], ignore_index = True)
# +
# Undirected graph: one node per unique question text, one edge per pair.
g = nx.Graph()
g.add_nodes_from(tr.question1)
g.add_nodes_from(tr.question2)
edges = list(tr[['question1', 'question2']].to_records(index=False))
g.add_edges_from(edges)
# Sanity checks: node count vs unique questions, edge count vs rows
# (edges can be fewer than rows because duplicates collapse).
print('Number of unique questions:', len(set(tr.question1) | set(tr.question2)), g.number_of_nodes())
print('Number of rows in the data:', len(tr), g.number_of_edges())
d = g.degree()
# NOTE(review): `d[k] for k in d` assumes networkx 1.x, where degree()
# returns a dict keyed by node. Under networkx >= 2.0 iterating a
# DegreeView yields (node, degree) pairs and this line breaks — confirm
# the pinned networkx version.
print('Mean number of connections:', np.mean([d[k] for k in d]))
# -
def create_q_interactions(name):
    """Add min/max/mean/sum/abs-diff interaction columns for the q1_/q2_
    versions of feature `name` to the module-level `comb` DataFrame."""
    pair = comb[['q1_' + name, 'q2_' + name]]
    # Row-wise aggregates over the (q1, q2) column pair.
    for stat in ('min', 'max', 'mean'):
        comb[stat + '_' + name] = getattr(pair, stat)(1)
    comb['sum_' + name] = comb['q1_' + name] + comb['q2_' + name]
    comb['diff_' + name] = (comb['q1_' + name] - comb['q2_' + name]).abs()
# +
comb = pd.DataFrame()
# How many distinct questions each question is paired with anywhere in
# train+test (its neighbor count in the graph).
# NOTE(review): under networkx >= 2.0, g.neighbors returns an iterator and
# len() fails; this assumes networkx 1.x list semantics — confirm.
comb['q1_neighbor_count'] = tr['question1'].map(g.neighbors).map(len)
comb['q2_neighbor_count'] = tr['question2'].map(g.neighbors).map(len)
create_q_interactions('neighbor_count')
# -
# Number of questions adjacent to BOTH members of the pair (the "magic"
# shared-neighbor feature).
comb['shared_neighbor_count'] = tr[['question1', 'question2']].apply(
    lambda x: nx.common_neighbors(g, x.question1, x.question2), 1).apply(lambda x: sum(1 for _ in x))
# +
# Split back into train/test rows (train rows come first in `tr`) and save.
comb_tr = comb.iloc[:trdf.shape[0], :]
comb_te = comb.iloc[trdf.shape[0]:, :]
comb_te = comb_te.reset_index(drop=True)
comb_tr.to_csv('train_network_neighbors.csv', index=False)
comb_te.to_csv('test_network_neighbors.csv', index=False)
# + active=""
# train_af = pd.read_csv('input/train_comb_feats.csv')
# test_af = pd.read_csv('input/test_comb_feats.csv')
# train_labels = pd.read_csv('train1.csv', usecols=['is_duplicate'], squeeze=True)
# -
# #### Load, drop duplicates and save full feature set
# +
# Join the neighbor features onto the previously saved network features,
# then drop any column that duplicates another column's values.
comb_tr = pd.read_csv('train_network_neighbors.csv')
train_networkfeats = pd.read_pickle('train_networkfeats.pkl')
df = pd.concat([train_networkfeats, comb_tr], axis = 1)
# Detect duplicate columns cheaply on a 10k-row sample: transpose, drop
# duplicate rows (i.e. identical columns), transpose back.
dfc = df.iloc[0:10000,:]
dfc = dfc.T.drop_duplicates().T
duplicate_cols = sorted(list(set(df.columns).difference(set(dfc.columns))))
print('Dropping duplicate columns:', duplicate_cols)
df.drop(duplicate_cols, axis = 1, inplace = True)
print('Final shape:', df.shape)
df.to_pickle('train_fullnetworkfeatsTony.pkl')
# -
# Final shape: (404290, 417) for train
# +
# Same de-duplication pass for the test-side features.
comb_te = pd.read_csv('test_network_neighbors.csv')
test_networkfeats = pd.read_pickle('test_networkfeats.pkl')
test_networkfeats = test_networkfeats.reset_index(drop=True)
df = pd.concat([test_networkfeats, comb_te], axis = 1)
# Free the large intermediates before the memory-heavy transpose below.
del test_networkfeats, comb_te
gc.collect()
dfc = df.iloc[0:10000,:]
dfc = dfc.T.drop_duplicates().T
duplicate_cols = sorted(list(set(df.columns).difference(set(dfc.columns))))
print('Dropping duplicate columns:', duplicate_cols)
df.drop(duplicate_cols, axis = 1, inplace = True)
print('Final shape:', df.shape)
df.to_pickle('test_fullnetworkfeatsTony.pkl')
# -
# Drop the raw count columns. `axis=1` replaces the positional axis
# argument, which was deprecated and then removed in pandas 2.0.
# NOTE(review): `test_networkfeats` was `del`eted in the cell above, so
# this cell assumes a re-run from an earlier notebook state — confirm.
train_networkfeats.drop(['q1_counts', 'q2_counts', 'sum_counts', 'diff_counts'], axis=1, inplace=True)
test_networkfeats.drop(['q1_counts', 'q2_counts', 'sum_counts', 'diff_counts'], axis=1, inplace=True)
def quick_xgb(train_feats, test_feats, train=train_af, test=test_af, pred_trans=True,
              train_labels=train_labels, weights=np.ones(len(train_af))):
    """Train a quick single-fold XGBoost model on the base features plus the
    supplied extra feature blocks, print validation log-loss/accuracy, write
    gzipped test predictions, and plot feature importances.

    NOTE(review): the defaults (train_af, test_af, train_labels, weights)
    are evaluated when the function is DEFINED, so those globals must exist
    at definition time; the `active=""` cell that loads them is not executed
    by default — confirm the intended workflow.
    """
    train_id = np.arange(len(train_af))
    test_id = np.arange(len(test_af))
    # Append the candidate feature blocks to the base feature sets.
    train = pd.concat([train, train_feats], 1)
    test = pd.concat([test, test_feats], 1)
    params = {}
    params["objective"] = "binary:logistic"
    params['eval_metric'] = ['logloss']
    params["eta"] = 0.2
    params["subsample"] = 0.7
    params["min_child_weight"] = 5
    params["colsample_bytree"] = 0.5
    #params["max_delta_step"] = 5.0
    #params["gamma"] = 10.0
    params["max_depth"] = 10
    params["silent"] = 1
    params["seed"] = 1001
    skf = KFold(n_splits=10, shuffle=True, random_state=1001).split(train_labels)
    test_preds = np.zeros(len(test))
    for i, (idx_train, idx_val) in enumerate(skf):
        val_preds = np.zeros(len(train.iloc[idx_val, :]))  # overwritten by predict below
        d_train = xgb.DMatrix(train.iloc[idx_train, :], label=train_labels[idx_train], weight=weights[idx_train])
        d_valid = xgb.DMatrix(train.iloc[idx_val, :], label=train_labels[idx_val], weight=weights[idx_val])
        watchlist = [(d_train, 'train'), (d_valid, 'valid')]
        # Huge round count; early stopping on the validation fold ends it.
        bst = xgb.train(params, d_train, 500000, watchlist, early_stopping_rounds=10, verbose_eval=25)
        val_preds = bst.predict(d_valid, ntree_limit=bst.best_ntree_limit)
        test_preds = bst.predict(xgb.DMatrix(test), ntree_limit=bst.best_ntree_limit)
        # Deliberately only the first fold: a quick estimate, not full CV.
        break
    loss = log_loss(train_labels[idx_val], val_preds)

    def pred_transform(preds):
        # Re-calibrate probabilities from the train duplicate rate (~0.3692)
        # to the assumed test prior (0.165) via the standard odds correction.
        a = 0.165 / 0.369191399096
        b = (1 - 0.165) / (1 - 0.369191399096)
        return a * preds / (a * preds + b * (1 - preds))
    if pred_trans:
        print(test_id.shape)
        print(test_preds.shape)
        test_df = pd.DataFrame({"test_id": test_id, "is_duplicate": pred_transform(test_preds)})
    else:
        test_df = pd.DataFrame({"test_id": test_id, "is_duplicate": test_preds})
    print('Log Loss:', loss)
    print('Accuracy:', (train_labels[idx_val] == np.round(val_preds)).mean())
    now = datetime.datetime.now()
    test_pred_filename = "model_out/quick_preds_xgb_{:.4f}_{:%Y%m%d_%H%M}.csv.gz".format(loss, now)
    test_df.to_csv(test_pred_filename, index=False, compression='gzip')
    # Plot the top-50 features by importance (fscore), normalized to sum 1.
    importance = bst.get_fscore()
    importance = sorted(importance.items(), key=lambda x:x[1], reverse=True)[:50]
    df = pd.DataFrame(importance, columns=['feature', 'fscore'])
    df['fscore'] = df['fscore'] / df['fscore'].sum()
    plt.figure()
    df.plot()
    df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6, 10))
    plt.title('XGBoost Feature Importance')
    plt.xlabel('relative importance')
# Baseline with a constant dummy feature, then each feature block alone,
# then both blocks combined; finally persist the combined feature set.
quick_xgb(pd.DataFrame({'i': np.ones(len(train_af))}), pd.DataFrame({'i': np.ones(len(test_af))}))
quick_xgb(comb_tr, comb_te)
quick_xgb(train_networkfeats, test_networkfeats)
train_networkfeats = pd.concat([train_networkfeats, comb_tr], 1)
test_networkfeats = pd.concat([test_networkfeats, comb_te], 1)
quick_xgb(train_networkfeats, test_networkfeats)
train_networkfeats.to_pickle('train_networkfeats.pkl')
test_networkfeats.to_pickle('test_networkfeats.pkl')
| features/Magic - Network Combined 18.05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <p><strong>Welcome!</strong> This notebook will teach you about the functions in the Python Programming Language. By the end of this lab, you'll know the basic concepts about function, variables, and how to use functions.</p>
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 30px">
# <ul>
# <li>
# <a href="#func">Functions</a>
# <ul>
# <li><a href="content">What is a function?</a></li>
# <li><a href="var">Variables</a></li>
# <li><a href="simple">Functions Make Things Simple</a></li>
# </ul>
# </li>
# <li><a href="pre">Pre-defined functions</a></li>
# <li><a href="if">Using <code>if</code>/<code>else</code> Statements and Loops in Functions</a></li>
# <li><a href="default">Setting default argument values in your custom functions</a></li>
# <li><a href="global">Global variables</a></li>
# <li><a href="scope">Scope of a Variable</a></li>
# <li>
# <a href="#quiz">Quiz on Loops</a>
# </li>
# </ul>
# <p>
# Estimated time needed: <strong>40 min</strong>
# </p>
# </div>
#
# <hr>
# <h2 id="func">Functions</h2>
# A function is a reusable block of code which performs operations specified in the function. They let you break down tasks and allow you to reuse your code in different programs.
#
# There are two types of functions :
#
# - <b>Pre-defined (built-in) functions</b>
# - <b>User defined functions</b>
# <h3 id="content">What is a Function?</h3>
# You can define functions to provide the required functionality. Here are simple rules to define a function in Python:
# - Functions blocks begin <code>def</code> followed by the function <code>name</code> and parentheses <code>()</code>.
# - There are input `parameters or arguments` that should be placed within these parentheses.
# - You can also define parameters inside these parentheses.
# - Every function has a body that begins after a colon (<code>:</code>) and is written as an indented block.
# - You can also place documentation before the body
# - The statement <code>return</code> exits a function, optionally passing back a value
#
# An example of a function that adds on to the parameter <code>a</code> prints and returns the output as <code>b</code>:
# +
# First function example: add 1 to a and store as b
def add(a):
    """
    Return ``a + 1``, printing both the input and the result.

    Parameters:
        a: value to increment (any type that supports ``+ 1``).
    """
    b = a + 1
    print(a, "if you add one", b)
    return b
# -
# The figure below illustrates the terminology:
# + [markdown] jupyter={"source_hidden": true}
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/FuncsDefinition.png" width="500" />
# -
# We can obtain help about a function :
print(add.__doc__)
# + jupyter={"outputs_hidden": false}
# Get a help on add function
help(add)
# -
help(print)
# We can call the function:
# + jupyter={"outputs_hidden": false}
# Call the function add()
add(1)
# -
# If we call the function with a new input we get a new result:
# + jupyter={"outputs_hidden": false}
# Call the function add()
add(2.2)
# -
add(2j+3)
add(True)
# We can create different functions. For example, we can create a function that multiplies two numbers. The numbers will be represented by the variables <code>a</code> and <code>b</code>:
# +
# Multiply two values and report the result.
def Mult(a, b):
    """Return a * b after printing it."""
    product = a * b
    print("Multiplication of two numbers", product)
    return product
# -
# The same function can be used for different data types. For example, we can multiply two integers:
#
# + jupyter={"outputs_hidden": false}
# Use mult() multiply two integers
Mult(2, 3)
# -
# Two Floats:
# + jupyter={"outputs_hidden": false}
# Use mult() multiply two floats
Mult(10.0, 3.14)
# -
# We can even replicate a string by multiplying with an integer:
# + jupyter={"outputs_hidden": false}
# Use mult() multiply two different type values together
Mult(3, "<NAME> ")
# -
Mult(2j+3,3j+4)
Mult(True,False)
def Add(a, b):
    """Print the sum of *a* and *b*; returns None."""
    total = a + b
    print("Addation of two Numbers:", total)
Add(10,20)
Add([10,20,30],[2,4,5])
Add((10.5,2.5),(2.6,3.6))
Add({10,20},{2,4})
# <h3 id="var">Variables</h3>
# The input to a function is called a formal parameter.
#
# A variable that is declared inside a function is called a local variable. The parameter only exists within the function (i.e. the point where the function starts and stops).
#
# A variable that is declared outside a function definition is a global variable, and its value is accessible and modifiable throughout the program. We will discuss more about global variables at the end of the lab.
#
# + jupyter={"outputs_hidden": false}
# Square the input and add one.
def square(a):
    """Return a*a + 1, printing the input and the result."""
    offset = 1  # local variable
    result = a * a + offset
    print(a, "if you square + 1", result)
    return result
# -
# The labels are displayed in the figure:
# + [markdown] jupyter={"source_hidden": true}
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/FuncsVar.png" width="500" />
# -
c
# We can call the function with an input of <b>3</b>:
# + jupyter={"outputs_hidden": false}
# Initializes Global variable
x = 3 #Global variable
# Makes function call and return function a y
y = square(x)
y
# -
x
# We can call the function with an input of <b>2</b> in a different manner:
# + jupyter={"outputs_hidden": false}
# Directly enter a number as parameter
square(2)
# -
def fun():
    pass  # `pass` is a keyword placeholder for an empty function body
fun()
help(fun)
# If there is no <code>return</code> statement, the function returns <code>None</code>. The following two functions are equivalent:
# + jupyter={"outputs_hidden": false}
# Two equivalent functions: one returns a value, the other returns None.
def MJ():
    """Return the placeholder name string."""
    return '<NAME>'


def MJ1():
    """Print the placeholder name; implicitly return None."""
    print('<NAME>')
# + jupyter={"outputs_hidden": false}
# See the output
MJ()
# + jupyter={"outputs_hidden": false}
# See the output
MJ1()
# -
# Printing the function after a call reveals a **None** is the default return statement:
# + jupyter={"outputs_hidden": false}
# See what functions returns are
print(MJ())
print(MJ1())
# -
# Create a function <code>con</code> that concatenates two strings using the addition operation:
# +
# Lesson example: despite the name, this repeats the first argument.
def concatenates(a, b):
    """Return *a* repeated twice; *b* is deliberately ignored."""
    return a * 2
# +
# Test on the con() function
concatenates("This ", "is")
# -
concatenates('hi',2)
# <hr/>
# <div class="alert alert-success alertsuccess" style="margin-top: 20px">
# <h4> [Tip] How do I learn more about the pre-defined functions in Python? </h4>
# <p>We will be introducing a variety of pre-defined functions to you as you learn more about Python. There are just too many functions, so there's no way we can teach them all in one sitting. But if you'd like to take a quick peek, here's a short reference card for some of the commonly-used pre-defined functions: <a href="http://www.astro.up.pt/~sousasag/Python_For_Astronomers/Python_qr.pdf">Reference</a></p>
# </div>
# <hr/>
print(dir(' '))
dir()
help('print'),help(print)
# <h3 id="simple">Functions Make Things Simple</h3>
# Consider the two lines of code in <b>Block 1</b> and <b>Block 2</b>: the procedure for each block is identical. The only thing that is different is the variable names and values.
# <h4>Block 1:</h4>
# + jupyter={"outputs_hidden": false}
# a and b calculation block1
a1 = 4
b1 = 5
# c = a + b + 2ab - 1, then threshold: 0 if negative, else 5.
c1 = a1 + b1 + 2 * a1 * b1 - 1
if(c1 < 0):
    c1 = 0
else:
    c1 = 5
c1
# + [markdown] jupyter={"source_hidden": true}
# <h4>Block 2:</h4>
# + jupyter={"outputs_hidden": false}
# a and b calculation block2 (same procedure as block1, new values)
a2 = 0
b2 = 0
c2 = a2 + b2 + 2 * a2 * b2 - 1
if(c2 < 0):
    c2 = 0
else:
    c2 = 5
c2
# + [markdown] jupyter={"source_hidden": true}
# We can replace the lines of code with a function. A function combines many instructions into a single line of code. Once a function is defined, it can be used repeatedly. You can invoke the same function many times in your program. You can save your function and use it in another program or use someone else’s function. The lines of code in code <b>Block 1</b> and code <b>Block 2</b> can be replaced by the following function:
# + jupyter={"outputs_hidden": false}
# Make a Function for the calculation above
def Equation(a, b):
    """Return 0 if a + b + 2*a*b - 1 is negative, otherwise 5."""
    value = a + b + 2 * a * b - 1
    return 0 if value < 0 else 5
# + [markdown] jupyter={"source_hidden": true}
# This function takes two inputs, a and b, then applies several operations to return c.
# We simply define the function, replace the instructions with the function, and input the new values of <code>a1</code>, <code>b1</code> and <code>a2</code>, <code>b2</code> as inputs. The entire process is demonstrated in the figure:
# + [markdown] jupyter={"source_hidden": true}
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/FuncsPros.gif" width="850" />
# -
# Code **Blocks 1** and **Block 2** can now be replaced with code **Block 3** and code **Block 4**.
# <h4>Block 3:</h4>
# + jupyter={"outputs_hidden": false}
# Block 1 rewritten to use the Equation function.
a1 = 4
b1 = 5
c1 = Equation(a1, b1)
c1
# -
# <h4>Block 4:</h4>
# + jupyter={"outputs_hidden": false}
# Block 2 rewritten to use the Equation function.
a2 = 0
b2 = 0
c2 = Equation(a2, b2)
c2
# -
# <hr>
# <h2 id="pre">Pre-defined functions Or Built-in Functions</h2>
# There are many pre-defined functions in Python, so let's start with the simple ones.
# The <code>print()</code> function:
# +
# Build-in function print()
album_ratings = [10.0, 8.5, 9.5, 7.0, 7.0, 9.5, 9.0, 9.5]
print(album_ratings) # STDOUT
# -
# The <code>sum()</code> function adds all the elements in a list or tuple:
# +
# Use sum() to add every element in a list or tuple together
sum(album_ratings)
# -
# The <code>len()</code> function returns the length of a list or tuple:
# Show the length of the list or tuple
len(album_ratings)
# The `max()` to find the max value in list or tuple
max(album_ratings)
# The `min()` to find the min value of tuple or list
min(album_ratings)
# The `chr()` to find the Unicode Char value
chr(97)
# The `ord()` to find the Unicode Ord Of value
ord('a')
# The `id()` to find the Address of Memory Location of the Data
id(album_ratings)
# The `type()` which is used to Know the what type of Data it is
type(album_ratings)
# The `int()` to convert Integer
a = '10'
type(a)
int(a)
complex(a)
bool(a)
str(a)
float(a)
abs(10.5j+2)
abs(-7.25897)
# + jupyter={"outputs_hidden": true}
help(abs)
# -
divmod(10,2)
a = [True,True,False]
all(a)
any(a)
chr(256)
ascii('Ā')
a= [0,1,2,15]
b = bytearray(a)
b
b.append(211)
b
ord('ﻖ')
chr(65238)
bytearray('ﻖﻖﻖﻖźebra','ascii','replace') #using error replace
bytes(a)
callable([1,2,3])
callable(print)
callable(list)
exec(compile('a=5\nb=7\nprint(a+b)',' ','exec'))
for i in enumerate(['a','b','c']):
print(i)
# **Stas**:
#
# $y = \beta_0 +\beta_1*X$ **When you have Postive Relationship between X and Y**
#
#
# **Math**:
#
# $ y = M*X+C$
#
#
# **Machine Learning**
#
#
# $y = \beta_0 +\beta_1*X_0+\beta_2*X_1+\beta_3*X_2+...............+\beta_n*X_n$
#
#
x = 7
# SECURITY: eval/exec run arbitrary code; never use them on untrusted input.
eval('x+7*3')
exec('x=2;y=3;print(x+y)')
# Parse comma-separated integers from interactive input into a, b, c.
a,b,c = map(int,input("Enter a and b values").split(','))
a
b,c
c = a+b
c
# NOTE: the comma makes AC a TUPLE (lambda, list), not a filtered list.
AC = lambda x:x%2==0,[1,2,0,False]
AC
# filter keeps only the even numbers.
list(filter(lambda x:x%2==0,[7,1,2,10,55,12]))
float(10)
# +
a,b=2,3
print("a={0} and b={1}".format(a,b))
# -
frozenset((3,10,24,4,2)) #immutable object
# <h2 id="if">Using <code>if</code>/<code>else</code> Statements and Loops in Functions</h2>
# The <code>return()</code> function is particularly useful if you have any IF statements in the function, when you want your output to be dependent on some condition:
# + jupyter={"outputs_hidden": false}
# Function example
def type_of_album(artist, album, year_released):
    """Return "Modern" for albums released after 1980, else "Old"."""
    return "Modern" if year_released > 1980 else "Old"


x = type_of_album("<NAME>", "Thriller", 1998)
print(x)
# -
# We can use a loop in a function. For example, we can <code>print</code> out each element in a list:
# +
# Print the list using for loop
def PrintList(the_list):
    """Print each element of *the_list* on its own line."""
    for item in the_list:
        print(item)
# + jupyter={"outputs_hidden": false}
# Implement the printlist function
PrintList(['1', 1, 'the man', "abc"])
# -
# ## Decorators
#
# **`Def:`** Python has an interesting feature called decorators for adding functionality to existing code. This is called **metaprogramming**, because one part of the program modifies another part of the program at compile time.
#
# Consider a function named `Succ` that takes one parameter and returns its successor.
# +
def Succ(x):
    """Return the successor of x (x + 1)."""
    return x + 1


# Bind a second name to the same function object (aliasing demo).
Var = Succ
Var(10)
# -
# **`Note:`** This means we have two names, `Succ` and `Var`, for the same function. An important fact is that we can delete either `Succ` or `Var` without deleting the function object itself.
del Succ
Succ(25)
Var(20)
Var(2.5)
# **Note:** calling either `First` or `Second` gives the same output, because both names refer to the same function object.
def First(msg):
    """Print *msg*."""
    print(msg)


First('Hello')
# +
Second = First
Second("Hello welcome to python")
# -
del First
Second("Hi")
def Even_Odd(x):
    """Print whether *x* is even or odd."""
    if x % 2:
        print("odd numbers:", x)
    else:
        print("Even Numbers:", x)


# A second name bound to the same function (decorator-style aliasing).
Var = Even_Odd
Var(25)
Even_Odd(26)


def Data():
    """Classify each integer in [10, 22) via the Var alias."""
    for n in range(10, 22):
        Var(n)


Data()
# **Higher Order Funcation**
# +
def temp(t):
    """Convert *t* (Celsius) to Fahrenheit and return a message string."""
    def to_fahrenheit(x):
        # Standard C -> F conversion.
        return 9 * x / 5 + 32

    return "It's" + str(to_fahrenheit(t)) + " Degress Celsi!"


print(temp(20))
# -
def inc(x):
    """Return x incremented by one."""
    return x + 1


def dec(x):
    """Return x decremented by one."""
    return x - 1


def operate(fun, x):
    """Apply callable *fun* to *x* and return the result (higher-order)."""
    return fun(x)


operate(inc, 3)
operate(dec, 2)
def make(func):
    """Return a wrapper that prints a message before calling *func*."""
    def inner():
        print("I Got Decorated")
        func()
    return inner


def ordinary():
    """Print a plain message."""
    print("I am Ordinary")


ordinary()
# Decorate the ordinary function by hand.
p = make(ordinary)
p()
# <hr>
# <h2 id="default">Setting default argument values in your custom functions</h2>
# You can set a default value for arguments in your function. For example, in the <code>isGoodRating()</code> function, what if we wanted to create a threshold for what we consider to be a good rating? Perhaps by default, we should have a default rating of 4:
# + jupyter={"outputs_hidden": false}
# Example for setting param with default value
def isGoodRating(rating=4):
    """Print whether *rating* (default 4) clears the quality bar of 7."""
    if rating >= 7:
        print("this album is good its rating is", rating)
    else:
        print("this album sucks it's rating is", rating)
# + jupyter={"outputs_hidden": false}
# Test the value with default value and with input
isGoodRating()
# -
isGoodRating(10)
isGoodRating(10.1451245745120)
isGoodRating()
# <hr>
# <h2 id="global">Global variables</h2>
# So far, we've been creating variables within functions, but we have not discussed variables outside the function. These are called global variables.
# <br>
# Let's try to see what <code>printer1</code> returns:
# + jupyter={"outputs_hidden": false}
# Example of a global variable read by the call below.
artist_1 = "<NAME>"  # global variable


def printer1(artist):
    """Print that *artist* is an artist; binds only a local variable."""
    internal_var = artist
    print(artist, "is an artist")


printer1(artist_1)
# -
print(internal_var)
# If we print <code>internal_var</code> we get an error.
# <b>We got a Name Error: <code>name 'internal_var' is not defined</code>. Why?</b>
#
# It's because all the variables we create in the function is a <b>local variable</b>, meaning that the variable assignment does not persist outside the function.
#
# But there is a way to create <b>global variables</b> from within a function as follows:
# + jupyter={"outputs_hidden": false}
artist = "<NAME>"


def printer(artist):
    """Print *artist*; also creates/overwrites the GLOBAL internal_var."""
    global internal_var
    internal_var = "<NAME>"
    print(artist, "is an artist")


printer(artist)
# Works now: the first call created internal_var at module scope.
printer(internal_var)
# -
print(internal_var)
# <h2 id="scope">Scope of a Variable</h2>
# The scope of a variable is the part of that program where that variable is accessible. Variables that are declared outside of all function definitions, such as the <code>myFavouriteBand</code> variable in the code shown here, are accessible from anywhere within the program. As a result, such variables are said to have global scope, and are known as global variables.
# <code>myFavouriteBand</code> is a global variable, so it is accessible from within the <code>getBandRating</code> function, and we can use it to determine a band's rating. We can also use it outside of the function, such as when we pass it to the print function to display it:
# + jupyter={"outputs_hidden": false}
# Example of global variable
myFavouriteBand_1 = "AC/DC"


def getBandRating(bandname):
    """Return 10.0 when *bandname* matches the global favourite, else 0.0."""
    return 10.0 if bandname == myFavouriteBand_1 else 0.0


print("AC/DC's rating is:", getBandRating("AC/DC"))
# -
print("Deep Purple's rating is:",getBandRating("Deep Purple"))
print("My favourite band is:", myFavouriteBand_1)
getBandRating(myFavouriteBand_1)
# Take a look at this modified version of our code. Now the <code>myFavouriteBand</code> variable is defined within the <code>getBandRating</code> function. A variable that is defined within a function is said to be a local variable of that function. That means that it is only accessible from within the function in which it is defined. Our <code>getBandRating</code> function will still work, because <code>myFavouriteBand</code> is still defined within the function. However, we can no longer print <code>myFavouriteBand</code> outside our function, because it is a local variable of our <code>getBandRating</code> function; it is only defined within the <code>getBandRating</code> function:
# +
# Example of a local variable
def getBandRating(bandname):
    """Score a band against a favourite band defined locally ("AC/DC")."""
    myFavouriteBand = "AC/DC"  # local variable
    return 10.0 if bandname == myFavouriteBand else 0.0
print("AC/DC's rating is: ", getBandRating("AC/DC"))
print("Deep Purple's rating is: ", getBandRating("Deep Purple"))
print("My favourite band is", myFavouriteBand)
# -
# Finally, take a look at this example. We now have two <code>myFavouriteBand</code> variable definitions. The first one of these has a global scope, and the second of them is a local variable within the <code>getBandRating</code> function. Within the <code>getBandRating</code> function, the local variable takes precedence. **Deep Purple** will receive a rating of 10.0 when passed to the <code>getBandRating</code> function. However, outside of the <code>getBandRating</code> function, the <code>getBandRating</code> s local variable is not defined, so the <code>myFavouriteBand</code> variable we print is the global variable, which has a value of **AC/DC**:
# +
# Example of a global variable and a local variable sharing the same name
myFavouriteBand = "AC/DC"  # global

def getBandRating(bandname):
    """Score a band; the local ``myFavouriteBand`` ("Deep Purple") shadows the global."""
    myFavouriteBand = "Deep Purple"  # local
    return 10.0 if bandname == myFavouriteBand else 0.0
print("AC/DC's rating is:",getBandRating("AC/DC"))
print("Deep Purple's rating is: ",getBandRating("Deep Purple"))
print("My favourite band is:",myFavouriteBand)
# -
# ### Variable Length Argument:
#
# **Asterisk (`*`):** if a single asterisk is placed before a parameter in a function definition, that parameter collects a **non-keyworded variable-length** list of arguments.
#
# - By using `*args` we can pass a variable number of positional arguments into our function.
#
# `Example:` `*args` packs the values into a tuple
#
# **Double asterisk (`**`):** if a double asterisk is placed before a parameter, it collects a **keyworded variable-length** list of arguments.
#
# - If we want to pass a keyworded variable length of arguments to a function, we use `**kwargs`. It lets us handle named arguments in our function.
#
# `Example:` `**kwargs` packs the named values into a dict
#
#
# ### *args example
def fun(*packed):
    """Pack all positional arguments into a tuple and display it."""
    print("Which Will non hold keyworded", packed)
fun(10,20,30,40)
# 1. For multiplication and power operations.
# 2. For repeatedly extending the list-type containers or tuple-type containers.
# 3. For using the variadic arguments. (so-called “packing”)
# 4. For unpacking the containers.
# +
def func_var_args(*values):
    """Show how a bare ``*`` parameter packs extra positional args into a tuple."""
    print(values)
func_var_args(1, 2, '3')
# -
def save_ranking(*ranked):
    """Pack an arbitrary number of names into a tuple and display it."""
    print(ranked)
save_ranking('ming', 'alice', 'tom', 'wilson', 'roy')
# ### **kwargs example
def funs(**named):
    """Show how a ``**`` parameter packs keyword args into a dict."""
    print("Which will Hold the keyworded", named)
funs()
# +
def func_keyword_arg(**options):
    """Print the dict built from all keyword arguments."""
    print(options)
func_keyword_arg(keyword1=10, keyword2='foo')
# +
def func_keyword_arg_dict(**named):
    """Print each keyword argument on its own line as ``name : value``."""
    for field in named:
        print(field, ":", named[field])
func_keyword_arg_dict(keyword1=10, keyword2='foo')
# +
def bar(**kwargs):
    """Print every keyword argument's name and value, one pair per line."""
    for key, value in kwargs.items():
        print(key, value)
bar(name='one', age=27,x=1,y=2,z=3,badnews=9)
# -
def func(required_arg, *args, **kwargs):
    """Print the mandatory argument, then any extra positional and keyword args."""
    print(required_arg)
    # args prints first (a tuple), then kwargs (a dict); empty extras are skipped.
    for extras in (args, kwargs):
        if extras:
            print(extras)
func("required argument")
func("required argument", 1, 2, '3')
func("required argument", 1, 2, '3', keyword1=4, keyword2="foo")
# <h2>Quiz on Functions</h2>
# Come up with a function that divides the first input by the second input:
# Write your code below and press Shift+Enter to execute
# Double-click __here__ for the solution.
#
# <!--
# def div(a, b):
# return(a/b)
# -->
# <hr>
# Use the function <code>con</code> for the following question.
# Can the <code>con</code> function we defined before be used to add two integers or strings?
# Write your code below and press Shift+Enter to execute
# Double-click __here__ for the solution.
#
# <!--
# yes, for example:
# con(2, 2)
# -->
# <hr>
# Can the <code>con</code> function we defined before be used to concatenate a list or tuple?
# Write your code below and press Shift+Enter to execute
# Double-click __here__ for the solution.
#
# <!--
# yes,for example:
# con(['a', 1], ['b', 1])
# -->
| Funcation/PY0101EN-3-4-Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="CID_27ISaRpJ"
# # Implement LSH from scratch
#
# In this assignment, you will implement LSH from scratch and predict the labels of the test data. You will then verify the correctness of your implementation using a "grader" function/cell (provided by us) which will match your implementation.
#
# The grader function would help you validate the correctness of your code.
#
# Please submit the final Colab notebook in the classroom ONLY after you have verified your code using the grader function/cell.
#
#
# **NOTE: DO NOT change the "grader" functions or code snippets written by us.Please add your code in the suggested locations.**
#
# Ethics Code:
# 1. You are welcome to read up online resources to implement the code.
# 2. You can also discuss with your classmates on the implmentation over Slack.
# 3. But, the code you write and submit should be yours ONLY. Your code will be compared against other students' code and online code snippets to check for plagiarism. If your code is found to be plagiarised, you will be awarded zero marks for all assignments, which have a 10% weightage in the final marks for this course.
# + [markdown] id="mR49rnr6ibOX"
# ## Reading the data from csv file
# + colab={"base_uri": "https://localhost:8080/"} id="gXNLRg93cPJN" outputId="4568c72b-b046-41f5-a291-5f5eda089e4b"
# Code to mount google drive in case you are loading the data from your google drive
from google.colab import drive
drive.mount('/gdrive')
# %cd /gdrive
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="oA1hSk2odHUy" outputId="69c01a13-6672-489a-e4cf-97010242ab02"
# Loading data from csv file
import pandas as pd
data_path = '/gdrive/MyDrive/Colab Notebooks/lsh_assignment_data.csv'
df = pd.read_csv(data_path)
df
# + colab={"base_uri": "https://localhost:8080/"} id="cKHb7v5edUiU" outputId="64fe9cc5-55f3-4e6a-ed5d-ffe5e442afb6"
# Data Overiview
df['category'].value_counts()
# + [markdown] id="Mcpy_Nrnig9V"
# ### Creating Train and Test Datasets
#
# Note that the labels for test data will not be present in the dataset and hence they are mentioned as NaN.
# + id="ncAK-oHFeKbS"
# The last 10 rows in the csv file are query points, so loading them into test data.
# And loading the reamining points to train_data for which labels are given.
train_data = df.iloc[:-10,:]
test_data = df.iloc[-10:,:]
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="bs7uYx-1fh66" outputId="ae5f13e4-f559-45a1-a9dc-f9c8cc196f46"
# For train_data here the labels are in the column named "category".
train_data
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="fc-SORtAfgqI" outputId="960aff1d-4a82-4287-b<PASSWORD>-<PASSWORD>"
test_data
# + [markdown] id="U1jeyM0emKOw"
# ## Custom Implementation
# + [markdown] id="AU6mt5wq3Oyg"
# ### Instructions:
#
# 1. Read in the train_data.
# 2. Vectorize train_data using sklearns built in tfidf vectorizer.
# 3. Ignore unigrams and make use of both **bigrams & trigrams** and also limit the **max features** to **4000** and **minimum document frequency** to **10**.
# 4. After the tfidf vectors are generated as mentioned above, next task is to generate random hyperplanes.
# 5. Generate **5 random hyperplanes**. And generate the hyperplanes using a random normal distribution with **mean zero and variance 1**.
# 6. We have set the **numpy random seed to zero**, please do not change it. And then you can make use of **np.random.normal** to generate the vectors for hyperplanes.
# 7. As mentioned in the course videos, compute the hash function and also the corresponding hash table for it.
# 8. Once the hash table is generated now take in each of the query points from the test data.
# 9. Vectorize those query points using the same tfidf vectorizer as mentioned above.
# 10. Now use the hash function on this query point and fetch all the similar data points from the hashtable.
# 11. Use cosine similarity to compute **11-Nearest Neighbours** from the list of data points obtained in the above step.
# 12. Take a majority vote among the 11-Nearest Neighbours and predict the class label for the query point in the test data.
# 13. **In case of a tie** in the obtained labels from nearest neighbours, you can pick a label after sorting all the labels **alphabetically**(A-Z), i.e. for example labels starting with A would get more preference than labels starting with Z.
# 14. Repeat steps 9 to 13 for all the points in the test data and then finally return a list with all the predicted labels.
# 15. Note that there are a total of 10 data points in the test data so the final list you return should be of length 10.
# 16. Also note that the cosine similarity function should be written from scratch, you should not directly make use of existing libraries.
# 17. Please use the formula of cosine similarity as explained in the course videos, you can make use of numpy or scipy to calculate dot or norm or transpose.
# + id="YECivOCWfvGn"
# Please implement this fucntion and write your code wherever asked. Do NOT change the code snippets provided by us.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
import numpy as np
from numpy.linalg import norm
def predictLabels (test_data):
    """
    Given the test_data, return the labels for all the rows in the test data.
    Follow the step by step instructions mentioned above.

    Relies on the module-level ``train_data`` DataFrame (columns ``text`` and
    ``category``). Returns a numpy array with one predicted label per test row.
    """
    np.random.seed(0)
    ##############################################################
    #### Write YOUR CODE BELOW as per the above instructions   ###
    ##############################################################
    # Read the training texts and their class labels from the dataset.
    x_train,y_train = train_data['text'].copy(),train_data['category'].copy()
    # NOTE(review): StandardScaler cannot be applied to raw strings, so the
    # scaling experiments below were abandoned and are kept commented out.
    #scaler = StandardScaler()
    #scaler.fit(x_train)
    #x_train = scaler.transform(x_train)
    #x_train = StandardScaler().fit(x_train).transform(x_train) #it works only for numbers and not strings
    #print(x_train)
    # Vectorize train_data with sklearn's TfidfVectorizer: bigrams and trigrams
    # only (no unigrams), at most 4000 features, and ignore n-grams appearing
    # in fewer than 10 documents.
    tfidf_vectorizer = TfidfVectorizer(ngram_range=(2,3),max_features=4000, min_df=10)
    tfidf_vectorizer_fitmodel = tfidf_vectorizer.fit(x_train)
    X = tfidf_vectorizer_fitmodel.transform(x_train)
    #print(X[:10])
    #print(X.shape)
    # Generate 5 random hyperplanes drawn from N(0, 1); each hyperplane lives
    # in the same feature space as the tfidf vectors (X.shape[1] dimensions).
    np.random.seed(0) #setting numpy random seed to zero
    Hplanes = np.random.normal(0,1,(5,X.shape[1]))
    #print("Hyperplanes are:", Hplanes)
    # LSH hash function: project a vector onto every hyperplane and keep only
    # the sign pattern, so similar vectors land in the same bucket.
    def hash_fun(x_vec):
        """Return the hash key for *x_vec*: one 0/1 bit per hyperplane."""
        key = tuple() #a tuple is used because lists are not hashable
        key_array = x_vec.dot(Hplanes.T)
        #print(key_array.shape)
        key_array = key_array.tolist()
        # (0,1)[x>0] maps each signed projection to a 0/1 bit.
        key = tuple(map(lambda x: (0,1)[x>0],key_array[0]))
        return key
    # Build the hash table: bucket key -> list of training-row indices.
    Hashtable = dict()
    index = 0
    # Bucket every training vector by its hash key.
    for position in X:
        key = hash_fun(position)
        if key not in Hashtable:
            Hashtable[key] = list()
        Hashtable[key].append(index)
        index = index+1
    # Vectorize the query points with the same fitted tfidf vectorizer.
    x_test, y_test = test_data['text'].copy(),test_data['category'].copy()
    #x_test = scaler.transform(x_test)
    # Query-point tfidf matrix.
    Xq = tfidf_vectorizer_fitmodel.transform(x_test)
    Xq_nearest_neighbour = dict()
    # Compute the 11 nearest neighbours by cosine similarity (from scratch).
    def cos_similarity(Query_point,neighbour,k=11):
        """Return indices of the k most cosine-similar rows of X among the
        candidate indices *neighbour* (the query point's hash bucket)."""
        cosine_sim = []
        Query_point_L2 = np.linalg.norm(Query_point.toarray(),2)
        for index_neighbour in neighbour:
            nearest_neighbour = X[index_neighbour]
            cos = Query_point.dot(nearest_neighbour.T)/(Query_point_L2 * np.linalg.norm(nearest_neighbour.toarray(),2))
            cosine_sim.append(cos.toarray().item())
        neighbour = np.array(neighbour)
        # argsort is ascending; slicing [:-(k+1):-1] takes the k largest
        # similarities in descending order.
        return neighbour[np.argsort(cosine_sim)[:-(k+1):-1]]
    index = 0
    # For every query point, fetch its bucket and keep the 11 nearest rows.
    for qp in Xq:
        key = hash_fun(qp)
        neighbour = Hashtable[key]
        nearest_neighbour_11 = cos_similarity(qp,neighbour,k=11)
        Xq_nearest_neighbour[index] = nearest_neighbour_11
        index = index + 1
    #print(Xq_nearest_neighbour)
    # Majority vote among the 11 nearest neighbours to predict each query's
    # class label; ties prefer the alphabetically-earlier label.
    Xq_label = []
    for index,neighbour in Xq_nearest_neighbour.items():
        count_of_label = dict()
        max = 0  # NOTE(review): shadows the builtin `max` within this loop
        majority = ' '
        for n in neighbour:
            label = y_train[n]
            count_of_label[label] = count_of_label.get(label,0)+1
            if count_of_label[label]>max:
                max = count_of_label[label]
                majority = label
            if count_of_label[label]==max:
                # Tie-break compares only the first character of each label;
                # NOTE(review): sufficient only while no two labels share an
                # initial letter — confirm against the label set.
                if label[0]<majority[0]:
                    max = count_of_label[label]
                    majority = label
        Xq_label.append(majority)
    return np.array(Xq_label)
# + [markdown] id="rhnngvQkrnBB"
# ## Grader Cell
#
# Please execute the following Grader cell to verify the correctness of your above implementation. This cell will print "Success" if your implmentation of the predictLabels() is correct, else, it will print "Failed". Make sure you get a "Success" before you submit the code in the classroom.
# + id="GX1sji2XrtmX" colab={"base_uri": "https://localhost:8080/"} outputId="78e27d3f-3939-42e5-9469-8ad96a6c7806"
###########################################
## GRADER CELL: Do NOT Change this.
# This cell will print "Success" if your implmentation of the predictLabels() is correct and the accuracy obtained is above 80%.
# Else, it will print "Failed"
###########################################
import numpy as np
# Predict the labels using the predictLabels() function
Y_custom = np.array(predictLabels(test_data))
# Reference grader array - DO NOT MODIFY IT
Y_grader = np.array(['tech', 'entertainment', 'tech', 'sport', 'business', 'business', 'politics', 'entertainment', 'politics', 'sport'])
# Calculating accuracy by comparing Y_grader and Y_custom
# (10 test points, so each correct prediction is worth 10 percentage points).
accuracy = np.sum(Y_grader==Y_custom) * 10
if accuracy >= 80:
    print("******** Success ********","Accuracy Achieved = ", accuracy,'%')
else:
    print("####### Failed #######","Accuracy Achieved = ", accuracy,'%')
print("\nY_grader = \n\n", Y_grader)
print("\n","*"*50)
print("\nY_custom = \n\n", Y_custom)
| 2)Implementing_LSH_from_Scratch_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # distance module
#
# Available methods:
#
# - calc_point_to_point_distance: calculates distances between all points in the given array,
# - calc_block_to_block_distance: calculates distances between blocks based on the population points within the block.
# ## ```calc_point_to_point_distance()```
#
# ```python
# pyinterpolate.distance.calc_point_to_point_distance(
# points_a,
# points_b=None,
# check_coordinates=False
# )
# ```
#
# Function calculates distances between all points in the given array.
#
# INPUT:
#
# - **points_a**: (_numpy array_) points coordinates,
# - **points_b**: (_numpy array_) points coordinates, default is `None`. If `None` is given then distances between all points within `points_a` is calculated,
# - **check_coordinates**: (_bool_) checks if coordinates are unique within a given array. It is a resource-consuming process and it should be invoked with caution, especially for large datasets.
#
# OUTPUT:
#
# - `numpy array` of distances between all coordinates.
# ***
# ## ```calc_block_to_block_distance()```
#
#
# ```python
# pyinterpolate.distance.calc_block_to_block_distance(areas)
# ```
#
# Function calculates distances between all given blocks.
#
# INPUT:
#
# - **areas**: (_numpy array_ or Python _list of lists_) area _ids_ and coordinates per each id. Single input row:
#
# ```python
# [
# area_id,
# [x, y, val]
# ]
# ```
#
# OUTPUT:
#
# - **areal distances**: `tuple of arrays` with matrix with areal distances and ids of each row of distances:
#
#
# ```python
# (
# [
# [dist(id0:id0), ..., dist(id0:id99)],
# ...,
# [dist(id99:id0), ..., dist(id99:id99)]
# ],
# [id0, id1, ..., id999]
# )
# ```
# ***
| docs/build/html/code_documentation/distance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # How do reproducible and trustworthy workflows impact data science?
# + [markdown] slideshow={"slide_type": "slide"} tags=[]
# ## Topic learning objectives
#
# By the end of this topic, students should be able to:
#
# 1. Define data science, and the related terms reproducible and auditable analysis
# 2. Give examples of workflows that are considered reproducible and trustworthy in the context of a data analysis
# 3. Explain why data analyses benefit from reproducible and auditable workflows
# 4. Provide real-life examples of how a failure in reproducible and trustworthy workflows has negatively impacted the outcome of a data analysis project
# 5. List three useful tools for facilitating organization and collaboration in complex data analysis projects
# 6. Describe the data analysis cycle
# 7. Explain how to mechanistically start a data analysis project
# 8. State and refine a data analysis question
# 9. List other potential sources of untrustworthiness in a data analysis (e.g., data source, mapping question to analysis, interpretation of results), and discuss how a data analysis that employs reproducible and trustworthy workflows can still be flawed
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data science
#
# *the study, development and practice of __reproducible and auditable processes__ to obtain __insight from data.__*
#
# From this definition, we must also define reproducible and auditable analysis:
# + [markdown] slideshow={"slide_type": "slide"}
# #### Reproducible analysis:
# *reaching the same result given the same input, computational methods and conditions $^1$.*
#
# - input = data
# - computational methods = computer code
# - conditions = computational environment (e.g., programming language & it's dependencies)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Auditable/transparent analysis,
# *a readable record of the steps used to carry out the analysis as well as a record of how the analysis methods evolved $^2$.*
#
# 1. [National Academies of Sciences, 2019](https://www.nap.edu/catalog/25303/reproducibility-and-replicability-in-science)
# 2. [Parker, 2017](https://peerj.com/preprints/3210/) and [Ram, 2013](https://scfbm.biomedcentral.com/articles/10.1186/1751-0473-8-7)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Why adopt this definition of data science?
#
# We believe that data science work should both bring insight and employ reproducible and auditable methods so that trustworthy results and data products can be created.
#
# Data products can be built via other methods, but we lack confidence in how the results or products were created.
#
# We believe this stems from non-reproducible and non-auditable analyses:
#
# 1. lacking evidence that the results or product could be regenerated given the same input computational methods, and conditions
#
# 2. lacking evidence of the steps taken during creation
#
# 3. having an incomplete record of how and why analysis decisions were made
# + [markdown] slideshow={"slide_type": "slide"}
# ### What makes trustworthy data science?
#
# Some possible criteria:
#
# 1. It should be reproducible and auditable
#
# 2. It should be correct
#
# 3. It should be fair, equitable and honest
# + [markdown] slideshow={"slide_type": "fragment"}
# There are many ways a data science can be untrustworthy... In this course we will focus on workflows that can help build trust. I highly recommend taking a course in data science ethics to help round out your education in how to do this. Further training in statistics and machine learning will also help with making sure your analysis is correct.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Is this really important?
#
# Yes! There are both small and big ways this can impact your work.
#
# Let's talk about some of these by sharing data-related workflows from the trenches:
#
# 1. In the next 10 minutes, I want you to think and write down a non-reproducible or non-auditable workflow you have used before at work, on a personal project, or in course work, that negatively impacted your work somehow (make sure to include this in the story). Here's an example:
#
# *As a Masters student, I started to use R to do my statistical analysis. I obtained the results I needed from running my code in the R console and copying the results into the word document that was my manuscript. Six months later we were working on revisions requested by the reviewers and I could not remember which version of the code I ran to get my results. I eventually figured it out through much trial and error, but the process was inefficient and very stressful.*
#
# *--<NAME>*
#
#
# 2. When prompted, paste your story in the Google doc (link to be shared in class)
#
#
# 3. Finally, we will take 10 minutes to read at least 3 other stories from the trenches that have been shared.
# + [markdown] slideshow={"slide_type": "slide"}
# #### An example with large impact
#
# [*RETRACTED ARTICLE: Safety and efficacy of favipiravir versus hydroxychloroquine in management of COVID-19: A randomised controlled trial*](https://www.nature.com/articles/s41598-021-98683-5)
#
# <img src="img/covid-19-retraction.png" width=500>
#
# - [A research paper was published in March 2021](https://www.nature.com/articles/s41598-021-85227-0) that claimed that a drug, Favipiravir, was a safe and effective alternative to another drug, hydroxychloroquine (a medication commonly used to prevent or treat malaria), in mild or moderate COVID-19 infected patients.
#
# - In September 2021, the paper was retracted by the editors - in part due to reproducibility issues:
#
#
# *"After concerns were brought to the Editors' attention after publication, the raw data underlying the study were requested. The authors provided several versions of their dataset. Post-publication peer review confirmed that none of these versions fully recapitulates the results presented in the cohort background comparisons, casting doubt on the reliability of the data. Additional concerns were raised about the randomisation procedure, as the equal distribution of male and female patients is unlikely unless sex is a parameter considered during randomisation. However, based on the clarification provided by the authors, sex was not considered during this process. The Editors therefore no longer have confidence in the results and conclusions presented."*
#
# The problem doesn't just stop once the article is retracted... Between the time the article was published and retracted, the article was cited 17 times!
#
# <img src="img/covid-retraction-citations.png" width=700>
# -
# ### How big is this problem?
#
# Searching the [Retraction Watch Database](http://retractiondatabase.org/) for "Results Not Reproducible" we find 635 records that match!
#
# <img src="img/retraction-watch.png" width=800>
# ### Does this just impact academia?
#
# No! The use of non-reproducible tools can impact government and industry as well! Consider this case:
#
# <img src="img/bbc.png" width=500>
#
# Source: https://www.bbc.com/news/uk-scotland-edinburgh-east-fife-53893101
# What went wrong?
#
# An audit found that the wrong spreadsheet matrix was copied over, and the calculation for only 4 air changes per hour, instead of the required 10 per hour, was done. This error was missed several times by human review of the spreadsheet.
#
# How could this have been prevented via using reproducible tools?
#
# If code instead of a spreadsheet was used for calculations, then unit tests could have been written to check the calculations. Also, the code could be abstracted to a well named function, or a function with well named arguments, that could have been more easily detected than a hidden formula in a spreadsheet.
# + [markdown] slideshow={"slide_type": "slide"}
# ## More reasons to worry about workflows?
# + [markdown] slideshow={"slide_type": "slide"}
# #### Reason 1:
#
# It makes it easier to collaborate with your most important collaborator - YOU in 6 months!
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src="img/2011.11.15_life_of_a_swe.png" width=500>
# + [markdown] slideshow={"slide_type": "skip"}
# *Source: http://www.bonkersworld.net/building-software/*
# + [markdown] slideshow={"slide_type": "slide"}
# #### Reason 2:
#
# It makes others think you know what you are doing...
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src="img/business_suit.gif" width=400>
# + [markdown] slideshow={"slide_type": "notes"}
# *Source: https://giphy.com/*
# + [markdown] slideshow={"slide_type": "slide"}
# ## Workflows & complex projects
# + [markdown] slideshow={"slide_type": "slide"}
# ### What are complex projects?
#
# I define complex projects as one that has __*at least one*__ of the following:
# + [markdown] slideshow={"slide_type": "fragment"}
# - two, or more, people directly working on the analysis
# + [markdown] slideshow={"slide_type": "fragment"}
# - projects that involve two or more coding documents
# + [markdown] slideshow={"slide_type": "fragment"}
# - projects that involve analysis of medium/large data
# + [markdown] slideshow={"slide_type": "fragment"}
# - projects where you are working on a remote machine
# + [markdown] slideshow={"slide_type": "fragment"}
# - projects that have many software or environment dependencies, or ones that are difficult or take a long time to install
# + [markdown] slideshow={"slide_type": "fragment"}
# *As a project accumulates more of these features it grows further in complexity.*
# + [markdown] slideshow={"slide_type": "slide"}
# #### Complex projects without intentional Data Science workflows...
#
# <img src ="https://upload.wikimedia.org/wikipedia/en/a/a3/Escher%27s_Relativity.jpg" width ="450">
#
# -- *Relativity by <NAME>*
# + [markdown] slideshow={"slide_type": "slide"}
# #### Concrete examples of problems that can occur in complex analyses
# + [markdown] slideshow={"slide_type": "fragment"}
# - An interesting result that you cannot recreate 😞
# + [markdown] slideshow={"slide_type": "fragment"}
# - Your email inbox is full of information related to the project that only you have access to 😫
# + [markdown] slideshow={"slide_type": "fragment"}
# - A small change to the analysis code requires re-running the entire thing, *and takes hours...* 😧
# + [markdown] slideshow={"slide_type": "fragment"}
# - Activation time to becoming productive after taking a break from the project is hours to days 😴
# + [markdown] slideshow={"slide_type": "fragment"}
# - Code that can only be run on one machine, *and you don't know why...* 😵
# + [markdown] slideshow={"slide_type": "slide"}
# ### How can we avoid such problems and chaos?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Workflow features to mitigate chaos
#
# 1. Version Control (*Git & GitHub*)
#
# 2. Executable analysis scripts & pipelines (*Python/R scripts & Make*)
#
# 3. Defined & shippable dependencies (*Docker*)
#
# *All of these features are a subset of those recommended by <NAME> in her 2016 [Opinionated Analysis Development](https://peerj.com/preprints/3210/) paper*
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### 1. Version Control
#
# - Version control is a tool which archives changes to file(s) over time.
#
# - These changes are archived in a way that you can later revisit different time points in the project.
#
# <img src="http://swcarpentry.github.io/git-novice/fig/play-changes.svg">
#
# *source: http://swcarpentry.github.io/git-novice/*
#
# + [markdown] slideshow={"slide_type": "slide"}
# - Many version control tools also have features that facilitate collaboration.
#
# - Git + GitHub are two of the most common softwares for version control (*and so this is where I draw my examples from*)
#
# <img src="http://faisalweb.com/wp-content/uploads/2017/07/git.jpg" width=600>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Example problem solved by version control
#
# **Problem:** An extremely interesting result that you cannot recreate 😞
#
#
# **Solution**: Version the code **and** the output of the analysis
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Going back in time via commits
#
# <img src="img/releases.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Going back in time via commits
#
# <img src="img/commits_eg.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Going back in time via commits
#
# <img src="img/commit-visit.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Going back in time via releases
#
# <img src="img/commits.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Going back in time via releases
#
# <img src="img/release_eg.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Going back in time via releases
#
# <img src="img/release-visit.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Example problem solved by version control
#
#
# **Problem:** Your email inbox is full of information related to the project that only you have access to 😫
#
#
# **Solution**: Use GitHub Issues for communications related to the project
# + [markdown] slideshow={"slide_type": "slide"}
# #### GitHub Issues for project-related communications
#
# <img src="img/issue_thread.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### GitHub Issues for project-related communications
#
# <img src="img/inbox-notification.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### GitHub Issues for project-related communications
# <img src="img/open_issues.png" >
#
# + [markdown] slideshow={"slide_type": "slide"}
# source: https://github.com/LerouxLab/Celegans_wild_isolate_behaviour/issues
# + [markdown] slideshow={"slide_type": "slide"}
# #### Version control contributes to better communication & team work
#
# - All collaborators/team members know where to find the latest (or earlier) version of the analysis (code and output)
#
# - All collaborators/team members have access to all communications associated with the analysis
# + [markdown] slideshow={"slide_type": "slide"}
# ### 2. Executable analysis scripts & pipelines
#
# - As analysis grows in length and complexity, one literate code document generally is not enough
#
# - To improve code report readability (and code reproducibility and modularity) it is better to abstract at least parts of the code away (e.g, to scripts)
#
# - These scripts save figures and tables that will be imported into the final report
#
# <img src="img/scripts.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Example problem solved by executable analysis scripts & pipelines
#
# **Problem:** Activation time to becoming productive after taking a break from the project is hours to days 😴
#
# **Solution:** Record the order scripts need to be run in, and their arguments in one "driver" script/pipeline file.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Create a recipe for your analysis
#
# <img src="img/pipeline.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Example problem solved by executable analysis scripts & pipelines
# **Problem:** A small change to the analysis code requires re-running the entire thing, *and takes hours...* 😧
#
# **Solution:** Use a smart dependency tree tool to only re-run the parts that needs to be updated.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Make - one possible smart dependency tree tool
# + [markdown] slideshow={"slide_type": "fragment"}
# - special file called a Makefile that contains the recipe for your analysis
#
# + [markdown] slideshow={"slide_type": "fragment"}
# - Makefiles are "smart" and after changes, only run the parts of the analysis that have changed (as well as the parts that depend on the parts that changed)
# + [markdown] slideshow={"slide_type": "slide"}
# - Each block of code in a Makefile is called a rule, it looks something like this:
# ```
# file_to_create.png : data_it_depends_on.dat script_it_depends_on.py
# python script_it_depends_on.py data_it_depends_on.dat file_to_create.png
# ```
# + [markdown] slideshow={"slide_type": "fragment"}
# - Makefiles are made of many rules, typically one rule for each time you run an analysis script
# + [markdown] slideshow={"slide_type": "fragment"}
# *Make is not the only smart dependency tree tool - Apache Airflow, Prefect, `snakemake` & `targets` are also great options!*
# + [markdown] slideshow={"slide_type": "slide"}
# #### Example Makefile:
# ```
# # run all analysis
# all: doc/count_report.md
#
# # make dat files
# results/isles.dat: data/isles.txt src/wordcount.py
# python src/wordcount.py data/isles.txt results/isles.dat
# results/abyss.dat: data/abyss.txt src/wordcount.py
# python src/wordcount.py data/abyss.txt results/abyss.dat
#
# #create figures
# results/figure/isles.png: results/isles.dat src/plotcount.py
# python src/plotcount.py results/isles.dat results/figure/isles.png
# results/figure/abyss.png: results/abyss.dat src/plotcount.py
# python src/plotcount.py results/abyss.dat results/figure/abyss.png
#
# # render report
# doc/count_report.md: doc/count_report.Rmd results/figure/isles.png results/figure/abyss.png results/figure/last.png results/figure/sierra.png
# Rscript -e "rmarkdown::render('doc/count_report.Rmd')"
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ### Makefile dependency tree
#
# <img src="img/Makefile.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Executable analysis scripts & pipelines contribute to better collaboration
#
# - Can be used by others to run/replicate the analysis
# - Makes it easier to understand the landscape of the project and for others to contribute
# - Reduces *some* of the challenges/frustrations of working with larger data sets
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3. Defined & shippable dependencies
# Dependencies are other things one need to install to run your code, and includes:
# - programming languages (e.g., R, Python, Julia, etc)
# - packages from programming languages (e.g., tidyverse, scikit-learn)
# - other tools you rely on (e.g., Make)
# - legacy code (e.g., perl scripts, fortran, etc)
#
# ***Dependencies include versions as well as names!***
# + [markdown] slideshow={"slide_type": "slide"}
# #### Example problem solved by defined & shippable dependencies
# **Problem:** Code that can only be run on one machine, *you don't know why...* 😵
#
# **Problem:** Long install times when setting up a remote machine for analysis 🙄
#
# **One possible solution:** Containerizing your software and environmental dependencies
# + [markdown] slideshow={"slide_type": "slide"}
# #### What are containers?
# - Containers are *like* a light-weight virtual machine, they allow you to share:
# - Python/R versions
# - package versions
# - other tools you rely on (e.g., Make)
# - legacy code (e.g., perl scripts, fortran, etc)
# - The most popular tool for this is Docker
# - Containers can be shared on [DockerHub](https://hub.docker.com/) (similar to how code can be shared on GitHub)
# + [markdown] slideshow={"slide_type": "slide"}
# #### What are containers?
#
# <img src="https://media.springernature.com/full/springer-static/image/art%3A10.1186%2Fs13742-016-0135-4/MediaObjects/13742_2016_135_Fig7_HTML.gif?as=webp" width=300>
#
#
# source: [Tools and techniques for computational reproducibility](https://gigascience.biomedcentral.com/articles/10.1186/s13742-016-0135-4) by <NAME> & <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="img/dockerfile.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="img/docker-hub-eg.png" width=800>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Instructions needed to run analysis on *almost* any machine:
# 1. Install [Docker](https://docs.docker.com/v17.12/install/)
# 2. Clone or download [this GitHub repository](https://github.com/ttimbers/data_analysis_pipeline_eg)
# 3. From the root of the cloned repository, type:
# ```
# docker run --rm -v $(pwd):/home/rstudio/data_analysis_eg \
# ttimbers/data_analysis_pipeline_eg make -C /home/rstudio/data_analysis_eg all
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# #### Defined & shippable dependencies contribute to democratization of Data Science
# If you take care of packaging dependencies in a Docker container and distribute the container on DockerHub, you can add one line to your run instructions to your analysis to take away any installation pain your collaborators may face.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### When to add these workflow features:
# 1. Version Control
# - **ALWAYS**
# 2. Executable analysis scripts & pipelines
#   - **When you start hiding code chunks/cells in your Rmd/Jupyter notebook**
# 3. Defined & shippable dependencies
# - **When doing remote computing or when you have tricky dependencies**
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="img/2011.11.15_life_of_a_swe.png" width=600>
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="img/imp_life_ds.png" width=600>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Life cycle of a data analysis project
# + [markdown] slideshow={"slide_type": "slide"}
# 
# *Source: [R for Data Science](http://r4ds.had.co.nz/introduction.html) by Grolemund & Wickham*
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="img/art_of_ds_cycle.png" width=400>
# + [markdown] slideshow={"slide_type": "notes"}
# *Source: [Art of Data Science](https://leanpub.com/artofdatascience) by Peng & Matsui*
# -
# ## Revisiting the definition of data science & what is the question?
#
# (we need to talk about this for your project)
#
# *the study, development and practice of reproducible and auditable processes to obtain **insight from data**.*
#
# For the latter part of this definition, it is critical that you match the correct data science methods to the type of statistical question you are asking.
#
# <img src="http://jtleek.com/ads2020/images/week1/questions.png" width=600>
# #### 1. Descriptive
#
# One that seeks to summarize a characteristic of a set of data. No interpretation of the result itself as the result is a fact, an attribute of the data set you are working with.
#
#
# Examples:
#
# - What is the frequency of viral illnesses in a set of data collected from a group of individuals?
#
# - How many people live in each US state?
# #### 2. Exploratory
#
# One in which you analyze the data to see if there are patterns, trends, or relationships between variables looking for patterns that would support proposing a hypothesis to test in a future study.
#
# Examples:
#
# - Do diets rich in certain foods have differing frequencies of viral illnesses **in a set of data** collected from a group of individuals?
#
# - Does air pollution correlate with life expectancy **in a set of data** collected from groups of individuals from several regions in the United States?
# #### 3. Inferential
#
# One in which you analyze the data to see if there are patterns, trends, or relationships between variables in a representative sample. We want to quantify how much the patterns, trends, or relationships between variables are applicable to all individual units in the population.
#
# Examples:
#
# - Is eating at least 5 servings a day of fresh fruit and vegetables associated with fewer viral illnesses per year?
#
# - Is the gestational length of first born babies the same as that of non-first borns?
# #### 4. Predictive
#
# One where you are trying to predict measurements or labels for individuals (people or things). Less interested in what causes the predicted outcome, just what predicts it.
#
# Examples:
#
# - How many viral illnesses will someone have next year?
#
# - What political party will someone vote for in the next US election?
# #### 5. Causal
#
# Asks about whether changing one factor will change another factor, on average, in a population. Sometimes the underlying design of the data collection, by default, allows for the question that you ask to be causal (e.g., randomized experiment or trial)
#
# Examples:
#
# - Does eating at least 5 servings a day of fresh fruit and vegetables cause fewer viral illnesses per year?
#
# - Does smoking lead to cancer?
# #### 6. Mechanistic
#
# One that tries to explain the underlying mechanism of the observed patterns, trends, or relationship (how does it happen?)
#
# Examples:
#
# - How do changes in diet lead to a reduction in the number of viral illnesses?
#
# - How does airplane wing design change air flow over a wing, leading to decreased drag?
#
# #### Challenge #1
#
# What kind of statistical question is this?
#
# *Is a yet undiagnosed patient's breast cancer tumor malignant or benign?*
# #### Challenge #2
#
# What kind of statistical question is this?
#
# *Is inhalation of marijuana associated with lung cancer?*
#
# #### Challenge #3
#
# What kind of statistical question is this?
#
# *Does a truncation of the BRCA2 gene cause cancer?*
#
# #### Challenge #4
#
# What kind of statistical question is this?
#
# *Are there sub-types of ovarian tumors?*
#
# ### So you know the type of question, now what?
#
# This helps narrow down the possibilities
# of the kind of analysis you might want to do!
#
# For example, if you have the question: **"How many viral illnesses will someone have next year?"**
# and you identify that it is **predictive.**
# You could narrow down that some kind of statistical or machine learning model
# might help you answer that.
#
# Then you need to go a step deeper and look at the data that you have,
# and see which kind of statistical
# or machine learning model is most suitable for your data.
#
# <img src="https://scikit-learn.org/stable/_static/ml_map.png" width=700>
#
# Source: [scikit-learn algorithm cheat sheet](https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html)
#
# #### General statistical workflow:
#
# 1. Identify the kind of question
#
# 2. Identify or collect the data, and then look at the data
#
# 3. Identify a suitable statistical method for your question and data
#
# 4. Create a visualization
#
# 5. Apply your statistical method
#
# 6. (maybe create another visualization)
#
# 7. Interpret and communicate your assumptions and results
#
# *Note 1: in your project you might swap 1 & 2 as we are restricting ourselves to existing, public data sets.*
#
# *Note 2: Sometimes you will consider #3 with or before #2, in particular when you are running an experiment or collecting the data.*
# ## Wrap up
#
# - We define data science as *the study, development and practice of __reproducible and auditable processes__ to obtain __insight from data.__*
#
# - Both bolded parts of the definition are important! This course will primarily focus on the first part, but you will get the opportunity to practice the second part in your group projects for this course.
#
# - Many ways a data analysis can be untrustworthy... just because a data analysis is reproducible and auditable, doesn't mean it is fully trustworthy. But a data analysis is not trustworthy if it cannot be reproduced or studied...
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### What's next?
#
# - Version control for transparency and collaboration!
| materials/materials/lectures/.ipynb_checkpoints/01_lecture-intro-to-ds-workflows-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function, unicode_literals, absolute_import, division
import sys
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from glob import glob
from tqdm import tqdm
from tifffile import imread
from csbdeep.utils import Path, normalize
from stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available
from stardist.models import Config2D, StarDist2D, StarDistData2D
np.random.seed(42)
lbl_cmap = random_label_cmap()
# -
# # Data
#
# We assume that data has already been downloaded via notebook [1_data.ipynb](1_data.ipynb).
#
# <div class="alert alert-block alert-info">
# Training data (for input `X` with associated label masks `Y`) can be provided via lists of numpy arrays, where each image can have a different size. Alternatively, a single numpy array can also be used if all images have the same size.
# Input images can either be two-dimensional (single-channel) or three-dimensional (multi-channel) arrays, where the channel axis comes last. Label images need to be integer-valued.
# </div>
# Load matching image/mask file pairs (downloaded via notebook 1_data.ipynb).
image_files = sorted(glob('data/dsb2018/train/images/*.tif'))
mask_files = sorted(glob('data/dsb2018/train/masks/*.tif'))
# every image must have a mask with exactly the same file name
assert all(Path(img).name == Path(msk).name for img, msk in zip(image_files, mask_files))
X = [imread(f) for f in image_files]
Y = [imread(f) for f in mask_files]
# single-channel images are 2-D; multi-channel images carry the channel axis last
n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
# Normalize images and fill small label holes.
# +
# Percentile-normalize the raw images and fill small holes in the label masks.
axis_norm = (0, 1)      # normalize channels independently
# axis_norm = (0, 1, 2) # normalize channels jointly
if n_channel > 1:
    jointly = axis_norm is None or 2 in axis_norm
    print("Normalizing image channels %s." % ('jointly' if jointly else 'independently'))
    sys.stdout.flush()
X = [normalize(img, 1, 99.8, axis=axis_norm) for img in tqdm(X)]
Y = [fill_label_holes(lbl) for lbl in tqdm(Y)]
# -
# Split into train and validation datasets.
# Reproducible random split: hold out 15% of the images (at least one) for validation.
assert len(X) > 1, "not enough training data"
rng = np.random.RandomState(42)
ind = rng.permutation(len(X))
n_val = max(1, int(round(0.15 * len(ind))))
ind_train, ind_val = ind[:-n_val], ind[-n_val:]
X_val = [X[i] for i in ind_val]
Y_val = [Y[i] for i in ind_val]
X_trn = [X[i] for i in ind_train]
Y_trn = [Y[i] for i in ind_train]
print('number of images: %3d' % len(X))
print('- training:       %3d' % len(X_trn))
print('- validation:     %3d' % len(X_val))
# Training data consists of pairs of input image and label instances.
# Visual sanity check: show one raw image next to its ground-truth labels.
i = min(9, len(X) - 1)
img, lbl = X[i], Y[i]
assert img.ndim in (2, 3)
if not (img.ndim == 2 or img.shape[-1] == 3):
    img = img[..., 0]  # display only the first channel of a multi-channel image
plt.figure(figsize=(16, 10))
plt.subplot(121)
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.title('Raw image')
plt.subplot(122)
plt.imshow(lbl, cmap=lbl_cmap)
plt.axis('off')
plt.title('GT labels')
None;
# # Configuration
#
# A `StarDist2D` model is specified via a `Config2D` object.
# Show every configurable option of a StarDist2D model.
print(Config2D.__doc__)
# +
# 32 rays is a good default choice (see 1_data.ipynb)
n_rays = 32

# Use OpenCL-based computations for data generator during training (requires 'gputools')
use_gpu = False and gputools_available()

# Predict on subsampled grid for increased efficiency and larger field of view
grid = (2, 2)

conf = Config2D(
    n_rays=n_rays,
    grid=grid,
    use_gpu=use_gpu,
    n_channel_in=n_channel,
)
print(conf)
vars(conf)
# -
if use_gpu:
    from csbdeep.utils.tf import limit_gpu_memory
    # adjust as necessary: limit GPU memory to be used by TensorFlow to leave some to OpenCL-based computations
    limit_gpu_memory(0.8)
# **Note:** The trained `StarDist2D` model will *not* predict completed shapes for partially visible objects at the image boundary if `train_shape_completion=False` (which is the default option).
# Instantiate the model; weights, logs and config are stored under models/stardist/.
model = StarDist2D(conf, name='stardist', basedir='models')
# Check if the neural network has a large enough field of view to see up to the boundary of most objects.
# Warn if the typical (median) object is larger than the network's field of view.
median_size = calculate_extents(list(Y), np.median)
fov = np.array(model._axes_tile_overlap('YX'))
if (median_size > fov).any():
    print("WARNING: median object size larger than field of view of the neural network.")
# # Training
# You can define a function/callable that applies augmentation to each batch of the data generator.
# +
# No data augmentation by default; to enable it, implement the commented
# template below and assign the function to `augmenter`.
augmenter = None
# def augmenter(x, y):
#     """Augmentation of a single input/label image pair.
#        x is an input image
#        y is the corresponding ground-truth label image
#     """
#     # modify a copy of x and/or y...
#     return x, y
# -
# We recommend to monitor the progress during training with [TensorBoard](https://www.tensorflow.org/programmers_guide/summaries_and_tensorboard). You can start it in the shell from the current working directory like this:
#
# $ tensorboard --logdir=.
#
# Then connect to [http://localhost:6006/](http://localhost:6006/) with your browser.
#
# +
# When quick_demo is True, train for only 2 tiny epochs, then discard that model
# and load a previously trained demo model from disk instead.
quick_demo = True
if quick_demo:
    print (
        "NOTE: This is only for a quick demonstration!\n"
        "      Please set the variable 'quick_demo = False' for proper (long) training.",
        file=sys.stderr, flush=True
    )
    model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,
                epochs=2, steps_per_epoch=10)
    print("====> Stopping training and loading previously trained demo model from disk.", file=sys.stderr, flush=True)
    model = StarDist2D(None, name='2D_demo', basedir='../../models/examples')
    model.basedir = None # to prevent files of the demo model to be overwritten (not needed for your model)
else:
    model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter)
None;
# -
# # Threshold optimization
# While the default values for the probability and non-maximum suppression thresholds already yield good results in many cases, we still recommend to adapt the thresholds to your data. The optimized threshold values are saved to disk and will be automatically loaded with the model.
# Tune probability/NMS thresholds on the validation set; results are saved with the model.
model.optimize_thresholds(X_val, Y_val)
| examples/2D/2_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/StatisticsProject/AccessingData/pet-popularity.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# # Pet Popularity
#
# Using [Pet Licenses data from the City of Edmonton Open Data Portal](https://data.edmonton.ca/Community-Services/Pet-Licenses-by-Neighbourhood/5squ-mg4w) we can see which (licensed) pets are the most popular.
# +
# Download the Pet Licenses dataset as CSV via the Socrata SoQL API.
domain = 'https://data.edmonton.ca/resource/'
dataset_id = '5squ-mg4w'  # was `uuid`; renamed to avoid shadowing the stdlib uuid module
query = 'SELECT *'
import requests
import io
import pandas as pd
# Close the connection pool when done, and fail early on HTTP errors instead of
# silently parsing an error page as CSV.
with requests.Session() as session:
    results = session.get(domain + dataset_id + '.csv?$query=' + query)
    results.raise_for_status()
df = pd.read_csv(io.StringIO(results.content.decode('utf-8')))
df
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/jupyter_execute/curriculum-notebooks/Mathematics/StatisticsProject/AccessingData/pet-popularity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Train an IntegratedML model on Readmission Dataset
# ## Use JDBC to connect to InterSystems IRIS database
# This Notebook demonstrates:
# - Using the JayDeBeApi Python library to connect to InterSystems IRIS
# - Creating views to segment data into training and test sets
# - Defining and training an IntegratedML model to predict marketing campaign responses
# - Comparing the resulting model's predictions to data in the test set (that the model was not trained on)
# - Using the IntegratedML "VALIDATE MODEL" command to calculate accuracy metrics on the test set data
# !conda activate
# !pip3 install jupyterthemes
# !jt -r
# Widen the notebook container to the full browser width.
# `IPython.core.display` is a deprecated import path; the public API is `IPython.display`.
from IPython.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# # !ls /tf/intersystems-jdbc-3.1.0.jar
# RUN apt-get update && \
# DEBIAN_FRONTEND=noninteractive \
# apt-get -y install default-jre-headless && \
# apt-get clean && \
# rm -rf /var/lib/apt/lists/*
# !pip install JayDeBeApi
# !pip list JPype1
# ### 1. Set environment variables, if necessary
# +
#import os
#os.environ['JAVA_HOME']='C:\Progra~1\Java\jdk1.8.0_241'
#os.environ['CLASSPATH'] = 'C:\interSystems\IRIS20194\dev\java\lib\JDK18\intersystems-jdbc-3.0.0.jar'
#os.environ['HADOOP_HOME']='C:\hadoop\bin' #winutil binary must be in Hadoop's Home
# -
# ### 2. Get jdbc connection and cursor
# +
import jaydebeapi
# JDBC URL, driver class and credentials for the InterSystems IRIS instance.
url = "jdbc:IRIS://irisimlsvr:1972/USER" #"jdbc:IRIS://172.17.0.1:8091/USER"
driver = 'com.intersystems.jdbc.IRISDriver'
user = "SUPERUSER"
# NOTE(review): placeholder credential — replace with the real password before running.
password = "<PASSWORD>"
#libx = "C:/InterSystems/IRIS20194/dev/java/lib/JDK18"
#jarfile = "C:/InterSystems/IRIS20194/dev/java/lib/JDK18/intersystems-jdbc-3.0.0.jar"
# Path to the InterSystems JDBC driver jar shipped with the container.
jarfile = "./intersystems-jdbc-3.1.0.jar"
# -
# Open the JDBC connection and a cursor used for all SQL statements below.
conn = jaydebeapi.connect(driver, url, [user, password], jarfile)
curs = conn.cursor()
# ### 3. specify the source data table
# Fully qualified name of the source table used throughout this notebook.
dataTable = 'Patient.Readmission'
# ### 4. Execute a query and display results in Pandas DataFrame
# +
import pandas as pd
from IPython.display import display
# Preview the first 20 rows of the source table.
df = pd.read_sql("select TOP 20 * from %s" % dataTable, conn)
display(df)
# -
# Show number rows
df1 = pd.read_sql("SELECT COUNT(*) FROM %s" % dataTable, conn)
display(df1)
# ### Cleaning before retrying
# NOTE(review): these DROP statements fail if the views/model do not exist yet —
# they are only needed when re-running the notebook from scratch.
curs.execute("DROP VIEW Patient.ReadmissionTraining")
curs.execute("DROP VIEW Patient.ReadmissionPredict")
curs.execute("DROP MODEL ReadmissionModel")
# ### 5. Make some views to split training and testing datasets
# Count the rows that will fall into the hold-out (prediction) set.
df1 = pd.read_sql("SELECT COUNT(*) FROM %s WHERE ID>=11000" % dataTable, conn)
display(df1)
# Training set view
curs.execute("CREATE VIEW Patient.ReadmissionTraining AS SELECT * FROM Patient.Readmission WHERE ID<11000")
# Prediction set
curs.execute("CREATE VIEW Patient.ReadmissionPredict AS SELECT * FROM Patient.Readmission WHERE ID>=11000")
# ### 6. Look at Data
display(pd.read_sql("select TOP 20 * from Patient.ReadmissionTraining", conn))
display(pd.read_sql("SELECT COUNT(*) rowCount FROM Patient.ReadmissionTraining", conn))
# Column dtypes of the preview DataFrame, and how many columns there are.
df.dtypes
len(df.dtypes)
# ### 7. Choose ML framework with "SET ML CONFIGURATION"
#
curs.execute("SET ML CONFIGURATION %AutoML")
# ### 8. Create and Train an IntegratedML Model using default settings
# IntegratedML only needs a model name, the name of the column that is the target column to predict, and a table (or SELECT query to specify input columns.
#
# This is a simple DDL query that executes immediately.
res = curs.execute("CREATE MODEL ReadmissionModel PREDICTING (MxWillReAdmit) FROM Patient.ReadmissionTraining")
# Now that the model is defined, you can TRAIN it, which invokes the AutoML machine learning procedure.
curs.execute("TRAIN MODEL ReadmissionModel")
# Once that finishes, you can see some information about the model in the "ML_TRAINED_MODELS" table.
df3 = pd.read_sql("SELECT * FROM INFORMATION_SCHEMA.ML_TRAINED_MODELS", conn)
display(df3)
# ### 9. Compare model output to data it has not seen yet
# Now you can use SQL to SELECT data from another table, run the IntegratedML model on this new data, and see how well the predictions match the data!
df4 = pd.read_sql("SELECT PREDICT(ReadmissionModel) AS PredictedReadmit, \
MxWillReAdmit AS ActualReadmit FROM Patient.ReadmissionPredict", conn)
display(df4)
# ### 10. VALIDATE MODEL command calculates accuracy metrics
# You can certainly take that output above and calculate the accuracy using a standard formula, but IntegratedML has a built-in function to do that!
#
# Each time you run the command "VALIDATE MODEL..." it generates a set of metrics calculated on the data passed into the query. Since this table can be a bit difficult to read in its raw form we use a simple "pivot" call to arrange the data.
curs.execute("VALIDATE MODEL ReadmissionModel FROM Patient.ReadmissionPredict")
df5 = pd.read_sql("SELECT * FROM INFORMATION_SCHEMA.ML_VALIDATION_METRICS", conn)
df6 = df5.pivot(index='VALIDATION_RUN_NAME', columns='METRIC_NAME', values='METRIC_VALUE')
display(df6)
# ### 9. Query that highlights incorrectly predicted rows
#
# This query retrieves encounters the model incorrectly scored, along with the probability estimate of the prediction.
display(pd.read_sql('''
SELECT
PROBABILITY(ReadmissionModel FOR '1') ReadmissionProbability,
PREDICT(ReadmissionModel) PredictedReadmission,
MxWillReAdmit ActualReadmission,
*
FROM
Patient.ReadmissionPredict
WHERE
MxWillReAdmit = 1
AND MxWillReAdmit != PREDICT(ReadmissionModel)
''', conn))
# ### 9. Query that pulls recent Encounters and filters by those with high probability of readmission
display(pd.read_sql('''
SELECT
PROBABILITY(ReadmissionModel FOR '1') ReadmissionProbability,
PREDICT(ReadmissionModel) PredictedReadmission,
*
FROM
Patient.ReadmissionPredict
WHERE
MxEncounterEndYear >=2019 AND
MxEndDateMonth >= 12 AND
PROBABILITY(ReadmissionModel FOR '1') >= 0.25
''', conn))
| jupyter-samples/readmission-integratedml-jdbc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kampff lab - Polytrode Impedance
#
#
# Here a description of the dataset:
# http://www.kampff-lab.org/polytrode-impedance
#
# Here the official publication of this open dataset:
# https://crcns.org/data-sets/methods/evi-1/about-evi-1
#
# And the citation:
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2018); Extracellular recordings using a dense electrode array allowing direct comparison of the same neural signal measured with low and high impedance electrodes CRCNS.org
# http://dx.doi.org/10.6080/K07M064M
#
# And a paper on results in Frontier
# https://doi.org/10.3389/fnins.2018.00715
#
# ## Introduction
#
#
# For this study they use polytrode aranged in chess board so that 16 of electrodes have a 1Mohm (at 1kHz) impedance and other 16 have 100kohm impedance.
#
# The goal of this is notebook is to reproduce the spike sorting pipeline on the dataset.
#
# In the official paper in Frontiers, <NAME> and co-authors used Kilosort for spike sorting but did not publicly report the results on this dataset.
#
# Here is a replicate and reproducible pipeline with tridesclous.
#
# This is done only on one file **amplifier2017-02-02T17_18_46** but the same script can be applied easily to other files from the same dataset.
#
# ## Download
#
# Dataset must downloaded locally and manually from crcns or from the google drive in **"workdir"** path.
#
#
# ## The PRB file
# tridesclous need a PRB file that describe the geometry of probe.
#
# Create it by copy/paste or download it via github.
#
# Here I split the probe in 3 groups :
# * **0** is all channel
# * **1** is 1Mohm electrodes
# * **2** is 100kohm electrodes
#
# So we could test tridesclous on all channels or one or the other impedance.
#
# ```python
# channel_groups = {
# 0: {
# 'channels': [0, 31, 24, 7, 1, 21, 10, 30, 25, 6, 15, 20, 11, 16, 26, 5,
# 14, 19, 12, 17, 27, 4, 8, 18, 13, 23, 28, 3, 9, 29, 2, 22],
# 'graph' : [],
# 'geometry': {
# 0: [18.0, 0.0],
# 31: [18.0, 25.],
# 24: [0.0, 37.5],
# 7: [36.0, 37.5],
# 1: [18.0, 50.],
# 21: [0.0, 62.5],
# 10: [36., 62.5],
# 30: [18.0, 75.0],
# 25: [0.0, 87.5],
# 6: [36.0, 87.5],
# 15: [18.0, 100.0],
# 20: [0.0, 112.5],
# 11: [36.0, 112.5],
# 16: [18.0, 125.0],
# 26: [0.0, 137.5],
# 5: [36.0, 137.5],
# 14: [18.0, 150.0],
# 19: [0.0, 162.5],
# 12: [36.0, 162.5],
# 17: [18.0, 175.0],
# 27: [0.0, 187.5],
# 4: [36.0, 187.5],
# 8: [18.0, 200.0],
# 18: [0.0, 212.5],
# 13: [36.0, 212.5],
# 23: [18.0, 225.0],
# 28: [0.0, 237.5],
# 3: [36.0, 237.5],
# 9: [18.0, 250.0],
# 29: [0.0, 262.5],
# 2: [36.0, 262.5],
# 22: [18.0, 275.0],
# },
# },
# 1: {
# 'channels': [0, 24, 1, 10, 25, 15, 11, 26, 14, 12, 27, 8, 13, 28, 9, 2],
# 'graph' : [],
# 'geometry': {
# 0: [18.0, 0.0],
# 24: [0.0, 37.5],
# 1: [18.0, 50.],
# 10: [36., 62.5],
# 25: [0.0, 87.5],
# 15: [18.0, 100.0],
# 11: [36.0, 112.5],
# 26: [0.0, 137.5],
# 14: [18.0, 150.0],
# 12: [36.0, 162.5],
# 27: [0.0, 187.5],
# 8: [18.0, 200.0],
# 13: [36.0, 212.5],
# 28: [0.0, 237.5],
# 9: [18.0, 250.0],
# 2: [36.0, 262.5],
# }
# },
# 2: {
# 'channels': [31, 7, 21, 30, 6, 20, 16, 5, 19, 17, 4, 18, 23, 3, 29, 22],
# 'graph' : [],
# 'geometry': {
# 31: [18.0, 25.0],
# 7: [36.0, 37.5],
# 21: [0.0, 62.5],
# 30: [18.0, 75.0],
# 6: [36.0, 87.5],
# 20: [0.0, 112.5],
# 16: [18.0, 125.0],
# 5: [36.0, 137.5],
# 19: [0.0, 162.5],
# 17: [18.0, 175.0],
# 4: [36.0, 187.5],
# 18: [0.0, 212.5],
# 23: [18.0, 225.0],
# 3: [36.0, 237.5],
# 29: [0.0, 262.5],
# 22: [18.0, 275.0],
# }
# }
# }
# ```
# +
# supposing the dataset is downloaded here
# workdir = '/media/samuel/dataspikesorting/DataSpikeSortingHD2/kampff/polytrode Impedance/'
workdir = '/home/samuel/Documents/projet/DataSpikeSorting/kampff/polytrode Impedance/'
# Input file: raw binary recording for one session.
filename = workdir + 'amplifier2017-02-02T17_18_46/amplifier2017-02-02T17_18_46.bin'
# dirname is where tridesclous will put everything (its working directory)
dirname = workdir + 'tdc_amplifier2017-02-02T17_18_46'
# +
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import tridesclous as tdc
from tridesclous import DataIO, CatalogueConstructor, Peeler
import os, shutil
# -
# ## create a DataIO (and remove if already exists)
# +
if os.path.exists(dirname):
    # remove if it already exists, to restart from scratch
    shutil.rmtree(dirname)
dataio = DataIO(dirname=dirname)
# feed DataIO with one file: raw uint16 samples at 20 kHz, 32 channels,
# converted to microvolts with the 0.195 scale factor
dataio.set_data_source(type='RawData', filenames=[filename],
                sample_rate=20000., dtype='uint16', total_channel=32,
                bit_to_microVolt=0.195)
print(dataio)
# set the probe file (geometry + channel groups defined in the PRB file above)
dataio.set_probe_file('kampff_polytrode_impedance_32.prb')
# -
# ## CatalogueConstructor
#
# Make catalogue on the first 280. After this the signal is totally noisy.
# +
cc = CatalogueConstructor(dataio=dataio, chan_grp=0)
cc.set_preprocessor_params(chunksize=1024, common_ref_removal=False,
highpass_freq=250., lowpass_freq=9500.,
peak_sign='-', relative_threshold=5., peak_span=0.0001)
cc.estimate_signals_noise(duration=30.)
cc.run_signalprocessor(duration=280.)
cc.extract_some_waveforms(n_left=-15, n_right=20, mode='rand', nb_max=20000)
cc.clean_waveforms(alien_value_threshold=100.)
cc.extract_some_features(method='peak_max')
cc.find_clusters(method='sawchaincut', kde_bandwith=1.0)
print(cc)
# -
# ## Noise measurement
#
# This is done with the [MAD](https://en.wikipedia.org/wiki/Median_absolute_deviation) a robust variance.
#
# mad = median(abs(x-median(x)) * 1.4826
#
#
dataio = DataIO(dirname=dirname)
tdc.summary_noise(dataio=dataio, chan_grp=0)
# ## Inspect waveform quality at catalogue level
tdc.summary_catalogue_clusters(dataio=dataio, chan_grp=0, label=0)
# ## construct catalogue
cc.make_catalogue_for_peeler()
# ## apply peeler
#
# This is the real spike sorting: find spike that correcpond to catalogue templates.
#
# This is done in **51 s.** on my old laptop (Intel i5-3337U) without opencl.
#
initial_catalogue = dataio.load_catalogue(chan_grp=0)
peeler = Peeler(dataio)
peeler.change_params(catalogue=initial_catalogue)
peeler.run(duration=None, progressbar=True)
# # final inspection of cells
tdc.summary_after_peeler_clusters(dataio, chan_grp=0, label=0)
| kampfflab_polytrode_impedance/kampfflab_polytrode_impedance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dependency
#
# - MPI v >= 4.0
# - libAATM
# - The submodules (git init --recursive --update)
# +
# import system packages
import os
from datetime import datetime
import sys
import argparse
import traceback
import pickle
import yaml
from argparse import ArgumentParser, RawTextHelpFormatter
# import numpy
import numpy as np
import healpy as hp
# the MPI communicator class, customized for pycal data storage
from pycal.mpi import get_world, Comm
# the Data class
from pycal.dist import distribute_uniform, Data
# some pycal utils to share information with the environment
from pycal.utils import Logger, Environment, memreport
# some timers
from pycal.timing import function_timer, GlobalTimers, Timer, gather_timers
from pycal.timing import dump as dump_timing
# simulate the pointing and the atmosphere, and put all the information in the TODGround class
from pycal.todmap import TODGround, OpSimAtmosphere, OpPointingHpix
from pycal.weather import Weather
# Some wrapper to libaatm, they solve the radiative transfer equation in local thermodynamic equilibrium
from pycal.todmap.atm import atm_absorption_coefficient, atm_absorption_coefficient_vec
# helper functions
from pycal.tests._helpers import boresight_focalplane
import pycal.qarray as qa
# focal plane and telescope classes
from pycal.todmap import Focalplane
from pycal.todmap import Telescope
# -
# # Timer and Focalplane functions
# +
# set up the output directory for each mc iterations
@function_timer
def setup_output(outdir, comm, mc, freq):
    """Build (and, on the world root rank, create) the per-realization output path.

    Layout: ``<outdir>/<mc zero-padded to 8>/<freq as 3-digit int>``.
    Every rank returns the path; only rank 0 actually creates the directory.
    """
    outpath = f"{outdir}/{mc:08}/{int(freq):03}"
    if comm.world_rank == 0:
        print(f"Creating the outpath: {outpath}")
        os.makedirs(outpath, exist_ok=True)
    return outpath
def load_focalplane(args, comm):
    """Build the STRIP focalplane on the world root rank and broadcast it.

    Horn pointing directions from ``strip_focal_plane.yaml`` are converted to
    quaternions (rotation of the z axis onto the horn direction) and assembled
    into a pycal ``Focalplane`` with per-detector noise parameters.
    """
    focalplane = None

    # Load focalplane information
    if comm.comm_world is None or comm.comm_world.rank == 0:
        # NOTE(review): focalplane was just set to None, so this branch is
        # always taken and the pickle-loading branch below is unreachable.
        if focalplane is None:
            detector_data = {}
            with open(r'./strip_focal_plane.yaml') as file:
                focalplane = yaml.safe_load(file)
            detecotrs = focalplane['horns'].keys()  # (sic) horn names
            for i in detecotrs:
                # Horn orientation unit vector (x, y, z) in the telescope frame
                directions = focalplane['horns'][i]['orientation']
                # Angular offsets of the horn from the boresight (radians)
                l = np.arctan(directions[0] / directions[2])
                u = np.arctan(directions[1] / directions[2])
                zaxis = np.array([0, 0, 1], dtype=np.float64)
                angrot = qa.rotation(zaxis, 0 * np.pi / 180.0)  # identity rotation; currently unused
                # NOTE: rad2deg followed by * pi/180 is a round trip, so wx == l, wy == u
                wx = np.rad2deg(l) * np.pi / 180.0
                wy = np.rad2deg(u) * np.pi / 180.0
                wz = np.sqrt(1.0 - (wx * wx + wy * wy))
                wdir = np.array([wx, wy, wz])
                # Quaternion rotating the z axis onto the horn direction
                strip_quat = qa.from_vectors(zaxis, wdir)
                strip = {}
                strip["quat"] = strip_quat
                strip["fwhm"] = 20.0    # beam FWHM -- presumably arcmin; TODO confirm
                strip["fknee"] = 0.0    # 1/f knee: 0 -> white noise only
                strip["fmin"] = 1e-9
                strip["alpha"] = 1.0    # 1/f spectral slope
                strip["NET"] = 1.0      # noise-equivalent temperature (units unconfirmed)
                strip["color"] = "r"
                detector_data[i] = strip
            focalplane = Focalplane(
                detector_data=detector_data, sample_rate=args.sample_rate
            )
        else:
            # Fallback: load a pre-built focalplane from a pickle file
            focalplane = Focalplane(
                fname_pickle=args.focalplane, sample_rate=args.sample_rate
            )
    # Share the focalplane with every rank
    if comm.comm_world is not None:
        focalplane = comm.comm_world.bcast(focalplane, root=0)
    if args.debug:
        if comm.comm_world is None or comm.comm_world.rank == 0:
            outfile = "{}/focalplane.png".format(args.outdir)
            focalplane._plot_fp(12, 12, outfile)
    #schedule.telescope.focalplane = focalplane
    #detweights = focalplane.detweights
    return focalplane
# -
# # Simulation parameters
class args:
    """Namespace of simulation parameters (stands in for parsed argparse output)."""
    # Required arguments
    ces_name = "Test-scan"
    ces_start_time = "2022,7,1,0,0,0"   # "Y,m,d,H,M,S"
    ces_stop_time = "2022,7,1,3,0,0"    # 3-hour scan
    sample_rate = 20                    # Hz
    el_mode_rate = 0.001                # NOTE(review): sic -- passed later as el_mod_rate
    el_mod_amplitude = 1.0
    el_mod_sine = True                  # sinusoidal elevation modulation
    el_mod_step = 0
    # Scan Parameters
    ces_azmin = 1        # deg
    ces_azmax = 359      # deg
    ces_el = 70          # deg
    scan = "spin"
    subscan = ""
    scanrate = 6.0       # azimuth scan rate [deg/s]
    scan_accel = 0.0
    # Load the focalplane "The yaml file"
    focalplane = None    # None -> build from strip_focal_plane.yaml in load_focalplane()
    # Site parameters (Teide Observatory, Tenerife)
    site_name = "Tenerife"
    site_lon = "-16:31:00"
    site_lat = "28:20:00"
    site_alt = 2390.0    # altitude -- presumably meters
    coord = "C"          # celestial (equatorial) coordinates
    # Map parameters
    CES_star = None
    NSIDE = 128          # HEALPix resolution of the output map
    debug = True
    outdir = "out_directory"
    # Atmospheric parameters
    start_mc = 0         # index of the first Monte Carlo realization
    nsimu = 1            # number of MC realizations
    cache_name = "atm_"
    atm_cache = "atm_cache_"   # on-disk cache dir for the simulated atmosphere
    verbose = 0
    freq = 43.0          # observing frequency [GHz]
    weather_file = "weather_STRIP.fits"
# # Parallel logger (gather all the messages from different processes)
# +
# definition of the logger, the global timer and the environment
log = Logger.get()
gt = GlobalTimers.get()
env = Environment.get()

# global timer covering the whole simulation
gt.start("Atmospheric simulation (globbal timer)")
timer0 = Timer()
timer0.start()
# -
# # Initialize the communicator
# +
# Get the communicator (MPI world, process count, this process' rank)
mpiworld, procs, rank = get_world()
if rank == 0:
    print(env)
if mpiworld is None:
    log.info("Running serially with one process at {}".format(str(datetime.now())))
else:
    if rank == 0:
        log.info(
            "Running with {} processes at {}".format(procs, str(datetime.now()))
        )
comm = Comm(world=mpiworld)
# -
# # Load the focalplane
# +
# Append the start time so each run writes to its own directory
args.outdir = args.outdir+args.ces_start_time

if comm.world_rank == 0:
    print("Creating the outdir: {}".format(args.outdir))
    os.makedirs(args.outdir, exist_ok=True)

fp = load_focalplane(args, comm)
# -
# # This thread creates the TOD structure
# +
# Create the TOD structure
data = Data(comm)

weather = args.weather_file

# Parse the "Y,m,d,H,M,S" strings into UNIX timestamps.
# NOTE(review): datetime() here is naive, so the timestamps are interpreted
# in the local timezone -- confirm UTC is not expected.
sta = str(args.ces_start_time).split(",")
sto = str(args.ces_stop_time).split(",")
start_time = datetime(int(sta[0]), int(sta[1]), int(sta[2]), int(sta[3]), int(sta[4]), int(sta[5])).timestamp()
stop_time = datetime(int(sto[0]), int(sto[1]), int(sto[2]), int(sto[3]), int(sto[4]), int(sto[5])).timestamp()

# total number of samples in the observation
totsamples = int((stop_time - start_time) * args.sample_rate)

# create the TOD for this observation
if comm.comm_group is not None:
    ndetrank = comm.comm_group.size
else:
    ndetrank = 1

try:
    tod = TODGround(
        comm.comm_group,
        fp.detquats,
        totsamples,
        # detranks=ndetrank,
        firsttime=start_time,
        rate=args.sample_rate,
        site_lon=args.site_lon,
        site_lat=args.site_lat,
        site_alt=args.site_alt,
        azmin=args.ces_azmin,
        azmax=args.ces_azmax,
        el=args.ces_el,
        el_mod_step = args.el_mod_step,
        el_mod_rate=args.el_mode_rate,
        el_mod_amplitude=args.el_mod_amplitude,
        el_mod_sine=args.el_mod_sine,
        scanrate=args.scanrate,
        scan_accel=args.scan_accel,
        cosecant_modulation=True,
        CES_start=None,
        CES_stop=None,
        sun_angle_min=None,
        coord=args.coord,
        sampsizes=None,
        report_timing=None,
        hwprpm=None,
        hwpstep=None,
        hwpsteptime=None,
    )
except RuntimeError as e:
    raise RuntimeError(
        'Failed to create TOD for {}-{}-{}: "{}"'
        "".format(args.ces_name, args.scan, args.subscan, e)
    )
# -
# -
# # Create the Observation dictionary
# +
# Create the observation, and append the tod
obs = {}
obs["name"] = "CES-{}-{}-{}-{}".format(
    args.site_name, args.ces_name, args.scan, args.subscan
)
obs["tod"] = tod
obs["id"] = data.comm.group
obs["telescope_id"] = 1
obs["site"] = "Tenerife"
obs["site_name"] = args.site_name
obs["site_id"] = 123
obs["altitude"] = args.site_alt
obs["weather"] = Weather(weather, site=123)
obs["fpradius"] = 10.0  # focalplane radius -- presumably degrees; confirm
obs["start_time"] = start_time
obs["focalplane"] = fp

data.obs.append(obs)
# -
# # Pointing expansion
# +
# Expand the pointing, interpolating the quaternions
if comm.comm_world is not None:
    comm.comm_world.barrier()
timer0.stop()
if comm.world_rank == 0:
    timer0.report("Simulated scans")
if comm.world_rank == 0:
    log.info("Expanding pointing")

# Expand boresight quaternions into per-sample HEALPix pixel numbers and
# IQU weights (nested pixel ordering: nest=True)
pointing = OpPointingHpix(
    nside=128,
    nest=True,
    mode="IQU",
    single_precision=1e-7,
    nside_submap=128,
)
pointing.exec(data)

if comm.comm_world is not None:
    comm.comm_world.barrier()
if comm.world_rank == 0:
    timer0.report_clear("Pointing generation")

# Save the per-detector pixel streams to disk
poin = {}
for i in obs['tod'].local_dets:
    p = obs['tod'].cache.reference("pixels_{}".format(i))
    poin[i] = p
np.savez_compressed(args.outdir+'/pointings', poin)
# -
# -
# # Create the Atmospheric emission
# #### Atmospheric parameters
# +
# Atmospheric MC simulation: one independent atmosphere per realization
for mc in range(args.start_mc, args.start_mc + args.nsimu):
    timer_MC_iter = Timer()
    timer_MC_iter.start()

    log = Logger.get()
    tmr = Timer()
    tmr.start()
    if comm.world_rank == 0 and args.verbose:
        log.info("Simulating atmosphere")
        # NOTE(review): the cache directory is only created when verbose is
        # truthy -- confirm this gating is intentional.
        if args.atm_cache and not os.path.isdir(args.atm_cache):
            try:
                os.makedirs(args.atm_cache)
            except FileExistsError:
                pass

    # Parameters of the simulated turbulent-atmosphere volume
    # (length scales presumably in meters -- the original "in m?" notes stand)
    common_atm_params = {
        "realization": mc,
        "component": 123456,
        "lmin_center": 0.01,  # in m?
        "lmin_sigma": 0.001,
        "lmax_center": 100,  # in m?
        "lmax_sigma": 10,
        "zatm": 40000.0,
        "zmax": 2000.0,
        "xstep": 100.0,
        "ystep": 100.0,
        "zstep": 100.0,
        "nelem_sim_max": 10000,
        "verbosity": 0,
        "gain": 1,
        "z0_center": 2000,
        "z0_sigma": 0,
        "apply_flags": False,
        "common_flag_name": None,
        "common_flag_mask": 255,
        "flag_name": None,
        "flag_mask": 255,
        "report_timing": True,
        "wind_dist": 100,
        "flush": False,
    }

    # Simulate the atmosphere signal into per-detector "atm_<det>" cache objects
    atm = OpSimAtmosphere(out="atm", cachedir=args.atm_cache, freq=args.freq, **common_atm_params)
    atm.exec(data)

    if comm.comm_world is not None:
        comm.comm_world.barrier()
    tmr.stop()
    if comm.world_rank == 0:
        tmr.report("Atmosphere simulation")

    if comm.world_rank == 0:
        log.info(
            "Processing frequency {}GHz, MC = {}".format(args.freq, mc))

    # Set up the output directory (mcoffset makes the path unique per frequency)
    mcoffset = args.freq * 1000000
    outpath = setup_output(args.outdir, comm, mc + mcoffset, args.freq)

    cache_name = "atm"  # NOTE: shadows args.cache_name ("atm_")
    log = Logger.get()
    if comm.world_rank == 0 and args.verbose:
        log.info("Scaling atmosphere by frequency")
    timer = Timer()
    timer.start()
    for obs in data.obs:  # Now we have only one observation
        tod = obs["tod"]
        todcomm = tod.mpicomm
        weather = obs["weather"]
        focalplane = obs["focalplane"]
        start_time = obs["start_time"]
        weather.set(123, mc, start_time)  # site id 123, realization mc
        altitude = obs["altitude"]
        air_temperature = weather.air_temperature
        surface_pressure = weather.surface_pressure
        pwv = weather.pwv  # precipitable water vapor
        # Use the entire processing group to sample the absorption
        # coefficient as a function of frequency
        freqmin = 0
        freqmax = 2 * args.freq
        nfreq = 1001
        freqstep = (freqmax - freqmin) / (nfreq - 1)
        if todcomm is None:
            nfreq_task = nfreq
            my_ifreq_min = 0
            my_ifreq_max = nfreq
        else:
            # Split the frequency grid across the TOD communicator
            nfreq_task = int(nfreq // todcomm.size) + 1
            my_ifreq_min = nfreq_task * todcomm.rank
            my_ifreq_max = min(nfreq, nfreq_task * (todcomm.rank + 1))
        my_nfreq = my_ifreq_max - my_ifreq_min
        my_freqs = freqmin + np.arange(my_ifreq_min, my_ifreq_max) * freqstep
        my_absorption = atm_absorption_coefficient_vec(
            altitude,
            air_temperature,
            surface_pressure,
            pwv,
            my_freqs[0],
            my_freqs[-1],
            my_nfreq,
        )
        if todcomm is None:
            freqs = my_freqs
            absorption = my_absorption
        else:
            # Reassemble the full frequency grid from all tasks
            freqs = np.hstack(todcomm.allgather(my_freqs))
            absorption = np.hstack(todcomm.allgather(my_absorption))
        for det in tod.local_dets:
            try:
                # Use detector bandpass from the focalplane
                center = focalplane[det]["bandcenter_ghz"]
                width = focalplane[det]["bandwidth_ghz"]
            except Exception:
                # Use default values for the entire focalplane
                center = args.freq
                width = 0.2 * args.freq
            nstep = 101
            # Interpolate the absorption coefficient to do a top hat
            # integral across the bandpass
            det_freqs = np.linspace(center - width / 2, center + width / 2, nstep)
            absorption_det = np.mean(np.interp(det_freqs, freqs, absorption))
            cachename = "{}_{}".format(cache_name, det)
            # print("{}_{}".format(cache_name, det))
            ref = tod.cache.reference(cachename)
            ref *= absorption_det  # scale the cached TOD in place
            del ref

    if comm.comm_world is not None:
        comm.comm_world.barrier()
    timer0.stop()
    if comm.world_rank == 0 and args.verbose:
        timer0.report("Atmosphere scaling")

    log = Logger.get()
    if comm.world_rank == 0 and args.verbose:
        log.info("Updating atmospheric noise weights")
    timer = Timer()
    timer.start()
    # NOTE(review): `obs` here is the loop variable leaked from the loop
    # above -- fine with a single observation, fragile with several.
    site_id = obs["site_id"]
    weather = obs["weather"]
    start_time = obs["start_time"]
    weather.set(site_id, mc, start_time)
    altitude = obs["altitude"]
    absorption = atm_absorption_coefficient(
        altitude,
        weather.air_temperature,
        weather.surface_pressure,
        weather.pwv,
        args.freq,
    )
    obs["noise_scale"] = absorption * weather.air_temperature

    if comm.comm_world is not None:
        comm.comm_world.barrier()
    timer.stop()
    if comm.world_rank == 0 and args.verbose:
        timer.report("Atmosphere weighting")

    # This Monte Carlo iteration can be saved in outpath.
    # Save the per-detector atmosphere TOD for this realization (as float32).
    tods = {}
    for i in obs['tod'].local_dets:
        t = obs['tod'].cache.reference("atm_{}".format(i))
        tods[i] = np.float32(t)
    np.savez_compressed(outpath+'/tod_mc_'+str(mc), tods)

    timer_MC_iter.stop()
    timer_MC_iter.report("Monte Carlo iteration completed in ")
    gt.stop_all()
# -
if mpiworld is not None:
    mpiworld.barrier()

# Gather timing statistics from all processes and dump them to disk
timer = Timer()
timer.start()
alltimers = gather_timers(comm=mpiworld)
if comm.world_rank == 0:
    out = os.path.join(args.outdir, "timing")
    dump_timing(alltimers, out)
    timer.stop()
    timer.report("Gather and dump timing info")
timer0.report_clear("Test simulation")
# # Analysis and property of the `obs` dictionary
#
# The complete set of information about this observation was put into the `obs` dictionary. This structure collects all the information about the pointing and the signal acquired by each detector.
import pylab as plt
# %matplotlib inline

# Pull one detector's atmosphere stream and its pointing angles
atm_stream = data.obs[0]['tod'].cache.reference('atm_I0')
theta, phi, pa = qa.to_angles(tod.read_pntg(detector="I0"))
num = 10000
plt.figure(figsize=(7, 5))
#plt.plot(np.degrees(phi[:num]), tod.cache.reference("atm_I0")[:num]*10e-3, ".")
plt.figure(2)
plt.plot(np.degrees(pa[:num-1]), '.')
plt.xlabel("$P.A. [deg]$")
plt.ylabel("$Signal [ mK_{RJ} ]$");
# ## Beam convolution
# This convolution assumes perfectly gaussian beams. I assume the FWHM of the beam represents the $\sigma$ of the gauss function
# +
res_Q = 0.6  # beam resolution element [deg]
speed = args.scanrate*np.cos(np.deg2rad(args.ces_el))  # on-sky az speed [deg/sec]
f_sam = args.sample_rate  # Hz
# number of samples crossing one resolution element -> smoothing window length
N_sample = int((res_Q / speed) * f_sam)
N_sample
# -
def moving_average(x, w):
    """Boxcar (simple moving) average of ``x`` with window length ``w``.

    Uses a 'valid' convolution, so the output has ``len(x) - w + 1`` samples.
    """
    window_sums = np.convolve(x, np.ones(w), 'valid')
    return window_sums / w
# # Create the binned map
from numba import njit
@njit
def make_a_map(output_map, signals):
    """Naive binned map-maker: average all samples falling in each pixel.

    output_map: 1-D float array (length npix), zero-initialized; filled in
        place with the per-pixel mean signal.
    signals: sequence of (pixels, signal) array pairs, one per detector.
        zip() truncates to the shorter of the two arrays, which absorbs the
        length mismatch introduced upstream by the moving-average smoothing.

    Returns the per-pixel hit counts.
    """
    hits = np.zeros(len(output_map), dtype=np.int64)
    for pix, signal in signals:
        for p, s in zip(pix, signal):
            # accumulate the signal sum and the hit count per pixel
            output_map[p] += s
            hits[p] += 1
    # convert sums to means where at least one sample landed
    output_map[hits != 0] /= hits[hits != 0]
    return hits
# +
#from numba.typed import List
# Collect (pixel stream, smoothed signal) pairs for every detector.
# NOTE(review): the smoothed stream is N_sample-1 samples shorter than the
# pixel stream; make_a_map's zip() silently truncates the excess pixels.
signals = list()
for obs in data.obs:
    for ch in data.obs[0]['tod'].detectors:
        t = moving_average(obs["tod"].cache.reference("atm_%s" % ch), N_sample)
        p = obs["tod"].cache.reference("pixels_%s" % ch)
        signals.append((p, t))
# +
npix = 12*args.NSIDE**2
output_map = np.zeros(npix, dtype=np.double)
h = make_a_map(output_map, signals)
# -
# Normalized hit map and signal map (nested ordering, matching the pointing)
hp.mollview(h/np.amax(h), title="hitmap", nest=True, cmap='coolwarm')
hp.graticule()
hp.mollview(output_map, nest=True, cmap="coolwarm", min=-300, max=300)
hp.graticule()
# +
# Template logic:
# convert to local
# -> pix
# hp.pix2ang -> alpha,delta
# time + site + (alpha,delta) -> Az, El
# I(Az,El) = hpMap(pix)
# -
hp.gnomview(output_map, rot=(75,30), xsize=700, ysize=700, cmap="coolwarm", min=-200, max=200, nest=True)
hp.graticule()
# +
# Smoothed per-horn streams (scaled by 10e-6 -- presumably a unit conversion; confirm)
O2 = moving_average(data.obs[0]['tod'].cache.reference('atm_O2')*10E-6, N_sample)
B4 = moving_average(data.obs[0]['tod'].cache.reference('atm_B4')*10E-6, N_sample)
R3 = moving_average(data.obs[0]['tod'].cache.reference('atm_R3')*10E-6, N_sample)
R0 = moving_average(data.obs[0]['tod'].cache.reference('atm_R0')*10E-6, N_sample)
R6 = moving_average(data.obs[0]['tod'].cache.reference('atm_R6')*10E-6, N_sample)
I0 = moving_average(data.obs[0]['tod'].cache.reference('atm_I0')*10E-6, N_sample)
I2 = moving_average(data.obs[0]['tod'].cache.reference('atm_I2')*10E-6, N_sample)
I1 = moving_average(data.obs[0]['tod'].cache.reference('atm_I1')*10E-6, N_sample)
Y3 = moving_average(data.obs[0]['tod'].cache.reference('atm_Y3')*10E-6, N_sample)
Y0 = moving_average(data.obs[0]['tod'].cache.reference('atm_Y0')*10E-6, N_sample)
Y6 = moving_average(data.obs[0]['tod'].cache.reference('atm_Y6')*10E-6, N_sample)
# +
time = np.linspace(0, len(R3), len(R3))/20.0  # sec. (20 Hz sampling)
plt.figure(figsize=(10, 7))
plt.plot(time, R3, '-')
plt.plot(time, Y6, '-')
plt.xlabel("Time [sec.]")
plt.ylabel("Signal $K_{RJ}$")
# -
# Median signal level per horn
plt.figure(figsize=(10, 7))
plt.plot([0, 1, 2, 3, 4, 5, 6, 7], [np.median(R3), np.median(R0), np.median(R6), np.median(I2), np.median(I1), np.median(Y3), np.median(Y0), np.median(Y6)], '.')
from scipy.signal import periodogram
from scipy.signal import welch
from scipy.signal import correlate
import scipy.signal as signal
# +
# Power spectral densities of two horns (Welch estimator)
f1, p1 = welch(R3, fs=20.0, window='hamming', nperseg=300, noverlap=10, scaling='density')
f2, p2 = welch(Y6, fs=20.0, window=signal.get_window(('kaiser', 4.0), 300), nperseg=300, noverlap=20, scaling='density')
plt.figure(figsize=(10,7))
plt.loglog(f1, p1, '.-')
plt.loglog(f2, p2, '.-')
plt.ylim(1E-11, 1E-1)
# -
import statsmodels.api as sm
# +
# Cross-correlation functions between horn pairs (and one autocorrelation)
cx = sm.tsa.stattools.ccf(O2, B4, adjusted=False)
cx2 = sm.tsa.stattools.ccf(R3, Y6, adjusted=False)
auto_cx = sm.tsa.stattools.ccf(I0, I0, adjusted=False)
# +
plt.figure(figsize=(10, 7))
plt.plot(np.linspace(0, len(cx), len(cx)-5000+1)/(20.0), moving_average(cx, 5000), '.')
plt.plot(np.linspace(0, len(cx2), len(cx2)-5000+1)/(20.0), moving_average(cx2, 5000), '.')
plt.xlabel("Time [sec.]")
plt.ylabel("Cross corr [normalized]")
plt.plot(np.linspace(0, len(auto_cx), len(auto_cx)-5000+1)/(20.0), moving_average(auto_cx, 5000), '.')
# -
# ### Another way to evaluate the CX coef.
conv = np.correlate(R3, Y6, 'same')
plt.semilogx(-conv, '.')
plt.plot(cx2, '.')
import scipy.signal as signal
cxx = correlate(R3, Y6, method='fft')
lags = signal.correlation_lags(len(R3), len(Y6))/20.0  # lag axis in seconds
# +
plt.figure(figsize=(15, 15))
plt.semilogx(lags, cxx/np.amax(cxx), alpha=0.5)
#plt.plot(np.linspace(0, len(cx2), len(cx2))/(20.0), cx2, alpha=0.4)
plt.xlim((0, lags[-1]))
# -
# Spectra of the cross-correlation functions
cf, cp = welch(cx, fs=20, window="hamming", nperseg=50000, noverlap=15590)
cf2, cp2 = welch(cx2, fs=20, window="hamming", nperseg=50000, noverlap=15590)
cf_a, cp_a = welch(auto_cx, fs=20, window="hamming", nperseg=50000, noverlap=15590)
# +
plt.figure(figsize=(10, 7))
plt.semilogy(cf, cp)
plt.semilogy(cf2, cp2)
plt.semilogy(cf_a, cp_a)
plt.xlim(0,0.3)
plt.ylabel("$\Re[\mathcal{F}(C(t))]$")
plt.xlabel("Fequency [Hz]")
#plt.xlim(0, 0.15)
#plt.ylim(1E-5, 1)
# markers at harmonics of the 1-minute spin period
plt.plot([1/60, 2/60, 3/60, 4/60, 5/60, 6/60], [1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3], 'o')
# -
# Boresight azimuth / elevation streams
az = obs['tod'].cache.reference('az')
alt = obs['tod'].cache.reference('el')
plt.plot(np.rad2deg(alt))
| cookbook/Simulation_STRIP.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Scala
// language: scala
// name: scala
// ---
// # Provingground - HoTT
//
// These notes concern the object _HoTT_, which has the core implementation of homotopy type theory. Implementation details are (rather, will be) in the [scaladocs](http://siddhartha-gadgil.github.io/ProvingGround/).
//
// The major components of homotopy type theory implemented in the object HoTT are
//
// * Terms, types and Universes.
// * Function and dependent function types.
// * λs.
// * Pairs and Dependent pairs.
// * Disjoint union types.
// * Types 0 and 1 and an object in the latter.
// * Identity types
//
// Inductive types, induction and recursion are in different objects as they are rather subtle. The other major way (also not in the _HoTT_ object) of constructing non-composite types is to wrap scala types, possibly including symbolic algebra.
//
// The _core_ project contains code that is agnostic to how it is run. In particular this also compiles to scala-js.
import $ivy.`io.github.siddhartha-gadgil::provingground-core-jvm:0.1.0`
// ### Universes, Symbolic types
//
// We have a family of universes, but mostly use the first one denoted by Type. Given a type, we can construct symbolic objects of that type. We construct such a type _A_.
import provingground._
repl.pprinter.bind(translation.FansiShow.fansiPrint)
import HoTT._
// Declare a symbolic type A in the first universe.
val A ="A" :: Type
A == Type.::("A")
// We consider a symbolic object of the type _A_
val a ="a" :: A
// ## Function types, lambdas, Identity
//
// Given types A and B, we have the function type A → B. An element of this is a function from A to B.
//
// We can construct functions using λ's. Here, for the type _A_, we construct the identity on _A_ using a lambda. We can then view this as a dependent function of _A_, giving the identity function.
//
// In this definition, two λ's are used, with the method _lmbda_ telling the compiler that the result is a (non-dependent) function.
val id = lambda(A)(lmbda(a)(a))
// The type of the identity function is a mixture of Pi-types and function types. Which of these to use is determined by checking dependence of the type of the value on the variable in a λ-definition.
id.typ
lmbda(a)(a).typ
lmbda(a)(a).typ.dependsOn(A)
// The lambdas have the same effect at runtime. It is checked if the type of the value depends on the variable.
// The result is either _LambdaFixed_ or _Lambda_ accordingly.
val indep = lmbda(a)(a)
val dep = lambda(a)(a)
indep == dep
// ### Hygiene for λs
//
// A new variable object, which has the same toString, is created in making lambdas. This is to avoid name clashes.
val l = dep.asInstanceOf[LambdaFixed[Term, Term]]
l.variable
// the bound variable is a fresh copy, not the original symbol `a`
l.variable == a
// ## Modus Ponens
//
// We construct Modus Ponens, as an object in Homotopy Type theory. Note that A ->: B is the function type A → B.
// +
val B = "B" :: Type
val f = "f" :: (A ->: B)
val mp = lambda(A)(lambda(B)(lmbda(a)(lmbda(f)(f(a)))))
// -
// The type of Modus Ponens is again a mixture of Pi-types and function types.
mp.typ
// We can apply modus ponens with the roles of _A_ and _B_ reversed. This still works because variable clashes are avoided.
val mpBA = mp(B)(A)
mpBA.typ == B ->: (B ->: A) ->: A
// ### Equality of λs
//
// Lambdas do not depend on the name of the variable.
val aa = "aa" :: A
lmbda(aa)(aa) == lmbda(a)(a)
(lmbda(aa)(aa))(a) == a
// ## Dependent types
//
// Given a type family, we can construct the corresponding Pi-types and Sigma-types. We start with a formal type family, which is just a symbolic object of the appropriate type.
val Bs = "B(_ : A)" :: (A ->: Type)
// ### Pi-Types
//
// In addition to the case class constructor, there is an agda/shapeless-like convenience method for constructing Pi-types. Namely, given a type expression that depends on a variable _a : A_, we can construct the Pi-type corresponding to the obtained λ-expression.
//
// Note that the !: method just claims and checks a type, and is useful (e.g. here) for documentation.
val fmly = (a !: A) ~>: (Bs(a) ->: A)
// ### Sigma-types
//
// There is also a convenience method for defining Sigma types using λs.
Sgma(a !: A, Bs(a))
Sgma(a !: A, Bs(a) ->: Bs(a) ->: A)
// ## Pair types
//
// Like functions and dependent functions, pairs and dependent pairs can be handled together. The _mkPair_ function assigns the right type after checking dependence, choosing between pair types, pairs and dependent pairs.
val ba = "b(a)" :: Bs(a)
val b = "b" :: B
mkPair(A, B)
mkPair(a, b)
mkPair(a, b).typ
mkPair(a, ba).typ
mkPair(A, B).asInstanceOf[ProdTyp[Term, Term]]
// ## Plus types
//
// We can also construct the plus type _A plus B_, which comes with two inclusion functions.
val AplusB = PlusTyp(A, B)
AplusB.incl1(a)
AplusB.incl2
// In the above, a λ was used, with a variable automatically generated. These have names starting with $ to avoid collision with user defined ones.
// ## Identity type
//
// We have an identity type associated to a type _A_, with reflexivity giving terms of this type.
val eqAa = IdentityTyp(A, a, a)
val ref = Refl(A, a)
ref.typ == eqAa
// ## The Unit and the Nought
//
// Finally, we have the types corresponding to _True_ and _False_
Unit
Zero
Star !: Unit
| notes/HoTT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Exercise 1
#
# Your task is to write a Python script to do the following:
# 1. Read data from the file `prog_data.txt` into a list.
# + Note: Data in this file comes from the course survey. The numbers represent the level of programming confidence that students had coming into the course on a scale of 1 to 5.
# 2. Using the `enumerate` built-in function, perform a `for` loop to convert elements in the list from ints to floats.
# + Remember that data will be read in as a string type. You will do calculations which require floats. To convert from a string to a float just do float(string). However, note that this only works sometimes: [How to convert data types in Python 3](https://www.digitalocean.com/community/tutorials/how-to-convert-data-types-in-python-3). It will work in our current use case.
# 3. Using the `Counter` method from the `collections` library, determine what the most common number in your list is.
# + Hint: You will need to access the first element of the list returned by the `Counter` method.
# 4. Print out the following for the user:
# + Total number of respondents (suggestions: use the `len` function)
# + Maximum number in the list (suggestion: use the `max` function)
# + Minimum number in the list (suggestion: use the `min` function)
# + Median of list (suggestion: use `numpy` --- look up median in `numpy`)
# + Mean of list (suggestion: use `numpy` --- look up mean in `numpy`)
# + Most common number in list with number of times it occurs.
#
# Note: Please don't write a function to do this. A script is sufficient for this exercise. We'll get to functions soon.
#
# Your output should look something like:
# > The total number of respondants is 45.
#
# > The maximum programming confidence is 5.0.
#
# > The minimum programming confidence is 1.0.
#
# > The median programming level is 4.0.
#
# > The mean programming level is 3.4.
#
# > The most common programming level is 4.0 with 17 respondants.
# Q1: read the survey data (one confidence level per whitespace-separated token)
# Use a context manager so the file is closed even if parsing raises.
with open('prog_data.txt', 'r') as f:
    levels = [int(d) for d in f.read().strip().split()]

# Q2: convert the ints to floats with enumerate (as the exercise requests;
# the index i is unused)
# levels = [float(d) for d in levels]
levels = [float(d) for i, d in enumerate(levels)]

# Q3: most common value via collections.Counter.
# most_common() returns (value, occurrences) pairs sorted by descending count,
# so count_sorted[0] is the modal level.
from collections import Counter
count = Counter(levels)
count_sorted = count.most_common()
print(count_sorted[0][0])

import numpy as np

# Q4: print the summary statistics for the user
print('The total number of respondants is %d.' %len(levels))
print('The maximum programming confidence is %.1f.' %max(levels))
print('The minimum programming confidence is %.1f.' %min(levels))
print('The median programming level is %.1f.' %np.median(levels))
print('The mean programming level is %.1f.' %np.mean(levels))
print('The most common programming level is %.1f with %d respondants.' \
      %(count_sorted[0][0], count_sorted[0][1]))
| lectures/L4/Exercise_1-final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kumiori/mec647/blob/main/mec647_BCs_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="bhqKfbydhqlU"
# %%capture
import sys
try:
    import google.colab # noqa: F401
except ImportError:
    # Local environment: FEniCSx is expected to be installed already
    import ufl # noqa: F401
    import dolfinx # noqa: F401
else:
    # On Colab: install FEniCSx via fem-on-colab if it is missing
    try:
        import ufl
        import dolfinx
    except ImportError:
        # !wget "https://fem-on-colab.github.io/releases/fenicsx-install.sh" -O "/tmp/fenicsx-install.sh" && bash "/tmp/fenicsx-install.sh";
        import ufl # noqa: F401
        import dolfinx # noqa: F401
# + id="giEEacK0h89B"
# %%capture
# Headless-rendering and meshing dependencies (Colab only runs the shell lines)
# !sudo apt install libgl1-mesa-glx xvfb;
# !{sys.executable} -m pip install pythreejs;
# !{sys.executable} -m pip install ipygany;
# !{sys.executable} -m pip install --upgrade pyyaml
try:
    import google.colab
except ImportError:
    pass
else:
    pass
    # google.colab.output.enable_custom_widget_manager();
# Install pyvista (and gmsh below) on the fly if not available
try:
    import pyvista
except ImportError:
    # !pip3 install --upgrade pyvista itkwidgets;
    import pyvista # noqa: F401
from pyvista.utilities import xvfb
try:
    import gmsh
except ImportError:
    # !{sys.executable} -m pip install gmsh
    import gmsh
# + colab={"base_uri": "https://localhost:8080/"} id="tIu0L3Ixh9bn" outputId="ec2ad8f8-f946-4c50-f090-634c5b052e66"
# !rm -rf mec647
# Clone the course repository; retry from scratch if the first clone fails.
try:
    # !git clone https://github.com/kumiori/mec647.git
    # The shell magic above becomes a comment in the .py representation,
    # which would leave the try body empty (a SyntaxError) -- keep a pass.
    pass
except Exception:
    print('Something went wrong')
    # !rm -rf mec647
    # !git clone https://github.com/kumiori/mec647.git
# + id="OFxOqEgOiIO-"
sys.path.append('mec647/')
# + id="7HOLzr6EimhX"
# meshes
import meshes
from meshes import primitives
# visualisation
from utils import viz
import matplotlib.pyplot as plt
from utils.viz import plot_mesh
# + id="lytYYgfoipnb"
# Parameters
# Simulation configuration: loading range and bar geometry
parameters = {
    'loading': {
        'min': 0,   # load factor range
        'max': 1
    },
    'geometry': {
        'geom_type': 'bar',
        'Lx': 1.,   # bar length
        'Ly': 0.1   # bar height
    }
}
# parameters.get('loading')
# + [markdown] id="cGGdnId4i3es"
# ## Mesh 1
# + id="eGewK6iSiyIj"
# + [markdown] id="QEf3rGuUi_CJ"
# ## Mesh 2
# + id="IX2nXOYrjABv"
# + [markdown] id="s_7F55yUjBmH"
# ## Mesh 3
# + id="z66da9jRjCOD"
| mec647_BCs_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Solution Notebook
# ## Problem: Search a sorted matrix for an item.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Are items in each row sorted?
# * Yes
# * Are items in each column sorted?
# * Yes
# * Is the sorting in ascending or descending order?
# * Ascending
# * Is the matrix a rectangle? Not jagged?
# * Yes
# * Is the matrix square?
# * Not necessarily
# * Is the output a tuple (row, col)?
# * Yes
# * Is the item you are searching for always in the matrix?
# * No
# * Can we assume the inputs are valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None -> Exception
# * General case
# * Item found -> (row, col)
# * Item not found -> None
# ## Algorithm
#
# <pre>
#
# Find 60 (val = 60)
#
# 20 40 63 80
# 30 50 80 90
# 40 60 100 110
# 50 65 105 150
#
# * If the start of a col > val, look left
# * If the end of a col < val, look right
# * If the start of row > val, look up
# * If the end of a row < val, look down
#
# If we start at the upper right corner, we just need to use these cases:
#
# * If the start of a col > val, look left
# * If the end of a row < val, look down
#
# </pre>
#
# Complexity:
# * Time: O(n + m), where n and m are the matrix dimensions
# * Space: O(1)
# ## Code
class SortedMatrix(object):
    """Search a matrix whose rows and columns are sorted ascending."""

    def find_val(self, matrix, val):
        """Return (row, col) of val in the sorted matrix, or None if absent.

        Starts at the top-right corner: moving left decreases values,
        moving down increases them, giving O(n + m) time and O(1) space.

        Raises:
            TypeError: if matrix or val is None.
        """
        if matrix is None or val is None:
            raise TypeError('matrix and val cannot be None')
        # Robustness: the constraints say inputs are not assumed valid.
        # An empty matrix (or empty first row) trivially contains nothing;
        # previously this raised IndexError on matrix[0].
        if not matrix or not matrix[0]:
            return None
        row = 0
        col = len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            current = matrix[row][col]
            if current == val:
                return (row, col)
            elif current > val:
                col -= 1  # everything below in this column is even larger
            else:
                row += 1  # everything left in this row is even smaller
        return None
# ## Unit Test
# +
# %%writefile test_search_sorted_matrix.py
import unittest
class TestSortedMatrix(unittest.TestCase):
    """Unit tests for SortedMatrix.find_val."""

    def test_find_val(self):
        matrix = [[20, 40, 63, 80],
                  [30, 50, 80, 90],
                  [40, 60, 110, 110],
                  [50, 65, 105, 150]]
        searcher = SortedMatrix()
        # None inputs must be rejected with TypeError.
        self.assertRaises(TypeError, searcher.find_val, None, None)
        # A value larger than every entry is reported as absent.
        self.assertEqual(searcher.find_val(matrix, 1000), None)
        # A present value reports its (row, col) position.
        self.assertEqual(searcher.find_val(matrix, 60), (2, 1))
        print('Success: test_find_val')
def main():
    """Run the SortedMatrix test directly, outside any test runner."""
    suite = TestSortedMatrix()
    suite.test_find_val()

if __name__ == '__main__':
    main()
# -
# %run -i test_search_sorted_matrix.py
| sorting_searching/search_sorted_matrix/search_sorted_matrix_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Packages
# +
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
import numpy as np
import glob
import cv2
import os
# %matplotlib inline
# -
# # Lane Detection Pipeline
#
# This section performs the following operations to detect (and draw) lanes in a frame:
#
# 1. Camera calibration
# 2. Distortion correction
# 3. Color/gradient threshold
# 4. Perspective transform
# 5. Histogram peak extraction
# 6. Sliding window lane search
# 7. Searching from prior lane bounds (look-ahead filter)
# 8. Inverse perspective transform
#
# ## Basic Test Pipeline
#
# A basic pipeline will be developed to test on a set of test images. This provides core functionality to the entire pipeline, and includes the following:
#
# ### Image Pre-Processing
# 1. Camera calibration
# 2. Distortion correction
# 3. Color/gradient threshold
# 4. Perspective transform
#
# ### Lane Detection
# 1. Histogram peak extraction
# 2. Sliding window lane search
#
# ### Visualization
# 1. Inverse perspective warp
# # Helper Functions
def calibrate(images=None, nx=5, ny=5, vis=False, verbose=False):
    '''
    Calculates the camera calibration matrix and distortion coefficients
    using a sample of chessboard images.

    Args:
        images: iterable of chessboard image file paths.
        nx, ny: number of non-edge (inner) corners along x and y.
        vis: if True, display the detected corners for each valid image.
        verbose: if True, print how many calibration images were usable.

    Returns:
        (ret, mtx, dist, rvec, tvec) as produced by cv2.calibrateCamera.

    Raises:
        ValueError: if no image contains a detectable chessboard.
    '''
    # Avoid the mutable-default-argument pitfall; treat None as "no images".
    if images is None:
        images = []
    objpoints = []  # 3D points in real world space
    imgpoints = []  # 2D points in the image plane
    # Template object points for an nx-by-ny inner-corner grid at z = 0.
    objp = np.zeros((nx*ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    valid_images = 0
    total_images = len(images)
    gray = None
    for image in images:
        img = mpimg.imread(image)
        # Corner detection runs on a grayscale copy.
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # TODO: Add 8-bit image check, which is necessary if a .png is
        # imported instead of a .jpg.
        # Conversion is (gray*255).astype(np.uint8)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # If corners are found, record the object and image points.
        if ret == True:
            imgpoints.append(corners)
            objpoints.append(objp)
            valid_images += 1
            # Visualize the corners
            if vis == True:
                img = cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
                plt.imshow(img)
                plt.show()
    if verbose == True:
        print('{} of the {} calibration images were valid'.format(valid_images, total_images))
    # Without at least one valid image, `gray` would be undefined below and
    # cv2.calibrateCamera would have nothing to work with; fail clearly instead
    # of raising a confusing NameError.
    if not objpoints:
        raise ValueError('No chessboard corners found in any calibration image.')
    # Calibrate the camera using the accumulated object and image points.
    ret, mtx, dist, rvec, tvec = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return ret, mtx, dist, rvec, tvec
# ## 1. Camera Calibration
images = glob.glob('camera_cal/calibration*.jpg')
# Calibrate the camera
ret, mtx, dist, rvec, tvec = calibrate(images, nx=9, ny=6, verbose=True)
# ## 2. Distortion Correction
# +
# Testing the calibration
test_img = mpimg.imread('camera_cal/calibration2.jpg')
test_gray = cv2.cvtColor(test_img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(test_gray, (9,6), None)
if ret == True:
test_img = cv2.drawChessboardCorners(test_img, (9,6), corners, ret)
plt.imshow(test_img)
plt.title('Original (Distorted) Image')
plt.show()
dst = cv2.undistort(test_img, mtx, dist, None, mtx)
plt.imshow(dst)
plt.title('Undistorted Image')
plt.show()
# -
# ## 3. Color and Gradient Threshold
# ### Sobel Helper Functions
# +
def abs_sobel_thresh(image, orient='x', sobel_kernel=3, thresh=(0, 255)):
    '''
    Returns a binary image based on thresholding the absolute value of a
    directional Sobel derivative.

    Args:
        image: RGB image array.
        orient: 'x' or 'y', the differentiation axis.
        sobel_kernel: odd Sobel aperture size.
        thresh: (low, high) inclusive bounds on the scaled gradient.

    Returns:
        uint8 binary mask (0/1) the size of the input image.

    Raises:
        ValueError: if orient is neither 'x' nor 'y'.
    '''
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        # Previously an invalid orient left sobel = None and failed later
        # inside numpy with a confusing message; fail fast instead.
        raise ValueError("orient must be 'x' or 'y', got {!r}".format(orient))
    # Scale the absolute gradient to the 0-255 range.
    abs_sobel = np.absolute(sobel)
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # Creating a binary mask within the threshold band
    grad_binary = np.zeros_like(scaled_sobel)
    grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return grad_binary
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
    '''
    Returns a binary image based on the magnitude of a sobel operation.
    '''
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Derivatives along each axis.
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean gradient magnitude, rescaled into the 0-255 range.
    magnitude = np.sqrt(grad_x**2 + grad_y**2)
    scaled = np.uint8(255*magnitude/np.max(magnitude))
    # Keep only pixels whose magnitude falls inside the threshold band.
    mag_binary = np.zeros_like(scaled)
    mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1
    return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
    '''
    Returns a binary image based on the threshold of the gradient of a Sobel operation.
    '''
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Absolute derivatives along each axis.
    abs_grad_x = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    abs_grad_y = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    # Gradient direction, confined to [0, pi/2] by the absolute values.
    direction = np.arctan2(abs_grad_y, abs_grad_x)
    # Keep only pixels whose direction falls inside the threshold band.
    dir_binary = np.zeros_like(direction)
    dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return dir_binary
# +
test_img = mpimg.imread('test_images/straight_lines1.jpg')
# Testing Sobel Operations
sobel_binx = abs_sobel_thresh(test_img, orient='x', thresh=(20,100))
sobel_biny = abs_sobel_thresh(test_img, orient='y', thresh=(20,100))
sobel_mag = mag_thresh(test_img, mag_thresh=(20,100))
sobel_dir = dir_threshold(test_img, thresh=(0.53, 1.13))
# Plotting Sobel Operation
gs = gridspec.GridSpec(3, 4)
gs.update(wspace=0.2, hspace=0.2)
plt.figure(figsize=(10,10))
ax0 = plt.subplot(gs[0, :2], )
ax1 = plt.subplot(gs[0, 2:])
ax2 = plt.subplot(gs[1, :2], )
ax3 = plt.subplot(gs[1, 2:])
ax4 = plt.subplot(gs[2, 1:3])
ax0.set_title('Sobel X')
ax1.set_title('Sobel Y')
ax2.set_title('Magnitude Sobel')
ax3.set_title('Direction (Gradient) Threshold')
ax4.set_title('Original Image')
ax0.imshow(sobel_binx, cmap='gray')
ax1.imshow(sobel_biny, cmap='gray')
ax2.imshow(sobel_mag, cmap='gray')
ax3.imshow(sobel_dir, cmap='gray')
ax4.imshow(test_img);
# -
# #### Sobel Note
# > The X-direction Sobel operation performs the best, as it captures the most detail in lane lines with the least amount of detail captured for the surroundings.
# ### Color Mask Helper Functions
def color_mask(image, thresh=(0,255)):
    '''
    Returns a binary image with thresholds applied to the saturation channel of the image.
    '''
    # Work in HLS space; channel index 2 is saturation.
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    saturation = hls[:, :, 2]
    # Mark pixels whose saturation lies within the inclusive threshold band.
    color_binary = np.zeros_like(saturation)
    color_binary[(saturation >= thresh[0]) & (saturation <= thresh[1])] = 1
    return color_binary
# +
# Plotting Sobel Operation
gs = gridspec.GridSpec(1, 4)
gs.update(wspace=0.2, hspace=0.2)
plt.figure(figsize=(10,10))
ax0 = plt.subplot(gs[0, :2], )
ax1 = plt.subplot(gs[0, 2:])
ax0.set_title('S-Channel Mask')
ax1.set_title('Original Image')
ax0.imshow(color_mask(test_img, thresh=(175,255)), cmap='gray')
ax1.imshow(test_img);
# +
col_mask = color_mask(test_img, thresh=(175,255))
grad_mask = abs_sobel_thresh(test_img, orient='x', thresh=(20,100))
colgrad_mask = np.zeros_like(test_img[:,:,0])
colgrad_mask[(col_mask == 1) | (grad_mask == 1)] = 1
# Plotting Color and Gradient Combined Mask
gs = gridspec.GridSpec(1, 4)
gs.update(wspace=0.2, hspace=0.2)
plt.figure(figsize=(10,10))
ax0 = plt.subplot(gs[0, :2], )
ax1 = plt.subplot(gs[0, 2:])
ax0.set_title('Combined Mask')
ax1.set_title('Original Image')
ax0.imshow(colgrad_mask, cmap='gray')
ax1.imshow(test_img);
# -
# ## 4. Perspective Transform
| .ipynb_checkpoints/P2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.12 64-bit (''base'': conda)'
# name: python3
# ---
# # Analysis of HD106315b (Visit 2) using `pycheops`
#
# In the following notebook, we will analyse the data from `CHEOPS` visit 2 data of HD106315b using `pycheops`.
#
# The method is similar to that used in the analysis of KELT-11b data ([https://github.com/Jayshil/pycheops-tutorials/blob/main/KELT-11/p2_kelt11_extra_decorr_parm.ipynb](https://github.com/Jayshil/pycheops-tutorials/blob/main/KELT-11/p2_kelt11_extra_decorr_parm.ipynb)).
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from dace.cheops import Cheops
import pycheops
from uncertainties import ufloat
from uncertainties.umath import sqrt as usqrt
import lmfit as lmf
import re
from pycheops.utils import phaser
# ## Downloading the data
# +
# Downloading data
dd = pycheops.Dataset('CH_PR100041_TG001401_V0200')
tim, fl, fle = dd.get_lightcurve(aperture='DEFAULT', decontaminate=True)#, reject_highpoints=True)
# To clip outliers (I would, in general, not prefer using this)
tim, fl, fle = dd.clip_outliers(verbose=True);
# Plotting the data
plt.figure(figsize=(16,9))
plt.errorbar(tim, fl, yerr=fle, fmt='.', c='orangered')
plt.xlabel('Time (BJD)')
plt.ylabel('Normalised Flux')
plt.title('Transit lightcurve for HD106315b (Aperture: OPTIMAL)')
plt.grid()
# -
# ### Planetary check
#
# We may want to check the position of various planetary bodies with respect to the target star, because if some of the bodies is very near to the target star then we may want to correct for their effects.
dd.planet_check()
# ## Stellar and Planetary properties
#
# Knowledge of the stellar and planetary properties of the system under investigation can prove to be useful while analysing the system. We can use some of the properties as priors in our analysis. In `pycheops` there are some in-built tools available which can go through the available stellar and planetary catalogues (like SWEET-Cat, or DACE) to give us the properties. Below, we show how to access such properties.
# For planetary properties
# This cell is copied from pycheops examples notebook
# The planet properties will not provide good results
# Hence, we will use ExoCTK to get planetary parameters.
"""
try:
hd106315b = pycheops.PlanetProperties('HD 106315b',query_dace=True)
except:
hd106315b = pycheops.PlanetProperties('HD 106315b',query_dace=False, query_tepcat=False,
depth = ufloat(298.909, 20.791), # PPM
width = ufloat(0.1572,0.0034), # days (that means that is transit duration -- in days)
ecosw=0.,#ufloat(-0.0040, 0.0490),
esinw=0.,#ufloat(0.0310, 0.0680),
T0 = ufloat(2457586.5487, 0.0029),
P = ufloat(9.55237, 0.00089))
print(hd106315b)
"""
# Using ExoCTK to get planetary properties
rprs = np.random.normal(0.01728, 0.0006, 10000)
dep, dep_err = np.mean((rprs**2)*1e6), np.std((rprs**2)*1e6)
# Well..., apparently, I cannot (even after studying two semesters at Geneva) access DACE-Planets. No problem --- we have TEPCat at our service. We derived parameters from there. We can now try searching for the host star.
# +
"""
star = pycheops.StarProperties(dd.target)
print(star)
"""
teff = np.random.normal(6450, 105, 10000)
logg = np.random.normal(4.28, 0.10, 10000)
mh = np.random.normal(-0.23, 0.14, 10000)
h1, h2 = np.zeros(10000), np.zeros(10000)
cdc = pycheops.ld.stagger_power2_interpolator('CHEOPS')
for i in range(len(teff)):
c1, c2, h1[i], h2[i] = cdc(teff[i], logg[i], mh[i])
h1 = h1[np.isfinite(h1)]
h2 = h2[np.isfinite(h2)]
# -
# We have it! Here `h_1` and `h_2` are power2 law limb darkening coefficients.
# ## Renormalising the lightcurve
#
# By a careful observation of the KELT-11 lightcurve, it can be observed that the out-of-transit values of the lightcurve is not equal to unity. So we may want to flatten the lightcurve using the `dataset.flatten` command as below,
# +
# This code is taken from the pycheops example notebook
P = 9.552105
BJD_0 = 2457586.5267
cycle = round((dd.bjd_ref-BJD_0)/P)
T_0 = BJD_0 - dd.bjd_ref + cycle*P
D = dep/1e6 # Depth stored in ppm
W = 0.15729166666666666/P # Width stored in days
try:
f_c = 0.#np.sqrt(0.093)*np.sin(67.0*np.pi/180)
f_s = 0.#np.sqrt(0.093)*np.cos(67.0*np.pi/180)
except:
# From Pepper et al., 2017
ecosw = 0.#ufloat(-0.004,0.05099)
esinw = 0.#ufloat(0.031,0.055)
ecc = usqrt(ecosw**2+esinw**2)
f_s = 0.#esinw/usqrt(ecc) # f_s = sqrt(e)sin(omega) = e.sin(omega)/sqrt(e)
f_c = 0.#ecosw/usqrt(ecc) # f_c = sqrt(e)cos(omega) = e.cos(omega)/sqrt(e)
tim, fl, fle = dd.flatten(T_0, P*W)
# Plotting the data
plt.figure(figsize=(16,9))
plt.errorbar(tim, fl, yerr=fle, fmt='.', c='orangered')
plt.axvline(T_0, c='k')
plt.axvline(x=T_0-(W*P/2), c='cornflowerblue', lw=3)
plt.axvline(x=T_0+(W*P/2), c='cornflowerblue', lw=3)
plt.xlabel('Time (BJD)')
plt.ylabel('Normalised Flux')
plt.title('Transit lightcurve for HD106315b (Aperture: OPTIMAL)')
plt.grid()
# -
# ## Detrending (or, decorrelating) the dataset
#
# To perform this operation of detrending we may want to look at diagnostic report for this observations. The diagnostic report consist of various plots of flux as a function of several instrumental properties like roll angle of the spacecraft, centroid position etc. This would enable us to see if there is any trend going on with instruments so that we can take care of it. So, first let's see the diagnostic report...
dd.diagnostic_plot()
# ### Ramp correction
# A ramp in the flux, usually at the beginning of the visit, is often observed in the lightcurves due to thermal effects (well, it primarily occurs due to a small-scale change in the shape of the PSF, which happens due to slightly more defocusing of the telescope. The main reason for the latter is the thermal effects). The effect is well characterised for aperture radii between ~22 to 40 pixels. We can correct for this effect using the function `dataset.correct_ramp`.
#
# Note that since this effect is well described only for a range of pixels, it is not advisable to make this correction if the aperture size is beyond this range.
tim, fl, fle = dd.correct_ramp(plot=True)
# ### Performing decorrelation
#
# We can see above that, `Dataset.should_I_decorr` function suggests that several decorrelations should be done. Various parameters shows different correlations with different properties of the instrument. From $\texttt{pycheops-cookbook}$ I mention where each of these parameters belong:
#
# - flux versus time: $\texttt{dfdt}$, $\texttt{d2fdt2}$
# - flux versus x-centroid: $\texttt{dfdx}$, $\texttt{d2fdx2}$
# - flux versus y-centroid: $\texttt{dfdy}$, $\texttt{d2fdy2}$
# - flux versus roll angle: $\texttt{dfdsinphi}$, $\texttt{dfdcosphi}$, $\texttt{dfdsin2phi}$, $\texttt{dfdcos2phi}$, $\texttt{dfdsin3phi}$, $\texttt{dfdcos3phi}$
# - flux versus background: $\texttt{dfdbg}$
# - flux versus contamination: $\texttt{dfdcontam}$
# - flux versus smear: $\texttt{dfdsmear}$
#
# Note that not each of the parameters have same units (especially $\texttt{dfdt}$), therefore, one may want to take care while defining these parameters in analysis.
#
# There are (I think) two ways to take care of these correlations --- we can either use `dataset.decorr` function to take care of these correlations, or we can do it in the later stage of fitting the data. There is a little advantage of doing this in a later stage of curve fitting: if we do at that point, we can see the effect of <i>adding</i> parameters to the fitting with Bayes' factors. We can even neglect additional parameters which are not supported by the data.
#
# Below, we try to include one by one parameter in the model and see its effect on the fitting in form of the Bayes' factor. However, before including decorrelation parameters, we fit <i>without</i> any decorrelation parameters. We can use results from this fitting as priors for the modelling which includes decorrelation parameters.
#
# ### Fitting without decorrelation
#
# #### A note on using priors
#
# The priors on the parameters can be find using `uncertainties` or `lmfit` module, or simply by using a tuple. To give normal priors with mean $\mu$ and standard deviation $\sigma^2$, one can use ufloat($\mu$, $\sigma^2$). A uniform prior with upper ($u$) and lower ($l$) bound can be given by a tuple ($l$, $u$), with initial value halfway of the range. If we want to specify the initial value ($in$), we can add it as a third term within the tuple, as ($l$, $in$, $u$). Furthermore, one can use [Parameter](https://lmfit.github.io/lmfit-py/parameters.html#the-parameter-class) object of `lmfit`. It can be defined as,
#
# ```
# lmfit.Parameter(name (str) = name of the parameter,
# value (float) = numerical parameter value,
# vary (bool) = Whether the parameter is varied during a fit (default is True,)
# min (float) = lower bound (default is -np.inf),
# max (float) = upper bound (default is np.inf),
# expr (str) = mathematical expression used to constrain the value during the fit,
# ***other kwargs)
# ```
# +
# Codes from the pycheops example notebook
lmfit0 = dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045))
dd.plot_lmfit(binwidth=0.02, figsize=(10,6), fontsize=14);
#plt.ylim([-0.001, 0.001])
# -
# There is it! Our first fit using `pycheops`. We can see the diagnostic report below.
print(dd.lmfit_report())
# ### Fitting with decorrelation
#
# We now attempt to fit the data with decorrelation parameters. As said previously, we add one by one decorrelation parameters in the fitting procedure and check the Bayes' factor each time.
#
# We first want to check the RMS of the residuals which we can use as standard deviation of decorrelation parameters' priors in the fitting.
sigma0 = lmfit0.rms
dprior = ufloat(0, sigma0)
tprior = ufloat(0, sigma0/np.ptp(tim)) # Remember? Some of the priors, like dfdt, has different units.
# +
# This code is taken from pycheops example notebook
detrend = {}
bestbf = 0
#"""
allpar = ['dfdsinphi','dfdcosphi',
'dfdsin2phi','dfdcos2phi',
'dfdsin3phi','dfdcos3phi',
'dfdx', 'dfdy', 'dfdsmear',
'dfdbg','dfdt', 'dfdcontam']
#"""
#allpar = ['dfdbg', 'dfdsmear', 'dfdt']#, 'dfdx', 'dfdy']
print('Parameter BF Delta_BIC RMS(ppm)')
while bestbf < 1:
bestbf = np.inf
for p in allpar: # This loop will put one by one parameter in the fit and see which one produce lowest Bayes' factor.
dtmp = detrend.copy() ## Copy Updated detrend dictionary to temporary dtmp dict
dtmp[p] = tprior if p == 'dfdt' else dprior
lmfit = dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **dtmp)
bre = re.compile(r'{}: *(\d+\.\d{{3}})\n'.format(p))
m = bre.findall(dd.lmfit_report())
if len(m) > 0:
bf = float(m[-1])
if bf < bestbf:
bestbf = bf
newpar = p
if bestbf < 1: # This condition will add lowest Bayes' factor parameter into detrend dict. if it has Bayes' factor <1
print(f'+{newpar:<12s} {bestbf:6.2f} {lmfit.bic-lmfit0.bic:8.1f} {1e6*lmfit.rms:8.1f}')
detrend[newpar] = tprior if newpar == 'dfdt' else dprior
allpar.remove(newpar) # If parameter is added to the dict then get removed from list of all parameters
# This process sometimes leads to a set of parameters that includes a few parameters that are strongly correlated with one another
# and so are therefore not well determined, i.e. they have large Bayes factors.
# So, after adding parameters one-by-one, go through a process of repeatedly removing the parameter with the largest Bayes factor
# if any of the parameters have a Bayes factors Bp>1.
worstbf = 10
while worstbf > 1:
worstbf = 0
for p in detrend: # This loop finds the parameter with largest Bayes' factor
bre = re.compile(r'{}: *(\d+\.\d{{3}})\n'.format(p))
m = bre.findall(dd.lmfit_report())
if len(m) > 0:
bf = float(m[-1])
if bf > worstbf:
worstbf = bf
delpar = p
if worstbf > 1: # This condition removes the parameter with largest Bayes' factor if it is >1
del detrend[delpar]
lmfit = dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **detrend)
print(f'-{delpar:<12s} {worstbf:6.2f} {lmfit.bic-lmfit0.bic:8.1f} {1e6*lmfit.rms:8.1f}')
# -
# The remaining parameters which would be useful in decorrelating are the ones stored in the `detrend` dictionary. Note that the difference in BIC is reduced with each addition/removal of a parameter.
print(detrend)
# We can now use this parameters to detrend the dataset using the `lmfit_transit` function.
# +
lmfit1 = dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4), b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **detrend)
# Note the usage of .n in star.h_2.n which means that it will retrieve only one value and thus fixing the parameter.
print(dd.lmfit_report())
# -
# We can plot the fitting
dd.plot_lmfit(binwidth=0.02, figsize=(10,6), fontsize=14, detrend=True);
#plt.ylim([-0.001, 0.001])
# In above plot, light blue points are flux/trend, while the dark blue ones are the binned flux/trend. Transit model is shown in green and (transit model/trend) is brown line.
#
# We can now check the roll angle plot to see if there is any trend remains. `dataset.rollangle_plot` gives the plot of residuals in the last fit with respect to the roll angle.
fig = dd.rollangle_plot()
#plt.ylim([-0.00075, 0.00075])
# #### Adding glint and moon-glint
#
# Apparently, when the observations were taken, the moon was very near ($\sim 16^\circ$) to the target star. And the position of the moon changed significantly during the observations. Therefore we may want to correct for this effect. Now, since we have already corrected for glint, we want to mask the transit and perform this moon-glint correction only over out-of-transit points. To do so we create a mask. Then we will add the moon glint for out-of-transit points.
glint_func = dd.add_glint(nspline=48,binwidth=5,figsize=(10,4),gapmax=5)
#plt.ylim([-0.001, 0.001])
dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **detrend,
glint_scale=(0,2))
print(dd.lmfit_report(min_correl=0.5))
dd.plot_lmfit(binwidth=0.02, figsize=(10,6), fontsize=14, detrend=True,
title='Including glint');
#plt.ylim([-0.001, 0.001])
fig = dd.rollangle_plot()
#plt.ylim([-0.001, 0.001])
""" Commenting out moon glint as the normal glint is, I think, working well!!
phase = phaser(tim,lmfit.params['P'],lmfit.params['T_0'],-0.5)
mask = abs(phase) < lmfit.params['W']/2
moon_glint = dd.add_glint(moon=True,nspline=48,binwidth=5,
fit_flux=True, mask=mask,
figsize=(10,5))
#plt.ylim([-0.001, 0.001])
"""
"""
dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=star.h_1, h_2=star.h_2.n,
logrhoprior=star.logrho, **detrend,
glint_scale=(0,2))
print(dd.lmfit_report(min_correl=0.5))
"""
# +
worstbf = 10
while worstbf > 1:
worstbf = 0
for p in detrend: # This loop finds the parameter with largest Bayes' factor
bre = re.compile(r'{}: *(\d+\.\d{{3}})\n'.format(p))
m = bre.findall(dd.lmfit_report())
if len(m) > 0:
bf = float(m[-1])
if bf > worstbf:
worstbf = bf
delpar = p
if worstbf > 1: # This condition removes the parameter with largest Bayes' factor if it is >1
del detrend[delpar]
lmfit = dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **detrend)
print(f'-{delpar:<12s} {worstbf:6.2f} {lmfit.bic-lmfit0.bic:8.1f} {1e6*lmfit.rms:8.1f}')
## Fitting for the rest of the parameters
dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **detrend,
glint_scale=(0,2))
print(dd.lmfit_report(min_correl=0.5))
# +
#del detrend['dfdbg']
result = dd.lmfit_transit(T_0 = ufloat(T_0, 0.1), P=P,
D=(D/4, D, D*4), W=(W/4, W, W*4),b=(0,0.5,1),
f_c=f_c, f_s=f_s,
h_1=np.mean(h1), h_2=np.mean(h2),
logrhoprior=ufloat(-0.229,0.045), **detrend,
glint_scale=(0,2))
print(dd.lmfit_report(min_correl=0.5))
# -
fig = dd.rollangle_plot()
#plt.ylim([-0.001, 0.001])
dd.plot_lmfit(binwidth=0.02, figsize=(10,6), fontsize=14, detrend=True);
#plt.ylim([-0.001, 0.001])
result.params
# In the end, let's save our results in a file!
dd.save()
| HD106315/Paper/pp1_hd106315_visit2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Working with Symbolic Expressions
# ## 0.1 Finding an exact derivative with a computer algebra system
# ## 0.2 Doing symbolic algebra in Python
from math import sin
def f(x):
    """Evaluate (3x^2 + x) * sin(x) at the given x."""
    polynomial_part = 3 * x ** 2 + x
    return polynomial_part * sin(x)
# # 1 Modeling algebraic expressions
# ## 1.1 Breaking an expression into pieces
# ## 1.2 Building an expression tree
# ## 1.3 Translating the expression tree to Python
class Power():
    """Combinator: a base expression raised to an exponent expression."""

    def __init__(self, base, exponent):
        # Store the two halves of base ** exponent.
        self.exponent = exponent
        self.base = base
# +
class Number():
    """Leaf element representing a literal numeric value."""
    def __init__(self,number):
        self.number = number
class Variable():
    """Leaf element representing a named variable such as x."""
    def __init__(self,symbol):
        self.symbol = symbol
# -
# This represents $x^2$:
Power(Variable("x"),Number(2))
class Product():
    """Combinator: the product of two sub-expressions."""

    def __init__(self, exp1, exp2):
        # Keep both operands in the order they were given.
        self.exp2 = exp2
        self.exp1 = exp1
# This represents $3x^2$:
Product(Number(3),Power(Variable("x"),Number(2)))
# +
class Sum():
    """Combinator: the sum of an arbitrary number of sub-expressions."""
    def __init__(self, *exps): #<1>
        self.exps = exps
class Function(): #<2>
    """A named function (e.g. "sin"), applied to an argument via Apply."""
    def __init__(self,name):
        self.name = name
class Apply(): #<3>
    """Combinator: a Function applied to an argument expression."""

    def __init__(self, function, argument):
        self.argument = argument
        self.function = function
# The running example f(x) = (3*x**2 + x) * sin(x), built as an expression tree.
f_expression = Product( #<4>
    Sum(
        Product(
            Number(3),
            Power(
                Variable("x"),
                Number(2))),
        Variable("x")),
    Apply(
        Function("sin"),
        Variable("x")))
# -
# This represents $\cos(x^3 + -5)$:
Apply(Function("cos"),Sum(Power(Variable("x"),Number("3")), Number(-5)))
# ## 1.4 Exercises
# **Exercise:** Draw the expression $\ln(y^z)$ as a tree built out of elements and combinators from this section.
# **Exercise:** Translate the expression from the previous exercise to Python code. Write it both as a Python function and as a data structure built from elements and combinators.
# **Solution:** Here's the ordinary Python function
from math import log
def f(y, z):
    """Evaluate the natural log of y raised to the z power."""
    power = y ** z
    return log(power)
# Here's the data structure:
Apply(Function("ln"), Power(Variable("y"), Variable("z")))
# **Exercise:** Implement a “Quotient” combinator representing one expression divided by another. How do you represent the following expression? $$\frac{a+b}{2}$$
class Quotient():
    """Combinator: one expression divided by another."""

    def __init__(self, numerator, denominator):
        self.denominator = denominator
        self.numerator = numerator
# Here's the representation of $(a+b)/2$:
Quotient(Sum(Variable("a"),Variable("b")),Number(2))
# **Exercise:** Implement a `Difference` combinator representing one expression subtracted from another. How can you represent the expression $b^2 - 4ac$?
# **Solution:**
class Difference():
    """Combinator: the second expression subtracted from the first."""

    def __init__(self, exp1, exp2):
        self.exp2 = exp2
        self.exp1 = exp1
# $b^2 - 4ac$ is then represented by:
Difference(
Power(Variable('b'),Number(2)),
Product(Number(4),Product(Variable('a'), Variable('c'))))
# **Exercise:** Implement a `Negative` combinator, representing the negation of an expression. For example, the negation of $x^2 + y$ is $-(x^2 + y)$. Represent the latter expression in code using your new combinator.
class Negative():
    """Combinator: the negation of a single expression."""
    def __init__(self,exp):
        self.exp = exp
# $-(x^2 + y)$ is represented by:
Negative(Sum(Power(Variable("x"),Number(2)),Variable("y")))
# **Exercise:** Add a Function called `"sqrt"` representing a square root, and use it to encode the following formula:
#
# $$\frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$
# Symbols used in the quadratic formula.
A = Variable('a')
B = Variable('b')
C = Variable('c')
Sqrt = Function('sqrt')
# The quadratic formula (-b + sqrt(b^2 - 4ac)) / (2a) as an expression tree.
Quotient(
    Sum(
        Negative(B),
        Apply(
            Sqrt,
            Difference(
                Power(B,Number(2)),
                Product(Number(4), Product(A,C))))),
    Product(Number(2), A))
# **Mini-project:** Create an abstract base class called Expression and make all of the elements and combinators inherit from it. For instance, class Variable() should become class Variable(Expression). Then, overload the Python arithmetic operations +, -, *, and / so they produce Expression objects. For instance, the code 2 * Variable(“x”) + 3 should yield: Sum(Product(Number(2), Variable(“x”)), Number(3)).
# **Solution:** see "expressions.py" file, and section 2.2 and beyond below.
# # 2 Putting a symbolic expression to work
# ## 2.1 Finding all the variables in an expression
def distinct_variables(exp):
    """Return the set of variable symbols appearing anywhere in exp.

    Recursively walks the expression tree, unioning the variables found
    in each sub-expression.

    Raises:
        TypeError: if exp is not a recognized expression node.
    """
    if isinstance(exp, Variable):
        # Bug fix: set(exp.symbol) iterates the string, so a multi-character
        # symbol like "ab" became {'a', 'b'}. Wrap the symbol in a set
        # literal so it stays a single element.
        return {exp.symbol}
    elif isinstance(exp, Number):
        # Constants contain no variables.
        return set()
    elif isinstance(exp, Sum):
        return set().union(*[distinct_variables(e) for e in exp.exps])
    elif isinstance(exp, Product):
        return distinct_variables(exp.exp1).union(distinct_variables(exp.exp2))
    elif isinstance(exp, Power):
        return distinct_variables(exp.base).union(distinct_variables(exp.exponent))
    elif isinstance(exp, Apply):
        # The function name itself is not a variable; only its argument counts.
        return distinct_variables(exp.argument)
    else:
        raise TypeError("Not a valid expression.")
distinct_variables(Variable("z"))
distinct_variables(Number(3))
distinct_variables(f_expression)
# ## 2.2 Evaluating an expression
# +
from abc import ABC, abstractmethod
class Expression(ABC):
    """Abstract base class for all symbolic expression nodes."""

    @abstractmethod
    def evaluate(self, **bindings):
        """Compute this expression's numeric value given variable bindings."""
# -
# Note: we are redefining these classes now.
# +
class Number(Expression):
    """A literal constant; evaluates to its own value."""

    def __init__(self, number):
        self.number = number

    def evaluate(self, **bindings):
        # Constants ignore the variable bindings entirely.
        return self.number
class Variable(Expression):
    """A named variable; evaluates to the value bound to its symbol."""

    def __init__(self, symbol):
        self.symbol = symbol

    def evaluate(self, **bindings):
        """Look up this variable's value in the bindings.

        Raises:
            KeyError: if no binding exists for this symbol.
        """
        try:
            return bindings[self.symbol]
        except KeyError:
            # A bare `except:` here would also swallow unrelated errors
            # (even KeyboardInterrupt); catch only the missing-key case.
            raise KeyError("Variable '{}' is not bound.".format(self.symbol))
class Product(Expression):
    """Product of two sub-expressions."""

    def __init__(self, exp1, exp2):
        self.exp1 = exp1
        self.exp2 = exp2

    def evaluate(self, **bindings):
        # Evaluate each factor under the same bindings, then multiply.
        left = self.exp1.evaluate(**bindings)
        right = self.exp2.evaluate(**bindings)
        return left * right
# -
Product(Variable("x"), Variable("y")).evaluate(x=2,y=5)
# +
import math
from math import sin, cos, log
# Maps the function names used in Apply nodes to their math implementations.
_function_bindings = {
    "sin": math.sin,
    "cos": math.cos,
    "ln": math.log
}
class Apply(Expression):
    """Application of a named function to an argument expression."""

    def __init__(self, function, argument):
        self.function = function
        self.argument = argument

    def evaluate(self, **bindings):
        # Evaluate the argument first, then feed it to the bound math function.
        inner_value = self.argument.evaluate(**bindings)
        return _function_bindings[self.function.name](inner_value)
# -
# From the text: *... Similarly, we can add an “evaluate” method to the Sum, Power, Difference, or Quotient combinators....*
# +
class Sum(Expression):
    """Sum of an arbitrary number of sub-expressions."""

    def __init__(self, *exps):
        self.exps = exps

    def evaluate(self, **bindings):
        # Total every term evaluated under the same bindings.
        return sum(exp.evaluate(**bindings) for exp in self.exps)
class Power(Expression):
    """A base expression raised to an exponent expression."""

    def __init__(self, base, exponent):
        self.base = base
        self.exponent = exponent

    def evaluate(self, **bindings):
        base_value = self.base.evaluate(**bindings)
        exponent_value = self.exponent.evaluate(**bindings)
        return base_value ** exponent_value
class Difference(Expression):
    """Node for subtraction: exp1 minus exp2."""

    def __init__(self, exp1, exp2):
        self.exp1 = exp1
        self.exp2 = exp2

    def evaluate(self, **bindings):
        minuend = self.exp1.evaluate(**bindings)
        subtrahend = self.exp2.evaluate(**bindings)
        return minuend - subtrahend
class Quotient(Expression):
    """Node for division: numerator over denominator."""

    def __init__(self, numerator, denominator):
        self.numerator = numerator
        self.denominator = denominator

    def evaluate(self, **bindings):
        top = self.numerator.evaluate(**bindings)
        bottom = self.denominator.evaluate(**bindings)
        return top / bottom
# -
# Redefine `f_expression` in light of the new class definitions
# Rebuild f(x) = (3x^2 + x) * sin(x) as an expression tree.
f_expression = Product( #<4>
    Sum(
        Product(
            Number(3),
            Power(
                Variable("x"),
                Number(2))),
        Variable("x")),
    Apply(
        Function("sin"),
        Variable("x")))
# Evaluate the tree at x = 5; should match the plain-Python f(5) below.
f_expression.evaluate(x=5)
# +
from math import sin

def f(x):
    """Ordinary Python version of f_expression: (3*x**2 + x) * sin(x)."""
    poly = 3 * x ** 2 + x
    return poly * sin(x)

f(5)
# -
# ## 2.3 Expanding an expression
class Expression(ABC):
    """Base class for expression nodes: evaluate, expand and display."""

    @abstractmethod
    def evaluate(self, **bindings):
        """Return the numeric value under the given variable bindings."""

    @abstractmethod
    def expand(self):
        """Return an equivalent expression with products distributed over sums."""

    @abstractmethod
    def display(self):
        """Return a constructor-style string form of the expression."""

    def __repr__(self):
        # Lets expressions print legibly at the REPL (first mini project, 2.4).
        return self.display()
# +
class Sum(Expression):
    """Variadic sum of sub-expressions."""

    def __init__(self, *exps):
        self.exps = exps

    def evaluate(self, **bindings):
        return sum(term.evaluate(**bindings) for term in self.exps)

    def expand(self):
        # Expanding a sum just expands each term.
        return Sum(*(term.expand() for term in self.exps))

    def display(self):
        inner = ",".join(term.display() for term in self.exps)
        return "Sum({})".format(inner)
class Product(Expression):
    """Product of two sub-expressions, with distribution over sums.

    ``expand`` applies the distributive law: if either factor expands to a
    Sum, the product is pushed inside each term, and every resulting
    product is expanded again so nesting is fully distributed.
    """

    def __init__(self, exp1, exp2):
        self.exp1 = exp1
        self.exp2 = exp2

    def evaluate(self, **bindings):
        return self.exp1.evaluate(**bindings) * self.exp2.evaluate(**bindings)

    def expand(self):
        expanded1 = self.exp1.expand()
        expanded2 = self.exp2.expand()
        if isinstance(expanded1, Sum):
            return Sum(*[Product(e, expanded2).expand() for e in expanded1.exps])
        elif isinstance(expanded2, Sum):
            # Bug fix: each distributed term must be expanded recursively,
            # mirroring the branch above; otherwise a term of expanded2 that
            # is itself a Sum leaves an unexpanded Product behind.
            return Sum(*[Product(expanded1, e).expand() for e in expanded2.exps])
        else:
            return Product(expanded1, expanded2)

    def display(self):
        return "Product({},{})".format(self.exp1.display(), self.exp2.display())
class Difference(Expression):
    """Subtraction node: exp1 - exp2 (left unchanged by expansion)."""

    def __init__(self, exp1, exp2):
        self.exp1 = exp1
        self.exp2 = exp2

    def evaluate(self, **bindings):
        minuend = self.exp1.evaluate(**bindings)
        subtrahend = self.exp2.evaluate(**bindings)
        return minuend - subtrahend

    def expand(self):
        # Differences are left as-is by expansion.
        return self

    def display(self):
        return "Difference({},{})".format(self.exp1.display(), self.exp2.display())
class Quotient(Expression):
    """Division node: numerator / denominator (left unchanged by expansion)."""

    def __init__(self, numerator, denominator):
        self.numerator = numerator
        self.denominator = denominator

    def evaluate(self, **bindings):
        top = self.numerator.evaluate(**bindings)
        bottom = self.denominator.evaluate(**bindings)
        return top / bottom

    def expand(self):
        return self

    def display(self):
        return "Quotient({},{})".format(self.numerator.display(), self.denominator.display())
class Negative(Expression):
    """Unary negation of a sub-expression (left unchanged by expansion)."""

    def __init__(self, exp):
        self.exp = exp

    def evaluate(self, **bindings):
        return -self.exp.evaluate(**bindings)

    def expand(self):
        return self

    def display(self):
        return "Negative({})".format(self.exp.display())
class Number(Expression):
    """Constant leaf; evaluation ignores all bindings."""

    def __init__(self, number):
        self.number = number

    def evaluate(self, **bindings):
        return self.number

    def expand(self):
        return self

    def display(self):
        return "Number({})".format(self.number)
class Power(Expression):
    """Exponentiation node (left unchanged by expansion)."""

    def __init__(self, base, exponent):
        self.base = base
        self.exponent = exponent

    def evaluate(self, **bindings):
        b = self.base.evaluate(**bindings)
        e = self.exponent.evaluate(**bindings)
        return b ** e

    def expand(self):
        return self

    def display(self):
        return "Power({},{})".format(self.base.display(), self.exponent.display())
class Variable(Expression):
    """Named-variable leaf; evaluation looks the symbol up in the bindings."""

    def __init__(self, symbol):
        self.symbol = symbol

    def evaluate(self, **bindings):
        # Raises KeyError if the symbol is unbound.
        return bindings[self.symbol]

    def expand(self):
        return self

    def display(self):
        return "Variable(\"{}\")".format(self.symbol)
class Function():
    """A named function (e.g. "sin") with optional custom LaTeX rendering."""

    def __init__(self, name, make_latex=None):
        self.name = name
        self.make_latex = make_latex

    def latex(self, arg_latex):
        # Prefer the caller-supplied renderer; otherwise fall back to
        # \operatorname{name}(arg).
        if self.make_latex:
            return self.make_latex(arg_latex)
        return f" \\operatorname{{ {self.name} }} \\left( {arg_latex} \\right)"
class Apply(Expression):
    """Application of a named Function to an argument expression."""

    def __init__(self, function, argument):
        self.function = function
        self.argument = argument

    def evaluate(self, **bindings):
        arg_value = self.argument.evaluate(**bindings)
        return _function_bindings[self.function.name](arg_value)

    def expand(self):
        # Only the argument can be expanded; the function itself is opaque.
        return Apply(self.function, self.argument.expand())

    def display(self):
        return "Apply(Function(\"{}\"),{})".format(self.function.name, self.argument.display())
# -
# Demonstrate distribution: (a + b)(y + z) expands into four products.
Y = Variable('y')
Z = Variable('z')
A = Variable('a')
B = Variable('b')
Product(Sum(A,B),Sum(Y,Z))
Product(Sum(A,B),Sum(Y,Z)).expand()
# Rebuild f(x) = (3x^2 + x) * sin(x) with the expand-capable classes.
f_expression = Product( #<4>
    Sum(
        Product(
            Number(3),
            Power(
                Variable("x"),
                Number(2))),
        Variable("x")),
    Apply(
        Function("sin"),
        Variable("x")))
f_expression.expand()
# ## 2.4 Exercises
# **Exercise:** Write a function `contains(expression, variable)` which checks whether the given expression contains any occurence of the specified variable.
def contains(exp, var):
    """Return True if expression *exp* mentions the variable *var* anywhere."""
    if isinstance(exp, Variable):
        return exp.symbol == var.symbol
    if isinstance(exp, Number):
        return False
    if isinstance(exp, Sum):
        return any(contains(term, var) for term in exp.exps)
    if isinstance(exp, Product):
        return contains(exp.exp1, var) or contains(exp.exp2, var)
    if isinstance(exp, Power):
        return contains(exp.base, var) or contains(exp.exponent, var)
    if isinstance(exp, Apply):
        return contains(exp.argument, var)
    raise TypeError("Not a valid expression.")
# **Exercise:** Write a “distinct_functions” function which takes an expression as an argument and returns the distinct, named functions like “sin” or “ln” that appear in the expression.
def distinct_functions(exp):
    """Return the set of named functions (e.g. "sin", "ln") appearing in *exp*.

    Raises TypeError for objects that are not expression nodes.
    """
    if isinstance(exp, Variable):
        return set()
    elif isinstance(exp, Number):
        return set()
    elif isinstance(exp, Sum):
        # Idiom fix: use a distinct loop variable; the original comprehension
        # shadowed the parameter ``exp`` itself.
        return set().union(*[distinct_functions(term) for term in exp.exps])
    elif isinstance(exp, Product):
        return distinct_functions(exp.exp1).union(distinct_functions(exp.exp2))
    elif isinstance(exp, Power):
        return distinct_functions(exp.base).union(distinct_functions(exp.exponent))
    elif isinstance(exp, Apply):
        return set([exp.function.name]).union(distinct_functions(exp.argument))
    else:
        raise TypeError("Not a valid expression.")
# **Exercise:** Write a function contains_sum which takes an expression and returns True if it contains a Sum and returns False otherwise.
def contains_sum(exp):
    """Return True if a Sum node appears anywhere inside *exp*."""
    if isinstance(exp, Sum):
        return True
    if isinstance(exp, (Variable, Number)):
        return False
    if isinstance(exp, Product):
        return contains_sum(exp.exp1) or contains_sum(exp.exp2)
    if isinstance(exp, Power):
        return contains_sum(exp.base) or contains_sum(exp.exponent)
    if isinstance(exp, Apply):
        return contains_sum(exp.argument)
    raise TypeError("Not a valid expression.")
# **NOTE:** For the rest of the mini-projects, consult "expressions.py".
# # 3 Finding the derivative of a function
# For the rest of the notebook, I'll use the complete implementations from `expressions.py` so I don't have to re-implement every time.
from expressions import *
# Sanity check: display an expression built from the library's classes.
Product(Power(Variable("x"),Number(2)),Apply(Function("sin"),Variable("y")))
# ## 3.1 Derivatives of powers
# ## 3.2 Derivatives of transformed functions
# ## 3.3 Derivatives of some special functions
# ## 3.4 Derivatives of products and compositions
#
# ## 3.5 Exercises
# # 4 Taking derivatives automatically
# ## 4.1 Implementing a derivative method for expressions
# Derivative of a sum is taken term by term: d/dx (x + c + 1).
Sum(Variable("x"),Variable("c"),Number(1)).derivative(Variable("x"))
# ## 4.2 Implementing the product rule and chain rule
Product(Variable("c"),Variable("x")).derivative(Variable("x"))
# NOTE(review): the bare ``x`` below is assumed to be bound to a Variable
# earlier in the notebook flow — confirm before running out of order.
Apply(Function("sin"),Power(Variable("x"),Number(2))).derivative(x)
# ## 4.3 Implementing the power rule
f_expression = Product( #<4>
    Sum(
        Product(
            Number(3),
            Power(
                Variable("x"),
                Number(2))),
        Variable("x")),
    Apply(
        Function("sin"),
        Variable("x")))
f_expression.derivative(x)
# ## 4.4 Exercises
# # 5 Integrating functions symbolically
# ## 5.1 Integrals as antiderivatives
#
# ## 5.2 Introducing the SymPy library
from sympy import *
from sympy.core.core import *
# Build y*(3+x) from SymPy's expression constructors directly.
Mul(Symbol('y'),Add(3,Symbol('x')))
y = Symbol('y')
x = Symbol('x')
y*(3+x)
# NOTE(review): ``.subs`` binds only to ``(3+x)`` (attribute access precedes
# ``*``); the result (4*y) happens to be the same here, but
# ``(y*(3+x)).subs(x,1)`` may be what was intended — confirm.
y*(3+x).subs(x,1)
(x**2).diff(x)
(3*x**2).integrate(x)
# ## 5.3 Exercises
# **Exercise:** What is the integral of $f(x) = 0$? Confirm your answer with SymPy, remembering that SymPy does not automatically include a constant of integration.
Integer(0).integrate(x)
# **Exercise:** What is the integral of $x\cdot \cos(x)$? Hint: look at the derivative of $x\sin(x)$. Confirm your answer with SymPy.
(x*cos(x)).integrate(x)
# **Exercise:** What is the integral of $x^2$? Confirm your answer with SymPy.
(x**2).integrate(x)
| Chapter 10/ch10 walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit
# name: python38364bit7354479411b643eea6e2a7e6b60690b9
# ---
# ### Checking dataset Ratings
# + tags=[]
import pandas as pd
# Load the MovieLens ratings CSV.
ratings = pd.read_csv('dataset/ml-latest-small/ratings.csv')
print(ratings.shape)
ratings.head()
# -
# #### What values exist in the rating column?
# + tags=[]
ratings['rating'].unique()
# -
# #### How many values of each type are there in the rating column?
ratings.rating.value_counts()
# #### Calculating the average of all values in the rating column
ratings['rating'].mean()
# #### Calculating the median of all values in the rating column
ratings['rating'].median()
# #### Ratings column general summary (count, mean, std, quartiles)
ratings.rating.describe()
# ##### From the above result, we can conclude that 25% of the movie ratings in the dataset are less than 3.0
# #### Plotting the histogram of the ratings column
# +
import matplotlib.pyplot as plt
import seaborn as sns
# Bar chart of how often each rating value occurs, in rating order.
plt.figure(figsize=(12, 6))
plt1 = ratings.rating.value_counts().sort_index().plot(kind = 'bar', rot = 55)
# Typo fix in the chart title: "vc" -> "vs".
plt.title('Ratings vs Frequency')
plt1.set(xlabel = 'Ratings', ylabel='frequency')
plt.show()
# +
plt.figure(figsize = (6,6))
plt.title('BoxPlot - Ratings')
sns.boxplot(y = ratings.rating,
            showmeans = True,
            meanprops = {"marker":"s","markerfacecolor":"red", "markeredgecolor":"red"})
plt.show()
# -
# ##### Conclusion: half of the people gave ratings between 3 and 4, 25% gave ratings between 4 and 5, at least 25% of the ratings are between 1.5 and 3, and ratings 1 and 0.5 (which are outliers) occur rarely compared to the other ratings
#
# ##### Points beyond the whiskers indicate possible outliers.
# ### Checking dataset Movies
# + tags=[]
import pandas as pd
movies = pd.read_csv('dataset/ml-latest-small/movies.csv')
print(movies.shape)
movies.head()
# -
# #### Average rating of the Jumanji movie (movieId == 2)
ratings.query('movieId == 2').rating.mean()
# #### Plotting the boxplot of the first 5 films' ratings
# +
import seaborn as sns
sns.boxplot(x = 'movieId' , y = 'rating' ,
            data = ratings.query('movieId in [1,2,3,4,5]'),
            showmeans = True,
            meanprops = {"marker":"s","markerfacecolor":"red", "markeredgecolor":"red"})
# -
# #### Calculating standard deviation
# ##### The standard deviation indicates how far the data in the set are from a central tendency (median, mean, mode).
# ##### It shows how dispersed or condensed a distribution is.
# + tags=[]
print(ratings.query('movieId == 1').rating.std())
print(ratings.query('movieId == 2').rating.std())
print(ratings.query('movieId == 3').rating.std())
print(ratings.query('movieId == 4').rating.std())
print(ratings.query('movieId == 5').rating.std())
# -
# #### Calculating the average rating of every movie in the dataset
# Group ratings by movieId
a = ratings.groupby('movieId')
# Take the mean rating within each group
movie_means = a.rating.mean()
# Show the first 5 movies
movie_means.head()
# #### Plotting histogram and boxplot of the per-movie average rating
# +
plt.figure(figsize=(15, 6))
plt.subplot(1,2,1)
plt1 = movie_means.plot(kind = 'hist')
plt.title('Histogram')
plt1.set(ylabel = 'frequency', xlabel='rating')
plt.subplot(1,2,2)
plt2 = sns.boxplot(movie_means,
                   showmeans = True,
                   meanprops = {"marker":"s","markerfacecolor":"red", "markeredgecolor":"red"})
plt.title('BoxPlot')
plt2.set(xlabel = 'rating')
plt.show()
# -
# ##### Conclusion: the distribution of average rating is in between 2.8 to 3.9
# ### Analysing language of movies
# + tags=[]
movies_tmdb = pd.read_csv('dataset/tmdb-5000-movie-dataset/tmdb_5000_movies.csv')
movies_tmdb .original_language.unique()
# -
# #### Counting how many movies there are for each language
count_language = movies_tmdb.original_language.value_counts().to_frame().reset_index() # convert the counts to a DataFrame
count_language.columns = ["original_language","frequency"] # rename the columns
count_language.head()
# #### Plotting a bar chart of the number of movies per language
# +
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize = (12, 5))
sns.barplot(x = 'original_language' , y = 'frequency' , data = count_language)
# +
#### Isolating the English language
# +
total_for_language = movies_tmdb.original_language.value_counts()
total = total_for_language.sum() # total number of movies
en_total = total_for_language.loc['en'] # number of movies in English
rest_total = total - en_total # number of movies in other languages
data = pd.DataFrame(
    {
        'language' : ['English','Others Languages'],
        'frequency': [en_total,rest_total]
    }
)
plt.figure(figsize = (5, 3))
sns.barplot(x = 'language' , y = 'frequency' , data = data)
# #### Conclusion: english language is extremely predominant
| notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.1 64-bit
# language: python
# name: python38164bitb3ebfd1fa0594a1c9d5c617333c2c1a4
# ---
# # Item importance evaluation
# This notebook aims to find out the importance of each item in our dataset, so based on the infos we (might) find out, we may direct our efforts to an especific model...
# +
import numpy as np
import pandas as pd
from utils import *
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse
import sys
import xgboost as xgb
import lightgbm as lgb
from datetime import datetime
from catboost import CatBoost, CatBoostRegressor, Pool, cv
plt.rcParams['figure.figsize'] = (45, 10.0) # set default size of plots
NUMBER_OF_LAGS = 4
sys.path.append("../../main/datasets/")
# !ls ../../main/datasets/
# -
# Load the three raw tables from the datasets folder.
infos, items, orders = read_data("../../main/datasets/")
print("Sanity checks...", infos.shape, items.shape, orders.shape)
# ## Preparing our dataset
# Changing our time signatures,
# adding our promotion feature
# and aggregating our data by weeks...
# Convert time signatures, add the promotion feature and aggregate by week.
process_time(orders)
df = dataset_builder(orders, items)
df
# ## Calculating the relevance: Naive approach
# At first glance, one might just guesstimate the relevance of an item by the sum of the products between the **number of orders in a given week** and the **mean of the price of the product in the given week**.
# Naive relevance: weekly order count times the recommended retail price,
# averaged per item.
df['itemRelevance'] = df['orderSum'] * df['recommendedRetailPrice']
relevances = df.groupby('itemID', as_index=False).agg({'itemRelevance':'mean'})
plt.xlabel('itemID')
plt.ylabel('Relevance')
plt.scatter(relevances['itemID'], relevances['itemRelevance'])
# plt.axhline(np.var(relevances[:, 1]), color="green")
plt.axhline(np.mean(relevances['itemRelevance']), color="red")
# ## Calculating the relevance: Enhanced approach
# We might also try to guesstimate the relevance of items based in **how many weeks a given item is sold**. The main idea behind this approach is to try to eliminate the items that are sold just a few times, but due to its' price, have a high effect on the relevance. So, products that happen to be sold fewer times, will be linearly penalized.
# +
# If an item is sold at least once in a given week...
# ... it will be 'flagged' with '1'
df['soldThisWeek'] = 0
df.loc[df.orderSum > 0, 'soldThisWeek'] = 1
# Summing the flags per item gives the number of weeks each item was sold;
# the result is merged back onto df as 'weeklyFreq'.
itemSaleFreq = df.groupby('itemID', as_index=False).agg(
    {'soldThisWeek': 'sum'}).rename(columns={'soldThisWeek': 'weeklyFreq'})
df = pd.merge(df, itemSaleFreq, left_on='itemID', right_on='itemID').sort_values(
    ['group_backwards', 'itemID'], ascending=[False, True], ignore_index=True).drop(columns=['soldThisWeek'])
# -
df
# +
# The objective here is to create a simple
# baseline that chooses to buy a single
# item of the most sold items from the
# set of items that have been sold
# in every week
NUMBER_OF_ITEMS_TO_TAKE = 2
# One-hot vector over all items: 1 marks an item the baseline buys.
ordersBaseline = np.zeros(df['itemID'].nunique())
# Select rows meeting BOTH conditions in a single .loc call; the original
# chained a full-length boolean mask onto an already-filtered frame, which
# pandas flags with a "Boolean Series key will be reindexed" UserWarning.
items_sold_every_week = df.loc[(df.weeklyFreq == 11) & (df.group_backwards == 3)]
mostFrequentlySoldItems = items_sold_every_week.sort_values('orderSum', ascending=False)['itemID'].values[:NUMBER_OF_ITEMS_TO_TAKE]
ordersBaseline[mostFrequentlySoldItems] = 1
print("[SANITY CHECK] How many did we take? :", ordersBaseline.sum())
# -
# We'll store this variable to use it in "./dora/models/Simple Baseline" notebook.
# %store ordersBaseline
all_baselines = []
# The candidate ranking does not depend on i, so compute it once (the
# original re-filtered and re-sorted inside every loop iteration).
rankedItems = items_sold_every_week.sort_values('orderSum', ascending=False)['itemID'].values
for i in range(1, 11):
    iter_ordersBaseline = np.zeros(df['itemID'].nunique())
    iter_ordersBaseline[rankedItems[:i]] = 1
    all_baselines.append(iter_ordersBaseline)
    print("[SANITY CHECK] How many did we take? :", iter_ordersBaseline.sum())
# %store all_baselines
# Enhanced relevance: additionally weight by how many weeks the item sold,
# linearly penalizing rarely-sold items.
# NOTE(review): the divisor is 13 although weeklyFreq is compared against 11
# elsewhere in this notebook — confirm the intended number of weeks.
df['itemRelevance'] = df['orderSum'] * df['salesPrice_mean'] * df['weeklyFreq'] / 13
weighted_relevances = df.groupby('itemID', as_index=False).agg({'itemRelevance':'mean'})
plt.xlabel('itemID')
plt.ylabel('Relevance')
plt.scatter(weighted_relevances['itemID'], weighted_relevances['itemRelevance'])
plt.axhline(np.mean(weighted_relevances['itemRelevance']), color="red")
# ## Visual Comparison Scatter
# +
# Naive plot
plt.subplot(211)
plt.xlabel('itemID')
plt.ylabel('Relevance')
plt.scatter(relevances['itemID'], relevances['itemRelevance'])
plt.axhline(np.mean(relevances['itemRelevance']), color="red")
# 'Enhanced' Plot
plt.subplot(212)
plt.xlabel('itemID')
plt.ylabel('Relevance')
plt.scatter(weighted_relevances['itemID'], weighted_relevances['itemRelevance'])
plt.axhline(np.mean(weighted_relevances['itemRelevance']), color="red")
# -
# # Pareto Distribution Comparison
# Normalize each relevance series to sum to 1, sort descending, and take
# the cumulative sum — a Pareto-style concentration curve.
relevance_values = (relevances['itemRelevance'] / relevances['itemRelevance'].sum(
)).sort_values(ascending=False, ignore_index=True).cumsum()
weighted_relevance_values = (weighted_relevances['itemRelevance'] / weighted_relevances['itemRelevance'].sum(
)).sort_values(ascending=False, ignore_index=True).cumsum()
plt.plot(weighted_relevance_values, color='blue', label='Relevance values with frequency')
plt.plot(relevance_values, color='red', label="Relevance values without frequency - Previously discussed")
plt.legend()
plt.show()
| dora/pre-processing-features/Relevance Feature Baseline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 量子金融应用:投资组合分散化
#
# <em> Copyright (c) 2021 Institute for Quantum Computing, Baidu Inc. All Rights Reserved. </em>
# ## 概览
#
# 当前量子计算应用到金融问题上的解决方案通常可分为三类量子算法,即量子模拟,量子优化以及量子机器学习 [1,2]。许多的金融问题本质上是一个组合优化问题,解决这些问题的算法通常具有较高的时间复杂度,实现难度较大。得益于量子计算强大的计算性能,未来有望通过量子算法解决这些复杂问题。
#
# 量桨的 Quantum Finance 模块主要讨论的是量子优化部分的内容,即如何通过一些量子算法解决实际金融应用中的优化问题。本文主要介绍如何使用量子算法求解被动投资管理中投资组合分散化问题。
# ## 投资组合分散化问题
#
# 普通用户受专业知识和市场经验的不足的限制,在实际的投资中偏向于被动投资策略。指数投资就是一种常见的被动投资例子,比如说投资者长期购买并持有标普 $500$ 指数(Standard & Poor’s $500$)。作为投资人,假如你不想投资已有的指数,那么你也可以自己创建特定的指数投资组合,在市场中挑选合适的股票加入到创建的指数投资组合中。
#
# 分散化是投资组合中平衡风险和收益的一个重要方法。对投资组合分散化的一个具体描述如下:当前可投资的股票数量为 $n$,指数投资组合中包含的股票数量为 $K$,需要对这 $n$ 个股票进行聚类,根据相似性将可选的股票划分为 $K$ 个类别,再从每个类别中选出最能代表该类别的股票,将其加入到指数组合中来,便于更好的控制风险,提高收益。
# ### 编码投资组合分散化问题
#
# 为了将投资组合分散化问题转化成一个参数化量子电路(parameterized quantum circuits, PQC)可解的问题,我们首先需要编码该问题的哈密顿量。
#
# 在对该问题进行建模时,需要明确的有两个问题,第一是如何对不同的股票进行分类,第二便是以什么样的标准挑选代表性的股票。为了解决这两个问题,首先需要定义股票 $i$ 和股票 $j$ 之间的相似度 $\rho_{ij}$:
# * $\rho_{ii} = 1 \quad $ 该股票和其自身的相似度为1
# * $\rho_{ij} \leq 1 \quad$ 不同股票间 $\rho_{ij}$ 越大,相似度越高
#
# 由于两股票间收益率的相关性,我们可以在协方差矩阵基础上进一步对时间序列间的相似性进行度量。动态时间规整(Dynamic Time Warping, DTW)是一种常见的衡量两个时间序列之间相似度的方法,在本文中,采用DTW算法来计算两股票之间的相似性。基于该度量,我们可以对股票进行分类并挑选代表性股票。对于给定的 $n$ 支股票,每支股票我们可以定义 $n$ 个二进制变量 $x_{ij}$ 和 $1$ 个二进制变量 $y_j$。对于变量 $x_{ij}$,每 $n$ 位一组,$i$ 表示是第几支股票,$j$ 表示在该股票对应的 $n$ 个二进制变量中的序号。每支股票的 $n$ 位二进制变量如果相同位置为 $1$ (即 $j$ 相同),则说明这两只股票被分为同一类,其中 $i = j$ 的就是该类别中被选到指数组合中的最具代表性的股票:
#
# $$
# x_{ij}=
# \begin{cases}
# 1, & \text{指数组合中的股票 $j$ 和股票 $i$ 具有最高的相似度}\\
# 0, & \text{其他情况}
# \end{cases},
# $$
#
# $$
# y_{j}=
# \begin{cases}
# 1, & \text{某类中的代表性股票 $j$ 被选择到指数组合中}\\
# 0, & \text{其他情况}
# \end{cases}.
# $$
#
# 在该问题中我们的模型便可以写作:
#
# $$
# \mathcal{M}= \max_{x_{ij}}\sum_{i=1}^n\sum_{j=1}^n \rho_{ij}x_{ij}. \tag{1}
# $$
#
# 该模型需要满足以下几类约束:
# * 聚类约束:限制指数组合中只能有 $K$ 支股票
# - $ \sum_{j=1}^n y_j = K$
# * 整数约束:限制一只股票要么是在指数组合中,要么就不在
# - $ x_{ij},y_j\in{\{0,1\}}, \forall i = 1, \dots,n; j = 1, \dots, n$
# * 一致性约束:保证如果一只股票可以代表另一支股票,那么它必须在指数组合中
# - $\sum_{j=1}^n x_{ij} = 1, \forall i = 1,\dots,n$
# - $x_{ij} \leq y_j, \forall i = 1,\dots,n; j = 1,\dots, n$
# - $x_{jj} = y_j, \forall j = 1,\dots,n$
#
# 该模型目标就是让可选择的 $n$ 个股票与挑选的指数股票组合间相似性最大化。
#
# 由于要对代价函数做梯度下降优化,所以在定义时就根据模型方程和相应的约束条件做一定修改:
#
# $$
# \begin{aligned}
# C_x &= -\sum_{i=1}^{n}\sum_{j=1}^{n}\rho_{ij}x_{ij} + A\left(K- \sum_{j=1}^n y_j \right)^2 + \sum_{i=1}^n A\left(\sum_{j=1}^n 1- x_{ij} \right)^2 \\
# &\quad + \sum_{j=1}^n A\left(x_{jj} - y_j\right)^2 + \sum_{i=1}^n \sum_{j=1}^n A\left(x_{ij}(1 - y_j)\right).\\
# \end{aligned} \tag{2}
# $$
#
# 该式子中第一项为相似性最大化,后面四项均为约束条件,$A$ 为惩罚参数,通常设置为较大的数字,使得最终表示指数投资组合结果的二进制字符串满足约束条件。
#
# 现在我们需要将代价函数转为一个哈密顿量,从而完成投资组合分散化问题的编码。每一个二进制变量可以取0和1两个值,分别对应量子态 $|0\rangle$ 和 $|1\rangle$。每个二进制变量都对应一个量子比特,所以我们需要 $n^2 + n$ 个量子比特来解决投资组合分散化问题。因为我们的变量 $x_{ij}$ 的值为 $0$ 和 $1$,所以我们要构造一个本征值和它对应的哈密顿量。泡利 $Z$ 的本征值为 $\pm 1$,于是我们构造的哈密顿量为 $\frac{I-Z}{2}$,对应的本征值即为 $0$ 和 $1$。
#
# 我们现在将二进制变量映射到泡利 $Z$ 矩阵上,从而使 $C_x$ 转化成哈密顿矩阵:
#
# $$
# x_{ij} \mapsto \frac{I-Z_{ij}}{2}, \tag{3}
# $$
#
# 这里 $Z_{ij} = I \otimes I \otimes \ldots \otimes Z \otimes \ldots \otimes I$,也就是说 $Z$ 作用在 $ij$ 的量子比特上。通过这个映射,如果一个编号为 $ij$ 的量子比特的量子态为 $|1\rangle$,那么对应的二进制变量的取值为 $x_{ij} |1\rangle = \frac{I-Z_{ij}}{2} |1\rangle = 1|1\rangle $,也就是说该项目是我们要投资的。同样地,对于量子态为 $|0\rangle$的量子比特 $i$,它所对应的二进制变量的取值为 $x_{ij}|0\rangle = \frac{I-Z_{ij}}{2} |0\rangle = 0 |0\rangle $。
#
# 我们用上述映射将 $C_x$ 转化成量子比特数为 $n^2+n$ 的系统的哈密顿矩阵 $H_C$(其中 $x_{ij}$ 占 $n^2$ 个qubit,$y_j$ 占 $n$ 个 qubit),从而实现了投资组合分散化问题的量子化。这个哈密顿矩阵 $H_C$ 的基态即为投资组合分散化问题的最优解。在接下来的部分,我们将展示如何用参数化量子电路找到这个矩阵的基态,也就是对应最小本征值的本征态。
# ## Paddle Quantum 实现
#
# 要在量桨上实现用参数化量子电路解决量子金融中的投资组合分散化问题,首先要做的便是加载需要用到的包。
# +
#加载额外需要的包
import numpy as np
import pandas as pd
import datetime
#加载飞桨,量桨相关的模块
import paddle
from paddle_quantum.circuit import UAnsatz
from paddle_quantum.finance import DataSimulator
from paddle_quantum.finance import portfolio_diversification_hamiltonian
# -
# ### 准备实验数据
#
# 和投资组合优化问题相似,在本问题中,我们选定的投资项目类型为股票。对于实验测试要用的数据,提供了两种方法:
# * 第一种是根据一定的条件,随机生成数据。
#
# 如果采用这种方法准备实验数据,用户在初始化数据时,就需要给出可投资股票的名字列表,交易数据的开始日期和结束日期。
num_assets = 3 # number of stocks available to choose from
stocks = [("TICKER%s" % i) for i in range(num_assets)]
data = DataSimulator( stocks = stocks, start = datetime.datetime(2016, 1, 1), end = datetime.datetime(2016, 1, 30))
data.randomly_generate() # randomly generate the experimental data
# * 第二种是用户可以选择读取本地收集到的真实数据集用于实验。考虑到文件中包含的股票数可能会很多,用户可以指定用于该实验的股票数量,即 上面初始化的`num_assets`。
#
# 我们收集了 $12$ 支股票 $35$ 个交易日的收盘价格存放到 `realStockData_12.csv` 文件中,在这里我们只选择读取前 $3$ 个股票的信息。
#
# 在本教程中,我们选择读取的真实数据作为实验数据。
# +
# Read real closing prices (12 stocks, 35 trading days), keeping only the
# first `num_assets` stocks for this experiment.
df = pd.read_csv('realStockData_12.csv')
dt = []
for i in range(num_assets):
    mylist = df['closePrice'+str(i)].tolist()
    dt.append(mylist)
print(dt) # print the closing prices of the 3 stocks over the 35 trading days
data.set_data(dt) # use the data read from file as the experimental data
# -
# ### 编码哈密顿量
#
# 这里我们将式(2)中的二进制变量用式(3)替换,从而构建哈密顿量 $H_C$。
# 在编码哈密顿量的过程中,首先需要计算各股票之间的相似矩阵 $\rho$。
# Pairwise similarity matrix between the stocks (DTW-based, per the text).
rho = data.get_similarity_matrix()
# Build the problem Hamiltonian from the similarity matrix and parameters.
q = 2 # number of stocks wanted in the index portfolio
penalty = num_assets # penalty parameter: no less than the number of investable stocks
hamiltonian = portfolio_diversification_hamiltonian(penalty, rho, q)
# ### 计算损失函数
#
# 调用量桨内置的 [`complex entangled layer`](https://qml.baidu.com/api/paddle_quantum.circuit.uansatz.html) 构造参数化量子电路。该电路会返回一个输出态 $|\vec{\theta}\rangle$,由此输出态,我们可以定义投资组合分散化问题在经典-量子混合模型下的损失函数:
#
# $$
# L(\vec{\theta}) = \langle\vec{\theta}|H_C|\vec{\theta}\rangle.
# \tag{4}
# $$
#
# 之后我们利用经典的优化算法寻找最优参数 $\vec{\theta}^*$。下面的代码给出了通过量桨和飞桨搭建网络的过程。
class PDNet(paddle.nn.Layer):
    """Parameterized quantum circuit network for portfolio diversification.

    Holds a trainable angle tensor of shape (p, n*(n+1), 3) and evaluates
    the expectation value of a given Hamiltonian on the circuit's output.
    """

    def __init__(self, n, p, dtype="float64"):
        super(PDNet, self).__init__()
        self.p = p
        self.num_qubits = n * (n + 1)
        # Rotation angles, initialized uniformly in [0, 2*pi).
        self.theta = self.create_parameter(
            shape=[self.p, self.num_qubits, 3],
            default_initializer=paddle.nn.initializer.Uniform(low=0, high=2 * np.pi),
            dtype=dtype,
            is_bias=False)

    def forward(self, hamiltonian):
        """Forward pass: run the ansatz and return (expectation, circuit)."""
        circuit = UAnsatz(self.num_qubits)
        circuit.complex_entangled_layer(self.theta, self.p)
        circuit.run_state_vector()
        expectation = circuit.expecval(hamiltonian)
        return expectation, circuit
# ### 训练量子神经网络
#
# 定义好了量子神经网络后,我们使用梯度下降的方法来更新其中的参数,使得式(4)的期望值最小。
SEED = 1100   # random seed
p = 2       # number of layers in the quantum circuit
ITR = 150   # number of training iterations
LR = 0.4    # learning rate for gradient descent
# Optimize the network defined above with PaddlePaddle.
# +
# Number of qubits per "row" of variables
n = len(rho)
# Fix the random seed for reproducibility
paddle.seed(SEED)
# Instantiate the quantum neural network
net = PDNet(n, p)
# Use the Adam optimizer
opt = paddle.optimizer.Adam(learning_rate=LR, parameters=net.parameters())
# Gradient-descent training loop
for itr in range(1, ITR + 1):
    loss, cir = net(hamiltonian)
    loss.backward()
    opt.minimize(loss)
    opt.clear_grad()
    if itr % 10 == 0:
        print("循环数:", itr, " 损失:", "%.4f"% loss.numpy())
# -
# ### 解码量子答案
#
# 当调用优化器求得损失函数的最小值以及相对应的一组参数 $\vec{\theta}^*$后,为了进一步求得投资组合优化问题的近似解,需要从电路输出的量子态 $|\vec{\theta}^*\rangle$ 中解码出经典优化问题的答案。物理上,解码量子态需要对量子态进行测量,然后统计测量结果的概率分布:
#
# $$
# p(z) = |\langle z|\vec{\theta}^*\rangle|^2.
# \tag{6}
# $$
#
# 在量子参数化电路表达能力足够的情况下,某个比特串出现的概率越大,意味着其是投资组合优化问题最优解的可能性越大。
#
# 量桨提供了查看参数化量子电路输出状态的测量结果概率分布的函数。
# Simulate 2048 repeated measurements of the circuit's output state
prob_measure = cir.measure(shots=2048)
# The most probable bit string is taken as the (approximate) solution.
investment = max(prob_measure, key=prob_measure.get)
print("利用哈密顿量找到的解的比特串形式:", investment)
# 我们的测量结果是表示投资组合分散化问题答案的比特串:如上文结果 ``100001001101``,我们一共有 $n = 3$ 支可投资股票,选择两只到指数组合中。前 $n^2 = 9$ 位 ``100001001`` 代表 $x_{ij}$,每 $3$ 位为一组,第一组 ``100`` 中第一位为 $1$,代表它被划作一类。第二组 ``001`` 和第三组 ``001`` 中第三位被置为 $1$,代表它们被划为一类。同时,第一组和第三组 $1$ 出现的位置符合 $i = j$,即这两支股票为最能代表各自类的股票。另外,可以看出 $1$ 出现的位置是 $j = 1$ 和 $j = 3$,即两个位置可能为 $1$,这和我们预设的指数组合中有两只股票是对应的。同时,后 $3$ 位为 ``101``,代表 $y_j$, 表示第一支股票和第三支股票被选中放入指数组合中。通过上述说明,可以看出我们求解得到的结果是一个有效解。如果最后的结果不是上述这种有效解,读者依然可以通过调整参数化量子电路的参数值,来获得更好的训练效果。
# ### 结语
#
# 在本教程中,我们主要讨论了分散化投资中如何对可投资项目进行分类,以及如何挑选具有代表性的到我们的投资组合中来。在本问题中,每个投资项目都需要 $n$ 位量子比特来表示分类,$1$ 位量子比特表示是否被选中。受量子比特数目的限制,目前能够处理的投资项目数还比较少。
# _______
#
# ## 参考文献
#
# [1] Orús, Román, Samuel Mugel, and Enrique Lizaso. "Quantum computing for finance: Overview and prospects." [Reviews in Physics 4 (2019): 100028.](https://arxiv.org/abs/1807.03890)
#
# [2] Egger, Daniel J., et al. "Quantum computing for Finance: state of the art and future prospects." [IEEE Transactions on Quantum Engineering (2020).](https://arxiv.org/abs/2006.14510)
| tutorial/combinatorial_optimization/PortfolioDiversification_CN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import libraries
import calendar
from collections import Counter, OrderedDict
import datetime
from itertools import chain
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
pd.set_option("display.max_rows", 500)
pd.options.display.max_colwidth = 1000
import seaborn as sns
import time
import warnings
warnings.filterwarnings("ignore")
# Create color palettes for seaborn
sns.palplot(sns.color_palette("BuGn", 10))
sns.palplot(sns.color_palette("YlGnBu", 10))
# NOTE(review): the RdPu/BuPu hex lists below are defined but the palplot
# calls use seaborn's built-in palettes of the same name — confirm intent.
RdPu = ['#fff7f3','#fde0dd','#fcc5c0','#fa9fb5','#f768a1','#dd3497','#ae017e','#7a0177','#49006a']
sns.palplot(sns.color_palette("RdPu", 10))
BuPu = ['#f7fcfd','#e0ecf4','#bfd3e6','#9ebcda','#8c96c6','#8c6bb1','#88419d','#810f7c','#4d004b']
sns.palplot(sns.color_palette("BuPu", 10))
# +
# Import CSV files
trivago_path = "../data/train.csv"
hotels_path = "../data/hotels_items.csv"
# Create DataFrame
trivago_df = pd.read_csv(trivago_path)
hotels_df = pd.read_csv(hotels_path, usecols = ["item_id", "properties", "city", "price"])
# -
# Convert UNIX time stamp (timestamp column) to UTC date and time strings
date_conv = lambda x: datetime.datetime.utcfromtimestamp(x).strftime("%Y-%m-%d")
time_conv = lambda x: datetime.datetime.utcfromtimestamp(x).strftime("%H:%M:%S")
trivago_df["date"] = trivago_df["timestamp"].map(date_conv)
trivago_df["time"] = trivago_df["timestamp"].map(time_conv)
# Derive the day of the week from the same timestamp
trivago_df["day_of_week"] = trivago_df["timestamp"].apply(lambda x: datetime.datetime.utcfromtimestamp(x).strftime("%A"))
# Drop UNIX timestamp (now redundant)
trivago_df.drop("timestamp", axis = 1, inplace = True)
trivago_df.info()
hotels_df.info()
# +
# Split the pipe-separated properties string into a list per hotel
hotels_df["properties"] = hotels_df["properties"].str.split("|")
# Convert all properties to lowercase
hotels_df["properties"] = hotels_df["properties"].apply(lambda x: [w.lower() for w in x])
# Create list
properties_list = hotels_df["properties"].tolist()
# De-duplicate tags within each hotel, then flatten across hotels
# (duplicates ACROSS hotels remain — they are what gets counted below)
unique_properties = list(chain(*[list(set(tags)) for tags in properties_list]))
# +
# Count unique properties
def count_items(l):
    """Count occurrences of each item in *l*.

    Returns an OrderedDict mapping item -> count, ordered by descending
    count. Counter.most_common already sorts by count (ties keep
    first-seen order), matching the original stable sort on counts.
    """
    return OrderedDict(Counter(l).most_common())
properties_count = count_items(unique_properties)
# -
properties_count
hotels_df.head()
# Split "city, country" into separate indexed columns (0 = city, 1 = country).
location_df = hotels_df["city"].str.split(",", expand = True)
# Sanity check
location_df.head()
# Merge the location columns back with the hotel attributes; the right join
# on the shared index keeps every hotel row.
hotels_df = pd.merge(location_df, hotels_df[["item_id", "properties", "price"]], left_index = True,
                     right_index = True, how = "right")
hotels_df.head()
hotels_df.rename({0: "city"}, inplace = True, axis = 1)
hotels_df.rename({1: "country"}, inplace = True, axis = 1)
# Column 2 only exists where a city value contained two commas -- presumably
# malformed data, hence the name "mistake". TODO confirm against the raw CSV.
hotels_df.rename({2: "mistake"}, inplace = True, axis = 1)
hotels_df.info()
# Drop the malformed spill-over column.
hotels_df.drop("mistake", axis = 1, inplace = True)
# Plot price distribution
sns.boxplot(x = hotels_df.price, palette = "RdPu", data = hotels_df).set_title("Price");
plt.show();
# Device Countplot
plt.figure(figsize = (10, 5))
sns.countplot(trivago_df["device"], alpha = .70, palette = "BuPu")
plt.title("Devices")
plt.ylabel("Number of Actions per Device")
plt.show()
# Action Type Countplot
plt.figure(figsize = (20, 10))
sns.countplot(trivago_df["action_type"], alpha = .70, palette = "BuPu")
plt.title("Action Type")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Actions per Action Type")
plt.show()
# Platform Countplot
plt.figure(figsize = (20, 10))
sns.countplot(trivago_df["platform"], alpha = .70, palette = "YlGnBu")
plt.title("Platform Used by User")
plt.ylabel("Number of Actions per Platform")
plt.show()
# Countries Countplot (top 20 by hotel count)
plt.figure(figsize = (20, 10))
sns.countplot(hotels_df["country"], alpha = .70, palette = "YlGnBu", order = hotels_df.country.value_counts().iloc[:20].index)
plt.title("Countries where Hotels are Located")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Hotels per Country")
plt.show()
# Cities Countplot (top 20 by hotel count)
plt.figure(figsize = (20, 10))
sns.countplot(hotels_df["city"], alpha = .70, palette = "RdPu", order = hotels_df.city.value_counts().iloc[:20].index)
plt.title("Cities where Hotels are Located")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Hotels per City")
plt.show()
# Properties Countplot
# BUG FIX: the original used `order = unique_properties[:20]` -- the first 20
# entries in file order -- which does not match the "Most Common" title.
# Order by frequency using the counts computed earlier (properties_count is
# already sorted by descending count).
plt.figure(figsize = (20, 10))
sns.countplot(unique_properties, alpha = .70, palette = "RdPu", order = list(properties_count)[:20])
plt.title("Most Common Hotel Properties")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Hotels with given Property")
plt.show()
# Restrict to click-out events only (the action the recommender predicts).
trivago_clickout = trivago_df[trivago_df["action_type"] == "clickout item"]
# Device Countplot
plt.figure(figsize = (10, 5))
sns.countplot(trivago_clickout["device"], alpha = .70, palette = "BuPu")
plt.title("Devices")
plt.ylabel("Number of Click-Outs per Device")
plt.show()
# Platform and Clickouts
plt.figure(figsize = (20, 10))
sns.countplot(trivago_clickout["platform"], alpha = .70, palette = "YlGnBu")
plt.title("Platform Used by User")
plt.ylabel("Number of Click-Outs per Platform")
plt.show()
# Cities and Clickouts (top 20 cities)
plt.figure(figsize = (20, 10))
sns.countplot(trivago_clickout["city"], alpha = .70, palette = "YlGnBu", order = trivago_clickout.city.value_counts().iloc[:20].index)
plt.title("Click-Outs per City")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Click-Outs per City")
plt.show()
# Time of Day and Clickouts
# NOTE(review): "time" is a second-resolution string, so this shows the 20
# most frequent exact timestamps rather than a time-of-day distribution.
plt.figure(figsize = (20, 10))
sns.countplot(trivago_clickout["time"], alpha = .70, palette = "RdPu", order = trivago_clickout.time.value_counts().iloc[:20].index)
plt.title("Click-Outs per Time of the Day")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Click-Outs per Time of the Day")
plt.show()
# Date and Clickouts (top 20 dates)
plt.figure(figsize = (20, 10))
sns.countplot(trivago_clickout["date"], alpha = .70, palette = "RdPu", order = trivago_clickout.date.value_counts().iloc[:20].index)
plt.title("Click-Outs per Date")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Click-Outs per Date")
plt.show()
# Day of Week and Clickouts
plt.figure(figsize = (20, 10))
sns.countplot(trivago_clickout["day_of_week"], alpha = .70, palette = "RdPu", order = trivago_clickout.day_of_week.value_counts().iloc[:20].index)
plt.title("Click-Outs per Day")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Click-Outs per Day")
plt.show()
# +
# From scraping: replace hotels_df with the scraped/cleaned hotel data.
# Path to file
hotels_path = "../data/clean_hotels_scraped_v2.csv"
# Dataframe
hotels_df = pd.read_csv(hotels_path, usecols = ["city", "country", "hotel_name", "rating",
                                                "address", "popularity_rating", "locality", "price",
                                                "landmark", "URL"])
# Sanity check
hotels_df.head()
# -
# Sanity check
hotels_df.info()
# Ratings Countplot
plt.figure(figsize = (10, 5))
sns.countplot(hotels_df["rating"], alpha = .70, palette = "BuPu")
plt.title("Ratings")
plt.ylabel("Number of Hotels per Rating")
plt.show()
# Price distribution (view limited to the 50-200 range)
sns.distplot(hotels_df["price"], color = "orchid")
plt.xlim(50, 200)
plt.show()
# Locality Countplot (top 20)
plt.figure(figsize = (20, 10))
sns.countplot(hotels_df["locality"], alpha = .70, palette = "BuGn", order = hotels_df.locality.value_counts().iloc[:20].index)
plt.title("Most Popular Hotel Localities")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Hotels per Locality")
plt.show()
# Cities Countplot (top 20)
plt.figure(figsize = (20, 10))
sns.countplot(hotels_df["city"], alpha = .70, palette = "BuGn", order = hotels_df.city.value_counts().iloc[:20].index)
plt.title("Most Popular Hotel Cities")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Hotels per Cities")
plt.show()
# Country Countplot (top 20)
plt.figure(figsize = (20, 10))
sns.countplot(hotels_df["country"], alpha = .70, palette = "BuPu", order = hotels_df.country.value_counts().iloc[:20].index)
plt.title("Most Popular Hotel Countries")
plt.xticks(rotation = "vertical")
plt.ylabel("Number of Hotels per Countries")
plt.show()
# Popularity-rating distribution (the original comment said "Price
# distribution", but this plots popularity_rating).
sns.distplot(hotels_df["popularity_rating"], color = "orchid")
plt.xlim(0, 400)
plt.show()
| jupyter_notebooks/exploratory_data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup
import requests
# +
# Scrape the BBB downloads page and print the cover image tag of each book panel.
page = requests.get('http://www.bbb.gob.bo/descargas/')
soup = BeautifulSoup(page.text, 'html.parser')
# print(soup)
# NOTE(review): this attribute chain assumes a fixed page layout
# (content-blog > div > div) and raises AttributeError if the markup changes.
container = soup.find(class_="content-blog").div.div
books = container.find_all('div', {"class": "panel-grid-cell"})
# b = [ print(book.prettify()) for book in books ]
# return
for i, book in enumerate(books):
    # Deeply nested <img> inside the panel -- presumably the book cover.
    imgContainer = book.div.div.div.div.img
    print(imgContainer)
    # The commented-out code below sketches two ways of downloading the image
    # (whole body vs. streamed chunks); kept for reference.
    # r = requests.get(imgContainer['src'], allow_redirects=True)
    # filename = imgContainer['src'].split('/')[-1]
    # open('bbb/img/'+filename, 'wb').write(r.content)
    # r = requests.get(imgContainer['src'], stream=True)
    # with open(local_filename, 'wb') as f:
    #     for chunk in r.iter_content(chunk_size=1024):
    #         if chunk:  # filter out keep-alive new chunks
    #             f.write(chunk)
    #     # f.flush() commented by recommendation from J.F.Sebastian
    # return local_filename
# -
| scrappers/CIS_scrapper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clustering Algorithms - Overview
# Both clustering and dimensionality reduction summarize the data. Dimensionality reduction compresses the data by representing it using new, fewer features that capture the most relevant information. Clustering algorithms, in contrast, assign existing observations to subgroups that consist of similar data points.
#
# Clustering can serve to better understand the data through the lens of categories learned from continuous variables. It also permits automatically categorizing new objects according to the learned criteria. Examples of related applications include hierarchical taxonomies, medical diagnostics, or customer segmentation. Alternatively, clusters can be used to represent groups as prototypes, using e.g. the midpoint of a cluster as the best representatives of learned grouping. An example application includes image compression.
#
# Clustering algorithms differ with respect to their strategy of identifying groupings:
# - Combinatorial algorithms select the most coherent of different groupings of observations
# - Probabilistic modeling estimates distributions that most likely generated the clusters
# - Hierarchical clustering finds a sequence of nested clusters that optimizes coherence at any given stage
#
# Algorithms also differ by the notion of what constitutes a useful collection of objects that needs to match the data characteristics, domain and the goal of the applications. Types of groupings include:
# - Clearly separated groups of various shapes
# - Prototype- or center-based, compact clusters
# - Density-based clusters of arbitrary shape
# - Connectivity- or graph-based clusters
#
#
# Important additional aspects of a clustering algorithm include whether
# - it requires exclusive cluster membership,
# - makes hard, i.e., binary, or soft, probabilistic assignment, and
# - is complete and assigns all data points to clusters.
# This notebook compares several clustering algorithms using toy datasets.
# ## Imports & Settings
from warnings import filterwarnings
filterwarnings('ignore')
# + slideshow={"slide_type": "skip"}
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
from numpy.random import rand, seed
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import kneighbors_graph
from sklearn.datasets import make_blobs, make_circles, make_moons
from matplotlib.colors import ListedColormap
from sklearn.cluster import KMeans, SpectralClustering, DBSCAN, AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_mutual_info_score
import seaborn as sns
# -
sns.set_style('white')
seed(42)
# Categorical colormap used to color cluster assignments in the result grid.
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
cmap = ListedColormap(sns.color_palette(flatui))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Generate Synthetic Datasets
# -
n_samples = 1500
random_state = 170
# Three isotropic Gaussian blobs (the "easy" case).
blobs = make_blobs(n_samples=n_samples,
                   random_state=random_state)
# Two concentric circles -- not linearly separable.
noisy_circles = make_circles(n_samples=n_samples,
                             factor=.5,
                             noise=.05)
# Two interleaving half-moons.
noisy_moons = make_moons(n_samples=n_samples,
                         noise=.05)
# Structure-free uniform noise; label None marks "no ground truth".
uniform = rand(n_samples, 2), None
X, y = make_blobs(n_samples=n_samples,
                  random_state=random_state)
# Anisotropic blobs: a linear transform elongates the clusters.
elongated = X.dot([[0.6, -0.6], [-0.4, 0.8]]), y
# Blobs with different per-cluster standard deviations.
varied = make_blobs(n_samples=n_samples,
                    cluster_std=[1.0, 2.5, 0.5],
                    random_state=random_state)
# Default hyperparameters; per-dataset overrides follow in `datasets`.
default_params = {'quantile': .3,
                  'eps': .2,
                  'damping': .9,
                  'preference': -200,
                  'n_neighbors': 10,
                  'n_clusters': 3}
# (label, dataset, parameter overrides) triples driving the comparison grid.
datasets = [('Standard Normal', blobs, {}),
            ('Various Normal', varied, {'eps': .18, 'n_neighbors': 2}),
            ('Anisotropic Normal', elongated, {'eps': .15, 'n_neighbors': 2}),
            ('Uniform', uniform, {}),
            ('Circles', noisy_circles, {'damping': .77, 'preference': -240,
                                        'quantile': .2, 'n_clusters': 2}),
            ('Moons', noisy_moons, {'damping': .75,
                                    'preference': -220, 'n_clusters': 2})]
# ## Plot Cluster Algorithm Results
# + hide_input=false slideshow={"slide_type": "fragment"}
# One row per dataset, one column per clustering algorithm.
fig, axes = plt.subplots(figsize=(15, 15),
                         ncols=5,
                         nrows=len(datasets),
                         sharey=True,
                         sharex=True)
plt.setp(axes, xticks=[], yticks=[], xlim=(-2.5, 2.5), ylim=(-2.5, 2.5))
for d, (dataset_label, dataset, algo_params) in enumerate(datasets):
    # Per-dataset hyperparameters = defaults overridden by dataset-specific values.
    params = default_params.copy()
    params.update(algo_params)
    X, y = dataset
    X = StandardScaler().fit_transform(X)
    # connectivity matrix for structured Ward
    connectivity = kneighbors_graph(X, n_neighbors=params['n_neighbors'],
                                    include_self=False)
    # Symmetrize: kNN graphs are directed in general.
    connectivity = 0.5 * (connectivity + connectivity.T)
    kmeans = KMeans(n_clusters=params['n_clusters'])
    spectral = SpectralClustering(n_clusters=params['n_clusters'],
                                  eigen_solver='arpack',
                                  affinity='nearest_neighbors')
    dbscan = DBSCAN(eps=params['eps'])
    average_linkage = AgglomerativeClustering(linkage="average",
                                              affinity="cityblock",
                                              n_clusters=params['n_clusters'],
                                              connectivity=connectivity)
    gmm = GaussianMixture(n_components=params['n_clusters'],
                          covariance_type='full')
    clustering_algorithms = (('KMeans', kmeans),
                             ('SpectralClustering', spectral),
                             ('AgglomerativeClustering', average_linkage),
                             ('DBSCAN', dbscan),
                             ('GaussianMixture', gmm))
    for a, (name, algorithm) in enumerate(clustering_algorithms):
        if name == 'GaussianMixture':
            # GaussianMixture is fit and predicted in two explicit steps here.
            algorithm.fit(X)
            y_pred = algorithm.predict(X)
        else:
            y_pred = algorithm.fit_predict(X)
        axes[d, a].scatter(X[:, 0],
                           X[:, 1],
                           s=5,
                           c=y_pred,
                           cmap=cmap)
        if d == 0:
            axes[d, a].set_title(name, size=14)
        if a == 0:
            axes[d, a].set_ylabel(dataset_label, size=12)
        if y is None:
            # No ground truth (uniform noise): use constant labels so the
            # mutual-information score is defined (and ~0).
            y = [.5] * n_samples
        # Adjusted mutual information of the clustering vs. the true labels.
        mi = adjusted_mutual_info_score(labels_pred=y_pred,
                                        labels_true=y)
        axes[d, a].text(0.85, 0.91,
                        f'MI: {mi:.2f}',
                        transform=axes[d, a].transAxes,
                        fontsize=12)
        axes[d, a].axes.get_xaxis().set_visible(False)
sns.despine()
fig.tight_layout()
# -
| 13_unsupervised_learning/03_clustering_algorithms/01_clustering_algos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import gzip
from monty.json import MontyDecoder
from ujson import load
from pymatgen.core.structure import Molecule, Structure
from pathlib import Path
from time import time
from mpcontribs.client import Client
from tqdm.auto import tqdm
# MPContribs project to upload into.
name = "open_catalyst_project"
client = Client()
# Local directory of gzipped OCP IS2RES training trajectory files.
p = Path("/Users/patrick/is2res_train/is2res_train")
jsons = list(p.glob("*.json.gz"))
# +
# Shared decoder for pymatgen/monty-serialized JSON objects.
decoder = MontyDecoder()

def get_contribution(path):
    """Build one MPContribs contribution dict from a *.json.gz trajectory file.

    Returns None for files larger than 10 MiB (skipped to keep uploads small).
    """
    # st_size is in bytes; skip anything over 10 MiB.
    if path.stat().st_size / 1024 / 1024 > 10:
        return None
    with gzip.open(path) as f:
        data = decoder.process_decoded(load(f))
    # Last frame of the relaxation trajectory = the relaxed structure.
    struct = data['trajectory'][-1]
    struct.add_site_property('tags', [int(t) for t in data['tags']])
    # Sites tagged 2 form the adsorbate; all other sites form the slab --
    # presumably the OCP tagging convention (TODO confirm).
    mol = Molecule.from_sites([site for site in struct if site.properties['tags'] == 2])
    iupac_formula = mol.composition.iupac_formula
    bulk_struct = Structure.from_sites([site for site in struct if site.properties['tags'] != 2])
    bulk_formula = bulk_struct.composition.reduced_formula
    # Flat, queryable metadata shown in the MPContribs search UI.
    search_data = {
        "mpid": data['bulk_id'],
        "adsorptionEnergy": data["adsorption_energy"],
        "adsorbateSmiles": data["adsorbate_smiles"],
        "adsorbateIUPACFormula": iupac_formula,
        "bulkFormula": bulk_formula,
        "h": data["surface_miller_indices"][0],
        "k": data["surface_miller_indices"][1],
        "l": data["surface_miller_indices"][2],
        "surfaceTop": data["surface_top"],
        "surfaceShift": data["surface_shift"]
    }
    return {
        "project": name,
        "formula": struct.composition.reduced_formula,
        "identifier": data["id"],
        "data": search_data,
        "structures": [struct],
        "attachments": [path]
    }
# -
client.get_totals({"project": name})
# +
# Upload contributions not already present in the project, in batches of 5.
all_ids = client.get_all_ids({"project": name}).get(name, {}).get("identifiers", set())
print(len(all_ids))
contributions, cnt = [], 0
for path in tqdm(jsons):
    # Identifier = file name without ".json.gz" (two suffixes to strip).
    if Path(path.stem).stem not in all_ids:
        contrib = get_contribution(path)
        if not contrib:
            continue  # skipped (file too large)
        contributions.append(contrib)
        cnt += 1
        if not cnt % 5:
            client.submit_contributions(
                contributions, per_request=10,
                ignore_dupes=True, skip_dupe_check=True, retry=True
            )
            contributions.clear()
# BUG FIX: flush the final partial batch -- the in-loop submit only fires
# when cnt is a multiple of 5, so up to 4 contributions were silently dropped.
if contributions:
    client.submit_contributions(
        contributions, per_request=10,
        ignore_dupes=True, skip_dupe_check=True, retry=True
    )
    contributions.clear()
print(cnt)
| mpcontribs-portal/notebooks/contribs.materialsproject.org/ocp-upload.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="EaqjihtwRrys"
# # Lab03: Web Crawler (Continue) & Information Retrieval.
#
# - MSSV: 18120061
# - Họ và tên: <NAME>
# + [markdown] id="FUG7FFTsRryt"
# ## Yêu cầu bài tập
#
# **Cách làm bài**
#
#
# Bạn sẽ làm trực tiếp trên file notebook này; từ `TODO` cho biết những phần mà bạn cần phải làm.
#
# Bạn có thể thảo luận ý tưởng cũng như tham khảo các tài liệu, nhưng *code và bài làm phải là của bạn*.
#
# Nếu vi phạm thì sẽ bị 0 điểm cho bài tập này.
#
# **Cách nộp bài**
#
# Trước khi nộp bài, rerun lại notebook (`Kernel` -> `Restart & Run All`).
#
# Sau đó, tạo thư mục có tên `MSSV` của bạn (vd, nếu bạn có MSSV là 1234567 thì bạn đặt tên thư mục là `1234567`) Chép file notebook, file `t_data.txt` và file `raw_data` của các bạn (nếu file này kích thước lớn các bạn có thể chép link vào `link_data.txt`), nén thư mục `MSSV` này lại và nộp trên moodle.
#
# **Nội dung bài tập**
#
# Cài đặt một web crawler để thu thập dữ liệu từ: https://en.wikipedia.org/wiki/Web_mining.
# + [markdown] id="0HlhdT6BRryu"
# ## Nội dung bài tập
# + [markdown] id="c9-ZyiLjRryv"
# Cài đặt một Web crawler đơn giản bắt đầu từ URL: https://en.wikipedia.org/wiki/Web_mining, tìm liên kết và thu thập dữ liệu trong HTML tại URL này sau đó lặp lại với các URL vừa tìm được.
#
# + id="FJktAwbCOyod"
import requests
import re
from bs4 import BeautifulSoup
from bs4.element import Comment
import string
import pickle
# + [markdown] id="mMSlOpSsRryv"
# ## 1. Thu thập đường dẫn
# + [markdown] id="OZZ9lSUPRryw"
# - Robot.txt: https://en.wikipedia.org/robots.txt
# - **Bước 1**: Thu thập đường dẫn từ https://en.wikipedia.org/wiki/Web_mining. Lưu trữ vào một danh sách `url_list`.
# - **Bước 2**: Lặp lại bước 1 cho các đường dẫn trong `url_list` (**lưu ý:** kiểm tra các đường dẫn vừa thu được đã nằm trong `url_list` hay không?). Dừng khi đã thu thập được 200 URLs.
# + id="4HFWqw1VOrEe" colab={"base_uri": "https://localhost:8080/"} outputId="ccd68d1b-df48-47f5-d12f-d4c692a57b39"
import random
import urllib.robotparser
# Pool of desktop browser user-agent strings; one is picked at random.
UAS = ("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
       "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0",
       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
       "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
       )
# NOTE(review): `ua` is never used afterwards -- the `head` dict below
# hard-codes its own User-Agent string.
ua = UAS[random.randrange(len(UAS))]
# Parse Wikipedia's robots.txt; `rp.can_fetch` is used to filter URLs later.
rp = urllib.robotparser.RobotFileParser()
rp.set_url("https://en.wikipedia.org/robots.txt")
rp.read()
# Request-rate / crawl-delay hints (values are not used below).
rrate = rp.request_rate("*")
rp.crawl_delay("*")
# Browser-like request headers. NOTE(review): the key 'refere' looks like a
# typo for 'referer' (header is silently ignored as written), and the cookie
# value is a placeholder to be filled in by the user.
head = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'none',
    'Accept-Language': 'en-US,en;q=0.8',
    'Connection': 'keep-alive',
    'refere': 'https://example.com',
    'cookie': """your cookie value ( you can get that from your web page) """
}
def get_urls(url):
    """Fetch *url* and return all absolute http(s) links found in its HTML.

    Links disallowed by Wikipedia's robots.txt are filtered out.
    Returns None if the connection fails.
    """
    try:
        r = requests.get(url, headers=head, stream=True, timeout=5)
    except requests.exceptions.ConnectionError as err:
        print(err)
        return None
    # Extract every absolute http(s) URL from the raw HTML.
    pattern = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    all_urls = re.findall(pattern, r.text)
    # Keep only URLs that robots.txt allows us to crawl. A comprehension
    # replaces the original copy-then-remove loop (list.remove is O(n) per
    # disallowed URL, i.e. quadratic overall) with the same result.
    return [u for u in all_urls if rp.can_fetch("*", u)]
def get_urls_recursive(start_url, limit):
    """Breadth-first crawl from *start_url*, returning up to *limit* unique URLs.

    Returns None if a page fails to download (propagated from get_urls).
    """
    urls = [start_url]
    seen = set(urls)  # O(1) membership check alongside the ordered list
    for url in urls:
        # BUG FIX: the original kept fetching every collected page even after
        # the limit was reached (each new URL was appended and immediately
        # truncated away). Stop issuing requests once we have enough.
        if len(urls) >= limit:
            break
        new_urls = get_urls(url)
        if new_urls is None:
            return None
        for new_url in new_urls:
            if new_url not in seen:
                seen.add(new_url)
                urls.append(new_url)
            if len(urls) >= limit:
                break
    return urls[:limit]
# Crawl starting from the Web-mining article.
# NOTE(review): the assignment text asks for 200 URLs; the limit of 20 here is
# presumably reduced to keep the demo fast -- confirm before submitting.
url_list = get_urls_recursive('https://en.wikipedia.org/wiki/Web_mining', 20)
if url_list is not None:
    # Keep an untouched copy for the retrieval functions further below.
    url_list_cp = url_list.copy()
    print('\n'.join(map(str, enumerate(url_list, start=1))))
# + [markdown] id="zk4ty3jcRryx"
# ## 2. Thu thập dữ liệu
# Thu thập dữ liệu từ `url_list`. Lưu trữ dữ liệu thu được vào dictionary data với keys là các từ, values gồm 2 phần tử:
# - `url_idx_list` với $idx \in \left[0,200\right) \cap \mathbb{N}$
# - `frequency`
#
# Ví dụ: `data['at']=[url_idx_list,frequency]`:
# - `url_idx_list`: danh sách các url mà trong dữ liệu của chúng (html document) chứa từ "at".
# - `frequency`: tần suất xuất hiện (số lần xuất hiện) của từ `at` trong dữ liệu của **tất cả đường dẫn thu được**.
# + id="YB5nIZAhQr7-"
# Tags whose text content should never count as visible page text.
_IGNORED_PARENTS = ('style', 'title', 'script', 'head', '[document]', 'class', 'a', 'li')


def text_filter(element):
    """Return True if *element* is a visible, non-empty text node.

    Rejects text inside non-content tags, HTML comments, and strings that
    begin with whitespace (spaces, returns, newlines).
    """
    if element.parent.name in _IGNORED_PARENTS:
        return False
    if isinstance(element, Comment):
        # HTML comments are not rendered text.
        return False
    # Same test as the original: drop strings starting with whitespace.
    return not re.match(r"[\s\r\n]+", str(element))
def wordList(url):
    """Download *url* and return the list of words in its visible text.

    Punctuation characters are replaced with spaces, then the text is split
    on whitespace. Order of appearance is preserved; duplicates are kept.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.content, "html.parser", from_encoding="UTF-8")
    text = soup.findAll(text=True)
    # Keep only visible, non-empty text fragments.
    filtered_text = list(filter(text_filter, text))
    # Map every punctuation character to a space in one C-level pass
    # (equivalent to the original per-character join).
    punct_to_space = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
    word_list = []
    for fragment in filtered_text:
        # BUG FIX (performance): the original `word_list = word_list + ...`
        # rebuilt the whole list on every fragment (quadratic overall);
        # list.extend appends in place.
        word_list.extend(str(fragment).translate(punct_to_space).split())
    return word_list
def read_url(url, url_idx, data):
    """Index the words of page *url* (page index *url_idx*) into *data*.

    data maps word -> [url_idx_list, frequency], where url_idx_list is the
    list of page indices whose page contains the word and frequency is the
    total number of occurrences across all pages.
    """
    word_list = wordList(url)
    for word in word_list:
        if word not in data:
            # First occurrence of this word anywhere.
            data[word] = [[url_idx], 1]
        else:
            # BUG FIX: the original tested `if url_idx in data[word][0]` and
            # appended the word *position* `i`; per the block's own spec the
            # page index must be added only when it is NOT yet present.
            if url_idx not in data[word][0]:
                data[word][0].append(url_idx)
            data[word][1] += 1
# + id="LrPuiiDhQfrJ" colab={"base_uri": "https://localhost:8080/"} outputId="b3b64bde-4869-493c-d69c-56111c7a0493"
# Build the word index over every crawled page.
# NOTE(review): url_index starts at 1 here, while the assignment text speaks
# of indices in [0, 200) -- confirm which convention is intended.
data = {}
for url_index, url in enumerate(url_list, 1):
    print(url)
    try:
        read_url(url, url_index, data)
    # BUG FIX: the original bare `except: pass` swallowed every error --
    # including KeyboardInterrupt -- without a trace. Keep the best-effort
    # behaviour but catch only Exception and report what was skipped.
    except Exception as exc:
        print(f"  skipped: {exc!r}")
# + [markdown] id="740eXy7pRryx"
# ## 3. Tiền xử lý
# Loại bỏ các item trong data mà key là các stopword.
#
# **Ngữ liệu:**
# + id="hC58K3Q3Rryy" colab={"base_uri": "https://localhost:8080/"} outputId="40d57bfd-bd42-4300-ba1f-30bb2516545e"
import nltk
# Download the stopword corpus (no-op if already cached locally).
nltk.download('stopwords')
from nltk.corpus import stopwords
english_stopwords = stopwords.words('english')
print(english_stopwords)
# + id="cylo7trpRnun"
# Remove English stopword keys from the index.
# Iterating the short stopword list with dict.pop(key, None) gives the same
# result as the original, without copying the entire (large) data dict.
for stopword in english_stopwords:
    data.pop(stopword, None)
# + [markdown] id="CEoxNVHqRry2"
# ## 4. Lưu trữ và biểu diễn dữ liệu
# Sử dụng pickle lưu lại data với tên file raw_data.
# ### 4.1 Cơ sở dữ liệu giao tác:
# Thông thường, các cơ sở dữ liệu giao tác được lưu trong flat files (các tập phẳng) thay vì trong một hệ cơ sở dữ liệu. Các item là các số nguyên không âm, mỗi giao tác tương ứng với một dòng các số nguyên phân tách nhau bằng khoảng trắng.
# Ví dụ:
#
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29
#
# 30 31 32
#
# 33 34 35
#
# 36 37 38 39 40 41 42 43 44 45 46
#
# 38 39 47 48
#
# 38 39 48 49 50 51 52 53 54 55 56 57 58
#
# 32 41 59 60 61 62
#
# 3 39 48
#
# 63 64 65 66 67 68
#
# 32 69
#
# 48 70 71 72
#
# 39 73 74 75 76 77 78 79
#
# 36 38 39 41 48 79 80 81
#
# 82 83 84
#
# 41 85 86 87 88
#
# 39 48 89 90 91 92 93 94 95 96 97 98 99 100 101
#
# 36 38 39 48 89
#
# 39 41 102 103 104 105 106 107 108
#
# 38 39 41 109 110
#
# 39 111 112 113 114 115 116 117 118
#
# 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133
#
# 48 134 135 136
#
# 39 48 137 138 139 140 141 142 143 144 145 146 147 148 149
#
# 39 150 151 152
#
# 38 39 56 153 154 155
# + id="4l--TVLEoN7R"
with open('raw_data', 'wb') as f:
    # Persist the word -> [url_idx_list, frequency] index with pickle.
    pickle.dump(data,f)
# + colab={"base_uri": "https://localhost:8080/"} id="GuISb-CNZ3EX" outputId="a4816939-f99e-452e-d29b-4223dcee0abc"
# Round-trip check: reload the pickle and peek at the first 10 entries.
output = {}
with open("raw_data", "rb") as f:
    output = pickle.load(f)
from itertools import islice
list(islice(output.items(), 10))
# + [markdown] id="QvUgw0VkRry3"
# ### 4.2 Xuất dataset
# Lưu một cơ sở dữ liệu giao tác (transactional database) vào file t_data.txt:
# - Các item tương ứng với url_idx
# - Mỗi transaction tương ứng với một từ.
# + id="ZorK46LdSCfi"
with open('t_data.txt', 'w') as f:
    # One transaction (line) per word: the indices of pages containing it.
    # BUG FIX: the loop variable was named `url_list`, clobbering the global
    # crawl result that later cells still use; renamed to avoid the shadowing.
    for word, (page_indices, freq) in data.items():
        print(*page_indices, file=f)
# + [markdown] id="2OXVFoHiRry3"
# ## 5. Truy vấn and, or, not
# Ví dụ:
# - Truy vấn `and` câu `web mining`: trả về đường dẫn tới các trang web có cả 2 từ web và từ mining.
# - Truy vấn `or` câu `web mining`: trả về đường dẫn tới các trang web có từ web hoặc từ mining.
# - Truy vấn `not` câu `web mining`: trả về đường dẫn tới các trang không có cả từ web và từ mining.
#
# *GỢI Ý: TÁCH CÂU TRUY VẤN THÀNH CÁC TỪ TƯƠNG TỰ PHƯƠNG PHÁP LÀM Ở LAB02.*
# + colab={"base_uri": "https://localhost:8080/"} id="keRglQuZwBBm" outputId="23d2e031-44db-49b6-c2a2-a35d65380b77"
url_list
# + id="sybM0yLHRry6"
ret=[]
def andRetrieval(ret, sentence):
    """AND query: append to *ret* every crawled URL containing all terms.

    Parameters
    -----------------------
    ret: list of already-retrieved URLs (extended in place and returned)
    sentence: whitespace-separated query terms

    NOTE(review): terms are matched against the URL string itself, not the
    indexed page text in `data` -- confirm this is the intended behaviour.
    """
    terms = sentence.split(" ")
    for url in url_list_cp:
        # `term in url` is the idiomatic substring test
        # (equivalent to url.find(term) != -1).
        if all(term in url for term in terms):
            ret.append(url)
    # The original's trailing if/else returned ret unchanged on both branches.
    return ret
def orRetrieval(ret, sentence):
    """OR query: append to *ret* every crawled URL containing at least one term.

    Parameters
    -----------------------
    ret: list of already-retrieved URLs (extended in place and returned)
    sentence: whitespace-separated query terms
    """
    terms = sentence.split(" ")
    for url in url_list_cp:
        # BUG FIX: the original tested `if url.find(term):`, but str.find
        # returns -1 (truthy!) when the term is absent and 0 (falsy!) when it
        # is a prefix, so the test was effectively inverted. Use substring
        # membership instead.
        if any(term in url for term in terms):
            ret.append(url)
    return ret
def notRetrieval(ret, sentence):
    """NOT query: return *ret* without any URL that contains one of the terms.

    Parameters
    -----------------------
    ret: list of candidate URLs
    sentence: whitespace-separated query terms
    """
    terms = sentence.split(" ")
    # BUG FIX: same str.find truthiness bug as in orRetrieval --
    # `if url.find(term):` is truthy when the term is ABSENT (-1) and falsy
    # when it is a prefix (0). Collect URLs that really contain a term.
    excluded = {url for url in url_list_cp if any(term in url for term in terms)}
    # Filter with a comprehension: preserves the order of ret, whereas the
    # original `list(set(ret) - set(urls))` returned an arbitrary order.
    return [url for url in ret if url not in excluded]
# + id="1hkp7ej3qxRQ" colab={"base_uri": "https://localhost:8080/"} outputId="d8bd523d-e71e-41ff-a770-46f4a1c05e63"
print(andRetrieval([], 'web mining'))
# + colab={"base_uri": "https://localhost:8080/"} id="r4WiPtQm0QhN" outputId="a458adb4-2513-46e4-8250-d4a58a9530d3"
print(orRetrieval([], 'web mining'))
# + colab={"base_uri": "https://localhost:8080/"} id="xIDjl7Qn0PlX" outputId="63869a5f-7829-4452-f4ab-7bbede62b4b8"
print(notRetrieval(url_list_cp, 'web mining'))
| lab-03/Lab03_WebCrawler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python38564bit02a66c47ce504b05b2ef5646cfed96c2
# ---
# **1**. Find the longset run of Heads in the string recording coin tosses below.
#
#
tosses = 'THTTTTHTTTHTTTHHTTTHTTTHTHHTHHHHTHHTTTHTTHHTHTHHTHTHHHTHTTHHTHHTHHHTHTTHTTTHTHTTHTHHTTTHTTHHTHTHHTHTHHTTTHHHHHTTHTHHTTTTHTTHTHHTHHTTHHTTTHTTHHHHHHHHHHTHTTTTTTHHHTHHTHTTHTHTTTTHTHHHTTTHHTHTTHTHHHTHTHTHTHHHHHHHHHTHTTHHHHTTHTTHTTTTHTTTTHTHTTHHTTHHHHTTTHHHTHTHTHHTHTTTTTTTHHTTHHHHTHHHTTTHHHTTHHTTHTTHTTTHTTHHHTHTHHTHHHTHTHTTTHHHHHTTTHHHHHHTHHHTTHTTTHHTHTTTTTHTTTHHTTTTHTHHHTHTTTHTTHTTTHHTHHTTTTHHHTHTTTTHHTHHTTHHTTHHTHHTHTTHHHHHHTTTTTHTHHTTHHHHHHHTTTTHHTTTHHTHTHHTTHHTTTHHHTTTTTHHHTTHHHHTHTTTTHHTTHHTTHTTTTTTTTHHTTTTHHHHTHHHHHTTTTHHHTTHTTHTTHTTHHHTTHTHTHTHTHHHHTTHHHHHTHHTTHHTTTTHTHHTHHTTHHTHTHHTTHTTHTTTHTTTHHTHTTTHHHHHHHHHTHHHTTHHHTHTHHTHHHTTTTTHTHTHHHTHTHTTHHTTHTTHTHTHTTTHTHHTHHTTHHHTHTTTTHTTHTHTHTTTTHHTHHHTTHHTTHTHHHTHTHTTTTHTTHHHTHTTHTHTTTTTHTHHHHTTHHTHTTHHHTTTTTTTHHTHHHTHHHTTTTHHHTTTHTHTTTHTTTHHHHTHTHHTHHTTTTHTTHTTHHHHHHHHTHTTTTHHHTTTTHTTTTHHHHHHHTTTHHHHHTHHHHTTTHHHTTHTHHTHTTTHTHTHHTTTHHHTHTHHHHHTHHHHHTTTTHTTTHTHHTTHTTHTHHTTTHHTTTHTTTTHTTTTTTTHTTHTHHTTTTHTTTTHHHHTHTTHHTHTTTTHTTTHHHHHHTHTHTHH'
# Option 1: Using a regular expression
# +
import re
max(re.findall('H+', tosses), key=len)
# -
# Option 2: Using a finite state machine
# Scan the string, tracking the current run of heads and the best seen so far.
best_run = ''
run = ''
for t in tosses:
    if t == 'H':
        run += 'H'
    else:
        if len(run) > len(best_run):
            best_run = run
        run = ''
# BUG FIX: a run reaching the end of the string was never compared (the
# update above only fires on seeing a 'T'). Check the final run too.
if len(run) > len(best_run):
    best_run = run
best_run
# **2**. Find all the words starting wtih `k` that are in `data/wonderland.txt` but not in `data/lookingglass.txt`. Remove punctuation and ignore case.
# +
# Read both books into memory as single strings.
with open('data/wonderland.txt') as f:
    b1 = f.read()
with open('data/lookingglass.txt') as f:
    b2 = f.read()
# -
import string
# Translation table that deletes all punctuation characters.
tbl = b1.maketrans('', '', string.punctuation)
b1_words = b1.lower().translate(tbl).split()
b2_words = b2.lower().translate(tbl).split()
# Option 1: Using sets (set difference, then filter by prefix)
k_words = [word for word in set(b1_words) - set(b2_words) if word.startswith('k')]
k_words
# Option 2: Using for loops (inefficient: `word not in b2_words`
# is a linear scan of the whole word list on every iteration)
k_words = set([])
for word in b1_words:
    if word.startswith('k') and word not in b2_words:
        k_words.add(word)
k_words
# **3**. Create a dictionary of the number of times each word in the list from the problem above appears in `data/wonderland.txt`
# Option 1: Using a dictionary comprehension and list methods
# (list.count rescans the whole word list once per key)
{word: b1_words.count(word) for word in k_words}
# Option 2: Using a manual count (one pass over the words)
counts = {}
for word in b1_words:
    if word in k_words:
        counts[word] = counts.get(word, 0) + 1
counts
# **4**. Find the reverse complement of the DNA sequence shown below.
#
# The following bases are complementary a → t, c → g, t → a, g → c. Reverse complement means reverse each base with its complement, then reverse the entire sequence.
dna = '''
aaattgaaga gtttgatcat ggctcagatt gaacgctggc ggcaggccta acacatgcaa
61 gtcgaacggt aacaggaaga agcttgctct ttgctgacga gtggcggacg ggtgagtaat
121 gtctgggaaa ctgcctgatg gagggggata actactggaa acggtagcta ataccgcata
181 acgtcgcaag accaaagagg gggaccttcg ggcctcttgc catcggatgt gcccagatgg
241 gattagctag taggtggggt aacggctcac ctaggcgacg atccctagct ggtctgagag
301 gatgaccagc cacactggaa ctgagacacg gtccagactc ctacgggagg cagcagtggg
361 gaatattgca caatgggcgc aagcctgatg cagccatgcc gcgtgtatga agaaggcctt
421 cgggttgtaa agtactttca gcggggagga agggagtaaa gttaatacct ttgctcattg
481 acgttacccg cagaagaagc accggctaac tccgtgccag cagccgcggt aatacggagg
541 gtgcaagcgt taatcggaat tactgggcgt aaagcgcacg caggcggttt gttaagtcag
601 atgtgaaatc cccgggctca acctgggaac tgcatctgat actggcaagc ttgagtctcg
661 tagagggggg tagaattcca ggtgtagcgg tgaaatgcgt agagatctgg aggaataccg
721 gtggcgaagg cggccccctg gacgaagact gacgctcagg tgcgaaagcg tggggagcaa
781 acaggattag ataccctggt agtccacgcc gtaaacgatg tcgacttgga ggttgtgccc
841 ttgaggcgtg gcttccggag ctaacgcgtt aagtcgaccg cctggggagt acggccgcaa
901 ggttaaaact caaatgaatt gacgggggcc cgcacaagcg gtggagcatg tggtttaatt
961 cgatgcaacg cgaagaacct tacctggtct tgacatccac ggaagttttc agagatgaga
1021 atgtgccttc gggaaccgtg agacaggtgc tgcatggctg tcgtcagctc gtgttgtgaa
1081 atgttgggtt aagtcccgca acgagcgcaa cccttatcct ttgttgccag cggtccggcc
1141 gggaactcaa aggagactgc cagtgataaa ctggaggaag gtggggatga cgtcaagtca
1201 tcatggccct tacgaccagg gctacacacg tgctacaatg gcgcatacaa agagaagcga
1261 cctcgcgaga gcaagcggac ctcataaagt gcgtcgtagt ccggattgga gtctgcaact
1321 cgactccatg aagtcggaat cgctagtaat cgtggatcag aatgccacgg tgaatacgtt
1381 cccgggcctt gtacacaccg cccgtcacac catgggagtg ggttgcaaaa gaagtaggta
1441 gcttaacctt cgggagggcg cttaccactt tgtgattcat gactggggtg aagtcgtaac
1501 aaggtaaccg taggggaacc tgcggttgga tcacctcctt a
'''
# Option 1: Use the `translate` method
# Map each base to its complement and delete digits, spaces and newlines
# (the line numbers and whitespace embedded in the sequence literal above),
# then reverse the whole string.
table = dna.maketrans('actg', 'tgac', '0123456789 \n')
dna.translate(table)[::-1]
# Option 2: Use a loop
# Non-base characters (digits, whitespace) match no branch and are skipped.
rc = []
for nuc in dna:
    if nuc == 'a':
        rc.append('t')
    elif nuc == 't':
        rc.append('a')
    elif nuc == 'c':
        rc.append('g')
    elif nuc == 'g':
        rc.append('c')
''.join(rc)[::-1]
| notebooks/solutions/S02_Exercises_Solutiion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mini Course: Matrix Eigendecomposition
# ## Session 1: Basics
#
# ### Quick matrix tutorial
# Let's go through Numpy's syntax for matric manipulations
# Install packages if necessary
import sys
# !{sys.executable} -m pip install numpy matplotlib scipy ipywidgets pandas
# !{sys.executable} -m jupyter nbextension enable --py widgetsnbextension
import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
# +
# Defining a vector (really, an array)
vec = np.array([10, 20, 30])
# Defining a matrix (also an array)
matrix = np.array([ [1, 2, 3]
                  , [4, 5, 6]
                  , [7, 8, 9]])
# Number of dimensions
vec.ndim # 1
matrix.ndim # 2
# Shape, note: don't write matrix.shape()
vec.shape # (3, )
matrix.shape # (3, 3)
# Get elements
matrix[1, 2] # 6
matrix[-1, -1] # 9
matrix[1, :] # array([4, 5, 6])
matrix[:, 1] # array([2, 5, 8])
# Matrix Multiplication
matrix @ matrix # array([[ 30, 36, 42], [ 66, 81, 96], [102, 126, 150]])
matrix @ vec # array([140, 320, 500])
# Element-by-element operations
matrix * matrix # array([[ 1, 4, 9], [16, 25, 36], [49, 64, 81]])
matrix * vec # array([[ 10, 40, 90], [ 40, 100, 180], [ 70, 160, 270]])
matrix + matrix # array([[ 2, 4, 6], [ 8, 10, 12], [14, 16, 18]])
# Applying a function to elements
np.sin(matrix) # array([[ 0.84147098, 0.90929743, 0.14112001], ...)
np.exp(matrix) # array([[2.71828183e+00, 7.38905610e+00, 2.00855369e+01], ...)
# Matrix operations
matrix.T # array([[1, 4, 7], [2, 5, 8], [3, 6, 9]])
(matrix + 1j * matrix).conjugate() # array([[1.-1.j, 2.-2.j, 3.-3.j], [4.-4.j, 5.-5.j, 6.-6.j], [7.-7.j, 8.-8.j, 9.-9.j]])
matrix.diagonal() # Get diagonal: array([1, 5, 9])
np.diag(vec) # Transforms a vector into a diagonal matrix
matrix.trace() # 15
matrix.sort() # Sorts in place along the last axis (i.e. sorts each row), returns None
matrix.round(14) # Rounds matrix elements to 14 decimal places
# -
# ### Eigendecomposition
#
# Let's explore eigendecompositions with Python
# +
np.linalg.eigvals(matrix) # array([ 1.61168440e+01, -1.11684397e+00, -1.30367773e-15])
np.linalg.eig(matrix) # (array([ 1.61168440e+01, -1.11684397e+00, -1.30367773e-15]), array([[-0.23197069, -0.78583024, 0.40824829],...))
# The columns of `vecs` are the eigenvectors, normalized to unit length
(vals, vecs) = np.linalg.eig(matrix)
vecs[:, 1] @ vecs[:, 1] # 0.9999999999999997
# Multiplying the matrix by an eigenvector gets the same eigenvector multiplied by its eigenvalue
(matrix @ vecs[:, 1] - vals[1] * vecs[:, 1]).round(14) # array([-0., -0., -0.])
# diag(vals) = inverse(vecs) * matrix * vecs
(np.linalg.inv(vecs) @ matrix @ vecs - np.diag(vals)).round(14) # array([[ 0., 0., -0.], [-0., -0., 0.],[-0., -0., 0.]])
# matrix = vecs * diag(vals) * inverse(vecs)
(vecs@ np.diag(vals) @ np.linalg.inv(vecs) - matrix).round(14) # array([[ 0., 0., -0.], [-0., -0., 0.],[-0., -0., 0.]])
# Trace of matrix equals the sum of the eigenvalues (trace of diag(vals))
(matrix.trace() - np.diag(vals).trace()).round(14) # 0.0
# -
# ### Application 1 - Powers of matrices
#
# Let's consider the political parties of a small country. There are 3 parties, S, T and U. After each election cycle, some people leave their party for a different one, while some remain. The probability of people switching parties is given by the matrix
#
# $$
# P = \left(
# \begin{array}{ccc}
# 0.6 & 0.3 & 0.3 \\
# 0.2 & 0.6 & 0.2\\
# 0.2 & 0.1 & 0.5
# \end{array}
# \right)
# $$
#
# The first row can be read as "60% of people in party S are expected to remain in party S, 30% of people in party T will join S as well as 30% of people from party U"
#
# The first column can be read as "60% of people in party S are expected to remain in party S, 20% are expected to switch to party T and 20% to party U".
#
# 1. Starting with an arbitrary initial population (e.g. $(0.3, 0.5, 0.2)$) for the parties, what will be the population after 1 election cycle?
# 1. What will be the population after 2, 3, 4 election cycle? n election cycles?
# 1. Calculate the population after $n$ election cycles using matrix diagonalization
# 1. What is particular about that population?
# ### Application 2 - Exponential of a function
#
# Compute the exponential of a matrix with
# 1. The built-in `scipy.linalg.expm` function
# 1. The eigendecomposition of the matrix
# and compare the results
#
#
# ### Application 3 - Geometric transformation interpretation
#
# +
# A 4x4 grid of points spanning [-5, 5]^2; columns of `square` are (x, y) points.
line = np.linspace(-5, 5, num = 4)
square = np.array([[i, j] for i in line for j in line]).T
fig1, ax1 = plt.subplots()
ax1.scatter(square[0, :], square[1, :])
plt.xlim([-20, 20])
plt.ylim([-20, 20])
ax1.set_aspect('equal')
# +
# 15 points sampled on a circle of radius 10 (endpoints coincide at theta = 0 and 2*pi).
theta = np.linspace(0, 2 * np.pi, num = 15)
x = 10 * np.cos(theta)
y = 10 * np.sin(theta)
circle = np.array([x, y])
fig1, ax1 = plt.subplots()
ax1.scatter(circle[0, :], circle[1, :])
plt.xlim([-20, 20])
plt.ylim([-20, 20])
ax1.set_aspect('equal')
# -
# Choose the point cloud used by the interactive demo below;
# the second assignment wins, so `square` is the active shape.
shape = circle
shape = square
@interact(a=0.0, b=1.0,c=1.0,d= 0.0,t=(0.0, 1.0), eig = False) # x-y inverse
#@interact(a=1.0, b=1.0,c=0.0,d= 1.0,t=(0.0, 1.0), eig = False) # x shear - not diagonalizable
#@interact(a=2.0, b=0.0,c=0.0,d= 2.0, t=(0.0, 1.0), eig = False) # Identity
def g(a, b, c, d, t, eig):
    """Interactively visualize the linear map [[a, b], [c, d]] acting on `shape`.

    t in [0, 1] interpolates between the original points (t=0) and their
    transformed images (t=1). If `eig` is set, the (scaled) eigenvectors of
    the transformation are drawn as arrows.
    """
    transformation = np.array([[a, b], [c, d]])
    print("Transformation:", transformation)
    transformed = transformation @ shape
    # Linear interpolation between the original and fully transformed cloud.
    intermediate = (1 - t) * shape + t * transformed
    (vals, vecs) = np.linalg.eig(transformation)
    print(vals, vecs)
    fig1, ax1 = plt.subplots()
    ax1.scatter(shape[0, :], shape[1, :])
    ax1.scatter(intermediate[0, :], intermediate[1, :])
    # Rays from the origin to each original point.
    for [x1, y1] in shape.T:
        ax1.plot((0, x1), (0, y1), 'skyblue')
    # Segments joining each original point to its interpolated image.
    for [x0, y0], [x1, y1] in zip(shape.T, intermediate.T):
        ax1.plot((x0, x1), (y0, y1), 'salmon')
    # Fix the viewport once; the original redundantly reset it three times.
    plt.xlim([-20, 20])
    plt.ylim([-20, 20])
    r = 5 # Arrow scale
    if eig:
        # Each eigenvector is scaled by its eigenvalue to show stretch/flip.
        ax1.arrow(0,0,r * vals[0] * vecs[0,0], r * vals[0] * vecs[1,0],head_width=1,head_length=2)
        ax1.arrow(0,0,r * vals[1] * vecs[0,1], r * vals[1] * vecs[1,1],head_width=1,head_length=2)
    ax1.set_aspect('equal')
| Session 1 - Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="ZsP6kpU6kXby" outputId="985ec445-b856-4b0f-a7a2-a9e6f35fcd1b"
import numpy as np
import keras
from keras.layers import *
from keras.models import *
# Load the MNIST digit dataset (28x28 grayscale images).
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
print(x_train[0])
# + id="ouNHHsG2okDV"
# Scale pixels into [0, 1). NOTE(review): 255.0 is the conventional divisor;
# 256.0 is kept here, and display() further down mirrors it by multiplying back.
x_train = x_train / 256.0
# + colab={"base_uri": "https://localhost:8080/"} id="RgcbZK_oopNm" outputId="23714bc4-9b45-46e9-b0a7-5298df6ba882"
# Add a channel axis: (N, 28, 28) -> (N, 28, 28, 1) for the Conv2D layers.
x_train = x_train.reshape((-1, 28,28,1))
print(x_train.shape)
# + id="vRmLoypzo8OO"
input_shape = (28,28,1)
# + colab={"base_uri": "https://localhost:8080/"} id="_m9HF10vosCq" outputId="3b0a049f-ebe8-473c-a3ed-cdc505173bd6"
model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        # Encoder: two conv+pool stages, 28x28 -> 14x14 -> 7x7.
        keras.layers.Conv2D(32, kernel_size=(3, 3), activation="selu" , padding='same'),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Conv2D(64, kernel_size=(3, 3), activation="selu", padding='same'),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        # Bottleneck: 256-dimensional latent code.
        keras.layers.Flatten(),
        keras.layers.Dense(256, activation="selu"),
        # Decoder: expand back to 7x7x64, then upsample with transposed convs.
        keras.layers.Dense(3136),
        keras.layers.Reshape((7,7,64)),
        keras.layers.Conv2DTranspose(64, 2, 2, padding='same'),
        keras.layers.Conv2DTranspose(32, 2, 2, padding='same'),
        keras.layers.Conv2DTranspose(1, 2, 2, padding='same'),
        # NOTE(review): the three stride-2 transposed convs reach 56x56; this
        # final stride-2 conv brings the output back to 28x28 - confirm intended.
        keras.layers.Conv2D(1,3, 2, activation="sigmoid", padding='same'),
    ]
)
model.summary()
# Pixels lie in [0, 1), so train with per-pixel binary cross-entropy.
model.compile(loss='binary_crossentropy', optimizer='adam')
# + colab={"base_uri": "https://localhost:8080/", "height": 413} id="cKESDl0Yo6DO" outputId="2192ef44-d46d-4a16-91e0-8cc3fad43bc9"
# Autoencoder training: the inputs are also the targets.
model.fit(x_train,x_train, epochs=5, validation_split=0.2, batch_size=256)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="8SucBC2trhr_" outputId="8c0a26e5-ac4c-4a6a-de8b-b589aa4123c1"
import matplotlib.pyplot as plt
def display(img):
    """Show one MNIST image (any array reshapable to 28x28).

    Pixel values are scaled back up by 256 to undo the normalization
    applied to x_train above.
    """
    pixels = img.reshape((28, 28)) * 256
    plt.imshow(pixels)
    plt.show()
display(x_train[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="k0zzbLd6sBX-" outputId="da09c7b0-1996-4918-b924-9d1b1fd68628"
# Reconstruct a few training images through the full autoencoder.
y = model.predict(x_train[:5])
display(y[0])
# + colab={"base_uri": "https://localhost:8080/"} id="TQKE6BkNsFom" outputId="d60d688f-1c14-45a6-eda9-f144b730c9ea"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="scGuIsvasJMg" outputId="a5f75a51-88f6-4021-9fc7-b528fe4c7d15"
# Split off the encoder: layers 0-5 run up to the Dense(256) bottleneck.
enc_input = model.layers[0].input
enc_output = model.layers[5].output
enc = keras.models.Model(enc_input,enc_output)
enc.summary()
enc.save('encoder.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="QAnnicREtB7m" outputId="1ab7869f-e9bf-480d-e758-c138da310da8"
# Decoder: a fresh 256-d input followed by the remaining layers (Dense(3136) onward).
dec = Sequential([keras.layers.Input(shape=(256,))] + model.layers[6:])
dec.summary()
dec.save('decoder.h5')
# + colab={"base_uri": "https://localhost:8080/", "height": 320} id="Ej7_pITusYdt" outputId="c3e44983-d643-4585-84f3-f14b99f5be14"
# Round-trip check: encode then decode a few samples and display the result.
samples = x_train[0:5]
print(samples.shape)
enc_x = enc.predict(samples)
print(enc_x.shape)
dec_x = dec.predict(enc_x)
print(dec_x.shape)
display(dec_x[0])
| examples/Simple_Auto_Encoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
def sigmoid(x):
    """
    Numerically stable sigmoid activation: 1 / (1 + exp(-x)).

    Implemented as exp(-logaddexp(0, -x)), which is algebraically identical
    but avoids the overflow RuntimeWarning np.exp raises in the naive
    formula for large negative inputs. Works element-wise on arrays.
    """
    return np.exp(-np.logaddexp(0, -x))
# Layer sizes for a toy 4-3-2 network
N_input = 4
N_hidden = 3
N_output = 2
np.random.seed(42)
# Fake input sample and small randomly-initialized weight matrices
X = np.random.randn(4)
weights_input_to_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))
weights_hidden_to_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))
# Forward pass: input -> hidden -> output, sigmoid activation at each layer
hidden_layer_out = sigmoid(X @ weights_input_to_hidden)
print('Hidden-layer Output:')
print(hidden_layer_out)
output_layer_out = sigmoid(hidden_layer_out @ weights_hidden_to_output)
print('Output-layer Output:')
print(output_layer_out)
# -
| hidden-layer/build-dummy-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # NODDI-Watson in crossings using MIX optimization
# Estimation of microstructure parameters in crossing tissue configuration is currently still a challenge in microstructure imaging. To tackle this problem, *(Farooq et al. 2016)* proposed a stochastic optimization approach called MIX to fit known microstructure models such as NODDI-Watson *(Zhang et al. 2012)* in crossings, which they refer to as NODDIx. In this example, we use MIX to reproduce their example application of NODDIx, whose configuration is simply an extension of regular NODDI-Watson with multiple bundles:
# \begin{align}
# E^{\textrm{NODDIx}}_{\textrm{Watson}}= \underbrace{f_{\textrm{CSF}}\overbrace{E_{\textrm{iso}}(\cdot|\lambda_{\textrm{CSF}})}^{\textrm{Ball}}}_{\textrm{CSF}}
# +\sum_{i=1}^{2}\overbrace{W(\kappa_i,\boldsymbol{\mu}_i)}^{\textrm{Watson}}\,*_{\mathbb{S}^2}\, \left[\underbrace{f_{h,i}\overbrace{E_{\textrm{h}}(\cdot|\lambda_\perp^{\textrm{tort}},\lambda_\parallel)}^{\textrm{Zeppelin}}}_{\textrm{Hindered Extra-Axonal}}+\underbrace{f_{r,i}\overbrace{E_r(\cdot|\lambda_\parallel)}^{\textrm{Stick}}}_{\textrm{Intra-Axonal}}\right].
# \end{align}
# MIX addresses the issue of robustly finding a global minimum for models with many parameters. The approach involves taking multiple optimization steps, which separately optimize the linear parameters (volume fractions) and non-linear parameters (all the others).
# ## Generate NODDIx model
# We generate the model the same way as in the previous example - only now we include two instead of one Watson-dispersed bundles. First we import the separate pieces:
from dmipy.signal_models import cylinder_models, gaussian_models
# Basic compartments: Ball = isotropic CSF, Stick = intra-axonal,
# Zeppelin = hindered extra-axonal diffusion.
ball = gaussian_models.G1Ball()
stick = cylinder_models.C1Stick()
zeppelin = gaussian_models.G2Zeppelin()
# Combine them into one Watson dispersed bundle and set NODDI's parameter links and fixes:
from dmipy.distributions.distribute_models import SD1WatsonDistributed
watson_dispersed_bundle1 = SD1WatsonDistributed(models=[stick, zeppelin])
watson_dispersed_bundle1.parameter_names
# Standard NODDI constraints: tortuosity ties the Zeppelin's perpendicular
# diffusivity to the parallel one, intra/extra parallel diffusivities are
# equal, and the parallel diffusivity is fixed at 1.7e-9.
watson_dispersed_bundle1.set_tortuous_parameter('G2Zeppelin_1_lambda_perp','C1Stick_1_lambda_par','partial_volume_0')
watson_dispersed_bundle1.set_equal_parameter('G2Zeppelin_1_lambda_par', 'C1Stick_1_lambda_par')
watson_dispersed_bundle1.set_fixed_parameter('G2Zeppelin_1_lambda_par', 1.7e-9)
# We can create a second instance of the first bundle by using the .copy() function.
watson_dispersed_bundle2 = watson_dispersed_bundle1.copy()
from dmipy.core.modeling_framework import MultiCompartmentModel
# NODDIx = Ball + two independently oriented Watson-dispersed bundles.
NODDIx_mod = MultiCompartmentModel(models=[ball, watson_dispersed_bundle1, watson_dispersed_bundle2])
# Visualize the model:
from IPython.display import Image
NODDIx_mod.visualize_model_setup(view=False, cleanup=False)
Image('Model Setup.png')
# As the last step we fix the isotropic diffusivity of the model
NODDIx_mod.parameter_names
# Fix the free-water (Ball) diffusivity at 3e-9.
NODDIx_mod.set_fixed_parameter('G1Ball_1_lambda_iso', 3e-9)
# ## Human Connectome Project Example
from dmipy.data import saved_data
# Load a coronal slice of Wu-Minn HCP data together with its acquisition scheme.
scheme_hcp, data_hcp = saved_data.wu_minn_hcp_coronal_slice()
# 20x20 region of interest used for fitting below.
sub_image = data_hcp[70:90,: , 70:90]
# +
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# %matplotlib inline
fig, ax = plt.subplots(1)
# NOTE(review): imshow's `origin` expects 'upper'/'lower'; passing True relies
# on deprecated bool handling - confirm against the matplotlib version in use.
ax.imshow(data_hcp[:, 0, :, 0].T, origin=True)
# Outline the ROI selected above.
rect = patches.Rectangle((70,70),20,20,linewidth=1,edgecolor='r',facecolor='none')
ax.add_patch(rect)
ax.set_axis_off()
ax.set_title('HCP coronal slice B0 with ROI');
# -
# ## Fit NODDIx to the HCP subsection
# To use MIX instead of the standard Brute2Fine, the solver needs to be set to 'mix'. The option 'maxiter' states the maximum number of iterations the stochastic algorithm takes.
#
# #### Note: Fitting NODDIx using MIX even on this small patch takes close to 90 minutes!
# Fit NODDIx voxel-wise with the stochastic MIX solver (slow: ~90 min on this ROI).
NODDIx_fit = NODDIx_mod.fit(scheme_hcp, sub_image, solver='mix', maxiter=300)
# ## Visualizing NODDIx FODs
# We can now visualize the FODs of the fitted NODDIx model near the corpus callosum and centrum semiovale. We expect to find a single-bundle moving in from the left, leading to a crossing area on the right.
from dipy.data import get_sphere
from dipy.viz.actor import slicer
# Dense sphere on which to sample the fiber orientation distributions (FODs).
sphere = get_sphere(name='symmetric724').subdivide()
fods = NODDIx_fit.fod(sphere.vertices, visual_odi_lower_bound=0.05)
# +
import numpy as np
# Affine shifting the background slice so it is centered under the FOD glyphs.
affine = np.eye(4)
affine[0,3] = -10
affine[1,3] = -10
# Bug fix: `fitted_parameters` is only assigned in a later cell, so running the
# notebook top-to-bottom raised NameError here; pull the isotropic (Ball)
# volume-fraction map directly from the fit object instead.
volume_res = NODDIx_fit.fitted_parameters['partial_volume_0']
volume_im = slicer(volume_res[:, 0, :, None], interpolation='nearest', affine=affine, opacity=0.7)
# -
from dipy.viz import fvtk
# Render the FOD field on top of the isotropic volume-fraction slice.
ren = fvtk.ren()
fod_spheres = fvtk.sphere_funcs(fods, sphere, scale=1., norm=False)
# Rotate the glyphs into the same frame as the background slice.
fod_spheres.RotateX(90)
fod_spheres.RotateZ(180)
fod_spheres.RotateY(180)
fvtk.add(ren, fod_spheres)
fvtk.add(ren, volume_im)
# Writes the rendering to disk (read back below as 'dipy.png').
fvtk.record(ren=ren, size=[700, 700])
# +
import matplotlib.image as mpimg
# Load the snapshot saved by fvtk.record above and crop its margins.
img = mpimg.imread('dipy.png')
plt.figure(figsize=[10, 10])
plt.imshow(img[100:-97, 100:-85])
plt.title('NODDIx FODs with isotropic volume fraction background', fontsize=20)
plt.axis('off');
# -
# Indeed, MIX managed to estimate a smooth FOD map, finding consistent crossing areas on the right and a smooth bundle continuation out of the corpus callosum - which is impressive. However, from the background we can see that the isotropic compartment is underestimated above the corpus callosum.
# ## Visualizing parameters maps
# Visualizing the estimated parameter maps as before we can see they are much less interpretable than those of simpler single bundle models. Only partial_volume_0 (the Ball volume fraction) is smooth, showing the CSF on the bottom, but the ordering of the other parameter values is randomly distributed among the two Watson-dispersed bundles. Furthermore, it doesn't look like only a single Watson bundle was active at any point in the corpus callosum - the model found a global minimum where both bundles contributed to the single bundle with some optimal model parameters.
# +
fitted_parameters = NODDIx_fit.fitted_parameters
fig, axs = plt.subplots(3, 3, figsize=[15, 10])
axs = axs.ravel()
counter = 0
# Show every scalar (2-D) parameter map; skip vector-valued maps such as orientations.
for name, values in fitted_parameters.items():
    if values.squeeze().ndim != 2:
        continue
    # NOTE(review): `origin` expects 'upper'/'lower'; True relies on bool coercion.
    cf = axs[counter].imshow(values.squeeze().T, origin=True, interpolation='nearest')
    axs[counter].set_title(name)
    fig.colorbar(cf, ax=axs[counter], shrink=0.5)
    counter += 1
# -
# While MIX still produced good FOD fields, this means that in its current form MIX lacks a way to automatically select the best (actual) number of bundles in a voxel. This means model parameters do not have a microstructural interpretation when the number of defined bundles does not match the actual number of bundles. A sparsity regularization on the volume fractions such as used by *(Zhu et al. 2013)* could be a solution.
# ## References
# - <NAME>, et al. "Microstructure imaging of crossing (MIX) white matter fibers from diffusion MRI." Scientific reports 6 (2016): 38927.
# - <NAME>, et al. "NODDI: practical in vivo neurite orientation dispersion and density imaging of the human brain." Neuroimage 61.4 (2012): 1000-1016.
# - <NAME>, et al. "Model selection and estimation of multi-compartment models in diffusion mri with a rician noise model." International Conference on Information Processing in Medical Imaging. Springer, Berlin, Heidelberg, 2013.
| examples/example_mix_microstructure_imaging_in_crossings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy.stats import poisson
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# cd ~/code/snc
from collections import defaultdict
from snc.meio.gsm.utils import read_supply_chain_from_txt
from snc.experiment.numerical_simulator import simulate,compute_base_stocks,truncate_and_conserve
from snc.experiment.vis_utils import plot_cascading_effect
from snc.meio.gsm.tree_gsm import GuaranteedServiceModelTree
from snc.meio.gsm.tree_gsm import verify_solution_policy,compute_replenishment_times
from snc.experiment.basic_serial_network import (create_serial_stages,
iterate_experimental_profiles,
create_serial_line_from_lead_times)
from snc.experiment.num_sim_utils import get_new_stockout_intervals
# Simulation horizon (number of periods).
n=10000
# Poisson demand rate per period.
lam = 10
# Target service level used when sizing base stocks.
sla = 0.95
# Log-spaced supply lead times swept in the experiments below.
lead_times = [1,3,10,30,100,300,1000,3000,10000]
# +
seed = 8675310
f,ax = plt.subplots(3,1,figsize=(12,18),sharex=False)
# Baseline: a single-stage line with lead time 1 and no upstream buffers.
# NOTE(review): `demand_stage_params` is not defined anywhere in this chunk -
# presumably created in an earlier cell; confirm before running top-to-bottom.
stages = create_serial_line_from_lead_times(lead_times=[1],demand_stage_params=demand_stage_params)
policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
capacity_constraints = {"1":12}
stat_func = np.mean
np.random.seed(seed=seed)
inv_histories = simulate(stages,policy,base_stocks,capacity_constraints,lam,n,stockout_stages=None)
indep_inv_history = inv_histories["1"]
indep_sla = np.mean(inv_histories["1"] >= 0)
# NOTE(review): `_collect_stockout_intervals` is neither defined nor imported in
# this chunk (only `get_new_stockout_intervals` is) - this raises NameError as
# written; verify the intended helper.
indep_stockout_intervals = _collect_stockout_intervals(inv_histories["1"])
indep_backorders = stat_func(sum(indep_stockout_intervals.values(),[]))
indep_stout_dur = stat_func([len(interval) for interval in indep_stockout_intervals.values()])
lead_times = list(lead_times)
# Sweep the number of intermediate buffer stages and the supply lead time,
# recording effective SLA, backorder size and stockout duration at stage "1".
for n_bufs in range(5):
    effective_slas = [indep_sla]
    backorders = [indep_backorders]
    stout_durs = [indep_stout_dur]
    for l2 in lead_times:
        l_times = [1]+[1]*n_bufs+[l2]
        stages = create_serial_line_from_lead_times(lead_times=l_times,demand_stage_params=demand_stage_params)
        policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
        base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
        np.random.seed(seed=seed)
        inv_histories = simulate(stages,policy,base_stocks,capacity_constraints,lam,n,stockout_stages=None)
        # Skip the first l2 periods so the supply pipeline is warmed up.
        effective_slas.append(np.mean(inv_histories["1"][l2:] >= 0))
        stockout_intervals = get_new_stockout_intervals(inv_histories["1"],indep_inv_history)
        backorders.append(stat_func(sum(stockout_intervals.values(),[])))
        stout_durs.append(stat_func([len(inter) for inter in stockout_intervals.values()]))
        #neg = inv_histories["1"][l2:] < 0
        #bad_days = ~(neg & indep_neg[l2:]) & neg
        #backorders.append(-np.median(inv_histories["1"][l2:][bad_days]))
    # 5e-1 stands in for the baseline (no-supply-lead-time) point on the log x-axis.
    ax[0].plot([5e-1]+lead_times,effective_slas,"-.b",alpha=0.2)
    ax[0].plot([5e-1]+lead_times,effective_slas,"o",label=n_bufs)
    ax[1].plot([5e-1]+lead_times,backorders,"-.b",alpha=0.2)
    ax[1].plot([5e-1]+lead_times,backorders,"o",label=n_bufs)
    ax[2].plot([5e-1]+lead_times,stout_durs,"-.b",alpha=0.2)
    ax[2].plot([5e-1]+lead_times,stout_durs,"o",label=n_bufs)
for i in range(3):
    ax[i].set_xscale("log")
    ax[i].set_xlabel("Supply lead time (days)")
    ax[i].grid(axis="y")
    ax[i].legend(title="Number of intermediate buffers")
ax[0].set_yticks(np.arange(0.9,0.97,0.01))
ax[0].set_ylabel("Effective SLA")
ax[1].set_ylabel("Mean backorders queue (items)")
ax[2].set_ylabel("Mean stockout duration (days)")
ax[0].set_title("Effect of cascading stockouts in presence of intermediate buffer stages")
ax[1].set_yscale("log")
ax[2].set_yscale("log")
# +
seed = 8675310
f,ax = plt.subplots(3,1,figsize=(12,18),sharex=False)
stages = create_serial_line_from_lead_times(lead_times=[1],demand_stage_params=demand_stage_params)
policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
stat_func = np.mean
np.random.seed(seed=seed)
# NOTE(review): here `simulate` is called WITHOUT capacity_constraints, unlike
# the previous cell (7 positional args there) - confirm the intended signature.
inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=None)
indep_inv_history = inv_histories["1"]
indep_sla = np.mean(inv_histories["1"] >= 0)
# NOTE(review): `_collect_stockout_intervals` is not defined/imported in this chunk.
indep_stockout_intervals = _collect_stockout_intervals(inv_histories["1"])
indep_backorders = stat_func(sum(indep_stockout_intervals.values(),[]))
indep_stout_dur = stat_func([len(interval) for interval in indep_stockout_intervals.values()])
# Compare n_bufs unit-lead-time buffers against one aggregated buffer stage
# whose lead time equals n_bufs.
for n_bufs in range(4,5):
    effective_slas = [indep_sla]
    backorders = [indep_backorders]
    stout_durs = [indep_stout_dur]
    effective_slas_2 = [indep_sla]
    for l2 in lead_times:
        l_times = [1]+[1]*n_bufs+[l2]
        stages = create_serial_line_from_lead_times(lead_times=l_times,demand_stage_params=demand_stage_params)
        policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
        base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
        np.random.seed(seed=seed)
        inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=None)
        effective_slas.append(np.mean(inv_histories["1"][l2:] >= 0))
        stockout_intervals = get_new_stockout_intervals(inv_histories["1"],indep_inv_history)
        backorders.append(stat_func(sum(stockout_intervals.values(),[])))
        stout_durs.append(stat_func([len(inter) for inter in stockout_intervals.values()]))
        # Aggregated variant: a single buffer stage with lead time n_bufs.
        l_times = [1]+[n_bufs]+[l2]
        stages = create_serial_line_from_lead_times(lead_times=l_times,demand_stage_params=demand_stage_params)
        policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
        base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
        np.random.seed(seed=seed)
        inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=None)
        effective_slas_2 .append(np.mean(inv_histories["1"][l2:] >= 0))
        continue
        # NOTE(review): everything below this `continue` is unreachable dead code.
        stockout_intervals = get_new_stockout_intervals(inv_histories["1"],indep_inv_history)
        backorders.append(stat_func(sum(stockout_intervals.values(),[])))
        stout_durs.append(stat_func([len(inter) for inter in stockout_intervals.values()]))
        #neg = inv_histories["1"][l2:] < 0
        #bad_days = ~(neg & indep_neg[l2:]) & neg
        #backorders.append(-np.median(inv_histories["1"][l2:][bad_days]))
    ax[0].plot([5e-1]+lead_times,effective_slas,"-.b",alpha=0.2)
    ax[0].plot([5e-1]+lead_times,effective_slas,"o",label=n_bufs)
    ax[0].plot([5e-1]+lead_times,effective_slas_2,"-.b",alpha=0.2)
    ax[0].plot([5e-1]+lead_times,effective_slas_2,"o",label="aggregated {}".format(n_bufs))
    ax[1].plot([5e-1]+lead_times,backorders,"-.b",alpha=0.2)
    ax[1].plot([5e-1]+lead_times,backorders,"o",label=n_bufs)
    ax[2].plot([5e-1]+lead_times,stout_durs,"-.b",alpha=0.2)
    ax[2].plot([5e-1]+lead_times,stout_durs,"o",label=n_bufs)
for i in range(3):
    ax[i].set_xscale("log")
    ax[i].set_xlabel("Supply lead time (days)")
    ax[i].grid(axis="y")
    ax[i].legend(title="Number of intermediate buffers")
ax[0].set_yticks(np.arange(0.9,0.97,0.01))
ax[0].set_ylabel("Effective SLA")
ax[1].set_ylabel("Mean backorders queue (items)")
ax[2].set_ylabel("Mean stockout duration (days)")
ax[0].set_title("Effect of cascading stockouts in presence of intermediate buffer stages")
ax[1].set_yscale("log")
ax[2].set_yscale("log")
# -
# Inspect the last serial line constructed above.
stages
# +
seed = 8675310
f,ax = plt.subplots(3,1,figsize=(12,18),sharex=False)
stages = create_serial_line_from_lead_times(lead_times=[1],demand_stage_params=demand_stage_params)
policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
stat_func = np.mean
np.random.seed(seed=seed)
inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=None)
indep_inv_history = inv_histories["1"]
indep_sla = np.mean(inv_histories["1"] >= 0)
# NOTE(review): `_collect_stockout_intervals` is not defined/imported in this chunk.
indep_stockout_intervals = _collect_stockout_intervals(inv_histories["1"])
indep_backorders = stat_func(sum(indep_stockout_intervals.values(),[]))
indep_stout_dur = stat_func([len(interval) for interval in indep_stockout_intervals.values()])
n_bufs = 4
i = 0
# NOTE(review): `enumerate_serial_service_times` is defined in a LATER cell, so
# running the notebook top-to-bottom raises NameError here.
for service_times in enumerate_serial_service_times():
    i+=1
    # Only evaluate the first 5 service-time profiles.
    if i >5:
        break
    # Reverse to downstream-first order and append a 0 for the supply stage.
    service_times = service_times[::-1]+[0]
    effective_slas = [indep_sla]
    backorders = [indep_backorders]
    stout_durs = [indep_stout_dur]
    effective_slas_2 = [indep_sla]
    for l2 in lead_times:
        l_times = [1]+[1]*n_bufs+[l2]
        stages = create_serial_line_from_lead_times(lead_times=l_times,demand_stage_params=demand_stage_params)
        policy = {stage_id:{"s":service_times[int(stage_id)-1],"si":service_times[int(stage_id)]} for stage_id in stages}
        #print(policy)
        rep_times = compute_replenishment_times(policy,stages)
        #print(rep_times)
        base_stocks = compute_base_stocks(stages,policy,lam,sla=sla)
        np.random.seed(seed=seed)
        inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=None)
        effective_slas.append(np.mean(inv_histories["1"][l2:] >= 0))
        stockout_intervals = get_new_stockout_intervals(inv_histories["1"],indep_inv_history)
        backorders.append(stat_func(sum(stockout_intervals.values(),[])))
        stout_durs.append(stat_func([len(inter) for inter in stockout_intervals.values()]))
        #neg = inv_histories["1"][l2:] < 0
        #bad_days = ~(neg & indep_neg[l2:]) & neg
        #backorders.append(-np.median(inv_histories["1"][l2:][bad_days]))
    ax[0].plot([5e-1]+lead_times,effective_slas,"-.b",alpha=0.2)
    ax[0].plot([5e-1]+lead_times,effective_slas,"o",label="{}".format(rep_times))
    ax[1].plot([5e-1]+lead_times,backorders,"-.b",alpha=0.2)
    ax[1].plot([5e-1]+lead_times,backorders,"o",label="{}".format(rep_times))
    ax[2].plot([5e-1]+lead_times,stout_durs,"-.b",alpha=0.2)
    ax[2].plot([5e-1]+lead_times,stout_durs,"o",label="{}".format(rep_times))
for i in range(3):
    ax[i].set_xscale("log")
    ax[i].set_xlabel("Supply lead time (days)")
    ax[i].grid(axis="y")
    ax[i].legend(title="Number of intermediate buffers")
ax[0].set_yticks(np.arange(0.9,0.97,0.01))
ax[0].set_ylabel("Effective SLA")
ax[1].set_ylabel("Mean backorders queue (items)")
ax[2].set_ylabel("Mean stockout duration (days)")
ax[0].set_title("Effect of cascading stockouts in presence of intermediate buffer stages")
ax[1].set_yscale("log")
ax[2].set_yscale("log")
# -
def enumerate_serial_service_times(service_times_list=None):
    """Recursively enumerate service-time profiles for a 5-stage serial line.

    Yields lists of length 6 of the form [0, s1, s2, s3, s4, 0], where each
    successive service time may exceed its predecessor by at most 1
    (s_{k+1} <= s_k + 1, starting from 0).

    Fix: the original used a mutable default argument (``=[0]``) and mutated
    the yielded list in place via ``append``. A None sentinel and fresh result
    lists avoid shared state across calls; the yielded values are identical.
    """
    if service_times_list is None:
        service_times_list = [0]
    if len(service_times_list) == 5:
        # Terminal case: pad with the fixed 0 service time of the last stage.
        yield service_times_list + [0]
        return
    for s in range(service_times_list[-1] + 2):
        yield from enumerate_serial_service_times(service_times_list + [s])
# Print every enumerated service-time profile, reversed to downstream-first order.
for profile in enumerate_serial_service_times():
    print(profile[::-1])
plt.figure(figsize=(12,8))
stockout_stages = []
effective_slas = []
# Let stockouts propagate from one more upstream stage at a time and measure
# the effect on the demand stage's ("1") effective service level.
for stage_id in range(1,len(stages)+1):
    stockout_stages.append(str(stage_id))
    np.random.seed(seed=8675309)
    inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=stockout_stages)
    effective_slas.append(np.mean(inv_histories["1"] >= 0))
    print(np.min(inv_histories["1"]))
plt.plot(effective_slas,"b",alpha=0.5)
plt.plot(range(0,len(stages)),effective_slas,"o")
# NOTE(review): exact duplicate of the previous cell - likely left over from
# re-running; consider removing one copy.
plt.figure(figsize=(12,8))
stockout_stages = []
effective_slas = []
for stage_id in range(1,len(stages)+1):
    stockout_stages.append(str(stage_id))
    np.random.seed(seed=8675309)
    inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=stockout_stages)
    effective_slas.append(np.mean(inv_histories["1"] >= 0))
    print(np.min(inv_histories["1"]))
plt.plot(effective_slas,"b",alpha=0.5)
plt.plot(range(0,len(stages)),effective_slas,"o")
# +
plt.figure(figsize=(12,8))
# For each experimental cost/lead-time profile, optimize safety-stock placement
# with the guaranteed-service model and measure SLA degradation as stockouts
# are allowed to propagate from more and more stages.
for profiles_dict in iterate_experimental_profiles():
    stages = create_serial_stages(**profiles_dict)
    stages["1"].demand_mean = lam
    stages["1"].demand_std = np.sqrt(lam)
    solution = GuaranteedServiceModelTree(stages).find_optimal_solution()
    policy = solution.policy
    base_stocks = solution.base_stocks
    #policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
    #base_stocks = compute_base_stocks(stages,policy,lam,sla)
    stockout_stages = []
    effective_slas = []
    for stage_id in range(1,6):
        stockout_stages.append(str(stage_id))
        np.random.seed(seed=8675309)
        inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=stockout_stages)
        effective_slas.append(np.mean(inv_histories["1"] >= 0))
    plt.plot(effective_slas,"b",alpha=0.5)
    plt.plot(range(0,5),effective_slas,"o",label=profiles_dict)
plt.xticks(range(0,5))
plt.grid(axis="y")
plt.xlabel("Number of stages with propagating stockouts",fontsize=15)
plt.ylabel("Effective SLA",fontsize=15)
plt.legend()
# -
# Inspect the base-stock levels from the last GSM solution.
base_stocks
# Stockout frequency at stage "3" in the last simulation run.
np.mean(inv_histories["3"] < 0 )
# Compare GSM-optimized policies (blue) against an all-zero service-time policy
# (red) for each experimental profile, one figure per profile.
for profiles_dict in iterate_experimental_profiles():
    plt.figure(figsize=(12,8))
    stages = create_serial_stages(**profiles_dict)
    stages["1"].demand_mean = lam
    stages["1"].demand_std = np.sqrt(lam)
    solution = GuaranteedServiceModelTree(stages).find_optimal_solution()
    policy = solution.policy
    base_stocks = solution.base_stocks
    stockout_stages = []
    effective_slas = []
    for stage_id in range(1,6):
        stockout_stages.append(str(stage_id))
        np.random.seed(seed=8675309)
        inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=stockout_stages)
        effective_slas.append(np.mean(inv_histories["1"] >= 0))
    plt.plot(effective_slas,"b",alpha=0.5)
    plt.plot(range(0,5),effective_slas,"bo",label=profiles_dict)
    # Baseline policy: zero service times everywhere, base stocks sized to `sla`.
    policy = {stage_id:{"s":0,"si":0} for stage_id in stages}
    base_stocks = compute_base_stocks(stages,policy,lam,sla)
    stockout_stages = []
    effective_slas = []
    for stage_id in range(1,6):
        stockout_stages.append(str(stage_id))
        np.random.seed(seed=8675309)
        inv_histories = simulate(stages,policy,base_stocks,lam,n,stockout_stages=stockout_stages)
        effective_slas.append(np.mean(inv_histories["1"] >= 0))
    plt.plot(effective_slas,"r",alpha=0.5)
    plt.plot(range(0,5),effective_slas,"ro",label=profiles_dict)
    plt.xticks(range(0,5))
    plt.grid(axis="y")
    plt.xlabel("Number of stages with propagating stockouts",fontsize=15)
    plt.ylabel("Effective SLA",fontsize=15)
    plt.legend()
# Display the scipy `poisson` distribution object imported at the top.
poisson
stages = read_supply_chain_from_txt("snc/experiment/basic_serial_network_config.txt")
policy = {"Demand":{"s":0,"si":3},"Dist":{"s":3,"si":0}}
stages["Demand"].lead_time = 1
stages["Dist"].lead_time = 39
# NOTE(review): the two lines below are duplicated immediately after; the second
# pair overrides this one - likely leftover from re-running cells.
stages = read_supply_chain_from_txt("snc/experiment/basic_serial_network.txt")
policy = {"Dist":{"s":3,"si":0},"Demand":{"s":0,"si":3},"Supply":{"s":0,"si":0}}
stages = read_supply_chain_from_txt("snc/experiment/basic_serial_network.txt")
policy = {"Dist":{"s":3,"si":0},"Demand":{"s":0,"si":3},"Supply":{"s":0,"si":0}}
# +
# Shorter horizon and burst parameters for the demand-history experiment.
n=1000
sla = 0.95
p_bf = 0.01
lam = 10
bf = 100
np.random.seed(seed=8675309)
# Poisson demand sample; the commented lines injected rare bursts of size `bf`.
demand_history = np.random.poisson(size=n,lam=lam)
#n_bf = np.random.binomial(n,p_bf)
#idx_bf = np.random.choice(np.arange(len(demand_history)),n_bf)
#demand_history[idx_bf] = bf
# -
demand_history.mean()
demand_history.var()
(12-demand_history.mean())/demand_history.std()
# +
# Compute base stocks for the chosen policy, then run four simulations:
# independent vs. cascading stockouts, each with and without capacity constraints.
base_stocks = compute_base_stocks(stages,policy,lam,sla)
#base_stocks["Dist"] = 864
#base_stocks["Demand"] = 106
#base_stocks["Dist"] = 885
#print(582/base_stocks["Dist"])
print(585/base_stocks["Dist"])
print(110/base_stocks["Demand"])
# Manual override of the Dist base stock used for the experiments below.
base_stocks["Dist"] = 393
#base_stocks["Demand"] = 110
#base_stocks["Dist"] = 401
capacity_constraints = {}
#capacity_constraints = {"Demand":12}
# stockout_stages=[] -> stockouts do NOT propagate; None -> all stages propagate.
indep_inv_histories = simulate(stages,policy,base_stocks,capacity_constraints,demand_history,stockout_stages=[])
casc_inv_histories = simulate(stages,policy,base_stocks,capacity_constraints,demand_history,stockout_stages=None)
#capacity_constraints = {"Demand":13,"Dist":12}
capacity_constraints = {"Demand":35,"Dist":31}
# NOTE(review): this immediately overwrites the line above; only {"Dist":12} is used.
capacity_constraints = {"Dist":12}
indep_inv_histories_cap = simulate(stages,policy,base_stocks,capacity_constraints,demand_history,stockout_stages=[])
casc_inv_histories_cap = simulate(stages,policy,base_stocks,capacity_constraints,demand_history,stockout_stages=None)
# -
base_stocks
#now check the effective sla with coupled stockouts
np.mean(casc_inv_histories_cap["Demand"] >= 0),np.mean(casc_inv_histories_cap["Dist"] >= 0)
#verify stockout frequency against sla
np.mean(indep_inv_histories["Demand"] >= 0),np.mean(indep_inv_histories["Dist"] >= 0)
#verify stockout frequency against sla
np.mean(indep_inv_histories_cap["Demand"] >= 0),np.mean(indep_inv_histories_cap["Dist"] >= 0)
#check the correlation between inventories (first 100 periods dropped as transient)
np.corrcoef(indep_inv_histories["Demand"][100:n],indep_inv_histories["Dist"][100:n])
#now check the effective sla with coupled stockouts
np.mean(casc_inv_histories["Demand"] >= 0),np.mean(casc_inv_histories["Dist"] >= 0)
#now check the effective sla with coupled stockouts
np.mean(casc_inv_histories_cap["Demand"] >= 0),np.mean(casc_inv_histories_cap["Dist"] >= 0)
#and corresponding correlation between inventories
np.corrcoef(indep_inv_histories_cap["Dist"][100:n],indep_inv_histories["Dist"][100:n])
len(demand_history.shape)
plt.plot(demand_history[:200])
plot_cascading_effect(casc_inv_histories_cap,casc_inv_histories,["Dist","Demand"],time_length=n,remove_transient=True)
plot_cascading_effect(indep_inv_histories_cap,indep_inv_histories,["Dist","Demand"],time_length=n,remove_transient=True)
# +
#find one coupled stockout and plot it
# Index of the first period where the capacity-constrained cascading run
# dips below -200 units at the Demand stage.
loc = np.where(casc_inv_histories_cap["Demand"]<-200)[0][0]
print(loc)
window = 2000
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
for stage_id in casc_inv_histories:
    plt.plot(casc_inv_histories_cap[stage_id][s:e],label="{} stage inventory position".format(stage_id))
#plt.plot(casc_inv_histories["Demand"][s:e],label="Demand stage inventory position")
#plt.plot(casc_inv_histories["Dist"][s:e],label="Supply stage inventory position")
#plt.plot(casc_inv_histories["Supply"][s:e],label="Supply stage inventory position")
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
# +
# Zoom in on the same event and compare the three scenarios at the Demand stage.
window = 100
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
#plt.plot(indep_inv_histories["Demand"][s:e],label="{}: no capacity constraint".format(stage_id))
#plt.plot(indep_inv_histories_cap["Demand"][s:e],label="{}: capacity constraint".format(stage_id))
plt.plot(casc_inv_histories_cap["Demand"][s:e],label="{}: capacity constraint, cascade".format(stage_id))
plt.plot(casc_inv_histories["Demand"][s:e],label="{}: no capacity constraint, cascade".format(stage_id))
plt.plot(indep_inv_histories_cap["Demand"][s:e],label="{}: no cascade".format(stage_id))
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
# -
from collections import defaultdict
# Stockout intervals that appear ONLY when stockouts cascade (vs. the independent run).
new_stockouts = get_new_stockout_intervals(casc_inv_histories["Demand"],indep_inv_histories["Demand"])
new_stockouts_cap = get_new_stockout_intervals(casc_inv_histories_cap["Demand"],indep_inv_histories_cap["Demand"])
indep_stockouts_cap = _collect_stockout_intervals(indep_inv_histories_cap["Demand"])
indep_stockouts = _collect_stockout_intervals(indep_inv_histories["Demand"])
all_stockouts_cap = _collect_stockout_intervals(casc_inv_histories_cap["Demand"])
all_stockouts = _collect_stockout_intervals(casc_inv_histories["Demand"])
# Ratio of total stockout duration: cascading vs. independent.
sum([len(inter) for inter in all_stockouts_cap.values()])/sum([len(inter) for inter in indep_stockouts_cap.values()])
sum([len(inter) for inter in all_stockouts.values()])/sum([len(inter) for inter in indep_stockouts.values()])
_collect_stockout_intervals(casc_inv_histories_cap["Demand"])
len(new_stockouts_cap)/len(_collect_stockout_intervals(casc_inv_histories_cap["Demand"]))
np.max([len(inter) for inter in new_stockouts.values()])
np.max([len(inter) for inter in new_stockouts_cap.values()])
from collections import Counter
n/365
Counter([len(inter) for inter in new_stockouts_cap.values()])
for i,j in new_stockouts_cap.items():
    if len(j) == 61:
        print(i)
# Distribution of cascade-induced stockout interval lengths, with/without capacity.
f,ax = plt.subplots(2,1,sharex=True)
ax[1].hist([len(inter) for inter in new_stockouts_cap.values()],density=True)
ax[0].hist([len(inter) for inter in new_stockouts.values()],density=True)
30/32
# +
# Overlay all four scenarios at the Demand stage around the located stockout.
window = 50
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
plt.plot(indep_inv_histories["Demand"][s:e],label="{}: no capacity constraint".format(stage_id))
plt.plot(indep_inv_histories_cap["Demand"][s:e],label="{}: capacity constraint".format(stage_id))
plt.plot(casc_inv_histories["Demand"][s:e],label="{}: no capacity constraint, cascade".format(stage_id))
plt.plot(casc_inv_histories_cap["Demand"][s:e],label="{}: capacity constraint, cascade".format(stage_id))
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
# +
#find one coupled stockout and plot it
# Pick the 6th period where the cascading capacity-constrained run drops below -30.
loc = np.where(casc_inv_histories_cap["Demand"]< -30)[0][5]
window = 150
s = loc-window
e = s+2*window
# One panel per scenario, all stages overlaid.
f,ax = plt.subplots(4,1,figsize=(12,12),sharex=True)
for j,inv_hist in enumerate([indep_inv_histories,indep_inv_histories_cap,casc_inv_histories,casc_inv_histories_cap]):
    for stage_id in inv_hist:
        ax[j].plot(inv_hist[stage_id][s:e],label="{} stage inventory position".format(stage_id))
    ax[j].grid(axis="y")
#plt.plot(casc_inv_histories["Demand"][s:e],label="Demand stage inventory position")
#plt.plot(casc_inv_histories["Dist"][s:e],label="Supply stage inventory position")
#plt.plot(casc_inv_histories["Supply"][s:e],label="Supply stage inventory position")
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.legend()
# -
# Same event: independent + capacity-constrained scenario, all stages.
window = 150
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
for stage_id in casc_inv_histories:
    plt.plot(indep_inv_histories_cap[stage_id][s:e],label="{} stage inventory position".format(stage_id))
#plt.plot(casc_inv_histories["Demand"][s:e],label="Demand stage inventory position")
#plt.plot(casc_inv_histories["Dist"][s:e],label="Supply stage inventory position")
#plt.plot(casc_inv_histories["Supply"][s:e],label="Supply stage inventory position")
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
# Same event: cascading, unconstrained scenario.
window = 150
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
for stage_id in casc_inv_histories:
    plt.plot(casc_inv_histories[stage_id][s:e],label="{} stage inventory position".format(stage_id))
#plt.plot(casc_inv_histories["Demand"][s:e],label="Demand stage inventory position")
#plt.plot(casc_inv_histories["Dist"][s:e],label="Supply stage inventory position")
#plt.plot(casc_inv_histories["Supply"][s:e],label="Supply stage inventory position")
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
# Same event: independent, unconstrained scenario.
window = 150
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
for stage_id in casc_inv_histories:
    plt.plot(indep_inv_histories[stage_id][s:e],label="{} stage inventory position".format(stage_id))
#plt.plot(casc_inv_histories["Demand"][s:e],label="Demand stage inventory position")
#plt.plot(casc_inv_histories["Dist"][s:e],label="Supply stage inventory position")
#plt.plot(casc_inv_histories["Supply"][s:e],label="Supply stage inventory position")
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
# Sanity check of the seeded Poisson draw.
np.random.seed(seed=8675309)
np.random.poisson(size=20,lam=10)
plt.plot(indep_inv_histories["Demand"])
#compare it with the same time interval without the coupled inventory dynamics
window = 200
s = loc-window
e = s+2*window
plt.figure(figsize=(12,8))
plt.plot(indep_inv_histories["Dist"][s:e],label="Demand stage inventory position")
plt.plot(indep_inv_histories_cap["Dist"][s:e],label="Supply stage inventory position")
plt.ylabel("Inventory position")
plt.xlabel("Day")
plt.grid(axis="y")
plt.legend()
| notebooks/gsm/GSM_cascading_stockouts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # data preprocessing
# # import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# # step 2: import data set
dataset=pd.read_csv('Data.csv')
dataset
# # step 3 : to create feature matrix and dependent variable vector
# x = all columns except the last; y = last column (the target).
x=dataset.iloc[:,:-1].values
y=dataset.iloc[:,-1].values
x
y
# # step 4: replacing missing data
#
# Impute missing numeric values (columns 1-2) with the column mean.
from sklearn.impute import SimpleImputer
imputer=SimpleImputer(missing_values=np.nan,strategy='mean' )
imputer.fit(x[:,1:3])
x[:,1:3]=imputer.transform(x[:,1:3])
x
y
# # Step 5:encoding
# # feature matrix using OneHotEncoding
# One-hot encode column 0 (categorical); remaining columns pass through unchanged.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct=ColumnTransformer(transformers=[('encoder',OneHotEncoder(),[0])],remainder='passthrough')
x=np.array(ct.fit_transform(x))
x
# # Dependent Variable Vector using Label Encoder
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
y=np.array(le.fit_transform(y))
y
# # step 6: splitting of data set into training data set and testing data set
from sklearn.model_selection import train_test_split
xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=1)
xtest
ytest
# # step 7: feature scaling
# Standardize the numeric columns (index 3 onward; the first columns are one-hot flags).
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
xtrain[:,3:]=sc.fit_transform(xtrain[:,3:])
# Use the scaler FITTED ON THE TRAINING SET for the test set. The original called
# fit_transform here too, which refits on the test data (information leakage and
# inconsistent scaling between the two splits).
xtest[:,3:]=sc.transform(xtest[:,3:])
xtrain
ytrain
| Python-Week 4/23-24 august 2021 Day 12 data preprocessing .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Bank-marketing data; fields are semicolon-separated.
data=pd.read_csv('F:\\bank-additional-full.csv',sep=';')
data.shape
# Number of duplicated index entries (0 means all row labels are unique).
tot=len(set(data.index))
last=data.shape[0]-tot
last
data.isnull().sum()
# Class balance of the target column `y`.
print(data.y.value_counts())
sns.countplot(x='y', data=data)
plt.show()
# Inspect value counts for every categorical (object-dtype) column.
cat=data.select_dtypes(include=['object']).columns
# cat
for c in cat:
    print(c)
    print("-"*50)
    print(data[c].value_counts())
    print("-"*50)
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
le=LabelEncoder()
data['y']=le.fit_transform(data['y'])
data.drop('poutcome',axis=1,inplace=True)
# Upper Tukey fence for age: Q3 + 1.5*IQR (evaluates to ~69.6 on this data set,
# which is where the hard-coded threshold below comes from).
print( data['age'].quantile(q = 0.75) +
          1.5*(data['age'].quantile(q = 0.75) - data['age'].quantile(q = 0.25)))
# Mask ages above the fence (set to NaN) and impute them with the mean age.
# The original line `data['age']=data[data['age']<69.6]` assigned an entire
# filtered DataFrame to a single column, which is invalid in pandas; the intent
# (given the fillna that follows) was to NaN-out the outliers.
data['age'] = data['age'].where(data['age'] < 69.6)
data['age'] = data['age'].fillna(int(data['age'].mean()))
data['age'].values
# Mean subscription rate per age value.
data[['age','y']].groupby(['age'],as_index=False).mean().sort_values(by='y', ascending=False)
# +
# for x in data:
#     x['Sex'] = x['Sex'].map( {'female': 1, 'male': 0}).astype(int)
# -
# Cut age into 5 equal-width bands to choose the bin edges used below.
data['age_slice'] = pd.cut(data['age'],5)
data[['age_slice', 'y']].groupby(['age_slice'], as_index=False).mean().sort_values(by='age_slice', ascending=True)
data['age'] = data['age'].astype(int)
# Replace raw age with its ordinal band (1..5) using the pd.cut edges.
data.loc[(data['age'] >= 16) & (data['age'] <= 28), 'age'] = 1
data.loc[(data['age'] > 28) & (data['age'] <= 38), 'age'] = 2
data.loc[(data['age'] > 38) & (data['age'] <= 49), 'age'] = 3
data.loc[ (data['age'] > 49) & (data['age'] <= 59), 'age'] = 4
data.loc[ (data['age'] > 59 )& (data['age'] <= 69), 'age'] = 5
data.drop('age_slice',axis=1,inplace=True)
# Collapse marital statuses: divorced -> single; then label-encode.
data['marital'].replace(['divorced' ,'married' , 'unknown' , 'single'] ,['single','married','unknown','single'], inplace=True)
data['marital']=le.fit_transform(data['marital'])
data
# Treat students as unemployed.
data['job'].replace(['student'] ,['unemployed'], inplace=True)
data[['education', 'y']].groupby(['education'], as_index=False).mean().sort_values(by='education', ascending=True)
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
sns.countplot(x = 'education', hue = 'loan', data = data)
ax.set_xlabel('Education', fontsize=15)
ax.set_ylabel('y', fontsize=15)
ax.set_title('Education Count Distribution', fontsize=15)
ax.tick_params(labelsize=15)
sns.despine()
fig, ax = plt.subplots()
fig.set_size_inches(20, 5)
sns.countplot(x = 'job', hue = 'loan', data = data)
ax.set_xlabel('job', fontsize=17)
ax.set_ylabel('y', fontsize=17)
ax.set_title('Education Count Distribution', fontsize=17)
ax.tick_params(labelsize=17)
sns.despine()
# Merge the basic-education levels; fold professional.course into university.degree.
data['education'].replace(['basic.4y','basic.6y','basic.9y','professional.course'] ,['not_reach_highschool','not_reach_highschool','not_reach_highschool','university.degree'], inplace=True)
# Label-encode the remaining categorical columns in place.
ohe=OneHotEncoder()
data['default']=le.fit_transform(data['default'])
data['housing']=le.fit_transform(data['housing'])
data['loan']=le.fit_transform(data['loan'])
data['month']=le.fit_transform(data['month'])
# NOTE: the original re-created `OneHotEncoder(categorical_features=data['month'])`
# here. `categorical_features` was removed in scikit-learn 0.22 (the call raises
# TypeError on current versions) and the resulting encoder was never used, so the
# broken call has been dropped.
data['contact']=le.fit_transform(data['contact'])
data['day_of_week']=le.fit_transform(data['day_of_week'])
data['job']=le.fit_transform(data['job'])
data['education']=le.fit_transform(data['education'])
# After encoding, no object-dtype columns should remain.
cat=data.select_dtypes(include=['object']).columns
# cat
def outlier_detect(data,feature):
    """Drop rows whose *feature* value lies outside the 1.5*IQR Tukey fences.

    Prints the computed fences and returns the filtered DataFrame; the input
    frame itself is not modified, so callers must capture the return value.
    """
    first_quartile = data[feature].quantile(0.25)
    third_quartile = data[feature].quantile(0.75)
    spread = third_quartile - first_quartile  # interquartile range
    lower = first_quartile - 1.5*spread
    upper = third_quartile + 1.5*spread
    # Keep only rows strictly inside the fences.
    kept = data.loc[(data[feature] > lower) & (data[feature] < upper)]
    print('lower IQR and upper IQR of',feature,"are:", lower, 'and', upper, 'respectively')
    return kept
data.columns
data['pdays'].unique()
# 999 is the sentinel for "never previously contacted"; remap it to 0.
data['pdays'].replace([999] ,[0], inplace=True)
data['previous'].unique()
fig, ax = plt.subplots()
fig.set_size_inches(15, 5)
sns.countplot(x = 'campaign', palette="rocket", data = data)
ax.set_xlabel('campaign', fontsize=25)
ax.set_ylabel('y', fontsize=25)
ax.set_title('campaign', fontsize=25)
sns.despine()
sns.countplot(x = 'pdays', palette="rocket", data = data)
ax.set_xlabel('pdays', fontsize=25)
ax.set_ylabel('y', fontsize=25)
ax.set_title('pdays', fontsize=25)
sns.despine()
data[['pdays', 'y']].groupby(['pdays'], as_index=False).mean().sort_values(by='pdays', ascending=True)
sns.countplot(x = 'emp.var.rate', palette="rocket", data = data)
ax.set_xlabel('emp.var.rate', fontsize=25)
ax.set_ylabel('y', fontsize=25)
ax.set_title('emp.var.rate', fontsize=25)
sns.despine()
# `outlier_detect` returns the filtered frame and does not modify `data` in
# place. The original calls discarded the return value, so no outliers were
# actually removed; capture the result instead.
data = outlier_detect(data,'duration')
#outlier_detect(data,'emp.var.rate')
data = outlier_detect(data,'nr.employed')
#outlier_detect(data,'euribor3m')
# Build the feature matrix (all columns but the target) and target vector.
X = data.iloc[:,:-1]
X = X.values
y = data['y'].values
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# +
# Fit each candidate classifier and report its held-out accuracy (percent).
algo = {'LR': LogisticRegression(),
        'DT':DecisionTreeClassifier(),
        'RFC':RandomForestClassifier(n_estimators=100),
        'SVM':SVC(gamma=0.01),
        'KNN':KNeighborsClassifier(n_neighbors=10)
       }
for k, v in algo.items():
    model = v
    model.fit(X_train, y_train)
    # Fixed typo in the printed message ("Acurracy" -> "Accuracy").
    print('Accuracy of ' + k + ' is {0:.2f}'.format(model.score(X_test, y_test)*100))
# -
| Bank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simulation Archive
# + [markdown] toc=true
# <h1>Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1 </span>Introduction</a></span></li><li><span><a href="#Note" data-toc-modified-id="Note-2"><span class="toc-item-num">2 </span>Note</a></span></li><li><span><a href="#Manual-Snapshots" data-toc-modified-id="Manual-Snapshots-3"><span class="toc-item-num">3 </span>Manual Snapshots</a></span></li></ul></div>
# -
# ## Introduction
#
# A Simulation Archive (Rein & Tamayo 2017) is useful when one runs long simulations. With the Simulation Archive, one can easily take snapshots of the simulation, and then later restart and analyze it. Since Spring 2018, the default Simulation Archive version is 2. Version 2 works with all integrators and very few restrictions that apply (you need to be careful when using function pointers).
# To illustrate the Simulation Archive, let us setup a simulation of a two planet system and turn on the Simulation Archive. This is done with the following code:
import rebound
import numpy as np
# Star plus two ~Jupiter-mass planets; WHFast with a timestep tied to the
# inner orbit, archiving a snapshot every 1e3 time units.
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1e-3, a=1.)
sim.add(m=1e-3, a=1.9)
sim.move_to_com()
sim.dt = sim.particles[1].P*0.05 # timestep is 5% of orbital period
sim.integrator = "whfast"
sim.automateSimulationArchive("archive.bin",interval=1e3,deletefile=True)
# The first argument of `automateSimulationArchive` is the path and name of the binary file to write to, the `interval` argument specifies the interval at which snapshots of the simulation are saved (in whichever code units you work). The smaller the interval, the larger the file size, but the faster the access. The `deletefile=True` flag makes REBOUND delete the file if it already exists.
# We now integrate the simulation forward in time. This should take a few seconds.
sim.integrate(1e6)
# We can now delete the simulation. Note that we could also have run the simulation using the C version of REBOUND. This might be useful if one wants to run a long simulation on a cluster and doesn't want to bother with installing python. In C, one can initialize the Simulation Archive with (you need to delete the file manually if it already exists):
# ```c
# struct reb_simulation* sim = reb_create_simulation();
# ...
# reb_simulationarchive_automate_interval("archive.bin",1e3);
# ```
del sim
# We now look at the Simulation Archive. You could do this at a later time, on a different computer, with a different version of REBOUND and it will still work.
sa = rebound.SimulationArchive("archive.bin")
# Let's first print the number of snapshots and the time of the first and last snapshot in the archive:
print("Number of snapshots: %d" % len(sa))
print("Time of first and last snapshot: %.1f, %.1f" % (sa.tmin, sa.tmax))
# We can access each snapshot by indexing the Simulation Archive. This returns a REBOUND simulation object that corresponds to that time. Everything is accurate down to the last bit. That means one could use this simulation object and restart the simulation, the final coordinates of the planets will be exactly the same as in the original simulation.
sim = sa[500]
print(sim.t, sim.particles[1])
# One can also step through every simulation in the archive using the generator functionality, for example to store the eccentricity of the inner planet as a function of time:
eccentricities = np.zeros(len(sa))
for i, sim in enumerate(sa):
    eccentricities[i] = sim.particles[1].e
# If we want to access a simulation at a specific time, such as in-between snapshots, one can use the `getSimulation()` function:
sim = sa.getSimulation(12345.6)
print(sim.t)
# By default, the function returns a simulation that corresponds to the snapshot that is nearby. To get closer to the requested time, one can use the `mode` attribute:
sim = sa.getSimulation(12345.6, mode="close")
print(sim.t)
# In the above code, REBOUND looks up a nearby snapshot and then integrates the simulation forward in time to get close to the requested time. As one can see, with `mode="close"`, one gets a simulation very close to the requested time, but it is still slightly off. This is because `WHFast` uses a fixed timestep. If we want to reach the requested time exactly, we have to change the timestep. Changing a timestep in a symplectic integrator can cause problems, but if one really wants to get a simulation object at the exact time (for example to match observations), then the `mode="exact"` flag does that.
sim = sa.getSimulation(12345.6, mode="exact")
print(sim.t)
# Requesting a simulation at any time between `tmin` and `tmax` only takes a few seconds at most (keep in mind, REBOUND integrates the simulation from the nearest snapshot to the requested time). To analyze a large simulation, you might want to do this in parallel. We can easily do that by using REBOUND's `InterruptiblePool`. In the following example, we calculate the distance between the two planets at 432 times in the interval $[t_{min},t_{max}]$.
def thread_init(*rest):
    # Give every worker its own archive handle via a thread-local-style global.
    global sat
    sat = rebound.SimulationArchive("archive.bin")
def analyze(t):
    # Euclidean distance between the two planets at (approximately) time t.
    sim = sat.getSimulation(t,mode="close")
    d12 = sim.particles[1] - sim.particles[2]
    return np.sqrt(d12.x*d12.x+d12.y*d12.y+d12.z*d12.z)
pool = rebound.InterruptiblePool(initializer=thread_init)
times = np.linspace(sa.tmin, sa.tmax, 432)
distances = pool.map(analyze,times)
# ## Note
#
# Since Spring 2018, the `SimulationArchive` object always returns a new `Simulation` object when you request a simulation from the archive. In earlier versions, it kept a reference to one `Simulation` object internally, updated it when a new time was requested, and then returned a reference.
# ## Manual Snapshots
#
# With the new version of the simulation archive you can also add snapshots manually, giving you further control beyond the automated options used above. This can be useful to save snapshots when particular conditions like collisions or ejections occur. Here we give an example that saves logarithmically spaced snapshots
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1e-3, a=1.)
sim.add(m=1e-3, a=1.9)
sim.move_to_com()
sim.dt = sim.particles[1].P*0.05 # timestep is 5% of orbital period
sim.integrator = "whfast"
# We now iterate over an array of logarithmically spaced times, and save a snapshot after each using the manual `simulationarchive_snapshot` function. If no file with that filename exists, it will create a new one first. Note that if it doesn't already exist, it will always *append* a snapshot to the file, so you need to delete any existing file when starting a new simulation.
# +
filename = 'testsa.bin'
Nout = 1000
times = np.logspace(0, 4, Nout)*sim.particles[1].P
for i, time in enumerate(times):
sim.integrate(time, exact_finish_time=0) # need outputs on the nearest WHFast timesteps to the times we pass to get symplectic behavior
sim.simulationarchive_snapshot(filename)
# -
# We now plot the energy error at each of the snapshots
# +
# Plot the relative energy error at every snapshot in the archive.
sa = rebound.SimulationArchive(filename)
sim0 = sa[0]
P = sim0.particles[1].P
# Reference energy must come from the FIRST snapshot. The original computed
# `E0 = sim.calculate_energy()` where `sim` was the fully integrated live
# simulation, so all errors were measured relative to the final state rather
# than the initial one.
E0 = sim0.calculate_energy()
Eerr = np.zeros(Nout)
for i, sim in enumerate(sa):
    E = sim.calculate_energy()
    Eerr[i] = np.abs((E-E0)/E0)
# %matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(times/sim0.particles[1].P, Eerr, '.')
ax.set_xscale('log'); ax.set_yscale('log')
ax.set_xlabel('time [orbits]'); ax.set_ylabel('relative energy error');
# -
# One can also add manual snapshots when using automated intervals.
# NOTE(review): the three calls below are leftover interactive introspection
# (attribute listings / object size) with no lasting effect.
dir(enumerate(sa))
enumerate(sa).__sizeof__()
dir(sim.particles[1])
| rebound/rebound_official_examples/SimulationArchive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# w-net torch leftover
import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
# -
# Extract multiple outputs from network
"""
out1, out2 = model(data)
loss1 = criterion1(out1, target1)
loss2 = criterion2(out2, target2)
loss = loss1 + loss2
loss.backward()
"""
import numpy as np
# Scratch experiments with view / expand / repeat broadcasting semantics.
A = torch.tensor([[1,2,3],[4,5,6],[7,8,9]])
print(A.data)
_A = A.view(1, -1)
print(_A.data)
# expand creates a (9,9) view without copying data.
_B = _A.expand(9,-1)
print(_B.data)
# repeat, unlike expand, materializes copies.
__B = _B.repeat(9,9)
print(__B.data)
__A = A.reshape(-1).repeat(1,3)
print(__A)
#_A = A.view(-1,1).expand(3,3).reshape(-1)
#_B = B.repeat(3)
#torch.stack([_A, _B])
# +
# Build coordinate-difference tensors of shape (H, W, H*W) — a prototype of the
# pairwise pixel-distance term used in the soft N-cut loss.
A = torch.tensor([[[1,1,1],[2,2,2],[3,3,3]],
                 [[1,2,3],[1,2,3],[1,2,3]]])
#A.unsqueeze_(-1)
#A.expand(-1,-1,9,-1)
H = 3
W = 4
# a[i, :, :] = row index i of the "source" pixel.
a = torch.zeros(H,W,H*W,dtype=torch.float)
for i in range(H):
    a[i, :, :] = i
# b[:, :, W*j:W*(j+1)] = row index j of the "target" pixel.
b = torch.zeros(H,W,H*W, dtype = torch.float)
for j in range(H):
    b[:,:,W*j:W*(j+1)] = j
print(a.size())
print(a.data[:,:,0])
print(b.size())
# Squared row-distance between every (source, target) pixel pair.
c = a - b
c = torch.mul(c,c)
print(c.data[:,:,0])
#assume we have f
f = c.clone()
c = c + f
sigma_x_squared = 16
c = torch.exp(-torch.div(c, sigma_x_squared))
c = torch.norm(c,p=2, dim=1)
print(c.size())
#a = torch.randn(64, 10, 3, 32, 32)
#a = a.view(64, -1)
#b = torch.norm(a, p=2, dim=1)
#torch.sum(b)
# +
#print(a)
# Flatten one (H, W) slice and broadcast it back to (H, W, H*W).
b = a.data[:,:,0].clone()
print(b)
print(b.size())
c = torch.zeros(3,4,2)
d = b.data.clone()
d.unsqueeze_(-1)
d = d.reshape(1,1,-1)
print(d.size())
print(d.data)
d = d.expand(3,4,-1)
print(d.size())
print(b.data)
print(d.data[:,:,0])
print(d.data[:,:,4])
assert(d.size() ==torch.Size([3,4,12]))
print("--------")
d = d.sum(dim = 2)
print(d.size())
#print(d.size())
# -
# Strided-slice assignment check: set every other entry along the last dim.
a = torch.zeros(2,3,4)
print(a)
a[:,:,0:-1:2] = 1
print(a)
print(a.size())
b = torch.zeros(2,3)
print(b)
print(b.size())
# +
# Timing comparison: building a (48, 80, 48*80) row-index tensor via
# arange+expand (views, fast) vs. an explicit fill loop (slow).
# NOTE(review): these cells require a CUDA device (.cuda()).
import time
st = time.time()
a = torch.arange(end = 48, dtype=torch.float, requires_grad=True ).cuda()
a.unsqueeze_(-1)
a = a.expand(-1, 80)
a.unsqueeze_(-1)
a = a.expand(-1,-1,48*80)
print(time.time() - st)
b = torch.zeros(48,80,48*80, dtype=torch.float, requires_grad=True).cuda()
st = time.time()
for i in range(48):
    b[i,:,:] = i
print(time.time() - st)
# -
print(a[:,:,0])
print(b[:,:,0])
print(a.size())
print(b.size())
# Variant: reshape to (1,1,H*W) then expand over the first two dims.
st = time.time()
a = torch.arange(end = 48, dtype = torch.float, requires_grad = True).cuda()
a.unsqueeze_(-1)
a = a.expand(-1,80)
#a.permute(1,0)
a.unsqueeze_(-1)
a = a.reshape(1,1,-1)
a = a.expand(48,80,-1)
print(time.time() - st)
# Variant: repeat_interleave to lay rows out contiguously before expanding.
H = 48
W = 80
st = time.time()
a = torch.arange(end = H, dtype = torch.float, requires_grad = True).cuda()
a = a.repeat_interleave(W)
a.unsqueeze_(-1)
a.unsqueeze_(-1)
a = a.permute(1,2,0)
a = a.expand(H,W,-1)
print(time.time() - st)
print(a.size())
print(a[:,:,79])
# Column-index version of the same grid.
st = time.time()
H = 48
W = 80
a = torch.arange(end = W, dtype=torch.float, requires_grad=True ).cuda()
a.unsqueeze_(-1)
a = a.expand(-1, H)
a = a.permute(1,0)
a.unsqueeze_(-1)
a = a.expand(-1,-1,48*80)
print(time.time() - st)
print(a.size())
print(a[:,:,0])
# +
# Column-index grid built with repeat instead of expand.
H = 48
W = 80
st = time.time()
a = torch.arange(end = W, dtype = torch.float, requires_grad = True).cuda()
a = a.repeat(H)
#a.permute(1,0)
a.unsqueeze_(-1)
a.unsqueeze_(-1)
a = a.permute(1,2,0)
a = a.expand(H,W,-1)
print(time.time() - st)
# -
print(a.size())
print(a[:,:,1])
print(a)
# NOTE(review): orphaned snippet pasted out of a module's build method —
# `self` is undefined at top level, so this cell raises NameError if run.
# Kept for reference only; presumably `self.input_channels` was the network's
# input channel count — confirm against the original w-net module.
self.u_enc1 = nn.Sequential(
    nn.Conv2d(self.input_channels, 16, kernel_size=3, padding = (1,1)),
    nn.ReLU(True),
    nn.Conv2d(16,16,kernel_size=3, padding = (1,1)),
    nn.ReLU(True))
import torch
# expand along a new leading dim, then reduce over the trailing dims.
a = torch.tensor([[1,2,3,4],[5,6,7,8]])
a.unsqueeze_(0)
a = a.expand(5,-1,-1)
print(a.size())
print(a[0,:,:])
a = a.sum((1,2))
a.size()
# +
# In-place elementwise multiply.
a = torch.tensor([1,2,3])
b = torch.tensor([2,3,4])
a *= b
print(a)
# +
# Stubs for the soft N-cut loss terms. The docstrings preserve the pre-
# optimization implementations verbatim for reference; the bodies are
# intentionally empty pending the optimized rewrite.
def calculate_denom():
    """
    ### Before Optimization
    seg = segmented_slice.data.clone()
    H,W = seg.size()
    seg.unsqueeze_(-1)
    seg = seg.expand(-1,-1, H*W)
    assert(seg.size() == torch.Size([H,W,H*W]))
    assert(weight_matrix.size() == torch.Size([H,W,H*W]))
    seg = torch.mul(seg, weight_matrix)
    return seg.sum()
    """
    pass
def calculate_num():
    # Numerator counterpart of calculate_denom; not yet implemented.
    pass
def calculate_weight_matrix():
    """
    r_slice = original_img.data[:,0,:,:].clone()
    g_slice = original_img.data[:,1,:,:].clone()
    b_slice = original_img.data[:,2,:,:].clone()
    r_matrix1 = r_slice.data.clone()
    r_matrix1.unsqueeze_(-1)
    r_matrix1.expand(-1,-1,-1,H*W) #N,H,W,H*W
    assert(r_matrix1.size() == torch.Size([N,H,W,H*W]))
    r_matrix2 = r_slice.data.clone()
    r_matrix2.unsqueeze_(-1)
    r_matrix2 = r_matrix2.reshape(N, 1, 1, -1) #N,1,1,H*W
    r_matrix2 = r_matrix2.expand(-1, H, W, -1) #N,H,W,H*W
    assert(r_matrix2.size() == torch.Size([N, H, W, H*W]))
    r = r_matrix1 - r_matrix2
    r = torch.mul(r,r)
    g_matrix1 = g_slice.data.clone()
    g_matrix1.unsqueeze_(-1)
    g_matrix1.expand(-1, -1,-1,H*W)
    g_matrix2 = g_slice.data.clone()
    g_matrix2.unsqueeze_(-1)
    g_matrix2 = g_matrix2.reshape(N, 1, 1, -1)
    g_matrix2 = g_matrix2.expand(-1, H, W, -1)
    assert(g_matrix2.size() == torch.Size([N, H, W, H*W]))
    g = g_matrix1 - g_matrix2
    g = torch.mul(g,g)
    b_matrix1 = b_slice.data.clone()
    b_matrix1.unsqueeze_(-1)
    b_matrix1.expand(-1, -1,-1,H*W)
    b_matrix2 = b_slice.data.clone()
    b_matrix2.unsqueeze_(-1)
    b_matrix2 = b_matrix2.reshape(N, 1, 1, -1)
    b_matrix2 = b_matrix2.expand(-1, H, W, -1)
    assert(b_matrix2.size() == torch.Size([N, H, W, H*W]))
    b = b_matrix1 - b_matrix2
    b = torch.mul(b,b)
    pixel_diff = torch.exp( -torch.div(r+g+b, sigma_i_squared) )
    weight = torch.mul(pixel_diff, dist_diff)
    assert(weight.size() == torch.Size([H,W,H*W]))
    return weight
    """
    pass
# +
import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
import gc
# Free cached Python objects and CUDA memory, then pin everything to GPU 0.
gc.collect()
import os
torch.cuda.empty_cache()
os.environ["CUDA_VISIBLE_DEVICES"]="0"
torch.cuda.set_device(0)
# +
def custom_loss(y_hat, y, z):
    """Return sum(y_hat - (y - z)) as a scalar tensor.

    `y` is cloned so the caller's tensor is never mutated. (The original also
    created an unused clone of `z`; that dead statement has been removed.)
    """
    yy = y.clone()
    yy = yy - z
    yy = y_hat - yy
    return yy.sum()
def custom_loss1(y_hat, y, z):
    """Same value as custom_loss, but computed through a detached copy of `y`
    that requires grad.

    The original used `torch.tensor(y.clone(), requires_grad=True)`, which
    PyTorch warns against for existing tensors; the recommended equivalent is
    `clone().detach().requires_grad_(True)`.
    """
    yy = y.clone().detach().requires_grad_(True)
    yy = yy - z
    yy = y_hat - yy
    return yy.sum()
# -
class custom_model(torch.nn.Module):
    """Tiny conv-relu-conv-relu-softmax encoder used for gradient experiments."""

    def __init__(self):
        super().__init__()
        self.build()

    def forward(self, x):
        # Delegate straight to the sequential encoder.
        return self.u_enc1111(x)

    def build(self):
        # Assemble the encoder layer by layer, keeping the original module names.
        layers = [
            ('Conv1_1', nn.Conv2d(3, 16, kernel_size=3, padding=(1, 1))),
            ('Relu1_2', nn.ReLU(True)),
            ('Conv1_3', nn.Conv2d(16, 3, kernel_size=3, padding=(1, 1))),
            ('Relu1_4', nn.ReLU(True)),
            ('Soft1_5', nn.Softmax()),
        ]
        self.u_enc1111 = nn.Sequential()
        for layer_name, module in layers:
            self.u_enc1111.add_module(layer_name, module)
model = custom_model().cuda()
distance = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(),lr=10000, momentum=0.9)
# +
# Sanity-check experiment: confirm the Conv1_1 gradient is None before any
# backward pass, populated after loss1.backward(), that the weights move
# after optimizer.step(), and that zero_grad() zeroes the stored grads.
N = 10
C = 3
H = 40
W = 40
imgs = Variable(torch.randn(N,C,H,W)).cuda()
#dist_diff_matrix = Variable(torch.randn(H,W), requires_grad = True).cuda()
dist_diff_matrix = torch.randn(H,W).cuda()
# Broadcast the (H, W) matrix up to the image batch shape (N, C, H, W).
dist_diff_matrix.unsqueeze_(0)
dist_diff_matrix.unsqueeze_(0)
dist_diff_matrix = dist_diff_matrix.expand(N,C, -1, -1)
output = model(imgs)
print("before zero_grad call")
print(model.u_enc1111.Conv1_1.weight[0,:,:,:])
grad = model.u_enc1111.Conv1_1.weight.grad
if grad is not None:
    print(grad[0,:,:,:])
else:
    print(grad)
print("----------------------------")
optimizer.zero_grad()
# custom_loss1 detaches its copy of imgs, so gradients flow only via output.
loss1 = custom_loss1(output, imgs, dist_diff_matrix)
#loss1 = Variable(loss1, requires_grad = True)
loss1.backward()
#loss2 = distance(output, dist_diff_matrix)
#loss2.backward()
print("after zero call and after backward")
print(model.u_enc1111.Conv1_1.weight[0,:,:,:])
grad = model.u_enc1111.Conv1_1.weight.grad
if grad is not None:
    print(grad[0,:,:,:])
else:
    print(grad)
print("----------------------------")
optimizer.step()
print("after step")
print(model.u_enc1111.Conv1_1.weight[0,:,:,:])
grad = model.u_enc1111.Conv1_1.weight.grad
if grad is not None:
    print(grad[0,:,:,:])
else:
    print (grad)
print("----------------------------")
optimizer.zero_grad()
print(model.u_enc1111.Conv1_1.weight[0,:,:,:])
grad = model.u_enc1111.Conv1_1.weight.grad
print("after zero call")
if grad is not None:
    print(grad[0,:,:,:])
else:
    print (grad)
print("----------------------------")
# -
# Inspect the first parameter's gradient after the experiment above.
for name, param in model.named_parameters():
    print (param.grad)
    break
# BUG FIX: the submodule registered in custom_model.build() is `u_enc1111`;
# `model.u_enc1` does not exist and raised AttributeError.
print(model.u_enc1111.Conv1_1.weight[0,:,:,:])
print(model.u_enc1111)
# Demo: plain assignment aliases a tensor, so in-place unsqueeze_ calls made
# through any alias reshape the one shared object.
a = torch.tensor([[1,2,3],[4,5,6]])
b = a            # alias, not a copy
b.unsqueeze_(-1) # a is now (2, 3, 1) as well
print(b.size())
c = a            # another alias of the same object
c.unsqueeze_(-1) # a, b and c are all (2, 3, 1, 1)
print(c.size())
# +
def calculate_dist_matrix(N, H, W):
    """Pairwise spatial-proximity tensor for the W-Net soft-N-cut loss.

    Returns a (N, H, W, H*W) tensor whose entry [n, h, w, j] equals
    exp(-((h - h_j)^2 + (w - w_j)^2) / sigma_x_squared), where j indexes the
    flattened H*W pixel grid.  Contains debug prints of every (H, W) slice,
    so only call it with tiny H and W.
    """
    sigma_x_squared = 16
    # Row-index difference: x_matrix1[h, w, j] = h - h_j after subtraction.
    #x_matrix1 = torch.arange(end = H, dtype=torch.float, requires_grad=True).cuda()
    x_matrix1 = torch.arange(end = H, dtype=torch.float).cuda()
    x_matrix1.unsqueeze_(-1)
    x_matrix1 = x_matrix1.expand(-1, W)
    x_matrix1.unsqueeze_(-1)
    x_matrix1 = x_matrix1.expand(-1,-1,H*W)
    #x_matrix2 = torch.arange(end = H, dtype = torch.float, requires_grad = True).cuda()
    x_matrix2 = torch.arange(end = H, dtype = torch.float).cuda()
    x_matrix2.unsqueeze_(-1)
    x_matrix2 = x_matrix2.expand(-1,W)
    x_matrix2.unsqueeze_(-1)
    x_matrix2 = x_matrix2.reshape(1,1,-1)
    x_matrix2 = x_matrix2.expand(H,W,-1)
    x_matrix1 = x_matrix1 - x_matrix2
    print("h_matrix")
    for i in range(H*W):
        print(x_matrix1[:,:,i])
    print("--------------")
    x_matrix1 = torch.pow(x_matrix1, 2)
    # Column-index difference: y_matrix1[h, w, j] = w - w_j after subtraction.
    #y_matrix1 = torch.arange(end = W, dtype=torch.float, requires_grad=True ).cuda()
    y_matrix1 = torch.arange(end = W, dtype=torch.float).cuda()
    y_matrix1.unsqueeze_(-1)
    y_matrix1 = y_matrix1.expand(-1, H)
    y_matrix1 = y_matrix1.permute(1,0)
    y_matrix1.unsqueeze_(-1)
    y_matrix1 = y_matrix1.expand(-1,-1,H*W)
    #y_matrix2 = torch.arange(end = W, dtype = torch.float, requires_grad = True).cuda()
    y_matrix2 = torch.arange(end = W, dtype = torch.float).cuda()
    y_matrix2 = y_matrix2.repeat(H)
    y_matrix2.unsqueeze_(-1)
    y_matrix2.unsqueeze_(-1)
    y_matrix2 = y_matrix2.permute(1,2,0)
    y_matrix2 = y_matrix2.expand(H,W,-1)
    y_matrix1 = y_matrix1 - y_matrix2
    print("w_matrix")
    for i in range(H*W):
        print(y_matrix1[:,:,i])
    print("--------------")
    y_matrix1 = torch.pow(y_matrix1,2)
    tmp = x_matrix1 + y_matrix1
    print("h^2 + w^2 matrix")
    for i in range(H*W):
        print(tmp[:,:,i])
    print("--------------")
    # Gaussian falloff with squared spatial distance, broadcast over batch.
    dist_diff = torch.exp( -torch.div(x_matrix1+y_matrix1, sigma_x_squared) )
    assert(dist_diff.size() == torch.Size([H,W,H*W]))
    dist_diff.unsqueeze_(0)
    dist_diff = dist_diff.expand(N,-1, -1, -1) #N,H,W,H*W
    return dist_diff
# +
# Tiny grid so the debug prints inside calculate_dist_matrix stay readable.
N = 1
H = 2
W = 3
dist_matrix = calculate_dist_matrix(N,H,W)
print(dist_matrix[0,:,:,0])
# -
def create_weight_matrix(N, H, W, K, dist_diff, original_img):
    """Per-pixel brightness-affinity weights for the W-Net soft-N-cut loss.

    Args:
        N, H, W: batch size and spatial dims of ``original_img``.
        K: number of segmentation classes; the result is repeated K times.
        dist_diff: (N, H, W, H*W) spatial term.  NOTE(review): currently
            unused here -- presumably meant to be multiplied in; confirm.
        original_img: (N, C, H, W) image batch.

    Returns:
        Tensor of shape (N, K, H, W, H*W) where entry [n, k, h, w, j] is
        exp(-||img[n, :, h, w] - img[n, :, j]||^2 / sigma_i_squared) ** 2,
        with j indexing the flattened H*W pixel grid.
    """
    sigma_i_squared = 100
    r = 5  # NOTE(review): radius constant from the paper, currently unused
    # BUG FIX: C was silently read from a notebook global; derive the channel
    # count from the image itself so the function is self-contained.
    C = original_img.size(1)
    # matrix1[n, c, h, w, j] = img[n, c, h, w]  (each pixel repeated H*W times)
    matrix1 = original_img.clone()
    matrix1.unsqueeze_(-1)
    matrix1 = matrix1.expand(-1, -1, -1, -1, H * W)
    assert matrix1.size() == torch.Size([N, C, H, W, H * W])
    # matrix2[n, c, h, w, j] = img[n, c, j]  (value of the j-th flat pixel)
    matrix2 = original_img.clone()
    matrix2.unsqueeze_(-1)
    matrix2 = matrix2.reshape(N, C, 1, 1, -1)   # N,C,1,1,H*W
    matrix2 = matrix2.expand(-1, -1, H, W, -1)  # N,C,H,W,H*W
    assert matrix2.size() == torch.Size([N, C, H, W, H * W])
    # Squared per-channel difference, summed over channels, Gaussian kernel.
    matrix1 = matrix1 - matrix2
    matrix1 = torch.pow(matrix1, 2)                                    # N,C,H,W,H*W
    matrix1 = torch.exp( -torch.div( matrix1.sum(1), sigma_i_squared) )  # N,H,W,H*W
    weight = torch.pow(matrix1, 2)                                     # N,H,W,H*W
    assert weight.size() == torch.Size([N, H, W, H * W])
    weight.unsqueeze_(1)
    weight = weight.expand(-1, K, -1, -1, -1)  # N,K,H,W,H*W
    return weight
# +
# Reproducible small RGB test image: (1, 3, 2, 3) = (N, C, H, W).
torch.manual_seed(0)
rgb = torch.randn(3,2,3).cuda()
rgb.unsqueeze_(0)
print(rgb)
print(rgb.size())
# -
# K segmentation classes; the weight tensor ends up (N, K, H, W, H*W).
K = 5
weight = create_weight_matrix(N,H,W,K, dist_matrix, rgb)
weight.size()
| others/jupyter/core/w-net leftover.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 (tensorflow)
# language: python
# name: tensorflow
# ---
import pandas as pd
# +
# This code finds the time interval of equal discharge voltage difference
# (TIEDVD) for battery B0005: per cycle, maxm is the last time the voltage
# was still above MAX_Value and minm the last time it was still at or above
# MIN_Value; H = minm - maxm is the per-cycle indirect health indicator.
di={}
d1={}
d2={}
discharge = pd.read_csv('B0005_10_discharge.csv')
discharge2 = pd.read_csv('B0005_discharge.csv')
cycle=discharge['cycle'][0]
maxm=0.0
minm=0.0
H=0.0
ch1=True
ch2=True
MAX_Value=3.80
MIN_Value=3.41
# NOTE(review): `os` shadows the usual name of the os module; here it only
# tracks the last voltage seen at/above MIN_Value for the debug print below.
os=0.0
for i in range(len(discharge)):
    if discharge['cycle'][i] != cycle:
        # Cycle boundary: record this cycle's interval and reset trackers.
        #print("the Dis val is "+ str(os) +" and the cycle"+ str(cycle))
        d1[cycle]=maxm
        d2[cycle]=minm
        H=minm-maxm
        di[cycle]=H
        minm=0
        maxm=0
        ch1,ch2=True, True
        cycle=discharge['cycle'][i]
        # discharge2['H']=(H)
    else:
        # ch1/ch2 latch off once the voltage first drops below each threshold,
        # so maxm/minm keep the *last* timestamp above the respective level.
        if discharge['voltage_battery'][i]>MAX_Value and ch1:
            maxm=discharge['time'][i]
        else:
            ch1=False
        if discharge['voltage_battery'][i]>=MIN_Value and ch2:
            minm=discharge['time'][i]
            os=discharge['voltage_battery'][i]
        else:
            ch2=False
# The final cycle never hits a boundary inside the loop; record it here.
di[cycle]=minm-maxm
ls=[]
ls=list(di.values())
discharge2['H']=ls
discharge2.to_csv("Discharge5_H1.csv")
# -
# Load the per-cycle H (TIEDVD) tables produced above: B0005 and B0006.
dis_H=pd.read_csv('Discharge5_H1.csv')
dis_H1=pd.read_csv('Discharge6_H1.csv')
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns

# Capacity and TIEDVD (H) per cycle for battery B0005.
# BUG FIX: the row masks below were built from dis_H1 (the B0006 table) while
# selecting from dis_H (B0005); use dis_H's own 'cycle' column so the B0005
# plots never depend on the other battery's index or length.
sns.set_style("darkgrid")
plt.figure(figsize=(8, 5))
plot_df = dis_H.loc[(dis_H['cycle']>=1),['cycle','H']]
plot_df1 = dis_H.loc[(dis_H['cycle']>=1),['cycle','Capacity']]
plt.plot(plot_df1['cycle'], plot_df1['Capacity'], color='red')
plt.title('Discharge B0005 - With Capacity')
sns.set_style("darkgrid")
plt.figure(figsize=(8, 5))
plt.plot(plot_df['cycle'], plot_df['H'])
#plt.plot(plot_df1['cycle'], plot_df1['Capacity'], color='red')
#Draw threshold
#plt.plot(dis['cycle'], dis['limt']) 'g'
plt.ylabel('time')
# make x-axis ticks legible
adf = plt.gca().get_xaxis().get_major_formatter()
#adf.scaled[1.0] = '%m-%d-%Y'
plt.xlabel('cycle')
plt.title('Discharge B0005 - Cycle with Time interval')
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns

# Same pair of plots for battery B0006: capacity, then H per cycle.
plt.figure(figsize=(8, 5))
plot_df = dis_H1.loc[(dis_H1['cycle']>=1),['cycle','H']]
plot_df1 = dis_H1.loc[(dis_H1['cycle']>=1),['cycle','Capacity']]
plt.plot(plot_df1['cycle'], plot_df1['Capacity'], color='red')
plt.title('Discharge B0006 - With Capacity')
sns.set_style("darkgrid")
plt.figure(figsize=(8, 5))
plt.plot(plot_df['cycle'], plot_df['H'])
#plt.plot(plot_df1['cycle'], plot_df1['Capacity'], color='red')
#Draw threshold
#plt.plot(dis['cycle'], dis['limt']) 'g'
plt.ylabel('time')
# make x-axis ticks legible
adf = plt.gca().get_xaxis().get_major_formatter()
#adf.scaled[1.0] = '%m-%d-%Y'
plt.xlabel('cycle')
plt.title('Discharge B0006 - Cycle with Time interval')
# -
#dis_H=pd.read_csv('Discharge5_H.csv')
#dis_H1=pd.read_csv('Discharge6_H.csv')
# Linear fit mapping the health indicator H to capacity for B0005.
# BUG FIX: numpy was only imported further down the notebook (just before the
# B0006 fit), so `np.polyfit` below raised NameError on a clean top-to-bottom
# run.  Import it here, at first use.
import numpy as np
Cap_fit=dis_H['Capacity'].values
H_fit=dis_H['H'].values
l1,l2=[],[]
z=np.polyfit(H_fit,Cap_fit,1)
print(z)
# NOTE(review): the slope/intercept below are hard-coded copies of an earlier
# polyfit run; they should track the printed z (z[0], z[1]) if the data change.
for i in range(len(H_fit)):
    xx=(0.0004789782 * H_fit[i])+0.718498039
    l1.append(xx)
dis_H['fit']=l1
# <h3> the mapping relationship between indirect HI and Li-ion battery capacity is obtained by the polynomial fitting (B0005)</h3>
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns

# Overlay measured capacity (red) with the H-based linear mapping (blue)
# for battery B0005.
plot_df = dis_H.loc[(dis_H['cycle']>=1),['cycle','Capacity']]
plot_df1 = dis_H.loc[(dis_H['cycle']>=1),['cycle','fit']]
sns.set_style("darkgrid")
plt.figure(figsize=(20, 8))
plt.plot(plot_df['cycle'], plot_df['Capacity'],color='red')
plt.plot(plot_df1['cycle'], plot_df1['fit'],'-',color='blue')
plt.ylabel('Mapping')
# make x-axis ticks legible
adf = plt.gca().get_xaxis().get_major_formatter()
#adf.scaled[1.0] = '%m-%d-%Y'
plt.xlabel('Cycle')
plt.title('Discharge')
# -
# Linear fit mapping H to capacity for battery B0006.
Cap_fit=dis_H1['Capacity'].values
H_fit=dis_H1['H'].values
import numpy as np # linear algebra
l1,l2=[],[]
z=np.polyfit(H_fit,Cap_fit,1)
print(z)
# NOTE(review): coefficients below are hard-coded from a previous polyfit
# run; they should match the printed z (z[0], z[1]) if the data change.
for i in range(len(H_fit)):
    xx=(0.000416760705 * H_fit[i])+0.908303981
    l2.append(xx)
dis_H1['fit']=l2
# <h3> the mapping relationship between indirect HI and Li-ion battery capacity is obtained by the polynomial fitting (B0006)</h3>
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns

# Overlay measured capacity (red) with the H-based linear mapping (blue)
# for battery B0006.
plot_df = dis_H1.loc[(dis_H1['cycle']>=1),['cycle','Capacity']]
plot_df1 = dis_H1.loc[(dis_H1['cycle']>=1),['cycle','fit']]
sns.set_style("darkgrid")
plt.figure(figsize=(20, 8))
plt.plot(plot_df['cycle'], plot_df['Capacity'],color='red')
plt.plot(plot_df1['cycle'], plot_df1['fit'],'-',color='blue')
plt.ylabel('Mapping')
# make x-axis ticks legible
adf = plt.gca().get_xaxis().get_major_formatter()
#adf.scaled[1.0] = '%m-%d-%Y'
plt.xlabel('Cycle')
plt.title('Discharge')
# -
| LSTM_Paper/DSTLM_Paper_4.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Xwd2MibbbSp7" colab_type="code" colab={}
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
# + id="S3E5Vaf9fskn" colab_type="code" colab={}
# Load the corpus and build character <-> integer lookup tables.
with open('anna.txt','r') as f:
    text=f.read()
vocab=sorted(set(text))  # sorted unique characters
vocab_to_int={c:i for i,c in enumerate(vocab)}
int_to_vocab=dict(enumerate(vocab))
# The whole book as a 1-D array of int32 character ids.
encoded=np.array([vocab_to_int[c] for c in text],dtype=np.int32)
# + id="AJhvueKRh2Qs" colab_type="code" outputId="af39fdda-bfa0-440a-a004-07057ad66ea3" colab={"base_uri": "https://localhost:8080/", "height": 134}
print(text[:100])  # first 100 raw characters of the corpus
# + id="GWg0eZHOiATV" colab_type="code" outputId="b3638e3c-75c3-4986-915b-6167fc71958a" colab={"base_uri": "https://localhost:8080/", "height": 134}
encoded[:100]  # the same 100 characters as integer ids
# + [markdown] id="Zl4oV2YxibLC" colab_type="text"
# Type of a Classification Problem:
# num_classes=len()
# + id="fodCTVnwiTlF" colab_type="code" outputId="8e31996c-2c7c-4684-b271-0ac69882de2d" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(vocab)  # number of distinct characters = number of softmax classes
# + [markdown] id="xRQocypCi5fa" colab_type="text"
# ##Mini-Batches
# + id="bGNRTeMHixce" colab_type="code" colab={}
def get_batches(arr, batch_size, n_steps):
    """Yield (x, y) minibatches of shape (batch_size, n_steps).

    y is x shifted one step left (next-character targets); the final batch,
    which has no character after its last column, is zero-padded there.
    Any trailing characters that do not fill a whole batch are dropped.
    """
    chars_per_batch = batch_size * n_steps
    n_batches = len(arr) // chars_per_batch
    # Trim to full batches, then lay the stream out as batch_size rows.
    arr = arr[:n_batches * chars_per_batch].reshape(batch_size, -1)
    for start in range(0, arr.shape[1], n_steps):
        x = arr[:, start:start + n_steps]
        # Targets: same window shifted by one; pad the missing last column.
        shifted = arr[:, start + 1:start + n_steps + 1]
        y = np.zeros(x.shape, dtype=x.dtype)
        y[:, :shifted.shape[1]] = shifted
        yield x, y
# + id="soHabusWnkc1" colab_type="code" colab={}
# Pull one minibatch to sanity-check shapes and the one-step target shift.
batches=get_batches(encoded,10,50)
x,y=next(batches)
# + id="MNWU-e_Jvs_k" colab_type="code" outputId="38484d0e-b6dc-4b32-8de8-623415fd6cb4" colab={"base_uri": "https://localhost:8080/", "height": 34}
int_to_vocab[0]  # character that id 0 decodes to (the value used to pad the final batch's targets)
# + id="ZlaGkV5kpM6V" colab_type="code" outputId="e91e39f1-75e9-4c3c-8008-e40730463812" colab={"base_uri": "https://localhost:8080/", "height": 386}
# y should be x shifted one step left (next-character targets).
print('x\n',x[:10,:10])
print('y\n',y[:10,:10])
# + id="IV9Nkr7WrGo1" colab_type="code" colab={}
def build_inputs(batch_size, num_steps):
    """Create the graph's input/target placeholders and the dropout keep-prob.

    inputs and targets are int32 character-id tensors of shape
    (batch_size, num_steps); keep_prob is a float scalar fed at run time.
    """
    shape = [batch_size, num_steps]
    inputs = tf.placeholder(tf.int32, shape, name='inputs')
    targets = tf.placeholder(tf.int32, shape, name='targets')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return inputs, targets, keep_prob
# + id="qMRNmNmrzecH" colab_type="code" colab={}
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    """Stack num_layers dropout-wrapped LSTM cells; return (cell, zero state)."""
    def lstm_with_dropout(size, keep):
        # One LSTM layer with dropout applied to its outputs.
        layer = tf.nn.rnn_cell.LSTMCell(num_units=size)
        return tf.nn.rnn_cell.DropoutWrapper(layer, output_keep_prob=keep)

    stacked = tf.nn.rnn_cell.MultiRNNCell(
        [lstm_with_dropout(lstm_size, keep_prob) for _ in range(num_layers)])
    initial_state = stacked.zero_state(batch_size, tf.float32)
    return stacked, initial_state
# + id="rgpNAUxAR4r0" colab_type="code" colab={}
def build_output(lstm_output, in_size, out_size):
    """Project LSTM outputs to vocabulary logits and softmax predictions.

    Flattens the (batch, steps, in_size) outputs to rows of in_size, then
    applies a single dense softmax layer of out_size classes.
    """
    flat = tf.reshape(tf.concat(lstm_output, axis=1), [-1, in_size])
    with tf.variable_scope('softmax'):
        softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))
    logits = tf.matmul(flat, softmax_w) + softmax_b
    out = tf.nn.softmax(logits, name='predictions')
    return out, logits
# + id="6qghjhMAyND-" colab_type="code" colab={}
def build_loss(logits, targets, lstm_size, num_classes):
    """Mean softmax cross-entropy between logits and one-hot encoded targets."""
    labels = tf.reshape(tf.one_hot(targets, num_classes), logits.get_shape())
    per_step = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    return tf.reduce_mean(per_step)
# + id="tdB1QV6D6X8R" colab_type="code" colab={}
def build_optimizer(loss, learning_rate, grad_clip):
    """Build the training op: Adam with global-norm gradient clipping.

    Arguments:
        loss: Network loss tensor.
        learning_rate: Learning rate for the Adam optimizer.
        grad_clip: Clip threshold for the global gradient norm (keeps the
            RNN's exploding gradients under control).
    """
    # Optimizer for training, using gradient clipping to control exploding gradients
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
    train_op = tf.train.AdamOptimizer(learning_rate)
    optimizer = train_op.apply_gradients(zip(grads, tvars))
    return optimizer
# + id="rfYakULW3JEH" colab_type="code" colab={}
batch_size = 100        # Sequences per batch
num_steps = 100         # Number of sequence steps per batch
lstm_size = 512         # Size of hidden layers in LSTMs
num_layers = 2          # Number of LSTM layers
learning_rate = 0.01    # Learning rate
keep_prob = 0.5         # Dropout keep probability used during training
# + id="PMLtEVXa5IbI" colab_type="code" colab={}
class CharRNN:
    """Character-level RNN language model.

    Constructing an instance builds the whole TF1 graph: input placeholders,
    a stacked dropout-wrapped LSTM, a softmax projection, the cross-entropy
    loss and a gradient-clipped Adam train op.  With sampling=True the graph
    is built for one character at a time (batch 1, single step).
    """
    def __init__(self, num_classes, batch_size=64, num_steps=50,
                 lstm_size=128, num_layers=2, learning_rate=0.001,
                 grad_clip=5, sampling=False):
        # When we're using this network for sampling later, we'll be passing in
        # one character at a time, so providing an option for that
        if sampling == True:
            batch_size, num_steps = 1, 1
        else:
            batch_size, num_steps = batch_size, num_steps
        tf.reset_default_graph()
        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
        # Build the LSTM cell
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
        ### Run the data through the RNN layers
        # First, one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)
        # Run each sequence step through the RNN and collect the outputs
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
        self.final_state = state
        # Get softmax predictions and logits
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
        # Loss and optimizer (with gradient clipping)
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
# + id="Pj40fCCm3sSq" colab_type="code" outputId="6ff6959d-c6d9-4926-f3ed-521a82d448b6" colab={"base_uri": "https://localhost:8080/", "height": 1000}
epochs = 40
# Print losses every N iterations
print_every_n = 50
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                lstm_size=lstm_size, num_layers=num_layers,
                learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Use the line below to load a checkpoint and resume training
    #saver.restore(sess, 'checkpoints/______.ckpt')
    counter = 0
    for e in range(epochs):
        # Train network; the LSTM state is reset to zeros at each epoch and
        # then threaded through consecutive batches within the epoch.
        new_state = sess.run(model.initial_state)
        loss = 0
        for x, y in get_batches(encoded, batch_size, num_steps):
            counter += 1
            start = time.time()
            feed = {model.inputs: x,
                    model.targets: y,
                    model.keep_prob: keep_prob,
                    model.initial_state: new_state}
            batch_loss, new_state, _ = sess.run([model.loss,
                                                 model.final_state,
                                                 model.optimizer],
                                                feed_dict=feed)
            if (counter % print_every_n == 0):
                end = time.time()
                print('Epoch: {}/{}... '.format(e+1, epochs),
                      'Training Step: {}... '.format(counter),
                      'Training loss: {:.4f}... '.format(batch_loss),
                      '{:.4f} sec/batch'.format((end-start)))
            if (counter % save_every_n == 0):
                saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
    # Final checkpoint after the last epoch.
    saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
# + id="ipul6IMo3vET" colab_type="code" outputId="0a3ae15e-46b5-4236-a550-a3f1194d00c6" colab={"base_uri": "https://localhost:8080/", "height": 706}
tf.train.get_checkpoint_state('checkpoints')  # list the checkpoints written above
# + id="T-LHJzW83xVA" colab_type="code" colab={}
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one character id from the top_n most probable entries of preds.

    All probabilities outside the top_n are zeroed (in place on the squeezed
    view, as before) and the remainder renormalized before sampling.
    """
    probs = np.squeeze(preds)
    probs[np.argsort(probs)[:-top_n]] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
# + id="JPA4AbOb3zM-" colab_type="code" colab={}
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Restore a trained CharRNN from `checkpoint` and generate `n_samples`
    characters, seeded with the `prime` string.

    NOTE(review): the `vocab_size` parameter is never used; the global
    `vocab` sizes the model and the sampler instead -- confirm intent.
    """
    samples = [c for c in prime]
    model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Warm up the LSTM state on the prime characters, one at a time.
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        # First generated character, sampled from the last prime prediction.
        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])
        # Autoregressive loop: feed each sampled character back in.
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])
    return ''.join(samples)
# + id="LW-KGzlU36N6" colab_type="code" outputId="587796ed-3832-47ec-87b9-ff944ab604b4" colab={"base_uri": "https://localhost:8080/", "height": 34}
tf.train.latest_checkpoint('checkpoints')  # path of the newest checkpoint
# + id="L4I765ih39De" colab_type="code" outputId="4c51e03b-e422-4151-9087-48f7a0774cc9" colab={"base_uri": "https://localhost:8080/", "height": 739}
# Long sample (2000 chars) from the most recent checkpoint.
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
# + id="KCigcMpy4ByP" colab_type="code" outputId="58e303ed-3e7d-4797-f382-1dbd43872885" colab={"base_uri": "https://localhost:8080/", "height": 306}
# Earlier checkpoints for comparison: sample quality improves with steps.
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
# + id="qIrXe3gC4EmX" colab_type="code" outputId="dbb4454a-e58f-4b96-aa78-f9ae2801eb04" colab={"base_uri": "https://localhost:8080/", "height": 524}
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far the once")
print(samp)
# + id="uPTHxaLf4H9t" colab_type="code" outputId="5b2dcb00-211c-4a7f-cc20-aedb3985007e" colab={"base_uri": "https://localhost:8080/", "height": 269}
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
| RNN/Text Generator/Anna_KaRNNa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# (stats_tutorial)=
# # Intro to the stats module
from scipy import stats
import numpy as np
from xarray_einstats.stats import XrContinuousRV, rankdata, hmean, skew, median_abs_deviation
from xarray_einstats.tutorial import generate_mcmc_like_dataset
ds = generate_mcmc_like_dataset(11)  # seeded toy dataset; has mu/sigma/score/x_plot variables used below
# (stats_tutorial/dists)=
# ## Probability distributions
# ### Initialization
#
norm = XrContinuousRV(stats.norm, ds["mu"], ds["sigma"])  # scipy norm with DataArray loc/scale
# ### Using its methods
# Once initialized, you can use its methods exactly as you'd use them with scipy distributions. The only two differences are
# 1. They now take scalars or DataArrays as inputs, arrays are only accepted as the arguments
# on which to evaluate the methods (in scipy docs they are represented by `x`, `k` or `q` depending on the method)
# 2. `size` behaves differently in the `rvs` method. This ensures that you don't need to care about any broadcasting or alignment of arrays, `xarray_einstats` does this for you.
#
# You can generate 10 random draws from the initialized distribution. Here, unlike what would happen with scipy, the output won't have shape 10, but instead will have shape `10, *broadcasted_input_shape`. xarray generates the `broadcasted_input_shape` and `size` is independent from it so you can relax and not care about broadcasting.
norm.rvs(size=(10))
# If the dimension names are not provided, `xarray_einstats` assigns `rv_dim#` as dimension name as many times as necessary. To define the names manually you can use the `dims` argument:
norm.rvs(size=(5, 3), dims=["subject", "batch"])
# The behaviour for other methods is similar:
norm.logcdf(ds["x_plot"])
# For convenience, you can also use {term}`array_like` input which is converted to a DataArray under the hood. In such cases, the dimension name is `quantile` for `ppf` and `isf`, `point` otherwise. In both cases, the values passed as input are preserved as coordinate values.
norm.ppf([.25, .5, .75])
# Evaluate the pdf on a 50-point grid; the result gains a `point` dimension.
pdf = norm.pdf(np.linspace(-5, 5))
pdf
# Plot a subset of the pdf we just calculated with matplotlib.
# +
import matplotlib.pyplot as plt

plt.rcParams["figure.facecolor"] = "white"
fig, ax = plt.subplots()
# Select one (team, chain) slice of the pdf; `point` holds the x grid.
ax.plot(pdf.point, pdf.sel(team="d", chain=2), color="C0", alpha=.5)
ax.set(xlabel="x", ylabel="pdf of normal distribution", );
# -
# (stats_tutorial/other)=
# ## Other functions
# The rest of the functions in the module have a very similar API to their scipy counterparts, the only differences are:
#
# * They take `dims` instead of `axis`. Moreover, `dims` can be `str` or a sequence of `str` instead of a single integer only as supported by `axis`.
# * Arguments that take {term}`numpy:array_like` as values take `DataArray` inputs instead. For example the `scale` argument in {func}`~xarray_einstats.stats.median_abs_deviation`
# * They accept extra arbitrary kwargs, that are passed to {func}`xarray.apply_ufunc`.
#
# Here are some examples of using functions in the `stats` module of `xarray_einstats` with `dims` argument instead of `axis`.
# Harmonic mean of mu, reducing only the `team` dimension.
hmean(ds["mu"], dims="team")
# Rank scores jointly over the chain and draw dimensions.
rankdata(ds["score"], dims=("chain", "draw"), method="min")
# :::{important}
# The statistical summaries and other statistical functions can take both {class}`~xarray.DataArray` and {class}`~xarray.Dataset`. Methods in probability functions and functions in linear algebra module
# are tested only on `DataArray`s.
#
# When using `Dataset` inputs, you must make sure that all the dimensions in `dims` are
# present in _all_ the `DataArray`s within the `Dataset`.
# :::
skew(ds[["score", "mu", "sigma"]], dims=("chain", "draw"))
median_abs_deviation(ds)
# %load_ext watermark
# %watermark -n -u -v -iv -w -p xarray_einstats,xarray
| docs/source/tutorials/stats_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Interactive Visualizations of NBA Stats with Bokeh
#
# In this tutorial we will learn how to make an interactive dashboard to visualize player data from the 2015-2016 NBA season. We will do this using data from basketball-reference.com and the python visualization library Bokeh. The point of this tutorial is to introduce you to Bokeh.
#
# I have prepared the data from basketball-reference. It is data on every player in the NBA last season along with their season totals for all the basic NBA statistics (field goals made, field goal attempts, 3 pointers, rebounds, etc.). However, in order to follow this tutorial you will need to install Bokeh. It should actually already be installed if you downloaded Anaconda, but the version included is not the latest. You will need to upgrade to version 12.3. You can do so by entering the following in your shell:
#
# sudo pip install bokeh --upgrade
# After upgrading, you will need to shut down Jupyter completely and open this notebook again. Sorry in advance!
# ## The Basics
#
# Before we get into anything too fancy, let's make some basic plots using our player data with Bokeh. First we will need to import Pandas and some basic Bokeh features. Bokeh.charts includes the functions necessary to make high-level charts. We will use the Scatter function to make a scatterplot. The show function works the same as matplot lib - it displays the plot.
# +
import pandas as pd
from bokeh.charts import Scatter
from bokeh.io import show, output_notebook
output_notebook()
# -
df = pd.read_csv("players.csv", header=0)  # one row per NBA player with season totals (FG, FGA, ...) plus Player/Tm/Pos
# Let's start off by plotting each player's total number of field goals made by the total number of field goals taken.
# +
# High-level bokeh.charts scatter: shots made vs. shots attempted per player.
# NOTE(review): bokeh.charts was removed from modern Bokeh releases; this
# cell targets the 0.12.x API referenced earlier in this tutorial.
p = Scatter(df, x='FG', y='FGA',
            title="Each Player's Number of Shots Made by Number of Shots Taken",
            xlabel="Number of FG Made", ylabel="Number of FG Attempted")
show(p)
# -
# If you are viewing this as a static page, you will not be able to see the plots without running the cells. This is an unfortunate feature of Bokeh in Jupyter. For all plots I will paste a screenshot of the plot in the pdf in the folder. I will denote each plot with a number. The plot above is plot 1.
#
# ## Adding Basic Custom Interactivity
#
# Ok that's pretty cool! But you may be wondering which dots correspond to which players. We can add functionality to this graph to figure that out. In order to do this, we will need to stray away from the charts module, which is only intended to make charts without any interactivity. We will instead use the plotting module which includes the figure class and ColumnDataSource. The figure class is a generalized class to take in the specifications of our chart. This will replace "Scatter" from before. ColumnDataSource allows us to generalize the variables used in our plots. It essentially tells Bokeh what data we are using for our plot. In this case, it is the players dataframe.
#
# We also import the tools we want to use in this plot. Bokeh.charts did that automatically for us before, but now with bokeh.plotting we need to specify these. If you want to know more about what each tool does, check out this link: http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#basic-tooltips
# +
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.models import HoverTool, PanTool, BoxZoomTool, WheelZoomTool, ResetTool

# Wrap the dataframe so plot glyphs can reference its columns by name.
source = ColumnDataSource(df)
# -
# Here we specify the parameters of our tools. All tools in Bokeh are pretty simple. The most involved is the hover tool, which isn't that much work. All we do here is specify the information shown when the user hovers over a point on the plot. The first argument in each tuple is the label that will appear. The second argument is the actual data from the source. The '@' tells Bokeh to reference our source and look for the given column name. Make note that the values supplied in the second argument must exactly match the name of a column in the data source.
# +
# Tooltip shown on hover: each "@col" pulls that column from the data source.
hover = HoverTool(
    tooltips=[
        ("FG", "@FG"),
        ("FGA", "@FGA"),
        ("Player", "@Player"),
        ("Team", "@Tm"),
    ]
)
pan = PanTool()
boxzoom = BoxZoomTool()
wheelzoom = WheelZoomTool()
reset = ResetTool()
# -
# Here we use the figure class to actually start building our plot. We give the tools that we specified in the previous cell. Notice in "p.circle" that we are able to simply give 'FG' and 'FGA' as arguments (columns from our data source) without including anything about df, the dataframe where our data is coming from. That is because we set the source parameter to our ColumnDataSource(df).
# +
p = figure(tools=[hover, pan, boxzoom, wheelzoom, reset],
           title="Each Player's Number of Shots Made by Number of Shots Taken")
p.xaxis.axis_label = "FG Made"
p.yaxis.axis_label = "FG Attempted"
# Column names resolve against `source`, so bare 'FG'/'FGA' strings suffice.
p.circle('FG', 'FGA', source=source)
show(p)
# -
# The above is plot 2 in the pdf.
#
# ## Coloring Based on Other Variables
#
# Now we can hover over points to see which players made and attempted how many shots. We can also see what team they are on. We can use the various tools to zoom in on more crowded parts of the plot to look at individual player stats.
#
# I think another cool feature to add would be to see what types of players make the most field goals. We can split up players by their position and see how many field goals they make/attempt. We will do this by coloring each circle on our plot by a position. To do this, we first make a new column in our dataframe that assigns each position a particular color:
# +
# Map each player position to a plotting color.
# BUG FIX / modernization: Series.iteritems() was removed in pandas 2.0, and
# the original chain of independent `if`s silently skipped any position not
# listed, leaving `colors` shorter than the frame and failing the column
# assignment with a cryptic length error.  A dict lookup keeps the exact same
# colors and fails loudly (KeyError naming the bad position) instead.
position_colors = {
    'PF': 'red',
    'SG': 'green',
    'C': 'blue',
    'SF': 'orange',
    'PG': 'brown',
    'PF-C': 'purple',
    'SG-SF': 'yellow',
}
df["pos_colors"] = [position_colors[pos] for pos in df["Pos"]]
# -
# Since pos_colors is part of the data frame, after calling ColumnDataSource, it is also part of our plot's data source. So, when we call p.circle, all we have to do is set the color parameter to the name of the column in the data source, which is "pos_colors" in this case. You may be wondering why we repeat defining source, the tools, and p. Bokeh simply requires this for each plot.
# +
# Rebuild the data source so it includes the new pos_colors column.
source = ColumnDataSource(df)

hover = HoverTool(
    tooltips=[
        ("FG", "@FG"),
        ("FGA", "@FGA"),
        ("Player", "@Player"),
        ("Team", "@Tm"),
        ("Position", "@Pos"),
    ]
)
pan = PanTool()
boxzoom = BoxZoomTool()
wheelzoom = WheelZoomTool()
reset = ResetTool()

p = figure(tools=[hover, pan, boxzoom, wheelzoom, reset],
           title="Each Player's Number of Shots Made by Number of Shots Taken")
p.xaxis.axis_label = "FG Made"
p.yaxis.axis_label = "FG Attempted"
# color="pos_colors" resolves to the per-row color column in the source.
p.circle('FG', 'FGA', color="pos_colors", source=source)
show(p)
# -
# The above is plot 3 in the pdf.
#
# A great part of data visualization is that we can see patterns that are not easily discernible just by looking at the numbers. Take a look at the blue dots in this plot, which correspond to the center position (denoted by 'C'). If you imagine a fitted linear regression through the blue dots, it seems it would have a lower slope than a regression line through all the other dots. This means that NBA centers make more shots on less attempts than other types of players. This makes sense because centers usually take shots much closer to the basket (AKA easier shots) than other types of players.
#
# Now we will move onto making a simple dashboard. This dashboard will allow users to compare multiple columns against each other from our original data. Basically, we will allow users to select which numerical stats they would like to compare on the scatterplot.
# ## A Simple Dashboard
#
# In this simple dashboard, we will just allow our users to select number of field goals made or number of 3-pointers made on the x-axis, and the number of field goals attempted or the number of 3-pointers attempted on the y-axis.
#
# First we create new columns x and y and we assign the FG and FGA columns of the dataframe to them respectively. FG and FGA will be our default x and y. As you will see, the x and y columns will be our generalized columns, and will update based on user inputs. Last, we set our source with ColumnDataSource of our new dataframe.
# +
# Generic x/y columns (defaulting to FG/FGA); the dashboard callback
# overwrites these in place when the user picks other stats.
df['x'] = df['FG']
df['y'] = df['FGA']

source = ColumnDataSource(df)
# -
# Here we define our basic interactions as we did in our earlier plots. The only difference is in defining tooltips within HoverTool. We name our scatterplot x and y simply "X" and "Y". This is because they will represent whatever x and y the user chooses. The values for x and y will be referenced from whatever is in the x an y columns of the dataframe since we use the '@' notation. When we pass hover to the figure class and make our plot, tooltips will connect to the source that we defined.
# +
# Generic tooltips: "@x"/"@y" read the mutable x/y columns, so the hover
# labels stay correct whichever stats the user selects.
hover = HoverTool(
    tooltips=[
        ("X", "@x"),
        ("Y", "@y"),
        ("Player", "@Player"),
        ("Team", "@Tm"),
        ("Position", "@Pos"),
    ]
)
pan = PanTool()
boxzoom = BoxZoomTool()
wheelzoom = WheelZoomTool()
reset = ResetTool()
# -
# Here we define our plot mostly the same as we did before. The main difference here is that we initialize r as an instance of p.circle. In Bokeh, r is called a glyph instance. This will be important for our next step.
# +
p = figure(tools=[hover, pan, boxzoom, wheelzoom, reset])
p.xaxis.axis_label = "X"
p.yaxis.axis_label = "Y"
# Keep the glyph renderer so the callback can mutate its data source later.
r = p.circle('x', 'y', source=source)
# -
# This update function will be called every time a user interacts with our plot. The function takes in the name of the columns we want to display on the x and y axes. It then tells our glyph instance r to change the columns x and y of our data source to mimic the columns the user chooses. The call to push_notebook is what tells Bokeh to push our changes to the plot displayed in the notebook.
# +
from bokeh.io import push_notebook
def update(xax, yax):
    """Point the scatter's generalized x/y columns at the chosen stats.

    xax, yax -- names of dataframe columns to show on each axis.
    """
    data = r.data_source.data
    for axis_col, chosen in (('x', xax), ('y', yax)):
        data[axis_col] = list(df[chosen])
    # Push the mutated data source out to the live notebook plot.
    push_notebook()
# -
# We finally show our plot. Note the notebook_handle set to True. This tells Bokeh we are making a notebook-friendly custom interaction.
show(p, notebook_handle=True)
# The above is plot 4 in the pdf.
#
# Now let's make the actual interaction capability. The interact function from ipywidgets allows us to make interactions within a Jupyter notebook. It takes in the update function, which it calls every time the plot is interacted with. The second and third arguments are lists of what columns the user can access for each axis. Note that the argument names in the interact function must exactly match the argument names in the update function. The interact function can distinguish between categorical and numerical variables. If we gave it a number as an argument along with start and end values, it would create a slider to select numbers instead of making dropdowns like it does in this case. Go ahead and play with the different inputs and notice how the graph changes.
# +
from ipywidgets import interact
interact(update, xax=["FG", "3P"], yax=["FGA", "3PA"])
# -
# Now we have a basic interactive chart where we can choose our x and y axis variables. Let's add some more variable options for the axes and the ability to color dots by some different variables. This will be our final dashboard that we have been building up to.
#
# ## The Final Dashboard
#
# If we want to be able to switch between colors based on user input, we will need to create a generalized "color" column in our dataframe. This is analogous to when we created x and y columns in the dataframe so the user can define what the current x and y are. We will set the default to the pos_color we made earlier. We also set our x and y to the default values of FG and FGA.
# +
df["color"] = df["pos_colors"]
df['x'] = df['FG']
df['y'] = df['FGA']
# -
# Let's make our plot so that the user can choose to color by each position, or color all the positions like pos_colors does. To do this, we will make new columns with colors for each position. For example, we will have a PG column that is brown for the points representing players that are point guards and gray for all other points.
# +
# Build one color column per position: points for that position get its
# highlight color, every other point is gray. These columns let the user
# toggle which position is highlighted in the dashboard.
#
# Replaces seven copy-pasted loops with a single data-driven one, and avoids
# Series.iteritems(), which is deprecated (removed in pandas 2.0).
position_colors = {
    'PF': 'red',
    'SG': 'green',
    'C': 'blue',
    'SF': 'orange',
    'PG': 'brown',
    'PF-C': 'purple',
    'SG-SF': 'yellow',
}
for position, highlight in position_colors.items():
    df[position] = [
        highlight if val == position else 'gray'
        for val in df['Pos']
    ]
# -
# As before we set our source and define our tools:
# +
# Rebuild the data source so it includes the new color columns.
source = ColumnDataSource(df)
hover = HoverTool(
    tooltips=[
        ("X", "@x"),
        ("Y", "@y"),
        ("Player", "@Player"),
        ("Team", "@Tm"),
        ("Position", "@Pos"),
    ]
)
pan = PanTool()
boxzoom = BoxZoomTool()
wheelzoom = WheelZoomTool()
reset = ResetTool()
# -
# We configure the plot the same as before, but this time we add the color argument and set it to the color column of our data source. Remember, as the user changes which position they would like to view, the color column is changed to copy whichever column in the data source corresponds to the user's choice.
# +
p = figure(tools=[hover, pan, boxzoom, wheelzoom, reset])
p.xaxis.axis_label = "X"
p.yaxis.axis_label = "Y"
# Each point's fill color now comes from the generalized "color" column.
r = p.circle('x', 'y', color="color", source=source)
# -
# The update function is mostly the same as before, but now we add the ability to update the color based on user input. As the user changes which position they would like to view, the color column is changed to copy whichever column in the data source corresponds to the user's choice. We then show our plot, setting notebook_handle to True as we did earlier.
# +
def update(xax, yax, position):
    """Refresh the live plot's axes and point colors from user selections.

    xax, yax -- dataframe columns to show on the x and y axes.
    position -- color-scheme column ('pos_colors' or one position column).
    """
    data = r.data_source.data
    for target, column in (('x', xax), ('y', yax), ('color', position)):
        data[target] = list(df[column])
    # Push the new columns out to the already-displayed plot.
    push_notebook()
show(p, notebook_handle=True)
# -
# The above is plot 5.
#
# Now define the interactions. This time we will dump in all the basic stats as options for both x and y. We also now add in our coloring by position options. Note that the first value in each list is the default option given in the plot.
#
# If you are not familiar with basketball statistics nomenclature, take a look at the basketball-reference's glossary: http://www.basketball-reference.com/about/glossary.html
# Dropdowns for both axes and the color scheme; first list entry is the
# default shown when the widget renders.
interact(update,
         xax=["FG","2P","3P","2P%","3P%","FGA","2PA","3PA","ORB","DRB","TRB","AST","STL","BLK","TOV","PTS"],
         yax=["FGA","2P","3P","2P%","3P%","FG","2PA","3PA","ORB","DRB","TRB","AST","STL","BLK","TOV","PTS"],
         position=["pos_colors", "PG", "SG", "SF", "PF", "C", "PF-C", "SG-SF"])
# ## Wrapping Up
#
# That concludes the material for this tutorial. We have created a dashboard that allows users to compare players by position played and by various other basic statistics. This allows users to visualize relationships that are not always obvious. This kind of dashboard is especially essential in business settings, when there are often non-technical people who need to understand the implications of the data they have.
#
# Thank you for reading this tutorial and I hope you have gained something from it!
| 2016/tutorial_final/139/Using Bokeh to Visualize NBA Stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# %matplotlib inline
# -
dataset = pd.read_csv('data-original.csv')
#replace ? with 1
# NOTE(review): imputing missing 'Bare Nuclei' values with the constant '1'
# is a strong assumption -- confirm against the dataset documentation.
dataset['Bare Nuclei'] = dataset['Bare Nuclei'].replace(['?'], '1')
# Features are all columns except the first (id) and last (label).
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#feature scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit the scaler on the training split only, then reuse it for the test split.
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#train model
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
#predict test set results and get accuracy
y_pred = classifier.predict(X_test)
print('Accuracy', (y_test == y_pred).mean())
#get confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
# 2x2 confusion-matrix layout: rows = actual class, columns = predicted class.
# Fixed typo in the first label ("neatives" -> "negatives").
print("true negatives: {}".format((cm)[0][0]))
print("false positives: {}".format((cm)[0][1]))
print("false negatives: {}".format((cm)[1][0]))
print("true positives: {}".format((cm)[1][1]))
#compute accuracy with k-fold cross validation
from sklearn.model_selection import cross_val_score
# 10-fold CV on the training split gives a less variance-prone accuracy estimate.
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)
#get mean of accuracies
#.2f means 2 decimals after the comma
print("Accuracy: {:.2f} %".format(accuracies.mean()*100))
#get standard deviations from accuracies' list(this values should be small)
print("Standard Deviation: {:.2f} %".format(accuracies.std()*100))
| 201213_DecTree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exploratory analysis of the US Airport Dataset
#
# This dataset contains data for 25 years[1995-2015] of flights between various US airports and metadata about these routes. Taken from Bureau of Transportation Statistics, United States Department of Transportation.
#
# Let's see what can we make out of this!
# +
# %matplotlib inline
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
pass_air_data = pd.read_csv('datasets/passengers.csv')
# -
# In the `pass_air_data` dataframe we have the information of number of people that fly every year on a particular route.
pass_air_data.head()
# +
# Create a MultiDiGraph from this dataset
# MultiDiGraph keeps one parallel edge per (route, year) row instead of
# collapsing them, so per-year passenger counts survive.
passenger_graph = nx.from_pandas_edgelist(pass_air_data, source='ORIGIN', target='DEST', edge_attr=['YEAR', 'PASSENGERS'], create_using=nx.MultiDiGraph())
# -
# ### Cleveland to Chicago, how many people fly this route?
passenger_graph['CLE']['ORD']
# Collect (year, passengers) pairs from the parallel CLE->ORD edges and plot
# the time series.
temp = [(i['YEAR'], i['PASSENGERS'])for i in dict(passenger_graph['CLE']['ORD']).values()]
x, y = zip(*temp)
plt.plot(x, y)
plt.show()
# ## Exercise
#
# Find the busiest route in 1990 and in 2015 according to number of passengers, and plot the time series of number of passengers on these routes.
#
# You can use the DataFrame instead of working with the network. It will be faster ;)
# [5 mins]
# So let's have a look at the important nodes in this network, i.e. important airports in this network. We'll use pagerank, betweenness centrality and degree centrality.
nx.pagerank(passenger_graph)
def year_network(G, year):
    """Extract the single-year flight network from the multigraph.

    G -- MultiDiGraph whose edges carry 'YEAR' and 'PASSENGERS' attributes.
    year -- the year to keep.
    Returns a DiGraph whose edge weight is the passenger count for that year.
    """
    annual = nx.DiGraph()
    for src, dst, attrs in G.edges(data=True):
        if attrs['YEAR'] == year:
            annual.add_edge(src, dst, weight=attrs['PASSENGERS'])
    return annual
pass_2015 = year_network(passenger_graph, 2015)
len(pass_2015)
len(pass_2015.edges())
# Load in the GPS coordinates of all the airports
lat_long = pd.read_csv('datasets/GlobalAirportDatabase.txt', delimiter=':', header=None)
# Keep only rows whose IATA code (column 1) appears in the 2015 network.
lat_long[lat_long[1].isin(list(pass_2015.nodes()))]
pos_dict = {}
# Map airport code -> (column 15, column 14); presumably (longitude, latitude)
# in plotting order -- TODO confirm against the GlobalAirportDatabase format.
for airport in lat_long[lat_long[1].isin(list(pass_2015.nodes()))].iterrows():
    pos_dict[airport[1][1]] = (airport[1][15], airport[1][14])
pos_dict
# ## Exercise
#
# Using the position dictionary `pos_dict` create a plot of the airports, only the nodes not the edges.
#
# - As we don't have coordinates for all the airports we have to create a subgraph first.
# - Use `nx.subgraph(Graph, iterable of nodes)` to create the subgraph
# - Use `nx.draw_networkx_nodes(G, pos)` to map the nodes.
#
# or
#
# - Just use a scatter plot :)
# ### What about degree distribution of this network?
plt.hist(list(nx.degree_centrality(pass_2015).values()))
plt.show()
# Let's plot a log log plot to get a better overview of this.
# Tally how many nodes have each degree value.
d = {}
for i, j in dict(nx.degree(pass_2015)).items():
    if j in d:
        d[j] += 1
    else:
        d[j] = 1
# Log-log scatter of (degree, count); a roughly straight line would suggest
# a heavy-tailed degree distribution.
x = np.log2(list((d.keys())))
y = np.log2(list(d.values()))
plt.scatter(x, y, alpha=0.4)
plt.show()
# ### Directed Graphs
#
# 
# +
# Small directed-graph demo: watch how pagerank shifts as edges are added.
G = nx.DiGraph()
G.add_edge(1, 2, weight=1)
# print(G.edges())
# G[1][2]
# G[2][1]
# G.is_directed()
# type(G)
# -
# Star pointing into node 2: it should dominate pagerank.
G.add_edges_from([(1, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2)])
nx.draw_circular(G, with_labels=True)
G.in_degree()
nx.pagerank(G)
G.add_edge(5, 6)
nx.draw_circular(G, with_labels=True)
nx.pagerank(G)
# Give node 2 an outgoing edge; node 8 now inherits some of its rank.
G.add_edge(2, 8)
nx.draw_circular(G, with_labels=True)
nx.pagerank(G)
# ### Moving back to Airports
# Top-10 airports by three unweighted importance measures.
sorted(nx.pagerank(pass_2015, weight=None).items(), key=lambda x:x[1], reverse=True)[:10]
sorted(nx.betweenness_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.degree_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]
# 'ANC' is the airport code of Anchorage airport, a place in Alaska, and according to pagerank and betweenness centrality it is the most important airport in this network Isn't that weird? Thoughts?
#
# related blog post: https://toreopsahl.com/2011/08/12/why-anchorage-is-not-that-important-binary-ties-and-sample-selection/
#
# Let's look at weighted version, i.e taking into account the number of people flying to these places.
# Same rankings, but weighted by passenger counts on each edge.
sorted(nx.betweenness_centrality(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.pagerank(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
# ## How reachable is this network?
#
# We calculate the average shortest path length of this network, it gives us an idea about the number of jumps we need to make around the network to go from one airport to any other airport in this network.
# Raises for a disconnected graph -- which is the point of this cell.
nx.average_shortest_path_length(pass_2015)
# Wait, What??? This network is not connected. That seems like a really stupid thing to do.
list(nx.weakly_connected_components(pass_2015))
# ### SPB, SSB, AIK anyone?
pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['ORIGIN'] == 'AIK')]
# Drop the three isolated airports so the graph becomes weakly connected.
pass_2015.remove_nodes_from(['SPB', 'SSB', 'AIK'])
nx.is_weakly_connected(pass_2015)
nx.is_strongly_connected(pass_2015)
# ### Strongly vs weakly connected graphs.
# Toy example: a 3-cycle is strongly connected until we hang node 4 off it.
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(2, 3)
G.add_edge(3, 1)
nx.draw(G)
G.add_edge(3, 4)
nx.draw(G)
nx.is_strongly_connected(G)
list(nx.strongly_connected_components(pass_2015))
pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['DEST'] == 'TSP')]
# Keep the largest strongly connected component so shortest paths are defined.
# NOTE(review): strongly_connected_component_subgraphs was removed in
# networkx 2.4; newer versions need
# pass_2015.subgraph(max(nx.strongly_connected_components(pass_2015), key=len)).
pass_2015_strong = max(nx.strongly_connected_component_subgraphs(pass_2015), key=len)
len(pass_2015_strong)
nx.average_shortest_path_length(pass_2015_strong)
# #### Exercise! (Actually this is a game :D)
#
# How can we decrease the avg shortest path length of this network?
#
# Think of an effective way to add new edges to decrease the avg shortest path length.
# Let's see if we can come up with a nice way to do this, and the one who gets the highest decrease wins!!!
#
# The rules are simple:
# - You can't add more than 2% of the current edges( ~500 edges)
#
# [10 mins]
# ### What about airlines? Can we find airline specific reachability?
passenger_graph['CLE']['SFO'][25]
def str_to_list(a):
    """Parse a stringified Python list like "['x', 'y']" into ["'x'", "'y'"].

    Drops the surrounding brackets and splits on ', '; any quotes around
    the elements are kept as-is.
    """
    inner = a[1:-1]
    return inner.split(', ')
# Key 25 selects the 2015 edge among the parallel per-year edges.
for i in str_to_list(passenger_graph['JFK']['SFO'][25]['UNIQUE_CARRIER_NAME']):
    print(i)
# %%time
# Materialize the carrier-name string into a real list on every edge.
for origin, dest in passenger_graph.edges():
    for key in passenger_graph[origin][dest]:
        passenger_graph[origin][dest][key]['airlines'] = str_to_list(passenger_graph[origin][dest][key]['UNIQUE_CARRIER_NAME'])
# ### Exercise
#
# Play around with United Airlines network.
#
# - Extract a network for United Airlines flights from the metagraph `passenger_graph` for the year 2015
# - Make sure it's a weighted network, where weight is the number of passengers.
# - Find the number of airports and connections in this network
# - Find the most important airport, according to PageRank and degree centrality.
# NOTE: fill-in-the-blank student exercise -- the underscore placeholders are
# meant to be completed by the reader; this cell is not runnable as-is.
united_network = nx._________
for _______, _______ in passenger_graph.edges():
    if 25 in passenger_graph[______][_______]: # 25 key is for the year 2015
        if "'United Air Lines Inc.'" in ____________________:
            united_network.add_edge(_____, ______, weight= __________)
# number of nodes
# +
# number of edges
# +
# top 10 according to pagerank
# +
# top 10 according to degree centrality
# -
# ### Exercise
#
# We are in Cleveland so what should we do?
#
# Obviously we will make a time series of number of passengers flying out of Cleveland with United Airlines over the years.
#
# There are 2 ways of doing it.
# - Create a new multidigraph specifically for this exercise.
#
# OR
#
# - exploit the `pass_air_data` dataframe.
| Network-Analysis-Made-Simple/8-US-airports-case-study-student.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''bridges'': conda)'
# metadata:
# interpreter:
# hash: 4368c9daf815594993dffb1168695d0cd0ba2a8255d4d6d102b0bfa1e4fa78fe
# name: 'Python 3.8.5 64-bit (''bridges'': conda)'
# ---
# +
import pandas as pd
path1 = "../data/final/B2P_Rwanda_matchedIDs_final_2020-09-24.csv"
df = pd.read_csv(path1)
df.columns
# -
# variables for average household, mean and median income
avg_household = 4.2
mean_income_rwf = 1353.81 # per day
mean_income_usd = 1.39 # per day
median_income = 0 # don't have this information yet
# Individuals served / household size = estimated households served per bridge
# -- presumably the intended meaning of 'inc_income'; TODO confirm.
df['inc_income'] = (df['Individuals_directly_served'] / avg_household)
df['inc_income'].describe()
# Scale household counts by mean daily income in each currency.
df['inc_income_rwf'] = df['inc_income'] * mean_income_rwf
df['inc_income_usd'] = df['inc_income'] * mean_income_usd
df['inc_income_rwf'] = df['inc_income_rwf'].astype(float).round(decimals=2)
df['inc_income_usd'] = df['inc_income_usd'].astype(float).round(decimals=2)
df.to_csv("../data/final/B2P_Rwanda_matchedIDs_final_2020-10-07.csv")
| notebooks/issue2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/LeeGitaek/2020_AI_Class/blob/master/soil.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="1MFkd60uAwZP" colab_type="text"
# 토양 오염의 수치를 예측
# + id="sqA6j6ba96Lp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="c65615c5-a29b-48c5-ca1d-bb301e6fea05"
# !pip uninstall --y kaggle
# !pip install --upgrade pip
# !pip install kaggle==1.5.6
# + id="nEBzT6-0A6dG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="567cd057-9111-4afe-ecfa-7c6ec83c3ece"
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle
# !ls -lha kaggle.json
# !chmod 600 ~/.kaggle/kaggle.json
# + id="KENt0bKxBDqb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="abf66bc9-adf7-4b7d-c811-91d6dc636a49"
# !kaggle competitions download -c 2020soil
# + id="qWPLDxV3BJtQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="f4e54a88-32a6-4303-debb-c4857c8bce44"
# !unzip 2020soil.zip
# + id="9etqW3PUBY9r" colab_type="code" colab={}
import pandas as pd
import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
# + id="HBfl6ZzXBqPl" colab_type="code" colab={}
# Pick GPU when available; fix seeds for reproducible weight init.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
# + id="0z7a1eghB6zh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="8a33cbac-cb3b-4b95-c90c-b2bc29902c6d"
train = pd.read_csv('2020AI_soil_train.csv')
print(train.head(10))
print(train.info())
# + id="8HH2r5DVCTci" colab_type="code" colab={}
learning_rate = 0.001
training_epoch = 1000
batch_size = 50
# + id="kVV6OYuICEOg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d07d8423-e24d-402d-915c-76a8d69e2a7c"
# Features: all columns except the first (id) and last (target).
x_train = train.iloc[:,1:-1]
y_train = train.iloc[:,[-1]]
x_train = np.array(x_train)
y_train = np.array(y_train)
x_train = torch.FloatTensor(x_train)
y_train = torch.FloatTensor(y_train)
print(x_train.shape)
print(y_train.shape)
# + id="nnOkEqkCCmhN" colab_type="code" colab={}
train_dataset = torch.utils.data.TensorDataset(x_train,y_train)
# drop_last avoids a smaller final batch, which keeps the per-batch cost
# averaging in the training loop exact.
data_loader = torch.utils.data.DataLoader(dataset = train_dataset,
                                          batch_size = batch_size,
                                          shuffle = True,
                                          drop_last=True)
# + [markdown] id="_3IgF6c6DFp2" colab_type="text"
# layer 5개 , kaiming uniform 초기화
# + id="7whZ-X5XC77U" colab_type="code" colab={}
# Five fully connected layers: 7 inputs -> 4 -> 4 -> 4 -> 4 -> 1 output.
linear1 = nn.Linear(7,4,bias=True)
linear2 = nn.Linear(4,4,bias=True)
linear3 = nn.Linear(4,4,bias=True)
linear4 = nn.Linear(4,4,bias=True)
linear5 = nn.Linear(4,1,bias=True)
# NOTE(review): the markdown above says Kaiming-uniform init, but the code
# alternates Xavier-uniform and Kaiming-normal -- confirm which was intended.
nn.init.xavier_uniform_(linear1.weight)
nn.init.kaiming_normal_(linear2.weight)
nn.init.xavier_uniform_(linear3.weight)
nn.init.kaiming_normal_(linear4.weight)
nn.init.xavier_uniform_(linear5.weight)
relu = nn.ReLU()
# + [markdown] id="O-ooZGBiNMSW" colab_type="text"
# activation function - relu
# + id="HHgH4VEgDRm9" colab_type="code" colab={}
# No activation after the last layer: this is a regression head.
model = nn.Sequential(
    linear1,relu,
    linear2,relu,
    linear3,relu,
    linear4,relu,
    linear5
).to(device)
# + id="icYmiaIqDirP" colab_type="code" colab={}
# 'loss' here is the MSE criterion object, not a loss value.
loss = nn.MSELoss().to(device)
optimizer = optim.Adam(model.parameters(),lr=learning_rate)
# + id="vp5DzZsMDscD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="71ebd918-4df1-432d-cbc4-8efdd116afb9"
total_batch = len(data_loader)
# Standard training loop: for each epoch, accumulate the mean batch cost.
for epoch in range(training_epoch):
    avg_cost = 0
    for X,Y in data_loader:
        X = X.to(device)
        Y = Y.to(device)
        optimizer.zero_grad()
        hypothesis = model(X)
        cost = loss(hypothesis,Y)
        cost.backward()
        optimizer.step()
        # Divide by batch count so avg_cost ends as the epoch-mean cost.
        avg_cost += cost/total_batch
    print('epoch {:.4f} , cost = {:.6f}'.format(epoch,avg_cost))
print('learning finished!')
# + id="INmS0zYPEiXl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f33ecc34-6035-4ed4-e2fe-96d9887776fe"
test = pd.read_csv('2020_soil_test.csv')
print(test.info())
# Drop the id column; no feature scaling is applied here even though the
# training data was standardized -- NOTE(review): verify this is intended.
test = test.iloc[:,1:]
test = np.array(test)
test = torch.FloatTensor(test).to(device)
# Inference only: disable autograd bookkeeping.
with torch.no_grad():
    predict = model(test)
predict
# + id="Fqk9wVfoFPDB" colab_type="code" colab={}
correct_prediction = predict.cpu().numpy().reshape(-1,1)
result = pd.read_csv('soil_submission.csv')
# + id="VJynpqTBFx9g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="de88a77d-bee6-4acf-e5ff-bf7dab4ca7f2"
# Copy predictions into the submission frame row by row.
# NOTE(review): chained indexing (result['Expected'][i] = ...) can trigger
# pandas' SettingWithCopyWarning; result.loc[i, 'Expected'] is the safe form.
for i in range(len(correct_prediction)):
    result['Expected'][i] = correct_prediction[i]
# + id="Y5jsV71JF68d" colab_type="code" colab={}
result.to_csv('submit.csv',index=False)
# + id="tXyOz0XlGGnC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="ef33a47a-afd4-48d3-95c6-e2b0e6fc52d5"
# !kaggle competitions submit -c 2020soil -f submit.csv -m "14010974_이기택_기말고사파트2-3"
| soil.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# write a program to identify sub list[1,1,5] is there in the given list in the same order.
# if yes print 'its a match' if no then print 'its gone'.
def sub(l, l1):
    """Return True if l1 occurs as a contiguous sub-list of l, else False.

    l  -- list to search in.
    l1 -- sub-list to search for (any length, not just 3).

    Fixes an IndexError in the original: it read l[i+1]/l[i+2] without
    checking they exist, so a partial match near the end of l crashed
    (e.g. sub([5, 1, 1], [1, 1, 5])).
    """
    n, m = len(l), len(l1)
    # Only start positions that leave room for a full-length match.
    for start in range(n - m + 1):
        if l[start:start + m] == l1:
            return True
    return False
# Driver: read a comma-separated list from the user and test for [1, 1, 5].
l1=[1,1,5]
l=input("Enter a list: ").split(",")
# Convert both lists' entries to ints so comparisons are numeric.
for i in range(len(l)):
    l[i]=int(l[i])
for i in range(len(l1)):
    l1[i]=int(l1[i])
if sub(l,l1):
    print("It's a match")
else:
    print("It's gone")
# +
# Lambda function for capitalizing the whole sentence passed as an argument
# -
# Title-case every comma-separated item the user enters, via a lambda + map.
lst=input("Enter a list of sequences: ").split(',')
cap=map(lambda lst : lst.title(),lst)
out=list(cap)
print(out)
# +
# Prime numbers and use filter to filter out all the prime numbers from 1-2500
# +
def isprime(x):
    """Return True when x is prime, False otherwise.

    Trial division up to sqrt(x); values below 2 are never prime.
    (The original returned None for non-primes, which filter() treats
    the same as False, so an explicit bool is backward compatible --
    and the sqrt bound replaces the original O(x) scan.)
    """
    if x < 2:
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return False
    return True
# Keep only the primes among 0..2499.
res = filter(isprime, range(2500))
print('Prime numbers between 1-2500:', list(res))
# -
| B7-Day5-Assignment 1,2,3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='./img/LogoWekeo_Copernicus_RGB_0.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='20%'></img>
# # WEkEO Training Workshop - Focus on atmospheric data products
# This training is dedicated to atmospheric composition data available on `WEkEO` and features data from `Sentinel-5p`, `Sentinel-3` and the `Copernicus Atmosphere Monitoring Service`.
# ## Course Overview
# #### Overview of atmospheric data products on WEkEO
# - [Atmospheric data products on WEkEO - Overview](./01_wekeo_atmosphere_data_overview.ipynb)
#
# #### Sentinel-5P TROPOMI
# - [Sentinel-5P Level 2 Carbon Monoxide - Retrieve](./10_sentinel5p_L2_retrieve.ipynb)
# - [Sentinel-5P Level 2 Carbon Monoxide - Load and browse](./11_sentinel5p_L2_load_browse.ipynb)
# - [Sentinel-5P Level 2 Carbon Monoxide - Exercise](./12_sentinel5p_L2_exercise.ipynb)
#
# #### Sentinel-3 OLCI Level-1B RGB
# - [Sentinel-3 OLCI Level 1 RGB - Retrieve](./20_sentinel3_OLCI_L1_retrieve.ipynb)
# - [Sentinel-3 OLCI Level 1 RGB - Load and browse](./21_sentinel3_OLCI_L1_load_browse.ipynb)
# - [Sentinel-3 OLCI Level 1 RGB - Exercise](./22_sentinel3_OLCI_L1_exercise.ipynb)
#
# #### Copernicus Atmosphere Monitoring Service (CAMS) Global Reanalysis (EAC4)
# - [CAMS Global Reanalysis (EAC4) - Retrieve](./30_cams_eac4_retrieve.ipynb)
# - [CAMS Global Reanalysis (EAC4) - Load and browse](./31_cams_eac4_load_browse.ipynb)
# - [CAMS Global Reanalysis (EAC4) - Exercise](./32_cams_eac4_exercise.ipynb)
# <hr>
# <img src='./img/all_partners_wekeo.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='100%'></img>
| 90_workshops/202010_WEkEO_training/00_index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="dqicde8-y-fW"
# ### Temporal Convolutional Networks Overview
#
# 
# -
# # Global Parameters
LEVEL="Level_4"
# + [markdown] colab_type="text" id="SeeOZueJy-fW"
# ## Input
# -
import tensorflow as tf
import numpy as np
print(tf.__version__)
import pandas as pd
# Pre-trained ProtVec 3-gram embeddings: a "words" column (the 3-gram)
# followed by its 100-dimensional embedding vector.
pretrained_emb = pd.read_csv("../../data/protVec_100d_3grams.csv", delimiter="\t")
pretrained_emb.shape
pretrained_emb.head()
# .values instead of .as_matrix(): as_matrix was deprecated in pandas 0.23
# and removed in 1.0; .values returns the same ndarray on every version.
embedding_weights_array = pretrained_emb.drop("words", axis = 1).values
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 86} colab_type="code" executionInfo={"elapsed": 1220, "status": "ok", "timestamp": 1522629683286, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="5zj3MnAMy-fq" outputId="85b5cede-11ac-4182-f7e5-8deffffce67b"
# Pre-embedded train/validation splits for the level selected by LEVEL.
train_data = np.load("../../data/emb_train_features_"+LEVEL+".npy")
train_label = np.load("../../data/emb_train_labels_"+LEVEL+".npy")
val_data = np.load("../../data/emb_val_features_"+LEVEL+".npy")
val_label = np.load("../../data/emb_val_labels_"+LEVEL+".npy")
# + [markdown] colab_type="text" id="A_x00hIey-fw"
# ## Building TCNs
# + [markdown] colab_type="text" id="TcFQu3F0y-fy"
# ### Causal Convolution
# + [markdown] colab_type="text" id="ByuyggHey-gI"
# ### Spatial Dropout
#
# Reference: https://stats.stackexchange.com/questions/282282/how-is-spatial-dropout-in-2d-implemented
#
# Actually, simply setting noise_shape in tf.layers.Dropout will do the trick.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 312} colab_type="code" executionInfo={"elapsed": 1078, "status": "ok", "timestamp": 1522629692360, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="YRTsgwSGy-gK" outputId="6def0a04-cf65-4226-e6c9-4f7630ae02cb"
# Spatial-dropout demo: noise_shape of (batch, channel, 1) makes TF draw one
# dropout mask per (batch, channel) and broadcast it along the length axis,
# so entire rows are zeroed together rather than individual elements.
tf.reset_default_graph()
with tf.Graph().as_default() as g:
    x = tf.random_normal((32, 4, 10)) # (batch_size, channel, length)
    dropout = tf.layers.Dropout(0.5, noise_shape=[x.shape[0], x.shape[1], tf.constant(1)])
    output = dropout(x, training=True)
    init = tf.global_variables_initializer()
with tf.Session(graph=g) as sess:
    # Run the initializer
    sess.run(init)
    res = sess.run(output)
    print(res.shape)
    # Each printed row should be either all-zero or all-scaled.
    print(res[0, :, :])
    print(res[1, :, :])
# + [markdown] colab_type="text" id="xdTffJqQy-gU"
# ### Temporal blocks
#
# Note: `tf.contrib.layers.layer_norm` only supports `channels_last`.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="9bHYwhRxy-gW"
# Redefining CausalConv1D to simplify its return values
class CausalConv1D(tf.layers.Conv1D):
    """1D convolution with left-only (causal) padding.

    Identical to tf.layers.Conv1D except padding/data_format are fixed to
    'valid'/'channels_last', and call() left-pads the time axis by
    (kernel_size - 1) * dilation_rate so the output at step t depends only
    on inputs at steps <= t while preserving the sequence length.
    """
    def __init__(self, filters,
                 kernel_size,
                 strides=1,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=tf.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        # Pass everything through to Conv1D; only padding and data_format
        # are pinned, because causal padding is applied manually in call().
        super(CausalConv1D, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding='valid',
            data_format='channels_last',
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            name=name, **kwargs
        )
    def call(self, inputs):
        """Left-pad the time axis, then run the normal 'valid' convolution."""
        # Amount of history the kernel needs beyond the current step.
        padding = (self.kernel_size[0] - 1) * self.dilation_rate[0]
        # Pad only on the left of axis 1 (time); batch and channel untouched.
        inputs = tf.pad(inputs, tf.constant([(0, 0,), (1, 0), (0, 0)]) * padding)
        return super(CausalConv1D, self).call(inputs)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="re2siwPgy-ga"
class TemporalBlock(tf.layers.Layer):
    """One residual block of a TCN.

    Two causal convolutions, each followed by layer normalization and
    spatial dropout, plus a residual connection. When the input channel
    count differs from n_outputs, the residual path is projected with a
    Dense layer so the addition is shape-compatible.
    """
    def __init__(self, n_outputs, kernel_size, strides, dilation_rate, dropout=0.2,
                 trainable=True, name=None, dtype=None,
                 activity_regularizer=None, **kwargs):
        super(TemporalBlock, self).__init__(
            trainable=trainable, dtype=dtype,
            activity_regularizer=activity_regularizer,
            name=name, **kwargs
        )
        self.dropout = dropout
        self.n_outputs = n_outputs
        self.conv1 = CausalConv1D(
            n_outputs, kernel_size, strides=strides,
            dilation_rate=dilation_rate, activation=tf.nn.relu,
            name="conv1")
        self.conv2 = CausalConv1D(
            n_outputs, kernel_size, strides=strides,
            dilation_rate=dilation_rate, activation=tf.nn.relu,
            name="conv2")
        # Created lazily in build() only if a channel projection is needed.
        self.down_sample = None
    def build(self, input_shape):
        channel_dim = 2
        # noise_shape (1, 1, n_outputs): one mask per channel, broadcast over
        # batch and time -- i.e. spatial dropout.
        self.dropout1 = tf.layers.Dropout(self.dropout, [tf.constant(1), tf.constant(1), tf.constant(self.n_outputs)])
        self.dropout2 = tf.layers.Dropout(self.dropout, [tf.constant(1), tf.constant(1), tf.constant(self.n_outputs)])
        if input_shape[channel_dim] != self.n_outputs:
            # self.down_sample = tf.layers.Conv1D(
            #     self.n_outputs, kernel_size=1,
            #     activation=None, data_format="channels_last", padding="valid")
            # Dense acts per-timestep here, equivalent to a 1x1 convolution.
            self.down_sample = tf.layers.Dense(self.n_outputs, activation=None)
    def call(self, inputs, training=True):
        """Run conv -> layernorm -> dropout twice, then add the residual."""
        x = self.conv1(inputs)
        x = tf.contrib.layers.layer_norm(x)
        x = self.dropout1(x, training=training)
        x = self.conv2(x)
        x = tf.contrib.layers.layer_norm(x)
        x = self.dropout2(x, training=training)
        if self.down_sample is not None:
            inputs = self.down_sample(inputs)
        return tf.nn.relu(x + inputs)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 104} colab_type="code" executionInfo={"elapsed": 1162, "status": "ok", "timestamp": 1522634695536, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="-CMZKL1jy-ge" outputId="05e31b54-3bfa-4049-f538-a20844b15f41"
# Smoke test: one TemporalBlock should map (32, 10, 4) to (32, 10, 8).
tf.reset_default_graph()
with tf.Graph().as_default() as g:
    x = tf.random_normal((32, 10, 4)) # (batch_size, length, channel)
    tblock = TemporalBlock(8, 2, 1, 4) #n_outputs, kernel_size, strides, dilation_rate
    output = tblock(x, training=tf.constant(True))
    init = tf.global_variables_initializer()
with tf.Session(graph=g) as sess:
    # Run the initializer
    sess.run(init)
    res = sess.run(output)
    print(res.shape)
    print(res[0, :, 0])
    print(res[1, :, 1])
# + [markdown] colab_type="text" id="WhzHzZtMy-gk"
# ### Temporal convolutional networks
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="GeLztD1Ly-gm"
class TemporalConvNet(tf.layers.Layer):
    """A full TCN: a stack of TemporalBlocks with dilation 2**i at level i.

    num_channels -- list of output channel counts, one per block; its length
    sets the network depth (and hence the receptive field).
    """
    def __init__(self, num_channels, kernel_size=2, dropout=0.2,
                 trainable=True, name=None, dtype=None,
                 activity_regularizer=None, **kwargs):
        super(TemporalConvNet, self).__init__(
            trainable=trainable, dtype=dtype,
            activity_regularizer=activity_regularizer,
            name=name, **kwargs
        )
        self.layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            # Exponentially growing dilation widens the receptive field
            # geometrically with depth.
            dilation_size = 2 ** i
            out_channels = num_channels[i]
            self.layers.append(
                TemporalBlock(out_channels, kernel_size, strides=1, dilation_rate=dilation_size,
                              dropout=dropout, name="tblock_{}".format(i))
            )
    def call(self, inputs, training=True):
        """Feed the input through each TemporalBlock in sequence."""
        outputs = inputs
        for layer in self.layers:
            outputs = layer(outputs, training=training)
        return outputs
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 104} colab_type="code" executionInfo={"elapsed": 1672, "status": "ok", "timestamp": 1522634708682, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="cRTtl0mey-go" outputId="e335ba5a-34b1-453b-fea7-224fa3f7c606"
# Smoke test: a 4-level TCN (8 channels per level) on random graph-constant input.
tf.reset_default_graph()
with tf.Graph().as_default() as g:
    x = tf.random_normal((32, 10, 4))  # (batch_size, length, channel)
    tcn = TemporalConvNet([8, 8, 8, 8], 2, 0.25)
    output = tcn(x, training=tf.constant(True))
    init = tf.global_variables_initializer()
    with tf.Session(graph=g) as sess:
        # Run the initializer
        sess.run(init)
        res = sess.run(output)
        # Expect shape (32, 10, 8).
        print(res.shape)
        print(res[0, :, 0])
        print(res[1, :, 1])
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 104} colab_type="code" executionInfo={"elapsed": 1728, "status": "ok", "timestamp": 1522634710620, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="zCMhWfWjy-g0" outputId="9d39a675-debb-41e0-86e5-970c57845079"
# Same smoke test, but feeding data through a placeholder instead of a
# graph-embedded random tensor.
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
    Xinput = tf.placeholder(tf.float32, shape=[None, 10, 4])
    tcn = TemporalConvNet([8, 8, 8, 8], 2, 0.25)
    output = tcn(Xinput, training=tf.constant(True))
    init = tf.global_variables_initializer()
    with tf.Session(graph=g) as sess:
        # Run the initializer
        sess.run(init)
        res = sess.run(output, {Xinput: np.random.randn(32, 10, 4)})
        print(res.shape)
        print(res[0, :, 0])
        print(res[1, :, 1])
# + [markdown] colab_type="text" id="5jYugVyby-g-"
# # Model
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1300, "status": "ok", "timestamp": 1522634805178, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="41qAk9lAy-hC" outputId="a1607a69-b33e-4ef6-e792-360a4b0e250f"
# Training Parameters
learning_rate = 0.001
batch_size = 128
# +1 rounds the batch count up; note the input pipeline below drops remainder
# batches, so an epoch actually ends with an OutOfRangeError.
batches_per_epoch = int(train_data.shape[0]/batch_size)+1
num_epochs = 12
print("Number of epochs: {} with batches per epoch: {}".format(num_epochs, batches_per_epoch))

# Network Parameters
sequence_length=train_data.shape[1]
# Labels are assumed to be 0-based class indices, hence max label + 1 classes.
num_classes = np.amax(val_label, axis=0)+1
num_of_kmer = len(embedding_weights_array)        # embedding vocabulary size
embedding_size = len(embedding_weights_array[0])  # embedding dimensionality
dropout = 0.1
kernel_size = 3
levels = 6   # number of TemporalBlocks (dilations 1, 2, ..., 2**(levels-1))
nhid = 64 # hidden layer num of features
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 52} colab_type="code" executionInfo={"elapsed": 5236, "status": "ok", "timestamp": 1522636269832, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="bP37UtN5y-hG" outputId="089539a5-b6c1-4cb0-ca1c-deacde5b3cbe"
# Build the full training graph: input pipeline, frozen embedding lookup,
# TCN feature extractor, softmax classifier head, loss, optimizer, summaries.
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
    tf.set_random_seed(10)
    with tf.variable_scope('input'):
        sequences = tf.placeholder(tf.int32, [None, sequence_length], name='sequences')
        labels = tf.placeholder(tf.int32, (None,))
        is_training = tf.placeholder(tf.bool, name='is_train')
        # One shared initializable iterator; it is re-pointed at the train,
        # validation or test arrays by re-running iterator.initializer with a
        # different feed_dict. Partial final batches are dropped.
        dataset = (tf.data.Dataset.from_tensor_slices((sequences, labels))
                   .shuffle(buffer_size=10000, reshuffle_each_iteration=True)
                   .apply(tf.contrib.data.batch_and_drop_remainder(batch_size)))
        iterator = dataset.make_initializable_iterator()
    with tf.variable_scope('embedding'):
        # Pre-trained k-mer embeddings, kept frozen (trainable=False).
        weights_initializer = tf.constant_initializer(embedding_weights_array)
        embedding_weights = tf.get_variable(
            name='embedding_weights',
            shape=(num_of_kmer, embedding_size),
            initializer=weights_initializer,
            trainable=False)
        # acid_embeddings = tf.get_variable("acid_embeddings", [num_of_acids, embedding_size])
        batch_sequences, batch_labels = iterator.get_next()
        embedded_sequences = tf.nn.embedding_lookup(embedding_weights, batch_sequences)
        # embedded_sequences = tf.nn.embedding_lookup(acid_embeddings, batch_sequences)
        embedded_sequences = tf.reshape(embedded_sequences,
                                        shape=[-1, sequence_length, embedding_size],
                                        name='embedded_real_sequences')
    # Define weights
    with tf.variable_scope('tcn'):
        # Run the TCN, keep only the last time step's features ([:, -1, :]),
        # and map them to class logits with a dense layer.
        logits = tf.layers.dense(TemporalConvNet([nhid] * levels, kernel_size,
                                                 dropout)(embedded_sequences, training=is_training)[:, -1, :],
                                 num_classes, activation=None, kernel_initializer=tf.orthogonal_initializer())
    # Define loss and optimizer
    with tf.name_scope("loss_op"):
        loss_op = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=batch_labels, logits=logits))
        tf.summary.scalar("loss_op", loss_op)
    with tf.name_scope("optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss_op)
    with tf.name_scope("accuracy"):
        prediction = tf.nn.softmax(logits)
        correct_pred = tf.equal(tf.argmax(prediction, 1, output_type=tf.int32), tf.squeeze(batch_labels))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        tf.summary.scalar("accuracy", accuracy)
    summ = tf.summary.merge_all()
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    # Parameter counts; xi.value extracts the int from a TF1 tf.Dimension.
    print("All parameters:", np.sum([np.product([xi.value for xi in x.get_shape()]) for x in tf.global_variables()]))
    print("Trainable parameters:", np.sum([np.product([xi.value for xi in x.get_shape()]) for x in tf.trainable_variables()]))
    [ print("{}{}".format(x.name, x.shape)) for x in tf.trainable_variables() if "LayerNorm" not in x.name]
# -
def print_progress(step, loss, acc):
    """Print one progress line; ``step`` may be an int or a descriptive label."""
    message = "Step {}, Loss={:.4f}, Accuracy={:.3f}".format(str(step), loss, acc)
    print(message)
def validation(epoch):
    """Score the current model on the whole validation set and return mean accuracy.

    Re-points the shared dataset iterator at (val_data, val_label), consumes
    every batch until exhaustion, then averages per-batch loss/accuracy.
    Relies on the module-level `sess`, `iterator`, `loss_op`, `accuracy`,
    `sequences`, `labels`, `is_training`, `val_data` and `val_label`.
    """
    # Calculate batch loss and accuracy
    losses = []
    accuracies = []
    sess.run(iterator.initializer, feed_dict={sequences: val_data, labels: val_label})
    while True:
        try:
            # Run optimization
            loss, acc = sess.run([loss_op, accuracy], feed_dict={is_training: False})
            losses.append(loss)
            accuracies.append(acc)
        except tf.errors.OutOfRangeError:
            # Iterator exhausted: every validation batch has been scored.
            break
    # Unweighted mean over batches; batches are equal-sized because the
    # pipeline drops remainders.
    loss_avg = sum(losses)/len(losses)
    acc_avg = sum(accuracies)/len(accuracies)
    print_progress("VALIDATION for epoch {}".format(epoch), loss_avg, acc_avg)
    return acc_avg
# ## Start training
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 278} colab_type="code" executionInfo={"elapsed": 416220, "status": "ok", "timestamp": 1522636686094, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-TKaCzeGtBXw/AAAAAAAAAAI/AAAAAAAAjB4/Xqwbek0CNps/s50-c-k-no/photo.jpg", "userId": "114938319508229761672"}, "user_tz": -480} id="IjwOnIUmy-hM" outputId="79b8a85e-9f56-458d-ecc9-3eea13094548"
from pathlib import Path
import random
from datetime import datetime

# TensorBoard logs go into one timestamped subdirectory per run.
path = "../../logs/tcn_sequence/"
log_dir = "{}{}".format(path, datetime.now().strftime("%Y%m%d_%H%M"))
Path(log_dir).mkdir(exist_ok=True, parents=True)
tb_writer = tf.summary.FileWriter(log_dir, graph)

config = tf.ConfigProto()
config.gpu_options.allow_growth = False

# Only checkpoint models that beat this validation accuracy.
best_val_acc = 0.8

# Log roughly four times per epoch. Guard against batches_per_epoch < 4,
# which would make the modulus zero and raise ZeroDivisionError.
log_every = max(1, int(batches_per_epoch/4))

with tf.Session(graph=graph) as sess:
    # Run the initializer and point the shared iterator at the training set.
    epoch, step = 0, 0
    sess.run([init, iterator.initializer], feed_dict={sequences: train_data, labels: train_label})
    while epoch < num_epochs:
        try:
            sess.run(train_op, feed_dict={is_training: True})
            step = step + 1
            if step % log_every == 0 or step == 1:
                # Loss/accuracy/summaries are each computed on a fresh batch
                # pulled from the iterator, not the batch just trained on.
                loss, acc = sess.run([loss_op, accuracy], feed_dict={is_training: True})
                print_progress(step, loss, acc)
                [train_accuracy, s] = sess.run([accuracy, summ], feed_dict={is_training: True})
                tb_writer.add_summary(s, step)
        except tf.errors.OutOfRangeError:
            # End of the training set: validate, checkpoint on improvement,
            # then re-initialize the iterator for the next epoch.
            # (A stray no-op `path` expression that used to sit here was removed.)
            epoch = epoch + 1
            val_acc = validation(epoch)
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                save_path = saver.save(sess, "{}{}".format(path, "v2"))
                print("Model saved in path: %s" % save_path)
            sess.run(iterator.initializer, feed_dict={sequences: train_data, labels: train_label})
    print("Optimization Finished!")
# -
# # Validation with new sequences
import pandas as pd
# Load held-out test sequences and preprocess them the same way as training data.
data = pd.read_csv("..//..//data//test_sequences.csv", sep='\t', skipinitialspace=True)
# Right-pad every sequence with '0' characters to the fixed model length of 500.
data["Sequence"] = data.Sequence.str.ljust(500, '0')
# Map each amino-acid letter (and the '0' pad character) to an integer index.
letterToIndex = {'0': 0, 'A': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7, 'I': 8, 'K': 9, 'L': 10, 'M': 11, 'N': 12,
                 'P': 13, 'Q': 14, 'R': 15, 'S': 16, 'T': 17, 'V': 18, 'W': 19, 'Y': 20}
data["Sequence_vector"] = [[letterToIndex[char] for char in val ] for index, val in data.Sequence.iteritems()]
test_data= np.asarray([ np.asarray(element) for element in data["Sequence_vector"].values])
# Pad the batch with all-zero rows up to batch_size, because the input
# pipeline drops partial batches.
test_data_for_tensorflow = np.append(test_data, np.zeros((batch_size-len(test_data), sequence_length)), axis=0).astype(int)
test_data_for_tensorflow.shape

# Dummy labels 1..6 mark the six real rows; zero-padded filler rows get label
# 0 so they can be filtered out after prediction (ls > 0 below).
test_elements = np.array([1,2,3,4,5,6])
label_for_tensorflow = np.append(test_elements, np.zeros((batch_size-len(test_elements))), axis=0).astype(int)
label_for_tensorflow.shape

s = tf.Session(graph=graph)
s.run(init)
# NOTE(review): training above saved to "../../logs/tcn_sequence/v2" but this
# restores "../logs/tcn_sequence/v1" -- confirm the intended checkpoint path.
saver.restore(s, "../logs/tcn_sequence/v1")

# +
np.set_printoptions(precision=8)
np.set_printoptions(suppress=True)
s.run(iterator.initializer, feed_dict={sequences: test_data_for_tensorflow, labels: label_for_tensorflow})
preds, ls = s.run([prediction, batch_labels], feed_dict={is_training: False})
count = 0
# Keep only rows whose (shuffled) label is non-zero, i.e. the real sequences.
selected_ls = ls.take(np.argwhere(ls > 0))
selected_preds = preds.take(np.argwhere(ls > 0), axis=0)
# Print predictions in label order; the header is printed once, first.
for i in selected_ls.argsort(axis=0):
    if count == 0:
        print("\n\r")
        print("Oxidoreductases Transferases Hydrolases Lyases Isomerases Ligases")
    print(selected_preds[i])
    print(np.argmax(selected_preds[i]))
    count = count + 1
    # print( p["classes"]+1)
# -

# Exploratory inspection of the selected labels.
selected_ls.argsort(axis=0)
selected_ls
# NOTE: ls.sort() sorts the array in place (returns None).
ls.sort()
# ls
np.argwhere(ls > 0)
| notebooks/enzyme/tcn_sequence_embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dynamic factors and coincident indices
#
# Factor models generally try to find a small number of unobserved "factors" that influence a substantial portion of the variation in a larger number of observed variables, and they are related to dimension-reduction techniques such as principal components analysis. Dynamic factor models explicitly model the transition dynamics of the unobserved factors, and so are often applied to time-series data.
#
# Macroeconomic coincident indices are designed to capture the common component of the "business cycle"; such a component is assumed to simultaneously affect many macroeconomic variables. Although the estimation and use of coincident indices (for example the [Index of Coincident Economic Indicators](http://www.newyorkfed.org/research/regional_economy/coincident_summary.html)) pre-dates dynamic factor models, in several influential papers Stock and Watson (1989, 1991) used a dynamic factor model to provide a theoretical foundation for them.
#
# Below, we follow the treatment found in Kim and Nelson (1999), of the Stock and Watson (1991) model, to formulate a dynamic factor model, estimate its parameters via maximum likelihood, and create a coincident index.
# ## Macroeconomic data
#
# The coincident index is created by considering the comovements in four macroeconomic variables (versions of these variables are available on [FRED](https://research.stlouisfed.org/fred2/); the ID of the series used below is given in parentheses):
#
# - Industrial production (IPMAN)
# - Real aggregate income (excluding transfer payments) (W875RX1)
# - Manufacturing and trade sales (CMRMTSPL)
# - Employees on non-farm payrolls (PAYEMS)
#
# In all cases, the data is at the monthly frequency and has been seasonally adjusted; the time-frame considered is 1972 - 2005.
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
np.set_printoptions(precision=4, suppress=True, linewidth=120)
# +
from pandas_datareader.data import DataReader

# Get the datasets from FRED
# Monthly, seasonally adjusted series; the series IDs are explained in the
# text above (IPMAN, W875RX1, CMRMTSPL, PAYEMS).
start = '1979-01-01'
end = '2014-12-01'
indprod = DataReader('IPMAN', 'fred', start=start, end=end)
income = DataReader('W875RX1', 'fred', start=start, end=end)
sales = DataReader('CMRMTSPL', 'fred', start=start, end=end)
emp = DataReader('PAYEMS', 'fred', start=start, end=end)
# dta = pd.concat((indprod, income, sales, emp), axis=1)
# dta.columns = ['indprod', 'income', 'sales', 'emp']
# -
# **Note**: in a recent update on FRED (8/12/15) the time series CMRMTSPL was truncated to begin in 1997; this is probably a mistake due to the fact that CMRMTSPL is a spliced series, so the earlier period is from the series HMRMT and the latter period is defined by CMRMT.
#
# This has since (02/11/16) been corrected, however the series could also be constructed by hand from HMRMT and CMRMT, as shown below (process taken from the notes in the Alfred xls file).
# +
# HMRMT = DataReader('HMRMT', 'fred', start='1967-01-01', end=end)
# CMRMT = DataReader('CMRMT', 'fred', start='1997-01-01', end=end)
# +
# HMRMT_growth = HMRMT.diff() / HMRMT.shift()
# sales = pd.Series(np.zeros(emp.shape[0]), index=emp.index)
# # Fill in the recent entries (1997 onwards)
# sales[CMRMT.index] = CMRMT
# # Backfill the previous entries (pre 1997)
# idx = sales.loc[:'1997-01-01'].index
# for t in range(len(idx)-1, 0, -1):
# month = idx[t]
# prev_month = idx[t-1]
# sales.loc[prev_month] = sales.loc[month] / (1 + HMRMT_growth.loc[prev_month].values)
# -
# Combine the four series into one DataFrame with short column names.
dta = pd.concat((indprod, income, sales, emp), axis=1)
dta.columns = ['indprod', 'income', 'sales', 'emp']
# Attach the inferred (monthly) frequency so statsmodels can use the index.
dta.index.freq = dta.index.inferred_freq
dta.loc[:, 'indprod':'emp'].plot(subplots=True, layout=(2, 2), figsize=(15, 6));
# Stock and Watson (1991) report that for their datasets, they could not reject the null hypothesis of a unit root in each series (so the series are integrated), but they did not find strong evidence that the series were co-integrated.
#
# As a result, they suggest estimating the model using the first differences (of the logs) of the variables, demeaned and standardized.
# +
# Create log-differenced series, then de-mean and standardize them.
# For each indicator: 100 * first difference of logs (approximately percent
# growth), then rescale to zero mean / unit variance, as in Stock-Watson.
for col in ['indprod', 'income', 'sales', 'emp']:
    dta['dln_' + col] = (np.log(dta[col])).diff() * 100
for col in ['indprod', 'income', 'sales', 'emp']:
    growth = dta['dln_' + col]
    dta['std_' + col] = (growth - growth.mean()) / growth.std()
# -
# ## Dynamic factors
#
# A general dynamic factor model is written as:
#
# $$
# \begin{align}
# y_t & = \Lambda f_t + B x_t + u_t \\
# f_t & = A_1 f_{t-1} + \dots + A_p f_{t-p} + \eta_t \qquad \eta_t \sim N(0, I)\\
# u_t & = C_1 u_{t-1} + \dots + C_q u_{t-q} + \varepsilon_t \qquad \varepsilon_t \sim N(0, \Sigma)
# \end{align}
# $$
#
# where $y_t$ are observed data, $f_t$ are the unobserved factors (evolving as a vector autoregression), $x_t$ are (optional) exogenous variables, and $u_t$ is the error, or "idiosyncratic", process ($u_t$ is also optionally allowed to be autocorrelated). The $\Lambda$ matrix is often referred to as the matrix of "factor loadings". The variance of the factor error term is set to the identity matrix to ensure identification of the unobserved factors.
#
# This model can be cast into state space form, and the unobserved factor estimated via the Kalman filter. The likelihood can be evaluated as a byproduct of the filtering recursions, and maximum likelihood estimation used to estimate the parameters.
# ## Model specification
#
# The specific dynamic factor model in this application has 1 unobserved factor which is assumed to follow an AR(2) process. The innovations $\varepsilon_t$ are assumed to be independent (so that $\Sigma$ is a diagonal matrix) and the error term associated with each equation, $u_{i,t}$ is assumed to follow an independent AR(2) process.
#
# Thus the specification considered here is:
#
# $$
# \begin{align}
# y_{i,t} & = \lambda_i f_t + u_{i,t} \\
# u_{i,t} & = c_{i,1} u_{1,t-1} + c_{i,2} u_{i,t-2} + \varepsilon_{i,t} \qquad & \varepsilon_{i,t} \sim N(0, \sigma_i^2) \\
# f_t & = a_1 f_{t-1} + a_2 f_{t-2} + \eta_t \qquad & \eta_t \sim N(0, I)\\
# \end{align}
# $$
#
# where $i$ is one of: `[indprod, income, sales, emp ]`.
#
# This model can be formulated using the `DynamicFactor` model built-in to statsmodels. In particular, we have the following specification:
#
# - `k_factors = 1` - (there is 1 unobserved factor)
# - `factor_order = 2` - (it follows an AR(2) process)
# - `error_var = False` - (the errors evolve as independent AR processes rather than jointly as a VAR - note that this is the default option, so it is not specified below)
# - `error_order = 2` - (the errors are autocorrelated of order 2: i.e. AR(2) processes)
# - `error_cov_type = 'diagonal'` - (the innovations are uncorrelated; this is again the default)
#
# Once the model is created, the parameters can be estimated via maximum likelihood; this is done using the `fit()` method.
#
# **Note**: recall that we have demeaned and standardized the data; this will be important in interpreting the results that follow.
#
# **Aside**: in their empirical example, Kim and Nelson (1999) actually consider a slightly different model in which the employment variable is allowed to also depend on lagged values of the factor - this model does not fit into the built-in `DynamicFactor` class, but can be accommodated by using a subclass to implement the required new parameters and restrictions - see Appendix A, below.
# ## Parameter estimation
#
# Multivariate models can have a relatively large number of parameters, and it may be difficult to escape from local minima to find the maximized likelihood. In an attempt to mitigate this problem, I perform an initial maximization step (from the model-defined starting parameters) using the modified Powell method available in Scipy (see the minimize documentation for more information). The resulting parameters are then used as starting parameters in the standard LBFGS optimization method.
# +
# Get the endogenous data
# Start at 1979-02 so the first (NaN) log-difference row is excluded.
endog = dta.loc['1979-02-01':, 'std_indprod':'std_emp']

# Create the model
mod = sm.tsa.DynamicFactor(endog, k_factors=1, factor_order=2, error_order=2)
# Two-stage optimization: Powell first (robust to poor starting values),
# then the default optimizer starting from the Powell solution.
initial_res = mod.fit(method='powell', disp=False)
res = mod.fit(initial_res.params, disp=False)
# -
# ## Estimates
#
# Once the model has been estimated, there are two components that we can use for analysis or inference:
#
# - The estimated parameters
# - The estimated factor
# ### Parameters
#
# The estimated parameters can be helpful in understanding the implications of the model, although in models with a larger number of observed variables and / or unobserved factors they can be difficult to interpret.
#
# One reason for this difficulty is due to identification issues between the factor loadings and the unobserved factors. One easy-to-see identification issue is the sign of the loadings and the factors: an equivalent model to the one displayed below would result from reversing the signs of all factor loadings and the unobserved factor.
#
# Here, one of the easy-to-interpret implications in this model is the persistence of the unobserved factor: we find that it exhibits substantial persistence.
# separate_params=False prints one combined coefficient table.
print(res.summary(separate_params=False))
# ### Estimated factors
#
# While it can be useful to plot the unobserved factors, it is less useful here than one might think for two reasons:
#
# 1. The sign-related identification issue described above.
# 2. Since the data was differenced, the estimated factor explains the variation in the differenced data, not the original data.
#
# It is for these reasons that the coincident index is created (see below).
#
# With these reservations, the unobserved factor is plotted below, along with the NBER indicators for US recessions. It appears that the factor is successful at picking up some degree of business cycle activity.
# +
fig, ax = plt.subplots(figsize=(13,3))

# Plot the factor
dates = endog.index._mpl_repr()
ax.plot(dates, res.factors.filtered[0], label='Factor')
ax.legend()

# Retrieve and also plot the NBER recession indicators
rec = DataReader('USREC', 'fred', start=start, end=end)
ylim = ax.get_ylim()
# The [:-3] / [:-4,0] offsets align the recession series with the shorter
# (differenced) factor sample -- NOTE(review): re-check if the dates change.
ax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:-4,0], facecolor='k', alpha=0.1);
# -
# ## Post-estimation
#
# Although here we will be able to interpret the results of the model by constructing the coincident index, there is a useful and generic approach for getting a sense for what is being captured by the estimated factor. By taking the estimated factors as given, regressing them (and a constant) each (one at a time) on each of the observed variables, and recording the coefficients of determination ($R^2$ values), we can get a sense of the variables for which each factor explains a substantial portion of the variance and the variables for which it does not.
#
# In models with more variables and more factors, this can sometimes lend interpretation to the factors (for example sometimes one factor will load primarily on real variables and another on nominal variables).
#
# In this model, with only four endogenous variables and one factor, it is easy to digest a simple table of the $R^2$ values, but in larger models it is not. For this reason, a bar plot is often employed; from the plot we can easily see that the factor explains most of the variation in industrial production index and a large portion of the variation in sales and employment, it is less helpful in explaining income.
# Bar plot of the R^2 from regressing each observed series on the estimated
# factor (see the discussion above).
res.plot_coefficients_of_determination(figsize=(8,2));
# ## Coincident Index
#
# As described above, the goal of this model was to create an interpretable series which could be used to understand the current status of the macroeconomy. This is what the coincident index is designed to do. It is constructed below. For readers interested in an explanation of the construction, see Kim and Nelson (1999) or Stock and Watson (1991).
#
# In essence, what is done is to reconstruct the mean of the (differenced) factor. We will compare it to the coincident index published by the Federal Reserve Bank of Philadelphia (USPHCI on FRED).
# Reference series: the Philadelphia Fed's published coincident index (USPHCI).
usphci = DataReader('USPHCI', 'fred', start='1979-01-01', end='2014-12-01')['USPHCI']
usphci.plot(figsize=(13,3));

# First differences of USPHCI, used to scale the constructed index below.
dusphci = usphci.diff()[1:].values
def compute_coincident_index(mod, res):
    """Construct a coincident index from the fitted dynamic factor model.

    Follows Kim & Nelson (1999) / Stock & Watson (1991): recover the factor
    mean via the steady-state Kalman gain, cumulate the scaled filtered
    factor into a level series, and renormalize to USPHCI's base.
    Relies on the module-level `dta`, `usphci` and `dusphci`.
    """
    # Estimate W(1)
    spec = res.specification
    design = mod.ssm['design']
    transition = mod.ssm['transition']
    # Use the gain at the last filtered period as the steady-state gain.
    ss_kalman_gain = res.filter_results.kalman_gain[:,:,-1]
    k_states = ss_kalman_gain.shape[0]
    W1 = np.linalg.inv(np.eye(k_states) - np.dot(
        np.eye(k_states) - np.dot(ss_kalman_gain, design),
        transition
    )).dot(ss_kalman_gain)[0]
    # Compute the factor mean vector
    # NOTE(review): this slice starts at '1972-02-01' while the estimation
    # sample above starts in 1979 -- confirm the intended date range.
    factor_mean = np.dot(W1, dta.loc['1972-02-01':, 'dln_indprod':'dln_emp'].mean())
    # Normalize the factors
    # Rescale the filtered factor so its std matches the USPHCI growth std.
    factor = res.factors.filtered[0]
    factor *= np.std(usphci.diff()[1:]) / np.std(factor)
    # Compute the coincident index
    coincident_index = np.zeros(mod.nobs+1)
    # The initial value is arbitrary; here it is set to
    # facilitate comparison
    coincident_index[0] = usphci.iloc[0] * factor_mean / dusphci.mean()
    # Cumulate growth: each period adds the filtered factor plus its mean.
    for t in range(0, mod.nobs):
        coincident_index[t+1] = coincident_index[t] + factor[t] + factor_mean
    # Attach dates
    coincident_index = pd.Series(coincident_index, index=dta.index).iloc[1:]
    # Normalize to use the same base year as USPHCI
    coincident_index *= (usphci.loc['1992-07-01'] / coincident_index.loc['1992-07-01'])
    return coincident_index
# Below we plot the calculated coincident index along with the US recessions and the comparison coincident index USPHCI.
# +
fig, ax = plt.subplots(figsize=(13,3))

# Compute the index
coincident_index = compute_coincident_index(mod, res)

# Plot the factor
dates = endog.index._mpl_repr()
ax.plot(dates, coincident_index, label='Coincident index')
ax.plot(usphci.index._mpl_repr(), usphci, label='USPHCI')
ax.legend(loc='lower right')

# Retrieve and also plot the NBER recession indicators
# (same slicing/alignment as the earlier factor plot; `rec` from above.)
ylim = ax.get_ylim()
ax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:-4,0], facecolor='k', alpha=0.1);
# -
# ## Appendix 1: Extending the dynamic factor model
#
# Recall that the previous specification was described by:
#
# $$
# \begin{align}
# y_{i,t} & = \lambda_i f_t + u_{i,t} \\
# u_{i,t} & = c_{i,1} u_{1,t-1} + c_{i,2} u_{i,t-2} + \varepsilon_{i,t} \qquad & \varepsilon_{i,t} \sim N(0, \sigma_i^2) \\
# f_t & = a_1 f_{t-1} + a_2 f_{t-2} + \eta_t \qquad & \eta_t \sim N(0, I)\\
# \end{align}
# $$
#
# Written in state space form, the previous specification of the model had the following observation equation:
#
# $$
# \begin{bmatrix}
# y_{\text{indprod}, t} \\
# y_{\text{income}, t} \\
# y_{\text{sales}, t} \\
# y_{\text{emp}, t} \\
# \end{bmatrix} = \begin{bmatrix}
# \lambda_\text{indprod} & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# \lambda_\text{income} & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
# \lambda_\text{sales} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
# \lambda_\text{emp} & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
# \end{bmatrix}
# \begin{bmatrix}
# f_t \\
# f_{t-1} \\
# u_{\text{indprod}, t} \\
# u_{\text{income}, t} \\
# u_{\text{sales}, t} \\
# u_{\text{emp}, t} \\
# u_{\text{indprod}, t-1} \\
# u_{\text{income}, t-1} \\
# u_{\text{sales}, t-1} \\
# u_{\text{emp}, t-1} \\
# \end{bmatrix}
# $$
#
# and transition equation:
#
# $$
# \begin{bmatrix}
# f_t \\
# f_{t-1} \\
# u_{\text{indprod}, t} \\
# u_{\text{income}, t} \\
# u_{\text{sales}, t} \\
# u_{\text{emp}, t} \\
# u_{\text{indprod}, t-1} \\
# u_{\text{income}, t-1} \\
# u_{\text{sales}, t-1} \\
# u_{\text{emp}, t-1} \\
# \end{bmatrix} = \begin{bmatrix}
# a_1 & a_2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & c_{\text{indprod}, 1} & 0 & 0 & 0 & c_{\text{indprod}, 2} & 0 & 0 & 0 \\
# 0 & 0 & 0 & c_{\text{income}, 1} & 0 & 0 & 0 & c_{\text{income}, 2} & 0 & 0 \\
# 0 & 0 & 0 & 0 & c_{\text{sales}, 1} & 0 & 0 & 0 & c_{\text{sales}, 2} & 0 \\
# 0 & 0 & 0 & 0 & 0 & c_{\text{emp}, 1} & 0 & 0 & 0 & c_{\text{emp}, 2} \\
# 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
# \end{bmatrix}
# \begin{bmatrix}
# f_{t-1} \\
# f_{t-2} \\
# u_{\text{indprod}, t-1} \\
# u_{\text{income}, t-1} \\
# u_{\text{sales}, t-1} \\
# u_{\text{emp}, t-1} \\
# u_{\text{indprod}, t-2} \\
# u_{\text{income}, t-2} \\
# u_{\text{sales}, t-2} \\
# u_{\text{emp}, t-2} \\
# \end{bmatrix}
# + R \begin{bmatrix}
# \eta_t \\
# \varepsilon_{t}
# \end{bmatrix}
# $$
#
# the `DynamicFactor` model handles setting up the state space representation and, in the `DynamicFactor.update` method, it fills in the fitted parameter values into the appropriate locations.
# The extended specification is the same as in the previous example, except that we also want to allow employment to depend on lagged values of the factor. This creates a change to the $y_{\text{emp},t}$ equation. Now we have:
#
# $$
# \begin{align}
# y_{i,t} & = \lambda_i f_t + u_{i,t} \qquad & i \in \{\text{indprod}, \text{income}, \text{sales} \}\\
# y_{i,t} & = \lambda_{i,0} f_t + \lambda_{i,1} f_{t-1} + \lambda_{i,2} f_{t-2} + \lambda_{i,3} f_{t-3} + u_{i,t} \qquad & i = \text{emp} \\
# u_{i,t} & = c_{i,1} u_{i,t-1} + c_{i,2} u_{i,t-2} + \varepsilon_{i,t} \qquad & \varepsilon_{i,t} \sim N(0, \sigma_i^2) \\
# f_t & = a_1 f_{t-1} + a_2 f_{t-2} + \eta_t \qquad & \eta_t \sim N(0, I)\\
# \end{align}
# $$
#
# Now, the corresponding observation equation should look like the following:
#
# $$
# \begin{bmatrix}
# y_{\text{indprod}, t} \\
# y_{\text{income}, t} \\
# y_{\text{sales}, t} \\
# y_{\text{emp}, t} \\
# \end{bmatrix} = \begin{bmatrix}
# \lambda_\text{indprod} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# \lambda_\text{income} & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
# \lambda_\text{sales} & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
# \lambda_\text{emp,1} & \lambda_\text{emp,2} & \lambda_\text{emp,3} & \lambda_\text{emp,4} & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
# \end{bmatrix}
# \begin{bmatrix}
# f_t \\
# f_{t-1} \\
# f_{t-2} \\
# f_{t-3} \\
# u_{\text{indprod}, t} \\
# u_{\text{income}, t} \\
# u_{\text{sales}, t} \\
# u_{\text{emp}, t} \\
# u_{\text{indprod}, t-1} \\
# u_{\text{income}, t-1} \\
# u_{\text{sales}, t-1} \\
# u_{\text{emp}, t-1} \\
# \end{bmatrix}
# $$
#
# Notice that we have introduced two new state variables, $f_{t-2}$ and $f_{t-3}$, which means we need to update the transition equation:
#
# $$
# \begin{bmatrix}
# f_t \\
# f_{t-1} \\
# f_{t-2} \\
# f_{t-3} \\
# u_{\text{indprod}, t} \\
# u_{\text{income}, t} \\
# u_{\text{sales}, t} \\
# u_{\text{emp}, t} \\
# u_{\text{indprod}, t-1} \\
# u_{\text{income}, t-1} \\
# u_{\text{sales}, t-1} \\
# u_{\text{emp}, t-1} \\
# \end{bmatrix} = \begin{bmatrix}
# a_1 & a_2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & c_{\text{indprod}, 1} & 0 & 0 & 0 & c_{\text{indprod}, 2} & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & c_{\text{income}, 1} & 0 & 0 & 0 & c_{\text{income}, 2} & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & c_{\text{sales}, 1} & 0 & 0 & 0 & c_{\text{sales}, 2} & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & c_{\text{emp}, 1} & 0 & 0 & 0 & c_{\text{emp}, 2} \\
# 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
# \end{bmatrix}
# \begin{bmatrix}
# f_{t-1} \\
# f_{t-2} \\
# f_{t-3} \\
# f_{t-4} \\
# u_{\text{indprod}, t-1} \\
# u_{\text{income}, t-1} \\
# u_{\text{sales}, t-1} \\
# u_{\text{emp}, t-1} \\
# u_{\text{indprod}, t-2} \\
# u_{\text{income}, t-2} \\
# u_{\text{sales}, t-2} \\
# u_{\text{emp}, t-2} \\
# \end{bmatrix}
# + R \begin{bmatrix}
# \eta_t \\
# \varepsilon_{t}
# \end{bmatrix}
# $$
#
# This model cannot be handled out-of-the-box by the `DynamicFactor` class, but it can be handled by creating a subclass that alters the state space representation in the appropriate way.
# First, notice that if we had set `factor_order = 4`, we would almost have what we wanted. In that case, the last line of the observation equation would be:
#
# $$
# \begin{bmatrix}
# \vdots \\
# y_{\text{emp}, t} \\
# \end{bmatrix} = \begin{bmatrix}
# \vdots & & & & & & & & & & & \vdots \\
# \lambda_\text{emp,1} & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
# \end{bmatrix}
# \begin{bmatrix}
# f_t \\
# f_{t-1} \\
# f_{t-2} \\
# f_{t-3} \\
# \vdots
# \end{bmatrix}
# $$
#
#
# and the first line of the transition equation would be:
#
# $$
# \begin{bmatrix}
# f_t \\
# \vdots
# \end{bmatrix} = \begin{bmatrix}
# a_1 & a_2 & a_3 & a_4 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# \vdots & & & & & & & & & & & \vdots \\
# \end{bmatrix}
# \begin{bmatrix}
# f_{t-1} \\
# f_{t-2} \\
# f_{t-3} \\
# f_{t-4} \\
# \vdots
# \end{bmatrix}
# + R \begin{bmatrix}
# \eta_t \\
# \varepsilon_{t}
# \end{bmatrix}
# $$
#
# Relative to what we want, we have the following differences:
#
# 1. In the above situation, the $\lambda_{\text{emp}, j}$ are forced to be zero for $j > 0$, and we want them to be estimated as parameters.
# 2. We only want the factor to transition according to an AR(2), but under the above situation it is an AR(4).
#
# Our strategy will be to subclass `DynamicFactor`, and let it do most of the work (setting up the state space representation, etc.) where it assumes that `factor_order = 4`. The only things we will actually do in the subclass will be to fix those two issues.
#
# First, here is the full code of the subclass; it is discussed below. It is important to note at the outset that none of the methods defined below could have been omitted. In fact, the methods `__init__`, `start_params`, `param_names`, `transform_params`, `untransform_params`, and `update` form the core of all state space models in statsmodels, not just the `DynamicFactor` class.
from statsmodels.tsa.statespace import tools
class ExtendedDFM(sm.tsa.DynamicFactor):
    """Dynamic factor model with lagged factor loadings for one series.

    Sets up the parent ``DynamicFactor`` with ``factor_order=4`` and then
    (a) frees the loadings on factor lags 1-3 for the 4th observed series
    (three extra parameters, placed at the end of the parameter vector) and
    (b) restricts the factor transition to an AR(2) by pinning the 3rd and
    4th factor AR coefficients to zero in ``update``.
    """

    def __init__(self, endog, **kwargs):
        # Setup the model as if we had a factor order of 4
        super(ExtendedDFM, self).__init__(
            endog, k_factors=1, factor_order=4, error_order=2,
            **kwargs)

        # Note: `self.parameters` is an ordered dict with the
        # keys corresponding to parameter types, and the values
        # the number of parameters of that type.
        # Add the new parameters
        self.parameters['new_loadings'] = 3

        # Cache a slice for the location of the 4 factor AR
        # parameters (a_1, ..., a_4) in the full parameter vector
        offset = (self.parameters['factor_loadings'] +
                  self.parameters['exog'] +
                  self.parameters['error_cov'])
        # first two AR coefficients are estimated (the AR(2) part)...
        self._params_factor_ar = np.s_[offset:offset+2]
        # ...while the last two are forced to zero in `update`
        self._params_factor_zero = np.s_[offset+2:offset+4]

    @property
    def start_params(self):
        """Starting values: parent's values plus zeros for the new loadings."""
        # Add three new loading parameters to the end of the parameter
        # vector, initialized to zeros (for simplicity; they could
        # be initialized any way you like)
        return np.r_[super(ExtendedDFM, self).start_params, 0, 0, 0]

    @property
    def param_names(self):
        """Parameter names: parent's names plus labels for the new loadings."""
        # Add the corresponding names for the new loading parameters
        # (the name can be anything you like)
        return super(ExtendedDFM, self).param_names + [
            'loading.L%d.f1.%s' % (i, self.endog_names[3]) for i in range(1,4)]

    def transform_params(self, unconstrained):
        """Map unconstrained optimizer values to model-space parameters."""
        # Perform the typical DFM transformation (w/o the new parameters)
        constrained = super(ExtendedDFM, self).transform_params(
            unconstrained[:-3])

        # Redo the factor AR constraint, since we only want an AR(2),
        # and the previous constraint was for an AR(4)
        ar_params = unconstrained[self._params_factor_ar]
        constrained[self._params_factor_ar] = (
            tools.constrain_stationary_univariate(ar_params))

        # Return all the parameters (the new loadings need no constraint)
        return np.r_[constrained, unconstrained[-3:]]

    def untransform_params(self, constrained):
        """Map model-space parameters back to the optimizer's space."""
        # Perform the typical DFM untransformation (w/o the new parameters)
        unconstrained = super(ExtendedDFM, self).untransform_params(
            constrained[:-3])

        # Redo the factor AR untransformation, since we only want an AR(2),
        # and the previous untransformation was for an AR(4)
        ar_params = constrained[self._params_factor_ar]
        unconstrained[self._params_factor_ar] = (
            tools.unconstrain_stationary_univariate(ar_params))

        # Return all the parameters
        return np.r_[unconstrained, constrained[-3:]]

    def update(self, params, transformed=True, **kwargs):
        """Place parameters into the state space matrices.

        Lets the parent handle every parameter except the three new lagged
        loadings, which are written into the design matrix manually; the
        3rd/4th factor AR coefficients are forced to zero (AR(2) restriction).
        """
        # Perform the transformation, if required
        if not transformed:
            params = self.transform_params(params)
        # NOTE(review): when `transformed` is True this assignment mutates the
        # caller's array in place -- confirm that is intended
        params[self._params_factor_zero] = 0

        # Now perform the usual DFM update, but exclude our new parameters
        super(ExtendedDFM, self).update(params[:-3], transformed=True, **kwargs)

        # Finally, set our new parameters in the design matrix
        self.ssm['design', 3, 1:4] = params[-3:]
# So what did we just do?
#
# **`__init__`**
#
# The important step here was specifying the base dynamic factor model which we were operating with. In particular, as described above, we initialize with `factor_order=4`, even though we will only end up with an AR(2) model for the factor. We also performed some general setup-related tasks.
#
# **`start_params`**
#
# `start_params` are used as initial values in the optimizer. Since we are adding three new parameters, we need to pass those in. If we had not done this, the optimizer would use the default starting values, which would be three elements short.
#
# **`param_names`**
#
# `param_names` are used in a variety of places, but especially in the results class. Below we get a full result summary, which is only possible when all the parameters have associated names.
#
# **`transform_params`** and **`untransform_params`**
#
# The optimizer selects possible parameter values in an unconstrained way. That's not usually desired (since variances cannot be negative, for example), and `transform_params` is used to transform the unconstrained values used by the optimizer to constrained values appropriate to the model. Variance terms are typically squared (to force them to be positive), and AR lag coefficients are often constrained to lead to a stationary model. `untransform_params` is used for the reverse operation (and is important because starting parameters are usually specified in terms of values appropriate to the model, and we need to convert them to parameters appropriate to the optimizer before we can begin the optimization routine).
#
# Even though we do not need to transform or untransform our new parameters (the loadings can in theory take on any values), we still need to modify this function for two reasons:
#
# 1. The version in the `DynamicFactor` class is expecting 3 fewer parameters than we have now. At a minimum, we need to handle the three new parameters.
# 2. The version in the `DynamicFactor` class constrains the factor lag coefficients to be stationary as though it was an AR(4) model. Since we actually have an AR(2) model, we need to re-do the constraint. We also set the last two autoregressive coefficients to be zero here.
#
# **`update`**
#
# The most important reason we need to specify a new `update` method is because we have three new parameters that we need to place into the state space formulation. In particular we let the parent `DynamicFactor.update` class handle placing all the parameters except the three new ones in to the state space representation, and then we put the last three in manually.
# Create the model
extended_mod = ExtendedDFM(endog)
# Two-stage fit: default optimizer first, then polish with Nelder-Mead
# starting from the first stage's parameter estimates.
initial_extended_res = extended_mod.fit(maxiter=1000, disp=False)
extended_res = extended_mod.fit(initial_extended_res.params, method='nm', maxiter=1000)
print(extended_res.summary(separate_params=False))
# Although this model increases the likelihood, it is not preferred by the AIC and BIC measures which penalize the additional three parameters.
#
# Furthermore, the qualitative results are unchanged, as we can see from the updated $R^2$ chart and the new coincident index, both of which are practically identical to the previous results.
extended_res.plot_coefficients_of_determination(figsize=(8,2));
# +
fig, ax = plt.subplots(figsize=(13,3))

# Compute the index
extended_coincident_index = compute_coincident_index(extended_mod, extended_res)

# Plot the factor
dates = endog.index._mpl_repr()
ax.plot(dates, coincident_index, '-', linewidth=1, label='Basic model')
ax.plot(dates, extended_coincident_index, '--', linewidth=3, label='Extended model')
ax.plot(usphci.index._mpl_repr(), usphci, label='USPHCI')
ax.legend(loc='lower right')
ax.set(title='Coincident indices, comparison')

# Retrieve and also plot the NBER recession indicators
# NOTE(review): dates are truncated with [:-3] but the recession indicator
# with [:-4] -- confirm the two slices actually line up.
ylim = ax.get_ylim()
ax.fill_between(dates[:-3], ylim[0], ylim[1], rec.values[:-4,0], facecolor='k', alpha=0.1);
| examples/notebooks/statespace_dfm_coincident.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Done
#
# * performance (`DetectionResults.detections_by_class` memoized)
# * performance (`DetectionResults.num_gt_class` memoized)
# * "crowd" GT detection handling. Yes: each T_IoU level needs `.match_detections(iou_index)`
# * per-class AP
#
# ### To Do (?)
# * small / medium / large AP... (?)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from evaldets.api import *
from uo.utils import *

# area_rng=None, iou_thresh=None - needed for full cocoeval
dr = DetectionResults('../reval_05/baseline_05/evaluator_dump_R101_101/', area_rng=None, iou_thresh=None)
dr.finish_cocoeval()
# Reference value from pycocotools:
# np.mean(dr.coco.eval["precision"][0, :, 0, 0, -1])
dr.coco_mAP_cat('person')
dr.average_precision('person')
# mAP.5 seems as good as it gets:
dr.mean_average_precision()
# np.mean(dr.coco.eval["precision"][0, :, :, 0, -1])
dr.coco_mAP_score()
# mAP.75 doesn't look so great:
# np.mean(dr.coco.eval["precision"][5, :, :, 0, -1])
# NOTE(review): same no-argument call as above even though the comment points
# at IoU index 5 (0.75) -- presumably a threshold argument is missing here;
# verify against the DetectionResults API.
dr.coco_mAP_score()
dr.mean_average_precision(0.75)
# Until you do it the right way:
dr.match_detections(5)  # re-match at T_IoU index 5 (0.75, per the slice comment above)
dr.mean_average_precision(0.75)
# Now, for the big thing:
dr.AP_score(rich=True)
| 2021-07-01-AP-like-cocoapi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="CdYrOwgncIm0" colab={"base_uri": "https://localhost:8080/"} outputId="776e6203-2c42-4714-b1f1-d600bb93bbd1"
from IPython.display import YouTubeVideo
import numpy as np
import pandas as pd
# !pip install pyexcel
import pyexcel as pe
# !pip install pyexcel-xlsx
from openpyxl import load_workbook

# + id="S8xExIrzyKI7" colab={"base_uri": "https://localhost:8080/", "height": 321} outputId="b84fcb8e-0f16-4b42-822e-0df4b762b08e"
#tutorial on YouTube
YouTubeVideo('T3UZUftD4-Y')

# + [markdown] id="wWrWSGjpd_Xj"
# # **pandas Module**

# + id="TvxRXYqQdZ0j"
# Load the combined financial data (dates plus S&P 500 / NASDAQ columns).
df = pd.read_csv('/content/Financial_data_combined.csv')

# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="x9eOd2uweCzs" outputId="4fedcca7-ed0b-4d40-86ae-47930523dc95"
df.head(10)

# + colab={"base_uri": "https://localhost:8080/"} id="2MeEvmmmvj2n" outputId="79b94899-613e-4565-be4e-b328147d1191"
df.dtypes

# + id="9w9Zfbt4tP23" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="5c97baf7-d8f4-48d7-beea-86ceae0b767b"
# NOTE(review): the converted frame is not assigned back, so this line only
# displays the numeric conversion -- df itself is unchanged. Confirm intended.
df.loc[:,df.columns[1:]].apply(pd.to_numeric)

# + id="JZHNE0crr8R3"
# Log returns; .shift(-1) pairs each row with the row *below* it, which
# assumes rows are in reverse chronological order -- verify against the CSV.
df['NASDAQ Log Return'] = np.log(df['NASDAQ Close']) - np.log(df['NASDAQ Close'].shift(-1))
df['S&P 500 Log Return'] = np.log(df['S&P 500 Close']) - np.log(df['S&P 500 Close'].shift(-1))

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="oemA621EeC2u" outputId="dd8cf86b-7624-4795-e185-0adf2acd37c8"
df

# + id="GETVOa3pw8O9"
# Columns 1-4 hold the S&P 500 data.
sandp = df.loc[:,df.columns[1:5]]

# + id="WcL4uJfD0TeC"
# Everything except the S&P columns and the two derived return columns.
nasdaq = df.drop(columns=df.columns[1:5])
nasdaq = nasdaq.loc[:,nasdaq.columns[:-2]]

# + id="vtXO_p8AoYNz"
# Write each index to its own sheet of a single workbook.
writer = pd.ExcelWriter('/content/Financial_data.xlsx')
sandp.to_excel(writer, sheet_name='S&P500', index = False)
nasdaq.to_excel(writer, sheet_name='Nasdaq', index = False)
writer.save()
# + [markdown] id="AkttdgnaweS0"
# # **pyexcel Module**
# + id="Ces-3Ub0pMge"
# pyexcel loads the whole workbook as a "book" of sheets.
fin_data = pe.get_book(file_name = '/content/Financial_data.xlsx')

# + colab={"base_uri": "https://localhost:8080/"} id="9nyKMooyExw9" outputId="31926d81-a7e7-4f84-c71c-faaf5409aec6"
fin_data

# + id="N9qc2pgjpMqx"
# Sheet name -> list of rows (first row is the header).
fin_dict = fin_data.dict

# + id="DM6vb5UDovsz"
# Log returns from the last column of each data row, pairing each row with
# the row one step further down (its lag); [1:] skips the header row.
pct_chnge = []
for row,row_lag in zip(fin_dict['S&P500'][1:],fin_dict['S&P500'][2:]):
    pct_chnge.append((np.log(row[-1]/row_lag[-1])))

# + id="ePW2jY1qE2RB"
# Append each return to its data row and extend the header row.
# NOTE(review): pct_chnge has one element fewer than the data rows, so zip
# leaves the final row without a return column -- confirm this is intended.
for row,rtn in zip(fin_dict['S&P500'][1:], pct_chnge):
    row.append(rtn)
fin_dict['S&P500'][0].append('Log Return')

# + id="QURW7x_xE2ZA"
pe.save_as(array = fin_dict['S&P500'], dest_file_name = '/content/s&p_data.xlsx')

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="nNcrD2jZHmGu" outputId="81c2fedd-27a7-4a60-ecfd-1b79057fffba"
pd.read_excel('/content/s&p_data.xlsx')
# + [markdown] id="ZLLj96YSMGJ0"
# # **openpyxl Module**
# + id="O17fVcwcMGTX"
# openpyxl operates on the workbook in place, cell by cell.
workbook = load_workbook(filename = '/content/Financial_data.xlsx')

# + id="zpHgIuM0MGyy"
nasdaq_sheet = workbook['Nasdaq']

# + id="R_czA-JAMG4j"
# Collect the values of the second-to-last column (presumably the close
# price -- verify the sheet layout), skipping the header row.
# openpyxl rows and columns are 1-indexed.
nasdaq_l = []
for row in range(2, nasdaq_sheet.max_row+1):
    nasdaq_l.append(nasdaq_sheet.cell(row, nasdaq_sheet.max_column-1).value)
# + id="cCwhyVtQMG60"
# Log return of each close price relative to the next one in the list,
# framed with a header label up front and a trailing None for the last row.
pct_chnge = [np.log(price / next_price)
             for price, next_price in zip(nasdaq_l, nasdaq_l[1:])]
pct_chnge.insert(0, 'Log Return')
pct_chnge.append(None)
# + id="RutR7mlNeYxq"
# Write the computed column into the first empty column of the sheet.
max_col = nasdaq_sheet.max_column + 1
for row, value in enumerate(pct_chnge, start=1):
    nasdaq_sheet.cell(row=row, column=max_col, value=value)

# + id="iMfiglVtfJtX"
workbook.save('nasdaq_returns.xlsx')

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="bQYHTkCfG1g8" outputId="5ac9b34e-7bb7-465a-adfe-161800c4a7f3"
pd.read_excel('/content/nasdaq_returns.xlsx', sheet_name='Nasdaq')
# + [markdown] id="eIh__MY-dvmA"
# # **References and Additional Learning**
# + [markdown] id="BogjnGWPkoov"
# ## **Websites**
#
# - **[pandas module Documentation](https://pandas.pydata.org/docs/)**
#
# - **[pyexcel module Documentation](https://docs.pyexcel.org/en/latest/)**
#
# - **[openpyxl module Documentation](https://openpyxl.readthedocs.io/en/stable/)**
# + [markdown] id="_FGL_YIqdyn0"
# # **Connect**
# - **Feel free to connect with Adrian on [YouTube](https://www.youtube.com/channel/UCPuDxI3xb_ryUUMfkm0jsRA), [LinkedIn](https://www.linkedin.com/in/adrian-dolinay-frm-96a289106/), [Twitter](https://twitter.com/DolinayG) and [GitHub](https://github.com/ad17171717). Happy coding!**
| Python YouTube Tutorials/Reading and Writing csv and Excel Files/Python_Reading_and_Writing_csv_and_Excel_Files!.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# key - value
# Dictionaries map hashable keys to values; values may repeat, keys may not.
cars = {
    0: "Ferrari",
    1: "Porsche",
    2: "Ford",
    3: "Ford"
}
print(cars[0])  # lookup by key

# +
# String keys; values can be of mixed types, including tuples.
person = {
    "firstname": "<NAME>",
    "lastname" : "Sarpong",
    "year": 1964,
    "colors": ("purple", "red", "yellow"),
}

#print(person)
print(type(person["colors"]))  # the "colors" value is a tuple
# -

len(person)   # number of key/value pairs

type(person)

person.keys()

person.values()

# +
# Change
# Updating values: assignment by key and dict.update() with another mapping.
uestc ={
    "dept1": "SISE",
    "dept2": "SSEE",
    "dept3": "CSE",
}
print(uestc)
print()

uestc["dept2"] = "MSE"  # replace the value stored under an existing key
print(uestc)
print()

# update() replaces existing keys and inserts new ones in a single call
uestc.update({
    "dept2": "Sports",
    "year": 1964,
    "colors": ("purple", "red", "yellow"),
})
print(uestc)

# +
# remove by popping
print(uestc)
uestc.pop("colors")  # remove a specific key (returns its value)
print(uestc)

# remove by popping the last item
uestc.popitem()  # removes the most recently inserted pair
uestc
# -

uestc.clear()  # empty the dictionary in place (the object still exists)
uestc

del uestc  # delete the name itself
# NOTE(review): the next line raises NameError because `uestc` no longer
# exists -- presumably intentional, to demonstrate the effect of `del`.
uestc

# +
#nested dicts
# A dictionary whose values are themselves dictionaries; keys can mix types.
lecture = {
    1: {
        "firstname": "Great",
        "score": 100
    },
    2: {
        "firstname": "Edward",
        "score": 90
    },
    "special": {
        "firstname": "Abena",
        "score": 80
    },
}
#print(lecture)
# Access nested values by chaining keys: outer key first, then inner key.
print(lecture[1]["firstname"])
print(lecture[1]["score"])
print(lecture[2]["firstname"])
print(lecture[2]["score"])
print(lecture["special"]["firstname"])
print(lecture["special"]["score"])

# +
# effective nested dicts
# Build the inner dictionaries first, then compose them into an outer dict.
fir = {
    "firstname": "Great",
    "score": 100
}
sec = {
    "firstname": "Edward",
    "score": 90
}
thi = {
    "firstname": "Abena",
    "score": 80
}

_class ={
    1: fir,
    2: sec,
    3: thi
}
print(_class[1]["firstname"])
# -
| Week 3/Dictionaries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter: Finding Groups in Process Data: Clustering & Mixture Modeling
#
# # Topic: Process Monitoring via GMM
# +
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Process Monitoring of Etch data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# -
# import required packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture

# +
# fetch data
# MACHINE_Data.mat holds the LAM etch-tool batch data; struct_as_record=False
# exposes MATLAB structs as objects with attribute access.
import scipy.io
matlab_data = scipy.io.loadmat('MACHINE_Data.mat', struct_as_record = False)
Etch_data = matlab_data['LAMDATA']
calibration_dataAll = Etch_data[0,0].calibration # calibration_dataAll[i,0] corresponds to a 2D data from ith batch where columns correspond to different variables
variable_names = Etch_data[0,0].variables
# +
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## perform Multiway PCA
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# +
# generate unfolded data matrix: one row per batch, each row being the
# batch's (85 x n_vars) trajectory raveled column-major (variable-wise)
n_vars = variable_names.size - 2 # first 2 columns are not process variables
n_samples = 85 # following the work of He et al.

# Collect rows in a list and stack once at the end. This fixes the original
# pattern, which seeded np.empty with a garbage row (sliced off afterwards)
# and called np.vstack inside the loop -- an O(n^2) full copy per batch.
unfolded_rows = []
for expt in range(calibration_dataAll.size):
    calibration_expt = calibration_dataAll[expt,0][5:90,2:] # removing first 5 measurements as done in He et al.
    if calibration_expt.shape[0] < 85:
        continue # skip batches with fewer than 85 retained samples
    unfolded_rows.append(np.ravel(calibration_expt, order='F'))
unfolded_dataMatrix = np.vstack(unfolded_rows)
# +
# scale data
# Standardize each unfolded column (zero mean, unit variance) before PCA.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
data_train_normal = scaler.fit_transform(unfolded_dataMatrix)

# +
# PCA
from sklearn.decomposition import PCA
pca = PCA(n_components = 3) # following the work of He et al.
score_train = pca.fit_transform(data_train_normal)
# -

# visualize in 2D
# Scatter of the first two principal-component scores for the training batches.
plt.figure()
plt.scatter(score_train[:,0],score_train[:,1])
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')
plt.show()
# +
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## GMM on PCA scores
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# +
# finding # of components via BIC
# Fit GMMs with 1..9 components and keep the count with the lowest BIC.
BICs = []
lowestBIC = np.inf
for n_cluster in range(1, 10):
    gmm = GaussianMixture(n_components = n_cluster, random_state = 100)
    gmm.fit(score_train)
    BIC = gmm.bic(score_train)
    BICs.append(BIC)
    if BIC < lowestBIC:
        optimal_n_cluster = n_cluster
        lowestBIC = BIC

plt.figure()
plt.plot(range(1,10), BICs, marker='o')
plt.xlabel('Number of components')
plt.ylabel('BIC')
plt.show()

# +
# fit GMM model to metal-etch data
# Refit with the BIC-selected component count and color points by cluster.
gmm = GaussianMixture(n_components = optimal_n_cluster, random_state = 100)
cluster_label = gmm.fit_predict(score_train)

plt.figure()
plt.scatter(score_train[:, 0], score_train[:, 1], c = cluster_label, s=20, cmap='viridis')
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')

# Mark and label each fitted cluster center.
cluster_centers = gmm.means_
cluster_plot_labels = ['Cluster ' + str(i+1) for i in range(optimal_n_cluster)]
for i in range(optimal_n_cluster):
    plt.scatter(cluster_centers[i, 0], cluster_centers[i, 1], c='red', s=20, marker = '*', alpha=0.5)
    plt.annotate(cluster_plot_labels[i], (cluster_centers[i,0], cluster_centers[i,1]))
plt.show()
# +
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Fault detection metric for training data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# +
# global Mahalanobis-distance monitoring statistic for the training data:
#   D_global(x) = sum_c P(c | x) * (x - mu_c)' inv(Sigma_c) (x - mu_c)
# i.e. per-component Mahalanobis distances weighted by posterior probs.
# Use the fitted component count instead of the hard-coded 3, which was
# only correct when the BIC search happened to select 3 clusters.
n_components = gmm.n_components
# inverse covariances are loop-invariant; compute them once
inv_covs = [np.linalg.inv(gmm.covariances_[c,:]) for c in range(n_components)]
Dglobal_train = np.zeros((score_train.shape[0],))
for i in range(score_train.shape[0]):
    x = score_train[i,:,np.newaxis]
    probs = gmm.predict_proba(x.T)
    for component in range(n_components):
        diff = x - gmm.means_[component,:,np.newaxis]
        Dlocal = np.dot(np.dot(diff.T, inv_covs[component]), diff)
        Dglobal_train[i] = Dglobal_train[i] + probs[0,component]*Dlocal
# +
# Dglobal control limit based on the F-distribution (Hotelling-T2 style):
#   CL = r (N^2 - 1) F_{1-alpha}(r, N - r) / (N (N - r))
import scipy.stats  # explicit: only `scipy.io` is imported above, so
                    # `scipy.stats` was reachable only via transitive imports
N = score_train.shape[0]  # number of training batches
r = 3                     # number of retained principal components (see PCA above)
alpha = 0.05 # 95% control limit
Dglobal_CL = r*(N**2-1)*scipy.stats.f.ppf(1-alpha,r,N-r)/(N*(N-r))
# -
# Dglobal plot with CL
# Training-data monitoring chart: Dglobal per batch with the control limit.
plt.figure()
plt.plot(Dglobal_train)
plt.plot([1,len(Dglobal_train)],[Dglobal_CL, Dglobal_CL], color='red')
plt.xlabel('Sample #')
plt.ylabel('D_global for training data')
plt.show()

# +
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##                    test data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

# +
# fetch test data and unfold
# Same variable-wise unfolding as the calibration data: one row per batch.
test_dataAll = Etch_data[0,0].test

unfolded_TestdataMatrix = np.empty((1,n_vars*n_samples))
for expt in range(test_dataAll.size):
    test_expt = test_dataAll[expt,0][5:90,2:]
    if test_expt.shape[0] < 85:
        continue
    unfolded_row = np.ravel(test_expt, order='F')[np.newaxis,:]
    unfolded_TestdataMatrix = np.vstack((unfolded_TestdataMatrix, unfolded_row))
unfolded_TestdataMatrix = unfolded_TestdataMatrix[1:,:]  # drop the uninitialized seed row
# -

# PCA on fault data
# Apply the *training* scaler and PCA basis to the test batches.
data_test_normal = scaler.transform(unfolded_TestdataMatrix)
score_test = pca.transform(data_test_normal)

# visualize in 2D (both test and calibration data)
plt.figure()
plt.scatter(score_train[:,0],score_train[:,1], c='blue', alpha=0.1)
plt.scatter(score_test[:,0],score_test[:,1], c='red', marker = '*')
plt.xlabel('PC1 scores')
plt.ylabel('PC2 scores')
plt.show()
# +
# compute Dglobal_test: same posterior-weighted Mahalanobis statistic as for
# the training data. Use the fitted component count instead of the
# hard-coded 3, which was only correct when BIC selected 3 clusters.
n_components = gmm.n_components
# inverse covariances are loop-invariant; compute them once
inv_covs = [np.linalg.inv(gmm.covariances_[c,:]) for c in range(n_components)]
Dglobal_test = np.zeros((score_test.shape[0],))
for i in range(score_test.shape[0]):
    x = score_test[i,:,np.newaxis]
    probs = gmm.predict_proba(x.T)
    for component in range(n_components):
        diff = x - gmm.means_[component,:,np.newaxis]
        Dlocal = np.dot(np.dot(diff.T, inv_covs[component]), diff)
        Dglobal_test[i] = Dglobal_test[i] + probs[0,component]*Dlocal
# +
#%% Dglobal plot with CL
# Test-data monitoring chart; points above the limit are flagged as faults.
plt.figure()
plt.plot(Dglobal_test, marker = '*')
plt.plot([1,len(Dglobal_test)],[Dglobal_CL,Dglobal_CL], color='red')
plt.xlabel('Sample #')
plt.ylabel('D_global for test data')
plt.show()

print('Number of faults identified: ', np.sum(Dglobal_test > Dglobal_CL), ' out of ', len(Dglobal_test))
| Chapter_Clustering_GMM/ProcessMonitoring_GMM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import pandas as pd

from corpus import get_all_corpus
# -

corpus = get_all_corpus()

# Count how often each character appears across every sentence of the corpus.
char_cnts = dict()
for idx, poem in enumerate(corpus):
    for sentence in poem['sentences']:
        for ch in sentence:
            char_cnts[ch] = char_cnts[ch]+1 if ch in char_cnts else 1
    if 0 == (idx+1)%10000:
        # Python 2 print statement (this notebook runs a Python 2 kernel)
        print "[Vocabulary] %d/%d poems have been processed." %(idx+1, len(corpus))

# Vocabulary sorted by descending character frequency.
vocab = sorted([ch for ch in char_cnts], key = lambda ch: -char_cnts[ch])

char_cnts

# dict.iteritems() is Python-2-only; DataFrame of (character, count) pairs.
df = pd.DataFrame(list(char_cnts.iteritems()))

# DataFrame.sort() is the old (pre-0.20) pandas API; sort by the count column.
df_sorted = df.sort([1], ascending=False).reset_index()

df_sorted.loc[8023]

from data_utils import get_keras_train_data
from vocab import get_vocab
import pandas as pd

X_train, Y_train = get_keras_train_data()

X_train

int2ch, ch2int = get_vocab()

# Map integer-encoded rows back to characters for inspection.
def translate(data, int2ch):
    result = []
    for line in data:
        result += [map(lambda x: int2ch[x], line)]
    return result

X_ch = translate(X_train, int2ch)
Y_ch = translate(Y_train, int2ch)

X_df = pd.DataFrame(X_ch)
Y_df = pd.DataFrame(Y_ch)

Y_df
| notebooks/Vocab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Uses push model concept (also works :D)
# +
# Prefer the faster simplejson when available; fall back to the stdlib json.
try: import simplejson as json
except ImportError: import json
import gzip,codecs,numpy as np,random,copy
import scipy.optimize as opt

# +
# Load the train/test splits and the (user, product) -> review quick map.
# The commented-out paths select the other datasets / split strategies.
#with open("finefood_train_random.json","r") as infile:
#with open("beeradvocate_train_random.json","r") as infile:
#with open("beeradvocate_train_lastrating.json","r") as infile:
#with open("finefood_train_lastrating.json","r") as infile:
#with open("ratebeer_train_lastrating.json","r") as infile:
with open("ratebeer_train_random.json","r") as infile:
    train = json.load(infile)
infile.close()  # redundant: the with-block already closed the file
#with open("finefood_test_random.json","r") as infile:
#with open("beeradvocate_test_random.json","r") as infile:
#with open("beeradvocate_test_lastrating.json","r") as infile:
#with open("finefood_test_lastrating.json","r") as infile:
#with open("ratebeer_test_lastrating.json","r") as infile:
with open("ratebeer_test_random.json","r") as infile:
    test = json.load(infile)
infile.close()
#with open("finefood_quickmap_random.json","r") as infile:
#with open("beeradvocate_quickmap_random.json","r") as infile:
#with open("beeradvocate_quickmap_lastrating.json","r") as infile:
#with open("finefood_quickmap_lastrating.json","r") as infile:
#with open("ratebeer_quickmap_lastrating.json","r") as infile:
with open("ratebeer_quickmap_random.json","r") as infile:
    quickmap = json.load(infile)
infile.close()
print(len(train),len(test),len(quickmap))

# Order the reviews chronologically (oldest first) by their timestamp.
train = sorted(train, key = lambda k : int(k["review/time"]))
# -
# Build the two adjacency maps in chronological review order:
#   Iu: user id -> list of products that user reviewed
#   Ui: item id -> list of users who reviewed that product
Iu = dict()
Ui = dict()
for review in train:
    product = review["product/productId"]
    reviewer = review["review/userId"]
    Ui.setdefault(product, []).append(reviewer)
    Iu.setdefault(reviewer, []).append(product)
print(len(Iu))
# Distinct users and items present in the training split. set.add is
# idempotent, so no membership check is needed before inserting.
distinct_user_set = set()
distinct_item_set = set()
for review in train:
    distinct_user_set.add(review["review/userId"])
    distinct_item_set.add(review["product/productId"])
print(len(distinct_user_set), len(distinct_item_set))
import sys
# Raise the recursion limit for the recursive DP (OPT_rec) defined below.
sys.setrecursionlimit(20000)

# +
# Load the id -> user/item lookup tables; commented paths are other datasets.
#with open("finefood_user_map_random.json",'r') as infile:
#with open("finefood_user_map_lastrating.json",'r') as infile:
#with open("beeradvocate_user_map_random.json",'r') as infile:
#with open("beeradvocate_user_map_lastrating.json",'r') as infile:
#with open("ratebeer_user_map_lastrating.json",'r') as infile:
with open("ratebeer_user_map_random.json",'r') as infile:
    user_map = json.load(infile)
infile.close()  # redundant: the with-block already closed the file
#with open("finefood_item_map_random.json",'r') as infile:
#with open("finefood_item_map_lastrating.json",'r') as infile:
#with open("beeradvocate_item_map_random.json",'r') as infile:
#with open("beeradvocate_item_map_lastrating.json",'r') as infile:
#with open("ratebeer_item_map_lastrating.json",'r') as infile:
with open("ratebeer_item_map_random.json",'r') as infile:
    item_map = json.load(infile)
infile.close()
# JSON object keys are always strings; rebuild both maps with integer keys.
user_map_int = {int(k): v for k, v in user_map.items()}
item_map_int = {int(k): v for k, v in item_map.items()}
# -
# # Expertise modelling
class ExpertiseLFM(object):
''' Expertise LFM class implements the evolution latent factor model of collaborative filtering
using matrix factorization
'''
def __init__(self,train_data, Iu_reg, Ui_reg, userproduct_dict,userset,itemset,\
usermap, itemmap,k,Lambda1,Lambda2,E,mode):
''' requires Iu and Ui matrix information, quick mapping of reviews to (user,product),
k =number of latent factor dimensions,
lambda1 = reg parameter, lambda2 = smoothing parameter,
E = number of experience levels.
'''
self.Ntrain = len(train_data) #Number of training samples
self.train_data = train_data #training data
self.Iu = self.deepish_copy(Iu_reg) #Iu mapping
self.Ui = self.deepish_copy(Ui_reg) #Ui mapping
self.quickmap = userproduct_dict #uses key as (userid-itemid) for quick mapping to required review
self.user_set = userset
self.item_set = itemset
self.user_map = usermap #mapping for easier transformation from long gradient vector to individual gradients
self.item_map = itemmap
#hyperparameters
self.Lambda1 = Lambda1 #regularization param
self.Lambda2 = Lambda2 #smoothing reg param
self.k = k # number of latent factor dimension (low dimensional repr)
self.E = E #number of experience levels
self.mode = mode
self.final_param = self.init_theta() #current final_parameters
self.init_exp()
def init_theta(self):
''' Initializes the parameters of E recommender models
flat_theta = <alpha_G, Bu_G, Bi_G, alpha_e1..E, Bu_e1..E, Bi_e1..E, Gu_e1...E, Gi_e1...E>
'''
flat_theta = []
rating_arr = [review["review/score"] for review in self.train_data]
avg_rating = np.mean(rating_arr)
self.alpha_G = avg_rating #global offset
self.Bu_G = dict() #user bias (global)
self.Bi_G = dict() #item bias (global)
for i in range(len(self.user_map)):
self.Bu_G[self.user_map[i]] = np.random.random(1).item()
for i in range(len(self.item_map)):
self.Bi_G[self.item_map[i]] = np.random.random(1).item()
flat_theta.append(self.alpha_G)
flat_theta.extend(list(self.Bu_G.values()))
flat_theta.extend(list(self.Bi_G.values()))
self.alpha = np.random.rand(self.E) #individual offset parameters per exp
self.Bu = [dict() for i in range(self.E)] #user bias per exp
self.Bi = [dict() for i in range(self.E)] #item bias per exp
self.Gu = [dict() for i in range(self.E)] #user latent factor vector repr per exp
self.Gi = [dict() for i in range(self.E)] #item latent factor vector repr per exp
flat_theta.extend(self.alpha)
for e in range(self.E):
for i in range(len(self.user_map)):
self.Bu[e][self.user_map[i]] = np.random.random(1).item()
flat_theta.append(self.Bu[e][self.user_map[i]])
for e in range(self.E):
for j in range(len(self.item_map)):
self.Bi[e][self.item_map[j]] = np.random.random(1).item()
flat_theta.append(self.Bi[e][self.item_map[j]])
for e in range(self.E):
for i in range(len(self.user_map)):
self.Gu[e][self.user_map[i]] = np.random.uniform(0,1,(1,self.k))
flat_theta.extend(np.array(list(self.Gu[e][self.user_map[i]])).flatten())
for e in range(self.E):
for j in range(len(self.item_map)):
self.Gi[e][self.item_map[j]] = np.random.uniform(0,1,(1,self.k))
flat_theta.extend(np.array(list(self.Gi[e][self.item_map[j]])).flatten())
self.recparam = (1 + len(self.user_set) + len(self.item_set) \
+ self.k*(len(self.user_set) + len(self.item_set))) #per experience level parameters
self.globalparam = 1 + len(self.user_set) + len(self.item_set) #global parameters
self.totalparams = self.recparam * self.E + self.globalparam
return np.array(flat_theta)
def init_exp(self):
''' Initializes experience for each user-item combination uniformly over rating time'''
self.eui = dict() #experience dictionary of dictionaries (1: user level, 2: item level)
if self.mode == 1 or self.mode==2: # Community
num_items_in_level = round(len(self.train_data)/self.E)
if num_items_in_level ==0:
print("Something went wrong")
else:
cur_level =0
for i in range(len(self.train_data)):
review = self.train_data[i]
user = review["review/userId"]
item = review["product/productId"]
if user not in self.eui:
self.eui[user]= {}
if i!=0 and i% num_items_in_level == 0 and cur_level != self.E-1:
cur_level +=1
self.eui[user][item] = cur_level
else: #user level
for user in self.user_set:
self.eui[user] = {}
num_items_in_level = round(len(self.Iu[user])/self.E)
if num_items_in_level ==0:
print("Something went wrong.", len(self.Iu[user]),user)
cur_level = 0
for i in range(len(self.Iu[user])):
if i!=0 and i% num_items_in_level == 0 and cur_level != self.E-1:
cur_level+=1
item = self.Iu[user][i]
self.eui[user][item] = cur_level
print("Experience assignment done")
def OPT_rec(self,i,j,n,user):
'''
i = current experience level, j = jth rating of user, n = number of ratings given by user
internally modelled as experience 0 to E-1 (so experience E is invalid)
'''
if i==self.E or j==n:
return np.inf
elif self.OPT[i,j] >=0: #intial value = -1
return self.OPT[i,j]
else:
item = self.Iu[user][j] # jth rating
rating = self.quickmap[user+"-"+item]["review/score"]
temp = min(self.OPT_rec(i+1,j+1,n,user), self.OPT_rec(i,j+1,n,user))
rec_e = self.pred_e(user,item,i) #current level rating
if temp == np.inf:
self.OPT[i,j] = (rec_e - rating)**2
else:
self.OPT[i,j] = (rec_e - rating)**2 + temp
return self.OPT[i,j]
def assign_exp_level_iterative(self,user):
''' Iterative solution for assigning experience to each user'''
n = len(self.Iu[user])
#compute the last column values i.e. the last item's values for each exp level
item = self.Iu[user][n-1]
for i in range(self.E):
rating = self.quickmap[user+"-"+item]["review/score"]
rec_e = self.pred_e(user,item,i) #current level rating
self.OPT[i,n-1] = (rec_e - rating)**2
# now compute the upper most level row values (i.e. all items's highest exp level)
for j in range(n-2,-1,-1):
item = self.Iu[user][j]
rating = self.quickmap[user+"-"+item]["review/score"]
rec_e = self.pred_e(user,item,self.E-1) #current level rating
self.OPT[self.E-1,j] = (rec_e - rating)**2 + self.OPT[self.E-1,j+1]
#now update every other value in the matrix
for j in range(n-2,-1,-1):
item = self.Iu[user][j]
for i in range(self.E-2,-1,-1):
rating = self.quickmap[user+"-"+item]["review/score"]
rec_e = self.pred_e(user,item,i) #current level rating
temp = min(self.OPT[i+1,j+1],self.OPT[i,j+1])
self.OPT[i,j] = (rec_e - rating)**2 + temp
def assign_exp_level(self):
''' Using the DP solution similar to Longest Common SubSequence, predict new experience level
for each user-item combination'''
k = 0
count=0
for user in self.Iu:
n = len(self.Iu[user])
self.OPT = np.matrix([[-1.0]*n]*self.E) #initialize to invalid values
#recursive solution
#for i in range(self.E):
# self.OPT_rec(i,0,n,user)
#Iterative solution:
self.assign_exp_level_iterative(user)
cur_level = np.argmin(self.OPT[:,0])
j = 0
item = self.Iu[user][j]
self.eui[user][item] = cur_level
start_level = cur_level
j+=1
while (j < n):
try:
if cur_level != self.E-1 and self.OPT[cur_level,j] >= self.OPT[cur_level+1,j]:
cur_level +=1
item = self.Iu[user][j]
if cur_level != self.eui[user][item]:
count+=1
self.eui[user][item] = cur_level
j+=1
except Exception as e:
print(e.args,i,j,n)
if k%1000==0:
print("user: {} start level: {} end level: {}".format(user,start_level,cur_level))
k+=1
print("Number of experience levels changed:{}".format(count))
return count
def assign_exp_community(self):
print("Changing community experience levels")
n = self.Ntrain
self.OPT = np.matrix([[-1.0]*n]*self.E)
count = 0
#first get the last column values (i.e. the last review of the community)
review = self.train_data[-1]
user = review["review/userId"]
item = review["product/productId"]
rating = review["review/score"]
for i in range(self.E):
rec_e = self.pred_e(user,item,i) #current level rating
self.OPT[i,n-1] = (rec_e - rating)**2
# now compute the upper most level row values (i.e. all items's highest exp level)
for j in range(n-2,-1,-1):
review = self.train_data[j]
user = review["review/userId"]
item = review["product/productId"]
rating = review["review/score"]
rec_e = self.pred_e(user,item,self.E-1) #current level rating
self.OPT[self.E-1,j] = (rec_e - rating)**2 + self.OPT[self.E-1,j+1]
#now update every other value in the matrix
for j in range(n-2,-1,-1):
review = self.train_data[j]
user = review["review/userId"]
item = review["product/productId"]
for i in range(self.E-2,-1,-1):
rating = review["review/score"]
rec_e = self.pred_e(user,item,i) #current level rating
temp = min(self.OPT[i+1,j+1],self.OPT[i,j+1])
self.OPT[i,j] = (rec_e - rating)**2 + temp
cur_level = np.argmin(self.OPT[:,0])
j = 0
review = self.train_data[j]
user = review["review/userId"]
item = review["product/productId"]
self.eui[user][item] = cur_level
start_level = cur_level
j+=1
while (j < n):
try:
if cur_level != self.E-1 and self.OPT[cur_level,j] >= self.OPT[cur_level+1,j]:
cur_level +=1
review = self.train_data[j]
user = review["review/userId"]
item = review["product/productId"]
if cur_level != self.eui[user][item]:
count+=1
self.eui[user][item] = cur_level
j+=1
except Exception as e:
print(e.args,i,j,n)
if j%100000 ==0:
print(user,item, self.eui[user][item])
return count
    def retrieve_theta_components(self,theta):
        ''' Sets all parameters from the long theta vector obtained after update rule.

        The flat layout (which fprime/fprime_one_func must emit in the same
        order) is: alpha_G, Bu_G (per user), Bi_G (per item), then for each
        experience level e: alpha[e], Bu[e] (per user), Bi[e] (per item),
        Gu[e] (k values per user), Gi[e] (k values per item).
        '''
        j = 0  # read cursor into theta
        umap_len = len(self.user_map)
        imap_len = len(self.item_map)
        self.alpha_G = theta[j]
        j+=1
        for i in range(umap_len):
            self.Bu_G[self.user_map[i]] = theta[j]
            j+=1
        for i in range(imap_len):
            self.Bi_G[self.item_map[i]] = theta[j]
            j+=1
        for e in range(self.E):
            self.alpha[e] = theta[j]
            j+=1
        for e in range(self.E):
            for i in range(umap_len):
                self.Bu[e][self.user_map[i]] = theta[j]
                j+=1
        for e in range(self.E):
            for i in range(imap_len):
                self.Bi[e][self.item_map[i]] = theta[j]
                j+=1
        for e in range(self.E):
            for i in range(umap_len):
                # latent factor vectors consume k consecutive entries
                self.Gu[e][self.user_map[i]] = np.array(theta[j:j+self.k])
                j+=self.k
        for e in range(self.E):
            for i in range(imap_len):
                self.Gi[e][self.item_map[i]] = np.array(theta[j:j+self.k])
                j+=self.k
        # sanity check: the cursor must land exactly at the end of theta
        if j!= len(theta):
            print("Something went wrong. Not all theta values were used")
def pred_e(self,user,item,e):
return self.alpha_G + self.Bu_G[user] + self.Bi_G[item] +\
self.alpha[e] + self.Bu[e][user] + self.Bi[e][item] +\
np.asscalar(np.dot(self.Gu[e][user], self.Gi[e][item].T))
    def f(self,theta):
        '''Calculates the value of the objective function (loss) on the training data.

        Objective = 0.5 * ( MSE
                            + Lambda1 * L2 complexity of the parameters
                            + Lambda2 * smoothness across adjacent levels ).
        Side effect: unpacks theta into the model parameters first.
        '''
        self.retrieve_theta_components(theta)
        #mean squared error
        error = 0
        for review in self.train_data:
            user = review['review/userId']
            item = review["product/productId"]
            e = self.eui[user][item]
            error += (self.pred_e(user,item,e) - review["review/score"])**2
        error /= self.Ntrain
        #regularization terms
        reg_complexity = 0
        #ignore global values for now in regularization
        # NOTE(review): despite the comment above, the global biases Bu_G/Bi_G
        # ARE included just below; only alpha_G is left unregularized.
        Bu_np = np.array(list(self.Bu_G.values()))
        Bi_np = np.array(list(self.Bi_G.values()))
        reg_complexity = np.sum(np.square(Bu_np)) + np.sum(np.square(Bi_np))
        for e in range(self.E):
            reg_complexity += np.square(self.alpha[e])
            Bu_np = np.array(list(self.Bu[e].values()))
            Bi_np = np.array(list(self.Bi[e].values()))
            reg_complexity += np.sum(np.square(Bu_np)) + np.sum(np.square(Bi_np))
            for user in self.Gu[e]:
                reg_complexity += np.linalg.norm(self.Gu[e][user])**2
            for item in self.Gi[e]:
                reg_complexity += np.linalg.norm(self.Gi[e][item])**2
        #regularization (smoothing cost)
        # squared differences between parameters of adjacent experience levels
        reg_term = 0
        umap_len = len(self.user_map)
        imap_len = len(self.item_map)
        for e in range(1,self.E):
            reg_term += (self.alpha[e-1] - self.alpha[e])**2
        for e in range(1,self.E):
            for i in range(umap_len):
                reg_term += (self.Bu[e-1][self.user_map[i]] - self.Bu[e][self.user_map[i]])**2
        for e in range(1,self.E):
            for i in range(imap_len):
                reg_term += (self.Bi[e-1][self.item_map[i]] - self.Bi[e][self.item_map[i]])**2
        for e in range(1,self.E):
            for i in range(umap_len):
                reg_term += np.linalg.norm(self.Gu[e-1][self.user_map[i]] - self.Gu[e][self.user_map[i]])**2
        for e in range(1,self.E):
            for i in range(imap_len):
                reg_term += np.linalg.norm(self.Gi[e-1][self.item_map[i]] - self.Gi[e][self.item_map[i]])**2
        return (error + self.Lambda1* reg_complexity + self.Lambda2 * reg_term)*0.5
    def fprime_one_func(self,theta):
        ''' does all gradient work in one function. Should be definitely faster.

        Computes the gradient of f() w.r.t. every parameter in one pass over
        the training data (error term) plus one pass over the parameter maps
        (Lambda1 complexity and Lambda2 smoothness terms), and returns it as
        a flat vector in exactly the order retrieve_theta_components expects:
        alpha_G, Bu_G, Bi_G, then per level alpha, Bu, Bi, Gu, Gi.
        '''
        self.retrieve_theta_components(theta)
        flat_gradient = []
        umap_len = len(self.user_map)
        imap_len = len(self.item_map)
        # zero-initialize all gradient accumulators
        self.alpha_G_grad = 0
        self.Bu_G_grad = dict()
        self.Bi_G_grad = dict()
        for i in range(len(self.user_map)):
            self.Bu_G_grad[self.user_map[i]] = 0.0
        for i in range(len(self.item_map)):
            self.Bi_G_grad[self.item_map[i]] = 0.0
        self.alpha_grad = np.zeros(self.E) #individual offset parameters per exp
        self.Bu_grad = [dict() for i in range(self.E)] #user bias per exp
        self.Bi_grad = [dict() for i in range(self.E)] #item bias per exp
        self.Gu_grad = [dict() for i in range(self.E)] #user latent factor vector repr per exp
        self.Gi_grad = [dict() for i in range(self.E)] #item latent factor vector repr per exp
        for e in range(self.E):
            for i in range(len(self.user_map)):
                self.Bu_grad[e][self.user_map[i]] = 0.0
                self.Gu_grad[e][self.user_map[i]] = np.zeros((1,self.k))
            for j in range(len(self.item_map)):
                self.Bi_grad[e][self.item_map[j]] = 0.0
                self.Gi_grad[e][self.item_map[j]] = np.zeros((1,self.k))
        # error-term contribution: each review adds its (scaled) residual to
        # every parameter that participated in its prediction
        for review in self.train_data:
            user = review['review/userId']
            item = review["product/productId"]
            e = self.eui[user][item]
            rat_diff = self.pred_e(user,item,e)- review["review/score"]
            rat_diff/= self.Ntrain
            self.alpha_G_grad += rat_diff
            self.Bu_G_grad[user] += rat_diff
            self.Bi_G_grad[item] += rat_diff
            self.alpha_grad[e] += rat_diff
            self.Bu_grad[e][user] += rat_diff
            self.Bi_grad[e][item] += rat_diff
            self.Gu_grad[e][user] += rat_diff * self.Gi[e][item]
            self.Gi_grad[e][item] += rat_diff * self.Gu[e][user]
        # Lambda1 complexity term for the global biases
        for i in range(len(self.user_map)):
            user = self.user_map[i]
            self.Bu_G_grad[user] += self.Lambda1 * self.Bu_G[user]
        for j in range(len(self.item_map)):
            item = self.item_map[j]
            self.Bi_G_grad[item] += self.Lambda1 * self.Bi_G[item]
        # Lambda1 complexity and Lambda2 smoothness terms per level; the
        # smoothness term couples each level to its neighbor(s)
        for e in range(self.E):
            self.alpha_grad[e] += self.Lambda1*self.alpha[e]
            if e == self.E-1:
                self.alpha_grad[e] += self.Lambda2 * (self.alpha[e] - self.alpha[e-1])
            elif e == 0:
                self.alpha_grad[e] += self.Lambda2 * (self.alpha[e] - self.alpha[e+1])
            else:
                self.alpha_grad[e] += self.Lambda2 * (2*self.alpha[e] - self.alpha[e-1] - self.alpha[e+1])
            for i in range(len(self.user_map)):
                user = self.user_map[i]
                self.Bu_grad[e][user] += self.Lambda1*self.Bu[e][user]
                self.Gu_grad[e][user] += self.Lambda1*self.Gu[e][user]
                if e == self.E-1:
                    self.Bu_grad[e][user] += self.Lambda2* (self.Bu[e][user] - self.Bu[e-1][user])
                    self.Gu_grad[e][user] += self.Lambda2* (self.Gu[e][user] - self.Gu[e-1][user])
                elif e==0:
                    self.Bu_grad[e][user] += self.Lambda2* (self.Bu[e][user] - self.Bu[e+1][user])
                    self.Gu_grad[e][user] += self.Lambda2* (self.Gu[e][user] - self.Gu[e+1][user])
                else:
                    self.Bu_grad[e][user] += self.Lambda2* (2*self.Bu[e][user] - self.Bu[e-1][user] \
                                                            - self.Bu[e+1][user])
                    self.Gu_grad[e][user] += self.Lambda2* (2*self.Gu[e][user] - self.Gu[e-1][user] \
                                                            - self.Gu[e+1][user])
            for j in range(len(self.item_map)):
                item = self.item_map[j]
                self.Bi_grad[e][item] += self.Lambda1*self.Bi[e][item]
                self.Gi_grad[e][item] += self.Lambda1*self.Gi[e][item]
                if e == self.E-1:
                    self.Bi_grad[e][item] += self.Lambda2* (self.Bi[e][item] - self.Bi[e-1][item])
                    self.Gi_grad[e][item] += self.Lambda2* (self.Gi[e][item] - self.Gi[e-1][item])
                elif e==0:
                    self.Bi_grad[e][item] += self.Lambda2* (self.Bi[e][item] - self.Bi[e+1][item])
                    self.Gi_grad[e][item] += self.Lambda2* (self.Gi[e][item] - self.Gi[e+1][item])
                else:
                    self.Bi_grad[e][item] += self.Lambda2* (2*self.Bi[e][item] - self.Bi[e-1][item] \
                                                            - self.Bi[e+1][item])
                    self.Gi_grad[e][item] += self.Lambda2* (2*self.Gi[e][item] - self.Gi[e-1][item] \
                                                            - self.Gi[e+1][item])
        #compute gradient wrt global parameters
        flat_gradient.append(self.alpha_G_grad)
        flat_gradient.extend(list(self.Bu_G_grad.values()))
        flat_gradient.extend(list(self.Bi_G_grad.values()))
        #compute gradient wrt experience parameters
        flat_gradient.extend(self.alpha_grad)
        for e in range(self.E):
            flat_gradient.extend(list(self.Bu_grad[e].values()))
        for e in range(self.E):
            flat_gradient.extend(list(self.Bi_grad[e].values()))
        for e in range(self.E):
            flat_gradient.extend(np.array(list(self.Gu_grad[e].values())).flatten())
        for e in range(self.E):
            flat_gradient.extend(np.array(list(self.Gi_grad[e].values())).flatten())
        return np.array(flat_gradient)
def call(self,theta):
print("{} Objective value: {}".format(self.i, self.f(theta)))
self.i+=1
def objectiveloss_lbfgs(self,thetaguess, grad_tolerance):
self.i =0;
flat_theta_guess = thetaguess
flat_theta,value,d = opt.fmin_l_bfgs_b(self.f,flat_theta_guess,self.fprime_one_func,\
disp=True,\
maxiter = 20, callback = self.call, iprint=0)
#set the final parameters to the final value returned by fmin_l_bfgs_b
return flat_theta
def push_model(self):
''' push the model towards more regularized place'''
e_alpha_avg = np.mean(self.alpha)
self.alpha_G += e_alpha_avg
self.alpha -= e_alpha_avg
for user in self.Bu_G:
e_Bu_avg = 0
for e in range(self.E):
e_Bu_avg += self.Bu[e][user]
e_Bu_avg /= self.E
self.Bu_G[user] += e_Bu_avg
for e in range(self.E):
self.Bu[e][user] -= e_Bu_avg
for item in self.Bi_G:
e_Bi_avg = 0
for e in range(self.E):
e_Bi_avg += self.Bi[e][item]
e_Bi_avg /= self.E
self.Bi_G[item] += e_Bi_avg
for e in range(self.E):
self.Bi[e][item] -= e_Bi_avg
    def als (self,grad_tolerance):
        ''' bad name. not exactly ALS, but performs LBFGS gradient descent, and sets experience level.

        Alternates up to 10 rounds of (1) L-BFGS optimization of all model
        parameters and (2) re-assignment of experience levels (community-wide
        for mode 2, per-user for mode 4), stopping early when no assignment
        changes.
        '''
        guess = self.final_param
        for m in range(10):
            print("Iteration {}:".format(m+1))
            print("Objective function value: {}".format(self.f(guess)))
            guess = self.objectiveloss_lbfgs(guess, grad_tolerance)
            self.final_param = guess.copy()
            self.retrieve_theta_components(guess)
            print("Model alpha parameters(before push): ",[a + self.alpha_G for a in self.alpha])
            # re-center per-level parameters around the global ones; this does
            # not change predictions but lowers the regularization cost
            self.push_model()
            print("Model alpha parameters: ",[a + self.alpha_G for a in self.alpha])
            if self.mode ==2: #community learned
                count = self.assign_exp_community()
            elif self.mode==4: #user learned
                count = self.assign_exp_level()
            # NOTE(review): for any mode other than 2 or 4, `count` is unbound
            # below and raises NameError -- confirm modes are restricted to {2, 4}.
            print("Objective function value: {}".format(self.f(guess)))
            if count==0:
                print("Breaking")
                return
    def mse_test(self,test_data):
        ''' Uses Mean Squared Error as evaluation metric on test data provided by user.

        For each test review, the experience level is taken from the user's
        training review closest in time (binary search over the user's
        time-sorted training items).  Reviews that fail inside pred_e (e.g.
        unseen user/item) are counted and excluded from both the numerator
        and denominator of the returned MSE.
        '''
        self.retrieve_theta_components(self.final_param)
        error = 0
        unknown_data_count =0;
        for review in test_data:
            user = review["review/userId"]
            item = review["product/productId"]
            #assign nearest experience to user-item combo
            rtime = int(review["review/time"])
            time_arr = []
            for it in self.Iu[user]:
                time_arr.append(int(self.quickmap[user+"-"+it]["review/time"]))
            # sanity check: the lookup assumes Iu[user] is chronologically sorted
            if all(time_arr[i] <= time_arr[i+1] for i in range(len(time_arr)-1))==False:
                print("raising error. Something went wrong. List should be sorted by default")
            index = np.searchsorted(time_arr,rtime)
            if index == len(self.Iu[user]):
                # test review is later than all training reviews: use the last one
                closest_it = self.Iu[user][index-1]
            else:
                closest_it = self.Iu[user][index]
            e = self.eui[user][closest_it]
            try:
                error += (self.pred_e(user,item,e) - review["review/score"])**2
            except Exception as e:
                # NOTE(review): this except re-binds (and afterwards unbinds)
                # the experience level `e` above -- harmless here since `e` was
                # already consumed, but worth renaming.
                print(e)
                unknown_data_count+=1
        if unknown_data_count>0:
            print("Warning! Unknown {} new data rows; Incorporating this into MSE".format(unknown_data_count))
        return error / (len(test_data) - unknown_data_count)
def fprime(self, theta):
''' Calculates the gradient of objective function f()'''
self.retrieve_theta_components(theta)
flat_gradient = []
umap_len = len(self.user_map)
imap_len = len(self.item_map)
#compute gradient wrt global parameters
flat_gradient.append(self.compute_gradient_wrt_alpha_global())
Bu_grad = self.compute_gradient_wrt_Bu_global()
Bi_grad = self.compute_gradient_wrt_Bi_global()
flat_gradient.extend(list(Bu_grad.values()))
flat_gradient.extend(list(Bi_grad.values()))
#compute gradient wrt experience parameters
for e in range(self.E):
flat_gradient.append(self.compute_gradient_wrt_alpha(e))
for e in range(self.E):
Bu_grad = self.compute_gradient_wrt_Bu(e)
flat_gradient.extend(list(Bu_grad.values()))
for e in range(self.E):
Bi_grad = self.compute_gradient_wrt_Bi(e)
flat_gradient.extend(list(Bi_grad.values()))
for e in range(self.E):
Gu_grad = self.compute_gradient_wrt_Gu(e)
flat_gradient.extend(np.array(list(Gu_grad.values())).flatten())
for e in range(self.E):
Gi_grad = self.compute_gradient_wrt_Gi(e)
flat_gradient.extend(np.array(list(Gi_grad.values())).flatten())
return np.array(flat_gradient)
def vanilla_gd(self,eta,guess):
self.i =0;
flat_theta_guess = guess
for i in range(100):
flat_gradient = self.fprime(flat_theta_guess)
flat_theta_guess -= eta*flat_gradient
if i%50 ==0: print("{} U : Objective value: {}".format(i,self.f(flat_theta_guess)))
self.i +=1
return flat_theta_guess
def vanilla_als (self,eta):
guess = self.init_theta()
#print("param = ",guess[self.recparam])
for m in range(100):
guess = self.vanilla_gd(eta, guess)
self.final_param = guess
self.retrieve_theta_components(guess)
count = self.assign_exp_level()
if count==0:
print("Breaking")
return
def compute_gradient_wrt_alpha_global(self):
tempsum = 0
for review in self.train_data: #each user item id combo
user = review['review/userId']
item = review["product/productId"]
e = self.eui[user][item]
tempsum += ( self.pred_e(user,item,e)- review["review/score"])
tempsum /= self.Ntrain
return tempsum
def compute_gradient_wrt_Bu_global(self):
Bu_grad = {}
for user in self.Bu_G:
total = 0.0
for item in self.Iu[user]:
e = self.eui[user][item]
total += ( self.pred_e(user,item,e) - self.quickmap[user+'-'+item]["review/score"])
total /= self.Ntrain
total += self.Lambda1*self.Bu_G[user]
Bu_grad[user] = total
return Bu_grad
def compute_gradient_wrt_Bi_global(self):
Bi_grad = {}
for item in self.Bi_G:
total = 0.0
for user in self.Ui[item]:
e = self.eui[user][item]
total += ( self.pred_e(user,item,e) - self.quickmap[user+'-'+item]["review/score"])
total /= self.Ntrain
total += self.Lambda1*self.Bi_G[item]
Bi_grad[item] = total
return Bi_grad
def compute_gradient_wrt_alpha(self,exp):
''' Compute gradient of objective with respect to alpha parameter of given experience exp level'''
tempsum = 0
for review in self.train_data: #each user item id combo
user = review['review/userId']
item = review["product/productId"]
e = self.eui[user][item]
if e == exp: #only take the values pertaining the current level
tempsum += ( self.pred_e(user,item,e) - review["review/score"])
tempsum /= self.Ntrain
#regularization term
tempsum += self.Lambda1*self.alpha[exp]
if exp == self.E-1:
tempsum += self.Lambda2 * (self.alpha[exp] - self.alpha[exp-1])
elif exp == 0:
tempsum += self.Lambda2 * (self.alpha[exp] - self.alpha[exp+1])
else:
tempsum += self.Lambda2 * (2*self.alpha[exp] - self.alpha[exp-1] - self.alpha[exp+1])
return tempsum
def compute_gradient_wrt_Bu(self,e):
''' Compute gradient of objective with respect to Bu parameter'''
Bu_grad = {}
for user in self.Bu[e]:
total = 0.0
for item in self.Iu[user]:
if self.eui[user][item] == e:
total += ( self.pred_e(user,item,e) - self.quickmap[user+'-'+item]["review/score"])
total /= self.Ntrain
total += self.Lambda1*self.Bu[e][user]
if e == self.E-1:
total += self.Lambda2* (self.Bu[e][user] - self.Bu[e-1][user])
elif e==0:
total += self.Lambda2* (self.Bu[e][user] - self.Bu[e+1][user])
else:
total += self.Lambda2* (2*self.Bu[e][user] - self.Bu[e-1][user] - self.Bu[e+1][user])
Bu_grad[user] = total
return Bu_grad
def compute_gradient_wrt_Bi(self,e):
''' Compute gradient of objective with respect to Bi parameter'''
Bi_grad = {}
for item in self.Bi[e]:
total = 0.0
for user in self.Ui[item]:
if self.eui[user][item] == e:
total += (self.pred_e(user,item,e) - self.quickmap[user+'-'+item]["review/score"])
total /= self.Ntrain
total += self.Lambda1*self.Bi[e][item]
if e == self.E-1:
total += self.Lambda2* (self.Bi[e][item] - self.Bi[e-1][item])
elif e==0:
total += self.Lambda2* (self.Bi[e][item] - self.Bi[e+1][item])
else:
total += self.Lambda2* (2*self.Bi[e][item] - self.Bi[e-1][item] - self.Bi[e+1][item])
Bi_grad[item] = total
return Bi_grad
def compute_gradient_wrt_Gu(self,e):
''' Compute gradient of objective with respect to Gu parameter'''
Gu_grad = {}
for user in self.Gu[e]:
total = np.zeros((1,self.k))
for item in self.Iu[user]:
if self.eui[user][item] == e:
total+= np.multiply((self.pred_e(user,item,e) - self.quickmap[user+'-'+item]["review/score"]),\
self.Gi[e][item])
total /= self.Ntrain
total += self.Lambda1*self.Gu[e][user]
if e == self.E-1:
total += self.Lambda2* (self.Gu[e][user] - self.Gu[e-1][user])
elif e==0:
total += self.Lambda2* (self.Gu[e][user] - self.Gu[e+1][user])
else:
total += self.Lambda2* (2*self.Gu[e][user] - self.Gu[e-1][user] - self.Gu[e+1][user])
Gu_grad[user] = total.copy()
return Gu_grad
def compute_gradient_wrt_Gi(self,e):
''' Compute gradient of objective with respect to Gi parameter'''
Gi_grad = {}
for item in self.Gi[e]:
total = np.zeros((1,self.k))
for user in self.Ui[item]:
if self.eui[user][item] == e:
total+= np.multiply((self.pred_e(user,item,e) - self.quickmap[user+'-'+item]["review/score"]),\
self.Gu[e][user])
total /= self.Ntrain
total += self.Lambda1*self.Gi[e][item]
if e == self.E-1:
total += self.Lambda2* (self.Gi[e][item] - self.Gi[e-1][item])
elif e==0:
total += self.Lambda2* (self.Gi[e][item] - self.Gi[e][item])
else:
total += self.Lambda2* (2*self.Gi[e][item] - self.Gi[e-1][item] - self.Gi[e+1][item])
Gi_grad[item] = total.copy()
return Gi_grad
def deepish_copy(self,org):
'''much, much faster than deepcopy, for a dict of the simple python types.'''
out = dict().fromkeys(org)
for k,v in org.items():
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
# Build the experience-aware latent factor model on the training data.
# NOTE(review): positional argument meanings inferred from usage in the class
# above (the trailing 4 selects per-user experience assignment in als(), per
# the mode==4 branch); confirm the exact order against ExpertiseLFM.__init__,
# which is defined outside this chunk.
lfmObj1 = ExpertiseLFM(train,Iu, Ui,quickmap,distinct_user_set,distinct_item_set,user_map_int, item_map_int,5,0.001,0.001,5,4)
| ExpertiseModel_more_param_with_push.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Loading essential libraries
import os
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

#Importing the dataset
os.getcwd()
# NOTE(review): machine-specific absolute path; parameterize before sharing.
os.chdir('/Users/anviagarwal/downloads')
bnb = pd.read_csv('airbnb_prices.csv')
bnb

#Retrieving relevant columns
# .copy() so the in-place dropna and the column assignments below operate on
# an independent frame rather than a slice view of the original (avoids
# pandas SettingWithCopyWarning / silently lost writes).
bnb=bnb[['log_price','accommodates','bathrooms','cleaning_fee','host_has_profile_pic','host_identity_verified','host_response_rate',
        'instant_bookable','number_of_reviews','review_scores_rating','bedrooms','beds']].copy()
#Drop rows with any missing value in the selected columns
bnb.dropna(inplace=True)

# +
#Changing datatype
#Bool to Integer
bnb['cleaning_fee'] = bnb['cleaning_fee'].astype('int')
#Object to Integer ('t'/'f' strings -> 1/0)
d = {'t': True, 'f': False}
bnb['host_has_profile_pic']=bnb['host_has_profile_pic'].map(d).astype('bool').astype('int')
bnb['host_identity_verified']=bnb['host_identity_verified'].map(d).astype('bool').astype('int')
bnb['instant_bookable']=bnb['instant_bookable'].map(d).astype('bool').astype('int')
#Object to Integer - removing the '%' symbol
bnb['host_response_rate']=bnb['host_response_rate'].str.rstrip('%').astype('int')
# -

#Converting DV & IVs into an array
y=bnb['log_price'].values
x=bnb[['accommodates','bathrooms','cleaning_fee','host_has_profile_pic','host_identity_verified','host_response_rate',
        'instant_bookable','number_of_reviews','review_scores_rating','bedrooms','beds']].values

#Reshaping DV into a column vector as sklearn expects
y=y.reshape(-1,1)
y

#Regression Model (ordinary least squares, fit on the full dataset)
reg=LinearRegression()
reg.fit(x,y)

#R-squared score (in-sample; no train/test split is performed here)
reg.score(x,y)

#RMSE (in-sample)
y_pred=reg.predict(x)
np.sqrt(mean_squared_error(y,y_pred))
| Baseline ML Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/datascience-1.0
# ---
# # Sentiment Analysis with TensorFlow 2
#
# Amazon SageMaker provides both (1) built-in algorithms and (2) an easy path to train your own custom models. Although the built-in algorithms cover many domains (computer vision, natural language processing etc.) and are easy to use (just provide your data), sometimes training a custom model is the preferred approach. This notebook will focus on training a custom model using TensorFlow 2.
#
# Sentiment analysis is a very common text analytics task that determines whether a text sample is positive or negative about its subject. There are several different algorithms for performing this task, including older statistical algorithms and newer deep learning algorithms. With respect to deep learning, a 1D Convolutional Neural Net (CNN) is sometimes used for this purpose. In this notebook we'll use a CNN built with TensorFlow 2 to perform sentiment analysis in Amazon SageMaker on the IMDb dataset, which consists of movie reviews labeled as having positive or negative sentiment. Several aspects of Amazon SageMaker will be demonstrated:
#
# - How to use a SageMaker prebuilt TensorFlow 2 container with a custom model training script similar to one you would use outside SageMaker. This feature is known as Script Mode.
# - Hosted training: for full scale training on a complete dataset on a separate, larger and more powerful SageMaker-managed GPU instance.
# - Distributed training: using multiple GPUs to speed up training.
# - Batch Transform: for offline, asynchronous predictions on large batches of data.
# - Instance type choices: many different kinds of CPU and GPU instances are available in SageMaker, and are applicable to different use cases.
#
# ### ***Prerequisites***
#
# In SageMaker Studio, for kernel select **Python 3 (Data Science)**; for a SageMaker Notebook Instance, select the kernel **conda_python3**.
#
# # Prepare the dataset
#
# We'll begin by importing some necessary libraries.
# +
# %matplotlib inline
import logging
# Silence TensorFlow's verbose INFO/WARNING logging before it is imported.
logging.getLogger("tensorflow").setLevel(logging.ERROR)
import numpy as np
import os
import sys
# !{sys.executable} -m pip install tensorflow --quiet
# -
# Now we'll load the reviews dataset, and pad the reviews so all reviews have the same length. Each review is represented as an array of numbers, where each number represents an indexed word. We'll also pad shorter reviews to match a maximum specified length.
# +
from tensorflow.keras.preprocessing import sequence
# Use the public tf.keras API path; tensorflow.python.* is TensorFlow's
# internal namespace and not a stable import surface.
from tensorflow.keras.datasets import imdb

max_features = 20000  # vocabulary size: keep only the 20k most frequent words
maxlen = 400          # pad/truncate every review to 400 tokens

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# -
# Next, we'll save the padded data to files, locally for now, and later to Amazon S3.
# +
# Create the local data directories (idempotent thanks to exist_ok=True).
data_dir = os.path.join(os.getcwd(), 'data')
train_dir = os.path.join(os.getcwd(), 'data/train')
test_dir = os.path.join(os.getcwd(), 'data/test')
csv_test_dir = os.path.join(os.getcwd(), 'data/csv-test')
for directory in (data_dir, train_dir, test_dir, csv_test_dir):
    os.makedirs(directory, exist_ok=True)

# Persist the padded arrays locally; the first 100 test rows also go out as
# CSV for the Batch Transform example later in the notebook.
np.save(os.path.join(train_dir, 'x_train.npy'), x_train)
np.save(os.path.join(train_dir, 'y_train.npy'), y_train)
np.save(os.path.join(test_dir, 'x_test.npy'), x_test)
np.save(os.path.join(test_dir, 'y_test.npy'), y_test)
np.savetxt(os.path.join(csv_test_dir, 'csv-test.csv'), np.array(x_test[:100], dtype=np.int32), fmt='%d', delimiter=",")
# -
# # SageMaker Training
#
# With our dataset prepared, we're now ready to set up a SageMaker hosted training job. The core concept of SageMaker hosted training is to use more powerful compute resources separate from the less powerful, lower cost notebook instance that you use for prototyping. Hosted training spins up one or more instances (i.e. a cluster) for training, and then tears the cluster down when training is complete, with billing per second for cluster up time. In general, hosted training is preferred for doing actual large-scale training on more powerful instances, especially for distributed training on a single large instance with multiple GPUs, or multiple instances each having multiple GPUs.
#
# ### Git Configuration
#
# To begin, we need a training script that can be used to train the model in Amazon SageMaker. In this example, we'll use Git integration. That is, you can specify a training script that is stored in a GitHub, AWS CodeCommit or another Git repository as the entry point so that you don't have to download the scripts locally. For this purpose, the source directory and dependencies should be in the same repository.
#
# To use Git integration, pass a dict `git_config` as a parameter when you create an Amazon SageMaker Estimator object. In the `git_config` parameter, you specify the fields `repo`, `branch` and `commit` to locate the specific repo you want to use. If you do not specify `commit` in `git_config`, the latest commit of the specified repo and branch will be used by default. Also, if authentication is required to access the repo, you can specify fields `2FA_enabled`, `username`, `password` and `token` accordingly.
#
# The script that we will use in this example is stored a public GitHub repo so we don't need authentication to access it. Let's specify the `git_config` argument here:
# The training script is pulled from this public GitHub repo (latest commit on
# master, since no commit is pinned); public repo, so no auth fields needed.
git_config = {'repo': 'https://github.com/aws-samples/amazon-sagemaker-script-mode',
              'branch': 'master'}
# ### Upload data to S3
#
# Before starting hosted training, the data must be present in storage that can be accessed by SageMaker. The storage options are: Amazon S3 (object storage service), Amazon EFS (elastic NFS file system service), and Amazon FSx for Lustre (high-performance file system service). For this example, we'll upload the data to S3.
# +
import sagemaker
# S3 key prefix under which everything for this example is stored.
s3_prefix = 'tf-keras-sentiment'
traindata_s3_prefix = '{}/data/train'.format(s3_prefix)
testdata_s3_prefix = '{}/data/test'.format(s3_prefix)
# upload_data returns the s3:// URI of each uploaded channel.
train_s3 = sagemaker.Session().upload_data(path='./data/train/', key_prefix=traindata_s3_prefix)
test_s3 = sagemaker.Session().upload_data(path='./data/test/', key_prefix=testdata_s3_prefix)
# Channel-name -> S3 location mapping consumed by estimator.fit() below.
inputs = {'train':train_s3, 'test': test_s3}
print(inputs)
# -
# ### Estimator setup
#
# With the training data now in S3, we're ready to set up an Estimator object for hosted training. Most of the Estimator parameters are self-explanatory; further discussion of the instance type selection is below. The parameters most likely to change between different training jobs, the algorithm hyperparameters, are passed in as a dictionary.
# +
from sagemaker.tensorflow import TensorFlow
# Path inside the training container where model artifacts are written.
model_dir = '/opt/ml/model'
# p3.8xlarge has four V100 GPUs; the entry script's MirroredStrategy uses all of them.
train_instance_type = 'ml.p3.8xlarge'
# batch_size is the global batch, split across the GPUs by MirroredStrategy.
hyperparameters = {'epochs': 10, 'batch_size': 256, 'learning_rate': 0.01}
# NOTE(review): script_mode=True and framework_version='2.1' follow an older
# SageMaker Python SDK API surface -- confirm against the SDK version in use.
estimator = TensorFlow(
                       git_config=git_config,
                       source_dir='tf-sentiment-script-mode',
                       entry_point='sentiment.py',
                       model_dir=model_dir,
                       instance_type=train_instance_type,
                       instance_count=1,
                       hyperparameters=hyperparameters,
                       role=sagemaker.get_execution_role(),
                       base_job_name='tf-sentiment',
                       framework_version='2.1',
                       py_version='py3',
                       script_mode=True)
# -
# ### Distributed training on a single multi-GPU instance
#
# The SageMaker instance type selected above, p3.8xlarge, contains four GPUs based on NVIDIA's V100 Tensor Core architecture. This presents an opportunity to do distributed training within a single multi-GPU instance, utilizing all four GPUs to reduce total training time compared to using a single GPU. Although using multiple instances also is a possibility, using a single multi-GPU instance may be more performant because it avoids extra network traffic necessary to coordinate multiple instances. For larger datasets and more complex models, using multiple instances may be a necessity, however, that is not the case here.
#
# To utilize all four GPUs on the instance, you don't need to do anything special in Amazon SageMaker: TensorFlow 2 itself will handle the details under the hood. TensorFlow 2 includes several native distribution strategies, including MirroredStrategy, which is well-suited for training a model using multiple GPUs on a single instance. To enable MirroredStrategy, we simply add the following lines of code in the training script before defining and compiling the model (this has already been done for this example):
#
# ```python
# def get_model(learning_rate):
#
# mirrored_strategy = tf.distribute.MirroredStrategy()
#
# with mirrored_strategy.scope():
# embedding_layer = tf.keras.layers.Embedding(max_features,
# embedding_dims,
# input_length=maxlen)
# ....
# model.compile(loss='binary_crossentropy',
# optimizer=optimizer,
# metrics=['accuracy'])
#
# return model
# ```
#
# Additionally, the batch size is increased in the Estimator hyperparameters to account for the fact that batches are divided among multiple GPUs. If you are interested in reviewing the rest of the training code, it is at the GitHub repository referenced above in the `git_config` variable.
# ### Start the hosted training job
#
# We simply call `fit` to start the actual hosted training. The training job should take around 5 minutes, including the time needed to spin up the training instance. At the end of hosted training, you'll see from the logs below the code cell that validation accuracy is approaching 90%, and the number of billable seconds (which should be in the neighborhood of 180).
# Launch the hosted training job; blocks until completion, streaming the logs.
estimator.fit(inputs)
# The validation accuracy appears to have plateaued, so the model might be overfitting: it might be less able to generalize to data it has not yet seen. This is the case even though we are employing dropout as a regularization technique to reduce the possibility of overfitting. (See the training script at the GitHub repository referenced above.) For a production model, further experimentation would be necessary.
#
# TensorFlow 2's tf.keras API provides a convenient way to capture the history of model training. When the model was saved after training, the history was saved alongside it. To retrieve the history, we first download the trained model from the S3 bucket where SageMaker stored it. Models trained by SageMaker are always accessible in this way to be run anywhere. Next, we can unzip it to gain access to the history data structure, and then simply load the history as JSON:
# +
import json
# !aws s3 cp {estimator.model_data} ./model/model.tar.gz
# !tar -xzf ./model/model.tar.gz -C ./model
# NOTE(review): despite the pickle-style .p extension, history.p is parsed as
# JSON here -- the training script is assumed to have json.dump'ed it; confirm.
with open('./model/history.p', "r") as f:
    history = json.load(f)
# -
# Now we can plot the training curves based on the history, with separate graphs for model accuracy and model loss. We can see that training converged relatively smoothly to higher model accuracy and correspondingly lower model loss.
# +
import matplotlib.pyplot as plt
def plot_training_curves(history):
    """Plot training accuracy and loss side by side from a Keras history dict."""
    fig, (acc_axis, loss_axis) = plt.subplots(1, 2, figsize=(12, 4), sharex=True)
    # Same treatment for both metrics: plot the per-epoch series and label it.
    for axis, metric in ((acc_axis, 'accuracy'), (loss_axis, 'loss')):
        axis.plot(history[metric], label='train')
        axis.set(title='model ' + metric, ylabel=metric, xlabel='epoch')
        axis.legend()
    fig.tight_layout()


plot_training_curves(history)
# -
# # Batch Prediction
#
#
# If our use case requires individual predictions in near real-time, SageMaker hosted endpoints can be created. Hosted endpoints also can be used for pseudo-batch prediction, but the process is more involved than simply using SageMaker's Batch Transform feature, which is designed for large-scale, asynchronous batch inference.
#
# To use Batch Transform, we first upload to S3 some input test data to be transformed. The data can be in any format accepted by your model; in this case, it is CSV.
# Stage the CSV test data in S3 so Batch Transform can read it.
csvtestdata_s3_prefix = '{}/data/csv-test'.format(s3_prefix)
csvtest_s3 = sagemaker.Session().upload_data(path='./data/csv-test/', key_prefix=csvtestdata_s3_prefix)
print(csvtest_s3)
# A Transformer object must be set up to describe the Batch Transform job, including the amount and type of inference hardware to be used. Then the actual transform job itself is started with a call to the `transform` method of the Transformer. When setting up Batch Transform jobs, hardware considerations are important, just as they are for training:
#
# - `instance_count`: Batch Transform can spin up a cluster of multiple instances; at the end of the job, the cluster is torn down automatically. Since this dataset is small, we'll just use one instance.
# - `instance_type`: When doing inference for smaller models, such as this one, often CPU-based instance types can be used instead of GPU instance types, allowing significant cost savings. Note, however, that the choice of specific CPU instance type can significantly affect inference speed: although we could use a general purpose instance here such as a m5.xlarge, if we use a compute-optimized c5.xlarge instance, the total batch inference time is cut in half.
# +
# A single CPU (c5) instance is enough for this small model; the transform
# cluster is torn down automatically when the job completes.
transformer = estimator.transformer(instance_count=1,
                                    instance_type='ml.c5.xlarge')

transformer.transform(csvtest_s3, content_type='text/csv')
print('Waiting for transform job: ' + transformer.latest_transform_job.job_name)
transformer.wait()
# -
# We can now download the batch predictions from S3 to the local filesystem on the notebook instance; the predictions are contained in a file with a .out extension, and are embedded in JSON. Next we'll load the JSON and examine the predictions, which are confidence scores from 0.0 to 1.0 where numbers close to 1.0 indicate positive sentiment, while numbers close to 0.0 indicate negative sentiment.
# +
import json

batch_output = transformer.output_path
# Copy the Batch Transform output (JSON embedded in a .out file) locally.
# !mkdir -p batch_data/output
# !aws s3 cp --recursive $batch_output/ batch_data/output/

with open('batch_data/output/csv-test.csv.out', 'r') as f:
    jstr = json.load(f)
# Flatten the nested prediction lists into one list of confidence scores,
# rounded to three decimal places for display.
results = [float('%.3f'%(item)) for sublist in jstr['predictions'] for item in sublist]
print(results)
# -
# Now let's look at the text of some actual reviews to see the predictions in action. First, we have to convert the integers representing the words back to the words themselves by using a reversed dictionary. Next we can decode the reviews, taking into account that the first 3 indices were reserved for "padding", "start of sequence", and "unknown", and removing a string of unknown tokens from the start of the review.
# +
import re

# Strip a leading run of '?' placeholders (unknown tokens) and whitespace.
regex = re.compile(r'^[\?\s]+')
# Map word indices back to words. Indices are offset by 3 because 0-2 are
# reserved for padding, start-of-sequence, and unknown.
word_index = imdb.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
first_decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in x_test[0]])
regex.sub('', first_decoded_review)
# -
# Overall, this review looks fairly negative. Let's compare the actual label with the prediction:
# +
def get_sentiment(score):
    """Map a confidence score in [0, 1] to a sentiment label.

    Scores strictly above 0.5 are 'positive'; everything else is 'negative'.
    """
    if score > 0.5:
        return 'positive'
    return 'negative'
# Compare the ground-truth label against the model's prediction for review 0.
print('Labeled sentiment for this review is {}, predicted sentiment is {}'.format(get_sentiment(y_test[0]),
                                                                                  get_sentiment(results[0])))
# -
# Training deep learning models is a stochastic process, so your results may vary -- there is no guarantee that the predicted result will match the actual label. However, it is likely that the sentiment prediction agrees with the label for this review. Let's now examine another review:
# Decode and score a second test review (index 5) the same way.
second_decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in x_test[5]])
regex.sub('', second_decoded_review)
print('Labeled sentiment for this review is {}, predicted sentiment is {}'.format(get_sentiment(y_test[5]),
                                                                                  get_sentiment(results[5])))
# Again, it is likely (but not guaranteed) that the prediction agreed with the label for the test data. Note that there is no need to clean up any Batch Transform resources: after the transform job is complete, the cluster used to make inferences is torn down. Now that we've reviewed some sample predictions as a sanity check, this brief example is complete.
| notebooks/sentiment-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''base'': conda)'
# name: python373jvsc74a57bd0ea6d1940014d46e3b1cd608dc01a4850dba11d686206642561c1002c1523c34d
# ---
# ## Prepare Data
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# from imgaug import augmenters as iaa
import cv2
import pandas as pd
import ntpath
import random
datadir = 'sim_data'
columns = ['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed']
# The simulator log has no header row, so supply the column names explicitly.
data = pd.read_csv(os.path.join(datadir, 'driving_log.csv'), names = columns)
# Show full cell contents (image paths) without truncation. pandas >= 1.0
# deprecates — and pandas 2.x rejects — the old -1 sentinel; None is the
# supported spelling for "no limit".
pd.set_option('display.max_colwidth', None)
data.head()
def path_leaf(path):
    """Return the final path component (file name), Windows-path aware."""
    # ntpath.basename == ntpath.split(path)[1], handling both \ and / separators.
    return ntpath.basename(path)
# Reduce the absolute recorded paths to bare file names; images are resolved
# later relative to the local IMG directory.
data['center'] = data['center'].apply(path_leaf)
data['left'] = data['left'].apply(path_leaf)
data['right'] = data['right'].apply(path_leaf)
data.head()
# Histogram the steering angles and draw the per-bin cap used for balancing.
num_bins = 25
samples_per_bin = 400
hist, bins = np.histogram(data['steering'], num_bins)
# Bin centres for plotting (midpoint of each pair of bin edges).
center = (bins[:-1]+ bins[1:]) * 0.5
plt.bar(center, hist, width=0.05)
plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
print('total data:', len(data))
# +
# Balance the dataset: for each steering-angle bin, randomly drop rows in
# excess of samples_per_bin (straight-ahead driving dominates otherwise).
remove_list = []
for j in range(num_bins):
    list_ = []
    for i in range(len(data['steering'])):
        # NOTE(review): a value equal to a shared bin edge satisfies this
        # condition for two adjacent bins — confirm this is intended.
        if data['steering'][i] >= bins[j] and data['steering'][i] <= bins[j+1]:
            list_.append(i)
    # Shuffle so the rows removed from each bin are a random sample.
    list_ = shuffle(list_)
    list_ = list_[samples_per_bin:]
    remove_list.extend(list_)

print('removed:', len(remove_list))
data.drop(data.index[remove_list], inplace=True)
print('remaining:', len(data))
# -
# Re-plot the (now capped) steering-angle distribution.
hist, _ = np.histogram(data['steering'], (num_bins))
plt.bar(center, hist, width=0.05)
plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
print(data.iloc[1])
# +
def load_img_steering(datadir, df):
    """Build flat arrays of image paths and steering angles from a drive log.

    Each log row contributes three samples: the center image with its recorded
    steering angle, the left image with a +0.15 correction, and the right image
    with a -0.15 correction.

    Args:
        datadir: directory prepended to each image file name.
        df: DataFrame whose first four columns are center path, left path,
            right path, and steering angle.

    Returns:
        (image_paths, steerings): two parallel numpy arrays of length 3*len(df).
    """
    image_path = []
    steering = []
    # Bug fix: this previously iterated the module-level global `data`
    # instead of the `df` argument, so passing any other frame had no effect.
    for i in range(len(df)):
        indexed_data = df.iloc[i]
        # Positional access via .iloc — plain integer indexing on a labeled
        # Series is deprecated in recent pandas.
        center, left, right = indexed_data.iloc[0], indexed_data.iloc[1], indexed_data.iloc[2]
        angle = float(indexed_data.iloc[3])
        # center image append
        image_path.append(os.path.join(datadir, center.strip()))
        steering.append(angle)
        # left image append (steer-right correction)
        image_path.append(os.path.join(datadir, left.strip()))
        steering.append(angle + 0.15)
        # right image append (steer-left correction)
        image_path.append(os.path.join(datadir, right.strip()))
        steering.append(angle - 0.15)
    image_paths = np.asarray(image_path)
    steerings = np.asarray(steering)
    return image_paths, steerings
image_paths, steerings = load_img_steering(datadir + '/IMG', data)
# -

image_paths, steerings
# Hold out 20% of the samples for validation.
X_train, X_valid, y_train, y_valid = train_test_split(image_paths, steerings, test_size=0.2, random_state=6)
print('Training Samples: {}\nValid Samples: {}'.format(len(X_train), len(X_valid)))
# Check that train and validation steering distributions look similar.
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
axes[0].hist(y_train, bins=num_bins, width=0.05, color='blue')
axes[0].set_title('Training set')
axes[1].hist(y_valid, bins=num_bins, width=0.05, color='red')
axes[1].set_title('Validation set')
# ## Preprocessing Data
def img_preprocess(img):
    """Load an image file and transform it into the network input format.

    Crops to rows 60:135, converts RGB to YUV, applies a 3x3 Gaussian blur,
    resizes to 200x66, and scales pixel values into [0, 1].
    """
    frame = mpimg.imread(img)
    frame = frame[60:135,:,:]                   # keep only rows 60..134
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2YUV)
    frame = cv2.GaussianBlur(frame, (3,3), 0)
    frame = cv2.resize(frame, (200,66))
    return frame / 255
# +
# Visual sanity check: original frame vs the cropped/blurred/resized input.
image = image_paths[100]
original_image = mpimg.imread(image)
preprocessed_image = img_preprocess(image)

fig, axs = plt.subplots(1,2, figsize=(15,10))
fig.tight_layout()
axs[0].imshow(original_image)
axs[0].set_title('Original image')
axs[1].imshow(preprocessed_image)
axs[1].set_title('Preprocessed image')
# -

# Preprocess every image up front (loads the whole dataset into memory).
X_train = np.array(list(map(img_preprocess, X_train)))
X_valid = np.array(list(map(img_preprocess, X_valid)))

# Show one random preprocessed frame as a spot check.
plt.imshow(X_train[random.randint(0, len(X_train) - 1)])
plt.axis('off')
print(X_train.shape)
# ## NVIDIA Model
# +
def nvidia_model():
    """Build and compile the NVIDIA end-to-end driving CNN.

    Expects 66x200x3 preprocessed frames (see img_preprocess) and outputs a
    single steering-angle regression value, trained with MSE and Adam(1e-3).
    """
    layer_stack = [
        Conv2D(24, 5, 2, input_shape=(66, 200, 3), activation='elu'),
        Conv2D(36, 5, 2, activation='elu'),
        Conv2D(48, 5, 2, activation='elu'),
        Conv2D(64, 3, activation='elu'),
        Conv2D(64, 3, activation='elu'),
        Flatten(),
        Dense(100, activation='elu'),
        Dense(50, activation='elu'),
        Dense(10, activation='elu'),
        Dense(1),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='mse', optimizer=Adam(lr=1e-3))
    return model
model = nvidia_model()
print(model.summary())
# -

# Train for 30 epochs and plot the learning curves, then save the model.
history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid), batch_size=100, verbose=1, shuffle = 1)

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training', 'validation'])
plt.title('Loss')
plt.xlabel('Epoch')
model.save('model.h5')
| notebooks/behavioural-cloning-2021_04_25.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 11.982842, "end_time": "2020-08-10T23:09:02.280254", "exception": false, "start_time": "2020-08-10T23:08:50.297412", "status": "completed"} tags=[]
# !pip install -q efficientnet --quiet
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 9.034946, "end_time": "2020-08-10T23:09:11.334164", "exception": false, "start_time": "2020-08-10T23:09:02.299218", "status": "completed"} tags=[]
import numpy as np
import pandas as pd
import os
from tqdm.notebook import tqdm
import gc
import tensorflow as tf
from sklearn.model_selection import train_test_split
import pickle
from tensorflow.keras.callbacks import CSVLogger
import time
from efficientnet.tfkeras import center_crop_and_resize
from tensorflow_addons.losses import TripletSemiHardLoss
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from kaggle_datasets import KaggleDatasets
# + papermill={"duration": 17.450187, "end_time": "2020-08-10T23:09:28.803584", "exception": false, "start_time": "2020-08-10T23:09:11.353397", "status": "completed"} tags=[]
from model_semantic import MODEL_INPUT, linear_warmup, build_model_extractor, timecallback, Score_call
# + papermill={"duration": 4.490499, "end_time": "2020-08-10T23:09:33.313531", "exception": false, "start_time": "2020-08-10T23:09:28.823032", "status": "completed"} tags=[]
# Detect and initialise a TPU if one is available, otherwise fall back to
# the default (CPU / single-GPU) distribution strategy.
try:
    # TPU detection. No parameters necessary if TPU_NAME environment variable is
    # set: this is always the case on Kaggle.
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print('Running on TPU ', tpu.master())
except ValueError:
    tpu = None

if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    # Default distribution strategy in Tensorflow. Works on CPU and single GPU.
    strategy = tf.distribute.get_strategy()

print("REPLICAS: ", strategy.num_replicas_in_sync)
# + papermill={"duration": 0.030579, "end_time": "2020-08-10T23:09:33.364165", "exception": false, "start_time": "2020-08-10T23:09:33.333586", "status": "completed"} tags=[]
dataset_loader_path = '../input/dataset-loader-25/'
# + papermill={"duration": 0.426145, "end_time": "2020-08-10T23:09:33.810214", "exception": false, "start_time": "2020-08-10T23:09:33.384069", "status": "completed"} tags=[]
MODEL_TYPE = 'B5'
MODEL_ = 'efficientnet'
IMAGE_SIZE = MODEL_INPUT[MODEL_TYPE]
# Scale the global batch size with the number of TPU/GPU replicas.
BATCH_SIZE = 256 * strategy.num_replicas_in_sync
EPOCHS = 1
# Images sampled per landmark class within a batch.
NUMBER_OF_IMAGE = 16
CLASS_PER_BATCH = BATCH_SIZE // NUMBER_OF_IMAGE
EFF_NET_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)

# For tf.dataset
AUTO = tf.data.experimental.AUTOTUNE
# # Data access
GCS_DS_PATH = KaggleDatasets().get_gcs_path('landmark-retrieval-2020')
# + papermill={"duration": 2.268803, "end_time": "2020-08-10T23:09:36.099313", "exception": false, "start_time": "2020-08-10T23:09:33.830510", "status": "completed"} tags=[]
with open(dataset_loader_path + 'train_.pkl', 'rb') as f:
    train_ = pickle.load(f)

# Rewrite the stored local paths to point at the GCS copy of the dataset
# (drops the first 32 characters of the original path prefix).
train_['train_path'] = [GCS_DS_PATH + x[32:] for x in tqdm(train_.train_path)]
N_CLASS = train_.landmark_id.unique().shape[0]
IMAGE_PER_BATCH = N_CLASS * NUMBER_OF_IMAGE
STEPS_PER_EPOCH = N_CLASS // CLASS_PER_BATCH
# + papermill={"duration": 0.035983, "end_time": "2020-08-10T23:09:36.156675", "exception": false, "start_time": "2020-08-10T23:09:36.120692", "status": "completed"} tags=[]
def land_generator(dataset = train_, number_of_image = 4):
    """Endlessly yield (image_path, landmark_id) pairs.

    Landmark ids are visited in a (one-time) shuffled order; for each landmark,
    `number_of_image` image ids are sampled without replacement and their GCS
    paths are yielded one by one.
    """
    landmark_order = dataset["landmark_id"].unique()
    images_by_landmark = dataset.groupby("landmark_id")["id"].apply(list).to_dict()
    id_to_path = dataset.set_index("id")["train_path"].to_dict()
    shuffled_ids = landmark_order.copy()
    np.random.shuffle(shuffled_ids)
    while True:
        for landmark in shuffled_ids:
            candidates = images_by_landmark[landmark]
            sampled = np.random.choice(candidates, number_of_image, replace = False)
            for image_id in sampled:
                yield id_to_path[image_id], landmark
# + papermill={"duration": 8.152973, "end_time": "2020-08-10T23:09:44.331717", "exception": false, "start_time": "2020-08-10T23:09:36.178744", "status": "completed"} tags=[]
# Materialise one epoch's worth of (path, label) pairs from the generator.
path_ = []
label_ = []
for epoch in range(EPOCHS):
    gen = land_generator()
    for it in tqdm(range(IMAGE_PER_BATCH)):
        image_path, land = next(gen)
        path_.append(image_path)
        label_.append(land)
# + papermill={"duration": 0.266125, "end_time": "2020-08-10T23:09:44.620427", "exception": false, "start_time": "2020-08-10T23:09:44.354302", "status": "completed"} tags=[]
# Collect the sampled paths/labels into a frame for tf.data slicing below.
slicer = pd.DataFrame({'path': path_, 'label': label_})
# + papermill={"duration": 0.044611, "end_time": "2020-08-10T23:09:44.688285", "exception": false, "start_time": "2020-08-10T23:09:44.643674", "status": "completed"} tags=[]
def decode_image(filename, label, image_size = IMAGE_SIZE, crop_padding = 32):
    """Read a JPEG, centre-crop it, resize to (image_size, image_size, 3),
    and apply torch-style ImageNet preprocessing. Returns (image, label)."""
    #image bytes
    image_bytes = tf.io.read_file(filename)
    #extract shape and get crop window
    shape = tf.image.extract_jpeg_shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]
    # Size of the padded central crop: a fraction of the shorter side,
    # shrunk by crop_padding relative to image_size.
    padded_center_crop_size = tf.cast(
        ((image_size / (image_size + crop_padding)) *
         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
        tf.int32)
    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    crop_window = tf.stack([offset_height, offset_width,
                            padded_center_crop_size, padded_center_crop_size])
    #decode and crop image --> resize
    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
    image = tf.image.resize([image], [image_size, image_size], method = tf.image.ResizeMethod.BICUBIC)[0]
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(image, dtype = tf.float32)
    #ensure pixel are [0, 255]
    image = tf.clip_by_value(image, 0.0, 255.0)
    image = tf.cast(image, dtype = tf.float32)
    #imagenet pre process
    image = preprocess_input(image, mode = 'torch')
    return(image, label)
# + papermill={"duration": 0.920681, "end_time": "2020-08-10T23:09:45.631750", "exception": false, "start_time": "2020-08-10T23:09:44.711069", "status": "completed"} tags=[]
# Build the input pipeline: decode JPEGs in parallel, batch, and prefetch.
training_generator = (
    tf.data.Dataset.
    from_tensor_slices((slicer['path'], slicer['label']))
    .map(decode_image, num_parallel_calls = AUTO)
    .repeat(EPOCHS)
    .batch(BATCH_SIZE)
    .prefetch(AUTO)
)

ignore_order = tf.data.Options()
# NOTE(review): True KEEPS deterministic ordering; the previous comment
# ("disable order, increase speed") would require False here — confirm intent.
ignore_order.experimental_deterministic = True
training_generator = training_generator.with_options(ignore_order)
# + papermill={"duration": 0.034479, "end_time": "2020-08-10T23:09:45.695135", "exception": false, "start_time": "2020-08-10T23:09:45.660656", "status": "completed"} tags=[]
# Linear-warmup learning-rate schedule over the whole run.
total_step = EPOCHS * STEPS_PER_EPOCH
initial_lr = 0.001
num_warmup_steps = 61# (STEPS_PER_EPOCH * EPOCHS) //10
optimizer_warmup = linear_warmup(init_lr = initial_lr, num_train_steps = total_step, num_warmup_steps = num_warmup_steps)
# + papermill={"duration": 47.305359, "end_time": "2020-08-10T23:10:33.024410", "exception": false, "start_time": "2020-08-10T23:09:45.719051", "status": "completed"} tags=[]
# Build the feature extractor inside the distribution strategy scope so its
# variables are placed on the TPU/GPU replicas.
with strategy.scope():
    model = build_model_extractor(
        optimizer_warmup, EFF_NET_SHAPE, TripletSemiHardLoss(),
        model = MODEL_, modeltype = 'B5', weights = 'noisy-student', trainable = False)
# + papermill={"duration": 6.336514, "end_time": "2020-08-10T23:10:39.399834", "exception": false, "start_time": "2020-08-10T23:10:33.063320", "status": "completed"} tags=[]
# Resume from previously trained embedding weights.
model.load_weights('../input/effb5-block-extractor-4/efficientnetB5_embedding_model.hdf5')
# + papermill={"duration": 0.07771, "end_time": "2020-08-10T23:10:39.515272", "exception": false, "start_time": "2020-08-10T23:10:39.437562", "status": "completed"} tags=[]
# Inspect the learned GeM pooling exponents before training.
for i in [2, 3, 4, 5, 6]:
    print('Gem {}; Value: {}'.format(i, model.get_layer(f'gem_block_{i}').p.numpy()[0]))
# + papermill={"duration": 8933.613268, "end_time": "2020-08-11T01:39:33.163071", "exception": false, "start_time": "2020-08-10T23:10:39.549803", "status": "completed"} tags=[]
# Train the extractor with the triplet loss.
history = model.fit(
    training_generator,
    batch_size=BATCH_SIZE, epochs=EPOCHS,
    steps_per_epoch = STEPS_PER_EPOCH,
    verbose = 1
)
# + papermill={"duration": 46.228141, "end_time": "2020-08-11T01:40:19.482831", "exception": false, "start_time": "2020-08-11T01:39:33.254690", "status": "completed"} tags=[]
# Re-inspect the GeM pooling exponents after training.
for i in [2, 3, 4, 5, 6]:
    print('Gem {}; Value: {}'.format(i, model.get_layer(f'gem_block_{i}').p.numpy()[0]))
# + papermill={"duration": 4.298322, "end_time": "2020-08-11T01:40:23.864261", "exception": false, "start_time": "2020-08-11T01:40:19.565939", "status": "completed"} tags=[]
# Persist the trained embedding weights.
model.save_weights(f'{MODEL_}{MODEL_TYPE}_embedding_model.hdf5')
# + papermill={"duration": 131.190123, "end_time": "2020-08-11T01:42:35.136825", "exception": false, "start_time": "2020-08-11T01:40:23.946702", "status": "completed"} tags=[]
# Also export the full model in TensorFlow SavedModel format.
# !mkdir -p saved_model
model.save('saved_model/my_model')
| Google Landmark Retrieval 2020/src/tpu-eff-b5-block-extractor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
RANDOM_SEED = 42
# -
# # Path settings
dataset = 'model/point_history_classifier/point_history.csv'
model_save_path = 'model/point_history_classifier/point_history_classifier.hdf5'

# # Number of classes
NUM_CLASSES = 4

# # Input length
TIME_STEPS = 16
DIMENSION = 2  # values per time step (presumably x/y coordinates — confirm)

# # Load training data
# Column 0 is the class label; the remaining TIME_STEPS * DIMENSION columns
# are the flattened point history.
X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (TIME_STEPS * DIMENSION) + 1)))
y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))
X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED)
# # モデル構築
# +
# Two alternative architectures: an LSTM over the (TIME_STEPS, DIMENSION)
# sequence, or a plain fully-connected network on the flattened input.
use_lstm = False
model = None

if use_lstm:
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )),
        tf.keras.layers.Reshape((TIME_STEPS, DIMENSION), input_shape=(TIME_STEPS * DIMENSION, )),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.LSTM(16, input_shape=[TIME_STEPS, DIMENSION]),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ])
else:
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ])
# -
model.summary()  # tf.keras.utils.plot_model(model, show_shapes=True)

# Model-checkpoint callback: save the full model after each epoch.
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    model_save_path, verbose=1, save_weights_only=False)
# Early-stopping callback.
es_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)

# Compile the model.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# # Model training
model.fit(
    X_train,
    y_train,
    epochs=1000,
    batch_size=128,
    validation_data=(X_test, y_test),
    callbacks=[cp_callback, es_callback]
)

# Reload the model saved by the checkpoint callback.
model = tf.keras.models.load_model(model_save_path)

# Inference smoke test on one held-out sample.
predict_result = model.predict(np.array([X_test[0]]))
print(np.squeeze(predict_result))
print(np.argmax(np.squeeze(predict_result)))
# # 混同行列
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
def print_confusion_matrix(y_true, y_pred, report=True):
    """Plot a confusion-matrix heatmap and optionally print a classification report.

    Args:
        y_true: iterable of ground-truth class labels.
        y_pred: iterable of predicted class labels (same length as y_true).
        report: if True, also print sklearn's classification report.
    """
    labels = sorted(list(set(y_true)))
    cmx_data = confusion_matrix(y_true, y_pred, labels=labels)
    df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)

    fig, ax = plt.subplots(figsize=(7, 6))
    sns.heatmap(df_cmx, annot=True, fmt='g', square=False)
    # Flip the y-axis so the matrix reads top-down in label order.
    ax.set_ylim(len(labels), 0)
    plt.show()

    if report:
        print('Classification Report')
        # Bug fix: this previously reported against the module-level global
        # y_test instead of the y_true argument, so the report was wrong for
        # any other input.
        print(classification_report(y_true, y_pred))
# Predict on the hold-out split and show the confusion matrix.
Y_pred = model.predict(X_test)
y_pred = np.argmax(Y_pred, axis=1)
print_confusion_matrix(y_test, y_pred)
# -
# # Convert the model for TensorFlow Lite
# Save an inference-only copy (optimizer state stripped).
model.save(model_save_path, include_optimizer=False)
model = tf.keras.models.load_model(model_save_path)

tflite_save_path = 'model/point_history_classifier/point_history_classifier.tflite'

# +
# Convert the model (with default optimization/quantization).
converter = tf.lite.TFLiteConverter.from_keras_model(model)  # converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quantized_model = converter.convert()

open(tflite_save_path, 'wb').write(tflite_quantized_model)
# -

# # Inference test
interpreter = tf.lite.Interpreter(model_path=tflite_save_path)
interpreter.allocate_tensors()

# Get input and output tensor details.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)

interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]]))

# %%time
# Run inference.
interpreter.invoke()
tflite_results = interpreter.get_tensor(output_details[0]['index'])

print(np.squeeze(tflite_results))
print(np.argmax(np.squeeze(tflite_results)))
| point_history_classification.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sos
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SoS
# language: sos
# name: sos
# ---
# + [markdown] kernel="SoS" tags=[]
# # Factor analysis using PEER
# + [markdown] kernel="Bash" tags=[]
# ## Overview
#
# This workflow performs [Probabilistic Estimation of Expression Residuals (PEER) method](https://github.com/PMBio/peer/wiki/Tutorial), a method also used for GTEx eQTL data analysis.
# + [markdown] kernel="Bash" tags=[]
# ## Input
#
# 1. Molecular phenotype in `bed.gz` file format with indexing:
# + kernel="R" tags=[]
readr::read_delim("data/example_data.bed.gz",show_col_types= F)[1:3,1:8]
# + [markdown] kernel="SoS"
# 2. A covariate file in text format with the first column `#id` that lists the name of all covariates, followed by other columns each being a sample with sample names matching that in the molecular phenotype file:
# + kernel="R"
readr::read_delim("data/example_cov.txt",show_col_types= F)[,1:8]
# + [markdown] kernel="R"
# Covariates file is optional to the PEER method but it should be supplied in order to construct a proper PEER model in the context of xQTL mapping.
# + [markdown] kernel="Markdown" tags=[]
# ## Outputs
#
# Output prefix will be derived from the phenotype and covariate files unless `--name` parameter is specified.
#
# ### Main outputs
#
# * `{name}.PEER.cov`: result for peer factor covariates
#
# ### Other outputs
#
# * A diagnostic plot for checking convergence
# * peer precision /residuals /weights
# + [markdown] kernel="Markdown" tags=[]
# ## Number of factors to use
#
# By default the pipeline applies recommendations from [GTEx](https://gtexportal.org/home/documentationPage):
#
# > A set of covariates identified using the Probabilistic Estimation of Expression Residuals (PEER) method (Stegle et al., PLoS Comp. Biol., 2010 ), calculated for the normalized expression matrices (described below). For eQTL analyses, the number of PEER factors was determined as function of sample size (N): 15 factors for N<150, 30 factors for 150≤ N<250, 45 factors for 250≤ N<350, and 60 factors for N≥350, as a result of optimizing for the number of eGenes discovered. For sQTL analyses, 15 PEER factors were computed for each tissue.
#
# Although one can specify fixed number of factors using `--N` option in this pipeline.
# + [markdown] kernel="SoS"
# ## Minimal working example
# + kernel="Bash" tags=[]
sos run PEER_factor.ipynb PEER \
--cwd output \
--phenoFile data/peer_example.bed.gz \
--covFile data/peer_cov.txt \
--name MWE.Cov_PEER \
--container container/PEER.sif
# + kernel="Bash"
tree ./output
# + kernel="SoS"
# %preview ./output/MWE.Cov_PEER.PEER_diagnosis.pdf -s png
# + kernel="Bash"
cat ./output/MWE.Cov_PEER.PEER.cov.stdout
# + [markdown] kernel="Bash"
# ## MWE with PCA input
# In our pipeline, the PCA results are included as part of the covariate input. This is achieved with the merge_covariate step shown below.
# + kernel="Bash"
sos run PEER_factor.ipynb merge_covariate \
--cwd output \
--pcaFile AC.for_pca.mol_phe.AC.pca.projected.rds \
--covFile AC.APEX.cov \
--name demo \
--container PEER.sif -J 1 -c csg.yml -q csg &
nohup sos run PEER_factor.ipynb PEER \
--cwd output \
--phenoFile AC.mol_phe.annotated.bed.gz \
--covFile output/AC.APEX.pca.cov.gz \
--name demo -N 3 \
--container PEER.sif -J 1 -c csg.yml -q csg &
# + kernel="Bash"
# + [markdown] jupyter={"outputs_hidden": true} kernel="Bash" tags=[]
# ## Command interface
# + kernel="Bash"
sos run PEER_factor.ipynb -h
# + [markdown] kernel="R"
# ## Implementation
# + kernel="SoS" tags=[]
[global]
# The output directory for generated files. MUST BE FULL PATH
parameter: cwd = path
# The covariate file
parameter: covFile = path("./")
# For cluster jobs, number commands to run per job
parameter: job_size = 1
# Wall clock time expected
parameter: walltime = "5h"
# Memory expected
parameter: mem = "16G"
# Number of threads
parameter: numThreads = 8
# Software container option
parameter: container = ""
# Output file prefix; derived from the input file names when left empty
parameter: name = ""
# Number of PEER factors. If not specified, or specified as 0, the default
# values suggested by GTEx (based on sample size) will be used
parameter: N = 0
# Default values from PEER software:
## The maximum number of iterations
parameter: max_iter = 1000
## Prior parameters
parameter: Alpha_a = 0.001
parameter: Alpha_b = 0.1
parameter: Eps_a = 0.1
parameter: Eps_b = 10.0
## Tolerance parameters
parameter: tol = 0.001
parameter: var_tol = 0.00001
# + kernel="SoS" tags=[]
[PEER]
# The molecular phenotype matrix
parameter: phenoFile = path
# Derive the output prefix from the phenotype and covariate file names
# when --name was not given.
if len(name) == 0:
    name = f"{phenoFile:bn}.{covFile:bn}"
input: phenoFile, covFile
output: f'{cwd:a}/{name}.PEER.cov.gz', f'{cwd:a}/{name}.PEER_diagnosis.pdf'
task: trunk_workers = 1, walltime = walltime, mem = mem, cores = numThreads, tags = f'{step_name}_{_output[0]:bn}'
R: container=container, expand= "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    library(peer)
    #### PLOTTING FUNCTIONS: ####
    ## Adapted from the source code of package: https://github.com/PMBio/peer/blob/40bc4b2cd92459ce42f44dfe279717436395f3f6/R/peer/R/plot.R
    ## Draws the convergence diagnostics: lower bound + residual variance per
    ## iteration, and the inverse variance (precision) of each factor's weights.
    PEER_plotModel_adapted <- function(model){
        par(mfrow=c(2,1))
        bounds = PEER_getBounds(model)
        vars = PEER_getResidualVars(model)
        plot(bounds, type="l", col="red", lwd=2, xlab="Iterations", ylab="Lower bound")
        par(new=TRUE)
        plot(vars,,type="l",col="blue",xaxt="n",yaxt="n",xlab="",ylab="")
        axis(4)
        mtext("Residual variance",side=4,line=3)
        legend("right",col=c("red","blue"),lty=1,legend=c("Lower bound","Residual variance"))
        alpha = PEER_getAlpha(model)
        plot(alpha,xlab="Factors",ylab="Inverse variance of factor weights", type="b", col="blue", lwd=4, xaxp=c(1,length(alpha), length(alpha)-1))
    }
    ## Write a matrix as a tab-delimited table with a named index column.
    WriteTable <- function(data, filename, index.name) {
        datafile <- file(filename, open = "wt")
        on.exit(close(datafile))
        header <- c(index.name, colnames(data))
        writeLines(paste0(header, collapse = "\t"), con = datafile, sep = "\n")
        write.table(data, datafile, sep = "\t", col.names = F, quote = F)
    }
    #### MAIN ####
    # Start analysis:
    model <- PEER()
    cat("PEER: loading input data... ")
    # Phenotype bed: first 4 columns are annotation; samples start at column 5.
    df <- read.delim(${_input[0]:ar}, check.names = F)
    mtx = df[,5:ncol(df)]
    rownames(mtx) = df$gene_ID
    # Load covariate file:
    pc <- "${_input[1] if _input[1].is_file() else ''}"
    if(pc != ""){
        cat("PEER: loading covariate file ...")
        cov.mat <- read.delim(pc, row.names = 1, check.names = F)
        # Restrict both matrices to the samples they share.
        common_name <- intersect(names(cov.mat),names(mtx))
        if(length(common_name) ==0){
            stop("No common samples between covariate and phenotype files! ")
        }else{
            mtx <- mtx[,common_name]
            cov.mat <- cov.mat[,common_name]
            M <- t(as.matrix(mtx, rownames = T))
            cov.mat <- t(as.matrix(cov.mat))
            cat("done!\n")
            cat("PEER: Input summary:",dim(M)[1], "samples with",dim(M)[2], "genes and", dim(cov.mat)[2],"covariates \n")
            # NOTE(review): PEER_setCovariates is called again further below;
            # confirm the duplicate call is intentional/harmless.
            invisible(PEER_setCovariates(model, cov.mat))
        }
    }else{
        cov.mat <- NULL
        M <- t(as.matrix(mtx, rownames = T))
        cat("PEER: Input summary:",dim(M)[1], "samples with",dim(M)[2], "genes \n")
    }
    # Suggest the number of factors to use if no input value:
    # GTEx recommendation by sample size (15/30/45/60).
    if(${N} == 0) {
        # Use suggestion
        if(dim(M)[1] < 150){
            num_factor = 15
        } else {
            if(dim(M)[1] < 250) {
                num_factor = 30
            } else {
                if(dim(M)[1] < 350) {
                    num_factor = 45
                } else {
                    num_factor = 60
                }
            }
        }
    } else {
        num_factor = ${N}
    }
    # run PEER
    cat(paste0("Setting initialization parameters ..."))
    invisible(PEER_setNk(model, num_factor))
    invisible(PEER_setPhenoMean(model, M))
    invisible(PEER_setPriorAlpha(model,${Alpha_a},${Alpha_b}))
    invisible(PEER_setPriorEps(model,${Eps_a}, ${Eps_b}))
    invisible(PEER_setTolerance(model, ${tol}))
    invisible(PEER_setVarTolerance(model, ${var_tol}))
    invisible(PEER_setNmax_iterations(model, ${max_iter}))
    if(!is.null(cov.mat)){
        invisible(PEER_setCovariates(model, cov.mat))
    }
    cat("Done.\n")
    cat(paste0("PEER: estimating hidden confounders (", num_factor, ")\n"))
    time <- system.time(PEER_update(model))
    # Add relevant row/column names
    factor.mat <- PEER_getX(model) # samples x PEER factors
    weight.mat <- PEER_getW(model) # omic features x PEER factors
    precision.mat <- PEER_getAlpha(model) # PEER factors x 1
    resid.mat <- t(PEER_getResiduals(model)) # omic features x samples
    peer.var.names <- paste0("PEER_", 1:ncol(factor.mat))
    rownames(factor.mat) <- rownames(M)
    colnames(factor.mat) <- peer.var.names
    colnames(weight.mat) <- peer.var.names
    rownames(weight.mat) <- colnames(M)
    rownames(precision.mat) <- peer.var.names
    colnames(precision.mat) <- "alpha"
    precision.mat <- as.data.frame(precision.mat)
    # Relevance is the inverse precision (larger = more important factor).
    precision.mat$relevance <- 1.0 / precision.mat$alpha
    rownames(resid.mat) <- colnames(M)
    colnames(resid.mat) <- rownames(M)
    cat("Exporting results ... ")
    # Diagnosis plot:
    pdf("${_output[1]}")
    invisible(PEER_plotModel_adapted(model))
    invisible(dev.off())
    # NOTE(review): the next line computes a transpose whose result is
    # discarded — it appears to be dead code.
    t(factor.mat)
    # Write 4 numeric results
    WriteTable(t(factor.mat), file.path("${_output[0]:an}"), "#id")
    WriteTable(weight.mat, file.path(${cwd:ar}, "${_output[0]:bn}.peer_weights.txt"), "#id")
    WriteTable(precision.mat, file.path(${cwd:ar}, "${_output[0]:bn}.peer_precisions.txt"), "#id")
    WriteTable(resid.mat, file.path(${cwd:ar}, "${_output[0]:bn}.peer_residuals.txt"), "#id")
    cat("Done.\n")
bash: container=container, expand= "${ }", stderr = f'{_output[0]}.stderr', stdout = f'{_output[0]}.stdout'
    gzip -f ${_output[0]:n}
# + [markdown] kernel="Bash"
# ## Reference:
# * Codes are adapted from [here](https://github.com/RTIInternational/biocloud_docker_tools/blob/master/peer/v1.3/run_peer.R)
# * The GTEx recommendation for the number of PEER factors is [here](https://gtexportal.org/home/documentationPage)
# * Examples by PEER is at [github](https://github.com/PMBio/peer/wiki/Tutorial)
| pipeline/PEER_factor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py3-ada
# language: python
# name: py3-ada
# ---
# # Linking Education and Multistate Employment Data
#
# ### Table of Contents
# 1. [Introduction](#Introduction)
# 2. [Setting up a Database Connection](#Database-Connection)
# 3. [Education Queries ](#Writing-Our-Education-Database-Query)
# 4. [Identifying People in Multiple Datasets](#Linking-Ohio-Education-and-Employment-Microdata)
# 5. [Explore the Connected Data](#Exploratory-Data-Analysis)
# 6. [Identifying People Across Multiple States](#Linking-Ohio-Education-and-Multistate-Employment-Microdata)
# 7. [Additional Data Exploration:Multistate Wages](#Additional-Exploratory-Data-Analysis)
# 8. [Exploring Sources of Errors and Inference](#Exploring-Sources-of-Error-and-Inference)
#
# ## Introduction
# - Back to [Table of Contents](#Table-of-Contents)
#
# In this notebook, we use Ohio administrative data on earned education degrees. We will identify a specific type of education program and select a specific cohort (a group of students who graduated in the same year).
#
# Next, we see if we can find employment outcomes for this group of students. Here, we have to think of the type of employment outcome we are interested in. How many years after graduation and when do we identify someone as being employed?
#
# In addition, we have to think about where to find these employment outcomes. We can investigate the Ohio administrative data on employment, but we can also investigate if students became employed in some other states.
#
# By going through this notebook, you get an idea of all the choices you make during such a process, and of how these decisions influence the outcomes you will find.
# We start by loading the packages we are going to use:
# %pylab inline
import pandas as pd
import psycopg2
from sqlalchemy import create_engine
import time
# ## Database Connection
# First we set our database connection parameters
#
# - Back to [Table of Contents](#Table-of-Contents)
# Connection parameters for the shared ADRF "appliedda" PostgreSQL database.
db_name = "appliedda"
hostname = "10.10.2.10"
# Next, we set our database connections, and we use the psycopg2 module so we can more easily execute queries without returning data:
# psycopg2 (rather than pandas alone) lets us run DDL such as CREATE TEMP TABLE
# through `cursor` without fetching rows; `conn` is reused by pd.read_sql below.
conn = psycopg2.connect(database=db_name, host=hostname)
cursor = conn.cursor()
# ## Writing Our Education Database Query
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# Note that with this query, we identify and select only persons who obtained a education degree with 'cip' = '23', which equates to a bachelor's degree in English. In addition, we only include persons who obtained their degree in 2010. When you are finished with going through this notebook, you can come back to this query and play around with selecting other degrees or other cohorts.
# Here we use list comprehension to shorten the list of table names in our query text
# +
# The oh_hei table stores degrees in wide, per-term numbered columns named
# degcert_<term>_inst1_<field>_<i>; the number of numbered columns differs per
# academic term (autumn/summer/spring/winter). A small helper replaces the
# original four-way copy/paste per field.
_TERM_COUNTS = [('au', 16), ('sm', 11), ('sp', 17), ('wi', 10)]

def _degcert_columns(field):
    """Return the ordered list of degcert column names for `field`
    ('level', 'subject' or 'term_earned') across all four terms."""
    return ['degcert_{}_inst1_{}_{}'.format(term, field, i)
            for term, count in _TERM_COUNTS
            for i in range(1, count + 1)]

# Same public names and values as before: the raw column lists, plus
# comma-joined, ::text-cast versions for interpolation into the SQL below.
level_list = _degcert_columns('level')
level_cols = ','.join([l + '::text' for l in level_list])

subject_list = _degcert_columns('subject')
subject_cols = ','.join([s + '::text' for s in subject_list])

term_list = _degcert_columns('term_earned')
term_cols = ','.join([t + '::text' for t in term_list])
# +
# Identify:
# - (From Ohio state public institutions)
# - English (CIP 23)
# - Both 1st and 2nd bachelors
# - Multiple simultaneous English language diplomas not included
# - This is only cohort that obtained diploma in 2010
SQL_EDU = '''
CREATE TEMP TABLE cohort_2010 AS
SELECT DISTINCT ON (key_id) key_id, format('%s-%s-1', deg_year, deg_term::int*3-2)::date yr_q
FROM (SELECT key_id, file_year AS "deg_year",
UNNEST(array[{TERM_COLS}]) AS "deg_term",
UNNEST(array[{SUBJECT_COLS}]) AS "cip",
UNNEST(array[{LEVEL_COLS}]) AS "degr_lvl"
FROM data_ohio_olda_2018.oh_hei
WHERE file_year = '2010'
) q
WHERE deg_term IS NOT NULL
AND LEFT(cip, 2)='23'
AND (degr_lvl ='5' OR
degr_lvl ='22')
ORDER BY key_id, deg_term, deg_year;
COMMIT;
'''.format(TERM_COLS = term_cols, SUBJECT_COLS = subject_cols, LEVEL_COLS = level_cols)
cursor.execute(SQL_EDU)
# -
# We just generated a SQL query to select our group of interest and stored it in a TEMP TABLE. We now load it into a panda dataframe so that we are able to investigate it in a bit more detail.
# Read in the 2010 cohort
# Pull the TEMP table into pandas so we can inspect the cohort interactively.
SQL_LOAD_EDU = '''
SELECT *
FROM cohort_2010
'''
df_edu = pd.read_sql(SQL_LOAD_EDU, conn)
# How many people (or actually 'degrees') are in our cohort? And how many variables
df_edu.shape
# What do the variables mean?
df_edu.head()
# Are there persons in here who obtained multiple degrees?
# Because of DISTINCT ON (key_id) above, this should equal the row count.
df_edu['key_id'].nunique()
# ## Linking Ohio Education and Employment Microdata
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# As you saw in the 'education dataset' that we just generated, persons are identified with 'key_id'. As this is an Ohio-specific identification method, we also find the corresponding SSN numbers. Apparently these are hashed, so we link the corresponding values for 'ssn_hash'. The 'ssn_hash' values corresponding to 'key_id' are found in 'data_ohio_olda_2018.oh_person'. We save this in a TEMP TABLE so that we can use it later in other SQL queries.
# Attach hashed SSNs to the cohort via the key_id -> ssn_hash crosswalk.
# LEFT JOIN keeps cohort members even when no crosswalk row exists
# (those rows get a NULL ssn_hash).
SQL_EDU_SSN_2 = """
CREATE TEMP TABLE cohort_2010_EDU_SSN AS
SELECT a.key_id, a.yr_q, b.ssn_hash
FROM cohort_2010 a
LEFT JOIN data_ohio_olda_2018.oh_person b
ON a.key_id = b.key_id;
COMMIT;
"""
cursor.execute(SQL_EDU_SSN_2)
# To check if the linking worked, we also load it into a pandas dataframe:
SQL_EDU_SSN_test = """
SELECT a.key_id, a.yr_q, b.ssn_hash
FROM cohort_2010 a
LEFT JOIN data_ohio_olda_2018.oh_person b
ON a.key_id = b.key_id;
"""
df_edu_ssn = pd.read_sql(SQL_EDU_SSN_test, conn)
# Distinct hashed SSNs actually matched (NaN/NULL is not counted by nunique).
df_edu_ssn['ssn_hash'].nunique()
# Now that we have identified a cohort of students of interest, and linked their SSN numbers, we can start by looking for the employment outcomes for these students.
#
# We will use a very specific definition for employment here, namely 'full quarter employment one year after graduation'. Full quarter employment means that a person is employed for at least one full quarter. In order to meet this requirement, there should also be an employment registration for this person at the same employer in the previous and subsequent quarters.
#
# As our cohort consists of people who graduated in quarters 1, 2, 3 and 4 in 2010, we are interested in employment registrations in quarters 1, 2, 3 and 4 in 2011, but to identify the 'full' quarters, we also need employment registrations of 2010 quarter 4 and 2012 quarter 1.
#
# We start by identifying all quarters within this period from the administrative data from Ohio:
SQL_cohort_OH = '''
CREATE TEMP TABLE cohort_OH AS
SELECT *,
format('%s-%s-1', year, quarter*3-2)::date job_yrq
FROM data_ohio_olda_2018.oh_ui_wage_by_employer a
WHERE (year = 2011 OR (year = 2010 AND quarter = 4)
OR (year = 2012 AND quarter = 1))
AND key_id IN (SELECT key_id FROM cohort_2010_EDU_SSN);
COMMIT;
'''
cursor.execute(SQL_cohort_OH)
# Next, for all the selected quarters, we identify which meet the 'full quarter' requirement:
# 'Full quarter' filter via a triple self-join: keep quarter a only when the
# same person has a record at the same employer in the following quarter (b,
# a = b - 3 months) AND the preceding quarter (c, a = c + 3 months).
SQL_EMP_2_OH = """
CREATE TEMP TABLE cohort_OH_full AS
SELECT a.*
from cohort_OH a, cohort_OH b, cohort_OH c
where a.key_id=b.key_id
AND a.key_id=c.key_id
AND a.employer = b.employer
AND a.employer = c.employer
AND a.job_yrq = (b.job_yrq - '3 month'::interval)::date
AND a.job_yrq = (c.job_yrq + '3 month'::interval)::date;
commit;
"""
cursor.execute(SQL_EMP_2_OH)
# As this 'Ohio employment' dataset we just generated also doesn't contain SSN's, we again link them:
# The Ohio wage data identifies people by key_id only, so attach ssn_hash via
# the same person crosswalk used for the education cohort.
SQL_EMP_3_OH = """
CREATE TEMP TABLE cohort_OH_full_SSN AS
SELECT a.*, b.ssn_hash
FROM cohort_OH_full a
LEFT JOIN data_ohio_olda_2018.oh_person b
ON a.key_id = b.key_id;
COMMIT;
"""
cursor.execute(SQL_EMP_3_OH)
# This 'employment dataset' we generated only contains employment information from Ohio. We are going to look at other states for employment information as well, and in the final dataset we would like to have a variable identifying from which state the employment information originated (so in which state this person is actually employed). Therefore, we now generate a variable 'state' which all has the state code '39' for the Ohio people:
# Tag every Ohio row with its FIPS state code ('39' = Ohio) so state of
# employment survives the multistate UNION later on.
SQL_EMP_4_OH = """
ALTER TABLE cohort_OH_full_SSN
ADD state text DEFAULT '39';
commit;
"""
cursor.execute(SQL_EMP_4_OH)
# ## Exploratory Data Analysis
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# We now investigate how the dataset we just generated looks like:
# Load the Ohio full-quarter employment records (with SSNs) for inspection.
SQL_OH = '''
SELECT *
FROM cohort_OH_full_SSN
'''
df_OH = pd.read_sql(SQL_OH, conn)
df_OH.head()
# Bug fix: the original `print('...'), df_OH['key_id'].nunique()` built a
# throwaway tuple, so the message was printed without the count (and in script
# mode the count was never shown at all). Print message and count together.
print('the number of individual graduates represented in the Ohio dataset is',
      df_OH['key_id'].nunique())
df_OH.shape
# ## Linking Ohio Education and Multistate Employment Microdata
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# So apparently this dataset contains 2938 quarters with employment information from 918 individuals. Note that at this point, we did not merge the education information to the employment information yet. So these are just all the quarters meeting our definition for the people in the cohort.
# Next, we are going to see if we can also identify persons of this cohort in employment registrations in other states. We start by Missouri. Here, we use the SSN's of the Ohio education cohort to identify persons in the Missouri employment dataset, and we use the exact same definition of 'full quarter employment'.
SQL_cohort_MO = '''
CREATE TEMP TABLE cohort_MO AS
SELECT *,
format('%s-%s-1', year, quarter*3-2)::date job_yrq
FROM kcmo_lehd.mo_wage a
WHERE (year = 2011 OR (year = 2010 AND quarter = 4)
OR (year = 2012 AND quarter = 1))
AND SSN IN (SELECT ssn_hash FROM cohort_2010_EDU_SSN);
COMMIT;
'''
cursor.execute(SQL_cohort_MO)
# Also for Missouri, we identify which of the quarters meet the 'full quarter' requirement.
SQL_cohort_MO_full = """
CREATE TEMP TABLE cohort_MO_full AS
SELECT a.*
from cohort_MO a, cohort_MO b, cohort_MO c
where a.ssn=b.ssn
AND a.ssn=c.ssn
AND a.empr_no = b.empr_no
AND a.empr_no = c.empr_no
AND a.job_yrq = (b.job_yrq - '3 month'::interval)::date
AND a.job_yrq = (c.job_yrq + '3 month'::interval)::date;
commit;
"""
cursor.execute(SQL_cohort_MO_full)
# And we check how the Missouri dataset we just generated looks like. As you can see here, SSN is already in here as well as a 'state' variable, indicating this data originates from Missouri.
# Check the Missouri set
SQL_MO = '''
SELECT *
FROM cohort_MO_full
'''
df_MO = pd.read_sql(SQL_MO, conn)
df_MO.head()
# We now apply the same steps on data from the state Illinois:
SQL_cohort_IL_full = '''
CREATE TEMP TABLE cohort_IL AS
SELECT *,
format('%s-%s-1', year, quarter*3-2)::date job_yrq
FROM il_des_kcmo.il_wage a
WHERE (year = 2011 OR (year = 2010 AND quarter = 4)
OR (year = 2012 AND quarter = 1))
AND SSN IN (SELECT ssn_hash FROM cohort_2010_EDU_SSN);
COMMIT;
'''
cursor.execute(SQL_cohort_IL_full)
SQL_EMP_2_IL = """
CREATE TEMP TABLE cohort_IL_full AS
SELECT a.*
from cohort_IL a, cohort_IL b, cohort_IL c
where a.ssn=b.ssn
AND a.ssn=c.ssn
AND a.empr_no = b.empr_no
AND a.empr_no = c.empr_no
AND a.job_yrq = (b.job_yrq - '3 month'::interval)::date
AND a.job_yrq = (c.job_yrq + '3 month'::interval)::date;
commit;
"""
cursor.execute(SQL_EMP_2_IL)
SQL_IL = '''
SELECT *
FROM cohort_IL_full
'''
df_IL= pd.read_sql(SQL_IL, conn)
df_IL.head()
# And the state of Indiana...
SQL_cohort_IN_full = '''
CREATE TEMP TABLE cohort_IN AS
SELECT *,
format('%s-%s-1', year, quarter*3-2)::date job_yrq
FROM in_data_2019.wages_by_employer a
WHERE (year = 2011 OR (year = 2010 AND quarter = 4)
OR (year = 2012 AND quarter = 1))
AND SSN IN (SELECT ssn_hash FROM cohort_2010_EDU_SSN);
COMMIT;
'''
cursor.execute(SQL_cohort_IN_full)
SQL_EMP_2_IN = """
CREATE TEMP TABLE cohort_IN_full AS
SELECT a.*
from cohort_IN a, cohort_IN b, cohort_IN c
where a.ssn=b.ssn
AND a.ssn=c.ssn
AND a.fein = b.fein
AND a.fein = c.fein
AND a.job_yrq = (b.job_yrq - '3 month'::interval)::date
AND a.job_yrq = (c.job_yrq + '3 month'::interval)::date;
commit;
"""
cursor.execute(SQL_EMP_2_IN)
SQL_IN = '''
SELECT *
FROM cohort_IN_full
'''
df_IN= pd.read_sql(SQL_IN, conn)
df_IN.head()
SQL_EMP_4_IN = """
ALTER TABLE cohort_IN_full
ADD state text DEFAULT '18';
commit;
"""
cursor.execute(SQL_EMP_4_IN)
# We can now merge the employment information from the four different states. Note that we cannot just merge the four complete datasets as they are, as they all have some unique variables. We first identify the ones that are similar, which are: 'ssn', 'state', 'wage' and 'job_yrq'. Note that the variables are sometimes spelled a bit differently over different states. Also note that we are not including any information identifying multiple employers, as this information is not generalizable over different states. This is information we are losing by doing this merge.
# Stack the four states on their common columns only. Note the per-state
# naming differences handled positionally: wage vs. wages, ssn vs. ssn_hash.
# State-specific columns (e.g. employer identifiers) are dropped here.
SQL_MERGE_MO_IL = """
CREATE TEMP TABLE cohort_merge AS
SELECT ssn, state, wage, job_yrq FROM cohort_MO_full
UNION ALL
SELECT ssn, state, wage, job_yrq FROM cohort_IL_full
UNION ALL
SELECT ssn, state, wages, job_yrq FROM cohort_IN_full
UNION ALL
SELECT ssn_hash, state, wages, job_yrq FROM cohort_OH_full_SSN;
commit;
"""
cursor.execute(SQL_MERGE_MO_IL)
# ## Additional Exploratory Data Analysis
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# And we investigate how the merged dataset looks like:
# Inspect the merged multistate employment table.
SQL_OH_TEST = """
SELECT * FROM cohort_merge;
"""
df_test = pd.read_sql(SQL_OH_TEST, conn)
# Distinct people (hashed SSNs) with at least one full-quarter job anywhere.
df_test['ssn'].nunique()
df_test.head()
# Now that we obtained all employment information for our cohort that we were able to find in the available administrative registries, we can link this information to the education cohort dataset. We use 'LEFT JOIN' in such a way that we only link the employment information exactly one year later (same quarter) to the education cohort data.
# Join employment to the education cohort, keeping only jobs held exactly one
# year after graduation: education yr_q must equal the job quarter minus one
# year (same quarter, following year). LEFT JOIN keeps non-employed graduates.
SQL_link_edu_emp = """
CREATE TEMP TABLE cohort_2010_test AS
SELECT cohort_2010_EDU_SSN.*, cohort_merge.*
FROM cohort_2010_EDU_SSN
LEFT JOIN cohort_merge
ON cohort_2010_EDU_SSN.ssn_hash = cohort_merge.ssn
AND cohort_2010_EDU_SSN.yr_q = (cohort_merge.job_yrq -'1 year'::interval)::date;
commit;
"""
cursor.execute(SQL_link_edu_emp)
# And we again investigate this dataset
# Inspect the linked education-employment table.
# NOTE: this re-binds SQL_OH_TEST and df_test, shadowing the earlier cell.
SQL_OH_TEST = """
SELECT * FROM cohort_2010_test;
"""
df_test = pd.read_sql(SQL_OH_TEST, conn)
df_test.head()
# Graduates in the cohort (rows can exceed this when someone has several jobs).
df_test['key_id'].nunique()
# Graduates with a matched employment record (ssn is NULL for the rest).
df_test['ssn'].nunique()
# Apparently, this dataset has 1602 registrations for 1551 persons. So a couple of them had wages from multiple employers (resulting in multiple rows in this dataset). In addition, there are 686 ssns in there. SSN was only registered in the employment dataset, as we had 'key_id' and 'ssn_hash' in the education data. So 686 persons had employment registrations in this dataset.
# As we saw that some persons had multiple employers from whom they obtained income, there are different ways we can handle this. We can just sum all the wages for every person, or we identify the highest wage per person, and identify this as primary income. We name them 'sumwage' and 'maxwage'. Note that in the way we specified the code here, we do not sum or max wages if a person is earning wages in multiple states simultaneously.
# Aggregate to one row per person per state: total wages (sumwage) and the
# largest single-employer wage (maxwage). Grouping by state means wages earned
# simultaneously in different states are NOT combined (as noted above).
SQL_SUM_MAX = """
CREATE TEMP TABLE cohort_2010_sumwages AS
SELECT key_id, state,
SUM(wage) as sumwage,
MAX(wage) as maxwage
FROM cohort_2010_test
GROUP BY key_id, state;
commit;
"""
cursor.execute(SQL_SUM_MAX)
# We can also link a dataset with auxiliary information per person:
# Attach per-person demographic/auxiliary attributes from the HEI demo table;
# LEFT JOIN keeps everyone from the wage aggregation.
SQL_LINK_AUX = '''
select *
from cohort_2010_sumwages a
left join data_ohio_olda_2018.oh_hei_demo b
on a.key_id = b.key_id
'''
df_aux = pd.read_sql(SQL_LINK_AUX, conn)
# Now we can generate variables measuring wages one year after employment. We generate a few different definitions here: either 'sum' or 'max' of the wages, and we set different thresholds. A threshold here can be used to identify persons who for example earn less than the minimum wage with fulltime employment, or half of that value.
# Generate wage variables with different definitions for employment:
# value = max (primary job) vs. sum (all jobs) of quarterly wages, with
# thresholds 0 / 1924 / 3848; values at or below the threshold become NaN.
df_aux['max_0_singleS_fullQ'] = np.where(df_aux['maxwage'] > 0, df_aux['maxwage'], np.nan)
df_aux['sum_0_singleS_fullQ'] = np.where(df_aux['sumwage'] > 0, df_aux['sumwage'], np.nan)
df_aux['max_1924_singleS_fullQ'] = np.where(df_aux['maxwage'] > 1924, df_aux['maxwage'], np.nan)
# Bug fix: the thresholded 'sum_*' variables previously copied 'maxwage'
# values; they now report 'sumwage', consistent with 'sum_0_singleS_fullQ'.
df_aux['sum_1924_singleS_fullQ'] = np.where(df_aux['sumwage'] > 1924, df_aux['sumwage'], np.nan)
df_aux['max_3848_singleS_fullQ'] = np.where(df_aux['maxwage'] > 3848, df_aux['maxwage'], np.nan)
df_aux['sum_3848_singleS_fullQ'] = np.where(df_aux['sumwage'] > 3848, df_aux['sumwage'], np.nan)
# Next, we can generate boxplots of the wages we found using the different definitions that we just generated.
# Compare the six wage definitions side by side (plt comes from %pylab above).
f, ax = plt.subplots(figsize=(16,6))
df_aux[['max_0_singleS_fullQ','sum_0_singleS_fullQ','max_1924_singleS_fullQ',
        'sum_1924_singleS_fullQ','max_3848_singleS_fullQ','sum_3848_singleS_fullQ']].boxplot()
ax.set_ylim(0,25000);
# Here, we see for example that the spread is smaller if we define a threshold and also the median is higher in such cases. Also, we see that the differences between 'sum' and 'max' are minor.
# Non-NaN counts per definition = how many people count as employed under it.
df_aux[['max_0_singleS_fullQ','sum_0_singleS_fullQ','max_1924_singleS_fullQ',
        'sum_1924_singleS_fullQ','max_3848_singleS_fullQ','sum_3848_singleS_fullQ']].count()
# If we look at the number of observations per variable, we also see differences. If we would define employment using the first listed definition, the employment rate for this cohort of students English would be much higher compared to if we would have used the last definition listed.
# ## Exploring Sources of Error and Inference
#
# - Back to [Table of Contents](#Table-of-Contents)
#
# Now you can ask yourself a few questions:
#
# Do you think these employment rates and median wages could be used for policy purposes to evaluate the Bachelor English in Ohio? Why/why not?
#
# What information are we missing that might make these results less useful?
# Information we might be missing:
# - Employment of persons in other states than the ones we were able to investigate
# - Employment at federal government is not included in the dataset
# - People who are self-employed
#
# What do you think of the definitions that we made? How would you interpret the median wage values as we have no information whether the persons are full-time or part-time employed. Is setting a threshold a solution for this?
#
# How would the results be different for different education types and/or different cohorts? You can try this out by changing the first set of code that identifies the cohort.
#
# How would the results be different if we used a different definition for employment. So instead of 'full quarter employment', we for example used 'single quarter employment'? You can also try this yourself.
#
| notebooks/05_1_inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Categorical Input and Continuous Output
# Student's t-test is usually used when we want to check if two samples were drawn from the same population or not, and ANOVA when more than two categorical variables are involved. These techniques can also be adopted for Feature Selection.
#
#
# ## a. Students t-test for Feature Selection:
#
# When we have a binary classification problem t test can be used to select features. The idea is that a large t-statistic value with a smaller p – value would provide sufficient evidence that the distribution of values for each of the examined classes are distinct and the variable may have enough discriminative power to be included in the classification model.
#
# - Null Hypothesis: There is no significant difference between the means of two groups.
# - Alternate Hypothesis: There is a significant difference between the means of two groups.
#
# ### About the data:
# Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image.
#
from scipy import stats
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
# ### About the data
# 1) ID number<br>
# 2) Diagnosis (M = malignant, B = benign)
#
# Ten real-valued features are computed for each cell nucleus:
#
# a) radius (mean of distances from center to points on the perimeter)<br>
# b) texture (standard deviation of gray-scale values)<br>
# c) perimeter<br>
# d) area<br>
# e) smoothness (local variation in radius lengths)<br>
# f) compactness (perimeter^2 / area - 1.0)<br>
# g) concavity (severity of concave portions of the contour)<br>
# h) concave points (number of concave portions of the contour)<br>
# i) symmetry<br>
# j) fractal dimension ("coastline approximation" - 1)
import pandas as pd  # bug fix: pandas was never imported in this notebook

# Load the breast-cancer FNA dataset; drop the identifier column and the
# empty trailing 'Unnamed: 32' column that pandas creates from the CSV.
df = pd.read_csv('data.csv')
df.drop(['id','Unnamed: 32'],axis = 1,inplace = True)
df.columns
# Encode the target: benign (B) = 1, malignant (M) = 0.
# (The original comment said "Male = 0 and Female = 1", which was wrong —
# 'diagnosis' holds tumour classes, not gender.)
alter = {'B' : 1,'M' : 0}
df['diagnosis'] = df['diagnosis'].map(alter)
df.shape
# Selecting features whose p-value is < 0.05 (features with p >= 0.05 are dropped)
# +
# Two-sample t-test per feature: keep a feature when its benign/malignant
# class means differ significantly (p < 0.05).
new_features = []
for x in df.columns[1:]:
    # ttest_ind returns (statistic, pvalue); [1] is the two-sided p-value
    pvalue = stats.ttest_ind(df.loc[df.diagnosis==1][x], df.loc[df.diagnosis==0][x])[1]
    if pvalue < 0.05:
        new_features.append(x)
new_df = df[new_features]
A = new_df.columns
B = df.columns
# Bug fix: corrected the mojibake 'Çolumns' -> 'Columns' in the output string.
# Note the symmetric difference also includes the target column 'diagnosis',
# which was never tested.
print('Columns whose p-value was >0.05 are:\n',
      list(set(A).symmetric_difference(set(B))))
# -
# ## b. Using ANOVA F- Test
# Analysis of Variance is a statistical method which is used to check the means of two or more groups that are significantly different from each other.
#
# The scikit-learn machine library provides an implementation of the ANOVA F-test in the f classif() function. This function can be used in a feature selection strategy, such as selecting the top k most relevant features (largest values) via the SelectKBest class.
# +
# split into input (X) and output (y) variables
from sklearn.model_selection import train_test_split  # bug fix: never imported in this notebook

X = df.iloc[:,1:]
# Bug fix: take the target as a 1-D Series; df.iloc[:,:1] returned a
# one-column DataFrame, which triggers a shape warning in scikit-learn.
y = df.iloc[:, 0]
#Split into Train and Test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# Keep the 8 features with the largest ANOVA F-statistics.
select = SelectKBest(score_func=f_classif, k=8)
new = select.fit_transform(X_train,y_train)
#printing the features that have been selected using get_support()
cols = select.get_support(indices=True)
#Printing the scores of the selected columns
# Bug fix: scores_ is indexed over ALL features, so the score of selected
# feature cols[i] is scores_[cols[i]] (the original printed scores_[i],
# i.e. the scores of the first 8 features regardless of selection).
for i in range(len(cols)):
    print('Feature %d: %f' % (cols[i], select.scores_[cols[i]]))
# -
# Creating a new dataframe with the selected columns
# Subset to the k selected features. Bug fix: `cols` contains indices into the
# feature matrix X (which excludes 'diagnosis' at position 0), so the original
# `df.iloc[:, cols]` shifted every selected column left by one; index X instead.
features_df_new = X.iloc[:, cols]
features_df_new.columns
features_df_new.shape
| Categorical Input Continuous Output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Notebook for backend info.
#
# - Ideally, the backend is written by ourselves, with using `@firebase/auth`.
#
#
# ## ProfileData
#
# - The profile is split into 2 parts, the `firebaseProfile` and the `profileData`.
# - `firebaseProfile` is managed by firebase and we can only store and update specific fields, (we are using displayName, emailVerified here)
# - `profileData` is managed by us and we can store any data we want. We will be storing details like mobile-number etc. No verification is implemented at this time however.
#
// Interface for the profileData is as follows.
// App-managed half of the user profile (stored by our backend, not Firebase).
// All fields are free-form strings; none are verified server-side yet.
export interface profileDetails {
    username: string;
    email: string;
    first_name: string;
    last_name: string;
    // Unverified contact/location details entered by the user.
    mobile_number: string;
    address: string;
    locality: string;
    city: string;
    state: string;
    country: string;
    bio: string;
}
| BACKEND.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Grid Search Hyperparameter optimization
# This case study is all about using grid searches to identify the optimal parameters for a machine learning algorithm. To complete this case study, you'll use the Pima Indian diabetes dataset from Kaggle and KNN. Follow along with the preprocessing steps of this case study.
# + [markdown] tags=[]
# Load the necessary packages
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and
# removed in 1.2 (ConfusionMatrixDisplay.from_estimator replaces it) — confirm
# the pinned scikit-learn version before upgrading.
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
# Apply seaborn's default plot styling.
sns.set()
# set random seed to try make this exercise and solutions reproducible (NB: this is just for teaching purpose and not something you would do in real life)
random_seed_number = 42
np.random.seed(random_seed_number)
# + [markdown] tags=[]
# #### Load the diabetes data
# -
# Load the Pima Indian diabetes dataset.
diabetes_data = pd.read_csv('data/diabetes.csv')
diabetes_data.head()
# + [markdown] tags=[]
# **<font color='teal'> Start by reviewing the data info.</font>**
# -
diabetes_data.info()
# + [markdown] tags=[]
# **<font color='teal'> Apply the describe function to the data.</font>**
# -
diabetes_data.describe()
# No NaNs yet — missingness is encoded as literal zeros in these columns.
diabetes_data.isnull().sum()
# + [markdown] tags=[]
# **<font color='teal'> Currently, the missing values in the dataset are represented as zeros. Replace the zero values in the following columns ['Glucose','BloodPressure','SkinThickness','Insulin','BMI'] with nan .</font>**
# -
# Zero is physiologically impossible for these measurements, so treat it as missing.
diabetes_data[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']] = diabetes_data[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']].replace(0,np.nan)
diabetes_data.isnull().sum()
# + [markdown] tags=[]
# **<font color='teal'> Plot histograms of each column. </font>**
# -
# One histogram per affected column (NaNs are skipped by hist).
for i in ['Glucose','BloodPressure','SkinThickness','Insulin','BMI']:
    _ = diabetes_data.hist(i)
    plt.show()
# + [markdown] tags=[]
# #### Replace the zeros with mean and median values.
# -
# Mean for the roughly symmetric columns, median for the skewed ones.
diabetes_data['Glucose'].fillna(diabetes_data['Glucose'].mean(), inplace = True)
diabetes_data['BloodPressure'].fillna(diabetes_data['BloodPressure'].mean(), inplace = True)
diabetes_data['SkinThickness'].fillna(diabetes_data['SkinThickness'].median(), inplace = True)
diabetes_data['Insulin'].fillna(diabetes_data['Insulin'].median(), inplace = True)
diabetes_data['BMI'].fillna(diabetes_data['BMI'].median(), inplace = True)
diabetes_data.isnull().sum()
# + [markdown] tags=[]
# **<font color='teal'> Plot histograms of each column after replacing nan. </font>**
# -
# Re-plot the same histograms to see the effect of the imputation.
for i in ['Glucose','BloodPressure','SkinThickness','Insulin','BMI']:
    _ = diabetes_data.hist(i)
    plt.show()
# + [markdown] tags=[]
# #### Plot the correlation matrix heatmap
# -
# Correlation heatmap across all columns (including the target).
plt.figure(figsize=(12,10))
print('Correlation between various features')
p=sns.heatmap(diabetes_data.corr(), annot=True,cmap ='Blues')
# + [markdown] tags=[]
# **<font color='teal'> Define the `y` variable as the `Outcome` column.</font>**
# +
# Features = everything but the target; target = Outcome (1 = diabetes).
X = diabetes_data.drop('Outcome', axis = 1)
y = diabetes_data['Outcome']
# + [markdown] tags=[]
# **<font color='teal'> Create a 70/30 train and test split. </font>**
# + [markdown] tags=[]
# **<font color='teal'> Using Sklearn, standarize the magnitude of the features by scaling the values. </font>**
# -
# Note: Don't forget to fit() your scaler on X_train and then use that fitted scaler to transform() X_test. This is to avoid data leakage while you standardize your data.
# +
# Split first, then fit the scaler on the training fold only and apply it to
# the test fold. Bug fix: the scaler was previously fit on all of X before
# splitting, leaking test-set statistics into training — exactly what the
# note above warns against.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# + [markdown] tags=[]
# #### Using a range of neighbor values of 1-10, apply the KNearestNeighbor classifier to classify the data.
# +
# Fit a KNN classifier for every neighbour count 1..9 and record the
# train/test accuracy of each fit.
train_scores = []
test_scores = []
for n_neighbors in range(1, 10):
    knn = KNeighborsClassifier(n_neighbors)
    knn.fit(X_train, y_train)
    train_scores.append(knn.score(X_train, y_train))
    test_scores.append(knn.score(X_test, y_test))
# + [markdown] tags=[]
# **<font color='teal'> Print the train and test scores for each iteration.</font>**
# -
# Report the recorded accuracies, one line per neighbour count (k = 1..9).
for tr, te in zip(train_scores, test_scores):
    print('Train score is {}'.format(tr), 'Test score is {}'.format(te))
# + [markdown] tags=[]
# **<font color='teal'> Identify the number of neighbors that resulted in the max score in the training and testing dataset. </font>**
# -
# Refit with k=3 (best training accuracy above) and report both scores.
knn = KNeighborsClassifier(3)
knn.fit(X_train, y_train)
print("n_neighbors = 3")
print(f"Train score: {knn.score(X_train, y_train)}")
print(f"Test score: {knn.score(X_test, y_test)}")
def train_KNN(n_neighbors):
    """Fit KNN with `n_neighbors` on the global train split and print
    train/test accuracy.

    Bug fix: the body previously ignored this parameter and read the global
    loop variable `n`, so any call outside that loop would have used the
    wrong neighbour count (or raised NameError).
    """
    knn = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_train, y_train)
    train_score = knn.score(X_train, y_train)
    test_score = knn.score(X_test, y_test)
    print('Train score: {} / Test score: {} for {} neighbors'.format(
        train_score, test_score, n_neighbors))

for n in range(1,10):
    train_KNN(n)
# Max score in training dataset was 0.854 for 3 neighbors
# Max score in testing dataset was 0.740 for 8 neighbors
# + [markdown] tags=[]
# Plot the train and test model performance by number of neighbors.
# -
# Train vs. test accuracy as a function of the neighbour count.
# NOTE(review): positional (x, y) arguments to sns.lineplot were removed in
# seaborn >= 0.12 — use keyword x=/y= if the environment is upgraded.
plt.figure(figsize=(12,5))
p = sns.lineplot(range(1,10),train_scores,marker='*',label='Train Score')
p = sns.lineplot(range(1,10),test_scores,marker='o',label='Test Score')
# + [markdown] tags=[]
# **<font color='teal'> Fit and score the best number of neighbors based on the plot. </font>**
# -
# Refit with k=8 (best test accuracy in the plot) and evaluate.
knn = KNeighborsClassifier(n_neighbors = 8)
knn_model = knn.fit(X_train, y_train)
train_score = knn.score(X_train, y_train)
test_score = knn.score(X_test, y_test)
print('Train score: {} / Test score: {} for 8 neighbors'.format(train_score, test_score))
# Confusion matrix of test-set predictions (rows = truth, cols = prediction).
y_pred = knn.predict(X_test)
pl = confusion_matrix(y_test,y_pred)
pl
# + [markdown] tags=[]
# **<font color='teal'> Plot the confusion matrix for the model fit above. </font>**
# -
# Graphical confusion matrix (see deprecation note at the imports above).
_ = plot_confusion_matrix(knn, X_test, y_test)
_ = plt.title('Diabetes - 1, No Diabetes - 0')
_ = plt.show()
# + [markdown] tags=[]
# **<font color='teal'> Print the classification report </font>**
# -
# Per-class precision/recall/F1 on the test split.
print(classification_report(y_test, y_pred))
# + [markdown] tags=[]
# #### In the case of the K nearest neighbors algorithm, the K parameter is one of the most important parameters affecting the model performance. The model performance isn't horrible, but what if we didn't consider a wide enough range of values in our neighbors for the KNN? An alternative to fitting a loop of models is to use a grid search to identify the proper number. It is common practice to use a grid search method for all adjustable parameters in any type of machine learning algorithm. First, you define the grid — aka the range of values — to test in the parameter being optimized, and then compare the model outcome performance based on the different values in the grid.
# + [markdown] tags=[]
# #### Run the code in the next cell to see how to implement the grid search method for identifying the best parameter value for the n_neighbors parameter. Notice the param_grid is the range value to test and we apply cross validation with five folds to score each possible value of n_neighbors.
# -
from sklearn.model_selection import GridSearchCV
# 5-fold cross-validated search over k = 1..49.
# NOTE(review): this fits on the raw, unscaled full X/y rather than the scaled
# train split used above — results are not directly comparable; confirm intent.
param_grid = {'n_neighbors':np.arange(1,50)}
knn = KNeighborsClassifier()
knn_cv= GridSearchCV(knn,param_grid,cv=5)
knn_cv.fit(X,y)
# + [markdown] tags=[]
# #### Print the best score and best parameter for n_neighbors.
# -
print("Best Score:" + str(knn_cv.best_score_))
print("Best Parameters: " + str(knn_cv.best_params_))
# Here you can see that the ideal number of n_neighbors for this model is 14 based on the grid search performed.
# + [markdown] tags=[]
# **<font color='teal'> Now, following the KNN example, apply this grid search method to find the optimal number of estimators in a Randon Forest model.
# </font>**
# +
# Grid search over Random Forest hyperparameters (49 * 3 * 5 * 2 candidates,
# each cross-validated 5-fold — this cell is slow).
# NOTE(review): max_features='auto' was removed for classifiers in
# scikit-learn 1.3 — drop it if the environment is upgraded.
param_grid = {
    'n_estimators': np.arange(1,50),
    'max_features': ['auto', 'sqrt', 'log2'],
    'max_depth' : [4,5,6,7,8],
    'criterion' :['gini', 'entropy']
}
rfc = RandomForestClassifier()
CV_rfc = GridSearchCV(rfc, param_grid, cv= 5)
CV_rfc.fit(X,y)
# + tags=[]
print("Best Score: " + str(CV_rfc.best_score_))
print("Best Parameters: " + str(CV_rfc.best_params_))
# -
| Case study - Grid Search in KNN/GridSearchKNN_Case_Study.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="sj_pbQzwpiUS"
# # Simulating Requests to an AI Prediction Deployed Model
#
#
# + [markdown] colab_type="text" id="NDOuIyjJpxQa"
# ## Setup
# + colab={} colab_type="code" id="1Kgv1ijYI3bV"
import os
from tensorflow import io as tf_io
import matplotlib.pyplot as plt
import numpy as np
# + colab={} colab_type="code" id="w3SMI1H_DknU"
# GCP project / storage configuration for the simulation.
PROJECT = 'mlops-dev-env'
BUCKET = 'mlops-dev-workspace'
GCS_DATA_LOCATION = 'gs://workshop-datasets/covertype/data_validation'
REGION = 'us-central1'
# Local scratch area for the downloaded training CSV.
LOCAL_WORKSPACE = '/home/jarekk/workspace-dataprep'
LOCAL_DATA_DIR = os.path.join(LOCAL_WORKSPACE, 'data')
LOCAL_DATA_FILE = os.path.join(LOCAL_DATA_DIR, 'train.csv')
# BigQuery sink where the deployed model's request/response logs land.
BQ_DATASET_NAME = 'data_validation'
BQ_TABLE_NAME = 'covertype_classifier_logs_tf'
# Deployed AI Platform Prediction model to send simulated traffic to.
MODEL_NAME = 'covertype_classifier_tf'
VERSION_NAME = 'v2'
# Key of the class-probability vector in each prediction response.
MODEL_OUTPUT_KEY = 'probabilities'
SIGNATURE_NAME = 'serving_default'
# + colab={} colab_type="code" id="AaibI_P_Dwdz"
# !gcloud config set project {PROJECT}
# + [markdown] colab_type="text" id="8tH1C0Yjp1Rw"
# ## 1. Download Data
#
# We use the [covertype](https://archive.ics.uci.edu/ml/datasets/covertype) from UCI Machine Learning Repository. The task is to Predict forest cover type from cartographic variables only.
#
# The dataset is preprocessed, split, and uploaded to the `gs://workshop-datasets/covertype` public GCS location.
#
# We use this version of the preprocessed dataset in this notebook. For more information, see [Cover Type Dataset](https://github.com/GoogleCloudPlatform/mlops-on-gcp/tree/master/datasets/covertype)
# + colab={} colab_type="code" id="mPyEIzdTD1p0"
# Start from a clean local workspace: drop any artifacts from a previous run.
if tf_io.gfile.exists(LOCAL_WORKSPACE):
    print("Removing previous workspace artifacts...")
    tf_io.gfile.rmtree(LOCAL_WORKSPACE)
print("Creating a new workspace...")
tf_io.gfile.makedirs(LOCAL_WORKSPACE)
tf_io.gfile.makedirs(LOCAL_DATA_DIR)
# + colab={} colab_type="code" id="YDVDp0STEKB9"
# !gsutil cp gs://workshop-datasets/covertype/data_validation/training/dataset.csv {LOCAL_DATA_FILE}
# !wc -l {LOCAL_DATA_FILE}
# + colab={} colab_type="code" id="zbDx6WekEKZP"
import pandas as pd
# Load the covertype training CSV; `data` is the population the daily
# serving batches are sampled from below.
data = pd.read_csv(LOCAL_DATA_FILE)
print("Total number of records: {}".format(len(data.index)))
data.sample(10).T
# + [markdown] colab_type="text" id="PNPxgqVCqE7J"
# ## 2. Define Metadata
# + colab={} colab_type="code" id="rvRdZIuoETkU"
# Column order of the covertype CSV; the last column is the label.
HEADER = ['Elevation', 'Aspect', 'Slope','Horizontal_Distance_To_Hydrology',
          'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
          'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
          'Horizontal_Distance_To_Fire_Points', 'Wilderness_Area', 'Soil_Type',
          'Cover_Type']
TARGET_FEATURE_NAME = 'Cover_Type'
# String labels for the 7 forest cover classes (index into model output).
FEATURE_LABELS = ['0', '1', '2', '3', '4', '5', '6']
NUMERIC_FEATURE_NAMES = ['Aspect', 'Elevation', 'Hillshade_3pm',
                         'Hillshade_9am', 'Hillshade_Noon',
                         'Horizontal_Distance_To_Fire_Points',
                         'Horizontal_Distance_To_Hydrology',
                         'Horizontal_Distance_To_Roadways','Slope',
                         'Vertical_Distance_To_Hydrology']
CATEGORICAL_FEATURE_NAMES = ['Soil_Type', 'Wilderness_Area']
FEATURE_NAMES = CATEGORICAL_FEATURE_NAMES + NUMERIC_FEATURE_NAMES
# CSV parsing defaults: 0 for numeric columns and the target, 'NA' otherwise.
HEADER_DEFAULTS = [[0] if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME] else ['NA']
                   for feature_name in HEADER]
NUM_CLASSES = len(FEATURE_LABELS)
# + [markdown] colab_type="text" id="K0tufr6ROKbC"
# #### create a container for the serving data
# + colab={} colab_type="code" id="wPb-CbwfOKjY"
# One DataFrame per simulated serving day; filled in sections 3 and 4.
serving_data = {
    '2020-04-01': None,
    '2020-04-02': None,
    '2020-04-03': None,
    '2020-04-04': None,
    '2020-04-05': None,
    '2020-04-06': None,
}
# + [markdown] colab_type="text" id="gejOSnS_E_lo"
# ## 3. Sampling Unskewed Data
#
# * Sample data for *three* consecutive dates: **2020-04-01**, **2020-04-02**, and **2020-04-03**
# * Each day has 1000 examples
# * No altering is applied
#
#
# + colab={} colab_type="code" id="kNMF9rkpE_tz"
# Draw 3000 unaltered rows; they are split into three 1000-row "days".
data_normal = data.sample(3000)
# + colab={} colab_type="code" id="WYlqANUdGw0I"
# Sanity-check the overall sample: one numeric, one categorical, and the target.
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
data_normal['Aspect'].plot.hist(bins=15, ax=axes[0], title='Aspect')
data_normal['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1], title='Wilderness Area')
data_normal[TARGET_FEATURE_NAME].value_counts(normalize=True).plot.bar(ax=axes[2], title=TARGET_FEATURE_NAME)
# + colab={} colab_type="code" id="ateUWw4dI9X2"
# Split the sample into three equal daily batches.
normal_days_data_list =[
    data_normal[:1000],
    data_normal[1000:2000],
    data_normal[2000:]
]
# + colab={} colab_type="code" id="z82nsYFRL08q"
# Bug fix: the original loops plotted `data_normal` (the full 3000-row sample)
# in every panel, so the per-day comparison plots were all identical.
# Plot each day's slice (`day_data`) instead.
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
for i, day_data in enumerate(normal_days_data_list):
    day_data['Aspect'].plot.hist(bins=15, ax=axes[i])
# + colab={} colab_type="code" id="rQDj-0UBL2_g"
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
for i, day_data in enumerate(normal_days_data_list):
    day_data['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[i])
# + colab={} colab_type="code" id="1zxjU-THQtLP"
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
for i, day_data in enumerate(normal_days_data_list):
    day_data[TARGET_FEATURE_NAME].value_counts(normalize=True).plot.bar(ax=axes[i])
# + colab={} colab_type="code" id="TblBzO47QDrT"
# Register the three unskewed batches as the first three serving days.
serving_data['2020-04-01'] = normal_days_data_list[0]
serving_data['2020-04-02'] = normal_days_data_list[1]
serving_data['2020-04-03'] = normal_days_data_list[2]
# + [markdown] colab_type="text" id="Lrp50MOhFK2y"
# ## 4. Preparing Skewed Data
# We are going to introduce the following skews to the data:
# 1. **Numerical Features**
# 1. *Elevation - Feature Skew*: Convert the unit of measure from meters to kilometers in 2020-04-05
# 2. *Aspect - Distribution Skew*: gradual decrease of the value
#
# 2. **Categorical Features**
# 1. *Wilderness_Area - Feature Skew*: Adding a new category "Others" in 2020-04-05
#   2. *Wilderness_Area - Distribution Skew*: Gradual increase of the frequency of "Cache" and "Neota" values
# 3. **Target Features**: check the change of the distribution of predicted class labels
# + colab={} colab_type="code" id="aabdFtM-FLCT"
# Draw a second 3000-row sample and split it into days 4-6, which will
# receive the injected skews below.
data_to_skew = data.sample(3000)
# NOTE(review): these are slices of `data_to_skew`; the in-place column
# assignments below may raise pandas' SettingWithCopyWarning — confirm the
# mutations actually land (a .copy() per slice would make this explicit).
serving_data['2020-04-04'] = data_to_skew[:1000]
serving_data['2020-04-05'] = data_to_skew[1000:2000]
serving_data['2020-04-06'] = data_to_skew[2000:]
# + [markdown] colab_type="text" id="_nalaB83U75N"
# ### 4.1 Skew numerical features
# + [markdown] colab_type="text" id="YdFqotRGWtXp"
# #### 4.1.1 Elevation Feature Skew
# + colab={} colab_type="code" id="ROUARI9MU3w4"
# Feature skew on day 5 only: Elevation changes unit (meters -> kilometers).
serving_data['2020-04-05']['Elevation'] = serving_data['2020-04-05']['Elevation'] / 1000
# + colab={} colab_type="code" id="LdiF7ONLVuM_"
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
serving_data['2020-04-01']['Elevation'].plot.hist(bins=15, ax=axes[0], title='Elevation Meters')
serving_data['2020-04-05']['Elevation'].plot.hist(bins=15, ax=axes[1], title='Elevation Kilometers')
# + [markdown] colab_type="text" id="POXFPHdtWzSw"
# #### 4.1.2 Aspect Distribution Skew
# + colab={} colab_type="code" id="euB8qRnzWzaP"
# Distribution skew: progressively shrink large Aspect values (> 250) by a
# random factor that grows more aggressive each day (10%, 15%, 20% max).
serving_data['2020-04-04']['Aspect'] = serving_data['2020-04-04']['Aspect'].apply(
    lambda value: value * np.random.uniform(0.90, 1) if value > 250 else value
)
serving_data['2020-04-05']['Aspect'] = serving_data['2020-04-05']['Aspect'].apply(
    lambda value: value * np.random.uniform(0.85, 1) if value > 250 else value
)
serving_data['2020-04-06']['Aspect'] = serving_data['2020-04-06']['Aspect'].apply(
    lambda value: value * np.random.uniform(0.80, 1) if value > 250 else value
)
# + colab={} colab_type="code" id="1Qga7opvY5Ir"
# Visual check: days 1-3 (top row) vs skewed days 4-6 (bottom row).
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 10))
serving_data['2020-04-01']['Aspect'].plot.hist(bins=15, ax=axes[0, 0], title='Aspect - day 1')
serving_data['2020-04-02']['Aspect'].plot.hist(bins=15, ax=axes[0, 1], title='Aspect - day 2')
serving_data['2020-04-03']['Aspect'].plot.hist(bins=15, ax=axes[0, 2], title='Aspect - day 3')
serving_data['2020-04-04']['Aspect'].plot.hist(bins=15, ax=axes[1, 0], title='Aspect - day 4')
serving_data['2020-04-05']['Aspect'].plot.hist(bins=15, ax=axes[1, 1], title='Aspect - day 5')
serving_data['2020-04-06']['Aspect'].plot.hist(bins=15, ax=axes[1, 2], title='Aspect - day 6')
# + [markdown] colab_type="text" id="RDwWYPTnfTZ1"
# ### 4.2 Skew categorical features
# + [markdown] colab_type="text" id="MuGay843fcQY"
# #### 4.2.1 Wilderness Area Feature Skew
# Adding a new category "Others"
#
# + colab={} colab_type="code" id="sIXorDZff4yS"
# Feature skew on day 5: ~10% of rows get a brand-new category "Others"
# that the model never saw at training time.
serving_data['2020-04-05']['Wilderness_Area'] = serving_data['2020-04-05']['Wilderness_Area'].apply(
    lambda value: 'Others' if np.random.uniform() <= 0.1 else value
)
# + colab={} colab_type="code" id="A725svb-gRaR"
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
serving_data['2020-04-04']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[0], title='Wilderness Area - Normal')
serving_data['2020-04-05']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1], title='Wilderness Area - New Category')
serving_data['2020-04-06']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[2], title='Wilderness Area - Normal')
# + [markdown] colab_type="text" id="zz8hhWluhSp9"
# #### 4.2.2 Wilderness Area Distribution Skew
# + colab={} colab_type="code" id="aISRDXvrhSzR"
# Distribution skew: re-label a growing share (10% / 15% / 20%) of
# 'Rawah'/'Commanche' rows as 'Neota' on days 4-6.
serving_data['2020-04-04']['Wilderness_Area'] = serving_data['2020-04-04']['Wilderness_Area'].apply(
    lambda value: 'Neota' if value in ['Rawah', 'Commanche'] and np.random.uniform() <= 0.1 else value
)
serving_data['2020-04-05']['Wilderness_Area'] = serving_data['2020-04-05']['Wilderness_Area'].apply(
    lambda value: 'Neota' if value in ['Rawah', 'Commanche'] and np.random.uniform() <= 0.15 else value
)
serving_data['2020-04-06']['Wilderness_Area'] = serving_data['2020-04-06']['Wilderness_Area'].apply(
    lambda value: 'Neota' if value in ['Rawah', 'Commanche'] and np.random.uniform() <= 0.2 else value
)
# + colab={} colab_type="code" id="VjnFXyERi941"
# Visual check: normal days (top row) vs skewed days (bottom row).
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 10))
serving_data['2020-04-01']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[0, 0], title='Wilderness Area - day 1')
serving_data['2020-04-02']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[0, 1], title='Wilderness Area - day 2')
serving_data['2020-04-03']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[0, 2], title='Wilderness Area - day 3')
serving_data['2020-04-04']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1, 0], title='Wilderness Area - day 4')
serving_data['2020-04-05']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1, 1], title='Wilderness Area - day 5')
serving_data['2020-04-06']['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1, 2], title='Wilderness Area - day 6')
# + [markdown] colab_type="text" id="Y0kQHXkdFGZJ"
# ## 5. Simulating serving workload
# + [markdown] colab_type="text" id="2TCilipgUKKf"
# ### 5.1 Implement the model API client
# + colab={} colab_type="code" id="yEyoTL5wFlu9"
import googleapiclient.discovery
import numpy as np
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT, MODEL_NAME, VERSION_NAME)
print("Service name: {}".format(name))
def caip_predict(instances):
    """Send feature dicts to the deployed CAIP model and return class labels.

    Each instance dict is converted to the model's expected format (every
    feature value wrapped in a single-element list), then the batch is sent
    in one predict() call. Raises RuntimeError if the service reports an
    error. Returns one label from FEATURE_LABELS (argmax of the returned
    probability vector) per input instance.
    """
    # Wrap every feature value in a list, as required by the serving signature.
    payload = [{k: [v] for k, v in inst.items()} for inst in instances]
    response = service.projects().predict(
        name=name,
        body={'signature_name': SIGNATURE_NAME, 'instances': payload}
    ).execute()
    if 'error' in response:
        raise RuntimeError(response['error'])
    # Map each probability vector to the label of its highest-scoring class.
    return [
        FEATURE_LABELS[int(np.argmax(pred[MODEL_OUTPUT_KEY]))]
        for pred in response['predictions']
    ]
# + [markdown] colab_type="text" id="quF2MGwjUWk9"
# ### 5.2 Prepare the request instances
# + colab={} colab_type="code" id="gc9l_w-aQg6M"
def prepare_instances(serving_data):
    """Flatten the per-day DataFrames into a list of prediction request dicts.

    Iterates the day -> DataFrame mapping in order, turning every row into a
    {column: value} dict. The target column is skipped, categorical feature
    values are stringified, and all other values are passed through as-is.
    """
    request_instances = []
    for _day, day_df in serving_data.items():
        feature_columns = [c for c in day_df.columns if c != TARGET_FEATURE_NAME]
        for _, record in day_df.iterrows():
            payload = {}
            for col in feature_columns:
                raw = record[col]
                payload[col] = str(raw) if col in CATEGORICAL_FEATURE_NAMES else raw
            request_instances.append(payload)
    return request_instances
# + colab={} colab_type="code" id="4t0vQlldRkDM"
import time
def simulate_requests():
    """Replay all prepared serving instances against the deployed model.

    Sends one online-prediction request per instance via caip_predict(),
    printing a dot per request and a progress line every 100 requests.
    The predictions themselves are discarded; the point is to populate the
    model's request/response logs.
    """
    instances = prepare_instances(serving_data)
    print("Simulation started...")
    print("--------------------")
    print("Number of instances: {}".format(len(instances)))
    for i, instance in enumerate(instances):
        predicted_class = caip_predict([instance])
        print(".", end='')
        if (i + 1) % 100 == 0:
            print()
            print("Sent {} requests.".format(i + 1))
        # Uncomment to throttle the request rate.
        #time.sleep(0.1)
    print("")
    print("--------------------")
    # Typo fix: message previously read "finised".
    print("Simulation finished.")
# + colab={} colab_type="code" id="8w_CtO5xTwjS"
# Kick off the traffic replay: 6 days x 1000 rows = 6000 prediction requests.
simulate_requests()
# + colab={} colab_type="code" id="SaH2HYFskmu4"
| utilities/snippets/tf_covertype_skewed_data_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: learn-env
# language: python
# name: learn-env
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# - Box Office Mojo: bo_mojo
# - Rotten Tomatoes: rt_movies, rt_reviews
# - TheMovieDB.org: tmdb_movies
# - the-numbers.com: tn_budgets
#
# Box Office Mojo: per-title domestic/foreign gross by year.
bo_mojo = pd.read_csv('unzippedData/bom.movie_gross.csv')
bo_mojo.info()
bo_mojo.head()
# TheMovieDB metadata dump.
theMovieDB = pd.read_csv('unzippedData/tmdb.movies.csv')
theMovieDB.info()
theMovieDB[theMovieDB['original_title']=='Toy Story']
# the-numbers.com: budgets and grosses (dollar amounts stored as strings).
the_numbers = pd.read_csv('unzippedData/tn.movie_budgets.csv')
the_numbers.info()
# Rotten Tomatoes review-level data; unicode_escape works around bad bytes.
rt_reviews = pd.read_csv('unzippedData/rt.reviews.tsv', sep='\t', index_col=False, encoding = 'unicode_escape')
rt_reviews.head()
rt_reviews['id'].value_counts()
rt_movies= pd.read_csv('unzippedData/rt.movie_info.tsv', sep='\t', index_col=False, encoding = 'unicode_escape')
rt_movies.info()
# rt_movies[rt_movies['id']==3]
# NOTE(review): rt_reviews is re-read here identically to the load above.
rt_reviews = pd.read_csv('unzippedData/rt.reviews.tsv', sep='\t', index_col=False, encoding = 'unicode_escape')
rt_reviews.info()
#rt_reviews[rt_reviews['id']==3 & rt_reviews['fresh']=='fresh']
#reddit_data=pd.read_csv('reddit_rt_data.csv', sep='\t', encoding = 'unicode_escape')
reddit_data=pd.read_excel('reddit_rt_data2.xlsx')
reddit_data.head()
reddit_data.info()
the_numbers.info()
# Spot-check that a single release_date string parses with to_datetime.
y=the_numbers['release_date'][0]
print(y)
pd.to_datetime(y)
# +
# Convert release_date strings to datetime and currency strings
# (e.g. "$1,000,000") to plain ints.
the_numbers['release_date']=pd.to_datetime(the_numbers['release_date'])
# Bug fix: with pandas' regex default, '$' is the end-of-string anchor, so
# .str.replace('$', '') never strips the dollar sign and astype(int) fails.
# regex=False makes both patterns literal.
for _money_col in ['production_budget', 'domestic_gross', 'worldwide_gross']:
    the_numbers[_money_col] = (the_numbers[_money_col]
                               .str.replace(',', '', regex=False)
                               .str.replace('$', '', regex=False)
                               .astype(int))
# -
the_numbers['worldwide_gross'][0]
# +
# Budget vs worldwide gross, raw dollars.
x1=the_numbers['production_budget']
y1=the_numbers['worldwide_gross']
plt.figure(figsize=(15,10))
ax1 = sns.scatterplot(x=x1, y=y1)
plt.title('Scatterplot of Profitability')
plt.xlabel('Budget')
plt.ylabel('Worldwide Earnings')
plt.show()
# -
# Same plot against domestic gross only.
x1=the_numbers['production_budget']
y1=the_numbers['domestic_gross']
plt.figure(figsize=(15,10))
ax1 = sns.scatterplot(x=x1, y=y1)
plt.title('Scatterplot of Profitability')
plt.xlabel('Budget')
plt.ylabel('Domestic Earnings')
plt.show()
#add column for worldwide profitability
# Profitability = gross / budget (a ratio, not a margin).
the_numbers['Worldwide Profitability'] = the_numbers['worldwide_gross']/the_numbers['production_budget']
the_numbers.head()
#add column for domestic profitability
the_numbers['Domestic Profitability'] = the_numbers['domestic_gross']/the_numbers['production_budget']
the_numbers.head()
plt.figure(figsize=(15,10))
ax1 = sns.scatterplot(the_numbers['Domestic Profitability'], the_numbers['Worldwide Profitability'])
plt.title('Worldwide vs Domestic')
plt.xlabel('Domestic Profitability')
plt.ylabel('Worldwide Profitability')
plt.show()
# +
#the_numbers[the_numbers['Domestic Profitability']>180].sort_values(by='Domestic Profitability', ascending=False)
# +
#the_numbers[(the_numbers['Worldwide Profitability']>180) & (the_numbers['production_budget']>100000)].sort_values(by='Worldwide Profitability', ascending=False)
# -
#add column for year
the_numbers['release_year']=the_numbers['release_date'].dt.year
the_numbers.head()
# +
# IMDB tables; only title basics and ratings are used below.
imdb_title_basics=pd.read_csv('unzippedData/imdb.title.basics.csv') #movie title, year, runtime, genre
# imdb_name_basics=pd.read_csv('unzippedData/imdb.name.basics.csv') #names of people
# imdb_title_crew=pd.read_csv('unzippedData/imdb.title.crew.csv') #directors, writers, by code
# imdb_title_principals=pd.read_csv('unzippedData/imdb.title.principals.csv') #more personnel
# imdb_title_akas=pd.read_csv('unzippedData/imdb.title.akas.csv') #global region, language, attributes
# imdb_title_ratings=pd.read_csv('unzippedData/imdb.title.ratings.csv') #average rating, number of votes
imdb_title_basics.head()
# -
#change column name to align with the-numbers data
imdb_title_basics.rename(columns={'primary_title':'movie'}, inplace=True)
imdb_title_basics.head()
imdb_title_basics.head()
# Index by title so the join with the_numbers matches on movie name.
imdb_title_basics.set_index('movie', inplace=True, drop=False)
#the_numbers.set_index('movie', inplace=True)
joined_df = the_numbers.join(imdb_title_basics, how='left')
joined_df.info()
imdb_title_ratings=pd.read_csv('unzippedData/imdb.title.ratings.csv') #average rating, number of votes
imdb_title_ratings.head()
# Re-key both IMDB tables by the tconst title id to join them to each other.
imdb_title_basics.set_index('tconst', inplace=True)
imdb_title_ratings.set_index('tconst', inplace=True)
# +
imdb_combined = imdb_title_ratings.join(imdb_title_basics, how='left')
imdb_title_basics.head()
# -
# NOTE(review): at this point the index is tconst, so this overwrites the
# 'movie' column with tconst ids — and the following set_index('tconst')
# re-runs will fail once 'tconst' is already the index. This section looks
# like accumulated notebook re-runs; verify the intended cell order.
imdb_title_basics['movie']=imdb_title_basics.index
imdb_title_basics.head()
imdb_title_basics.set_index('tconst', inplace=True)
imdb_title_basics.head()
imdb_title_ratings.head()
imdb_combined = imdb_title_ratings.join(imdb_title_basics, how='left')
imdb_combined.info() #now have ratings and movie names in same Dataframe
#can now add earnings/budget information from the-numbers
#imdb_combined.set_index('movie', inplace=True, drop=False)
imdb_combined.info() #~73000 entries
#the_numbers.info() #5782 entries
# Attach ratings/metadata to the budget table; drop rows with no rating.
the_numbers_imdb_combined = the_numbers.join(imdb_combined, how='left')
the_numbers_imdb_combined2=the_numbers_imdb_combined[the_numbers_imdb_combined['averagerating'].isna()==False]
the_numbers_imdb_combined2.info()
the_numbers_imdb_combined2.head()
# +
#y=the_numbers_imdb_combined2.loc['12 Rounds']['genres'].split(',')
# Split the comma-separated genres string into up to three genre columns.
the_numbers_imdb_combined2['Genre 1']=the_numbers_imdb_combined2['genres'].str.split(',', expand=True)[0]
the_numbers_imdb_combined2['Genre 2']=the_numbers_imdb_combined2['genres'].str.split(',', expand=True)[1]
the_numbers_imdb_combined2['Genre 3']=the_numbers_imdb_combined2['genres'].str.split(',', expand=True)[2]
the_numbers_imdb_combined2.head()
# -
# Collect the distinct genre labels across all three positions.
genres_1 = list(the_numbers_imdb_combined2['Genre 1'].unique())
genres_2 = list(the_numbers_imdb_combined2['Genre 2'].unique())
genres_3 = list(the_numbers_imdb_combined2['Genre 3'].unique())
genre_set = set(genres_1 + genres_2 + genres_3)
genre_set
# NOTE(review): main_df is a reference to (not a copy of) the combined frame.
main_df = the_numbers_imdb_combined2
main_df['genres']=main_df['genres'].str.split(',')
main_df.head()
main_df.drop(['domestic_gross', 'Domestic Profitability', 'original_title', 'start_year', 'release_year', 'movie','Genre 1','Genre 2','Genre 3'], axis=1, inplace=True)
main_df.head()
main_df.drop(['numvotes'], inplace=True, axis=1)
main_df.head()
bo_mojo.head()
# Align Box Office Mojo with the other tables: key by movie title.
bo_mojo = bo_mojo.rename(columns={'title':'movie'})
bo_mojo = bo_mojo.set_index('movie')
bo_mojo.head()
bo_mojo['movie']=bo_mojo.index
bo_mojo.head()
# +
# Strip trailing parentheticals (e.g. "Title (2010)") from titles so they
# match the-numbers' naming; assumes a space precedes the '('.
movie_stripped =[]
for x in bo_mojo.movie:
    if '(' in x:
        movie_stripped.append(x[:(x.find('(')-1)])
    else:
        movie_stripped.append(x)
bo_mojo['movie']=movie_stripped
bo_mojo.head()
# -
bo_mojo.set_index('movie', inplace=True)
bo_mojo.head()
bo_mojo.info()
# Left-join BOM gross figures onto the main table by title index.
main_df2=main_df.join(bo_mojo, how='left')
main_df2.head()
# NOTE(review): 'domestic_gross' is listed twice in this drop; the duplicate
# is harmless but redundant.
main_df2.drop(['domestic_gross','domestic_gross','year'], axis=1, inplace=True)
main_df2.head()
#main_df2.drop(['foreign_gross'], axis=1, inplace=True)
main_df2.info()
main_df2.head()
# +
#need to cut:
# old movies - 666 are from 2009 and before
# cheap movies - 249 have budgets < $1,000,000
# no financial information - 275 have $0 worldwide gross. of those, domestic gross is also $0
# there are 96 studios
#main_df2[main_df2['release_date'].dt.year<=2009]
main_df2[main_df2['worldwide_gross']==0]
# -
main_df2.info()
# Rows with domestic earnings but zero worldwide gross (suspect records).
df_exper = the_numbers_imdb_combined[(the_numbers_imdb_combined['domestic_gross']!=0) & (the_numbers_imdb_combined['worldwide_gross']==0)]
df_exper
main_df3=main_df.join(bo_mojo, how='left')
main_df3.head()
#main_df3=main_df3[main_df3['worldwide_gross']==0]
main_df3[main_df3.index=='Home']
| .ipynb_checkpoints/Irv_EDA-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import pandas as pd
# Space-delimited listening-session log with no header row.
# Columns (by position): 0 = unix timestamp, 1 = user id, 2 = item id
# — presumably; confirm against the data source.
data = pd.read_csv('data_session', header = None, delimiter = " ")
data.head()
# +
# Time-based split: first 31 days for training, next 14 days for testing.
t0 = data[0][0]
day = 86400
numberOfDaysForTrain = 31
t1 = t0 + int(day * numberOfDaysForTrain)
numberOfDaysForTest = 14
t2 = t1 + day*numberOfDaysForTest
numTotalUsers = len(data[1].unique())
# -
# Clip to the 45-day window, then split on the train/test boundary t1.
data = data[data[0] <= t2]
data_train = data[data[0] <= t1]
data_test = data[data[0] > t1]
print(len(data_train[1]))
print(len(data_train[1].unique()))
print(len(data_train[2].unique()))
print(data_train[2].max())
# Fraction of item-id space actually observed in training.
print(float(len(data_train[2].unique())) / data_train[2].max())
# Events per user over the test period.
stats_over_test_days = data_test.groupby(1).size()
numTestUsers = len(data_test[1].unique())
print(stats_over_test_days.min())
print(stats_over_test_days.max())
print(stats_over_test_days.mean())
print(stats_over_test_days.quantile(0.5))
print(stats_over_test_days.quantile(0.75))
print(stats_over_test_days.quantile(0.85))
print("#users in train: ")
print(len(data_train[1].unique()))
print("#users in test: ")
print(len(data_test[1].unique()))
print("#users in both: ")
print(len(set(data_test[1].unique()).intersection(set(data_train[1].unique()))))
import numpy as np
# +
# Quantile level -> per-user test-event-count quantile, for the 1% grid.
quantile_levels = np.arange(0.0, 1.0, 0.01)
dic = {q: stats_over_test_days.quantile(q) for q in quantile_levels}
# -
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font="Droid Sans",font_scale = 2)
sns.set_style("whitegrid")
sns.set_color_codes("dark")
# +
import matplotlib.pylab as plt
# Plot the quantile curve of per-user test activity.
lists = sorted(dic.items()) # sorted by key, return a list of tuples
x, y = zip(*lists) # unpack a list of pairs into two tuples
plt.ylabel("#artists listened to")
plt.xlabel("% of users ")
plt.plot(x, y)
plt.show()
# -
# Persist the splits: train keeps (timestamp, user, item); test keeps
# (user, item) sorted by user. Space-separated, no header/index.
data_train = pd.concat([data_train[0], data_train[1], data_train[2]], axis = 1)
data_train.to_csv('data_session_train', sep = ' ', header = False, index = False)
data_test = pd.concat([data_test[1], data_test[2]], axis = 1)
data_test = data_test.sort_values(1)
data_test.to_csv('data_session_test', sep = ' ', header = False, index = False)
| Notebooks/Data_Manipulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Elementwise arithmetic broadcasts the scalar over the 1-D array.
o=np.array([-2,3.45,8,5,6])
o
o+100
o-100
o*100
o**100
o.T
m=np.array([[1,2,9/10],[2,3,-2.56]])
m
# Two equivalent transposes, then trace and diagonal of a 2x3 matrix.
np.transpose(m)
m.T
np.trace(m)
np.trace(m.T)
np.diag(m)
# Boolean masks from elementwise modulo tests.
o%2==0
o%3==0
m%2==0
m%3==0
import numpy as np
o=np.array([-2,3.45,8,5,6])
o
o%2==0
# np.where: replace even entries by value*100, keep the rest.
np.where(o%2==0,o*100,o)
# Reductions; note o.sort() sorts in place and returns None.
o.min()
o.max()
o.mean()
o.sort()
o
o.sum()
np.mean(o)
np.min(o)
np.max(o)
np.median(o)
m=np.array([[1,2,9/10],[2,3,-2.56]])
m
n=np.array([[1,-2,9/10],[2,3.65,-2.56]])
n
# Concatenate along rows (axis=0) and columns (axis=-1 == axis=1 here).
np.append(m,n,axis=0)
np.append(m,n,axis=-1)
np.append(m,n,axis=1)
import numpy as np
# NOTE(review): a negative dimension raises ValueError — apparently left in
# to demonstrate the error; the corrected call follows.
a=np.random.random((-3,8))
a
a=np.random.random((3,8))
a
# Split the 3x8 array into 8 column vectors.
b=np.hsplit(a,8)
b
np.roll(a,0)
a.astype(float)
# Two equivalent ways to stringify a list of ints.
L=[2,3,4,8,-9]
[str(i) for i in L]
list(map(str,L))
# +
# Minimal one-shot chatbot: greet, take one question, and answer only the
# "company name" question (all keywords must appear as separate words).
print("imesh: Hello I am imesh. What is your Name?")
name=input("Your Name :")
# Bug fix: the original f-string used (name) literally, so the user's name
# was never interpolated; braces are required.
print(f"imesh: Hi {name}...so what do you want to know about our company?")
ans=input("Your answer :")
if all(i in ans.lower().split(" ") for i in ["company","name"]):
    print("imesh: our company is Leeza(PVT)Ltd")
# +
# Interactive FAQ chatbot: keeps answering company questions until the user
# declines to continue.
#
# Fixes relative to the original:
#   * f-strings used (name)/[name] literally; braces are required to
#     interpolate the user's name.
#   * The CEO and address branches never re-prompted, so the loop repeated
#     the same answer forever; every branch now asks whether to continue.
#   * The contact keywords "Contact"/"Telephone" were capitalized and so
#     could never match the lowercased tokens; they are lowercased now.
#     ("telephone number" contains a space and still cannot match a single
#     token — kept for compatibility.)
def ask_continue():
    """Ask whether the user has another question; update globals ans/flag."""
    global ans, flag
    print("imesh: Do you want to know anything else?")
    check=input("Your answer:")
    if any(i in check.lower().split(" ") for i in ["yes","yeah","exactly","sure"]):
        print("imesh: okay, what do you want to know?")
        ans=input("Your answer:")
        flag=True
    else:
        print(f"imesh: okay {name}. Nice to chat with you. Have a nice day")
        flag=False

print("imesh: Hello I am imesh. What is your Name?")
name=input("Your Name :")
print(f"imesh: Hi {name}...so what do you want to know about our company?")
ans=input("Your answer :")
flag=True
while flag:
    if all(i in ans.lower().split(" ") for i in ["company","name"]):
        print("imesh: our company is Leeza(PVT)Ltd")
        ask_continue()
    elif all(i in ans.lower().split(" ") for i in ["ceo"]):
        print("imesh: our CEO is Mr.<NAME>")
        ask_continue()
    elif any(i in ans.lower().split(" ") for i in ["address","location","place","located","where"]):
        print("imesh: our address is 175 Kaduwela Road, Malabe")
        ask_continue()
    elif any(i in ans.lower().split(" ") for i in ["contact","telephone","email","telephone number"]):
        print("imesh: Contact us. Our email:<EMAIL>, Our telephone number:0112874646")
        ask_continue()
    else:
        print(f"imesh: Sorry {name}, I can't understand your question")
        ask_continue()
| Online Python Webinar Workshop Session Day 13.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Data Collection (2019-2.0)
# language: ''
# name: collection-2019-2.0
# ---
import os
os.__file__
import numpy as np
# For energies > 15 keV scan range 6.5 -0.1 to +0.15 mm
#
# Note: Avoid 6.500 gap setting to avoid Bluesky crash.
# If 6.500 mm chosen, the lower scan value gets set to just < 6.4 mm, with the lowest allowed IVU gap = 6.400 mm
# For some reason this doesn't happen for smaller gaps, e.g. 13500 keV and gap = 6.472 mm?
#
# MF 2018-04-02 Poor man's backup
# [ 5000, 6000, 6539, 7110, 7200, 7500, 7600, 8052, 8331, 8979, 9660, 10000, 10400, 10500, 10871, 11564, 11919, 12284, 12660, 13400, 13500, 14200, 14799, 14800, 15000, 16000, 18000, 20000, 21000, 22000, 25000],
# [ 6.972, 7.940, 8.466, 9.046, 9.138, 9.457, 6.533, 6.805, 6.969, 7.347, 7.741, 7.938, 8.169, 6.472, 6.633, 6.927, 7.075, 7.226, 7.377, 6.437, 7.062, 7.320, 7.530, 6.430, 6.484, 6.790, 6.522, 6.980, 6.480, 6.690, 6.660])
# [ 5000, 6000, 6539, 7110, 7200, 7500, 7600, 8052, 8331, 8979, 9660, 10000, 10400, 10500, 10871, 11564, 11849, 11850, 13500, 14200, 14799, 14800, 15000, 16000, 18000, 20000, 21000, 22000, 25000],
# [ 6.972, 7.940, 8.466, 9.046, 9.138, 9.457, 6.533, 6.805, 6.969, 7.347, 7.741, 7.938, 8.169, 6.472, 6.633, 6.927, 7.100, 6.430, 7.062, 7.320, 7.530, 6.430, 6.484, 6.790, 6.522, 6.980, 6.480, 6.690, 6.660])
#
# MF 2019-06-30 Very poor man's backup: Copied over Wuxian's high E values
# [ 5000, 6000, 6539, 7110, 7200, 7500, 7600, 8052, 8331, 8979, 9660, 10000, 10400, 10500, 10871, 11564, 11919, 12284, 12660, 13349, 13350, 13500, 14200, 14799, 14800, 15000, 16000, 18000, 20000, 21000, 22000, 25000, 30000],
# np.array([ 6.972, 7.940, 8.466, 9.046, 9.138, 9.457, 6.533, 6.805, 6.969, 7.347, 7.741, 7.938, 8.169, 6.472, 6.633, 6.927, 7.075, 7.226, 7.377, 7.675, 6.430, 7.062, 7.320, 7.530, 6.430, 6.484, 6.790, 6.522, 6.980, 6.480, 6.690, 6.660, 6.720]) * 1000)
#
#
# IVU undulator gap lookup table: photon energy [eV] -> gap [um]
# (gap values entered in mm, scaled *1000). Print before/after to confirm
# the write took effect.
print(read_lut('ivu_gap'))
write_lut('ivu_gap',
          [ 5000, 6000, 6539, 7110, 7200, 7500, 7600, 8052, 8331, 8979, 9660, 10000, 10400, 10500, 10871, 11564, 11919, 12284, 12660, 13349, 13350, 13500, 13800, 14100, 14400, 14700, 14799, 14800, 15100, 15400, 15700, 16000, 16300, 16599, 16600, 16900, 17200, 17500, 17799, 17800, 18100, 18400, 18700, 19000, 19300, 19600, 19900, 20200, 20500, 20799, 20800, 21100, 21400, 21700, 22000, 22300, 22600, 22900, 23200, 23500, 23799, 23800, 24100, 24400, 24700, 25000, 25300, 25600, 26000, 26500, 26999, 27000, 27500, 28000, 28500, 29000, 29500, 30000],
          np.array([ 6.972, 7.940, 8.466, 9.046, 9.138, 9.457, 6.533, 6.805, 6.969, 7.347, 7.741, 7.938, 8.169, 6.472, 6.633, 6.927, 7.075, 7.226, 7.377, 7.675, 6.420, 6.470, 6.571, 6.672, 6.772, 6.871, 6.901, 6.425, 6.517, 6.610, 6.697, 6.787, 6.878, 6.968, 6.492, 6.575, 6.659, 6.739, 6.819, 6.426, 6.500, 6.578, 6.652, 6.731, 6.805, 6.878, 6.952, 7.023, 7.099, 7.174, 6.430, 6.496, 6.559, 6.627, 6.690, 6.752, 6.815, 6.882, 6.943, 7.007, 7.070, 6.431, 6.487, 6.548, 6.603, 6.660, 6.719, 6.770, 6.849, 6.935, 7.030, 6.475, 6.552, 6.635, 6.715, 6.803, 6.885, 6.965]) * 1000)
print(read_lut('ivu_gap'))
import numpy as np
# Energy-dependent gap-offset corrections (entered in mm, scaled *1000).
print(read_lut('ivu_gap_off'))
write_lut('ivu_gap_off', [ 5000, 8052, 8331, 11919, 12284, 13500, 15000],
          np.array([-0.009, -0.009, -0.006, -0.006, -0.003, -0.002, -0.001]) * 1000)
print(read_lut('ivu_gap_off'))
# HDCM T2 gap from FDR documentation
# Horizontal offset d = 30 mm
# T2 gap = d / cos (Bragg angle)
# Monochromator second-crystal gap LUT: energy [eV] -> gap [mm].
print(read_lut('hdcm_g'))
write_lut('hdcm_g',
          [ 5000, 6000, 6539, 7110, 7200, 7500, 7600, 8052, 8331, 8979, 9660, 10000, 10400, 10500, 10871, 11564, 11919, 12284, 12660, 13400, 13474, 13500, 15000, 18000, 20000, 22500, 25000, 27500, 30000],
          [ 16.331, 15.887, 15.737, 15.616, 15.600, 15.550, 15.535, 15.474, 15.430, 15.378, 15.324, 15.302, 15.279, 15.273, 15.254, 15.224, 15.211, 15.198, 15.186, 15.166, 15.164, 15.163, 15.132, 15.091, 15.074, 15.058, 15.047, 15.039, 15.033])
print(read_lut('hdcm_g'))
# HDCM roll correction vs energy.
print(read_lut('hdcm_r'))
write_lut('hdcm_r', [5000, 6539, 7000, 8331, 11564, 12660, 13500, 30000],
          [2.651, 2.663, 2.673, 2.677, 2.685, 2.685, 2.72, 2.75])
print(read_lut('hdcm_r'))
# HDCM pitch correction vs energy.
print(read_lut('hdcm_p'))
write_lut('hdcm_p', [5000, 6550, 9000, 12700, 13500, 15000, 20000, 25000, 30000],
          [0.12, 0.11, 0.10, 0.11, 0.12, 0.13, 0.18, 0.205, 0.205])
print(read_lut('hdcm_p'))
# Horizontal focusing mirror: vertical position switches at 10 keV.
print(read_lut('hfm_y'))
write_lut('hfm_y', [5000, 9999, 10000, 13500],
          [ 0, 0, -8, -8])
print(read_lut('hfm_y'))
print(read_lut('hfm_x'))
write_lut('hfm_x', [5000, 13500],
          [ 1.3, 1.3])
print(read_lut('hfm_x'))
# HFM pitch: two plateaus with the step at 10 keV, matching hfm_y.
print(read_lut('hfm_pitch'))
write_lut('hfm_pitch', [ 5000, 9999, 10000, 13500],
          [-2.554, -2.554, -2.527, -2.527])
print(read_lut('hfm_pitch'))
# +
# Obsolete: Changed to energy dependent LUT waveform
#print(read_lgp('kbm_vx'))
#write_lgp('kbm_vx', 4500)
#print(read_lgp('kbm_vx'))
# -
# KB mirror (vertical) x position: flat LUT replacing the old scalar setting.
print(read_lut('kbm_vx'))
write_lut('kbm_vx', [ 5000, 30000],
          [4500.0, 4500.0])
print(read_lut('kbm_vx'))
# KB vertical mirror y and pitch: energy-independent scalar settings.
print(read_lgp('kbm_vy'))
write_lgp('kbm_vy', -488)
print(read_lgp('kbm_vy'))
print(read_lgp('kbm_vp'))
write_lgp('kbm_vp', -2539)
print(read_lgp('kbm_vp'))
print(read_lgp('kbm_hx'))
write_lgp('kbm_hx', 500)
print(read_lgp('kbm_hx'))
# +
# Obsolete: Changed to energy dependent LUT waveform
#print(read_lgp('kbm_hy'))
#write_lgp('kbm_hy', 7000)
#print(read_lgp('kbm_hy'))
# -
# KB horizontal mirror y: steps from 0 to 7000 at 10 keV.
print(read_lut('kbm_hy'))
write_lut('kbm_hy', [ 5000, 9999, 10000, 30000],
          [0.0, 0.0, 7000.0, 7000.0])
print(read_lut('kbm_hy'))
print(read_lgp('kbm_hp'))
write_lgp('kbm_hp', -2385)
print(read_lgp('kbm_hp'))
# Attenuation for auto beam alignment, to avoid saturated scintillator image
# Set to value that will work with cam_8 centroid
print(read_lut('atten'))
write_lut('atten', [5000, 6550, 9000, 12700, 13500, 15000, 20000, 25000, 30000],
[0.06, 0.03, 0.01, 0.03, 0.05, 0.06, 0.1, 0.15, 0.2])
print(read_lut('atten'))
# Calibration of LoMag camera in an EPICS PV [um/px]
# There is also the Beamline PVs CSS screen for this PV
print( cameraCalGet('LoMag') )
cameraCalSet('LoMag', 1.079)
# Calibration of HiMag camera in an EPICS PV [um/px]
# There is also the Beamline PVs CSS screen for this PV
print( cameraCalGet('HiMag') )
cameraCalSet('HiMag', 0.272)
| set_energy setup FMX-20200428.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.0
# language: julia
# name: julia-1.5
# ---
using Revise, OrdinaryDiffEq, Flux, DiffEqFlux, Optim
using Plots, FileIO, JLD2, OffsetArrays, ProgressMeter, Kinetic
# --- Case configuration: 1D normal shock structure, reduced model with
# two distribution functions over one velocity dimension (1d2f1v). ---
case = "shock"
space = "1d2f1v"
nSpecies = 1
interpOrder = 1       # first-order spatial reconstruction
limiter = "vanleer"
cfl = 0.7             # CFL number used below to pick the time step
maxTime = 250.0
# Physical mesh: uniform cells on [-25, 25] with one ghost layer.
x0 = -25.0
x1 = 25.0
nx = 80
pMeshType = "uniform"
nxg = 1
# u-velocity mesh (used by the reduced 1D solver and as the first axis
# of the full 3D velocity space below).
umin = -10.0
umax = 10.0
nu = 48
nug = 0
# v- and w-velocity meshes (used by the 2D/3D velocity spaces).
vmin = -10.0
vmax = 10.0
nv = 28
nvg = 0
wmin = -10.0
wmax = 10.0
nw = 28
nwg = 0
vMeshType = "rectangle"
# Gas state: Mach-2 shock at Knudsen 1 with VHS-type reference viscosity.
knudsen = 1.0
mach = 2.0
prandtl = 0.6666667
inK = 2
omega = 0.5
alphaRef = 1.0
omegaRef = 0.5
nm = 5                # number of modes passed to kernel_mode below
tLen = 3              # number of points in the training time range `tran`
nh = 12               # hidden-width multiplier of the UDE network
# +
# Assemble the Kinetic.jl solver set: gas model, physical/velocity
# meshes, Rankine-Hugoniot initial condition, and the explicit time step.
γ = heat_capacity_ratio(inK, 1)
set = Setup(case, space, nSpecies, interpOrder, limiter, cfl, maxTime)
pSpace = PSpace1D(x0, x1, nx, pMeshType, nxg)
μᵣ = ref_vhs_vis(knudsen, alphaRef, omegaRef)
gas = GasProperty(knudsen, mach, prandtl, inK, γ, omega, alphaRef, omegaRef, μᵣ)
vSpace = VSpace1D(umin, umax, nu, vMeshType)
vSpace2D = VSpace2D(vmin, vmax, nv, wmin, wmax, nw, vMeshType)
vSpace3D = VSpace3D(umin, umax, nu, vmin, vmax, nv, wmin, wmax, nw, vMeshType)
# Left/right shock states from the Rankine-Hugoniot relations.
wL, primL, hL, bL, bcL, wR, primR, hR, bR, bcR = ib_rh(mach, γ, vSpace.u, inK)
ib = IB2F(wL, primL, hL, bL, bcL, wR, primR, hR, bR, bcR)
ks = SolverSet(set, pSpace, vSpace, gas, ib, pwd())
kn_bzm = hs_boltz_kn(ks.gas.μᵣ, 1.0)
sos = sound_speed(ks.ib.primR, γ)
# CFL condition: fastest signal speed (max velocity + sound speed)
# divided by the cell size gives the inverse of the stable step.
tmax = (ks.vSpace.u1 + sos) / ks.pSpace.dx[1]
dt = Float32(ks.set.cfl / tmax)
tspan = (0.f0, dt)
tran = range(tspan[1], tspan[2], length = tLen)
# -
# Control volumes: the left half of the domain starts from the upstream
# (L) shock state, the right half from the downstream (R) state.
ctr = OffsetArray{ControlVolume1D2F}(undef, eachindex(ks.pSpace.x))
face = Array{Interface1D2F}(undef, ks.pSpace.nx + 1)
for i in eachindex(ctr)
    if i <= ks.pSpace.nx ÷ 2
        ctr[i] = ControlVolume1D2F(
            ks.pSpace.x[i],
            ks.pSpace.dx[i],
            Float32.(ks.ib.wL),
            Float32.(ks.ib.primL),
            Float32.(ks.ib.hL),
            Float32.(ks.ib.bL),
        )
    else
        ctr[i] = ControlVolume1D2F(
            ks.pSpace.x[i],
            ks.pSpace.dx[i],
            Float32.(ks.ib.wR),
            Float32.(ks.ib.primR),
            Float32.(ks.ib.hR),
            Float32.(ks.ib.bR),
        )
    end
end
# Cell interfaces, all initialized from the left state.
for i = 1:ks.pSpace.nx+1
    face[i] = Interface1D2F(ks.ib.wL, ks.ib.hL)
end
# March 2000 explicit steps toward the steady shock profile.
sumRes = zeros(3)
@showprogress for iter = 1:2000
    Kinetic.evolve!(ks, ctr, face, dt)
    Kinetic.update!(ks, ctr, face, dt, sumRes)
end
# Training snapshot matrix: h stacked over b, one column per cell
# (size 2*nu x nx).
X = Array{Float32}(undef, ks.vSpace.nu * 2, ks.pSpace.nx)
for i = 1:ks.pSpace.nx
    X[1:nu, i] .= ctr[i].h
    X[nu+1:end, i] .= ctr[i].b
end
# +
function shakhov!(df, f, p, t)
    # In-place Shakhov-type relaxation: the upper half of `f` relaxes
    # toward the equilibrium H, the lower half toward B, with time
    # scale `tau`. `p` is the tuple (H, B, tau); `t` is unused.
    H, B, tau = p
    half = size(f, 1) ÷ 2
    upper = 1:half
    lower = half+1:size(f, 1)
    df[upper, :] .= (H .- f[upper, :]) ./ tau
    df[lower, :] .= (B .- f[lower, :]) ./ tau
end
# Reference data: per-cell Maxwellian equilibria (H, B), their Shakhov
# heat-flux corrections (SH, SB), and VHS collision times τ; then evolve
# X one relaxation step to produce the regression target Y.
H = Array{Float32}(undef, nu, size(X, 2))
B = Array{Float32}(undef, nu, size(X, 2))
SH = Array{Float32}(undef, nu, size(X, 2))
SB = Array{Float32}(undef, nu, size(X, 2))
τ = Array{Float32}(undef, 1, size(X, 2))
for i in axes(X, 2)
    H[:, i] .= maxwellian(ks.vSpace.u, ctr[i].prim)
    B[:, i] .= H[:, i] .* ks.gas.K ./ (2.0 .* ctr[i].prim[end])
    q = heat_flux(ctr[i].h, ctr[i].b, ctr[i].prim, ks.vSpace.u, ks.vSpace.weights)
    # shakhov() returns the corrections; add them to get the full
    # Shakhov equilibria.
    H1, B1 = shakhov(ks.vSpace.u, H[:,i], B[:,i], q, ctr[i].prim, ks.gas.Pr, ks.gas.K)
    SH[:,i] .= H[:,i] .+ H1
    SB[:,i] .= B[:,i] .+ B1
    τ[1, i] = vhs_collision_time(ctr[i].prim, ks.gas.μᵣ, ks.gas.ω)
end
P = [SH, SB, τ]
M = vcat(H, B)      # stacked Maxwellian matching the layout of X
prob = ODEProblem(shakhov!, X, tspan, P)
Y = solve(prob, Euler(), dt=dt) |> Array;
# +
#--- universal differential equation ---#
# Universal differential equation: BGK relaxation toward the Maxwellian
# plus a dense-network correction applied to the deviation f - M.
model_univ = FastChain(
    FastDense(nu * 2, nu * 2 * nh, tanh),
    #FastDense(nu * 2 * nh, nu * 2 * nh, tanh),
    FastDense(nu * 2 * nh, nu * 2),
)
p_model = initial_params(model_univ)
function dfdt(f, p, t)
    # Captures H, B, τ, M, nu and model_univ from the notebook scope;
    # only the network parameters p are trained.
    h = f[1:nu, :]
    b = f[nu+1:end, :]
    dh = (H .- h) ./ τ .+ model_univ(f .- M, p)[1:nu, :]
    db = (B .- b) ./ τ .+ model_univ(f .- M, p)[nu+1:end, :]
    df = vcat(dh, db)
end
prob_ube = ODEProblem(dfdt, X, tspan, p_model)
function loss(p)
    # Squared L2 distance between the UDE forecast and the Shakhov
    # reference trajectory Y.
    #sol_ube = solve(prob_ube, Midpoint(), u0=X, p=p, saveat=tran)
    sol_ube = solve(prob_ube, Euler(), u0=X, p=p, dt=dt)
    loss = sum(abs2, Array(sol_ube) .- Y)
    return loss
end
cb = function (p, l)
    # Training callback; returning false lets the optimization continue.
    #display(l)
    return false
end
res = DiffEqFlux.sciml_train(loss, p_model, ADAM(), cb=Flux.throttle(cb, 1), maxiters=200)
# -
# Precompute the fast-spectral-method kernels for the full Boltzmann
# collision operator on the 3D velocity mesh.
phi, psi, phipsi = kernel_mode(
    nm,
    vSpace3D.u1,
    vSpace3D.v1,
    vSpace3D.w1,
    vSpace3D.du[1, 1, 1],
    vSpace3D.dv[1, 1, 1],
    vSpace3D.dw[1, 1, 1],
    vSpace3D.nu,
    vSpace3D.nv,
    vSpace3D.nw,
    ks.gas.αᵣ,
);
function bench_boltz()
    # Reconstruct the full 3D distribution from the reduced (h, b) pair
    # of the first cell, then evaluate one FFT-based collision operator.
    f_full = full_distribution(
        ctr[1].h[1:end],
        ctr[1].b[1:end],
        ks.vSpace.u[1:end],
        ks.vSpace.weights[1:end],
        vSpace3D.v[1:end,1:end,1:end],
        vSpace3D.w[1:end,1:end,1:end],
        ctr[1].prim,
        ks.gas.γ,
    )
    df = boltzmann_fft(f_full[1:end,1:end,1:end], kn_bzm, nm, phi, psi, phipsi);
end
using BenchmarkTools
@benchmark bench_boltz()
| example/shock/efficiency_compare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp __init__
# +
# hide
import os
# Make the project root (parent of notebooks/) importable so the
# ccstabilizer package resolves when running from the notebooks dir.
notebooks_dir = os.getcwd()
project_dir = os.path.dirname(notebooks_dir)
import sys
sys.path.append(project_dir)
# -
# # Trading Specifications Fetcher
from ccstabilizer import MXCAPI
# +
# export
import os
import time
import yaml
class Fetcher(object):
    """Fetch trading specifications and symbol liquidity for an exchange.

    Results are cached in per-exchange YAML files (named after the
    exchange class) so later runs can re-read them without API calls.
    """

    # Base names of the per-exchange YAML caches; the exchange class
    # name and ``.yaml`` suffix are appended at runtime,
    # e.g. ``./crypto-liquidity-MXC.yaml``.
    LIQUIDITY_YAML = './crypto-liquidity'
    TRADING_SPEC_YAML = './crypto-trading-spec'

    def __init__(self, exchange):
        # MXC credentials come from the environment; raises KeyError if
        # MXC_ACCESS_KEY / MXC_SECRET_KEY are unset.
        self.mxc = MXCAPI(os.environ['MXC_ACCESS_KEY'], os.environ['MXC_SECRET_KEY'])
        self.exchange = exchange

    def _liquidity_path(self):
        # Liquidity cache path for this exchange, e.g. ./crypto-liquidity-MXC.yaml
        return f'{type(self).LIQUIDITY_YAML}-{type(self.exchange).__name__}.yaml'

    @staticmethod
    def write(yaml_data, yaml_file_path):
        """Dump *yaml_data* to *yaml_file_path*, preserving insertion order."""
        with open(yaml_file_path, 'w') as f:
            yaml.dump(yaml_data, f, sort_keys=False)

    @staticmethod
    def read(yaml_file_path):
        """Load a YAML file; return {} when the file is missing or empty."""
        if not os.path.exists(yaml_file_path):
            return {}
        with open(yaml_file_path) as f:
            # safe_load instead of FullLoader: these files only hold
            # plain mappings, and safe_load cannot construct arbitrary
            # Python objects from a tampered file. ``or {}`` guards the
            # empty-file case, where yaml returns None and the callers'
            # dict operations would crash.
            return yaml.safe_load(f) or {}

    @staticmethod
    def extract_symbols(liquidity):
        """Split a {symbol: limited_flag} mapping into the pair
        (all symbols, symbols whose flag is truthy)."""
        symbols = set(liquidity)
        liquid_symbols = {
            symbol for symbol, liquid in liquidity.items() if liquid
        }
        return symbols, liquid_symbols

    def fetch_symbols(self):
        """Query the exchange symbol list, cache the liquidity map to
        YAML, and return the (symbols, liquid_symbols) pair."""
        liquidity = {
            trading_spec['symbol']: trading_spec.get('limited', False)
            for trading_spec in self.mxc.get_symbols()
        }
        type(self).write(liquidity, self._liquidity_path())
        return type(self).extract_symbols(liquidity)

    def read_symbols(self):
        """Return (symbols, liquid_symbols) from the cached liquidity YAML."""
        liquidity = type(self).read(self._liquidity_path())
        return type(self).extract_symbols(liquidity)

    def get_trading_spec(self, crypto_symbol, fiat_symbol):
        """Fetch the trading spec for one pair, merge it into the cached
        trading-spec YAML under the key '<crypto>-<fiat>', and return it."""
        yaml_file_path = f'{type(self).TRADING_SPEC_YAML}-{type(self.exchange).__name__}.yaml'
        trading_spec = self.exchange.get_trading_specification(crypto_symbol, fiat_symbol)
        trading_specs = type(self).read(yaml_file_path)
        trading_specs.update({
            f'{crypto_symbol}-{fiat_symbol}': trading_spec
        })
        type(self).write(trading_specs, yaml_file_path)
        return trading_spec
# -
# Fetch and cache the AR/USDT spec from MXC.
from ccstabilizer import MXC
from ccstabilizer import secrets
exchange = MXC()
Fetcher(exchange).get_trading_spec('AR', 'USDT')
# Fetch and cache the CELO/USDT spec from Binance.
from ccstabilizer import Binance
from ccstabilizer import secrets
exchange = Binance()
Fetcher(exchange).get_trading_spec('CELO', 'USDT')
# Sanity check: the class name is what the YAML cache files are keyed on.
from ccstabilizer import MXC
from ccstabilizer import secrets
exchange = MXC()
print(type(exchange).__name__)
| notebooks/21_fetcher.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# %env CUDA_VISIBLE_DEVICES=2
from cuml.manifold.umap import UMAP as cumlUMAP
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm.autonotebook import tqdm
from joblib import Parallel, delayed
import umap
import pandas as pd
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
from avgn.visualization.spectrogram import draw_spec_set
from avgn.utils.buckeye_utils import VOWEL_CONSONANT
from avgn.signalprocessing.create_spectrogram_dataset import flatten_spectrograms
# ### load data
DATASET_ID = 'buckeye'
df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'phones_uniform_32.pickle'
df_loc
syllable_df = pd.read_pickle(df_loc)
syllable_df[:3]
# individuals
pd.DataFrame({i:[np.sum(syllable_df.indv == i)] for i in syllable_df.indv.unique()})
# most frequent_phones
# Count per-label frequencies and keep the 45 most frequent phones.
freq_phones = (
    pd.DataFrame(
        {i: [np.sum(syllable_df.labels == i)] for i in syllable_df.labels.unique()}
    )
    .T.sort_values(by=0, ascending=False)[:45]
    .T
)
freq_phones
# +
# Restrict to the frequent phones and flatten each 2D spectrogram.
subset_df = syllable_df[
    syllable_df.labels.isin(freq_phones.columns)
]
specs = flatten_spectrograms(list(subset_df.spectrogram.values))
# -
# NOTE(review): `specs` was already flattened above, so this second
# flatten_spectrograms call looks redundant — confirm the intent.
specs_flattened = flatten_spectrograms(specs)
np.shape(specs_flattened)
# #### cluster and plot
# GPU UMAP embedding of the flattened spectrograms via cuML.
cuml_umap = cumlUMAP()
embedding = cuml_umap.fit_transform(specs_flattened)
np.shape(embedding)
z = list(embedding)
z = np.vstack(z)
| notebooks/08.0-human-phones-analyses/.ipynb_checkpoints/buckeye_project_all-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 68-points
# 2017-12-28
# By TimeStamp
# #cnblogs: http://www.cnblogs.com/AdaminXie/
# Detect faces in a test image, then draw and number dlib's 68 facial
# landmarks on each detected face.
import dlib # dlib: face detection / landmark library
import numpy as np # numpy: array handling
import cv2 # OpenCV: image I/O and drawing
# dlib detector and 68-point landmark predictor (the .dat model file
# must be present next to this notebook).
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
path="********************"
# Read the image with OpenCV.
img=cv2.imread(path+"test.jpg")
# Convert to grayscale for the detector.
# NOTE(review): cv2.imread yields BGR, so COLOR_BGR2GRAY would be the
# conventional flag here; RGB2GRAY swaps the R/B weights — confirm.
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# One rectangle per detected face.
rects = detector(img_gray, 0)
for i in range(len(rects)):
    landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[i]).parts()])
    for idx, point in enumerate(landmarks):
        # Pixel coordinates of one of the 68 landmark points.
        pos = (point[0, 0], point[0, 1])
        # Draw a green circle on each landmark (68 in total).
        cv2.circle(img, pos, 5, color=(0, 255, 0))
        # Label the landmark with its 1-based index (1-68).
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, str(idx+1), pos, font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
cv2.namedWindow("img", 2)
cv2.imshow("img", img)
cv2.waitKey(0)
| face_struct/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1oiL9Z46nv9o"
# <center><h1>Convolutions</h1></center>
#
# <center><h2><a href="https://arthurdouillard.com/deepcourse/">Course link</a></h2></center>
#
# To keep your modifications in case you want to come back later to this colab, do *File -> Save a copy in Drive*.
#
# If you find a mistake, or know how to improve this notebook, please open an issue [here](https://github.com/arthurdouillard/deepcourse/issues).
#
# In this colab, we are going to learn convolutions by practicing different hard coded convolution kernels. In later colabs, we won't handcraft those kernels, but let neural networks find the optimal kernel values.
#
# First, let's download a famous mandril image to visualize results.
# + id="y9k12vTpnmtc"
# !wget https://arthurdouillard.com/deepcourse/img/mandril.jpg
# + id="mwB7dtlsnti8"
from PIL import Image
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import matplotlib.pyplot as plt
def torch_to_jpg(tensor):
    """Convert a torch image tensor into a Pillow image.

    A 3-D CHW tensor is permuted to HWC first; a trailing singleton
    channel is squeezed away; values are cast to uint8 for Pillow.
    """
    hwc = tensor.permute(1, 2, 0) if tensor.dim() == 3 else tensor
    if hwc.shape[-1] == 1:
        hwc = hwc.squeeze(-1)
    return Image.fromarray(hwc.numpy().astype(np.uint8))
def jpg_to_torch(jpg_image):
    """Convert a Pillow image (H x W x C) to a float CHW torch tensor."""
    pixels = torch.tensor(np.array(jpg_image)).float()
    # In torch the order is channel / height / width
    return pixels.permute(2, 0, 1)
# + id="7X-2nU3JpBrU"
# Load the mandril test image and keep both the Pillow and CHW tensor
# versions; the bare `jpg_img` displays the image in the notebook.
jpg_img = Image.open("mandril.jpg")
img = jpg_to_torch(jpg_img)
jpg_img
# + [markdown] id="D2xiSXu_XMTM"
# Always in deep learning, when working on images, text, audio, internal representation, etc. look at the shape!
# + id="oU63GxjspDaQ"
img.shape
# + [markdown] id="6NKbDfldXUxY"
# A gray image can simply be made by averaging the channels, do it:
# + id="-wyRlOtRpT10"
gray_image = # TODO
print(gray_image.shape)
torch_to_jpg(gray_image)
# + id="HJ6tqHCqfn2c"
# Execute this cell to see the solution, but try to do it by yourself before!
# !wget https://raw.githubusercontent.com/arthurdouillard/deepcourse/master/static/code/cnn/gray.py
# %pycat gray.py
# + [markdown] id="p7SPSqnvXrxQ"
# Now, let's try a random convolution kernel. We initialize its value using a "kaiming normal" which is the usual initialization in convolutional networks. We'll see later what it is exactly.
#
# Remember in CNN, a "kernel" is actually several convolutions kernels. There is one $k\times k$ kernel per input channel and per output channel, thus we have $C_i \times C_o$ $k \times k$ kernels.
#
# The final size is therefore $C_i \times C_o \times k \times k $.
#
# In the following block, we define a $5 \times 5$ kernel. There are $C_i = 3$ input channels (i.e. RGB) which we will map onto $C_o = 3$ output channels:
# + id="5ktZLSpeqpm4"
# A random 3x3x5x5 kernel (three input and three output channels, 5x5
# window), Kaiming-normal initialized as in standard CNNs.
w = torch.randn(3, 3, 5, 5)
w = torch.nn.init.kaiming_normal_(w)
# + id="M6W81MkFq3Lo"
o = F.conv2d(
    # torch wants a "batch" of image, so we add a new dimension to have a batch of size 1
    img[None],
    w
)
torch_to_jpg(o[0])
# + [markdown] id="H1Onqy34ZTQs"
# Pretty colors right? But not very useful. What about handcrafting a $5 \times 5$ blurring kernel?
# + id="PEwF4KfHuCL0"
w = # TODO
o = F.conv2d(img[None], w)
torch_to_jpg(o[0])
# + id="IHBehFIKgInU"
# Execute this cell to see the solution, but try to do it by yourself before!
# !wget https://raw.githubusercontent.com/arthurdouillard/deepcourse/master/static/code/cnn/blur.py
# %pycat blur.py
# + [markdown] id="aXRM9foXZgyU"
# Visualize the kernel values and interpret it. Remember, we don't want to mix channels!
# + id="XQ5yyPMXvM9w"
w
# + [markdown] id="56DnKxXCZxBY"
# Now do a **identity kernel**, that will do... nothing:
# + id="7R2K7a-yvRQw"
w = # TODO
o = F.conv2d(img[None], w)
torch_to_jpg(o[0])
# + id="lq64jc4egPQU"
# Execute this cell to see the solution, but try to do it by yourself before!
# !wget https://raw.githubusercontent.com/arthurdouillard/deepcourse/master/static/code/cnn/id.py
# %pycat id.py
# + [markdown] id="MXXJSOmWZ3LI"
# Now try a **edge detection** kernel from https://en.wikipedia.org/wiki/Kernel_(image_processing). This kind of kernel was extremely useful before CNN, and can be found in many algorithms such a SIFT.
# + id="l7dNE7fTvq2Q"
w = torch.zeros(1, 3, 3, 3)
# TODO
o = F.conv2d(img[None].mean(dim=0, keepdims=True), w)
o = 255 * (o - o.min()) / (o.max() - o.min()) # Rescale to [0, 255]
torch_to_jpg(o[0])
# + id="xxKrLH_pgX5c"
# Execute this cell to see the solution, but try to do it by yourself before!
# !wget https://raw.githubusercontent.com/arthurdouillard/deepcourse/master/static/code/cnn/edge.py
# %pycat edge.py
# + [markdown] id="zzIcbMpEcpa8"
# Now, let's reproduce an average pooling that will divide the height & width by 2, but with a convolution kernel. First do it with `F.avg_pool2d`, then with a convolution with `F.conv2d`.
#
# + id="gf7FyUAGbBWA"
pooled = F.avg_pool2d(img[None], kernel_size=2)
plt.figure(figsize=(13, 8))
plt.subplot(1, 2, 1)
plt.imshow(torch_to_jpg(pooled[0]))
plt.title(f"w/ avg pooling, {pooled[0].shape}")
w = # TODO
o = F.conv2d(img[None].mean(dim=0, keepdims=True), w, stride=2)
o = 255 * (o - o.min()) / (o.max() - o.min()) # Rescale to [0, 255]
plt.subplot(1, 2, 2)
plt.imshow(torch_to_jpg(o[0]))
plt.title(f"w/ convolution, {o[0].shape}");
# + id="RN3UXkcCdbsI"
# Execute this cell to see the solution, but try to do it by yourself before!
# !wget https://raw.githubusercontent.com/arthurdouillard/deepcourse/master/static/code/cnn/avg.py
# %pycat avg.py
| static/notebooks/Convolutions_Deepcourse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Customized Outputs
# ### Use this notebook to produce the final csv and geospatial output files with extra custom calculations
# +
import geopandas as gpd
import requests
import pandas as pd
import os
import shutil
import glob
import numpy as np
# On first usage, download the Bus Stop Inventory - or whenever you want to update. Then Comment it out.
# url = 'https://opendata.arcgis.com/datasets/490db54636704d35aae661a12c12e9a0_0.geojson'
# r = requests.get(url, allow_redirects=True)
# open('Bus_Stop_Inventory.geojson', 'wb').write(r.content)
# Stop inventory and route shapes; `crs` is reused below when the merged
# frames are turned back into GeoDataFrames.
geo_df = gpd.read_file('Bus_Stop_Inventory.geojson')
routes_shapes = gpd.read_file('routes_shapes.geojson')
crs = routes_shapes.crs
# +
# Build the per-route "fast output" CSVs plus combined CSV/geospatial
# outputs from the results/*.csv ridership files.
# Normalise join keys to int so merges against the trip CSVs line up.
routes_shapes['routeShortName'] = routes_shapes['routeShortName'].astype(int)
routes_shapes['directionId'] = routes_shapes['directionId'].astype(int)
routes_shapes['stopId'] = routes_shapes['stopId'].astype(int)
frames = []
stops_frames = []
missing_rows = []
allFiles = glob.glob('.' + "/results/*.csv")
for file_ in allFiles:
    print(file_)
    df = pd.read_csv(file_)
    # Most frequent route_id in the file (version-stable: the old
    # value_counts().reset_index().head(1)['index'] pattern breaks on
    # pandas >= 2.0, where reset_index no longer yields an 'index' column).
    route_id = df['route_id'].value_counts().idxmax()
    df['route_id'] = route_id #alternatively df = df.loc[df['route_id'].dropna().index,]
    #Add daily totals.
    # Select the columns with a list: the df.groupby(...)['a','b'] tuple
    # form was deprecated and then removed from pandas.
    daily_counts = df.groupby(['STOP_ID','direction_id'])[['BOARD_ALL','ALIGHT_ALL','LOAD_ALL']].sum().reset_index().rename(columns={
        "BOARD_ALL":"BOARD_ALL_DAILY","ALIGHT_ALL":"ALIGHT_ALL_DAILY","LOAD_ALL":"LOAD_ALL_DAILY"})
    df = pd.merge(df,daily_counts, how='outer')
    #df = df.query("TIME_PERIOD=='AM Peak'|TIME_PERIOD=='PM Peak'")
    df = df.sort_values(by=['TIME_PERIOD','DIRECTION_NAME','SORT_ORDER'])
    df['travel delay'] = df['travel_time_secs_mean'] - df['travel_time_min_secs']
    df['activity'] = df['BOARD_ALL'] + df['ALIGHT_ALL']
    df['daily activity'] = df['BOARD_ALL_DAILY'] + df['ALIGHT_ALL_DAILY']
    df['activity divided by dwell'] = df['activity']/df['dwell_time_secs_mean']
    #df['total_sec_delay'] = df['LOAD_ALL']*df['delay_secs_mean'].shift(periods=-1)
    # Meters to feet (1 m = 3.28084 ft) as a plain scalar multiply.
    df['distance to next stop'] = df['stop_path_length_meters']*3.28084
    # Shift the "to next stop" quantities up one row so each stop row
    # describes the segment leaving that stop.
    df['distance to next stop'] = df['distance to next stop'].shift(periods=-1)
    df['travel_speed_miles_per_hour'] = df['travel_speed_miles_per_hour'].shift(periods=-1)
    df['travel_time_secs_mean'] = df['travel_time_secs_mean'].shift(periods=-1)
    df['travel_time_min_secs'] = df['travel_time_min_secs'].shift(periods=-1)
    df['travel delay'] = df['travel delay'].shift(periods=-1)
    df['travel_time_secs_std'] = df['travel_time_secs_std'].shift(periods=-1)
    # Per-observed-trip rates.
    df['boardings per Obs'] = df['BOARD_ALL']/df['percent_stopped']
    df['alightings per Obs'] = df['ALIGHT_ALL']/df['percent_stopped']
    df['activity per Obs'] = df['activity']/df['percent_stopped']
    df['dwell seconds times load'] = df['dwell_time_secs_mean']*df['LOAD_ALL']
    df['load divided by ob activity'] = df['LOAD_ALL']/df['activity per Obs']
    df['Load times travel delay'] = df['LOAD_ALL']*df['travel delay']
    # Divisions by zero above become inf; convert them to NaN.
    df = df.replace([np.inf, -np.inf], np.nan)
    df_csv = df.sort_values(by=['TIME_PERIOD','DIRECTION_NAME','SORT_ORDER'])
    columns_to_keep = ['STOP_ID', 'TIME_PERIOD','DIRECTION_NAME','route_id','SORT_ORDER','BOARD_ALL', 'ALIGHT_ALL','daily activity','timepoint','TRIPS_ALL','TRIPS_GROSS', 'activity','dwell_time_secs_mean', 'dwell_time_secs_std', 'percent_stopped',
        'boardings per Obs', 'alightings per Obs', 'activity per Obs', 'distance to next stop', 'travel_speed_miles_per_hour', 'travel_time_secs_mean', 'travel_time_min_secs', 'travel delay',
        'travel_time_secs_std', 'LOAD_ALL', 'dwell seconds times load', 'load divided by ob activity', 'activity divided by dwell', 'Load times travel delay', 'direction_id','BOARD_ALL_DAILY', 'ALIGHT_ALL_DAILY', 'LOAD_ALL_DAILY',]
    df_csv = df_csv[columns_to_keep]
    df_csv.rename(columns={'STOP_ID':'stop id','sec_per_activity':'seconds per activity', 'total_sec_delay':'Total Passanger Delay',
        'travel_time_min_secs':'minimum travel time', 'TIME_PERIOD':'time period', 'DIRECTION_NAME':'direction name', 'SORT_ORDER':'sort order',
        'travel_time_secs_mean':'travel time', 'travel_time_secs_std':'travel time STD', 'delay_secs_mean':'delay time',
        'travel_speed_miles_per_hour':'MPH','dwell_time_secs_mean':'dwell time', 'dwell_time_secs_std':'dwell time std',
        'BOARD_ALL': 'Boardings','ALIGHT_ALL':'Alightings', 'LOAD_ALL':'Load' },inplace=True)
    # Attach stop names/locations from the Bus Stop Inventory.
    df_csv = pd.merge(df_csv,geo_df[['trapeze_id','stopname', 'st_loc', 'routes_listed']],left_on=['stop id'],right_on=['trapeze_id'], how='left')
    df_csv.sort_values(by=['time period','direction name','sort order']).to_csv("fast_output/fast_output_" + file_.split('_')[-1], index=False)
    frames.append(df_csv.copy())
    stops_columns_to_keep = ['stop id', 'time period', 'direction name', 'direction_id', 'timepoint', 'Boardings',
        'Alightings', 'activity','daily activity','dwell time', 'dwell time std',
        'percent_stopped', 'boardings per Obs', 'alightings per Obs',
        'activity per Obs', 'distance to next stop', 'Load',
        'dwell seconds times load', 'load divided by ob activity',
        'activity divided by dwell', 'trapeze_id',
        'stopname', 'st_loc', 'route_id','routes_listed']
    df_stops_geojson = pd.merge(df_csv[stops_columns_to_keep],geo_df[['trapeze_id','rtiid', 'geometry']],left_on=['trapeze_id'],right_on=['trapeze_id'], how='inner')
    stops_frames.append(df_stops_geojson)
    # df_stops_geojson = pd.merge(df[stops_columns_to_keep],geo_df[['trapeze_id','stopname','rtiid', 'geometry']],left_on=['STOP_ID'],right_on=['trapeze_id'])
    # stops_frames.append(df_stops_geojson)
    # missing_row = pd.merge(df, routes_shapes, left_on=['route_id','direction_id','STOP_ID'],right_on=['routeShortName','directionId','stopId'], how='left', indicator=True).query("_merge!='both'")
    # missing_rows.append(missing_row)
pd.concat(frames,ignore_index=True).to_csv("fast_output/full_routes.csv",index=False)
df_all = pd.concat(frames,ignore_index=True)
stops_df = pd.concat(stops_frames,ignore_index=True)
stops_df.rename(columns=lambda x: x.replace(' ','_'), inplace=True)
stops_df = gpd.GeoDataFrame(stops_df, crs = crs).set_geometry(stops_df['geometry'])
### Try to write out the geospatial output, remove and write if it already exists.
try:
    os.remove('fast_geospatial_output/stops_df.geojson')
except OSError:
    pass
try:
    os.remove('fast_geospatial_output/shapes_data.geojson')
except OSError:
    pass
try:
    shutil.rmtree('fast_geospatial_output/stops_df/')
except FileNotFoundError:
    pass
try:
    shutil.rmtree('fast_geospatial_output/shapes_data/')
except FileNotFoundError:
    pass
stops_df.to_file('fast_geospatial_output/stops_df.geojson',driver='GeoJSON')
stops_df.to_file('fast_geospatial_output/stops_df',driver='ESRI Shapefile')
# Join per-stop metrics onto the route shape segments and drop rows
# without a matching geometry.
shapes_data = pd.merge(df_all, routes_shapes, left_on=['route_id','direction_id','stop id'],right_on=['routeShortName','directionId','stopId'], how='left')
shapes_data.rename(columns=lambda x: x.replace(' ','_'), inplace=True)
shapes_data = shapes_data.loc[~shapes_data['geometry'].isnull(),]
shapes_data = gpd.GeoDataFrame(shapes_data, crs = crs).set_geometry(shapes_data['geometry'])
shapes_data.to_file('fast_geospatial_output/shapes_data.geojson',driver='GeoJSON')
shapes_data.to_file('fast_geospatial_output/shapes_data',driver='ESRI Shapefile')
# pd.concat(missing_rows).to_csv('missing_data_rows.csv',index=False)
# +
# df_all['direction_id']
# df_all.loc[df_all['direction_id'].isnull(),]
# +
# stops_df.loc[stops_df['direction_id'].isnull(),]
# -
| fast_csv_and_geospatial_output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
"""
为不同类dog图片名称添加前缀 哈士奇:0_ 吉娃娃:1_ 松狮犬:2_
"""
import os
#为不同类dog添加前缀 哈士奇:0 吉娃娃:1 松狮犬:2
def ranameJPG(filepath, kind):
    """Prefix every file in *filepath* with ``<kind>_`` and normalize to .jpg.

    Only the last underscore-separated token of the original name is
    kept (extension dropped), e.g. ``husky_photo_7.png`` -> ``0_7.jpg``
    for kind 0. *filepath* must end with a path separator, since names
    are built by plain string concatenation.
    """
    for original in os.listdir(filepath):
        stem = original.split("_")[-1].split('.')[0]
        os.rename(filepath + original, filepath + str(kind) + '_' + stem + '.jpg')
# print(name)
# Prefix each breed folder with its class label: husky=0, chihuahua=1,
# chow=2 (matching the label extraction in the training script).
if __name__ == '__main__':
    ranameJPG("/Users/s/code/python/keras/dog_img/h_dog/",0)
    ranameJPG("/Users/s/code/python/keras/dog_img/j_dog/",1)
    ranameJPG("/Users/s/code/python/keras/dog_img/s_dog/",2)
# +
'''
keras 中类似的vgg的神经网络要求输入图片为100 * 100,处理图片为
'''
from PIL import Image
def converting(orgimg, outdir, width=100, height=100):
    """Resize the image at *orgimg* to width x height (bilinear) and save
    it under *outdir* with the same base name. Failures are printed, not
    raised."""
    target = os.path.join(outdir, orgimg.split('/')[-1])
    source = Image.open(orgimg)
    try:
        source.resize((width, height), Image.BILINEAR).save(target)
    except Exception as e:
        print(e)
'''
图片的每一个分类放在一个文件加里,再由一个文件夹放所有的分类文件加
org_parent_dir:图片处理前的总文件夹
out_parent_dir:图片处理后的总文件夹
'''
def convertjpg(org_parent_dir, out_parent_dir):
    """Resize every image in each class subfolder of *org_parent_dir*,
    writing the results into the matching subfolder of *out_parent_dir*.

    Entries whose names contain a dot (plain files such as .DS_Store)
    are skipped; both parent paths must end with '/'.
    """
    for class_dir in os.listdir(org_parent_dir):
        if "." in class_dir:
            continue
        src = org_parent_dir + class_dir + '/'
        dst = out_parent_dir + class_dir + '/'
        for img in os.listdir(src):
            print(img)
            converting(src + img, dst)
#img = Image.open("/Users/s/code/python/keras/dog_img/s_dog/2_songshi99.jpg")
# Resize every class folder under dog_img/ into dog_img_org/ (100x100).
if __name__ == "__main__":
    convertjpg("/Users/s/code/python/keras/dog_img/","/Users/s/code/python/keras/dog_img_org/")
# -
#待测贴图片处理
def convertpre(org_dir, out_dir, width=100, height=100):
    """Resize every file in *org_dir* to width x height (bilinear) and
    save it under the same name in *out_dir*; failures are printed and
    the file is skipped. Directory listings/paths are printed as a
    progress trace."""
    print(os.listdir(org_dir))
    for fname in os.listdir(org_dir):
        print(org_dir + fname)
        try:
            source = Image.open(org_dir + fname)
            print(os.path.join(out_dir, fname))
            source.resize((width, height), Image.BILINEAR).save(os.path.join(out_dir, fname))
        except Exception as e:
            print(e)
# Resize the raw prediction images into predict_img/dealed_img/.
convertpre("predict_img/org_img/","predict_img/dealed_img/")
# +
"""
VGG-style convolutional network for the 3-class dog breed classifier.
"""
import os
from PIL import Image
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers import Conv2D, MaxPooling2D
#--------------------------------------------------------------------------------------------
# Convert the training-set images into arrays.
ima1 = os.listdir('./train_img/')
def read_image(filename):
    # Load one image as an H x W x 3 RGB uint8 array.
    img = Image.open(filename).convert('RGB')
    return np.array(img)
x_train = []
for i in ima1:
    if i == r".DS_Store":
        continue
    x_train.append(read_image('./train_img/'+i))
x_train = np.array(x_train)
# Extract labels from the file-name prefix (the 0/1/2 added by ranameJPG).
y_train = []
for filename in ima1:
    if filename == r".DS_Store":
        continue
    y_train.append(int(filename.split('_')[0]))
y_train = np.array(y_train)
# -----------------------------------------------------------------------------------------
# Convert the test-set images into arrays.
ima2 = os.listdir('./test_img/')
# def read_image2(filename):
#     img = Image.open('./test_img/'+filename).convert('RGB')
#     return np.array(img)
x_test = []
for i in ima2:
    if i == r".DS_Store":
        continue
    x_test.append(read_image("./test_img/"+i))
x_test = np.array(x_test)
# Extract labels from the test file-name prefixes.
y_test = []
for filename in ima2:
    if filename == r".DS_Store":
        continue
    y_test.append(int(filename.split('_')[0]))
y_test = np.array(y_test)
#-------------------------------------------------------------------------------------
# One-hot encode the labels.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# Rescale pixel values from 0-255 to 0-1.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Build the convolutional network: two conv blocks + dense head,
# softmax over the 3 classes.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(3, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=40, epochs=32)
model.save_weights('./dog_weights.h5', overwrite=True)
# Evaluate on the held-out test images; score is [loss, accuracy].
score = model.evaluate(x_test, y_test, batch_size=40)
print(score)
# -
# +
# Predict the breed of the preprocessed images with the trained weights.
img_dir = os.listdir('/Users/s/code/python/keras/predict_img/dealed_img/')
# def read_image2(filename):
#     img = Image.open('./test_img/'+filename).convert('RGB')
#     return np.array(img)
print(os.listdir("./predict_img/dealed_img/"))
print(img_dir)
x_predice = []
for i in img_dir:
    if i == r".DS_Store":
        continue
    x_predice.append(read_image("/Users/s/code/python/keras/predict_img/dealed_img/"+i))
x_predice = np.array(x_predice)
x_predice = x_predice.astype('float32')
x_predice /= 255
# Rebuild the same architecture before loading the saved weights.
# NOTE(review): Dropout here is 0.5 vs 0.3 at training time; presumably
# harmless for load_weights since Dropout carries no weights — confirm.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.load_weights('dog_weights.h5')
# Class indices map back to the breed names (husky, chihuahua, chow).
classes = model.predict_classes(x_predice)
target = ['哈士奇', '吉娃娃', '松狮犬']
for index in classes:
    print(target[index])
# -
| dog_class/.ipynb_checkpoints/distinguish_dog-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import tensorflow_datasets as tfds
from t5.data import preprocessors as prep
import functools
import t5
import gin
import sentencepiece as spm
from glob import glob
import os
# Load the T5 base-model operative config and the Malay-English
# sentencepiece vocabulary used for tokenization.
gin.parse_config_file('pretrained_models_base_operative_config.gin')
vocab = 'sp10m.cased.ms-en.model'
sp = spm.SentencePieceProcessor()
sp.Load(vocab)
# +
import json
# Scraped news corpus; the loop below reads the parallel lists under
# the 'title' and 'text' keys.
with open('/home/husein/news/populate-news.json') as fopen:
    data = json.load(fopen)
# -
data.keys()
# +
from tqdm import tqdm
import re
def cleaning(string):
    """Normalize whitespace: newlines and tabs become spaces, runs of
    spaces collapse to one, and leading/trailing whitespace is removed."""
    flattened = string.replace('\n', ' ').replace('\t', ' ')
    return re.sub(r' {2,}', ' ', flattened).strip()
# -
len(data['title'][0].split())
# Write (article, title) TSV pairs, keeping only titles longer than 3
# words and cleaned bodies longer than 20 chars that are not scraping
# artifacts (javascript walls / registration boilerplate).
with tf.io.gfile.GFile('newstitle.tsv', "w") as outfile:
    for i in range(len(data['title'])):
        c = cleaning(data['text'][i])
        if len(data['title'][i].split()) > 3 and len(c) > 20 and 'javascript' not in c and 'register in order to view' not in c:
            outfile.write("%s\t%s\n" % (c, cleaning(data['title'][i])))
# +
def news_dataset(split, shuffle_files = False):
    """Return a tf.data.Dataset of {'question': article, 'answer': title}
    pairs read from the tab-separated ``newstitle.tsv`` file.

    *split* and *shuffle_files* are required by the T5 Task API but are
    ignored here: there is a single unsplit TSV and no file shuffling.
    """
    del shuffle_files
    ds = tf.data.TextLineDataset(
        [
            'newstitle.tsv'
        ]
    )
    # Each line is "<article>\t<title>"; decode_csv yields two strings.
    ds = ds.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults = ['', ''],
            field_delim = '\t',
            use_quote_delim = False,
        ),
        num_parallel_calls = tf.data.experimental.AUTOTUNE,
    )
    ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
    return ds
def news_preprocessor(ds):
    """Map a {'question', 'answer'} dataset to T5 {'inputs', 'targets'},
    prefixing each question with the task tag 'tajuk: '."""
    return ds.map(
        lambda ex: {
            'inputs': tf.strings.join(['tajuk: ', ex['question']]),
            'targets': ex['answer'],
        },
        num_parallel_calls = tf.data.experimental.AUTOTUNE,
    )
# -
# (Re)register the task: remove any stale definition first so this cell can
# be re-run inside the notebook.
t5.data.TaskRegistry.remove('news_dataset')
t5.data.TaskRegistry.add(
    'news_dataset',
    dataset_fn = news_dataset,
    splits = ['train'],
    text_preprocessor = [news_preprocessor],
    sentencepiece_model_path = vocab,
    metric_fns = [t5.evaluation.metrics.accuracy],
)
nq_task = t5.data.TaskRegistry.get("news_dataset")
# NOTE(review): split='qa.tsv' looks odd — the registered splits are
# ['train']; confirm this is intentional (news_dataset ignores `split`).
ds = nq_task.get_dataset(split='qa.tsv', sequence_length={"inputs": 1024, "targets": 1024})
# Smoke-test: pull one numpy example from the pipeline.
r = tfds.as_numpy(ds)
next(r)
| pretrained-model/t5/prepare/2.prepare-newstitle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Imports for the partition-function / Fokker-Planck scratch notebook.
from math import *
from scipy.integrate import dblquad
import numpy as np
from numpy import square as sqr
import matplotlib as mpl
import matplotlib.pyplot as plt
from copy import deepcopy
#import plotly.plotly as py
import sys,os
#sys.path.append(os.path.dirname(__file__), '..','Lib')
#sys.path.append(os.path.join('..','Lib'))
#from foxlink.stylelib.ase1_styles import ase1_sims_stl, ase1_runs_stl
#plt.style.use(ase1_runs_stl)
# Solvers for first order PDEs
#from sympy.solvers.pde import pdsolve
#from sympy import Function, diff, pprint, exp
#from sympy.abc import x,y
# Matplotlib rcParams used for all figures in this notebook.
xlink_methods_stl = {
    "axes.titlesize": 18,
    "axes.labelsize": 15,
    "lines.linewidth": 3,
    "lines.markersize": 10,
    "xtick.labelsize": 15,
    "ytick.labelsize": 15,
    "font.size": 15
}
plt.style.use(xlink_methods_stl)
# Set parameters for partition function integration
# Geometric parameters
r = 1 # Distance between MT centers
a = 0 # Dot product between r unit vector and MT1 direction vector
b = 0 # Dot product between r unit vector and MT2 direction vector
d = -1 # Dot product between MT1 and MT2 direction vector
# Crosslinker parameters
k = 4.562 # spring constant of motors
ho = 0. # equilibrium length
c = 40. # Crosslinker affinity * fugacity
# System parameters
bt = 1. # beta
# MT parameters
hL = 10. # Half length of filaments
fs = 1. # Stall force of motors
ko = 1. # Turnover rate of motors
vo = 1. # Base velocity of motors
# NOTE(review): default_params is empty, yet the force functions below use it
# as their default `bp` and index into it — calling them without an explicit
# bp raises IndexError. Every call in this notebook passes params explicitly;
# consider default_params = default_params_gen if defaults should work.
default_params = []
# Packed parameter vector; index layout used by the functions below:
# [0]=r, [1]=a, [2]=b, [3]=d, [4]=k, [5]=ho, [6]=c, [7]=bt, [8]=hL,
# [9]=fs, [10]=ko, [11]=vo
default_params_gen = [r, a, b, d, k, ho, c, bt, hL, fs, ko, vo]
def create_param_list(var_list, index, base_params=default_params):
    """Return one parameter list per value in *var_list*.

    Each returned list is a deep copy of *base_params* with position
    *index* overwritten by the corresponding value.
    """
    out = []
    for value in var_list:
        candidate = deepcopy(base_params)  # never mutate the caller's list
        candidate[index] = value
        out.append(candidate)
    return out
def create_param_list_array(index=0, p_range=[0, 1], n=100, base_params=default_params):
    """Sample *n* values uniformly over *p_range* and build the matching
    parameter sweep.

    Returns (list of sampled values, numpy array of parameter lists).
    """
    samples = np.linspace(p_range[0], p_range[1], n).tolist()
    return samples, np.array(create_param_list(samples, index, base_params))
def para_force21(s1, s2, bp=default_params):
    """Parallel spring force on filament 2 for heads at (s1, s2).

    bp packs geometry: bp[0]=r, bp[2]=b, bp[3]=d; `k` is the module-level
    spring constant.
    """
    stretch = bp[0]*bp[2] + s2 - s1*bp[3]
    return -k * stretch
def para_force12(s1, s2, bp=default_params):
    """Parallel spring force on filament 1; mirror of para_force21 using
    bp[1]=a instead of bp[2]=b."""
    stretch = bp[0]*bp[1] - s1 + s2*bp[3]
    return k * stretch
def vel_force(s1, s2, bp=default_params):
    """Smoothed (sigmoidal) force-velocity relation for the second head.

    bp[9] is the stall force; the logistic in alpha approximates the linear
    force-velocity law without its hard kinks at 0 and -fs.
    """
    force_ratio = para_force12(s1, s2, bp) / bp[9]
    alpha = 1. + 2.*force_ratio
    return 1. / (1. + exp(-2.*alpha))
def dvel_dforce(s1, s2, bp=default_params):# bp = base_params
    # Only for second head at the moment
    # NOTE(review): `alpha` is computed but never used and the return value
    # is the bare spring constant -k — this looks like an unfinished
    # derivative of vel_force; confirm before relying on it.
    alpha = 1. + 2.*(para_force12(s1,s2,bp)/bp[9])
    return -k
# # Force-velocity relation
# Compare the piecewise-linear motor force-velocity law with the smoothed
# sigmoidal version over a sweep of head separations.
params = deepcopy(default_params_gen)
params[0] = 0  # set filament separation r = 0 for this comparison
s2 = 0
s1 = np.linspace(-1, 1, 200).tolist()
vel = [vel_force(s, s2, params) for s in s1]
force = np.array([para_force21(s, s2, params) for s in s1])
fig, ax = plt.subplots(figsize=(10,7))
# Linear law: v=0 below stall, linear ramp for -fs<F<0, v=vo for F>=0
# (params[11]=vo, params[9]=fs).
lin_vel = np.piecewise(force,
    [force <-1., (force >= -1) & (force < 0), force >= 0],
    [0, lambda force: params[11]*(1.+force/params[9]), 1])
ax.plot(force, lin_vel, label="Linear")
ax.plot(force, vel, label="Smoothed")
ax.axvline(x=0, color='r', linestyle='--', label="Zero force")
ax.set_xlabel("Normalized parallel force ($F_{\parallel}/F_s$)")
ax.set_ylabel("Normalized motor velocity ($v_g/v_o$)")
ax.set_ylim(0, 1.2)
ax.legend()
plt.show()
# # Spring constant compare
# Parameters (pN / nm units) for comparing zero-rest-length effective spring
# constants against the finite-rest-length crosslinker model.
fs = 2.0   # stall force
ks = .3    # spring constant
ho = 52.   # rest length
sep = 25.  # perpendicular separation between filaments
def get_zrl_parallel_spring_const(ks, fs, ho, sep):
    """Effective zero-rest-length spring constant matched on the *parallel*
    force component at stall.

    Solves the quadratic a*(k^2)^2 + b*(k^2) + c = 0 for k^2 and returns the
    positive root's square root.
    """
    quad_a = sep*sep
    quad_b = (ks*ks*(ho*ho - sep*sep)) + (fs*fs)
    quad_c = -ks*ks*fs*fs
    discriminant = (quad_b*quad_b)-(4.*quad_a*quad_c)
    k_squared = (-1.*quad_b + np.sqrt(discriminant))/(2.*quad_a)
    return np.sqrt(k_squared)
def get_zrl_spring_const(ks, fs, ho, sep):
    """Effective zero-rest-length spring constant matched on the *total*
    force at stall: k' = ks*fs / (fs + ho*ks).

    NOTE: `sep` is accepted for signature parity with the parallel variant
    but does not enter this formula.
    """
    numerator = ks*fs
    denominator = fs+(ho*ks)
    return numerator/denominator
def get_nzrl_parallel_force(x, ks, ho, sep):
    """Parallel force of a rest-length-`ho` spring anchored `sep` away,
    evaluated at displacement *x* along the filament (works on scalars or
    numpy arrays)."""
    extension_factor = 1.-(ho/np.sqrt(x*x + sep*sep))
    return -1.*ks*x*extension_factor
def get_nzrl_force(h, ks, ho):
    """Hookean restoring force of a spring with rest length *ho* stretched
    to length *h*."""
    stretch = h-ho
    return -1.*ks*stretch
# +
# 2x2 comparison: zero-rest-length approximations (ks_new1 = parallel-force
# match, ks_new2 = total-force match) vs the finite-rest-length model.
#print(ks)
# NOTE(review): ks_new1 is a hard-coded prior result — presumably from
# get_zrl_parallel_spring_const with earlier parameters; confirm.
ks_new1 = 0.03715512882326001
ks_new2 = get_zrl_spring_const(ks, fs, ho, sep)
#print("Matching distance along filament for parallel:", fs/ks_new1)
#print("Matching distance for parallel:", np.sqrt((fs/ks_new1)**2+sep**2))
#print(get_zrl_parallel_force(fs/ks_new1, ks, ho, sep))
x = np.linspace(0,1.5*fs/ks_new1, 50)
fig, axarr = plt.subplots(2,2,figsize=(10,10))
axarr[0,0].plot(x, -ks_new1*x, label='rest length = 0 nm')
axarr[0,0].plot(x, get_nzrl_parallel_force(x,ks,ho,sep), label="rest length = {} nm".format(ho))
axarr[0,0].axhline(y=-fs, color='r', linestyle='--', label="Stall force")
axarr[0,0].set_xlabel('Distance along filament (nm)')
axarr[0,0].set_ylabel('Force parallel to filament(pN)')
axarr[0,0].set_title('Parallel force calculation\n(Parallel force match)')
axarr[0,0].legend()
axarr[0,1].plot(x, -ks_new1*x, label='rest length = 0 nm')
axarr[0,1].plot(x, get_nzrl_force(x,ks,ho), label="rest length = {} nm".format(ho))
axarr[0,1].axhline(y=-fs, color='r', linestyle='--', label="Stall force")
axarr[0,1].set_xlabel('Protein stretch(nm)')
axarr[0,1].set_ylabel('Force (pN)')
axarr[0,1].set_title('Total force calculation\n(Parallel force match)')
axarr[0,1].set_xlim(0,1.5*fs/ks_new1)
axarr[0,1].legend()
#axarr[0].set_ylim(-5, 0)
print(ks_new2)
#print("Matching distance along filament for parallel:", np.sqrt((fs/ks_new1)**2 - sep**2))
#print("Matching distance for general:", fs/ks_new2)
#print(get_zrl_parallel_force(fs/ks_new1, ks, ho, sep))
axarr[1,0].plot(x, -ks_new2*x, label='rest length = 0 nm')
axarr[1,0].plot(x, get_nzrl_parallel_force(x,ks,ho,sep), label="rest length = {} nm".format(ho))
axarr[1,0].axhline(y=-fs, color='r', linestyle='--', label="Stall force")
axarr[1,0].set_xlabel('Distance along filament (nm)')
axarr[1,0].set_ylabel('Force parallel to filament(pN)')
axarr[1,0].set_title('Parallel force calculation\n(Total force match)')
axarr[1,0].legend()
axarr[1,1].plot(x, -ks_new2*x, label='rest length = 0 nm')
axarr[1,1].plot(x, get_nzrl_force(x,ks,ho), label="rest length = {} nm".format(ho))
axarr[1,1].axhline(y=-fs, color='r', linestyle='--', label="Stall force")
axarr[1,1].set_xlabel('Protein stretch(nm)')
axarr[1,1].set_ylabel('Force (pN)')
axarr[1,1].set_title('Total force calculation\n(Total force match)')
axarr[1,1].set_xlim(0,1.5*fs/ks_new1)
axarr[1,1].legend()
#axarr[1].set_ylim(-5, 0)
plt.tight_layout()
# +
# Same comparison as the 2x2 figure above, but showing only the
# total-force-matched spring constant (ks_new2) against the
# finite-rest-length model, in a 1x2 layout.
ks_new1 = 0.03715512882326001  # parallel-force-matched constant (precomputed)
ks_new2 = get_zrl_spring_const(ks, fs, ho, sep)
x = np.linspace(0, 1.5*fs/ks_new1, 50)
# subplots(1, 2) returns a 1-D axes array, so panels are axarr[0]/axarr[1];
# the original 2-D indexing (axarr[1,0], axarr[1,1]) raised IndexError.
fig, axarr = plt.subplots(1, 2, figsize=(10, 5))
print(ks_new2)
axarr[0].plot(x, -ks_new2*x)
axarr[0].plot(x, get_nzrl_parallel_force(x, ks, ho, sep))
axarr[0].axhline(y=-fs, color='r', linestyle='--', label="Parallel force")
axarr[0].set_xlabel('Distance along filament (nm)')
axarr[0].set_ylabel('Force parallel to filament(pN)')
axarr[0].set_title('Parallel force calculation')
axarr[1].plot(x, -ks_new2*x)
# get_nzrl_force takes (h, ks, ho); the stray `sep` argument was a TypeError.
axarr[1].plot(x, get_nzrl_force(x, ks, ho))
axarr[1].axhline(y=-fs, color='r', linestyle='--', label="Parallel force")
axarr[1].set_xlabel('Protein stretch(nm)')
axarr[1].set_ylabel('Force (pN)')
axarr[1].set_title('Total force calculation')
axarr[1].set_xlim(0, 1.5*fs/ks_new1)
plt.tight_layout()
# -
# # Solutions to steady state
# NOTE(review): `Function` is undefined here — the sympy imports above are
# commented out, so this line raises NameError as written.
phi = Function('phi')
# Scratch: meshgrid orientation check on small integer arrays.
s1_arr = np.asarray([3,5,8])
s2_arr = np.asarray([4,12,15])
print(s1_arr)
print(s2_arr)
Y,X = np.meshgrid(s2_arr, s1_arr)
print(X)
print(Y)
c = np.sqrt(np.power(X,2)+np.power(Y,2))
# Timing: elementwise norm vs a heavier exp/power expression on a
# 1000x1000 grid.
import time
a = np.linspace(0,1,1000)
b = np.linspace(1,2,1000)
X, Y = np.meshgrid(a,b)
t0 = time.time()
c = np.sqrt(np.power(X,2)+np.power(Y,2))
t1 = time.time()
print("Total time: ", t1-t0)
t0 = time.time()
c = np.exp(np.power(np.sqrt(np.power(X,2)+np.power(Y,2))-.1,2))
t1 = time.time()
print("Total time: ", t1-t0)
print(c)
d = np.asarray([1,2,3])
e = np.outer(d,c)
print(e.shape)
# Scratch: building separation vectors h = s2*u2 - s1*u1 on a grid via
# broadcasting, then norms and broadcast products.
s1 = np.arange(5)
s2 = np.arange(5,9)
print(s1, s2)
u1=np.asarray([1,0,0])
u2=np.asarray([0,1,0])
S2, S1 = np.meshgrid(s2,s1)
hvec = S2[:,:,None]*u2[None,None,:]-S1[:,:,None]*u1[None,None,:]
print(hvec)
A = np.linalg.norm(hvec, axis=2)
print(A)
B = np.ones((5,4,3))
B[:,:,1]*=2
B[:,:,2]*=3
print(B)
C = A[:,:,None]*B[:,:,:]
print(C)
print(C.sum(axis=(0,1)))
v = np.asarray([[1,0,0],[0,1,0],[0,0,1],[1,0,0]])
print(v)
D = np.cross(v,C)
print(D)
u = np.asarray([1,2,3])
uu = np.outer(u,u)
print(uu)
np.power(uu,2)
Iish = np.eye(3)
print(Iish)
# # Source testing
# Scratch: standalone colorbar from a ScalarMappable, and coarse-graining a
# 200x200 matrix to 20x20 by reshaping into (20,10,20,10) and summing the
# two size-10 axes.
cmap = mpl.cm.ScalarMappable( mpl.colors.Normalize(0, 50), 'viridis')
fig, ax = plt.subplots()
print(cmap.to_rgba(25))
#ax.plot(np.linspace(0,10), np.linspace(0,10)*10)
fig.colorbar(cmap, ax=ax)
mat = np.ones((200,200))
#mat = np.arange(81).reshape(9,9)
print(mat)
#mat_new = mat.reshape(3,3,3,3)
mat_new = mat.reshape(20,10,20,10)
print(mat_new)
mat_coarse = mat_new.sum(axis=(1,3))
print(mat_coarse)
print(mat_coarse.shape)
| notebooks/fokker_planck_misc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Build an SSD300 detector (Pascal VOC, 20 classes) in inference mode and
# load pretrained weights.
from keras import backend as K
from keras.models import load_model
from keras.preprocessing import image
from keras.optimizers import Adam
from imageio import imread
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels
from data_generator.object_detection_2d_geometric_ops import Resize
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
# %matplotlib inline
# Network input resolution; frames are resized to this before inference.
img_height = 300
img_width = 300
# 1: Build the Keras model
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, 3),
                n_classes=20,
                mode='inference',
                l2_regularization=0.0005,
                scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
                aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5]],
                two_boxes_for_ar1=True,
                steps=[8, 16, 32, 64, 100, 300],
                offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
                clip_boxes=False,
                variances=[0.1, 0.1, 0.2, 0.2],
                normalize_coords=True,
                subtract_mean=[123, 117, 104],
                swap_channels=[2, 1, 0],
                confidence_thresh=0.5,
                iou_threshold=0.45,
                top_k=200,
                nms_max_output_size=400)
# 2: Load the trained weights into the model.
# TODO: Set the path of the trained weights.
weights_path = 'VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.h5'
model.load_weights(weights_path, by_name=True)
# 3: Compile the model so that Keras won't complain the next time you load it.
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
# One distinct color per class (unused below but handy for visualisation).
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
# VOC class names; index 4 ('boat') is the class the tracking cells keep.
classes = ['background',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat',
           'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor']
# Baseline: per-frame SSD detection (no tracking) on the MODD video,
# writing an annotated video and COCO-style results JSON.
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import time
import json
vs = cv2.VideoCapture('../modd.avi')
# initialize the first frame in the video stream
firstFrame = None
frameCount =0
inferencesCount=0
inferencesCountFinal=0
framesCountFinal=0
frame_width = int(vs.get(3))
frame_height = int(vs.get(4))
print(frame_width)
print(frame_height)
out = cv2.VideoWriter('full_baseline.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30, (frame_width,frame_height))
started = False
# your code
no_tracking_res= []
while frameCount<641:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()[1]
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break
    frameCount+=1
    if(frameCount<0):
        continue
    elif started==False:
        start_time = time.time()
        started=True
    # resize the frame, convert it to grayscale, and blur it
    # Round-trip through a temp JPEG to reuse keras' image loader/resizer.
    cv2.imwrite('temp.jpg',frame)
    img = image.load_img('temp.jpg', target_size=(img_height, img_width))
    img = image.img_to_array(img)
    input_images = []
    input_images.append(img)
    input_images = np.array(input_images)
    y_pred = model.predict(input_images)
    confidence_threshold = 0.4
    y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]
    # loop over the contours
    for box in y_pred_thresh[0]:
        # Transform the predicted bounding boxes for the 512x512 image to the original image dimensions.
        # Keep only class id 4 ('boat' in the VOC class list above).
        if(box[0]!=4):
            continue
        xmin = int(box[-4] * frame.shape[1] / img_width)
        ymin = int(box[-3] * frame.shape[0] / img_height)
        xmax =int(box[-2] * frame.shape[1] / img_width)
        ymax =int(box[-1] * frame.shape[0] / img_height)
        cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (0, 255, 0), 2)
        cv2.putText(frame,'{}: {:.2f}'.format(classes[int(box[0])], box[1]), (xmin, ymin),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        # NOTE(review): int(box[1]) truncates the confidence (0..1) to 0/1 —
        # float(box[1]) is probably intended for the COCO "score" field.
        no_tracking_res.append({"image_id" : frameCount, "category_id" : 1, "bbox" : [float(xmin),float(ymin),float(xmax-xmin),float(ymax-ymin)], "score" : int(box[1])})
    out.write(frame)
# cleanup the camera and close any open windows
elapsed_time = time.time() - start_time
print(elapsed_time)
vs.release()
cv2.destroyAllWindows()
out.release()
with open('full_baseline.json', 'w') as outfile:
    json.dump(no_tracking_res, outfile)
out.release()
import json
with open('1_no_tracking.json', 'w') as outfile:
    json.dump(no_tracking_res, outfile)
# +
def boxes_match(box1, box2, iou_threshold=0.4):
    """Return True when box1 and box2 can be associated.

    Boxes are the extended rows used by this notebook's tracker:
    index 1 = class id, indices 3:7 = (xmin, ymin, xmax, ymax) and
    index 7 = "already matched this frame" flag.

    Boxes match when they share a class, neither is already matched, and
    their IoU exceeds *iou_threshold*.
    """
    if (box1[1] != box2[1] or box1[7] == 1 or box2[7] == 1):
        return False
    # Bug fix: the iou_threshold parameter was ignored and 0.4 hard-coded;
    # the default keeps the previous behavior.
    return iou(box1[3:7], box2[3:7]) > iou_threshold
def boxes_equal(box1,box2,iou_threshold=0.6):
    # Boxes are "equal" when class (index 1) and confidence (index 2) agree
    # and the boxes coincide exactly (IoU == 1).
    # NOTE(review): iou_threshold is accepted but never used — either the
    # parameter is dead or the `== 1` was meant to be `>= iou_threshold`;
    # confirm intent before changing.
    if(box1[1]!=box2[1] or box1[2]!=box2[2]):
        return False
    return iou(box1[3:7],box2[3:7])==1
def box_matches_list(box1,list_boxes):
    # Find the best-IoU box in list_boxes that matches box1 (same class,
    # not yet matched), flag both as matched (index 7 := 1), and return the
    # winning index. Returns -1 when nothing matches.
    # NOTE(review): callers must compare the result with > -1; truthiness
    # tests are wrong because -1 is truthy and index 0 is falsy.
    index =-1
    if(len(list_boxes)==0):
        return index
    match_list = []
    box_index =0
    for box in list_boxes:
        if(box.shape[0]>0):
            # Entries may be a 1-row array wrapping the box, or a flat
            # 8-element row; unwrap accordingly before testing.
            if(box.shape[0]==1):
                if(boxes_match(box[0],box1)):
                    match_list.append(box_index)
            elif(box.shape[0]==8):
                if(boxes_match(box,box1)):
                    match_list.append(box_index)
        box_index+=1
    if(len(match_list)== 0):
        return index
    # Among all candidates, keep the one with the largest IoU.
    max_match_index = 0
    max_match_iou = 0
    for i in match_list:
        cur_iou = iou(box1[3:7],list_boxes[i][3:7])
        if(cur_iou>max_match_iou):
            max_match_iou=cur_iou
            max_match_index =i
    # Side effect: mark the winner and box1 as consumed for this frame.
    list_boxes[max_match_index][7] = 1
    box1[7] = 1
    return max_match_index
def box_in_list(box1,list_boxes):
    # Return the index of the first box in list_boxes matching box1
    # (via boxes_match), or -1 when none does. Unlike box_matches_list this
    # takes the first hit rather than the best-IoU one.
    if(len(list_boxes)==0):
        return -1
    index=0
    for box in list_boxes:
        if(box.shape[0]>0):
            if(box.shape[0]==1):
                if(boxes_match(box[0],box1)):
                    return index
            # NOTE(review): this checks shape[0]==7 while box_matches_list
            # checks ==8; with 8-element rows this branch never fires —
            # confirm which row width is canonical.
            elif(box.shape[0]==7):
                if(boxes_match(box,box1)):
                    return index
        index+=1
    return -1
# -
# ### Using Flow of center point and manual confidence update
# +
# Tracking variant 1: confidence boosting/decay per track, with unmatched
# tracks advected by dense Farneback optical flow at their center point.
# (Indentation reconstructed from context — verify nesting against the
# original notebook.)
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import time
import json
from bounding_box_utils.bounding_box_utils import iou
no_tracking_res = []
tracking_res = []
vs = cv2.VideoCapture('../modd.avi')
# initialize the first frame in the video stream
firstFrame = None
frameCount =0
step = 0.05
total_objects_no_tracking=0
total_objects_tracking=0
frame_width = int(vs.get(3))
frame_height = int(vs.get(4))
print(frame_width)
print(frame_height)
preds = []
pred = None
tracks=[]
out_tracking = cv2.VideoWriter('mod_ssd_tracking_all.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30, (frame_width,frame_height))
started = False
# your code
print("prev_count current_count entered exited")
multiplier=0
cc=0
prev_frame=None
while frameCount<641:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()[1]
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break
    frameCount+=1
    if(frameCount<0):
        continue
    elif started==False:
        start_time = time.time()
        started=True
    # resize the frame, convert it to grayscale, and blur it
    cv2.imwrite('temp.jpg',frame)
    img = image.load_img('temp.jpg', target_size=(img_height, img_width))
    img = image.img_to_array(img)
    input_images = []
    input_images.append(img)
    input_images = np.array(input_images)
    y_pred = model.predict(input_images)
    confidence_threshold = 0.1
    current_pred = []
    y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]
    f = frame
    # loop over the contours
    # Collect this frame's boat detections, prepended with a multiplier slot.
    preds = []
    for box in y_pred_thresh[0]:
        if(box[0]!=4):
            continue
        preds.append(np.insert(box,0,0))
    #print(len(prev_pred),len(temp),len(entered_pred),len(exited_pred))
    print(tracks)
    # Associate detections with existing tracks; matched tracks get a
    # confidence boost proportional to their match streak (multiplier).
    for pred in preds:
        # NOTE(review): box_matches_list returns an index or -1, and -1 is
        # truthy while 0 is falsy — this truthiness test is a bug; later
        # cells correctly use `index > -1`.
        if(box_matches_list(pred,tracks)):
            index = box_in_list(pred,tracks)
            multiplier = tracks[index][0]
            org_conf =tracks[index][2]
            if(multiplier<0):
                multiplier=1
            else:
                multiplier +=1
            del tracks[index]
            pred[2] += multiplier*step
            pred[0] = multiplier
            #print(pred)
            tracks.append(pred)
        else:
            temp_pred = np.copy(pred)
            multiplier = 1
            temp_pred[0] = multiplier
            tracks.append(temp_pred)
            cc = frameCount
    print(tracks)
    # Decay unmatched tracks and advect them with dense optical flow
    # sampled at the box center.
    for track in tracks:
        # NOTE(review): same truthiness-of-index issue as above.
        if(not box_matches_list(track,preds)):
            #print('not found in current frame')
            track[0]-=1
            #print('multiplier decreased to ',track[0])
            track[2]+=(step*track[0])
            frame_grey = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
            # NOTE(review): prev_frame is None on the first processed frame;
            # cvtColor would fail if this branch runs then — confirm.
            prev_frame_grey = cv2.cvtColor(prev_frame,cv2.COLOR_BGR2GRAY)
            flow = cv2.calcOpticalFlowFarneback(prev_frame_grey,frame_grey, None, 0.5, 3, 15, 3, 5, 1.2, 0)
            xmin = int(track[3] * frame.shape[1] / img_width)
            ymin = int(track[4] * frame.shape[0] / img_height)
            xmax =int(track[5] * frame.shape[1] / img_width)
            ymax =int(track[6] * frame.shape[0] / img_height)
            center_flow = flow[np.minimum(int((xmin+xmax)/2),flow.shape[0]-1),np.minimum(int((ymin+ymax)/2),flow.shape[1]-1)]
            #print('item is')
            #print(track)
            #print('center in image is')
            #print(int((xmin+xmax)/2),int((ymin+ymax)/2))
            #print('flow in image is')
            # print(center_flow)
            # Convert flow from frame pixels to the 300x300 box coordinates.
            center_flow_min_x = int(center_flow[0] * img_width / frame.shape[1])
            center_flow_min_y = int(center_flow[1] * img_height / frame.shape[0])
            center_min_x = int((xmin+xmax)/2*img_width/frame.shape[1])
            center_min_y = int((ymin+ymax)/2*img_height/frame.shape[0])
            #print('center in min image is')
            #print(center_min_x,center_min_y)
            track[3] += center_flow_min_x
            track[5]+=center_flow_min_x
            track[4]+=center_flow_min_y
            track[6]+=center_flow_min_y
            #print('flow in min image is')
            #print(center_flow)
    # Draw and record only tracks above the display confidence.
    to_display = [track for track in tracks if track[2]>0.4]
    #print(tracks)
    for box in to_display:
        # Transform the predicted bounding boxes for the 512x512 image to the original image dimensions.
        xmin = int(box[3] * frame.shape[1] / img_width)
        ymin = int(box[4] * frame.shape[0] / img_height)
        xmax =int(box[5] * frame.shape[1] / img_width)
        ymax =int(box[6] * frame.shape[0] / img_height)
        cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (0, 255, 0), 2)
        cv2.putText(frame,'{}: {:.2f}'.format(classes[int(box[1])], box[2]), (xmin, ymin),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        total_objects_tracking+=1
        tracking_res.append({"image_id" : frameCount, "category_id" : 1, "bbox" : [float(xmin),float(ymin),float(xmax-xmin),float(ymax-ymin)], "score" : int(box[1])})
    # if the `q` key is pressed, break from the lop
    out_tracking.write(frame)
    prev_frame=frame
# cleanup the camera and close any open windows
elapsed_time = time.time() - start_time
print(elapsed_time)
vs.release()
cv2.destroyAllWindows()
out_tracking.release()
print(total_objects_tracking)
with open('res_tracking.json', 'w') as outfile:
    json.dump(tracking_res, outfile)
# -
# Persist both result sets as JSON for offline evaluation.
with open('res_no_tracking.json', 'w') as outfile:
    json.dump(no_tracking_res, outfile)
with open('res_tracking.json', 'w') as outfile:
    json.dump(tracking_res, outfile)
# ### Using Key-Point Flow
# +
# Tracking variant 2: same confidence boosting/decay, but unmatched tracks
# are advected by sparse Lucas-Kanade flow of Shi-Tomasi key points inside
# the track's box. (Indentation reconstructed from context.)
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2 as cv
import time
import json
from bounding_box_utils.bounding_box_utils import iou
# Shi-Tomasi corner detection parameters.
feature_params = dict( maxCorners = 25,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
# Lucas-Kanade optical flow parameters.
lk_params = dict( winSize = (15,15),
                  maxLevel = 2,
                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
no_tracking_res = []
tracking_res = []
vs = cv.VideoCapture('../1.avi')
# initialize the first frame in the video stream
firstFrame = None
frameCount =0
step = 0.05
total_objects_no_tracking=0
total_objects_tracking=0
frame_width = int(vs.get(3))
frame_height = int(vs.get(4))
print(frame_width)
print(frame_height)
preds = []
pred = None
tracks=[]
out_tracking = cv.VideoWriter('1_tracking_keypoi.avi',cv.VideoWriter_fourcc('M','J','P','G'), 30, (frame_width,frame_height))
started = False
# your code
print("prev_count current_count entered exited")
multiplier=0
cc=0
prev_frame=None
while frameCount<540:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()[1]
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break
    frameCount+=1
    if(frameCount<0):
        continue
    elif started==False:
        start_time = time.time()
        started=True
    # resize the frame, convert it to grayscale, and blur it
    cv.imwrite('temp.jpg',frame)
    img = image.load_img('temp.jpg', target_size=(img_height, img_width))
    img = image.img_to_array(img)
    input_images = []
    input_images.append(img)
    input_images = np.array(input_images)
    y_pred = model.predict(input_images)
    confidence_threshold = 0.1
    current_pred = []
    y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]
    f = frame
    # loop over the contours
    # Boat detections only; rows gain a leading multiplier slot (index 0)
    # and a trailing matched flag (index 7).
    preds = []
    for box in y_pred_thresh[0]:
        if(box[0]!=4):
            continue
        temp_pred = np.insert(box,0,0)
        temp_pred=np.insert(temp_pred,7,0)
        preds.append(temp_pred)
    # Reset per-frame matched flags on all live tracks.
    for track in tracks:
        track[7]=0
    #print(len(prev_pred),len(temp),len(entered_pred),len(exited_pred))
    # Associate detections with tracks; matched tracks gain confidence
    # proportional to their streak, unmatched detections spawn new tracks.
    for pred in preds:
        index = box_matches_list(pred,tracks)
        if(index>-1):
            multiplier = tracks[index][0]
            org_conf =tracks[index][2]
            if(multiplier<0):
                multiplier=1
            else:
                multiplier +=1
            del tracks[index]
            pred[2] += multiplier*step
            pred[0] = multiplier
            #print(pred)
            tracks.append(pred)
        else:
            temp_pred = np.copy(pred)
            multiplier = 1
            temp_pred[0] = multiplier
            tracks.append(temp_pred)
            cc = frameCount
    # Decay unmatched tracks and move them by the average key-point flow
    # inside their box.
    for track in tracks:
        if(box_matches_list(track,preds)==-1):
            #print('not found in current frame')
            track[0]-=1
            #print('multiplier decreased to ',track[0])
            track[2]+=(step*track[0])
            frame_grey = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
            prev_frame_grey = cv.cvtColor(prev_frame,cv.COLOR_BGR2GRAY)
            # Restrict corner detection to the track's box via a mask.
            mask = np.zeros(frame_grey.shape, dtype = "uint8")
            cv.rectangle(mask, (track[3], track[4]), (track[5], track[6]), (255, 255, 255), -1)
            p0 = cv.goodFeaturesToTrack(prev_frame_grey, mask = mask, **feature_params)
            if(not p0 is None ):
                p1, st, err = cv.calcOpticalFlowPyrLK(prev_frame_grey, frame_grey, p0, None, **lk_params)
                average_flow = np.average(p1-p0,0)[0]
            else:
                average_flow=[0,0]
            xmin = int(track[3] * frame.shape[1] / img_width)
            ymin = int(track[4] * frame.shape[0] / img_height)
            xmax =int(track[5] * frame.shape[1] / img_width)
            ymax =int(track[6] * frame.shape[0] / img_height)
            # Convert flow from frame pixels to 300x300 box coordinates.
            flow_x_box_coords = int(average_flow[0] * img_width / frame.shape[1])
            flow_y_box_coords = int(average_flow[1] * img_height / frame.shape[0])
            track[3] += flow_x_box_coords
            track[5]+=flow_x_box_coords
            track[4]+=flow_y_box_coords
            track[6]+=flow_y_box_coords
    # Draw and record only tracks above the display confidence.
    to_display = [track for track in tracks if track[2]>0.4]
    #print(tracks)
    for box in to_display:
        # Transform the predicted bounding boxes for the 512x512 image to the original image dimensions.
        xmin = int(box[3] * frame.shape[1] / img_width)
        ymin = int(box[4] * frame.shape[0] / img_height)
        xmax =int(box[5] * frame.shape[1] / img_width)
        ymax =int(box[6] * frame.shape[0] / img_height)
        cv.rectangle(frame, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (0, 255, 0), 2)
        cv.putText(frame,'{}: {:.2f}'.format(classes[int(box[1])], box[2]), (xmin, ymin),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        total_objects_tracking+=1
        tracking_res.append({"image_id" : frameCount, "category_id" : 1, "bbox" : [float(xmin),float(ymin),float(xmax-xmin),float(ymax-ymin)], "score" : int(box[1])})
    #print('preds%d'%len(preds))
    #print('tracks%d'%len(tracks))
    #print('displayed%d'%len(to_display))
    # if the `q` key is pressed, break from the lop
    #plt.figure()
    #plt.imshow(frame)
    out_tracking.write(frame)
    prev_frame=frame
# cleanup the camera and close any open windows
elapsed_time = time.time() - start_time
print(elapsed_time)
vs.release()
cv.destroyAllWindows()
out_tracking.release()
print(total_objects_tracking)
with open('1_res_keypoint.json', 'w') as outfile:
    json.dump(tracking_res, outfile)
# +
# Tracking variant 3: key-point-flow tracking plus an experimental Kalman
# filter predict/correct pass over the detections.
# NOTE(review): `kalman` and `state` are not defined in this cell — they
# come from the Kalman demo cell below, which must be run first; confirm
# execution order. (Indentation reconstructed from context.)
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2 as cv
import time
import json
from bounding_box_utils.bounding_box_utils import iou
# Shi-Tomasi corner detection parameters.
feature_params = dict( maxCorners = 25,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
# Lucas-Kanade optical flow parameters.
lk_params = dict( winSize = (15,15),
                  maxLevel = 2,
                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
no_tracking_res = []
tracking_res = []
vs = cv.VideoCapture('../modd.avi')
# initialize the first frame in the video stream
firstFrame = None
frameCount =0
step = 0.05
total_objects_no_tracking=0
total_objects_tracking=0
frame_width = int(vs.get(3))
frame_height = int(vs.get(4))
print(frame_width)
print(frame_height)
preds = []
pred = None
tracks=[]
out_tracking = cv.VideoWriter('1_tracking_keypoi.avi',cv.VideoWriter_fourcc('M','J','P','G'), 30, (frame_width,frame_height))
started = False
# your code
print("prev_count current_count entered exited")
multiplier=0
cc=0
prev_frame=None
while frameCount<540:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()[1]
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break
    frameCount+=1
    # Skip the first 150 frames of this clip.
    if(frameCount<150):
        continue
    elif started==False:
        start_time = time.time()
        started=True
    # resize the frame, convert it to grayscale, and blur it
    cv.imwrite('temp.jpg',frame)
    img = image.load_img('temp.jpg', target_size=(img_height, img_width))
    img = image.img_to_array(img)
    input_images = []
    input_images.append(img)
    input_images = np.array(input_images)
    y_pred = model.predict(input_images)
    confidence_threshold = 0.1
    current_pred = []
    y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]
    f = frame
    # Kalman predict step plus a simulated process-noise state update
    # (mirrors the rotating-point demo below).
    prediction = kalman.predict()
    process_noise = sqrt(kalman.processNoiseCov[0,0]) * np.random.randn(4, 1)
    state = np.dot(kalman.transitionMatrix, state) + process_noise
    #print(state)
    # loop over the contours
    preds = []
    for box in y_pred_thresh[0]:
        if(box[0]!=4):
            continue
        temp_pred = np.insert(box,0,0)
        temp_pred=np.insert(temp_pred,7,0)
        preds.append(temp_pred)
        print('correcting with ',box[2:6])
        kalman.correct(box[2:6])
    # Reset per-frame matched flags on all live tracks.
    for track in tracks:
        track[7]=0
    #print(len(prev_pred),len(temp),len(entered_pred),len(exited_pred))
    for pred in preds:
        index = box_matches_list(pred,tracks)
        if(index>-1):
            multiplier = tracks[index][0]
            org_conf =tracks[index][2]
            if(multiplier<0):
                multiplier=1
            else:
                multiplier +=1
            del tracks[index]
            pred[2] += multiplier*step
            pred[0] = multiplier
            #print(pred)
            tracks.append(pred)
        else:
            temp_pred = np.copy(pred)
            multiplier = 1
            temp_pred[0] = multiplier
            tracks.append(temp_pred)
            cc = frameCount
    print('prediction:',kalman.predict())
    for track in tracks:
        # NOTE(review): `tracks[7]` indexes the *list of tracks* at position
        # 7 — almost certainly meant to be `track[7]` (the per-track matched
        # flag); as written it can raise IndexError with short track lists.
        if(tracks[7]==0 and box_matches_list(track,preds)==-1):
            #print('not found in current frame')
            track[0]-=1
            #print('multiplier decreased to ',track[0])
            track[2]+=(step*track[0])
            frame_grey = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
            prev_frame_grey = cv.cvtColor(prev_frame,cv.COLOR_BGR2GRAY)
            mask = np.zeros(frame_grey.shape, dtype = "uint8")
            cv.rectangle(mask, (track[3], track[4]), (track[5], track[6]), (255, 255, 255), -1)
            p0 = cv.goodFeaturesToTrack(prev_frame_grey, mask = mask, **feature_params)
            if(not p0 is None ):
                p1, st, err = cv.calcOpticalFlowPyrLK(prev_frame_grey, frame_grey, p0, None, **lk_params)
                average_flow = np.average(p1-p0,0)[0]
            else:
                average_flow=[0,0]
            xmin = int(track[3] * frame.shape[1] / img_width)
            ymin = int(track[4] * frame.shape[0] / img_height)
            xmax =int(track[5] * frame.shape[1] / img_width)
            ymax =int(track[6] * frame.shape[0] / img_height)
            flow_x_box_coords = int(average_flow[0] * img_width / frame.shape[1])
            flow_y_box_coords = int(average_flow[1] * img_height / frame.shape[0])
            track[3] += flow_x_box_coords
            track[5]+=flow_x_box_coords
            track[4]+=flow_y_box_coords
            track[6]+=flow_y_box_coords
    to_display = [track for track in tracks if track[2]>0.4]
    #print(tracks)
    for box in to_display:
        # Transform the predicted bounding boxes for the 512x512 image to the original image dimensions.
        xmin = int(box[3] * frame.shape[1] / img_width)
        ymin = int(box[4] * frame.shape[0] / img_height)
        xmax =int(box[5] * frame.shape[1] / img_width)
        ymax =int(box[6] * frame.shape[0] / img_height)
        cv.rectangle(frame, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (0, 255, 0), 2)
        cv.putText(frame,'{}: {:.2f}'.format(classes[int(box[1])], box[2]), (xmin, ymin),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        total_objects_tracking+=1
        tracking_res.append({"image_id" : frameCount, "category_id" : 1, "bbox" : [float(xmin),float(ymin),float(xmax-xmin),float(ymax-ymin)], "score" : int(box[1])})
    #print('preds%d'%len(preds))
    #print('tracks%d'%len(tracks))
    #print('displayed%d'%len(to_display))
    # if the `q` key is pressed, break from the lop
    #plt.figure()
    #plt.imshow(frame)
    out_tracking.write(frame)
    prev_frame=frame
# cleanup the camera and close any open windows
elapsed_time = time.time() - start_time
print(elapsed_time)
vs.release()
cv.destroyAllWindows()
out_tracking.release()
print(total_objects_tracking)
with open('1_res_keypoint.json', 'w') as outfile:
    json.dump(tracking_res, outfile)
# +
# #!/usr/bin/env python
"""
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
Measurement is the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3
if PY3:
long = int
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
if __name__ == "__main__":
    # Canvas size for the visualisation window.
    img_height = 500
    img_width = 500
    # 4-D state, 1-D measurement, no control input.
    # NOTE(review): the stock OpenCV sample tracks the angle with a 2-D state;
    # this variant was hand-edited to a 4-D state with a dense transition
    # matrix -- confirm the matrix below is intentional.
    kalman = cv.KalmanFilter(4, 1, 0)
    code = long(-1)
    cv.namedWindow("Kalman")
    while True:
        # Re-initialise the filter with a random starting state on each restart.
        state = 0.1 * np.random.randn(4, 1)
        kalman.transitionMatrix = np.array([[1., 1.,1.,1.], [0., 1.,1.,1.],[0,1,1,0],[1,0,0,1]])
        kalman.measurementMatrix = 1. * np.ones((1, 4))
        kalman.processNoiseCov = 1e-5 * np.eye(4)
        kalman.measurementNoiseCov = 1e-1 * np.ones((1, 1))
        kalman.errorCovPost = 1. * np.ones((4, 4))
        kalman.statePost = 0.1 * np.random.randn(4, 1)
        while True:
            def calc_point(angle):
                # Map an angle to a pixel on a circle centred in the image.
                # Bug fix: the y coordinate was rounded to 1 decimal before the
                # int cast (truncating) while x was rounded to 0 decimals;
                # use 0 decimals for both so the two axes round consistently.
                return (np.around(img_width/2 + img_width/3*cos(angle), 0).astype(int),
                        np.around(img_height/2 - img_width/3*sin(angle), 0).astype(int))
            state_angle = state[0, 0]
            state_pt = calc_point(state_angle)
            prediction = kalman.predict()
            predict_angle = prediction[0, 0]
            predict_pt = calc_point(predict_angle)
            measurement = kalman.measurementNoiseCov * np.random.randn(1, 1)
            # generate measurement
            measurement = np.dot(kalman.measurementMatrix, state) + measurement
            measurement_angle = measurement[0, 0]
            measurement_pt = calc_point(measurement_angle)
            # plot points
            def draw_cross(center, color, d):
                cv.line(img,
                        (center[0] - d, center[1] - d), (center[0] + d, center[1] + d),
                        color, 1, cv.LINE_AA, 0)
                cv.line(img,
                        (center[0] + d, center[1] - d), (center[0] - d, center[1] + d),
                        color, 1, cv.LINE_AA, 0)
            img = np.zeros((img_height, img_width, 3), np.uint8)
            draw_cross(np.int32(state_pt), (255, 255, 255), 3)    # true point: white
            draw_cross(np.int32(measurement_pt), (0, 0, 255), 3)  # measurement: red
            draw_cross(np.int32(predict_pt), (0, 255, 0), 3)      # prediction: green
            cv.line(img, state_pt, measurement_pt, (0, 0, 255), 3, cv.LINE_AA, 0)
            cv.line(img, state_pt, predict_pt, (0, 255, 255), 3, cv.LINE_AA, 0)
            kalman.correct(measurement)
            # Advance the "true" state with process noise.
            # Bug fix: the noise was drawn with shape (2, 1), which cannot
            # broadcast against the (4, 1) state and raised ValueError; draw a
            # (4, 1) sample to match the state dimension.
            process_noise = sqrt(kalman.processNoiseCov[0,0]) * np.random.randn(4, 1)
            state = np.dot(kalman.transitionMatrix, state) + process_noise
            cv.imshow("Kalman", img)
            code = cv.waitKey(100)
            if code != -1:
                break
        if code in [27, ord('q'), ord('Q')]:
            break
    cv.destroyWindow("Kalman")
# +
# kalman.correct??
# -
| SSD300-Video.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.0 64-bit (''py3_env'': conda)'
# name: python380jvsc74a57bd01ab258772917483a99abdb90c01462cd6ef85609ebb6c5e2ab1a1469260a9239
# ---
import requests
import datetime
import json
# +
POST_CODE = "400708"  # PIN code to query
age = 52              # beneficiary age; sessions with a higher minimum age are skipped
# Print details flag
print_flag = 'Y'
numdays = 20          # number of days ahead (starting today) to check
# -
# Build the list of query dates in the API's DD-MM-YYYY format.
base = datetime.datetime.today()
date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]
date_str = [x.strftime("%d-%m-%Y") for x in date_list]
# Query the public CoWIN calendar endpoint for each date and report openings.
# (Removed an unused `flag = False` local; simplified the print_flag check.)
for INP_DATE in date_str:
    URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={}&date={}".format(POST_CODE, INP_DATE)
    response = requests.get(URL)
    if response.ok:
        resp_json = response.json()
        # print(json.dumps(resp_json, indent = 1))
        if resp_json["centers"]:
            print("Available on: {}".format(INP_DATE))
            if print_flag.upper() == 'Y':
                for center in resp_json["centers"]:
                    for session in center["sessions"]:
                        if session["min_age_limit"] <= age:
                            print("\t", center["name"])
                            print("\t", center["block_name"])
                            print("\t Price: ", center["fee_type"])
                            print("\t Available Capacity: ", session["available_capacity"])
                            if(session["vaccine"] != ''):
                                print("\t Vaccine: ", session["vaccine"])
                            print("\n\n")
        else:
            print("No available slots on {}".format(INP_DATE))
| cowin-api-availability_pincode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Analyzes data from the split0 and split1 folders generated by noisy_student and saves all figures in the experiment/figures folder.
# +
# Experiment root: split0/split1 are the two folds, each holding three
# teacher/student iterations of the noisy-student loop.
# exp_dir = "/home/henriklg/master-thesis/code/kvasir-capsule/experiments/cl_500"
exp_dir = "/home/henriklg/master-thesis/code/kvasir-capsule/experiments/cl_500"
split0 = exp_dir+"/split0"
split1 = exp_dir+"/split1"
sub_dirs = ["0_teacher", "0_student", "1_teacher", "1_student", "2_teacher", "2_student"]
# -
# # Functions
# +
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
# Global matplotlib font sizes applied to every figure below.
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE)    # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE)   # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
# +
def l2d_old(line):
    """Collapse runs of whitespace in *line* and return the resulting tokens."""
    collapsed = " ".join(line.split())
    return collapsed.split(" ")
def l2d(line):
    """Parse one classification-report row into [class_name, prec, rec, f1, support].

    NOTE(review): the extraction may have mangled the separator literal below --
    the comment suggests it was originally a multi-space split; confirm against
    the on-disk report format.
    """
    name = (line.split(" ")) # split on double whitespace
    name = next(sub for sub in name if sub) # fetch first non-empty cell
    name = name.strip() # remove whitespace from string
    metrics = (" ".join(line.split()).split(" ")[-4:])  # last four columns of the row
    return ([name]+metrics)
def parse_classification_report(path):
    """Parse an sklearn-style classification_report text file.

    Returns (class_m, tot_m):
      class_m -- per-class dict of precision/recall/f1 (floats) plus the model
                 kind ("teacher"/"student") inferred from the directory name;
      tot_m   -- overall "acc" plus the macro/weighted average rows as raw
                 string lists.
    """
    class_m = {}
    tot_m = {}
    # The model kind is encoded in the sub-directory name (e.g. "0_teacher").
    if "teacher" in path:
        model = "teacher"
    else:
        model = "student"
    with open(path) as file:
        line = file.readline()
        line = file.readline() # skip first line
        line = file.readline()
        # Per-class rows until the blank line that separates them from totals.
        while line:
            data = l2d(line)
            class_m[data[0]] = {
                "prec": float(data[1]),
                "rec": float(data[2]),
                "f1": float(data[3]),
                "model": model
            }
            line = file.readline()
            # A line holding only "\n" marks the end of the class rows.
            if len(line) == 1:
                line = False
        # Totals block: accuracy, macro avg, weighted avg.
        line = file.readline()
        tot_m["acc"] = l2d_old(line)[1]
        line = file.readline()
        tot_m["macro"] = l2d_old(line)[2:5]
        line = file.readline()
        tot_m["weighted"] = l2d_old(line)[2:5]
    return class_m, tot_m
# +
import ast
def parse_history(path):
    """Read a history.txt with one 'name: [...]' line each for
    loss, acc, val_loss and val_acc (in that order) and return them
    as a dict of Python lists."""
    keys = ("loss", "acc", "val_loss", "val_acc")
    with open(path) as file:
        raw_lines = [file.readline() for _ in keys]
    # Each line looks like "loss: [0.5, 0.4, ...]"; evaluate the list literal.
    return {key: ast.literal_eval(raw.split(":")[-1].strip())
            for key, raw in zip(keys, raw_lines)}
# -
def average_history(history_list):
    """Element-wise mean of several equal-length numeric sequences."""
    n_series = len(history_list)
    totals = [0] * len(history_list[0])
    for series in history_list:
        for position, value in enumerate(series):
            totals[position] += value
    return [total / n_series for total in totals]
# ## Get the data from experiment results
# +
# Load every model's training history for both splits and align them on the
# shortest run.
mets = ["loss", "acc", "val_loss", "val_acc"]
hist0 = {key: [] for key in mets}
hist1 = {key: [] for key in mets}
lowest_epoch = 200  # upper bound; shrunk below to the shortest run found
for hist, split in zip([hist0, hist1], [split0, split1]):
    for dir_ in sub_dirs:
        path = "{}/{}/history.txt".format(split, dir_)
        history = parse_history(path)
        hist["loss"].append(history["loss"])
        hist["acc"].append(history["acc"])
        hist["val_loss"].append(history["val_loss"])
        hist["val_acc"].append(history["val_acc"])
        # Track the shortest training run so all curves can share an x axis.
        if (len(history["loss"]) < lowest_epoch):
            lowest_epoch = len(history["loss"])
# Shorten the history lists to the lowest epoch
for hist in [hist0, hist1]:
    for key, outerlist in hist.items():
        hist[key] = [sublist[0:lowest_epoch] for sublist in outerlist]
# +
import pathlib
def savefig(name):
    """Save the current matplotlib figure as <exp_dir>/figures/<name>.pdf,
    creating the figures directory if needed."""
    target_dir = exp_dir + "/figures/"
    pathlib.Path(target_dir).mkdir(parents=True, exist_ok=True)
    plt.savefig(target_dir + name + ".pdf", format="pdf")
# -
# # Get results from training on split_0
# ### Accuracy and loss
# +
# Training accuracy/loss for all six split-0 models, side by side.
x = range(lowest_epoch)
plt.figure(figsize=(14,6));
# Subplot 1
plt.subplot(1, 2, 1)
for model in hist0["acc"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Accuracy");
plt.title('Training accuracy')
plt.tight_layout()
# Subplot 2
plt.subplot(1, 2, 2)
for model in hist0["loss"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Loss");
plt.tight_layout()
plt.title('Training loss')
savefig("split0_history_training")
# Validation accuracy/loss for the same models.
plt.figure(figsize=(14,6));
# Subplot 1
plt.subplot(1, 2, 1)
for model in hist0["val_acc"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Accuracy");
plt.title('Validation accuracy')
plt.tight_layout()
# Subplot 2
plt.subplot(1, 2, 2)
for model in hist0["val_loss"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Loss");
plt.tight_layout()
plt.title('Validation loss')
savefig("split0_history_validation")
# +
# Mean validation curves across the six split-0 models.
avg_val_acc_0 = average_history(hist0["val_acc"])
avg_val_loss_0 = average_history(hist0["val_loss"])
plt.figure(figsize=(14,6));
# Subplot 1
plt.subplot(1, 2, 1)
plt.plot(x, avg_val_acc_0, linewidth=1.5)
plt.xlabel("Epoch")
plt.ylabel("Accuracy");
plt.title('Average validation accuracy')
plt.tight_layout()
# Subplot 2
plt.subplot(1, 2, 2)
plt.plot(x, avg_val_loss_0, linewidth=1.5)
plt.xlabel("Epoch")
plt.ylabel("Loss");
plt.tight_layout()
plt.title('Average validation loss')
savefig("split0_average_validation_history")
# -
# ### F1-measure
# +
# Parse the six classification reports for split 0, keeping only the totals.
report_contents_0 = []
for model in sub_dirs:
    report_file = "{}/{}/classification_report.txt".format(split0, model)
    _, tot_m = parse_classification_report(report_file)
    report_contents_0.append(tot_m);
# +
# Compare weighted and macro precision, recall and f1-score
metrics0 = {
    "prec": [],
    "rec": [],
    "f1": [],
    "acc": []
}
metric = "weighted"
# One entry per teacher/student iteration, in sub_dirs order.
for idx, content in enumerate(report_contents_0):
    metrics0["acc"].append(float(content["acc"]))
    metrics0["prec"].append(float(content[metric][0]))
    metrics0["rec"].append(float(content[metric][1]))
    metrics0["f1"].append(float(content[metric][2]))
# +
# Weighted precision/recall/F1 per noisy-student iteration (split 0).
x = list(range(len(sub_dirs)))
plt.figure(figsize=(9,6))
plt.plot(
    x,metrics0["prec"],'r',
    x,metrics0["rec"],'b',
    x,metrics0["f1"],'g',
    linewidth=1.5, marker='o'
)
# Bug fix: the legend listed a fourth entry ("acc") although only three
# series are plotted; keep the three real labels.
plt.legend(["Precision","Recall","F1-score"])
plt.xlabel("Iteration")
plt.ylabel("Weighted average score")
plt.tight_layout(pad=1.5)
savefig("split0_prec_rec_f1")
# -
# Accuracy vs F1 across the noisy-student iterations (split 0).
plt.figure(figsize=(9,6))
plt.plot(
    x,metrics0["acc"],'r--',
    x,metrics0["f1"],'g-',
    linewidth=1.5, marker='o'
)
plt.legend(["Accuracy", "F1-Score"])
plt.xlabel("Iteration")
plt.ylabel("Weighted average score")
plt.tight_layout(pad=1.5)
savefig("split0_acc_f1")
# # Get results from training on split_1
# ### Accuracy and loss
# +
# Same set of figures as above, for split 1.
x = range(lowest_epoch)
plt.figure(figsize=(14,6));
# Subplot 1
plt.subplot(1, 2, 1)
for model in hist1["acc"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Accuracy on training data");
plt.title('Training accuracy')
plt.tight_layout()
# Subplot 2
plt.subplot(1, 2, 2)
for model in hist1["loss"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Loss on training data");
plt.tight_layout()
plt.title('Training loss')
savefig("split1_history_training")
# Validation accuracy/loss for the split-1 models.
plt.figure(figsize=(14,6));
# Subplot 1
plt.subplot(1, 2, 1)
for model in hist1["val_acc"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Accuracy");
plt.title('Validation accuracy')
plt.tight_layout()
# Subplot 2
plt.subplot(1, 2, 2)
for model in hist1["val_loss"]:
    plt.plot(x, model, linewidth=1.5)
plt.legend(sub_dirs);
plt.xlabel("Epoch")
plt.ylabel("Loss");
plt.tight_layout()
plt.title('Validation loss')
savefig("split1_history_validation")
# +
# Mean validation curves across the six split-1 models.
avg_val_acc_1 = average_history(hist1["val_acc"])
avg_val_loss_1 = average_history(hist1["val_loss"])
plt.figure(figsize=(14,6));
# Subplot 1
plt.subplot(1, 2, 1)
plt.plot(x, avg_val_acc_1, linewidth=1.5)
plt.xlabel("Epoch")
plt.ylabel("Accuracy");
plt.title('Average validation accuracy')
plt.tight_layout()
# Subplot 2
plt.subplot(1, 2, 2)
plt.plot(x, avg_val_loss_1, linewidth=1.5)
plt.xlabel("Epoch")
plt.ylabel("Loss");
plt.tight_layout()
plt.title('Average validation loss')
savefig("split1_average_validation_history")
# -
# ### F1-measure
# +
# Parse the six classification reports for split 1, keeping only the totals.
report_contents_1 = []
for model in sub_dirs:
    report_file = "{}/{}/classification_report.txt".format(split1, model)
    _, tot_m = parse_classification_report(report_file)
    report_contents_1.append(tot_m);
# +
# Compare weighted and macro precision, recall and f1-score
metrics1 = {
    "prec": [],
    "rec": [],
    "f1": [],
    "acc": []
}
metric = "weighted"
# One entry per teacher/student iteration, in sub_dirs order.
for idx, content in enumerate(report_contents_1):
    metrics1["acc"].append(float(content["acc"]))
    metrics1["prec"].append(float(content[metric][0]))
    metrics1["rec"].append(float(content[metric][1]))
    metrics1["f1"].append(float(content[metric][2]))
# +
# Weighted precision/recall/F1 per noisy-student iteration (split 1).
x = list(range(len(sub_dirs)))
plt.figure(figsize=(9,6))
plt.plot(
    x,metrics1["prec"],'r',
    x,metrics1["rec"],'b',
    x,metrics1["f1"],'g',
    linewidth=1.5, marker='o'
)
# Bug fix: the legend listed a fourth entry ("acc") although only three
# series are plotted; keep the three real labels.
plt.legend(["Precision","Recall","F1-score"])
plt.xlabel("Iteration")
plt.ylabel("Weighted average score")
plt.tight_layout(pad=1.5)
savefig("split1_prec_rec_f1")
# -
# Accuracy vs F1 across the noisy-student iterations (split 1).
plt.figure(figsize=(9,6))
plt.plot(
    x,metrics1["acc"],'r--',
    x,metrics1["f1"],'g-',
    linewidth=1.5, marker='o'
)
plt.legend(["Accuracy", "F1-Score"])
plt.xlabel("Iteration")
plt.ylabel("Weighted average score")
plt.tight_layout(pad=1.5)
savefig("split1_acc_f1")
# # Average the results of both splits
# +
# Cross-split comparison: validation curves, F1 per iteration, and the
# averages of accuracy/F1 over both splits.
x = range(lowest_epoch)
plt.figure(figsize=(14,6));
# Subplot 1: average validation accuracy, one curve per split.
plt.subplot(1, 2, 1)
plt.plot(
    x, avg_val_acc_0,
    x, avg_val_acc_1,
    linewidth=1.5
)
plt.legend(["split_0", "split_1"])
plt.xlabel("Epoch")
plt.ylabel("Accuracy");
plt.title('Average validation accuracy split_0 and split_1')
plt.tight_layout()
# Subplot 2: average validation loss, one curve per split.
plt.subplot(1, 2, 2)
plt.plot(
    x, avg_val_loss_0,
    x, avg_val_loss_1,
    linewidth=1.5
)
plt.legend(["split_0", "split_1"])
plt.xlabel("Epoch")
plt.ylabel("Loss");
plt.tight_layout()
plt.title('Average validation loss split_0 and split_1')
savefig("both_average_validation_history")
# +
# F1 per iteration, one curve per split.
x = range(len(sub_dirs))
plt.figure(figsize=(9,6))
plt.plot(
    x, metrics0["f1"],
    x, metrics1["f1"],
    linewidth=1.5
)
plt.legend(["split_0", "split_1"])
plt.title("F1 score for split_0 and split_1")
plt.xlabel("Iteration")
plt.ylabel("F1 score")
plt.tight_layout(pad=1.5)
savefig("both_f1")
# +
# Per-iteration accuracy and F1 averaged over the two splits.
average_acc = average_history([metrics0["acc"], metrics1["acc"]])
average_f1 = average_history([metrics0["f1"], metrics1["f1"]])
plt.figure(figsize=(9,6))
plt.plot(
    x,average_acc,'r--',
    x,average_f1,'g-',
    linewidth=1.5, marker='o'
)
plt.legend(["Accuracy", "F1-Score"])
plt.title("Average F1 and Accuracy metrics for split0 and split1")
plt.xlabel("Iteration")
plt.ylabel("Weighted average score")
plt.tight_layout(pad=1.5)
savefig("both_avg_acc_f1")
# -
# Print the per-iteration averages as a small table.
for acc, f1 in zip(average_acc, average_f1):
    print (round(acc,3), round(f1,3))
| src/generate_figures/analyze_ns_results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="6umP1IKf4Dg6"
# # Autobatching log-densities example
#
# This notebook demonstrates a simple Bayesian inference example where autobatching makes user code easier to write, easier to read, and less likely to include bugs.
#
# Inspired by a notebook by @davmre.
# + colab_type="code" id="PaW85yP_BrCF" colab={}
# !pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\.//' -e 's/\..*//')/jaxlib-0.1.21-cp36-none-linux_x86_64.whl
# !pip install --upgrade -q jax
# + colab={} colab_type="code" id="8RZDkfbV3zdR"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import re
import sys
import time
from matplotlib.pyplot import *
import jax
from jax import lax
from jax import numpy as np
from jax import scipy
from jax import random
import numpy as onp
import scipy as oscipy
# + [markdown] colab_type="text" id="p2VcZS1d34C6"
# # Generate a fake binary classification dataset
# + colab={} colab_type="code" id="pq41hMvn4c_i"
# Synthetic logistic-regression dataset with a fixed seed for reproducibility.
onp.random.seed(10009)
num_features = 10
num_points = 100
true_beta = onp.random.randn(num_features).astype(np.float32)  # ground-truth coefficients
all_x = onp.random.randn(num_points, num_features).astype(np.float32)
# Binary labels drawn from Bernoulli(sigmoid(x . true_beta)).
y = (onp.random.rand(num_points) < oscipy.special.expit(all_x.dot(true_beta))).astype(np.int32)
# + colab={"height": 102} colab_type="code" executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1549999404494, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="O0nVumAw7IlT" outputId="c474098f-4e81-4fc8-ad8f-3ba825409be3"
y
# + [markdown] colab_type="text" id="DZRVvhpn5aB1"
# # Write the log-joint function for the model
#
# We'll write a non-batched version, a manually batched version, and an autobatched version.
# + [markdown] colab_type="text" id="C_mDXInL7nsP"
# ## Non-batched
# + colab={} colab_type="code" id="ZHyL2sJh5ajG"
def log_joint(beta):
    """Unbatched log joint density of the logistic-regression model.

    Standard-normal prior on each coefficient plus the Bernoulli-logit
    log-likelihood of the module-level data (all_x, y). Note that no
    `axis` parameter is needed for `np.sum` in this unbatched form.
    """
    log_prior = np.sum(scipy.stats.norm.logpdf(beta, loc=0., scale=1.))
    signs = 2 * y - 1  # map labels {0, 1} -> {-1, +1}
    log_likelihood = np.sum(-np.log(1 + np.exp(-signs * np.dot(all_x, beta))))
    return log_prior + log_likelihood
# + colab={"height": 34} colab_type="code" executionInfo={"elapsed": 3383, "status": "ok", "timestamp": 1549999409301, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="e51qW0ro6J7C" outputId="c778d4fc-85b9-4fea-9875-0d0a3397a027"
log_joint(onp.random.randn(num_features))
# + colab={"height": 895} colab_type="code" executionInfo={"elapsed": 4130, "status": "error", "timestamp": 1549999413496, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="fglQXK1Y6wnm" outputId="cf85d9a7-b403-4e75-efb6-b9d057e66f3c"
# This doesn't work, because we didn't write `log_prob()` to handle batching.
batch_size = 10
batched_test_beta = onp.random.randn(batch_size, num_features)
# Intentionally raises: log_joint assumes a 1-D beta.
log_joint(onp.random.randn(batch_size, num_features))
# + [markdown] colab_type="text" id="_lQ8MnKq7sLU"
# ## Manually batched
# + colab={} colab_type="code" id="2g5-4bQE7gRA"
def batched_log_joint(beta):
    """Manually batched log joint; beta has shape (batch, num_features)."""
    result = 0.
    # Here (and below) `sum` needs an `axis` parameter. At best, forgetting to set axis
    # or setting it incorrectly yields an error; at worst, it silently changes the
    # semantics of the model.
    result = result + np.sum(scipy.stats.norm.logpdf(beta, loc=0., scale=1.),
                             axis=-1)
    # Note the multiple transposes. Getting this right is not rocket science,
    # but it's also not totally mindless. (I didn't get it right on the first
    # try.)
    result = result + np.sum(-np.log(1 + np.exp(-(2*y-1) * np.dot(all_x, beta.T).T)),
                             axis=-1)
    return result
# + colab={"height": 68} colab_type="code" executionInfo={"elapsed": 735, "status": "ok", "timestamp": 1549999417264, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="KdDMr-Gy85CO" outputId="1e90fc29-60fb-4460-f08f-2dd486cc8f5e"
batch_size = 10
batched_test_beta = onp.random.randn(batch_size, num_features)  # reused by later cells
batched_log_joint(batched_test_beta)
# + [markdown] colab_type="text" id="-uuGlHQ_85kd"
# ## Autobatched with vmap
#
# It just works.
# + colab={"height": 68} colab_type="code" executionInfo={"elapsed": 174, "status": "ok", "timestamp": 1549999417694, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="SU20bouH8-Za" outputId="5637b58a-0d7e-4a61-b74a-f4d2cab2105a"
vmap_batched_log_joint = jax.vmap(log_joint)
vmap_batched_log_joint(batched_test_beta)
# + [markdown] colab_type="text" id="L1KNBo9y_yZJ"
# # Self-contained variational inference example
#
# A little code is copied from above.
# + [markdown] colab_type="text" id="lQTPaaQMJh8Y"
# ## Set up the (batched) log-joint function
# + colab={} colab_type="code" id="AITXbaofA3Pm"
@jax.jit
def log_joint(beta):
    """Jitted log joint with a wider N(0, 10) prior than the demo above."""
    result = 0.
    # Note that no `axis` parameter is provided to `np.sum`.
    result = result + np.sum(scipy.stats.norm.logpdf(beta, loc=0., scale=10.))
    result = result + np.sum(-np.log(1 + np.exp(-(2*y-1) * np.dot(all_x, beta))))
    return result
# Batch over the leading axis with vmap, then jit the batched function.
batched_log_joint = jax.jit(jax.vmap(log_joint))
# + [markdown] colab_type="text" id="UmmFMQ8LJk6a"
# ## Define the ELBO and its gradient
# + colab={} colab_type="code" id="MJtnskL6BKwV"
def elbo(beta_loc, beta_log_scale, epsilon):
    """Monte-Carlo ELBO of a diagonal-Gaussian variational posterior.

    beta_loc / beta_log_scale parameterise q(beta); epsilon is a batch of
    standard-normal draws (reparameterisation trick).
    """
    beta_sample = beta_loc + np.exp(beta_log_scale) * epsilon
    return np.mean(batched_log_joint(beta_sample), 0) + np.sum(beta_log_scale - 0.5 * onp.log(2*onp.pi))
# NOTE(review): static_argnums=(2, 3) marks `epsilon` (arg index 2, an array)
# as static and references a nonexistent arg index 3 -- this looks wrong and
# would fail on current JAX; confirm against the pinned jax version.
elbo = jax.jit(elbo, static_argnums=(2, 3))
elbo_val_and_grad = jax.jit(jax.value_and_grad(elbo, argnums=(0, 1)))
# + [markdown] colab_type="text" id="oQC7xKYnJrp5"
# ## Optimize the ELBO using SGD
# + colab={"height": 1717} colab_type="code" executionInfo={"elapsed": 2986, "status": "ok", "timestamp": 1549999510348, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="9JrD5nNgH715" outputId="1b7949cc-1296-46bb-9d88-412475834944"
def normal_sample(key, shape):
    """Convenience wrapper for quasi-stateful RNG: split *key*, draw a
    standard-normal sample of *shape*, and return (new_key, sample)."""
    next_key, draw_key = random.split(key)
    sample = random.normal(draw_key, shape)
    return next_key, sample
normal_sample = jax.jit(normal_sample, static_argnums=(1,))
key = random.PRNGKey(10003)
beta_loc = np.zeros(num_features, np.float32)        # variational mean
beta_log_scale = np.zeros(num_features, np.float32)  # variational log std-dev
step_size = 0.01
batch_size = 128
epsilon_shape = (batch_size, num_features)
# Plain SGD *ascent* on the ELBO (gradients are added, not subtracted).
for i in range(1000):
    key, epsilon = normal_sample(key, epsilon_shape)
    elbo_val, (beta_loc_grad, beta_log_scale_grad) = elbo_val_and_grad(
        beta_loc, beta_log_scale, epsilon)
    beta_loc += step_size * beta_loc_grad
    beta_log_scale += step_size * beta_log_scale_grad
    if i % 10 == 0:
        print('{}\t{}'.format(i, elbo_val))
# + [markdown] colab_type="text" id="b3ZAe5fJJ2KM"
# ## Display the results
#
# Coverage isn't quite as good as we might like, but it's not bad, and nobody said variational inference was exact.
# + colab={"height": 481} colab_type="code" executionInfo={"elapsed": 263, "status": "ok", "timestamp": 1549999510632, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-r5gqCRwU9kk/AAAAAAAAAAI/AAAAAAAAALw/T9KGDIrA_iA/s64/photo.jpg", "userId": "11857134876214181812"}, "user_tz": 480} id="zt1NBLoVHtOG" outputId="2f0081cf-bbfe-426c-bc5e-a1c09468234a"
figure(figsize=(7, 7))
# True coefficients vs approximate posterior means, with +/- 2 sigma bars.
plot(true_beta, beta_loc, '.', label='Approximated Posterior Means')
plot(true_beta, beta_loc + 2*np.exp(beta_log_scale), 'r.', label='Approximated Posterior $2\sigma$ Error Bars')
plot(true_beta, beta_loc - 2*np.exp(beta_log_scale), 'r.')
plot_scale = 3
plot([-plot_scale, plot_scale], [-plot_scale, plot_scale], 'k')  # identity reference line
xlabel('True beta')
ylabel('Estimated beta')
legend(loc='best')
# + colab={} colab_type="code" id="_bXdOlvUEJl0"
| notebooks/vmapped log-probs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def compute_strains_3stations(lons, lats):
    """Build a strain calculator for a triangle of three GPS stations.

    Given station longitudes/latitudes in degrees, precompute the inverse of
    the design matrix relating station velocities to the 2-D velocity-gradient
    tensor, and return a function mapping east/north velocity triples to
    [exx, exy, eyy, rot].
    """
    from numpy.linalg import inv
    lon_c = np.mean(lons)
    lat_c = np.mean(lats)
    # Station offsets from the centroid, degrees -> km
    # (1 deg latitude ~ 111 km; longitude scaled by cos(latitude)).
    km_per_deg = 111.0
    dE = [(lons[i] - lon_c) * km_per_deg * np.cos(np.deg2rad(lat_c)) for i in range(3)]
    dN = [(lats[i] - lat_c) * km_per_deg for i in range(3)]
    # Two rows per station: east component then north component.
    rows = []
    for e, n in zip(dE, dN):
        rows.append([1, 0, e, n, 0, 0])
        rows.append([0, 1, 0, 0, e, n])
    design_matrix = np.array(rows)
    # Invert once; every call to the returned function reuses this.
    design_inv = inv(design_matrix)
    def inner_function(uxs, uys):
        """Return [exx, exy, eyy, rot] for east (uxs) / north (uys) velocities."""
        obs_vel = np.array([[uxs[0]], [uys[0]], [uxs[1]], [uys[1]], [uxs[2]], [uys[2]]])
        vel_grad = np.dot(design_inv, obs_vel)  # [tx, ty, dudx, dudy, dvdx, dvdy]
        dudx = vel_grad[2][0]
        dudy = vel_grad[3][0]
        dvdx = vel_grad[4][0]
        dvdy = vel_grad[5][0]
        exx = dudx
        exy = 0.5 * (dvdx + dudy)
        eyy = dvdy
        rot = 0.5 * (dvdx - dudy)
        return [exx, exy, eyy, rot]
    return inner_function
# +
import csv
from dateutil.parser import isoparse
import numpy as np
def read_gps_data(filename):
    """Read a GPS position time-series CSV.

    Returns (date, ux, uy, uz, sig_ux, sig_uy, sig_uz, lat, lon): daily
    displacement components and their sigmas as numpy arrays, plus the
    station latitude/longitude parsed from the header line.
    """
    date = []
    ux = []
    uy = []
    uz = []
    sig_ux = []
    sig_uy = []
    sig_uz = []
    with open(filename, 'r') as file:
        reader = csv.reader(file, delimiter = ',')
        # Skip the first five header lines.
        for _ in range(5):
            next(reader)
        # Line 6 carries "Latitude: ... Longitude: ..." station metadata.
        line = next(reader)[0]
        import re
        match = re.search('Latitude: ([+-]?([0-9]*[.])?[0-9]+)', line)
        if match:
            lat = float(match.group(1))
        match = re.search('Longitude: ([+-]?([0-9]*[.])?[0-9]+)', line)
        if match:
            lon = float(match.group(1))
        # NOTE(review): if either regex fails, lat/lon are never bound and the
        # return below raises UnboundLocalError -- confirm the header format.
        # Skip the remaining header lines before the data rows.
        for _ in range(3):
            next(reader)
        # Data rows: ISO date, ux, uy, uz, then the three 1-sigma errors.
        for row in reader:
            date += [isoparse(row[0])]
            ux += [float(row[1])]
            uy += [float(row[2])]
            uz += [float(row[3])]
            sig_ux += [float(row[4])]
            sig_uy += [float(row[5])]
            sig_uz += [float(row[6])]
    date = np.array(date, dtype = np.datetime64)
    ux = np.array(ux)
    uy = np.array(uy)
    uz = np.array(uz)
    sig_ux = np.array(sig_ux)
    sig_uy = np.array(sig_uy)
    sig_uz = np.array(sig_uz)
    return date, ux, uy, uz, sig_ux, sig_uy, sig_uz, lat, lon
# +
# Load the three stations' position time series.
date_knol, ux_knol, uy_knol, uz_knol, _, _, _, lat_knol, lon_knol = read_gps_data('data/GPS/KNOL-cwu-nam14-gpos.csv')
date_p630, ux_p630, uy_p630, uz_p630, _, _, _, lat_p630, lon_p630 = read_gps_data('data/GPS/P630-cwu-nam14-gpos.csv')
date_linc, ux_linc, uy_linc, uz_linc, _, _, _, lat_linc, lon_linc = read_gps_data('data/GPS/LINC-cwu-nam14-gpos.csv')
# Bug fix: removed a stale line that referenced undefined names (`date`,
# `dates_m`) and raised NameError; its result (`indexes`) was never used:
#     _, indexes, _ = np.intersect1d(date, dates_m, assume_unique = True, return_indices = True)
# +
lats = np.array([lat_knol, lat_p630, lat_linc])
lons = np.array([lon_knol, lon_p630, lon_linc])
# Precompute the inverse design matrix for this station triangle.
strain_computer = compute_strains_3stations(lons, lats)
# +
mean_normal_strain = np.zeros_like(ux_linc)
# Mean normal strain (exx + eyy) / 2 for every epoch, assuming the three
# stations' series are aligned sample-for-sample.
for index, (ux_1, ux_2, ux_3, uy_1, uy_2, uy_3) in enumerate(zip(ux_knol, ux_p630, ux_linc, uy_knol, uy_p630, uy_linc)):
    [exx, exy, eyy, rot] = strain_computer((ux_1, ux_2, ux_3), (uy_1, uy_2, uy_3))
    mean_normal_strain[index] = (exx + eyy) / 2.0
# -
mean_normal_strain
| strains.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Architecting Analytic Pipelines on GCP - Chicago Cloud Conference 2020
# + pycharm={"name": "#%%\n"}
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
import pandas as pd
# + pycharm={"name": "#%%\n"}
# !export GOOGLE_APPLICATION_CREDENTIALS='/<KEY>'
# !pip install
# -
# Now that the data is in bigquery we can save a section to cloud storage or grab it direct from bigquery.
# + pycharm={"name": "#%%\n"}
# Load the exported tweet dump from Cloud Storage.
df = pd.read_csv('gs://chicago-cloud-conference-2020/nlpstorage/results-20200920-131208.csv')
df.head()
# + pycharm={"name": "#%%\n"}
print(df['user_text'][0])
print(len(df['user_text']))
# + pycharm={"name": "#%%\n"}
# Keep only tweets whose text contains "trump" (case-sensitive match).
tweets=df[df['user_text'].str.contains('trump', na=False)]
print(len(tweets))
# + pycharm={"name": "#%%\n"}
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.oauth2 import service_account
from google.protobuf.json_format import MessageToDict
score=[]
magnitude=[]
creds = service_account.Credentials.from_service_account_file('/<KEY>')
client = language.LanguageServiceClient(credentials=creds)
# One Natural Language API call per tweet; collect the document-level
# sentiment score and magnitude in tweet order.
for tweet in tweets['user_text']:
    document = types.Document(
        content=tweet,
        type=enums.Document.Type.PLAIN_TEXT
    )
    analyze_sentiment_response = client.analyze_sentiment(document=document)
    message = MessageToDict(analyze_sentiment_response, including_default_value_fields=True)
    score.append(message['documentSentiment']['score'])
    magnitude.append(message['documentSentiment']['magnitude'])
print(len(score))
# print('POLARITY=%s MAGNITUDE=%s for %s' % (score, magnitude, tweet))
# + pycharm={"name": "#%%\n"}
# Attach sentiment results back onto the filtered frame.
# NOTE(review): `tweets` is a slice of `df`; assigning columns here can raise
# pandas' SettingWithCopyWarning -- consider filtering with .copy() upstream.
tweets['score']=score
tweets['magnitude']=magnitude
tweets.head()
# + pycharm={"name": "#%%\n"}
# Convert the epoch-seconds timestamp into a datetime column for plotting.
tweets['datef']=pd.to_datetime(tweets['tweet_timestamp'], unit='s', yearfirst='TRUE')
tweets['datef'].head()
# + pycharm={"name": "#%%\n"}
#averages
score_avg=np.mean(tweets['score'])
magnitude_evg=np.mean(tweets['magnitude'])
print('score', score_avg, 'magnitude', magnitude_evg)
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Time series of sentiment magnitude, with the average as a red reference line.
data = pd.concat([tweets['datef'], tweets['magnitude']], axis=1)
data.set_index('datef',inplace=True)
fig, ax = plt.subplots(figsize=(15,7))
data.plot(ax=ax, legend=False, marker='o')
# Horizontal line at the mean magnitude computed in the previous cell.
ax.axhline(y=magnitude_evg, linewidth=4, color='r')
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax.set_xlabel('Date August 15th ~3:30 P.M')
ax.set_ylabel('Magnitude')
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Time series of sentiment score, with the average as a red reference line.
data2 = pd.concat([tweets['datef'], tweets['score']], axis=1)
data2.set_index('datef',inplace=True)
fig, ax2 = plt.subplots(figsize=(15,7))
data2.plot(ax=ax2, legend=False, marker='o')
ax2.axhline(y=score_avg, linewidth=4, color='r')
ax2.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
# BUG FIX: the formatter was applied to the *previous* cell's `ax`, so this
# plot's x-axis never got the HH:MM:SS labels. Apply it to ax2.
ax2.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax2.set_xlabel('Date August 15th ~3:30 P.M')
ax2.set_ylabel('Score')
# -
# High magnitude tweets are more impactful than low magnitude tweets, a weak statement doesn't say much.
# We will concentrate only on statements with a magnitude of 0.5 or higher.
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Scatter of sentiment score versus magnitude for every analysed tweet.
fig, ax = plt.subplots(figsize=(10, 4))
ax.grid(False)
ax.scatter(tweets.magnitude, tweets.score, s=120, c='black', alpha=0.5)
ax.set_xlabel('magnitude')
ax.set_ylabel('score')
plt.show()
# -
# We can also see that things around zero polarity (neither very positive nor negative) are not interesting to flag
# + pycharm={"name": "#%%\n"}
# Strong positive ("love") and strong negative ("hate") tweets:
# magnitude must be at least 0.5, score at least +/-0.5.
love=tweets[(tweets['magnitude'] >=0.5) & (tweets['score'] >=0.5)]
hate=tweets[(tweets['magnitude'] >=0.5) & (tweets['score'] <= -0.5)]
# + pycharm={"name": "#%%\n"}
# Counts: all tweets, strong-positive, strong-negative, and the weak remainder.
print(len(tweets['magnitude']), len(love['magnitude']), len(hate['magnitude']))
print("weak", len(tweets['magnitude']) - (len(love['magnitude'])+len(hate['magnitude'])))
# + pycharm={"name": "#%%\n"}
# Pie chart of the love/hate/weak split.
labels = ['loves', 'hates', 'weak']
# Compute slice sizes from the data instead of the hard-coded snapshot
# [9, 0, 139], which silently went stale whenever the input CSV changed.
sizes = [len(love), len(hate), len(tweets) - (len(love) + len(hate))]
colors = ['gold', 'lightskyblue', 'lightcoral']
patches, texts = plt.pie(sizes, colors=colors, shadow=True, startangle=90)
plt.legend(patches, labels, loc="best")
plt.axis('equal')
plt.tight_layout()
plt.show()
# + pycharm={"name": "#%%\n"}
# Scatter only the strong tweets: purple for positive, red for negative.
fig, ax = plt.subplots(figsize=(10, 4))
ax.grid(False)
for frame, colour in ((love, 'purple'), (hate, 'red')):
    ax.scatter(frame.magnitude, frame.score, s=120, c=colour, alpha=0.5)
ax.set(xlabel='magnitude >0.5', ylabel='abs(score) >=0.5')
plt.show()
# + pycharm={"name": "#%%\n"}
# Dual-axis time plot: stars (*) on the left axis show magnitude, dots (.)
# on the right axis show score; green = love, red/black per series below.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Date August 15th ~3:30 P.M')
ax1.set_ylabel('magnitude (*)')
ax1.grid(False)
ax1.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%M'))
ax1.figure.set_size_inches(10,4)
ax1.plot(love['datef'], love['magnitude'], 'g*', markersize=20, alpha=0.5)
ax1.plot(hate['datef'], hate['magnitude'], 'r*', markersize=20, alpha=0.5)
# Second y-axis sharing the same x-axis for the score series.
ax2=ax1.twinx()
ax2.grid(False)
ax2.plot(love.datef, love.score, 'g.', markersize=20, alpha=0.5)
ax2.plot(hate.datef, hate.score, 'r.', markersize=20, alpha=0.5)
ax2.set_ylabel('score (.)')
plt.show()
# + pycharm={"name": "#%%\n"}
# Magnitude over time for the strong tweets: green stars = love, black dots = hate.
fig, ax = plt.subplots(figsize=(10, 4))
ax.grid(False)
ax.set_xlabel('Date July 10th ~3:30 P.M')
ax.set_ylabel('magnitude')
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%M'))
for frame, colour, mark in ((love, 'g', '*'), (hate, 'k', '.')):
    ax.plot(frame['datef'], frame['magnitude'], colour, linestyle=':', marker=mark, linewidth=2, markersize=20, alpha=0.5)
plt.show()
# + pycharm={"name": "#%%\n"}
# Score over time for the strong tweets: blue dots = love, red dots = hate.
fig, ax = plt.subplots(figsize=(10, 4))
ax.grid(False)
ax.set_xlabel('Date July 10th ~3:30 P.M')
ax.set_ylabel('score')
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%M'))
for frame, colour in ((love, 'b'), (hate, 'r')):
    ax.plot(frame['datef'], frame['score'], colour, linestyle=':', marker='.', linewidth=2, markersize=20, alpha=0.5)
plt.show()
| visualization/nlptweets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Follows notebook 065 but directly train on the rescaled data
# # Imports
# +
import sys
sys.path.insert(1,"/home1/07064/tg863631/anaconda3/envs/CbrainCustomLayer/lib/python3.6/site-packages") #work around for h5py
from cbrain.imports import *
from cbrain.cam_constants import *
from cbrain.utils import *
from cbrain.layers import *
from cbrain.data_generator import DataGenerator
from cbrain.climate_invariant import *
import tensorflow as tf
# Enable memory growth on every visible GPU. The original hard-coded
# physical_devices[0..2], which raises IndexError on machines with fewer
# than three GPUs; looping over whatever is present is equivalent on the
# original 3-GPU host and robust everywhere else.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for _gpu in physical_devices:
    tf.config.experimental.set_memory_growth(_gpu, True)
import os
# Restrict this process to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from tensorflow import math as tfm
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
import tensorflow_probability as tfp
import xarray as xr
import numpy as np
from cbrain.model_diagnostics import ModelDiagnostics
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as imag
import scipy.integrate as sin
# import cartopy.crs as ccrs
import matplotlib.ticker as mticker
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import pickle
# from climate_invariant import *
from tensorflow.keras import layers
import datetime
from climate_invariant_utils import *
import yaml
# -
# # Global Variables
# +
# Load coordinates (just pick any file from the climate model run)
# GP path below
path_0K = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/fluxbypass_aqua/'
coor = xr.open_dataset(path_0K+"AndKua_aqua_SPCAM3.0_sp_fbp_f4.cam2.h1.0000-09-02-00000.nc")
# Keep only the latitude/longitude/level coordinate arrays, then release the file.
lat = coor.lat; lon = coor.lon; lev = coor.lev;
coor.close();
# +
# GP path below
TRAINDIR = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/SPCAM_PHYS/'
path = '/export/nfs0home/tbeucler/CBRAIN-CAM/cbrain/'
path_nnconfig = '/export/nfs0home/tbeucler/CBRAIN-CAM/nn_config/'
# Load hyam and hybm to calculate pressure field in SPCAM.
# Use a context manager so the pickle file handle is closed; the original
# `hf = open(...)` leaked the handle.
path_hyam = 'hyam_hybm.pkl'
with open(path+path_hyam, 'rb') as hf:
    hyam, hybm = pickle.load(hf)
# Scale dictionary to convert the loss to W/m2
scale_dict = load_pickle(path_nnconfig+'scale_dicts/009_Wm2_scaling.pkl')
# -
# # Data Generator
path = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/SPCAM_PHYS/'
# +
# Climate-invariant inputs: relative humidity, buoyancy-based MSE, and scalars.
in_vars = ['RH','BMSE','PS', 'SOLIN', 'SHFLX', 'LHF_nsDELQ']
#if path==path_aquaplanet: out_vars=['PHQPERC','TPHYSTNDPERC','QRLPERC','QRSPERC']
# Outputs: moistening/heating profiles plus radiative fluxes and precipitation.
out_vars = ['PHQ','TPHYSTND','FSNT','FSNS','FLNT','FLNS','PRECT']
NORMFILE = '2021_11_30_NORM_Nando_Cl_Inv.nc'
# In physical space
TRAINFILE = '2021_09_03_TRAIN_For_Nando_ClInv.nc'
VALIDFILE = '2021_09_03_VALID_For_Nando_ClInv.nc'
TESTFILE = '2021_09_03_TEST_For_Nando_ClInv.nc'
# -
def _make_generator(data_file):
    """Build a DataGeneratorCI over `data_file` with the shared settings used
    by both the validation and test generators (the two original calls were
    identical except for the file name)."""
    return DataGeneratorCI(data_fn = data_file,
                           input_vars=in_vars,
                           output_vars=out_vars,
                           norm_fn=path+NORMFILE,
                           input_transform=('mean', 'maxrs'),
                           output_transform=scale_dict,
                           batch_size=8192,
                           shuffle=True,
                           xarray=False,
                           var_cut_off=None,
                           # All climate-invariant rescalings disabled: the data
                           # on disk is already rescaled for this experiment.
                           Qscaling=None,
                           Tscaling=None,
                           LHFscaling=None,
                           SHFscaling=None,
                           output_scaling=False,
                           interpolate=False,
                           hyam=hyam,hybm=hybm,
                           inp_sub_Qscaling=None,
                           inp_div_Qscaling=None,
                           inp_sub_Tscaling=None,
                           inp_div_Tscaling=None,
                           inp_sub_LHFscaling=None,
                           inp_div_LHFscaling=None,
                           inp_sub_SHFscaling=None,
                           inp_div_SHFscaling=None,
                           lev=None, interm_size=40,
                           lower_lim=6,is_continous=True,Tnot=5,
                           epsQ=1e-3,epsT=1,mode='train')

# Generator over the validation split (used as the training stream below).
valid_gen_CI = _make_generator(path+'Aqua_0K_withVBP/'+VALIDFILE)
# Generator over the test split (used as the validation stream below).
test_gen_CI = _make_generator(path+'Aqua_0K_withVBP/'+TESTFILE)
# # Train neural network
# Simple MLP: 64 inputs -> 7 hidden layers of 128 LeakyReLU units -> 65 linear outputs.
inp = Input(shape=(64,)) ## input after rh and tns transformation
densout = Dense(128, activation='linear')(inp)
densout = LeakyReLU(alpha=0.3)(densout)
for i in range (6):
    densout = Dense(128, activation='linear')(densout)
    densout = LeakyReLU(alpha=0.3)(densout)
dense_out = Dense(65, activation='linear')(densout)
model = tf.keras.models.Model(inp, dense_out)
model.summary()
# (Removed a stray no-op `mse` expression statement that was left on its own line;
# `mse` below comes from the cbrain star-imports.)
model.compile(tf.keras.optimizers.Adam(), loss=mse)
# Where to save the model
path_HDF5 = '/DFS-L/DATA/pritchard/tbeucler/SPCAM/HDF5_DATA/'
save_name = '2021_12_10_Test_Nando'
# Stop after 10 stagnant epochs; checkpoint only the best val_loss weights.
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
mcp_save_pos = ModelCheckpoint(path_HDF5+save_name+'.hdf5',save_best_only=True, monitor='val_loss', mode='min')
Nep = 20
# NOTE(review): trains on the *validation* generator and validates on the *test*
# generator, exactly as the original cell did -- confirm this is intentional.
model.fit_generator(valid_gen_CI, epochs=Nep, validation_data=test_gen_CI,\
                   callbacks=[earlyStopping, mcp_save_pos])
| notebooks/tbeucler_devlog/078_Training_Directly_on_Rescaled_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #!/usr/bin/python
import sys
from pyspark import SparkConf, SparkContext
'''
Let v be a given vertex
gamma(v) is then defined as the neighbouhood of v i.e.
it is the set of all the vertices connected to v
gamma_plus(v) is then gamma(v) + {v}
'''
# +
'''
Connects all the strictly large neighbours in gamma(v)
to min(gamma_plus(v))
'''
def large_star(x):
    """Large-star step of the connected-components algorithm.

    `x` is a (vertex, neighbour-list) pair. Every neighbour strictly greater
    than the vertex is connected to the minimum of gamma_plus(v), and the
    global `changes` accumulator is bumped when v itself is not that minimum.
    Returns the list of emitted (neighbour, min_vertex) edges.
    """
    global changes
    v, gamma_plus = x
    # Turn gamma into gamma_plus by including the vertex itself.
    gamma_plus.append(v)
    root = min(gamma_plus)
    # Count one change whenever v is not already the root of its star.
    if root != v:
        changes += 1
    # Connect every strictly larger neighbour to the root.
    return [(w, root) for w in gamma_plus if w > v]
'''
Connect all the smaller neighbours including self
to min(gamma(v))
'''
def small_star(x):
    """Small-star step of the connected-components algorithm.

    `x` is a (vertex, neighbour-list) pair. Every neighbour less than or equal
    to the vertex (v itself included, min excluded) is connected to
    min(gamma(v)); the global `changes` accumulator is bumped once per strictly
    smaller neighbour. Returns the emitted (neighbour, min_vertex) edges.
    """
    global changes
    v, gamma = x
    # Minimum is taken over gamma *before* v is appended.
    min_vertex = min(gamma)
    gamma.append(v)
    emitted = []
    for w in gamma:
        if w < v:
            # A strictly smaller neighbour exists -> record a change.
            changes += 1
            if w != min_vertex:
                emitted.append((w, min_vertex))
        elif w == v and w != min_vertex:
            # v connects itself to the minimum of its neighbourhood.
            emitted.append((w, min_vertex))
    return emitted
# -
if __name__ == "__main__":
    # Usage: spark-submit A2.py <edge-list file>; edges are space-delimited pairs.
    file_name = sys.argv[1]
    save_as_file = 'graph_output'
    delimitter = ' '
    conf = SparkConf().setAppName("connected_components")
    sc = SparkContext(conf=conf)
    # Load the data file
    lines = sc.textFile(file_name)
    # Driver-side accumulator counting edge re-attachments per iteration;
    # convergence is declared when an iteration produces zero changes.
    changes = sc.accumulator(0)
    def parse_txt(line): return [int(x) for x in line.split(delimitter)]
    # Emit each undirected edge in both directions for the large-star grouping.
    def large_star_map(x): return [(x[0], [x[1]]), (x[1], [x[0]])]
    # Key each edge by its larger endpoint for the small-star grouping.
    def small_star_map(x): return (
        x[0], [x[1]]) if x[1] <= x[0] else (x[1], [x[0]])
    def list_extend(x, y): return x+y
    # Parse the read edges
    edges = lines.map(parse_txt)
    out = edges
    prv_val = 0
    i=0
    print('-'*70)
    # Alternate large-star and small-star rounds until a fixed point is reached.
    while True:
        l_star = out.flatMap(large_star_map).reduceByKey(
            list_extend).flatMap(large_star)
        s_star = l_star.map(small_star_map).reduceByKey(
            list_extend).flatMap(small_star)
        # Force spark to evaluate the data -> use take(1) for minimal communication
        _ = s_star.take(1)
        print('total changes are',changes.value - prv_val)
        # If no changes were accumulated this round, the components are stable.
        if changes.value - prv_val == 0:
            break
        # Update the iterators
        prv_val = changes.value
        out = s_star
        i+=1
        ##Another convergence condition
        #if l_star.subtract(s_star).union(s_star.subtract(l_star)).isEmpty():
        #    break
    # Add self loop for indicating vertices belonging to the class
    self_nodes = out.values().distinct().map(lambda x: (x, x))
    combined = out.union(self_nodes)
    # NOTE(review): this prints the RDD object's repr, not its contents.
    print(combined.values().distinct())
    combined.saveAsTextFile(save_as_file)
    sc.stop()
| A2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Name:- <NAME> , Roll no 100150
# # Step 1 :- read the data from given csv file.
from numpy import genfromtxt
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
# Read the Iris CSV; row 0 is the header, column 0 an id, the last column the label,
# so the slice keeps only the four numeric feature columns.
csv_data = genfromtxt('Iris data.csv', delimiter=',')
observations_All = csv_data[1:,1:-1];
print("All observations from input CSV file")
print(observations_All)
# # Step 2 :- Set Global variables used in classification
feature_names = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
# The standard Iris dataset is ordered: 50 setosa, 50 versicolor, 50 virginica.
# Build the 150 labels programmatically instead of the hand-typed literal list,
# which was error-prone to maintain.
target = [0] * 50 + [1] * 50 + [2] * 50
target_names = ['setosa', 'versicolor', 'virginica']
# Feature matrix / label vector used by the classifier below.
X = observations_All
y = target
# # Step 3 :- Divide the dataset into training and testing datasets, Total 120 datasets for training and 30 datasets for testing (Train = 40 * 3 , Test = 10 * 3).
# 80/20 split: 120 training rows, 30 test rows.
# NOTE(review): shuffle=False on the class-ordered Iris data means the test set
# is the *last* 30 rows (all one class) -- confirm this is the intent.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)
print (X_train.shape)
print (X_test.shape)
# # Step 4a :- Define the KNeighborsClassifier and predict using K = 3 and find the accuracy of the prediction
# +
# k-nearest-neighbours with k = 3; report test-set accuracy.
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
# -
# # Step 4b :- Define the KNeighborsClassifier and predict using K = 5 and find the accuracy of the prediction
# +
# k-nearest-neighbours with k = 5; report test-set accuracy.
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(metrics.accuracy_score(y_test, y_pred))
# -
# # Step5:- Using different values for 'K' in KNN
# for the case k = 3 we got , accuracy = 96.67%, trying k values from 1 to 25, we get
# +
# Sweep k = 1 .. 25, collecting test accuracy for each, then plot the curve.
k_range = range(1, 26)
scores = []
for k in k_range:
    model_k = KNeighborsClassifier(n_neighbors=k)
    model_k.fit(X_train, y_train)
    scores.append(metrics.accuracy_score(y_test, model_k.predict(X_test)))

import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(k_range, scores)
plt.xlabel('Values of K')
plt.ylabel('Accuracy')
# -
| Vikash IRIS Data KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning Midterm, Aug Semester, 2020
#
# In this exam, you will demonstrate your understanding of the material from the lectures, tutorials, and problem sets.
#
# Exam submission is on Gradescope. Before you begin the exam, make sure you have created an account at [http://gradescope.com](http://gradescope.com). Then you have to sign up for AT82.03 in Aug 2020. The course signup code is MY3YPW .
#
# For each question, insert your answer directly in this sheet. When complete, export the sheet as a PDF and upload to Gradescope. If you are using puffer.cs.ait.ac.th, in JupyterLab, click on "File" then "Export Notebook As" then select PDF. If you are running Jupyter on your own machine, you may need to install the `nbconvert` package with pip or conda.
#
# Note that you have **2.5 hours** to do the exam. Also note that there are short answer questions that you may be able to answer faster than the coding questions. You might consider answering those questions first to get as much credit as possible!
#
# ## Question 1 (10 points)
#
# We have seen that regression problems and binary classification problems have seemingly different cost functions. For regression, we normally use the cost function
# $$J(\theta) = \sum_{i=1}^m \left( h_\theta(\mathbf{x}^{(i)}) - y^{(i)} \right)^2,$$
# whereas for binary classifiation, we normally use the cost function
# $$J(\theta) = \sum_{i=1}^m \left( y^{(i)} \log h_\theta(\mathbf{x}^{(i)}) + (1-y^{(i)})\log(1 - h_\theta(\mathbf{x}^{(i)})) \right). $$
#
# Briefly explain (in English) how these two different cost functions are derived from the same idea or principle.
#
# **Both cost functions can be derived from the principle of maximum likelihood. For regression, the negative log likelihood of $\theta$ under the assumption that $y \sim {\cal N}(\theta^\top \mathbf{x}, \sigma^2)$ turns out to be the least squares cost function, and the negative log likelihood of $\theta$ under the assumption that $y^{(i)} \sim \text{Bernoulli}(p)$ with $p = \frac{1}{1+e^{-\theta^\top \mathbf{x}^{(i)}}}$ gives the cross entropy
# loss function for binary classification.**
#
# ## Question 2 (20 points)
#
# Generate a sample of 200 points from a 2D Gaussian with mean $\mu_1 = \begin{bmatrix} 3 \\ 3 \end{bmatrix}$ and covariance $\Sigma_1 = \begin{bmatrix} 4 & 0 \\ 0 & 4 \end{bmatrix}$
#
# Generate a second sample of 200 points from a 2D Gaussian with mean $\mu_2 = \begin{bmatrix} 5 \\ 5 \end{bmatrix}$ and covariance $\Sigma_2 = \begin{bmatrix} 4 & 0 \\ 0 & 4 \end{bmatrix}$.
#
# Assuming the first set of points is class 0 and the second set of points is class 1, split the dataset into 80% training and 20% testing, and plot the
# training set and test set in separate plots with different colors for each class below.
#
# +
# Place code to generate the data and plot them here.
import numpy as np
import matplotlib.pyplot as plt
def generate_data(mu1, Sigma1, mu2, Sigma2):
    """Sample two 200-point 2D Gaussian classes and return an 80/20 split.

    Class 0 ~ N(mu1, Sigma1), class 1 ~ N(mu2, Sigma2). Returns
    (X_train, X_test, y_train, y_test) with shapes (320,2), (80,2),
    (320,1), (80,1) after a random shuffle of the 400 rows.

    BUG FIX: the original body ignored the Sigma1/Sigma2 parameters and read
    the module-level global `Sigma` instead; it only worked because callers
    happened to pass that same global. Both covariances are now honoured.
    """
    X1 = np.random.multivariate_normal(mu1, Sigma1, 200)
    y1 = np.zeros((200,1))
    X2 = np.random.multivariate_normal(mu2, Sigma2, 200)
    y2 = np.ones((200,1))
    # Stack the two classes, then shuffle row indices for the split.
    X = np.concatenate((X1, X2), 0)
    y = np.concatenate((y1, y2), 0)
    idx = np.arange(0, 400)
    np.random.shuffle(idx)
    train_idx = idx[0:320]
    test_idx = idx[320:]
    X_train = X[train_idx,:]
    X_test = X[test_idx,:]
    y_train = y[train_idx,:]
    y_test = y[test_idx,:]
    return X_train, X_test, y_train, y_test
# Two overlapping Gaussian classes (same covariance, nearby means).
mu1 = [3, 3]
Sigma = [[4, 0], [0, 4]]
mu2 = [5, 5]
X_train, X_test, y_train, y_test = generate_data(mu1, Sigma, mu2, Sigma)
def plot_data(X, y, title):
    """Scatter the two classes (0 = red circles, 1 = blue circles) with equal axes."""
    for label, fmt in ((0, 'ro'), (1, 'bo')):
        pts = X[(y == label).flat]
        plt.plot(pts[:, 0], pts[:, 1], fmt)
    plt.title(title)
    plt.axis('equal')
# Side-by-side scatter of the training and test splits.
plt.rcParams["figure.figsize"] = (14, 8)
plt.subplot(1, 2, 1)
plot_data(X_train, y_train, 'Training data')
plt.subplot(1, 2, 2)
plot_data(X_test, y_test, 'Test data')
# -
# ## Question 3 (20 points)
#
# Build a logistic regression model for the training set in Question 2. Plot the test set with correctly and incorrectly classified points using different symbols, and show the classification boundary in a plot below.
# +
# Place code to train the logistic regression model and plot the results here.
def sigmoid(z):
    """Elementwise logistic function sigma(z) = 1 / (1 + e^{-z})."""
    denom = 1 + np.exp(-z)
    return 1 / denom
def h(X, theta):
    """Logistic-regression hypothesis: per-row P(y=1 | x) = sigmoid(X @ theta)."""
    logits = X @ theta
    return sigmoid(logits)
def grad_j(X, y, y_pred):
    """Mean log-likelihood gradient w.r.t. theta (ascent direction), shape (n, 1)."""
    residual = y - y_pred
    return X.T @ residual / X.shape[0]
def j(theta, X, y):
    """Mean cross-entropy cost and its gradient at theta.

    Returns (cost, grad) where cost is a scalar and grad has theta's shape.
    """
    y_pred = h(X, theta)
    # Per-example negative log-likelihood.
    log_loss = -(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred))
    cost = sum(log_loss) / X.shape[0]
    grad = grad_j(X, y, y_pred)
    return cost[0], grad
def train(X, y, theta_initial, alpha, num_iters):
    """Fixed-step gradient-ascent loop for logistic regression.

    Returns the final theta and the per-iteration cost history.
    """
    theta = theta_initial
    history = []
    iteration = 0
    while iteration < num_iters:
        cost, grad = j(theta, X, y)
        history.append(cost)
        # Ascent step: grad_j already points uphill on the log-likelihood.
        theta = theta + alpha * grad
        iteration += 1
    return theta, history
# Prepend the intercept column of ones, then fit for 2000 iterations at alpha=0.05.
X_train_aug = np.concatenate((np.ones((320,1)), X_train), 1)
theta_initial = np.ones((3,1))
theta_final, j_history = train(X_train_aug, y_train, theta_initial, 0.05, 2000)
# Loss curve over training iterations.
plt.plot(j_history, 'r-')
plt.title('Training set loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
# +
def boundary_points(X, theta):
    """Return two endpoints of the decision line theta0 + theta1*x1 + theta2*x2 = 0.

    The segment spans the data: endpoints are placed at the extreme projections
    of X onto the boundary direction, offset from the origin by the boundary's
    signed distance along the normal.
    """
    # Normal to the decision boundary (theta1, theta2), then normalised.
    v_orthogonal = np.array([[theta[1,0]],[theta[2,0]]])
    v_ortho_length = np.sqrt(v_orthogonal.T @ v_orthogonal)
    # Signed distance of the boundary from the origin along the normal.
    dist_ortho = theta[0,0] / v_ortho_length
    v_orthogonal = v_orthogonal / v_ortho_length
    # Unit vector along the boundary (perpendicular to the normal).
    v_parallel = np.array([[-v_orthogonal[1,0]],[v_orthogonal[0,0]]])
    projections = X @ v_parallel
    proj_1 = min(projections)
    proj_2 = max(projections)
    point_1 = proj_1 * v_parallel - dist_ortho * v_orthogonal
    point_2 = proj_2 * v_parallel - dist_ortho * v_orthogonal
    return point_1, point_2
def plot_data_with_correctness(X, y, yhat, theta, title):
    """Plot test points (circles = correct, stars = misclassified, red = class 0,
    blue = class 1) together with the decision boundary implied by theta."""
    # Partition points by true class and by whether the 0.5-thresholded
    # prediction matches that class.
    X_0_corr = X[np.logical_and(y == 0, yhat < 0.5).flat]
    X_0_incorr = X[np.logical_and(y == 0, yhat >= 0.5).flat]
    X_1_corr = X[np.logical_and(y == 1, yhat >= 0.5).flat]
    X_1_incorr = X[np.logical_and(y == 1, yhat < 0.5).flat]
    plt.rcParams["figure.figsize"] = (12, 12)
    plt.plot(X_0_corr[:,0], X_0_corr[:,1], 'ro')
    plt.plot(X_0_incorr[:,0], X_0_incorr[:,1], 'r*', markersize=10)
    plt.plot(X_1_corr[:,0], X_1_corr[:,1], 'bo')
    plt.plot(X_1_incorr[:,0], X_1_incorr[:,1], 'b*', markersize=10)
    # Decision boundary segment spanning the data extent.
    point_1, point_2 = boundary_points(X, theta)
    plt.plot([point_1[0,0], point_2[0,0]],[point_1[1,0], point_2[1,0]], 'g-')
    plt.title(title)
    plt.axis('equal')

# Evaluate the trained model on the (intercept-augmented) test set and plot.
X_test_aug = np.concatenate((np.ones((80,1)), X_test), 1)
yhat_test = h(X_test_aug, theta_final)
plot_data_with_correctness(X_test, y_test, yhat_test, theta_final, 'Test set classification results')
# -
# ## Question 4 (10 points)
#
# Explain why it is not possible to train a SVM using the techniques we've studied so far in class to classify the data from Question 2. Give a value for $\mu_2$ in Question 2 keeping the other values constant that would make it very likely that the training could be classified using the SVM techniques we studied so far in class.
#
# **For the techniques we've studied so far for the SVM to work, the training data need to be linearly separable. That is clearly not true from the plot in Question 2. To make the data linearly separable, we'd need a $\mu_2$ much farther from $\mu_1$ than what is given. If $\mu_2 = \begin{bmatrix} 12 & 12 \end{bmatrix}^\top$, the data are linearly separable almost always:**
# +
# Same setup as Question 2 but with mu2 far from mu1, so the two classes are
# (almost surely) linearly separable -- the condition a hard-margin SVM needs.
mu1 = [3, 3]
Sigma = [[4, 0], [0, 4]]
mu2 = [12, 12]
X_train, X_test, y_train, y_test = generate_data(mu1, Sigma, mu2, Sigma)
plt.rcParams["figure.figsize"] = (14, 8)
plt.subplot(1, 2, 1)
plot_data(X_train, y_train, 'Training data')
plt.subplot(1, 2, 2)
plot_data(X_test, y_test, 'Test data')
# -
# ## Question 5 (10 points)
#
# Suppose you are building a linear SVM in $\mathbb{R}^2$. The two classes are represented as 'X's and 'O's in the diagram below.
#
# <img src="http://www.cs.ait.ac.th/~mdailey/class/ml/q2.jpg" width="400"></img>
#
# If the training data consisted only of the four examples A, B, C, and D in the diagram above, which would be the support
# vectors?
#
# **Points A, B, and D would be the support vectors.**
#
# ## Question 6 (10 points)
#
# In Question 5 (the linear SVM),
# if you were told that Lagrange multipliers $\alpha_A = 1.0$ and $\alpha_D = 2.0$, what would $\alpha_B$ and $\alpha_C$ be?
#
# **Clearly $\alpha_C = 0$, since it is not a support vector. Since $$\sum_{i=1}^m \alpha_i y^{(i)} = 0,$$ we need $\alpha_B=1$.**
#
# ## Question 7 (10 points)
#
# Suppose the optimal linear SVM for Question 5 had $\mathbf{w} = \begin{bmatrix} 1 & 1 \end{bmatrix}^\top$ and $b = -2$.
# Find the geometric margin $\gamma^{(i)}$ for $\mathbf{x}^{(i)} = \begin{bmatrix} 2 & 5 \end{bmatrix}^\top$ and $y^{(i)} = 1$.
#
# **The geometric margin for example $i$ is $$\gamma^{(i)} = y^{(i)} \left( \frac{\mathbf{w}^\top \mathbf{x}^{(i)}}{\| \mathbf{w} \|} + \frac{b}{\| \mathbf{w} \|} \right),$$ which according to the code below is 3.536.**
# Geometric margin of x = [2, 5]^T (label +1) for the SVM w = [1, 1]^T, b = -2:
# gamma = (w.x + b) / ||w||.
w = np.array([[1],[1]])
x = np.array([[2],[5]])
b = -2
norm_w = np.linalg.norm(w)
gamma = w.T @ x / norm_w + b / norm_w
print('Gamma:', gamma[0,0])
# ## Question 8 (10 points)
#
# Consider the SVM kernel $K(\mathbf{x},\mathbf{z}) = (\mathbf{x}^\top \mathbf{z})^2$. Explain the similarities and differences between a SVM with this kernel and a logistic regression using a quadratic polynomial transformation (an ordinary logistic regression preceeded by a transformation of the input vector. If they are exactly the same, prove it. If not, clearly explain how they are different.
#
# **They are similar but not exactly the same. In the lecture notes, it's proven that $K(\mathbf{x},\mathbf{z}) = (\mathbf{x}^\top \mathbf{z})^2$ corresponds to a transformation $$\phi = \begin{bmatrix} x_1 x_1 & x_1 x_2 & \ldots & x_2 x_1 & x_2 x_2 & \ldots & x_n x_n \end{bmatrix},$$ which is indeed a polynomial transformation with degree 2. The SVM doesn't need a constant input like logistic regression does, because the $b$ parameter is separate from $\mathbf{w}$. However, the transformation above does leaves out the linear terms, which could be important. Also, SVMs find maximum margin linear classifiers in feature space rather than the maximum likelihood linear classifiers. This could affect the final decision boundary, depending on the training data distribution.**
| Exams/Sample-Midterm-With-Solution-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorBoard with Fashion MNIST
#
# In this week's exercise you will train a convolutional neural network to classify images of the Fashion MNIST dataset and you will use TensorBoard to explore how its confusion matrix evolves over time.
# ## Setup
# Load the TensorBoard notebook extension.
# %load_ext tensorboard
# +
import io
import itertools
import numpy as np
import sklearn.metrics
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from datetime import datetime
from os import getcwd
print("TensorFlow version: ", tf.__version__)
# -
# ## Load the Fashion-MNIST Dataset
#
# We are going to use a CNN to classify images in the [Fashion-MNIST](https://research.zalando.com/welcome/mission/research-projects/fashion-mnist/) dataset. This dataset consists of 70,000 grayscale images of fashion products from 10 categories, with 7,000 images per category. The images have a size of $28\times28$ pixels.
#
# First, we load the data. Even though these are really images, we will load them as NumPy arrays and not as binary image objects. The data is already divided into training and testing sets.
# +
# Load the data as pre-saved NumPy arrays (not image files).
train_images = np.load(f"{getcwd()}/../tmp2/train_images.npy")
train_labels = np.load(f"{getcwd()}/../tmp2/train_labels.npy")
test_images = np.load(f"{getcwd()}/../tmp2/test_images.npy")
test_labels = np.load(f"{getcwd()}/../tmp2/test_labels.npy")

# The labels of the images are integers representing classes.
# Here we set the Names of the integer classes, i.e., 0 -> T-shirt/top, 1 -> Trouser, etc.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# -
# ## Format the Images
#
# `train_images` is a NumPy array with shape `(60000, 28, 28)` and `test_images` is a NumPy array with shape `(10000, 28, 28)`. However, our model expects arrays with shape `(batch_size, height, width, channels)` . Therefore, we must reshape our NumPy arrays to also include the number of color channels. Since the images are grayscale, we will set `channels` to `1`. We will also normalize the values of our NumPy arrays to be in the range `[0,1]`.
# +
# Pre-process images: add the channel axis expected by Conv2D and scale
# pixel values to [0, 1]. reshape(-1, ...) infers the sample count instead
# of hard-coding 60000/10000, so the cell also works on data subsets.
train_images = train_images.reshape(-1, 28, 28, 1)
train_images = train_images / 255.0

test_images = test_images.reshape(-1, 28, 28, 1)
test_images = test_images / 255.0
# -
# ## Build the Model
#
# We will build a simple CNN and compile it.
# +
# Build the model: two conv/pool stages, then a dense head over 10 classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    # Softmax output: one probability per Fashion-MNIST class.
    tf.keras.layers.Dense(10, activation='softmax')
])

# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# -
# ## Plot Confusion Matrix
#
# When training a classifier, it's often useful to see the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix gives you detailed knowledge of how your classifier is performing on test data.
#
# In the cell below, we will define a function that returns a Matplotlib figure containing the plotted confusion matrix.
def plot_confusion_matrix(cm, class_names):
    """
    Returns a matplotlib figure containing the plotted confusion matrix.

    Args:
       cm (array, shape = [n, n]): a confusion matrix of integer classes
       class_names (array, shape = [n]): String names of the integer classes
    """
    figure = plt.figure(figsize=(8, 8))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion matrix")
    plt.colorbar()
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)

    # Normalize the confusion matrix row-wise (per true class) to 2 decimals.
    cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)

    # Use white text if squares are dark; otherwise black.
    threshold = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        color = "white" if cm[i, j] > threshold else "black"
        plt.text(j, i, cm[i, j], horizontalalignment="center", color=color)

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return figure
# ## TensorBoard Callback
#
# We are now ready to train the CNN and regularly log the confusion matrix during the process. In the cell below, you will create a [Keras TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) to log basic metrics.
# +
# Clear logs prior to logging data.
# !rm -rf logs/image

# Create a fresh timestamped log directory for this run.
logdir = "logs/image/" + datetime.now().strftime("%Y%m%d-%H%M%S")

# TensorBoard callback that logs basic metrics under logdir.
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir) # YOUR CODE HERE
# Separate writer for the confusion-matrix image summaries.
file_writer_cm = tf.summary.create_file_writer(logdir + '/cm')
# -
# ## Convert Matplotlib Figure to PNG
#
# Unfortunately, the Matplotlib file format cannot be logged as an image, but the PNG file format can be logged. So, you will create a helper function that takes a Matplotlib figure and converts it to PNG format so it can be written.
def plot_to_image(figure):
    """
    Converts the matplotlib plot specified by 'figure' to a PNG image and
    returns it. The supplied figure is closed and inaccessible after this call.
    """
    # Render the figure to an in-memory PNG buffer.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    # Closing the figure prevents it from being displayed directly inside
    # the notebook.
    plt.close(figure)
    buf.seek(0)
    # Decode the PNG bytes into an RGBA (4-channel) TF image tensor.
    image = tf.image.decode_png(buf.getvalue(), channels=4) # YOUR CODE HERE
    # Add the leading batch dimension expected by tf.summary.image.
    image = tf.expand_dims(image, 0) # YOUR CODE HERE
    return image
# ## Confusion Matrix
#
# In the cell below, you will define a function that calculates the confusion matrix.
# +
def log_confusion_matrix(epoch, logs):
    # Score the held-out images and collapse the per-class scores into a
    # single predicted label per example.
    raw_scores = model.predict(test_images)
    predicted_labels = np.argmax(raw_scores, axis=1)
    # Build this epoch's confusion matrix and render it as a figure,
    # then convert the figure into an image tensor.
    matrix = sklearn.metrics.confusion_matrix(test_labels, predicted_labels)
    fig = plot_confusion_matrix(matrix, class_names=class_names)
    image_tensor = plot_to_image(fig)
    # Write the rendered matrix through the dedicated image summary writer.
    with file_writer_cm.as_default():
        tf.summary.image("Confusion Matrix", image_tensor, step=epoch)
# Run the logger at the end of every training epoch.
cm_callback = keras.callbacks.LambdaCallback(on_epoch_end=log_confusion_matrix)
# -
# ## Running TensorBoard
#
# The next step will be to run the code shown below to render the TensorBoard. Unfortunately, TensorBoard cannot be rendered within the Coursera environment. Therefore, we won't run the code below.
#
# ```python
# # Start TensorBoard.
# # %tensorboard --logdir logs/image
#
# # Train the classifier.
# model.fit(train_images,
# train_labels,
# epochs=5,
# verbose=0, # Suppress chatty output
# callbacks=[tensorboard_callback, cm_callback],
# validation_data=(test_images, test_labels))
# ```
#
# However, you are welcome to download the notebook and run the above code locally on your machine or in Google's Colab to see TensorBoard in action. Below are some example screenshots that you should see when executing the code:
# <table>
# <tr>
# <td>
# <img src="../tmp2/tensorboard_01.png" width="500"/>
# </td>
# <td>
# <img src="../tmp2/tensorboard_02.png" width="500"/>
# </td>
# </tr>
# </table>
# <br>
# <br>
# <table>
# <tr>
# <td>
# <img src="../tmp2/tensorboard_03.png" width="500"/>
# </td>
# <td>
# <img src="../tmp2/tensorboard_04.png" width="500"/>
# </td>
# </tr>
# </table>
# # Submission Instructions
# +
# Now click the 'Submit Assignment' button above.
# -
# # When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This frees up resources for your fellow learners.
# + language="javascript"
# <!-- Save the notebook -->
# IPython.notebook.save_checkpoint();
# + language="javascript"
# <!-- Shutdown and close the notebook -->
# window.onbeforeunload = null
# window.close();
# IPython.notebook.session.delete();
| course-4/week-3/utf-8''TF_Serving_Week_3_Exercise_Question.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# NOTE(review): pandas is imported but unused in this notebook — confirm before removing.
import pandas as pd
import nltk
# Load the three pre-processed corpora as raw text.
with open("../data/processed/CAP/CAP.txt",'r') as file:
    CAP = file.read()
with open("../data/processed/PL/PL.txt",'r') as file:
    PL = file.read()
with open("../data/processed/GPPD/GPPD.txt",'r') as file:
    GPPD = file.read()
# -
# Verbose regex tokenizer pattern (shared by all three corpora below).
pattern = r'''(?x) # set flag to allow verbose regexps
(?:[A-Z]\.)+ # abbreviations, e.g. U.S.A.
| \w+(?:-\w+)* # words with optional internal hyphens
| \$?\d+(?:\.\d+)?%? # currency and percentages, e.g. $12.40, 82%
| \.\.\. # ellipsis
| [][.,;"'?():_`-] # these are separate tokens; includes ], [
'''
# Tokenize each corpus and record its token count. Variable names are now
# consistent (<corpus>_token_num); previously PL used "_token_num" while
# CAP and GPPD used "_tokens_num".
PL_tokens = nltk.regexp_tokenize(PL, pattern)
PL_token_num = len(PL_tokens)
CAP_tokens = nltk.regexp_tokenize(CAP, pattern)
CAP_token_num = len(CAP_tokens)
GPPD_tokens = nltk.regexp_tokenize(GPPD, pattern)
GPPD_token_num = len(GPPD_tokens)
# Combined token count across all three corpora (displayed as cell output).
PL_token_num + CAP_token_num + GPPD_token_num
| notebook/tokenize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''nassy-hasler-vBIYXc2u'': pipenv)'
# name: python3
# ---
# +
import pandas as pd
from os import walk
from copy import copy
import math, pywt, numpy as np
# #%matplotlib widget
pd.set_option('display.max_rows', 1000)
# +
# data can be found smb://nas-weber01.unisg.ch/data/Nassy/03_Online_Model/data
# Collect per-participant recording files from ./split; each entry is
# (participant id taken from the filename prefix, full filename).
_, _, file_names = next(walk('./split'))
files = [(name.split('_')[0], name) for name in file_names]
files[0:20]
# +
# Study tasks: (task id, GSR marker name, Eclipse marker name incl. source file).
tasks = [
    ('t1', 't1_gsr', 't1_eclipse (drawing_a_1.java)'),
    ('t2', 't2_gsr', 't2_eclipse (drawing_a_2.java)'),
    ('t3', 't3_gsr', 't3_eclipse (drawing_b_1.java)'),
    ('t4', 't4_gsr', 't4_eclipse (drawing_b_2.java)'),
    ('t5', 't5_gsr', 't5_eclipse (drawing_b_3.java)'),
    ('t6', 't6_gsr', 't6_eclipse (drawing_b_6.java)'),
    ('t7', 't7_gsr', 't7_eclipse (drawing_b_9.java)'),
    ('t8', 't8_gsr', 't8_eclipse (drawing_b_11.java)'),
]
tasks
# +
import json
import requests
import random, string
import pandas as pd
import io
import time
DEFAULT_SETTINGS = {
"authorisation": {
'username': '<EMAIL>',
'password': '<PASSWORD>'
},
"url": 'http://localhost:8989'
}
DEFAULT_FILTERS = [
{
'name': "SubstitutePupilFilter",
'actualParameters': {
'left_pupil': 'ET_PupilLeft',
'right_pupil': 'ET_PupilRight',
'timestampcolumn': 'Timestamp',
},
'columns': {
'left_pupil': 'ET_PupilLeft',
'right_pupil': 'ET_PupilRight',
}
},
{
'name': "SubstituteGazePointFilter",
'actualParameters': {
'leftPupilGazeXName': 'ET_GazeLeftx',
'leftPupilGazeYName': 'ET_GazeLefty',
'rightPupilGazeXName': 'ET_GazeRightx',
'rightPupilGazeYName': 'ET_GazeRighty',
'timestampcolumn': 'Timestamp',
},
'columns': {}
},
{
'name': "ButterworthFilter",
'actualParameters': {
'hertz': 4,
'sampleRate': 300,
'timestampcolumn': 'Timestamp',
},
'columns': {
'left_pupil': 'ET_PupilLeft',
'right_pupil': 'ET_PupilRight',
},
'decimalSeparator': '.'
},
{
'name': "LinearInterpolationFilter",
'actualParameters': {
'left_pupil': 'ET_PupilLeft',
'right_pupil': 'ET_PupilRight',
'timestampcolumn': 'Timestamp',
},
'columns': {
'left_pupil': 'ET_PupilLeft',
'right_pupil': 'ET_PupilRight',
}
}
]
"""
Broken Filter
{
'name': "BlinkDetectionFilter",
'actualParameters': {
'leftPupilGazeXColumnName': 'ET_GazeLeftx',
'leftPupilGazeYColumnName': 'ET_GazeLefty',
'rightPupilGazeXColumnName': 'ET_GazeRightx',
'rightPupilGazeYColumnName': 'ET_GazeRighty',
'blinkDetectionTimeThreashold': 60,
'left_pupil': 'ET_PupilLeft',
'right_pupil': 'ET_PupilRight',
'timestampcolumn': 'Timestamp',
},
'columns': {}
},
"""
def randomword(length):
    """Return a random lowercase ASCII string of the given length.

    Not cryptographically secure (uses the `random` module); fine for
    throwaway study/subject names, but use `secrets` for anything
    security-sensitive.
    """
    # random.choices replaces the hand-rolled generator/join loop.
    return ''.join(random.choices(string.ascii_lowercase, k=length))
def clean(df, settings=DEFAULT_SETTINGS, filters=DEFAULT_FILTERS):
    """Run `df` through the Cheetah filtering service and return the result.

    Workflow: log in, create a throwaway study and subject, upload `df` as
    a TSV file, request the filter pipeline, poll until the server-side
    task finishes, download the filtered TSV, delete both the uploaded and
    filtered files on the server, and return the filtered data as a
    DataFrame.

    On failure, prints which stage failed and re-raises the original
    exception. NOTE(review): the created study and subject are never
    deleted (see the TODO below), so they accumulate on the server.
    """
    with requests.Session() as s:
        # login
        # The session keeps the auth cookie; the response object itself is unused.
        r = s.post(
            url=settings.get('url') + "/cheetah/login",
            data=settings.get('authorisation')
        )
        # create study
        # Random names avoid collisions with earlier throwaway studies.
        try:
            data = json.loads(s.post(
                url=settings.get('url') + "/cheetah/api/user/study",
                headers={'content-type': 'application/json'},
                data=json.dumps({
                    'comment': '',
                    'name': randomword(20)
                })
            ).content.decode('UTF-8'))
            study_id = data['resBody']["id"]
        except Exception as e:
            print('Could not create study')
            raise e
        # create subject
        try:
            data = json.loads(s.post(
                url=settings.get('url') + "/cheetah/api/user/addsubject",
                headers={'content-type': 'application/json'},
                data=json.dumps({
                    'comment': '',
                    'email': f'{randomword(20)}@<EMAIL>',
                    'id': 0,
                    'studyId': study_id,
                    'studyName': randomword(20),
                    'subject_id': randomword(20),
                    'synchronized_from': 0,
                })
            ).content.decode('UTF-8'))
            subject_id = data['resBody']["id"]
        except Exception as e:
            print('Could not create subject')
            raise e
        # upload data
        # Serialize the frame to an in-memory TSV and attach it to the subject.
        try:
            stream = io.StringIO()
            df.to_csv(stream, sep='\t', index=False)
            data = json.loads(s.post(
                url=settings.get('url') + "/cheetah/api/user/uploadFile",
                files=[('files', ('file.tsv', stream.getvalue(), 'application/octet-stream'))],
                data={
                    'subjectIds': [subject_id]
                }
            ).content.decode('UTF-8'))
            file_id = data['resBody']['easyUserDataListSuccessfultMapped'][0]["id"]
        except Exception as e:
            print('Could not upload data')
            raise e
        # filter data
        try:
            # request filter
            data = json.loads(s.post(
                url=settings.get('url') + "/cheetah/api/user/filterrequest",
                headers={'content-type': 'application/json'},
                data=json.dumps({
                    "files": [file_id],
                    "filters": filters
                })
            ).content.decode('UTF-8'))
            task_id = data['resBody']
            # wait for task to finish
            # Polls every 100 ms; no timeout, so a hung server task blocks forever.
            while True:
                data = json.loads(s.get(
                    url=settings.get('url') + "/cheetah/api/user/taskFinished/" + task_id,
                    headers={'content-type': 'application/json'},
                ).content.decode('UTF-8'))
                if data['resBody'] == True:
                    break
                time.sleep(0.1)
            # get filtered file id
            data = json.loads(s.get(
                url=settings.get('url') + "/cheetah/api/user/taskid/" + task_id,
            ).content.decode('UTF-8'))
            filtered_file_id = data['resBody']["id"]
            # download filtered data
            data = s.get(
                url=settings.get('url') + "/cheetah/api/user/download/" + str(filtered_file_id),
            ).content.decode("utf-8")
        except Exception as e:
            print('Could not filter data')
            raise e
        # cleanup
        # TODO: also delete study and subject
        s.delete(
            url=settings.get('url') + "/cheetah/api/user/file/"+str(file_id)
        )
        s.delete(
            url=settings.get('url') + "/cheetah/api/user/file/"+str(filtered_file_id)
        )
        return pd.read_csv(io.StringIO(data), sep='\t')
# +
def modmax(d):
    """Return the modulus maxima of the 1-D signal `d`.

    t[i] equals |d[i]| wherever |d[i]| is a local maximum of the absolute
    signal (at least as large as both neighbours and strictly larger than
    at least one of them); elsewhere t[i] is 0.0. Boundary samples are
    compared against themselves on the missing side, so they qualify only
    via their single real neighbour.
    """
    n = len(d)
    # Absolute signal. math.fabs(x) == math.sqrt(x**2) for real x, so
    # using m[i] below also replaces the original sqrt(d[i]**2).
    m = [math.fabs(x) for x in d]
    t = [0.0] * n
    for i in range(n):
        ll = m[i - 1] if i >= 1 else m[i]
        oo = m[i]
        # Fix: a right neighbour exists for every i < n - 1. The previous
        # bound (i < n - 2) made the second-to-last sample compare against
        # itself instead of the last sample — an off-by-one.
        rr = m[i + 1] if i < n - 1 else m[i]
        # Local maximum: not smaller than either neighbour, strictly
        # larger than at least one of them.
        if (ll <= oo and oo >= rr) and (ll < oo or oo > rr):
            t[i] = oo
    return t
def lhipa(d):
    """Compute the LHIPA-style index of pupillary activity for signal `d`.

    Wavelet-ratio measure of cognitive load from pupil diameter —
    presumably following Duchowski et al.'s LHIPA; TODO confirm the
    intended reference. Returns the count of significant modulus maxima
    divided by a (hard-coded) duration.
    """
    # Deepest useful decomposition level for this signal length/wavelet.
    w = pywt.Wavelet('sym16')
    maxlevel = pywt.dwt_max_level(len(d), filter_len=w.dec_len)
    # High-frequency band = level 1; low-frequency band = half the max level.
    hif, lof = 1, int(maxlevel/2)
    # Guard for too-short signals (hif is always 1, so only lof can be 0).
    if hif <=0 or lof <=0:
        return 0.0
    # Detail coefficients of each band, normalised by sqrt(2^level).
    cD_H = pywt.downcoef('d', d, 'sym16', 'per', level=hif)
    cD_L = pywt.downcoef('d', d, 'sym16', 'per', level=lof)
    cD_H[:] = [x/math.sqrt(2**hif) for x in cD_H]
    cD_L[:] = [x/math.sqrt(2**lof) for x in cD_L]
    # Low/high ratio. Note cD_LH aliases cD_L and is updated in place;
    # the index maps each low-band coefficient onto its high-band position.
    cD_LH = cD_L
    for i in range(len(cD_L)):
        cD_LH[i] = cD_L[i] / cD_H[int(((2**lof)/(2**hif))*i)]
    # Keep only modulus maxima, then apply the universal threshold
    # (sigma * sqrt(2 * log2(N))), zeroing values below it.
    cD_LHm = modmax(cD_LH)
    luniv = np.std(cD_LHm) * math.sqrt(2.0*np.log2(len(cD_LHm)))
    cD_LHt = pywt.threshold(cD_LHm, luniv, mode="less")
    # NOTE(review): duration is hard-coded to 10 — the real elapsed-time
    # expression is commented out, so the result is maxima per assumed
    # 10-unit window; confirm against the recordings' actual length.
    tt = 10#d[-1].timestamp() - d[0].timestamp()
    # Count coefficients that survived thresholding.
    ctr = 0
    for i in iter(range(len(cD_LHt))):
        if math.fabs(cD_LHt[i]) > 0: ctr += 1
    return float(ctr)/tt
# -
def extract_features(df):
    """Compute pupil-diameter summary features from a time-indexed frame.

    Expects `df` to have a DatetimeIndex and an 'ET_PubilAvg' column
    (average pupil diameter — TODO confirm units). Returns the tuple
    (min, max, mean, median, std, peak_count, peaks_per_minute, lhipa).

    Side effect: adds a 'rolling_mean' column to the caller's `df`.
    """
    # Basic distribution statistics. String aggregation names replace the
    # `pd.np` alias, which was removed in pandas 2.0 (same results).
    df_min, df_max, df_mean, df_median, df_std = df['ET_PubilAvg'].agg(['min', 'max', 'mean', 'median', 'std'])
    # Tonic (slow-moving) component: 8-second rolling mean.
    df['rolling_mean'] = df['ET_PubilAvg'].rolling('8s', min_periods=1).mean()
    # Upsample to 1 ms with forward-fill; `.ffill()` replaces the
    # deprecated `.pad()` alias (identical behaviour).
    df_resampled = df[~df.index.duplicated(keep='first')].resample('1ms').ffill()
    # Re-centre the rolling mean (4 s look-ahead at the 1 ms rate).
    df_resampled['rolling_mean'] = df_resampled['rolling_mean'].shift(periods=-4000)
    # Phasic (fast) component = signal minus tonic baseline.
    df_resampled['phasic'] = df_resampled['ET_PubilAvg'] - df_resampled['rolling_mean']
    # Smooth the phasic signal with a centred 1 s rolling mean.
    df_resampled['rolling_phasic'] = df_resampled['phasic'].rolling('1s', min_periods=1).mean().shift(periods=-500)
    # A peak is a strict local maximum of the smoothed phasic signal above 0.1.
    df_resampled['peaks'] = df_resampled['rolling_phasic'][(df_resampled['rolling_phasic'].shift(1) < df_resampled['rolling_phasic']) & (df_resampled['rolling_phasic'].shift(-1) < df_resampled['rolling_phasic']) & (df_resampled['rolling_phasic'] > 0.1)]
    peak_count = df_resampled['peaks'].dropna().count()
    # Recording duration in minutes. NOTE(review): `.seconds` wraps at
    # 24 h; use `.total_seconds()` if a session can exceed a day.
    duration = (df_resampled.index[-1] - df_resampled.index[0]).seconds / 60
    peak_count_pm = peak_count / duration
    return (df_min, df_max, df_mean, df_median, df_std, peak_count, peak_count_pm, lhipa(df['ET_PubilAvg'].dropna().to_numpy()))
files[52:]
# +
# Process the selected recording(s): clean via the Cheetah service, split into
# baseline ('B') and measurement ('M') segments, extract features for each,
# and append one header row plus one data row per file under ./dataset.
for file_name in files[52:53]:
    print(file_name)
    df = pd.read_csv(f'./split/{file_name[1]}', comment='#')
    #df = df.set_index(pd.TimedeltaIndex(df['timestamp'].values))
    # Non-positive pupil diameters are sensor dropouts -> treat as missing.
    df['ET_PupilRight'] = df['ET_PupilRight'].apply(lambda x: x if x > 0 else np.nan)
    df['ET_PupilLeft'] = df['ET_PupilLeft'].apply(lambda x: x if x > 0 else np.nan)
    df['ET_PubilAvg'] = df.apply(lambda x: (x['ET_PupilLeft'] + x['ET_PupilRight']) / 2, axis=1)
    # Scale timestamps by 1000 — presumably ms -> us; TODO confirm units.
    df['Timestamp'] = df['Timestamp'].astype('int64')
    df['Timestamp'] = df.apply(lambda x: int(x['Timestamp'] * 1000), axis=1).astype('int64')
    # Baseline segment ('B'): clean server-side, index by timestamp, featurize.
    baseline = clean(copy(df[df['type'] == 'B']))
    baseline = baseline.set_index(pd.to_datetime(baseline['Timestamp'].values))
    b_min, b_max, b_mean, b_median, b_std, b_peak_count, b_peak_count_pm, b_lhipa = extract_features(baseline)
    # Measurement segment ('M'): same treatment.
    experiment = clean(copy(df[df['type'] == 'M']))
    experiment = experiment.set_index(pd.to_datetime(experiment['Timestamp'].values))
    e_min, e_max, e_mean, e_median, e_std, e_peak_count, e_peak_count_pm, e_lhipa = extract_features(experiment)
    # Baseline-minus-experiment deltas for the central statistics.
    d_mean = b_mean - e_mean
    d_median = b_median - e_median
    d_std = b_std - e_std
    # Append mode: re-running the cell adds duplicate rows to the output file.
    f = open(f'dataset/{file_name[1]}', "a")
    f.write(f'task,subject,d_mean,d_median,d_std,e_min,e_max,e_mean,e_median,e_std,e_peak_count,e_peak_count_pm,b_min,b_max,b_mean,b_median,b_std,b_peak_count,b_peak_count_pm,e_lhipa,b_lhipa\n')
    f.write(f'{file_name[1].split("_")[1].split(".")[0]},{file_name[0]},{d_mean},{d_median},{d_std},{e_min},{e_max},{e_mean},{e_median},{e_std},{e_peak_count},{e_peak_count_pm},{b_min},{b_max},{b_mean},{b_median},{b_std},{b_peak_count},{b_peak_count_pm},{e_lhipa},{b_lhipa}')
    f.close()
# -
| model/feat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
# +
data = pd.read_csv('FIFA 2018 Statistics.csv')
y = (data['Man of the Match'] == "Yes") # Convert from string "Yes"/"No" to binary
# Keep only the integer-typed columns as model features.
feature_names = [i for i in data.columns if data[i].dtype in [np.int64]]
X = data[feature_names]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Shallow decision tree; fixed random_state keeps the fit reproducible.
tree_model = DecisionTreeClassifier(random_state=0, max_depth=5, min_samples_split=5).fit(train_X, train_y)
# -
data.info()
# +
from sklearn import tree
import graphviz
# Render the fitted decision tree for visual inspection.
tree_graph = tree.export_graphviz(tree_model, out_file=None, feature_names=feature_names)
graphviz.Source(tree_graph)
# -
from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
# +
# Create the data that we will plot
# Partial dependence of the tree model's prediction on 'Goal Scored'.
pdp_goals = pdp.pdp_isolate(model=tree_model, dataset=val_X, model_features=feature_names, feature='Goal Scored')
# plot it
pdp.pdp_plot(pdp_goals, 'Goal Scored')
plt.show()
# +
# Same analysis for a different feature.
feature_to_plot = 'Distance Covered (Kms)'
pdp_dist = pdp.pdp_isolate(model=tree_model, dataset=val_X, model_features=feature_names, feature=feature_to_plot)
pdp.pdp_plot(pdp_dist, feature_to_plot)
plt.show()
# +
# Build Random Forest model
rf_model = RandomForestClassifier(random_state=0).fit(train_X, train_y)
# Same feature, different model: compare the forest's PDP to the tree's.
pdp_dist = pdp.pdp_isolate(model=rf_model, dataset=val_X, model_features=feature_names, feature=feature_to_plot)
pdp.pdp_plot(pdp_dist, feature_to_plot)
plt.show()
# +
# Similar to previous PDP plot except we use pdp_interact instead of pdp_isolate and pdp_interact_plot instead of pdp_isolate_plot
features_to_plot = ['Goal Scored', 'Distance Covered (Kms)']
inter1 = pdp.pdp_interact(model=tree_model, dataset=val_X, model_features=feature_names, features=features_to_plot)
pdp.pdp_interact_plot(pdp_interact_out=inter1, feature_names=features_to_plot, plot_type='contour')
plt.show()
# -
| shap/Partial Dependence Plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Combined Data
# This notebook was loaded with:
#
# ```bash
# PYSPARK_DRIVER_PYTHON=jupyter PYSPARK_DRIVER_PYTHON_OPTS=notebook ./dse/bin/dse pyspark --num-executors 5 --driver-memory 8g --executor-memory 8g
# ```
#
# At this point, we've got several sets of data processed and cleansed. We also have discovered several fields we can use for joining:
#
# - license_id
# - longitude, latitude
#
# Longitude and latitude are great candidates for joining crime, sanitation, weather, and inspections. The problem is that it's not reasonable to expect them to fall on exactly the same coordinate.
#
# Suppose we divided the city up into a grid and determined the coordinates for the center of each cell. Then, we could determine which sanitation complaints and crimes were committed in the cell, and connect that to inspections.
# %pylab inline
from pyspark.sql import Row
from pyspark.sql.types import *
from pyspark.sql.functions import count, datediff, lag, sum, coalesce, rank, lit, when,col, udf, to_date, year, mean, month, date_format, array
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, FloatType, DateType
from pyspark.ml.feature import StringIndexer
from datetime import datetime
from pyspark.sql.window import Window
import pyspark
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
# # Creating the City Grid
# Let's create a grid by finding the boundaries of our coordinates (using crime data because it's the largest set), then assign a grid identifier (`city_grid`). Then, we'll add that id to all of our sets. We'll follow the logical Cassandra pattern of creating a table representing the query we'll want (with the grid identifier as the key)
# +
#-------- cartesian
# Build the cartesian product of two 1-D vectors as an
# (len(x1)*len(x2), 2) array. x1 varies fastest: rows are (a, b) for each
# b in x2, for each a in x1.
# Input:
#    x1 (numpy vector 1)
#    x2 (numpy vector 2)
# Returns:
#    cartesian combination
def cartesian(x1, x2):
    first_col = np.tile(x1, len(x2))
    second_col = np.repeat(x2, len(x1))
    return np.column_stack((first_col, second_col))
#-------- create_risk_cells
# (Fixed: this banner was mislabeled "cartesian".)
# Divide the bounding box of the given coordinates into an n x n grid of
# "risk cells" (n = sqrt(n_cells), truncated) and return a DataFrame with
# one row per cell: column 0 = cell longitude, column 1 = cell latitude,
# plus the ward and district inputs passed through unchanged.
# Input:
#    longitude
#    latitude
#    n_cells
# Returns:
#    risk cells
def create_risk_cells(longitude, latitude, n_cells, ward, district):
    """Build an n x n grid of cell coordinates over the data's extent."""
    n = int(np.sqrt(n_cells))
    # Hoisted: min/max were each computed twice in the original.
    min_long, max_long = min(longitude), max(longitude)
    min_lat, max_lat = min(latitude), max(latitude)
    step_long = (max_long - min_long) / n
    step_lat = (max_lat - min_lat) / n
    # Vectorised replacement for the element-by-element fill loop;
    # produces the same float64 values (min + step * i for i in 0..n-1).
    idx = np.arange(n)
    x1 = min_long + step_long * idx
    x2 = min_lat + step_lat * idx
    df = pd.DataFrame(cartesian(x1, x2))
    df["ward"] = ward
    df["district"] = district
    return df
# -
# Start by loading the crime data and map it to risk cells to create a master table.
# Pull the full crime table from Cassandra into a local pandas frame
# (the largest set, so its extent defines the city's bounding box).
df_test = sqlContext.read.format("org.apache.spark.sql.cassandra")\
    .load(keyspace="chicago_data", table="crime")\
    .toPandas()
# We've got some missing districts and wards. Let's convert that to 0 to indicate we don't know.
df_test.ward.unique()
df_test.district.unique()
# Coerce to numeric; anything unparseable or missing becomes 0 = "unknown".
df_test['ward'] = pd.to_numeric(df_test.ward, errors='coerce').fillna(0).astype(int)
df_test['district'] = pd.to_numeric(df_test.district, errors='coerce').fillna(0).astype(int)
# 100x100 grid over the crime data's bounding box.
df_grid = create_risk_cells(df_test.longitude, df_test.latitude, 100*100, df_test.ward, df_test.district)
df_grid.head()
df_grid.columns=["center_longitude", "center_latitude", "ward", "police_district"]
# The row index doubles as the stable grid-cell identifier.
df_grid["id"] = df_grid.index
df_grid.head()
df_grid.shape
# Now, we'll save that master table. It'll only be useful for human readability. We won't use it in analysis from here on out.
# ```cql
# CREATE TABLE chicago_data.city_grid (
# id int,
# center_latitude float,
# center_longitude float,
# ward int,
# police_district int,
# PRIMARY KEY (id));
# ```
#save the grid cells
# Persist the master grid to Cassandra (human reference only; not used in
# the analysis from here on).
sqlContext.createDataFrame(df_grid).write\
    .format("org.apache.spark.sql.cassandra")\
    .mode('append')\
    .options(table="city_grid", keyspace="chicago_data")\
    .save()
# Now, we'll use KNN to find out which grid cell each of our inspections is in. If you're not familiar with it, KNN is an ML classification algorithm that calculates the distance (let's say the Euclidean distance) between an item in question and its $k$ nearest neighbors. Mathematically, for $k=1$ it looks like this:
#
# $$\hat{y} = \min \sqrt{\sum_{i=1}^{k} (x_i-y_i)^2}$$
#
# Because of that, it's computationally intensive, so it requires the entire set to be in memory and traversed each time. That's ok because we'll do this during cleanup and save the results to a table to use when we run the various models.
df_inspections = sqlContext.read.format("org.apache.spark.sql.cassandra").\
    load(keyspace="chicago_data", table="inspections")
# +
#use knn to figure out which cell you're in. Unfortunately, MLlib doesn't seem to have KNN for classification.
#We could use this: https://github.com/saurfang/spark-knn or we could just use sklearn, since we're already in a pandas
#dataframe
from sklearn.neighbors import KNeighborsRegressor as KNN
#this gives us something for the model to predict. It doesn't matter that they are all labels.
# k=1: every point is simply assigned the value of its single nearest grid cell.
knn = KNN(n_neighbors=1)
# -
# Uh oh, we've got 117 inspections without the coordinates entered. We need to fix that.
df_inspections.filter(col("longitude").isNull()).count()
# Let's see if the license records have the GPS coordinates entered.
df_license_coords = sqlContext.sql("select license_id, latitude as lat, longitude as long from chicago_data.licenses")
df_license_coords.head()
# Left join keeps inspections that have no matching license record.
df_inspections_joined = df_inspections.join(df_license_coords, on="license_id", how="left")
df_inspections_joined.columns
# coalesce: prefer the inspection's own coordinates, fall back to the license's.
df_inspections2 = df_inspections_joined.select('license_id', 'inspection_dt', 'canvass', 'complaint', 'cumulative_failures', \
    'cumulative_inspections', 'days_since_last_inspection', 'ever_failed', 'fire', 'inspection_date_string', 'inspection_type', \
    'inspection_type_description', 'license_related', 'liquor', 'month', \
    'prev_fail', 'proportion_past_failures', 'recent_inspection', 'reinspection', 'risk', 'risk_description', \
    'special_event', 'task_force', 'weekday', 'weekday_description', 'y', 'y_description', 'y_fail', 'zip', \
    coalesce(df_inspections_joined["latitude"], df_inspections_joined["lat"]).alias("latitude"),
    coalesce(df_inspections_joined["longitude"], df_inspections_joined["long"]).alias("longitude"), )
df_inspections2.filter(col("longitude").isNull()).count()
# Hmm... that did not help at all. Such is the life of a data scientist. ok! `coalesce` to fill in our missing data from the license set was worth a shot.
# Coordinates are still missing for those rows, so drop them outright.
df_inspections2 = df_inspections2.filter(col("longitude").isNotNull())
# We also have a similar issue with `days_since_last_inspection` since the first year in the set has no previous inspections. We'll just set that to zero to avoid nulls.
df_inspections2 = df_inspections2.withColumn("days_since_last_inspection", coalesce(col("days_since_last_inspection"), lit(0)))
# Now, we'll compute the `city_grid` for each license_id
# +
y_train = pd.Series(range(0, df_grid.shape[0], 1))
fit_knn = knn.fit(df_grid[["center_longitude", "center_latitude"]].values, y_train.values)
x_test = df_inspections2.toPandas()[["longitude", "latitude", "license_id"]].values
inspections_gridspots = pd.DataFrame(fit_knn.predict(x_test[:,0:2])).values
# -
len(inspections_gridspots)
# Concatenate the computed grids with the `license_id` and coordinates so that we can use that to join them to our inspections on `license_id`
np.concatenate((x_test,inspections_gridspots), axis=1)
df_inspections3 = pd.DataFrame(np.concatenate((x_test,inspections_gridspots), axis=1))
df_inspections3.columns=["longitude", "latitude", "license_id", "city_grid"]
df_inspections3.head()
df_inspections4 = df_inspections3.drop_duplicates()
df_inspections4 = pd.merge(df_inspections4, df_grid, left_on="city_grid", right_on="id", how="left")
# To do the join, we need to take this pandas dataframe and convert it to a Spark dataframe. That gives us the added benefit of being able to drop it back to Cassandra.
df_inspections4 = sqlContext.createDataFrame(df_inspections4)
df_inspections5 = df_inspections2.join(df_inspections4.select("license_id", "city_grid", "ward", "police_district"), on="license_id", how="left_outer")
df_inspections5.head()
df_inspections5.count()
df_inspections5.cache()
df_inspections5.dtypes
# This is our new table.
# ```cql
# CREATE TABLE chicago_data.inspections_by_city_grid (
# city_grid int,
# license_id text,
# risk_description text,
# zip text,
# inspection_date_string text,
# inspection_type_description text,
# y_description text,
# latitude text,
# longitude text,
# y int,
# y_fail int,
# reinspection int,
# recent_inspection int,
# task_force int,
# special_event int,
# canvass int,
# fire int,
# liquor int,
# complaint int,
# license_related int,
# inspection_type int,
# risk int,
# inspection_dt date,
# prev_fail int,
# cumulative_failures int,
# weekday_description text,
# month int,
# weekday int,
# ever_failed int,
# cumulative_inspections int,
# proportion_past_failures double,
# days_since_last_inspection int,
# ward int,
# police_district int,
# PRIMARY KEY (city_grid, license_id, inspection_dt));
# ```
# Append the grid-keyed inspections to the Cassandra table defined above.
df_inspections5.write\
    .format("org.apache.spark.sql.cassandra")\
    .mode('append')\
    .options(table="inspections_by_city_grid", keyspace="chicago_data")\
    .save()
# The advantage is that we can now push back some aggregation to cassandra.
# ## Crime
# We'll do the same thing for crime
df_crime = sqlContext.sql("select * from chicago_data.crime")
df_crime.filter(col("longitude").isNull()).count()
df_crime.columns
# We don't need to retrain the model. The grid spots are correct. We'll use that model to "predict" these.
#x_test = df_crime.toPandas()[["longitude", "latitude", "id"]].values
# NOTE(review): this takes ALL columns, unlike the commented-out line above
# which selected [longitude, latitude, id]. The 4-column rename below only
# works if the table has exactly those three columns in that order —
# otherwise the `.columns=` assignment raises; confirm the table schema.
x_test = df_crime.toPandas().values
crime_gridspots = pd.DataFrame(fit_knn.predict(x_test[:,0:2])).values
df_crime.count()
crime_gridspots.shape
df_crime2 = pd.DataFrame(np.concatenate((x_test,crime_gridspots), axis=1))
df_crime2.columns=["longitude", "latitude", "id", "city_grid"]
df_crime2 = df_crime2.drop_duplicates()
df_crime2 = sqlContext.createDataFrame(df_crime2)
df_crime_final = df_crime.join(df_crime2.select("id", "city_grid"), on="id", how="left_outer")
# After joining, we'll add this data to our new table.
# ```cql
# CREATE TABLE chicago_data.crime_by_city_grid (
# city_grid int,
# id text,
# case_number text,
# date text,
# block text,
# iucr text,
# primary_type text,
# arrest boolean,
# beat text,
# district text,
# ward text,
# community_area text,
# fbi_code text,
# year text,
# latitude float,
# longitude float,
# PRIMARY KEY (city_grid, id));
# ```
df_crime_final.count()
# Append the grid-keyed crime records to the table defined above.
df_crime_final.write\
    .format("org.apache.spark.sql.cassandra")\
    .mode('append')\
    .options(table="crime_by_city_grid", keyspace="chicago_data")\
    .save()
# We're also going to store the data by type of crime (so that we can use that in aggregation later).
# ```cql
# CREATE TABLE chicago_data.crime_by_type (
# primary_type text,
# city_grid int,
# id int,
# PRIMARY KEY (primary_type, city_grid, id));
# ```
# Narrow projection keyed by crime type, for later per-type aggregation.
df_crime_final.select("id", "city_grid", "primary_type").write\
    .format("org.apache.spark.sql.cassandra")\
    .mode('append')\
    .options(table="crime_by_type", keyspace="chicago_data")\
    .save()
# ## Sanitation
# Sanitation data is the same, again.
df_sanitation = sqlContext.read.format("org.apache.spark.sql.cassandra").\
    load(keyspace="chicago_data", table="sanitation")
df_sanitation.filter(col("longitude").isNull()).count()
df_sanitation.columns
# Assign each complaint to its nearest grid cell with the already-fitted 1-NN.
x_test = df_sanitation.toPandas()[["longitude", "latitude", "service_request_number"]].values
sanitation_gridspots = pd.DataFrame(fit_knn.predict(x_test[:,0:2])).values
df_sanitation.count()
df_sanitation2 = pd.DataFrame(np.concatenate((x_test,sanitation_gridspots), axis=1))
df_sanitation2.columns=["longitude", "latitude", "service_request_number", "city_grid"]
df_sanitation2 = sqlContext.createDataFrame(df_sanitation2)
df_sanitation_final = df_sanitation.join(df_sanitation2.select("service_request_number", "city_grid"), on="service_request_number", how="left_outer")
df_sanitation_final.columns
# ```cql
# CREATE TABLE chicago_data.sanitation_by_city_grid (
# city_grid int,
# creation_date text,
# status text,
# completion_date text,
# service_request_number text,
# type_of_service_request text,
# "what_is_the_nature_of_this_code_violation?" text,
# street_address text,
# zip_code text,
# ward text,
# police_district double,
# community_area double,
# latitude double,
# longitude double,
# PRIMARY KEY (city_grid, service_request_number));
# ```
# Append the grid-keyed sanitation complaints to the table defined above.
df_sanitation_final.write\
    .format("org.apache.spark.sql.cassandra")\
    .mode('append')\
    .options(table="sanitation_by_city_grid", keyspace="chicago_data")\
    .save()
| Step6-MoreFeatureProcessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Wrangling with Lytics Profile Data - Tools and Techniques
#
# The goal of this notebook is to present some tools and techniques that can be used to wrangle Industry Dive data.
#
# ## What is Data Wrangling again?
# >Data wrangling, sometimes referred to as data munging, is the process of transforming and mapping data from one "raw" data form into another format with the intent of making it more appropriate and valuable for a variety of downstream purposes such as analytics. Some transformation techniques include: parsing, joining, standardizing, augmenting, cleansing, and consolidating.
#
# [per wikipedia](https://en.wikipedia.org/wiki/Data_wrangling)
#
# ## Bad Data in, Bad Data out
#
# 
#
# Many websites contain forms in order to collect information from users for various reasons. In our case, we have signup forms for dives that asks for information about our users like so:
#
# 
#
# As you can see, there are fields that are restricted to pre-defined values (e.g., Job Function), and free-form fields (e.g., Company Name) where a user can type most anything they like. Whenever users are exposed to free-form fields, there is a possibility of bad/messy/non-standardized data making into your system.
#
# For example, here are some variants of "IKEA" that are present for user profiles that we have:
#
# * IKEA
# * IKEA AG
# * IKEA Belgium
# * IKEA Canada
# * IKEA Danville
# * IKEA Food
# * IKEA Home Furnishings
# * IKEA Portugal
# * IKEA USA
# * IKEA US EAST, LLC 215
# * IKEA US
#
# Without some wrangling, you would not be able to aggregate these folks properly into a single group based on company.
#
# ## Lytics Profile Data
# Now, let's take a look at some Lytics profile data, which consists of all information we have about users who interact with our content. Within this data, there are key demographic fields that can help us understand who our users are, such as:
# * first and last name
# * job title
# * email domain
# * company name
# * address
#
# The data file we are going to look at is an export of the "All" audience segment in Lytics.
# https://activate.getlytics.com/audiences/4cc5d612f46fb86e5cfd0c995250e60c/summary?aid=2751
#
# 
#
# Let's start looking at this data to see how we can clean it up in order to help us create more accurate statistics about our users.
# +
import pandas as pd
import numpy as np
#dtypes = {'company': 'str', 'company_name': 'str', 'domain': 'object', 'emaildomain': 'object', 'emaildomains': 'object',
#          'st_profile_id': 'object', 'user_id': np.float64, 'lytics_segment': 'object'}
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 (replacement: on_bad_lines='skip'); this code assumes an older pandas.
df = pd.read_csv('../data/files/lytics_profile_data_export.csv', sep=',', error_bad_lines=False, index_col=False, encoding='latin-1')
#, dtype=dtypes)
# list columns in dataset
print(list(df))
# number of rows
print('# of rows left: %s' % df.shape[0])
# print(df[df['st_profile_id'].str.contains("5a2ba1f6ff530ac11a8b4868", na=False)])
# -
# There are multiple fields in the data we can choose to clean up, but first let's look at the "company_name" field. One of the first things we should do is get rid of rows with company name values we don't care about.
# +
# remove null company name values
df = df.dropna(subset=['company_name'])
# number of rows
print('# of rows left: %s' % df.shape[0])
# +
# find values that are any combination of special characters
# NOTE: the * quantifier means empty strings also match this pattern and are
# dropped below along with the pure-punctuation names
special_char_values = df['company_name'].str.contains("^[!@#$%^&*(),.?]*$", na=False)
print(df[special_char_values].company_name.unique())
# number of rows
print('# of special character value rows: %s' % df[special_char_values].shape[0])
# keep only the rows whose name is NOT purely special characters (~ inverts the mask)
df = df[~special_char_values]
print('# of rows left: %s' % df.shape[0])
# print(df[df['st_profile_id'].str.contains("5a2ba1f6ff530ac11a8b4868", na=False)])
# +
# find values that are only numbers (again, * also matches empty strings)
number_values = df['company_name'].str.contains("^[0-9]*$", na=False)
print(df[number_values].company_name.unique())
# number of rows
print('# of number value rows: %s' % df[number_values].shape[0])
df = df[~number_values]
print('# of rows left: %s' % df.shape[0])
# +
# random additional values that I found when I was looking at the data in Excel
weird_vals = ['#NAME?', '{Re}', '< self >']
weird_values = df['company_name'].isin(weird_vals)
df = df[~weird_values]
# left over rows in dataframe
print('# of rows left: %s' % df.shape[0])
# -
# Now that we have cleaned all the bad company name values from our dataset, let's work on standardizing the names to help with comparison.
# +
# Standardize company names so equivalent spellings compare equal.
# change the values to all lower case
df['stndrdzed_company_name'] = df['company_name'].str.lower()
# remove all punctuation. The raw string avoids the invalid-escape warning for
# \w\s, and regex=True pins regex matching, which stops being the default for
# str.replace in pandas 2.0.
df["stndrdzed_company_name"] = df['stndrdzed_company_name'].str.replace(r'[^\w\s]', '', regex=True)
# remove rows with "none" as value
# NOTE(review): str.contains matches 'none' anywhere in the string, so company
# names that merely contain "none" are dropped too — confirm this is intended.
none_rows = df['stndrdzed_company_name'].str.contains('none', na=False)
df = df[~none_rows]
# remove rows with "" as value
empty_string_rows = df['stndrdzed_company_name'].values == ''
df = df[~empty_string_rows]
print('# of rows left: %s' % df.shape[0])
# -
# -
# Let's take a look at our dataset to see what we are working with:
# +
grouped = df.groupby('stndrdzed_company_name')
grouped = grouped.size().reset_index(name='counts')
grouped.sort_values(by=['counts'], ascending=False)
# -
# One thing to note from looking at this is that there are company names that contain values other than English. For instance, "현대엔지니어링" is Korean. This is one thing you could work on eliminating as well if you wanted to focus on English values. I tried to use a library called "langdetect" for this, but it did not do a good job of picking up the obvious cases.
#
# Once we have wrangled the data bit, we can now try to enhance our dataset with an external dataset. One of the datasets we bought rights to recently, DiscoverOrg, has different information about companies that could be useful for analysis. The common field these two datasets have is the company name. So we can try to load this dataset, clean it up a bit, then compare it to our original cleaned dataset in order to try and match on company name and enhance our existing dataset.
# +
# Explicit dtypes for the DiscoverOrg export so pandas does not have to infer
# them from mixed-content columns.
dtypes= {'Company ID': np.int64, 'Company Name': 'str', 'Company Website': 'object', 'Company HQ Phone': 'object',
         'Company Email Domain': 'object', 'Company Description': 'object', 'Company Primary Industry': 'object',
         'Company Revenue': np.float64, 'Company IT Budget (Mil)': 'object', 'Number of Employees': np.int64,
         'Company IT Employees': np.float64, 'Company Fortune Rank': np.float64, 'Company Ownership': 'object', 'Company Profile URL': 'object',
         'Company Business Model (B2B/B2C/B2G)': 'object', 'Hospital Beds': 'object', 'HQ Address 1': 'object', 'HQ Address 2': 'object',
         'HQ City': 'object', 'HQ State': 'object', 'HQ Postal Code': 'object', 'HQ County': 'object', 'HQ Country': 'object'
         }
df2 = pd.read_csv('../data/files/DiscoverOrg_Company_223030_20180731141156.csv', encoding='latin-1', sep=',', error_bad_lines=False, index_col=False, dtype=dtypes)
# Standardize DiscoverOrg company names the same way as the Lytics ones.
# change the values to all lower case
df2['stndrdzed_company_name'] = df2['Company Name'].astype(str).str.lower()
# remove all punctuation. Raw string avoids the invalid-escape warning for
# \w\s; regex=True pins regex behaviour across pandas versions (the default
# for str.replace becomes regex=False in pandas 2.0).
df2["stndrdzed_company_name"] = df2['stndrdzed_company_name'].str.replace(r'[^\w\s]', '', regex=True)
# +
# merge with discovery org data in order to find matches
# how='left' keeps every Lytics row and attaches DiscoverOrg columns only
# where the standardized company names match (non-matches get NaN).
merged_rows = pd.merge(df, df2, how= 'left', on= 'stndrdzed_company_name', sort=True, suffixes=('_a', '_b'),)
# number of rows after merging
print('# of rows left: %s' % merged_rows.shape[0])
# -
# So, after our initial cleaning process, we had 456,521 rows in our lytics file. Our DiscoverOrg file had 68,735 rows. By merging the two files on company name we were able to match 98,811 rows. That is not a bad start.
#
# Next, we will write the merged and non-merged rows to a file for further analysis.
import os
path=r'/Users/sasanbahadaran/Downloads'
merged_rows.to_csv(os.path.join(path,r'lytics_profile_disc_org_merged_rows.csv'), index=False)
# Next, we can work on wrangling other fields and joining them to the DiscoveryOrg data for enhancement, such as:
# * email domain
# * address
#
# We could also decide to work on further cleaning up the company data as well. This could be through performing additional wrangling after examining our output file, or going beyond the deterministic types of methods we have covered so far. All in all though, the more we standardize our dataset, the better results we will get when performing analysis on our data.
# Ideas: Take address, standardize and clean, match up to discOrg and see what additional results you can yield. Take email domain and do the same. Filter out more junk based on analysis.
| notebooks/lytics_data_wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Le Bloc Note pour ajouter de l'interaction...
#
# https://ipywidgets.readthedocs.io/en/stable/index.html
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
def f(x):
    """Return the square of ``x`` (demo callback for ``interact``)."""
    return x ** 2
interact(f, x=10);
# ### Pour aller plus loin :
# - https://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html
# - https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Basics.html
# - https://nbviewer.jupyter.org/github/jupyter-widgets/ipywidgets/blob/master/docs/source/examples/Index.ipynb
#
# - https://towardsdatascience.com/interactive-controls-for-jupyter-notebooks-f5c94829aee6
#
# ### Tutoriels en Français :
# - https://linuxfr.org/news/creer-une-application-web-avec-jupyter-ipywidgets-et-voila-7b03d5dd-ab10-47cb-a2bd-bd99fa9e2457
# - https://makina-corpus.com/blog/metier/2019/augmenter-linteractivite-de-vos-notebooks-jupyter-1
#
# ## Exemples :
#
# ### Simulateur de fonctions booléennes
# +
# Il faut importer la fonction interact() du module ipywidgets
from ipywidgets import interact
# Définition de la fonction logique à simuler
def OUexclusif(a, b):
    """Simulate an exclusive-OR (XOR) logic gate and print its truth-table row."""
    # Boolean expression of the function, parenthesised for readability:
    # S is true exactly when one input (but not both) is true.
    S = (not a and b) or (a and not b)
    # Build the interactive message, then display it; print() returns None,
    # which is passed along exactly as before.
    message = f"Si l'entrée a = {a} et l'entrée b = {b} alors la sortie S = {S}"
    return print(message)
# Appel de la fonction interact()
interact(OUexclusif, a = False, b = False)
# -
| ipywidgets_Le_BN_interactif.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (bl_tf)
# language: python
# name: ml
# ---
# # Deconstructed Raw Voltage Pipeline
# This tutorial walks through the steps behind the RAW data pipeline, without recording to disk.
# If you have access to a GPU, it is highly recommended to install CuPy, which performs the equivalent NumPy array operations on the GPU (https://docs.cupy.dev/en/stable/install.html). This is not necessary to run raw voltage generation, but will highly accelerate the pipeline. Once you have CuPy installed, to enable GPU acceleration you must set `SETIGEN_ENABLE_GPU` to '1' in the shell or in Python via `os.environ`. It can also be useful to set `CUDA_VISIBLE_DEVICES` to specify which GPUs to use.
# +
# # !pip install cupy-cuda110
# -
import os
os.environ['SETIGEN_ENABLE_GPU'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
try:
import cupy as xp
except ImportError:
import numpy as xp
import numpy as np
from astropy import units as u
import blimpy as bl
import sys
sys.path.insert(0, "/mnt_home/bryanb/setigen/")
import setigen as stg
def get_numpy(v):
    """Return ``v`` as a host-side (NumPy) value.

    When ``xp`` is CuPy, device arrays are copied back to the host via
    ``asnumpy``; when ``xp`` is plain NumPy (no ``asnumpy`` attribute),
    ``v`` is returned unchanged.
    """
    to_host = getattr(xp, 'asnumpy', None)
    return v if to_host is None else to_host(v)
def db(x):
    """Convert a linear power value to its decibel (dB) equivalent."""
    log10_value = np.log10(x)
    return 10 * log10_value
# -
# Sometimes it can be necessary to re-run this command for plots to show automatically
# %matplotlib inline
# We first set some basic parameters behind the pipeline. `sample_rate` is in samples per second (Hz); `num_taps` and `num_branches` are specific to the polyphase filterbank described below.
# +
# Basic pipeline parameters: ADC sample rate (Hz) and the polyphase
# filterbank configuration (taps per branch, number of branches).
sample_rate = 3e9
num_taps = 8
num_branches = 1024
# bandwidth of one coarse channel: the full band split across the branches
chan_bw = sample_rate / num_branches
# -
# ## Creating an Antenna
#
# We will start off with an `Antenna` object, as in the first tutorial. This time, we'll only use a single polarization. Showing noise (and converting from CuPy array if necessary).
# +
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=0*u.GHz,
ascending=True,
num_pols=1)
antenna.x.add_noise(v_mean=0,
v_std=1)
antenna.x.add_constant_signal(f_start=8e6,
drift_rate=-2*u.Hz/u.s,
level=0.5)
v = antenna.x.get_samples(20000)
plt.figure(figsize=(10, 5))
plt.plot(get_numpy(v))
plt.xlabel('Sample')
plt.ylabel('V')
plt.show()
# -
# ## Getting real voltage samples and stepping through the pipeline
#
# First, we gather enough samples to be able to eventually do a fine channelization. Here we plot a histogram showing the overall distribution of real voltages.
# +
num_windows = 10
num_samples = num_taps * num_branches * (num_windows + 1)
v = antenna.x.get_samples(num_samples)
print(v.shape)
plt.hist(get_numpy(v), bins=100)
plt.xlabel('V')
plt.ylabel('Counts')
plt.show()
# -
# ### Digitization
#
# Next, we do our "digitization" step, which essentially scales the FWHM of the real voltage distribution to a desired FWHM for quantized data, which is constrained between `-2**(num_bits-1)` and `2**(num_bits-1)-1`. Plotting a histogram to show the same overall distribution:
# +
num_bits = 8
target_fwhm = 32 * 2**num_bits / 2**8
digitizer = stg.voltage.RealQuantizer(target_fwhm=target_fwhm,
num_bits=num_bits)
v_q = digitizer.quantize(v)
print(v_q.shape)
plt.hist(get_numpy(v_q), bins=2**num_bits)
plt.xlabel('V')
plt.ylabel('Counts')
plt.show()
# -
# ### Coarse channelization via Polyphase Filterbank
#
# The quantized voltages are passed into a polyphase filterbank, and we obtain complex voltages from the real FFT. This produces `num_branches/2+1` coarse channels; in real systems, the last bin is dropped out so that we're left with `num_branches/2` effective coarse channels. Of course, you may truncate the resulting array to the desired range of coarse channels however you'd like after this step.
#
# The shape of the data directly after channelization is `(time_samples, num_branches/2+1)`. If `num_samples=num_taps*num_branches*(num_windows+1)`, then `time_samples=num_taps*num_windows`, by design. This yields an easy formula for determining the number of real voltage samples necessary to get the desired number of time samples in our raw complex voltage data; this is built into `RawVoltageBackend` in the higher level `setigen` voltage pipeline.
#
# Plotting histograms of real and imaginary values after the rFFT:
# +
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
v_c = filterbank.channelize(v_q)
print(v_c.shape)
plt.hist(get_numpy(v_c.real.flatten()), bins=100)
plt.title('Real')
plt.xlabel('V')
plt.ylabel('Counts')
plt.show()
plt.hist(get_numpy(v_c.imag.flatten()), bins=100)
plt.title('Imag')
plt.xlabel('V')
plt.ylabel('Counts')
plt.show()
# -
v_c.real.std()
# We can also plot a spectrogram of the coarsely channelized data:
plt.figure(figsize=(10, 6))
plt.imshow(db(np.abs(get_numpy(v_c))**2),
cmap='viridis',
aspect='auto',
interpolation='none')
plt.colorbar()
plt.xlabel("Frequency (px)")
plt.ylabel("Time (px)")
plt.title("Coarse Channelization")
plt.show()
# ### Requantization
#
# The last data manipulation step is the requantization to either 8 or 4 bits, since these have a specific schema in GUPPI RAW format. That being said, the following cell will still work for alternate values. Plotting histograms here as well:
# +
num_bits = 8
target_fwhm = 32 * 2**num_bits / 2**8
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=target_fwhm,
num_bits=num_bits)
v_rq = requantizer.quantize(v_c)
print(v_rq.shape)
plt.hist(get_numpy(v_rq.real.flatten()), bins=2**num_bits)
plt.title('Real')
plt.xlabel('V')
plt.ylabel('Counts')
plt.show()
plt.hist(get_numpy(v_rq.imag.flatten()), bins=2**num_bits)
plt.title('Imag')
plt.xlabel('V')
plt.ylabel('Counts')
plt.show()
# -
v_rq.imag.std()
v_rq.real.std()
# In the standard pipeline, this is where we'd truncate to the desired range of coarse channels and save to GUPPI RAW format.
# ## Fine channelization
# We can do a quick fine channelization to find the injected signal. Note that since we only have a few samples in the time direction, our fftlength can't be too large, but we can still clearly recover the injected signal:
# +
# Truncate coarse channels to the first 64.
v_tr = v_rq[:, 0:64]
psd = stg.voltage.get_pfb_waterfall(v_tr, int_factor=1, fftlength=8)
print(f"Spectrogram shape: {psd.shape}")
plt.figure(figsize=(10, 6))
plt.imshow(db(get_numpy(psd)),
cmap='viridis',
aspect='auto',
interpolation='none')
plt.colorbar()
plt.xlabel("Frequency (px)")
plt.ylabel("Time (px)")
plt.title("Fine Channelization")
plt.show()
# -
# Integrating over time:
plt.figure(figsize=(8, 6))
plt.plot(db(np.sum(get_numpy(psd), axis=0)))
plt.xlabel("Frequency bins")
plt.ylabel("Integrated Power (dB)")
plt.show()
v_rq_data = v_rq
# Adding only signal
# +
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=0*u.GHz,
ascending=True,
num_pols=1)
antenna.x.add_noise(v_mean=0,
v_std=1)
antenna.x.add_constant_signal(f_start=12e6,
drift_rate=-2*u.Hz/u.s,
level=0.5)
v = antenna.x.get_samples(20000)
num_windows = 10
num_samples = num_taps * num_branches * (num_windows + 1)
v = antenna.x.get_samples(num_samples)
print(v.shape)
# num_bits = 8
# target_fwhm = 32 * 2**num_bits / 2**8
# digitizer = stg.voltage.RealQuantizer(target_fwhm=target_fwhm,
# num_bits=num_bits)
# v_q = digitizer.quantize(v)
# print(v_q.shape)
# plt.hist(get_numpy(v_q), bins=2**num_bits)
# plt.xlabel('V')
# plt.ylabel('Counts')
# plt.show()
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
v_c = filterbank.channelize(v)
num_bits = 8
target_fwhm = 32 * 2**num_bits / 2**8
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=target_fwhm,
num_bits=num_bits)
v_rq = requantizer.quantize(v_c)
# Truncate coarse channels to the first 64.
v_tr = v_rq[:, 0:64]
psd = stg.voltage.get_pfb_waterfall(v_tr, int_factor=1, fftlength=8)
print(f"Spectrogram shape: {psd.shape}")
plt.figure(figsize=(10, 6))
plt.imshow(db(get_numpy(psd)),
cmap='viridis',
aspect='auto',
interpolation='none')
plt.colorbar()
plt.xlabel("Frequency (px)")
plt.ylabel("Time (px)")
plt.title("Fine Channelization")
plt.show()
plt.figure(figsize=(8, 6))
plt.plot(db(np.sum(get_numpy(psd), axis=0)))
plt.xlabel("Frequency bins")
plt.ylabel("Integrated Power (dB)")
plt.show()
v_rq_data = v_rq
# +
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=0*u.GHz,
ascending=True,
num_pols=1)
antenna.x.add_noise(v_mean=0,
v_std=1)
# antenna.x.add_constant_signal(f_start=12e6,
# drift_rate=-2*u.Hz/u.s,
# level=0.5)
v = antenna.x.get_samples(20000)
num_windows = 10
num_samples = num_taps * num_branches * (num_windows + 1)
v = antenna.x.get_samples(num_samples)
print(v.shape)
# num_bits = 8
# target_fwhm = 32 * 2**num_bits / 2**8
# digitizer = stg.voltage.RealQuantizer(target_fwhm=target_fwhm,
# num_bits=num_bits)
# v_q = digitizer.quantize(v)
# print(v_q.shape)
# plt.hist(get_numpy(v_q), bins=2**num_bits)
# plt.xlabel('V')
# plt.ylabel('Counts')
# plt.show()
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
v_c = filterbank.channelize(v)
num_bits = 8
target_fwhm = 32 * 2**num_bits / 2**8
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=target_fwhm,
num_bits=num_bits)
v_rq = requantizer.quantize(v_c)
# Truncate coarse channels to the first 64.
v_tr = v_rq[:, 0:64]
psd = stg.voltage.get_pfb_waterfall(v_tr, int_factor=1, fftlength=8)
print(f"Spectrogram shape: {psd.shape}")
plt.figure(figsize=(10, 6))
plt.imshow(db(get_numpy(psd)),
cmap='viridis',
aspect='auto',
interpolation='none')
plt.colorbar()
plt.xlabel("Frequency (px)")
plt.ylabel("Time (px)")
plt.title("Fine Channelization")
plt.show()
plt.figure(figsize=(8, 6))
plt.plot(db(np.sum(get_numpy(psd), axis=0)))
plt.xlabel("Frequency bins")
plt.ylabel("Integrated Power (dB)")
plt.show()
v_rq_data = v_rq
# +
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=0*u.GHz,
ascending=True,
num_pols=1)
antenna.x.add_noise(v_mean=0,
v_std=1)
# antenna.x.add_constant_signal(f_start=12e6,
# drift_rate=-2*u.Hz/u.s,
# level=0.5)
v = antenna.x.get_samples(2000*1024)
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
print(v.mean(), v.std())
v_c = filterbank.channelize(v)
print(v_c.real.mean(), v_c.real.std())
print(v_c.imag.mean(), v_c.imag.std())
# -
9.16478944/13.6
0.673*2**0.5
vars(filterbank)
filterbank.window.std()
# +
V = v.reshape((2000, 1024))
V_pfb = np.fft.rfft(V, num_branches, axis=1) / num_branches**0.5
print(V_pfb.real.mean(), V_pfb.real.std())
print(V_pfb.imag.mean(), V_pfb.imag.std())
# -
1/2**0.5
0.6741650510274363/0.708415779908209
0.672307136094871/0.7049769403382617
# +
num_taps = 8
num_branches = 1024
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=0*u.GHz,
ascending=True,
num_pols=1)
antenna.x.add_noise(v_mean=0,
v_std=1)
# antenna.x.add_constant_signal(f_start=12e6,
# drift_rate=-2*u.Hz/u.s,
# level=0.5)
v = antenna.x.get_samples(2000*num_branches)
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
print(v.mean(), v.std())
print('==========')
v_c = filterbank.channelize(v)
print(v_c.real.mean(), v_c.real.std())
print(v_c.imag.mean(), v_c.imag.std())
print('==========')
V = v.reshape((2000, num_branches))
V_pfb = np.fft.rfft(V, num_branches, axis=1) / num_branches**0.5
print(V_pfb.real.mean(), V_pfb.real.std())
print(V_pfb.imag.mean(), V_pfb.imag.std())
# +
num_taps = 8
num_branches = 1024
sample_rate = 3e9
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
fch1=0*u.GHz,
ascending=True,
num_pols=1)
antenna.x.add_noise(v_mean=0,
v_std=1)
# antenna.x.add_constant_signal(f_start=12e6,
# drift_rate=-2*u.Hz/u.s,
# level=0.5)
v = antenna.x.get_samples(2000*num_branches)
num_bits = 8
target_fwhm = 32 * 2**num_bits / 2**8
digitizer = stg.voltage.RealQuantizer(target_fwhm=target_fwhm,
num_bits=num_bits)
v = digitizer.quantize(v)
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
num_branches=num_branches)
print(v.mean(), v.std())
print('==========')
v_c = filterbank.channelize(v)
print(v_c.real.mean(), v_c.real.std())
print(v_c.imag.mean(), v_c.imag.std())
print('==========')
V = v.reshape((2000, num_branches))
V_pfb = np.fft.rfft(V, num_branches, axis=1) / num_branches**0.5
print(V_pfb.real.mean(), V_pfb.real.std())
print(V_pfb.imag.mean(), V_pfb.imag.std())
# -
filterbank.channelized_stds * digitizer.target_std
vars(digitizer)
digitizer.target_std/2**0.5
vars(filterbank)
v_c.shape
v_c = v_c[:, 1:]
print(v_c.real.mean(), v_c.real.std())
print(v_c.imag.mean(), v_c.imag.std())
| raw_voltage_dev/deconstructed_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Write a Python program to print "Hello Python"?
print('Hello Python')
# ### Write a Python program to do arithmetical operations addition and division.?
# Addition: read two numbers and report bad input instead of crashing.
try:
    a = float(input('Enter first number'))
    b = float(input('Enter second number'))
    print(a + b)
except ValueError:
    # only invalid numeric text is expected here; a bare except would also
    # swallow KeyboardInterrupt/SystemExit
    print('Input a number')
# Division: additionally guard against dividing by zero.
try:
    a = float(input('Enter first number'))
    b = float(input('Enter second number'))
    print(a / b)
except (ValueError, ZeroDivisionError):
    print('Input a number')
# ### Write a Python program to find the area of a triangle?
def areaoftriangle(b, h):
    """Print and return the area of a triangle.

    Parameters
    ----------
    b : float
        Base length.
    h : float
        Height.

    Returns
    -------
    float
        The computed area, b*h/2 (also printed, preserving the original
        behaviour; previously the function returned None).
    """
    area = (b * h) / 2
    print('Area of triangle is', area)
    return area
areaoftriangle(10,4)
# ### Write a Python program to swap two variables?
# Swap two user-supplied integers using Python's tuple unpacking
# (no temporary variable needed).
try:
    a = int(input('Enter first number '))
    b = int(input('Enter second number '))
    print('Before swap a={},b={}'.format(a, b))
    # simultaneous assignment swaps the two names in one step
    a, b = b, a
    print('After swap a={},b={}'.format(a, b))
except ValueError:
    # only non-numeric text should be reported; a bare except would also
    # swallow KeyboardInterrupt/SystemExit
    print('Enter a number')
# ### Write a Python program to generate a random number?
import random
random.randint(0,100)
| Python Basic Programming_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
df = pd.read_csv("../input/bank-dataset/UNIONBANK_5Y.csv")
df.head()
close = df["Close"]
close.shape
close
close = close.dropna()
close.shape
# +
from sklearn.preprocessing import MinMaxScaler

# Scale closing prices into [0, 1] for the LSTM. fit_transform learns the
# min/max and applies the scaling in one pass (the return value of the former
# separate fit() call was never used).
scaler = MinMaxScaler(feature_range=(0,1))
new_df = scaler.fit_transform(np.array(close).reshape(-1,1))
print(new_df)
# +
training_size=int(len(new_df)*0.67)
test_size=len(new_df)-training_size
train_data,test_data=new_df[0:training_size,:],new_df[training_size:len(new_df),:1]
print(train_data.shape)
print(test_data.shape)
# -
def create_dataset(dataset, time_step=1):
    """Turn a (n, 1) series into supervised-learning windows.

    Each sample in X is a run of ``time_step`` consecutive values taken from
    column 0 of ``dataset``; the matching entry in Y is the value that
    immediately follows the window.

    Returns
    -------
    (X, Y) : tuple of np.ndarray
        Shapes (n - time_step - 1, time_step) and (n - time_step - 1,).
    """
    windows = []
    targets = []
    # NOTE(review): the loop stops one window early (len - time_step - 1);
    # kept as-is to preserve the original split sizes.
    last_start = len(dataset) - time_step - 1
    for start in range(last_start):
        stop = start + time_step
        windows.append(dataset[start:stop, 0])
        targets.append(dataset[stop, 0])
    return np.array(windows), np.array(targets)
time_step=100
X_train, Y_train = create_dataset(train_data, time_step)
X_test, Y_test = create_dataset(test_data, time_step)
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
#print(X_train, Y_train)
X_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)
X_test = X_test.reshape(X_test.shape[0],X_test.shape[1] , 1)
print(X_train.shape, X_test.shape)
#print(X_train, Y_train)
# +
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
model=Sequential()
model.add(LSTM(50,return_sequences=True,input_shape=(X_train.shape[1],X_train.shape[2])))
model.add(Dropout(0.2))
model.add(LSTM(50,return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error',optimizer='adam')
model.summary()
# -
model.fit(X_train,Y_train,validation_data=(X_test, Y_test),epochs=100,batch_size=64,verbose=1)
train_predict=model.predict(X_train)
test_predict=model.predict(X_test)
print(train_predict.shape, test_predict.shape)
train_predict=scaler.inverse_transform(train_predict)
test_predict=scaler.inverse_transform(test_predict)
print(test_predict)
# +
X = scaler.fit_transform(np.array(close).reshape(-1,1))
import matplotlib.pyplot as plt
look_back=100
trainPredictPlot = np.empty_like(X)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict
# shift test predictions for plotting
testPredictPlot = np.empty_like(X)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(train_predict)+(look_back*2)+1:len(X)-1, :] = test_predict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(X))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
| python_script/unionbankofindia-script.ipynb |