code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Online News Popularity Prediction Using Deep Learning
# #### Applied two deep learning models in my dataset, Single Layer Perceptron and Multi Layer Perceptron
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.preprocessing import MinMaxScaler, Normalizer
from sklearn.decomposition import PCA as sklearnPCA
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
from sklearn.neural_network import MLPClassifier
# ### Importing the dataset
# Load the UCI Online News Popularity dataset. Note: column names in this CSV
# carry a leading space (e.g. ' shares'), which is why every lookup below
# uses the space-prefixed name.
data = pd.read_csv('OnlineNewsPopularity/OnlineNewsPopularity.csv')
data.head()
def converter(x):
    """Binarise a share count: 1 if strictly above 1400 shares, else 0."""
    return 1 if x > 1400 else 0
# Binary popularity target: 1 = popular (> 1400 shares), 0 = not popular.
data['NumberOfShares'] = data[' shares'].apply(converter)
data.head()
# ## Data Sampling
# Feature matrix drops the raw share count, the derived label and the URL.
X = data.drop([' shares', 'NumberOfShares', 'url'], axis=1)
y = data['NumberOfShares']
# NOTE(review): this first split is overwritten below — the data is cleaned
# *after* this split and then re-split; confirm the first split is intentional.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# ### Data Cleanup
# Drop rows whose token ratios exceed 1 (physically impossible → corrupt rows).
data = data[data[' n_unique_tokens'] <= 1]
data = data[data[' n_non_stop_words'] <= 1]
data = data[data[' n_non_stop_unique_tokens'] <= 1]
# Drop rows with no sentiment-bearing words and rows with zero-length tokens.
data = data[data[' rate_positive_words'] + data[' rate_negative_words'] != 0]
data = data[data[' average_token_length'] != 0]
# Columns 31:37 — presumably the weekday one-hot flags (confirm against the
# CSV header) — are split out and excluded from the model features.
data_week = data.iloc[:, 31:37]
data1 = data.drop(data.iloc[:, 31:37], axis=1)
# ### Resampling Data
X_new = data1.drop(['url', ' shares', 'NumberOfShares'], axis=1)
# Last column is the binary label, kept as a one-column frame so the TF
# placeholders of shape [None, 1] below line up.
y_new = data1.iloc[:, -1:]
X_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.30)
X_new.head()
# +
#clf = MLPClassifier(activation='logistic', alpha=0.5, batch_size=1000, hidden_layer_sizes=(30000,), learning_rate='adaptive'
# , max_iter = 1000)
# -
# ### Normalize x_data values for better prediction
# +
# Training Data
# -
# Scale features to [0, 1] for better optimisation behaviour.
# FIX: the original fitted a *separate* MinMaxScaler on the test set, so train
# and test were scaled with different min/max statistics (inconsistent feature
# spaces and test-set leakage). Fit on the training data only and reuse those
# statistics to transform the test data.
scaler = MinMaxScaler().fit(X_train)
# Training data
train_x = scaler.transform(X_train)
print("Training Data :", train_x.shape)
# Testing data, transformed with the training-set statistics
test_x = scaler.transform(X_test)
print("Testing Data :", test_x.shape)
# Labels are already binary; no scaling needed.
train_y = y_train
test_y = y_test
# ### Make ANN-SLP Model (single-layer perceptron = logistic regression)
# #### Placeholders for the 53 post-cleanup features and the binary label
X = tf.placeholder(tf.float32, [None,53])
Y = tf.placeholder(tf.float32, [None, 1])
# #### Randomly initialised parameters (fixed seed for reproducibility)
# weight: 53 features -> 1 output unit
W = tf.Variable(tf.random_normal([53,1], seed=0), name='weight')
# bias
b = tf.Variable(tf.random_normal([1], seed=0), name='bias')
# ### Raw linear output (pre-activation)
logits = tf.matmul(X,W) + b
# ### Sigmoid output; cross-entropy is computed from the logits directly,
# which is the numerically stable form.
hypothesis = tf.nn.sigmoid(logits)
cost_i = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=Y)
cost = tf.reduce_mean(cost_i)
# ### Gradient descent on the mean cross-entropy
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
# ### Compare: original label vs. prediction thresholded at 0.5
prediction = tf.cast(hypothesis > 0.5, dtype=tf.float32)
correct_prediction = tf.equal(prediction, Y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# ### Activate Model: 30k full-batch gradient-descent steps
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(30001):
        sess.run(train, feed_dict={X: train_x, Y: train_y})
        if step % 1000 == 0:
            # periodic progress report on the training set
            loss, acc = sess.run([cost, accuracy], feed_dict={X: train_x, Y: train_y})
            print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(step, loss, acc))
    # Final accuracies; test labels are fed for evaluation only.
    train_acc = sess.run(accuracy, feed_dict={X: train_x, Y: train_y})
    test_acc,test_predict,test_correct = sess.run([accuracy,prediction,correct_prediction], feed_dict={X: test_x, Y: test_y})
    print("Model Prediction =", train_acc)
    print("Test Prediction =", test_acc)
# ### MLP Model Summary and Compare
def ann_mlp():
    """Train a sigmoid MLP (53 -> 106 -> 106 -> 159 -> 1) with full-batch
    gradient descent on the module-level train_x/train_y arrays and evaluate
    on test_x/test_y.

    Returns:
        (train_acc, test_acc): final accuracy on the training and test sets.
    """
    print("===========Data Summary===========")
    print("Training Data :", train_x.shape)
    print("Testing Data :", test_x.shape)
    # placeholders: 53 features per article, one binary label per row
    X = tf.placeholder(tf.float32, [None,53])
    Y = tf.placeholder(tf.float32, [None, 1])
    # input layer: 53 -> 106, sigmoid activation
    W1 = tf.Variable(tf.random_normal([53,106], seed=0), name='weight1')
    b1 = tf.Variable(tf.random_normal([106], seed=0), name='bias1')
    layer1 = tf.nn.sigmoid(tf.matmul(X,W1) + b1)
    # hidden layer 1: 106 -> 106
    W2 = tf.Variable(tf.random_normal([106,106], seed=0), name='weight2')
    b2 = tf.Variable(tf.random_normal([106], seed=0), name='bias2')
    layer2 = tf.nn.sigmoid(tf.matmul(layer1,W2) + b2)
    # hidden layer 2: 106 -> 159
    W3 = tf.Variable(tf.random_normal([106,159], seed=0), name='weight3')
    b3 = tf.Variable(tf.random_normal([159], seed=0), name='bias3')
    layer3 = tf.nn.sigmoid(tf.matmul(layer2,W3) + b3)
    # output layer: 159 -> 1 logit
    W4 = tf.Variable(tf.random_normal([159,1], seed=0), name='weight4')
    b4 = tf.Variable(tf.random_normal([1], seed=0), name='bias4')
    logits = tf.matmul(layer3,W4) + b4
    hypothesis = tf.nn.sigmoid(logits)
    # numerically stable sigmoid cross-entropy computed from the logits
    cost_i = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,labels=Y)
    cost = tf.reduce_mean(cost_i)
    train = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
    # hard prediction: threshold the sigmoid output at 0.5
    prediction = tf.cast(hypothesis > 0.5, dtype=tf.float32)
    correct_prediction = tf.equal(prediction, Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
    print("\n============Processing============")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # dump the graph for TensorBoard inspection
        summary_writer = tf.summary.FileWriter('./logg', sess.graph)
        for step in range(10001):
            sess.run(train, feed_dict={X: train_x, Y: train_y})
            if step % 1000 == 0:
                loss, acc = sess.run([cost, accuracy], feed_dict={X: train_x, Y: train_y})
                print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(step, loss, acc))
        train_acc = sess.run(accuracy, feed_dict={X: train_x, Y: train_y})
        test_acc,test_predict,test_correct = sess.run([accuracy,prediction,correct_prediction], feed_dict={X: test_x, Y: test_y})
        print("\n============Results============")
        print("Model Prediction =", train_acc)
        print("Test Prediction =", test_acc)
        return train_acc,test_acc
ann_mlp_train_acc, ann_mlp_test_acc = ann_mlp()
# #### Observed accuracy with this MLP was about 58%.
| Project Code - Online_News_Popularity_Prediction Neural Network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
# ```
# input > weight > hidden layer 1 (activation function) > weights > hidden layer 2 (activation function) > weights > output layer
#
# compare the output to intended output > cost or loss function (cross entropy)
#
# optimization function or optimizer > minimize cost (AdamOptimizer, .....
# SGD, AdaGrad)
#
# backpropagation
#
# Feed forward + backprop = epoch
# ```
# FIX: `input_data` was referenced without being imported anywhere in this
# notebook, which raises NameError on the first line. Bring in the TF1-era
# MNIST loader explicitly (requires tensorflow 1.x).
from tensorflow.examples.tutorials.mnist import input_data

# Download/read MNIST with one-hot labels: 10 classes for digits 0-9,
# e.g. 0 -> [1,0,0,0,0,0,0,0,0,0].
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# Sizes of the three fully connected hidden layers and the output layer.
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10

# Images processed per optimisation step.
batch_size = 100

# Input placeholder: flattened 28 * 28 = 784 pixels (height * width);
# y holds the one-hot labels.
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
| scratch_notebooks/MNIST_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mendiang/GoodEnoughAlgs/blob/master/Project__DL(RNN)_Bangkit.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Jkubi3BJfWU7" colab_type="text"
# #0. Import Library
# + id="4cA8MHP4BCfK" colab_type="code" outputId="67aba478-db1c-4a37-fbe5-5c3f37d8c7a2" colab={"base_uri": "https://localhost:8080/", "height": 514}
# !pip uninstall tensorflow
# !pip install tensorflow==1.13.2
# + id="JQegZhOCB6Nw" colab_type="code" outputId="bb3dcd99-9e96-44bd-817d-33c810812c33" colab={"base_uri": "https://localhost:8080/", "height": 207}
# !pip show tensorflow
# + id="R_yTotx6fNin" colab_type="code" outputId="d6db09a7-9272-4af1-8410-6be9534e1196" colab={"base_uri": "https://localhost:8080/", "height": 287}
# tensorflow provides the deep-learning graph/session runtime (TF1-style API)
import tensorflow as tf
from tensorflow.contrib import rnn  # TF1 contrib RNN cells (BasicLSTMCell)
import numpy as np  # array handling
import pandas as pd  # DataFrames for richer tabular inspection
import time  # wall-clock timing of the training loop
# matplotlib for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
# %matplotlib inline
mpl.style.use( 'ggplot' )  # use matplotlib's ggplot style for all plots
# + [markdown] id="KwfOLXGJfa-C" colab_type="text"
# #1. Mengenal Data
# + id="RGgnWNGJsiLt" colab_type="code" outputId="038edf6c-0268-4d68-e763-c13dc8af1f34" colab={"base_uri": "https://localhost:8080/", "height": 411}
# Import the TF1-era MNIST reader
from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Read the Fashion-MNIST dataset from data/fashion with one-hot labels.
# NOTE(review): this read happens before the mkdir/wget cell below creates the
# files — in the original Colab the cells were presumably run out of order.
mnist = input_data.read_data_sets('data/fashion', one_hot=True) # read the dataset via tensorflow.examples.tutorials.mnist input_data
# + id="91MSxpZLbPf_" colab_type="code" outputId="156c466e-9e9d-4c87-e178-8f3fc24b36ab" colab={"base_uri": "https://localhost:8080/", "height": 802}
# !mkdir data/fashion # create the "data/fashion" directory for the downloads
# !wget -O data/fashion/train-images-idx3-ubyte.gz http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz
# !wget -O data/fashion/train-labels-idx1-ubyte.gz http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz
# !wget -O data/fashion/t10k-images-idx3-ubyte.gz http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz
# !wget -O data/fashion/t10k-labels-idx1-ubyte.gz http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz
# + id="QVFdfWsObRbY" colab_type="code" outputId="8586769e-3b88-42df-c546-9c1689867619" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Re-read Fashion-MNIST now that the files exist on disk.
mnist = input_data.read_data_sets('data/fashion', one_hot=True)
# + [markdown] id="WmJ2lcxed4sY" colab_type="text"
#
# > Membaca dataset fashion mnist
#
#
#
#
# + id="wLPZRITSsrgw" colab_type="code" outputId="c23ccd8a-ccb9-48a3-bba9-238725b41eaf" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Shapes: 55000 training / 10000 test images, flattened to 784 pixels each.
print(f'image training: { mnist.train.images.shape}')
print(f'label training: { mnist.train.labels.shape}')
print(f'image testing: { mnist.test.images.shape}')
print(f'label testing: { mnist.test.labels.shape}')
# + id="mllLBQNcv2au" colab_type="code" outputId="9baef30a-2fc9-448a-cd8e-0b3dba3a8e92" colab={"base_uri": "https://localhost:8080/", "height": 1000}
print(mnist.train.images[40]) # dump the raw pixel vector of image #40
# + id="OwTP3F2zfCDc" colab_type="code" outputId="e65357be-41fb-4245-bd4e-e017862f5394" colab={"base_uri": "https://localhost:8080/", "height": 282}
sample_1 = mnist.train.images[40].reshape(28,28) # image #40 as a 28x28 pixel grid
plt.imshow(sample_1, cmap='Greys') # render the sample in grey scale
# + id="X25TKmjNgY_Y" colab_type="code" outputId="2153b4e3-cec4-4e4c-ed28-44f96bef2661" colab={"base_uri": "https://localhost:8080/", "height": 272}
datatrainimages=pd.DataFrame(mnist.train.images) # convert the numpy arrays to pandas DataFrames
datatestimages=pd.DataFrame(mnist.test.images)
combine = [datatrainimages, datatestimages]
ffmnist = pd.concat(combine)
print(ffmnist)
print(ffmnist.isnull().any().sum()) # number of columns containing any missing value
# + id="bCqcY4hKfShl" colab_type="code" outputId="962aef26-89ac-4133-90cf-d96d16a692d4" colab={"base_uri": "https://localhost:8080/", "height": 234}
# Show the first 10 training images with their (argmax-decoded) labels.
fig, axes = plt.subplots(2, 5, figsize=(7,4))
for img, label, ax in zip(mnist.train.images[0:10], np.argmax(mnist.train.labels[0:10], 1), axes.flat):
    ax.set_title(label)
    ax.imshow(img.reshape(28,28))
    ax.axis('off')
plt.show()
# + [markdown] id="VBMRV7Mdfi7f" colab_type="text"
# #2. Membangun RNN/LSTM
# + [markdown] id="iRjvWiS0fnvu" colab_type="text"
# ##2.1. Hyperparameter
# + id="1VZ9nIadtJw3" colab_type="code" colab={}
# 55000 training examples in total
# hyperparameters
learning_rate = 0.0001
# learning rate for the Adam optimizer below
training_iterations = 100000
# defined but unused — the training loop below iterates over `epochs`
batch_size = 128
# number of images processed per optimisation step
display_step = 10
# defined but unused — the loop reports on `epoch % 10` directly
epochs = 1000
# number of optimisation steps (one mini-batch each)
n_inputs = 28
# features per time step: one 28-pixel image row
n_time_steps = 28
# sequence length: the 28 rows of each image
n_hidden_units = 128
# LSTM hidden-state size
n_classes = 10
# one class per fashion category
# + [markdown] id="7zIDEV0qfqmi" colab_type="text"
# ##2.2. Placeholders
# + id="Um0vgGd0tJai" colab_type="code" outputId="fa75342d-af6a-446f-b5ba-4bf55de6f7d1" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Placeholders: each image enters as 28 time steps of 28 features; labels are
# one-hot over the 10 classes. Batch dimension left dynamic (None).
X = tf.placeholder(tf.float32, [None, n_time_steps, n_inputs])
# X = tf.placeholder(tf.float32, [batch_size, n_time_steps, n_inputs])
Y = tf.placeholder(tf.float32, [None, n_classes])
X
# + [markdown] id="vJuMZTSTfuBj" colab_type="text"
# ##2.3. Variable
# + id="ExLyUL-2njfn" colab_type="code" outputId="835f5fc7-912e-4750-e553-952dd0110a8b" colab={"base_uri": "https://localhost:8080/", "height": 105}
# Output-projection parameters (hidden state -> class logits), random init.
output_w = tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
output_b = tf.Variable(tf.random_normal([n_classes]))
output_w
# + [markdown] id="1CIu0T7FgHnQ" colab_type="text"
# ##2.4. Menyiapkan dan Membangun RNN/LSTM
# + id="bsowyY-Z3k5-" colab_type="code" outputId="ef744c86-02d0-4ba7-b593-168ed8058a7c" colab={"base_uri": "https://localhost:8080/", "height": 156}
# One LSTM cell unrolled over the 28 time steps. `outputs` holds the hidden
# state at every step; `final_state` holds the last cell/hidden state pair.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias=1)
outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, X, dtype="float32")
outputs
# + id="o3g_CMGdpJ4j" colab_type="code" outputId="a922618e-d2bd-4768-b2dc-e962e7b85af0" colab={"base_uri": "https://localhost:8080/", "height": 34}
outputs[:,-1,:]  # hidden state of the final time step only
# + colab_type="code" id="YtKd9dSO20G3" colab={}
# a = np.array([[[1,2,3], [4,5,6], [7,8,9]], [[10,11,12], [13,14,15], [16,17,18]], [[19,20,21], [22,23,24], [25,26,27]]]) # menyimpan hasil pixel image 28 x 28
# a
# + id="GX8TiNqfs7Vi" colab_type="code" colab={}
# a[:,-1,:]
# + [markdown] id="9Tzd9hPbgROb" colab_type="text"
# ##2.5. Memprediksi
# + id="1ikKVr6m5TsA" colab_type="code" outputId="1a901157-eb17-45b1-a30a-0fe0cd103efc" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Project the last time-step's hidden state ([batch, n_hidden_units]) onto the
# 10 class logits.
logits = tf.matmul(outputs[:,-1,:], output_w) + output_b #(outputs[:,-1,:]*output_w) + output_b
logits
# + [markdown] id="UQQ7kt-Rgdf2" colab_type="text"
# ##2.6. Loss dan Optimasi
# + id="ArJlpEKC5gUI" colab_type="code" outputId="4e7a422b-5f46-4cb4-8f6a-360292b936cd" colab={"base_uri": "https://localhost:8080/", "height": 173}
# Mean softmax cross-entropy between the one-hot labels Y and the logits.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits )) # average cross-entropy over the batch
optimize = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss) # minimise the loss with Adam
# optimize = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
# alternative optimizer option
# + [markdown] id="MY0Y93zBgiuR" colab_type="text"
# ##2.7. Metric untuk evaluasi
# + id="0wOY-bue7O8n" colab_type="code" outputId="ba9fc428-3a34-448f-a4b4-fc5bcbbad6c5" colab={"base_uri": "https://localhost:8080/", "height": 139}
# Confusion matrix and accuracy, both from argmax-decoded labels/predictions.
confusion_op = tf.math.confusion_matrix(tf.argmax(Y, 1), tf.argmax(logits, 1))
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# + [markdown] id="JNQePOvAgmzM" colab_type="text"
# #3. Training
# + id="g1VcHTpl6ioA" colab_type="code" outputId="d241159b-d412-45f6-f381-0ce708f91de2" colab={"base_uri": "https://localhost:8080/", "height": 1000}
timestart=time.time()
# initialise all graph variables
init = tf.global_variables_initializer()
graph=[]
# session used to execute the graph (kept open for the evaluation cells below)
session = tf.Session()
session.run(init)
for epoch in range(epochs):
    # fetch the next mini-batch of images and one-hot labels
    batch_x, batch_y = mnist.train.next_batch(batch_size = batch_size)
    # reshape: images arrive flattened (784,) -> (28 time steps, 28 inputs)
    batch_x2 = batch_x.reshape((batch_size, n_time_steps, n_inputs))
    session.run(optimize, feed_dict={X: batch_x2, Y: batch_y})
    if not epoch % 10: # report every 10th epoch only
        acc = session.run(accuracy, feed_dict={X: batch_x2, Y: batch_y})
        los = session.run(loss, feed_dict={X: batch_x2, Y: batch_y})
        graph.append([epoch,acc,los]) # stored for the accuracy/loss plots below
        print(f'epoch: {epoch}, loss: {los:.4f}, accuracy: {acc:.4f}')
        # print(graph)
    if epoch==(epochs-1):
        # capture the raw LSTM outputs and final state of the last batch
        output_val = np.array(session.run(outputs, feed_dict={X: batch_x2}))
        finalstate_val = np.array(session.run(final_state, feed_dict={X: batch_x2}))
timestop=time.time()
# + id="dzujZfjAiT-_" colab_type="code" outputId="b5b28f29-2e80-4a52-c291-9483d15d87d5" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(timestop-timestart) # elapsed training time in seconds
# + [markdown] id="U3UbS4kw3VnA" colab_type="text"
# #4. Evaluasi
# + id="Iu9vxXcp42fu" colab_type="code" outputId="f6de7b94-cc36-49b8-e1af-2ca1c4d713e5" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Confusion matrix over the full 10000-image test set.
confusion = session.run(confusion_op, feed_dict={X: mnist.test.images.reshape(10000, n_time_steps, n_inputs), Y: mnist.test.labels})
print(confusion)
# + id="P6YyvZ0v3ZwH" colab_type="code" outputId="68955409-1602-4b28-cdc7-9d93b06aad0d" colab={"base_uri": "https://localhost:8080/", "height": 306}
from sklearn.metrics import classification_report
# Predicted class = argmax over the 10 logits per test image.
yhat = session.run(logits, feed_dict={X: mnist.test.images.reshape(10000, n_time_steps, n_inputs)})
yhat = np.argmax(yhat, axis=1)
# True class decoded from the one-hot labels.
y = session.run(Y, feed_dict={Y: mnist.test.labels})
y = np.argmax(y, axis=1)
print(classification_report(y, yhat))
# + id="P32F4P-jJBYd" colab_type="code" outputId="4872de04-886f-4d79-802f-0c6487bb7392" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Print true label, prediction and raw logit scores for every test example.
i=0
presentasi_data = session.run(logits, feed_dict={X: mnist.test.images.reshape(10000, n_time_steps, n_inputs)})
while i < len(y):
    print ("Data test ke",i)
    print (y[i],yhat[i])
    print (presentasi_data[i])
    i=i+1
# + id="H1pAzGOZgkUW" colab_type="code" colab={}
# Human-readable names for the 10 Fashion-MNIST class indices.
target_dict = {
    0: 'T-shirt/top',
    1: 'Trouser',
    2: 'Pullover',
    3: 'Dress',
    4: 'Coat',
    5: 'Sandal',
    6: 'Shirt',
    7: 'Sneaker',
    8: 'Bag',
    9: 'Ankle boot',
}
# + id="HNk6TO3W4lhh" colab_type="code" outputId="652cff8a-da82-4923-c21c-165d1214c7d2" colab={"base_uri": "https://localhost:8080/", "height": 801}
# Show the first 100 test images titled with their *predicted* class name.
fig, axes = plt.subplots(10, 10, figsize=(12,14))
for img, label, ax in zip(mnist.test.images[0:100], yhat, axes.flat):
    ax.set_title(target_dict[label])
    ax.imshow(img.reshape(28,28))
    ax.axis('off')
plt.show()
# + id="_etEl15ujQpr" colab_type="code" outputId="3cda0083-2453-4930-ae49-8bdb5a14cbec" colab={"base_uri": "https://localhost:8080/", "height": 299}
# Plot accuracy vs. epoch from the values recorded during training.
sumbux=[item[0] for item in graph]
sumbuy=[item[1] for item in graph]
# print(sumbuy)
plt.plot(sumbux, sumbuy)
plt.yscale('linear')
plt.title('akurasi')
plt.xlabel('epoch')
plt.ylabel('akurasi')
plt.axis('auto')
plt.grid(True)
# + id="oi3tyUqNvQtX" colab_type="code" outputId="63656ed3-ed80-4144-d376-f9052c19fac8" colab={"base_uri": "https://localhost:8080/", "height": 299}
# Plot loss vs. epoch from the same recorded values.
sumbux=[item[0] for item in graph]
sumbuy=[item[2] for item in graph]
plt.plot(sumbux, sumbuy)
# print(sumbuy)
plt.yscale('linear')
plt.title('loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.axis('auto')
plt.grid(True)
| Project__DL(RNN)_Bangkit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 1
#
# Write a Python program to count the number of characters (character frequency) in a word entered by the user.
#
# For example, if the user enters the word <font color = red> google </font>
#
# the output should be <font color = red> {'g': 2, 'o': 2, 'l': 1, 'e': 1} </font>
#
# <font color='red'><b>Correct<b></font>
#
# Count character frequencies in a user-supplied word, e.g. 'google'
# -> {'g': 2, 'o': 2, 'l': 1, 'e': 1} (keys in order of first appearance).
# FIX: the original rescanned the whole string for every position, an O(n^2)
# double loop that also recomputed each repeated character's count; one pass
# over the characters is sufficient.
a = input("enter the character")
c = dict()
for ch in a:
    c[ch] = c.get(ch, 0) + 1
c
# # Exercise 2
#
# Given the following function $ f(x) = x^3 -3x - 5;$ let $x_0 \in [a,b]=[-10,10]$ be the solution of the equation $f(x) = 0$.
#
# - Write a function named Bissection1 that return an approximation $\bar{x}$ of $x_0$ such that $$ \mid \bar{x} - x_0 \mid \leq \epsilon, $$
# for a given parameter, $\epsilon>0$.
#
# - Write the function named Bissection2 that complete the same task but has as parameters $f,a,b,\epsilon$.
#
# - Write a function named Bissection3 that takes the same input as Bissection2 and return the approximation value $\bar{x}$ and its image $f(\bar{x})$.
#
# <font color = red><b> Correct<b> </font>
def bissection1(eps):
    """Approximate the root of f(x) = x^3 - 3x - 5 on [-10, 10] by bisection.

    Halves the bracket until its width is at most eps, then returns the
    last midpoint.
    """
    f = lambda x: x ** 3 - 3 * x - 5
    lo, hi = -10, 10
    while abs(lo - hi) > eps:
        mid = (lo + hi) / 2
        # f is negative left of the root and positive right of it, so a
        # positive midpoint value means the root lies in [lo, mid].
        if f(mid) > 0:
            hi = mid
        else:
            lo = mid
    return mid
bissection1(0.000000001)
def bissection2(eps, a, b, f):
    """Bisection root-finder for f on [a, b], to within tolerance eps.

    Assumes f changes sign on [a, b].
    FIX: the original kept the half-interval based on `f(c) > 0`, which is
    only correct for increasing functions; the standard sign test
    f(a) * f(c) <= 0 works for any sign-changing f and preserves the result
    for the increasing example below.
    """
    while abs(a - b) > eps:
        c = (a + b) / 2
        # root lies in [a, c] iff f changes sign there
        if f(a) * f(c) <= 0:
            b = c
        else:
            a = c
    return c
f= lambda x: x**3-3*x-5
bissection2(0.0000001,-10,10,f)
def bissection3(eps, a, b, f):
    """Like bissection2 but returns both the approximation and its image f(c).

    FIX: same defect as bissection2 — the original `f(c) > 0` test assumed an
    increasing f; the sign test f(a) * f(c) <= 0 handles any sign-changing f.
    """
    while abs(a - b) > eps:
        c = (a + b) / 2
        # root lies in [a, c] iff f changes sign there
        if f(a) * f(c) <= 0:
            b = c
        else:
            a = c
    return c, f(c)
f= lambda x: x**3-3*x-5
bissection3(0.0000001,-10,10,f)
# # Exercise 3
# Write a Python function that accepts a string and calculates the number of upper case letters and lower case letters. Check for 'The quick Brow Fox' and 'I am proud of Mr McDonald'.
#
# <font color='red'><b>Correct, you may need to input the full statement though which will result in (7-uppers and 31-lowers). Pay close attention to the efficiency as well.<b></font>
#
# a='Tfk'
# a[0].islower()
# s=0
# dir(a)
#if a.islower()==True :
# s=s+1
# print(s)
def count_low(z):
    """Return (lowercase_count, other_count) over the non-space characters of z.

    Note: any non-space character that is not a lowercase letter (uppercase,
    digits, punctuation) falls into the second count, mirroring the original's
    islower()-else classification.
    """
    lowers = 0
    others = 0
    for word in z.split():
        for ch in word:
            if ch.islower():
                lowers += 1
            else:
                others += 1
    return lowers, others
count_low('I am proud of Mr McDonald')
#count_low( 'The quick Brow Fox and I am proud of Mr McDonald')
# # Exercise 4
#
# Write a Python function called <b> InRange </b>to check whether a given number $a$ is in a given range $r$. For example for $[0,100]$, the output of 20 should appear: 20 is in the range. in addition, if the number is outside the range, print the following message: The number is outside the given range.
#
# <font color='red'><b>Correct<b></font>
#
def InRange(L, n):
    """Report whether n lies in the inclusive range [L[0], L[-1]]."""
    inside = L[0] <= n <= L[-1]
    return (n, 'is in the range') if inside else (n, 'is not in the range')
InRange([0,100],20)
# # Exercise 5
# Write a Python function called <b>IsPrime</b> that takes a positive integer as a parameter and check the number is prime or not.
#
# Note : A prime number (or a prime) is a natural number greater than 1 and that has no positive divisors other than 1 and itself.
#
# <font color='red'><b>Correct<b></font>
#
def IsPrime(n):
    """Classify a positive integer as prime or not.

    Returns (n, 'prime') or (n, 'not prime'); for n == 1 the original
    bare-string return ("not prime") is preserved for compatibility.
    FIX: the original scanned every candidate up to n // 2 and kept counting
    divisors instead of stopping; returning at the first divisor and testing
    only up to sqrt(n) gives the same classification far faster.
    """
    if n == 1:
        return ("not prime")
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return (n, 'not prime')
    return (n, 'prime')
IsPrime(11)
# # Exercise 6
# Write a Python function called <b>EvenNum</b> that returns the list of even numbers from a given list.
#
# <font color='red'><b>Correct<b></font>
#
def EvenNum(l):
    """Return the even integers from l, skipping non-int items.

    Uses an exact type check, so bools (a subclass of int) are excluded,
    matching the original's type(i) == int test.
    """
    return [item for item in l if type(item) is int and item % 2 == 0]
EvenNum([1,'fs',3,4,'',6,'f',8,11,18,23])
# # Exercise 7
#
# - Write a function called <b>sum_digits</b> that takes a positive integer $num$ as input and returns the sum $S$ of the
# digits of $num$.
# - Write a function called <b>sum_digits_OddEven</b> that takes a positive integer $num$ as input and returns the sums $S1$ and $S2$ of the odd and even
# digits of $num$, respectively.
#
# <font color = red><b> Correct <b></font>
#
# +
def sum_digits_OddEven(num):
    """Split the digits of num into even and odd sums.

    Returns (even_digit_sum, odd_digit_sum) — note the first element is the
    EVEN sum, mirroring the original implementation's ordering (the exercise
    text names the odd sum first; confirm which order the caller expects).
    """
    even_sum = 0
    odd_sum = 0
    for ch in str(num):
        digit = int(ch)
        if digit % 2 == 0:
            even_sum += digit
        else:
            odd_sum += digit
    return even_sum, odd_sum
sum_digits_OddEven(956)
# -
# # Exercise 8
#
# The digital root of a positive number n is obtained as follows: Add up the digits n to get a new number.
# Add up the digits of that to get another new number. Keep doing this until you get a number
# that has only one digit. That number is the digital root.
#
# For example, if $n = 45893$, we add up the digits to get $$4 + 5 + 8 + 9 + 3 = 29.$$ We then add up
# the digits of $29$ to get $$2 + 9 = 11.$$ We then add up the digits of $11$ to get $$1 + 1 = 2.$$ Since 2 has
# only one digit, 2 is our digital root.
#
# Write a function called <b>digital_root</b> that returns the digital root of a given integer n.
#
# <font color = red><b> Correct <b></font>
#
def digital_root(n):
    """Repeatedly sum the digits of n until a single digit remains."""
    while n >= 10:
        n = sum(int(d) for d in str(n))
    return n
digital_root(45893)
# # Exercise 9
#
# Write a function called <b>first_diff</b> that is given two strings and returns the first location in
# which the strings differ. If the strings are identical, it should return -1.
#
# <font color = red><b> Incorrect — please recheck: it outputs the same integer regardless of where the strings differ. <b></font>
#
def first_diff(A, B):
    """Return the first index at which strings A and B differ, or -1 if equal.

    If one string is a proper prefix of the other, the first difference is
    where the shorter string ends, i.e. min(len(A), len(B)).
    FIX: the original returned from an else-branch on the very first
    comparison, so it reported the same value no matter where the strings
    actually differed (and off by one, returning i + 1).
    """
    if A == B:
        return -1
    for i, (ca, cb) in enumerate(zip(A, B)):
        if ca != cb:
            return i
    # all compared characters matched: they differ only in length
    return min(len(A), len(B))
first_diff('difff','dffff')
# # Exercise 10
#
# Write a function called <b>number_of_factors</b> that takes a positive integer and returns how many factors the number has; and a list of its factors.
#
# <font color = red><b>Correct.</b></font>
#
def number_of_factors(n):
    """Return (count, factors) for a positive integer n.

    The factor list starts with n itself followed by the divisors up to n // 2
    in ascending order, exactly as the original produced it.
    """
    small_factors = [i for i in range(1, n // 2 + 1) if n % i == 0]
    return 1 + len(small_factors), [n] + small_factors
number_of_factors(18)
# # Exercise 11.1
#
# Write a function called <b>closest</b> that takes a list of numbers $L$ and a number $n$ and returns
# the largest element in $L$ that is not larger than $n$. For instance, if $L=[1,6,3,9,11]$ and $n=8$,
# then the function should return $6$, because $6$ is the closest number in $L$ to $8$ that is not larger than
# $8$. If L is empty or all of the numbers in L are larger than n then return <b> No value of L is smaller than n</b>.
#
#
# <font color = red><b>Even though, you considered the case where $L$ is empty or all the numbers in $L$ are larger than $n$, your code is still not giving the expected output. The problem maybe on the for loops.</b></font>
def closest(L, n):
    """Return the largest element of L that is not larger than n.

    Returns the message tuple ('No value of L is smaller than', n) when L is
    empty or every element exceeds n, matching the original's fallback.
    FIX: the original rejected the whole list whenever max(L) > n, even though
    smaller admissible elements existed, and its nested loops could pick the
    wrong candidate.
    """
    candidates = [x for x in L if x <= n]
    if not candidates:
        return 'No value of L is smaller than', n
    return max(candidates)
closest([69,15,67],6)
# # Exercise 11.2
#
# Write a function called <b> sort_ind</b> that takes a non empty and sorted list of numbers $L$ and a number $n$ and return and index $ind$ such that $L.insert(ind,n)$ gives a sorted list. Example: for $L=[2,4,7]$ and $n=5$, <b> sort_ind</b> must return 2 because $L.insert(2,5)$ return $[2,4,5,7]$
#
# Do not use the method <b>.sort()</b> or the function <b>sorted()</b>.
#
# <font color = red><b>When trying different example, the code doesn't work.</b></font>
# l=[2,4,7]
# l.insert(3,5)
# l
def sort_ind(L, n):
    """Return the index ind such that L.insert(ind, n) keeps sorted L sorted.

    FIX: the original's nested loops compared the wrong pairs and left the
    result variable unbound (NameError) whenever n was >= every element; a
    single scan for the first element greater than n is sufficient.
    """
    for ind, value in enumerate(L):
        if n < value:
            return ind
    # n is >= every element: append position
    return len(L)
sort_ind([2,4,7],5)
# # Exercise 11.3
#
# Using the above function <b> sort_ind</b>, write a function called <b>SortList</b> that returns a sorted list of a given list $L$.
#
# Do not use the method <b>.sort()</b> or the function <b>sorted()</b>.
#
#
# <font color = red><b>Recheck how you declare your list and how you assigned a value to the local variable.</b></font>
#def SortList(L):
# a=[4,7,0,5,3]
# a.insert(0,8)
# a
def SortList(L):
    """Return a sorted copy of L via insertion sort (no .sort() / sorted()).

    FIX: the original inserted loop *indices* instead of the list's values and
    computed insertion points against the unsorted input list. The insertion
    position is found inline so this function is correct on its own.
    """
    result = []
    for value in L:
        ind = 0
        # advance past every element <= value so equal items stay stable
        while ind < len(result) and result[ind] <= value:
            ind += 1
        result.insert(ind, value)
    return result
SortList([4,6,3])
# # Exercise 12
#
# Write a function called <b>matches</b> that takes two strings as arguments and returns how many
# matches there are between the strings. A match is where the two strings have the same character
# at the same index. For instance, <b>'python'</b> and <b>'path'</b> match in the first, third, and
# fourth characters, so the function should return <b>3</b>.
#
# <font color = red><b>Correct. </b></font>
def matches(Z, Y):
    """Count positions where the two strings carry the same character.

    Comparison runs up to the length of the shorter string, as in the
    original (which swapped the arguments so the shorter drove the loop).
    """
    shorter, longer = (Z, Y) if len(Z) <= len(Y) else (Y, Z)
    return sum(1 for idx, ch in enumerate(shorter) if ch == longer[idx])
matches('python','path')
# # Exercise 13
#
# Recall that if <b>s</b> is a string, then <b>s.find('a')</b> will find the location of the first a in <b>s</b>. The
# problem is that it does not find the location of every a. Write a function called <b>findall</b> that
# given a string and a single character, returns a list containing all of the locations of that character
# in the string. It should return an empty list if there are no occurrences of the character
# in the string.
#
# <font color = red><b>Correct. </b></font>
# a='google'
# a.find('g')
def findall(z, o):
    """Return every index in string z where character o occurs ([] if none)."""
    return [idx for idx, ch in enumerate(z) if ch == o]
findall('google','o')
# # Exercise 14
#
# 1. Write a function called <b>primes</b> that is given a positive integer $n$ and returns a list of the first $n$ primes. Let the default value of $n$ be $100$.
#
# 2. Write the function <b>primes_start</b> by modifying the function above so that there is an optional argument called start that allows the list to start at a value other than 2. The function should return the first n primes that are greater than or equal to start. The default value of start should be 2.
#
#
# <font color = red><b>You were asked to use defaults values in both questions. Also, in the first question, you were asked to return a list of the first $n$ primes not the list of primes less than $n$.The second question is supposed take two inputs. </b></font>
# +
def listprime(n):
    """Return the list of all primes <= n (trial division up to sqrt(p))."""
    L = []
    for p in range(2, n + 1):
        if all(p % i != 0 for i in range(2, int(p ** 0.5) + 1)):
            L += [p]
    return L

def primes(n=100):
    """Return the first n primes; n defaults to 100 per the exercise.

    FIX: the original sliced a hard-coded listprime(600) table, which ignored
    the requested default of n=100 and raised IndexError once n exceeded the
    number of primes below 600; candidates are now tested until n primes are
    collected.
    """
    found = []
    candidate = 2
    while len(found) < n:
        if all(candidate % i != 0 for i in range(2, int(candidate ** 0.5) + 1)):
            found.append(candidate)
        candidate += 1
    return found
primes(10)
# -
# # Exercise 16
#
# For any integer $n>0$ and a prime number $p$, define $\nu_p(n)$ as the greaters integer $r$ such that $p^r$ divides $n$.
# Define $$ D(n,m) = \sum_{p\; prime} \Bigl| \nu_p(n) - \nu_p(m)\Bigr| $$
#
# For example $D(14,24)=4$.
#
# Furthermore, define
#
# $$S(N) = \sum_{n=1}^{N}\sum_{m=1}^{N}D(n,m).$$
#
# You are given $S(10)=210$ and $S(10^2)$ = 37018.
#
# Find $S(10^3)$.
#
# <font color = red><b>Correct. </b></font>
# +
def val(n, p):
    """Return nu_p(n): the largest r such that p**r divides n.

    FIX: the original enumerated all exponents with O(n) modulo tests per
    call; repeated division by p is equivalent and fast.
    """
    r = 0
    while n % p == 0:
        n //= p
        r += 1
    return r

def listprime(n):
    """Return the list of all primes <= n (trial division up to sqrt(p))."""
    L = []
    for p in range(2, n + 1):
        if all(p % i != 0 for i in range(2, int(p ** 0.5) + 1)):
            L += [p]
    return L

def dump(n, m):
    """D(n, m) = sum over primes p of |nu_p(n) - nu_p(m)|.

    FIX: the original summed over a hard-coded listprime(100), silently
    dropping primes between 101 and max(n, m) that divide n or m — so D (and
    hence S(1000)) was wrong. Primes above max(n, m) divide neither argument
    and contribute 0, so listprime(max(n, m)) is sufficient.
    """
    S = 0
    for p in listprime(max(n, m)):
        S += abs(val(n, p) - val(m, p))
    return S

def s(N):
    """S(N) = sum_{n=1..N} sum_{m=1..N} D(n, m).

    FIX (performance): the original looped over all N^2 ordered pairs calling
    dump(), infeasible for N = 1000. Group by prime instead: with
    c_k = #{x in 1..N : nu_p(x) = k},
    sum_{n,m} |nu_p(n) - nu_p(m)| = 2 * sum_{k<l} c_k * c_l * (l - k),
    summed over primes p <= N. Verified against S(10)=210 and S(100)=37018.
    """
    total = 0
    for p in listprime(N):
        counts = {}
        for x in range(1, N + 1):
            v = val(x, p)
            counts[v] = counts.get(v, 0) + 1
        levels = sorted(counts)
        for i, ka in enumerate(levels):
            for kb in levels[i + 1:]:
                total += 2 * counts[ka] * counts[kb] * (kb - ka)
    return total
s(1000)
# -
# # Exercise 17
#
# $\text{Exercise 1}$
#
# Write a Python code to compute and display the integrals bellow using the Python function $quad$
# $$A = \int_{0}^{2} \dfrac{x^3+5x-20}{x^2+3}dx$$
#
# $$B = \int_{3}^{5} \bigg(x-\frac{1}{3}\bigg)\sqrt{x^4-3}dx$$
#
#
# <font color = red><b>Correct. But the first term is divided by the second term and also the format of the output for the both integrals are not very good. </b></font>
from scipy.integrate import quad

def f(x):
    """Integrand of A: (x^3 + 5x - 20) / (x^2 + 3).

    FIX: the original multiplied the two factors, but the exercise defines A
    with the first polynomial *divided* by the second.
    """
    return (x**3 + 5*x - 20) / (x**2 + 3)

# quad returns (value, estimated absolute error).
r, e = quad(f, 0, 2)
print('the value of A is {:.6f} (+-) {:g}'.format(r, e))
from scipy.integrate import quad

def f(x):
    """Integrand of B: (x - 1/3) * sqrt(x^4 - 3)."""
    radical = (x ** 4 - 3) ** (1 / 2)
    return (x - 1 / 3) * radical

# quad returns (value, estimated absolute error) over [3, 5].
r, e = quad(f, 3, 5)
print('the value of A is {:f}(+-1){:g}'.format(r,e))
# # Exercise 18
#
# 1. Write code to solve the following system of ordinary differential equations using the Python function odeint.
#
# $$
# \begin{cases}
# \dfrac{dx_1}{dt}& = & -\dfrac{1}{2}x_1\\\\
# \dfrac{dx_2}{dt}& = & \dfrac{1}{2}x_1-\dfrac{1}{4}x_2\\\\
# \dfrac{dx_3}{dt}& = & \dfrac{1}{4}x_2-\dfrac{1}{6}x_3
# \end{cases}, \text{ on } [0,4]
# $$
#
# Subject to the initial conditions $x_1(0) = 1, x_2(0) = 1, x_3(0) = 1$.
#
#
# <font color = red><b>Correct. But the instruction is to solve the system. Not plotting it.</b></font>
# +
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

def model(z, t, u):
    """Right-hand side dz/dt for z = (x1, x2, x3).

    The step input u is accepted for interface compatibility with the
    template but unused: the system is autonomous.
    """
    x1, x2, x3 = z
    dx1dt = -1/2 * x1
    dx2dt = 1/2 * x1 - 1/4 * x2
    # BUG FIX: the original computed 1/4*x1 - 1/6*x2; the system defines
    # dx3/dt = (1/4) x2 - (1/6) x3.
    dx3dt = 1/4 * x2 - 1/6 * x3
    return [dx1dt, dx2dt, dx3dt]

# initial conditions x1(0) = x2(0) = x3(0) = 1
z0 = [1, 1, 1]
# number of time points over [0, 4]
n = 401
t = np.linspace(0, 4, n)
# step input kept from the template; it does not influence the solution
u = np.zeros(n)
u[51:] = 2.0
# storage for the solution components
x = np.empty_like(t)
y = np.empty_like(t)
x3 = np.empty_like(t)
x[0], y[0], x3[0] = z0
# integrate one time step at a time, restarting odeint from the last state
for i in range(1, n):
    tspan = [t[i-1], t[i]]
    z = odeint(model, z0, tspan, args=(u[i],))
    x[i], y[i], x3[i] = z[1]
    z0 = z[1]
# plot the three solution components
plt.plot(t, x, 'b-', label='x1(t)')
plt.plot(t, y, 'r--', label='x2(t)')
plt.plot(t, x3, 'y--', label='x3(t)')
plt.ylabel('values')
plt.xlabel('time')
plt.legend(loc='best')
plt.show()
# -
# 2. The exact solution of the above system of ODEs is given by
#
# $$
# \begin{cases}
# x_1(t)& = & e^{-t/2}\\
# x_2(t)& = & -2e^{-t/2}+3e^{-t/4}\\
# x_3(t)& = & \dfrac{3}{2}e^{-t/2} - 9e^{-t/4} + \dfrac{17}{2}e^{-t/6}
# \end{cases}
# $$
#
# Use $Subplot$ to plot side by side
#
# - each exact and approximate solution in the same window
# - and their absolute error vs the time
#
#
# <font color = red><b>You did not do the absolute error vs time. Do some exercises on how to manipulate plt, to avoid empty graph.</b></font>
# +
import matplotlib.pyplot as plt
import numpy as np

# Exact solutions of the ODE system, evaluated on [0, 10].
t = np.linspace(0, 10, 1000)
x1_exact = np.exp(-t/2)
x2_exact = -2*np.exp(-t/2) + 3*np.exp(-t/4)
# BUG FIX: the middle term is MINUS 9 e^{-t/4} (the original added it),
# per x3(t) = (3/2)e^{-t/2} - 9e^{-t/4} + (17/2)e^{-t/6}.
x3_exact = (3/2)*np.exp(-t/2) - 9*np.exp(-t/4) + (17/2)*np.exp(-t/6)

figure, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].plot(t, x1_exact)
axes[0, 0].set_title('x1(t)')
axes[0, 1].plot(t, x2_exact)
axes[0, 1].set_title('x2(t)')
axes[1, 0].plot(t, x3_exact)
axes[1, 0].set_title('x3(t)')
figure.tight_layout()
# -
| feedbackPython1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Iallen520/lhy_DL_Hw/blob/master/hw9_unsupervised.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="r1_Q26m0LN2r" colab_type="text"
# This is the tutorial of **Image Clustering**
# <br>
# If you want to skip the **training** phase, please refer to the **clustering** section directly.
# <br>
# **Training** required sections: Prepare Training Data, Model, Training
# <br>
# **Clustering** required sections: Prepare Training Data, Model, Dimension Reduction & Clustering
# + [markdown] id="GUyppmxhsgJg" colab_type="text"
# ๅๅญธๅไนๅฏไปฅๅฉ็จๆไพ็wgetๆไปคไธ่ผ่จ็ทด่ณๆ๏ผไธฆ่ช่กmountๅฐ้ฒ็ซฏ่ณๆๅคพไธ๏ผๅฆไฝๆฅญไธๆ็คบใ้้ๅฐฑไธๅ่ด
่ฟฐ<br>
# ไฝๆฅญ็็ฌฌไธ้จๅๆฏ่ฆ่จ็ทดไธๅautoencoderไปฅๆฝๅๅฅฝ็ๅ็่กจๅพต๏ผ็ฌฌไบ้จๅๅๆฏๅฐๆฝๅบไพ็่กจๅพต้็ถญๅฐไบ็ถญ๏ผไปฅไพฟๆๅๅฉ็จๅ็พค็ๆนๆณ็ฒๅพๆๅ็็ญๆก<br>
#
# ่ฅๆไปปไฝๅ้ก๏ผๆญก่ฟไพไฟก่ณๅฉๆไฟก็ฎฑ <EMAIL>
# + [markdown] id="w8WjVvaONQ-m" colab_type="text"
# # Prepare Training Data
# + [markdown] id="NrIsy5olK1sH" colab_type="text"
# ๅฎ็พฉๆๅ็ preprocess๏ผๅฐๅ็็ๆธๅผไปๆผ 0~255 ็ int ็ทๆง่ฝ็บ -1๏ฝ1 ็ floatใ
# + id="xXTyAnhzHzHP" colab_type="code" colab={}
import numpy as np
def preprocess(image_list):
    """Normalize images into [-1, 1] floats and permute (N,H,W,C) to (N,C,H,W).

    Args:
        image_list: list/array of uint8 images, shape (N, H, W, C).
    Returns:
        float32 ndarray of shape (N, C, H, W) with values in [-1, 1].
    """
    batch = np.array(image_list)
    batch = batch.transpose(0, 3, 1, 2)      # channels-first layout
    batch = (batch / 255.0) * 2 - 1          # [0, 255] -> [-1, 1]
    return batch.astype(np.float32)
# + id="qj_hairpGhLj" colab_type="code" colab={}
from torch.utils.data import Dataset
class Image_Dataset(Dataset):
    """Minimal Dataset wrapper: index straight into a preprocessed array."""

    def __init__(self, image_list):
        self.image_list = image_list

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        # No transform here — preprocessing already happened up front.
        return self.image_list[idx]
# + [markdown] id="8EvJPEeGLgt7" colab_type="text"
# ๅฐ่จ็ทด่ณๆ่ฎๅ
ฅ๏ผไธฆไธ preprocessใ
# ไนๅพๆๅๅฐ preprocess ๅฎ็่จ็ทด่ณๆ่ฎๆๆๅ้่ฆ็ datasetใ่ซๅๅญธไธ่ฆไฝฟ็จ valX ๅ valY ไพ่จ็ทดใ
# + id="G_tMv9S5oqn9" colab_type="code" colab={}
from torch.utils.data import DataLoader
# Load the raw training images (N, H, W, C) from disk, normalize/permute
# them into the (N, C, H, W) float layout, and wrap them in a Dataset.
trainX = np.load('trainX_new.npy')
trainX_preprocessed = preprocess(trainX)
img_dataset = Image_Dataset(trainX_preprocessed)
# + [markdown] id="Z64cTA5jaNtg" colab_type="text"
# # Some useful functions
#
# + [markdown] id="wCtxq6GSL4tq" colab_type="text"
# ้้ๆไพไธไบๆ็จ็ functionsใ
# ไธๅๆฏ่จ็ฎ model ๅๆธ้็๏ผreport ๆ็จๅฐ๏ผ๏ผๅฆไธๅๆฏๅบๅฎ่จ็ทด็้จๆฉ็จฎๅญ๏ผไปฅไพฟ reproduce๏ผใ
# + id="KWJNJs-UaUFb" colab_type="code" colab={}
import random
import torch
def count_parameters(model, only_trainable=False):
    """Return the number of scalar parameters in *model*.

    With only_trainable=True, parameters frozen via requires_grad=False
    are excluded from the count.
    """
    params = model.parameters()
    if only_trainable:
        params = (p for p in params if p.requires_grad)
    return sum(p.numel() for p in params)
def same_seeds(seed):
    """Seed every RNG in play (python, numpy, torch CPU/GPU) and make
    cuDNN deterministic so runs are reproducible."""
    random.seed(seed)                 # Python's random module
    np.random.seed(seed)              # NumPy
    torch.manual_seed(seed)           # Torch CPU
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # covers the multi-GPU case
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# + [markdown] id="O_ZuRV_dNjhD" colab_type="text"
# # Model
# + [markdown] id="0mxBEwGYMSjm" colab_type="text"
# ๅฎ็พฉๆๅ็ baseline autoeocoderใ
# + id="dci5VCIuQwvI" colab_type="code" colab={}
import torch.nn as nn
class AE(nn.Module):
    """Baseline convolutional autoencoder for 3x32x32 images.

    The encoder halves the spatial size three times (32 -> 16 -> 8 -> 4)
    while widening channels 3 -> 64 -> 128 -> 256; the decoder restores
    32x32 with three transposed convolutions and squashes the output into
    [-1, 1] with Tanh.  forward() returns (latent, reconstruction).
    """

    def __init__(self):
        super(AE, self).__init__()
        # NOTE: the Sequential layer order (and hence the state_dict keys
        # encoder.0, encoder.3, ...) must stay identical so that saved
        # checkpoints keep loading.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(128, 256, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 5, stride=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 9, stride=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 3, 17, stride=1),
            nn.Tanh(),
        )

    def forward(self, x):
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return latent, reconstruction
# + [markdown] id="mF_7fi7xM5Er" colab_type="text"
# # Training
# + [markdown] id="p8Wr2tNYNgcP" colab_type="text"
# ้ๅ้จๅๅฐฑๆฏไธป่ฆ็่จ็ทด้ๆฎตใ
# ๆๅๅ
ๅฐๆบๅๅฅฝ็ dataset ็ถไฝๅๆธ้คต็ตฆ dataloaderใ
# ๅฐ dataloaderใmodelใloss criterionใoptimizer ้ฝๆบๅๅฅฝไนๅพ๏ผๅฐฑๅฏไปฅ้ๅง่จ็ทดใ
# ่จ็ทดๅฎๆๅพ๏ผๆๅๆๅฐ model ๅญไธไพใ
# + id="uKZ9rhK-2b76" colab_type="code" outputId="ea7e3428-a8f2-4d24-eb42-c55639591c1e" colab={"base_uri": "https://localhost:8080/", "height": 1000} tags=[]
import torch
from torch import optim

# Training setup: model on GPU, MSE reconstruction loss, Adam optimizer.
model = AE().cuda()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5, weight_decay=1e-5)
model.train()
n_epoch = 100
# NOTE(review): the seed is fixed AFTER the model is constructed, so the
# weight initialization itself is not reproducible; call same_seeds(0)
# before AE() if full reproducibility is required.
same_seeds(0)
# Prepare the dataloader (model, loss criterion and optimizer are ready above).
img_dataloader = DataLoader(img_dataset, batch_size=64, shuffle=True)
# Main training loop.
for epoch in range(n_epoch):
    for data in img_dataloader:
        img = data
        img = img.cuda()
        output1, output = model(img)
        loss = criterion(output, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Save a checkpoint every 10 epochs.
    if (epoch+1) % 10 == 0:
        torch.save(model.state_dict(), './checkpoints/checkpoint_{}.pth'.format(epoch+1))
    print('\r epoch [{}/{}], loss:{:.5f}'.format(epoch+1, n_epoch, loss.data), end=' ')
# Save the final model once training is done.
torch.save(model.state_dict(), './checkpoints/last_checkpoint.pth')
# + [markdown] colab_type="text" id="HhU5gcRhlTE1"
# # Dimension Reduction & Clustering
# + id="jrn7UhtLyB4n" colab_type="code" colab={}
import numpy as np
def cal_acc(gt, pred):
    """Compute the categorization accuracy of our task.

    Args:
        gt: ground-truth labels, shape (N,)
        pred: predicted labels, shape (N,)
    Returns:
        Accuracy as a scalar in [0.5, 1].
    """
    n_correct = int((gt == pred).sum())
    acc = n_correct / gt.shape[0]
    # This is binary *unsupervised* clustering: cluster ids are arbitrary,
    # so the complementary labelling is equally valid — take max(acc, 1-acc).
    return max(acc, 1 - acc)
# + id="hl9skAvMOvSV" colab_type="code" colab={}
import matplotlib.pyplot as plt
def plot_scatter(feat, label, savefig=None):
    """Scatter-plot a 2-D embedding, colouring each point by its label.

    Args:
        feat: (N, 2) array of 2-D coordinates from the clustering result.
        label: (N,) array of ground-truth labels (0/1) used as colours.
        savefig: optional path; when given, the figure is also written there.
    Returns:
        None
    """
    xs, ys = feat[:, 0], feat[:, 1]
    plt.scatter(xs, ys, c=label)
    plt.legend(loc='best')
    if savefig is not None:
        plt.savefig(savefig)
    plt.show()
# + [markdown] id="c0QvyGtKOp_p" colab_type="text"
# ๆฅ่ๆๅไฝฟ็จ่จ็ทดๅฅฝ็ model๏ผไพ้ ๆธฌ testing data ็้กๅฅใ
#
# ็ฑๆผ testing data ่ training data ไธๆจฃ๏ผๅ ๆญคๆๅไฝฟ็จๅๆจฃ็ dataset ไพๅฏฆไฝ dataloaderใ่ training ไธๅ็ๅฐๆนๅจๆผ shuffle ้ๅๅๆธๅผๅจ้้ๆฏ Falseใ
#
# ๆบๅๅฅฝ model ่ dataloader๏ผๆๅๅฐฑๅฏไปฅ้ฒ่ก้ ๆธฌไบใ
#
# ๆๅๅช้่ฆ encoder ็็ตๆ๏ผlatents๏ผ๏ผๅฉ็จ latents ้ฒ่ก clustering ไนๅพ๏ผๅฐฑๅฏไปฅๅ้กไบใ
# + id="DBnn6RlncV-j" colab_type="code" outputId="4f3da19c-9f55-4204-92e8-3ee2a5c24698" colab={"base_uri": "https://localhost:8080/", "height": 101} tags=[]
import torch
from sklearn.decomposition import KernelPCA
from sklearn.manifold import TSNE
from sklearn.cluster import MiniBatchKMeans
def inference(X, model, batch_size=256):
    """Run the encoder over X and return the flattened latent vectors.

    Args:
        X: raw images (N, H, W, C); preprocessed internally.
        model: trained AE (expected on the GPU).
        batch_size: minibatch size for the forward passes.
    Returns:
        float32 ndarray of shape (N, latent_dim).
    """
    data = preprocess(X)
    loader = DataLoader(Image_Dataset(data), batch_size=batch_size, shuffle=False)
    chunks = []
    for batch in loader:
        batch = torch.FloatTensor(batch)
        vec, rec = model(batch.cuda())
        chunks.append(vec.view(rec.size()[0], -1).cpu().detach().numpy())
    latents = np.concatenate(chunks, axis=0)
    print('Latents Shape:', latents.shape)
    return latents
def predict(latents):
    """Reduce latents to 2-D and cluster them into two groups.

    Pipeline: kernel PCA (rbf) to 200 dims, then t-SNE to 2 dims,
    then MiniBatchKMeans with k=2.  Returns (labels, 2-D embedding).
    """
    # First dimension reduction: kernel PCA down to 200 components.
    kpca = KernelPCA(n_components=200, kernel='rbf', n_jobs=-1).fit_transform(latents)
    print('First Reduction Shape:', kpca.shape)
    # Second dimension reduction: t-SNE down to 2 components.
    X_embedded = TSNE(n_components=2).fit_transform(kpca)
    print('Second Reduction Shape:', X_embedded.shape)
    # Clustering the 2-D embedding into two groups.
    km = MiniBatchKMeans(n_clusters=2, random_state=0).fit(X_embedded)
    labels = np.array([int(v) for v in km.labels_])
    return labels, X_embedded
def invert(pred):
    """Flip 0/1 cluster labels (|p - 1| == |1 - p| for every entry)."""
    return np.abs(pred - 1)
def save_prediction(pred, out_csv='prediction.csv'):
    """Write predictions as a kaggle-style CSV with header 'id, label'."""
    with open(out_csv, 'w') as f:
        f.write('id, label\n')
        f.writelines(f'{i},{p}\n' for i, p in enumerate(pred))
    print(f'Save prediction to {out_csv}.')
# load model
model = AE().cuda()
model.load_state_dict(torch.load('./checkpoints/last_checkpoint.pth'))
model.eval()
# ๆบๅ data
trainX = np.load('trainX_new.npy')
# ้ ๆธฌ็ญๆก
latents = inference(X=trainX, model=model)
pred, X_embedded = predict(latents)
# ๅฐ้ ๆธฌ็ตๆๅญๆช๏ผไธๅณ kaggle
save_prediction(pred, 'prediction.csv')
# ็ฑๆผๆฏ unsupervised ็ไบๅ้กๅ้ก๏ผๆๅๅชๅจไนๆๆฒๆๆๅๅฐๅ็ๅๆๅ
ฉ็พค
# ๅฆๆไธ้ข็ๆชๆกไธๅณ kaggle ๅพๆญฃ็ขบ็ไธ่ถณ 0.5๏ผๅช่ฆๅฐ label ๅ้ไพๅฐฑ่กไบ
save_prediction(invert(pred), 'prediction_invert.csv')
# + [markdown] id="snHP0s9kciEn" colab_type="text"
# Problem 1.b (ไฝๅ)
# ===
# ๅฐ val data ็้็ถญ็ตๆ (embedding) ่ไปๅๅฐๆ็ label ็ซๅบไพใ
# + id="iyNe4gUEcAhZ" colab_type="code" outputId="84031586-97c8-4164-fe64-858b0bb52fd1" colab={"base_uri": "https://localhost:8080/", "height": 366} tags=[]
valX = np.load('valX.npy')
valY = np.load('valY.npy')
# ==============================================
# Below we plot the baseline model's embedding; for the report you also
# need to produce the corresponding plot for the improved model.
# ==============================================
model.load_state_dict(torch.load('./checkpoints/last_checkpoint.pth'))
model.eval()
latents = inference(valX, model)
pred_from_latent, emb_from_latent = predict(latents)
acc_latent = cal_acc(valY, pred_from_latent)
print('The clustering accuracy is:', acc_latent)
print('The clustering result:')
plot_scatter(emb_from_latent, valY, savefig='p1_baseline.png')
# + [markdown] id="vd10OPt5cOqi" colab_type="text"
# Problem 2
# ===
# ไฝฟ็จไฝ test accuracy ๆ้ซ็ autoencoder๏ผๅพ trainX ไธญ๏ผๅๅบ index 1, 2, 3, 6, 7, 9 ้ 6 ๅผตๅ็
# ็ซๅบไปๅ็ๅๅไปฅๅ reconstruct ไนๅพ็ๅ็ใ
#
# + id="D1OYYPEqcCpl" colab_type="code" outputId="107e2ffa-747d-4b8e-8389-2433ee921b52" colab={"base_uri": "https://localhost:8080/", "height": 269} tags=[]
import matplotlib.pyplot as plt
import numpy as np
# Plot the original images (top row).
plt.figure(figsize=(10,4))
indexes = [1,2,3,6,7,9]
imgs = trainX[indexes,]
for i, img in enumerate(imgs):
    plt.subplot(2, 6, i+1, xticks=[], yticks=[])
    plt.imshow(img)
# Plot the reconstructed images (bottom row); the model outputs [-1, 1],
# so map back to [0, 1] for imshow and restore the (H, W, C) layout.
inp = torch.Tensor(trainX_preprocessed[indexes,]).cuda()
latents, recs = model(inp)
recs = ((recs+1)/2).cpu().detach().numpy()
recs = recs.transpose(0, 2, 3, 1)
for i, img in enumerate(recs):
    plt.subplot(2, 6, 6+i+1, xticks=[], yticks=[])
    plt.imshow(img)
plt.tight_layout()
# + [markdown] id="hNhjaEnOcdtR" colab_type="text"
# Problem 3
# ===
# ๅจ autoencoder ็่จ็ทด้็จไธญ๏ผ่ณๅฐๆ้ธ 10 ๅ checkpoints
# ่ซ็จ model ็ train reconstruction error ๅฐ val accuracy ไฝๅ
# ็ฐกๅฎ่ชชๆไฝ ่งๅฏๅฐ็็พ่ฑก
#
# + id="IAc9Ha55cDOr" colab_type="code" outputId="66f252df-78c0-4e88-94d6-3de776496cbf" colab={"base_uri": "https://localhost:8080/", "height": 1000} tags=[]
import glob
# Evaluate every saved checkpoint: train reconstruction error vs val accuracy.
checkpoints_list = sorted(glob.glob('checkpoints/checkpoint_*.pth'))
# load data
dataset = Image_Dataset(trainX_preprocessed)
dataloader = DataLoader(dataset, batch_size=64, shuffle=False)
points = []
with torch.no_grad():
    for i, checkpoint in enumerate(checkpoints_list):
        print('[{}/{}] {}'.format(i+1, len(checkpoints_list), checkpoint))
        model.load_state_dict(torch.load(checkpoint))
        model.eval()
        err = 0
        n = 0
        for x in dataloader:
            x = x.cuda()
            _, rec = model(x)
            err += torch.nn.MSELoss(reduction='sum')(x, rec).item()
            n += x.flatten().size(0)  # total number of pixel values seen
        print('Reconstruction error (MSE):', err/n)
        latents = inference(X=valX, model=model)
        pred, X_embedded = predict(latents)
        acc = cal_acc(valY, pred)
        print('Accuracy:', acc)
        points.append((err/n, acc))
# + id="_vIXEr5jsFUh" colab_type="code" outputId="0004de34-ebf8-4f39-a0f9-1e097d0a871e" colab={"base_uri": "https://localhost:8080/", "height": 390}
# Plot reconstruction error and validation accuracy per checkpoint.
errs, accs = zip(*points)
fig = plt.figure(figsize=(6, 6))
ax_err = plt.subplot(211, title='Reconstruction error (MSE)')
ax_err.plot(errs)
ax_acc = plt.subplot(212, title='Accuracy (val)')
ax_acc.plot(accs)
plt.show()
# -
| hw9/hw9_unsupervised.ipynb |
-- -*- coding: utf-8 -*-
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
-- # 2 Context-Free Languages
-- ----
-- ## 2.1 Context-free Grammars
--
-- ### Formal definition of a context-free grammar
--
-- $G = (V,\Sigma,R,S)$
--
-- $\Gamma = V\cup\Sigma$
--
-- $u\Rightarrow w$๋ผ๋ derivation์
-- $u=v_1 A v_2$ ์ ๊ท์น $A\to w \in R$๋ฅผ ์ ์ฉํด $v=v_1 w v_2$๋ฅผ ์ป์ ์ ์์์ ๋ปํ๋ค.
-- ($u,v,w,v_1,v_2\in \Gamma^*$, $A\in V$)
--
-- $G$๊ฐ ๋ง๋ค์ด๋ด๋ ์ธ์ด๋ $\{ w \mid w\in\Sigma^*, S\Rightarrow^*w \}$
--
-- ### Examples of context-free grammars
--
-- ### Designing context-free grammars
--
-- ### Ambiguity
--
-- +
-- A grammar symbol in Γ = V ∪ Σ, represented as a String.
type Gamma = String
-- An intermediate (sentential) string over Γ.
type GString = [Gamma]
-- A context-free grammar G = (V, Σ, R, S): variables, terminals, rules, start.
newtype CFG = CFG ([Gamma],[Gamma],[Rule],Gamma) deriving Show
-- A production rule  A :-> w  standing for A → w.
data Rule = Gamma :-> GString deriving (Eq,Ord,Show)
-- -
-- The grammar G3 from the textbook
-- Grammar G3: S → aSb | SS | ε  (one variable, terminals a and b).
cfgG3 = CFG (["S"],["a","b"],rs,"S")
  where
    rs = [ "S" :-> ["a","S","b"]
         , "S" :-> ["S","S"]
         , "S" :-> []
         ]
-- +
import Data.List
zip (inits [1,2,3,4]) (tails [1,2,3,4])
-- +
-- One derivation step: rewrite ANY variable occurrence in gs with any rule.
step (CFG(vs,as,rs,_)) gs = -- any step
  [ gsL++gs'++gsR | (gsL, v : gsR) <- zip (inits gs) (tails gs),
                    v `elem` vs, -- v only needs to be a variable
                    (v' :-> gs') <- rs, v==v' ]
-- One leftmost derivation step: only the leftmost variable may be rewritten.
lstep (CFG(vs,as,rs,_)) gs = -- leftmost step
  [ gsL++gs'++gsR | (gsL, v : gsR) <- zip (inits gs) (tails gs),
                    all (`elem` as) gsL, v `elem` vs, -- v is the leftmost variable
                    (v' :-> gs') <- rs, v==v' ]
-- -
-- All one-step successors of aSSb: general steps vs leftmost steps only.
mapM_ print $ step cfgG3 ["a","S","S","b"]
mapM_ print $ lstep cfgG3 ["a","S","S","b"]
-- +
-- derivation step operator
-- derivation step operator: append the next sentential form to a derivation
infixl 5 .=>
(.=>) :: [GString] -> GString -> [GString]
gss .=> gs = gss++[gs]
-- A derivation starting from a single sentential form.
derivation = (:[])
-- valid derivation judgement: every adjacent pair must be one legal step
infix 4 |-
(|-) :: CFG -> [GString] -> Bool
cfg |- gss
  | length gss < 2 = error "derivation must have at least one step"
  | otherwise = and [gs2 `elem` step cfg gs1 | (gs1,gs2) <- zip gss (tail gss)]
-- valid leftmost derivation judgement (same check, leftmost steps only)
infix 4 ||-
(||-) :: CFG -> [GString] -> Bool
cfg ||- gss
  | length gss < 2 = error "derivation must have at least one step"
  | otherwise = and [gs2 `elem` lstep cfg gs1 | (gs1,gs2) <- zip gss (tail gss)]
-- -
-- A sample derivation value, plus validity checks against G3.
derivation ["S"] .=> ["a","S","b"] .=> ["a","b"]
-- A zero-step "derivation" is rejected with a runtime error (fewer than 2 forms).
cfgG3 |- derivation ["S"]
cfgG3 |- derivation ["S"] .=> ["a","S","b"]
cfgG3 |- derivation ["S"] .=> ["a","S","b"] .=> ["a","b"]
-- aSb cannot step to itself under G3's rules.
cfgG3 |- derivation ["S"] .=> ["a","S","b"] .=> ["a","S","b"]
cfgG3 |- derivation ["S"] .=> ["a","S","b"] .=> ["a","S","S","b"]
-- +
-- The same three-step derivation, checked as a general and as a leftmost one.
cfgG3 |- derivation ["S"] .=> ["a","S","b"]
                          .=> ["a","S","S","b"]
                          .=> ["a","S","a","S","b","b"]
cfgG3||- derivation ["S"] .=> ["a","S","b"]
                          .=> ["a","S","S","b"]
                          .=> ["a","S","a","S","b","b"]
-- +
-- G3 is an ambiguous CFG because multiple leftmost derivations exist for S =>* ab
cfgG3||- derivation ["S"] .=> ["a","S","b"]
                          .=> ["a","b"]
cfgG3||- derivation ["S"] .=> ["a","S","b"]
                          .=> ["a","S","S","b"]
                          .=> ["a","S","b"]
                          .=> ["a","b"]
-- +
-- If the parse tree for a string is unique, that string is not ambiguous.
import Data.Tree
-- data Tree a = Node a [Tree a] -- first field: rootLabel, second: subForest
tree1 =
  Node "S" [ Node "a" []  -- terminal: no children
           , Node "S" []  -- epsilon
           , Node "b" []  -- terminal: no children
           ]
putStrLn $ drawTree tree1
-- +
-- A second parse tree for the same string, using S → SS in the middle.
tree2 =
  Node "S" [ Node "a" []             -- terminal: no children
           , Node "S" [ Node "S" []  -- epsilon
                      , Node "S" []  -- epsilon
                      ]
           , Node "b" []             -- terminal: no children
           ]
putStrLn $ drawTree tree2
-- -
-- ### Chomsky normal form
-- CFG ๊ท์น์ $A \to BC$์ $A\to a$์ ํํ($B$์ $C$๋ ์์ ๋ณ์ ์๋)๋ง ์์ผ๋ฉด ์ด์คํค์ ๊ทํ(CNF)์ด๋ผ ํ๋ค.<br>
-- ๋จ, $\varepsilon$์ ํฌํจํ๋ ์ธ์ด ์ ์๋ ๊ฐ๋ฅํด์ผ ํ๋ฏ๋ก, ์์ธ์ ์ผ๋ก ์์ ๋ณ์ $S$์ ๋ํด์๋ง $S\to \varepsilon$ ๊ท์น์ ํ์ฉํ๋ค.
--
-- ๋ชจ๋ ๋ฌธ๋งฅ์์ ์ธ์ด(CFL)๋ฅผ CNF๋ก ๋ํ๋ผ ์ ์๋ค๋ ์ฑ์ง์ด ์๋ ค์ ธ ์๋ค.<br>
-- ์ด๋ ๋ชจ๋ ๋ฌธ๋งฅ์์ ๋ฌธ๋ฒ(CFG)์ ๋๊ฐ์ ์ธ์ด๋ฅผ ์์ฑํ๋ CNF๋ก ๋ณํ ๊ฐ๋ฅํจ์ ๋ณด์์ผ๋ก์จ ์ฆ๋ช
๊ฐ๋ฅํ๋ค.
--
-- ๊ธฐ๋ณธ์ ์ธ ์์ด๋์ด๋
--
-- * $A\to \varepsilon$ ํํ์ $\varepsilon$๊ท์น๊ณผ $A\to B$ ํํ์ unit๊ท์น์ ์ ๊ฑฐํ๊ณ
-- * ๊ทธ๋ฐ ํํ์ ๊ท์น๋ค์ด ๋ง๋ค์ด๋ด๋ ๋ฌธ์์ด์ CNF๋ฅผ ๋ง์กฑํ๋ ๋ค๋ฅธ ๊ท์น๋ค๋ก ๋ง๋ค์ด๋ด๋๋ก
--
-- ๋์ฒดํ๋ ๋ฐฉ๋ฒ๋ง ์ฐพ์๋ด๋ฉด ๋๋ค.
-- ๊ทธ ์ธ์ ์ข๋ ๊ณ ์น๊ธฐ ์ฌ์ด ํํ์ ๊ท์น๋ค,<br>
-- ์๋ฅผ ๋ค๋ฉด $A \to B_1 B_2 C_1 C_2$๋
-- ์๋ก์ด ๋ณ์ $B_0$์ $C_0$๋ฅผ ๋์
ํด ์๋ ๊ท์น๋ค๋ก ๋์ฒดํ๋ฉด ๋๋ค.
--
-- $A\to B_0C_0$<br>
-- $B_0\to B_1 B_2$<br>
-- $C_0\to C_1 C_2$
--
-- CNF๊ฐ ์ด๋ก ์ ์ ๊ฐํ๊ณ ํน์ ์๊ณ ๋ฆฌ๋ฌ์ ์ค๋ช
ํ๋ ๋ฐ ์๊ธดํ๊ฒ ์ฐ์ธ๋ค.<br>
-- ๊ทธ๋ฌ๋ CNF๋ฅผ ์ต์ํ๋ ํน์ ์ต์ ํ๋ ๋ฌธ๋ฒ๊ท์น์ด๋ผ ๋งํ ์๋ ์๋ค.<br>
-- CFG์๋ ์ต์ํ๋ ๋ฌธ๋ฒ์ด๋ผ๋ ๊ฐ๋
์ด ์ผ๋ฐ์ ์ผ๋ก ์ ์ ์๋์ง ์๋๋ค.<br>
-- (์ฐธ๊ณ ๋ก, ๊ฒฐ์ ์ ์ ํ์คํ ๋งํ(DFA)์ ๊ฒฝ์ฐ์๋ ์ํ ๊ฐ์๋ผ๋ ๋ช
ํํ ๊ธฐ์ค์ผ๋ก ์ต์ํ๋ DFA๋ก ๋ณํํ๋ ๊ฒ์ด ๊ฐ๋ฅ)
-- ----
-- ## 2.2 Pushdown Automata
--
-- ### Pushdown Automata
--
-- ### Formal definition of a pushdown automaton
--
-- $M = (Q,\Sigma,\Gamma,\delta,q_0,F)$
--
-- 1. $Q$๋ ์ํ์ ์ ํ์งํฉ
-- 1. $\Sigma$๋ ์
๋ ฅ ์ํ๋ฒณ (์
๋ ฅ ๋ฌธ์์ด์ ๊ตฌ์ฑํ๋ ์ฌ๋ณผ์ ์ ํ์งํฉ)
-- 1. $\Gamma$๋ ์คํ ์ํ๋ฒณ (์คํ์ ์ ์ฅ๋ ์ ์๋ ์ฌ๋ณผ์ ์ ํ์งํฉ)
-- 1. $\delta : Q\times \Sigma_\varepsilon \times \Gamma_\varepsilon \to \wp(Q\times \Gamma_\varepsilon)$๋ ์ ์ดํจ์
-- 1. $q_0\in Q$๋ ์์์ํ
-- 1. $F\subseteq Q$๋ ๋ฐ์๋ค์ฌ์ง๋ ์ํ(accept state) ํน์ ์ข
๋ฃ ์ํ(final state)๋ผ๋๋ ํ๋ค
--
-- ๊ผญ ๊ทธ๋ ๊ฒ ํด์ผ๋ง ํ๋ ๊ฒ์ ์๋์ง๋ง ๋๋ถ๋ถ์ ๊ฒฝ์ฐ $\Sigma$ ์ ์ฒด ํน์ $\Sigma$์ ๋ถ๋ถ์งํฉ์ $\Gamma$๊ฐ ํฌํจํ๋ ๊ฒฝ์ฐ๊ฐ ๋ง๋ค.<br>
-- ํฌํจํด ๋๊ณ ๋ ์ ์ดํจ์์์ ์ฌ์ฉ์ ์ํ๋ฉด (๊ทธ๋ฌ๋๊น ๊ฒฐ๊ณผ๋ก ๊ณต์งํฉ์ ๊ณ์ฐํ๋ฉด) ๋๋๊น ๊ทธ๋ฅ ๋ง์ ๊ฒฝ์ฐ $\Sigma \subset \Gamma$๋ผ๊ณ ์๊ฐํ๊ณ ์ค๊ณํด๋ ๋๋ค.<br>
-- ์ ์์์ CFG๋ฅผ ์ค๋ช
ํ ๋ $\Gamma = \Sigma \cup V$ ๋ผ๊ณ ํ $\Gamma$๋ผ๋ ๊ธฐํธ๋ฅผ ์ฌ์ฉํ๋ ๊ฒ์ด ๋ฐ๋ก ์ด๋ฌํ ๋งฅ๋ฝ์์๋ค.<br>
-- ์์ผ๋ก ๋ฐฐ์ธ PDA๊ด๋ จ ์ ์์ CFG๋ฅผ PDA๋ก ์ฎ๊ธธ ๋ ๋๋ต ์คํ ์ฌ๋ณผ์ $\Sigma \cup V$๋ฅผ ํฌํจํ๊ฒ ์ค๊ณํ๋ฉด ๋๊ธฐ ๋๋ฌธ์ ...
--
--
-- $w = w_1w_2\cdots w_m$์ (๋จ, $w_i\in \Sigma_\varepsilon$) PDA๊ฐ ๋ฐ์๋ค์ธ๋ค๋ ๋ป์<br>
-- ์ผ๋ จ์ ์ํ $r_0,r_1,\ldots,r_m \in Q$์ ์ผ๋ จ์ ์คํ $s_0,s_1,\ldots,s_m\in\Gamma^*$๊ฐ ์์ด<br>
-- ๋ค์๊ณผ ๊ฐ์ด ๊ณ์ํด์ ์ ์ดํจ์๋ฅผ ๋ฐ๋ผ๊ฐ๋ค ๋ณด๋ฉด ์ข
๋ฃ์ํ์์ ๋๋๋ ๊ฒฝ๋ก๊ฐ ํ๋๋ผ๋ ์กด์ฌํ๋ค๋ ์๋ฏธ์ด๋ค.<br>
-- ๋จ, ์ด๊ธฐ ์ํ $r_0 = q_0$์ด๊ณ ์ด๊ธฐ ์คํ $s_0 = \varepsilon$๋ก ์์ํ๋ค.
--
-- $\exists (r_1,b_1) \in\delta(r_0,w_1,b_0)$ ๋จ, $s_0 = b_0 t_1, s_1 = b_1 t_1$
--
-- $\exists (r_2,b_2) \in\delta(r_1,w_2,b_1)$ where $s_1 = b_1 t_2, s_2 = b_2 t_2$
--
-- $\vdots$
--
-- $\exists (r_m,b_m) \in\delta(r_{m-1},w_m,b_{m-1})$ where $s_{m-1} = b_{m-1} t_m, s_m = b_m t_m$
--
-- ๋ง์ง๋ง ์ํ $r_m\in F$. (์์์ $b_i \in \Gamma_\varepsilon$, $t_i\in\Gamma^*$)
--
-- ### Examples of pushdown automata
-- ์ฑ
์ ๋์จ ๋๋ก ์์ ๊ฐ์ด ์์ฑํ ๊ฒ์ NFA๋ ์ต๋ํ ๋น์ทํ๊ฒ ์ดํด๋ฅผ ๋๊ณ ์ ํจ์ด๋ค.<br>
-- ํ์ง๋ง ๋น๊ฒฐ์ ์ ์ผ๋ก๋ผ๋ ์ ๋ฐ ์์ผ๋ก ๊ณ์ฐ์ ์งํํ๋ ๊ฒ์ ๋ฒ๊ฑฐ๋กญ๋ค.<br>
-- ๊ทธ๋์ PDA ์ํ์ ์คํ์ ํ ๋ฉ์ด๋ฆฌ๋ก ๋ฌถ์ด ์ข
ํฉ์ ์ธ PDA์ ์ํ๋ก ์๊ฐํ๋ ๊ฒ์ด ํธํ๋ค.<br>
-- ๊ทธ๋ฆฌ๊ณ ๋น๊ฒฐ์ ์ ๊ณ์ฐ์ด๋ฏ๋ก ์ข
ํฉ์ ์ธ PDA์ํ์ ์งํฉ์์ ์งํฉ์ผ๋ก ๊ฐ๋ ํจ์๋ฅผ ๋ง๋ค์ด ํ์ฉํ๋ค.<br>
-- ์ฆ, $\delta$๋ก๋ถํฐ ์๋์ ๊ฐ์ $\hat\delta : \wp(Q\times \Gamma^*) \times \Sigma_\varepsilon \to \wp(Q\times \Gamma^*)$ ํจ์๋ฅผ ๋ง๋ค์ด ๊ณ์ฐํ์๋ ์ด์ผ๊ธฐ.
--
-- $\hat\delta(C,x) = \{ (r',b't) \mid (r,bt)\in C, (r',b't)\in\delta(r,x,b) \}$
--
-- ๊ทธ๋ฆฌ๊ณ ์ด๊ฒ์ ๋ ํธํ๊ฒ ํ๊ธฐ ์ํด $\delta$๋ก๋ถํฐ $\delta': Q\times \Sigma_\varepsilon \times \Gamma^* \to \wp(Q\times \Gamma^*)$ ํจ์๋ฅผ ๋ง๋ค์ด ์๋์ ๊ฐ์ด ์ ์ํ ์ ์๋ค.
--
-- $\hat\delta(C,z) = \{ (r',s') \mid (r,s)\in C, (r',s')\in\delta'(r,z,s) \}$
--
-- ์์ ๊ฐ์ด ์ ์ํ $\hat\delta$๋ฅผ ์ด์ฉํ๋ฉด ์
๋ ฅ ๋ฌธ์์ด $w=w_1 \ldots w_m$์ด ์ด๋ ๊ฒ ํํํ ์ ์๋ค
--
-- $\exists (r_m, s_m) \in \hat\delta(\ldots(\hat\delta(\hat\delta(\{(r_0,s_0)\},w_1),w_2),\ldots),w_m)$ such that $r_m\in F$
--
-- ์ด์ ๊ฐ์ ๊ณ์ฐ ํจํด์ ํจ์ํ ํ๋ก๊ทธ๋๋ฐ์์ foldl์ ํด๋นํ๋ค.
-- +
import Data.List (union)
-- see p. 113 Definition 2.13
-- A pushdown automaton M = (Q, Σ, Γ, δ, q0, F); transitions map
-- (state, input token, stack top) to a set of (state, replacement).
newtype PDA q a = PDA ([q], [a], [a], (q,[a],[a])->[(q,[a])], q, [q])
-- In practice only the case a = String is used.
-- The "delta hat" function of a given PDA: advance every aggregate
-- configuration (state, whole stack) on one input token z, trying the
-- transition both with and without consuming the stack top.
stepPDA :: (Eq q, Eq a) => PDA q a -> [(q,[a])] -> [a] -> [(q,[a])]
stepPDA (PDA(qs,as,gs,d,q0,fs)) cs z = [ (r',s') | (r,s) <- cs, (r',s') <- d'(r,z,s)]
  where
    d'(r,x,g:gs) = [ (r',b'++gs) | (r',b') <- d(r,x,[g]) ]   -- b==[g], t==gs
           `union` [ (r',b'++g:gs) | (r',b') <- d(r,x,[] ) ] -- b==[], t==g:gs
    d'(r,x,[]) = d(r,x,[])
-- -
-- The textbook PDA M1 for the language { 0^n 1^n | n >= 0 }.
pdaM1 = PDA (["q1","q2","q3","q4"],["0","1"],["0","$"],d,"q1",["q1","q4"])
  where
    d("q1",[] ,[] )   = [("q2",["$"])]  -- push the bottom marker
    d("q2",["0"],[] ) = [("q2",["0"])]  -- push one symbol per 0 read
    d("q2",["1"],["0"]) = [("q3",[] )]  -- first 1: start popping
    d("q3",["1"],["0"]) = [("q3",[] )]  -- pop one 0 per 1
    d("q3",[] ,["$"]) = [("q4",[] )]    -- marker resurfaces: accept
    d(_ ,_ ,_ )       = []
-- for input string 0011 = ε0011ε (ε-moves inserted by hand)
stepPDA pdaM1 [("q1",[] )] []
stepPDA pdaM1 [("q2",["$"])] ["0"]
stepPDA pdaM1 [("q2",["0","$"])] ["0"]
stepPDA pdaM1 [("q2",["0","0","$"])] ["1"]
stepPDA pdaM1 [("q2",["0","$"])] ["1"]
stepPDA pdaM1 [("q3",["$"])] []
-- The whole run folded over the (ε-padded) input in one go.
foldl (stepPDA pdaM1) [("q1",[])] [ [], ["0"], ["0"], ["1"], ["1"], [] ]
-- foldl๋ก ํ๋๊น ์ข ๊ฐํธํ์ง๊ธด ํ๋๋ฐ ๊ทธ๋๋ ์
๋ ฅ ๋ฌธ์์ด์ ์ฒ๋ฆฌํ๋ ๋์ค์ ์ด๋์ $\varepsilon$์ ์ผ๋ง๋งํผ ๋ฃ์ ๊ฒ์ธ๊ฐ๋ฅผ ์ ์๊ฐํด์ ํ์ํ ๊ณณ์ ์ถ๊ฐํด ์ฃผ์ด์ผ ํ๋ค.
--
-- NFA์์๋ ๊ฐ๋จํ๊ฒ ์
๋ ฅ ๋ฌธ์์ด์์์ $\varepsilon$์ ์ฒ๋ฆฌํ๋ ๊ฑฐ ๊ฐ์๋ PDA๋ ๊ทธ๋ ๊ฒ ์ ๋ ๊น?
-- NFA๋งํผ ๊ฐ๋จํ์ง๋ ์๋ค.<br>
-- ์
๋ ฅ ๋ฌธ์์ด $\varepsilon$์ฒ๋ฆฌ์ ํจ๊ป ์คํ์๋ ๋ณํ๊ฐ ์ผ์ด๋ ์ ์๊ธฐ ๋๋ฌธ์ ์คํ์ด ๋ณํํ๋ ๊ฐ๋ฅ์ฑ๊น์ง๋ฅผ ๋ชจ๋ ๊ณ ๋ คํด์ ํ๊ธฐ ๋๋ฌธ์ด๋ค.<br>
-- ์ฆ, ์
๋ ฅ ๋ฌธ์์ด ๋์ค์ ์ด๋ค ์์ ์ $\varepsilon$์ ์์์ ๊ฐ์ ํ์ฉํ์ ๋ ์คํ์ ํฌํจํ ์ข
ํฉ์ ์ธ PDA์ํ์ closure๊ฐ ์๊ธด๋ค๋ ๋ณด์ฅ์ด ์๋ค.<br>
-- ์๋ฅผ ๋ค์ด ์
๋ ฅ ๋ฌธ์์ด์ ์ด๋ค $\varepsilon$ ์ฒ๋ฆฌ๋ฅผ ํ ๋๋ง๋ค ์คํ์ ๋ฌด์์ธ๊ฐ ์๋๋ค๊ณ ํ๋ฉด ํ์๋ฅผ ๋ฐ๋ณตํ ์๋ก ๊ณ์ ์ ์ ๋ณด์ง ๋ชปํ๋ ๋ค๋ฅธ ์คํ์ด ๋ง๋ค์ด์ง๊ฒ ๋๋ค.
-- ### Equivalence with context-free grammars
--
-- CFG๋ฅผ PDA๋ก ๋ณํํ๋ ์์ ๋ฅผ ํ๋๋ง ์ดํด๋ณด์. (๋ฐ๋๋ก ํ๋ ๊ฒ์ ์๊ฐ์ ์์
์์๋ ๋ค๋ฃจ์ง ๋ชปํ๊ณ ๋์ด๊ฐ๋ค.)
--
-- Example 2.25
--
-- * ๊ท์น S1: $S \to \mathtt{a}T\mathtt{b}$
-- * ๊ท์น S2: $S \to \mathtt{b}$
-- * ๊ท์น T1: $T \to T\mathtt{a}$
-- * ๊ท์น T2: $T \to \varepsilon$
--
-- ๊ธธ์ด $n \ge 2$์ธ intermediate string์ ์์ฑํ๋ ๊ท์น๋ง๋ค $n-1$๊ฐ์ ์ถ๊ฐ ์ํ ํ์. (๊ท์น S1, T1)
--
-- ๊ธธ์ด $n \le 1$์ธ intermediate string์ ์์ฑํ๋ ๊ท์น์๋ ์ถ๊ฐ ์ํ ํ์ ์์. (๊ท์น S2, T2)
--
-- (์ ์์ CFG ํ๋ก๊ทธ๋จ์์๋ intermediat string์ ํด๋นํ๋ ๊ฒ์ GString์ด๋ผ๋ ํ์
๋ณ๋ช
์ผ๋ก ๋ถ๋ฆ)
-- Example 2.25 grammar G1: S → aTb | b,  T → Ta | ε
cfgG1 = CFG (["S","T"],["a","b"],rs,"S")
  where
    rs = [ "S" :-> ["a","T","b"]
         , "S" :-> ["b"]
         , "T" :-> ["T","a"]
         , "T" :-> []
         ]
-- Leftmost-derivation checks against G1.
cfgG1||- derivation
  ["S"] .=> ["b"]
cfgG1||- derivation
  ["S"] .=> ["a","T","b"]
        .=> ["a","T","a","b"]
        .=> ["a","a","b"]
-- NOTE(review): the middle form aaTb is not produced by T → Ta from aTb
-- (that gives aTab), so this judgement comes out False.
cfgG1||- derivation
  ["S"] .=> ["a","T","b"]
        .=> ["a","a","T","b"]
        .=> ["a","a","b"]
-- PDA P1 built from G1: simulate a leftmost derivation on the stack,
-- matching stack-top terminals against the input as they appear.
pdaP1 = PDA ( ["q0","qS","qL","qA", "qS1b","qS1T", "qT1a"]
            , ["a","b"]             -- Σ
            , ["a","b","$","S","T"] -- Γ = Σ ∪ { $ } ∪ V
            , d
            , "q0"
            , ["qA"] )
  where
    d("q0",[] ,[] ) = [("qS",["$"])]      -- push the bottom marker
    d("qS",[] ,[] ) = [("qL",["S"])]      -- push the start variable
    -- CFG production rules
    d("qL" ,[] ,["S"]) = [("qS1b",["b"])  -- S -> a T;b
                         ,("qL" ,["b"])]  -- S ->.b
    d("qS1b",[] ,[] ) = [("qS1T",["T"])]  -- S -> a;T b
    d("qS1T",[] ,[] ) = [("qL" ,["a"])]   -- S ->.a T b
    d("qL" ,[] ,["T"]) = [("qT1a",["a"])  -- T -> T;a
                         ,("qL" ,[] )]    -- T ->.ε
    d("qT1a",[] ,[] ) = [("qL" ,["T"])]   -- T ->.T a
    -- pop a terminal on the stack when it matches the next input symbol
    d("qL",["a"],["a"]) = [("qL",[] )]
    d("qL",["b"],["b"]) = [("qL",[] )]
    -- to accept state once only the bottom marker remains
    d("qL",[] ,["$"]) = [("qA",[] )]
    -- default
    d(_,_,_) = []
-- aab = εεεεεaεεεεabε — a hand-stepped accepting run of P1.
stepPDA pdaP1 [("q0",[])] []
stepPDA pdaP1 [("qS",["$"])] []
stepPDA pdaP1 [("qL",["S","$"])] []
stepPDA pdaP1 [("qS1b",["b","$"])
              ,("qL",["b","$"])] []
stepPDA pdaP1 [("qS1T",["T","b","$"])] []
stepPDA pdaP1 [("qL",["a","T","b","$"])] ["a"]
stepPDA pdaP1 [("qL",["T","b","$"])] []
stepPDA pdaP1 [("qT1a",["a","b","$"])
              ,("qL",["b","$"])] []
stepPDA pdaP1 [("qL",["T","a","b","$"])] []
stepPDA pdaP1 [("qT1a",["a","a","b","$"])
              ,("qL",["a","b","$"])] ["a"]
stepPDA pdaP1 [("qL",["b","$"])] ["b"]
stepPDA pdaP1 [("qL",["$"])] []
-- ----
-- ## Non-context-free Languages
--
-- ### The pumping lemma for context-free languages
-- ----
-- ## Deterministic Context-Free Languages
--
-- DPDA๋ PDA์์ ์ ์ดํจ์ ํจ์์ ๊ฒฐ๊ณผ๋ฅผ 1๊ฐ ์ดํ๋ก ๊ฐ์ ํ๋ฉฐ ์ถ๊ฐ๋ก ์
๋ ฅ ๋ฐ ์คํ์ ๋ํ $\varepsilon$ ์ฒ๋ฆฌ์ ๋น๊ฒฐ์ ์ฑ์ ์ ํํ๋ค.<br>
-- ์ฑ
์์๋ $\delta : Q\times \Sigma_\varepsilon \times \Gamma_\varepsilon \to (Q\times \Gamma_\varepsilon)\cup\{\emptyset\}$์ผ๋ก
-- ์ ์ดํจ์ ๊ฒฐ๊ณผ๋ฅผ 1๊ฐ ์ดํ๋ก ๊ฐ์ ํจ์ผ๋ก ํํํ๋ฉฐ,<br>
-- ์ถ๊ฐ์ ์ธ ์ ํ์ฌํญ์ด๋ ๋ชจ๋ $q\in Q$, $a\in\Sigma$, $x\in\Gamma$์ ๋ํด
-- $\delta(q,a,x)$, $\delta(q,a,\varepsilon)$, $\delta(q,\varepsilon,x)$, $\delta(q,\varepsilon,\varepsilon)$์ค
-- ๋ฑ ํ๊ฐ์ฉ๋ง $\emptyset$์ด ์๋์ด์ผ ํ๋ค.<br>
-- ์ด ์ถ๊ฐ์ ์ธ ์ ํ์ฌํญ์ ์
๋ ฅ์ด๋ ์คํ์ $\varepsilon$์ธ ๊ฒฝ์ฐ ์ ์ด๊ฐ $\varepsilon$์ด ์๋ ๊ฒฝ์ฐ์ ์ ์ด๋ฅผ ์ ํํ๋ ๊ฒ๊ณผ ๋น๊ฒฐ์ ์ ์ธ ์ํฉ์ด ๋ฐ์ํ๋ ๊ฒ์ ๋ฐฉ์งํ๋ ค๋ ๊ฒ์ด๋ค.
--
--
-- DPDA๋ ์ ์ ์์ฒด๋ก๋ ์ ํํ ์คํจ(finite failure)๋ฅผ ๋ณด์ฅํ์ง๋ ์๋๋ค. ($\varepsilon$์
๋ ฅ์ ๋ฌดํํ ์ฒ๋ฆฌํ๋ฉฐ ๋ ์ด์์ ์
๋ ฅ ๋ฌธ์์ด์ ์ฒ๋ฆฌ๋ฅผ ์งํํ์ง ๋ชปํ๋ ๊ฒฝ์ฐ ๋ฐ์ ๊ฐ๋ฅ)
-- +
{- -- The PDA M1 for { 0^n 1^n }, copied verbatim from the earlier definition
pdaM1 = PDA (["q1","q2","q3","q4"],["0","1"],["0","$"],d,"q1",["q1","q4"])
  where
    d("q1",[] ,[] ) = [("q2",["$"])]
    d("q2",["0"],[] ) = [("q2",["0"])]
    d("q2",["1"],["0"]) = [("q3",[] )]
    d("q3",["1"],["0"]) = [("q3",[] )]
    d("q3",[] ,["$"]) = [("q4",[] )]
    d(_ ,_ ,_ ) = []
-}
PDA(qs,as,gs,d,q0,fs) = pdaM1
-- The size of each set computed by d: all are at most 1 (DPDA-style).
[length(d(q,[a],[x])) | q<-qs, a<-as, x<-gs] ++
  [length(d(q,[a],[])) | q<-qs, a<-as] ++
  [length(d(q,[],[x])) | q<-qs, x<-gs] ++
  [length(d(q,[],[] )) | q<-qs]
putStr "=============="
-- But the extra DPDA condition requires each count below to be exactly 1,
-- and some of them are 0.
mapM_ print
  [((q,a,x), length $ filter (not.null)
      [d(q,[a],[x]),d(q,[a],[]),d(q,[],[x]),d(q,[],[])]) | q<-qs, a<-as,x<-gs]
putStr "=============="
mapM_ print . filter ((1/=).snd) $
  [((q,a,x), length $ filter (not.null) [d(q,[a],[x]),d(q,[a],[]),d(q,[],[x]),d(q,[],[])]) | q<-qs, a<-as,x<-gs]
-- ๊ทธ๋ฌ๋๊น ์ฑ
Example 2.40์์ ์ค๋ช
ํ๋ ๊ฒ์ฒ๋ผ ์คํจํ๋ ์ํ๋ฅผ ๋ง๋ค์ด ๋ฐฉ๊ธ 0์ธ ๊ฒฝ์ฐ๋ค์ ๋ํด
-- -
-- M1 extended with a dead state "q5" (cf. Example 2.40) so that exactly one
-- of the four transition cases is non-empty for every (q, a, x).
pdaM1' = PDA (["q1","q2","q3","q4","q5"],["0","1"],["0","$"],d,"q1",["q1","q4"])
  where
    d("q1",[] ,[] )   = [("q2",["$"])]
    d("q2",["0"],[] ) = [("q2",["0"])]
    d("q2",["1"],["0"]) = [("q3",[] )]
    d("q3",["1"],["0"]) = [("q3",[] )]
    d("q3",[] ,["$"]) = [("q4",[] )]
    -- every previously-missing case now fails into the dead state q5
    d("q2",["1"],["$"]) = [("q5",[] )]
    d("q3",["0"],["0"]) = [("q5",[] )]
    d("q4",["0"],["0"]) = [("q5",[] )]
    d("q4",["0"],["$"]) = [("q5",[] )]
    d("q4",["1"],["0"]) = [("q5",[] )]
    d("q4",["1"],["$"]) = [("q5",[] )]
    d("q5",[] ,[] )   = [("q5",[] )]
    d(_ ,_ ,_ )       = []
-- +
PDA(qs,as,gs,d,q0,fs) = pdaM1'
-- The size of each set computed by d: all are still at most 1.
[length(d(q,[a],[x])) | q<-qs, a<-as, x<-gs] ++
  [length(d(q,[a],[])) | q<-qs, a<-as] ++
  [length(d(q,[],[x])) | q<-qs, x<-gs] ++
  [length(d(q,[],[] )) | q<-qs]
putStr "=============="
-- Re-check the "exactly one non-empty case" DPDA condition for pdaM1'.
mapM_ print
  [((q,a,x), length $ filter (not.null) [d(q,[a],[x]),d(q,[a],[]),d(q,[],[x]),d(q,[],[])]) | q<-qs, a<-as,x<-gs]
putStr "=============="
mapM_ print . filter ((1/=).snd) $
  [((q,a,x), length $ filter (not.null) [d(q,[a],[x]),d(q,[a],[]),d(q,[],[x]),d(q,[],[])]) | q<-qs, a<-as,x<-gs]
-- -
-- ### Properties of DCFLs
--
-- Theorem 2.42: DCFL ์ธ์ด๋ ์ฌ์งํฉ(complementation)์ ๋ํด ๋ซํ ์๋ค.
--
-- ์ฐธ๊ณ ๋ก union, intersection, star, reverse ์ฐ์ฐ์ ๋ํด์๋ DCFL ์ธ์ด๋ ๋ซํ ์์ง ์๋ค.
--
-- ๊ทธ๋ฆฌ๊ณ ์ผ๋ฐ์ ์ผ๋ก CFL์ ์ฌ์งํฉ ์ฐ์ฐ์ ๋ํด ๋ซํ ์์ง ์๋ค.
--
-- Theorem 2.43: $A$๊ฐ DCFL์๊ณผ ๊ทธ endmarked language์ธ $A{\pmb{\dashv}}$๊ฐ DCFL์์ ํ์์ถฉ๋ถ์กฐ๊ฑด
--
-- $A{\pmb{\dashv}} ~=~ \{w\pmb{\dashv} \mid w\in A\}$
--
-- $A$์ ์ํ๋ฒณ์ด $\Sigma$์ผ ๋ $A{\pmb{\dashv}}$ ์ํ๋ฒณ์ $\Sigma\cup\{\pmb{\dashv}\}$์ด๋ค.
-- (๋จ, $\pmb{\dashv}\notin\Sigma$)
--
-- ๊ทธ๋์ ๋ชจ๋ DCFL์๋ ๊ทธ์ ๋์๋๋ endmarked langauge๊ฐ ์์์ ์ด์ฉํด<br>
-- endmarked language์ ๋ํ ๊ฒฝ์ฐ๋ง ๊ณ ๋ คํด ์ด๋ก ์ ์งํํด ์ผ๋ฐ์ ์ธ DCFL์ ๋ํ ์ฑ์ง์ ์ ์ถํ ์ ์๋ค.<br>
-- ๋ค์์ ๋์ค๋ DCFG์ ๋ํ ์ด๋ก ์ ์ ์ฌํ ๋๋ ๊ทธ๋ ๊ฒ ํ๋ค.
--
-- ์ฑ
์ p. 131 ๋์์ ๋๋ฒ์งธ ๋ฌธ๋จ์์๋ ์ธ๊ธํ์ง๋ง DPDA์ ๋ํ ๋
ผ์๋ ์ด๋ฐ ์์ผ๋ก ์์ ๋จ๊ณ๋ฅผ ๋ฐ์ ๋๊ฐ๋ฉด์ ์๊ธฐ์ ์ด ๋ง์ด ๋ค์ด๊ฐ๋ค.
-- ์๋ฅผ ๋ค๋ฉด ์ ํ์คํ ๋งํ์์๋ DFA์ ์ ์๋ก ์์ํ์ง๋ง ์ข๋ ํธํ๊ฒ ์ด๋ก ์ ์ ๊ฐํ๊ธฐ ์ํด์ NFA์ ์ค์ค์ด๋ผ๋ ๊ฒ์ ๋ณด์ด๊ณ NFA์ ๋ํด ๊ณ ๋ คํ ๊ฒ๊ณผ ๋น์ทํ ๋งฅ๋ฝ.
--
-- ### Deterministic context-free grammars
--
-- derivation์ ๋ฒ์(variable ๋๋ non-terminal)์ ๋ ๊ตฌ์ฒด์ ์ธ intermediate string์ผ๋ก ์นํ(substitute)ํ๋ฉฐ ์งํ.<br>
-- ๊ทธ๋ฌ๋๊น ๋ฌธ๋ฒ๊ท์น์์ ํ์ดํ ์ค๋ฅธ์ชฝ์ ๋ณ์๋ฅผ ์ผ์ชฝ์ intermediate string์ผ๋ก.
--
-- reduction์ ๊ทธ ๋ฐ๋ ๊ฐ๋
์ผ๋ก intermideate string์ ๋ํ๋๋ ๋ฌธ์์ด์ ์ผ๋ถ๋ถ์ด ๋ฌธ๋ฒ๊ท์น ํ์ดํ ์ผ์ชฝ๊ณผ ์ผ์นํ ๋ ์ค๋ฅธ์ชฝ์ ๋ณ์๋ก ๊ฑฐ๊พธ๋ก ์นํ.
--
-- $S\to T\pmb{\dashv}$<br>
-- $T\to T(T) \mid \varepsilon$
--
-- * derivation
--
-- $S \Rightarrow T\pmb{\dashv}
-- \Rightarrow T(T)\pmb{\dashv}
-- \Rightarrow T()\pmb{\dashv}
-- \Rightarrow T(T)()\pmb{\dashv}
-- \Rightarrow T()()\pmb{\dashv}
-- \Rightarrow ()()\pmb{\dashv}$
--
-- * reduction (์์ derivation๊ณผ ๋น๊ตํ๊ธฐ ์ข๊ฒ ์ฑ
์์์ ์ข์ฐ๋ฅผ ๋ฐ๋๋ก ๋ค์ง์ด ์ผ๋ค)
--
-- $S \leftarrowtail \underline{T\pmb{\dashv}}\color{red}{|}
-- \leftarrowtail \underline{T(T)}\color{red}{|}\pmb{\dashv}
-- \leftarrowtail T(\underline{}\color{red}{|})\pmb{\dashv}
-- \leftarrowtail \underline{T(T)}\color{red}{|}()\pmb{\dashv}
-- \leftarrowtail T(\underline{}\color{red}{|})()\pmb{\dashv}
-- \leftarrowtail \underline{}\color{red}{|}()()\pmb{\dashv}$
--
-- ์์ ๊ฐ์ด ๊ฐ์ฅ ์ผ์ชฝ๋ถํฐ ๋ฌถ์ด๊ฐ๋ reduction์ leftmost reduction์ด๋ผ๊ณ ํ๋ค.<br>
-- ์ฐธ๊ณ ๋ก leftmost reduction์ rightmost derivation์ ๋์๋๋ค.
--
-- unambiguous CFG์ด๋ฉด
-- * ์์ ์ํ($S$)๋ก๋ถํฐ ๊ทธ ์ธ์ด์ ํน์ ๋ฌธ์์ด($w$)์ ๋ง๋ค์ด๋ด๋
-- rightmost derivation์ด ์ ์ผํ๊ฒ ์กด์ฌ
-- * ์ฌ๊ธฐ์ ๋์๋๋ leftmost reduction, ์ฆ,
-- ๊ทธ ์ธ์ด์ ์ํ๋ ์
๋ ฅ ๋ฌธ์์ด $w$๋ก๋ถํฐ $S$๊น์ง์
-- leftmost reduction๋ ์ ์ผํ๊ฒ ์กด์ฌ
-- * ๊ทธ๋ฌ๋ฏ๋ก ๋ฐ์ค์น ๋ถ๋ถ(handle)๋ ํ๊ฐ์ง๋ก ์ ํด์ง ์๋ฐ์ ์๋ค!
-- (์๊ทธ๋ ๋ค๋ฉด ambiguous CFG๋ผ๋ ์ด์ผ๊ธฐ)
--
-- $w$๊ฐ ์ธ์ด์ ์ํ๋ ๋ฌธ์์ด์ด๋ผ $w$๋ก๋ถํฐ $S$๊น์ง์ leftmost reduction์ด ์กด์ฌํ ๋,<br>
-- ์ด๋ฌํ reduction ๋์ค์ ๋ํ๋๋ intermediate string์ valid (intermediate) string์ด๋ผ๊ณ ๋ถ๋ฅธ๋ค.
--
-- deterministic CFG๊ฐ ๋๊ธฐ ์ํด์๋<br>
-- valid string์์ ์ง๊ธ๊น์ง ์ฝ์ด๋ค์ธ ๋ถ๋ถ๋ง์ผ๋ก handle์ด ๊ฒฐ์ ๊ฐ๋ฅํด์ผ<br>
-- (์ฆ, ์ด๋๋ฅผ ๋ฐ์ค์น ์ง ํ๋จ ๊ฐ๋ฅํด์ผ ํ๋ค๋ ์ด์ผ๊ธฐ)<br>
-- DCFG์์ valid string์ธ $x\,\underline{h}\color{red}{|}y$์์ ๋ฐ์ค์น $h$๊ฐ handle์ด๋ผ๋ฉด
-- * handle๊น์ง๋ง ์ฝ์ด๋ค์ธ ๋ถ๋ถ์ด๋ฏ๋ก ์์ง ์ฝ์ด๋ค์ด์ง ์์ $y\in\Sigma^*$ (์ฆ y์๋ ๋ณ์๊ฐ ํฌํจ๋์ด ์์ง ์์)
-- * ๋ค๋ฅธ ๋ชจ๋ valid string $x\,\underline{h}\color{red}{|}y'$์ ๋ํด์๋ ($y'\in\Sigma^*$) ๋ง์ฐฌ๊ฐ์ง๋ก $h$๊ฐ handle์ด ๋์ด์ผ ํ๋ค
-- * ์ด๋ ๊ฒ valid string์์ handle์ด ์ง๊ธ๊น์ง ์ฝ์ด๋ค์ธ ๋ถ๋ถ๋ง์ผ๋ก ๊ฒฐ์ ๋ ๋ forced handle์ด๋ผ๊ณ ๋ถ๋ฅธ๋ค
--
-- Definition 2.47 **DCFG๋ ๋ชจ๋ valid string์ด forced handle์ ๊ฐ๋ CFG์ด๋ค.**
--
-- ์ด๋ ๊ฒ ์ ์ํ๋ ๊ฒ์ ๊ฐ๋
์ ์ผ๋ก ์ข์๋ฐ, ์ด ์ ์๋ง ๊ฐ์ง๊ณ ๋ ์ด๋ค CFG๋ฅผ ๋ณด๊ณ ๊ทธ ๋ฌธ๋ฒ์ด ๊ฒฐ์ ์ (deterministic)์ธ์ง ์ด๋ป๊ฒ ๊ฒ์ฌํด์ผ ํ ์ง๊น์ง๋ ๊ธ๋ฐฉ ์ ์๊ฐ ์๋ค. Parsing and LR(k) Grammars ๋ถ๋ถ์์ ์ด๊ฒ์ ๋ค๋ฃฌ๋ค.
--
--
-- CFL์์ **๋ชจํธํ์ง ์์**(non-ambiguous ํน์ unambiguous)๊ณผ **๊ฒฐ์ ์ **deterministic์ ์ผ์นํ๋ ๊ฐ๋
์ด ์๋๋ค!
-- * ์ผ๋จ deterministic CFL๋ ๊ธฐ๋ณธ์ ์ผ๋ก unambiguous CFL์ผ ์๋ฐ์ ์์
-- * ํ์ง๋ง unambiguous CFL๋ผ๊ณ ์๋์ ์ผ๋ก deterministic CFL๋ ์๋
--
-- ๊ทธ๋ฌ๋๊น ์ ์ผํ parse tree๊ฐ ๋ง๋ค์ด์ง๋๋ก ๋ฌธ๋ฒ์ ์์ฑํ ์๋ ์๋๋ผ๋ ๊ฑฐ๊ธฐ์ ๋์๋๋ DPDA๋ฅผ ์ค๊ณํ๋ ๊ฒ์ด ๋ถ๊ฐ๋ฅํ ๊ฒฝ์ฐ๊ฐ ์กด์ฌํจ.
--
--
-- ์ฐธ๊ณ : [Computer Science Stack Exchange์ ์ฌ๋ผ์จ ๊ด๋ จ ์ง๋ฌธ](https://cs.stackexchange.com/questions/14583/how-is-non-ambuiguity-different-from-determinism)์ Wandering Logic์ด๋ผ๋ ์์ด๋๋ก ๋จ๊ธด ๋ต๋ณ์ ๋ณด๋ฉด ์ํคํผ๋์์ ์ ์๋ unambiguous non-deterministic language์ ์ฌ๋ก๋ฅผ ์ธ์ฉ (๋ฌผ๋ก ์ฐ๋ฆฌ ์ฑ
์์๋ ์๊ฐํ๊ณ ์ค๋ช
ํ๋ ์์ ์ด๋ค)
--
-- ์ด๊ฒ ์ ๊ทธ๋ด๊น๋ฅผ ์๊ฐํด ๋ณด๋ฉด PDA์ DPDA์ ๋์ ๋ฐฉ์์ ๋ํด ์ข๋ ํ์คํ๊ฒ ์ดํดํ ์ ์๋ค.
-- ์ข๋ ๊ตฌ์ฒด์ ์ผ๋ก๋ DFA๋ NFA์ ๋นํด ๊ทธ ํํ๊ฐ๋ฅํ ์ธ์ด์ ๋ฒ์๊ฐ ์ ํ์ ์ด์ง ์์๋ฐ,
-- ์ด์งธ์ DPDA๋ ๋ฅ๋ ฅ์ด ์ PDA์ ๋นํด ์ ํ์ ์ธ์ง๋ฅผ ์ดํดํ ์ ์๋ค.
-- ### Parsing and LR(k) Grammars
-- DCFL์ ๋ชจ๋ LR(0)๋ฌธ๋ฒ์ผ๋ก ๋ํ๋ผ ์ ์๋ค. (๊ทธ๋ฌ๋๊น DPDA, DCFG, LR(0)๋ฌธ๋ฒ ์ด๊ฒ๋ค์ด ๋ชจ๋ ์ค์ค)
--
-- LR(0)๋ (์ง๋์น๊ฒ ๋ณ์์ ๊ฐ์๊ฐ ๋ง์์ง๋ฏ๋ก) ๋ถํธํด์ LR(k)๋ผ๋ ๊ฒ์ ํ์ฉํ๋ค<br>
-- ์ด๋ก ์ ์ผ๋ก ๋ชจ๋ k์ ๋ํด์ LR(k)์ LR(0)์ ์ค์ค. (๋ณดํต์ LR(1)์ ๊ฐ์ฅ ๋ง์ด ํ์ฉ)<br>
-- ์ฐธ๊ณ ๋ก ์ฑ
์์๋ ๋ค๋ฃจ์ง ์์ง๋ง ์ข๋ ์ธ๋ถํํ๋ฉด $\textrm{LR}(k) \supseteq \textrm{LALR}(k) \supseteq \textrm{SLR}(k)$ ์ด๋ฐ ๊ฒ๋ค์ด ์๋๋ฐ, DPDA๋ก ์ฎ๊ธด๋ค๊ณ ์น ๋ SLR ์ชฝ์ผ๋ก ๊ฐ์๋ก ๋น๊ฒฐ์ ์ฑ์ ํด์ํ๋ ์ ์ฐจ๋ฅผ ๋จ์ํํด์ ์ํ ๊ฐ์๋ฅผ ์ค์ผ ์ ์๋ ๋์ ๋ฌธ๋ฒ์ ์ ์ฝ์ด ๋ ๋ง์์ง๋ค. ์ค์ฉ์ ์ผ๋ก๋ ์ค๊ฐ์ฏค์ ์๋ LALR(1)์ด ๋ง์ด ํ์ฉ๋์ด ์๋ค. ๋ํ์ ์ผ๋ก yacc์ด๋ผ๋ ํ์ ์์ฑ๊ธฐ ๋ฐ ๊ฐ ์ธ์ด๋ณ๋ก yacc์ ๋ณธ๋ฐ ๋ง๋ ๋ค์์ ํ์ ์์ฑ๊ธฐ๋ค์ด LALR(1)์ ๊ธฐ๋ฐ์ผ๋ก ๊ตฌํ๋์ด ์๋ค.
--
-- ๊ทธ๋ฆฌ๊ณ ์ฐ๋ฆฌ ์ฑ
์ LR๊ณผ ๊ฐ์ bottom-up parsing์ ๊ณง๋ฐ๋ก ์๊ฐํ๋๋ฐ LR(k)์ด ๋์ค๊ฒ ๋ ๋ฐฐ๊ฒฝ์๋ LL(k), ์ฆ top-down parsing ํน์ recursive decendent parsing์ผ๋ก ๋ฌธ๋ฒ๋ถ์์ ๊ตฌํํ๊ธฐ ์ํด์๋ ๋ฌธ๋ฒ์ ์ ์ฝ์ด ๋ง์์ ๋ถํธํ๊ธฐ ๋๋ฌธ์ด๋ค.
--
-- ๋ํ, ์ฑ
์๋ ์๊ฐํ์ง ์์ ์ต๊ทผ ๋ํฅ์ผ๋ก๋ GLR parser๋ผ๋ ๊ฒ์ด ๋น๊ต์ ์ต๊ทผ์ ํ์ฉ ๋๊ธฐ๋
-- ํ๋๋ฐ ์ด๊ฒ์ ๋ชจ๋ DCFG๋ฅผ ๋ค ์ฒ๋ฆฌํ ์ ์์ผ๋ฉด์๋ ๊ฒฐ์ ์ ์ธ (ํน์ ๊ฑฐ์ ๊ฒฐ์ ์ ์ธ) CFG์ ๊ฒฝ์ฐ์๋
-- LALR์ ํฌ๊ฒ ๋ค๋จ์ด์ง์ง ์๋ ์ฑ๋ฅ์ ๋ณด์ฌ์ค๋ค๊ณ ํ๋ค.
-- ์ฐธ๊ณ ๋ก, ์ฌ์ค์ yacc์ ๋์ฒดํ bison ํ์ ์์ฑ๊ธฐ๋ LALR๊ณผ ํจ๊ป GLR๋ ์ง์ํ๋ค.
--
-- ์ฃผ์: LL, LR ์ด๋ฐ ๊ฒ์ "๋ฌธ๋ฒ"์ ๋ถ๋ฅ์ด๋ค. ("์ธ์ด"์ ๋ถ๋ฅ๊ฐ ์๋)
--
-- ์์ ๋ณ์๊ฐ ํ์ดํ ์ค๋ฅธ์ชฝ์ ๋ํ๋์ง ์๋ ๋ฌธ๋ฒ๋ง์ ๊ณ ๋ คํ๋ค.<br>
-- (์์ ๋ณ์ $S$๊ฐ ์ค๋ฅธ์ชฝ์ ๋ํ๋๋ ๋ฌธ๋ฒ์ ์๋ก์ด ์์ ๋ณ์ $S'$๋ฅผ ๋ง๋ค์ด $S'\to S$ ๊ท์น์ ํ๋ ์ถ๊ฐํ๋ฉด ์์ ๋ณ์๊ฐ ์ค๋ฅธ์ชฝ์ ๋ํ๋์ง ์๋ ๋ฌธ๋ฒ์ผ๋ก ๋ฐ๊ฟ ์ ์๋ค๊ณ ์ฑ
์ ๋์์๋ค.)
--
-- ### Relationship of DPDAs and DCFGs
-- endmarked language์ ๋ํ CFG๋ง์ ๊ณ ๋ คํ๋ฉด DPDA์ DCFG๋ ์ค์ค์ด๋ผ๊ณ ์ฑ
์์ ์ด์ฌํ ์ค๋ช
์ ...
-- +
-- An LR(0) item ("dotted rule"): a grammar rule with a dot position, stored as
-- (left-hand-side variable, (symbols already consumed, symbols still expected)).
type DottedRule = (Gamma,(GString,GString))
-- A DK-automaton state: the set of dotted rules valid at that point.
type NodeDK = [DottedRule]
-- A transition: (source state, grammar symbol consumed, target state).
type EdgeDK = (NodeDK,Gamma,NodeDK)
-- The DK automaton: (all states, all transitions).
type GraphDK = ([NodeDK],[EdgeDK])
-- Closure of a set of dotted rules: whenever the dot stands immediately before
-- a variable v', add every rule for v' with the dot at the far left.
-- Iterates to a fixed point (stops when a pass adds nothing new).
dottedClosure :: CFG -> [DottedRule] -> [DottedRule]
dottedClosure cfg@(CFG(_,_,rs,_)) drs
  | drs==drs' = drs                 -- fixed point reached
  | otherwise = dottedClosure cfg drs'
  where
    drs' = drs `union` [(v',([],gs')) | (v,(_,v2:gs2)) <- drs,
                                        v':->gs' <-rs, v2 == v']
-- Render a dotted rule: the left-hand side, an arrow, then the right-hand
-- side with the dot marker placed between consumed and remaining symbols.
showDottedRule :: DottedRule -> String
showDottedRule (lhs,(before,after)) =
  lhs ++ " โ " ++ concat before ++ "โข" ++ concat after
-- Advance the dot over symbol g in every item expecting g, then take the closure.
stepNode :: CFG -> NodeDK -> Gamma -> NodeDK
stepNode cfg items g =
  dottedClosure cfg
    [ (lhs,(done++[sym],rest)) | (lhs,(done,sym:rest)) <- items, sym==g ]
-- Build the DK automaton for a CFG. A dummy state [] with an edge labelled ""
-- into n0 marks the entry point; n0 is the closure of all start-symbol rules.
cfg2graphDK :: CFG -> GraphDK
cfg2graphDK cfg@(CFG(_,_,rs,s0)) = mkGraphDK cfg ([[],n0],[([],"",n0)])
  where
    n0 = dottedClosure cfg [(v,([],gs)) | v:->gs <- rs, v==s0]
-- Source and destination state of an edge.
src (n,_,_) = n
dst (_,_,n) = n
-- Grow the automaton to a fixed point: for each state that has no outgoing
-- edges yet, try stepping over every grammar symbol (variables ++ terminals)
-- and record the non-empty successor states and their edges.
mkGraphDK :: CFG -> GraphDK -> GraphDK
mkGraphDK cfg@(CFG(vs,as,_,_)) (ns,es)
  | (ns,es) == (ns',es') = (ns,es)      -- nothing new was added: done
  | otherwise = mkGraphDK cfg (ns',es')
  where
    es' = es `union` esNew
    ns' = ns `union` nsNew
    out0ns = ns \\ map src es           -- states not yet expanded
    esNew = [(n,g,n') | n <- out0ns, g<-vs++as,
                        let n' = stepNode cfg n g,
                        not(null n')]
    nsNew = map dst esNew
-- -
-- Grammar G5.5: endmarked balanced parentheses
--   S -> T<end> ;  T -> T(T) | empty
cfgG55 = CFG(["S","T"],["(",")","โฃ"],rs,"S")
  where
    rs = [ "S" :-> ["T","โฃ"]
         , "T" :-> ["T","(","T",")"]
         , "T" :-> []
         ]
-- +
-- Closure of the initial item (dot at the left of S's rule)
closS0 = dottedClosure cfgG55 [("S",([],["T","โฃ"]))]
mapM_ (putStrLn . showDottedRule) closS0
-- +
-- Successor state after shifting T
closS0_T = stepNode cfgG55 closS0 "T"
closS0_T
mapM_ (putStrLn . showDottedRule) closS0_T
-- +
-- Successor state after shifting "("
closS0_Tl = stepNode cfgG55 closS0_T "("
closS0_Tl
mapM_ (putStrLn . showDottedRule) closS0_Tl
-- +
import Data.List (intercalate, elemIndices)
import IHaskell.Display.Graphviz
-- dot "digraph { l -> o; o -> v; v -> e; h -> a ; a -> s; s -> k ; k -> e ; e -> l ; l -> l}"
-- Render a DK automaton with the IHaskell graphviz helper.
drawDK = dot . cfg2DKgraphviz
-- Translate a DK automaton into graphviz DOT source. The pattern expects the
-- dummy start state [] and its ""-labelled edge first, exactly as produced by
-- cfg2graphDK; real states are numbered by their position in the state list.
cfg2DKgraphviz :: GraphDK -> String
cfg2DKgraphviz ([]:ns,([],"",n0):es)
  = "digraph { rankdir=LR; node [shape=box style=rounded]; "
  ++ "start [shape=none]; start -> n0; "
  ++ intercalate "; " [ nodeName n++" ["++attrDKnode n++"]" | n <- ns ]
  ++ intercalate "; " [ nodeName n1++" -> "++nodeName n2
                        ++ " [label=\""++g++"\"]" | (n1,g,n2) <- es ]
  ++ "}"
  where
    -- A state's DOT identifier is "n" followed by its index in ns.
    nodeName n = "n"++show i where [i] = elemIndices n ns
-- DOT attributes for one state: a double border when some item is complete
-- (dot at the far right, i.e. a reduce is possible), and a left-aligned
-- listing of the state's dotted rules as the node label.
attrDKnode :: NodeDK -> String
attrDKnode items = border ++ " " ++ label
  where
    canReduce = any (\(_,(_,after)) -> null after) items
    border    = "peripheries=" ++ (if canReduce then "2" else "1")
    label     = "label=\"" ++ concatMap (\r -> showDottedRule r ++ "\\l") items ++ "\""
-- +
import IHaskell.Display.Graphviz
-- dot "digraph { l -> o; o -> v; v -> e; h -> a ; a -> s; s -> k ; k -> e ; e -> l ; l -> l}"
-- The generator should eventually emit output of roughly this shape automatically.
-- FIX: v2 was built above by stepNode ... "(", so the v1 -> v2 edge must be
-- labelled "(" (the original mistakenly labelled it with the end-marker).
dot $ "digraph { rankdir=LR; node [shape=box style=rounded]; "
   ++ "v0 ["++attrDKnode closS0++"]; "
   ++ "v1 ["++attrDKnode closS0_T++"]; "
   ++ "v2 ["++attrDKnode closS0_Tl++"]; "
   ++ "v0 -> v1 [label=\"T\"];"
   ++ "v1 -> v2 [label=\"(\"];"
   ++ "}"
-- This, of course, draws only part of cfgG55's automaton.
-- +
-- DK-test condition:
-- every double-bordered (reducible) state must
--  1. contain exactly one completed rule, and
--  2. contain no rule in which a terminal immediately follows the dot.
-- An example that satisfies the DK-test:
drawDK (cfg2graphDK cfgG55)
-- -
-- An example that does NOT satisfy the DK-test (even-length palindromes over 0/1)
cfgN1 = CFG(["S","A","B"],["0","1","โฃ"],rs,"S")
  where
    rs = [ "S" :-> ["A","โฃ"]
         , "A" :-> ["0","A","0"]
         , "A" :-> ["B"]
         , "B" :-> ["1","B","1"]
         , "B" :-> [] -- try adding "2" to the alphabet and putting it here to see what happens
         ]
drawDK (cfg2graphDK cfgN1)
-- Another example that does NOT satisfy the DK-test (G6.4: arithmetic expressions)
cfgG64 = CFG(["S","E","T"],["a","+","ร","โฃ"],rs,"S")
  where
    rs = [ "S" :-> ["E","โฃ"]
         , "E" :-> ["E","+","T"]
         , "E" :-> ["T"]
         , "T" :-> ["T","ร","a"]
         , "T" :-> ["a"]
         ]
-- +
-- Build and draw the DK automaton for G6.4
cfgG64DK = cfg2graphDK cfgG64
drawDK cfgG64DK
-- +
-- Variables that can derive the empty string. Only rules whose right-hand
-- side contains no terminal can contribute; seed with the directly-empty
-- rules and iterate to a fixed point.
nullables :: CFG -> [Gamma]
nullables (CFG(_,as,rs,_)) = fix0s ns0
  where
    rs0 = [r | r@(v:->gs) <- rs, all (`notElem` as) gs]   -- terminal-free rules
    ns0 = [v | v:->[] <- rs0]                             -- directly nullable
    fix0s ns | ns == ns' = ns
             | otherwise = fix0s ns'
      where
        ns' = ns `union` [v | v:->gs <- rs0, all (`elem` ns) gs]
-- FIRST set of a sentential form: the terminals that can begin a string
-- derived from it. The accumulator ys records variables currently being
-- expanded, guarding against infinite recursion through left recursion.
first :: CFG -> [Gamma] -> [Gamma]
first cfg@(CFG(vs,as,rs,_)) = f1 []
  where
    ns = nullables cfg
    f1 ys [] = []
    f1 ys (x:xs)
      | x `elem` as = [x]   -- a terminal begins only with itself
      | x `elem` ys = if x `elem` ns then f1' xs else []  -- already expanding x
      | x `elem` ns = foldr union (f1' xs) [f1' gs | v:->gs <- rs, v==x]
      | otherwise = foldr union [] [f1' gs | v:->gs <- rs, v==x]
      where
        f1' = f1 (ys `union` [x])
-- -
-- Sanity checks: nullable sets and FIRST sets for the three example grammars
nullables cfgN1
first cfgN1 ["S"]
first cfgN1 ["A"]
first cfgN1 ["B"]
nullables cfgG55
first cfgG55 ["S"]
first cfgG55 ["T"]
nullables cfgG64
first cfgG64 ["S"]
first cfgG64 ["E"]
first cfgG64 ["T"]
first cfgG64 ["+","T"]
-- +
-- An LR(1) item: a dotted rule paired with a single lookahead terminal.
type DottedRule1 = (DottedRule,Gamma)
-- Display form: a dotted rule with its merged set of lookaheads.
type DottedRule1' = (DottedRule,[Gamma])
-- LR(1) automaton state, edge, and graph (mirroring the LR(0) versions).
type NodeDK1 = [DottedRule1]
type EdgeDK1 = (NodeDK1,Gamma,NodeDK1)
type GraphDK1 = ([NodeDK1],[EdgeDK1])
-- Closure for LR(1) items: like dottedClosure, but each new item carries a
-- lookahead x' drawn from FIRST(rest after the expanded variable), plus the
-- parent item's lookahead x when that rest is nullable.
dottedClosure1 :: CFG -> [DottedRule1] -> [DottedRule1]
dottedClosure1 cfg@(CFG(_,_,rs,_)) drs
  | drs==drs' = drs
  | otherwise = dottedClosure1 cfg drs'
  where
    drs' = drs `union`
           [((v',([],gs')),x') | ((v,(_,v2:gs2)),x) <- drs,
                                 v':->gs' <-rs, v2 == v',
                                 x' <- first cfg gs2 `union` [x | nullable gs2] ]
    nullable = all (`elem` ns)
    ns = nullables cfg
-- Render an LR(1) item: the dotted rule followed by its lookahead symbol.
showDottedRule1 :: DottedRule1 -> String
showDottedRule1 (rule,look) = unwords [showDottedRule rule, look]
-- Render a dotted rule with all of its merged lookaheads concatenated.
showDottedRule1' :: DottedRule1' -> String
showDottedRule1' (rule,looks) = showDottedRule rule ++ (' ' : concat looks)
-- Advance the dot over symbol g in every LR(1) item expecting g (the
-- lookahead rides along unchanged), then take the LR(1) closure.
stepNode1 :: CFG -> NodeDK1 -> Gamma -> NodeDK1
stepNode1 cfg drs g = dottedClosure1 cfg
  [ ((v,(gs1++[g'],gs2)),x) | ((v,(gs1,g':gs2)),x) <- drs, g==g']
-- The dummy state [] marks the start and is reached via an edge labelled "".
-- The real initial state pairs every start-symbol rule with every terminal
-- as a lookahead.
cfg2graphDK1 :: CFG -> GraphDK1
cfg2graphDK1 cfg@(CFG(_,as,rs,s0)) = mkGraphDK1 cfg ([[],n0],[([],"",n0)])
  where
    n0 = dottedClosure1 cfg [((v,([],gs)),x) | v:->gs <- rs, v==s0, x<-as]
-- Source and destination of an LR(1) edge (rebinds the LR(0) helpers).
src (n,_,_) = n
dst (_,_,n) = n
-- LR(1) analogue of mkGraphDK: expand unexplored states over every grammar
-- symbol until no new states or edges appear.
mkGraphDK1 :: CFG -> GraphDK1 -> GraphDK1
mkGraphDK1 cfg@(CFG(vs,as,_,_)) (ns,es)
  | (ns,es) == (ns',es') = (ns,es)      -- fixed point: automaton complete
  | otherwise = mkGraphDK1 cfg (ns',es')
  where
    es' = es `union` esNew
    ns' = ns `union` nsNew
    out0ns = ns \\ map src es           -- states with no outgoing edges yet
    esNew = [(n,g,n') | n <- out0ns, g<-vs++as,
                        let n' = stepNode1 cfg n g,
                        not(null n')]
    nsNew = map dst esNew
-- +
import Data.List (intercalate, elemIndices)
import IHaskell.Display.Graphviz
-- dot "digraph { l -> o; o -> v; v -> e; h -> a ; a -> s; s -> k ; k -> e ; e -> l ; l -> l}"
-- Render an LR(1) (DK1) automaton with graphviz.
drawDK1 = dot . cfg2DK1graphviz
-- DOT source for an LR(1) automaton; same layout conventions as
-- cfg2DKgraphviz (dummy start state [] first, states numbered by position).
cfg2DK1graphviz :: GraphDK1 -> String
cfg2DK1graphviz ([]:ns,([],"",n0):es)
  = "digraph { rankdir=LR; node [shape=box style=rounded]; "
  ++ "start [shape=none]; start -> n0; "
  ++ intercalate "; " [ nodeName n++" ["++attrDK1node n++"]" | n <- ns ]
  ++ intercalate "; " [ nodeName n1++" -> "++nodeName n2
                        ++ " [label=\""++g++"\"]" | (n1,g,n2) <- es ]
  ++ "}"
  where
    nodeName n = "n"++show i where [i] = elemIndices n ns
-- DOT attributes for an LR(1) state: double border when reducible, and a
-- label listing each dotted rule once with all its lookaheads merged
-- (items sharing the same rule are grouped after sorting).
attrDK1node :: NodeDK1 -> String
attrDK1node drs
  = "peripheries="++(if reducible then "2" else "1")++" "
  ++ "label=\""++concat [showDottedRule1' r++"\\l" | r<-drs']++"\""
  where
    reducible = any null [ gs2 | ((_,(gs1,gs2)),_)<-drs]
    drs' = [(r,xs) | (r:_,xs) <- map unzip . groupBy (\(r1,_) (r2,_) -> r1==r2) $ sort drs]
-- -
-- Compare item counts and listings between the LR(0) and LR(1) closures,
-- then draw both automata for G6.4 and G5.5.
length $ dottedClosure cfgG64 [("S",([],["E","โฃ"]))]
length $ dottedClosure1 cfgG64 [(("S",([],["E","โฃ"])),x)|x<-["a","+","ร","โฃ"]]
mapM_ (putStrLn . showDottedRule) $ dottedClosure cfgG64 [("S",([],["E","โฃ"]))]
putStrLn "---------"
mapM_ (putStrLn . showDottedRule1) $ dottedClosure1 cfgG64 [(("S",([],["E","โฃ"])),x)|x<-["a","+","ร","โฃ"]]
drawDK (cfg2graphDK cfgG64)
drawDK1 (cfg2graphDK1 cfgG64)
drawDK (cfg2graphDK cfgG55)
drawDK1 (cfg2graphDK1 cfgG55)
-- +
-- Find the forced handle of the valid string prefix xh by a completed rule
-- v:->h: split xh as x++h where the suffix h matches the completed rule's
-- right-hand side. (head: assumes the DK-test guarantees a unique match.)
forceDK :: NodeDK -> [Gamma] -> ([Gamma], Rule)
forceDK node xh = (x, v:->h)
  where
    (x, v:->h) = head [(x,v:->gs) | (v,(gs,[]))<-node, (x,h) <- zip (inits xh) (tails xh), gs==h]
-- Parser configuration: the state stack (top first) and the valid prefix so far.
type DKconfig = ([NodeDK],[Gamma])
-- Remaining input symbols.
type DKinput = [Gamma]
-- Reduce: pop |h| states for the forced handle h, then shift the rule's
-- left-hand-side variable v over the shortened prefix x.
reduceDK :: GraphDK -> DKconfig -> DKconfig
reduceDK dk@(_, es) (stack@(node:_), xh) = shiftDK dk (stack', x) v
  where
    (x, v:->h) = forceDK node xh
    stack' = drop (length h) stack
-- Append a symbol to the end of the valid string, following the matching
-- automaton edge out of the current (top-of-stack) state.
shiftDK :: GraphDK -> DKconfig -> Gamma -> DKconfig
shiftDK dk@(_, es) (stack@(node:_), x) a = (n':stack, x++[a])
  where
    n' = head [n' | (n,a',n')<-es, node==n, a'==a]
-- One parser step: reduce when the top state contains a completed rule,
-- otherwise shift the next input symbol. Stepping past acceptance is an error.
stepDK :: GraphDK -> (DKconfig,DKinput) -> (DKconfig,DKinput)
stepDK dk ((stack@(node:_), x), as)
  | accepting = error "already in accept state"
  | reducible = ( reduceDK dk (stack,x), as )
  | otherwise = ( shiftDK dk (stack,x) (head as), tail as )
  where
    accepting = acceptingDK node
    reducible = reducibleDK node
-- Run the parser until the accepting state, returning every intermediate
-- configuration including the final (accepting) one.
runDK :: GraphDK -> (DKconfig,DKinput) -> [(DKconfig,DKinput)]
runDK dk (config,as) = ps++[p]
  where (ps,p:_) = break (\((node:_,_),_) -> acceptingDK node)
                 $ iterate (stepDK dk) (config,as)
-- A state accepts when some item has consumed everything up to the end-marker.
acceptingDK node = any (("โฃ"==) . last) [ gs1 | (_,(gs1@(_:_),[]))<-node]
-- A state can reduce when it contains a completed item.
reducibleDK node = any null [ gs2 | (_,(gs1,gs2))<-node]
-- Pretty-print a configuration: the valid prefix, then each stacked state's
-- dotted rules (top of stack first), separated by dashed lines.
printDKconfig (stack,x) = do
  putStrLn $ "valid: " ++ concat x
  putStrLn "--------"
  putStr . intercalate "\n--------\n"
        $ map (intercalate "\n" . map showDottedRule) stack
-- +
-- DK automaton for G5.5; strip the dummy start state/edge and keep the
-- real initial state.
dk55 = cfg2graphDK cfgG55
(_:ns55,_:es55) = dk55
ns55start = head ns55
-- -
mapM_ (putStrLn . showDottedRule) ns55start
-- +
-- Step-by-step processing of the input consisting of only the end-marker
putStrLn "========"
(stack0, x0) = ([ns55start], [])
printDKconfig (stack0,x0)
putStrLn "========"
(stack1, x1) = reduceDK dk55 (stack0, x0)
printDKconfig (stack1,x1)
putStrLn "========"
(stack2, x2) = shiftDK dk55 (stack1,x1) "โฃ"
printDKconfig (stack2,x2)
putStrLn "========"
-- +
-- Step-by-step processing of the endmarked input ( ) ( )
putStrLn "========"
(config0, as0) = ( ([ns55start], []), ["(",")","(",")","โฃ"] )
putStrLn $ "input: " ++ concat as0
printDKconfig config0
putStrLn "========"
(config1, as1) = stepDK dk55 (config0, as0)
putStrLn $ "input: " ++ concat as1
printDKconfig config1
putStrLn "========"
(config2, as2) = stepDK dk55 (config1, as1)
putStrLn $ "input: " ++ concat as2
printDKconfig config2
putStrLn "========"
(config3, as3) = stepDK dk55 (config2, as2)
putStrLn $ "input: " ++ concat as3
printDKconfig config3
putStrLn "========"
(config4, as4) = stepDK dk55 (config3, as3)
putStrLn $ "input: " ++ concat as4
printDKconfig config4
putStrLn "========"
(config5, as5) = stepDK dk55 (config4, as4)
putStrLn $ "input: " ++ concat as5
printDKconfig config5
putStrLn "========"
(config6, as6) = stepDK dk55 (config5, as5)
putStrLn $ "input: " ++ concat as6
printDKconfig config6
putStrLn "========"
(config7, as7) = stepDK dk55 (config6, as6)
putStrLn $ "input: " ++ concat as7
printDKconfig config7
putStrLn "========"
(config8, as8) = stepDK dk55 (config7, as7)
putStrLn $ "input: " ++ concat as8
printDKconfig config8
putStrLn "========"
(config9, as9) = stepDK dk55 (config8, as8)
putStrLn $ "input: " ++ concat as9
printDKconfig config9
putStrLn "========"
(config10, as10) = stepDK dk55 (config9, as9)
putStrLn $ "input: " ++ concat as10
printDKconfig config10
putStrLn "========"
-- +
-- Same run, driven by runDK: print every configuration for the end-marker input
config0 = ([ns55start], [])
mapM_ (\(config,as) -> do putStrLn $ "input: " ++ concat as
                          printDKconfig config
                          putStrLn "\n========" )
      $ runDK dk55 (config0,["โฃ"])
-- +
-- runDK over the endmarked input ( ) ( )
config0 = ([ns55start], [])
mapM_ (\(config,as) -> do putStrLn $ "input: " ++ concat as
                          printDKconfig config
                          putStrLn "\n========" )
      $ runDK dk55 (config0,["(",")","(",")","โฃ"])
-- +
import Data.Tree
-- Like forceDK, but the valid prefix is a forest of parse trees; the handle's
-- subtrees become the children of a new node labelled with the rule's LHS.
forceDK' :: NodeDK -> Forest Gamma -> (Forest Gamma, Tree Gamma)
forceDK' node xh = head [(x, Node v h) | (v,(gs,[]))<-node, (x,h) <- zip (inits xh) (tails xh),
                                         gs == map rootLabel h]
-- Configuration carrying parse trees instead of bare symbols.
type DKconfig' = ([NodeDK],Forest Gamma)
-- Reduce: pop the handle's states and shift the freshly built tree.
reduceDK' :: GraphDK -> DKconfig' -> DKconfig'
reduceDK' dk@(_, es) (stack@(node:_), xh) = shiftDK' dk (stack', x) t
  where
    (x, t@(Node _ h)) = forceDK' node xh
    stack' = drop (length h) stack
-- Append a tree to the end of the valid forest; the automaton edge is chosen
-- by the tree's root symbol.
shiftDK' :: GraphDK -> DKconfig' -> Tree Gamma -> DKconfig'
shiftDK' dk@(_, es) (stack@(node:_), x) a = (n':stack, x++[a])
  where
    n' = head [n' | (n,a',n')<-es, node==n, a'==rootLabel a]
-- Tree-building analogue of stepDK: shifted terminals become leaf nodes.
stepDK' :: GraphDK -> (DKconfig',DKinput) -> (DKconfig',DKinput)
stepDK' dk ((stack@(node:_), x), as)
  | accepting = error "already in accept state"
  | reducible = ( reduceDK' dk (stack,x), as )
  | otherwise = ( shiftDK' dk (stack,x) (Node (head as) []), tail as )
  where
    accepting = acceptingDK node
    reducible = reducibleDK node
-- Run to acceptance and return only the final configuration (carrying the
-- completed parse forest) together with the remaining input.
parseDK :: GraphDK -> (DKconfig',DKinput) -> (DKconfig',DKinput)
parseDK dk (config,as) = p
  where (_,p:_) = break (\((node:_,_),_) -> acceptingDK node)
               $ iterate (stepDK' dk) (config,as)
-- +
-- Parse the end-marker-only input and draw the resulting tree
config0 = ([ns55start], [])
((node:_,forest),[]) = parseDK dk55 (config0,["โฃ"])
acceptingDK node
putStrLn "======"
putStr $ drawForest forest
-- +
-- Parse the endmarked input ( ) ( ) and draw the resulting tree
config0 = ([ns55start], [])
((node:_,forest),[]) = parseDK dk55 (config0,["(",")","(",")","โฃ"])
acceptingDK node
putStrLn "======"
putStr $ drawForest forest
-- -
config0 = ([ns55start], [])
-- Applying stepDK' one step at a time shows the parse tree being built up
putStr . drawForest . snd . fst
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
putStrLn "======"
putStr . drawForest . snd . fst
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  . stepDK' dk55 . stepDK' dk55 . stepDK' dk55
  $ stepDK' dk55 (config0,["(",")","(",")","โฃ"])
-- ----
--
-- DK1 ์ ๋ํด์๋ ๋น์ทํ๊ฒ ๋ง๋ค์ด ๋ณผ ์ ์๊ฒ ์ง๋ง ์๊ฐ์ด ๋ง์ง ์์ผ๋ฏ๋ก ์๋ต
| 02 Context-Free Languages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: geostats_env
# language: python
# name: geostats_env
# ---
# ## Multicollinearity and Regression Analysis
# In this tutorial, we will be using a spatial dataset of county-level election and demographic statistics for the United States. This time, we'll explore different methods to diagnose and account for multicollinearity in our data. Specifically, we'll calculate variance inflation factor (VIF), and compare parameter estimates and model fit in a multivariate regression predicting 2016 county voting preferences using an OLS model, a ridge regression, a lasso regression, and an elastic net regression.
#
# Objectives:
# * ***Calculate a variance inflation factor to diagnose multicollinearity.***
# * ***Use geographically weighted regression to identify if the multicollinearity is scale dependent.***
# * ***Interpret model summary statistics.***
# * ***Describe how multicollinearity impacts stability in parameter estimates.***
# * ***Explain the variance/bias tradeoff and describe how to use it to improve models***
# * ***Draw a conclusion based on contrasting models.***
#
# Review:
# * [<NAME>. (2013). Collinearity: a review of methods to deal with it and a simulation study evaluating their performance. Ecography, 36(1), 27-46.](https://onlinelibrary.wiley.com/doi/full/10.1111/j.1600-0587.2012.07348.x)
#
import numpy as np
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from numpy import mean
from numpy import std
from numpy import absolute
from libpysal.weights.contiguity import Queen
import libpysal
from statsmodels.api import OLS
# Use seaborn's plain white background for all plots below.
sns.set_style('white')
# First, we're going to load the 'Elections' dataset from the libpysal library, which is a very easy to use API that accesses the Geodata Center at the University of Chicago.
#
# * More on spatial data science resources from UC: https://spatial.uchicago.edu/
# * A list of datasets available through lipysal: https://geodacenter.github.io/data-and-lab//
from libpysal.examples import load_example
# Download (if needed) and register the 'Elections' example dataset bundled with libpysal.
elections = load_example('Elections')
#note the folder where your data now lives:
#First, let's see what files are available in the 'Elections' data example
elections.get_file_list()
# When you are out in the world doing research, you often will not find a ready-made function to download your data. That's okay! You know how to get this dataset without using pysal! Do a quick internal review of online data formats and automatic data downloads.
#
# ### TASK 1: Use urllib functions to download this file directly from the internet to you H:/EnvDatSci folder (not your git repository). Extract the zipped file you've downloaded into a subfolder called H:/EnvDatSci/elections.
# +
# Task 1 code here:
#import required function:
#define online filepath (aka url):
#define local filepath:
#download elections data:
#unzip file: see if google can help you figure this one out!
# -
# ### TASK 2: Use geopandas to read in this shapefile. Call your geopandas.DataFrame "votes"
# TASK 2: Use geopandas to read in this shapefile. Call your geopandas.DataFrame "votes"
# ### EXTRA CREDIT TASK (+2pts): use os to delete the elections data downloaded by pysal in your C: drive that you are no longer using.
# Extra credit task:
#Let's view the shapefile to get a general idea of the geometry we're looking at:
# %matplotlib inline
votes.plot()
#View the first few lines of the dataset
votes.head()
#Since there are too many columns for us to view on a single page using "head", we can just print out the column names so we have them all listed for reference
for col in votes.columns:
    print(col)
# #### You can use pandas summary statistics to get an idea of how county-level data varies across the United States.
# ### TASK 3: For example, how did the county mean percent Democratic vote change between 2012 (pct_dem_12) and 2016 (pct_dem_16)?
#
# Look here for more info on pandas summary statistics:https://www.earthdatascience.org/courses/intro-to-earth-data-science/scientific-data-structures-python/pandas-dataframes/run-calculations-summary-statistics-pandas-dataframes/
#Task 3
# We can also plot histograms of the data. Below, smoothed histograms from the seaborn package (imported as sns) let us get an idea of the distribution of percent democratic votes in 2012 (left) and 2016 (right).
# Plot histograms:
# Smoothed (KDE) histograms of percent-Democratic vote in 2012 and 2016, side by side.
f,ax = plt.subplots(1,2, figsize=(2*3*1.6, 2))
for i,col in enumerate(['pct_dem_12','pct_dem_16']):
    # NOTE(review): `shade=` is deprecated in recent seaborn (use `fill=`) — confirm installed version.
    sns.kdeplot(votes[col].values, shade=True, color='slategrey', ax=ax[i])
    ax[i].set_title(col.split('_')[1])
# Plot spatial distribution of % dem vote in 2012 and 2016 with histogram.
# Left column: county choropleth; right column: vertical KDE of the same variable.
f,ax = plt.subplots(2,2, figsize=(1.6*6 + 1,2.4*3), gridspec_kw=dict(width_ratios=(6,1)))
for i,col in enumerate(['pct_dem_12','pct_dem_16']):
    votes.plot(col, linewidth=.05, cmap='RdBu', ax=ax[i,0])
    ax[i,0].set_title(['2012','2016'][i] + "% democratic vote")
    ax[i,0].set_xticklabels('')
    ax[i,0].set_yticklabels('')
    sns.kdeplot(votes[col].values, ax=ax[i,1], vertical=True, shade=True, color='slategrey')
    ax[i,1].set_xticklabels('')
    # NOTE(review): ylim of (-1,1) looks mismatched to a 0-100 percent variable — confirm intent.
    ax[i,1].set_ylim(-1,1)
f.tight_layout()
plt.show()
# ### TASK 4: Make a new column on your geopandas dataframe called "pct_dem_change" and plot it using the syntax above. Explain the plot.
# Task 4: add new column pct_dem_change to votes:
#Task 4: plot your pct_dem_change variable on a map:
# Click on this url to learn more about the variables in this dataset: https://geodacenter.github.io/data-and-lab//county_election_2012_2016-variables/
# As you can see, there are a lot of data values available in this dataset. Let's say we want to learn more about what county-level factors influence percent change in democratic vote between (pct_dem_change).
#
# Looking at the data description on the link above, you see that this is an exceptionally large dataset with many variables. During lecture, we discussed how there are two types of multicollinearity in our data:
#
# * *Intrinsic multicollinearity:* is an artifact of how we make observations. Often our measurements serve as proxies for some latent process (for example, we can measure percent silt, percent sand, and percent clay as proxies for the latent variable of soil texture). There will be slight variability in the information content between each proxy measurement, but they will not be independent of one another.
#
# * *Incidental collinearity:* is an artifact of how we sample complex populations. If we collect data from a subsample of the landscape where we don't see all combinations of our predictor variables (do not have good cross replication across our variables). We often induce collinearity in our data just because we are limitted in our ability to sample the environment at the scale of temporal/spatial variability of our process of interest. Incidental collinearity is a model formulation problem.(See here for more info on how to avoid it: https://people.umass.edu/sdestef/NRC%20601/StudyDesignConcepts.pdf)
# ### TASK 5: Looking at the data description, pick two variables that you believe will be intrinsically multicollinear. List and describe these variables. Why do you think they will be collinear? Is this an example of *intrinsic* or *incidental* collinearity?
#
# *Click on this box to enter text*
# I chose:
# * "RHI125214", #White alone, percent, 2014
# * "RHI225214", #Black or African American alone, percent, 2014
# These variables are intrinsically multicollinear. A decrease in one of a finite number of races implicitly signifies an increase in another race.
# ## Multivariate regression in observational data:
# Our next step is to formulate our predictive/diagnostic model. We want to create a subset of the "votes" geopandas data frame that contains ten predictor variables and our response variable (pct_pt_16) two variables you selected under TASK 1. First, create a list of the variables you'd like to select.
#
# ### TASK 6: Create a subset of votes called "my_list" containing only your selected predictor variables. Make sure you use the two variables selected under TASK 5, and eight additional variables
# +
# Task 4: create a subset of votes called "my list" with all your subset variables.
#my_list = ["pct_pt_16", <list your variables here>]
# -
#check to make sure all your columns are there:
# (my_list[0] should be the response, followed by the ten predictors)
votes[my_list].head()
# ### Scatterplot matrix
# We call the process of getting to know your data (ranges and distributions of the data, as well as any relationships between variables) "exploratory data analysis". Pairwise plots of your variables, called scatterplots, can provide a lot of insight into the type of relationships you have between variables. A scatterplot matrix is a pairwise comparison of all variables in your dataset.
#Use seaborn.pairplot to plot a scatterplot matrix of your 10 variable subset:
sns.pairplot(votes[my_list])
# ### TASK 7: Do you observe any collinearity in this dataset? How would you describe the relationship between your two "incidentally collinear" variables that you selected based on looking at variable descriptions?
#
# *Type answer here*
#
#
# ### TASK 8: What is plotted on the diagonal panels of the scatterplot matrix?
#
# *Type answer here*
#
# ## Diagnosing collinearity globally:
# During class, we discussed the Variance Inflation Factor, which describes the magnitude of variance inflation that can be expected in an OLS parameter estimate for a given variable *given pairwise collinearity between that variable and another variable*.
#VIF = 1/(1-R2) of a pairwise OLS regression between two predictor variables
#We can use a built-in function "variance_inflation_factor" from statsmodel.api to calculate VIF
#Learn more about the function
# ?variance_inflation_factor
#Calculate VIFs on our dataset.
#my_list[0] is the response, so skip it. NOTE: the original sliced
#my_list[1:10], which silently dropped the tenth predictor; [1:] keeps them all.
predictors = votes[my_list[1:]]
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(predictors.values, i)
                     for i in range(predictors.shape[1])]
vif["features"] = predictors.columns
vif.round()
# ### Collinearity is always present in observational data. When is it a problem?
# Generally speaking, VIF > 10 are considered "too much" collinearity. But this value is somewhat arbitrary: the extent to which variance inflation will impact your analysis is highly context dependent. There are two primary contexts where variance inflation is problematic:
#
# 1\. **You are using your analysis to evaluate variable importance:** If you are using parameter estimates from your model to diagnose which observations have physically important relationships with your response variable, variance inflation can make an important predictor look unimportant, and parameter estimates will be highly leveraged by small changes in the data.
#
# 2\. **You want to use your model to make predictions in a situation where the specific structure of collinearity between variables may have shifted:** When training a model on collinear data, the model only applies to data with that exact structure of collinearity.
# ### Calculate a linear regression on the global data:
# In this next step, we're going to calculate a linear regression on our data and determine whether there is a statistically significant relationship between per capita income and percent change in democratic vote.
# +
#first, formulate the model. See weather_trend.py in "Git_101" for a refresher on how.
#extract the predictor variables (my_list[0] is the response, so skip it;
#the original [1:10] slice dropped the tenth predictor)
X = np.array(votes[my_list[1:]].values)
#standardize predictors (z-score each column) to assist in interpretation of coefficients
X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
#extract variable that we want to "predict"
Y = np.array(votes['pct_dem_change'].values)
#standardize the response. BUG FIX: the original subtracted np.mean(X) (the
#predictor mean, ~0 after standardization) instead of Y's own mean.
Y = (Y - np.mean(Y)) / np.std(Y)
#specify the OLS model once and reuse the fitted object
lm = OLS(Y, X)
lm_results = lm.fit().summary()
# -
print(lm_results)
# ### TASK 9: Answer: which coefficients indicate a statistically significant relationship between parameter and pct_dem_change? What is your most important predictor variable? How can you tell?
#
# *Type answer here*
#
# ### TASK10: Are any of these parameters subject to variance inflation? How can you tell?
#
# *Type answer here*
#
# Now, let's plot our residuals to see if there are any spatial patterns in them.
#
# Remember residuals = predicted - fitted values
#Add model residuals to our "votes" geopandas dataframe
#(reuse the lm model object defined above instead of re-specifying OLS(Y,X)):
votes['lm_resid'] = lm.fit().resid
#Smoothed histogram of residuals — should be centered on zero if the model is unbiased.
sns.kdeplot(votes['lm_resid'].values, shade=True, color='slategrey')
# ### TASK 11: Are our residuals normally distributed with a mean of zero? What does that mean?
#
# *Type answer here*
#
# ## Penalized regression: ridge penalty
# In penalized regression, we intentionally bias the parameter estimates to stabilize them given collinearity in the dataset.
#
# From https://www.analyticsvidhya.com/blog/2016/01/ridge-lasso-regression-python-complete-tutorial/
# "As mentioned before, ridge regression performs โL2 regularizationโ, i.e. it adds a factor of sum of squares of coefficients in the optimization objective. Thus, ridge regression optimizes the following:
#
# **Objective = RSS + ฮฑ * (sum of square of coefficients)**
#
# Here, ฮฑ (alpha) is the parameter which balances the amount of emphasis given to minimizing RSS vs minimizing sum of square of coefficients. ฮฑ can take various values:
#
# * **ฮฑ = 0:** The objective becomes same as simple linear regression. Weโll get the same coefficients as simple linear regression.
#
# * **ฮฑ = โ:** The coefficients will approach zero. Why? Because of infinite weightage on square of coefficients, anything less than zero will make the objective infinite.
#
# * **0 < ฮฑ < โ:** The magnitude of ฮฑ will decide the weightage given to different parts of objective. The coefficients will be somewhere between 0 and ones for simple linear regression."
#
# In other words, the ridge penalty shrinks coefficients such that collinear coefficients will have more similar coefficient values. It has a "grouping" tendency.
# when the L2 weight (alpha) = 0, Ridge equals OLS; here alpha=1 applies a moderate penalty
model = Ridge(alpha=1)
# define model evaluation method: 10-fold cross-validation repeated 3 times, fixed seed
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model (sklearn reports negated MAE so larger is better)
scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
#force scores to be positive
scores = absolute(scores)
print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores)))
# refit on the full dataset to inspect the penalized coefficients
model.fit(X,Y)
#Print out the model coefficients
print(model.coef_)
# ## Penalized regression: lasso penalty
#
# From https://www.analyticsvidhya.com/blog/2016/01/ridge-lasso-regression-python-complete-tutorial/
# "LASSO stands for Least Absolute Shrinkage and Selection Operator. I know it doesnโt give much of an idea but there are 2 key words here โ โabsoluteโ and โselectionโ.
#
# Lets consider the former first and worry about the latter later.
#
# Lasso regression performs L1 regularization, i.e. it adds a factor of sum of absolute value of coefficients in the optimization objective. Thus, lasso regression optimizes the following:
#
# **Objective = RSS + ฮฑ * (sum of absolute value of coefficients)**
# Here, ฮฑ (alpha) works similar to that of ridge and provides a trade-off between balancing RSS and magnitude of coefficients. Like that of ridge, ฮฑ can take various values. Lets iterate it here briefly:
#
# * **ฮฑ = 0:** Same coefficients as simple linear regression
# * **ฮฑ = โ:** All coefficients zero (same logic as before)
# * **0 < ฮฑ < โ:** coefficients between 0 and that of simple linear regression
#
# Yes its appearing to be very similar to Ridge till now. But just hang on with me and youโll know the difference by the time we finish."
#
# In other words, the lasso penalty shrinks unimportant coefficients down towards zero, automatically "selecting" important predictor variables. But what if that shrunken coefficient is induced by incidental collinearity (i.e. is a feature of how we sampled our data)?
# when L1=0, Lasso equals OLS
# NOTE(review): sklearn discourages Lasso with alpha=0 (numerically unstable;
# use LinearRegression instead) — kept here for the didactic comparison.
model = Lasso(alpha=0)
# define model evaluation method: 10-fold cross-validation repeated 3 times, fixed seed
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
#force scores to be positive
scores = absolute(scores)
print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores)))
model.fit(X,Y)
#Print out the model coefficients
print(model.coef_)
#How do these compare with OLS coefficients above?
#How do these compare with OLS coefficients above?
# when L1 approaches infinity, certain coefficients will become exactly zero, and MAE equals the variance of our response variable:
model = Lasso(alpha=10000000)
# define model evaluation method: 10-fold cross-validation repeated 3 times, fixed seed
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
#force scores to be positive
scores = absolute(scores)
print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores)))
# with this extreme penalty every coefficient shrinks to exactly zero
model.fit(X,Y)
#Print out the model coefficients
print(model.coef_)
#How do these compare with OLS coefficients above?
#How do these compare with OLS coefficients above?
# ### Penalized regression: elastic net penalty
#
# In other words, the lasso penalty shrinks unimportant coefficients down towards zero, automatically "selecting" important predictor variables. The ridge penalty shrinks coefficients of collinear predictor variables nearer to each other, effectively partitioning the magnitude of response from the response variable between them, instead of "arbitrarily" partitioning it to one group.
#
# We can also run a regression with a linear combination of ridge and lasso, called the elastic net, that has a cool property called "group selection."
#
# The ridge penalty still works to distribute response variance equally between members of "groups" of collinear predictor variables. The lasso penalty still works to shrink certain coefficients to exactly zero so they can be ignored in model formulation. The elastic net produces models that are both sparse and stable under collinearity, by shrinking parameters of members of unimportant collinear predictor variables to exactly zero:
# when L1 approaches infinity, certain coefficients will become exactly zero, and MAE equals the variance of our response variable:
model = ElasticNet(alpha=1, l1_ratio=0.2)
# define model evaluation method
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(model, X, Y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
#force scores to be positive
scores = absolute(scores)
print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores)))
model.fit(X,Y)
#Print out the model coefficients
print(model.coef_)
#How do these compare with OLS coefficients above?
# ### TASK 11: Match these elastic net coefficients up with your original data. Do you see a logical grouping(s) between variables that have non-zero coefficients?Explain why or why not.
# *Type answer here*
# +
# Task 11 scratch cell:
| CodeSprints/multicollinearity_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1-7.2 Intro Python Practice
# ## `while()` loops & increments
# <font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
# - create forever loops using `while` and `break`
# - use incrementing variables in a while loop
# - control while loops using Boolean operators
# +
# [ ] use a "forever" while loop to get user input of integers to add to sum,
# until a non-digit is entered, then break the loop and print sum
# Renamed the accumulator `sum` -> `total` so the builtin sum() is not
# shadowed, and dropped the unused `num1 = "0"` initializer.
total = 0
while True:
    num1 = input("Enter your no.: ")
    if num1.isdigit():
        total += int(num1)
    else:
        # any non-digit entry ends the loop
        break
print("sum is:", total)
# +
# [ ] use a while True loop (forever loop) to give 4 chances for input of a correct color in a rainbow
# rainbow = "red orange yellow green blue indigo violet"
rainbow = "red orange yellow green blue indigo violet"
loop = 0
while True:
    rain_color = input("enter a color: ").lower()
    # Fixed: test against the list of color words, not substring containment.
    # Previously "ed", "low", or even "" matched because `"ed" in rainbow`
    # is a substring test on the whole string.
    if rain_color in rainbow.split():
        print("rainbow color is found!")
        break
    else:
        loop += 1
        # after the 4th wrong guess, give up
        if loop >= 4:
            print("no more chances!")
            break
# -
# [ ] Get input for a book title, keep looping while input is Not in title format (title is every word capitalized)
# Loop until str.istitle() accepts the entry, then report the title.
title = ""
while not title.istitle():
    title = input("enter a book title: ")
print("your book is:", title)
# [ ] create a math quiz question and ask for the solution until the input is correct
# Keep prompting until a digit string is entered, then print its square.
num = input("enter a number: ")
while not num.isdigit():
    num = input("enter a number: ")
print("square of", num, "is", int(num) * int(num))
# ### Fix the Error
# +
# [ ] review the code, run, fix the error
# A ticket "wins" when the count is divisible by 3; 0 ends the game.
tickets = int(input("enter tickets remaining (0 to quit): "))
while tickets > 0:
    # Fixed: use the modulo operator for the divisibility test instead of
    # comparing int(tickets/3) with the float tickets/3, which relies on
    # float precision and breaks for large integers.
    if tickets % 3 == 0:
        print("you win!")
    else:
        print("sorry, not a winner.")
    tickets = int(input("enter tickets remaining (0 to quit): "))
print("Game ended")
# -
# ### create a function: quiz_item() that asks a question and tests if input is correct
# - quiz_item()has 2 parameter **strings**: question and solution
# - shows question, gets answer input
# - returns True if `answer == solution` or continues to ask question until correct answer is provided
# - use a while loop
#
# create 2 or more quiz questions that call quiz_item()
# **Hint**: provide multiple choice or T/F answers
# Create quiz_item() and 2 or more quiz questions that call quiz_item()
def quiz_item(question, solution):
    """Show `question` until the capitalized answer equals `solution`; return True."""
    answered = False
    while not answered:
        answered = input(question).capitalize() == solution
    return True

quiz_item("what is the capital of Georgia = ?: ", "Atlanta")
quiz_item("12 - 2 = 10 (T/F)?: ", "T")
# [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) ยฉ 2017 Microsoft
| Python Absolute Beginner/Module_4_Practice_2_IntroPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SHFQA
# Just like the driver for the HDAWG in the previous example, we now use the `zhinst.qcodes.SHFQA` instrument driver.
# +
import numpy as np
import matplotlib.pyplot as plt
import qcodes as qc
import zhinst.qcodes as ziqc
# Connect to the SHFQA instrument over the 1 GbE interface via a local data server.
shfqa = ziqc.SHFQA("shfqa", "dev12036", interface="1gbe", host="localhost")
# -
# List the driver's submodules and top-level parameters.
print([k for k in shfqa.submodules.keys()])
print([k for k in shfqa.parameters.keys()])
# ## Channel parameters of the SHFQA
#
# SHFQA has 2 or 4 channels, each of them can serve for a single readout line. Each QAchannel module can readout up to 8 or 16 qubits.
print([k for k in shfqa.qachannels[0].parameters.keys()])
# Configure QA channel 0: input/output power ranges (presumably dBm) and a
# 5 GHz center frequency -- TODO confirm units against the zhinst-qcodes docs.
shfqa.qachannels[0].input_range(0)
shfqa.qachannels[0].output_range(-5)
shfqa.qachannels[0].center_freq(5e9)
# Enable the channel's signal input and output.
shfqa.qachannels[0].input('on')
shfqa.qachannels[0].output('on')
# test what are correctly printed
print(shfqa.qachannels[0].input_range.__doc__)
# ## Spectroscopy mode of the SHFQA
# SHFQA has two application modes, **Spectroscopy** and **Readout**. Spectroscopy mode is generally used for resonator spectroscopy experiments, and Readout mode is used for qubit readout experiments with fixed readout frequencies.
#
# In Spectroscopy mode, offset frequency sweep is done by the **sweeper** module. Each frequency sweep is triggered by an internal or external trigger. External trigger is recommended. With **shfqa.set_trigger_loopback()** a marker output is connected to a trigger input internally without any physical connections.
print([k for k in shfqa.qachannels[0].sweeper.parameters.keys()])
# +
# Put channel 0 into spectroscopy mode and configure its offset-frequency sweeper.
shfqa.qachannels[0].mode('spectroscopy')
sweeper0=shfqa.qachannels[0].sweeper
# Internally route a marker output to a trigger input (no physical cabling).
shfqa.set_trigger_loopback()
sweeper0.trigger_source("channel0_trigger_input0")
sweeper0.trigger_level(0)
sweeper0.trigger_imp50(1)
# Fixed: start frequency was -200e-6 (-0.0002 Hz), almost certainly a typo for
# -200e6, giving a sweep symmetric about the center frequency to match
# stop_frequency(200e6) below.
sweeper0.start_frequency(-200e6)
sweeper0.stop_frequency(200e6)
sweeper0.num_points(51)
sweeper0.mapping("linear")
sweeper0.integration_time(100e-6)
sweeper0.num_averages(2)
sweeper0.averaging_mode("sequential")
# print(sweeper0.mapping.__doc__)
# -
# Execute the sweep and fetch the result.
sweeper0.run()
result=sweeper0.read()
# sweeper0.plot()
# ## Readout mode of the SHFQA
# In Readout mode, **generator** module is used to configure waveform playback, such as upload waveforms, construct and compile readout sequences. Integration parameters and result source are configured by **readout**. Please note that the data type of uploaded waveforms has to be **complex128**.
shfqa.qachannels[0].mode('readout')
generator0 = shfqa.qachannels[0].generator
readout0 = shfqa.qachannels[0].readout
print([k for k in shfqa.qachannels[0].generator.parameters.keys()])
print([k for k in shfqa.qachannels[0].readout.parameters.keys()])
# help(generator0)
# help(readout0)
# +
num_readouts = 100
pulse_duration = 100e-9
readout_freq = 200e6
sampling_rate = 2e9
pulse = 0.5*np.exp(2j*np.pi*readout_freq*np.linspace(0, pulse_duration, int(pulse_duration * sampling_rate)))
weight = np.conj(pulse)
shfqa.set_trigger_loopback()
generator0.dig_trigger1_source("chan0trigin0")
# Delay between receving the trigger and playing the readout pulses
generator0.playback_delay(0)
#Define the program
seqc_program ="""
repeat($param1$) {
waitDigTrigger(1);
startQA(QA_GEN_ALL, QA_INT_ALL, true, 0, 0x0);
}
"""
generator0.set_sequence_params(
sequence_type="Custom",
program = seqc_program ,
custom_params = [num_readouts],
)
# Upload readout pulse and integration weight
generator0.reset_queue()
generator0.queue_waveform(pulse)
generator0.compile()
generator0.upload_waveforms()
readout0.integrations[0].set_int_weights(weight)
# +
setup_delay = 200e-9
readout0.integration_length(len(weight))
readout0.integration_delay(setup_delay)
readout0.result_source("result_of_integration")
readout0.arm(length = num_readouts, averages = 1)
generator0.stop()
generator0.run()
result = readout0.read()
# -
# ## Scope monitor of the SHFQA
#
# The **scope** module of the SHFQA is used to monitor or record time traces of signals at IF frequency down converted by the SHFQA.
print([k for k in shfqa.scope.parameters.keys()])
# help(shfqa.scope)
# Record a time trace of the down-converted IF signal on scope channel 1.
scope=shfqa.scope
scope.channel1('on')
# NOTE(review): the bare attribute access below is a no-op -- it looks like a
# call selecting the scope input (e.g. scope.input_select1(...)) was intended;
# confirm against the driver API.
scope.input_select1
scope.trigger_source("channel0_trigger_input0")
scope.trigger_delay(200e-9)
scope.length(1024)  # samples per trace
scope.averaging(1)
scope.segments(1)
scope.run()
result=scope.read()
# scope.stop()
# ## DIO of the SHFQA
# The **dios** of the SHFQA is used to communicate qubit readout results from SHFQA to qubit control instruments, such as SHFSG or HDAWG.
print([k for k in shfqa.dios[0].parameters.keys()])
| examples/example2-5_SHFQA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# -
# Ground-truth (averaged apparent-age) labels for the APPA-REAL training split.
train_df = pd.read_csv("/home/Data/appa-real/interim/appa-real-release/gt_avg_train.csv")
train_df.head()
# Extended per-image category annotations for the same split.
extend_df = pd.read_csv("/home/Data/appa-real/interim/appa-real-release/allcategories_train.csv")
extend_df.head()
# Sanity-check the processed train/valid/test CSVs (row counts and preview).
test_df = pd.read_csv("/home/Data/appa-real/processed/train.csv")
print(test_df.count())
test_df.head()
test_df = pd.read_csv("/home/Data/appa-real/processed/valid.csv")
print(test_df.count())
test_df.head()
test_df = pd.read_csv("/home/Data/appa-real/processed/test.csv")
print(test_df.count())
test_df.head()
# Inspect the dtypes pandas inferred for the label columns.
age = test_df["age"][0]
type(age)
gender = test_df["gender"][0]
type(gender)
# +
import numpy
# Cast single values to numpy.float32 (presumably the dtype the downstream
# training code expects -- confirm against the dataset loader).
gender = test_df.iloc[0]["gender"].astype(numpy.float32)
# -
gender
gender = test_df.iloc[0]["age"].astype(numpy.float32)
type(gender)
# Fixed: a trailing dot made this line a SyntaxError ("import torch.nn.").
import torch.nn
| notebooks/appa-real-EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mlenv
# language: python
# name: mlenv
# ---
## Import dependencies
import numpy as np
import pandas as pd
from pathlib import Path
from getpass import getpass
from sqlalchemy import create_engine
import psycopg2
from sklearn.preprocessing import LabelEncoder
## Load the data
# Read the raw Disneyland reviews CSV into a DataFrame.
file_path = Path("Resources/DisneylandReviews.csv")
disney_raw_df = pd.read_csv(file_path)
# Inspect data
disney_raw_df
# Inspect counts
disney_raw_df.count()
# Inspect data types
disney_raw_df.dtypes
# Check length of reviews (word count per review)
disney_raw_df["Review_Text"].astype('str').str.split().str.len()
# Check first entry to confirm results
disney_raw_df["Review_Text"].loc[0]
disney_raw_df["Review_Text"].astype('str').str.len().loc[0]
# Add columns for review lengths: word count and character count
disney_raw_df["Review_Words"] = disney_raw_df["Review_Text"].astype('str').str.split().str.len()
disney_raw_df["Review_Letters"] = disney_raw_df["Review_Text"].astype('str').str.len()
disney_raw_df.describe()
# Remove data with missing time values (the source encodes them as the literal string 'missing')
disney_raw_df = disney_raw_df[disney_raw_df["Year_Month"]!='missing']
# Split year/month column into two columns (split on "-")
disney_raw_df[["Year", "Month"]] = disney_raw_df["Year_Month"].str.split(pat="-", expand = True)
disney_raw_df["Year_Month"].value_counts()
# Check for nulls
disney_raw_df.isna().sum()
# Check unique locations
locations = disney_raw_df["Reviewer_Location"].unique()
sorted(locations)
# Replace locations containing mojibake/missing characters with ASCII spellings
disney_raw_df["Reviewer_Location"] = disney_raw_df["Reviewer_Location"].replace(["Cura๏ฟฝao", "C๏ฟฝte d'Ivoire", "๏ฟฝland Islands"],["Curacao", "Cote d'Ivoire", "Aland Islands"])
# Check which disney parks were visited
disney_raw_df["Branch"].unique()
# Come up with function for determining if reviewer was a local or tourist (in broad terms)
def tourist(row):
    """Classify a review row as local (0) or tourist (1).

    A reviewer is "local" when their Reviewer_Location matches the home
    country of the park in Branch; everyone else counts as a tourist.
    Replaces the original chain of duplicated `&`-joined comparisons
    (bitwise `&` on booleans, a precedence hazard) with a single
    data-driven lookup that is easy to extend with more parks.
    """
    # Home country of each park branch present in the dataset.
    home_country = {
        "Disneyland_HongKong": "Hong Kong",
        "Disneyland_California": "United States",
        "Disneyland_Paris": "France",
    }
    return 0 if home_country.get(row["Branch"]) == row["Reviewer_Location"] else 1
# Create tourism column: 1 is a reviewer from another country, 0 is a reviewer from the same country
disney_raw_df["Tourist"] = disney_raw_df.apply(tourist, axis=1)
# Check results
disney_raw_df[disney_raw_df["Tourist"]==0]
# Check counts of tourist vs local
disney_raw_df["Tourist"].value_counts()
# Change data types (Year/Month were split out as strings above)
disney_raw_df["Tourist"] = disney_raw_df["Tourist"].astype(int)
disney_raw_df["Month"] = disney_raw_df["Month"].astype(int)
disney_raw_df["Year"] = disney_raw_df["Year"].astype(int)
disney_raw_df["Year_Month"] = pd.to_datetime(disney_raw_df["Year_Month"])
disney_raw_df.dtypes
# Look at range of years
sorted(disney_raw_df["Year"].unique())
# Look for duplicate rows
disney_raw_df["Review_ID"].duplicated().sum()
# Drop duplicate rows, keeping the first occurrence of each Review_ID
disney_raw_df = disney_raw_df.drop_duplicates(subset="Review_ID", keep="first")
# We may have to bin locations; check number/distribution of unique entries
disney_raw_df["Reviewer_Location"].value_counts()
# Create instance of labelencoder
labelencoder = LabelEncoder()
# Encode categorical data. Reusing one encoder is fine here: fit_transform
# refits it from scratch for each column.
disney_raw_df["Branch_Encoded"] = labelencoder.fit_transform(disney_raw_df["Branch"])
disney_raw_df["Location_Encoded"] = labelencoder.fit_transform(disney_raw_df["Reviewer_Location"])
# View encoded branches
disney_raw_df.groupby(["Branch_Encoded", "Branch"]).size()
disney_raw_df.groupby(["Tourist", "Branch"]).size()
# View encoded locations
disney_raw_df.groupby(["Location_Encoded", "Reviewer_Location"]).size()
# NOTE: this is an alias, not a copy -- both names refer to the same DataFrame.
disney_clean_df = disney_raw_df
# Reset index
disney_clean_df.reset_index(inplace=True, drop=True)
## Now we upload our dataframe to SQL
# Build the connection string pieces for a local PostgreSQL instance.
protocol = 'postgresql'
user = 'postgres'
location = 'localhost'
port = '5432'
db = 'disney_db'
# Prompt for the password interactively so it is never stored in the notebook.
password = getpass('Enter database password')
# Store string as variable
db_string = f'{protocol}://{user}:{password}@{location}:{port}/{db}'
# Create database engine
engine = create_engine(db_string)
# Send to database, replacing the table if it already exists
disney_clean_df.to_sql(name='disneyland_reviews', con=engine, if_exists='replace')
# Export to csv
disney_clean_df.to_csv("Resources/disney_clean.csv", index=False)
| .ipynb_checkpoints/disney_review_ETL-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/apache/beam/blob/master/examples/notebooks/get-started/try-apache-beam-java.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="lNKIMlEDZ_Vw" colab_type="text"
# # Try Apache Beam - Java
#
# In this notebook, we set up a Java development environment and work through a simple example using the [DirectRunner](https://beam.apache.org/documentation/runners/direct/). You can explore other runners with the [Beam Capatibility Matrix](https://beam.apache.org/documentation/runners/capability-matrix/).
#
# To navigate through different sections, use the table of contents. From **View** drop-down list, select **Table of contents**.
#
# To run a code cell, you can click the **Run cell** button at the top left of the cell, or by select it and press **`Shift+Enter`**. Try modifying a code cell and re-running it to see what happens.
#
# To learn more about Colab, see [Welcome to Colaboratory!](https://colab.sandbox.google.com/notebooks/welcome.ipynb).
# + [markdown] id="Fz6KSQ13_3Rr" colab_type="text"
# # Setup
#
# First, you need to set up your environment.
# + id="GOOk81Jj_yUy" colab_type="code" outputId="68240031-2990-41fa-a327-38e15dc9fdf9" colab={"base_uri": "https://localhost:8080/", "height": 136}
# Run and print a shell command.
def run(cmd):
    """Echo `cmd`, execute it via the notebook's shell magic, then print a blank line."""
    print('>> {}'.format(cmd))
    # !{cmd} # This is magic to run 'cmd' in the shell.
    print('')

# Copy the input file (Shakespeare's King Lear) into the local filesystem.
run('mkdir -p data')
run('gsutil cp gs://dataflow-samples/shakespeare/kinglear.txt data/')
# + [markdown] id="Hmto8JTSWwUK" colab_type="text"
# ## Installing development tools
#
# Let's start by installing Java. We'll use the `default-jdk`, which uses [OpenJDK](https://openjdk.java.net/). This will take a while, so feel free to go for a walk or do some stretching.
#
# **Note:** Alternatively, you could install the proprietary [Oracle JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html) instead.
# + id="ONYtX0doWpFz" colab_type="code" outputId="04bfa861-0bf8-4352-e878-0f24c6c7b61e" colab={"base_uri": "https://localhost:8080/", "height": 187}
# Update and upgrade the system before installing anything else.
run('apt-get update > /dev/null')
run('apt-get upgrade > /dev/null')
# Install the Java JDK (OpenJDK, via the default-jdk metapackage).
run('apt-get install default-jdk > /dev/null')
# Check the Java version to see if everything is working well.
run('javac -version')
# + [markdown] id="Wab7H4IZW9xZ" colab_type="text"
# Now, let's install [Gradle](https://gradle.org/), which we'll need to automate the build and running processes for our application.
#
# **Note:** Alternatively, you could install and configure [Maven](https://maven.apache.org/) instead.
# + id="xS3Oeu3DW7vy" colab_type="code" outputId="1b2c1f11-5e35-4d22-8002-814ea61224c9" colab={"base_uri": "https://localhost:8080/", "height": 595}
import os

# Download the gradle source.
gradle_version = 'gradle-5.0'
gradle_path = f"/opt/{gradle_version}"
# Download and unpack Gradle only if it is not already present.
if not os.path.exists(gradle_path):
    run(f"wget -q -nc -O gradle.zip https://services.gradle.org/distributions/{gradle_version}-bin.zip")
    run('unzip -q -d /opt gradle.zip')
    run('rm -f gradle.zip')

# We're choosing to use the absolute path instead of adding it to the $PATH environment variable.
def gradle(args):
    """Invoke the downloaded Gradle binary with plain console output and `args`."""
    run(f"{gradle_path}/bin/gradle --console=plain {args}")

gradle('-v')
# + [markdown] id="YTkkapX9KVhA" colab_type="text"
# ## build.gradle
#
# We'll also need a [`build.gradle`](https://guides.gradle.org/creating-new-gradle-builds/) file which will allow us to invoke some useful commands.
# + id="oUqfqWyMuIfR" colab_type="code" outputId="292a06b2-ce06-46b6-8598-480d83974bbb" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %%writefile build.gradle
// Gradle build script for the WordCount sample; the 'shadow' plugin produces
// a self-contained (fat) jar runnable with `java -jar`.
plugins {
    // id 'idea' // Uncomment for IntelliJ IDE
    // id 'eclipse' // Uncomment for Eclipse IDE

    // Apply java plugin and make it a runnable application.
    id 'java'
    id 'application'

    // 'shadow' allows us to embed all the dependencies into a fat jar.
    id 'com.github.johnrengelman.shadow' version '4.0.3'
}

// This is the path of the main class, stored within ./src/main/java/
mainClassName = 'samples.quickstart.WordCount'

// Declare the sources from which to fetch dependencies.
repositories {
    mavenCentral()
}

// Java version compatibility.
sourceCompatibility = 1.8
targetCompatibility = 1.8

// Use the latest Apache Beam major version 2.
// You can also lock into a minor version like '2.9.+'.
ext.apacheBeamVersion = '2.+'

// Declare the dependencies of the project.
dependencies {
    shadow "org.apache.beam:beam-sdks-java-core:$apacheBeamVersion"

    runtime "org.apache.beam:beam-runners-direct-java:$apacheBeamVersion"
    runtime "org.slf4j:slf4j-api:1.+"
    runtime "org.slf4j:slf4j-jdk14:1.+"

    testCompile "junit:junit:4.+"
}

// Configure 'shadowJar' instead of 'jar' to set up the fat jar.
shadowJar {
    baseName = 'WordCount' // Name of the fat jar file.
    classifier = null // Set to null, otherwise 'shadow' appends a '-all' to the jar file name.
    manifest {
        attributes('Main-Class': mainClassName) // Specify where the main class resides.
    }
}
# + [markdown] id="cwZcqmFgoLJ9" colab_type="text"
# ## Creating the directory structure
#
# Java and Gradle expect a specific [directory structure](https://docs.gradle.org/current/userguide/organizing_gradle_projects.html). This helps organize large projects into a standard structure.
#
# For now, we only need a place where our quickstart code will reside. That has to go within `./src/main/java/`.
# + id="Mr1KTQznbd9F" colab_type="code" outputId="2e4635b9-0577-4399-b8d6-078183ff9da2" colab={"base_uri": "https://localhost:8080/", "height": 51}
# Create the package directory Gradle expects for the main class's sources.
run('mkdir -p src/main/java/samples/quickstart')
# + [markdown] id="cPvvFB19uXNw" colab_type="text"
# # Minimal word count
#
# The following example is the "Hello, World!" of data processing, a basic implementation of word count. We're creating a simple data processing pipeline that reads a text file and counts the number of occurrences of every word.
#
# There are many scenarios where all the data does not fit in memory. Notice that the outputs of the pipeline go to the file system, which allows for large processing jobs in distributed environments.
# + [markdown] id="Fl3iUat7KYIE" colab_type="text"
# ## WordCount.java
# + id="5l3S2mjMBKhT" colab_type="code" outputId="6e55ec70-e727-44c9-a425-4afed97188fe" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %%writefile src/main/java/samples/quickstart/WordCount.java
package samples.quickstart;

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Filter;
import org.apache.beam.sdk.transforms.FlatMapElements;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.TypeDescriptors;

import java.util.Arrays;

// Minimal word-count pipeline: read lines -> split into words -> drop empties
// -> count -> format "word: count" -> write to outputs/part-*.
public class WordCount {
  public static void main(String[] args) {
    String inputsDir = "data/*";
    String outputsPrefix = "outputs/part";

    PipelineOptions options = PipelineOptionsFactory.fromArgs(args).create();
    Pipeline pipeline = Pipeline.create(options);
    pipeline
        .apply("Read lines", TextIO.read().from(inputsDir))
        .apply("Find words", FlatMapElements.into(TypeDescriptors.strings())
            .via((String line) -> Arrays.asList(line.split("[^\\p{L}]+"))))
        .apply("Filter empty words", Filter.by((String word) -> !word.isEmpty()))
        .apply("Count words", Count.perElement())
        .apply("Write results", MapElements.into(TypeDescriptors.strings())
            .via((KV<String, Long> wordCount) ->
                  wordCount.getKey() + ": " + wordCount.getValue()))
        .apply(TextIO.write().to(outputsPrefix));
    pipeline.run();
  }
}
# + [markdown] id="yoO4xHnaKiz9" colab_type="text"
# ## Build and run
# + [markdown] id="giJMbbcq2OPu" colab_type="text"
# Let's first check how the final file system structure looks like. These are all the files required to build and run our application.
#
# * `build.gradle` - build configuration for Gradle
# * `src/main/java/samples/quickstart/WordCount.java` - application source code
# * `data/kinglear.txt` - input data, this could be any file or files
#
# We are now ready to build the application using `gradle build`.
# + id="urmCmtG08F-0" colab_type="code" outputId="a2b65437-4244-4844-82d2-1789d5cfd7ca" colab={"base_uri": "https://localhost:8080/", "height": 510}
# Build the project (produces both the thin and the fat jar, see below).
gradle('build')
# Check the generated build files.
run('ls -lh build/libs/')
# + [markdown] id="LrRFNZHD8dtu" colab_type="text"
# There are two files generated:
# * The `content.jar` file, the application generated from the regular `build` command. It's only a few kilobytes in size.
# * The `WordCount.jar` file, with the `baseName` we specified in the `shadowJar` section of the `build.gradle` file. It's several megabytes in size, with all the required libraries it needs to run embedded in it.
#
# The file we're actually interested in is the fat JAR file `WordCount.jar`. To run the fat JAR, we'll use the `gradle runShadow` command.
# + id="CgTXBdTsBn1F" colab_type="code" outputId="5e447cf9-a01a-4a82-9237-676f0091d4bd" colab={"base_uri": "https://localhost:8080/", "height": 1822}
# Run the shadow (fat jar) build.
gradle('runShadow')
# Sample the first 20 results, remember there are no ordering guarantees.
run('head -n 20 outputs/part-00000-of-*')
# + [markdown] id="T_oqlIM55MzM" colab_type="text"
# ## Distributing your application
#
# We can run our fat JAR file as long as we have a Java Runtime Environment installed.
#
# To distribute, we copy the fat JAR file and run it with `java -jar`.
# + id="b3YSRjYnavpd" colab_type="code" outputId="ef88153a-f75f-4e80-8434-ac452a77a199" colab={"base_uri": "https://localhost:8080/", "height": 1907}
# You can now distribute and run your Java application as a standalone jar file.
run('cp build/libs/WordCount.jar .')
run('java -jar WordCount.jar')
# Sample the first 20 results, remember there are no ordering guarantees.
run('head -n 20 outputs/part-00000-of-*')
# + [markdown] id="k-HubCrk-h_G" colab_type="text"
# # Word count with comments
#
# Below is mostly the same code as above, but with comments explaining every line in more detail.
# + id="wvnWyYklCXer" colab_type="code" outputId="275507a3-05e9-44ca-8625-d745154d5720" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %%writefile src/main/java/samples/quickstart/WordCount.java
package samples.quickstart;

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Filter;
import org.apache.beam.sdk.transforms.FlatMapElements;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.TypeDescriptors;

import java.util.Arrays;

public class WordCount {
  public static void main(String[] args) {
    String inputsDir = "data/*";
    String outputsPrefix = "outputs/part";

    PipelineOptions options = PipelineOptionsFactory.fromArgs(args).create();
    Pipeline pipeline = Pipeline.create(options);

    // Store the word counts in a PCollection.
    // Each element is a KeyValue of (word, count) of types KV<String, Long>.
    PCollection<KV<String, Long>> wordCounts =
        // The input PCollection is an empty pipeline.
        pipeline
        // Read lines from a text file.
        .apply("Read lines", TextIO.read().from(inputsDir))
        // Element type: String - text line
        // Use a regular expression to iterate over all words in the line.
        // FlatMapElements will yield an element for every element in an iterable.
        .apply("Find words", FlatMapElements.into(TypeDescriptors.strings())
            .via((String line) -> Arrays.asList(line.split("[^\\p{L}]+"))))
        // Element type: String - word
        // Keep only non-empty words.
        .apply("Filter empty words", Filter.by((String word) -> !word.isEmpty()))
        // Element type: String - word
        // Count each unique word.
        .apply("Count words", Count.perElement());
        // Element type: KV<String, Long> - key: word, value: counts

    // We can process a PCollection through other pipelines, too.
    // The input PCollection are the wordCounts from the previous step.
    wordCounts
        // Format the results into a string so we can write them to a file.
        .apply("Write results", MapElements.into(TypeDescriptors.strings())
            .via((KV<String, Long> wordCount) ->
                  wordCount.getKey() + ": " + wordCount.getValue()))
        // Element type: String - text line
        // Finally, write the results to a file.
        .apply(TextIO.write().to(outputsPrefix));

    // We have to explicitly run the pipeline, otherwise it's only a definition.
    pipeline.run();
  }
}
# + id="wKAJp7ON4Vpp" colab_type="code" outputId="9a4c7a72-70a1-4d31-89c1-cf7fb8fcdf53" colab={"base_uri": "https://localhost:8080/", "height": 2060}
# Build and run the project with the newly written (commented) source;
# the 'runShadow' task implicitly does a 'build'.
gradle('runShadow')
# Sample the first 20 results, remember there are no ordering guarantees.
run('head -n 20 outputs/part-00000-of-*')
| examples/notebooks/get-started/try-apache-beam-java.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!python -m pip install seaborn
# -
# %load_ext autoreload
# %autoreload 2
import seaborn as sns
# Load the seaborn sample Titanic dataset.
df = sns.load_dataset('titanic')
df
df.info()
# survived, pclass, age, sibsp, parch, fare
# NOTE(review): despite the comment above, 'age' is not used as a feature
# below -- confirm whether that is intentional (it contains missing values).
X = df[['pclass', 'sibsp', 'parch', 'fare']]
Y = df[['survived']]
X.shape, Y.shape
from sklearn.model_selection import train_test_split
# Random split with sklearn defaults (no random_state: results vary per run).
x_train, x_test, y_train, y_test = train_test_split(X, Y)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
from sklearn.linear_model import LogisticRegression
logR = LogisticRegression()
type(logR)
# Fit the logistic-regression classifier on the training split.
logR.fit(x_train, y_train)
logR.classes_
logR.coef_
# Coefficient order matches the feature order:
# 'pclass', 'sibsp', 'parch', 'fare'
logR.score(x_train, y_train)
logR.predict(x_train)
# (Translated from Korean) Because the train/test split is random, the learned
# model -- and therefore these outputs -- can differ between runs and machines.
logR.predict_proba(x_train)
logR.predict_proba(x_train[10:13])
# (Translated) Compare survival vs. death probabilities: each pair sums to 1.
0.42995782+0.57004218
0.77516024+0.22483976
0.69949365+0.30050635
# (Translated) Note on the sigmoid (logistic) function; the original comment
# also contrasted its use with deep learning.
logR.predict(x_train[10:13])
# (Translated) Predicts 1 when that class's probability is larger, else 0.
from sklearn import metrics
# Fixed: confusion_matrix expects (y_true, y_pred). Passing the feature
# matrix x_train as the first argument was wrong -- compare the training
# labels against the model's predictions instead.
metrics.confusion_matrix(y_train, logR.predict(x_train))
| titanic_classification.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// [this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/csharp/Docs)
//
// # Formatting Outputs
// ## HTML Formatting
//
// When you return a value or display a value in a .NET notebook, the default formatting behavior normally uses HTML to try to provide some useful information about the object.
//
// ### Enumerables
//
// If it's an array or other type implementing `IEnumerable`, that might look like this:
// +
// An explicit display() call and a trailing cell expression render the same way.
display(new [] {"hello", "world"} );

Enumerable.Range(1, 5)
// -
// As you can see, the same basic structure is used whether you pass the object to the `display` method or return it as the cell's value.
//
// ### Objects
//
// Similarly to the behavior for `IEnumerable` objects, you'll also see table output for dictionaries, but for each value in the dictionary, the key is provided rather than the index within the collection.
// Build a dictionary with C# index-initializer syntax; the trailing
// `dictionary` expression makes the notebook render it.
var dictionary = new Dictionary<string, int>
{
    ["zero"] = 0,
    ["one"] = 1,
    ["two"] = 2
};
dictionary
// The default formatting behavior for other types of objects is to produce a table showing their properties and the values of those properties.
// +
// A simple POCO used to demonstrate the default property-table formatting.
class Person
{
    public string FirstName { get; set; }
    public string LastName { get; set; }
    public int Age { get; set; }
}

display(new Person { FirstName = "Mitch", LastName = "Buchannon", Age = 42} );
// -
// When you have a collection of such objects, you can see the values listed for each item in the collection:
// +
var groupOfPeople = new []
{
    new Person { FirstName = "Mitch", LastName = "Buchannon", Age = 42 },
    new Person { FirstName = "Hobie ", LastName = "Buchannon", Age = 23 },
    new Person { FirstName = "Summer", LastName = "Quinn", Age = 25 },
    new Person { FirstName = "C.J.", LastName = "Parker", Age = 23 },
};

display(groupOfPeople);
// -
// ### Dictionaries
//
// Displaying a dictionary will show the items by key rather than index.
// Key the group by first name (note "Hobie " carries a trailing space in the data).
display(groupOfPeople.ToDictionary(p => $"{p.FirstName}"));
// Now let's try something a bit more complex. Let's look at a graph of objects.
//
// We'll redefine the `Person` class to allow a reference to a collection of other `Person` instances.
// +
// Redefinition of Person that adds a Friends collection, producing an object
// graph (with cycles) to show how the default formatter renders nested objects.
class Person
{
// Given name of the person.
public string FirstName { get; set; }
// Family name of the person.
public string LastName { get; set; }
// Age in years.
public int Age { get; set; }
// Other Person instances linked to this one; may contain cycles
// (friends referencing each other), which stresses the formatter.
public List<Person> Friends { get; } = new List<Person>();
}
var mitch = new Person { FirstName = "Mitch", LastName = "Buchannon", Age = 42 };
var hobie = new Person { FirstName = "Hobie ", LastName = "Buchannon", Age = 23 };
var summer = new Person { FirstName = "Summer", LastName = "Quinn", Age = 25 };
var cj = new Person { FirstName = "C.J.", LastName = "Parker", Age = 23 };
mitch.Friends.AddRange(new [] { hobie, summer, cj });
hobie.Friends.AddRange(new [] { mitch, summer, cj });
summer.Friends.AddRange(new [] { mitch, hobie, cj });
cj.Friends.AddRange(new [] { mitch, hobie, summer });
var groupOfPeople = new List<Person> { mitch, hobie, summer, cj };
display(groupOfPeople);
// -
// That's a bit hard to read, right? The default formatting behaviors are not always as useful as they might be. In order to give you more control, object formatters can be customized from within the .NET notebook.
// ## Customization
//
// ## Registering plain text formatters
// Let's clean up the output above by customizing the formatter for the `Person.Friends` property, which is creating a lot of noise.
//
// The way to do this is to use the `Formatter` API. This API lets you customize the formatting for a specific type. For example:
// +
using Microsoft.DotNet.Interactive.Formatting;
Formatter.Register<Person>((person, writer) => {
writer.Write("person");
}, mimeType: "text/plain");
groupOfPeople
// -
// With that in mind, we can make it even more concise by registering a good formatter for `Person`:
// +
Formatter.ResetToDefault();
Formatter.Register<Person>((person, writer) => {
writer.Write(person.FirstName);
}, mimeType: "text/plain");
groupOfPeople
// -
// ### Registering HTML formatters
//
// To replace the default HTML table view, you can register a formatter for the `"text/html"` mime type. Let's do that, and write some HTML using PocketView.
// +
using static Microsoft.DotNet.Interactive.Formatting.PocketViewTags;
Formatter.ResetToDefault();
Formatter.Register<List<Person>>((people, writer) =>
{
foreach (var person in people)
{
writer.Write(
span(
b(person.FirstName),
" ",
i($"({person.Age} years old and has {person.Friends.Count} friends)"),
br));
}
}, mimeType: "text/html");
groupOfPeople
// -
// ---
// **_See also_**
// * [Displaying output](Displaying%20output.ipynb)
// * [HTML](HTML.ipynb)
| samples/notebooks/csharp/Docs/Formatting-outputs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Install some more libs
# ! sudo pip install pandas
# ! sudo pip install matplotlib
# ! sudo apt-get -y install python3-tk
# import required libs
from revscoring.dependencies import solve
from revscoring.features import wikitext
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# Load dataset
df = pd.read_csv("enwiki.draft_quality.75_not_OK_sample.censored.tsv", sep="\t")
df.head()
# ### Character features
# +
# The number of characters
chars = lambda x:list(solve([ wikitext.revision.chars], cache={'datasource.revision.text': x}))[0]
df["chars"] = df["censored_text"].apply(chars)
# whitespace_chars
whitespace_chars = lambda x:list(solve([ wikitext.revision.whitespace_chars], cache={'datasource.revision.text': x}))[0]
df["whitespace_chars"] = df["censored_text"].apply(whitespace_chars)
# The number of wikitext markup characters
markup_chars = lambda x:list(solve([ wikitext.revision.markup_chars], cache={'datasource.revision.text': x}))[0]
df["markup_chars"] = df["censored_text"].apply(markup_chars)
# The number of Chinese/Japanese/Korean characters
cjk_chars = lambda x:list(solve([ wikitext.revision.cjk_chars], cache={'datasource.revision.text': x}))[0]
df["cjk_chars"] = df["censored_text"].apply(cjk_chars)
# The number of HTML entity characters
entity_chars = lambda x:list(solve([ wikitext.revision.entity_chars], cache={'datasource.revision.text': x}))[0]
df["entity_chars"] = df["censored_text"].apply(entity_chars)
# The number of URL characters
url_chars = lambda x:list(solve([ wikitext.revision.url_chars], cache={'datasource.revision.text': x}))[0]
df["url_chars"] = df["censored_text"].apply(url_chars)
# The number of word characters
word_chars = lambda x:list(solve([ wikitext.revision.word_chars], cache={'datasource.revision.text': x}))[0]
df["word_chars"] = df["censored_text"].apply(word_chars)
# The number of UPPERCASE WORD characters
uppercase_word_chars = lambda x:list(solve([ wikitext.revision.uppercase_word_chars], cache={'datasource.revision.text': x}))[0]
df["uppercase_word_chars"] = df["censored_text"].apply(uppercase_word_chars)
# The number of punctuation characters
punctuation_chars = lambda x:list(solve([ wikitext.revision.punctuation_chars], cache={'datasource.revision.text': x}))[0]
df["punctuation_chars"] = df["censored_text"].apply(punctuation_chars)
# The number of break characters
break_chars = lambda x:list(solve([ wikitext.revision.break_chars], cache={'datasource.revision.text': x}))[0]
df["break_chars"] = df["censored_text"].apply(break_chars)
# The length of the longest character repetition
longest_repeated_char = lambda x:list(solve([ wikitext.revision.longest_repeated_char], cache={'datasource.revision.text': x}))[0]
df["longest_repeated_char"] = df["censored_text"].apply(longest_repeated_char)
# -
# ### Tokenized features
# +
# The number of tokens
tokens = lambda x:list(solve([ wikitext.revision.tokens], cache={'datasource.revision.text': x}))[0]
df["tokens"] = df["censored_text"].apply(tokens)
# The number of number tokens
numbers = lambda x:list(solve([ wikitext.revision.numbers], cache={'datasource.revision.text': x}))[0]
df["numbers"] = df["censored_text"].apply(numbers)
# The number of whitespace tokens
whitespaces = lambda x:list(solve([ wikitext.revision.whitespaces], cache={'datasource.revision.text': x}))[0]
df["whitespaces"] = df["censored_text"].apply(whitespaces)
# The number of markup tokens
markups = lambda x:list(solve([ wikitext.revision.markups], cache={'datasource.revision.text': x}))[0]
df["markups"] = df["censored_text"].apply(markups)
# The number of Chinese/Japanese/Korean tokens
cjks = lambda x:list(solve([ wikitext.revision.cjks], cache={'datasource.revision.text': x}))[0]
df["cjks"] = df["censored_text"].apply(cjks)
# The number of HTML entity tokens
entities = lambda x:list(solve([ wikitext.revision.entities], cache={'datasource.revision.text': x}))[0]
df["entities"] = df["censored_text"].apply(entities)
# The number of URL tokens
urls = lambda x:list(solve([ wikitext.revision.urls], cache={'datasource.revision.text': x}))[0]
df["urls"] = df["censored_text"].apply(urls)
# The number of word tokens
words = lambda x:list(solve([ wikitext.revision.words], cache={'datasource.revision.text': x}))[0]
df["words"] = df["censored_text"].apply(words)
# The number of UPPERCASE word tokens
uppercase_words = lambda x:list(solve([ wikitext.revision.uppercase_words], cache={'datasource.revision.text': x}))[0]
df["uppercase_words"] = df["censored_text"].apply(uppercase_words)
# The number of punctuation tokens
punctuations = lambda x:list(solve([ wikitext.revision.punctuations], cache={'datasource.revision.text': x}))[0]
df["punctuations"] = df["censored_text"].apply(punctuations)
# The number of break tokens
breaks = lambda x:list(solve([ wikitext.revision.breaks], cache={'datasource.revision.text': x}))[0]
df["breaks"] = df["censored_text"].apply(breaks)
# The length of the longest token
longest_token = lambda x:list(solve([ wikitext.revision.longest_token], cache={'datasource.revision.text': x}))[0]
df["longest_token"] = df["censored_text"].apply(longest_token)
# The length of the longest word-token
longest_word = lambda x:list(solve([ wikitext.revision.longest_word], cache={'datasource.revision.text': x}))[0]
df["longest_word"] = df["censored_text"].apply(longest_word)
# -
# ### Parsed features
# +
# The number of characters of viewable content (no markup or templates)
content_chars = lambda x:list(solve([ wikitext.revision.content_chars], cache={'datasource.revision.text': x}))[0]
df["content_chars"] = df["censored_text"].apply(content_chars)
# The number of headings
headings = lambda x:list(solve([ wikitext.revision.headings], cache={'datasource.revision.text': x}))[0]
df["headings"] = df["censored_text"].apply(headings)
# The number of external links
external_links = lambda x:list(solve([ wikitext.revision.external_links], cache={'datasource.revision.text': x}))[0]
df["external_links"] = df["censored_text"].apply(external_links)
# The number of wikilinks (internal to other pages in the wiki)
wikilinks = lambda x:list(solve([ wikitext.revision.wikilinks], cache={'datasource.revision.text': x}))[0]
df["wikilinks"] = df["censored_text"].apply(wikilinks)
# The number of HTML tags
tags = lambda x:list(solve([ wikitext.revision.tags], cache={'datasource.revision.text': x}))[0]
df["tags"] = df["censored_text"].apply(tags)
# The number of <ref> tags
ref_tags = lambda x:list(solve([ wikitext.revision.ref_tags], cache={'datasource.revision.text': x}))[0]
df["ref_tags"] = df["censored_text"].apply(ref_tags)
# The number of templates
templates = lambda x:list(solve([ wikitext.revision.templates], cache={'datasource.revision.text': x}))[0]
df["templates"] = df["censored_text"].apply(templates)
# -
# ### Custom features
# + active=""
# Same features as above but we consider frequencies instead of numbers
# -
df["whitespace_chars_norm"] = df["whitespace_chars"] / df["chars"]
df["markup_chars_norm"] = df["markup_chars"] / df["chars"]
df["cjk_chars_norm"] = df["cjk_chars"] / df["chars"]
df["entity_chars_norm"] = df["entity_chars"] / df["chars"]
df["url_chars_norm"] = df["url_chars"] / df["chars"]
df["word_chars_norm"] = df["word_chars"] / df["chars"]
df["uppercase_word_chars_norm"] = df["uppercase_word_chars"] / df["chars"]
df["punctuation_chars_norm"] = df["punctuation_chars"] / df["chars"]
df["break_chars_norm"] = df["break_chars"] / df["chars"]
df["longest_repeated_char_norm"] = df["longest_repeated_char"] / df["chars"]
df["numbers_norm"] = df["numbers"] / df["tokens"]
df["whitespaces_norm"] = df["whitespaces"] / df["tokens"]
df["markups_norm"] = df["markups"] / df["tokens"]
df["cjks_norm"] = df["cjks"] / df["tokens"]
df["entities_norm"] = df["entities"] / df["tokens"]
df["urls_norm"] = df["urls"] / df["tokens"]
df["words_norm"] = df["words"] / df["tokens"]
df["uppercase_words_norm"] = df["uppercase_words"] / df["tokens"]
df["punctuations_norm"] = df["punctuations"] / df["tokens"]
df["breaks_norm"] = df["breaks"] / df["tokens"]
df["longest_token_norm"] = df["longest_token"] / df["tokens"]
### Recap the columns in the main dataframe
df.columns
# ### Feature selection
### We consider only the features we've defined above
features = df.columns[6:]
### The target variable is the draft-quality label (column index 4)
target = df.columns[4]
# +
# Recursive Feature Elimination
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
# create a base classifier used to evaluate a subset of attributes
model = LogisticRegression()
# create the RFE model and select 4 attributes.
# NOTE: n_features_to_select is passed by keyword — the positional form
# RFE(model, 4) was deprecated in scikit-learn 0.24 and removed in later
# releases, where it raises a TypeError.
rfe = RFE(model, n_features_to_select=4)
rfe = rfe.fit(df[features], df[target])
# summarize the selection of the attributes
print(rfe.support_)
print(rfe.ranking_)
features[rfe.support_]
# -
rfe.score(df[features], df[target])
# Feature Importance
from sklearn import metrics
from sklearn.ensemble import ExtraTreesClassifier
# fit an Extra Trees model to the data
model = ExtraTreesClassifier()
model.fit(df[features], df[target])
# display the relative importance of each attribute
print(model.feature_importances_)
model.score(df[features], df[target])
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# Hold out 8% of the rows as a test set (fixed seed for reproducibility),
# then fit a fresh ExtraTrees model on the training split and report
# its accuracy on the held-out split.
X_train, X_test, y_train, y_test = train_test_split(df[features], df[target], test_size=0.08, random_state=0)
model2 = ExtraTreesClassifier()
model2.fit(X_train, y_train)
model2.score(X_test, y_test)
y_test
df.describe()
df.boxplot(by='draft_quality', column=['external_links', 'ref_tags', 'whitespaces_norm', 'longest_repeated_char_norm'], figsize=(15,15))
# +
X = df[features]
y = df[target]
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
# -
forest.score(X,y)
# ### Univariate Selection
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
modelKbest = SelectKBest(chi2, k=6)
fit=modelKbest.fit(df[features], df[target])
newFeatures = fit.transform(df[features])
print(fit.scores_)
#print(newFeatures[0:5,:])
print(fit.get_support())
features[fit.get_support()]
# ### PCA
from sklearn.decomposition import PCA
from sklearn import preprocessing
newf=preprocessing.scale(df[features]) #normalisation
pca = PCA(n_components=10)
pca.fit(newf)
print(pca.explained_variance_ratio_)
| notebooks/Feature extraction v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import glob
import warnings
warnings.filterwarnings('ignore')
glob.glob('data/*')
df_p = pd.read_csv('data/2017_China_data.csv')
df_p
# +
df_disease = pd.read_csv('data/2019_nCoV_data.csv')
# Normalize the country label: the disease table uses 'China' while the
# population table uses 'Mainland China'. Use .loc for the conditional
# write — chained indexing (df.Country[mask] = ...) raises
# SettingWithCopyWarning and may silently fail to write back.
df_disease.loc[df_disease.Country == 'China', 'Country'] = 'Mainland China'
# Unify the country name to 'Taiwan' for rows whose Province/State is Taiwan
# (translated from the original Japanese comment).
df_disease.loc[df_disease['Province/State'] == 'Taiwan', 'Country'] = 'Taiwan'
# -
set(df_p['Province/State']) == set(df_disease[df_disease['Country'] == 'Mainland China']['Province/State'])
| data_check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
# +
dir_in_Lx = '../out/20.0909 Lx/L200only_reg_rf_boruta_all/'
df_conc_tr = pd.read_csv(os.path.join(dir_in_Lx, 'anlyz', 'concordance', 'concordance_tr.csv'))
df_conc_te = pd.read_csv(os.path.join(dir_in_Lx, 'anlyz', 'concordance', 'concordance_te.csv'))
# -
plt.figure()
ax = sns.distplot(df_conc_tr.concordance, kde=False)
ax.set(xlim=[0,1.05], xlabel='Concordance')
plt.tight_layout()
plt.figure()
ax = sns.violinplot(df_conc_tr.concordance)
#ax.set(xlim=[0,1.05], xlabel='Concordance')
ax.set(xlim=[0.5,1.05], xlabel='Concordance', ylabel='Count')
plt.tight_layout()
df1 = df_conc_tr['concordance'].to_frame().copy()
df1['dataset'] = 'train'
df2 = df_conc_te['concordance'].to_frame().copy()
df2['dataset'] = 'test'
df = pd.concat([df1,df2])
df['cat'] = 'one'
plt.figure()
ax = sns.violinplot(y='cat', x='concordance', hue='dataset', data=df, split=True, linewidth=1.6)
#ax.set(xlim=[0,1.05], xlabel='Concordance')
ax.set(xlim=[0.34,1.05], xlabel='Concordance', ylabel='', yticks=[])
ax.legend(loc='upper left')
plt.tight_layout()
# Compare with Sanger
# +
dir_in_Lx = '../out/20.0909 Lx/L200only_reg_rf_boruta_all/'
df_conc_te = pd.read_csv(os.path.join(dir_in_Lx, 'anlyz', 'concordance', 'concordance_te.csv'))
dir_in_Lx_sanger = '../out/20.0926 feat Sanger/reg_rf_boruta_gs16/'
df_conc_te_sanger = pd.read_csv(os.path.join(dir_in_Lx_sanger, 'anlyz', 'concordance', 'concordance_te.csv'))
# -
plt.figure()
ax = sns.distplot(df_conc_te_sanger.concordance, kde=False)
ax.set(xlim=[0,1.05], xlabel='Concordance')
plt.tight_layout()
df1 = df_conc_te['concordance'].to_frame().copy()
df1['dataset'] = 'Broad'
df2 = df_conc_te_sanger['concordance'].to_frame().copy()
df2['dataset'] = 'Sanger'
df = pd.concat([df1,df2])
df['cat'] = 'one'
plt.figure()
ax = sns.violinplot(y='cat', x='concordance', hue='dataset', data=df, split=True, linewidth=1.6)
ax.set(xlim=[0.34,1.05], xlabel='Concordance', ylabel='', yticks=[])
ax.legend(loc='upper left')
plt.tight_layout()
# Sanger/Broad compare - just on the common genes (the mitochondrial genes)
common_genes = set(df_conc_te_sanger['gene']).intersection(set(df_conc_te['gene']))
# match to genes in both datasets
df1 = df_conc_te.loc[df_conc_te['gene'].isin(common_genes), 'concordance'].to_frame().copy()
df1['dataset'] = 'Broad'
df2 = df_conc_te_sanger.loc[df_conc_te_sanger['gene'].isin(common_genes), 'concordance'].to_frame().copy()
df2['dataset'] = 'Sanger'
df = pd.concat([df1,df2])
df['cat'] = 'one'
plt.figure()
ax = sns.violinplot(y='cat', x='concordance', hue='dataset', data=df, split=True, width=0.4, linewidth=1.6)
ax.set(xlim=[0.2,1.099], xlabel='Concordance', ylabel='', yticks=[])
ax.legend(loc='upper left')
plt.tight_layout()
plt.figure()
ax = sns.violinplot(y='dataset', x='concordance', data=df, split=False, width=0.4, linewidth=1.6, dodge=False)
ax.set(xlim=[0.3,1.099], xlabel='Concordance', ylabel='')
ax.legend([],[], frameon=False)
ax.set_aspect(0.2)
plt.tight_layout()
| notebooks/02c-concordance_visual.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example Notebook
#
# This is an example notebook.
#
# Modify / remove any of the below as suited for your needs
# ## Setup
# +
# Standard python packages
import os
import sys
# Other package imports
# import numpy as np
# import pandas as pd
# from matplotlib import pyplot as plt
# -
# Setup some global settings and configuration
project_root = os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir))
data_folder = os.path.join(project_root, 'data')
data_folder_raw = os.path.join(data_folder, 'raw')
src_folder = os.path.join(project_root, 'src')
# This notebook uses the shared package however first we need to ensure it is available (otherwise you get an error about the module not being found). You can either run setup.py as discussed in the readme to install the package or modify the path to include the src folder.
# +
# Explicitly set path so don't need to run setup.py - if we have multiple copies of
# the code we would otherwise need to setup a seperate environment for each to
# ensure the code pointers are correct.
sys.path.insert(0, src_folder)
from assetallocation_arp import examplemodule
# -
# ## Some Processing
# Use our package
examplemodule.hello_world()
# ## Appendix 1 - Environment Configuration
print (os.getcwd())
print (sys.version)
print (sys.executable)
print (sys.path)
# ## Appendix 2 - Automated Tests
# +
# Run tests within notebook
f_path = os.getcwd()
os.chdir(os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir)))
# Run pytest from the repository root
# !pytest
os.chdir(f_path)
| notebooks/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import os,sys
sys.path.append('../../../RL_lib/Agents/PPO')
sys.path.append('../../../RL_lib/Utils')
sys.path.append('../..')
# %load_ext autoreload
# %load_ext autoreload
# %autoreload 2
# %matplotlib nbagg
import os
print(os.getcwd())
# + language="html"
# <style>
# .output_wrapper, .output {
# height:auto !important;
# max-height:1000px; /* your desired max-height here */
# }
# .output_scroll {
# box-shadow:none !important;
# webkit-box-shadow:none !important;
# }
# </style>
# +
from env import Env
from dynamics_model import Dynamics_model
from reward import Reward
from lander_model import Lander_model
from flat_constraint import Flat_constraint
from glideslope_constraint import Glideslope_constraint
from ic_gen import Landing_icgen
from drdv_agent import DRDV_agent
from utils import Mapminmax,Logger
logger = Logger()
dynamics_model = Dynamics_model(h=0.05)
lander_model = Lander_model(use_trajectory_list=True)
lander_model.get_state_agent = lander_model.get_state_agent8
#lander_model.max_thrust=55000
reward_object = Reward()
glideslope_constraint = Glideslope_constraint(gs_limit=0.0)
#shape_constraint = Parabaloid_constraint(altitude=500,debug=False)
shape_constraint = Flat_constraint()
env = Env(lander_model,dynamics_model,logger,
reward_object=reward_object,
glideslope_constraint=glideslope_constraint,
shape_constraint=shape_constraint,
tf_limit=200.0,print_every=10,
scale_agent_action=False)
env.ic_gen = Landing_icgen(mass_uncertainty=0.05,g_uncertainty=(0.0,0.0),noise_u=100,noise_sd=50, #u=1000 breaks
downrange = (0,3000 , -70, -10),
crossrange = (-1500,1500 , -30,30),
altitude = (2400,2600,-90,-70))
env.ic_gen.show()
obs_dim = 6
act_dim = 3
agent = DRDV_agent(env)
agent.test()
# +
pos,vel,traj=agent.test_batch(n=1000)
# -
tl = lander_model.trajectory_list
print(len(tl))
# Per-trajectory summary statistics, one entry per episode in tl:
# terminal position and velocity magnitudes, fuel magnitude at episode end,
# worst-case (minimum) glideslope, and number of simulation steps.
pos = [np.linalg.norm(t['position'][-1]) for t in tl]
vel = [np.linalg.norm(t['velocity'][-1]) for t in tl]
fuel = [np.linalg.norm(t['fuel'][-1]) for t in tl]
gs = [np.min(t['glideslope']) for t in tl]
steps = [len(t['position']) for t in tl]
print(np.max(pos), np.max(vel))
wc = np.argmax(pos)
print ('1: ',tl[wc]['position'][0])
print ('2: ',tl[wc]['velocity'][0])
print ('3: ',tl[wc]['position'][-1])
print ('4: ',tl[wc]['velocity'][-1])
print('mean fuel: ',np.mean(fuel))
print('min fuel: ',np.min(fuel))
print('std fuel: ',np.std(fuel))
print('max fuel: ',np.max(fuel))
print('min GS: ',np.min(gs))
print('mean steps: ',np.mean(steps))
print('max steps: ',np.max(steps))
foo = tl
import env_utils as envu
npos = np.linalg.norm(pos,axis=1)
wc = np.argmax(npos)
envu.render_traj(traj[wc])
np.random.uniform(low=3,high=3)
# +
env.ic_gen = Landing_icgen(mass_uncertainty=0.05,g_uncertainty=(0.05,0.05),noise_u=50,noise_sd=200,
downrange = (1500,1500 , -70, -70),
crossrange = (-500,-500 , -30,-30),
altitude = (2100,2100,-90,-90))
env.ic_gen.show()
obs_dim = 6
act_dim = 3
agent = DRDV_agent(env)
agent.test()
# -
import matplotlib.pyplot as plt
xy = np.linspace(0,300,300)
tau = 10
atarg = 10*(1-np.exp(-xy/tau))
plt.figure()
plt.plot(xy,atarg)
plt.show()
| AAS_18-290_3dof_journal/Run/Run_4km/drdv_9km.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Install FuxiCTR
#
# For FuxiCTR v1.0 only.
#
# FuxiCTR has the following requirements.
#
# + python 3.6
# + pytorch v1.0/v1.1
# + pyyaml >=5.1
# + scikit-learn
# + pandas
# + numpy
# + h5py
# + tqdm
#
# We recommend installing the above environment through Anaconda using [Anaconda3-5.2.0-Linux-x86_64.sh](https://link.zhihu.com/?target=https%3A//mirrors.tuna.tsinghua.edu.cn/anaconda/archive/Anaconda3-5.2.0-Linux-x86_64.sh).
#
# There are two ways to install FuxiCTR v1.0.
#
# **Solution 1**: pip install (note that all dependent requirements need to be installed accordingly.)
# !pip install fuxictr==1.0.*
# **Solution 2**: git clone or download the zip file: https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip
#
# If you download the source code, you need to add the fuxictr folder to the system path in your code.
import sys
sys.path.append('./YOUR_PATH/fuxictr')
# Check if fuxictr has been installed successfully.
import fuxictr
fuxictr.__version__
# Run the tests:
# !cd tests
# !bash test_all.sh
# !bash test_all_gpu.sh # if GPU is available
| tutorials/v1.0/0_install_fuxictr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Milestone 1
# ## Load data as dataframe with pandas
import pandas as pd
# Load the raw mushroom dataset and report its dimensions and column names.
rawdata = pd.read_csv('../../data/raw/mushrooms.csv')
# DataFrame.shape gives (row count, column count) in one call.
n, m = rawdata.shape
print("There are {} rows and {} columns.".format(n, m))
print("The columns are:")
for col in rawdata.columns:
    print("\t" + col)
| analysis/alec_nixon/Milestone1.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # MOwNiT
# ## Laboratorium 3
# ### Tablice wielowymiarowe i mnoลผenie macierzy w Julii
#tablice wielowymiarowe w Julii
Asmall=[[1.0 0.0]; [0.0 1.0]]
Bsmall=Asmall
#size(B,1)
# Matrix multiplication - naive version (translated from Polish:
# "mnożenie macierzy - wersja naiwna").
# Textbook i,j,k loop order, kept deliberately naive as the lab's baseline:
# the innermost index k walks A row-wise, which is cache-unfriendly given
# Julia's column-major array storage (see the note and exercise below).
function naive_multiplication(A,B)
# Result accumulator with the conforming shape (rows of A x cols of B).
C=zeros(Float64,size(A,1),size(B,2))
for i=1:size(A,1)
for j=1:size(B,2)
for k=1:size(A,2)
# Accumulate the dot product of row i of A with column j of B.
C[i,j]=C[i,j]+A[i,k]*B[k,j]
end
end
end
C
end
#kompilacja
naive_multiplication(Asmall,Bsmall)
#kompilacja funkcji BLASowej do mnoลผenia macierzy
#https://docs.julialang.org/en/stable/stdlib/linalg/#BLAS-Functions-1
Asmall*Bsmall
A=rand(1000,1000);
B=rand(1000,1000);
# Należy pamiętać o "column-major" dostępie do tablic -
# pierwszy indeks zmienia się szybciej
# tak jak Matlab, R, Fortran
# inaczej niż C, Python
A1 = [[1 2]; [3 4]]
vec(A1)
# poprawiona funkcja korzystająca z powyższego oraz z faktu, że
# można zmieniać kolejność operacji dodawania (a co za tym idzie kolejność pętli).
# Cache-friendlier variant of naive_multiplication: the j,k,i loop order makes
# the innermost index i walk C and A down a column, matching Julia's
# column-major storage. Reordering is valid because each C[i,j] is the same
# sum, just accumulated in a different order.
function better_multiplication( A,B )
# Result accumulator with the conforming shape (rows of A x cols of B).
C=zeros(Float64,size(A,1),size(B,2))
for j=1:size(B,2)
for k=1:size(A,2)
for i=1:size(A,1)
C[i,j]=C[i,j]+A[i,k]*B[k,j]
end
end
end
C
end
better_multiplication(Asmall, Bsmall)
@elapsed naive_multiplication(A,B)
@elapsed better_multiplication(A,B)
@elapsed A*B
# ### Wielomiany i aproksymacja
# aproksymacja sredniokwadratowa wielomianem - tutaj przyklad dla wielomianu 3 stopnia
# pakiet Polynomials jest mozliwy do instalacji pod Juliabox
# https://github.com/JuliaMath/Polynomials.jl
#using Pkg
#Pkg.add("Polynomials")
using Polynomials
xs = 0:10; ys = map(x->exp(x), xs)
fit1=fit(xs, ys,3)
# obliczanie wartosci wielomianu
fit1(1)
# obliczanie wartosci wielomianu (drugi sposรณb)
polyval(fit1, 1)
# +
using Plots
# geste punkty do wyliczenia wartosci wielomianu aproksymujacego:
xd=0:0.1:10
# wykres wartosci wielomianu dla gestych punktow:
plot(xd,polyval(fit1, xd))
# ! -dodanie do tego samego wykresu punktรณw wg ktorych aproksymowalismy
scatter!(xs,ys)
# -
# ### Zadania
#
# 1. Uruchomić
#    - naive_multiplication(A,B),
#    - better_multiplication(A,B)
#    - mnożenie BLAS w Julii (A*B)
#
#    dla coraz większych macierzy i zmierzyć czasy. Narysować wykres zależności czasu od rozmiaru macierzy wraz ze słupkami błędów, tak jak na poprzednim laboratorium. Wszystkie trzy metody powinny być na jednym wykresie.
#
#
# 2. Napisać w języku C:
#    - naiwną metodę mnożenia macierzy (wersja 1)
#    - ulepszoną za pomocą zamiany pętli metodę mnożenia macierzy (wersja 2), pamiętając, że w C macierz przechowywana jest wierszami (row major order tzn A11,A12, ..., A1m, A21, A22,...,A2m, ..Anm), inaczej niż w Julii !
#    - skorzystać z możliwości BLAS dostępnego w GSL (wersja 3).
#
#    Należy porównywać działanie tych trzech algorytmów bez włączonej opcji optymalizacji kompilatora. Przedstawić wyniki na jednym wykresie tak jak w p.1 (osobno niż p.1). (Dla chętnych) sprawdzić, co się dzieje, jak włączymy optymalizację kompilatora i dodać do wykresu.
#
#
# 3. Użyć funkcji polyfit z pakietu Polynomials do znalezienia odpowiednich wielomianów, które najlepiej pasują do zależności czasowych każdego z algorytmów. Stopień wielomianu powinien zgadzać się z teoretyczną złożonością. Dodać wykresy uzyskanych wielomianów do wcześniejszych wykresów.
#
| Mownit_Lab3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cv3
# language: python
# name: cv3
# ---
# ### 1. Start the Environment
# +
from gym_unity.envs import UnityEnv
import numpy as np
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Reacher Unity environment.
#
# For instance, if you are using a Mac, then you downloaded `Reacher.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:
# ```
# env = UnityEnvironment(file_name="Reacher.app")
# ```
#env_name = 'unity_envs/Crawler_StaticTarget_Linux/Crawler_StaticTarget_Linux.x86_64'
env_name = 'unity_envs/Crawler_StaticTarget'
env = UnityEnv(env_name,worker_id=1,use_visual=False, multiagent=True)
# ### 2. Examine the State and Action Spaces
#
# * Set-up: A creature with 4 arms and 4 forearms.
# * Goal: The agents must move its body toward the goal direction without falling.
# * CrawlerStaticTarget - Goal direction is always forward.
# * CrawlerDynamicTarget- Goal direction is randomized.
# * Agents: The environment contains 3 agent linked to a single Brain.
# * Agent Reward Function (independent):
# * +0.03 times body velocity in the goal direction.
# * +0.01 times body direction alignment with goal direction.
# * Brains: One Brain with the following observation/action space.
# * Vector Observation space: 117 variables corresponding to position, rotation, velocity, and angular velocities of each limb plus the acceleration and angular acceleration of the body.
# * Vector Action space: (Continuous) Size of 20, corresponding to target rotations for joints.
# * Visual Observations: None.
# * Reset Parameters: None
# * Benchmark Mean Reward for CrawlerStaticTarget: 2000
# * Benchmark Mean Reward for CrawlerDynamicTarget: 400
#
# Lets print some information about the environment.
# +
# number of agents
num_agents = env.number_agents
print('Number of agents:', num_agents)
# size of each action
action_size = env.action_space.shape[0]
print('Size of each action:', action_size)
# examine the state space
states = env.reset()
state_size = env.observation_space.shape[0]
print('There are {} agents. Each observes a state with length: {}'.format(num_agents, state_size))
print('The state for the first agent looks like:', states[0])
# -
# ### 3. Take Random Actions in the Environment
states = env.reset() # reset env and get the current state (for each agent)
scores = np.zeros(num_agents) # initialize the score (for each agent)
step=0
while True:
# select an action (for each agent)
actions = list(2*np.random.rand(num_agents, action_size)-1)
next_states,rewards,dones,_ = env.step(actions)
# update the score (for each agent)
scores += rewards
# roll over states to next time step
states = next_states
step+=1
# exit loop if episode finished
if np.any(dones):
break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
# ### 4. Training the agent!
#
# Now it's turn to train an agent to solve the environment! When training the environment, we have to set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```
# +
import random
import datetime
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
# %matplotlib inline
#pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
# imports for rendering outputs in Jupyter.
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
from IPython.display import display
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# defining the device: prefer the first CUDA GPU, fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print ("using",device)
# ### 3. Define policy network (Actor Critic style)
# +
# Per-dimension action bounds from the environment; used to rescale the
# network's sigmoid output into the valid action range.
action_low = env.action_space.low
action_high = env.action_space.high
# define actor critic network
class ActorCritic(nn.Module):
    """Shared-trunk actor-critic network for continuous control.

    The actor head outputs the mean of a diagonal Normal distribution over
    actions, squashed by a sigmoid and rescaled into
    [action_low, action_high]. The log-std is a single state-independent
    learnable parameter. The critic head outputs a scalar state value.
    """
    def __init__(self,state_size,action_size,action_high,action_low,hidden_size=32,device=None):
        super(ActorCritic, self).__init__()
        # Backward-compatible generalization: callers may pass an explicit
        # device; when omitted, fall back to the same CUDA-if-available
        # choice the notebook makes at module level.
        if device is None:
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.device = device
        # action range, kept as tensors so rescaling happens on-device
        self.action_high = torch.tensor(action_high).to(device)
        self.action_low = torch.tensor(action_low).to(device)
        # state-independent log-scale parameter of the action distribution
        self.std = nn.Parameter(torch.zeros(action_size))
        # NOTE(review): hidden_size is accepted but unused -- the layer
        # widths are hard-coded (512/256/64). Left as-is to preserve the
        # trained-network architecture; confirm before honoring it.
        # common network
        self.fc1 = nn.Linear(state_size,512)
        # actor network
        self.fc2_actor = nn.Linear(512,256)
        self.fc3_action = nn.Linear(256,action_size)
        #self.fc3_std = nn.Linear(64,action_size)
        # critic network
        self.fc2_critic = nn.Linear(512,64)
        self.fc3_critic = nn.Linear(64,1)
    def forward(self,state):
        """Return (rescaled action mean, state value) for a state batch."""
        # common trunk
        x = F.relu(self.fc1(state))
        # actor head: sigmoid gives values in (0, 1)
        x_actor = F.relu(self.fc2_actor(x))
        # torch.sigmoid replaces F.sigmoid, which is deprecated in PyTorch
        action_mean = torch.sigmoid(self.fc3_action(x_actor))
        ## rescale (0, 1) into [action_low, action_high]
        action_mean_ = (self.action_high-self.action_low)*action_mean + self.action_low
        #action_std = F.sigmoid(self.fc3_std(x_actor))
        # critic head
        x_critic = F.relu(self.fc2_critic(x))
        v = self.fc3_critic(x_critic)
        return action_mean_,v
    def act(self,state,action=None):
        """Sample (or re-score) actions for a numpy batch of states.

        Returns a dict with the action 'a', its summed log-prob
        'log_pi_a', entropy 'ent', the distribution mean 'mean', and the
        critic value 'v'. When `action` is given it is scored instead of
        sampled (used by the PPO update).
        """
        # converting state from numpy array to pytorch tensor on the device
        state = torch.from_numpy(state).float().to(self.device)
        action_mean,v = self.forward(state)
        # softplus keeps the scale strictly positive
        prob_dist = Normal(action_mean,F.softplus(self.std))
        if action is None:
            action = prob_dist.sample()
        # sum per-dimension log-probs/entropies -> one scalar per sample
        log_prob = prob_dist.log_prob(action).sum(-1).unsqueeze(-1)
        entropy = prob_dist.entropy().sum(-1).unsqueeze(-1)
        return {'a': action,
                'log_pi_a': log_prob,
                'ent': entropy,
                'mean': action_mean,
                'v': v}
# -
# ### 4. Storage class
class Storage:
    """Flat rollout buffer: one Python list per key, filled step by step."""

    def __init__(self, size, keys=None):
        extra = [] if keys is None else keys
        # caller-supplied keys come first, then the standard rollout keys
        self.keys = extra + ['s', 'a', 'r', 'm',
                             'v', 'q', 'pi', 'log_pi', 'ent',
                             'adv', 'ret', 'q_a', 'log_pi_a',
                             'mean']
        self.size = size
        self.reset()

    def add(self, data):
        """Append one value per key; every key must be known to the buffer."""
        for key in data:
            assert key in self.keys
            getattr(self, key).append(data[key])

    def placeholder(self):
        """Pad any still-empty key with `size` Nones so it can be indexed."""
        for key in self.keys:
            if not getattr(self, key):
                setattr(self, key, [None] * self.size)

    def reset(self):
        """Start over with a fresh empty list for every key."""
        for key in self.keys:
            setattr(self, key, [])

    def cat(self, keys):
        """Concatenate the first `size` tensors stored under each key."""
        return map(lambda key: torch.cat(getattr(self, key)[:self.size], dim=0), keys)
# ### 4. PPO agent
# +
from collections import deque
from itertools import accumulate
import torch.tensor as tensor
def random_sample(indices, batch_size):
    """Yield shuffled mini-batches of indices; a final short batch holds any remainder."""
    shuffled = np.asarray(np.random.permutation(indices))
    full = len(shuffled) // batch_size * batch_size
    for start in range(0, full, batch_size):
        yield shuffled[start:start + batch_size]
    # leftover indices that did not fill a whole batch
    if full < len(shuffled):
        yield shuffled[full:]
class Agent:
    """PPO agent: collects rollouts from `env` and optimizes an ActorCritic policy.

    NOTE(review): relies on notebook globals -- state_size, action_size,
    action_low, action_high, device, tensor, Storage, random_sample.
    """
    def __init__(self,env,learning_rate=1e-3):
        self.env = env
        nS = state_size
        nA = action_size
        self.policy = ActorCritic(state_size=nS,hidden_size=128,action_size=nA,
                                  action_low=action_low,action_high=action_high).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=learning_rate, eps=1e-5)
        # reset the environment; kept so rollouts resume where the last one stopped
        self.states = np.array(env.reset())
        # sliding window of the last 100 finished-episode returns
        self.episode_rewards_window = deque(maxlen=100)
        self.episode_rewards = []
        # per-trajectory running return accumulators (12 parallel agents)
        num_trajectories = 12
        self.online_rewards = np.zeros(num_trajectories)
    def train(self,max_opt_steps=1000,num_trajectories=12,rollout_length=2048,mini_batch_size=64,gamma=.99,
              target_score=-250,use_gae=False,gae_tau=0.95,PRINT_EVERY=100):
        """Run PPO: alternate rollout collection and clipped-surrogate updates.

        Returns the list of all finished-episode returns. Stops early once the
        100-episode average reaches target_score.
        """
        for opt_step in range(max_opt_steps):
            # ---- collect a rollout of rollout_length steps ----
            storage = Storage(rollout_length)
            states = self.states
            for _ in range(rollout_length):
                prediction = self.policy.act(states)
                # send all actions to the environment
                next_states,rewards,terminals,_ = self.env.step(list(prediction['a'].cpu().numpy()))
                next_states = np.array(next_states)
                rewards = np.array(rewards)
                terminals = np.array(terminals)
                self.online_rewards += rewards
                # book-keep finished episodes and reset their accumulators
                for i, terminal in enumerate(terminals):
                    if terminals[i]:
                        self.episode_rewards.append(self.online_rewards[i])
                        self.episode_rewards_window.append(self.online_rewards[i])
                        self.online_rewards[i] = 0
                storage.add(prediction)
                # m is the "not done" mask used to cut returns at episode ends
                storage.add({'r': tensor(rewards).unsqueeze(-1).float().to(device),
                             'm': tensor(1 - terminals).unsqueeze(-1).float().to(device),
                             's': tensor(states).to(device)})
                states = next_states
            self.states = states
            # bootstrap value for the state after the rollout
            prediction = self.policy.act(states)
            storage.add(prediction)
            storage.placeholder()
            # ---- compute returns and advantages (plain or GAE) backwards ----
            advantages = tensor(np.zeros((num_trajectories, 1))).float().to(device)
            returns = prediction['v'].detach()
            for i in reversed(range(rollout_length)):
                returns = storage.r[i] + gamma * storage.m[i] * returns
                if not use_gae:
                    advantages = returns - storage.v[i].detach()
                else:
                    td_error = storage.r[i] + gamma * storage.m[i] * storage.v[i + 1] - storage.v[i]
                    advantages = advantages * gae_tau * gamma * storage.m[i] + td_error
                storage.adv[i] = advantages.detach()
                storage.ret[i] = returns.detach()
            states, actions, log_probs_old, returns, advantages = storage.cat(['s', 'a', 'log_pi_a', 'ret', 'adv'])
            actions = actions.detach()
            log_probs_old = log_probs_old.detach()
            # normalize advantages across the whole rollout
            advantages = (advantages - advantages.mean()) / advantages.std()
            ppo_ratio_clip = 0.2
            gradient_clip = 0.5
            entropy_weight = 0.0
            print (states.shape)
            # ---- 10 PPO epochs of clipped-surrogate mini-batch updates ----
            for _ in range(10):
                sampler = random_sample(np.arange(states.size(0)), mini_batch_size)
                for batch_indices in sampler:
                    batch_indices = tensor(batch_indices).long()
                    sampled_states = states[batch_indices]
                    sampled_actions = actions[batch_indices]
                    sampled_log_probs_old = log_probs_old[batch_indices]
                    sampled_returns = returns[batch_indices]
                    sampled_advantages = advantages[batch_indices]
                    # re-score the old actions under the current policy
                    prediction = self.policy.act(sampled_states.cpu().numpy(), sampled_actions)
                    ratio = (prediction['log_pi_a'] - sampled_log_probs_old).exp()
                    obj = ratio * sampled_advantages
                    obj_clipped = ratio.clamp(1.0 - ppo_ratio_clip,
                                              1.0 + ppo_ratio_clip) * sampled_advantages
                    # clipped surrogate objective (entropy bonus weight is 0 here)
                    policy_loss = -torch.min(obj, obj_clipped).mean() - entropy_weight * prediction['ent'].mean()
                    value_loss = 0.5 * (sampled_returns - prediction['v']).pow(2).mean()
                    self.optimizer.zero_grad()
                    (policy_loss + value_loss).backward()
                    nn.utils.clip_grad_norm_(self.policy.parameters(), gradient_clip)
                    self.optimizer.step()
            #printing progress and periodic checkpointing
            if opt_step % PRINT_EVERY == 0:
                print ("Opt step: {}\t Avg reward: {:.2f}\t std: {}".format(opt_step,np.mean(self.episode_rewards_window),
                                                                            self.policy.std))
                # save the policy
                torch.save(self.policy, 'ppo-crawler.policy')
            if np.mean(self.episode_rewards_window)>= target_score:
                # NOTE(review): opt_step-100 mimics the "solved at" convention;
                # it can be negative if solved within the first 100 steps.
                print ("Environment solved in {} optimization steps! ... Avg reward : {:.2f}".format(opt_step-100,
                                                                                                     np.mean(self.episode_rewards_window)))
                # save the policy
                torch.save(self.policy, 'ppo-crawler.policy')
                break
        return self.episode_rewards
# -
# ### 5. Train the agent
# let's define and train our agent
agent = Agent(env=env,learning_rate=3e-4)
scores = agent.train(max_opt_steps=2000,gamma=0.99,target_score=2000,use_gae=True,PRINT_EVERY=1)
# ### 6. Watch the smart agent
# load the trained policy (full-module checkpoint, mapped onto CPU)
# load policy
policy = torch.load('ppo-crawler.policy',map_location='cpu')
agent = Agent(env)
agent.policy = policy
# +
# Roll out the trained policy for up to 2000 steps and report the return
# of the first trajectory that terminates.
frames = []
total_rewards = np.zeros(12)
# reset the environment
states = np.array(env.reset())
value = []
r = []
for t in range(2000):
    prediction = agent.policy.act(states)
    action = prediction['a'].cpu().numpy()
    v = prediction['v'].detach().cpu().numpy()
    #frames.append(env.render(mode='rgb_array'))
    # send all actions to the environment
    next_states,rewards,terminals,_ = env.step(list(action))
    next_states = np.array(next_states)
    rewards = np.array(rewards)
    terminals = np.array(terminals)
    #value.append(v.squeeze())
    #r.append(reward)
    states=next_states
    total_rewards+= rewards
    if np.any(terminals):
        for i,terminal in enumerate(terminals):
            if terminal:
                eps_reward = total_rewards[i]
                break
        break
# NOTE(review): eps_reward is only assigned if some trajectory terminates
# within 2000 steps; otherwise this print raises NameError.
print ("Total reward:",eps_reward)
#animate_frames(frames)
# -
| .ipynb_checkpoints/Crawler-ppo-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from pathlib import Path
import torch
# Build metadata features and one-hot label frame for the melanoma dataset.
# NOTE(review): paths are hard-coded to a specific machine/mount layout.
df = pd.read_csv('/data/marking.csv')
df.head()
# numeric + one-hot metadata features; missing ages become -5.0
meta_full = {}
meta_full['age_num'] = df.age_approx.fillna(-5.).values
meta_full['loc_oh'] = pd.get_dummies(df.anatom_site_general_challenge).values
meta_full['sex_oh'] = pd.get_dummies(df.sex).values
meta_full
pd.to_pickle(meta_full, 'meta_data/meta_full.pkl')
# one-hot encode the binary target into columns '0' and '1'
# NOTE(review): labels is a slice of df; the assignments below are
# chained-assignment territory in pandas (SettingWithCopyWarning) --
# behavior preserved as-is, confirm it does not mutate df.
labels = df[['image_id', 'target']]
labels['0'] = 0
labels['1'] = 0
labels.loc[labels.target == 1, '1'] = 1
labels.loc[labels.target == 0, '0'] = 1
labels.drop('target', axis=1, inplace=True)
labels = labels.reset_index()
# load previously exported metadata / predictions / params for inspection
meta = pd.read_pickle('meta_data/official/meta_data_official.pkl')
preds = pd.read_pickle('/content/clouderizer/melanoma/out/2020.test_effb0_rr/2020.test_effb0_rr_bestgpu0_8.pkl')
params = pd.read_pickle('/code/mdlParams.pkl')
params
| pt/labels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://www.kaggle.com/abhinavwalia95/entity-annotated-corpus
import os
import numpy as np
import pandas as pd
import seaborn as sb
# ner.csv has a formatting issue:
#
# ```Error tokenizing data. C error: Expected 25 fields in line 281837, saw 34```
# +
# Load both corpus files; malformed rows in ner.csv are skipped.
# NOTE(review): error_bad_lines is deprecated in pandas >= 1.3
# (replaced by on_bad_lines='skip') -- kept for the pandas version in use.
df_ner = pd.read_csv(os.path.join('..',
                                  'data',
                                  'raw',
                                  'ner.csv'),
                     sep=",",
                     encoding='Latin-1',
                     error_bad_lines=False
                    )
df_ner_dataset = pd.read_csv(os.path.join('..',
                                          'data',
                                          'raw',
                                          'ner_dataset.csv'),
                             sep=",",
                             encoding='Latin-1'
                            )
# -
# quick structural comparison of the two files
df_ner.info()
df_ner_dataset.info()
df_ner.head()
df_ner_dataset.head()
| notebooks/cgc-0.1-ner-ataset-eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import random
import json
import random
import requests
import bs4
#import matplotlib.pyplot as plt
# <h3>Current School Panda</h3>
#
# Working with directory school data
#
# Creative Commons in all schools
#
# This script uses a csv file from Creative Commons New Zealand and csv file from Ministry of Education.
#
# The ccnz csv file contains schools names that have cc licence, type of licence,
#
# The Ministry of Education csv file contains every public school in New Zealand and info about them.
#
# Standards for website addresses - if school name ends with school then cut it from name and add to .
# e.g. Horowhenua College
# horowhenua.college.nz
# not
# horowhenuacollege.school.nz
#
# Auckland Girls Grammar School
#
# aucklandgirlsgrammar.school.nz
# not
# aucklandgirlsgrammarschool.school.nz
#
# Everyschool has their own domain name and Linux server hosting the site. Private/Public keys. Static site, git repo. Nikola blog.
# What made you choose that particular Creative Commons licence?
#
# I like the CC:BY licence because it offers the most freedom to people.
#
# I am not a fan of licenses that restrict commercial use. I believe everyone should be able to do what they like with my work with minimal interference.
#
# If I could, I would remove non-commercial licenses.
#
# In the early days of my art blogging I would license under cc nc. This was wrong and I later changed this to a cc by license.
#
# With my photography, I once had a photo I had taken appear in the newspaper. It made the front page. I was offered money and asked for permission. I was fine with it of course - the license allows this. At the bottom of the photo it read: PHOTO: <NAME>.
# Perfect.
#
# The only thing I ask is that they attribute.
#
# I like the idea of sharealike but at the end of the I really don't care and would hate to chase down people to license it wrong. Sure, I don't like it that people could take my stuff and make it not open. I think everything should be open and free.
#
# My art site - artcontrol.me is currently down but when it was up I licensed the site under a cc:by. Elements of the site are still up - such as my YouTube channel.
#
# I attended art school in Wellington - The Learning Connexion. My focus was on drawing and painting. I taught myself programming on the bus to art school. Even when I was drawing on the easel I would be 'drawing' python code. During breaks I would often get my laptop out.
#
# I volunteered at Whaihanga Early Learning Centre. I spent the majority of my time there in the art area doing collaborative works with others. Oil pastel, coloured pencil and pencil were my mediums of choice. Sometimes I would use paint, but it's quite messy.
#
# Copyright shouldn't be default. Apply and pay if you want copyright. CC license by default. That will sort the world.
# Load the Creative Commons NZ spreadsheet of CC-licensed schools.
crcom = pd.read_csv('/home/wcmckee/Downloads/List of CC schools - Sheet1.csv', skiprows=5, index_col=0, usecols=[0,1,2])
# Compare the schools on the List of CC schools with the list of all public/private schools.
#
# Why shouldn't the default licence for all public schools be Creative Commons BY?
# +
#crcom
# -
# Re-read just the school-name column and round-trip through JSON to get
# a {row-index: name} dict.
aqcom = pd.read_csv('/home/wcmckee/Downloads/List of CC schools - Sheet1.csv', skiprows=6, usecols=[0])
aqjsz = aqcom.to_json()
dicthol = json.loads(aqjsz)
dschoz = dicthol['School']
# +
#dicthol
# -
dscv = dschoz.values()
ccschool = list()
# NOTE(review): range(87) is hard-coded to the number of CC schools in
# this spreadsheet snapshot -- confirm/derive from len(dschoz) instead.
for ds in range(87):
    #print(dschoz[str(ds)])
    ccschool.append((dschoz[str(ds)]))
schccd = dict()
# shared marker dicts: every school entry aliases one of these two objects,
# so mutating scda/sanoc would change ALL entries at once
scda = dict({'cc' : True})
sanoc = dict({'cc' : False})
# +
#schccd.update({ccs : scda})
# -
for ccs in ccschool:
    #These schools have a cc license. Update the list of all schools with cc and value = true.
    #Focus on schools that don't have cc license.
    #Filter schools in area that don't have cc license.
    #print (ccs)
    schccd.update({ccs : scda})
ccschz = list()
# duplicate of the ccschool build above (same 87 names)
for dsc in range(87):
    #print (dschoz[str(dsc)])
    ccschz.append((dschoz[str(dsc)]))
# +
#Append in names of schools that are missing from this dict.
#Something like
#schccd.update{school that doesnt have cc : {'cc' : False}}
#schccd
# -
# Cycle through only the first 89 values - stop when reaching: These are schools that have expressed an interest in CC, and may have a policy in progress.
#
# New spreadsheet for schools in progress of a CC license. Where are they up to? What are the next steps?
#
# Why are schools using a license that isn't CC:BY? They really should be using the same license. CC NC is unacceptable. SA would be OK, but the majority of schools already have CC BY, so it is best to go with what is common to avoid conflicts of licenses.
# Ministry of Education directory: all current schools and their websites.
noclist = pd.read_csv('/home/wcmckee/Downloads/Directory-School-current.csv', skiprows=3, usecols=[1])
webskol = pd.read_csv('/home/wcmckee/Downloads/Directory-School-current.csv', skiprows=3, usecols=[6])
websjs = webskol.to_json()
dictscha = json.loads(websjs)
numsweb = dictscha['School website']
lenmuns = len(numsweb)
# +
#for nuran in range(lenmuns):
#    print (numsweb[str(nuran)])
# +
#noclist.values[0:10]
# -
# Build the full list of school names from the directory.
aqjaq = noclist.to_json()
jsaqq = json.loads(aqjaq)
najsa = jsaqq['Name']
alsl = len(najsa)
allschlis = list()
for alr in range(alsl):
    allschlis.append(najsa[str(alr)])
# +
#allschlis
# -
# Schools with no CC license = all schools minus the CC ones.
newlis = list(set(allschlis) - set(ccschool))
empd = dict()
# Create a RESTful API of schools that have CC and those that don't.
#
# Merge two dicts together.
# Both are
# {name of school : 'cc' : 'True'/'False'}
sstru = json.dumps(schccd)
for newl in newlis:
    #print (newl)
    empd.update({newl : sanoc})
empdum = json.dumps(empd)
empdum
# Persist both JSON blobs.
# NOTE(review): plain open/write/close -- a `with` block would be safer.
savjfin = open('/home/wcmckee/ccschool/nocc.json', 'w')
savjfin.write(empdum)
savjfin.close()
savtru = open('/home/wcmckee/ccschool/cctru.json', 'w')
savtru.write(sstru)
savtru.close()
#for naj in najsa.values():
    #print (naj)
   # for schk in schccd.keys():
        #print(schk)
       # allschlis.append(schk)
# +
#for i in ccschz[:]:
#    if i in allschlis:
#        ccschz.remove(i)
#        allschlis.remove(i)
# -
#Cycle through some schools rather than everything.
#Cycle through all schools and find schools that have cc
#for naj in range(2543):
    #print(najsa[str(naj)])
#    for schk in schccd.keys():
#        if schk in (najsa[str(naj)]):
            #Remove these schools from the list
#            print (schk)
| curschopanda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem - solution approach
# # Spark Architecture and the Resilient Distributed Dataset
# The main components of the Spark architecture are the driver (accessed through the SparkContext object in PySpark) and the executors. For each PySpark application, there will be one driver program and one or more executors running on the cluster's slave machines. You might be wondering, what is an application in the context of PySpark? An application is a whole bunch of code used to solve a problem.
#
# The driver is the process that coordinates with many executors running on various slave machines. Spark follows a master/slave architecture. The SparkContext object is created by the driver. SparkContext is the main entry point to a PySpark application.
# **We can perform two types of operations on the RDD:** transformation and action . Transformation on an RDD returns another RDD. We know that RDDs are immutable; therefore, changing the RDD is impossible. Hence transformations always return another RDD. Transformations are lazy, whereas actions are eagerly evaluated. I say that the transformation is lazy because whenever a transformation is applied to an RDD, that operation is not applied to the data at the same time. Instead, PySpark notes the operation request, but all the transformations are applied when the first action is called.
# ## Problem1: Create an RDD
# a python list of float
plist = [1.2 , 2.3 , 3.4 , 4.5 , 2.4 , 2.3, 4.0 ]
display(plist)
#init the pyspark
import findspark
findspark.init('/opt/spark/spark-3.0.1-bin-hadoop2.7')
#create the session and sparkcontext
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("RDD").master("local[*]").getOrCreate()
sc = spark.sparkContext
print(spark)
print(spark.sparkContext)
sc = spark.sparkContext
parPythonData = sc.parallelize(plist,2)
# a lazy definition
parPythonData
# now we avaluate
parPythonData.collect()
parPythonData.first()
parPythonData.take(3)
# the number of // partitions
parPythonData.getNumPartitions()
# ## Problem2 : You are given daily temperatures in Fahrenheit. You want to perform some analysis on that data. But your new software takes input in Celsius!
# $$^oC = (^oF โ 32) ร 5/9$$
tempData = [59,57.2,53.6,55.4,51.8,53.6,55.4]
parTempData = sc.parallelize(tempData,4)
ftoc = lambda tempf: (tempf -32) *5/9
ftoc(80)
parCentigradeData = parTempData.map(ftoc)
parCentigradeData.collect()
# ### Filtering temp greater than $13^o$ C
tmorethan13 = lambda t: t >=13
#more general
tmorethan = lambda threshold: lambda t: t >= threshold
tmorethan(13)
filteredTemprature = parCentigradeData.filter(tmorethan13)
filteredTemprature.collect()
parCentigradeData.filter(tmorethan(14)).collect()
# ## problem 3: Data manipulation and run aggregation operations (avg, sums, ...).
# given data indicating student grades for a two-sessions exams. eight students are enrolled in this course.
# Calculate the following:
# * Average grades per session, each semester, for each student
# * Top three students who have the highest average grades in the second year
# * Bottom three students who have the lowest average grades in the second year
# * All students who have earned more than an 80% average in the second semester of the second semester
rawstudentsfile = sc.textFile('students.txt')
rawstudentsfile.getNumPartitions()
rawstudentsfile.take(3)
cleanstudentsfiles = rawstudentsfile.filter(lambda line: line.startswith('st'))
cleanstudentsfiles.collect()
studentMarksData = cleanstudentsfiles.map(lambda line: line.split('\t'))
studentMarksData.getNumPartitions()
studentMarksData.collect()
studentMarksDataRDD = studentMarksData.repartition(4)
studentMarksDataRDD.getNumPartitions()
studentMarksDataRDD.collect()
# ### Calculating Average Session Grades
studentMarksMean = studentMarksDataRDD.map(lambda x : [x[0],x[1],(int(x[2])+int(x[3]))/2])
# $$[x_0,x_1,x_2,x_3] \mapsto [x_0,x_1,(x_2+x_3)/2]$$
studentMarksMean.collect()
secondSemMarks = studentMarksMean.filter(lambda x : "s2" in x)
secondSemMarks.collect()
sortedMarksData = secondSemMarks.sortBy(keyfunc = lambda x : -x[2])
sortedMarksData.collect()
secondSemMarks.sortBy(keyfunc = lambda x : x[2]).collect()
# the top 3
sortedMarksData.take(3)
# We have our answer. But can we optimize it further? In order to get top-three data, we are sorting the whole list. We can optimize this by using the takeOrdered() function. This function takes two arguments: the number of elements we require, and key, which uses a lambda function to determine how to take the data out.
topThreeStudents = secondSemMarks.takeOrdered(num=3, key = lambda x :-x[2])
topThreeStudents
# In order to print the result, we are not using the collect() function to get the data. Remember that transformation creates another RDD, so we require the collect() function to collect data. But an action will directly fetch the data to the driver, and collect() is not required. So you can conclude that the takeOrdered() function is an action.
| baseRDD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sidmpy.Solver.util import compute_rho_sigmav_grid
from sidmpy.CrossSections.tchannel import TChannel
import numpy as np
import matplotlib.pyplot as plt
# The model used to compute the central density of the SIDM profile sometimes admits two solutions, one with a core radius that is the same order as the scale radius, and a second solution with a core that is much smaller. To show this, we'll compute the goodness of fit of many combinations of central densities and velocity dispersions.
# +
# Build a 100x100 grid over (log central density, velocity dispersion) and
# evaluate the goodness-of-fit of the SIDM profile model at each point.
N = 100
log_rho = np.linspace(6.7, 14., N)
vdis = np.linspace(3, 15, N)
log_rho_values, vdis_values = np.meshgrid(log_rho, vdis)
shape0 = log_rho_values.shape
# flatten for the vectorized grid evaluation, reshape afterwards
log_rho_values = log_rho_values.ravel()
vdis_values = vdis_values.ravel()
# NFW normalization, scale radius, and halo age (Gyr) held fixed
rhos, rs, halo_age = 5 * 10 ** 7, 0.5, 10.
# t-channel cross-section parameters
kwargs_cross = {'norm': 10., 'v_ref': 50.}
cross_section_class = TChannel(**kwargs_cross)
# radial range of the profile fit
rmin_profile = 1e-3
rmax_profile = 10.
grid = compute_rho_sigmav_grid(log_rho_values, vdis_values, rhos, rs, cross_section_class, halo_age,
                               rmin_profile, rmax_profile, use_nfw_velocity_dispersion=False).reshape(shape0)
# work in log10 for plotting the fit quality
loggrid = np.log10(grid)
# -
# ### Plot results
#
# Below, the x-axis shows log10(rho_central) and the y-axis shows the velocity dispersion in km/sec
# +
worst = np.max(loggrid)
best = np.min(loggrid)
aspect = abs(log_rho[-1] - log_rho[0])/(vdis[-1] - vdis[0])
fig = plt.figure(1)
ax = plt.subplot(111)
ax.imshow(loggrid, extent=[log_rho[0], log_rho[-1], vdis[0], vdis[-1]],
          aspect=aspect, cmap='bwr', origin='lower', vmin=best, vmax=worst)
# mark the two local minima: the cored and the cuspy solution
cored_logrho, cored_sigmav = 7.95, 7.5
cored_solution = [cored_logrho, cored_sigmav]
plt.scatter(*cored_solution, color='k', marker='+')
cusp_logrho, cusp_sigmav = 11.3, 8.7
cusp_solution = [cusp_logrho, cusp_sigmav]
plt.scatter(*cusp_solution, color='k', marker='+')
plt.xlabel(r'$\log_{10} \rho_0$', fontsize=16)
plt.ylabel('velocity dispersion', fontsize=12)
plt.savefig('multiple_solutions.pdf')
# -
| example_notebooks/multiple_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import re
import datetime as dt
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
from nltk.corpus import *
from nltk.tokenize import *
import time
nltk.download('vader_lexicon')
# -
# Load the scraped Yahoo News articles (first column is the index).
Yahoo_News = pd.read_csv('Yahoo_News_Scraped_0501.csv', index_col = 0)
Yahoo_News
Yahoo_News.dtypes
# English stop-word list used to filter tokens below
stop_words = nltk.corpus.stopwords.words('english')
# +
# Pre-process every article: lowercase, strip leading spaces, drop stop
# words, and replace anything outside [.,a-zA-Z0-9 \n] with a space.
Yahoo_News.ARTICLE_CONTENT = Yahoo_News.ARTICLE_CONTENT.astype(str)
# Hoisted loop invariants: set membership is O(1) (the stop-word list is
# O(n) per token), and the regex is compiled once instead of per token.
stop_word_set = set(stop_words)
nonword_pattern = re.compile('[^.,a-zA-Z0-9 \n\.]')
new_article_content = []
for content in Yahoo_News['ARTICLE_CONTENT']:
    content = content.lower()
    content = content.lstrip(' ')
    sentence = ''
    tokenized_words = word_tokenize(content)
    for token in tokenized_words:
        if token not in stop_word_set:
            token = nonword_pattern.sub(' ', token)
            sentence += token
            sentence += ' '
    new_article_content.append(sentence)
new_article_content
# +
# Score each cleaned article with VADER. The analyzer is constructed once;
# the original built a new SentimentIntensityAnalyzer (and re-read its
# lexicon) on every iteration.
sia = SIA()
post = []
neut = []
neg = []
comp = []
for transformed_content in new_article_content:
    score = sia.polarity_scores(transformed_content)
    post.append(score['pos'])
    neut.append(score['neu'])
    neg.append(score['neg'])
    comp.append(score['compound'])
# +
# Attach the cleaned text and the four sentiment scores as new columns.
Yahoo_News['TRANSFORMED_ARTICLE_CONTENT'] = new_article_content
Yahoo_News['POSITIVE_VALUE'] = post
Yahoo_News['NEUTRAL_VALUE'] = neut
Yahoo_News['NEGATIVE_VALUE'] = neg
Yahoo_News['COMPOUND_VALUE'] = comp
Yahoo_News
# +
# Normalize the scraped DATES column: values arrive in several string
# formats, and each pass below handles one pattern.
# NOTE(review): Yahoo_News['DATES'][date] = ... is chained assignment;
# whether it writes through to the frame depends on pandas copy semantics
# -- confirm. The bare excepts also swallow the TypeError raised by
# match[0] when a pattern does not match.
Yahoo_News.DATES = Yahoo_News.DATES.astype(str)
# Pass 1: "Mon DD, YYYY" (e.g. "May 1, 2020")
for date in range(len(Yahoo_News['DATES'])):
    match = re.search(r'\w{3}\s\d{1,2}\,\s\d{4}', Yahoo_News['DATES'][date])
    try:
        mdate = dt.datetime.strptime(match[0], '%b %d, %Y')
        Yahoo_News['DATES'][date] = mdate
    except:
        pass
# Pass 2: "MM/DD/YYYY"
for date in range(len(Yahoo_News['DATES'])):
    match = re.search(r'\d{2}\/\d{2}\/\d{4}', Yahoo_News['DATES'][date])
    try:
        mdate = dt.datetime.strptime(match[0], '%m/%d/%Y')
        Yahoo_News['DATES'][date] = mdate
    except:
        pass
# Pass 3: ISO "YYYY-MM-DD"
for date in range(len(Yahoo_News['DATES'])):
    match = re.search(r'\d{4}\-\d{2}\-\d{2}', Yahoo_News['DATES'][date])
    try:
        mdate = dt.datetime.strptime(match[0], '%Y-%m-%d')
        Yahoo_News['DATES'][date] = mdate
    except:
        pass
# -
Yahoo_News
# final conversion of the whole column to pandas datetimes
Yahoo_News.DATES = Yahoo_News.DATES.astype('datetime64')
Yahoo_News
Yahoo_News.info()
# NOTE(review): output filename has no .csv extension and keeps the index.
Yahoo_News.to_csv('Yahoo_News_Sentiment')
Yahoo_News['ARTICLE_CONTENT'][0]
Yahoo_News['TRANSFORMED_ARTICLE_CONTENT'][0]
| Sentiment Analysis - Yahoo News.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Exports scikit-learn model to json
from sklearn_model.export import Model
# -
# Load Dataset
df = pd.read_csv("assets/regr.csv")
# Standardise every column to zero mean / unit variance.
# NOTE(review): the scaler is fit on the FULL dataset (target column
# included) BEFORE the train/test split, so test-set statistics leak
# into the transform — confirm this is acceptable for this demo.
scaler = StandardScaler()
df_scaled = pd.DataFrame(scaler.fit_transform(df),
                         columns = list(df.columns))
df_scaled.head()
# The independent variables (everything except the target column)
dfX = df_scaled.drop("Y house price of unit area", axis =1)
dfX.head()
# The target
dfY = df_scaled[["Y house price of unit area"]]
dfY.head()
# Get numpy arrays
X, y = dfX.values , dfY.values
# Perform train test split (80/20, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split (X,
                                                     y,
                                                     test_size=0.2,
                                                     random_state = 1)
# Fetch Y as a 1D array (flatten the (n, 1) column vector)
y_train = y_train.flatten()
y_test = y_test.flatten()
# Fit Linear Regression Model; score() reports R-squared on train data
model_reg = LinearRegression().fit(X_train,y_train)
model_reg.score(X_train,y_train)
# Score on test dataset: R-squared goodness of fit
model_reg.score(X_test,y_test)
# +
# Start the process of model export (sklearn_model.export.Model builds
# a JSON description of the fitted pipeline).
mdl = Model()
# Register the input (feature) and output (target) field definitions
mdl.add_fields(dfX, dfY)
# Since StandardScaler is used, add the transformer so a consumer of
# the JSON can reproduce the exact same preprocessing
mdl.add_transformer(scaler, list(df_scaled.columns))
# Add the fitted LinearRegression model itself
mdl.add_model(model_reg)
# -
# View the exported model (called without a path, exportJSON returns
# the JSON — as shown by passing it to print here)
print(mdl.exportJSON())
# Save the model in a file (called with a path argument)
mdl.exportJSON('regr.json')
| examples/01-LinearRegression-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Asset Classes and Financial Instruments
# $Table-2.1$
# - The money market
# 1. Treasury bills (T-bills)
# 2. Certificates of deposit (CD)
# 3. Commercial paper
# 4. Bankers' acceptances
# 5. Eurodollars
# 6. Repos and reverses
# 7. Federal funds
# 8. Brokers' calls
# 9. LIBOR rate
# - Indexes
# 1. Dow Jones averages
# 2. Standard & Poor's indexes
# 3. Bond market indicators
# 4. International indexes
# - The bond market
# 1. Treasury bonds and notes
# 2. Federal agency debt
# 3. Municipal bonds
# 4. Corporate bonds
# 5. Mortgage-backed securities
# - Equity markets
# 1. Common stocks
# 2. Preferred stocks
# 3. Depository Receipts
# - Derivative markets
# 1. Options
# 2. Futures and forwards
# 3. Swaps
# ***
# $\dagger$
# Basically, there are three markets:
# 1. Debt Market (Fixed income securities)
# 2. Equity Market
# 3. Derivative Market
#
# And another way to classify them looks like this:
# - Money market (short-term)
# - Capital market (long-term)
# - Bond market
# - Equity market
# - Derivative market
#
# And remember:
# The Debt Market contains the whole Money market plus the Bond market part of the Capital market.
#
# ## The money market
# **Money markets**: Include *short-term*, *highly liquid*, and *relatively low-risk* debt instruments.
# And highly marketable.
# $\odot$
# Many of these securities trade in large denominations so are out of the reach of individual investors. Money market mutual funds, however, are easily accessible to small investors.$\square$
#
# **Treasury bills**: Short-term government securities issued at a discount from face value and returning the face amount at maturity.
# $\odot$
# 1. Most marketable in the money market.
# 2. The simplest form of borrowing.
# 3. The government sells bills to raise money. Investors buy them at a discount from the maturity value and benefit from the difference between the purchase price and the final maturity value.$\square$
#
# **Certificates of Deposit** (CD): A bank time deposit.
# $\odot$
# 1. Time deposits may not be withdrawn on demand. Principal and interest will be paid at the end of the fixed term of the CD.
# 2. Short-term CDs are highly marketable.
# 3. Treated as bank deposits by the Federal Deposit Insurance Corporation, thus they're insured.$\square$
#
# **Commercial paper** (CP): Short-term unsecured debt issued by large corporations.
# $\odot$
# 1. Sometimes, CP is backed by a bank line of credit, which gives the borrower (big company) cash to pay off the paper at maturity.
# 2. Commonly issued with denomination of multiples of \$\$100,000. Thus small investors can only invest from money market mutual funds.
# 3. The yield on CP depends on
# - time to maturity
# - company credit rating.
# *asset-backed commercial paper*: a new kind of CP issued by financial firms such as banks, in order to raise funds to the institution to invest in other assets.
#
# **Bankersโ Acceptances**: like a postdated check that bank will pay a sum of money at a future date.
# $\odot$
# 1. The acceptance can be traded among secondary market.
# 2. Very safe assets
# $\square$
#
# **Eurodollars**: dollar-denominated deposits at foreign banks or foreign branches of American banks.
# 1. Eurodollar CD
# 2. Eurodollar bonds (but this is *NOT* a money market investment since it's a long-term one)
# $\odot$
# 1. escape regulation by the Federal Reserve Board.
# 2. less liquid and riskier than domestic CDs
# 3. $\square$
#
# **Repurchase agreements** (repos): Short-term sales of securities with an agreement to repurchase the securities at a higher price, usually overnight, and for dealers in government securities.
# $\odot$
# 1. Very safe because the loans are collateralized by the securities.
# 2. *term repo*: repos with longer term like 30 days
# 3. *reverse repo*: Mirror image of repo. Short-term buying of securities from one who holds government securities with an agreement to resell the securities at a higher price.$\square$
#
# $\dagger$
# Repo only applied to government securities.
#
# **Brokers' call**: broker borrow funds from bank, agreeing to repay immediately if the bank requests. The rate of repay is usually 1% higher than that of short-term T-bills.
#
# **Federal funds**: Funds in the accounts of commercial banks at the Federal Reserve Bank. Some banks have more funds than required at the Fed while others may tend to have a shortage of Federal funds, so that those with excess funds lend to those with shortage. Usually overnight transactions at a rate called *Federal Funds Rate*.
#
# **London interbank offer rate** (LIBOR): Lending rate among banks in the London market.
# $\odot$
# 1. serves as a key reference rate for a wide range of transactions.
# 2. widely followed by investors.
# $\square$
#
# ## The Bond Market
# longer-term borrowing or debt instruments, normally with higher risk than money market because of the longer term.
#
# $\dagger$
# Corporate bonds have higher default risk. Municipal bonds have the risk level in between Treasury bond and Corporate bonds.
#
# **Treasury notes and bonds**: Debt obligations of the federal government with original maturities of one year or more.
# $\odot$
# 1. T-notes, maturity range up to 10 years; T-bonds, maturity range from 10 to 30 years.
# 2. semiannual interest payments, *coupon payments*, are paid directly to your account.$\square$
#
# **Bid and Ask price**: From the view of a dealer.
# 1. if he want to buy sth, he bid it with a bid price
# 2. if he want to sell it, he sell it with a ask price
#
# For market with higher fluidity, the Bid-ask spread (ask price minus bid price) is lower
#
# >**e.g.**
# >
# | Maturity | Coupon | Bid | Ask | CHG | Asked Yield to maturity|
# | :---------: | :-----:| :-------:| :-------:| :------:| :----:|
# | 2015 Feb 15 | 4.000 | 101.6250 | 101.6328 | -0.0078 | 0.046|
# | 2017 May 15 | 4.500 | 109.3516 | 109.3750 | 0.0234 | 0.927|
# | 2020 Feb 15 | 3.625 | 108.8906 | 108.9375 | 0.0938 | 1.880|
# Here we can see for the **3.625% February 2020 Treasury bond**
# > 1. *bid price*: 108.8906% of par, or \$\$1088.906.
# > 2. *ask price*: 108.9375, or \$\$1089.375.
# > 3. the corresponding yield is 1.880%.
# > 4. *yesterday ask price*: $\textbf{ASK} - \textbf{CHG} = 108.375 - 0.0938 = 108.8437$, or \$\$1088.437.
# >> Here *CHG* means 'change' from yesterday of the ask price.
# >> And **Yield to maturity** is the annualized rate of return for investors who hold them to the maturity.
#
# **Inflation-Protected Treasury Bonds** (TIPS): related to the cost of living index, so that citizen can hedge inflation risk.
# $\odot$
# 1. The principal amount is adjusted in proportion to increases in the Consumer Price Index.
# 2. The real interest rates are risk-free if you hold them to maturity.$\square$
#
# > **e.g.**, a TIPS issued in 2015, with face value 1000 dollars and coupon rate 8%. Between 2015-2016, the inflation rate is 2%.
# > The coupon payment in 2015: 1000 ร 8% = 80;
# > The coupon payment in 2016: 1000(1+2%) ร 8% = 81.6
#
# **Federal Agency Debt**: issued by government agencies to finance their activities.
# Several major mortgage-related agencies:
# - Federal Home Loan Bank (FHLB)
# - Federal National Mortgage Association (FNMA, or Fannie Mae)
# - Government National Mortgage Association (GNMA, or <NAME>)
# - Federal Home Loan Mortgage Corporation (FHLMC, or <NAME>)
#
# $\odot$
# Not explicitly insured by the federal government, but long been assumed that government would assist an agency nearing default.$\square$
#
# **International Bonds (Eurobonds)**: a bond denominated in a currency other than that of the country in which it is issued.
# > **e.g.**
# > Euro-dollar bond: dollar-denominated bond sold *NOT* in America.
#
# $\odot$
# There are bonds issued in foreign countries but also in the currency of the investor. Like Yankee bonds in America and Samurai bonds in Japan.$\square$
#
# **Municipal Bonds** (munis): issued by state and local governments.
# $\odot$
# 1. interest income is exempt from federal income taxation.
# 2. usually interest income is also exempt from state and local taxation in the issuing state.
# 3. however if you have to pay capital gains taxes, if your selling price is higher than your purchasing price.$\square$
#
# $Subtype$
# - *General obligation bonds*: backed by the โfull faith and creditโ ($i.e.$, the taxing power) of the issuer
# - *Revenue bonds*: issued to finance particular projects; and backed either by the revenues from that project or by the municipal agency operating the project.
#
# $\odot$
# Revenue bonds are riskier in terms of default than general obligation bonds.$\square$
#
# - *industrial development bond*: a revenue bond that is issued to finance commercial enterprises, such as the construction of a factory that can be operated by a private firm.
#
# $\odot$
# This device gives the firm access to tax-exempt rates borrowing, so that its amount is limited by federal government. $\square$
#
# - *tax anticipation notes*: short-term, to pay expenses before actual collection of taxes.
# ***
# **equivalent taxable yield** formula:
# Here $t$ is the marginal tax rate, $r_m$ is the rate of municipal bonds, $r$ is the equivalent taxable bonds rate, then
# $$r = \frac {r_m} {1-t}$$
#
# **yield ratio**: $${r_m}/r$$
# The higher the yield ratio, since $t = 1 - {r_m}/r$, the lower the *cutoff tax bracket*, the more individuals will prefer to hold municipal debt.
# ***
# **Corporate Bonds**: Long-term debt issued by private corporations, typically paying semiannual coupons and returning the face value of the bond at maturity.
#
# $Subtype$
# 1. *Callable bonds*: the firm can repurchase the bond from the holder at a stipulated call price.
# 2. *Convertible bonds*: the bondholder can convert each bond into a stipulated number of shares of stock.
#
# **Mortgage-Backed Securities**: either an ownership claim in a pool of mortgages or an obligation that is secured by such a pool.
#
# $Subtype$
# 1. *conforming mortgages*: the loans had to satisfy certain underwriting guidelines (standards for the creditworthiness of the borrower) before they could be purchased by <NAME> or <NAME>.
# 2. *subprime mortgages*: riskier loans made to financially weaker borrowers.
#
# ## Equity Securities
# **Common stocks (equity securities, equities)**: Ownership shares in a publicly held corporation. Shareholders have voting rights and may receive dividends.
# $\odot$
# For owner with one share:
# 1. one vote at the corporationโs annual meeting.
# 2. one share in the financial benefits of ownership.$\square$
#
# The common stock of most large corporations can be bought or sold freely on stock markets. A corporation whose stock is not publicly traded is said to be *private*. In most privately held corporations, the owners of the firm also take an active role in its management.
#
# **Characteristics of Common Stock**
# - residual claim
# 1. In a liquidation of the firmโs assets, the shareholders have claim to what is left after paying all other claimants
# 2. Residual claim means stockholders are the last in line of all those who have a claim on the assets and income of the corporation.
# - limited liability features
# 1. Limited liability means that the most shareholders can lose in event of the failure of the corporation is their original investment.
# 2. In the event of the firmโs bankruptcy, corporate stockholders at worst have worthless stock.
#
#
#
# > **e.g.**
# > 1.If you buy 100 shares of IBM common stock, to what are you entitled?
# >> You are entitled to a prorated share of IBMโs dividend payments and to vote in any of IBMโs stockholder meetings.
#
# > 2.If you pay \$\$4190 per share, what is the most money you could lose over the year?
# >> Your outlay was \$190 ร 100 = \$19,000. Because of limited liability, this is the most you can lose.
#
# **Stock market Listing**
#
# | G | Symbol | Close | Net CHG | Volume | 52 Week High | 52 Week Low | Div | Yield | PE | YTD %CHG |
# |:-------------|:------:|:-----:|:-------:|:---------:|:------------:|:-----------:|:----:|:-----:|:------:|:--------:|
# | Gap | GPS | 44.12 | 0.09 | 2,696,353 | 46.84 | 36.13 | 0.88 | 1.99 | 15.99 | 12.90 |
# | Gartner | IT | 76.36 | 0.02 | 372,214 | 76.82 | 56.57 | .... | .... | 36.71 | 7.47 |
# | General Cable| BGC | 20.47 | -0.16 | 983,060 | 34.61 | 20.21 | 0.72 | 3.52 | ....dd | -30.40 |
#
# Here *52 Week High/Low* are the highest and lowest prices over the past 52 weeks. **DIV** is the annual *dividend*; **PE** is the *P/E ratio*, price-to-earnings ratio, the ratio of the current stock price to last year's earnings; **dd** means that P/E cannot be computed because earnings were negative; **YTD %CHG** is the *year-to-date percentage change* in the stock price.
#
# >**e.g.**, Gap:
# > - Today Close price: $\$44.12$
# > - Yesterday Close price: $\textbf{Close} - \textbf{Net CHG} = \$44.12 - \$0.09 = \$44.03$
# > - Last quarterly divident payment: $\textbf{Div} \div 4 = \$0.88 \div 4 = \$0.22 $ **per share**.
# >> **DIV** column stands for the *annual* dividend payment
#
# > - Dividend Yield, $i.e.,$ the **Yield** column: $\textbf{DIV} \div \textbf{Close} = \$0.88 \div \$44.12 = 0.0199 = 1.99\%$
# > - Price change since the beginning of the year: $\textbf{YTD %CHG} = 12.90\%$
# >
# > **e.g.**, about the *total return*: You buy a share of stock for \$\$50, hold it for one year, collect a \$\$1.00 dividend and sell the stock for \$\$54. What were your *dividend yield*, *capital gain yield* and *total return*? (Ignore taxes)
# >> 1. *Dividend yield*: = $\textbf{Dividend} \div P_{\textrm{buy}} = \$\$1.00 \div \$\$50 = 2\%$.
# >> 2. *Capital gain yield*: = $(P_{\textrm{sell}} โ P_{\textrm{buy}} )\div P_{\textrm{buy}} = (\$\$54 - \$\$50) \div \$\$50 = 8\%$
# >> 3. *Total return*: = *Dividend yield* + *Capital gain yield* = 10%
#
# **Preferred Stock**: Nonvoting shares in a corporation, usually paying a fixed stream of dividends, similar to both equity and debt.
# 1. it promises to pay a fixed stream of income each year, like an infinite-maturity bond, or we say perpetuity
# 2. it does not give the holder voting power regarding the firmโs management, like a debt
#
# $\odot$
# 1. Firm has no contractual obligation to pay the dividends, but debt is a contractual obligation for firm to pay timely interst, otherwise bankruptcy set off. Usually preferred dividends are *cumulative*: unpaid dividends cumulate and must be paid in full before any dividends may be paid to holders of common stock.
# 2. Firm need to pay tax for dividends, not like bonds. So the corporations may exclude 70% of dividends received from domestic corporations in the computation of their taxable income, because preferred stock payments are treated as dividends rather than as interest on debt. Preferred stocks, therefore, make desirable fixed-income investments for some corporations.
# 3. Preferred stock often sells at lower yields than corporate bonds, even they rank after bonds in terms of the priority of its claim to the assets of the firm when bankruptcy.
# 4. it can be callable by the issuing firm, in which case it is said to be *redeemable*.
# 5. It also can be convertible into common stock at some specified conversion ratio.
# 6. *adjustable-rate preferred stock*, like adjustable-rate bonds, ties the dividend rate to current market interest rates.$\square$
#
# **Depositary Receipts**: tradable certificates of ownership in shares of a foreign company.
# $\odot$
# 1. Each may correspond to a fraction of a foreign share, one share, or several shares of the foreign corporation.
# 2. most common way for U.S. investors to invest in and trade the shares of foreign corporations.$\square$
# ***
# **Summary Table**
#
# | | Common Stock | Preferred Stock | Bond |
# |:--------------------------------------------:|:------------:|:--------------------------------------:|:----:|
# | Voting right | Yes | No | No |
# | Fixed stream of income (dividends or coupon) | No | Yes | Yes |
# | Force payment (dividends or coupon) | No | No | Yes |
# | Tax-deductible | No | No (but 70% exclusion for corporation) | Yes |
# | Priority of claim when liquidating | Third | Second | First|
#
# ## Stock and Bond Market Indexes
# **Dow Jones Industrial Average** (DJIA): 30 large, "blue-chip" corporations since 1896. (The average covered only 20 stocks until 1928.)
# $\odot$
# Originally, the DJIA was calculated as the average price of the stocks included in the index. Thus the amount of money invested in each company is proportional to the companyโs share price, we call this an **price-weighted average**, $i.e.$: *An average computed by adding the prices of the stocks and dividing by a โdivisorโ*.$\square$
#
# >**e.g.**: two-stock version of the Dow Jones Average
# >
# | Stock | Initial Price | Final Price | Shares (millions) | Initial Value of Outstanding Stock (\$ million) | Final Value of Outstanding Stock (\$ million) |
# |:-----:|:-------------:|:-----------:|:-----------------:|:---------------------:|:-----------------------:|
# | ABC | 25 | 30 | 20 | 500 | 600 |
# | XYZ | 100 | 90 | 1 | 100 | 90 |
# | Total | | | | 600 | 690 |
# >*Outstanding Stock: shares currently held by investors*.
# > Portfolio:
# >>Initial value = \$25 + \$100 = \$125
# >>Final value = \$30 + \$90 = \$120
# >>Percentage change in portfolio value = โ5/125 = โ.04 = โ4%
#
# > Index:
# >>Initial index value = (25 + 100)/2 = 62.5
# >>Final index value = (30 + 90)/2 = 60
# >>Percentage change in index = โ2.5/62.5 = โ.04 = โ4%
# >The portfolio and the index have identical 4% declines in value.
#
# $\dagger$
# **NOTHING** to do with the number of shares you hold.
#
# $\odot$
# But what if a stock splits or pays a stock dividend, then how the Dow keeps at a certain level? When these events occur, the divisor used to compute the โaverage priceโ is adjusted so as to leave the index unaffected.$\square$
#
# >**e.g.**: Firm "XYZ" have split two for one so that its share price fell to \$\$50.
# >
# | Stock | Initial Price | Final Price | Shares (millions) | Initial Value of Outstanding Stock (\$ million) | Final Value of Outstanding Stock (\$ million) |
# |:-----:|:-------------:|:-----------:|:-----------------:|:---------------------:|:-----------------------:|
# | ABC | 25 | 30 | 20 | 500 | 600 |
# | XYZ | 50 | 45 | 2 | 100 | 90 |
# | Total | | | | 600 | 690 |
# > Before the split, the index was 62.5, so that (25+50)/d = 62.5, which implies that the divisor must fall from its original value of 2.0 to a new value of 1.20
#
# $\odot$
# Because the Dow Jones averages are based on small numbers of firms, care must be taken to ensure that they are representative of the broad market. As a result, the composition of the average is changed every so often to reflect changes in the economy.
# $\square$
#
# **Standard & Poorโs Indexes**: an improvement over the Dow Jones averages.
# 1. it is a more broadly based index of 500 firms
# 2. it is a **market valueโweighted index**, $i.e.$: Index return equals the weighted average of the returns of each component security, with weights proportional to outstanding market value.
#
# $\odot$
# The S&P 500 is computed by calculating the total market value of the 500 firms in the index and that on the previous day of trading.$\square$
#
# $\dagger$
# Total market value is the **Market Capital**, which is approximately the product of number of outstanding shares and stock price
#
# >**e.g.**:
# > The final value of all outstanding stock in our two-stock universe is \$690 million, and the initial was \$600 million. Therefore, if the initial level of a market valueโweighted index of stocks ABC and XYZ were **SET equal to an arbitrarily chosen starting value** such as 100, the index value at year-end would be 100 ร (690/600) = 115. The increase in the index would reflect the 15% return earned on a portfolio consisting of those two stocks held in proportion to outstanding market values.
#
# $\dagger$
# Just calculate the initial total market value, and let it be an 100.
#
# $\odot$
# Both two kinds of indexes reflect the returns to straightforward portfolio strategies.$\square$
#
# Investors can buy market indexes in two ways below:
# 1. *Index funds*: Investors purchase shares in mutual funds that hold shares in proportion to their representation in certain stock indexes.
# *low-cost passive investment strategy*
# 2. *Exchange-traded fund* (ETF): Investors purchase this portfolio of shares as a unit, just like a normal stock. They mimit the performance of certain index.
#
# **Equally Weighted Indexes**: An index computed from a simple average of returns.
#
# >**e.g.**:
# > Put equal amount of money to buy the two stocks
#
# $\odot$
# No connection with buy-and-hold portfolio strategies.$\square$
#
# **Summary**
#
# | Price weighted (DJIA) | Market-value weighted (S&P500, NASDAQ) | Equally weighted (Value Line Index) |
# |:----------------------------:|:-------------------------:|:------------------------------------------:|
# | Invest **1** share in each stock. | Investment in each stock is proportional to *market value* of each stock. | Invest **\$\$1** in each stock (not buy-and-hold). |
#
# **Foreign and International Stock Market Indexes**
#
# Development in financial markets worldwide includes the construction of indexes for these markets. Among these are the Nikkei (Japan), FTSE (U.K., pronounced โfootsieโ), DAX (Germany), Hang Seng (Hong Kong), and TSX (Toronto). A leader in the construction of international indexes has been *MSCI* (Morgan Stanley Capital International), which computes over 50 country indexes and several regional indexes.
#
# **Bond Market Indicators**: The three most well-known groups of indexes are those of Merrill Lynch, Barclays (formerly <NAME>), and <NAME> (now part of Citigroup).
#
# $\odot$
# The major problem with these indexes is that true rates of return on many bonds are difficult to compute because **bonds trade infrequently**, which makes it hard to get reliable, up-to-date prices.
# In practice, some prices must be estimated from bond-valuation models. These so-called *matrix prices* may **differ** from true market values.$\square$
#
# ## Derivative Markets
# **Derivative assets**: A security with a payoff that depends on the prices of other securities.
#
# ### Options
# **Call option**: The right to buy an asset at a specified *exercise price (strike price)* **on** or **before** a specified expiration date.
#
# $\odot$
# 1. Each option contract is for the purchase of 100 shares, with quotations made on a per-share basis.
# 2. Option holder can give up exercise because it only make sense when the market value of the asset exceeds the exercise price.
# 3. Then the holder may *call away* the asset at the exercise price and reap a benefit equal to the difference between the stock price and the exercise price.
# 4. And if not exercised before the expiration date, the option expires and no longer has value.
# $\square$
#
# **Put option**: The right to sell an asset at a specified exercise price on or before a specified expiration date.
#
# $\odot$
# A European option is slightly different in that you can only exercise **AT** the expiration date.
# $\square$
#
# > **e.g.**: Apple, with underlying stock price = 101.05 dollar
# >
# >| Expiration | Strike | Call | Put |
# |:----------:|:------:|:----:|:----:|
# | Sep. | 95 | 6.20 | 0.21 |
# | Sep. | 100 | 2.20 | 1.18 |
# | Sep. | 105 | 0.36 | 4.35 |
# | Oct. | 95 | 6.35 | 0.33 |
# | Oct. | 100 | 2.62 | 1.55 |
# | Oct. | 105 | 0.66 | 4.75 |
# >
# >First thing to know that each option *contract* is the price multiply with 100.
# >Second we should notice that the prices of Call Options decrease as the exercise price increases but increase with time until expiration. And for put it's just the opposite.
# >> 1. the right to purchase a share at a higher price is less valuable.
# >> 2. the right to buy a stock at a certain price only until an earlier date is less valuable than the same right until a later date.
#
# >Now we do some calculations. (Even though options **must be trade as contract as 100 multiples**, but the return rate will never change, so here we will not multiply payoff and return with 100.)
# >>Call option: You bought the October 2014 expiration Apple Call option with exercise price 100 dollars
# >>
# | Real future price | Todo | Payoff | Return | Return rate |
# |:------------------------------------------------:|:--------:|:------:|:-------------:|:-----------------:|
# | Larger than exercise price (110) | Exercise | 10 | 10-2.62 =7.28 | 7.28/2.62 =277.9% |
# | A little bit higher than exercise price (102.62) | Exercise | 2.62 | 2.62-2.62 =0 | 0 |
# | Less than or equal to exercise price (90) | Give up | 0 | 0-2.62 =-2.62 | -100% |
# >>Put option: You bought the October 2014 expiration Apple Put option with exercise price 100 dollars
# >>
# | Real future price | Todo | Payoff | Return | Return rate |
# |:------------------------------------------------:|:--------:|:------:|:-------------:|:-----------------:|
# | Less than exercise price (90) | Exercise | 10 | 10-1.55 =8.45 | 8.45/1.55 =545.2% |
# | A little bit lower than exercise price (98.45) | Exercise | 1.55 | 1.55-1.55 =0 | 0 |
# | Larger than or equal to exercise price (110) | Give up | 0 | 0-1.55 =-1.55 | -100% |
#
# ### Futures
# **Futures Contracts**: Obliges traders to purchase or sell an asset at an agreed-upon price, the *future price*, at a specified future date, *maturity date*.
#
# $\odot$
# The long position is held by the trader who commits to purchasing the commodity on the delivery date. The trader who takes the short position commits to delivering the commodity at contract maturity.
# The trader holding the long position profits from price increases. And ignoring brokerage fees, the short positionโs loss equals the long positionโs profit.$\square$
#
# > **e.g.**: Corn futures prices in the Chicago Board of Trade, September 17, 2014
# >
# | Month | Last | CHG | Open | High | Low | Volume | Open Interest |
# |:-------:|:-----:|:----:|:-----:|:-----:|:-----:|:------:|:-------------:|
# | Dec '14 | 341'6 | -2'0 | 343'2 | 344'2 | 339'2 | 74580 | 796121 |
# | Mar '15 | 354'0 | -1'4 | 355'4 | 356'0 | 351'0 | 19416 | 201794 |
# | May '15 | 362'4 | -1'6 | 364'0 | 364'2 | 359'4 | 6153 | 51800 |
# | Jul '15 | 369'6 | -1'4 | 371'2 | 371'2 | 366'6 | 5171 | 76051 |
# >
# >The most recent price was \$3.4175 per bushel. (The numbers after each apostrophe denote eighths of a cent.) That price is down \$.02 from yesterdayโs close. Volume is the number of contracts trading that day; open interest is the number of outstanding contracts.
# >
# >Suppose that at expiration, corn is selling for \$3.6175 per bushel. Then the profit to the long position is 5,000 ร (\$3.6175 โ \$3.4175) = \$1,000.
#
# $\odot$
# The purchase price of an option is called the *premium*. It represents the compensation the purchaser of the call must pay for the ability to exercise the option only when it is advantageous to do so.$\square$
#
# ## Summary
# - Money market securities are very short-term debt obligations. They are usually highly marketable and have relatively low credit risk. Their low maturities and low credit risk ensure minimal capital gains or losses. These securities often trade in large denominations, but they may be purchased indirectly through money market funds.
# - Much of U.S. government borrowing is in the form of Treasury bonds and notes. These are coupon-paying bonds usually issued at or near par value. Treasury bonds are similar in design to coupon-paying corporate bonds.
# - Municipal bonds are distinguished largely by their tax-exempt status. Interest payments (but not capital gains) on these securities are exempt from income taxes.
# - Mortgage pass-through securities are pools of mortgages sold in one package. Owners of pass-throughs receive all principal and interest payments made by the borrower. The firm that originally issued the mortgage merely services the mortgage, simply โpassing throughโ the payments to the purchasers of the mortgage. Payments of interest and principal on government agency pass-through securities are guaranteed, but payments on private-label mortgage pools are not.
# - Common stock is an ownership share in a corporation. Each share entitles its owner to one vote on matters of corporate governance and to a prorated share of the dividends paid to shareholders. Stock, or equity, owners are the residual claimants on the income earned by the firm.
# - Preferred stock usually pays a fixed stream of dividends for the life of the firm: It is a perpetuity. A firmโs failure to pay the dividend due on preferred stock, however, does not set off corporate bankruptcy. Instead, unpaid dividends simply cumulate. Varieties of preferred stock include convertible and adjustable-rate issues.
# - Many stock market indexes measure the performance of the overall market. The Dow Jones averages, the oldest and best-known indicators, are price-weighted indexes. Today, many broad-based, market valueโweighted indexes are computed daily. These include the Standard & Poorโs Composite 500 stock index, the NYSE index, the NASDAQ index, the Wilshire 5000 Index, and several international indexes, including the Nikkei, FTSE, and DAX.
# - A call option is a right to purchase an asset at a stipulated exercise price on or before an expiration date. A put option is the right to sell an asset at some exercise price. Calls increase in value, while puts decrease in value, as the price of the underlying asset increases.
# - A futures contract is an obligation to buy or sell an asset at a stipulated futures price on a maturity date. The long position, which commits to purchasing, gains if the asset value increases, while the short position, which commits to delivering the asset, loses.
#
# ## KEY TERMS
# - bankersโ acceptance
# - call option
# - certificate of deposit
# - commercial paper
# - common stocks
# - corporate bonds
# - derivative asset
# - equally weighted index
# - Eurodollars
# - Federal funds
# - futures contract
# - London Interbank Offer Rate (LIBOR)
# - market valueโweighted index
# - money markets
# - municipal bonds
# - preferred stock
# - price-weighted average
# - put option
# - repurchase agreements
# - Treasury bills
# - Treasury bonds
# - Treasury notes
#
# ## Assignment
# 1. ***Preferred stock*** is **not** a money market instrument.
# 2. A dollar-denominated deposit at a London bank is called ***eurodollars***.
# 3. *TIPS* are ***Treasury bonds that protect investors from inflation***.
# 4. A tax free municipal bond provides a yield of $3.2%$, so that the equivalent taxable yield on the bond given a 35% tax bracket is $4.92\% = 3.2\% \div (1-35\%)$.
# 5. ...
# 6. The tax exempt equivalent yield on a $9\%$ bond yield given a marginal tax rate of $24\%$ is $6.84\% = 9\% \times (1 - 24\%)$
# 7. A benchmark index has three stocks priced at \$30, \$53, and \$63. The number of outstanding shares for each is 385,000 shares, 475,000 shares, and 623,000 shares, respectively. If the market value weighted index was 840 yesterday and the prices changed to \$30, \$50, and \$66, so now the new index value is
# $$\textrm{Index} =\frac{\$30 \times 385,000 + \$50 \times 475,000 + \$66 \times 623,000} {\$30 \times 385,000 + \$53 \times 475,000 + \$63 \times 623,000} \times 840 = 845$$
# 8. A benchmark market value index is comprised of three stocks. Yesterday the three stocks were priced at \$18, \$26, and \$60. The number of outstanding shares for each is 650,000 shares, 550,000 shares, and 250,000 shares, respectively. If the stock prices changed to \$22, \$24, and \$62 today respectively, so the 1-day rate of return on the index is
# $$\textrm{Index Return} =\frac{\$22 \times 650,000 + \$24 \times 550,000 + \$62 \times 250,000} {\$18 \times 650,000 + \$26 \times 550,000 + \$60 \times 250,000} - 1 = 4.88\%$$
# 9. ...
# 10. ...
| FinMath/Intermediate Investment/Note_Chap02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Load Required Libraries
# <br>
#
# **Standard Process** <br>
# - ```pip install pandas```<br>
# - ```pip install sqlalchemy```<br>
#
# **Parallel Process** (Not Recommended) <br>
# - ```pip install dask```<br>
# - ```pip install toolz```<br>
#
# **SQL Server Driver** <br>
# - ```pip install sqlalchemy-pytds```
# - ```pip install python-tds```
# - or ```pip install pymssql```
# - or ```pip install pyodbc```<br>
#
# **MySQL Driver** <br>
# - ```pip install pymysql```<br>
#
# **Google's BigQuery Driver** <br>
# - ```pip install google-cloud-bigquery```<br>
# - ```pip install google-cloud-bigquery-storage```<br>
# - ```pip install pybigquery```<br>
# - ```pip install pandas-gbq```<br>
# - ```pip install pyarrow```<br>
#
# **PostGreSQL Driver** <br>
# - ```pip install psycopg2```<br>
#
# **SQLite Driver** <br>
# - I've already forgotten =_="
#
# # Article about this notebook
# - https://faun.pub/quick-etl-with-python-part-2-modify-and-upsert-table-into-sql-a192a6b81ad4
# +
## Load Password from my PC
import os, re,json
import pandas as pd
# Read local connection settings (hosts, users, passwords) from a JSON file
# kept next to the notebook, so credentials stay out of the notebook itself.
with open('setting.txt','r') as f:
    setting = json.load(f)
# -
# # Working with lazy_SQL
# Change from da_tran_SQL since 0.3.11
from py_topping.data_connection.database import lazy_SQL
# ## SQL Server (or MS SQL)
# pull the MSSQL section of the local settings and show its driver type
set_mssql = setting['MSSQL']
print(set_mssql['type'])
# ### Create connection to server
# The placeholder strings are shown for the reader; the commented-out values
# read the real credentials from setting.txt instead.
mssql = lazy_SQL(sql_type = 'MSSQL', #set_mssql['type'], #
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mssql['host'], #
                 database_name = '{Your Database Name}', #set_mssql['database'], #
                 user = '{Your Username}', #set_mssql['user'], #
                 password = '{<PASSWORD>}') #set_mssql['password']) #
# for SQL Server the <b>sql_type must == "MSSQL" (ignore case sensitive in version >= 0.1.3)</b>
#
#
# by default, the class will use <span style="color:red">**pytds**</span> as the driver and port 1433<br>
# **change from pymssql to pytds in version >= 0.3.16*
#
# But you could also change them if you want like below
# Same connection, but with the port and driver overridden explicitly.
mssql = lazy_SQL(sql_type = 'MSSQL', #set_mssql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mssql['host'],
                 database_name = '{Your Database Name}', #set_mssql['database'],
                 user = '{Your Username}', #set_mssql['user'],
                 password = '{<PASSWORD>}',#set_mssql['password'])
                 port = '1433',
                 driver = 'pymssql')
# **from version 0.3.13**<br>
# You can mute with parameter "mute"
mssql = lazy_SQL(sql_type = 'MSSQL', #set_mssql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mssql['host'],
                 database_name = '{Your Database Name}', #set_mssql['database'],
                 user = '{Your Username}', #set_mssql['user'],
                 password = '{Your Password}',#set_mssql['password'])
                 port = '1433',
                 driver = 'pymssql',
                 mute = True )
# **from version >= 0.2.2**<br>
# You can add more parameter in class by passing to parameter
# (here: an extra connection-string parameter for the pyodbc driver)
mssql = lazy_SQL(sql_type = 'MSSQL', #set_mssql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mssql['host'],
                 database_name = '{Your Database Name}', #set_mssql['database'],
                 user = '{Your Username}', #set_mssql['user'],
                 password = '{<PASSWORD>}',#set_mssql['password'])
                 port = '1433',
                 driver = 'pyodbc',
                 parameter = 'driver=SQL+Server')
# **from version >= 0.1.9**<br>
# You could fine tune insert's speed by adjust these parameters into the lazy_SQL class :
# - chunksize, chunksize parameter in pandas' to_sql method (Default Value 150)
# - partition_size, dataframe will divide into smaller size as partition_size before dump (Default Value 5,000)
# - parallel_dump, to activate parallel dump (Default Value False) **Risk, not recommended to use**
# - max_parallel, number of parallel dumps executed per time (Default Value 2)
mssql = lazy_SQL(sql_type = 'MSSQL', #set_mssql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mssql['host'],
                 database_name = '{Your Database Name}', #set_mssql['database'],
                 user = '{Your Username}', #set_mssql['user'],
                 password = '{<PASSWORD>}',#set_mssql['password'])
                 port = '1433',
                 driver = 'pymssql',
                 chunksize = 150,
                 partition_size = 5000,
                 parallel_dump = False,
                 max_parallel = 2)
# The lazy_SQL class also has engine method which equal to engine in sqlalchemy
#
# Which I'm going to use it to set up sample environment that include,
# * 2 dataframe
# * table in mssql
# * store procedure in mssql
# +
def setup_sample(class_in):
    """Create the sample table and stored procedure used by the demos below.

    Parameters
    ----------
    class_in : lazy_SQL
        Connection object whose ``.engine`` is used to create the objects.

    Returns
    -------
    tuple
        (baseline DataFrame, overlapping DataFrame, table name, stored-procedure name)
    """
    test_table = 'unit_test_git'
    # Baseline rows that the demo cells below will update/replace.
    df1 = pd.DataFrame({'col1' : [1,2,3,4,5] , 'col2' : [1,1,2,2,3]
                    ,'date' : pd.date_range('2020-10-10','2020-10-14') , 'col3' : [1,1,1,1,1]})
    # Overlapping rows used to demonstrate the dump_* methods.
    new_df = pd.DataFrame({'col1' : [4,5,6,7,8] , 'col2' : [3,3,4,4,5]
                    ,'date' : pd.date_range('2020-10-12','2020-10-16') , 'col3' : [2,2,2,2,2]})
    # BUG FIX: use the connection that was passed in (class_in), not the global
    # mssql object, so this helper works for any lazy_SQL connection.
    df1.to_sql(test_table, index = False, if_exists = 'replace', con = class_in.engine)
    sql_q = """ CREATE OR ALTER PROCEDURE unit_test_git_SP (@PARAM1 AS VARCHAR(100))
                AS
                BEGIN
                SELECT * FROM {} WHERE col1 = @PARAM1
                END""".format(test_table)
    class_in.engine.execute(sql_q)
    return df1, new_df, test_table, 'unit_test_git_SP'
df1, new_df, table_name, sp = setup_sample(mssql)
pd.read_sql('unit_test_git', con = mssql.engine)
# -
# ### 1st Method : Read
#
# You could read any table or view from your database by use method "read"
# read the whole demo table back
mssql.read(table_name)
# You could also pass SQL's "WHERE" statement into parameter "condition_in"
#
# to filter the result
mssql.read(table_name, condition_in = 'col1 <= 2')
# **from version>= 0.3.5**<br>
# You could also use a select statement directly with "raw" = True
mssql.read("""select 'foo' as test_col""", raw = True)
# **from version >= 0.3.7**<br>
# You could select only some columns by passing a list of columns in "columns_list"
mssql.read(table_name, columns_list = ['col1','date'])
# Furthermore, you could also call a stored procedure and pass a dict of parameters with the read method
mssql.read(sp, SP = True, param = {'@PARAM1' : 3})
# You could select only unique value of selected column
# +
# Coming Soon...
# -
# You could group by and use aggregrate function
# +
# Coming Soon...
# -
# ### 2nd Method : dump_whole
#
# You could replace whole table with new dataframe with is method
#
# **Be careful with column types; I suggest fixing the columns' types before using this method**
# recreate the sample table, then replace its contents entirely
df1, new_df, table_name, sp = setup_sample(mssql)
mssql.read(table_name)
# +
mssql.dump_whole(new_df, table_name)
mssql.read(table_name)
# -
# By the way, **dump_whole will use columns and types as the New DataFrame**
# +
mssql.dump_whole(new_df.drop('col1', axis = 1) , table_name)
mssql.read(table_name).dtypes
# -
# In case that you would like to fix the original table's columns and types
#
# You could add parameter **fix_table = True**
# +
df1, new_df, table_name, sp = setup_sample(mssql)
# change col1's type on the server so fix_table has something to restore
mssql.engine.execute("""ALTER TABLE {} ALTER COLUMN col1 varchar(10)""".format(table_name))
mssql.read(table_name).dtypes
# +
mssql.dump_whole(new_df.drop('col1', axis = 1), table_name , fix_table = True)
mssql.read(table_name)
# -
mssql.read(table_name).dtypes
# ### 3rd Method : dump_replace
#
# You could update the existing dataframe by using this method
#
# `self.dump_replace(df_in, table_name_in, list_key, math_logic = '', partition_delete = 100000, debug = False)**`
#
# **List of Parameter**
# - **df_in** : DataFrame to dump into Table (Pandas DataFrame)
# - **table_name_in** : Name of Table (Str)
# - **list_key** : List of key columns (Str or List)
# - **math_logic** : Use math logic to filter your table (Dict), default : ''
# - **partition_delete** : Number of row delete per time (Int), default : 100,000 *Available 0.3.6*
# - **debug** : To show SQL Query or Not (Boolean), default : False
# recreate the sample table for the dump_replace demos
df1, new_df, table_name, sp = setup_sample(mssql)
mssql.read(table_name)
new_df
# If col1 is the primary key for table and new_df
#
# we could update the existing key and add new key into table by using dump_replace
mssql.dump_replace(new_df, table_name, list_key = ['col1'])
mssql.read(table_name)
# For table with multiple keys, we could simply add more keys into the parameter "list_key"
df1, new_df, table_name, sp = setup_sample(mssql)
mssql.read(table_name)
mssql.dump_replace(new_df, table_name, list_key = ['col1','col2'])
mssql.read(table_name)
# dump_replace also has math logic for some ETL process
df1, new_df, table_name, sp = setup_sample(mssql)
# +
mssql.dump_replace(new_df, table_name, list_key = ['date'],
                   math_logic = {'date' : {'logic' : '>=', 'value' : new_df['date'].min().date(), 'type' : 'date'}} )
mssql.read(table_name)
# -
# math logic also could use with num too
df1, new_df, table_name, sp = setup_sample(mssql)
# +
mssql.dump_replace(new_df, table_name, list_key = ['col1','date'],
                   math_logic = {'col1' : {'logic' : '>=', 'value' : new_df['col1'].min(), 'type' : 'int'} ,
                                 'date' : {'logic' : '>=', 'value' : new_df['date'].min().date(), 'type' : 'date'}} )
mssql.read(table_name)
# -
# You could also combine math_logic and non logic key together
df1, new_df, table_name, sp = setup_sample(mssql)
# +
mssql.dump_replace(new_df, table_name, list_key = ['col1','col2'],
                   math_logic = {'col1' : {'logic' : '>', 'value' : new_df['col1'].min(), 'type' : 'int'} } )
mssql.read(table_name)
# -
# Since there're many things going on inside this function
#
# This function also has a boolean parameter "debug" to print what's going on inside
# +
mssql.dump_replace(new_df, table_name, list_key = ['col1','date'],
                   math_logic = {'col1' : {'logic' : '>=', 'value' : new_df['col1'].min(), 'type' : 'int'} ,
                                 'date' : {'logic' : '>=', 'value' : new_df['date'].min().date(), 'type' : 'date'}}
                   ,debug = True)
mssql.read(table_name)
# -
# To be honest, I'd recommend using text-typed, well-defined key columns.
#
# ***
#
# ### 4th Method : dump_new
#
# dump only non existing key into the table
# +
# recreate the sample table; dump_new only inserts keys that don't exist yet
df1, new_df, table_name, sp = setup_sample(mssql)
mssql.read(table_name)
# +
mssql.dump_new(new_df, table_name, list_key = ['col1'])
mssql.read(table_name)
# -
# Just like dump_replace, you could also use multiple keys at once.
df1, new_df, table_name, sp = setup_sample(mssql)
# +
mssql.dump_new(new_df, table_name, list_key = ['col1','col2'])
mssql.read(table_name)
# -
# # MySQL
# pull the MySQL section of the local settings and show its driver type
set_mysql = setting['MYSQL']
print(set_mysql['type'])
# ### Create connection to server
mysql = lazy_SQL(sql_type = 'MYSQL', #set_mysql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mysql['host'],
                 database_name = '{Your Database Name}', #set_mysql['database'],
                 user = '{Your Username}', #set_mysql['user'],
                 password = '{<PASSWORD>}')#set_mysql['password'])
# Same as SQL Server, For MySQL the <b>sql_type must == "MYSQL" (ignore case sensitive)</b>
# <br><br>
# by default, the class will use pymysql as the driver and port 3306
# <br><br>
# But you could also change them if you want like below
mysql = lazy_SQL(sql_type = 'MYSQL', #set_mysql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_mysql['host'],
                 database_name = '{Your Database Name}', #set_mysql['database'],
                 user = '{Your Username}', #set_mysql['user'],
                 password = '{<PASSWORD>}',#set_mysql['password'],
                 port = '3306',
                 driver = 'pymysql')
# <b>Every Method and Function are the same as SQL Server.</b>
# # Google's BigQuery
# pull the BigQuery section of the local settings and show its driver type
set_bgq = setting['gbq']
print(set_bgq['type'])
# ### Create connection to server
gbq = lazy_SQL(sql_type = 'bigquery', #set_bgq['type'], #
               host_name = '{YOUR Project ID}', #set_bgq['project_id'], #
               database_name = '{Your Dataset Name}') #set_bgq['dataset']) #
# For GCP's BigQuery the <b>sql_type must == "BIGQUERY" (ignore case sensitive)</b>
# <br><br>
# Followed by Project ID and Dataset's name
# <br><br>
# Parameter names are not related because I never thought this function would be used with GCP's BigQuery in the first place.
# <br><br>
# If you have a credential json file, you could add the file's path into the "credentials" parameter
gbq = lazy_SQL(sql_type = 'bigquery', #set_bgq['type'],
               host_name = '{YOUR Project ID}', #set_bgq['project_id'], #
               database_name = '{Your Dataset Name}', #set_bgq['dataset'], #
               credentials_path = '{Path to Credentials file (JSON)}') #set_bgq['credentials_path']) #
# <b>Every Method and Function are the same as SQL Server.</b>
#
# Because BigQuery not automated data type for us like other SQL<br>
# SUGGEST TO **USE KEY COLUMNS TYPE AS GOOD FORMATTING TEXT BEFORE DUMP**
# # PostGreSQL
# pull the PostgreSQL section of the local settings and show its driver type
set_plsql = setting['POSTGRESQL']
print(set_plsql['type'])
# ### Create connection to server
plsql = lazy_SQL(sql_type = 'POSTGRESQL', #set_plsql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_plsql['host'],
                 database_name = '{Your Database Name}', #set_plsql['database'],
                 user = '{Your Username}', #set_plsql['user'],
                 password = '{<PASSWORD>}')#set_plsql['password'])
# Same as SQL Server, For PostGreSQL the <b>sql_type must == "POSTGRESQL" (ignore case sensitive)</b>
# <br><br>
# by default, the class will use psycopg2 as the driver and port 5432
# <br><br>
# But you could also change them if you want like below
plsql = lazy_SQL(sql_type = 'POSTGRESQL', #set_plsql['type'],
                 host_name = '{YOUR HOST NAME or ID Address}', #set_plsql['host'],
                 database_name = '{Your Database Name}', #set_plsql['database'],
                 user = '{Your Username}', #set_plsql['user'],
                 password = '{<PASSWORD>}', #set_plsql['password'])
                 port = '5432',
                 driver = 'psycopg2')
# <b>Every Method and Function are the same as SQL Server.<br>
# Except PostGreSQL still can't use Store Procedure or Function yet</b>
# # SQLite
# For SQLite only host_name matters: it is the path to the .db file.
sqlite = lazy_SQL(sql_type = 'sqlite',
                  host_name = '{path to database file}', #'test.db',
                  database_name = '',
                  user = '',
                  password = '' )
# Same as SQL Server, For SQLite the sql_type must == "SQLITE" (ignore case sensitive)
#
# by Default, the class will using sqlite as driver
# - host_name for SQLite is the path to your .db file
# - database_name, user and password will not be read if you didn't address special driver
# - chunksize for SQLite will reduce to 50 because of SQLite's limit
#
# Every Method and Function are the same as SQL Server.<br>
# **Don't forget that SQLite don't have Store Procedure or Datetime Type**
# # OracleSQL
# +
# Coming Soon...
| samples/database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import urllib.request
from PIL import Image
import json
# Setup splinter
# NOTE(review): machine-specific chromedriver path — update for your environment.
executable_path = {'executable_path': '/Users/kristenhanold/.wdm/drivers/chromedriver/mac64/96.0.4664.45/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
# # NASA - Scraping Most Recent Mars News
# create a connection to NASA site
url1 = 'https://redplanetscience.com/'
browser.visit(url1)
# Create a Beautiful Soup and HTML object
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# Scrape the Mars News Site and collect the latest News Title
# Assign the text to variables that you can reference later
nasa_title = soup.find('div', class_ = 'content_title').text
print(nasa_title)
# Scrape the Mars News Site and collect the Paragraph Text
# Assign the text to variables that you can reference later
nasa_paragraph = soup.find('div', class_ = 'article_teaser_body').text
print(nasa_paragraph)
# # JPL Mars Space Images - Scraping Featured Image
# create a connection to NASA image site
url2 = 'https://spaceimages-mars.com'
browser.visit(url2)
# Create a Beautiful Soup and HTML object
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# +
# Use splinter to navigate the site and find the image url for the current Featured Mars Image
# Find the image url to the full size `.jpg` image.
nasa_img = soup.find('img', class_ = 'headerimage fade-in')['src']
nasa_img
# -
# Assign the url string to a variable called `featured_image_url`.
featured_image_url = f"https://spaceimages-mars.com/{nasa_img}"
print(featured_image_url)
# +
# download the image locally, then open it with Pillow to display it
urllib.request.urlretrieve(featured_image_url, 'nasa.jpg')
img = Image.open('nasa.jpg')
img.show()
# -
# # Mars Facts - Scraping with Pandas
# use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.
url3 = 'https://galaxyfacts-mars.com'
# read_html returns a list of every <table> found on the page
tables = pd.read_html(url3)
tables
# extract only the second table containing 'MARS PLANET PROFILE'
tables_df = tables[1]
tables_df
# adding column names to new dataframe containing mars info
mars_df = pd.DataFrame({
    'Mars Profile': tables_df[0],
    'Measurements': tables_df[1]
})
mars_df
# convert the data to a HTML table string and remove unwanted newlines
html_table = mars_df.to_html().replace('\n', '')
html_table
# # Mars Hemispheres
# create a connection to the USGS Astropedia site
url4 = 'https://marshemispheres.com/'
browser.visit(url4)
# Create a Beautiful Soup and HTML object
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# return an iterable list of all the hemisphere links
results = soup.find_all('div', class_ = 'item')
# +
# Build the list of hemisphere dictionaries in one pass: one
# {'title': ..., 'img_url': ...} entry per scraped result item.
hemisphere_image_urls = [
    {'title': item.find('h3').text, 'img_url': item.img['src']}
    for item in results
]
# -
# pretty-print the scraped hemisphere list as indented JSON
print(json.dumps(hemisphere_image_urls, sort_keys=False, indent=4))
| Missions_to_Mars/mission_to_mars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Modeling the Impact of Lateral Flow Parameterizations on Total Evapotranspiration in the Reynolds Mountain East catchment using pySUMMA
# ## 1. Introduction
# One part of the Clark et al. (2015) study explored the impact of the lateral flux of liquid water on total evapotranspiration (ET) using a SUMMA model for the Reynolds Mountain East catchment. This study looked at the sensitivity of the different model representation of the lateral flux of liquid water, which determines the availability of soil water.
#
# In this Jupyter Notebook, the pySUMMA library is used to reproduce this analysis. First, the lateral flux from the soil profile is described. Next, the Methods section describes how pySUMMA can be used to create three different lateral model representations of the Reynolds Mountain East catchment model: 1d Richards', lumped topmodel, and distributed topmodel. The Results section shows how to use pySUMMA and the Pandas library to reproduce Figure 8(right) from Clark et al. (2015).
#
# Collectively, this Jupyter Notebook serves as an example of how hydrologic modeling can be conducted directly within a Jupyter Notebook by leveraging the pySUMMA library.
#
# | Method | 1dRichards' | Lumped Topmodel | Distributed Topmodel |
# |---------------------------------------------|-------------|-------------------|------------------------|
# | groundwater parameterization | noXplict | qTopmodl | qTopmodl |
# | hydraulic conductivity profile | constant | pow_prof | pow_prof |
# |lower boundary condition for soil hydrology | drainage | zeroFlux | zeroFlux |
# |thermal conductivity representation for soil | mixConstit | funcSoilWet | funcSoilWet |
# ## 2. Background
# ### The Transpiration from soil layers available in SUMMA
#import libraries to display equations within the notebook
from IPython.display import display, Math, Latex
# ### Lateral flux from the soil profile
# The soil columns can be hydrologically connected, such that the lateral flux from upslope soil columns is the inflow to downslope soil columns, or hydrologically-disconnected (using one or many soil columns), in which case the lateral flux of water from soil columns is assumed to flow directly into the river network.
# The continuity equation for sub-surface storage (i.e., below the water table) can be written for a given model element as [Wigmosta et al., 1994]
#
# \begin{equation*}
# Q_{dr} = \frac{dz_{wt}}{dt} = \frac{Q_{out}-Q_{in}}{A} - q_{rchg}
# \end{equation*}
#
# $Q_{dr} = (\theta_{sat}^{soil} - \theta_{fc}^{soil}) $ : โdrainableโ porosity, $\theta_{fc}^{soil}$ : the field capacity of soil, $z_{wt}$ $(m)$ : the depth to the water table
#
# $Q_{out}$ and $Q_{in}$ $(m^{3}/s)$: the lateral inflow and outflow, $q_{rchg}$ $(m/s)$ : the vertical recharge rate, $A$ $(m^2)$ : the element area
# #### Storage-based implementation to represent lateral flow between soil columns
# The โdrainableโ water storage and the maximum drainable water storage can be given as
# \begin{equation*}
# W_{dr}^{soil} = \int_{z_{crit}}^{z_{soil}}\ [\theta_{liq}^{soil} (z) - \theta_{fc}^{soil} ] \mathrm{d}z, \ W_{dr,max}^{soil} = \phi_{dr}z_{soil}
# \end{equation*}
#
# $\theta_{liq}^{soil} (z)$ : the volumetric liquid water content at soil depth z, $z_{crit}$ : the lowest point in the soil profile where $\theta_{liq}^{soil}$ < $\theta_{fc}^{soil}$
# #### The total lateral outflow
# \begin{equation*}
# Q_{out} = x_{len}tan(\beta) \frac{K_{sat}^{0} W_{dr,max}^{soil}}{\phi_{dr}n_{sf}}[\frac{W_{dr}^{soil}}{W_{dr,max}^{soil}}]^{n_{sf}}
# \end{equation*}
#
# $\beta$ : the gradient in the land surface, used to approximate the water table gradient
# #### The total lateral flux
# \begin{equation*}
# q_{base}^{soil} = \frac{Q_{out}-Q_{in}}{A}
# \end{equation*}
#
# The total lateral flux $q_{base}^{soil}$ can then be apportioned to individual soil layers, obtained after spatial discretization described in Clark et al. [2015b], to provide the lateral flow sink term
# \begin{equation*}
# (S_{lf})_{j} = (w_{tv})_{j} q_{base}^{soil}
# \end{equation*}
#
# $(w_{tv})_{j}$ : the ratio of the transmissivity of the $j$-th layer to the total transmissivity
# The above descriptions are taken from the lateral flux from the soil profile section(3.2.3.5) within the manual Structure for Unifying Multiple Modeling Alternatives (SUMMA), Version 1.0: Technical Description (April, 2015).
# ## 3. Methods
# ### 1) Study Area
# #### The Reynolds Mountain East catchment is located in southwestern Idaho as shown in the figure below.
from ipyleaflet import Map, GeoJSON
import json
# center the interactive map on the Reynolds Mountain East catchment
m = Map(center=[43.06745, -116.75489], zoom=15)
# load the catchment outline (GeoJSON, lat/lon) from a local file
with open('reynolds_geojson_latlon.geojson') as f:
    data = json.load(f)
g = GeoJSON(data=data)
m.add_layer(g)
# display the map (last expression in the notebook cell)
m
# ### 3) Create pySUMMA Simulation Object of 1d Richards method and Run SUMMA Model
from pysumma.Simulation import Simulation
from pysumma.Plotting import Plotting
# NOTE(review): absolute /glade/... paths are machine-specific — adjust for your system.
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S_1dRichards = Simulation('/glade/u/home/ydchoi/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_1dRichards.txt')
# set SUMMA executable file
S_1dRichards.executable = "/glade/u/home/ydchoi/summa/bin/summa.exe"
# check the simulation start and finish times
S_1dRichards.decision_obj.simulStart.value, S_1dRichards.decision_obj.simulFinsh.value
# check option and selected method of (11) choice of groundwater parameterization in Decision file
S_1dRichards.decision_obj.groundwatr.options, S_1dRichards.decision_obj.groundwatr.value
# check option and selected method of (12) choice of hydraulic conductivity profile in Decision file
S_1dRichards.decision_obj.hc_profile.options, S_1dRichards.decision_obj.hc_profile.value
# check option and selected method of (16) type of lower boundary condition for soil hydrology in Decision file
S_1dRichards.decision_obj.bcLowrSoiH.options, S_1dRichards.decision_obj.bcLowrSoiH.value
# check option and selected method of (27) choice of thermal conductivity representation for soil in Decision file
S_1dRichards.decision_obj.thCondSoil.options, S_1dRichards.decision_obj.thCondSoil.value
# check Basin variable meta data in file manager file
S_1dRichards.meta_basinvar.filename
# check Basin Parameter info data in file manager file
S_1dRichards.basin_par.filename
# check Forcing list data in file manager file
S_1dRichards.forcing_list.filename
# check Initial condition data in file manager file
S_1dRichards.initial_cond.filename
# #### If you already have the output file, you don't need to run SUMMA — skip to the next section
# run the model giving the output the suffix "1dRichards_local" and get "results_1dRichards" object
results_1dRichards, output_R = S_1dRichards.execute(run_suffix="1dRichards_local", run_option = 'local')
R = Plotting(output_R)
# re-open the written netCDF so results_1dRichards points at the file on disk
results_1dRichards = R.open_netcdf()
# ### 4) Create pySUMMA Simulation Object of Lumped Topmodel method and Run SUMMA Model
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S_lumpedTopmodel = Simulation('/glade/u/home/ydchoi/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_lumpedTopmodel.txt')
# set SUMMA executable file
S_lumpedTopmodel.executable = "/glade/u/home/ydchoi/summa/bin/summa.exe"
# check the simulation start and finish times
S_lumpedTopmodel.decision_obj.simulStart.value, S_lumpedTopmodel.decision_obj.simulFinsh.value
# check option and selected method of (11) choice of groundwater parameterization in Decision file
S_lumpedTopmodel.decision_obj.groundwatr.options, S_lumpedTopmodel.decision_obj.groundwatr.value
# check option and selected method of (12) choice of hydraulic conductivity profile in Decision file
S_lumpedTopmodel.decision_obj.hc_profile.options, S_lumpedTopmodel.decision_obj.hc_profile.value
# check option and selected method of (16) type of lower boundary condition for soil hydrology in Decision file
S_lumpedTopmodel.decision_obj.bcLowrSoiH.options, S_lumpedTopmodel.decision_obj.bcLowrSoiH.value
# check option and selected method of (27) choice of thermal conductivity representation for soil in Decision file
S_lumpedTopmodel.decision_obj.thCondSoil.options, S_lumpedTopmodel.decision_obj.thCondSoil.value
# check Basin variable meta data in file manager file
S_lumpedTopmodel.meta_basinvar.filename
# check Basin Parameter info data in file manager file
S_lumpedTopmodel.basin_par.filename
# check Forcing list data in file manager file
S_lumpedTopmodel.forcing_list.filename
# check Initial condition data in file manager file
S_lumpedTopmodel.initial_cond.filename
# run the model giving the output the suffix "lumpedTopmodel_local" and get "results_lumpedTopmodel" object
results_lumpedTopmodel, output_LT = S_lumpedTopmodel.execute(run_suffix="lumpedTopmodel_local", run_option = 'local')
# ### 5) Create pySUMMA Simulation Object of Distributed Topmodel method and Run SUMMA Model
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S_distributedTopmodel = Simulation('/glade/u/home/ydchoi/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_distributedTopmodel.txt')
# set SUMMA executable file
S_distributedTopmodel.executable = "/glade/u/home/ydchoi/summa/bin/summa.exe"
# check the simulation start and finish times
S_distributedTopmodel.decision_obj.simulStart.value, S_distributedTopmodel.decision_obj.simulFinsh.value
# check option and selected method of (11) choice of groundwater parameterization in Decision file
S_distributedTopmodel.decision_obj.groundwatr.options, S_distributedTopmodel.decision_obj.groundwatr.value
# check option and selected method of (12) choice of hydraulic conductivity profile in Decision file
S_distributedTopmodel.decision_obj.hc_profile.options, S_distributedTopmodel.decision_obj.hc_profile.value
# check option and selected method of (16) type of lower boundary condition for soil hydrology in Decision file
S_distributedTopmodel.decision_obj.bcLowrSoiH.options, S_distributedTopmodel.decision_obj.bcLowrSoiH.value
# check option and selected method of (27) choice of thermal conductivity representation for soil in Decision file
S_distributedTopmodel.decision_obj.thCondSoil.options, S_distributedTopmodel.decision_obj.thCondSoil.value
# check Basin variable meta data in file manager file
S_distributedTopmodel.meta_basinvar.filename
# check Basin Parameter info data in file manager file
S_distributedTopmodel.basin_par.filename
# check Forcing list data in file manager file
S_distributedTopmodel.forcing_list.filename
# check Initial condition data in file manager file
S_distributedTopmodel.initial_cond.filename
# run the model giving the output the suffix "distributedTopmodel_local" and get "results_distributedTopmodel" object
results_distributedTopmodel, output_DT = S_distributedTopmodel.execute(run_suffix="distributedTopmodel_local", run_option = 'local')
# ## 4. Results
# ### Recreate the Figure 8(right) plot from Clark et al., 2015: The total ET Sensitivity for the model representation of the lateral flux of liquid water
from pysumma.Plotting import Plotting
from jupyterthemes import jtplot
import matplotlib.pyplot as plt
import pandas as pd
# set the default figure size (inches) for the plots below
jtplot.figsize(x=10, y=10)
# #### 4.1) Create function to calculate Total ET of hour of day from SUMMA output for the period 1 June to 20 August 2007
def calc_total_et(et_output_df):
    """Average total evapotranspiration (mm/hr) by hour of day, 1 Jun - 20 Aug 2007.

    Total ET = canopy transpiration + canopy evaporation + ground evaporation,
    converted from kg m-2 s-1 to mm/hr (multiply by 3600).
    """
    # Sum the three ET components and convert the units.
    et_sum = (et_output_df['scalarCanopyTranspiration']
              + et_output_df['scalarCanopyEvaporation']
              + et_output_df['scalarGroundEvaporation']) * 3600
    # Tabulate the values against the output timestamps,
    # snapped to the nearest hour (e.g. 00:59:59.99 -> 01:00:00).
    et_table = pd.DataFrame(et_sum.data, index=et_sum.coords['time'].data)
    et_table.index = et_table.index.round("H")
    # Restrict to the analysis window, average per hour,
    # then average across days for each hour of day (0-23).
    window = et_table.loc["2007-06-01":"2007-08-20"]
    hourly = window.resample("H").mean()
    return hourly.groupby(hourly.index.hour).mean()
# #### 4.2) Get hour of day output of the Parameterization of the Lateral Flux of Liquid Water for the period 1 June to 20 August 2007
# get hour of day output using calc_total_et method (1d Richards method applied, 1 hru)
hour_1dRichards = calc_total_et(results_1dRichards)
# get hour of day output using calc_total_et method (lumped Topmodel method applied, 1 hru)
hour_lumpedTopmodel = calc_total_et(results_lumpedTopmodel)
# get hour of day output using calc_total_et method (distributed Topmodel method applied, 6 hrus)
hour_distributedTopmodel = calc_total_et(results_distributedTopmodel)
# check the area of each hru to calculate areal average ET
trial_parameter_nc = Plotting(S_distributedTopmodel.setting_path.filepath+S_distributedTopmodel.local_attr.value)
trial_parameter = trial_parameter_nc.open_netcdf()
# read the area of each hru
trial_parameter['HRUarea']
# calculate areal (area-weighted) average ET for distributed Topmodel
# NOTE(review): HRU areas are hard-coded — confirm they match trial_parameter['HRUarea'] above.
hour_distributedTopmodel_average = (hour_distributedTopmodel[0]*78300 + hour_distributedTopmodel[1]*32700 + hour_distributedTopmodel[2]*18600 + hour_distributedTopmodel[3]*32800 + hour_distributedTopmodel[4]*168200 + hour_distributedTopmodel[5]*45400)/(78300+32700+18600+32800+168200+45400)
# #### 4.3) Combine the Parameterization of the Lateral Flux of Liquid Water into a single Pandas Dataframe
# Combine ET for model representation of the lateral flux of liquid water
ET_Combine = pd.concat([hour_1dRichards, hour_lumpedTopmodel, hour_distributedTopmodel_average], axis=1)
# add labels for the legend
ET_Combine.columns = ["Baseflow = 1D Richards'", 'Baseflow = Topmodel(lumped)', 'Baseflow = Topmodel(distributed)']
ET_Combine
# #### 4.4) Add observation data from the Aspen station in Reynolds Mountain East to the plot
# create pySUMMA Plotting Object
Val_eddyFlux = Plotting('/glade/u/home/ydchoi/summaTestCases_2.x/testCases_data/validationData/ReynoldsCreek_eddyFlux.nc')
# read Total Evapotranspiration(LE-wpl) from validation netcdf file
Obs_Evapotranspitaton = Val_eddyFlux.ds['LE-wpl']
# create dates(X-axis) attribute from validation netcdf file
dates = Obs_Evapotranspitaton.coords['time'].data
# Change unit from Wm-2 to mm/hr (1 Wm-2 = 0.0864 MJm-2day-1, 1 MJm-2day-1 = 0.408 mmday-1, 1day = 24h)
data_values = Obs_Evapotranspitaton.data*0.0864*0.408/24
# create two dimensional tabular data structure
df = pd.DataFrame(data_values, index=dates)
# set the time period to display plot
df_filt = df.loc["2007-06-01":"2007-08-20"]
# select the aspen observation station among three different stations
df_filt.columns = ['-','Observation (aspen)','-']
# average by (hour, minute) of day across the whole period
df_gp_hr = df_filt.groupby([df_filt.index.hour, df_filt.index.minute]).mean()
# reset index so each row has an hour and minute column
df_gp_hr.reset_index(inplace=True)
# fractional hour-of-day values for the scatter plot's x axis
xvals = df_gp_hr.reset_index()['level_0'] + df_gp_hr.reset_index()['level_1']/60.
# #### 4.5) Plotting output of the Parameterization of the Lateral Flux of Liquid Water and observation data
# create plot with the Parameterization of model representation of the lateral flux of liquid water
ET_Combine_Graph = ET_Combine.plot()
# invert y axis — presumably to match Figure 8's orientation; confirm against the paper
ET_Combine_Graph.invert_yaxis()
# overlay the observed values as a scatter: x=xvals, y='Observation (aspen)'
ET_Combine_Graph.scatter(xvals, df_gp_hr['Observation (aspen)'])
# add x, y label
ET_Combine_Graph.set(xlabel='Time of day (hr)', ylabel='Total evapotranspiration (mm h-1) ')
# show the legend
ET_Combine_Graph.legend()
# ## 5. Discussion
# As stated in Clark et al., 2015, the following insights can be gained from this analysis:
#
# * The simulation in Figure 8 illustrates the model representation of the lateral flux of liquid water, which determines (in part) the availability of soil water.
#
# * The results in Figure 8 demonstrate strong sensitivities the lateral flow parameterization. The parameterizations based on power-law transmissivity profiles (both lumped and distributed) have more drainage of soil water at deeper soil layers; however, the distributed simulations include inflow from upslope, resulting in more plant-available soil water and an increase in transpiration. Taken together, the results in Figure 8 illustrate the strong interdependencies among different modeling decisions, which of course complicate discriminating among competing process parameterizations.
| sopron_2018_notebooks/pySUMMA_Demo_Example_Fig8_right_Using_TestCase_in_Local.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import time
from scipy.stats import linregress
# citypy allows us to determine cities based on latitude and longitude
from citipy import citipy
# Import weather API access key
from api_keys import weather_api_key
# Create variable to access output file
cities_output = "../output_data/cities.csv"
# Define range of latitudes and longitudes
lat_rng = (-90,90)
lng_rng = (-180, 180)
# -
# ## Generate Cities List
# +
# Create empty lists to store coordinates and city names
coordinates = []
cities = []
# Create variable to store random set of latitude and longitude values
lat = np.random.uniform(lat_rng[0], lat_rng[1], size = 1500)
lng = np.random.uniform(lng_rng[0], lng_rng[1], size = 1500)
# Store both sets together
coordinates = zip(lat,lng)
# We create a loop that uses citipy to identify the nearest city to each set of coordinates
for lat_lng in coordinates:
# Store the city name in a variable using citipy
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    # Append the city name to our cities list
if city not in cities:
cities.append(city)
# Print the count of cities
len(cities)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# Create a variable for the API URL
url = f"http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID={weather_api_key}"
# Create a list to store the data of each city
city_data = []
# Create counters
record_count = 1
# Fetch current weather for every candidate city, skipping unknown cities.
# BUG FIX: record_count was incremented *before* the print, so the first
# processed city was labeled "Record2"; the label was also missing a space.
for city in cities:
    query_url = f"{url}&q={city}"
    # Try/except lets us skip cities the API does not recognize
    try:
        # Request data and parse it as JSON
        response = requests.get(query_url).json()
        # Parse the data into each category
        country = response["sys"]["country"]
        city_name = response["name"]
        date = response["dt"]
        cloudiness = response["clouds"]["all"]
        wind_speed = response["wind"]["speed"]
        humidity = response["main"]["humidity"]
        max_temp = response["main"]["temp_max"]
        latitude = response["coord"]["lat"]
        longitude = response["coord"]["lon"]
        # Append data to our data list
        city_data.append({"City": city_name,
                          "Lat": latitude,
                          "Lng": longitude,
                          "Max Temp": max_temp,
                          "Humidity": humidity,
                          "Cloudiness": cloudiness,
                          "Wind Speed": wind_speed,
                          "Country": country,
                          "Date": date})
        # Print before incrementing so the first record is labeled 1
        print(f"Processing Record {record_count} | city: {city}")
        record_count += 1
    except Exception:
        print('City not found! skipping...')
print(" --- Data Collection Completed --- ")
# -
#Transform our city_data list into a pandas data frame
city_df = pd.DataFrame(city_data)
city_df.count()
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
city_df.to_csv('cities.csv')
city_df.head()
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
# Get the indices of cities that have humidity over 100%.
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# ## Latitude vs. Humidity Plot
# ## Latitude vs. Cloudiness Plot
# ## Latitude vs. Wind Speed Plot
# ## Linear Regression
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
| starter_code/.ipynb_checkpoints/WeatherPy-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
plt.style.use(['science', 'notebook'])
import sympy as smp
from skimage import color
from skimage import io
from scipy.fft import fftfreq
from scipy.fft import fft, ifft, fft2, ifft2
# # Different Types of Fourier Transforms
# ## 1. Fourier Transform (Continuous time and frequency)
#
# This occurs when the functional form of your time series is known analytically (i.e. you have a formula $x(t)=...$ for it) and goes from $-\infty$ to $\infty$
#
# $$\hat{x}(f) = \int_{-\infty}^{\infty} x(t) e^{-2 \pi i f t} dt $$
# **Solving Analytically (If Possible)**: Be careful giving proper information about your variables when you define them for sympy to work properly!
t, f = smp.symbols('t, f', real=True)
t, f = smp.symbols('t, f', real=True)
k = smp.symbols('k', real=True, positive=True)
x = smp.exp(-k * t**2) * k * t
x
from sympy.integrals.transforms import fourier_transform
x_FT = fourier_transform(x, t, f)
x_FT
# **Solving Numerically**: Sometimes sympy can't evaluate integrals analytically, in which case you'll need to use scipy
# +
# Won't run
#x = smp.exp(-k * t**2) * smp.sin(k*t) * t**4
#fourier_transform(x, t, f)
# -
from scipy.integrate import quad
# Define function we want to take Fourier transform of and function to compute Fourier transform
# +
def x(t, k):
return np.exp(-k * t**2) * np.sin(k*t) * t**4
def get_x_FT(x, f, k):
    """Numerically evaluate the continuous Fourier transform of x at frequency f.

    Integrates x(t, k) * exp(-2*pi*i*f*t) over the whole real line.  Since
    scipy's quad only handles real integrands, the real and imaginary parts
    are integrated separately and recombined into a complex result.
    """
    def integrand(t):
        return x(t, k) * np.exp(-2j * np.pi * f * t)

    re_part = quad(lambda t: np.real(integrand(t)), -np.inf, np.inf)[0]
    im_part = quad(lambda t: np.imag(integrand(t)), -np.inf, np.inf)[0]
    return re_part + 1j * im_part
# -
# Get frequencies and fourier transform values
f = np.linspace(-4, 4, 100)
x_FT = np.vectorize(get_x_FT)(x, f, k=2)
# Plot
plt.plot(f, np.abs(x_FT))
plt.ylabel('$|\hat{x}(f)|$', fontsize=20)
plt.xlabel('$f$', fontsize=20)
# ## 2. Fourier Series (Continuous Time, Discrete Frequency)
# This occurs when the function $x(t)$ is bounded between times $0$ and $T$ (non-infinite)
#
# $$\hat{x}(f_n) = \frac{1}{T} \int_{0}^{T} x(t) e^{-2 \pi i f_n t} dt $$
#
# where $f_n = n/T$.
# Consider now only between t=0 to t=1
t = smp.symbols('t', real=True)
k, n, T = smp.symbols('k, n, T', real=True, positive=True)
fn = n/T
x = smp.exp(-k * t)
x
# Compute the Fourier transform analytically:
x_FT = smp.integrate(1/T * x*smp.exp(-2*smp.pi*smp.I*fn*t), (t, 0, T)).simplify()
x_FT
smp.Abs(x_FT).simplify()
# Convert to a numerical function so the values can be extracted numerically and plotted:
get_FT = smp.lambdify([k, T, n], x_FT)
ns = np.arange(0, 20, 1)
xFT = get_FT(k=1, T=4, n=ns)
# Plot:
plt.figure(figsize=(10,3))
plt.bar(ns, np.abs(xFT))
plt.xticks(ns)
plt.ylabel('$|\hat{x}_n|$', fontsize=25)
plt.xlabel('$n$', fontsize=25)
plt.show()
# If it can't be done analytically, need to use scipy like before. Consider
#
# $$x(t) = e^{-k t^2} \sin(kt) / t \hspace{10mm} k=2, T=4$$
# +
def x(t, k):
    """Damped sinc-like signal exp(-k t^2) * sin(k t) / t (undefined at t = 0)."""
    damping = np.exp(-k * t**2)
    return damping * np.sin(k * t) / t
def get_x_FT(x, n, k, T):
    """Numerically compute the n-th Fourier series coefficient of x on [0, T].

    Evaluates the integral of x(t, k) * exp(-2*pi*i*(n/T)*t) from 0 to T,
    splitting the complex integrand into real and imaginary quadratures
    because scipy's quad only accepts real-valued integrands.

    Note: unlike the analytic formula in the text, the 1/T prefactor is not
    applied here (matching the original implementation).
    """
    fn = n / T

    def integrand(t):
        return x(t, k) * np.exp(-2j * np.pi * fn * t)

    re_part = quad(lambda t: np.real(integrand(t)), 0, T)[0]
    im_part = quad(lambda t: np.imag(integrand(t)), 0, T)[0]
    return re_part + 1j * im_part
# -
# Compute values of $n$ in $f_n=n/T$ and then $\hat{x}_n$ itself using the function above:
ns = np.arange(0, 20, 1)
xFT = np.vectorize(get_x_FT)(x, ns, k=2, T=4)
# Plot
plt.figure(figsize=(10,3))
plt.bar(ns, np.abs(xFT))
plt.xticks(ns)
plt.ylabel('$|\hat{x}_n|$', fontsize=25)
plt.xlabel('$n$', fontsize=25)
plt.show()
# ## 3. Discrete Fourier Transform (Discrete Time, Discrete Frequency)
#
# Here we consider a discrete time series $x_t$ that's measured for a finite amount of time ($N$ measurements over a time $T$ implies $N\Delta t = T$). The Fourier transform here is **defined** as
#
# $$\hat{x}(f_n) = \sum_{k=0}^{N-1} x_t e^{-2 \pi i f_n (k \Delta t)} \hspace{10mm} f_n=\frac{n}{N\Delta t}$$
#
# where $f_n$ are the so-called Fourier frequencies. The notation can be simplified as
#
# $$\hat{x}_n = \sum_{k=0}^{N-1} x_t e^{-2 \pi i kn/N}$$
#
#
# Note we get $\hat{x}_n = \hat{x}_{n \pm N} = \hat{x}_{n \pm 2N} = ...$ with this definition. With this we can restrict ourselves from $n=0$ to $n=N-1$ and not lose any information OR we can also restrict ourselves to
#
# * In the case that $N$ is even, $n=-N/2$ to $n=N/2-1$
# * In the case that $N$ is odd, $n=-(N-1)/2$ to $(N-1)/2$
#
# This is precisely what scipy does, returning an array $\hat{x}_n$ corresponding to the frequencies
#
# `f = [0, 1, ..., N/2-1, -N/2, ..., -1] / (dt*N) if N is even`
#
# `f = [0, 1, ..., (N-1)/2, -(N-1)/2, ..., -1] / (dt*N) if N is odd`
#
# Why does it do this? Well typically one deals with real time series $x_t$, and there's a handy identity
#
# $$\hat{x}_n = \hat{x}_{-n}^*$$
#
# so one only needs to look at the first half of the frequencies to know everything about the Fourier transform $\hat{x}_n$.
#
#
T = 40 #seconds
N = 100 #measurements
t = np.linspace(0, T, N)
dt = np.diff(t)[0]
# Look at a couple particular frequencies
f1 = 20/(N*dt)
f2 = 10/(N*dt)
f3 = (10+5*N)/(N*dt)
# Get a few time series:
x1 = np.sin(2*np.pi*f1*t) + 0.3*np.sin(2*np.pi*f2*t) + 0.3*np.random.randn(len(t))
x2 = np.sin(2*np.pi*f2*t)+ 0.1*np.random.randn(len(t))
x3 = np.sin(2*np.pi*f3*t)+ 0.1*np.random.randn(len(t))
plt.plot(t, x1)
plt.xlabel('$t$ [seconds]', fontsize=20)
plt.ylabel('Signal [arb]')
plt.show()
f = fftfreq(len(t), np.diff(t)[0])
x1_FFT = fft(x1)
# Plot the first half of the spectrum (for $x(t)$ real, all information is contained in the first half)
plt.plot(f[:N//2], np.abs(x1_FFT[:N//2]))
plt.xlabel('$f_n$ [$s^{-1}$]', fontsize=20)
plt.ylabel('|$\hat{x}_n$|', fontsize=20)
plt.show()
# Demonstrate that $\hat{x}_n = \hat{x}_{n+5N}$ here:
print(f2)
print(f3)
plt.plot(t,x2)
plt.plot(t,x3)
plt.xlabel('$t$ [seconds]', fontsize=20)
plt.ylabel('Signal [arb]')
plt.show()
x2_FFT = fft(x2)
x3_FFT = fft(x3)
plt.plot(f[:N//2], np.abs(x2_FFT[:N//2]), label='$x_2$')
plt.plot(f[:N//2], np.abs(x3_FFT[:N//2]), 'r--', label='$x_3$')
plt.axvline(1/(2*dt), ls='--', color='k')
plt.xlabel('$f_n$ [$s^{-1}$]', fontsize=20)
plt.ylabel('|$\hat{x}_n$|', fontsize=20)
plt.show()
# A little bit of 2D Fourier transform stuff:
img = color.rgb2gray(io.imread('images/flower.PNG'))
img
plt.imshow(img, cmap='gray')
img_FT = fft2(img)
fy = np.fft.fftfreq(img.shape[0],d=10) #suppose the spacing between pixels is 10mm, for example
fx = np.fft.fftfreq(img.shape[1],d=10)
print('{:.2f} correponds to fx={:.6f} and fy={:.6f}'.format(img_FT[10,20], fx[20], fy[10]))
# Analogous to 1D, the zero frequency terms correspond to low-order corners of the array, the positive frequency terms in the first half, the nyquist frequency in the middle, and the negative frequencies in the second half.
#
# * If $M(x,y)$ (the image) contains real values then $\hat{M}(f_x, f_y)$ is symmetric WRT to the middle of each axis.
plt.imshow(np.abs(img_FT), cmap='gray', vmax=50)
plt.colorbar()
# Remove low frequencies
img_FT_alt = np.copy(img_FT)
img_FT_alt[-2:] = 0
img_FT_alt[:,-2:] = 0
img_FT_alt[:2] = 0
img_FT_alt[:,:2] = 0
img_alt = np.abs(ifft2(img_FT_alt))
plt.imshow(img_alt, cmap='gray')
plt.colorbar()
# For more advanced image processing see https://scikit-image.org/
| fourier_transform1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="y4vrggqAK5VV" colab_type="code" colab={}
import tensorflow as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
import IPython.display as display
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import os
import pathlib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D,Dropout
# + id="jNMQ2cQfLXPu" colab_type="code" colab={}
import zipfile
with zipfile.ZipFile('/content/cropped.zip', 'r') as zip_ref:
zip_ref.extractall()
# + id="iD2xzfI1L5SM" colab_type="code" colab={}
data_dir = pathlib.Path('/content/cropped')
# + id="zJZCdNbmL-tw" colab_type="code" outputId="d7f73de3-658b-46ef-b811-e241742bb35f" colab={"base_uri": "https://localhost:8080/", "height": 34}
image_count = len(list(data_dir.glob('*/*.jpeg')))
image_count
# + id="jOa5YFveMFwP" colab_type="code" outputId="f8cad9a8-3685-4c76-a67f-8ef2a714c30c" colab={"base_uri": "https://localhost:8080/", "height": 34}
CLASS_NAMES = np.array([item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt"])
CLASS_NAMES
# + id="hisI9BWbMKsn" colab_type="code" colab={}
# The 1./255 is to convert from uint8 to float32 in range [0,1].
image_generator = tf.keras.preprocessing.image.ImageDataGenerator( featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,rescale=1./255)
# + id="AGU35qQMMQJT" colab_type="code" colab={}
BATCH_SIZE = 32
IMG_HEIGHT = 32
IMG_WIDTH = 32
compression = 0.5
STEPS_PER_EPOCH = np.ceil(image_count/BATCH_SIZE)
# + id="uDqPxdsRMSsr" colab_type="code" outputId="c16c3a6b-3f4f-4594-b7af-a74bb894fc4f" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_data_gen = image_generator.flow_from_directory(directory=str(data_dir),
batch_size=BATCH_SIZE,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
classes = list(CLASS_NAMES))
# + id="6DSgI_JqVZuk" colab_type="code" outputId="9c764a0a-363d-4fa0-fb59-daee7afe0c6a" colab={"base_uri": "https://localhost:8080/", "height": 105}
x,y = next(train_data_gen)
# + id="oI1raUALMjbm" colab_type="code" colab={}
from tensorflow.keras import models, layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import BatchNormalization, Activation, Flatten
from tensorflow.keras.optimizers import Adam
# Dense Block
def denseblock(input, num_filter = 12, dropout_rate = 0.2):
    """Stack BN-ReLU-Conv(3x3) layers, densely concatenating each output.

    NOTE(review): the loop bound `l` and the width factor `compression` are
    module-level globals assigned later in the notebook -- this function
    must only be called after those cells have run. TODO: confirm this is
    intentional rather than pass them as parameters.
    """
    global compression
    temp = input
    for _ in range(l):
        BatchNorm = layers.BatchNormalization()(temp)
        relu = layers.Activation('relu')(BatchNorm)
        # 3x3 conv; `compression` shrinks the number of filters
        Conv2D_3_3 = layers.Conv2D(int(num_filter*compression), (3,3), use_bias=False ,padding='same')(relu)
        if dropout_rate>0:
            Conv2D_3_3 = layers.Dropout(dropout_rate)(Conv2D_3_3)
        # dense connectivity: concatenate new features onto the running stack
        concat = layers.Concatenate(axis=-1)([temp,Conv2D_3_3])
        temp = concat
    return temp
## transition Block
def transition(input, num_filter = 12, dropout_rate = 0.2):
    """DenseNet transition block: BN -> ReLU -> 1x1 conv -> (dropout) -> avg-pool.

    Uses the module-level global `compression` to shrink the filter count.
    """
    global compression
    normed = layers.BatchNormalization()(input)
    activated = layers.Activation('relu')(normed)
    bottleneck = layers.Conv2D(int(num_filter * compression), (1, 1),
                               use_bias=False, padding='same')(activated)
    if dropout_rate > 0:
        bottleneck = layers.Dropout(dropout_rate)(bottleneck)
    return layers.AveragePooling2D(pool_size=(2, 2))(bottleneck)
#output layer
def output_layer(input):
    """Classification head: BN -> ReLU -> 1x1 conv (4 classes) -> pooling -> softmax."""
    global compression
    normed = layers.BatchNormalization()(input)
    activated = layers.Activation('relu')(normed)
    class_maps = layers.Conv2D(4, (1,1), activation='relu',padding='same',use_bias=False)(activated)
    pooled = layers.AveragePooling2D(pool_size=(2, 2))(class_maps)
    logits = layers.GlobalAveragePooling2D()(pooled)
    return layers.Activation('softmax')(logits)
# + id="4mfOwVjNNDIh" colab_type="code" colab={}
num_filter = 36
dropout_rate = 0.33
l = 12
input = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH, 3))
First_Conv2D = layers.Conv2D(num_filter, (3,3), use_bias=False ,padding='same')(input)
First_Block = denseblock(First_Conv2D, num_filter, dropout_rate)
First_Transition = transition(First_Block, num_filter, dropout_rate)
Second_Block = denseblock(First_Transition, num_filter, dropout_rate)
Second_Transition = transition(Second_Block, num_filter, dropout_rate)
Third_Block = denseblock(Second_Transition, num_filter, dropout_rate)
Third_Transition = transition(Third_Block, num_filter, dropout_rate)
Last_Block = denseblock(Third_Transition, num_filter, dropout_rate)
output = output_layer(Last_Block)
# + id="EHAq3aHMNFGO" colab_type="code" outputId="0b4bfc41-a041-4650-fb64-f12970b820b2" colab={"base_uri": "https://localhost:8080/", "height": 1000}
model = Model(inputs=[input], outputs=[output])
model.summary()
# + id="uikseRVqNG1H" colab_type="code" colab={}
# determine Loss function and Optimizer
model.compile(loss='categorical_crossentropy',
optimizer=Adam(),
metrics=['accuracy'])
# + id="pfjrGfuGT-X1" colab_type="code" outputId="03347d65-46da-41da-815f-369ddef2325e" colab={"base_uri": "https://localhost:8080/", "height": 139}
model.fit_generator(train_data_gen,epochs=1)
# + id="tTWqnDDqVy8k" colab_type="code" outputId="2f314c83-f1d9-4884-de13-0f0ce29d1358" colab={"base_uri": "https://localhost:8080/", "height": 105}
model.save('classifier')
# + id="ZUUAXBZ7ZT1h" colab_type="code" outputId="f4e435bc-a3dc-4ddc-83a5-fd72383157ac" colab={"base_uri": "https://localhost:8080/", "height": 105}
x,y = next(train_data_gen)
# + id="ePuCinB7ZYjP" colab_type="code" colab={}
| image_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import sys
sys.path.append('../../code/')
import os
import json
from datetime import datetime
import time
from math import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
import igraph as ig
from load_data import load_citation_network, case_info
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
data_dir = '../../data/'
court_name = 'all'
# -
# # load into igraph
# +
start = time.time()
if court_name == 'all':
case_metadata = pd.read_csv(data_dir + 'clean/case_metadata_master.csv')
edgelist = pd.read_csv(data_dir + 'clean/edgelist_master.csv')
else:
net_dir = data_dir + 'clean/' + court_name + '/'
if not os.path.exists(net_dir):
os.makedirs(net_dir)
make_court_subnetwork(court_name, data_dir)
case_metadata = pd.read_csv(net_dir + 'case_metadata.csv')
edgelist = pd.read_csv(net_dir + 'edgelist.csv')
edgelist.drop('Unnamed: 0', inplace=True, axis=1)
# create a dictonary that maps court listener ids to igraph ids
cl_to_ig_id = {}
cl_ids = case_metadata['id'].tolist()
for i in range(case_metadata['id'].size):
cl_to_ig_id[cl_ids[i]] = i
# add nodes
V = case_metadata.shape[0]
g = ig.Graph(n=V, directed=True)
# g.vs['date'] = case_metadata['date'].tolist()
g.vs['name'] = case_metadata['id'].tolist()
# create igraph edgelist
cases_w_metadata = set(cl_to_ig_id.keys())
ig_edgelist = []
missing_cases = 0
start = time.time()
# Translate the court-listener edgelist into igraph vertex-id pairs.
for row in edgelist.itertuples():
    cl_ing = row[1]  # citing case (court listener id)
    cl_ed = row[2]   # cited case (court listener id)
    if (cl_ing in cases_w_metadata) and (cl_ed in cases_w_metadata):
        ig_edgelist.append((cl_to_ig_id[cl_ing], cl_to_ig_id[cl_ed]))
    else:
        # BUG FIX: this was `missing_cases += 0` (never counted a miss) and
        # the append below ran unconditionally, re-appending the previous
        # edge (or raising NameError on the very first miss). Now we count
        # the miss and skip the edge entirely.
        missing_cases += 1
# add edges to graph
g.add_edges(ig_edgelist)
end = time.time()
# print() call form works on both Python 2 and 3 kernels
print('%d seconds for %d edges' % (end - start, len(g.es)))
# -
# add vertex attributes
g.vs['court'] = case_metadata['court'].tolist()
g.vs['year'] = [int(d.split('-')[0]) for d in case_metadata['date'].tolist()]
# +
# g.write_graphml(data_dir + 'clean/entire_law_net.graphml')
# G = ig.read_graphml(data_dir + 'clean/entire_law_net.graphml')
# -
# # analyze
g.summary()
# # in degree distribution
# +
indegrees = g.indegree()
plt.figure(figsize = [20, 10])
plt.subplot(1,2,1)
dmax = 100
binwidth = 1
plt.hist(indegrees, bins=range(0, dmax + binwidth, binwidth));
plt.xlim([0, dmax])
plt.ylim([0, 2e5])
plt.subplot(1,2,2)
plt.loglog(sorted(indegrees, reverse=True), '-', marker='.', color='black',
alpha=.7);
# -
# # Out degree distribution
# +
outdegrees = g.outdegree()
# out degree distribution
plt.figure(figsize = [20, 10])
plt.subplot(1,2,1)
dmax = 50
binwidth = 1
plt.hist(outdegrees, bins=range(0, dmax + binwidth, binwidth));
plt.xlim([0, dmax])
plt.ylim([0, 2e5])
plt.subplot(1,2,2)
plt.loglog(sorted(outdegrees, reverse=True), '-', marker='.', color='black',
alpha=.7);
# -
# # degree statistics by year
year_range = range(1631, 2016 + 1)
year_quotient = pd.DataFrame(index=year_range, columns=['count', 'avg_indegree', 'avg_outdegree'])
# count number of cases
# +
year_counts = {y: 0 for y in year_quotient.index}
for v in g.vs:
year_counts[v['year']] += 1
year_quotient['count'] = year_counts.values()
# -
# get average in/out degrees
# +
indegrees = g.indegree()
outdegrees = g.outdegree()
indegs_counts = {y: [] for y in year_quotient.index}
outdegs_counts = {y: [] for y in year_quotient.index}
# get degrees for cases in each year
for i in range(len(g.vs)):
year = g.vs[i]['year']
indeg = indegrees[i]
outdeg = outdegrees[i]
indegs_counts[year].append(indeg)
outdegs_counts[year].append(outdeg)
# average the degrees by year
for y in indegs_counts.keys():
indegs = indegs_counts[y]
outdegs = outdegs_counts[y]
if len(indegs) == 0:
year_quotient.loc[y, 'avg_indegree'] = 0
else:
year_quotient.loc[y,'avg_indegree'] = np.mean(indegs)
if len(outdegs) == 0:
year_quotient.loc[y, 'avg_outdegree'] = 0
else:
year_quotient.loc[y,'avg_outdegree'] = np.mean(outdegs)
# -
year_quotient
# # Plot the time series
plt.figure(figsize=[10, 10])
plt.scatter(year_quotient.index,
year_quotient['count'],
marker='.',
color='black')
plt.ylim(0, max(year_quotient['count']))
plt.xlim([1850, 2016])
plt.xlabel('year')
plt.ylabel('number of cases')
plt.figure(figsize=[8, 8])
plt.scatter(year_quotient.index,
year_quotient['avg_indegree'],
marker='.',
color='black')
plt.ylim(0, max(year_quotient['avg_indegree']))
plt.xlim([1900, 2016])
plt.xlabel('year')
plt.ylabel('average in-degree')
plt.figure(figsize=[8, 8])
plt.scatter(year_quotient.index,
year_quotient['avg_outdegree'],
marker='.',
color='black')
plt.ylim(0, max(year_quotient['avg_outdegree']))
plt.xlim([1850, 2016])
plt.xlabel('year')
plt.ylabel('average out-degree')
def CreateSubGraph(g, court, includeIfMatched=True):
    """Return a directed copy of g restricted by court membership.

    If includeIfMatched is True, keep only vertices whose 'court' attribute
    contains `court`; otherwise keep only vertices whose 'court' attribute
    does NOT contain it. The input graph is not modified.
    """
    subgraph = g.as_directed()
    if includeIfMatched:
        drop = [v.index for v in g.vs if court not in v['court']]
    else:
        drop = [v.index for v in g.vs if court in v['court']]
    subgraph.delete_vertices(drop)
    return subgraph
def YearQuotient(g):
    """Tabulate per-year case counts and average in/out degrees for graph g.

    Returns a DataFrame indexed by year (1631..2016) with columns
    'count', 'avg_indegree' and 'avg_outdegree'. Years with no cases get
    zero average degrees.
    """
    years = range(1631, 2016 + 1)
    stats = pd.DataFrame(index=years, columns=['count', 'avg_indegree', 'avg_outdegree'])

    # count number of cases per year
    counts = {y: 0 for y in stats.index}
    for v in g.vs:
        counts[v['year']] += 1
    stats['count'] = counts.values()

    # collect each vertex's degrees under its year
    indegrees = g.indegree()
    outdegrees = g.outdegree()
    in_by_year = {y: [] for y in stats.index}
    out_by_year = {y: [] for y in stats.index}
    for i, v in enumerate(g.vs):
        in_by_year[v['year']].append(indegrees[i])
        out_by_year[v['year']].append(outdegrees[i])

    # average the degrees by year (0 when the year has no cases)
    for y in in_by_year:
        ins = in_by_year[y]
        outs = out_by_year[y]
        stats.loc[y, 'avg_indegree'] = np.mean(ins) if ins else 0
        stats.loc[y, 'avg_outdegree'] = np.mean(outs) if outs else 0
    return stats
def DegreePlots(g):
    """Plot per-year case counts and average in/out degrees for graph g.

    Produces three separate scatter figures (counts, average in-degree,
    average out-degree) and shows them; returns None.
    """
    year_quotient = YearQuotient(g)
    # Plot the time series
    plt.figure(1)
    plt.figure(figsize=[10, 10])
    plt.scatter(year_quotient.index,
                year_quotient['count'],
                marker='.',
                color='black')
    plt.ylim(0, max(year_quotient['count']))
    plt.xlim([1850, 2016])
    plt.xlabel('year')
    plt.ylabel('number of cases')
    plt.figure(2)
    plt.figure(figsize=[8, 8])
    plt.scatter(year_quotient.index,
                year_quotient['avg_indegree'],
                marker='.',
                color='black')
    plt.ylim(0, max(year_quotient['avg_indegree']))
    # NOTE(review): in-degree plot starts at 1900 while the others start at
    # 1850 -- presumably intentional; confirm.
    plt.xlim([1900, 2016])
    plt.xlabel('year')
    plt.ylabel('average in-degree')
    plt.figure(3)
    plt.figure(figsize=[8, 8])
    plt.scatter(year_quotient.index,
                year_quotient['avg_outdegree'],
                marker='.',
                color='black')
    plt.ylim(0, max(year_quotient['avg_outdegree']))
    plt.xlim([1850, 2016])
    plt.xlabel('year')
    plt.ylabel('average out-degree')
    plt.show()
def CompareDegreePlots(g,sub_g,overall_net='',sub_net=''):
    """Overlay per-year degree statistics of a subnetwork on the full network.

    Draws three scatter figures -- case counts, average in-degree and
    average out-degree by year -- with `g` in black and `sub_g` in red.
    `overall_net` and `sub_net` are the legend labels for the two graphs.
    Shows the figures and returns None.
    """
    year_quotient = YearQuotient(g)
    year_quotient_sub_g = YearQuotient(sub_g)
    # Plot the time series
    plt.figure(1)
    plt.figure(figsize=[10, 10])
    plt.scatter(year_quotient.index,
                year_quotient['count'],
                marker='.',
                color='black',
                label = overall_net)
    plt.scatter(year_quotient_sub_g.index,
                year_quotient_sub_g['count'],
                marker='.',
                color='red',
                label = sub_net)
    # y-limit spans whichever of the two networks peaks higher
    plt.ylim(0, max( max(year_quotient['count']), max(year_quotient_sub_g['count']) ))
    plt.xlim([1850, 2016])
    plt.xlabel('year')
    plt.ylabel('number of cases')
    plt.title('counts')
    plt.legend(loc='upper right')
    plt.figure(2)
    plt.figure(figsize=[8, 8])
    plt.scatter(year_quotient.index,
                year_quotient['avg_indegree'],
                marker='.',
                color='black',
                label=overall_net)
    plt.scatter(year_quotient_sub_g.index,
                year_quotient_sub_g['avg_indegree'],
                marker='.',
                color='red',
                label = sub_net)
    plt.ylim(0, max( max(year_quotient['avg_indegree']), max(year_quotient_sub_g['avg_indegree']) ))
    plt.xlim([1800, 2016])
    plt.xlabel('year')
    plt.ylabel('average in-degree')
    plt.title('in-degree')
    plt.legend(loc='upper right')
    plt.figure(3)
    plt.figure(figsize=[8, 8])
    plt.scatter(year_quotient.index,
                year_quotient['avg_outdegree'],
                marker='.',
                color='black',
                label = overall_net)
    plt.scatter(year_quotient_sub_g.index,
                year_quotient_sub_g['avg_outdegree'],
                marker='.',
                color='red',
                label = sub_net)
    plt.ylim(0, max( max(year_quotient['avg_outdegree']), max(year_quotient_sub_g['avg_outdegree']) ))
    plt.xlim([1800, 2016])
    plt.xlabel('year')
    plt.ylabel('average out-degree')
    plt.title('out-degree')
    plt.legend(loc='upper right')
    plt.show()
g_scotus = CreateSubGraph(g,'scotus')
CompareDegreePlots(g,g_scotus,overall_net='overall',sub_net='scotus')
g_minus_scotus = CreateSubGraph(g, 'scotus', includeIfMatched=False)
CompareDegreePlots(g_minus_scotus,g_scotus,overall_net='overall - scotus',sub_net='scotus')
| explore/James/load-igraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def check_palindrome(n):
    """Return True if the decimal digits of n read the same in both directions.

    Mirrors the original digit-extraction behavior: for n <= 0 the digit
    list is empty, so the function returns True.
    """
    digits = []
    while n > 0:
        digits.append(n % 10)
        n //= 10
    return digits == digits[::-1]
# Project Euler 4: largest palindrome that is a product of two 3-digit numbers.
largestPalindrome = 0
a = 999
while a >= 100:
    # A 6-digit palindrome is divisible by 11, so at least one factor must
    # be. If `a` already is, `b` can take any value; otherwise only try
    # multiples of 11 for `b`.
    if a%11 == 0:
        b = 999
        db = 1
    else:
        b = 990 #The largest number less than or equal 999 and divisible by 11
        db = 11
    while b >= a:
        # products only decrease as b falls, so prune this inner loop early
        if a*b <= largestPalindrome:
            break
        if check_palindrome(a*b):
            largestPalindrome = a*b
        b = b-db
    a = a-1
print(largestPalindrome)
# -
"""
def check_prime(k):
isNotPrime = False
for d in range(2, k, 1):
if k % d == 0:
isNotPrime = True
return isNotPrime
def check_palindrome(n):
isPalindrome = False
li = []
while n>0:
d = n%10
li.append(d)
n = n//10
li2 = []
li2[::] = li[::-1]
if li == li2:
isPalindrome = True
return isPalindrome
count = 0
for i in range(10000,1000000,1):
palindrome = check_palindrome(i)
if palindrome:
palindrome_non_prime = check_prime(i)
if palindrome_non_prime:
temp = i
test = 0
max = 0
first = 0
second = 0
factors = []
for j in range(2, i, 1):
while temp%j==0:
s = str(j)
l = len(s)
if l == 3:
factors.append(j)
temp = temp//j
count = count + 1
else:
break
if len(factors) >1:
#print(i, " the factors are ", factors)
test = i
if test>max and test!= 999999:
if first < factors[0] and second < factors[1]:
first = factors[0]
second = factors[1]
max = test
print(max, first, second)
#Evil93
"""
| largest_palindrome_product.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import pysed
target = "hd21997"
# -
data = pysed.sed_catalog_search(target)
# add PACS data to HD21997
data = pysed.add_photometry(data,[100,665.4,47.5,"PACS"])
data = pysed.add_photometry(data,[160,410.8,30,"PACS"])
pysed.fit_sed(target,data,nbb='single') #nbb can be double
pheonix_models = pysed.compile_stellar_models('./PHEONIX/')
ax = pysed.fit_sed(target,data,nbb='single',star_type="stellar",star_models=pheonix_models)
import matplotlib.pyplot as plt
ax.plot()
plt.show()
| examples/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # $H_2$ Energy Surface with XACC-VQE
# To run this problem via docker, run the following from an empty directory
#
# ```bash
# $ docker run --name xacc-notebook -it -p 8888:8888 -d -v $(pwd):/home/notebooks mccaskey/xacc-all-gate-jupyter-fc26
# ```
# ## Setup the Problem
# +
import sys, os
import pyxacc as xacc
import pyxaccvqe as vqe
from pyxaccvqe import PauliOperator
from pyxacc import InstructionParameter
import numpy as np
# %matplotlib inline
# Create our Hamiltonian
H = PauliOperator('g0') + PauliOperator({0:'Z'},'g1') + PauliOperator({1:'Z'},'g2') \
+ PauliOperator({0:'Z',1:'Z'},'g3') + PauliOperator({0:'Y',1:'Y'}, 'g4') \
+ PauliOperator({0:'X',1:'X'},'g5')
# Read in the coefficient data as a function of R
h2coeffs = np.genfromtxt('h2_hamiltonian_data.csv', delimiter=',', names=['R', 'I', 'Z0', 'Z1', 'Z0Z1', 'Y0Y1', 'X0X1'])
# Construct some lists to detail the name of
# the variable coefficients, and the columns in the data file
gs = ['g0', 'g1', 'g2', 'g3', 'g4', 'g5']
cols = ['I', 'Z0', 'Z1', 'Z0Z1', 'Y0Y1', 'X0X1']
# Map column name to variable name
gMap = {c:gs[i] for i,c in enumerate(cols)}
# Setup an empty options map, we will add to it for
# VQE run after straight diagonalization run
# NOTE that default task is vqe-diagonalize and
# default backend is Eigen.
vqeOptions = {}
print('Hamiltonian = ', H)
# -
# ## Compute Energy Surface using Eigen Diagonalization
# +
energies = []
# Loop over all R
for i, r in enumerate(h2coeffs['R']):
# Construct the variable to coeff map
varMap = {gMap[k]:h2coeffs[k][i] for k in cols}
# Evaluate our Hamiltonian
Heval = H.eval(varMap)
# Execute - this diagonalizes the Hamiltonian
# and returns lowest eigenvalue
e = vqe.execute(Heval, **vqeOptions).energy
energies.append(e)
import matplotlib.pyplot as plt
plt.xlabel(r'R')
plt.ylabel(r'$\langle H\rangle$')
plt.plot(h2coeffs['R'], energies, color='k', label='Eigen exact diagonalization')
plt.legend()
plt.show()
print(energies)
# -
# ## Compute Energy Surface with VQE using TNQVM
# +
# Create the State Preparation Circuit for our VQE Run.
# Single-parameter ('theta') hardware-efficient ansatz for the 2-qubit H2
# Hamiltonian; 7.8539752 ~= 5*pi/2 (constant from the original XACC example).
statePrep = xacc.gate.GateFunction('statePrep', ['theta'])
statePrep.add(xacc.gate.create('Rx',[0],[np.pi]))
statePrep.add(xacc.gate.create('Ry',[1],[np.pi/2.]))
statePrep.add(xacc.gate.create('Rx',[0],[7.8539752]))
statePrep.add(xacc.gate.create('CNOT',[1,0]))
statePrep.add(xacc.gate.create('Rz',[0],['theta']))
statePrep.add(xacc.gate.create('CNOT',[1,0]))
statePrep.add(xacc.gate.create('Ry',[1],[7.8539752]))
statePrep.add(xacc.gate.create('Rx',[0],[np.pi/2.]))
# Switch from diagonalization to a real VQE run with the ansatz above.
vqeOptions = {'task':'vqe', 'ansatz':statePrep}
vqeenergies = []
# Loop over all R
for i, r in enumerate(h2coeffs['R']):
    # Construct the variable to coeff map
    varMap = {gMap[k]:h2coeffs[k][i] for k in cols}
    # Evaluate our Hamiltonian
    Heval = H.eval(varMap)
    # Execute - runs VQE and returns the minimized energy
    e = vqe.execute(Heval, **vqeOptions).energy
    vqeenergies.append(e)
import matplotlib.pyplot as plt
plt.xlabel(r'R')
plt.ylabel(r'$\langle H\rangle$')
plt.plot(h2coeffs['R'], vqeenergies, color='k', label='TNQVM VQE')
plt.legend()
plt.show()
# -
# ## Finalize the Framework
xacc.Finalize()
| examples/h2_energy_curve_python/xacc_vqe_h2_dissociation_calculation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
# %matplotlib widget
import numpy as np
import cupy as cp
import matplotlib.pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar
from mpl_toolkits.axes_grid1 import make_axes_locatable
import realtime_ptycho as sm
from realtime_ptycho.core import Sparse4DData, Metadata4D
from realtime_ptycho.util import get_qx_qy_1D, get_qx_qy_2D, disk_overlap_function, plotcxmosaic, sector_mask, plot, single_sideband_reconstruction, imsave, mosaic, sparse_to_dense_datacube, wavelength
from numpy.fft import fftshift
from py4DSTEM.process.dpc import get_phase_from_CoM
from pathlib import Path
from tifffile import imwrite
import time
from ipywidgets import AppLayout, FloatSlider, GridspecLayout, VBox, IntSlider, FloatLogSlider, HBox
import ipywidgets as widgets
import time
import cupy as cp
from cupyx.scipy.fft import fft2
from tqdm import trange
from skimage.filters import gaussian
# +
scan_number = 147
# FIX: the original used Path(os.getcwd()), but `os` is never imported in this
# notebook, which raised NameError.  Path.cwd() is equivalent and stdlib-clean.
base_path = Path.cwd()
adfpath = base_path
sparse_path = base_path
results_path = base_path / 'results/'
if not results_path.exists():
    results_path.mkdir()
# Sparse 4D-camera electron data and the matching DM4 survey image.
filename4d = sparse_path / f'data_scan{scan_number}_th4.0_electrons.h5'
filenameadf = adfpath / f'scan{scan_number}.dm4'
# Crop factor relative to the bright-field disk radius; the second assignment
# deliberately overrides the first (last value wins).
alpha_max_factor = 1.2
alpha_max_factor = 1.05
# + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"}
print('1: data loading')
d = Sparse4DData.from_4Dcamera_file(filename4d)
metadata = Metadata4D.from_dm4_file(filenameadf)
# Experiment parameters not stored in the DM4 file -- set manually here.
metadata.alpha_rad = 25e-3  # convergence semi-angle in radians
metadata.rotation_deg = 0   # scan/diffraction rotation offset
metadata.wavelength = wavelength(metadata.E_ev)
# Locate the bright-field disk so the diffraction patterns can be cropped.
center, radius = d.determine_center_and_radius(manual=False, size=200)
print(f'center: {center}')
print(f'radius: {radius}')
print('2: cropping')
# In-place symmetric crop around the disk, slightly larger than the disk itself.
d.crop_symmetric_center_(center, radius*alpha_max_factor)
print('3: sum diffraction pattern')
s = d.sum_diffraction()
print('4: plotting')
f,ax = plt.subplots(1,2,figsize=(8,4))
imax = ax[0].imshow(s)
ax[0].set_title(f'Scan {scan_number} sum after cropping')
imax = ax[1].imshow(np.log10(s+1))
ax[1].set_title(f'Scan {scan_number} log10(sum) after cropping')
plt.colorbar(imax)
plt.tight_layout()
# + pycharm={"is_executing": false}
# Dwell time per probe position (s); 87 kHz camera frame rate.
dwell_time = 1/87e3
# Detector-counts -> electrons conversion (unity here; 1/0.56 at 80 kV).
detector_to_real_fluence_80kv = 1#1/0.56
fluence = d.fluence(metadata.dr[0]) * detector_to_real_fluence_80kv
flux = d.flux(metadata.dr[0], dwell_time) * detector_to_real_fluence_80kv
# print(f"E = {metadata.E_ev/1e3} keV")
# print(f"λ = {metadata.wavelength * 1e2:2.2} pm")
# FIX: the 'Å' glyphs below were mojibake-corrupted, which split the f-strings
# across two lines (a SyntaxError); the prints are restored to single lines.
print(f"dR = {metadata.dr} Å")
print(f"scan size = {d.scan_dimensions}")
print(f"detector size = {d.frame_dimensions}")
print(f"scan FOV = {d.scan_dimensions*metadata.dr/10} nm")
print(f"fluence ~ {fluence} e/Å^2")
print(f"flux ~ {flux} e/Å^2/s")
# -
# NOTE(review): this deliberately shadows the py4DSTEM get_phase_from_CoM
# imported above, replacing it with a GPU (cupy) implementation that also
# returns the Fourier-space regularization denominator.
def get_phase_from_CoM(CoMx, CoMy, theta, flip, regLowPass=0.5, regHighPass=100, paddingfactor=2,
                       stepsize=1, n_iter=10, phase_init=None):
    """
    Calculate the phase of the sample transmittance from the diffraction centers of mass.
    A bare bones description of the approach taken here is below - for detailed discussion of the
    relevant theory, see, e.g.:
        Ishizuka et al, Microscopy (2017) 397-405
        Close et al, Ultramicroscopy 159 (2015) 124-137
        Wadell and Chapman, Optik 54 (1979) No. 2, 83-96
    The idea here is that the deflection of the center of mass of the electron beam in the
    diffraction plane scales linearly with the gradient of the phase of the sample transmittance.
    When this correspondence holds, it is therefore possible to invert the differential equation and
    extract the phase itself.* The primary assumption made is that the sample is well
    described as a pure phase object (i.e. the real part of the transmittance is 1). The inversion
    is performed in this algorithm in Fourier space, i.e. using the Fourier transform property
    that derivatives in real space are turned into multiplication in Fourier space.
    *Note: because in DPC a differential equation is being inverted - i.e. the fundamental theorem
    of calculus is invoked - one might be tempted to call this "integrated differential phase
    contrast". Strictly speaking, this term is redundant - performing an integration is simply how
    DPC works. Anyone who tells you otherwise is selling something.
    Accepts:
        CoMx          (2D array) the diffraction space centers of mass x coordinates
        CoMy          (2D array) the diffraction space centers of mass y coordinates
        theta         (float) the rotational offset between real and diffraction space coordinates
        flip          (bool) whether or not the real and diffraction space coords contain a
                      relative flip
        regLowPass    (float) low pass regularization term for the Fourier integration operators
        regHighPass   (float) high pass regularization term for the Fourier integration operators
        paddingfactor (int) padding to add to the CoM arrays for boundry condition handling.
                      1 corresponds to no padding, 2 to doubling the array size, etc.
        stepsize      (float) the stepsize in the iteration step which updates the phase
        n_iter        (int) the number of iterations
        phase_init    (2D array) initial guess for the phase
    Returns:
        phase         (2D array) the phase of the sample transmittance
        error         (1D array) the error - RMSD of the phase gradients compared to the CoM - at
                      each iteration step
        denominator   (2D array) the regularized inverse Fourier operator (half-plane, from
                      rfftfreq) -- used by run_dpc below to visualize the regularization
    """
    # Coordinates (padded grid for boundary-condition handling)
    R_Nx,R_Ny = CoMx.shape
    R_Nx_padded,R_Ny_padded = R_Nx*paddingfactor,R_Ny*paddingfactor
    qx = cp.fft.fftfreq(R_Nx_padded)
    qy = cp.fft.rfftfreq(R_Ny_padded)  # half-plane (real FFT) frequencies
    qr2 = qx[:,None]**2 + qy[None,:]**2
    # Inverse operators with high/low pass regularization; the q=0 term is
    # zeroed since the constant phase offset is unrecoverable.
    denominator = qr2 + regHighPass + qr2**2*regLowPass
    _ = np.seterr(divide='ignore')
    denominator = 1./denominator
    denominator[0,0] = 0
    _ = np.seterr(divide='warn')
    f = 1j * 0.25*stepsize
    qxOperator = f*qx[:,None]*denominator
    qyOperator = f*qy[None,:]*denominator
    # Perform rotation and flipping to align real and diffraction coordinates
    if not flip:
        CoMx_rot = CoMx*np.cos(theta) - CoMy*np.sin(theta)
        CoMy_rot = CoMx*np.sin(theta) + CoMy*np.cos(theta)
    if flip:
        CoMx_rot = CoMx*np.cos(theta) + CoMy*np.sin(theta)
        CoMy_rot = CoMx*np.sin(theta) - CoMy*np.cos(theta)
    # Initializations
    phase = cp.zeros((R_Nx_padded,R_Ny_padded))
    update = cp.zeros((R_Nx_padded,R_Ny_padded))
    dx = cp.zeros((R_Nx_padded,R_Ny_padded))
    dy = cp.zeros((R_Nx_padded,R_Ny_padded))
    error = cp.zeros((n_iter,))
    # mask selects the un-padded region; the padding stays zero.
    mask = cp.zeros((R_Nx_padded,R_Ny_padded),dtype=bool)
    mask[:R_Nx,:R_Ny] = True
    maskInv = mask==False
    if phase_init is not None:
        phase[:R_Nx,:R_Ny] = phase_init
    # Iterative reconstruction
    for i in range(n_iter):
        # Update gradient estimates using measured CoM values
        # (dx/dy hold the current phase gradients from the previous iteration;
        # subtracting the measurement leaves the residual to integrate)
        dx[mask] -= CoMx_rot.ravel()
        dy[mask] -= CoMy_rot.ravel()
        dx[maskInv] = 0
        dy[maskInv] = 0
        # Calculate reconstruction update (Fourier-space integration)
        update = cp.fft.irfft2( cp.fft.rfft2(dx)*qxOperator + cp.fft.rfft2(dy)*qyOperator)
        # Apply update
        phase += stepsize*update
        # Measure current phase gradients (central differences via roll)
        dx = (cp.roll(phase,(-1,0),axis=(0,1)) - cp.roll(phase,(1,0),axis=(0,1))) / 2.
        dy = (cp.roll(phase,(0,-1),axis=(0,1)) - cp.roll(phase,(0,1),axis=(0,1))) / 2.
        # Estimate error from cost function, RMS deviation of gradients
        xDiff = dx[mask] - CoMx_rot.ravel()
        yDiff = dy[mask] - CoMy_rot.ravel()
        error[i] = cp.sqrt(cp.mean((xDiff-cp.mean(xDiff))**2 + (yDiff-cp.mean(yDiff))**2))
        # Halve step size if error is increasing
        if i>0:
            if error[i] > error[i-1]:
                stepsize /= 2
    # Crop back to the un-padded size before returning.
    phase = phase[:R_Nx,:R_Ny]
    return phase, error, denominator
def run_dpc(regLowPass, regHighPass, stepsize, n_iter):
    """Run one DPC reconstruction with the current slider settings.

    All four arguments are 1-element arrays (mutated in place by the slider
    callbacks); only element 0 is used.  Returns the reconstructed phase, the
    per-iteration error, and a log power-spectrum image for display.
    """
    print(n_iter[0])
    phase, err, denominator = get_phase_from_CoM(
        comy, comx, np.deg2rad(metadata.rotation_deg), False,
        regLowPass=regLowPass[0], regHighPass=regHighPass[0],
        paddingfactor=2, stepsize=stepsize[0], n_iter=n_iter[0],
        phase_init=None)
    # Power spectrum of the y center-of-mass signal, moved to the host.
    com_spectrum = fftshift(cp.abs(fft2(comy))).get()
    # Mirror the half-plane inverse operator to full width, then downsample to
    # the un-padded grid and drop the duplicated column so shapes match.
    reg_filter = denominator.get()
    reg_filter = fftshift(np.hstack([reg_filter, np.fliplr(reg_filter)]))
    reg_filter = reg_filter[::2, ::2]
    reg_filter = reg_filter[:, :-1]
    spectrum_img = np.log10((com_spectrum * reg_filter) + 1)
    return phase, err, spectrum_img
# +
# Interactive DPC viewer: run an initial reconstruction, then build the
# figures and the (initially empty) slider container.
plt.ioff()
comy, comx = d.center_of_mass()
# 1-element arrays so the slider callbacks can mutate them in place.
regLowPass = np.array([1e3])
regHighPass = np.array([5e-1])
stepsize = np.array([0.9])
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int produces the same platform-default integer dtype.
n_iter = np.array([20]).astype(int)
dpc, error, ps = run_dpc(regLowPass, regHighPass, stepsize, n_iter)
# Figure 1: the reconstructed phase with a physical scale bar.
fig1, ax1 = plt.subplots(1,1,figsize=(7.5,7.5))
im1 = ax1.imshow(dpc.get(), cmap= plt.cm.get_cmap('bone'))
ax1.set_title(f'DPC reconstruction')
ax1.set_xticks([])
ax1.set_yticks([])
ax1.add_artist(ScaleBar(metadata.dr[0]/10,'nm'))
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im1, cax=cax)
# Figure 2: regularized power spectrum.
fig2, ax2 = plt.subplots(1,1,figsize=(7.5,7.5))
im2 = ax2.imshow(ps, cmap= plt.cm.get_cmap('bone'), alpha=1)
ax2.set_title(f'log10(Power spectrum * regularization)')
ax2.set_xticks([])
ax2.set_yticks([])
# Figure 3: error vs iteration.
fig3, ax3 = plt.subplots(1,1,figsize=(7.5,7.5))
im3 = ax3.plot(error.get())
ax3.set_xlabel('iteration')
ax3.set_ylabel('Error')
gs = GridspecLayout(1,9)
plot_box1 = HBox(children =[fig1.canvas, fig2.canvas, fig3.canvas])
Cslider_box = VBox(width=10)
sliders = []
i = 0  # counter of slider-triggered updates, shown in the status widget
def set_new_data_and_update(dpc, error, ps):
    """Push new reconstruction results into the three existing figures.

    Expects host (numpy) arrays.  Mutates the module-level artists
    im1/im2/im3 and redraws figures 1 and 2; figure 3 redraw is disabled
    (commented out) -- presumably for responsiveness, TODO confirm.
    """
    im1.set_data(dpc)
    im2.set_data(ps)
    im3[0].set_ydata(error)
    im3[0].set_xdata(np.arange(len(error)))
    ax3.set_xlim(0,len(error))
    ax3.set_ylim(error.min(),error.max())
    # Rescale color limits to the new data range.
    im1.set_clim(dpc.min(),dpc.max())
    im2.set_clim(ps.min(),ps.max())
    fig1.canvas.draw()
    fig1.canvas.flush_events()
    fig2.canvas.draw()
    fig2.canvas.flush_events()
    # fig3.canvas.draw()
    # fig3.canvas.flush_events()
    text.value = f'all values set'
    plt.draw()
def regLowPass_changed(v):
    """Slider callback: update the low-pass regularization and recompute DPC."""
    # FIX: `i += 1` on the module-level counter requires `global i`;
    # without it the callback raised UnboundLocalError when triggered.
    global i
    w = v['new']
    text.value = f'{w}'
    regLowPass[:] = v['new']
    # dpc is updated in place (slice assignment) so the figure artist keeps
    # referencing the same array; error/ps are rebound locally.
    dpc[:], error, ps = run_dpc(regLowPass, regHighPass, stepsize, n_iter)
    text.value = f'dpc done'
    set_new_data_and_update(dpc.get(), error.get(), ps)
    i += 1
    text.value = f'{i}'
def regHighPass_changed(v):
    """Slider callback: update the high-pass regularization and recompute DPC."""
    # FIX: `i += 1` needs `global i` (UnboundLocalError otherwise).
    global i
    regHighPass[:] = v['new']
    dpc[:], error, ps = run_dpc(regLowPass, regHighPass, stepsize, n_iter)
    set_new_data_and_update(dpc.get(), error.get(), ps)
    i += 1
    text.value = f'{i}'
def stepsize_changed(v):
    """Slider callback: update the iteration step size and recompute DPC."""
    # FIX: `i += 1` needs `global i` (UnboundLocalError otherwise).
    global i
    stepsize[:] = v['new']
    dpc[:], error, ps = run_dpc(regLowPass, regHighPass, stepsize, n_iter)
    set_new_data_and_update(dpc.get(), error.get(), ps)
    i += 1
def n_iter_changed(v):
    """Slider callback: update the iteration count and recompute DPC."""
    # FIX: `i += 1` needs `global i` (UnboundLocalError otherwise).
    global i
    n_iter[:] = v['new']
    dpc[:], error, ps = run_dpc(regLowPass, regHighPass, stepsize, n_iter)
    set_new_data_and_update(dpc.get(), error.get(), ps)
    i += 1
    text.value = f'{i}'
# Wire up the four parameter sliders (log scale for the regularizations)
# and assemble the app layout: sliders on the left, figures on the right.
s1 = FloatLogSlider(description='regLowPass',value=1e3,base=10,step=0.2, min=0, max=6)
s1.observe(regLowPass_changed, names='value')
sliders.append(s1)
s2 = FloatLogSlider(description='regHighPass',value=5e-1,base=10,step=0.2, min=-2, max=4)
s2.observe(regHighPass_changed, names='value')
sliders.append(s2)
s3 = FloatSlider(description='stepsize',value=0.9, min=0.1, max=2)
s3.observe(stepsize_changed, names='value')
sliders.append(s3)
s4 = IntSlider(description='n_iter',value=20, min=1, max=500)
s4.observe(n_iter_changed, names='value')
sliders.append(s4)
# Status line updated by the callbacks.
text = widgets.HTML(
    value="1",
    placeholder='',
    description='',
)
Cslider_box.children=sliders+ [text]
gs[0,1:] = plot_box1
gs[0,0] = Cslider_box
AppLayout(center=gs)
# -
m = 10  # crop margin (pixels) to hide edge artifacts in the saved figure
fig3, ax3 = plt.subplots(1,1,dpi=300)
im3 = ax3.imshow(dpc.get()[m:-m,m:-m], cmap= plt.cm.get_cmap('bone'))
fig3.savefig(results_path / 'dpc.pdf')
AppLayout(center=fig3.canvas)
# Save the full (uncropped) phase as an ImageJ TIFF with nm calibration.
# NOTE(review): the output filename has no extension -- presumably fine for
# downstream tools, but verify; imwrite still writes TIFF data.
imwrite(results_path /f'scan{scan_number}_dpc', dpc.get().astype('float32'), imagej=True, resolution=(1./(metadata.dr[0]/10), 1./(metadata.dr[1]/10)), metadata={'spacing': 1 / 10, 'unit': 'nm', 'axes': 'YX'})
| examples/quick_processing_dpc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_snorkel)
# language: python
# name: conda_snorkel
# ---
# +
from typing import (
List,
Tuple,
Dict,
Optional,
)
import os
import sys
import csv
import altair as alt
import pandas as pd
# Root directory of the experiment runs analyzed below.
EXPERIMENT_DIR = 'data/window_collation'
# +
# Mapping from raw metric tags (as logged in summary.csv) to display names;
# rows whose tag is not listed here are ignored by parse_file.
METRICS = {
    'al/valid/f1-measure-overall': 'Span F1',
    'al/valid/precision-measure-overall': 'Span Precision',
    'al/valid/recall-measure-overall': 'Span Recall',
    'al/valid/tag_f1': 'Token F1',
    'al/valid/tag_precision': 'Token Precision',
    'al/valid/tag_recall': 'Token Recall',
}
# A parsed run: a list of per-metric records (name, value, step, plus tags).
DataDef = List[
    Dict[str, object]
]
def parse_file(
    file_name: str,
    **kwargs,
) -> DataDef:
    """Read one summary.csv and return records for the metrics of interest.

    Each row is (tag, value, step); rows whose tag is not in METRICS are
    skipped.  Any extra keyword arguments are attached to every record as
    experiment tags (and may override the base keys).
    """
    records = []
    with open(file_name, 'r') as csv_file:
        for metric_name, metric_val, step in csv.reader(csv_file, delimiter=','):
            if metric_name in METRICS:
                records.append({
                    'metric_name': METRICS[metric_name],
                    'metric_val': round(float(metric_val), 2),
                    'step': float(step),
                    **kwargs,
                })
    return records
def load_baseline(root_dir: str = 'data/cached/no_weak', collation_type='no_weak') -> DataDef:
    """Load cached baseline trials and tag them with each (method, weight) combo.

    The baseline was run without weak supervision, so the same summary.csv is
    replicated under every weak_train_method/weak_weight combination to serve
    as a reference line in the comparison plots.  Directories not named
    'trial_<n>' are skipped with a notice.
    """
    res = []
    for trial_dir in os.listdir(root_dir):
        if not trial_dir.startswith('trial_'):
            print(f'skipping: {trial_dir}')
            continue
        trial_num = int(trial_dir[len('trial_'):])
        summary_file = os.path.join(
            root_dir,
            trial_dir,
            'summary.csv',
        )
        for method in ['fine_tune', 'weighted']:
            # DRY fix: the original repeated an identical parse_file call for
            # each weight; loop over them instead (same order, same output).
            for weak_weight in (0.01, 0.1, 1.0):
                parsed_data = parse_file(
                    file_name=summary_file,
                    experiment_tag='ADR',
                    weak_weight=weak_weight,
                    weak_train_method=method,
                    trial=trial_num,
                    collation_type=collation_type,
                )
                res.extend(parsed_data)
    return res
def get_experiment_data(root_dir: str) -> DataDef:
    """Walk root_dir/<weak_train>/<collation>/<weight>/trial_<n>/summary.csv
    and parse every trial, tagging each record with the settings encoded in
    the directory names."""
    res = []
    for weak_train in os.listdir(root_dir):
        weak_train_dir = os.path.join(
            root_dir,
            weak_train,
        )
        for collation_type in os.listdir(weak_train_dir):
            collation_dir = os.path.join(
                weak_train_dir,
                collation_type,
            )
            for weak_weight in os.listdir(collation_dir):
                # Directory name is the weight; round-trip through float so
                # the tag is numeric, then format back to rebuild the path.
                weak_weight = float(weak_weight)
                weight_dir = os.path.join(
                    collation_dir,
                    f'{weak_weight}',
                )
                for trial_dir in os.listdir(weight_dir):
                    if not trial_dir.startswith('trial_'):
                        print(f'skipping: {trial_dir}')
                        continue
                    trial_num = int(trial_dir[len('trial_'):])
                    summary_file = os.path.join(
                        weight_dir,
                        trial_dir,
                        'summary.csv',
                    )
                    parsed_data = parse_file(
                        file_name=summary_file,
                        experiment_tag='ADR',
                        weak_weight=weak_weight,
                        weak_train_method=weak_train,
                        trial=trial_num,
                        collation_type=collation_type,
                    )
                    res.extend(parsed_data)
    return res
# Combine the no-weak baseline, a cached linear-collation run, and the current
# experiment's results into a single frame for plotting.
baseline = load_baseline()
baseline.extend(load_baseline(root_dir='data/cached/fine_tune/0.01/linear', collation_type='linear'))
data = get_experiment_data(EXPERIMENT_DIR)
data.extend(baseline)
data_frame = pd.DataFrame(data)
# +
def metric_graph(data_frame: pd.DataFrame, metric: str, train_method: str, weak_weight: float) -> alt.Chart:
    """Build an Altair line chart of mean metric value vs dataset size.

    One line per collation_type, restricted to the requested metric,
    weak-training method and weak weight.
    """
    def filter_graph(chart: alt.Chart) -> alt.Chart:
        # Restrict the chart to the requested metric/method/weight slice.
        return chart.transform_filter(
            alt.datum.metric_name == metric
        ).transform_filter(
            alt.datum.weak_train_method == train_method,
        ).transform_filter(
            alt.datum.weak_weight == weak_weight,
        )
    # .transform_filter(
    #     alt.datum.collation_type != 'intersection'
    # ).transform_filter(
    #     alt.datum.collation_type != 'union'
    # )
    base = alt.Chart(data_frame, title=f'{metric} vs Dataset Size')
    line = filter_graph(base.mark_line(point=True).encode(
        x=alt.X('step', title='Dataset Size'),
        y=alt.Y('mean(metric_val)', title=metric, scale=alt.Scale(zero=False)),
        color='collation_type:N',
    ))
    # NOTE: a confidence-interval band (mark_area over ci0/ci1 of metric_val)
    # was built here but never layered into the returned chart; the dead code
    # has been removed.  To restore it, build the band with filter_graph(...)
    # and return (line + band).properties(width=1000).
    return (line).properties(width=1000)
# Render Token-F1 and Span-F1 charts (stacked with `&`) for one configuration.
weight = 0.01
method = 'fine_tune'
metric_graph(data_frame, 'Token F1', method, weight) & metric_graph(data_frame, 'Span F1', method, weight)
# -
| experiments/collation_experiment_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import sys
import os
import requests
import pandas as pd
sys.path.append(os.path.abspath("../../"))
from ampel.ztf.dev.ZTFAlert import ZTFAlert
from ampel.contrib.hu.t2.T2BrightSNProb import T2BrightSNProb
from ampel.log.AmpelLogger import AmpelLogger
from ampel.ztf.util import ZTFIdMapper
from ampel.ztf.ingest.ZiDataPointShaper import ZiDataPointShaper
from ampel.content.T1Document import T1Document
from ampel.view.LightCurve import LightCurve
# # Running SNGuess over a ZTF candidate's alerts
#
# In this notebook we will query the DESY archive for the alerts of a ZTF candidate, and then run SNGuess over it in order to obtain a relevance score.
#
# ## Steps
#
# First, we enter the connection parameters for the DESY archive. In our case, this is an archive token that can be obtained by accessing https://ampel.zeuthen.desy.de/live/dashboard/tokens , and clicking on the _"Archive tokens"_ tab.
#
# Note: you need to generate a persistent __archive token__, and __not__ a 1-hour-valid API token.
#
# If your GitHub user is not an active member in the ZTF or AMPEL organizations, please send an e-mail to <EMAIL> .
# FIX: `token = # Add token string` was a SyntaxError (no right-hand side);
# default to an empty string that the user must replace with a real token.
token = ''  # Add token string
# Next, we enter the ZTF identifier of the candidate we want to analyze.
name = 'ZTF20abyfpze'
# ZTF filter id -> band letter.
filter_names = {1: 'g', 2: 'r', 3: 'i'}
# We initialize the relevant AMPEL T2 units.
logger = AmpelLogger.get_logger()
t2snguess = T2BrightSNProb(logger=logger)
t2snguess.filter_names = filter_names
t2snguess.post_init()
# Shaper converts raw alert photopoints into AMPEL datapoints.
shaper = ZiDataPointShaper(logger=logger)
print(t2snguess.filter_names)
# We set the DESY archive endpoint url and request the alerts of the candidate using our connection token.
endpoint = "https://ampel.zeuthen.desy.de/api/ztf/archive/v2/object/{}/alerts?with_history=true&with_cutouts=false".format(name)
headers = {'Authorization': f"bearer {token}"}
headers
response = requests.get(endpoint, headers=headers)
# Fail loudly on auth/availability problems before parsing the payload.
response.raise_for_status()
alerts = response.json()
print("Found {} alerts for {}".format(len(alerts), name))
# Finally, we iterate over the alerts and run SNGuess over them in order to obtain a relevance score and a boolean that indicates whether its candidate is likely to be relevant for follow up observations or not.
# Run SNGuess over each alert and collect one summary row per alert.
summary = []
for alert in alerts:
    # Create standardized LightCurve object
    pps = [alert['candidate']]
    pps.extend( [prv_cand for prv_cand in alert['prv_candidates'] ] )
    # The following loop is _likely_ due to an inconsistency in the alert archive with the shaper
    # and can hopefully be removed soon
    for pp in pps:
        if "magpsf" in pp.keys():
            pp["candid"] = 999
    stockId = ZTFIdMapper.to_ampel_id(name)
    dps = shaper.process( pps, stockId)
    t1d = T1Document(stock=stockId, link=0)
    lc = LightCurve.build(t1d, dps)
    # Content
    jds = lc.get_values("jd")
    if jds is None:
        # No detections in this light curve -- nothing to score.
        continue
    t2out = t2snguess.process(lc)
    # On success, record the SNGuess score and follow-up decision;
    # otherwise record the detection stats with empty score fields.
    if t2out['success']:
        summary.append({
            'last_detection': max(jds),
            'number_of_detections': len(jds),
            'success': t2out['success'],
            'score': t2out['SNGuess'],
            'selected': t2out['SNGuessBool']
        })
    else:
        summary.append({
            'last_detection': max(jds),
            'number_of_detections': len(jds),
            'success': t2out['success'],
            'score': None,
            'selected': None
        })
# We display the results.
pd.DataFrame(summary)
| notebooks/snguess/snguess_ztf_alert.ipynb |
# # Automatic generation of Notebook using PyCropML
# This notebook implements a crop model.
# +
import numpy as np
from copy import copy
from math import *
def partitioning(cPartitionNStressReduction=1.0,
                 cLeavesPartitioningTableDVS=(0.1, 0.2, 0.4),
                 iTRANRF=1.0,
                 cStemsPartitioningTableFraction=(0.1, 0.2, 0.4),
                 cLeavesPartitioningTableFraction=(0.1, 0.2, 0.4),
                 cFSOTB=(0.1, 0.2, 0.4),
                 cStemsPartitioningTableDVS=(0.1, 0.2, 0.4),
                 cFRTTB=(0.1, 0.2, 0.4),
                 cFSTTB=(0.1, 0.2, 0.4),
                 cStorageOrgansPartitioningTableDVS=(0.1, 0.2, 0.4),
                 iNitrogenNutritionIndex=1.0,
                 cRootsPartitioningTableDVS=(0.1, 0.2, 0.4),
                 nameoSow=True,
                 cFLVTB=(0.1, 0.2, 0.4),
                 cRootsPartitioningTableFraction=(0.1, 0.2, 0.4),
                 idevStage=0.0,
                 cStorageOrgansPartitioningTableFraction=(0.1, 0.2, 0.4)):
    """ Partitioning
    Author:
    Reference: as given in the documentation
    Instituton: INRES Pflanzenbau, Uni Bonn
    Abstract: see documentation at http://www.simplace.net/doc/simplace_modules/class_net.simplace.sim.components.experimental.amei.Partitioning.html

    Returns (FractionStems, FractionStorageOrgans, FractionLeaves,
    FractionRoot).  The reference (Java) implementation below has not been
    ported yet, so all fractions are currently 0.

    Note: the table defaults are tuples (the original used mutable list
    defaults, a Python anti-pattern); values are identical.
    """
    FractionStems = 0
    FractionStorageOrgans = 0
    FractionLeaves = 0
    FractionRoot = 0
    # Reference Java implementation, kept verbatim for the eventual port
    # (was previously a no-op string literal):
    # double FRTWET = FSTFunctions.AFGEN(cRootsPartitioningTableDVS, cRootsPartitioningTableFraction, DevStage.getValue());
    # if (TRANRF.getValue() < NitrogenNutritionIndex.getValue()) {
    #     double FRTMOD = max(1.0, 1.0 / (TRANRF.getValue() + 0.5));
    #     FractionRoot.setValue(FRTWET * FRTMOD, this);
    #     double FSHMOD = (1.0 - FractionRoot.getValue()) / (1.0 - FractionRoot.getValue() / FRTMOD);
    #     FractionLeaves.setValue(FSTFunctions.AFGEN(cLeavesPartitioningTableDVS, cLeavesPartitioningTableFraction, DevStage.getValue()) * FSHMOD, this);
    #     FractionStems.setValue(FSTFunctions.AFGEN(cStemsPartitioningTableDVS, cStemsPartitioningTableFraction, DevStage.getValue()) * FSHMOD, this);
    #     FractionStorageOrgans.setValue(FSTFunctions.AFGEN(cStorageOrgansPartitioningTableDVS, cStorageOrgansPartitioningTableFraction, DevStage.getValue()) * FSHMOD, this);
    # } else {
    #     double FLVMOD = exp(-PartitionNStressReduction.getValue() * (1 - NitrogenNutritionIndex.getValue()));
    #     FractionRoot.setValue(FRTWET, this);
    #     double FLVT = FSTFunctions.AFGEN(cLeavesPartitioningTableDVS, cLeavesPartitioningTableFraction, DevStage.getValue());
    #     FractionLeaves.setValue(FLVT * FLVMOD, this);
    #     FractionStems.setValue(FSTFunctions.AFGEN(cStemsPartitioningTableDVS, cStemsPartitioningTableFraction, DevStage.getValue()) + FLVT - FractionLeaves.getValue(), this);
    #     FractionStorageOrgans.setValue(FSTFunctions.AFGEN(cStorageOrgansPartitioningTableDVS, cStorageOrgansPartitioningTableFraction, DevStage.getValue()), this);
    return FractionStems, FractionStorageOrgans, FractionLeaves, FractionRoot
# -
# ## Run the model with a set of parameters.
# Each run will be defined in its own cell.
| test/data/test5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (conda_env)
# language: python
# name: conda_env
# ---
# ### Representation learning for boxing
# +
import sys,os
sys.path.append('../')
from deep_rl import *
import matplotlib.pyplot as plt
import torch
# from tqdm.notebook import trange, tqdm
import random
import numpy as np
# %load_ext autoreload
# %reload_ext autoreload
# %autoreload 2
# -
select_device(0)  # pin computations to GPU 0 (deep_rl helper)
# +
def dqn_feature(**kwargs):
    """Configure, train and return a DQN agent on an Atari-style task.

    Keyword arguments are merged into the deep_rl Config (e.g. game=...,
    is_wb=...).  Inlines the library's run_steps loop so the training can be
    stepped with a tqdm progress bar inside the notebook.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    # config.action_dim = 3
    # config.optimizer_fn = lambda params: torch.optim.RMSprop(
    #     params, lr=0.00025, alpha=0.95, eps=0.001, centered=True)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(
        params, lr=0.001, centered=True)
    # config.network_fn = lambda: VanillaNet(config.action_dim, FCBody(config.state_dim, hidden_units=(43,)))
    # Nature-DQN conv body over 4 stacked frames.
    config.network_fn = lambda: VanillaNet(config.action_dim, NatureConvBody(in_channels=4))
    # print(config.action_dim)
    config.replay_fn = lambda: Replay(memory_size=int(1e6), batch_size=32)
    # config.replay_fn = lambda: AsyncReplay(memory_size=int(1e4), batch_size=10)
    config.batch_size = 32
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    # Epsilon-greedy schedule: 1.0 -> 0.01 over 2e5 steps.
    config.random_action_prob = LinearSchedule(1.0, 0.01, 2e5)
    config.discount = 0.99
    config.target_network_update_freq = 2000
    config.exploration_steps = 1000
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e10)  # effectively disables evaluation
    config.max_steps = 1  # NOTE: train for a single step (smoke test)
    config.async_actor = False
    agent = DQNAgent(config)
    #run_steps function below
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    for i in tqdm(range(int(config.max_steps))):
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % 5000:
            # agent.eval_episodes()
            print(agent.total_steps)
            pass
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
            break  # NOTE: unreachable (follows return); kept verbatim
        agent.step()
        agent.switch_task()
    return agent
# -
# Choose a game, train the agent, and plot episode returns over time.
# game = 'MiniGrid-Empty-5x5-v0'
game = 'BoxingNoFrameskip-v0'
# game = 'CartPole-v0'
agent = dqn_feature(game=game, is_wb=False)
plt.figure(figsize=(18,6))
# agent.returns is assumed to be a list of (timestep, return) pairs -- TODO confirm.
plt.plot(np.array(agent.returns)[:,0], np.array(agent.returns)[:,1], '.-')
plt.xlabel('timesteps'), plt.ylabel('returns')
plt.title('DQN performance on ' + game), plt.show()
# +
import sys,os
sys.path.append('../')
from deep_rl import *
import matplotlib.pyplot as plt
import torch
# from tqdm.notebook import trange, tqdm
import random
import numpy as np
select_device(0)  # pin computations to GPU 0 (deep_rl helper)
class torch_reshape(torch.nn.Module):
    """Reshape a flat (batch, prod(size)) tensor into (batch, *size).

    Used between the decoder's Linear layer and its ConvTranspose2d stack.
    """
    def forward(self, x, size=(64, 7, 7)):
        # FIX: the default was a mutable list ([64, 7, 7]); a tuple avoids the
        # shared-mutable-default pitfall and behaves identically here.
        batch_size = x.shape[0]
        return x.view(batch_size, size[0], size[1], size[2])
class Flatten(torch.nn.Module):
    """Collapse every non-batch dimension into one: (b, ...) -> (b, -1)."""
    def forward(self, x):
        # view keeps the original no-copy semantics (requires contiguity).
        return x.view(x.shape[0], -1)
class SRNetImage(nn.Module):
    def __init__(self, output_dim, hidden_units_sr=(512*4,), hidden_units_psi2q=(), gate=F.relu, config=1):
        """
        This network has two heads: SR head (SR) and reconstruction head (rec).
        config -> type of learning on top of state abstraction
            0 - typical SR with weights sharing (Psi2QNet)
            1 - learning SR without weights sharing (Psi2QNetFC)

        forward(x) returns the 4-tuple (phi, psi, state_est, q_est).
        """
        super(SRNetImage, self).__init__()
        self.feature_dim = 512  # size of the latent state abstraction phi
        self.output_dim = output_dim  # number of actions
        self.gate = gate
        in_channels = 4  # stacked frames
        # Nature-DQN style conv encoder: image -> phi (feature_dim,)
        self.encoder = nn.Sequential(
            layer_init(nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)), # b, 16, 10, 10
            nn.ReLU(True),
            layer_init(nn.Conv2d(32, 64, kernel_size=4, stride=2)),
            nn.ReLU(True),
            layer_init(nn.Conv2d(64, 64, kernel_size=3, stride=1)),
            nn.ReLU(True),
            Flatten(),
            nn.Linear(7 * 7 * 64, self.feature_dim)
        )
        # Mirror-image deconv decoder: phi -> reconstructed input stack.
        self.decoder = nn.Sequential(
            layer_init(nn.Linear(self.feature_dim, 7 * 7 * 64)),
            torch_reshape(),
            layer_init(nn.ConvTranspose2d(64, 64, kernel_size=3, stride=1)), # b, 16, 5, 5
            nn.ReLU(True),
            layer_init(nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2)), # b, 16, 5, 5
            nn.ReLU(True),
            layer_init(nn.ConvTranspose2d(32, in_channels, kernel_size=8, stride=4, output_padding=0)), # b, 8, 15, 15
            nn.ReLU(True),
            nn.Tanh()
        )
        # layers for SR: MLP mapping phi -> per-action successor features psi
        dims_sr = (self.feature_dim,) + hidden_units_sr + (self.feature_dim * output_dim,)
        self.layers_sr = nn.ModuleList(
            [layer_init_0(nn.Linear(dim_in, dim_out)) for dim_in, dim_out in zip(dims_sr[:-1], dims_sr[1:])])
        # SR final head layer (psi -> Q values)
        if(config == 0):
            self.psi2q = Psi2QNet(output_dim, self.feature_dim)
        if(config == 1):
            self.psi2q = Psi2QNetFC(output_dim, self.feature_dim, hidden_units=hidden_units_psi2q)
        self.to(Config.DEVICE)

    def forward(self, x):
        # Finding the latent layer
        phi = self.encoder(tensor(x))  # shape: b x state_dim
        # Reconstruction
        state_est = self.decoder(phi)
        # Estimating the SR from the latent layer
        psi = phi
        for layer in self.layers_sr[:-1]:
            psi = self.gate(layer(psi))
        psi = self.layers_sr[-1](psi)  # final SR layer: no activation
        psi = psi.view(psi.size(0), self.output_dim, self.feature_dim)  # shape: b x action_dim x state_dim
        q_est = self.psi2q(psi)
        return phi, psi, state_est, q_est
# -
# Smoke test: instantiate the network and grab the SR head's parameters.
a = SRNetImage(output_dim=4)
a.layers_sr.parameters()
# +
class avDSRAgent_v2(BaseAgent):
    """Deep Successor Representation agent trained on transitions collected by
    an ensemble of (pre-trained) behavior agents via avDSRActor_v2.

    Encoder/decoder weights are trained on the reconstruction loss and the SR
    layers on the successor-feature TD loss, using separate optimizers.
    """
    def __init__(self, config, agents, style='DQN'):
        """
        agents -> list of agents whose actions we need to consider.
        """
        BaseAgent.__init__(self, config)
        self.config = config
        config.lock = mp.Lock()
        # Loss histories kept for later plotting/diagnostics.
        self.loss_rec_vec = []
        self.loss_psi_vec = []
        self.loss_vec = []
        self.replay = config.replay_fn()
        self.choice = config.choice
        self.actor = avDSRActor_v2(config, agents, style, self.choice)
        self.network = config.network_fn()
        self.network.share_memory()
        self.optimizer = config.optimizer_fn(self.network.parameters())
        # Separate optimizers: reconstruction loss trains encoder+decoder,
        # SR TD loss trains only the SR layers (no weight sharing).
        self.optimizer_phi = config.optimizer_fn(list(self.network.encoder.parameters()) + \
                                                 list(self.network.decoder.parameters()))
        self.optimizer_psi = config.optimizer_fn(self.network.layers_sr.parameters())
        self.actor.set_network(self.network)
        self.total_steps = 0
        self.batch_indices = range_tensor(self.replay.batch_size) # Need to make this size bigger

    def close(self):
        close_obj(self.replay)
        close_obj(self.actor)

    def eval_step(self, state):
        """Greedy action w.r.t. the Q head, with the normalizer frozen."""
        self.config.state_normalizer.set_read_only()
        state = self.config.state_normalizer(state)
        # FIX: SRNetImage.forward returns (phi, psi, state_est, q_est); the
        # original 3-way unpack `_, _, q = ...` raised ValueError.
        _, _, _, q = self.network(state)
        action = to_np(q.argmax(-1))
        self.config.state_normalizer.unset_read_only()
        return action

    def step(self):
        config = self.config
        # Store transitions in the buffer
        transitions = self.actor.step()
        experiences = []
        for state, action, reward, next_state, next_action, done, info in transitions:
            # self.record_online_return(info)
            self.total_steps += 1
            reward = config.reward_normalizer(reward)
            experiences.append([state, action, reward, next_state, next_action, done])
        self.replay.feed_batch(experiences)
        # Start updating network parameters after exploration_steps
        if self.total_steps > self.config.exploration_steps:
            # Getting samples from buffer
            experiences = self.replay.sample()
            states, actions, rewards, next_states, next_actions, terminals = experiences
            states = self.config.state_normalizer(states)
            next_states = self.config.state_normalizer(next_states)
            # Estimate SARSA-style successor-feature targets
            with torch.no_grad():
                _, psi_next, _, _ = self.network(next_states)
                if self.config.double_q:
                    # FIXME(review): this branch is broken -- self.network(...)
                    # returns a tuple (argmax would fail) and q_next is used
                    # before assignment.  It is unreachable in practice since
                    # the driver sets config.double_q = False; kept verbatim.
                    best_actions = torch.argmax(self.network(next_states), dim=-1)
                    q_next = q_next[self.batch_indices, best_actions]
                else:
                    next_actions = tensor(next_actions).long()
                    psi_next = psi_next[self.batch_indices, next_actions, :] # TODO: double check dims here
                terminals = tensor(terminals)
                psi_next = self.config.discount * psi_next * (1 - terminals.unsqueeze(1).repeat(1, psi_next.shape[1]))
            phi, psi, state_rec, _ = self.network(states)
            # SR TD target: psi_target = phi(s) + gamma * psi(s', a')
            psi_next.add_(phi) # TODO: double check this
            # Computing estimates
            actions = tensor(actions).long()
            psi = psi[self.batch_indices, actions, :]
            loss_psi = (psi_next - psi).pow(2).mul(0.5).mean()
            # Estimating reconstuction loss
            loss_rec = (state_rec - tensor(states)).pow(2).mul(0.5).mean()
            loss = loss_psi + config.c(self.total_steps) * loss_rec
            total_loss = loss.mean()
            self.loss_vec.append(total_loss.item())
            self.loss_psi_vec.append(loss_psi.item())
            self.loss_rec_vec.append(loss_rec.item())
            # Two-phase update:
            # Step 1: Update weights of phi and phi_rec (reconstruction loss)
            self.optimizer.zero_grad()
            loss_rec.backward(retain_graph=True)
            with config.lock:
                self.optimizer_phi.step()
            # Step 2: Update weights of psi (SR TD loss)
            self.optimizer_psi.zero_grad()
            loss_psi.backward()
            with config.lock:
                self.optimizer_psi.step()
            # self.optimizer.zero_grad()
            # loss.backward()
            # nn.utils.clip_grad_norm_(self.network.parameters(), self.config.gradient_clip)
            # with config.lock:
            #     self.optimizer.step()
# -
def avdsr_feature_v2(dnn, **kwargs):
    """Configure and train an avDSR agent; returns the trained agent.

    dnn -> optional pre-built network; when None a fresh SRNetImage is used.
    Extra keyword arguments (e.g. game=..., agents=..., choice=...) are merged
    into the Config.  Inlines the library's run_steps loop.
    """
    kwargs['tag'] = 'Training avDSR based on DQN agents'
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    # Weight schedule for the reconstruction loss (constant 1 here).
    config.c = LinearSchedule(1, 1, 3e4)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=1e-4)
    if(dnn is not None):
        config.network_fn = lambda: dnn
    else:
        config.network_fn = lambda: SRNetImage(config.action_dim, config=1) #CHECK
    config.replay_fn = lambda: Replay(memory_size=int(3e4), batch_size=10)
    config.state_normalizer = ImageNormalizer()
    # Always act randomly w.r.t. epsilon (schedule fixed at 1). # CHECK
    config.random_action_prob = LinearSchedule(1, 1, 1e4) # CHECK
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.max_steps = 1e5 * 1.5
    config.async_actor = False
    agent = avDSRAgent_v2(config, config.agents, style='DQN')
    #run_steps function below
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    while True:
        if config.log_interval and not agent.total_steps % config.log_interval:
            agent.logger.info('steps %d, %.2f steps/s' % (agent.total_steps, config.log_interval / (time.time() - t0)))
            t0 = time.time()
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
            break  # NOTE: unreachable (follows return); kept verbatim
        agent.step()
        agent.switch_task()
# +
# class torch_reshape(torch.nn.Module):
# def forward(self, x):
# batch_size = x.shape[0]
# return x.view(batch_size, 64, 9, 9)
# class Flatten(torch.nn.Module):
# def forward(self, x):
# batch_size = x.shape[0]
# return x.view(batch_size, -1)
class Pick:
    """Minimal stand-in for an exploration agent.

    Exposes only a `network` attribute; callers overwrite it with a callable
    that maps a state to q-values.
    """

    def __init__(self):
        # Placeholder value; replaced by the caller before use.
        self.network = 0
class avDSRActor_v2(BaseActor):
    """Actor that collects transitions for avDSR training, selecting actions
    eps-greedily from the q-values of agents drawn from a supplied pool."""

    def __init__(self, config, agents, style='DQN', choice=1):
        """
        style -> network type of the exploration agents:
            'DSR' networks return (phi, psi, q); 'DQN' networks return q directly.
        choice -> how the exploring agent is selected:
           0 - at every timestep, randomly pick an agent and take an eps-greedy action
           1 - switch to a randomly chosen agent every `switch_period` steps
        """
        BaseActor.__init__(self, config)
        self.config = config
        self.agents = agents
        self.style = style
        self.choice = choice
        # Parameters to decide which agents should learn
        self.batch_steps = 0   # steps taken by this actor; drives agent switching
        self.switch_period = 10
        self.agent_id = 0      # index of the currently selected agent (choice == 1)
        self.start()
    def _transition(self):
        """Advance the env one step and return
        [state, action, reward, next_state, next_action, done, info]."""
        if self._state is None:
            self._state = self._task.reset()
        config = self.config
        # Choosing which agent for taking actions
        if(len(self.agents) == 0):
            # Empty pool: fall back to a dummy whose "network" returns all-zero
            # q-values, so action selection degenerates to (eps-greedy over ties).
            pick = Pick()
            pick.network = lambda a: torch.zeros(self.config.action_dim,1)
        elif(self.choice == 0):
            pick = random.choice(self.agents)
        elif(self.choice == 1):
            self.batch_steps += 1
            if(self.batch_steps % self.switch_period == 0):
                # CHECK: multiprocessing might be screwing something up
                self.agent_id = np.random.randint(len(self.agents))
            pick = self.agents[self.agent_id]
        else:
            raise NameError('Invalid choice config')
        # Find qvalues of the picked agent for the present state
        # (lock guards the shared network across actor processes).
        with config.lock:
            if(self.style == 'DSR'):
                _, _, q_values = pick.network(config.state_normalizer(self._state))
            elif(self.style == 'DQN'):
                q_values = pick.network(config.state_normalizer(self._state))
        q_values = to_np(q_values).flatten()
        # Take action based on this estimated q value
        if self._total_steps < config.exploration_steps \
                or np.random.rand() < config.random_action_prob():
            action = np.random.randint(0, len(q_values))
        else:
            action = np.argmax(q_values)
        next_state, reward, done, info = self._task.step([action])
        # Also estimate next action
        #############
        # Repeat the selection / eps-greedy procedure on next_state so the
        # stored transition carries next_action as well (SARSA-style entry).
        if(len(self.agents) == 0):
            pick2 = Pick()
            pick2.network = lambda a : torch.zeros(self.config.action_dim,1)
        elif(self.choice == 0):
            pick2 = random.choice(self.agents)
        elif(self.choice == 1):
            pick2 = pick
        with config.lock:
            if(self.style == 'DSR'):
                _, _, q_values = pick2.network(config.state_normalizer(next_state))
            elif(self.style=='DQN'):
                q_values = pick2.network(config.state_normalizer(next_state))
        q_values = to_np(q_values).flatten()
        if self._total_steps < config.exploration_steps \
                or np.random.rand() < config.random_action_prob():
            next_action = np.random.randint(0, len(q_values))
        else:
            next_action = np.argmax(q_values)
        # The task is vectorized (length-1 lists); unwrap element 0 for storage.
        entry = [self._state[0], action, reward[0], next_state[0], next_action, int(done[0]), info]
        self._total_steps += 1
        self._state = next_state
        return entry
# -
# Train the avDSR feature learner on Boxing with an empty agent pool
# (len(agents) == 0 triggers the zero-q fallback inside the actor).
game='BoxingNoFrameskip-v0'
avdsr = avdsr_feature_v2(game=game, agents=[], choice=0, dnn=None)
# Smoothed loss curves; the first 1000 points are dropped as warm-up noise.
plt.subplot(211), plt.plot(convolve(avdsr.loss_rec_vec,10)[1000:]), plt.title('loss_reconstruction')
plt.subplot(212), plt.plot(convolve(avdsr.loss_psi_vec,10)[1000:]), plt.title('loss_psi')
plt.show()
avdsr.network.state_dict().keys()
# +
import gym
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.atari_wrappers import FrameStack as FrameStack_
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv, VecEnv
class TransposeImage(gym.ObservationWrapper):
    """Transpose HWC image observations to CHW (PyTorch layout)."""

    def __init__(self, env=None):
        super(TransposeImage, self).__init__(env)
        obs_shape = self.observation_space.shape
        # observation() maps (H, W, C) -> (C, H, W), so the declared space must
        # be [C, H, W]. The original used [obs_shape[2], obs_shape[1], obs_shape[0]]
        # i.e. [C, W, H] — wrong whenever H != W (harmless for square Atari frames).
        self.observation_space = Box(
            self.observation_space.low[0, 0, 0],
            self.observation_space.high[0, 0, 0],
            [obs_shape[2], obs_shape[0], obs_shape[1]],
            dtype=self.observation_space.dtype)

    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return observation.transpose(2, 0, 1)
class FrameStack(FrameStack_):
    """Frame stacking whose observations are memory-efficient LazyFrames."""

    def __init__(self, env, k):
        super(FrameStack, self).__init__(env, k)

    def _get_ob(self):
        frames = list(self.frames)
        assert len(frames) == self.k
        return LazyFrames(frames)
class LazyFrames(object):
    """Store stacked frames without concatenating them up front.

    Common frames between successive observations are kept only once, which
    greatly reduces memory for large (e.g. 1M-transition) replay buffers.
    Convert to a numpy array (np.asarray) just before feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames

    def __array__(self, dtype=None):
        # Concatenate lazily, on demand, along the leading axis.
        stacked = np.concatenate(self._frames, axis=0)
        return stacked if dtype is None else stacked.astype(dtype)

    def __len__(self):
        return len(self.__array__())

    def __getitem__(self, i):
        return self.__array__()[i]
# -
# Build the standard DeepMind-style Atari pipeline, then transpose to CHW
# and stack 4 frames for the network input.
env_id='BoxingNoFrameskip-v0'
env = make_atari(env_id)
env = wrap_deepmind(env, episode_life=True, clip_rewards=False, frame_stack=False, scale=False)
obs_shape = env.observation_space.shape
if len(obs_shape) == 3:
    # Only image observations (H, W, C) need the transpose.
    env = TransposeImage(env)
env = FrameStack(env, 4)
# +
# Qualitative check: run one observation through the trained network and
# compare the input frame with its reconstruction.
obs = env.reset()
obs = tensor(obs)/255.0
a, b, recons, _ = avdsr.network(obs.unsqueeze(0))
re_np = recons.squeeze(0).detach().cpu().numpy()
# NOTE(review): obs is already a tensor here, so tensor(obs) re-wraps it;
# obs.cpu().numpy()[0,:,:] would suffice — confirm and simplify.
imgIn = tensor(obs).cpu().numpy()[0,:,:]
imgRe = re_np[0,:,:]
plt.subplot(121), plt.imshow(imgIn)
plt.subplot(122), plt.imshow(imgRe); plt.show()
# -
torch.save(avdsr.network, '../storage/41-avdsr-trained-boxing-512.weights')
| notebooks/17-boxing-unsup-repLearn-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Verify Support Points Implementation
#
# *<NAME>*, 2021-01-05
#
# This notebook verifies the implementation of `gr.tran_sp()`. This routine is based on the `sp.ccp` algorithm of Mak and Joseph (2018). This verification study includes both a visual check and a formal convergence study.
#
# +
import grama as gr
import numpy as np
import pandas as pd
import time
from datetime import datetime
from plotnine import *
DF = gr.Intention()
# -
print("Last executed: {}".format(datetime.now(tz=None)))
# ## Visual Inspection
#
# ---
#
# The following section is an "eyeball norm" check of the `gr.tran_sp()` implementation. A compacted dataset should appear representative of the original dataset.
#
# Set up model:
#
# +
## Correlated gaussian
# Ground-truth distribution for the visual check: two standard-normal
# marginals joined by a Gaussian copula with correlation 0.5.
md = (
    gr.Model()
    >> gr.cp_marginals(
        x=dict(dist="norm", loc=0, scale=1),
        y=dict(dist="norm", loc=0, scale=1),
    )
    >> gr.cp_copula_gaussian(
        df_corr=gr.df_make(var1="x", var2="y", corr=0.5)
    )
)
md.printpretty()
# -
# Generate a dataset to compact:
#
# +
# Draw a 1000-point Monte Carlo sample to be compacted by support points
# (skip=True: sample the inputs without evaluating any model outputs).
df_data = gr.eval_monte_carlo(
    md,
    n=1e3,
    skip=True,
)
(
    df_data
    >> ggplot(aes("x", "y"))
    + geom_point()
    + theme_minimal()
)
# -
# Run the support points algorithm; visually compare the compacted dataset against the original.
#
# +
# Compact the 1000-point sample down to n=50 support points.
df_sp = (
    df_data
    >> gr.tf_sp(
        n=50,
        seed=102,
        tol=1e-3,
        n_maxiter=int(1e3),
    )
)
# Overlay the original sample and the support points for the eyeball check.
(
    df_data
    >> gr.tf_mutate(source="model")
    >> gr.tf_bind_rows(
        df_sp
        >> gr.tf_mutate(source="sp")
    )
    >> ggplot(aes("x", "y", color="source"))
    + geom_point()
    + theme_minimal()
)
# -
# This passes the "eyeball norm" for me; the support points (`sp`) appear to be representative of the original dataset.
#
# Visualize the change from initial guess to support points.
#
# +
df_sp0 = (
df_data
>> gr.tf_sp(
n=50,
seed=102,
n_maxiter=0, # Halt at initial guess
)
)
df_both = (
df_sp
>> gr.tf_bind_cols(
df_sp0
>> gr.tf_rename(x0=DF.x, y0=DF.y)
)
)
(
df_sp
>> gr.tf_mutate(source="sp")
>> gr.tf_bind_rows(
df_sp0
>> gr.tf_mutate(source="init")
)
>> ggplot(aes("x", "y"))
+ geom_point(
data=df_data,
color="grey",
)
+ geom_point(aes(color="source"))
+ geom_segment(
data=df_both,
mapping=aes(x="x0", xend="x", y="y0", yend="y"),
arrow=arrow(length=0.05)
)
+ theme_minimal()
)
# -
# *Observations*:
#
# - Generally, points in the more-dense region of the distribution tend to move less, while those in the less-dense tend to move further.
# - Generally, points are moving "up" the density, towards its center.
#
# Formally, the `sp.ccp` algorithm minimizes a Monte Carlo approximation of the energy distance. Could we assess convergence by inspecting the gradient of this quantity?
#
# Set up the $\hat{E}$ objective; Equation (MC) from Mak and Joseph (2018).
#
# +
Y = df_data.values

def obj(X):
    """Monte Carlo energy-distance objective, Equation (MC) of Mak & Joseph (2018).

    Parameters
    ----------
    X : (n, p) array of candidate support points.
    Uses the module-level data sample Y of shape (N, p).
    """
    # Setup
    n = X.shape[0]
    N = Y.shape[0]
    t1 = 0
    t2 = 0
    for i in range(n):
        t1 = t1 + np.sum(np.linalg.norm(Y - X[i], axis=1))
        t2 = t2 + np.sum(np.linalg.norm(X - X[i], axis=1))
    # BUG FIX: the within-sample term enters the energy distance with a
    # NEGATIVE sign: E = (2/nN) * sum ||y - x||  -  (1/n^2) * sum ||x - x'||.
    # The original used '+', which rewards clustered rather than
    # space-filling point sets.
    return (2 / n / N) * t1 - t2 / n**2
def obj_v(v):
    """Adapter: evaluate obj() on a flattened point set `v`."""
    n_cols = Y.shape[1]
    return obj(v.reshape((-1, n_cols)))
# Compute objective at start and end
# Energy-distance objective at the initial guess (df_sp0) versus the
# converged support points (df_sp); the optimizer should have reduced it.
X0_v = df_sp0.values.flatten()
f0 = obj_v(X0_v)
Xs_v = df_sp.values.flatten()
fs = obj_v(Xs_v)
print(f0)
print(fs)
# -
# Approximate the gradient of the objective with a central difference.
#
# +
# Stepsize
# NOTE(review): this module-level h is never used — grad() below is always
# called with its default h=1e-6; remove this or pass it through explicitly.
h = 1e-8
## Central difference
def grad(v0, fun, h=1e-6):
    """Central-difference approximation of the gradient of `fun` at `v0`.

    Parameters
    ----------
    v0 : 1-d array, point at which to differentiate.
    fun : callable mapping a 1-d array to a scalar.
    h : total step size of the central difference.

    Returns
    -------
    1-d array of partial derivatives, same length as v0.
    """
    n_v = len(v0)
    g = np.zeros(n_v)  # renamed from `grad`, which shadowed this function
    for i in range(n_v):
        e_v = np.zeros(n_v)
        e_v[i] = 0.5 * h  # half-step on each side => total step h
        g[i] = (fun(v0 + e_v) - fun(v0 - e_v)) / h
    return g
G0 = grad(X0_v, obj_v)
Gs = grad(Xs_v, obj_v)
# -
# Compare the gradient at the start and end.
#
print(np.linalg.norm(G0))
print(np.linalg.norm(Gs))
# The gradient of the objective does not seem to be a promising way to quantitatively assess convergence.
#
# ### A note on strong anisotropy
#
# The support points algorithm seems to have trouble with strongly anisotropic data. For example, let's apply the algorithm to the cantilever beam's random variable input space:
# +
from grama.models import make_cantilever_beam
md_beam = make_cantilever_beam()
df_beam = gr.eval_monte_carlo(
md_beam,
n=1e3,
df_det="nom",
skip=True,
seed=101
)
df_beam_sp_no = (
df_beam
>> gr.tf_sp(
n=50,
var=["H", "V", "E", "Y"],
# *Disable* the standardization for demonstration
standardize=False,
n_maxiter=1000,
seed=101,
)
)
df_both = (
df_beam
>> gr.tf_select(["H", "V", "E", "Y"])
>> gr.tf_mutate(source="Original")
>> gr.tf_bind_rows(
df_beam_sp_no
>> gr.tf_mutate(source="SP")
)
)
# -
# Note that the distance criterion is not met even after `1000` iterations. If we inspect the (unconverged) points, we can see they are "compressed" along the lower-scale directions:
#
(
df_both
>> ggplot(aes("E", "Y", color="source"))
+ geom_point()
)
# To solve this problem, the implementation of `tran_sp()` by default *standardizes* each column before applying the support points algorithm, and restores the scale before returning the results. This solves the issue shown above:
#
# +
df_beam_sp_std = (
df_beam
>> gr.tf_sp(
n=50,
var=["H", "V", "E", "Y"],
standardize=True,
# n_maxiter=1000,
seed=101,
)
)
df_both_std = (
df_beam
>> gr.tf_select(["H", "V", "E", "Y"])
>> gr.tf_mutate(source="Original")
>> gr.tf_bind_rows(
df_beam_sp_std
>> gr.tf_mutate(source="SP; standardized")
)
)
(
df_both_std
>> ggplot(aes("E", "Y", color="source"))
+ geom_point()
)
# -
# With standardization, the sp algorithm converges properly with the default settings, and properly represents the scale of all the variables considered here.
#
# ## Formal Convergence Study
#
# ---
#
# Perform a formal convergence study to numerically assess the accuracy of the `tran_sp()` routine. Use an integration problem with known expectation.
#
# Implement the [Bratley](https://www.sfu.ca/~ssurjano/bratleyetal92.html) model:
#
# +
## Bratley model at various dimensionalities
md_br1 = (
gr.Model("Bratley, 1d")
>> gr.cp_function(
fun=lambda x: -x[0],
var=1,
out=["f"],
)
>> gr.cp_marginals(
x0=dict(dist="uniform", loc=0, scale=1),
)
>> gr.cp_copula_independence()
)
md_br2 = (
gr.Model("Bratley, 2d")
>> gr.cp_function(
fun=lambda x: -x[0] + x[0] * x[1],
var=2,
out=["f"],
)
>> gr.cp_marginals(
x0=dict(dist="uniform", loc=0, scale=1),
x1=dict(dist="uniform", loc=0, scale=1),
)
>> gr.cp_copula_independence()
)
md_br3 = (
gr.Model("Bratley, 3d")
>> gr.cp_function(
fun=lambda x: -x[0] + x[0] * x[1] - x[0] * x[1] * x[2],
var=3,
out=["f"],
)
>> gr.cp_marginals(
x0=dict(dist="uniform", loc=0, scale=1),
x1=dict(dist="uniform", loc=0, scale=1),
x2=dict(dist="uniform", loc=0, scale=1),
)
>> gr.cp_copula_independence()
)
## Exact expectation
def br_exact(d):
    """Closed-form expectation of the d-dimensional Bratley integrand."""
    return ((-0.5) ** d - 1) / 3
md_br3.printpretty()
# -
# Overkill sampling, to check implementation against analytic expression and for support point algorithm:
#
# +
df_br3 = (
md_br3
>> gr.ev_monte_carlo(df_det="nom", n=1e4, seed=101)
)
Ehat3 = (
df_br3
>> gr.tf_summarize(E=gr.mean(DF.f))
).E[0]
E3 = br_exact(3)
err3 = np.abs((Ehat3 - E3) / E3)
print("Relative error = {0:4.3e}".format(err3))
# -
# We should not expect the support point approach to reach a relative error smaller than this observed error; the `sp.ccp` algorithm is limited by the Monte Carlo approximation of the energy distance. The `sp.sccp` algorithm should be able to overcome this limitation, but I have not yet implemented this.
#
# Perform a convergence study with support points and simple Monte Carlo.
#
# +
# Select sample sizes
N_all = [3, 4, 5, 6, 7, 8, 9, 10, 30, 50]
n_repl = 40
# Setup
Er_sp = np.zeros((len(N_all), n_repl))
Er_mc = np.zeros((len(N_all), n_repl))
SE_mc = np.zeros((len(N_all), n_repl))
# Exact solution
E3 = br_exact(3)
seed0 = 101
df_convergence = pd.DataFrame()
t0 = time.time()
for i, n in enumerate(N_all):
for j in range(n_repl):
seed = seed0 + j
## Simple Monte Carlo
df_mc_tmp = (
md_br3
>> gr.ev_monte_carlo(df_det="nom", n=n, seed=seed)
>> gr.tf_summarize(
E=gr.mean(DF.f),
se=gr.sd(DF.f) / np.sqrt(n)
)
)
Ehat_mc = df_mc_tmp.E[0]
Er_mc[i, j] = np.abs((Ehat_mc - E3) / E3)
# Store SE for MC as well
SE_mc[i, j] = np.abs(df_mc_tmp.se[0] / E3)
## Support Points
df_sp_tmp = gr.tran_sp(
df_br3[["x0", "x1", "x2"]],
n=n,
seed=seed,
verbose=False,
)
df_sp_br3 = (
md_br3
>> gr.ev_df(df=df_sp_tmp)
)
Ehat_sp = (
df_sp_br3
>> gr.tf_summarize(E=gr.mean(DF.f))
).E[0]
Er_sp[i, j] = np.abs((Ehat_sp - E3) / E3)
df_tmp = gr.df_make(
Er_mc=Er_mc[i],
SE_mc=SE_mc[i],
Er_sp=Er_sp[i],
repl=list(range(n_repl)),
N=n,
)
df_convergence = pd.concat((df_convergence, df_tmp), axis=0)
t1 = time.time()
print("Execution time: {0:4.3e} min".format((t1 - t0) / 60))
# -
# Visually compare the convergence histories:
#
(
df_convergence
>> gr.tf_group_by(DF.N)
>> gr.tf_summarize(
Er_mc_mean=gr.mean(DF.Er_mc),
Er_mc_lo=gr.quant(DF.Er_mc, p=0.25),
Er_mc_hi=gr.quant(DF.Er_mc, p=0.75),
Er_sp_mean=gr.mean(DF.Er_sp),
Er_sp_lo=gr.quant(DF.Er_sp, p=0.25),
Er_sp_hi=gr.quant(DF.Er_sp, p=0.75),
)
>> ggplot(aes(x="N"))
+ geom_hline(yintercept=err3, color="black", linetype="dotted")
+ geom_ribbon(
mapping=aes(ymin="Er_mc_lo", ymax="Er_mc_hi"),
color="black",
alpha=1 / 5
)
+ geom_ribbon(
mapping=aes(ymin="Er_sp_lo", ymax="Er_sp_hi"),
color="salmon",
alpha=1 / 5
)
+ geom_line(aes(y="Er_mc_mean"), color="black")
+ geom_line(aes(y="Er_sp_mean"), color="salmon")
+ geom_abline(intercept=0, slope=-1/2, linetype="dashed")
+ scale_x_log10()
+ scale_y_log10()
+ theme_minimal()
)
# *Observations*:
#
# - Simple Monte Carlo (in black) exhibits a square-root convergence (dashed line), as expected
# - Support (in salmon) exhibits a rapid initial convergence at low $N$, then becomes roughly parallel to SMC.
# - This is much like the results in Mak and Joseph (2018), Figure 4, e.g. GAPK (p = 5).
# - I suspect the mechanism is different though; they use `sp.sccp` in their results.
# - As expected, the support approach does not significantly exceed the "overkill" SMC relative error (horizontal dotted line) computed above.
#
# ## References
#
# - Mak and Joseph "Support Points" (2018) *Annals of Statistics*
| tests/longrun/sp_convergence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
def flip_image(image, direction):
    """Flip `image` along `direction` (cv2 flip code) and save it to disk.

    BUG FIX: the original imwrite path had mismatched quotes
    (""/home/paa/flip/0/"+i".jpg") — a syntax error referencing an undefined
    `i`; write a fixed filename, mirroring rotate_image() below.
    """
    image = cv2.flip(image, direction)
    cv2.imwrite("/home/paa/flip/0/" + "flip" + ".jpg", image)
def rotate_image(image, deg):
    """Rotate a 3-channel `image` by `deg` degrees about its centre and save it."""
    rows, cols, channels = image.shape
    centre = (cols / 2, rows / 2)
    rotation = cv2.getRotationMatrix2D(centre, deg, 1)
    rotated = cv2.warpAffine(image, rotation, (cols, rows))
    cv2.imwrite("/home/paa/A/5/" + "rotate" + ".jpg", rotated)
# Load one sample image, show it, then run each augmentation once.
image=cv2.imread("/home/paa/sign/Dataset/1/1(3).JPG")
cv2.imshow("image",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
print(image.shape)
rotate_image(image,45)
rotate_image(image,-45)
flip_image(image,1)
import cv2
import numpy as np
# Global counters: j numbers the output files, z selects the class folder.
j=1
z=9
def flip_image(image, direction, j, z):
    """Flip `image` along `direction` (cv2 flip code) and save it as
    /home/paa/Newdataset/<z>/<j>.jpeg."""
    flipped = cv2.flip(image, direction)
    cv2.imwrite("/home/paa/Newdataset/" + str(z) + "/" + str(j) + ".jpeg", flipped)
def rotate_image(image, deg, j, z):
    """Rotate a 3-channel `image` by `deg` degrees about its centre and save it
    as /home/paa/Newdataset/<z>/<j>.jpeg."""
    rows, cols, channels = image.shape
    centre = (cols / 2, rows / 2)
    rotation = cv2.getRotationMatrix2D(centre, deg, 1)
    rotated = cv2.warpAffine(image, rotation, (cols, rows))
    cv2.imwrite("/home/paa/Newdataset/" + str(z) + "/" + str(j) + ".jpeg", rotated)
# Augment images 1..204 of class `z`: original copy, darker, brighter,
# flipped, and two rotations, numbered sequentially into Newdataset/<z>/.
for i in range(1, 205):
    image = cv2.imread("/../../../home/paa/Dataset/" + str(z) + "/" + str(i) + ".JPG")
    # Original copy
    cv2.imwrite("/home/paa/Newdataset/" + str(z) + "/" + str(j) + ".jpeg", image)
    j = j + 1
    # Darkened copy; clip to the valid 8-bit range and restore uint8 so
    # imwrite is not handed out-of-range float data.
    darkimg = np.clip(image * 0.75, 0, 255).astype(np.uint8)
    cv2.imwrite("/home/paa/Newdataset/" + str(z) + "/" + str(j) + ".jpeg", darkimg)
    j = j + 1
    # Brightened copy; without the clip, values above 255 overflow.
    brightimg = np.clip(image * 1.1, 0, 255).astype(np.uint8)
    cv2.imwrite("/home/paa/Newdataset/" + str(z) + "/" + str(j) + ".jpeg", brightimg)
    j = j + 1
    flip_image(image, 1, j, z)
    j = j + 1
    rotate_image(image, 20, j, z)
    j = j + 1
    rotate_image(image, -20, j, z)
    j = j + 1
| ImageAugmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Aggregate Airbnb listings: mean price and mean review rating per
# neighbourhood group, serialized to JSON records.
data1 = pd.read_csv('listings.csv')
data2 = pd.read_csv('listings2.csv')
avg_price_by_neighbour = data1.groupby(data1['neighbourhood_group'])['price'].mean()
# NOTE(review): assumes listings.csv and listings2.csv rows align one-to-one
# (rating is copied by position, not joined on an id) — confirm.
data1['rating'] = data2['review_scores_rating']
avg_rating_by_neighbour = data1.groupby(data1['neighbourhood_group'])['rating'].mean()
group_name = avg_rating_by_neighbour.index.tolist()
avg_price_list = avg_price_by_neighbour.tolist()
avg_rating_list = avg_rating_by_neighbour.tolist()
df = pd.DataFrame({'group_name' : group_name,'price' : avg_price_list, 'rating' : avg_rating_list})
df = df.to_json(orient='records')
# +
# Write the aggregated records as a JS variable for the visualization.
# `with` guarantees the handle is closed even if the write raises
# (replaces the manual open/close pair).
with open("avg_by_neighbour.js", "w") as file:
    file.write("var data = " + df)
# +
(avg_price_by_neighbour - 83)*2.5
# -
avg_price_by_neighbour
| AirbnbViz/examples/create avg.js.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training with augmented datasets
# To test the effectiveness of the augmentation, let's train our model with a smaller starting set of images and use augmentation to increase it to 1000. We will compare it to 1000 images without augmentation.
#
# +
import keras
import numpy as np
from keras.datasets import fashion_mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import (
random_rotation, random_shift, random_shear, random_zoom,
random_channel_shift, img_to_array, ImageDataGenerator)
from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
# -
# Let's define our model architecture
def getModel():
    """Build and compile the small CNN classifier for 28x28x1 Fashion-MNIST images."""
    model = Sequential([
        Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
               input_shape=(28, 28, 1)),
        Conv2D(filters=32, kernel_size=(2, 2), activation='relu'),
        MaxPool2D(strides=(2, 2)),
        Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
        Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
        MaxPool2D(strides=(2, 2)),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-3),
                  metrics=["accuracy"])
    return model
print(getModel().summary())
# Let's define an image data generator for augmenting our data set with various transforms
gen = ImageDataGenerator(
    rotation_range=20,          # random rotations up to +/-20 degrees
    width_shift_range=0.1,      # horizontal shifts up to 10% of width
    height_shift_range=0.1,     # vertical shifts up to 10% of height
    shear_range=0.5,
    zoom_range=(0.9, 1.1),
    horizontal_flip=False,
    vertical_flip=False,
    fill_mode='constant',       # fill exposed borders with cval
    cval=0
)
# We are going to use 100 images and augment them to 1000 and take 1000 images for comparison
# +
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

# Use the first 100 / 1000 samples. BUG FIX: the original sliced [1:100] and
# [1:1000], which skips sample 0 and yields 99/999 items — off by one versus
# the sizes stated in the text.
x_train_image_aug = x_train[:100]
y_train_image_aug = y_train[:100]
x_test_image_aug = x_test[:100]
y_test_image_aug = y_test[:100]

x_train_orig = x_train[:1000]
y_train_orig = y_train[:1000]
x_test_orig = x_test[:1000]
y_test_orig = y_test[:1000]

# Scale pixels to [0, 1] and coerce labels to int32.
test_data_img_aug = np.asarray(x_test_image_aug / 255.0 , dtype=float)
train_data_img_aug = np.asarray(x_train_image_aug / 255.0 , dtype=float)
test_labels_img_aug = np.asarray(y_test_image_aug , dtype=np.int32)
train_labels_img_aug = np.asarray(y_train_image_aug , dtype=np.int32)

test_data_orig = np.asarray(x_test_orig / 255.0 , dtype=float)
train_data_orig = np.asarray(x_train_orig / 255.0 , dtype=float)
test_labels_orig = np.asarray(y_test_orig , dtype=np.int32)
train_labels_orig = np.asarray(y_train_orig , dtype=np.int32)
# -
# Let's train our model using our image augmentation set up
# +
# One-hot encode the labels and reshape images to NHWC for the CNN.
y_train_labels_img_aug = to_categorical(train_labels_img_aug)
y_test_labels_img_aug = to_categorical(test_labels_img_aug)
x_train_data_img_aug = train_data_img_aug.reshape(-1, 28, 28, 1)
x_test_data_img_aug = test_data_img_aug.reshape(-1, 28, 28, 1)
# batch_size=10 x steps_per_epoch=100 => 1000 augmented images per epoch,
# matching the "augment to 1000" comparison described above.
train_gen = gen.flow(x_train_data_img_aug,
                     y_train_labels_img_aug,
                     batch_size=10,
                     seed=42)
model = getModel()
history = model.fit_generator(
    train_gen,
    steps_per_epoch=100,
    epochs=30,
    verbose=1,
    validation_data=(x_test_data_img_aug, y_test_labels_img_aug),
    shuffle=True
)
# -
# Let's now train the model without image augmentation
# +
y_train_labels_orig = to_categorical(train_labels_orig)
y_test_labels_orig = to_categorical(test_labels_orig)
x_train_data_orig = train_data_orig.reshape(-1, 28, 28, 1)
x_test_data_orig = test_data_orig.reshape(-1, 28, 28, 1)
training_history = model.fit(x_train_data_orig,
y_train_labels_orig,
epochs=30,
verbose=1,
batch_size=32,
validation_data=(x_test_data_orig, y_test_labels_orig),
shuffle=True
)
# -
# Let's now see how they compare
# +
# %matplotlib inline
import matplotlib.pyplot as plt

figure = plt.figure(figsize=(20,9))
subplot = figure.add_subplot(1, 2, 1)
# BUG FIX: both curves were labelled 'Image augmentation'; label them
# distinctly, add a legend, and limit the y-axis to the valid accuracy
# range [0, 1] (was [0, 2]).
plt.plot(training_history.history['acc'], color='#aaaa30', label='No augmentation')
plt.plot(history.history['acc'], color='#ff3d00', label='Image augmentation')
plt.ylim(0.0, 1.0)
plt.legend()
# -
| Section 2/CODE/section2_video3_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Extracting signals from a brain parcellation
#
# Here we show how to extract signals from a brain parcellation and compute
# a correlation matrix.
#
# We also show the importance of defining good confounds signals: the
# first correlation matrix is computed after regressing out simple
# confounds signals: movement regressors, white matter and CSF signals, ...
# The second one is without any confounds: all regions are connected to
# each other.
#
#
# One reference that discusses the importance of confounds is `Varoquaux and
# Craddock, Learning and comparing functional connectomes across subjects,
# NeuroImage 2013
# <http://www.sciencedirect.com/science/article/pii/S1053811913003340>`_.
#
# This is just a code example, see the `corresponding section in the
# documentation <parcellation_time_series>` for more.
#
# <div class="alert alert-info"><h4>Note</h4><p>This example needs SciPy >= 1.0.0 for the reordering of the matrix.</p></div>
#
# +
import ast
import gzip
import io
import json
import nilearn
import os
import sklearn
import tarfile
import tempfile
import nibabel as nib
import numpy as np
import pandas as pd
import seaborn as sns
from bids_validator import BIDSValidator
from fetch_difumo import fetch_difumo
from io import BytesIO
from pathlib import Path
from os import listdir as ls
from os.path import basename as bname
from os.path import dirname as dname
from os.path import expanduser as xpu
from os.path import join as pjoin
from pandas import DataFrame as df
from tempfile import TemporaryDirectory as tmpdir
from tempfile import TemporaryFile as tmpfile
from tqdm import tqdm
from typing import Union
from collections.abc import Iterable
from typing import Sequence
import loadutils as lu
import sniffbytes as snif
import scanzip as szip
import shutil
from nilearn import masking
from nilearn.plotting import plot_stat_map, plot_anat, plot_img, plot_epi
from nilearn.image import concat_imgs, mean_img
from nilearn.input_data import NiftiMasker
from cimaqprep import participant_data
# -
# % install_ext https://raw.github.com/cpcloud/ipython-autotime/master/autotime.py
# % load_ext autotime
import warnings
warnings.filterwarnings('ignore')
from cimaqprep.participant_data import participant_data
subject00 = participant_data(cimaq_nov_dir = xpu('~/../../data/cisl/DATA/cimaq_20190901'),
cimaq_mar_dir = xpu('~/../../data/cisl/DATA/cimaq_03-19'),
events_path = xpu('~/../../data/cisl/DATA/cimaq_corrected_events/events'),
behav_path = xpu('~/../../data/cisl/DATA/cimaq_corrected_behavioural/behavioural'),
participants_path = xpu('~/../../data/cisl/DATA/cimaq_03-19/derivatives/CIMAQ_fmri_memory/data/participants/'))
# ## Retrieve the atlas and the data
#
#
# +
from nilearn import datasets
dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm',
data_dir=pjoin(dname(os.getcwd()),'harvard_oxford'))
difumo_dir=pjoin(dname(os.getcwd()),'DiFuMo_atlases')
difumo512_3mm=datasets.atlas.fetch_atlas_difumo(dimension=512,
resolution_mm=3,
data_dir=difumo_dir,
resume=True,
verbose=1)
difumo512_3mm.labels
# atlas_filename, labels = difumo512_3mm.maps,difumo512_3mm.maps[0],difumo512_3mm.maps,difumo512_3mm.labels
atlas_filename, labels = difumo512_3mm.maps, difumo512_3mm.labels
# # One subject fmri data
# # data=subject_data
# fmri_filenames=subject00.resampled_fmri_to_events
# data = datasets.fetch_development_fmri(n_subjects=1,
# data_dir=pjoin(dname(os.getcwd()),'development_fmri'))
# fmri_filenames = data.func[0]
# -
type(dataset)
# ## Extract signals on a parcellation defined by labels
# Using the NiftiLabelsMasker
#
#
# #### Here we go from nifti files to the signal time series in a numpy array.
# #### Note how we give confounds to be regressed out during signal extraction.
from nilearn.image import concat_imgs, mean_img
mean_img(concat_imgs(subject00.resampled_fmri_to_events.values))
from nilearn.input_data import NiftiLabelsMasker
masker = NiftiLabelsMasker(labels_img=mean_img(difumo512_3mm.maps),
labels=difumo512_3mm.labels,
standardize=True,
memory='nilearn_cache', verbose=5)
# help(masker.fit_transform)
help(NiftiLabelsMasker)
# +
# NOTE(review): `fmri_filenames` is only assigned inside the commented-out
# cell above, so this line raises NameError as written — it presumably should
# use subject00.resampled_fmri_to_events (the commented alternative); confirm.
time_series = masker.fit_transform(imgs=mean_img(concat_imgs(fmri_filenames.values)))
# imgs=mean_img(concat_imgs(subject00.resampled_fmri_to_events.values)))
# confounds=data.confounds)
# -
time_series.shape
# subject00.resampled_confounds =
df([(item[0],tuple(val.mid for val in pd.cut(item[1],bins=subject00.events.shape[0])))
for item in subject00.confounds.iteritems()])[1]
# ## Compute and display a correlation matrix
# ### - Plot the correlation matrix
# #### - Make a large figure
# #### - Mask the main diagonal for visualization:
# #### - The labels we have start with the background (0).
# - Hence we skip the first label.
# #### - Matrices are ordered for block-like representation
#
from nilearn.connectome import ConnectivityMeasure
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series])[0]
correlation_matrix.shape
# +
import numpy as np
from nilearn import plotting
np.fill_diagonal(correlation_matrix, 0)
plotting.plot_matrix(correlation_matrix,
figure=(10, 8),
labels=difumo512_3mm.labels,
vmax=0.8,
vmin=-0.8,
reorder=True)
# -
# ## Same thing without confounds, to stress the importance of confounds
#
#
# +
# NOTE(review): `fmri_filenames` is undefined here as well (see above);
# labels[1:] assumes the first label is background, as in the Harvard-Oxford
# atlas convention — confirm this also holds for the DiFuMo labels.
time_series = masker.fit_transform(fmri_filenames)
# Note how we did not specify confounds above. This is bad!
correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# Mask the main diagonal for visualization:
np.fill_diagonal(correlation_matrix, 0)
plotting.plot_matrix(correlation_matrix, figure=(10, 8), labels=labels[1:],
                     vmax=0.8, vmin=-0.8, title='No confounds', reorder=True)
plotting.show()
| plot_signal_extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 4 - Advantages of Jupyter Notebooks
#
# There are a number of helpful features of Jupyter notebooks that make them so useful, in addition to just executing code. We will cover a couple of features in this exercise.
#
# Jupyter notebooks can execute commands directly within the Anaconda prompt by including an exclamation point prefix (!).
# !dir
# ## Valid Markdown syntax
#
# You can render valid **Markdown** in Markdown cells
#
# * Just like
# * this one
| Chapter 1 - Machine Learning Toolkit/Exercise 4 - Advantages of Jupyter Notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Bayesian Calibration: Acceleration due to Gravity
# To illustrate how to use XXX within YYY we will start with a very simple example of a falling ball example.
#
# First, import the main libraries we use for this example:
import numpy as np
from scipy.stats import norm
from pyDOE import *
import scipy.optimize as spo
import sys
import os
# ### Data: Acceleration due to Gravity
# Let's read the real data first, and then visualize:
# BUG FIX: matplotlib is never imported in this notebook's import cell,
# so `plt` below raises NameError; import it here.
import matplotlib.pyplot as plt

# Load the (height, time) observations and plot them.
ball = np.loadtxt('ball.csv', delimiter=',')
n = len(ball)
# height
X = np.reshape(ball[:, 0], (n, 1))
# time
Y = np.reshape(ball[:, 1], ((n, 1)))
plt.scatter(X, Y)
plt.xlabel("height")
plt.ylabel("time")
plt.show()
# ### Computer model experiments
# We know that $t = \sqrt{2h/g}$.
# Computer implementation of the mathematical model t = sqrt(2h/g)
def timedrop(x, theta, hr, gr):
    """Drop time for scaled heights `x` under scaled gravities `theta`.

    Parameters
    ----------
    x : (n, 1) array of heights scaled to [0, 1]; unscaled via `hr`.
    theta : (m, 1) array of gravity parameters scaled to [0, 1]; unscaled via `gr`.
    hr : array whose min/max give the physical height range.
    gr : array whose min/max give the physical gravity range.

    Returns
    -------
    (n, m) array f with f[i, k] = sqrt(2 * h_i / g_k).
    """
    min_g = min(gr)
    range_g = max(gr) - min(gr)
    min_h = min(hr)
    range_h = max(hr) - min(hr)
    f = np.zeros((theta.shape[0], x.shape[0]))
    for k in range(0, theta.shape[0]):
        g = range_g * theta[k] + min_g
        # Was `min(hr)`: reuse the precomputed min_h (it was otherwise unused).
        h = range_h * x + min_h
        f[k, :] = np.sqrt(2 * h / g).reshape(x.shape[0])
    return f.T
# Consider computer model emulator where $u \in [0, 30]$:
# +
# Latin-hypercube design over the (scaled) height input.
n1 = 150
hvec = lhs(1, samples=n1)
height_range = np.array([min(X), max(X)])
print(np.shape(hvec))
# Draw n2 = 100 random parameters from a uniform prior
# (note: the markdown above says 50 — the code uses 100).
n2 = 100
thetavec = lhs(1, samples=n2)
theta_range = np.array([1, 30])
print(np.shape(thetavec))
# Obtain computer model output
Y_model = timedrop(hvec, thetavec, height_range, theta_range)
print(np.shape(Y_model))
# -
# ### Building an emulator via XXX
# Make the package root (one directory up) importable so `base.emulation` resolves.
# NOTE(review): assumes the notebook is launched from the examples/ directory —
# confirm the working directory before relying on this.
SCRIPT_DIR = os.getcwd()
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, '..')))
from base.emulation import emulator
# #### Emulator without filtering
# Fit a PCGPwM emulator to the simulator output at the design points.
emulator_model = emulator(hvec, thetavec, Y_model, method = 'PCGPwM')
# Let's observe the quality of an emulator without filtering:
#Predict
# Rescale the first 21 observed heights to [0, 1]; Xscale is only inspected
# (its shape printed) — the prediction below is made at the design points hvec.
Xscale = (X[0:21] - min(X))/(max(X)- min(X))
print(np.shape(hvec))
print(np.shape(thetavec))
print(np.shape(Xscale))
pred_model = emulator_model.predict(hvec, thetavec)
pred_mean = pred_model.mean()
print(np.shape(pred_mean))
plt.scatter(X, Y, color = 'grey')
# One surrogate curve per parameter value (column of the predicted mean).
for i in range(np.shape(pred_mean)[1]):
    plt.plot(pred_mean[:, i])
plt.xlabel("height")
plt.ylabel("time")
plt.title("Computer model surrogates for different u")
plt.show()
# #### Emulator with filtering
# +
# Re-draw the design, now restricting the parameter range to u in [5, 15]
# ("filtering" out implausible parameter values before fitting the emulator).
n1 = 150
hvec = lhs(1, samples=n1)
height_range = np.array([min(X), max(X)])
print(np.shape(hvec))
# Draw n2 = 100 parameter values from the (scaled) uniform prior.
n2 = 100
thetavec = lhs(1, samples=n2)
theta_range = np.array([5, 15])
print(np.shape(thetavec))
# Obtain computer model output
Y_model = timedrop(hvec, thetavec, height_range, theta_range)
print(np.shape(Y_model))
# Refit the PCGPwM emulator on the filtered design.
emulator_model = emulator(hvec, thetavec, Y_model, method = 'PCGPwM')
#Predict
print(np.shape(hvec))
print(np.shape(thetavec))
pred_model = emulator_model.predict(hvec, thetavec)
pred_mean = pred_model.mean()
print(np.shape(pred_mean))
plt.scatter(X, Y, color = 'grey')
for i in range(np.shape(pred_mean)[1]):
    plt.plot(pred_mean[:, i])
plt.xlabel("height")
plt.ylabel("time")
plt.title("Computer model surrogates for different u")
plt.show()
# -
| examples/.ipynb_checkpoints/Example_Acceleration-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.7 64-bit
# language: python
# name: python37764bit0fb2af63a77e49af99dd377937c85efa
# ---
# # Indexing More Data
#
# You can use this notebook to index an area of interest. Add a Lat and Lon center and a buffer (but note that you can only index 10,000 STAC items at a time) and run the steps below to add more data to your local ODC index.
# +
import os
import datacube
from satsearch import Search
from odc.apps.dc_tools.stac_api_to_dc import stac_api_to_odc
# +
# Area of interest: a square box `buffer` degrees either side of the centre point.
lon_center = 15
lat_center = -15
buffer = 1
# STAC bbox convention: [min_lon, min_lat, max_lon, max_lat].
bbox = [lon_center - buffer, lat_center - buffer, lon_center + buffer, lat_center + buffer]
start_date = '2017-01-01'
end_date = '2021-01-01'
collections = ['sentinel-s2-l2a-cogs']
# Search payload shared by sat-search and the indexing helper.
config = {
    'collections': collections,
    'bbox': bbox,
    'datetime': f"{start_date}/{end_date}"
}
STAC_API_URL = 'https://explorer.sandbox.dea.ga.gov.au/stac/'
# sat-search reads the endpoint from this environment variable.
os.environ['STAC_API_URL'] = STAC_API_URL
# -
# Count matching STAC items before indexing (note the 10,000-item limit mentioned above).
srch = Search().search(**config)
found_items = srch.found()
print(f"Found {found_items} items that can be indexed")
# _Note that the following cell will potentially raise some errors or warnings, but should result in data being indexed still._
# +
dc = datacube.Datacube()
# NOTE(review): the positional arguments (None, False, False) are presumably
# limit/update/allow-unsafe flags — confirm against odc.apps.dc_tools' signature.
indexed, failed = stac_api_to_odc(dc, 's2_l2a', None, False, False, config)
print(f"Indexed {indexed} out of {found_items} with {failed} failures.")
| notebooks/Indexing_More_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:DeepLearn] *
# language: python
# name: conda-env-DeepLearn-py
# ---
#import os # only lib needed to cut the files
import glob # these two for splitting
import shutil
# %config Completer.use_jedi = False # disable Jedi so tab auto-completion works in this environment
# ### Move all images out of subfolders into 1 folder
# +
# get images of the scans with cancer cells and cuts it to a folder, only need this to run ONCE.
#src_dir = r'D:\Breast Cancer Diagnosis\Dataset\Cancer' + '\\' # source folder with subfolders that contain the images
#target_dir = r'D:\trimmed_dataset\Cancer'+ '\\' # destination which i am copying the files to
#for path, dir, files in os.walk(src_dir): # loop through all files in the source folder
#if files: # subfolders
#for img in files: # loop through every image in every subfolder
#if not os.path.isfile(target_dir + img): #if not a duplicate
#os.rename(path + '\\' + img, target_dir + img) # cut to new folder
# +
# get images of the scans with normal cells and cuts it to a folder, only need this to run ONCE.
#src_dir = r'D:\Breast Cancer Diagnosis\Dataset\Normal' + '\\' # source folder with subfolders that contain the images
#target_dir = r'D:\trimmed_dataset\Normal'+ '\\' # destination which i am copying the files to
#for path, dir, files in os.walk(src_dir): # loop through all files in the source folder
#if files: # subfolders
#for img in files: # loop through every image in every subfolder
#if not os.path.isfile(target_dir + img): #if not a duplicate
#os.rename(path + '\\' + img, target_dir + img) # cut to new folder
# -
# ### Split into Test and Train Folders
# +
# split the cancer folder into train folder.
# Train Folder is 80% of total imgs i.e. 2,172 images.
#src_dir = r'D:\trimmed_dataset\Cancer' + '//'
#target_dir = r'D:\trimmed_dataset\Train\Cancer' + '//'
#count = 0
#for img in glob.iglob(os.path.join(src_dir, "*.png")):
#shutil.move(img, target_dir)
#count += 1
#if (count == 2172):
#break
# Test Folder is 20% of total images i.e 544 images.
#src_dir = r'D:\trimmed_dataset\Cancer' + '//'
#target_dir = r'D:\trimmed_dataset\Test\Cancer' + '//'
#count = 0
#for img in glob.iglob(os.path.join(src_dir, "*.png")):
#shutil.move(img, target_dir)
#count += 1
#if (count == 544):
#break
# -
| models/Breast Cancer/Diagnosis/Breast Cancer Diagnosis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Enhanced Intake-ESM Catalog Demo
#
# This Jupyter Notebook compares the original Intake-ESM Catalog with an enhanced catalog that includes additional attributes. Both catalogs are an inventory of the NCAR Community Earth System Model (CESM)
# Large Ensemble (LENS) data hosted on AWS S3 ([doi:10.26024/wt24-5j82](https://doi.org/10.26024/wt24-5j82)).
#
# [Intake-esm Documentation](https://intake-esm.readthedocs.io/en/latest/notebooks/tutorial.html)
#
# ## Set up environment
import intake
import pandas as pd
import pprint
# Not used here:
###import numpy as np
###import xarray as xr
# Allow multiple lines per cell to be displayed without print (default is just last line)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Enable more explicit control of DataFrame display (e.g., to omit annoying line numbers)
from IPython.display import HTML
# ## Inspect original intake-esm catalog
# Open original collection description file
cat_url_orig = 'https://ncar-cesm-lens.s3-us-west-2.amazonaws.com/catalogs/aws-cesm1-le.json'
coll_orig = intake.open_esm_datastore(cat_url_orig)
print(coll_orig.esmcol_data['description']) #Description of collection
print("Catalog file:", coll_orig.esmcol_data['catalog_file'])
print(coll_orig) # Summary of collection structure
# Show expanded version of collection structure with details:
# the unique values of the four facet columns used to browse the catalog.
uniques_orig = coll_orig.unique(columns=["component", "frequency", "experiment", "variable"])
pprint.pprint(uniques_orig, compact=True, indent=1, width=80)
# Show the first few lines of the catalog. There are as many lines as there are paths.
# The order is the same as that of the CSV catalog file listed in the JSON description file.
print("Catalog file:", coll_orig.esmcol_data['catalog_file'])
df = coll_orig.df
# HTML(...) displays even though it is not the last statement because
# ast_node_interactivity was set to "all" earlier in this notebook.
HTML(df.head(10).to_html(index=False))
# **Table:** *First few lines of the original Intake-ESM Catalog showing the model component, the temporal frequency, the experiment, the abbreviated variable name, and the AWS S3 path for each Zarr store.*
# ## Finding Data
#
# If you happen to know the meaning of the variable names, you can find what data are available for that variable. For example:
# Filter the catalog to find available data for one variable
df = coll_orig.search(variable='FLNS').df
HTML(df.to_html(index=False))
# **Table:** *All available Zarr stores for the "FLNS" data.*
# Narrow the filter to a specific frequency and experiment
df = coll_orig.search(variable='FLNS', frequency='daily', experiment='RCP85').df
HTML(df.to_html(index=False))
# **Table:** *The single Zarr store for daily "FLNS" data from "RCP85" experiment.*
# ## The Problem
# Do all potential users know that "FLNS" is a CESM-specific abbreviation for "Net longwave flux at surface"? How would a novice user find out, other than by finding separate documentation, or by opening a Zarr store in the hopes that the long name might be recorded there? How do we address the fact that every climate model code seems to have a different, non-standard name for all the variables, thus making multi-source research needlessly difficult?
# ## Enhanced Intake-ESM Catalog
# By adding additional columns to the Intake-ESM catalog, we should be able to improve semantic interoperability and provide potentially useful information to the users.
# Open enhanced collection description file
cat_url = 'https://ncar-cesm-lens.s3-us-west-2.amazonaws.com/catalogs/aws-cesm1-le-enhanced.json'
coll = intake.open_esm_datastore(cat_url)
coll
print(coll.esmcol_data['description']) # Description of collection
print("Catalog file:", coll.esmcol_data['catalog_file'])
print(coll) # Summary of collection structure
# ### Long names
#
# In the summary above, note the addition of additional elements: long_name, start, end, and dim.
# Show the first few lines of the enhanced catalog.
print("Catalog file:", coll.esmcol_data['catalog_file'])
HTML(coll.df.head(10).to_html(index=False))
# **Table:** *First few lines of the enhanced catalog, listing of the same information as the original catalog as well as the long name of each variable and an indication of whether each variable is 2D or 3D.*
# **ISSUE:** *The long names are **not** CF Standard Names, but rather are those documented at http://www.cgd.ucar.edu/ccr/strandwg/CESM-CAM5-BGC_LENS_fields.html. For interoperability, the long_name column should be replaced by a cf_name column and possibly an attribute column to disambiguate if needed.*
# List all available variables by Long Name, sorted alphabetically
uniques = coll.unique(columns=['long_name'])
nameList = sorted(uniques['long_name']['values'])
print(*nameList, sep='\n') #note *list to unpack each item for print function
# Show all available data for a specific variable based on long name
myName = 'Salinity'
HTML(coll.search(long_name=myName).df.to_html(index=False))
# **Table:** *All available data in this catalog for selected variable*
# ### Substring matches
#
# **ISSUE:** The current version of intake-esm search() function requires an exact
# full-string case-sensitive match of the long_name.
# (This has been reported as an issue at https://github.com/NCAR/cesm-lens-aws/issues/48)
#
# Demonstrate a work-around: find all variables with a particular substring in the long name
# Case-insensitive substring search over the long names — a work-around for
# intake-esm's exact-match-only search().
myTerm = 'Wind'
myTerm = myTerm.lower() #search regardless of case
partials = [name for name in nameList if myTerm in name.lower()]
print(f"All datasets with name containing {myTerm}:")
print(*partials, sep='\n')
# Display full table for each match (could be lengthy if many matches)
for name in partials:
    df = coll.search(long_name=name).df[['component', 'dim', 'experiment', 'variable', 'long_name']]
    # Rendered on every iteration because ast_node_interactivity = "all" is set
    # earlier in the notebook; otherwise only the last expression would display.
    HTML(df.to_html(index=False))
    ###df.head(1) #show only first entry in each group for compactness
# Note: It is also possible to hide column(s) instead of specifying desired columns
###coll.search(long_name=name).df.drop(columns=['path'])
# **Table(s):** *Information about all matching datasets*
# **ISSUE:** The case-insensitive substring matching is not integrated into intake-esm, so it is not clear
# whether resulting search results can be passed directly to Xarray to read data.
# ### Other attributes
#
# Other columns in the enhanced catalog may be useful. For example, the dimensionality column enables us to list all data from the ocean component that is 3D.
df = coll.search(dim="3D",component="ocn").df
HTML(df.to_html(index=False))
# **Table:** *List of all 3D ocean datasets*
# ### Spatiotemporal filtering
#
# * If there were both regional and global data available (e.g., LENS and NA-CORDEX data for the same variable, both listed in same catalog), some type of coverage indicator (or columns for bounding box edges) could be listed.
# * Temporal extent in LENS is conveyed by the experiment (HIST, 20C, etc) but this is imprecise and requires external documentation. We have added start/end columns to the catalog, but intake-esm currently does not have built-in functionality to filter based on time.
#
#
# We can do a simple search that exactly matches a temporal value
df = coll.search(dim="3D",component="ocn", end='2100-12').df
HTML(df.to_html(index=False))
# **Table:** *List of all 3D ocean datasets with an end month of 2100 December. Note that because intake-esm currently requires an exact match this search would not find daily data with an end date of 2100 Dec 31.
# ### TO DO
#
# * Add substring search capability to intake-esm.
# * Add simple temporal search capability to intake-esm.
# * Consider adding spatial attributes (and appropriate intake-esm support) once we have some regional data available.
# * Have some tables in this demo only list only variable and long name rather than every single frequency and experiment.
| notebooks/EnhancedIntakeCatalogDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FloPy
# ## Parameter Estimation with FloPy
# This notebook demonstrates the current parameter estimation functionality that is available with FloPy. The capability to write a simple template file for PEST is the only capability implemented so far. The plan is to develop functionality for creating PEST instruction files as well as the PEST control file.
# +
import os
import sys
import numpy as np
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('flopy version: {}'.format(flopy.__version__))
# -
# This notebook will work with a simple model using the dimensions below
# +
# Define the model dimensions
nlay = 3
nrow = 20
ncol = 20
# Create the flopy model object and add the dis and lpf packages
m = flopy.modflow.Modflow(modelname='mymodel', model_ws='./data')
dis = flopy.modflow.ModflowDis(m, nlay, nrow, ncol)
lpf = flopy.modflow.ModflowLpf(m, hk=10.)
# -
# ### Simple One Parameter Example
# In order to create a PEST template file, we first need to define a parameter. For example, let's say we want to parameterize hydraulic conductivity, which is a static variable in flopy and MODFLOW. As a first step, let's define a parameter called HK_LAYER_1 and assign it to all of layer 1. We will not parameterize hydraulic conductivity for layers 2 and 3 and instead leave HK at its value of 10. (as assigned in the block above this one). We can do this as follows.
# +
# Parameterize horizontal hydraulic conductivity (hk) of the LPF package for
# layer 1 only; layers 2-3 keep the hk=10. assigned when the LPF package was built.
mfpackage = 'lpf'
partype = 'hk'
parname = 'HK_LAYER_1'
# Boolean index array marking which cells the parameter spans.
idx = np.empty((nlay, nrow, ncol), dtype=bool)
idx[0] = True
idx[1:] = False
# The span variable defines how the parameter spans the package
span = {'idx': idx}
# These parameters have no effect yet, but may in the future
startvalue = 10.
lbound = 0.001
ubound = 1000.
transform='log'
p = flopy.pest.Params(mfpackage, partype, parname, startvalue,
                      lbound, ubound, span)
# -
# At this point, we have enough information to the write a PEST template file for the LPF package. We can do this using the following statement:
tw = flopy.pest.TemplateWriter(m, [p])
tw.write_template()
# At this point, the lpf template file will have been created. The following block will print the template file.
lines = open('./data/mymodel.lpf.tpl', 'r').readlines()
for l in lines:
print(l.strip())
# The span variable will also accept 'layers', in which the parameter applies to the list of layers, as shown next. When 'layers' is specified in the span dictionary, then the original hk value of 10. remains in the array, and the multiplier is specified on the array control line.
# +
# Same hk parameter, but spanning whole layers: the parameter acts as a
# multiplier on the existing array values rather than replacing them.
mfpackage = 'lpf'
partype = 'hk'
parname = 'HK_LAYER_1-3'
# Span indicates that the hk parameter applies as a multiplier to layers 0 and 2 (MODFLOW layers 1 and 3)
span = {'layers': [0, 2]}
# These parameters have no effect yet, but may in the future
startvalue = 10.
lbound = 0.001
ubound = 1000.
transform='log'
p = flopy.pest.Params(mfpackage, partype, parname, startvalue,
                      lbound, ubound, span)
tw = flopy.pest.templatewriter.TemplateWriter(m, [p])
tw.write_template()
# -
lines = open('./data/mymodel.lpf.tpl', 'r').readlines()
for l in lines:
print(l.strip())
# ### Multiple Parameter Zoned Approach
#
# The params module has a helper function called zonearray2params that will take a zone array and some other information and create a list of parameters, which can then be passed to the template writer. This next example shows how to create a slightly more complicated LPF template file in which both HK and VKA are parameterized.
# Create a zone array
zonearray = np.ones((nlay, nrow, ncol), dtype=int)
zonearray[0, 10:, 7:] = 2
zonearray[0, 15:, 9:] = 3
zonearray[1] = 4
# Create a list of parameters for HK
mfpackage = 'lpf'
parzones = [2, 3, 4]
parvals = [56.777, 78.999, 99.]
lbound = 5
ubound = 500
transform = 'log'
plisthk = flopy.pest.zonearray2params(mfpackage, 'hk', parzones, lbound,
ubound, parvals, transform, zonearray)
# In this case, Flopy will create three parameters: hk_2, hk_3, and hk_4, which will apply to the horizontal hydraulic conductivity for cells in zones 2, 3, and 4, respectively. Only those zone numbers listed in parzones will be parameterized. For example, many cells in zonearray have a value of 1. Those cells will not be parameterized. Instead, their hydraulic conductivity values will remain fixed at the value that was specified when the Flopy LPF package was created.
# Create a list of parameters for VKA
parzones = [1, 2]
parvals = [0.001, 0.0005]
zonearray = np.ones((nlay, nrow, ncol), dtype=int)
zonearray[1] = 2
plistvk = flopy.pest.zonearray2params(mfpackage, 'vka', parzones, lbound,
ubound, parvals, transform, zonearray)
# Combine the HK and VKA parameters together
plist = plisthk + plistvk
for p in plist:
print(p.name, p.mfpackage, p.startvalue)
# Write the template file
tw = flopy.pest.templatewriter.TemplateWriter(m, plist)
tw.write_template()
# Print contents of template file
lines = open('./data/mymodel.lpf.tpl', 'r').readlines()
for l in lines:
print(l.strip())
# ## Two-Dimensional Transient Arrays
#
# Flopy supports parameterization of transient two dimensional arrays, like recharge. This is similar to the approach for three dimensional static arrays, but there are some important differences in how span is specified. The parameter span here is also a dictionary, and it must contain a 'kpers' key, which corresponds to a list of stress periods (zero based, of course) for which the parameter applies. The span dictionary must also contain an 'idx' key. If span['idx'] is None, then the parameter is a multiplier for those stress periods. If span['idx'] is a tuple (iarray, jarray), where iarray and jarray are a list of array indices, or a boolean array of shape (nrow, ncol), then the parameter applies only to the cells specified in idx.
# +
# Define the model dimensions (made smaller for easier viewing)
nlay = 3
nrow = 5
ncol = 5
nper = 3
# Create the flopy model object and add the dis and lpf packages
m = flopy.modflow.Modflow(modelname='mymodel', model_ws='./data')
dis = flopy.modflow.ModflowDis(m, nlay, nrow, ncol, nper=nper)
lpf = flopy.modflow.ModflowLpf(m, hk=10.)
rch = flopy.modflow.ModflowRch(m, rech={0: 0.001, 2: 0.003})
# -
# Next, we create the parameters
plist = []
# +
# Create a multiplier parameter for recharge
mfpackage = 'rch'
partype = 'rech'
parname = 'RECH_MULT'
# Start value and bounds are not used by the template writer at present.
startvalue = None
lbound = None
ubound = None
transform = None
# For a recharge multiplier, span['idx'] must be None
idx = None
# 'kpers' lists the (zero-based) stress periods the parameter applies to.
span = {'kpers': [0, 1, 2], 'idx': idx}
p = flopy.pest.Params(mfpackage, partype, parname, startvalue,
                      lbound, ubound, span)
plist.append(p)
# -
# Write the template file
tw = flopy.pest.TemplateWriter(m, plist)
tw.write_template()
# Print the results
lines = open('./data/mymodel.rch.tpl', 'r').readlines()
for l in lines:
print(l.strip())
# Multiplier parameters can also be combined with index parameters as follows.
# +
plist = []
# Create a multiplier parameter for recharge
mfpackage = 'rch'
partype = 'rech'
parname = 'RECH_MULT'
startvalue = None
lbound = None
ubound = None
transform = None
# For a recharge multiplier, span['idx'] must be None
span = {'kpers': [1, 2], 'idx': None}
p = flopy.pest.Params(mfpackage, partype, parname, startvalue,
lbound, ubound, span)
plist.append(p)
# +
# Now create an index parameter
mfpackage = 'rch'
partype = 'rech'
parname = 'RECH_ZONE'
startvalue = None
lbound = None
ubound = None
transform = None
# For a recharge index parameter, span['idx'] must be a boolean array or tuple of array indices
idx = np.empty((nrow, ncol), dtype=bool)
idx[0:3, 0:3] = True
span = {'kpers': [1], 'idx': idx}
p = flopy.pest.Params(mfpackage, partype, parname, startvalue,
lbound, ubound, span)
plist.append(p)
# +
# Write the template file
tw = flopy.pest.templatewriter.TemplateWriter(m, plist)
tw.write_template()
# Print the results
lines = open('./data/mymodel.rch.tpl', 'r').readlines()
for l in lines:
print(l.strip())
# -
| examples/Notebooks/flopy3_PEST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv('Bank_Marketing.csv',delimiter=';')
df.head()
# -
df.shape
df.info()
df.describe()
df['job'].unique()
df['job'].value_counts()
df['education'].unique()
df['education'].value_counts()
df['default'].unique()
df['default'].value_counts()
df['housing'].unique()
df['housing'].value_counts()
df['loan'].unique()
df['loan'].value_counts()
df['contact'].unique()
df['contact'].value_counts()
df['month'].unique()
df['month'].value_counts()
df['day_of_week'].unique()
df['day_of_week'].value_counts()
df['poutcome'].unique()
df['poutcome'].value_counts()
df['y'].unique()
df['y'].value_counts()
# +
#conversion of object into category and then into numeric
for col in df.columns:
if(df[col].dtype=='object'):
df[col] = df[col].astype('category')
df[col] = df[col].cat.codes
# -
df.info()
df.head()
# +
# Split the dataset into a training set (70%) and a testing set (30%).
x = df.drop('y', axis=1)
y = df['y']
# `sklearn.cross_validation` was deprecated in scikit-learn 0.18 and removed
# in 0.20; `train_test_split` now lives in `sklearn.model_selection`.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=5)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# -
x.head()
# +
#K โ Nearest Neighbour (KNN) Classifier
from sklearn.neighbors import KNeighborsClassifier
model_KNN=KNeighborsClassifier().fit(x_train,y_train)
print('Accuracy of K โ Nearest Neighbour Classifier is {:.2f}' .format(model_KNN.score(x_test,y_test)))
# +
#Logistic Regression
from sklearn.linear_model import LogisticRegression
model_lr=LogisticRegression().fit(x_train,y_train)
print("Accuracy of Logistic Regression is {:.2f}" .format(model_lr.score(x_test,y_test)))
# +
#Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
model_rf=RandomForestClassifier().fit(x_train,y_train)
print("Accuracy of Random Forest Classifier is {:.2f}" .format(model_rf.score(x_test,y_test)))
# +
#Support Vector Machine (SVM) Classifier
from sklearn.svm import SVC
model_svm=SVC().fit(x_train,y_train)
print("Accuracy of Support Vector Machine Classifier is {:.2f}" .format(model_svm.score(x_test,y_test)))
# +
#Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
model_dt=DecisionTreeClassifier().fit(x_train,y_train)
print("Accuracy of Decision Tree Classifier is {:.2f}" .format(model_dt.score(x_test,y_test)))
# +
#Gaussian Naive Bayes Classifier
from sklearn.naive_bayes import GaussianNB
model_gnb=GaussianNB().fit(x_train,y_train)
print("Accuracy of Gaussian Naive Bayes Classifier is {:.2f}" .format(model_gnb.score(x_test,y_test)))
# +
# Gradient Boosting Regressor — note this is a regressor, so .score() returns
# the R^2 coefficient of determination, not a classification accuracy.
from sklearn.ensemble import GradientBoostingRegressor
model_gbr=GradientBoostingRegressor().fit(x_train,y_train)
print("R^2 score of Gradient Boosting Regressor is {:.2f}" .format(model_gbr.score(x_test,y_test)))
# +
# AdaBoost Classifier
from sklearn.ensemble import AdaBoostClassifier
model_ab=AdaBoostClassifier().fit(x_train,y_train)
# Fixed copy-paste error: the label previously said "Gradient Boosting Regressor".
print("Accuracy of AdaBoost Classifier is {:.2f}" .format(model_ab.score(x_test,y_test)))
# +
#Linear Discriminant Analysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model_lda=LinearDiscriminantAnalysis().fit(x_train,y_train)
print('Accuracy of Linear Discriminant Analysis is {:.2f}' .format(model_lda.score(x_test,y_test)))
# +
# K-Nearest Neighbour (KNN) classifier: sweep k to find the best value.
from sklearn.neighbors import KNeighborsClassifier
# experimenting with different k values on which KNN gives more accurate result
# NOTE(review): k is tuned directly against the test set, so the reported
# "best" accuracy is optimistically biased; a validation split or CV would
# be more rigorous. Also, refitting KNN 999 times can be slow.
score=[]
k_range=range(1,1000)
A_max=0
k_best=1
for k in k_range:
    model_knn=KNeighborsClassifier(n_neighbors=k)
    model_knn.fit(x_train,y_train)
    accu_knn=model_knn.score(x_test,y_test)
    score.append(accu_knn)
    # Keep the smallest k achieving the best test accuracy seen so far.
    if(A_max<accu_knn):
        A_max=accu_knn
        k_best=k
plt.plot(k_range,score)
plt.xlabel('Values of K for KNN')
plt.ylabel('Accuracy Score')
plt.title('Accuracy Scores for Values of k of k-Nearest-Neighbors')
plt.show()
print('KNN gives Maximum Accuracy is {0} and least value of K for maximum accuracy is {1}' .format(A_max,k_best))
# -
| Bank_Marketing/Bank_Marketing.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ### 2.5 The Mitscherlich NPK surface response model
# In order to be compared with ML models, we created a Mitscherlich surface model with fertilizer-dependent rates and environments and a single asymptote. Our model is inspired by [Dodds et al. (1995)](https://link.springer.com/article/10.1007/BF00790661).
#
# $$ yield = A \times \left( 1- e^{-R_N \times \left( E_N + dose_N \right)} \right) \times \left( 1- e^{-R_P \times \left( E_P + dose_P \right)} \right) \times \left( 1- e^{-R_K \times \left( E_K + dose_K \right)} \right) $$
# Import libraries
library("tidyverse") # data manipulation
library("stringr") # strings operations
#library("fastDummies") # dummy_columns()
library("nlme") # regression
library("Metrics") # MAE, RMSE
source('lib/Mitschmm.R') # custom functions
# Import the datasets
#
# For comparison with the machine learning models, we must use the same data sets for modeling.
X_train <- read.csv("output/x_train.csv")
X_test <- read.csv("output/x_test.csv")
Y_train <- read.csv("output/y_train.csv")
Y_test <- read.csv("output/y_test.csv")
train_id_table <- read.csv("output/train_id_table.csv")
test_id_table <- read.csv("output/test_id_table.csv")
# All the variables are scaled. For the Mitscherlich modeling purpose, fertilizers and response variables are put back to their original values.
mean_numvars <- read.csv("output/mean_numvars.csv", header = FALSE)[, 1]
std_numvars <- read.csv("output/std_numvars.csv", header = FALSE)[, 1]
mean_respvars <- read.csv("output/mean_respvars.csv", header = FALSE)[1, 1]
std_respvars <- read.csv("output/std_respvars.csv", header = FALSE)[1, 1]
# +
# Unscale fertilizer doses back to their original units.
# NOTE(review): indices 7-9 of mean_numvars/std_numvars are assumed to be the
# positions of NtotDose, PtotDose and KtotDose in the scaled feature table —
# confirm against the preprocessing notebook that wrote these CSV files.
X_train_sc <- X_train
X_train_sc$NtotDose <- X_train$NtotDose * std_numvars[7] + mean_numvars[7]
X_train_sc$PtotDose <- X_train$PtotDose * std_numvars[8] + mean_numvars[8]
X_train_sc$KtotDose <- X_train$KtotDose * std_numvars[9] + mean_numvars[9]
# Unscale the response (marketable yield) as well.
Y_train_sc <- Y_train
Y_train_sc$RendVendable <- Y_train$RendVendable * std_respvars + mean_respvars
# +
# Apply the identical unscaling to the test set.
X_test_sc <- X_test
X_test_sc$NtotDose <- X_test$NtotDose * std_numvars[7] + mean_numvars[7]
X_test_sc$PtotDose <- X_test$PtotDose * std_numvars[8] + mean_numvars[8]
X_test_sc$KtotDose <- X_test$KtotDose * std_numvars[9] + mean_numvars[9]
Y_test_sc <- Y_test
Y_test_sc$RendVendable <- Y_test$RendVendable * std_respvars + mean_respvars
# -
df_mm_tr <- bind_cols(train_id_table, X_train_sc, Y_train_sc)
df_mm_te <- bind_cols(test_id_table, X_test_sc, Y_test_sc)
df_mm <- bind_rows(df_mm_tr, df_mm_te)
df_mm$NoEssai <- factor(df_mm$NoEssai)
df_mm$NoBloc <- factor(df_mm$NoBloc)
df_mm <- df_mm %>% select(-starts_with('X'))
keys_col <- c('NoEssai', 'NoBloc', 'NoTraitement')
num_vars <- c('DensitePlants', 'growing.season',
'temp_moy_5years', 'prec_tot_5years', 'sdi_5years', 'gdd_5years',
#'NtotDose', 'PtotDose', 'KtotDose',
'soilTextIlr1', 'soilTextIlr2', 'soilTextIlr3',
'soilTypeIlr1_3', 'soilTypeIlr2_3',
'soil_pH',
'soil_P1_Fv.AlP', 'soil_P1_Al.P', 'soil_K2_FvMgCa.K', 'soil_K2_Fv.MgCa', 'soil_K2_Mg.Ca')
#cat_vars <- 'PrecCropFiveClasses'
resp_vars <- 'RendVendable'
dose_vars <- c('NtotDose', 'PtotDose', 'KtotDose')
# ### The multilevel model
#
# The __Mitscherlich__ ordinary model approach was selected in a trivariate response scheme as mentioned in introduction. All the parameters starting values were set to zero.
# Build the vector of starting values for the covariate coefficients: one zero
# per numeric covariate, or one zero per non-reference factor level should any
# of the columns be factors.
start_list <- list()
for (i in 1:length(num_vars)) {
    if (is.factor(df_mm[num_vars[i]][[1]])) {
        start_list[[i]] <- rep(0, length(levels(df_mm[num_vars[i]][[1]]))-1)
    } else {
        start_list[[i]] <- 0
    }
}
start_vector <- unlist(start_list)
# As presented by Dodds et al. (1996), to model the response of three variables (__N__, __P__ and __K__ treatments), we would like to have an equation which is a Mitscherlich curve (or similar to it) in each dimension.
# - Asymptote `(Asym)` is the yield toward which the curve converges with increasing dose.
# - Environment `(Env)` describes the fertilizer-equivalent dose provided by environmental conditions, and
# - Rate `(Rate)` is the steepness of the curve relating Environment to Asymptote.
#
# The __Asym__ is unique while __Env__ and __Rate__ are defined for each fertilizer. Hence, we defined fertilizer-equivalent doses provided by environmental conditions for N `(Env_N)`, P `(Env_P)`, and K `(Env_K)`, and three steepnesses `(Rate_N)`, `(Rate_P)` and `(Rate_K)` respectively. These first level parameters are linear combinations of experimental conditions with random effects applied only on `Asym`. The right-hand side `(rhs)` of the formula to construct the linear combinations in the `nlme` model is given by:
rhs <- paste(num_vars, collapse = "+")
rhs
# This chain defines the non-linear mixed effects model using the training dataset `df_mm_tr`.
# Fit the trivariate Mitscherlich response surface as a non-linear mixed model.
# Each first-level parameter (Asym, Rate_*, Env_*) is a linear combination of
# the covariates in `rhs`; a random intercept on Asym accounts for blocks
# nested within trials.
mm_NPK <- nlme(RendVendable ~ Asym * ((1-exp(-Rate_N*(NtotDose+Env_N)))) * ((1-exp(-Rate_P*(PtotDose+Env_P)))) * ((1-exp(-Rate_K*(KtotDose+Env_K)))),
               data = df_mm_tr,
               # Starting values: a guessed intercept for each first-level
               # parameter, followed by zeros for every covariate coefficient.
               start = c(Asym = 40, start_vector,
                         Rate_N = 0.1, start_vector,
                         Env_N = 50, start_vector,
                         Rate_P = 0.1, start_vector,
                         Env_P = 50, start_vector,
                         Rate_K = 0.05, start_vector,
                         Env_K = 100, start_vector
               ),
               fixed = list(as.formula(paste("Asym ~ ", rhs)),
                            as.formula(paste("Rate_N ~ ", rhs)),
                            as.formula(paste("Env_N ~ ", rhs)),
                            as.formula(paste("Rate_P ~ ", rhs)),
                            as.formula(paste("Env_P ~ ", rhs)),
                            as.formula(paste("Rate_K ~ ", rhs)),
                            as.formula(paste("Env_K ~ ", rhs))
               ),
               # Random intercept on the asymptote only, blocks nested in trials.
               random = Asym ~ 1 | NoEssai/NoBloc,
               # returnObject = TRUE keeps the fit even if maxIter is reached.
               control = list(maxIter = 100, returnObject = TRUE,
                              msVerbose = FALSE, minScale = 1e-8),
               method = 'REML')
# ### Model goodness of fit assessement
# We check the goodness of fit using the R-square, MAE and RSME values at level __0__ (_without random effects of blocks and trials_).
good_tr <- data.frame(Train_R2 = rsq(y = df_mm_tr$RendVendable, y_hat = predict(object = mm_NPK, level = 0)),
Train_MAE = mae(actual = df_mm_tr$RendVendable, predicted = predict(object = mm_NPK, level = 0)),
Train_RMSE = rmse(actual = df_mm_tr$RendVendable, predicted = predict(object = mm_NPK, level = 0)))
good_tr
# Goodness of fit with test set
#
# Because the function `predict()` runs into a bug with new data when used with our custom model, the chunks below use a custom function `pred_mitsch()`.
# + active=""
# pred_mitsch(mm = mm_NPK, newdata = df_mm_te, rhs = rhs)$pred
# -
good_te <- data.frame(Test_R2 = rsq(y = df_mm_te$RendVendable,
y_hat = pred_mitsch(mm = mm_NPK, newdata = df_mm_te, rhs = rhs)$pred),
Test_MAE = mae(actual = df_mm_te$RendVendable,
predicted = pred_mitsch(mm = mm_NPK, newdata = df_mm_te, rhs = rhs)$pred),
Test_RMSE = rmse(actual = df_mm_te$RendVendable,
predicted = pred_mitsch(mm = mm_NPK, newdata = df_mm_te, rhs = rhs)$pred))
good_te
mitsch_train_test_scores <- bind_cols(good_tr, good_te)
#colnames(mitsch_train_test_scores) <- gsub("_"," ", colnames(mitsch_train_test_scores))
rownames(mitsch_train_test_scores) = "Mitscherlich"
write_csv(mitsch_train_test_scores, 'output/mitsch_train_test_scores.csv')
mitsch_train_test_scores
# ### Point estimation: prediction of economic optimal __NPK__ dosage
#
# #### Create a table to model
#
# The same random row used in the __`1.4.1_ml-marketable-yield-model.ipynb`__ notebook is repoted here as the example where the model is used for diagnosis. The row is referenced by its `NoEssai-NoBloc-NoTraitement` identifyer.
r_sample <- read.csv('output/r_sample.csv')
reference_row <- df_mm %>%
filter(ID == r_sample$ID)
# Instead of generating a grid of values (_which is numerically inefficient_), I generate random NPK doses from _uniform distributions_ on plausible doses. The random uniform `runif()` function gives a random floating point number in a given range.
n_grid_samples <- 1000 # number of samples
N_range <- c(0, 250)
P_range <- c(0, 250)
K_range <- c(0, 250)
# We grab the _reference_row_ with only the columns we need for modeling. The last part to create the table is to stack the (_selected_) observation a number of times equal to the __dose_grid__ table length, so that it conveys the same information at each row. Then replace only __NPK__ doses with the one sampled randomly: the __dose_grid__.
reference_stack <- data.frame(matrix(nrow = n_grid_samples, ncol = ncol(reference_row)))
colnames(reference_stack) <- colnames(reference_row)
for (i in 1:ncol(reference_stack)) {
reference_stack[, i] = reference_row[1, i]
}
set.seed(936492)
reference_stack$NtotDose <- runif(n_grid_samples, N_range[1], N_range[2])
reference_stack$PtotDose <- runif(n_grid_samples, P_range[1], P_range[2])
reference_stack$KtotDose <- runif(n_grid_samples, K_range[1], K_range[2])
# The test dataset prediction
# The optimal economical dose is where the marginal benefit reaches its maximum. Rates are estimated based on experience.
N_cost = 1.2 # unit fertilizer N cost
P_cost = 1.1 # unit fertilizer P2O5 cost
K_cost = 0.9 # unit fertilizer K2O cost
price_yield = 250 # unit crop yield price (1 Mg)
# I have fertilizer doses, so I can compute the costs. I can also compute revenue from yield, and marginal benefits by subtracting marginal costs from revenue (__profit from yield - fertilizers cost__).
reference_stack <- reference_stack %>%
mutate(Prediction = pred_mitsch(mm = mm_NPK, newdata = ., rhs = rhs)$pred,
cost = NtotDose * N_cost + PtotDose * P_cost + KtotDose * K_cost,
revenue = Prediction * price_yield,
benefit = revenue - cost)
# + active=""
# glimpse(reference_stack)
# -
# I isolate the row where the marginal benefit is maximum, and its informations.
predMitsch <- reference_stack[which.max(reference_stack$benefit), c(dose_vars, 'Prediction', 'benefit')] %>%
select(-benefit) %>% rename(Output = Prediction) %>% mutate(Target = "Yield")
predMitsch$PtotDose <- predMitsch$PtotDose*0.436
predMitsch$KtotDose <- predMitsch$KtotDose*0.830
write_csv(predMitsch, 'output/predMitsch.csv')
predMitsch
# Plot for the selected trial type (_NB : les 3 elements ont รฉtรฉ variรฉs ร la fois_)
options(repr.plot.wight = 4, repr.plot.height = 4)
df_mm %>%
filter(NoEssai == r_sample$NoEssai) %>%
ggplot(aes(x = PtotDose, y = RendVendable)) +
geom_point() +
geom_line(aes(x = PtotDose, y = Prediction), data = reference_stack) # a P trial
# ### Prediction specific to trial type
# Reference trial where only __N__ dose varied: `(test_type = N)`
test_types <- read.csv('output/test_types.csv')
reference_row <- df_mm %>%
filter(ID == test_types$ID[1])
test_types
n_grid_samples <- 251 # number of samples
N_range <- c(0, 250)
P_range <- c(reference_row$PtotDose, reference_row$PtotDose)
K_range <- c(reference_row$KtotDose, reference_row$KtotDose)
reference_stack <- data.frame(matrix(nrow = n_grid_samples, ncol = ncol(reference_row)))
colnames(reference_stack) <- colnames(reference_row)
for (i in 1:ncol(reference_stack)) {
reference_stack[, i] = reference_row[1, i]
}
set.seed(936492)
reference_stack$NtotDose <- runif(n_grid_samples, N_range[1], N_range[2])
reference_stack$PtotDose <- runif(n_grid_samples, P_range[1], P_range[2])
reference_stack$KtotDose <- runif(n_grid_samples, K_range[1], K_range[2])
# The prediction
reference_stack <- reference_stack %>%
mutate(Prediction = pred_mitsch(mm = mm_NPK, newdata = ., rhs = rhs)$pred,
cost = NtotDose * N_cost + PtotDose * P_cost + KtotDose * K_cost,
revenue = Prediction * price_yield,
benefit = revenue - cost)
# The optimum
reference_stack[which.max(reference_stack$benefit), c(dose_vars, 'Prediction', 'benefit')]
# Save predictions for N trial
write_csv(reference_stack[, c(dose_vars, 'Prediction', 'benefit')], 'output/yield_mitsch_N.csv')
# Plot for the N trial
options(repr.plot.wigth = 4, repr.plot.height = 4)
df_mm %>%
filter(NoEssai == reference_row$NoEssai) %>%
ggplot(aes(x = NtotDose, y = RendVendable)) +
geom_point() +
geom_line(aes(x = NtotDose, y = Prediction), data = reference_stack)
# Reference trial where only __P__ dose varied: `(test_type = P)`
reference_row <- df_mm %>%
filter(ID == test_types$ID[2])
n_grid_samples <- 251 # number of samples
N_range <- c(reference_row$NtotDose, reference_row$NtotDose)
P_range <- c(0, 250)
K_range <- c(reference_row$KtotDose, reference_row$KtotDose)
reference_stack <- data.frame(matrix(nrow = n_grid_samples, ncol = ncol(reference_row)))
colnames(reference_stack) <- colnames(reference_row)
for (i in 1:ncol(reference_stack)) {
reference_stack[, i] = reference_row[1, i]
}
set.seed(936492)
reference_stack$NtotDose <- runif(n_grid_samples, N_range[1], N_range[2])
reference_stack$PtotDose <- runif(n_grid_samples, P_range[1], P_range[2])
reference_stack$KtotDose <- runif(n_grid_samples, K_range[1], K_range[2])
# The prediction
reference_stack <- reference_stack %>%
mutate(Prediction = pred_mitsch(mm = mm_NPK, newdata = ., rhs = rhs)$pred,
cost = NtotDose * N_cost + PtotDose * P_cost + KtotDose * K_cost,
revenue = Prediction * price_yield,
benefit = revenue - cost,
P = PtotDose*0.436)
# The optimum
reference_stack[which.max(reference_stack$benefit), c(dose_vars, 'Prediction', 'benefit', 'P')]
# Save predictions for P trial
write_csv(reference_stack[, c(dose_vars, 'Prediction', 'benefit', 'P')], 'output/yield_mitsch_P.csv')
# Plot for the P trial
df_mm %>%
filter(NoEssai == reference_row$NoEssai) %>%
ggplot(aes(x = PtotDose, y = RendVendable)) +
geom_point() +
geom_line(aes(x = PtotDose, y = Prediction), data = reference_stack)
# Reference trial where only __K__ dose varied: `(test_type = K)`
reference_row <- df_mm %>%
filter(ID == test_types$ID[3])
n_grid_samples <- 251 # number of samples
N_range <- c(reference_row$NtotDose, reference_row$NtotDose)
P_range <- c(reference_row$PtotDose, reference_row$PtotDose)
K_range <- c(0, 250)
reference_stack <- data.frame(matrix(nrow = n_grid_samples, ncol = ncol(reference_row)))
colnames(reference_stack) <- colnames(reference_row)
for (i in 1:ncol(reference_stack)) {
reference_stack[, i] = reference_row[1, i]
}
set.seed(936492)
reference_stack$NtotDose <- runif(n_grid_samples, N_range[1], N_range[2])
reference_stack$PtotDose <- runif(n_grid_samples, P_range[1], P_range[2])
reference_stack$KtotDose <- runif(n_grid_samples, K_range[1], K_range[2])
# The prediction
reference_stack <- reference_stack %>%
mutate(Prediction = pred_mitsch(mm = mm_NPK, newdata = ., rhs = rhs)$pred,
cost = NtotDose * N_cost + PtotDose * P_cost + KtotDose * K_cost,
revenue = Prediction * price_yield,
benefit = revenue - cost,
K = KtotDose*0.830)
# The optimum
reference_stack[which.max(reference_stack$benefit), c(dose_vars, 'Prediction', 'benefit', 'K')]
# Save predictions for K trial
write_csv(reference_stack[, c(dose_vars, 'Prediction', 'benefit', 'K')], 'output/yield_mitsch_K.csv')
# Plot for the K trial
df_mm %>%
filter(NoEssai == reference_row$NoEssai) %>%
ggplot(aes(x = KtotDose, y = RendVendable)) +
geom_point() +
geom_line(aes(x = KtotDose, y = Prediction), data = reference_stack)
# + active=""
#
| 2.5_NPK_multilevel_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>WORDS, WORDS, WORDS...</h1>
#
# <h2>The Beatles Songs Analysis Project</h2>
#
# Here I conduct the comparison of general statistical information about word numbers in titles and lyrics, taking into consideration such parameters as release year and originality.
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# this allows plots to appear directly in the notebook
# %matplotlib inline
# load data
df = pd.read_json('data.json')
# configure graphics
plt.style.use('ggplot')
plt.figure()
color = dict(boxes='DarkGreen', whiskers='DarkOrange', medians='DarkBlue', caps='Gray')
# -
# Here I am going to analyze two songs features: number of words in titles and number of words in lyrics. Please note that this data have not been adjusted in any way: all articles and prepositions are present, and <a href="https://en.wikipedia.org/wiki/Wikipedia:List_of_English_contractions">contractions</a> are counted as one word. Some songs contain words in brackets in lyrics, these are counted too. The numbers were already counted after downloading the data (see the <a href="https://github.com/dpol2000/tbsa/blob/master/get_data.py">script</a>) and are present in the file.
#
# First, let's analyze the number of words in songs titles in the same fashion as we have done with songs lengths in <a href="http://www.hrutr.net/entries/tbsa-step-1-second-by-second-length-above-all">Step 1</a>. In contrast to length, this data has a very short range. A plotbox goes first.
df['num_words_in_title'].plot.box(color=color, sym='r+').set_ylabel('Number of words')
# The median number is three. Now let's use a histogram to see the frequency of each number, and look at the common statistic information.
# histogram
plt.title('Frequency of number of words in titles')
df['num_words_in_title'].hist(figsize=(12, 5)).set_ylabel('Number of songs')
# But absolute number of songs in each category doesn't say much; let's count relative figures.
# number of words in titles in proportion to number of songs
df_rlt = df['num_words_in_title'].value_counts() / df['num_words_in_title'].describe()['count'] * 100
df_rlt.plot(kind="bar", figsize=(12, 5), rot=0,
title="Frequency of number of words in titles").set_ylabel('Number of songs, %')
# So we can see that the songs with the most common title length comprise about one third of all songs. The songs with 2, 3, and 4 words in their titles comprise in common more than two thirds:
# percentage of the songs with 2,3, or 4 words in their titles
df_rlt.values[0] + df_rlt.values[1] + df_rlt.values[2]
# There are several songs with the minimal title length (one word, of course), but which song has the maximum number (it is 10)? And from which album?
# the song with a title of maximum length
df[df['num_words_in_title']==df['num_words_in_title'].describe()['max']].title.iat[0]
# the song with maximum title length's album name
df[df['num_words_in_title']==df['num_words_in_title'].describe()['max']].album_title.iat[0]
# What can be seen from the time series?
df_sorted = df.sort_values(by='year')
df_sorted['num_words_in_title'].plot(kind='bar', figsize=(12, 5),
title="Number of words in titles, changes with time").xaxis.set_visible(False)
# We see that the number of words follow a similar pattern in the first half; there are titles of different length every year, but the longest doesn't exceed 7. And then, in the second half, titles with 8 and 10 words appear (titles with one word title appear in the both halves). So the variance is clearly higher in the second half. Let's look at the means, medians and variance by year.
df_by_year = df_sorted.groupby('year')
fig, axs = plt.subplots(1, 3, sharey=True)
axs[0].set_title('Mean by year')
axs[1].set_title('Median by year')
axs[2].set_title('Standard deviation by year')
df_by_year['num_words_in_title'].aggregate(np.mean).plot(kind='bar', ax=axs[0], figsize=(16, 8))
df_by_year['num_words_in_title'].aggregate(np.median).plot(kind='bar', ax=axs[1])
df_by_year['num_words_in_title'].aggregate(np.std).plot(kind='bar', ax=axs[2])
# We can see that mean and median are almost the same, with the peak in 1967. Overall the mean title length is three words, ascending to four in 1967 (with median falling to two in 1968).
#
# No surprise that the variance is higher in the second half. It has its peak in 1968. But the lowest point also lies in the second half, in 1970. So it's generally rises, reaches its peak and then falls down. Let's check if the second half is really less uniform!
df_63_66 = df[df['year'].isin(range(1963, 1967))]
df_67_70 = df[df['year'].isin(range(1967, 1971))]
df_63_66['num_words_in_title'].std()
df_67_70['num_words_in_title'].std()
# So, the second half is really less uniform in the number of words in titles as the variance of the second half is higher.
#
# Now let's examine the number of words in lyrics.
df_sorted['num_words_in_lyrics'].plot(kind='bar', figsize=(12, 5),
title="Number of words in lyrics, changes with time").xaxis.set_visible(False)
# It seems that the second half again has higher variance.
df_63_66['num_words_in_lyrics'].std()
df_67_70['num_words_in_lyrics'].std()
# So the impression from the picture was right. Let's look at general statistical parameters and plots now.
df_sorted['num_words_in_lyrics'].describe()
# So the mean number of words in lyrics is 182. Let's plot mean, median and standard deviation by year.
fig, axs = plt.subplots(1, 3, sharey=True)
axs[0].set_title('Means by year')
axs[1].set_title('Medians by year')
axs[2].set_title('Standard deviations by year')
df_by_year['num_words_in_lyrics'].aggregate(np.mean).plot(kind='bar', figsize=(16, 8), ax=axs[0])
df_by_year['num_words_in_lyrics'].aggregate(np.median).plot(kind='bar', ax=axs[1])
df_by_year['num_words_in_lyrics'].aggregate(np.std).plot(kind='bar', ax=axs[2])
# Again variance is higher in the second half, but this time its peak is in 1969.
#
# We see that the average number of words is more or less the same, with significant rising in 1967 (again).
#
# Now let's find the song with minimal words number.
# the song with minimal words number
df[df['num_words_in_lyrics']==df['num_words_in_lyrics'].min()].title.iat[0]
# And the album?
# the album of the song with minimal words number
df[df['num_words_in_lyrics']==df['num_words_in_lyrics'].min()].album_title.iat[0]
# The White Album again! What about the biggest number of words?
# # the song with maximal words number
df[df['num_words_in_lyrics']==df['num_words_in_lyrics'].max()].title.iat[0]
# This song is not from the White Album, it's a single, but is from the same period.
#
# So, the song with the shortest number of words is <i>Wild Honey Pie</i> (21 words), the longest is <i><NAME></i> with 392.
#
# Let's look at cover versions now, both number of title words and number of lyrics words.
# +
df_cover = df[df.cover==True]
df_orig = df[df.cover==False]
fig, axs = plt.subplots(1, 3, sharey=True)
axs[0].set_title('All songs')
axs[1].set_title('Original songs')
axs[2].set_title('Cover songs')
df['num_words_in_title'].plot.box(color=color, sym='r+', ax=axs[0], figsize=(16, 8))
df_orig['num_words_in_title'].plot.box(color=color, sym='r+', ax=axs[1])
df_cover['num_words_in_title'].plot.box(color=color, sym='r+', ax=axs[2])
# -
# We see that covers don't differ from other songs in average title length (it's 3). The only evident difference is that they don't have any outliers and therefore are more uniform.
#
# What about words number in lyrics?
# mean number of words in lyrics for covers
df_cover['num_words_in_lyrics'].mean()
# mean number of words in lyrics for originals
df_orig['num_words_in_lyrics'].mean()
# median number of words in lyrics for covers
df_cover['num_words_in_lyrics'].median()
# median number of words in lyrics for originals
df_orig['num_words_in_lyrics'].median()
# We see that both means and medians in covers and originals are rather close. Let's look at the plots!
fig, axs = plt.subplots(1, 3, sharey=True)
axs[0].set_title('All songs')
axs[1].set_title('Original songs')
axs[2].set_title('Cover songs')
df['num_words_in_lyrics'].plot.box(color=color, sym='r+', ax=axs[0], figsize=(16, 8))
df_orig['num_words_in_lyrics'].plot.box(color=color, sym='r+', ax=axs[1])
df_cover['num_words_in_lyrics'].plot.box(color=color, sym='r+', ax=axs[2])
# The covers have significantly less outliers. But what about the variance?
# standard deviation of number of words in lyrics for covers
df_cover['num_words_in_lyrics'].std()
# standard deviation of number of words in lyrics for originals
df_orig['num_words_in_lyrics'].std()
# We see that the covers' variance is actually higher!
#
# Now let's find the outliers in covers that exist, and to which albums they belong.
# cover song with minimal number of words in lyrics' title
df_cover[df_cover.num_words_in_lyrics==df_cover.num_words_in_lyrics.min()].title.iat[0]
# cover song with minimal number of words in lyrics' album
df_cover[df_cover.num_words_in_lyrics==df_cover.num_words_in_lyrics.min()].album_title.iat[0]
# cover song with maximal number of words in lyrics' title
df_cover[df_cover.num_words_in_lyrics==df_cover.num_words_in_lyrics.max()].title.iat[0]
# cover song with maximal number of words in lyrics' title
df_cover[df_cover.num_words_in_lyrics==df_cover.num_words_in_lyrics.max()].album_title.iat[0]
# So, the both outliers are on the same album!
#
# Finally, let's plot the data in the coordinates of number of words in lyrics against length in seconds, and assign different color to covers and non-covers.
ax = df[df.cover==True].plot.scatter(x='num_words_in_lyrics', y='length', figsize=(16, 8),
color='Blue', label='Covers')
df[df.cover==False].plot.scatter(x='num_words_in_lyrics', y='length',
color='Green', label='Originals', ax=ax)
# We see that there is more or less linear dependency. It seems natural; the more words in the song, the more time you need to sing them. Let's find the Pearson's coefficient.
# the Pearson's coefficient for all songs
np.corrcoef(df['num_words_in_lyrics'], df['length'])[0][1]
# the Pearson's coefficient for originals
np.corrcoef(df_orig['num_words_in_lyrics'], df_orig['length'])[0][1]
# the Pearson's coefficient for covers
np.corrcoef(df_cover['num_words_in_lyrics'], df_cover['length'])[0][1]
# So the Pearson's coefficient is 0.46, and it's not that low. But it's not that high either, so the true dependency is somewhat more complicated than just linear one.
#
# For originals alone it's almost the same (0.48), but for covers it's even lower! It's not a surprise, though. Just take a look at the picture: covers are rather uniform in length. Even those with longer lyrics have almost the same length as ones with short lyrics.
# Also it's evident that the covers in general actually do not differ much from the original compositions. At least, they are not linearly separable.
#
# Finally, let's find the most typical Beatles song(s) in terms of all three features, as we did in the pervious step with length only.
median_by_length = df[df.length==df.length.median()].title
median_by_title = df[df.num_words_in_title==df.num_words_in_title.median()].title
median_by_lyrics = df[df.num_words_in_lyrics==df.num_words_in_lyrics.median()].title
# indices of songs combining median length, median number of words in title and median number of words in lyrics
median_by_length.index & median_by_title.index & median_by_lyrics.index
# It's a pity, but there is no song which would combine all three medians. Still, there are songs that combine two of them:
median_length_title = median_by_length.index & median_by_title.index
median_lyrics_title = median_by_lyrics.index & median_by_title.index
median_lyrics_length = median_by_lyrics.index & median_by_length.index
# titles of songs combining median length and number of words in title
df.loc[median_length_title].title
# titles of songs with median number of words in title and number of words in lyrics
df.loc[median_lyrics_title].title
# titles of songs with median length and number of words in lyrics
df.loc[median_lyrics_length].title
# So, there are two songs combining median length and number of words in title: <i>For You Blue</i> and <i>Hold Me Tight</i>. There are no songs with median number of words in title and number of words in lyrics, and no songs combining median length and number of words in lyrics.
# <h2>Conclusion</h2>
#
# The average song title consists of three words. Such songs comprise about one third of all songs. The minimum lies at 1 and the maximum at 10 (<i>Everybody's Got Something To Hide Except Me And My Monkey</i> from the <b>White Album</b>, 1968). The number of words in titles is more diverse in the second period (1967-1970). The covers have generally the same title lengths as the original songs, but they don't have outliers.
#
# The average song contains 182 words. The number of words in lyrics is again more diverse in the second period. The longest songs in average are in 1967, and the year with the highest variance of number of words is 1969. Still, the shortest (<i>Wild Honey Pie</i>, 21 words) song was released in 1968 and the longest (<i>Hey Jude</i>, 392 words) song in 1967. The covers don't differ much from the original songs, they have lower range but higher variance.
#
# There are no songs combining median length, median number of words in title and median number of words in lyrics. Still, there are two songs combining median length and number of words in title: <i>Hold Me Tight</i> (1963) and <i>For You Blue</i> (1970).
| TBSA - Step 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #%matplotlib inline
import numpy as np
from scipy.misc import imread
import pickle
import matplotlib.pyplot as plt
from scipy.misc import imread, imresize
import tensorflow as tf
from keras.preprocessing import image
from keras.backend.tensorflow_backend import set_session
from ssd import SSD300
from keras.applications.imagenet_utils import preprocess_input
from ssd_utils import BBoxUtility
import matplotlib.pyplot as plt
from SSD_tester import calc_detection_prec_rec, calc_detection_ap, calc_detection_ap_recall
from object_detection.utils.object_detection_evaluation import ObjectDetectionEvaluator
from object_detection.core import standard_fields
import time
import sys
a = time.time()
config = tf.ConfigProto(
gpu_options=tf.GPUOptions(
visible_device_list='1',
allow_growth=True
)
)
sess = sess = tf.Session(config=config)
# -
NYU_CLASSES = ['bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser',
'garbage_bin', 'lamp', 'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet']
NUM_CLASSES = len(NYU_CLASSES) + 1
input_shape = (300, 300, 3) #channel lastde
depth_input_shape = (300, 300, 1)
ver = 7
np.random.seed(7)
model = SSD300(input_shape, num_classes=NUM_CLASSES)
model.load_weights('/media/hdd2/jun/checkpoints/bmvc/RGB/v{:d}/weights.best.hdf5'.format(ver), by_name=True)
# +
rgb_gt = pickle.load(open('../pkls/RGB.pkl', 'rb'), encoding='latin1')
rgb_keys = sorted(rgb_gt.keys())
rgb_keys = np.array(rgb_keys)
perm = np.random.permutation(len(rgb_keys))
split = np.split(perm, 10)
test_perm = split.pop(ver)
ind = []
for ary in split:
ind += list(ary)
num_train = int(len(ind) * 0.96)
rgb_train_keys = rgb_keys[ind[:num_train]]
rgb_val_keys = rgb_keys[ind[num_train:]]
rgb_test_keys = rgb_keys[test_perm]
num_train = len(rgb_train_keys)
num_val = len(rgb_val_keys)
num_test = len(rgb_test_keys)
# +
path_prefix = '/media/hdd2/jun/dataset/'
rgb_inputs = []
depth_inputs = []
images = []
# img_path = path_prefix + sorted(val_keys )[100]
# img = image.load_img(img_path, target_size=(300, 300))
# img = image.img_to_array(img)
# images.append(imread(img_path))
# inputs.append(img.copy())
# inputs = preprocess_input(np.array(inputs))
for rgb_key in rgb_test_keys:
rgb_img_path = path_prefix + rgb_key
rgb_img = image.load_img(rgb_img_path, target_size=(300, 300))
img = imread(rgb_img_path)
images.append(img)
rgb_img = image.img_to_array(rgb_img)
rgb_inputs.append(rgb_img.copy())
inputs = preprocess_input(np.array(rgb_inputs))
# +
priors = pickle.load(open('../pkls/prior_boxes_ssd300.pkl', 'rb'))
bbox_util = BBoxUtility(NUM_CLASSES, priors)
now = time.time()
preds = model.predict(inputs, batch_size=1, verbose=1)
finish = time.time()
duration = finish - now
print(duration)
print(966/duration)
# -
results = bbox_util.detection_out(preds, confidence_threshold=0.5)
gt_bboxes = []
gt_labels = []
gt_scores = []
for key in rgb_test_keys:
index = np.where(rgb_gt[key][:, 4:] == 1)
gt_bboxes.append(rgb_gt[key][:, :4])
gt_labels.append((index[1]).reshape(len(index[1]), 1))
gt_scores.append(np.ones((len(index[1]), 1)))
gt_bboxes = np.array(gt_bboxes)
gt_labels = np.array(gt_labels)
gt_scores = np.array(gt_scores)
# +
pred_labels = []
pred_scores = []
pred_bboxes = []
for result in results:
if len(result) != 0:
nm = len(result[:, 1])
#pred_labels.append((result[:, 0]-1).reshape(nm, 1))
pred_labels.append((result[:, 0]-1).reshape(nm, 1))
pred_scores.append(result[:, 1:2].reshape(nm, 1))
pred_bboxes.append(result[:, 2:].reshape(nm, 4))
else:
pred_labels.append(np.array([]).reshape(0, 1))
pred_scores.append(np.array([]).reshape(0, 1))
pred_bboxes.append(np.array([]).reshape(0, 1))
pred_labels = np.array(pred_labels)
pred_scores = np.array(pred_scores)
pred_bboxes = np.array(pred_bboxes)
# -
prec, rec = calc_detection_prec_rec(pred_labels, pred_scores, pred_bboxes, gt_bboxes, gt_labels, iou_thresh=0.5)
ap = calc_detection_ap(prec, rec, use_07_metric=False)
{'ap': ap, 'map': np.nanmean(ap)}
CLASSES = [{'id': 1, 'name': 'bathtub'}, {'id': 2, 'name': 'bed'}, {'id': 3, 'name': 'bookshelf'},
{'id': 4, 'name': 'box'}, {'id': 5, 'name': 'chair'}, {'id': 6, 'name': 'counter'},
{'id': 7, 'name': 'desk'}, {'id': 8, 'name': 'door'}, {'id': 9, 'name': 'dresser'},
{'id': 10, 'name': 'garbage_bin'}, {'id': 11, 'name': 'lamp'}, {'id': 12, 'name': 'monitor'},
{'id': 13, 'name': 'night_stand'}, {'id': 14, 'name': 'pillow'}, {'id': 15, 'name': 'sink'},
{'id': 16, 'name': 'sofa'}, {'id': 17, 'name': 'table'},
{'id': 18, 'name': 'tv'}, {'id': 19, 'name': 'toilet'}]
evaluator = ObjectDetectionEvaluator(CLASSES)
gt_bboxes = []
gt_labels = []
gt_scores = []
for key in rgb_test_keys:
index = np.where(rgb_gt[key][:, 4:] == 1)
gt_bboxes.append(rgb_gt[key][:, :4])
gt_labels.append((index[1].tolist()))
gt_scores.append(np.ones((len(index[1]), 1)))
gt_bboxes = np.array(gt_bboxes)
gt_labels = np.array(gt_labels)
gt_scores = np.array(gt_scores)
N = len(gt_labels)
for i in range(N):
if len(pred_bboxes[i]) != 0:
gt_dict = {standard_fields.InputDataFields.groundtruth_boxes: gt_bboxes[i],
standard_fields.InputDataFields.groundtruth_classes: np.array(gt_labels[i])
}
detected_dict = {
standard_fields.DetectionResultFields.detection_boxes: pred_bboxes[i],
standard_fields.DetectionResultFields.detection_classes: pred_labels[i].flatten(),
standard_fields.DetectionResultFields.detection_scores: pred_scores[i].flatten()
}
evaluator.add_single_ground_truth_image_info(i, gt_dict)
evaluator.add_single_detected_image_info(i, detected_dict)
metrics_dict = evaluator.evaluate()
pred_labels
| src/.ipynb_checkpoints/ssd_evaluation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0 64-bit (''.venv'': poetry)'
# name: python3
# ---
# # Type Hinting
# +
from typing import (
Any,
Callable,
Iterable,
Iterator,
ParamSpec,
TypeAlias,
TypeGuard,
TypeVar,
Union,
)
from contextlib import contextmanager
# -
# ## PEP 604
# `Union` now can be replaced with `"|"`.
# +
uid: Union[int, str] = 1
uid: int | str = 2
my_id: int | None = 2
print(isinstance(uid, str | int))
# -
# ## PEP 612
# `ParamSpec` is added to provide type hints for the decorated functions which changes function parameters.
# +
@contextmanager
def cm(value: int) -> Iterator[int]:
yield value
P = ParamSpec("P")
T = TypeVar("T")
def return_list(fn: Callable[P, Iterable[T]]) -> Callable[P, list[T]]:
def _new_fn(*args: P.args, **kwargs: P.kwargs):
return list(fn(*args, **kwargs))
return _new_fn
@return_list
def generate_integers(limit: int) -> Iterator[int]:
for i in range(limit):
yield i
integer_to_5: list[int] = generate_integers(5)
print(integer_to_5)
# -
# ## PEP 613: Explicit type aliases
# +
Nodes = set[int]
ClientStr = "DBClient" # here type of `Client` is str
ClientAlias: TypeAlias = "DBClient"
class DBClient:
pass
client: ClientAlias = DBClient()
# -
# ## PEP 647: Type guards
# Function that cause narrowing of types
# +
def example(a: None | str | int):
if isinstance(a, str):
print(a) # Type Narrowing
def is_string_list_no_guard(strings: list[Any]) -> bool:
return all(isinstance(string, str) for string in strings)
def is_string_list(strings: list[Any]) -> TypeGuard[list[str]]:
"""Now type checkers know if this evaluates to True
`strings` is a `list[str]`
"""
return all(isinstance(string, str) for string in strings)
a = ["", "", 1, 2]
if is_string_list_no_guard(a):
print(a) # Here the type is list[unknown]
if is_string_list(a):
print(a) # Whereas here a is list[str]
| typehints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import scipy.signal as signal
import scipy.fftpack as fftpack
import random
import pandas as pd
import matplotlib.pyplot as plt
import time
import sys
sys.path.append("../..")
from mfilter.implementations.simulate import SimulateSignal
from mfilter.regressions import *
from mfilter.types import FrequencySamples, TimeSeries, FrequencySeries, TimesSamples
from mfilter.filter import *
from microlensing.lib.microlensing import *
# %matplotlib inline
plt.style.use('seaborn')
# -
# # Match Filter with Regressor and NFFT
# #### Comparing result by doing a linear regressor and a NFFT iterative solver for compute Inverse Fourier Transform
#
# +
# generating time samples
def time_irreg_samples(n=100, basic_dt=1, struct="slight"):
return TimesSamples(n=n, delta=basic_dt, struct=struct)
# creating templates
def mlens_temp(times, pars, idx=0, t0=None):
if t0 is None:
t0 = np.random.random() * times.duration*0.9 + times.min() + times.duration * 0.05
ml = microlens(tE=pars['tE'][idx], U0=pars['U0'][idx], fs=pars['fs'][idx], t0=t0)
return TimeSeries(ml.eval(times), times=times), t0
# generate parameter generator object and sample parameters
parfile = "test.txt" # here put your favourite file with columns tE, U0 and fs, in data directory
pargen = microlens_pars(parsfile = "../../microlensing/data/%s" % parfile)
nsample = 100
pars = pargen.sample(nsample)
# +
# using the chi statistic
def power_chisq(htilde, stilde, num_bins, times, psd=None, method="regression", **kwargs):
bins = power_chisq_bins(htilde, num_bins, psd=psd, method=method, **kwargs)
snr, corr, norm = matched_filter_core(htilde, stilde, psd=psd, times=times, method=method, **kwargs)
return power_chisq_from_precomputed(corr, snr, norm, bins, times, method=method, **kwargs), len(bins)
def power_chisq_bins(htilde, num_bins, psd=None, method="regression", **kwargs):
sigma_vec = sigmasq_series(htilde, psd=psd)
return power_chisq_bins_from_sigmasq_series(sigma_vec, num_bins)
def sigmasq_series(htilde, psd=None):
autocorr = htilde.conj() * htilde
if psd is not None:
autocorr /= psd
return autocorr.cumsum()
def power_chisq_bins_from_sigmasq_series(sigma_vec, num_bins):
    """Split a cumulative-power series into roughly equal-power bins.

    Edge targets are even fractions of the second-to-last cumulative
    value; each target is mapped to an index with a right-sided binary
    search, and the final index always closes the last bin.  Duplicate
    edges collapse, so fewer than ``num_bins`` bins may come back.
    """
    total = sigma_vec[len(sigma_vec) - 2]
    targets = np.arange(0, num_bins) * total / num_bins
    edges = np.searchsorted(sigma_vec, targets, side='right')
    edges = np.append(edges, len(sigma_vec) - 1)
    return np.unique(edges)
def power_chisq_from_precomputed(corr, snr, norm, bins, times, method="regression", **kwargs):
    """Build the chi-square time series from matched-filter by-products.

    For each frequency bin the correlation is inverse-transformed on its
    own and its squared magnitude accumulated; the final statistic is
    num_bins * sum(|q_j|^2) - |snr|^2, scaled by norm^2.
    """
    # scratch series reused for every bin: only [k_min:k_max) is nonzero
    qtilde = FrequencySeries(np.zeros(len(corr)), frequency_grid=corr.frequency_object, dtype=corr.dtype,
                             epoch=corr.epoch)
    chisq = TimeSeries(np.zeros(len(snr)), times=snr.times, dtype=snr.dtype, epoch=snr.epoch)
    num_bins = len(bins) - 1
    for j in range(num_bins):
        k_min = int(bins[j])
        k_max = int(bins[j+1])
        qtilde[k_min:k_max] = corr[k_min:k_max]
        q = qtilde.to_timeseries(method=method, times=times, **kwargs)
        qtilde.fill(0)  # zero the scratch slice before the next bin
        chisq += q.squared_norm()
    chisq = (chisq * num_bins - snr.squared_norm()) * (norm ** 2)
    chisq = TimeSeries(chisq, times=snr.times, epoch=snr.epoch)
    return chisq
def weighted_snr(snr, chisq):
    """Down-weight SNR samples with a bad chi-square (in place).

    Wherever the reduced chi-square exceeds 1, the SNR is divided by
    ((1 + chisq^3) / 2)^(1/6); samples with chisq <= 1 are untouched.
    The input ``snr`` is modified in place and also returned.
    """
    for i, chi in enumerate(chisq):
        if chi > 1:
            snr[i] /= ((1.0 + chi ** 3) / 2.0) ** (1.0 / 6.0)
    return snr
# +
# Global experiment settings: an irregular 100-sample grid with nominal
# spacing 0.8, shifted to start at t = 0.
# NOTE(review): ``n`` and ``delta_t`` are also read as globals inside
# one_run(); keep them in sync with ``times``.
n = 100
delta_t = 0.8
times = time_irreg_samples(n=n, basic_dt=delta_t, struct="slight")
times -= min(times)
def one_run(times, pars, nsample, noise_level=1, idx=None, t0=None,
            chi=False, band=False, window=False, psd=False):
    """Run one simulated matched-filter experiment (regression vs NFFT).

    Draws a microlensing signal (parameter row ``idx``), adds Gaussian
    noise, builds a template centred at the middle sample, transforms both
    to the frequency domain with a regression solver and with NFFT, and
    matched-filters each.  Optional band-passing, windowing and chi-square
    reweighting can be switched on.

    NOTE(review): relies on the module-level globals ``n`` (noise length)
    and ``delta_t`` (Nyquist estimate); presumably these always match
    ``times`` -- confirm.

    Returns (data, temp, snr_reg, snr_nfft, t0, idx): SIX values.
    """
    if idx is None:
        idx = np.random.randint(1, nsample)
    data, t0 = mlens_temp(times, pars, idx=idx, t0=t0)
    # resample the parameter row until the waveform evaluates without NaNs
    while (any(np.isnan(d) for d in data)):
        idx = np.random.randint(1, nsample)
        data, t0 = mlens_temp(times, pars, idx=idx, t0=t0)
    plt.plot(times, data, '--')
    data += np.random.normal(0, noise_level, n)
    # template: same parameters, but centred at the middle of the grid
    temp, _ = mlens_temp(times, pars, idx=idx, t0=times[n//2])
    plt.plot(times, data)
    plt.plot(times, temp)
    if band:
        # low-pass both series (``signal`` presumably scipy.signal via a
        # wildcard import above -- TODO confirm)
        cutoff = 0.2
        bb, ab = signal.butter(3, cutoff*2 * len(times)/times.duration, btype='lowpass', analog=False)
        data = TimeSeries(signal.filtfilt(bb, ab, data), times=times)
        temp = TimeSeries(signal.filtfilt(bb, ab, temp), times=times)
    if window:
        # NOTE(review): the Hann window is immediately overwritten by the
        # Tukey window, so only the Tukey taper is actually applied
        wind = signal.windows.hann(len(times))
        wind = signal.windows.tukey(len(times), alpha=1/8)
        data *= wind
        temp *= wind
    # create regresssors
    nyq = 1 / (2 * delta_t)
    samples_per_peak = 5
    df = 1 / times.duration / samples_per_peak
    freqs = FrequencySamples(input_time=times,
                             minimum_frequency=samples_per_peak * df,
                             maximum_frequency=nyq + samples_per_peak * df,
                             samples_per_peak=samples_per_peak)
    F = Dictionary(times, freqs)
    # NOTE(review): the Ridge regressor is immediately replaced by the SGD
    # one; only SGDRegression is actually used
    reg = RidgeRegression(alpha=10**(-6), phi=F)
    reg = SGDRegression(alpha=10**(-3), max_iter=10, phi=F)
    # compute transforms with regressors
    t_i = time.time()
    stilde_reg = data.to_frequencyseries(method="regression", reg=reg)
    htilde_reg = temp.to_frequencyseries(method="regression", reg=reg)
    t_f = time.time()
    print("time for reg", (t_f-t_i))
    #compute transforms with NFFT
    t_i = time.time()
    stilde_nfft = data.to_frequencyseries(method="nfft")
    htilde_nfft = temp.to_frequencyseries(method="nfft")
    t_f = time.time()
    print("time for nfft", (t_f-t_i))
    #lets use psd related to same noise distribution
    # psd_reg = freqs.lomb_scargle(times, np.random.normal(0, 0.1, n), norm="standard")
    # do MF for regressors
    t_i = time.time()
    snr_reg = matched_filter(htilde_reg, stilde_reg, psd=psd, method="regression", reg=reg,
                             times=times, unitary_energy=True)
    t_f = time.time()
    print("time for reg is", (t_f-t_i))
    # do MF for NFFT
    t_i = time.time()
    snr_nfft = matched_filter(htilde_nfft, stilde_nfft, psd=psd, method="nfft",
                              times=times, unitary_energy=True)
    t_f = time.time()
    print("time for nfft is", (t_f-t_i))
    if chi:
        divs = 40
        chsq_reg, n_bins = power_chisq(htilde_reg, stilde_reg, divs, times, reg=reg)
        chsq_reg /= (n_bins * 2) - 2
        # NOTE(review): the "nfft" chi-square is computed from the regression
        # transforms (htilde_reg/stilde_reg) -- looks like a copy-paste slip;
        # confirm whether htilde_nfft/stilde_nfft were intended
        chsq_nfft, n_bins = power_chisq(htilde_reg, stilde_reg, divs, times, method="nfft")
        chsq_nfft /= (n_bins * 2) - 2
        snr_reg = weighted_snr(snr_reg, chsq_reg)
        snr_nfft = weighted_snr(snr_nfft, chsq_nfft)
    return data, temp, snr_reg, snr_nfft, t0, idx
def compare_match(times, pars, nsample, idx=15, window=False, chi=False):
    """Run one experiment and plot SNR series plus the best-match templates.

    First figure: regression SNR (red, left) and NFFT SNR (blue, right).
    Second figure: the noisy data with the template re-centred at each
    method's SNR peak (shifted modulo the grid duration).
    """
    data, temp, snr_reg, snr_nfft, true_t0, idx = one_run(times, pars, nsample, idx=idx,
                                                          window=window, chi=chi)
    print(idx)
    print("true t0 is:", true_t0)
    t_max_snr_reg = np.argmax(snr_reg.real)
    t_max_snr_nfft = np.argmax(snr_nfft.real)
    fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(16, 3))
    ax1.plot(times, snr_reg.real, 'r')
    ax2.plot(times, snr_nfft.real, 'b')
    fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(16, 3))
    ax1.plot(times, data, 'k')
    # t_reg = np.copy(times.value) - times[t_max_snr_reg]
    # for i in range(len(t_reg)):
    #     if t_reg[i] < min(times):
    #         t_reg[i] = max(times) + t_reg[i]
    # idx = np.argmin(t_reg)
    # ax1.plot(np.roll(t_reg, -idx), np.roll(temp, -idx), 'r')
    # rebuild the template with t0 moved to the recovered peak position
    temp_reg, _ = mlens_temp(times, pars, idx=idx,
                             t0=(times[len(times)//2] - times[np.argmax(snr_reg.real)]) % times.duration)
    temp_nfft, _ = mlens_temp(times, pars, idx=idx,
                              t0=(times[len(times)//2] - times[np.argmax(snr_nfft.real)]) % times.duration)
    ax1.plot(times, temp_reg, 'r')
    ax2.plot(times, data, 'k')
    ax2.plot(times, temp_nfft, 'b')
compare_match(times, pars, nsample, window=True, chi=False)
# -
# One way to quantify aliasing is to run the match several times on the same type of data (same template constructor) but with different sampling structures; this should lead to different aliasing impacts, which will affect whether the detection occurs or not
# +
def repeat(times, pars, nsample, N, idx=15, tol=0.1, chi=False, band=False,
           window=False, psd=False, t0=None):
    """Repeat the matched-filter experiment N times and count good matches.

    A run counts as "good" for a method when the recovered peak time lies
    within ``tol`` (as a fraction of the grid duration) of the true t0.
    The concrete t0 returned by the first run is reused for the following
    ones when ``t0`` starts as None.

    Returns (goods_reg, goods_nfft): good-match counts for the
    regression-based and NFFT-based filters.
    """
    goods_reg = 0
    goods_nfft = 0
    limit = times.duration * tol
    for i in range(N):
        # FIX: one_run returns SIX values (..., t0, idx); the original
        # unpacked five, which raised ValueError on every call.  The
        # returned idx is discarded so the requested template index is kept.
        data, temp, snr_reg, snr_nfft, t0, _ = one_run(times, pars, nsample, idx=idx, noise_level=0.1, chi=chi,
                                                       band=band, window=window, psd=psd, t0=t0)
        temp_reg = np.roll(temp, -np.argmax(snr_reg))
        temp_nfft = np.roll(temp, -np.argmax(snr_nfft))
        if abs(t0 - times[np.argmin(temp_reg)]) < limit:
            goods_reg += 1
        if abs(t0 - times[np.argmin(temp_nfft)]) < limit:
            goods_nfft += 1
    return goods_reg, goods_nfft
gd_reg, gd_nfft = repeat(times, pars, nsample, 50, t0=times[n//2])
print(gd_reg, gd_nfft)
# -
def aliasing(times, pars, nsample, N, tol_factor=0.03, chi=False, band=False, window=False, psd=False, t0=None):
    """Sweep the match tolerance and record good-match counts at each step.

    Runs ``repeat`` for 10 tolerances (tol_factor * 1 .. tol_factor * 10)
    and returns two parallel lists of counts, one per method.
    """
    reg_over_tol = []
    nfft_over_tol = []
    for i in range(10):
        print(i, end='')  # lightweight progress indicator
        gd_reg, gd_nfft = repeat(times, pars, nsample, N, tol=tol_factor * (i+1),
                                 chi=chi, band=band, window=window, psd=psd, t0=t0)
        reg_over_tol.append(gd_reg)
        nfft_over_tol.append(gd_nfft)
    return reg_over_tol, nfft_over_tol
# +
n = 60
delta_t = 0.8
times = time_irreg_samples(n=n, basic_dt=delta_t, struct="slight")
times -= min(times)
N = 50
basic_reg, basic_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04)
with_chi_reg, with_chi_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04, chi=True)
with_band_reg, with_band_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04, band=True)
with_band_and_chi_reg, with_band_and_chi_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04,
chi=True, band=True)
# +
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(2, 2, figsize=(17, 6), sharey=True)
tol_arr = (np.arange(10)+1)*0.04
ax1.plot(tol_arr, basic_reg, label="regressor")
ax1.plot(tol_arr, basic_nfft, label="nfft")
ax1.legend(fontsize=15)
ax1.axhline(N, color='k')
ax2.plot(tol_arr, with_chi_reg)
ax2.plot(tol_arr, with_chi_nfft)
ax2.axhline(N, color='k')
ax3.plot(tol_arr, with_band_reg)
ax3.plot(tol_arr, with_band_nfft)
ax3.axhline(N, color='k')
ax4.plot(tol_arr, with_band_and_chi_reg)
ax4.plot(tol_arr, with_band_and_chi_nfft)
ax4.axhline(N, color='k')
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
plt.xlabel("tolarance in %", fontsize=18)
plt.ylabel("number of good matchs", fontsize=18)
plt.title(r"Aliasing Testing: Same waveform constructor with same noise but different samples and different $t_0$",
fontsize=18)
plt.savefig("tol1.png")
# ax.set_xlabel("tolerance in \%")
# +
# then repeat with same t0 for generating the data
n = 60
delta_t = 0.8
times = time_irreg_samples(n=n, basic_dt=delta_t, struct="slight")
times -= min(times)
N = 50
t0 = times[n//2]
basic_reg, basic_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04, t0=t0)
with_chi_reg, with_chi_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04, chi=True, t0=t0)
with_band_reg, with_band_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04, band=True, t0=t0)
with_band_and_chi_reg, with_band_and_chi_nfft = aliasing(times, pars, nsample, N, tol_factor=0.04,
chi=True, band=True, t0=t0)
# +
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(2, 2, figsize=(17, 6), sharey=True)
tol_arr = (np.arange(10)+1)*0.04
ax1.plot(tol_arr, basic_reg, 'o', label="regressor")
ax1.plot(tol_arr, basic_nfft, label="nfft")
ax1.legend(fontsize=15)
ax1.set_title("raw SNR", fontsize=16)
ax1.axhline(N, color='k')
ax1.set_ylim(45, 55)
ax2.plot(tol_arr, with_chi_reg, 'o')
ax2.plot(tol_arr, with_chi_nfft)
ax2.axhline(N, color='k')
ax2.set_title("SNR after Chi veto", fontsize=16)
ax2.set_ylim(45, 55)
ax3.plot(tol_arr, with_band_reg, 'o')
ax3.plot(tol_arr, with_band_nfft)
ax3.axhline(N, color='k')
ax3.set_title("SNR with band passing", fontsize=16)
ax3.set_ylim(45, 55)
ax4.plot(tol_arr, with_band_and_chi_reg, 'o')
ax4.plot(tol_arr, with_band_and_chi_nfft)
ax4.axhline(N, color='k')
ax4.set_title("SNR witn band passing and chi veto", fontsize=16)
ax4.set_ylim(45, 55)
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
plt.xlabel("tolarance in %", fontsize=18)
plt.ylabel("number of good matchs", fontsize=18)
# plt.title(r"Aliasing Testing: Same waveform constructor with same noise but different samples and same $t_0$",
# fontsize=18)
plt.tight_layout()
plt.savefig("tol2.png")
# ax.set_xlabel("tolerance in \%")
# -
# #### Threshold study
# now, for a given threshold we are going to see how many detections we get
def detection(times, pars, nsample, threshold, N, tol=0.04, chi=False, band=False,
              window=False, psd=False, t0=None, idx=15):
    """Count runs detected: peak near the true t0 AND peak SNR >= threshold.

    Repeats the matched-filter experiment ``N`` times and, for each of the
    regression and NFFT pipelines, counts the runs whose best-match time
    lies within ``tol`` (fraction of the grid duration) of the true t0 and
    whose peak SNR reaches ``threshold``.

    FIX: the original body referenced an undefined ``idx``, incremented
    ``goods_reg``/``goods_nfft`` before initialising them, duplicated the
    whole loop, and returned the un-thresholded counts while the
    ``reg_detect``/``nfft_detect`` counters went unused.  ``idx`` is now a
    keyword parameter (default 15, matching ``repeat``) and a single loop
    accumulates the threshold-gated detections.

    Returns (reg_detect, nfft_detect).
    """
    reg_detect = 0
    nfft_detect = 0
    limit = times.duration * tol
    for _ in range(N):
        # one_run returns six values; the drawn template index is discarded
        data, temp, snr_reg, snr_nfft, t0, _ = one_run(times, pars, nsample, idx=idx, noise_level=0.1, chi=chi,
                                                       band=band, window=window, psd=psd, t0=t0)
        temp_reg = np.roll(temp, -np.argmax(snr_reg))
        temp_nfft = np.roll(temp, -np.argmax(snr_nfft))
        if abs(t0 - times[np.argmin(temp_reg)]) < limit and max(snr_reg.real) >= threshold:
            reg_detect += 1
        if abs(t0 - times[np.argmin(temp_nfft)]) < limit and max(snr_nfft.real) >= threshold:
            nfft_detect += 1
    return reg_detect, nfft_detect
repeat(times, pars, nsample, N, idx=15, tol=0.1, chi=False, band=False,
window=False, psd=False, t0=None)
| developing/notebooks/ComparisonMF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Touring Seaborn with Titanic
#
# In this lab, we will use a familiar dataset to explore the use of visualizations in feature analysis and selection.
#
#
# The objective of this lab is to work through some of the visualization capabilities available in Seaborn. For a more thorough investigation of the capabilities offered by Seaborn, you are encouraged to do the full tutorial linked below. Seaborn is an API to matplotlib. It integrates with pandas dataframes, simplifying the process of visualizing data. It provides simple functions for plotting.
#
# Some of the features that seaborn offers are
#
# * Several built-in themes that improve on the default matplotlib aesthetics
# * Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
# * Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
# * Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
# * Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
# * A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
# * High-level abstractions for structuring grids of plots that let you easily build complex visualizations
#
# We are going to look at 3 useful functions in seaborn: factorplot, pairplot, and jointplot.
#
# ** Before running the code in this lab, articulate to your partner what you expect the visualization to look like. Look at the code and the Seaborn documentation to figure out what data is being plotted and what the type of plot may look like.**
#
# sources:
#
# Previous Titanic work: https://github.com/rebeccabilbro/titanic
#
# Seaborn Tutorial: https://stanford.edu/~mwaskom/software/seaborn/tutorial.html
#
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# %matplotlib inline
pd.set_option('display.max_columns', 500)
# Like scikit-learn, Seaborn has "toy" datasets available to import for exploration. This includes the Titanic data we have previously looked at. Let's load the Seaborn Titanic dataset and take a look.
#
# (https://github.com/mwaskom/seaborn-data shows the datasets available to load via this method in Seaborn.)
df = sns.load_dataset('titanic')
# Write the code to look at the head of the dataframe
# As you can see, the data has been cleaned up a bit.
#
# We performed some rudimentary visualization for exploratory data analysis previously. For example, we created a histogram using matplotlib to look at the age distribution of passengers.
#
# +
# Create a histogram to examine age distribution of the passengers.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(df['age'], bins = 10, range = (df['age'].min(),df['age'].max()))
plt.title('Age distribution')
plt.xlabel('Age')
plt.ylabel('Count of Passengers')
plt.show()
# -
# ### Factorplot
#
# Our prior work with the Titanic data focused on the available numeric data. Factorplot gives us an easy method to explore some of the categorical data as well. Factorplots allow us to look at a parameter's distribution in bins defined by another parameter.
#
# For example, we can look at the survival rate based on the deck a passenger's cabin was on.
#
# **Remember**: take a look at the documentation first (https://stanford.edu/~mwaskom/software/seaborn/index.html) and figure out what the code is doing. Being able to understand documentation will help you a lot in your projects.
# +
# What is a factorplot? Check the documentation! Which data are we using? What is the count a count of?
g = sns.factorplot("alive", col="deck", col_wrap=4,
data=df[df.deck.notnull()], kind="count", size=4, aspect=.8)
# -
# What other options can you set with a factorplot in Seaborn? Using the code above as a starting point, create some code to create a factorplot with the data above, but in a different configuration. For example- make 2 plots per column, change the colors, add a legend, change the size, etc.
# Try your own variation of the factorplot above.
# As you saw in the factorplot documentation, you can specify several different types of plots in the parameters. Let's use factorplot to create a nested barplot showing passenger survival based on their class and sex. Fill in the missing pieces of the code below.
#
# The goal is a barplot showing survival probability by class that further shows the sex of the passengers in each class. (Hint: how can you use the hue parameter?)
# Draw a nested barplot to show survival for class and sex
g = sns.factorplot(x="CHANGE TO THE CORRECT FEATURE",
y="CHANGE TO THE CORRECT FEATURE",
hue="CHANGE TO THE CORRECT FEATURE",
data=df,
size=6, kind="bar", palette="muted")
g.despine(left=True)
g.set_ylabels("survival probability")
# Take a look at the code below. Let's again plot passenger survival based on their class and who they were (man, woman, child) but using a different plot for each class, like what we did above for the deck information.
g = sns.factorplot(x="CHANGE TO THE CORRECT FEATURE",
y="CHANGE TO THE CORRECT FEATURE",
col="CHANGE TO THE CORRECT FEATURE",
data=df,
saturation=.5, kind="bar", ci=None,aspect=.6)
(g.set_axis_labels("", "Survival Rate").set_xticklabels(["Men", "Women", "Children"]).set_titles
("{col_name} {col_var}").set(ylim=(0, 1)).despine(left=True))
# Factorplot has 6 different kinds of plots, we explored two of them above. Using the documentation, try out one of the remaining plot types. A suggestion is provided below. You can follow it, and/or create your own visualization.
# With factorplot, make a violin plot that shows the age of the passengers at each embarkation point
# based on their class. Use the hue parameter to show the sex of the passengers
# ### Pairplot
#
# In the Wheat Classification notebook, we saw a scatter matrix. A scatter matrix plots each feature against every other feature. The diagonal showed us a density plot of just that feature's data. Seaborn gives us this ability in the pairplot. In order to make a useful pairplot with the data, let's update some information.
df.age = df.age.fillna(df.age.mean())
g = sns.pairplot(data=df[['survived', 'pclass', 'age', 'sibsp', 'parch', 'fare']], hue='survived', dropna=True)
# The Titanic data gives an idea of what we can see with a pairplot, but it might not be the most illustrative example. Using the information provided so far, make a pairplot using the seaborn car crashes data.
# +
# Pairplot of the crash data
# -
# ### Jointplot
#
# Like pairplots, a jointplot shows the distribution between features. It also shows individual distributions of the features being compared.
g = sns.jointplot("fare", "age", df)
# Using either the Titanic or crash data, create some jointplots.
# Jointplot, titanic data
# Jointplot, crash data
# ### Bonus
# Use the Titanic data to create a boxplot of the age distribution on each deck by class.
#
# ### Extra Bonus
# Plot the same information using FacetGrid.
# boxplot of the age distribution on each deck by class
# boxplot of the age distribution on each deck by class using FacetGrid
| seaborn/SeabornTour-Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Checking can load in (and eventually plot) the sswidl
# output of some quicklook summary RHESSI data
#
# 21-Oct-2020 IGH
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.io as io
import astropy.time
import astropy.units as u
# %matplotlib inline
import warnings
warnings.simplefilter('ignore')
# -
# sumdata=io.readsav('qlsum_20021103_163000.dat')
sumdata=io.readsav('qlsum_20030302_220000.dat')
# sumdata=io.readsav('qlsum_20050311_120000.dat')
print(sumdata.keys())
print(sumdata['times'][0])
print(sumdata['countrate'][0])
print(sumdata['dets_id'])
print(sumdata['engs_id'])
print(sumdata['saa_flag'])
# To get rid of the b at the start of the times
times=[]
for tt in sumdata['times']:
times.append(tt.decode('utf-8'))
print(times[0])
# +
# Quick plot (not using the times)
plt.rcParams.update({'font.size': 16,'font.family':"sans-serif",\
'font.sans-serif':"Arial",'mathtext.default':"regular"})
fig = plt.figure(figsize=(10, 6))
plt.semilogy(sumdata['countrate'][:,0],color='black',\
label=sumdata['engs_id'][0].decode('utf-8'))
plt.semilogy(sumdata['countrate'][:,1],color='magenta',\
label=sumdata['engs_id'][1].decode('utf-8'))
plt.semilogy(sumdata['countrate'][:,2],color='chartreuse',\
label=sumdata['engs_id'][2].decode('utf-8'))
plt.semilogy(sumdata['countrate'][:,3],color='cyan',\
label=sumdata['engs_id'][3].decode('utf-8'))
plt.semilogy(sumdata['countrate'][:,4],color='goldenrod',\
label=sumdata['engs_id'][4].decode('utf-8'))
plt.semilogy(sumdata['ecl_flag']*1e3,color='gray',label='Night')
plt.ylim([1,2e3])
# plt.xlim([2001,2019])
# plt.xlabel('Year')
plt.ylabel('count/s')
plt.legend(loc=1, prop={'size': 10})
fig.show()
# -
# For plotting need to put into actual times not strings
# Start by putting in to astropy time format (most control)
atimes=astropy.time.Time(times, format = 'isot')
# Then convert to something matplotlib understands
mtimes = matplotlib.dates.date2num(atimes.datetime)
# The above might not be the best way of doing this....
# +
plt.rcParams.update({'font.size': 16,'font.family':"sans-serif",\
'font.sans-serif':"Arial",'mathtext.default':"regular"})
fig,ax= plt.subplots(figsize=(10, 6))
plt.plot_date(mtimes,sumdata['countrate'][:,0],'-',color='black',\
label=sumdata['engs_id'][0].decode('utf-8'))
plt.plot_date(mtimes,sumdata['countrate'][:,1],'-',color='magenta',\
label=sumdata['engs_id'][1].decode('utf-8'))
plt.plot_date(mtimes,sumdata['countrate'][:,2],'-',color='chartreuse',\
label=sumdata['engs_id'][2].decode('utf-8'))
plt.plot_date(mtimes,sumdata['countrate'][:,3],'-',color='cyan',\
label=sumdata['engs_id'][3].decode('utf-8'))
plt.plot_date(mtimes,sumdata['countrate'][:,4],'-',color='gold',\
label=sumdata['engs_id'][4].decode('utf-8'))
# Now only plot where the flag is == 1
plt.plot_date(mtimes[sumdata['ecl_flag']==1],\
sumdata['ecl_flag'][sumdata['ecl_flag']==1]*10**3.1,'s',color='grey',\
label='Night',markersize=0.5)
plt.plot_date(mtimes[sumdata['flr_flag']==1],\
sumdata['flr_flag'][sumdata['flr_flag']==1]*10**3.0,'s',color='red',\
label='Flare',markersize=0.5)
plt.plot_date(mtimes[sumdata['saa_flag']==1],\
sumdata['saa_flag'][sumdata['saa_flag']==1]*10**3.15,'s',color='orange',\
label='SAA',markersize=0.5)
plt.ylim([1,2e3])
plt.yscale('log')
plt.xlabel('Start time '+atimes[0].iso[:-4])
plt.ylabel('RHESSI Corrected Count Rate [s$^{-1}$]')
plt.legend(loc=4, prop={'size': 12})
# The following should give HH:MM for the axis format
myFmt = matplotlib.dates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(myFmt)
fig.show()
# -
| rhsi_qlsumltc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# **Task 1: Clustering**
#
# In class, you have been introduced to the unsupervised learning and the K-means
# algorithm ang GMM. Our goal is to categorize the two-dimensional dataset cluster.dat
# into several clusters.
# -
# **Method 1. K-means (20 points)**
#
# Implement K-means method on the cluster.dat. You should try different numbers of
# clusters.
# + pycharm={"is_executing": false, "name": "#%%\n"}
# %pylab inline
# following is alternative import
# import numpy as np
# import matplotlib as plt
cluster_dat = np.loadtxt("cluster.dat")
print(cluster_dat.shape)
print(cluster_dat)
# + pycharm={"is_executing": false, "name": "#%%\n"}
import random
def kmeans_2d_sse(data, labels, centroids):
    """Total distortion of a 2-D clustering.

    For every cluster id present in ``labels`` (0 .. labels.max()) whose
    centroid has no NaN component, accumulates the sum of Euclidean
    distances between that cluster's points and its centroid.  (Despite
    the name, distances are not squared before summing.)
    """
    total = 0
    n_clusters = int(labels.max()) + 1
    for c in range(n_clusters):
        center = centroids[c]
        if np.isnan(center).any():
            continue  # skip clusters whose centroid degenerated
        diffs = data[labels == c] - center
        total += np.sqrt((diffs ** 2).sum(axis=1)).sum()
    return total
def kmeans_2d_predict_sse(data, centroids):
    """Assign each point to its nearest centroid, then score the result.

    Labels are chosen by minimum Euclidean distance and handed to
    ``kmeans_2d_sse`` to obtain the total distortion of that assignment.
    """
    assignments = np.zeros(data.shape[0])
    for row, point in enumerate(data):
        dists = np.sqrt(((point - centroids) ** 2).sum(axis=1))
        assignments[row] = np.argmin(dists)
    return kmeans_2d_sse(data, assignments, centroids)
def init_centroid_2d(data, k):
    """Draw k random centroids uniformly inside the data's bounding box.

    The first centroid is a flat (2,) array; each additional one is
    stacked below it, so the result is (2,) for k == 1 and (k, 2)
    otherwise.  Uses the ``random`` module, drawing x then y per point.
    """
    x_lo, x_hi = np.min(data[:, 0]), np.max(data[:, 0])
    y_lo, y_hi = np.min(data[:, 1]), np.max(data[:, 1])
    centroids = np.array([random.uniform(x_lo, x_hi),
                          random.uniform(y_lo, y_hi)])
    for _ in range(1, k):
        new_point = [random.uniform(x_lo, x_hi), random.uniform(y_lo, y_hi)]
        centroids = np.vstack([centroids, new_point])
    return centroids
def kmeans_2d(data, k):
    """Lloyd-style 2-D k-means with random restarts on degenerate clusters.

    Iterates assign/update while the updated centroids would lower the
    distortion; restarts recursively if any centroid becomes NaN (an
    empty cluster).  Returns (labels, centroids, sse).

    NOTE(review): after ``centroids = new_centroids`` the two names alias
    the SAME array, so updating ``new_centroids`` inside the loop also
    mutates ``centroids`` -- confirm this is intended.
    NOTE(review): the final labels are recomputed from ``centroids`` but
    the SSE and return value use ``new_centroids`` -- verify consistency.
    """
    centroids = init_centroid_2d(data, k)
    labels = np.zeros(data.shape[0])
    # initial assignment: nearest centroid by Euclidean distance
    for i, point in enumerate(data):
        labels[i] = np.argmin(
            np.sqrt((np.power(point - centroids, 2)).sum(axis=1)))
    new_centroids = centroids.copy()
    for i in range(0, k):
        new_centroids[i] = np.mean(data[labels == i], axis=0)
    # keep iterating while the proposed centroids would reduce distortion
    while kmeans_2d_predict_sse(data, new_centroids) < kmeans_2d_sse(
            data, labels, centroids):
        if np.isnan(new_centroids).any():
            return kmeans_2d(data, k)  # try again
        centroids = new_centroids
        for i, point in enumerate(data):
            labels[i] = np.argmin(
                np.sqrt((np.power(point - centroids, 2)).sum(axis=1)))
        for i in range(0, k):
            new_centroids[i] = np.mean(data[labels == i], axis=0)
    # final re-assignment against the last accepted centroids
    labels = np.zeros(data.shape[0])
    for i, point in enumerate(data):
        labels[i] = np.argmin(
            np.sqrt((np.power(point - centroids, 2)).sum(axis=1)))
    return labels, new_centroids, kmeans_2d_sse(data, labels, new_centroids)
# + pycharm={"is_executing": false, "name": "#%%\n"}
K = range(2, 6)
for k in K:
labels, centroids, _ = kmeans_2d(cluster_dat, k)
for i in range(0, k):
plt.scatter(cluster_dat[labels == i][:, 0],
cluster_dat[labels == i][:, 1])
plt.scatter(centroids[i, 0], centroids[i, 1])
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# **Do something extra! (BONUS: 10 points)**
#
# Split the dataset using an 80-20 train-test ratio. Train your predictor using the newly implemented K-means function. Iterate over k, and for each report the training and
# testing loss. Plot training and testing loss versus k. Plot the samples for three choices
# of k. Pick k which reveals the structure of the data. Comment the results.
# +
from sklearn import model_selection
K = range(2, 11)
cluster_train, cluster_test = model_selection.train_test_split(cluster_dat,
test_size=0.2)
sse_train = np.zeros(K.stop - 2)
sse_test = np.zeros(K.stop - 2)
for k in K:
labels, centroids, sse_train[k - 2] = kmeans_2d(cluster_train, k)
sse_test[k - 2] = kmeans_2d_predict_sse(cluster_test, centroids)
plt.plot(K, sse_train, label='training loss')
plt.plot(K, sse_test, label='testing loss')
plt.xticks(K)
plt.xlabel("K")
plt.ylabel("sse")
plt.legend()
plt.show()
# -
# **Comment**
#
# kไธๅ็่็ฑปๅจไธ้ข๏ผๅ
ถๅฎ่ฟๆฏๅพ้พ็ๅบ่ฟ็ปๆฐๆฎ็็ปๆ็๏ผๅ ไธบๆฐๆฎๆฌ่บซๅๅธๅฏ้็จๅบฆๅฐฑๅพ้ซ๏ผๆๅฎๅๆ1็ฑป้ฝๆฏๅ็็๏ผไฝๅฆๆไธๅฎ่ฆ้็่ฏ๏ผk=5ๆถๅๆ็5็ป็ด่งไธๆดๅฅฝ๏ผๅๆถ่ฎญ็ปๅๆต่ฏsse้ฝๆฏ่พๅฐ๏ผๆด้ซ็kๅฏนsse็ไธ้ๆๆไนๅๆดๅ ๅผฑ
# **Method 2. Gaussian Mixture Model (30 points)**
#
# Implement EM fitting of a mixture of gaussians on the cluster.dat. You should try
# different numbers of mixtures, as well as tied vs. separate covariance matrices for
# each gaussian.
# +
from scipy.stats import multivariate_normal
def E_step(data, means, covs, weights):
    """GMM expectation step: posterior cluster probabilities per point.

    Evaluates each component's Gaussian density at every data point,
    scales by the mixture weights, and normalizes across components so
    each row of the result sums to 1.
    """
    k = means.shape[0]
    likelihoods = np.zeros((data.shape[0], k))
    for comp in range(k):
        likelihoods[:, comp] = multivariate_normal.pdf(
            data, means[comp], covs[comp])
    weighted = likelihoods * weights
    return weighted / weighted.sum(axis=1, keepdims=True)
def M_step(data, responsibilities, means, covs):
    """GMM maximization step: refit means, covariances and weights.

    ``means`` and ``covs`` are updated IN PLACE from the soft
    assignments.  The returned weights are the per-component
    responsibility masses normalized to sum to 1; normalization happens
    last, after the covariances have been divided by the raw masses.
    """
    n_components = responsibilities.shape[1]
    masses = responsibilities.sum(axis=0)
    for comp in range(n_components):
        gamma = responsibilities[:, comp][:, np.newaxis]
        means[comp] = (gamma * data).sum(axis=0) / gamma.sum()
        centered = data - means[comp]
        covs[comp] = centered.T.dot(centered * gamma) / masses[comp]
    masses /= masses.sum()
    return means, covs, masses
def Gaussian_Mixture(data, centroids, n_iterations=99):
    """Fit a GMM by EM, seeded from ``centroids``, and return hard labels.

    Covariances all start at the full-data covariance.

    NOTE(review): ``weights[0] += 1`` deliberately biases the first
    component before normalization -- confirm this asymmetric init is
    intended rather than uniform weights.
    NOTE(review): with ``n_iterations == 0`` the final ``responsibilities``
    reference would be undefined.
    """
    k = centroids.shape[0]
    means = centroids
    weights = np.ones(k) / k
    weights[0] += 1
    covs = np.array([np.cov(data.T)] * k)
    weights /= weights.sum()
    for i in range(n_iterations):
        responsibilities = E_step(data, means, covs, weights)
        means, covs, weights = M_step(data, responsibilities, means, covs)
    # hard assignment: component with the highest posterior per point
    labels = responsibilities.argmax(axis=1)
    return labels
# +
K = range(2, 6)
for k in K:
labels = Gaussian_Mixture(cluster_dat,
init_centroid_2d(cluster_dat, k))
for i in range(0, k):
plt.scatter(cluster_dat[labels == i][:, 0],
cluster_dat[labels == i][:, 1])
plt.show()
# -
# **Do something extra! (BONUS: 10 points)**
#
# Split the dataset using 80-20 train-test ratio. Plot likelihood on training and testing vs
# iteration for different numbers of mixtures.
| BUPT/Machine-Learning-II/Assignment1/Task1_Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
def create_samples(n_clusters, n_samples_per_cluster, n_features, embiggen_factor, seed):
    """Create ``n_clusters`` Gaussian blobs of random samples as TF tensors.

    Each cluster is normal noise (stddev 5) shifted by a random centroid
    drawn uniformly from [-embiggen_factor/2, embiggen_factor/2).
    Returns (centroids, samples) as concatenated tensors.
    """
    np.random.seed(seed)
    slices = []
    centroids = []
    # Create samples for each cluster
    for i in range(n_clusters):
        samples = tf.random_normal((n_samples_per_cluster, n_features),
                                   mean=0.0, stddev=5.0, dtype=tf.float32, seed=seed,
                                   name="cluster_{}".format(i))
        current_centroid = np.random.random((1, n_features)) * embiggen_factor - (embiggen_factor / 2)
        centroids.append(current_centroid)
        samples += current_centroid
        slices.append(samples)
    # Create a big 'samples' dataset
    # FIX: the original referenced the misspelled name `sllices`, raising
    # NameError at runtime; the accumulated list is `slices`.
    samples = tf.concat(slices, 0, name='samples')
    centroids = tf.concat(centroids, 0, name='centroids')
    return centroids, samples
| ltf/lesson6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%capture
#export
from typing import *
from dataclasses import asdict
import torch
import torch.nn as nn
from fastai2.basics import *
from fastcore.test import *
from transformers.modeling_utils import PreTrainedModel, top_k_top_p_filtering, BeamHypotheses
# +
# default_exp generated_lm
# -
# # GeneratedLM
# > Language Model or Decoder with Generate function
# ## FakeDecoder, FakeLM for Test
# +
class FakeDecoder(nn.Module):
    """Stand-in decoder for tests: takes a memory tensor, no `past` support.

    forward(tgt, memory) -> (logits, None) where
        tgt:    (b, tgt_seq_len) token ids
        memory: (b, src_seq_len, embed_dim) encoder output
        logits: (b, tgt_seq_len, tgt_vocab_size) random values
    """

    def __init__(self, tgt_vocab_size):
        super().__init__()
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, tgt, memory, **kwargs):
        """Return random logits shaped like a real decoder's output."""
        batch_t, batch_m = tgt.shape[0], memory.shape[0]
        # batch sizes of target and memory must agree
        assert batch_t == batch_m, (batch_t, batch_m)
        fake_logits = torch.randn((*tgt.shape, self.tgt_vocab_size))
        return fake_logits, None
class FakeLM(nn.Module):
    """Stand-in language model for tests: no memory input, supports `past`.

    forward(tgt, past=None) -> (logits, presents, None)
        without past: tgt is (b, seq); logits (b, seq, vocab) and
            presents is a list of 6 tensors shaped (2, b, 12, seq, 16)
        with past: tgt is (b, 1) and past holds tensors whose dim 3 is
            seq-1; logits/presents shapes use seq = past[0].shape[3] + 1
    """

    def __init__(self, tgt_vocab_size):
        super().__init__()
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, tgt, past=None, **kwargs):
        """Return random logits plus fake `presents` caches."""
        batch = tgt.shape[0]
        if past is None:
            seq_len = tgt.shape[1]
        else:
            # incremental decoding: the cache implies the full length so far
            seq_len = past[0].shape[3] + 1
        logits = torch.randn((batch, seq_len, self.tgt_vocab_size))
        presents = [torch.randn((2, batch, 12, seq_len, 16))] * 6
        return logits, presents, None
# -
bs = 3
tgt_seq_len = 10
tgt_vocab_size = 20
memory = torch.randn((bs, 9, 9))
past = [torch.randn((2, bs, 12, tgt_seq_len-1, 16))] * 6
pad_token_id=0
eos_token_id=tgt_vocab_size-1
bos_token_id=tgt_vocab_size-2
# +
decoder = FakeDecoder(tgt_vocab_size)
lm = FakeLM(tgt_vocab_size)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
test_eq(decoder(tgt, memory)[0].shape, (bs, tgt_seq_len, tgt_vocab_size))
test_eq(lm(tgt)[0].shape, (bs, tgt_seq_len, tgt_vocab_size))
test_eq(lm(tgt)[1][0].shape, (2, bs, 12, tgt_seq_len, 16))
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
test_eq(lm(tgt, past)[0].shape, (bs, tgt_seq_len, tgt_vocab_size))
test_eq(lm(tgt, past)[1][0].shape, (2, bs, 12, tgt_seq_len, 16))
# -
# ## GeneratedLM
# export
class GeneratedLM():
    def __init__(self, lm, vocab_size, pad_token_id, eos_token_ids, support_past=False):
        '''
        Wrap a language model with text-generation (decoding) utilities.

        Your lm's forward function should be this format:
        if support_past==False
            inputs: (tgt, *model_otherargs, **model_otherkwargs)
                tgt: (bs, tgt_seq_len)
            returns: logits, others
                logits: (bs, tgt_seq_len, tgt_vocab_size)
        else:
            inputs: (tgt, past, *model_otherargs, **model_otherkwargs)
                tgt: (bs, 1)
                past: The infos for quickly generate sentence. List of (2, bs, num_heads, tgt_seq_len-1, ..)
            returns: logits, presents, others
                logits: (bs, tgt_seq_len, tgt_vocab_size)
                presents: List of (2, bs, num_heads, tgt_seq_len, ..)
        '''
        assert isinstance(pad_token_id, int) and pad_token_id >= 0, "`pad_token_id` should be a non-negative integer."
        # BUGFIX: the original check was a bare generator expression, which is
        # always truthy, so negative/ill-typed eos ids slipped through.
        assert isinstance(eos_token_ids, (list, tuple)) and all(
            isinstance(e, int) and e >= 0 for e in eos_token_ids
        ), "`eos_token_ids` should be a list/tuple of non-negative integers."
        self.lm = lm
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id
        self.eos_token_ids = eos_token_ids
        self.support_past = support_past
# ### _generate_no_beam_search
# export
@patch
def _generate_no_beam_search(
    self:GeneratedLM,
    tgt, # (bs, tgt_seq_len)
    max_length,
    do_sample,
    temperature,
    top_k,
    top_p,
    repetition_penalty,
    model_otherargs=[],
    model_otherkwargs={},
):
    """ Generate sequences for each example without beam search (num_beams == 1).
    All returned sequences are generated independently.

    tgt: (bs, tgt_seq_len) prompt tokens; grown one token per step until
        `max_length` or until every sentence has emitted an EOS token.
    do_sample: True -> sample from the (temperature / top-k / top-p filtered)
        distribution; False -> greedy argmax decoding.
    repetition_penalty: > 1.0 penalizes tokens already present in a sequence
        (CTRL paper, https://arxiv.org/abs/1909.05858).
    returns: (bs, n) with tgt_seq_len <= n <= max_length.
    """
    # NOTE(review): the mutable defaults ([] / {}) are evaluated once and shared
    # across calls; harmless here because they are never mutated, but a None
    # sentinel would be safer.
    # current position / max lengths / length of generated sentences / unfinished sentences
    pad_token_id = self.pad_token_id
    eos_token_ids = self.eos_token_ids
    batch_size = tgt.shape[0]
    cur_len = tgt.shape[1]
    # 1 marks a still-growing sentence, 0 a finished one (used to mask in pads).
    unfinished_sents = tgt.new(batch_size).fill_(1)
    past = None
    while cur_len < max_length:
        # print('===================================:', cur_len)
        # print(tgt)
        if self.support_past==False:
            # The model re-reads the whole sequence every step.
            outputs = self.lm(tgt, *model_otherargs, **model_otherkwargs)
        else:
            # With a cache, feed only the newest token after the first step.
            model_inputs = tgt if past is None else tgt[:, -1].unsqueeze(-1)
            outputs = self.lm(model_inputs, past, *model_otherargs, **model_otherkwargs)
        next_token_logits = outputs[0][:, -1, :]
        # if model has past, then set the past variable to speed up decoding
        if self.support_past:
            past = outputs[1]
        # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
        if repetition_penalty != 1.0:
            for i in range(batch_size):
                for previous_token in set(tgt[i].tolist()):
                    # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
                    if next_token_logits[i, previous_token] < 0:
                        next_token_logits[i, previous_token] *= repetition_penalty
                    else:
                        next_token_logits[i, previous_token] /= repetition_penalty
        if do_sample:
            # Temperature (higher temperature => more likely to sample low probability tokens)
            if temperature != 1.0:
                next_token_logits = next_token_logits / temperature
            # Top-p/top-k filtering
            next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
            # Sample
            next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
        else:
            # Greedy decoding
            next_token = torch.argmax(next_token_logits, dim=-1)
        # update generations and finished sentences
        # Finished sentences receive pad instead of the sampled token.
        tokens_to_add = next_token * unfinished_sents + pad_token_id * (1 - unfinished_sents)
        tgt = torch.cat([tgt, tokens_to_add.unsqueeze(-1)], dim=-1)
        for eos_token_id in eos_token_ids:
            unfinished_sents.mul_(tokens_to_add.ne(eos_token_id).long())
        cur_len = cur_len + 1
        # stop when there is a </s> in each sentence, or if we exceed the maximum length
        if unfinished_sents.max() == 0:
            break
    # add eos_token_ids to unfinished sentences
    if cur_len == max_length:
        tgt[:, -1].masked_fill_(unfinished_sents.to(dtype=torch.bool), eos_token_ids[0])
    return tgt # (bs, (tgt_seq_len <= ? <= max_length))
# + hide_input=false
max_length=20
# Shared decoding settings for the smoke tests below.
generate_args = dict(
    max_length=max_length,
    do_sample=True,
    temperature=0.1,
    top_k=3,
    top_p=0.5,
    repetition_penalty=1.2,
)
# With memory, Without past
generated_decoder = GeneratedLM(decoder, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.zeros((bs, 1), dtype=torch.long).fill_(bos_token_id)
result = generated_decoder._generate_no_beam_search(tgt, **generate_args, model_otherargs=[memory])
test_eq(result.shape[0], bs)
# Generation starts from a single BOS token, so any length up to max_length is valid.
assert result.shape[1] <= max_length and result.shape[1] >= 1
# Without memory, With past
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=True)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm._generate_no_beam_search(tgt, **generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# Without memory, Without past
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm._generate_no_beam_search(tgt, **generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# -
# ### build_model_otherargs_for_beam
# export
@patch
def build_model_otherargs_for_beam(self: GeneratedLM, model_otherargs, num_beams):
    '''Duplicate each positional model argument once per beam.

    model_otherargs: List of tensor with shape (bs, ...)
    returns: list of expanded args with shape (bs*num_beams, ...), where every
        batch row is repeated `num_beams` times consecutively.
    '''
    # repeat_interleave along dim 0 repeats each row consecutively — the same
    # layout as unsqueeze(1).expand(...).view(bs*num_beams, ...).
    return [arg.repeat_interleave(num_beams, dim=0) for arg in model_otherargs]
# Each row should be duplicated num_beams (=2) times, consecutively.
model_otherargs = [torch.tensor([[1, 2, 3],
                                 [4, 5, 6]])]
expected = [torch.tensor([[1, 2, 3],
                          [1, 2, 3],
                          [4, 5, 6],
                          [4, 5, 6]])]
# `self` is unused by the implementation, so None suffices here.
result = build_model_otherargs_for_beam(None, model_otherargs, 2)
test_eq(result, expected)
# ### _generate_beam_search
# export
@patch
def _generate_beam_search(
    self:GeneratedLM,
    tgt, # (b, tgt_seq_len)
    max_length,
    do_sample,
    temperature,
    top_k,
    top_p,
    repetition_penalty,
    length_penalty,
    num_beams,
    vocab_size,
    model_otherargs=[],
    model_otherkwargs={},
):
    """ Generate sequences for each example with beam search.

    tgt: (b, tgt_seq_len) prompt tokens, replicated across `num_beams` beams.
    do_sample: True -> sample 2 candidate tokens per beam (after temperature /
        top-k / top-p filtering); False -> keep the top 2*num_beams hypotheses
        across all beams (greedy beam search).
    length_penalty: length exponent used when scoring finished hypotheses
        (consumed by BeamHypotheses).
    returns: (b, n) best hypothesis per example, n <= max_length, EOS-terminated.
    """
    pad_token_id = self.pad_token_id
    eos_token_ids = self.eos_token_ids
    batch_size = tgt.shape[0]
    cur_len = tgt.shape[1]
    # Expand input to num beams
    tgt = tgt.unsqueeze(1).expand(batch_size, num_beams, cur_len)
    tgt = tgt.contiguous().view(batch_size * num_beams, cur_len)  # (batch_size * num_beams, cur_len)
    # generated hypotheses
    generated_hyps = [
        BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
    ]
    # scores for each sentence in the beam; only beam 0 starts "alive" so the
    # first step does not select num_beams identical hypotheses
    beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=tgt.device)
    beam_scores[:, 1:] = -1e9
    beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)
    # cache compute states
    past = None
    # done sentences
    done = [False for _ in range(batch_size)]
    while cur_len < max_length:
        if self.support_past==False:
            outputs = self.lm(tgt, *model_otherargs, **model_otherkwargs)
        else:
            # With a cache, feed only the newest token after the first step.
            model_inputs = tgt if past is None else tgt[:, -1].unsqueeze(-1)
            outputs = self.lm(model_inputs, past, *model_otherargs, **model_otherkwargs)
        scores = outputs[0][:, -1, :]  # (batch_size * num_beams, vocab_size)
        # if model has past, then set the past variable to speed up decoding
        if self.support_past:
            past = outputs[1]
        # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
        if repetition_penalty != 1.0:
            for i in range(batch_size * num_beams):
                for previous_token in set(tgt[i].tolist()):
                    # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
                    if scores[i, previous_token] < 0:
                        scores[i, previous_token] *= repetition_penalty
                    else:
                        scores[i, previous_token] /= repetition_penalty
        if do_sample:
            # Temperature (higher temperature => more likely to sample low probability tokens)
            if temperature != 1.0:
                scores = scores / temperature
            # Top-p/top-k filtering
            scores = top_k_top_p_filtering(
                scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
            )  # (batch_size * num_beams, vocab_size)
            # Sample 2 next words for each beam (so we have some spare tokens and match output of greedy beam search)
            next_words = torch.multinomial(F.softmax(scores, dim=-1), num_samples=2)  # (batch_size * num_beams, 2)
            # Compute next scores
            _scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
            _scores = torch.gather(_scores, -1, next_words)  # (batch_size * num_beams, 2)
            next_scores = _scores + beam_scores[:, None].expand_as(_scores)  # (batch_size * num_beams, 2)
            # Match shape of greedy beam search
            next_words = next_words.view(batch_size, 2 * num_beams)  # (batch_size, 2 * num_beams)
            next_scores = next_scores.view(batch_size, 2 * num_beams)  # (batch_size, 2 * num_beams)
            # NOTE(review): in this branch `next_words` holds raw token ids
            # (< vocab_size), so `beam_id = idx // vocab_size` below is always 0
            # and beam provenance is lost when do_sample=True — confirm against
            # the upstream implementation this was adapted from.
        else:
            # do greedy beam search
            scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
            assert scores.size() == (batch_size * num_beams, vocab_size)
            # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
            _scores = scores + beam_scores[:, None].expand_as(scores)  # (batch_size * num_beams, vocab_size)
            # re-organize to group the beam together (we are keeping top hypothesis accross beams)
            _scores = _scores.view(batch_size, num_beams * vocab_size)  # (batch_size, num_beams * vocab_size)
            next_scores, next_words = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
        assert next_scores.size() == next_words.size() == (batch_size, 2 * num_beams)
        # next batch beam content
        # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)
        next_batch_beam = []
        # for each sentence
        for batch_ex in range(batch_size):
            # if we are done with this sentence
            done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item())
            if done[batch_ex]:
                next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams)  # pad the batch
                continue
            # next sentence beam content
            next_sent_beam = []
            # next words for this sentence
            for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]):
                # get beam and word IDs
                beam_id = idx // vocab_size
                word_id = idx % vocab_size
                # end of sentence, or next word
                if word_id.item() in eos_token_ids or cur_len + 1 == max_length:
                    generated_hyps[batch_ex].add(
                        tgt[batch_ex * num_beams + beam_id, :cur_len].clone(), score.item()
                    )
                else:
                    next_sent_beam.append((score, word_id, batch_ex * num_beams + beam_id))
                # the beam for next step is full
                if len(next_sent_beam) == num_beams:
                    break
            # update next beam content
            # BUGFIX: the original expression `len(...) == 0 if cond else num_beams`
            # parsed as a conditional whose else-branch was the always-truthy
            # `num_beams`, so the assertion checked nothing mid-generation.
            # Parenthesize the intended comparison instead.
            assert len(next_sent_beam) == (0 if cur_len + 1 == max_length else num_beams)
            if len(next_sent_beam) == 0:
                next_sent_beam = [(0, pad_token_id, 0)] * num_beams  # pad the batch
            next_batch_beam.extend(next_sent_beam)
            assert len(next_batch_beam) == num_beams * (batch_ex + 1)
        # sanity check / prepare next batch
        assert len(next_batch_beam) == batch_size * num_beams
        beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
        beam_words = tgt.new([x[1] for x in next_batch_beam])
        beam_idx = tgt.new([x[2] for x in next_batch_beam])
        # re-order batch
        tgt = tgt[beam_idx, :]
        tgt = torch.cat([tgt, beam_words.unsqueeze(1)], dim=-1)
        # re-order internal states
        if past is not None:
            reordered_past = []
            for layer_past in past:
                # get the correct batch idx from layer past batch dim
                # batch dim of `past` and `mems` is at 2nd position
                reordered_layer_past = [layer_past[:, i].unsqueeze(1).clone().detach() for i in beam_idx]
                reordered_layer_past = torch.cat(reordered_layer_past, dim=1)
                # check that shape matches
                assert reordered_layer_past.shape == layer_past.shape
                reordered_past.append(reordered_layer_past)
            past = tuple(reordered_past)
        # update current length
        cur_len = cur_len + 1
        # stop when we are done with each sentence
        if all(done):
            break
    # select the best hypotheses
    tgt_len = tgt.new(batch_size)
    best = []
    for i, hypotheses in enumerate(generated_hyps):
        best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
        tgt_len[i] = len(best_hyp) + 1  # +1 for the <EOS> symbol
        best.append(best_hyp)
    # generate target batch, right-padded with pad_token_id and EOS-terminated
    decoded = tgt.new(batch_size, tgt_len.max().item()).fill_(pad_token_id)
    for i, hypo in enumerate(best):
        decoded[i, : tgt_len[i] - 1] = hypo
        decoded[i, tgt_len[i] - 1] = eos_token_ids[0]
    return decoded  # (b, (tgt_seq_len <= ? <= max_length))
# +
max_length=20
# Beam-search decoding settings for the smoke tests below.
generate_args = dict(
    max_length=max_length,
    do_sample=True,
    temperature=0.1,
    top_k=3,
    top_p=0.5,
    repetition_penalty=1.2,
    length_penalty=1,
    num_beams=4,
    vocab_size=tgt_vocab_size,
)
# With memory, Without past
generated_decoder = GeneratedLM(decoder, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.zeros((bs, 1), dtype=torch.long).fill_(bos_token_id)
# Encoder memory must be tiled per beam before being passed through.
model_otherargs = generated_decoder.build_model_otherargs_for_beam([memory], generate_args['num_beams'])
result = generated_decoder._generate_beam_search(tgt, **generate_args, model_otherargs=model_otherargs)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= 1
# Without memory, With past
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=True)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm._generate_beam_search(tgt, **generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# Without memory, Without past
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm._generate_beam_search(tgt, **generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# -
# ### GenerateArgs
# export
@dataclass
class GenerateArgs():
    """Decoding hyper-parameters consumed by `GeneratedLM.generate`."""
    max_length: int = 20             # maximum total length of the generated sequence
    do_sample: bool = False          # sample tokens (True) vs argmax decoding (False)
    num_beams: int = 1               # >1 switches to beam search
    temperature: float = 1.0         # logits divisor when sampling; >1 flattens the distribution
    top_k: int = 1                   # keep only the k most likely tokens when sampling
    top_p: float = 1.0               # nucleus-sampling probability-mass cutoff
    repetition_penalty: float = 1.0  # >1 penalizes already-generated tokens (CTRL paper)
    length_penalty: float = 1.0      # hypothesis-length exponent used by beam search
# ### generate
# export
@patch
@torch.no_grad()
def generate(
    self:GeneratedLM,
    tgt,
    generate_args: GenerateArgs=None,
    model_otherargs=None,
    model_otherkwargs=None,
):
    '''
    Generate sequences from the wrapped model, dispatching to beam search when
    `generate_args.num_beams > 1` and to greedy/sampling decoding otherwise.

    tgt: (b, tgt_seq_len)
    generate_args: decoding hyper-parameters; None means a default GenerateArgs().
    model_otherargs: Other positional args that your model need. Maybe memory from encoder.
    model_otherkwargs: Other keyword args that your model need. Maybe some masks.
    returns: (b, (tgt_seq_len <= ? <= max_length))
    '''
    # BUGFIX: the original defaults (GenerateArgs(), [], {}) were evaluated once
    # at definition time and shared across every call — the classic mutable
    # default-argument trap. Build fresh defaults per call instead.
    if generate_args is None:
        generate_args = GenerateArgs()
    if model_otherargs is None:
        model_otherargs = []
    if model_otherkwargs is None:
        model_otherkwargs = {}
    max_length=generate_args.max_length
    do_sample=generate_args.do_sample
    num_beams=generate_args.num_beams
    temperature=generate_args.temperature
    top_k=generate_args.top_k
    top_p=generate_args.top_p
    repetition_penalty=generate_args.repetition_penalty
    length_penalty=generate_args.length_penalty
    # Validate user-supplied hyper-parameters early, with explicit messages.
    assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictely positive integer."
    assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
    assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictely positive integer."
    assert temperature > 0, "`temperature` should be strictely positive."
    assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
    assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
    assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
    assert length_penalty > 0, "`length_penalty` should be strictely positive."
    if num_beams > 1:
        output = self._generate_beam_search(
            tgt,
            max_length,
            do_sample,
            temperature,
            top_k,
            top_p,
            repetition_penalty,
            length_penalty,
            num_beams,
            self.vocab_size,
            model_otherargs,
            model_otherkwargs,
        )
    else:
        output = self._generate_no_beam_search(
            tgt,
            max_length,
            do_sample,
            temperature,
            top_k,
            top_p,
            repetition_penalty,
            model_otherargs,
            model_otherkwargs,
        )
    return output
# +
generate_args = GenerateArgs(
    do_sample=True,
    num_beams=1,
)
# num_beams=1 -> dispatches to _generate_no_beam_search
generated_decoder = GeneratedLM(decoder, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.zeros((bs, 1), dtype=torch.long).fill_(bos_token_id)
result = generated_decoder.generate(tgt, generate_args, model_otherargs=[memory])
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= 1
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=True)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm.generate(tgt, generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm.generate(tgt, generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# +
generate_args = GenerateArgs(
    do_sample=True,
    num_beams=2,
)
# num_beams=2 -> dispatches to _generate_beam_search
# With memory, Without past
generated_decoder = GeneratedLM(decoder, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.zeros((bs, 1), dtype=torch.long).fill_(bos_token_id)
model_otherargs = generated_decoder.build_model_otherargs_for_beam([memory], generate_args.num_beams)
result = generated_decoder.generate(tgt, generate_args, model_otherargs=model_otherargs)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= 1
# Without memory, With past
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=True)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm.generate(tgt, generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# Without memory, Without past
generated_lm = GeneratedLM(lm, tgt_vocab_size, pad_token_id, [eos_token_id], support_past=False)
tgt = torch.randint(0, tgt_vocab_size-2, (bs, tgt_seq_len))
result = generated_lm.generate(tgt, generate_args)
test_eq(result.shape[0], bs)
assert result.shape[1] <= max_length and result.shape[1] >= tgt_seq_len
# -
# ## Test
# Test that with do_sample=False, GeneratedLM.generate should return the same result as huggingface's PretrainedModel.generate
# slow
from transformers import AutoModelWithLMHead, AutoTokenizer
# NOTE(review): AutoModelWithLMHead is deprecated in recent transformers
# releases (AutoModelForCausalLM replaces it) — confirm the pinned version.
gpt2_lm = AutoModelWithLMHead.from_pretrained('distilgpt2')
gpt2_lm.eval()
tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
from fastai_transformers_utils.all import *
# +
# slow
sentence = 'The dog is a'
tgt = torch.tensor([tokenizer.encode(sentence)])
# Deterministic settings (do_sample=False, num_beams=1) so outputs are comparable.
generate_args = GenerateArgs(
    max_length=20,
    do_sample=False,
    num_beams=1,
    temperature=1.0,
    repetition_penalty=1,
    length_penalty=1.0,
)
# NOTE(review): older transformers configs exposed `eos_token_ids`; newer
# versions use `eos_token_id` — verify this attribute for the installed version.
generated_lm = GeneratedLM(gpt2_lm, tokenizer.vocab_size, gpt2_lm.config.pad_token_id, [gpt2_lm.config.eos_token_ids], True)
numeric_result = generated_lm.generate(tgt, generate_args)
result = tokenizer.decode(list(numeric_result[0]))
# Reference output from huggingface's own greedy decoding implementation.
huggingface_numeric_result = gpt2_lm.generate(tgt, **asdict(generate_args))
huggingface_result = tokenizer.decode(list(huggingface_numeric_result[0]))
test_eq(result, huggingface_result)
# -
# ## Export -
#hide
from nbdev.export import notebook2script
# Export every `# export`-flagged cell of this notebook into the library package.
notebook2script()
| nbs/05_GeneratedLM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''dev'': conda)'
# name: python3
# ---
# Import the required libraries and dependencies
import pandas as pd
from pathlib import Path
# Dict of country codes to country names
# Two-letter country code -> full country name (insertion order preserved).
full_country_names_dict = dict(
    LU='Luxembourg',
    IE='Ireland',
    GB='United Kingdom',
    MX='Mexico',
    HK='Hong Kong',
    SG='Singapore',
    AT='Austria',
    BE='Belgium',
    ES='Spain',
    SE='Sweden',
    DK='Denmark',
    AU='Australia',
    IT='Italy',
    CH='Switzerland',
    CA='Canada',
    JP='Japan',
    FR='France',
    NL='Netherlands',
    NZ='New Zealand',
    NO='Norway',
    DE='Germany',
    US='United States',
)
# +
# Import country lat lon data
country_lat_lon_df = pd.read_csv(
    Path('./Resources/country_info/country_lat_long.csv'),
    sep='\t'  # the source file is tab-separated
)
# Create dictionary of country key and lat/long values
# Maps country -> (latitude, longitude) tuple.
country_lat_lon_dict = dict(zip(country_lat_lon_df['country'],list(zip(country_lat_lon_df['latitude'],country_lat_lon_df['longitude']))))
# +
# Import city lat lon data
city_lat_lon_df = pd.read_csv(
    Path('./Resources/city_info/us_cities.csv')
)
# Build a "City, ST" key so city names are disambiguated by state.
city_lat_lon_df['city_state'] = city_lat_lon_df['CITY'] + ', ' + city_lat_lon_df['STATE_CODE']
# Create dictionary of city key and lat/lon values
# Maps "City, ST" -> (latitude, longitude) tuple.
city_lat_lon_dict = dict(zip(city_lat_lon_df['city_state'],list(zip(city_lat_lon_df['LATITUDE'],city_lat_lon_df['LONGITUDE']))))
# -
# Map source category names onto Kickstarter categories (insertion order kept).
kickstarter_cat_dict = dict([
    ('Transportation', 'Technology'),
    ('Human Rights', 'Journalism'),
    ('Photography', 'Photography'),
    ('Camera Gear', 'Film & Video'),
    ('Wellness', 'Food'),
    ('Productivity', 'Technology'),
    ('Audio', 'Technology'),
    ('Tabletop Games', 'Games'),
    ('Energy & Green Tech', 'Technology'),
    ('Podcasts, Blogs & Vlogs', 'Journalism'),
    ('Culture', 'Art'),
    ('Fashion & Wearables', 'Technology'),
    ('Phones & Accessories', 'Technology'),
    ('Dance & Theater', 'Theater'),
    ('Video Games', 'Games'),
    ('Music', 'Music'),
    ('Health & Fitness', 'Food'),
    ('Art', 'Art'),
    ('Writing & Publishing', 'Journalism'),
    ('Comics', 'Art'),
    ('Food & Beverages', 'Food'),
    ('Film', 'Film & Video'),
    ('Web Series & TV Shows', 'Film & Video'),
])
| Resources/Utils/consts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Light Curve Basics
#
# The examples over at [lightkurve](http://lightkurve.keplerscience.org) are really useful for understanding all the wonderful ways you can access *Kepler* data via online archives. This very short example demonstrates a quick-and-dirty tool that uses `lightkurve` to download the entire *Kepler* light curve for a particular star.
# ## Download a light curve.
#
# Our first step is to download some data. We'll import our basic set of tools, and then use them to download a Kepler light curve.
# +
# let's import some common Python packages we like
import matplotlib.pyplot as plt, numpy as np
# let's also import our custom Python package for this class
import henrietta as hsl
# -
# This last command creates an object called `hsl` that contains a useful function: `download_kepler_lc`. This function takes a star name as input, and tries its best to return a *Kepler* light curve for it. There are a few options you can feed it if you want, to know what those are, remember that in `jupyter` you can type the name of a function, and then hit `<shift>-<tab>` to see what it does (once for basic info, twice for more). Give it a try!
# Display the function object; in Jupyter, press <shift>-<tab> on it for help.
hsl.download_kepler_lc
# Here, we use that function to download a light curve for Kepler-186. Light curves from Kepler are split into ~90 day quarters; this function makes a rough attempt at stitching all of those together.
# Requires network access to the online Kepler archive.
lc = hsl.download_kepler_lc('Kepler-186')
# ## Plot your light curve.
#
# The `lc` object we just created it is a `KeplerLightCurve` object (see the documentation for `lightkurve`) if you want to know more about what these objects can do). One snazzy feature of a light curve object like this is that we can make a simple plot of the data, just by running
lc.plot()
# You'll notice that the folks who wrote this tool put a bit of thought into making sure the plot looks pretty good and the axes are labeled in a relatively straightforward way.
#
# However, we might want to play around with things a bit, so let's dig under the hood of our light curve, and pull out the actual data that's being plotted.
# +
# this is the time, in something like Julian Date
time_in_days = lc.time
# this is the relative flux of the star, normalized to the star's median brightness
relative_flux = lc.flux
# -
# Number of samples in the stitched, multi-quarter light curve.
N = len(time_in_days)
print("Our light curve contains {} data points!".format(N))
# With these arrays, you can start playing around with real Kepler data, either for Kepler-186 or for any other star you choose!
# ## Using `lightkurve` functions.
#
# You just saw the light curve `.plot()` function. Beyond just that one, there are oodles of built-in functions that we can call directly from that `lc` object. Some are listed below, with explanations from ASTR3400 students! You can also check out the original `lightkurve` documentation over [here](http://lightkurve.keplerscience.org/tutorials/1.03-what-are-lightcurves.html).
# ### `lc.plot()`
# Chris says "`.plot()` plots the light curve with the normalised flux on the y-axis and the time in days on the x-axis."
#
# Sebastian added that "it acts much like `matplotlib.pyplot.plot` works, which we are all more than familiar with due to ASTR2600. Any kwargs that you could pass into matplotlib.pylot.plot also work with `.plot()`."
lc.plot(color='mediumseagreen');
# ### `lc.scatter()`
#
# Zach says "`.scatter()` is almost the same as plot, but instead of connecting each data point with a line, the data are plotted as discrete points."
#
# Jon points out that it also takes keyword arguments to change the plotting style, just like `matplotlib.pyplot.scatter`.
#
# These keywords act slightly differently than the ones for `plot`. Notably, with `scatter` the `c=` and `s=` keywords allow you to set the color or size of different points with an array, as shown here.
#
lc.scatter(c=lc.time, cmap='autumn');
# ### `lc.flatten()`
# Diego says "The `.flatten()` function does exactly as one would expect. Two graphs are shown for comparison. One is the regular one and the other is flattened. This is helpful because flattening the graph can get rid of the factors that influence the light curve that comes from the star. Stars look brighter or dimmer depending on where its spots are. This function makes it so the light curve does not reflect that fact."
#
# Shae adds that `.flatten()` gets rid of wiggles in the light curve "using a Savitzky–Golay filter (a smoothing technique)".
#
# Chris noticed that for his transits, "there seems to be a 'lead up' to the transit coming from both sides like the [caldera](https://en.wikipedia.org/wiki/Caldera) of a volcano." This can be caused by the `.flatten()` function, because it tries to smooth out everything as best it can and doesn't know where your transits are, it can sometimes overcorrect near transits.
# +
# scatter a light curve
ax = lc.scatter(label='original')
# flatten and scatter a light curve
# Reusing `ax` overlays both curves on the same axes for comparison.
lc.flatten().scatter(label='flattened', ax=ax);
# -
# ### `lc.fold()`
# Shae says "`.fold()` takes the data and folds it on itself, depending on the specified period given. The y-axis remains as the normalized flux, whereas the x-axis is the phase which represents the time within a single period."
#
# Diego adds "The `.fold()` function takes all the data points and shifts them all over to a certain time period. It is 'folding' in the sense that the data is all piled into one 'area'. This helps to show its transit around its host star."
# +
# parameters for Kepler-186b
period = 3.8867907   # orbital period, in days
t0 = 2454966.3304    # transit epoch (Julian Date)
# Kepler times use BKJD = JD - 2454833, so shift the epoch to match.
t0_in_bkjd = t0 - 2454833
# flatten, fold, and scatter a light curve
lc.flatten().fold(period=period, phase=t0_in_bkjd/period).scatter(alpha=0.1);
# -
# ### `lc.bin()`
# Zach says "The function `.bin(binsize=int)` takes a given number of data points and averages them together, returning a new lightcurve with fewer data points. It can be applied to a curve that has already been processed by other functions like `.fold()`.
#
# Dana explains how it works "If you want a bin size of 50, this takes the first 50 flux measurements and averages them to give one data point, the second point is the second 50, and so on until the function reaches the end of the data."
#
# Sebastian adds "The process of binning leads to less noise in the data, allowing us to focus much more intensely on the actual patterns in the data that we care about."
# +
folded = lc.flatten().fold(period=period, phase=t0_in_bkjd/period)
# flatten, fold, and scatter a light curve
ax = folded.scatter(alpha=0.1, label='unbinned')
# flatten, fold, bin, and scatter a light curve
# `s=` enlarges the binned markers so they stand out over the raw point cloud.
folded.bin(100).scatter(ax=ax, s=10, label='binned');
# -
# ### `lc.remove_outliers()`
#
# Tyler says "`.remove_outliers(sigma=5.0)` removes the outliers from the data set based on how many standard deviations the value is from the trend. If done incorrectly it can remove transit data." Setting `sigma` to a larger value means that an outlier must be more extreme to be removed.
#
# Dana provides a nice example: "Say you have a list of numbers (3,5,7,2,45,7,9), and you use this function on that set of numbers, it would remove that 45 because it is an outlier and not close to the other numbers."
# +
# make a flattened light curve
flattened = lc.flatten()
# scatter the original flattened light curve
ax = flattened.scatter(label='original', color='red')
# scatter the flattened light curve after removing outliers
# Surviving points are drawn on top, so removed outliers remain visible in red.
flattened.remove_outliers().scatter(ax=ax, label='outliers removed');
# -
# ### `lc.yourownfunction()`
#
# Imagine about what other functions you might like to apply to a light curve. We could write our own `lightkurve` functions. What would they do? If you have an idea, share it with the class!
| docs/source/lightcurves.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME> et al., 2016 - A DataJoint example
# This notebook presents data and results associated with the following papers:
#
# ><NAME>, <NAME>, <NAME> & <NAME>. "Layer 4 fast-spiking interneurons filter thalamocortical signals during active somatosensation" (2016) Nat Neurosci (https://dx.doi.org/10.1038/nn.4412)
#
# In this study, membrane potential and spike recordings were performed in ventral posteromedial nucleus (VPM) neurons and layer 4 (L4) neurons of the barrel cortex, during an object-locating task using the whiskers. The study reported a movement-related suppression mechanism of the thalamocortical circuit. Specifically, the L4 fast-spiking interneurons, driven by sensory-input-evoked VPM activity, suppressed movement-related activity of the L4 excitatory neurons. This movement-related suppression thus improved selectivity for touch-related information during active tactile sensation.
#
# A ***DataJoint*** data pipeline has been constructed for this study, with the presented data ingested into this pipeline. This notebook demonstrates the queries, processing, and reproduction of several figures from the paper. From the pipeline, export capability to NWB 2.0 format is also available.
# ## About the data
#
# Data consists of membrane potential, extracellular recordings and spike sorted results of the mouse's VPM and L4 neurons during a whisker-based object-locating task. The behavior data includes a detailed description of the trial structure (e.g. trial timing, trial instruction, trial response, etc.), lick trace data and a variety of whisker movement related tracking data: whisker angle, whisker curvature, touch times, etc. Trial information also includes optogenetic photostimulation details.
#
# Original data shared here: http://crcns.org/data-sets/ssc/ssc-7
#
# The data in original NWB 1 format (.nwb) have been ingested into a DataJoint data pipeline presented below. This notebook demonstrates the queries, processing, and reproduction of several figures from the paper.
#
# Data are also exported into NWB 2.0 format. See NWB export code [here](../scripts/datajoint_to_nwb.py)
# +
# import necessary Python packages including DataJoint
from datetime import datetime
import os
os.chdir('..')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import datajoint as dj
from pipeline import (reference, subject, acquisition, stimulation, analysis, virus,
intracellular, extracellular, behavior, utilities)
# -
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# ## Architecture of the data pipeline
#
# This data pipeline is comprised of three core components:
# + **subject**: the meta information on the animal, e.g. genetic information, virus injection
# + **acquisition**: the organization of the experiment: session acquisition, trial structure, behavior data
# + **electrophysiology**: intracellular and extracellular recordings, as well as spike sorted results
# In DataJoint, related tables are organized together into a **schema**, and a data pipeline typically consists of tables across multiple schemas connected to each other.
# ### Subject
# `subject` schema contains various tables representing information and meta-information about the experimental animals, as well as information about genetic backgrounds and virus injected for the experiment. The DataJoint *Diagram* below shows all tables found inside the `subject` schema.
# Show subject-related tables plus the virus schema (the "- 1" trims one level of upstream dependencies)
dj.Diagram(subject) - 1 + dj.Diagram(virus)
# For example, `Subject` table contains information about each and every mouse in the experiment, capturing information such as unique subject ID and animal source.
subject.Subject()
# ### Experiment acquisition
# Information pertaining to conducting the experiment as well as data and meta-data acquired during the experiment (except for electrophysiology) are found among three schemas: `acquisition`, `stimulation` and `behavior`. Below, tables across all three schemas are depicted in the DataJoint Diagram.
dj.Diagram(acquisition) - 1 + dj.Diagram(stimulation) + dj.Diagram(behavior)
# `Session` table contains information about each individual experiment session.
acquisition.Session.proj() * subject.Subject
# `Trial` table (found as part table of `TrialSet`) contains information about each trial.
acquisition.TrialSet.Trial()
# ### Electrophysiology
# Electrophysiology information is found among two related schemas: `extracellular` and `intracellular`.
(dj.Diagram(extracellular) - 1) + (dj.Diagram(intracellular) - 1)
# The Unit table
# As an example, `extracellular.UnitSpikeTimes` table contains information about each and every sorted unit and their associated spike trains (in the form of unit spike times).
extracellular.UnitSpikeTimes * extracellular.ProbeInsertion.InsertLocation
# ## Reproduced analysis and figures
# To demonstrate the availability and usage of the data in the pipeline, the following sections of this notebook will show the extraction and plotting of several sets of tracking and ephys data of interest, specifically, we will reproduce ***Figure 1*** and ***Figure 2*** of this study.
# Figure 1 and 2 depict the behavior and electrophysiology data of VPM and L4 excitatory neurons over the course of an experimental trial, as well as trial-averaged membrane potential and spike rate. To reproduce these figures, the following steps are taken:
# 1. extract ephys and behavioral data:
# + membrane potential
# + spikes
# + whisker movement
# + touch trace
# 2. plot these data, per trial, time-locked to trial-start
# 3. extract touch times
# 4. perform trial segmentation and realignment of ephys data to touch times
# 5. plot spike raster, trial-averaged membrane potential and spike histogram
# To accomplish those steps, we define 3 utility functions to help with extracting and segmenting data:
# + ***segment_timeseries*** - segment any timeseries (e.g. Vm, lick traces) or event-series (e.g. spike times) to some specified time (e.g. trial start time or first touch time)
# + ***build_raster_and_PSTH*** - from spike times, compute PSTH
# + ***build_trial_segmented_timeseries*** - the main processing routine, to extract Vm, behavioral data, spikes, etc. and segment, aligned to i) trial-start and ii) touch onset
#
# and 2 plotting functions:
# + ***plot_trial_start_aligned*** - plot behavioral and ephys data from a single trial, aligned to trial-start time
# + ***plot_touch_aligned*** - plot trial aggregated behavioral, ephys, and spike PSTH, aligned to first touch onset
def segment_timeseries(data, timestamps, event_times, pre_dur, post_dur):
    """Cut a timeseries into per-event windows.

    For each event time t, keeps the samples whose timestamp lies in
    (t - pre_dur, t + post_dur], and returns two parallel lists:
    the data segments and the matching timestamps re-referenced to t.
    """
    data_segments = []
    time_segments = []
    for t in event_times:
        in_window = np.logical_and(timestamps > t - pre_dur, timestamps <= t + post_dur)
        data_segments.append(data[in_window])
        time_segments.append(timestamps[in_window] - t)
    return data_segments, time_segments
# trial-average spike histogram
# trial-average spike histogram
def build_raster_and_PSTH(spike_times, timerange, bin_size=0.05):
    """Build raster coordinates and a trial-averaged PSTH.

    `spike_times` is a list of per-trial spike-time arrays. Returns
    (all spikes flattened, matching trial indices, spike rate per bin
    averaged over trials, left edges of the histogram bins).
    """
    all_spikes = np.hstack(spike_times)
    # trial index for each spike (empty trials contribute nothing)
    per_trial_ids = []
    for trial_idx, spikes in enumerate(spike_times):
        per_trial_ids.append(np.full_like(spikes, trial_idx) if spikes.size > 0 else [])
    all_trials = np.hstack(per_trial_ids)
    n_bins = int((timerange[-1] - timerange[0]) / bin_size)
    counts, edges = np.histogram(all_spikes, bins=n_bins, range=timerange)
    # counts -> rate (Hz), averaged over the number of trials
    rates = counts / np.diff(edges) / len(spike_times)
    return all_spikes, all_trials, rates, edges[:-1]
def build_trial_segmented_timeseries(session_key):
    """Fetch behavior and ephys data for one session (== one cell) and segment it twice.

    Returns a tuple of dicts:
      - trial_start_aligned: data segmented 0 to +5 s around each trial start
        (spikes, lick trace, whisker variables, and Vm when available)
      - touch_aligned: data segmented -25 ms to +50 ms around each qualifying
        touch onset, plus raster/PSTH arrays from build_raster_and_PSTH()
    """
    trial_start, trial_stop = (acquisition.TrialSet.Trial & session_key).fetch('start_time', 'stop_time')
    # ==== Step 1 - get behavioral data from the principal whisker ====
    dis_to_pole, touch_on, touch_off, whisk_pos, whisk_curv, b_tvec = (behavior.Whisker
                                                                       & session_key
                                                                       & 'principal_whisker=1').fetch1(
        'distance_to_pole', 'touch_onset', 'touch_offset', 'whisker_angle', 'whisker_curvature', 'behavior_timestamps')
    # keep the 1-samples of the binary touch masks, blank the rest with NaN (for plotting)
    touch_on = np.where(touch_on == 1, touch_on, np.nan)
    touch_off = np.where(touch_off == 1, touch_off, np.nan)
    # Spike times may come from either the extracellular or the intracellular schema.
    # NOTE(review): if a session has neither, `spike_times` stays undefined and the
    # code below raises NameError -- presumably every session has one; confirm.
    if extracellular.UnitSpikeTimes & session_key:
        spike_times = (extracellular.UnitSpikeTimes & session_key).fetch1('spike_times')
    elif intracellular.UnitSpikeTimes & session_key:
        spike_times = (intracellular.UnitSpikeTimes & session_key).fetch1('spike_times')
    lick_traces, l_tvec = (behavior.LickTrace & session_key).fetch1('lick_trace', 'lick_trace_timestamps')
    # ==== Step 2 - align to trial start ====
    # segment: 0 to 5s of trial_start
    event_times = trial_start
    pre_dur = 0
    post_dur = 5
    segmented_spikes = [spike_times[np.logical_and(spike_times > t - pre_dur,
                                                   spike_times <= t + post_dur)] - t for t in event_times]
    segmented_lick_traces, segmented_ltvec = segment_timeseries(lick_traces, l_tvec, event_times, pre_dur, post_dur)
    segmented_behav = {k: segment_timeseries(d, b_tvec, event_times, pre_dur, post_dur)[0]
                       for k, d in zip(('touch_on', 'touch_off', 'whisk_pos', 'whisk_curv'),
                                       (touch_on, touch_off, whisk_pos, whisk_curv))}
    # all whisker variables share b_tvec, so one segmented time vector suffices
    segmented_btvec = segment_timeseries(whisk_pos, b_tvec, event_times, pre_dur, post_dur)[1]
    trial_start_aligned = {'pre_start': pre_dur, 'post_start': post_dur, 'spikes': segmented_spikes,
                           'licks': segmented_lick_traces, 'lick_times': segmented_ltvec,
                           'whiskers': segmented_behav, 'whisker_times': segmented_btvec}
    if intracellular.MembranePotential & session_key:
        # BUGFIX: was restricted by `cell_key`, an undefined name inside this
        # function that only resolved through a same-named global; use this
        # function's own `session_key` instead.
        Vm, v_tvec = (intracellular.MembranePotential & session_key).fetch1('membrane_potential', 'membrane_potential_timestamps')
        segmented_Vm, segmented_vtvec = segment_timeseries(Vm, v_tvec, event_times, pre_dur, post_dur)
        trial_start_aligned.update(Vm=segmented_Vm, Vm_times=segmented_vtvec)
    # ==== Step 3 - align to touch onsets ====
    # touch times
    touch_onset = b_tvec[touch_on == 1]
    touch_offset = b_tvec[touch_off == 1]
    # inter-touch-interval: time from the end of each touch to the start of the next
    iti = (touch_onset[1:] - touch_offset[:-1])
    # get touch-onsets followed by an inter-touch interval of at least 25 ms
    # NOTE(review): this filters on the gap *after* each touch and drops the last
    # touch; confirm this matches the intended "isolated touch" criterion.
    touch_onset = touch_onset[:-1][iti > 25e-3]
    # segment: -25ms to 50ms of touch-time
    pre_touch = 25e-3
    post_touch = 50e-3
    # touch-aligned spike-times
    touch_aligned_spikes = [spike_times[np.logical_and(spike_times > t_touch - pre_touch,
                                                       spike_times < t_touch + post_touch)] - t_touch
                            for t_touch in touch_onset]
    # touch-aligned whisker curvature: baseline-subtract (first 100 samples) and keep
    # only deflections below baseline (positive residue is zeroed)
    touch_aligned_curvature, touch_aligned_btvec = segment_timeseries(whisk_curv, b_tvec, touch_onset, pre_touch, post_touch)
    touch_aligned_curvature = [np.where((k - k[:100].mean()) < 0, k - k.mean(), 0) for k in touch_aligned_curvature]
    # psth
    stacked_spikes, stacked_trials, spk_rates, spk_edges = build_raster_and_PSTH(
        touch_aligned_spikes, (-pre_touch, post_touch), bin_size=0.001)
    touch_aligned = {'pre_touch': pre_touch, 'post_touch': post_touch, 'spikes': touch_aligned_spikes,
                     'whisk_curv': touch_aligned_curvature, 'whisker_times': touch_aligned_btvec,
                     'stacked_spikes': stacked_spikes, 'stacked_trials': stacked_trials,
                     'spk_rates': spk_rates, 'spk_edges': spk_edges}
    if intracellular.MembranePotential & session_key:
        # touch-aligned Vm
        touch_aligned_Vm, touch_aligned_vtvec = segment_timeseries(Vm, v_tvec, touch_onset, pre_touch, post_touch)
        touch_aligned.update(Vm=touch_aligned_Vm, Vm_times=touch_aligned_vtvec)
    return trial_start_aligned, touch_aligned
def plot_trial_start_aligned(trial_num, trial_start_aligned):
    """Plot licks, whisker curvature, spikes/touches (plus Vm when present) and
    whisker position for one trial, all time-locked to trial start.

    `trial_num` indexes into the per-trial lists produced by
    `build_trial_segmented_timeseries()`.
    """
    pre_dur = trial_start_aligned['pre_start']
    post_dur = trial_start_aligned['post_start']
    # per-trial slices of the segmented data
    l_tvec = trial_start_aligned['lick_times'][trial_num]
    licks = trial_start_aligned['licks'][trial_num]
    b_tvec = trial_start_aligned['whisker_times'][trial_num]
    whisk_curv = trial_start_aligned['whiskers']['whisk_curv'][trial_num]
    touch_on = trial_start_aligned['whiskers']['touch_on'][trial_num]
    whisk_pos = trial_start_aligned['whiskers']['whisk_pos'][trial_num]
    spikes = trial_start_aligned['spikes'][trial_num]
    if 'Vm' in trial_start_aligned:
        # 5 rows: licks, curvature, spikes, Vm (+touch marks), whisker position
        fig, axs = plt.subplots(5, 1, figsize=(12, 6))
        fig.subplots_adjust(hspace=0.01)
        for x in b_tvec[touch_on==1]:
            axs[3].axvline(x, 0.25, 0.75, color='deepskyblue')
        axs[3].plot(trial_start_aligned['Vm_times'][trial_num], trial_start_aligned['Vm'][trial_num], 'k')
        axs[4].plot(b_tvec, whisk_pos, 'g')
        # overlay touch samples on the whisker-angle trace
        axs[4].plot(b_tvec, whisk_pos * touch_on, '.', c='deepskyblue')
    else:
        # 4 rows: licks, curvature, spikes (+touch marks), whisker position
        fig, axs = plt.subplots(4, 1, figsize=(12, 6))
        fig.subplots_adjust(hspace=0.01)
        for x in b_tvec[touch_on==1]:
            axs[2].axvline(x, 0.25, 0.75, color='deepskyblue')
        axs[3].plot(b_tvec, whisk_pos, 'g')
        axs[3].plot(b_tvec, whisk_pos * touch_on, '.', c='deepskyblue')
    # panels common to both layouts
    axs[0].plot(l_tvec, np.where(licks > 0, licks, np.nan), '.', color='deeppink')
    axs[1].plot(b_tvec, whisk_curv, 'deepskyblue')
    for x in spikes:
        axs[2].axvline(x, 0.35, 0.65, color='k')
    # cosmetics: hide spines and y-ticks on every panel
    for a in axs:
        a.set_xlim((-pre_dur, post_dur))
        a.spines['right'].set_visible(False)
        a.spines['top'].set_visible(False)
        a.spines['left'].set_visible(False)
        a.spines['bottom'].set_visible(False)
        a.set_yticks([])
        # NOTE(review): this overrides the set_xlim((-pre_dur, post_dur)) above,
        # zooming every panel to 0.5-2 s -- presumably intentional; confirm.
        a.set_xlim((0.5, 2))
    for a in axs[:-1]:
        a.set_xticks([])
def plot_touch_aligned(touch_aligned):
    """Plot trial-aggregated, touch-onset-aligned data: whisker curvature,
    spike raster, PSTH, and (when available) overlaid membrane potential traces.

    BUGFIX: the Vm check previously tested the *global* `trial_start_aligned`
    dict instead of this function's `touch_aligned` argument, so the function
    only behaved correctly when a matching global happened to exist.
    """
    pre_dur = touch_aligned['pre_touch']
    post_dur = touch_aligned['post_touch']
    b_tvec = touch_aligned['whisker_times']
    whisk_curv = touch_aligned['whisk_curv']
    stacked_spikes = touch_aligned['stacked_spikes']
    stacked_trials = touch_aligned['stacked_trials']
    spk_rates = touch_aligned['spk_rates']
    spk_edges = touch_aligned['spk_edges']
    if 'Vm' in touch_aligned:
        # 4 rows: curvature, raster, Vm traces, PSTH
        fig, axs = plt.subplots(4, 1, figsize=(4, 8))
        # overlay 20 randomly chosen Vm traces
        rand_trials = np.random.randint(len(touch_aligned['Vm']), size=20)
        for k in rand_trials:
            axs[2].plot(touch_aligned['Vm_times'][k], touch_aligned['Vm'][k], color='gray', alpha=0.2)
        axs[3].bar(spk_edges, spk_rates, width=0.001, color='k')
    else:
        # 3 rows: curvature, raster, PSTH
        fig, axs = plt.subplots(3, 1, figsize=(4, 6))
        axs[2].bar(spk_edges, spk_rates, width=0.001, color='k')
    # pick 20 random trials to plot
    rand_trials = np.random.randint(len(whisk_curv), size=20)
    for k in rand_trials:
        axs[0].plot(b_tvec[k], whisk_curv[k], color='b', alpha=0.5)
    # raster: subsample 40 random spikes
    rand_trials = np.random.randint(len(stacked_spikes), size=40)
    axs[1].plot(stacked_spikes[rand_trials], stacked_trials[rand_trials], '|', color='k')
    # cosmetic
    for a in axs[:-1]:
        a.set_axis_off()
        a.set_xlabel([])
        a.set_xlim((-pre_dur, post_dur))
    for a in axs[1:]:
        a.axvline(x=0, color='b')  # mark touch onset
        a.set_xlim((-pre_dur, post_dur))
    axs[-1].set_xlabel('Time from touch onset')
    axs[-1].spines["top"].set_visible(False)
    axs[-1].spines["right"].set_visible(False)
# ## Replication of Figure 1 (b, d) - VPM
#
# Here, we plot the trial-aligned behavior and ephys data from one representative trial.
#
# Specifically, we plot ephys, spike, and PSTH for 3 cells:
# + ANM199549_20130530
# + ANM199552_20130602
# + ANM186997_20130321
#
# One session represents the recording of one neuron, thus the `session_id` also uniquely depicts a cell
# One session == one recorded neuron, so `session_id` identifies the cell.
session_key = acquisition.Session & 'session_id = "ANM199549_20130530"'
extracellular.UnitSpikeTimes * extracellular.ProbeInsertion & session_key
# Using the selected `session_key`, we extract the behavioral and ephys data of interest, calling the `build_trial_segmented_timeseries()` helper function defined above.
trial_start_aligned, touch_aligned = build_trial_segmented_timeseries(session_key)
# #### Plot behavioral and ephys data for one representative trial (trial 348)
# Invoke the `plot_trial_start_aligned()` defined above
plot_trial_start_aligned(348, trial_start_aligned)
# #### Plot touch-aligned spike raster and spike histogram
plot_touch_aligned(touch_aligned)
# #### Repeat the plots for other neurons
session_key = acquisition.Session & 'session_id = "ANM199552_20130602"'
trial_start_aligned, touch_aligned = build_trial_segmented_timeseries(session_key)
plot_touch_aligned(touch_aligned)
session_key = acquisition.Session & 'session_id = "ANM186997_20130321"'
trial_start_aligned, touch_aligned = build_trial_segmented_timeseries(session_key)
plot_touch_aligned(touch_aligned)
# ## Replication of Figure 2 (b, d) - L4 excitatory cell
#
# The actual implementation here is almost identical to that of Figure 1 above, which revolves around selecting one `cell` key, and invoking the utility functions defined above to actually plot the data.
# We plot the trial-aligned behavior and ephys data from one representative trial, plotting ephys, spike, and PSTH for 3 cells:
# + JY0861AAAA
# + JY0520AAAC
# + JY1008AAAA
# Snippet of the pipeline architecture around **intracellular** schema
dj.Diagram(intracellular.Cell) + 1 - 1
# Restrict to one intracellularly recorded cell; `session_id` identifies it.
cell_key = intracellular.Cell & {'session_id': 'JY0861AAAA'}
trial_start_aligned, touch_aligned = build_trial_segmented_timeseries(cell_key)
# representative trial 20: trial-start-aligned panels, then touch-aligned summary
plot_trial_start_aligned(20, trial_start_aligned)
plot_touch_aligned(touch_aligned)
cell_key = intracellular.Cell & {'session_id': 'JY0520AAAC'}
trial_start_aligned, touch_aligned = build_trial_segmented_timeseries(cell_key)
plot_touch_aligned(touch_aligned)
cell_key = intracellular.Cell & {'session_id': 'JY1008AAAA'}
trial_start_aligned, touch_aligned = build_trial_segmented_timeseries(cell_key)
plot_touch_aligned(touch_aligned)
| notebooks/Yu-Gutnisky-2016-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="NqtXrZApBkM4"
# # Optimizing training and inference
#
# In this notebook, we will discuss different ways to reduce memory and compute usage during training and inference.
# + [markdown] colab_type="text" id="NEt8wg4JCQdm"
# ## Prepare training script
#
# When training large models, it is usually a best practice not to use Jupyter notebooks, but run a **separate script** for training which could have command-line flags for various hyperparameters and training modes. This is especially useful when you need to run multiple experiments simultaneously (e.g. on a cluster with task scheduler). Another advantage of this is that after training, the process will finish and free the resources for other users of a shared GPU.
#
# In this part, you will need to put all your code to train a model on Tiny ImageNet that you wrote for the previous task in `train.py`.
#
# You can then run your script from inside of this notebook like this:
# + colab={} colab_type="code" id="6-TWiKq8H9yT"
# ! python3 train.py --batch_size 128 --epochs 2 --gpu_enabled \
# --model_path model_state_dict_41.71.pcl \
# --data_path tiny-imagenet-200 \
# --model_module_path model.py \
# --model_checkpoint_path model_checkpoints
# -
# **Task**
#
# Write code for training with architecture from homework_part2
#
# **Requirements**
# * Optional arguments from command line such as batch size and number of epochs with built-in argparse
# * Modular structure - separate functions for creating data generator, building model and training
#
# + [markdown] colab_type="text" id="tKPYZ3QLEqX8"
# ## Profiling time
#
# For the next tasks, you need to add measurements to your training loop. You can use [`perf_counter`](https://docs.python.org/3/library/time.html#time.perf_counter) for that:
# + colab={} colab_type="code" id="bSr-PyQNFkSC"
import time
import numpy as np
import torch
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="HMJMCGRKFYCc" outputId="571046a2-443b-465f-ce62-ddaf68b105d0"
# Demo: time one 1000x1000 matrix product with a monotonic high-resolution clock.
x = np.random.randn(1000, 1000)
y = np.random.randn(1000, 1000)
start_counter = time.perf_counter()
z = np.matmul(x, y)  # equivalent to `x @ y`
elapsed_time = time.perf_counter() - start_counter
print("Matrix multiplication took {:.3f} seconds".format(elapsed_time))
# -
# ! python3 train.py --batch_size 128 --epochs 2 --gpu_enabled \
# --model_path model_state_dict_41.71.pcl \
# --data_path tiny-imagenet-200 \
# --model_module_path model.py \
# --model_checkpoint_path model_checkpoints
# + [markdown] colab_type="text" id="FfhLeWjTGTpB"
# **Task**. You need to add the following measurements to your training script:
# * How much time a forward-backward pass takes for a single batch;
# * How much time an epoch takes.
# + [markdown] colab_type="text" id="khDOTn_SHaND"
# ## Profiling memory usage
#
# **Task**. You need to measure the memory consumptions
#
# This section depends on whether you train on CPU or GPU.
#
# ### If you train on CPU
# You can use GNU time to measure peak RAM usage of a script:
# + colab={} colab_type="code" id="98xvXSjUIDzl"
# !/usr/bin/time -lp python train.py
# + [markdown] colab_type="text" id="v1ES2Pc9IlH5"
# **Maximum resident set size** will show you the peak RAM usage in bytes after the script finishes.
# -
# **Note**.
# Imports also require memory, do the correction
# + [markdown] colab_type="text" id="kq5lY5CKJHX1"
# ### If you train on GPU
#
# Use [`torch.cuda.max_memory_allocated()`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.max_memory_allocated) at the end of your script to show the maximum amount of memory in bytes used by all tensors.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="fSQdauqLIkf1" outputId="8bcffc30-637d-461a-8f44-0e444a28caae"
# Allocate a large CUDA tensor to drive up peak memory, then report it.
# NOTE(review): 1000**3 float32 elements is ~4 GB -- this requires a GPU with
# at least that much free memory.
x = torch.randn(1000, 1000, 1000, device='cuda:0')
print("Peak memory usage by Pytorch tensors: {:.2f} Mb".format((torch.cuda.max_memory_allocated() / 1024 / 1024)))
# -
# ! python3 train.py --batch_size 128 --epochs 2 --gpu_enabled \
# --model_path model_state_dict_41.71.pcl \
# --data_path tiny-imagenet-200 \
# --model_module_path model.py \
# --model_checkpoint_path model_checkpoints
# + [markdown] colab_type="text" id="M3RWHxYKBUys"
# ## Gradient based techniques
#
# Modern architectures can potentially consume lots and lots of memory even for minibatch of several objects. To handle such cases here we will discuss two simple techniques.
#
# ### Gradient Checkpointing
#
# Checkpointing works by trading compute for memory. Rather than storing all intermediate activations of the entire computation graph for computing backward, the checkpointed part does not save intermediate activations, and instead recomputes them in backward pass. It can be applied on any part of a model.
#
# See [blogpost](https://medium.com/tensorflow/fitting-larger-networks-into-memory-583e3c758ff9) for kind introduction and different strategies or [article](https://arxiv.org/pdf/1604.06174.pdf) for not kind introduction.
#
# **Task**. Use [built-in checkpointing](https://pytorch.org/docs/stable/checkpoint.html), measure the difference in memory/compute
#
# **Requirements**.
# * Try several arrangements for checkpoints
# * Add the chekpointing as the optional flag into your script
# * Measure the difference in memory/compute between the different arrangements and baseline
# -
# ! python3 -W ignore train.py --batch_size 128 --epochs 2 --gpu_enabled --checkpoint \
# --model_path model_state_dict_41.71.pcl \
# --data_path tiny-imagenet-200 \
# --model_module_path model.py \
# --model_checkpoint_path model_checkpoints
# +
# NOTE: with checkpointing, memory usage changed only slightly, but each iteration became almost 2x faster.
# + [markdown] colab_type="text" id="mjY8LR_GQbTV"
# ### Accumulating gradient for large batches
# We can increase the effective batch size by simply accumulating gradients over multiple forward passes. Note that `loss.backward()` simply adds the computed gradient to `tensor.grad`, so we can call this method multiple times before actually taking an optimizer step. However, this approach might be a little tricky to combine with batch normalization. Do you see why?
# -
# ! python3 -W ignore train.py --batch_size 128 --epochs 2 --gpu_enabled \
# --model_path model_state_dict_41.71.pcl \
# --data_path tiny-imagenet-200 \
# --model_module_path model.py \
# --model_checkpoint_path model_checkpoints \
# --effective_batch_size 1024
# +
# The model beat the previous score after 2 epochs :) and there is no noticeable slowdown in training speed.
# + colab={} colab_type="code" id="qbbbO7V0QeGT"
# from torch.utils.data import DataLoader
# effective_batch_size = 1024
# loader_batch_size = 32
# batches_per_update = effective_batch_size / loader_batch_size # Updating weights after 8 forward passes
# dataloader = DataLoader(dataset, batch_size=loader_batch_size)
# optimizer.zero_grad()
# for batch_i, (batch_X, batch_y) in enumerate(dataloader):
# l = loss(model(batch_X), batch_y)
# l.backward() # Adds gradients
# if (batch_i + 1) % batches_per_update == 0:
# optimizer.step()
# optimizer.zero_grad()
# + [markdown] colab_type="text" id="ZqxvZWH9Uxtq"
# **Task**. Explore the trade-off between computation time and memory usage while maintaining the same effective batch size. By effective batch size we mean the number of objects over which the loss is computed before taking a gradient step.
#
# **Requirements**
#
# * Compare compute between accumulating gradient and gradient checkpointing with similar memory consumptions
# * Incorporate gradient accumulation into your script with optional argument
# + [markdown] colab_type="text" id="K3iiJZuhSUR0"
# ## Accuracy vs compute trade-off
# + [markdown] colab_type="text" id="0WOWhqMJSboR"
# ### Tensor type size
#
# One of the hyperparameter affecting memory consumption is the precision (e.g. floating point number). The most popular choice is 32 bit however with several hacks* 16 bit arithmetics can save you approximately half of the memory without considerable loss of perfomance. This is called mixed precision training.
#
# *https://arxiv.org/pdf/1710.03740.pdf
# + [markdown] colab_type="text" id="-xAEF9aJc-43"
# ### Quantization
#
# We can actually move further and use even lower precision like 8-bit integers:
#
# * https://heartbeat.fritz.ai/8-bit-quantization-and-tensorflow-lite-speeding-up-mobile-inference-with-low-precision-a882dfcafbbd
# * https://nervanasystems.github.io/distiller/quantization/
# * https://arxiv.org/abs/1712.05877
# + [markdown] colab_type="text" id="fXad1svpSk8f"
# ### Knowledge distillation
# Suppose that we have a large network (*teacher network*) or an ensemble of networks which has a good accuracy. We can like train a much smaller network (*student network*) using the outputs of teacher networks. It turns out that the perfomance could be even better! This approach doesn't help with training speed, but can be quite beneficial when we'd like to reduce the model size for low-memory devices.
#
# * https://www.ttic.edu/dl/dark14.pdf
# * [Distilling the Knowledge in a Neural Network](https://arxiv.org/abs/1503.02531)
# * https://medium.com/neural-machines/knowledge-distillation-dc241d7c2322
#
# Even the completely different ([article](https://arxiv.org/abs/1711.10433)) architecture can be used in a student model, e.g. you can approximate an autoregressive model (WaveNet) by a non-autoregressive one.
#
# **Task**. Distill your (teacher) network with smaller one (student), compare it perfomance with the teacher network and with the same (student) trained directly from data.
#
# **Note**. Logits carry more information than the probabilities after softmax
#
# This approach doesn't help with training speed, but can be quite beneficial when we'd like to reduce the model size for low-memory devices.
# +
from imp import load_source
import numpy as np
import argparse
import sys
import time
import os
import torchvision
from torchvision import transforms
import torch
from torch.autograd import Variable
from torch.utils.checkpoint import checkpoint_sequential
# Standard ImageNet per-channel normalization statistics (RGB means / stds).
means = np.array([0.485, 0.456, 0.406])
stds = np.array([0.229, 0.224, 0.225])
# Fix the RNG seed so runs are reproducible.
random_seed = 42
torch.manual_seed(random_seed)
def count_score(model, batch_gen, accuracy_list, gpu=False):
    """Append each batch's top-1 accuracy of `model` over `batch_gen` to
    `accuracy_list` (mutated in place) and return it.

    Parameters
    ----------
    model : torch.nn.Module producing (batch, n_classes) logits.
    batch_gen : iterable of (X_batch, y_batch) pairs.
    accuracy_list : list extended with one float per batch.
    gpu : run the forward pass on CUDA when True.
    """
    model.train(False)  # disable dropout / use running averages for batch_norm
    # Fix: wrap inference in no_grad() so autograd graphs are not built for
    # every scoring batch, and drop the deprecated Variable wrapper.
    with torch.no_grad():
        for X_batch, y_batch in batch_gen:
            inputs = torch.as_tensor(X_batch, dtype=torch.float32)
            inputs = inputs.cuda() if gpu else inputs.cpu()
            logits = model(inputs)
            y_pred = logits.max(1)[1]  # index of the highest logit per row
            accuracy_list.append(np.mean((y_batch.cpu() == y_pred.cpu()).numpy()))
    return accuracy_list
def train(student_model, teacher_model, opt, loss_fn, model_checkpoint_path, data_path, use_checkpoint=False, gpu=False, batch_size=128, epochs=100, effective_batch_size=None):
    """Train `student_model` by distillation from `teacher_model` on Tiny ImageNet.

    The per-batch loss blends a distillation term and a ground-truth term:
    0.8 * loss_fn(student_logits, teacher_logits)
    + 0.2 * CrossEntropy(student_logits, labels).

    Saves a checkpoint after every epoch and the best-so-far model (by
    validation accuracy) under `model_checkpoint_path`; returns the student.

    NOTE(review): depends on module-level names `nn`, `means`, `stds` and
    `count_score`. `nn` is imported further down in this notebook, so that
    cell must have been executed before `train` is called.
    """
    # Augment the training split; validation intended to get normalization only.
    transform = {
        'train': transforms.Compose([
            transforms.RandomRotation((-30,30)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(means, stds)
        ]),
        'val': transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(means, stds)
        ])
    }
    # NOTE(review): both splits come from one ImageFolder built with the *train*
    # transform, so the validation split is also augmented -- confirm intent.
    dataset = torchvision.datasets.ImageFolder(os.path.join(data_path, 'train'), transform=transform['train'])
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, [80000, 20000])
    train_batch_gen = torch.utils.data.DataLoader(train_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=4)
    val_batch_gen = torch.utils.data.DataLoader(val_dataset,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=2)
    train_loss = []
    val_accuracy = []
    prev_val_acc = 0
    # Gradient accumulation: step the optimizer only every
    # effective_batch_size / batch_size forward/backward passes.
    if effective_batch_size is None:
        batches_per_update = 1
    else:
        batches_per_update = effective_batch_size / batch_size
    if use_checkpoint:
        # Prepared for torch.utils.checkpoint.checkpoint_sequential, but the
        # checkpointed forward below is not implemented yet (raises).
        segments = 4
        modules = [module for k, module in student_model._modules.items()]
    additional_criterion = nn.CrossEntropyLoss()
    for epoch in range(epochs):
        # In each epoch, we do a full pass over the training data:
        start_time = time.time()
        student_model.train(True)  # enable dropout / batch_norm training behavior
        forward_time = 0
        backward_time = 0
        for batch_i, (X_batch, y_batch) in enumerate(train_batch_gen):
            # train on batch
            start_counter = time.perf_counter()
            if gpu:
                X_batch = Variable(torch.FloatTensor(X_batch)).cuda()
                y_batch = Variable(torch.LongTensor(y_batch)).cuda()
                if use_checkpoint:
                    raise NotImplementedError()
                else:
                    logits = student_model.cuda()(X_batch)
                    # NOTE(review): the teacher forward pass is not wrapped in
                    # torch.no_grad(), so autograd state is built needlessly.
                    teacher_logits = teacher_model.cuda()(X_batch)
            else:
                X_batch = Variable(torch.FloatTensor(X_batch)).cpu()
                y_batch = Variable(torch.LongTensor(y_batch)).cpu()
                if use_checkpoint:
                    raise NotImplementedError()
                else:
                    logits = student_model.cpu()(X_batch)
                    teacher_logits = teacher_model.cpu()(X_batch)
            # 0.8 distillation (vs teacher logits) + 0.2 ground-truth cross-entropy
            loss = 0.8*loss_fn(logits, teacher_logits) + 0.2* additional_criterion(logits, y_batch)
            apply_counter = time.perf_counter()
            forward_time += apply_counter - start_counter
            loss.backward()  # accumulates into .grad until the next opt.step()
            backward_time += time.perf_counter() - apply_counter
            if (batch_i + 1) % batches_per_update == 0:
                opt.step()
                opt.zero_grad()
            train_loss.append(loss.data.cpu().numpy())
        val_accuracy = count_score(student_model, batch_gen=val_batch_gen, accuracy_list=val_accuracy, gpu=gpu)
        # mean accuracy over (approximately) this epoch's validation batches
        vall_acc = np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, epochs, time.time() - start_time))
        print(" training loss (in-iteration): \t{:.6f}".format(
            np.mean(train_loss[-len(train_dataset) // batch_size :])))
        print(" validation accuracy: \t\t\t{:.2f} %".format(vall_acc))
        # per-epoch checkpoint, named by epoch and validation accuracy
        torch.save(student_model.state_dict(), os.path.join(model_checkpoint_path, "model_{}_{:.2f}.pcl".format(epoch, vall_acc)))
        print("\t\tForward pass took {:.3f} seconds".format(forward_time))
        print("\t\tBackward pass took {:.3f} seconds".format(backward_time))
        if vall_acc > prev_val_acc:
            prev_val_acc = vall_acc
            print("Saving new best model!")
            torch.save(student_model.state_dict(), os.path.join(model_checkpoint_path, "model_best.pcl"))
    return student_model
def validate(model, data_path, batch_size):
    """Print test-set top-1 accuracy of `model` on the images in `<data_path>/new_val`.

    NOTE(review): inputs are moved to CUDA unconditionally, so this requires a
    GPU-resident model; a `gpu` flag (as in `count_score`) would be safer.
    """
    transform = {
        'test': transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(means, stds)
        ])
    }
    test_dataset = torchvision.datasets.ImageFolder(os.path.join(data_path, 'new_val'), transform=transform['test'])
    test_batch_gen = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=2)
    model.train(False)  # disable dropout / use averages for batch_norm
    test_acc = []
    # collect per-sample correctness flags across all test batches
    for X_batch, y_batch in test_batch_gen:
        logits = model(Variable(torch.FloatTensor(X_batch)).cuda())
        y_pred = logits.max(1)[1].data
        test_acc += list((y_batch.cpu() == y_pred.cpu()).numpy())
    test_accuracy = np.mean(test_acc)
    print("Final results:")
    print(" test accuracy:\t\t{:.2f} %".format(
        test_accuracy * 100))
# +
gpu_enabled=True
# +
if gpu_enabled:
    # also seed the CUDA RNG for reproducibility
    torch.cuda.manual_seed(random_seed)
# Load the pre-trained teacher network from model.py.
# NOTE(review): `load_source` (from the deprecated `imp` module) registers the
# module in sys.modules so the `from teacher_model import ...` below resolves.
load_source("teacher_model", "model.py")
from teacher_model import get_model as get_teacher_model
teacher_model, _, _ = get_teacher_model(model_path="model_checkpoints/model_best.pcl", gpu=gpu_enabled)
# -
teacher_model.train(False)  # eval mode: disable dropout / batch_norm updates
# +
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""
    def forward(self, input):
        return input.view(input.size(0), -1)


# Student network: two conv/BN/ReLU/pool stages followed by an MLP head.
# BUGFIX: the second stage was registered under the *same* module names as the
# first ('batchnorm1', 'ReLU1', 'MaxPool1'); nn.Sequential.add_module with an
# existing name replaces the earlier module in place, so the second-stage
# BatchNorm/ReLU/MaxPool were silently dropped and conv2's output fed into the
# dense head un-normalized, un-activated and un-pooled. The second stage now
# uses unique names, BatchNorm2d(32) matches conv2's 32 output channels, and
# dense1's input is 32 * 16 * 16 = 8192 (64x64 inputs after two 2x poolings).
student_model = nn.Sequential()
student_model.add_module('conv1', nn.Conv2d(3, 16, kernel_size=5, padding=2))
student_model.add_module('batchnorm1', nn.BatchNorm2d(16))
student_model.add_module('ReLU1', nn.ReLU())
student_model.add_module('MaxPool1', nn.MaxPool2d(2))
student_model.add_module('conv2', nn.Conv2d(16, 32, kernel_size=5, padding=2))
student_model.add_module('batchnorm2', nn.BatchNorm2d(32))
student_model.add_module('ReLU2', nn.ReLU())
student_model.add_module('MaxPool2', nn.MaxPool2d(2))
student_model.add_module('flatten', Flatten())
student_model.add_module('dense1', nn.Linear(8192, 1000))
student_model.add_module('dense1_relu', nn.ReLU())
student_model.add_module('dropout1', nn.Dropout(0.3))
student_model.add_module('dense2', nn.Linear(1000, 512))
student_model.add_module('dense2_relu', nn.ReLU())
student_model.add_module('dropout2', nn.Dropout(0.05))
student_model.add_module('dense2_logits', nn.Linear(512, 200))  # logits for 200 classes
student_model.eval()
# -
class RMSELoss(torch.nn.Module):
    """Root-mean-squared-error loss: sqrt(MSE(x, y))."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        mse = nn.MSELoss()(x, y)
        return mse.sqrt()
# Adam with mild L2 regularisation; plain MSE between student and teacher
# logits serves as the distillation loss.
opt = torch.optim.Adam(student_model.parameters(), lr=0.001, weight_decay=0.0001)
loss_fn = nn.MSELoss()
# +
# Distill the teacher into the student on tiny-imagenet. `train` is defined
# earlier in the notebook; gradients are accumulated so the effective batch
# size is 1024 while only 128 images are held in memory at once.
student_model = train(student_model=student_model, teacher_model=teacher_model,
                      opt=opt, loss_fn=loss_fn,
                      model_checkpoint_path='student_checkpoints',
                      data_path='tiny-imagenet-200',
                      gpu=gpu_enabled,
                      batch_size=128,
                      epochs=100,
                      use_checkpoint=False,
                      effective_batch_size=1024)
validate(student_model, data_path='tiny-imagenet-200', batch_size=128)
# +
# NOTE(review): this cell duplicates the one above — presumably re-run
# manually to continue training for more epochs; confirm intent.
student_model = train(student_model=student_model, teacher_model=teacher_model,
                      opt=opt, loss_fn=loss_fn,
                      model_checkpoint_path='student_checkpoints',
                      data_path='tiny-imagenet-200',
                      gpu=gpu_enabled,
                      batch_size=128,
                      epochs=100,
                      use_checkpoint=False,
                      effective_batch_size=1024)
# -
validate(student_model, data_path='tiny-imagenet-200', batch_size=128)
# On the same data this network reached 25.44 % accuracy in a comparable
# number of training steps, which demonstrates the practical value of the
# distillation approach.
# ### Pruning
#
# The idea of pruning is to remove unnecessary (in terms of loss) weights. It can be measured in different ways: for example, by the norm of the weights (similar to L1 feature selection), by the magnitude of the activation or via Taylor expansion*.
#
# One iteration of pruning consists of two steps:
#
# 1) Rank weights with some importance measure and remove the least important
#
# 2) Fine-tune the model
#
# This approach is a bit computationally heavy but can lead to drastic (up to 150x) decrease of memory to store the weights. Moreover if you make use of structure in layers you can decrease also compute. For example, the whole convolutional filters can be removed.
#
# *https://arxiv.org/pdf/1611.06440.pdf
| homework02/homework_advanced.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ms-python.python added
import os
# VS Code inserts this cell so the notebook runs with the project folder as
# the working directory.
try:
    os.chdir(os.path.join(os.getcwd(), 'video experiment'))
    print(os.getcwd())
except OSError:
    # Directory missing or inaccessible (e.g. already inside it) —
    # keep the current working directory. The original bare `except:`
    # would also have swallowed KeyboardInterrupt/SystemExit.
    pass
# +
from computerrefractored import Computer
import matplotlib.pyplot as plt
from collections import Counter
import numpy as np
# %load_ext autoreload
# %autoreload 2
from PIL import Image, ImageDraw
import cv2
# +
# Run the Advent of Code intcode "breakout" game to completion while
# recording each frame of the play-field to a video file.
noun, verb = 0,0
f=open('input.txt').read()
memory = tuple(int(i) for i in f.split(',')) # let's make it immutable as a tuple
# Intcode programs may address memory beyond the program text; append a
# large zero-filled region.
memsize = 10000
memory = tuple(list(memory)+[0]*memsize)
c = Computer(list(memory),noun,verb,[1])
s= {}  # screen state: (x, y) -> tile id
score = 0
c.code[0]=2  # writing 2 to address 0 switches the game to "free play" mode
balpos = 0   # current x coordinate of the ball
patchpos = 0 # current x coordinate of the paddle
steering = 0 # joystick input: -1 left, 0 neutral, 1 right
# fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
fourcc = cv2.VideoWriter_fourcc(*'MP42')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # last assignment wins
fps = 30
video_filename = 'output5largedims.avi'
out = cv2.VideoWriter(video_filename, fourcc, fps, (380, 220))
pic = np.zeros(3*380*220,dtype=np.uint8).reshape(380,220,3)
while c.running:
    # Trivial AI: steer the paddle toward the ball's x coordinate.
    if balpos > patchpos: steering = 1
    elif balpos < patchpos: steering = -1
    else: steering =0
    c.receiveinput(steering) #steer to patch to be under the ball
    # The intcode program emits outputs in triples: x, y, tile-id/score.
    x = c.run()
    y = c.run()
    z = c.run()
    if x==-1 and y==0:
        # (-1, 0) is the special "score update" coordinate pair.
        score = z
    elif x == 'ending execution':
        break
    else: s[(x,y)]=z # save object at location
    if z ==4:
        balpos = x # update x coordinate of ball
        # Render the full screen state into the frame buffer and append a
        # video frame every time the ball moves.
        for k,v in s.items():
            pic[k[0],k[1],:]=v*50  # scale tile ids into visible gray levels
        # gray = cv2.normalize(pic.T, None, 255, 0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        # gray_3c = cv2.merge([gray, gray, gray])
        out.write(np.swapaxes(pic,0,1))  # VideoWriter expects (height, width, 3)
    if z == 3:
        patchpos = x # update x coordinate of patch
    #print(f'x {x}, y {y}, z {z}, block{len([(k) for k,v in s.items() if v == 2])}')
print('final score',score)
out.release()
# -
# +
# Display the final rendered frame.
# NOTE(review): `.T` on a (380, 220, 3) array transposes to (3, 220, 380),
# which is not an (H, W, 3) image; np.swapaxes(pic, 0, 1) may be what was
# intended — confirm against the saved video output.
plt.imshow(pic[:,:,:].T, interpolation='nearest')
# -
| advent_of_code_2019/video experiment/solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import geopandas as gpd
import pandas as pd
from base import ROOT_DIR
from IPython.display import display
import matplotlib.pyplot as plt
import contextily as ctx
import seaborn as sns
import numpy as np
sns.set_style("whitegrid")
# + pycharm={"name": "#%%\n"}
# Load the three Duvernay source spreadsheets: well headers, formation tops
# and the production summary. The stray spaces inside the file names are
# present in the actual files on disk.
duvernay_folder = ROOT_DIR / "gtx/data/Duvernay/Duvernay"
duv_well_head = pd.read_excel(duvernay_folder / "Duvernay well headers SPE April 21 "
                                                "2021 .xlsx")
duv_well_tops = pd.read_excel(duvernay_folder / "Duvernay formation tops SPE April 20 "
                                                "2021.xlsx")
duv_prod = pd.read_excel(duvernay_folder / "SPE Duvernay production summary April 20 "
                                           "2021.xlsx")
print("----- Well headers -----")
display(duv_well_head.head())
# + pycharm={"name": "#%%\n"}
# Inspect the available columns before selecting the useful ones below.
duv_well_head.columns
# + pycharm={"name": "#%%\n"}
# Useful cols
td_col = "TD"   # total depth
uwi_col = "UWI" # unique well identifier
kb_col = "KB"   # kelly bushing elevation
# Source columns carry stray trailing spaces — keep them verbatim here and
# rename to the clean aliases just below.
useful_cols = ["TD meters ", "Elevation Meters", "UWI "]
lon_col = "SurfaceLongitude_NAD83"
lat_col = "SurfaceLatitude_NAD83"
# Build point geometries from the surface coordinates (NAD83, EPSG:4269).
geometry = gpd.points_from_xy(duv_well_head[lon_col], duv_well_head[lat_col])
duv_well_head.drop([lat_col, lon_col], axis=1, inplace=True)
gdf_wh = gpd.GeoDataFrame(duv_well_head[useful_cols],
                          crs="epsg:4269",
                          geometry=geometry)
gdf_wh.rename(columns={"UWI ": uwi_col,
                       "TD meters ": td_col,
                       "Elevation Meters": kb_col}, inplace=True)
display(gdf_wh.head())
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Convert well head to feet
# + pycharm={"name": "#%%\n"}
M_2_FT = 3.28084  # meters-to-feet conversion factor
# Convert total depth (TD) and kelly bushing elevation (KB) to feet.
gdf_wh[td_col] = gdf_wh[td_col] * M_2_FT
gdf_wh[kb_col] = gdf_wh[kb_col] * M_2_FT
# Quick sanity check of the converted TD distribution.
sns.displot(data=gdf_wh, x=td_col)
plt.title("Well's TD distribution")
plt.show()
print("Conversion finished successfully")
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 2 - Process Well Tops
# + pycharm={"name": "#%%\n"}
display(duv_well_tops.head())
print(duv_well_tops.columns)
# + pycharm={"name": "#%%\n"}
top_sstvd_col = "top_sstvd"
top_md_col = "top_md"
formation_col = "formation"
# Keep only the per-formation top columns (everything from the first
# "01_Battle..." column onwards), indexed by well UWI.
df_tops: pd.DataFrame = (duv_well_tops.set_index(uwi_col)
                         .loc[:, "01_Battle (Surbiton)[SSTVD] (m)":])
# Converting from wide format to tidy format
df_tops = df_tops.reset_index().melt(id_vars=uwi_col,
                                     var_name=formation_col,
                                     value_name=top_sstvd_col)
# Extracting the actual formation name,
# e.g. "01_Battle (Surbiton)[SSTVD] (m)" -> "Battle"
df_tops[formation_col] = df_tops[formation_col].str.extract(r"\d{2}_(.\S+)")
# Convert formation-top SSTVD values to feet (the sign is handled later
# when MD is computed from KB).
df_tops[top_sstvd_col] = df_tops[top_sstvd_col] * M_2_FT
print(df_tops[formation_col].unique())
display(df_tops.head())
print(df_tops.info())
print(df_tops[formation_col].unique())
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 2.1 - Eliminate nan values
# + pycharm={"name": "#%%\n"}
# Drop rows where a formation top is missing for a given well.
df_tops.dropna(inplace=True)
print(df_tops.info())
display(df_tops.head())
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 2.2 - Merge with well header data
# To get the elevation, so we can have the SSTVD
# + pycharm={"name": "#%%\n"}
# Merge KB elevation and geometry from the well headers so tops can be
# converted between SSTVD and measured depth (MD).
df_tops = gpd.GeoDataFrame(df_tops.merge(gdf_wh[[uwi_col, kb_col, "geometry"]],
                                         on=uwi_col))
# MD = KB - SSTVD (NOTE(review): section 3.3 below uses MD = SSTVD + KB;
# confirm which sign convention the source data expects).
df_tops[top_md_col] = -(df_tops[top_sstvd_col] - df_tops[kb_col])
df_tops.head()
# + pycharm={"name": "#%%\n"}
# Record the formation ordering once so all plots share the same axis order.
formations = df_tops[formation_col].unique()
formations
# + pycharm={"name": "#%%\n"}
# Box plot of top depth per formation.
fig_3, ax_3 = plt.subplots(1, 1, figsize=(12, 8))
sns.boxplot(x=formation_col, y=top_sstvd_col, data=df_tops, ax=ax_3, order=formations)
plt.title("SSTVD of Duvernay Formations")
plt.ylabel("Depth SSTVD (ft)")
plt.xlabel("Formation")
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
# + pycharm={"name": "#%%\n"}
# -
# ### 2.3 - Save Well tops to geopackage
# + pycharm={"name": "#%%\n"}
# Persist the merged tops as a GeoPackage layer for use in GIS tools.
gpkg_file_name = "duv.gpkg"
gpkg_save_path = ROOT_DIR / "gtx/eda_duv_oc" / gpkg_file_name
df_tops.to_file(gpkg_save_path, layer="well_tops", driver='GPKG')
# -
# ## 3 - Process True Temp data with formation tops
# ### 3.1 - Load the True Temp data
# + pycharm={"name": "#%%\n"}
# Load the "true temperature" training measurements for the Duvernay wells.
duv_true_temp = pd.read_excel(duvernay_folder / "Duvenay TrueTemp_Train.xlsx")
duv_true_temp.head()
# + pycharm={"name": "#%%\n"}
duv_true_temp.columns
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 3.2 - Convert depth to ft and temperature to Fahrenheit
# + pycharm={"name": "#%%\n"}
# Rename cols
sstvd_col = "SSTVD"
true_temp_col = "TRUE_TEMP"
duv_true_temp.rename(columns={"Depths subsea (m)": sstvd_col,
                              "True Temperature (oC)": true_temp_col}, inplace=True)
# Convert sstvd meters to ft
duv_true_temp[sstvd_col] = duv_true_temp[sstvd_col] * M_2_FT
# Celsius -> Fahrenheit
duv_true_temp[true_temp_col] = duv_true_temp[true_temp_col] * 1.8 + 32
display(duv_true_temp.head())
# + pycharm={"name": "#%%\n"}
# Keep a pristine copy for the machine-learning dataset built in part 4.
df_tt_ml = duv_true_temp.copy()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 3.3 - Merge TT with well header data
# + pycharm={"name": "#%%\n"}
#Get MD values by merging with well header data
md_col = "MD"
df_true_temp = duv_true_temp.merge(gdf_wh[[uwi_col, kb_col]], how="inner", on=uwi_col)
# MD = SSTVD + KB (NOTE(review): the tops section used MD = KB - SSTVD;
# confirm which sign convention is intended).
df_true_temp[md_col] = df_true_temp[sstvd_col] + df_true_temp[kb_col]
display(df_true_temp)
# -
# ### 3.4 - Assign formation names to depths
# + pycharm={"name": "#%%\n"}
# For every well, label each temperature sample with the formation whose
# top lies at or above the sample's MD. Later tops overwrite earlier
# labels, so the result depends on tops iterating shallow-to-deep within
# each group (NOTE(review): confirm that ordering holds in df_tops).
gr_tops = df_tops.groupby(uwi_col)
df_tt = pd.DataFrame()
for name, group in gr_tops:
    well_cond = df_true_temp[uwi_col] == name
    for row in group.itertuples(index=False):
        md_top = getattr(row, top_md_col)
        formation_name = getattr(row, formation_col)
        # Samples at or deeper than this formation top.
        formation_cond = df_true_temp[md_col] >= md_top
        if sum(formation_cond) >= 1:
            df_true_temp.loc[formation_cond & well_cond, formation_col] = formation_name
df_true_temp.info()
# + pycharm={"name": "#%%\n"}
display(df_true_temp.head())
display(df_true_temp.tail())
df_true_temp.columns
# + pycharm={"name": "#%%\n"}
# -
# ### 3.5 - Aggregate depth and temperature per formation and well
# + pycharm={"name": "#%%\n"}
# Average depth and temperature for every (well, formation) pair.
df_form_temp = (df_true_temp
                .groupby([uwi_col, formation_col])[[sstvd_col, true_temp_col]]
                .mean())
df_form_temp.reset_index(inplace=True)
df_form_temp.head(10)
# -
# ### 3.6 - Merge with well header geodataframe
# + pycharm={"name": "#%%\n"}
# Cast to GeoDataFrame
# Left-join the well geometries so the aggregates can be mapped and saved.
gdf_form_temp = gpd.GeoDataFrame(df_form_temp
                                 .merge(gdf_wh[[uwi_col, "geometry"]],
                                        how="left",
                                        on=uwi_col))
display(gdf_form_temp.head())
print(gdf_form_temp.info())
# -
# ### 3.7 - Save to Geopackage
# + pycharm={"name": "#%%\n"}
# Add the per-formation temperature layer to the same GeoPackage as 2.3.
gdf_form_temp.to_file(gpkg_save_path, layer="tops_temperature", driver='GPKG')
# + pycharm={"name": "#%%\n"}
gdf_form_temp[formation_col].unique()
# + pycharm={"name": "#%%\n"}
# Box plot of the averaged true temperature per formation.
fig_4, ax_4 = plt.subplots(1, 1, figsize=(12, 8))
sns.boxplot(x=formation_col, y=true_temp_col, data=gdf_form_temp, ax=ax_4,
            order=formations)
# BUG FIX: this notebook analyses the Duvernay play; the title previously
# said "Eaglebine" (copied from the sibling Eaglebine notebook).
plt.title("True Temperature Duvernay Formations")
plt.ylabel("True Temperature (oF)")
plt.xlabel("Formation")
plt.xticks(rotation=90)
# Depth-like convention: hotter (deeper) formations toward the bottom.
plt.gca().invert_yaxis()
plt.tight_layout()
plt.show()
# + pycharm={"name": "#%%\n"}
# Temperature-vs-depth scatter coloured by formation (thermal gradients).
fig_5, ax_5 = plt.subplots(1, 1, figsize=(12, 8))
sns.scatterplot(x=true_temp_col, y=sstvd_col,
                hue=formation_col, data=gdf_form_temp, hue_order=formations, ax=ax_5)
plt.title("True Temperature gradients")
plt.gca().invert_yaxis()
plt.show()
# + pycharm={"name": "#%%\n"}
# One regression panel per formation; figure is saved for the report.
g = sns.lmplot(x=true_temp_col, y=sstvd_col, hue=formation_col, data=gdf_form_temp,
               col=formation_col, col_wrap=4, col_order=formations, fit_reg=True)
plt.title("True Temperature gradients")
plt.gca().invert_yaxis()
plt.savefig("gradients.png")
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 4 - Create dataset for machine learning
# + pycharm={"name": "#%%\n"}
# Join raw temperature samples with well-header metadata and geometry.
df_tt = gpd.GeoDataFrame(df_tt_ml.merge(gdf_wh, how="inner", on=uwi_col))
display(df_tt.head())
df_tt.info()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 4.1 - Transform to NAD27 Zone 11
# + pycharm={"name": "#%%\n"}
# Project to a metric CRS (NAD27 / UTM zone 11N) so X/Y are in meters.
df_tt = df_tt.to_crs(epsg=26711)
display(df_tt.head())
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 4.2 - Transform to x and Y columns the geometry
# + pycharm={"name": "#%%\n"}
x_col, y_col = "X", "Y"
# + pycharm={"name": "#%%\n"}
# Materialise the projected coordinates as plain numeric feature columns.
df_tt[x_col] = df_tt["geometry"].x
df_tt[y_col] = df_tt["geometry"].y
# + pycharm={"name": "#%%\n"}
# Drop GIS/derived columns that should not be model features.
df_tt.drop(["geometry", td_col, kb_col], axis=1, inplace=True)
df_tt.info()
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 4.3 - Save to csv
# + pycharm={"name": "#%%\n"}
tt_ml_file_name = "dataset_duv.csv"
df_tt.to_csv(ROOT_DIR / "gtx/ml_duv" / tt_ml_file_name, index=False)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 5 - Process contest data for prediction
# + pycharm={"name": "#%%\n"}
df_contest = pd.read_csv(ROOT_DIR / "gtx/data/set_assign.csv")
df_contest.head()
# + pycharm={"name": "#%%\n"}
# Build the held-out prediction set: contest wells marked for
# validation/testing, with the same feature columns as the training CSV.
cond_test = df_contest["Set"] == "Validation_Testing"
df_cont_duv = gpd.GeoDataFrame(df_contest[cond_test].merge(gdf_wh,
                                                           how="inner",
                                                           on=uwi_col))
# Depth at total depth: TD minus KB elevation (NOTE(review): sign
# convention differs from section 3.3 — confirm).
df_cont_duv[sstvd_col] = df_cont_duv[td_col] - df_cont_duv[kb_col]
df_cont_duv[x_col] = df_cont_duv["geometry"].x
df_cont_duv[y_col] = df_cont_duv["geometry"].y
df_cont_duv.drop(["Set", td_col, kb_col, "geometry"], axis=1, inplace=True)
df_cont_duv.info()
# + pycharm={"name": "#%%\n"}
test_file_name = "test_duv.csv"
df_cont_duv.to_csv(ROOT_DIR / "gtx/ml_duv" / test_file_name, index=False)
# + pycharm={"name": "#%%\n"}
| gtx/eda_duv_oc/eda_duv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# name: python3
# ---
# # Intro
import pandas as pd
import quandl
import math
# numpy is a computing library; importing it to be able to use arrays
import numpy as np
# preprocessing to scale data on the features, goal is to get features in between -1 and 1
# svm == support vector machines
# model_selection to create training and testing samples
from sklearn import preprocessing, svm, model_selection
from sklearn.linear_model import LinearRegression
# Download the full Alphabet (GOOGL) price history from Quandl.
df = quandl.get('WIKI/GOOGL')
df.head()
# Keep only the split/dividend-adjusted columns.
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df.head()
# Create a new column: Percent Volatility (daily high-low spread)
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Low'] * 100
df.head()
# Create column for daily percent change
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100
df.head()
# Final feature set for the regression model.
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
df.head()
# # Regression Features and Labels
forcast_col = 'Adj. Close'
# in Pandas na can == NaN
# replace NaN with a large-magnitude placeholder in order not to sacrifice
# data, and treat it as an outlier
df.fillna(-99999, inplace=True)
# Generally we use regression to forecast out
# Here we're trying to predict 1% of the df
forcast_out = int(math.ceil(0.01*len(df)))
print(forcast_out)
# forcast_out is the number of days predicted in advance;
# columns will be shifted up
# this way the 'labels' column for each row will be the Adj. Close price
# forcast_out days into the future (not a single day)
df['labels'] = df[forcast_col].shift(-forcast_out)
# Shifting leaves NaN labels in the last forcast_out rows; drop them.
df.dropna(inplace=True)
df.head()
# # Regression Training and Testing
# +
# Defined features will be a capital X
# Features are everything except the label column
# df.drop() automatically returns a new df
X = np.array(df.drop(['labels'], axis=1))  # axis keyword: positional axis is removed in pandas 2.x
# Here we're scaling X before we feed it through the classifier
X = preprocessing.scale(X)
# Labels will be a lower case y
# (the original assigned y twice with the identical expression — once is enough)
y = np.array(df['labels'])
print(len(X), len(y))
# +
# training and testing X and y
# test_size == 20% of the data held out for testing
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
# for X_train and y_train, we use to fit our classifiers
# n_jobs=-1 uses all available CPU cores
classifier = LinearRegression(n_jobs=-1)
# to fit or train a classifier
classifier.fit(X_train, y_train)
# fit is synonymous with train, score is synonymous with test
accuracy = classifier.score(X_test, y_test)
# we want to train and test on separate data
# with LinearRegression, score() returns the coefficient of
# determination (R^2), not a raw squared error
print(accuracy)
# -
# Using SVM instead of LinearRegression
classifierSVM = svm.SVR(kernel='poly')
# BUG FIX: the original never fitted the SVR and scored the
# LinearRegression model again; train the SVM and score it on the same
# held-out data so the comparison is meaningful.
classifierSVM.fit(X_train, y_train)
accuracySVM = classifierSVM.score(X_test, y_test)
print(accuracySVM)
| regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Job Shop Scheduling Sample
#
# ## Introduction
# Job shop scheduling is a common and important problem in many industries. For example, in the automobile industry manufacturing a car involves many different types of operations which are performed by a number of specialized machines - optimizing the production line to minimize manufacturing time can make for significant cost savings.
#
# The job shop scheduling problem is defined as follows: you have a set of jobs ($J_0, J_1, J_2, \dots, J_{a-1} \text{, where } a \text{ is the total number of jobs}$), which have various processing times and need to be processed using a set of machines ($m_0, m_1, m_2, \dots, m_{b-1}\text{, where } b \text{ is the total number of machines}$). The goal is to complete all jobs in the shortest time possible. This is called minimizing the **makespan**.
#
# Each job consists of a set of operations, and the operations must be performed in the correct order to complete that job.
#
# In this sample, we'll introduce the necessary concepts and tools for describing this problem in terms of a penalty model, and then solve an example problem using the Azure Quantum Optimization service.
#
# Imagine, for example, that you have a to-do list. Each item on the list is a **job** using this new terminology.
#
# Each job in this list consists of a set of operations, and each operation has a processing time. You also have some tools at hand that you can use to complete these jobs (the **machines**).
#
# TODOs:
#
# - Pay electricity bill
# 1. Log in to site (*2 minutes*) - **computer**
# 2. Pay bill (*1 minute*) - **computer**
# 3. Print receipt (*3 minutes*) - **printer**
#
#
# - Plan camping trip
# 1. Pick campsite (*2 minutes*) - **computer**
# 2. Pay online (*2 minutes*) - **computer**
# 3. Print receipt (*3 minutes*) - **printer**
#
#
# - Book dentist appointment
# 1. Choose time (*1 minute*) - **computer**
# 2. Pay online (*2 minutes*) - **computer**
# 3. Print receipt (*3 minutes*) - **printer**
# 4. Guiltily floss your teeth (*2 minutes*) - **tooth floss**
#
# But there are some constraints:
#
# 1. Each of the tasks (**operations**) in a todo (**job**) must take place in order. You can't print the receipt before you have made the payment! This is called a **precedence constraint**.
# 2. You start an operation only once, and once started it must be completed before you do anything else. There's no time for procrastination! This is called the **operation-once constraint**.
# 3. Each tool (**machine**) can only do one thing at a time. You can't simultaneously print two receipts unless you invest in multiple printers. This is the **no-overlap constraint**.
# ## Cost functions
#
# The rest of this sample will be spent constructing what is known as a **cost function**, which is used to represent the problem. This cost function is what will be submitted to the Azure Quantum Optimization solver.
#
# > **NOTE**:
# > If you have completed the Microsoft Quantum Learn Module [Solve optimization problems by using quantum-inspired optimization](https://docs.microsoft.com/learn/modules/solve-quantum-inspired-optimization-problems/), this concept should already be familiar. A simplified version of this job shop sample is also available [here](https://docs.microsoft.com/learn/modules/solve-job-shop-optimization-azure-quantum/) on MS Learn.
#
# Each point on a cost function represents a different solution configuration - in this case, each configuration is a particular assignment of starting times for the operations you are looking to schedule. The goal of the optimization is to minimize the cost of the solution - in this instance the aim is to minimize the amount of time taken to complete all operations.
#
# Before you can submit the problem to the Azure Quantum solvers, you'll need to transform it to a representation that the solvers can work with. This is done by creating an array of `Term` objects, representing the problem constraints. Positive terms penalize certain solution configurations, while negative ones support them. By adding penalties to terms that break the constraints, you increase the relative cost of those configurations and reduce the likelihood that the optimizer will settle for these suboptimal solutions.
#
# The idea is to make these invalid solutions so expensive that the solver can easily locate valid, low-cost solutions by navigating to low points (minima) in the cost function. However, you must also ensure that these solutions are not so expensive as to create peaks in the cost function that are so high that the solver can't travel over them to discover better optima on the other side.
# ## Azure Quantum setup
#
# The Azure Quantum Optimization service is exposed via a Python SDK, which you will be making use of during the rest of this sample. This means that before you get started with formulating the problem, you first need to import some Python modules and set up an Azure Quantum `Workspace`.
#
# You will need to enter your Azure Quantum workspace details in the cell below before you run it:
# +
from typing import List
from azure.quantum.optimization import Term
from azure.quantum import Workspace
# Azure Quantum workspace handle; fill in your own subscription details
# before running this cell.
workspace = Workspace (
    subscription_id = "", # Add your subscription_id
    resource_group = "", # Add your resource_group
    name = "", # Add your workspace name
    location = "" # Add your workspace location (for example, "westus")
)
# Authenticate against the Azure Quantum service (may open a login prompt).
workspace.login()
# -
# ## Problem formulation
#
# Now that you have set up your development environment, you can start to formulate the problem.
#
# The first step is to take the constraints identified above and formulate them as mathematical equations that you can work with.
#
# Let's first introduce some notation because we are lazy and also want to avoid carpal tunnel syndrome.
#
# Let's stick with the previous example of the todo list:
#
# - $J_{0}$: Pay electricity bill
# - $O_{0}$: Log in to site (*2 minutes*) - **computer**
# - $O_{1}$: Pay bill (*1 minute*) - **computer**
# - $O_{2}$: Print receipt (*3 minutes*) - **printer**
#
#
# - $J_{1}$: Plan camping trip
# - $O_{3}$: Pick campsite (*2 minutes*) - **computer**
# - $O_{4}$: Pay online (*2 minutes*) - **computer**
# - $O_{5}$: Print receipt (*3 minutes*) - **printer**
#
#
# - $J_{2}$: Book dentist appointment
# - $O_{6}$: Choose time (*1 minute*) - **computer**
# - $O_{7}$: Pay online (*2 minutes*) - **computer**
# - $O_{8}$: Print receipt (*3 minutes*) - **printer**
# - $O_{9}$: Guiltily floss your teeth (*2 minutes*) - **tooth floss**
#
# Above, you can see that the jobs have been labeled as $J$ and assigned index numbers $0$, $1$ and $2$, to represent each of the three tasks you have. The operations that make up each job have also been defined, and are represented by the letter $O$.
#
# To make it easier to code up later, all operations are identified with a continuous index number rather than, for example, starting from $0$ for each job. This allows you to keep track of operations by their ID numbers in the code and schedule them according to the constraints and machine availability. You can tie the operations back to their jobs later on using a reference.
#
# Below, you see how these definitions combine to give us a mathematical formulation for the jobs:
#
# $$
# \begin{align}
# J_{0} &= \{O_{0}, O_{1}, O_{2}\} \\
# J_{1} &= \{O_{3}, O_{4}, O_{5}\} \\
# J_{2} &= \{O_{6}, O_{7}, O_{8}, O_{9}\} \\
# \end{align}
# $$
#
# **More generally:**
#
# $$
# \begin{align}
# J_{0} &= \{O_{0}, O_{1}, \ldots , O_{k_{0}-1}\} \text{, where } k_{0} = n_{0} \text{, the number of operations in job } J_{0}\\
# \\
# J_{1} &= \{O_{k_{0}}, O_{k_{0}+1}, \ldots , O_{k_{1}-1}\} \text{, where } k_{1} = n_{0} + n_{1} \text{, the number of operations in jobs } J_{0} \text{ and } J_{1} \text{ combined}\\
# \\
# &\vdots \\
# \\
# J_{n-1} &= \{O_{k_{n-2}}, O_{k_{n-2}+1}, \ldots , O_{k_{n-1}-1}\} \text{, where } k_{n-1} = \text{ the total number of operations across all jobs }\\
# \end{align}
# $$
#
# The next piece of notation you will need is a binary variable, which will be called $x_{i, t}$.
#
# You will use this variable to represent whether an operation starts at time $t$ or not:
#
# $$
# \begin{align}
# \text{If } x_{i,t} &= 1, \text{ } O_i\text{ starts at time } \textit{t} \\
# \text{If } x_{i,t} &= 0, \text{ } O_i\text{ does not start at time } \textit{t} \\
# \end{align}
# $$
#
# Because $x_{i, t}$ can take the value of either $0$ or $1$, this is known as a binary optimization problem. More generally, this is called a polynomial unconstrained binary optimization (or PUBO) problem. You may also see these PUBO problems referred to as Higher Order Binomial Optimization (HOBO) problems - these terms both refer to the same thing.
#
# $t$ is used to represent the time. It goes from time $0$ to $T - 1$ in integer steps. $T$ is the latest time an operation can be scheduled:
#
# $$0 \leq t < T$$
#
# Lastly, $p_{i}$ is defined to be the processing time for operation $i$ - the amount of time it takes for operation $i$ ($O_{i}$) to complete:
#
# $$\text{If } O_{i} \text{ starts at time } \textit{t} \text{, it will finish at time } t + p_{i}$$
# $$\text{If } O_{i+1} \text{ starts at time } \textit{s} \text{, it will finish at time } s + p_{i+1}$$
#
# Now that the terms have been defined, you can move on to formulating the problem.
#
# The first step is to represent the constraints mathematically. This will be done using a penalty model - every time the optimizer explores a solution that violates one or more constraints, you need to give that solution a penalty:
#
# | Constraint | Penalty condition |
# |---|---|
# |**Precedence constraint**<br>Operations in a job must take place in order.|Assign penalty every time $O_{i+1}$ starts before $O_{i}$ has finished (they start out of order).|
# |**Operation-once constraint**<br>Each operation is started once and only once.|Assign penalty if an operation isn't scheduled within the allowed time.<br>**Assumption:** if an operation starts, it runs to completion.|
# |**No-overlap constraint**<br>Machines can only do one thing at a time.|Assign penalty every time two operations on a single machine are scheduled to run at the same time.|
#
# You will also need to define an objective function, which will minimize the time taken to complete all operations (the **makespan**).
#
# ## Expressing a cost function using the Azure Quantum Optimization SDK
#
# As you will see during the exploration of the cost function and its constituent penalty terms below, the overall cost function is quadratic (because the highest order polynomial term you have is squared). This makes this problem a **Quadratic Unconstrained Binary Optimization (QUBO)** problem, which is a specific subset of **Polynomial Unconstrained Binary Optimization (PUBO)** problems (which allow for higher-order polynomial terms than quadratic). Fortunately, the Azure Quantum Optimization service is set up to accept PUBO (and Ising) problems, which means you don't need to modify the above representation to fit the solver.
#
# As introduced above, the binary variables over which you are optimizing are the operation starting times $x_{i,t}$. Instead of using two separate indices as in the mathematical formulation, you will need to define a singly-indexed binary variable $x_{i \cdot T + t}$. Given time steps $t \in [0, T-1]$, every operation $i$ contributes $T$ indices. The operation starts at the value of $t$ for which $x_{i \cdot T + t}$ equals 1.
#
# In order to submit a problem to the Azure Quantum services, you will first be creating a `Problem` instance. This is a Python object that stores all the required information, such as the cost function details and what kind of problem we are modeling.
#
# To represent cost functions, we'll make use of a formulation using `Term` objects. Ultimately, any polynomial cost function can be written as a simple sum of products. That is, the function can be rewritten to have the following form, where $p_k$ indicates a product over the problem variables $x_0, x_1, \dots$:
#
# $$ H(x) = \sum_k \alpha_k \cdot p_k(x_0, x_1, \dots) $$
#
# $$ \text{e.g. } H(x) = 5 \cdot (x_0) + 2 \cdot (x_1 \cdot x_2) - 3 \cdot ({x_3}^2) $$
#
# In this form, every term in the sum has a coefficient $\alpha_k$ and a product $p_k$. In the `Problem` instance, each term in the sum is represented by a `Term` object, with parameters `c` - corresponding to the coefficient, and `indices` - corresponding to the product. Specifically, the `indices` parameter is populated with the indices of all variables appearing in the term. For instance, the term $2 \cdot (x_1 \cdot x_2)$ translates to the following object: `Term(c=2, indices=[1,2])`.
#
# More generally, `Term` objects take on the following form:
#
# ```python
# Term(c: float, indices: []) # Constant terms like +1
# Term(c: float, indices: [int]) # Linear terms like x
# Term(c: float, indices: [int, int]) # Quadratic terms like x^2 or xy
# ```
#
# If there were higher order terms (cubed, for example), you would just add more elements to the indices array, like so:
#
# ```python
# Term(c: float, indices: [int, int, int, ...])
# ```
# ## Defining problem parameters in code
#
# Now that you've defined the problem parameters mathematically, you can transform this information to code. The following two code snippets show how this is done.
#
# First, the helper function `process_config` is defined:
def process_config(jobs_ops_map: dict, machines_ops_map: dict, processing_time: dict, T: int):
    """
    Validate the problem configuration and derive the operation-to-job lookup.

    Keyword arguments:
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    machines_ops_map (dict): Mapping of operations to machines, e.g.:
        machines_ops_map = {
            0: [0,1], # Operations 0 & 1 assigned to machine 0
            1: [2,3]  # Operations 2 & 3 assigned to machine 1
        }
    processing_time (dict): Operation processing times
    T (int): Allowed time (jobs can only be scheduled below this limit)

    Returns a tuple (ops_jobs_map, T): the inverse map of operations to jobs,
    and T capped at the fully-sequential makespan.
    """
    # Running every operation back-to-back is the worst case, so the schedule
    # never needs more time than the sum of all processing times.
    sequential_makespan = sum(processing_time.values())
    T = min(sequential_makespan, T)
    # Normalize machine assignments so each machine's operation list is ascending.
    for machine in machines_ops_map:
        machines_ops_map[machine] = sorted(machines_ops_map[machine])
    inverse_map = {}
    for job, operations in jobs_ops_map.items():
        # Operation IDs inside a job must already be ascending.
        assert (operations == sorted(operations)), f"Operation IDs within a job must be in ascending order. Job was: {job}: {operations}"
        for operation in operations:
            # Each operation ID may belong to exactly one job.
            assert (operation not in inverse_map.keys()), f"Operation IDs must be unique. Duplicate ID was: {operation}"
            inverse_map[operation] = job
    return inverse_map, T
# Below, you can see the code representation of the problem parameters: the maximum allowed time `T`, the operation processing times `processing_time`, the mapping of operations to jobs (`jobs_ops_map` and `ops_jobs_map`), and the assignment of operations to machines (`machines_ops_map`).
# +
# Set problem parameters for the toy job shop instance described above
## Allowed time (jobs can only be scheduled below this limit)
T = 21
## Processing time for each operation, keyed by operation ID
processing_time = {0: 2, 1: 1, 2: 3, 3: 2, 4: 2, 5: 3, 6: 1, 7: 2, 8: 3, 9: 2}
## Assignment of operations to jobs (job ID: [operation IDs])
### Operation IDs within a job must be in ascending order
jobs_ops_map = {
0: [0, 1, 2], # Pay electricity bill
1: [3, 4, 5], # Plan camping trip
2: [6, 7, 8, 9] # Book dentist appointment
}
## Assignment of operations to machines
### Ten operations across three jobs, three machines
machines_ops_map = {
0: [0, 1, 3, 4, 6, 7], # Operations 0, 1, 3, 4, 6 and 7 are assigned to machine 0 (the computer)
1: [2, 5, 8], # Operations 2, 5 and 8 are assigned to machine 1 (the printer)
2: [9] # Operation 9 is assigned to machine 2 (the tooth floss)
}
## Inverse mapping of operations to jobs; also caps T at the sequential makespan
ops_jobs_map, T = process_config(jobs_ops_map, machines_ops_map, processing_time, T)
# -
# In the next sections, you will construct mathematical representations of the penalty terms and use these to build the cost function, which will be of the format:
#
# $$H(x) = \alpha \cdot f(x) + \beta \cdot g(x) + \gamma \cdot h(x) + \delta \cdot k(x) $$
#
# where:
#
# $$f(x) \text{, } g(x) \text{ and } h(x) \text{ represent the penalty functions.}$$
# $$k(x) \text{ represents the objective function.}$$
# $$\alpha, \beta, \gamma \text{ and } \delta \text{ represent the different weights assigned to the penalties.}$$
#
# The weights represent how important each penalty function is, relative to all the others. In the following units, you will learn how to build these penalty and objective functions, combine them to form the cost function $H(x)$, and solve the problem using Azure Quantum. Over the rest of this sample, you will learn how to build these penalty and objective functions, combine them to form the cost function $H(x)$, and solve the problem using Azure Quantum.
#
# To do this, you will explore how to formulate each of these constraints mathematically, and how this translates to code.
# ## Precedence constraint
#
# The precedence constraint is defined as follows:
#
# | Constraint | Penalty condition |
# |---|---|
# |**Precedence constraint**<br>Operations in a job must take place in order.|Assign penalty every time $O_{i+1}$ starts before $O_{i}$ has finished (they start out of order).|
#
# ### Worked Example
#
# Let's take job 1 ($J_{1}$) as an example:
#
# - $J_{1}$: Plan camping trip
# - $O_{3}$: Pick campsite (*2 minutes*)
# - $O_{4}$: Pay online (*2 minutes*)
# - $O_{5}$: Print receipt (*3 minutes*)
#
# Let's formulate the penalty conditions for $O_{3}$ and $O_{4}$: you want to add a penalty if $O_{4}$ starts before $O_{3}$ finishes. First, you'll define our terms and set some of their values:
#
# $$\text{Total simulation time } T = 4$$
# $$O_{3} \text{ processing time: } p_{3} = 2$$
# $$O_{3} \text{ starts at time } \textit{t} \text{, and finishes at time } t+p_{3}$$
#
# $$O_{3} \text{ starts at any time } 0 \leq t < T $$
# $$O_{4} \text{ can start at time } s \geq t + p_{3} $$
#
# $O_{3}$'s finishing time is given by adding its processing time $p_{3}$ (which we've set to be 2) to its start time $t$. You can see the start and end times for $O_{3}$ in the table below:
#
# | $t$ | $t + p_{3}$|
# |---|---|
# |0|2|
# |1|3|
# |2|4|
#
# To avoid violating this constraint, the start time of $O_{4}$ (denoted by $s$) must be greater than or equal to the end time of $O_{3}$, like we see in the next column:
#
# | $t$ | $t + p_{3}$|$s \geq t+p_{3}$|
# |---|---|---|
# |0|2|2, 3, 4|
# |1|3|3, 4|
# |2|4|4|
# ||**Valid configuration?**|✔|
#
# The ✔ means that any $s$ value in this column is valid, as it doesn't violate the precedence constraint.
#
# Conversely, if $s$ is less than $t + p_{3}$ (meaning $O_{4}$ starts before $O_{3}$ finishes), you need to add a penalty. Invalid $s$ values for this example are shown in the rightmost column:
#
# | $t$ | $t + p_{3}$|$s \geq t+p_{3}$|$s < t+p_{3}$|
# |---|---|---|---|
# |0|2|2, 3, 4|0, 1|
# |1|3|3, 4|0, 1, 2|
# |2|4|4|0, 1, 2, 3|
# ||**Valid configuration?**|✔|✘|
#
# In the table above, ✘ has been used to denote that any $s$ value in the last column is invalid, as it violates the precedence constraint.
#
# ### Penalty Formulation
#
# This is formulated as a penalty by counting every time consecutive operations $O_{i}$ and $O_{i + 1}$ in a job take place out of order.
#
# As you saw above: for an operation $O_{i}$, if the start time of $O_{i + 1}$ (denoted by $s$) is less than the start time of $O_{i}$ (denoted by $t$) plus its processing time $p_{i}$, then that counts as a penalty. Mathematically, this penalty condition looks like: $s < t + p_{i}$.
#
# You sum that penalty over all the operations of a job ($J_{n}$) for all the jobs:
# $$f(x) = \sum_{k_{n-1} \leq i < k_n - 1,\; s < t + p_{i}}x_{i,t}\cdot x_{i+1,s} \text{ for each job } \textit{n}.$$
#
# Let's break that down:
#
# - $k_{n-1} \leq i < k_{n} - 1$
#
# This means you sum over all consecutive operation pairs $(O_{i}, O_{i+1})$ within a single job.
#
#
# - $s < t + p_{i}$
#
# This is the penalty condition - any operation that satisfies this condition is in violation of the precedence constraint.
#
#
# - $x_{i, t}\cdot x_{i+1, s}$
#
# This represents the table you saw in the example above, where $t$ is allowed to vary from $0 \rightarrow T - 1$ and you assign a penalty whenever the constraint is violated (when $s < t + p_{i}$).
#
# This translates to a nested `for` loop: the outer loop has limits $0 \leq t < T$ and the inner loop has limits $0 \leq s < t + p_{i}$
#
# ### Code
#
# Using the mathematical formulation and the breakdown above, you can now translate this constraint function to code. You will see the `weight` argument included in this code snippet - this will be assigned a value later on when you call the function:
# +
"""
# Reminder of the relevant parameters
## Time to allow for all jobs to complete
T = 21
## Processing time for each operation
processing_time = {0: 2, 1: 1, 2: 3, 3: 2, 4: 2, 5: 3, 6: 1, 7: 2, 8: 3, 9: 2}
## Assignment of operations to jobs (job ID: [operation IDs])
### Operation IDs within a job must be in ascending order
jobs_ops_map = {
0: [0, 1, 2], # Pay electricity bill
1: [3, 4, 5], # Plan camping trip
2: [6, 7, 8, 9] # Book dentist appointment
}
"""
def precedence_constraint(jobs_ops_map:dict, T:int, processing_time:dict, weight:float):
    """
    Build the penalty terms enforcing the precedence constraint.

    A term is emitted for every pair of start times (t, s) in which a job's
    successor operation O_{i+1} would begin (at s) before its predecessor O_i
    (started at t) has finished, i.e. whenever s < t + p_i.

    Keyword arguments:
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    T (int): Allowed time (jobs can only be scheduled below this limit)
    processing_time (dict): Operation processing times
    weight (float): Relative importance of this constraint
    """
    terms = []
    for ops in jobs_ops_map.values():
        # Walk consecutive (predecessor, successor) operation pairs in this job.
        for earlier, later in zip(ops, ops[1:]):
            duration = processing_time[earlier]
            for t in range(T):
                # Any successor start time s < t + p_earlier violates precedence.
                for s in range(min(t + duration, T)):
                    terms.append(Term(c=weight, indices=[earlier * T + t, later * T + s]))
    return terms
# -
# > **NOTE**:
# > This nested loop structure is probably not the most efficient way to do this but it is the most direct comparison to the mathematical formulation.
# ## Operation-once constraint
#
# The operation-once constraint is defined as follows:
#
# | Constraint | Penalty condition |
# |---|---|
# |**Operation-once constraint**<br>Each operation is started once and only once.|Assign penalty if an operation isn't scheduled within the allowed time.<br>**Assumption:** if an operation starts, it runs to completion.|
#
# #### Worked Example
#
# We will again take job 1 ($J_{1}$) as an example:
#
# - $J_{1}$: Plan camping trip
# - $O_{3}$: Pick campsite (*2 minutes*)
# - $O_{4}$: Pay online (*2 minutes*)
# - $O_{5}$: Print receipt (*3 minutes*)
#
# Recall the variable $x_{i,t}$:
#
# $$
# \begin{align}
# \text{If } x_{i,t} &= 1, \text{ } O_i\text{ starts at time } \textit{t} \\
# \text{If } x_{i,t} &= 0, \text{ } O_i\text{ does not start at time } \textit{t} \\
# \end{align}
# $$
#
# According to this constraint, $x_{i,t}$ for a specific operation should equal 1 **once and only once** from $t = 0 \rightarrow T - 1$ (because it should start once and only once during the allowed time).
#
# So in this case, you need to assign a penalty if the sum of $x_{i,t}$ for each operation across all allowed times doesn't equal exactly 1.
#
# Let's take $O_{3}$ as an example again:
#
# |$t$|$x_{3,t}$|
# |---|---|
# |0|0|
# |1|1|
# |2|0|
# |$\sum_t {x_{3,t}} =$|1|
# |**Valid configuration?**|✔|
#
# In the right hand column, you see that $O_{3}$ starts at time 1 and no other time ($x_{3,t} = 1$ at time $t = 1$ and is $0$ otherwise). The sum of $x_{i,t}$ values over all $t$ for this example is therefore 1, which is what is expected! This is therefore a valid solution.
#
# In the example below, you see an instance where $O_{3}$ is scheduled more than once ($x_{3,t} = 1$ more than once), in violation of the constraint:
#
# |$t$|$x_{3,t}$|
# |---|---|
# |0|0|
# |1|1|
# |2|1|
# |$\sum_t {x_{3,t}} =$|2|
# |**Valid configuration?**|✘|
#
# You can see from the above that $O_{3}$ has been scheduled to start at both time 1 and time 2, so the sum of $x_{i,t}$ values over all $t$ is now greater than 1. This violates the constraint and thus you must apply a penalty.
#
# In the last example, you see an instance where $O_{3}$ has not been scheduled at all:
#
# |$t$|$x_{3,t}$|
# |---|---|
# |0|0|
# |1|0|
# |2|0|
# |$\sum_t {x_{3,t}} =$|0|
# |**Valid configuration?**|✘|
#
# In this example, none of the $x_{3,t}$ values equal 1 for any time in the simulation, meaning the operation is never scheduled. This means that the sum of $x_{3,t}$ values over all $t$ is 0 - the constraint is once again violated and you must allocate a penalty.
#
# In summary:
#
# |$t$|$x_{3,t}$|$x_{3,t}$|$x_{3,t}$|
# |---|---|---|---|
# |0|0|0|0|
# |1|1|1|0|
# |2|0|1|0|
# |$\sum_t {x_{3,t}} =$|1|2|0|
# |**Valid configuration?**|✔|✘|✘|
#
# Now that you understand when to assign penalties, let's formulate the constraint mathematically.
#
# ### Penalty Formulation
#
# As seen previously, you want to assign a penalty whenever the sum of $x_{i,t}$ values across all possible $t$ values is not equal to 1. This is how you represent that mathematically:
#
# $$g(x) = \sum_{i} \left(\left(\sum_{0\leq t < T} x_{i,t}\right) - 1\right)^2.$$
#
# Let's break that down:
#
# - $\left(\sum_{0\leq t < T} x_{i,t}\right) - 1$
#
# As you saw in the sum row of the tables in the worked example, $\sum_{0\leq t < T} x_{i,t}$ should always equal exactly 1 (meaning that an operation must be scheduled **once and only once** during the allowed time). This means that $\left(\sum_{0\leq t < T} x_{i,t}\right) - 1$ should always give 0. This means there is no penalty assigned when the constraint is not violated.
#
# In the case where $\sum_{0\leq t < T} x_{i,t} > 1$ (meaning an operation is scheduled to start more than once, like in the second example above), you now have a positive, non-zero penalty term as $\left(\sum_{0\leq t < T} x_{i,t}\right) - 1 > 0$.
#
# In the case where $\sum_{0\leq t < T} x_{i,t} = 0$ (meaning an operation is never scheduled to start, like in the last example above), you now have a $-1$ penalty term as $\left(\sum_{0\leq t < T} x_{i,t}\right) - 1 = 0 - 1 = -1$.
#
#
# - $\left(\sum\dots\right)^2$
#
# Because the penalty terms must always be positive (otherwise you would be *reducing* the penalty when an operation isn't scheduled), you must square the result of $\left(\sum_{0\leq t < T} x_{i,t}\right) - 1$.
#
# This ensures that the penalty term is always positive (as $(-1)^2 = 1$).
#
#
# - $\sum_{i} \left((\dots)^2\right)$
#
# Lastly, you must sum all penalties accumulated across all operations $O_{i}$ from all jobs.
#
# To translate this constraint to code form, you are going to need to expand the quadratic equation in the sum.
#
# To do this, Let's once again take $O_{3}$ as an example. Let's set $T = 2$ so the $t$ values will be 0 and 1. The first step will be to substitute in these values:
#
# $$
# \begin{align}
# \sum_{i} \left(\left(\sum_{0\leq t < T} x_{i,t}\right) - 1\right)^2 &= \left(x_{3,0} + x_{3,1} - 1\right)^2
# \end{align}
# $$
#
# For simplicity, the $x_{3,t}$ variables will be renamed as follows:
#
# $$
# \begin{align}
# x_{3,0} &= x \\
# x_{3,1} &= y
# \end{align}
# $$
#
# Substituting these values in, you now have the following:
#
# $$
# \begin{align}
# \sum_{i} \left(\left(\sum_{0\leq t < T} x_{i,t}\right) - 1\right)^2 &= \left(x_{3,0} + x_{3,1} - 1\right)^2 \\
# &=\left(x + y - 1\right)^2
# \end{align}
# $$
#
# Next, you need to expand out the bracket and multiply each term in the first bracket with all terms in the other bracket:
#
# $$
# \begin{align}
# \sum_{i} \left(\left(\sum_{0\leq t < T} x_{i,t}\right) - 1\right)^2 &= \left(x_{3,0} + x_{3,1} - 1\right)^2 \\
# &= \left(x + y - 1\right)^2 \\
# &= (x + y - 1)\cdot(x + y - 1) \\
# &= x^2 + y^2 + 2xy - 2x - 2y + 1
# \end{align}
# $$
#
# The final step simplifies things a little. Because this is a binary optimization problem, $x$ and $y$ can only take the values of $0$ or $1$. Because of this, the following holds true:
# $$x^2 = x$$
# $$y^2 = y,$$
#
# as
# $$0^2 = 0$$
# and
# $$1^2 = 1$$
#
# This means that the quadratic terms in the penalty function can combine with the two linear terms, giving the following formulation of the penalty function:
# $$
# \begin{align}
# \sum_{i} \left(\left(\sum_{0\leq t < T} x_{i,t}\right) - 1\right)^2 &= x^2 + y^2 + 2xy - 2x - 2y + 1 \\
# &= x + y + 2xy - 2x - 2y + 1 \\
# &= 2xy - x - y + 1
# \end{align}
# $$
#
# If $T$ was larger, you would have more terms ($z$ and so on, for example).
#
# ### Code
#
# You can now use this expanded version of the penalty function to build the penalty terms in code. Again, the `weight` argument is included, to be assigned a value later on:
# +
"""
# Reminder of the relevant parameters
## Allowed time (jobs can only be scheduled below this limit)
T = 21
## Assignment of operations to jobs (operation ID: job ID)
ops_jobs_map = {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 2}
"""
def operation_once_constraint(ops_jobs_map:dict, T:int, weight:float):
    """
    Build the penalty terms enforcing the operation-once constraint.

    Implements the expanded quadratic penalty (sum_t x_{i,t} - 1)^2, which for
    binary variables reduces to 2xy - x - y + 1 over all start-time pairs.

    Keyword arguments:
    ops_jobs_map (dict): Map of operations to jobs {op: job}
    T (int): Allowed time (jobs can only be scheduled below this limit)
    weight (float): Relative importance of this constraint
    """
    terms = []
    for op in ops_jobs_map.keys():
        base = op * T
        for t in range(T):
            # Linear pieces: the "- x" (and "- y") contribution for each start time.
            terms.append(Term(c=-weight, indices=[base + t]))
            # Quadratic pieces: the "+ 2xy" cross terms over every later start s > t.
            for s in range(t + 1, T):
                terms.append(Term(c=2 * weight, indices=[base + t, base + s]))
        # Constant piece: the "+ 1" term, once per operation.
        terms.append(Term(c=weight, indices=[]))
    return terms
# -
# ### No-overlap constraint
#
# The no-overlap constraint is defined as follows:
#
# | Constraint | Penalty condition |
# |---|---|
# |**No-overlap constraint**<br>Machines can only do one thing at a time.|Assign penalty every time two operations on a single machine are scheduled to run at the same time.|
#
# #### Worked Example
#
# For this final constraint, $J_{1}$ will once again be used as an example:
#
# - $J_{1}$: Plan camping trip
# - $O_{3}$: Pick campsite (*2 minutes*) - **computer**
# - $O_{4}$: Pay online (*2 minutes*) - **computer**
# - $O_{5}$: Print receipt (*3 minutes*) - **printer**
#
# Recall once more the variable $x_{i,t}$:
#
# $$
# \begin{align}
# \text{If } x_{i,t} &= 1, \text{ } O_i\text{ starts at time } \textit{t} \\
# \text{If } x_{i,t} &= 0, \text{ } O_i\text{ does not start at time } \textit{t} \\
# \end{align}
# $$
#
# As you can see from the above, $O_{3}$ and $O_{4}$ must be completed using the same machine (the computer). You can't do two things at the same time using the same machine, so to avoid violating the no-overlap constraint, you must ensure that $O_{3}$ and $O_{4}$ begin at different times: $x_{3,t}$ and $x_{4,t}$ must not equal 1 at the same time. You must also make sure that the operations don't overlap, just like you saw in the precedence constraint. This means that if $O_{3}$ starts at time $t$, $O_{4}$ must not start at times where $t \leq s < t + p_{3}$ (after $O_{3}$ has started but before it has been completed using the machine).
#
# One example of a valid configuration is shown below:
#
# |$t$|$x_{3,t}$|$x_{4,t}$|$x_{3,t} \cdot x_{4,t}$|
# |---|---|---|---|
# |0|1|0|0|
# |1|0|0|0|
# |2|0|1|0|
# |||$\sum_{t} x_{3,t} \cdot x_{4,t} =$|0|
# |||**Valid configuration?**|✔|
#
#
# As you can see, when you compare $x_{i,t}$ values pairwise at each time in the simulation, their product always equals 0. Further to this, you can see that $O_{4}$ starts two time steps after $O_{3}$, which means that there is no overlap.
#
# Below, we see a configuration that violates the constraint:
#
# |$t$|$x_{3,t}$|$x_{4,t}$|$x_{3,t} \cdot x_{4,t}$|
# |---|---|---|---|
# |0|0|0|0|
# |1|1|1|1|
# |2|0|0|0|
# |||$\sum_{t} x_{3,t} \cdot x_{4,t} =$|1|
# |||**Valid configuration?**|✘|
#
# In this instance, $O_{3}$ and $O_{4}$ are both scheduled to start at $t = 1$ and given they require the same machine, this means that the constraint has been violated. The pairwise product of $x_{i,t}$ values is therefore no longer always equal to 0, as for $t = 1$ we have: $x_{3,1} \cdot x_{4,1} = 1$
#
# Another example of an invalid configuration is demonstrated below:
#
# |$t$|$x_{3,t}$|$x_{4,t}$|$x_{3,t} \cdot x_{4,t}$|
# |---|---|---|---|
# |0|1|0|0|
# |1|0|1|0|
# |2|0|0|0|
# |||$\sum_{t} x_{3,t} \cdot x_{4,t} =$|0|
# |||**Valid configuration?**|✘|
#
# In the above scenario, the two operations' running times have overlapped ($t \leq s < t + p_{3}$), and therefore this configuration is not valid.
#
# You can now use this knowledge to mathematically formulate the constraint.
#
# #### Penalty Formulation
#
# As you saw from the tables in the worked example, for the configuration to be valid, the sum of pairwise products of $x_{i,t}$ values for a machine $m$ at any time $t$ must equal 0. This gives you the penalty function:
#
# $$h(x) = \sum_{i,t,k,s} x_{i,t}\cdot x_{k,s} = 0 \text{ for each machine } \textit{m}$$
#
# Let's break that down:
#
# - $\sum_{i,t,k,s}$
#
# For operation $i$ starting at time $t$, and operation $k$ starting at time $s$, you need to sum over all possible start times $0 \leq t < T$ and $0 \leq s < T$. This indicates the need for another nested `for` loop, like you saw for the precedence constraint.
#
# For this summation, $i \neq k$ (you should always be scheduling two different operations).
#
# For two operations happening on a single machine, $t \neq s$ or the constraint has been violated. If $t = s$ for the operations, they have been scheduled to start on the same machine at the same time, which isn't possible.
#
#
# - $x_{i,t}\cdot x_{k,s}$
#
# This is the product you saw explicitly calculated in the rightmost columns of the tables from the worked example. If two different operations $i$ and $k$ start at the same time ($t = s$), this product will equal 1. Otherwise, it will equal 0.
#
#
# - $\sum(\dots) = 0 \text{ for each machine } \textit{m}$
#
# This sum is performed for each machine $m$ independently.
#
# If all $x_{i,t} \cdot x_{k,s}$ products in the summation equal 0, the total sum comes to 0. This means no operations have been scheduled to start at the same time on this machine and thus the constraint has not been violated. You can see an example of this in the bottom row of the first table from the worked example, above.
#
# If any of the $x_{i,t} \cdot x_{k,s}$ products in the summation equal 1, this means that $t = s$ for those operations and therefore two operations have been scheduled to start at the same time on the same machine. The sum now returns a value greater than 1, which gives us a penalty every time the constraint is violated. You can see an example of this in the bottom row of the second table from the worked example.
#
# ### Code
#
# Using the above, you can transform the final penalty function into code that will generate the terms needed by the solver. As with the previous two penalty functions, the `weight` is included in the definition of the `Term` objects:
# +
"""
# Reminder of the relevant parameters
## Allowed time (jobs can only be scheduled below this limit)
T = 21
## Processing time for each operation
processing_time = {0: 2, 1: 1, 2: 3, 3: 2, 4: 2, 5: 3, 6: 1, 7: 2, 8: 3, 9: 2}
## Assignment of operations to jobs (operation ID: job ID)
ops_jobs_map = {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 2, 7: 2, 8: 2, 9: 2}
## Assignment of operations to machines
### Ten jobs, three machines
machines_ops_map = {
0: [0, 1, 3, 4, 6, 7], # Operations 0, 1, 3, 4, 6 and 7 are assigned to machine 0 (the computer)
1: [2, 5, 8], # Operations 2, 5 and 8 are assigned to machine 1 (the printer)
2: [9] # Operation 9 is assigned to machine 2 (the tooth floss)
}
"""
def no_overlap_constraint(T:int, processing_time:dict, ops_jobs_map:dict, machines_ops_map:dict, weight:float):
    """
    Construct penalty terms for the no overlap constraint.
    Keyword arguments:
    T (int): Allowed time (jobs can only be scheduled below this limit)
    processing_time (dict): Operation processing times
    weight (float): Relative importance of this constraint
    ops_jobs_map (dict): Map of operations to jobs {op: job}
    machines_ops_map(dict): Mapping of operations to machines, e.g.:
        machines_ops_map = {
            0: [0,1], # Operations 0 & 1 assigned to machine 0
            1: [2,3] # Operations 2 & 3 assigned to machine 1
        }
    """
    terms = []
    # For each machine
    for ops in machines_ops_map.values():
        # Loop over each operation i requiring this machine
        for i in ops:
            # Loop over each operation k requiring this machine
            # NOTE(review): i and k both range over the full list, so every
            # unordered pair is visited twice ((i,k) and (k,i)); the penalty is
            # therefore applied symmetrically rather than once per pair.
            for k in ops:
                # Loop over simulation time
                for t in range(T):
                    # When i != k (when scheduling two different operations)
                    if i != k:
                        # t = s meaning two operations are scheduled to start at the same time on the same machine
                        terms.append(Term(c=weight*1, indices=[i*T+t, k*T+t]))
                        # Add penalty when operation runtimes overlap
                        # (the range below starts at s = t, so the simultaneous-start
                        # case above is counted a second time — extra weight on an
                        # already-invalid configuration, not a correctness issue)
                        for s in range(t, min(t + processing_time[i], T)):
                            terms.append(Term(c=weight*1, indices=[i*T+t, k*T+s]))
                        # If operations are in the same job, penalize for the extra time 0 -> t (operations scheduled out of order)
                        if ops_jobs_map[i] == ops_jobs_map[k]:
                            for s in range(0, t):
                                # i < k / i > k keep the index pair in job order
                                if i < k:
                                    terms.append(Term(c=weight*1, indices=[i*T+t, k*T+s]))
                                if i > k:
                                    terms.append(Term(c=weight*1, indices=[i*T+s, k*T+t]))
    return terms
# -
# ## Minimize the makespan
#
# So far you've learned how to represent constraints of your optimization problem with a penalty model, which allows you to obtain *valid* solutions to your problem from the optimizer. Remember however that your end goal is to obtain an *optimal* (or close to optimal) solution. In this case, you're looking for the schedule with the fastest completion time of all jobs.
#
# The makespan $M$ is defined as the total time required to run all jobs, or alternatively the finishing time of the last job, which is what you want to minimize. To this end, you need to add a fourth component to the cost function that adds larger penalties for solutions with larger makespans:
#
# $$ H(x) = \alpha \cdot f(x) + \beta \cdot g(x) + \gamma \cdot h(x) + \mathbf{\delta \cdot k(x)} $$
#
# Let's come up with terms that increase the value of the cost function the further out the last job is completed. Remember that the completion time of a job depends solely on the completion time of its final operation. However, since you have no way of knowing in advance what the last job will be, or at which time the last operation will finish, you'll need to include a term for each operation and time step. These terms need to scale with the time parameter $t$, and consider the operation processing time, in order to penalize large makespans over smaller ones.
#
# Some care is required in determining the penalty values, or *coefficients*, of these terms. Recall that you are given a set of operations $\{O_i\}$, which each take processing time $p_i$ to complete. An operation scheduled at time $t$ will then *complete* at time $t + p_i$. Let's define the coefficient $w_t$ as the penalty applied to the cost function for an operation to finish at time $t$. As operations can be scheduled in parallel, you don't know how many might complete at any given time, but you do know that this number is at most equal to the number of available machines $m$. The sum of all penalty values for operations completed at time $t$ are thus in the range $[0, ~m \cdot w_t]$. You want to avoid situations were completing a single operation at time $t+1$ is less expensive than m operations at time $t$. Thus, the penalty values cannot follow a simple linear function of time.
#
# Precisely, you want your coefficients to satisfy:
# $$ w_{t+1} > m \cdot w_{t} $$
#
# For a suitable parameter $\epsilon > 0$, you can then solve the following recurrence relation:
# $$ w_{t+1} = m \cdot w_{t}+\epsilon $$
#
# The simplest solution is given by the function:
# $$ w_{t} = \epsilon \cdot \frac{m^t-1}{m-1} $$
#
# ### Limiting the number of terms
#
# Great! You now have a formula for the coefficients of the makespan penalty terms that increase with time while taking into account that operations can be scheduled in parallel. Before implementing the new terms, let's try to limit the amount of new terms you're adding as much as possible. To illustrate, recall the job shop example you've been working on:
#
# $$
# \begin{align}
# J_{0} &= \{O_{0}, O_{1}, O_{2}\} \\
# J_{1} &= \{O_{3}, O_{4}, O_{5}\} \\
# J_{2} &= \{O_{6}, O_{7}, O_{8}, O_{9}\} \\
# \end{align}
# $$
#
#
# First, consider that you only need the last operation in every job, as the precedence constraint guarantees that all other operations are completed before it. Given $n$ jobs, you thus consider only the operations $\{O_{k_0-1}, O_{k_1-1}, \dots, O_{k_{n-1}-1}\}$, where the indices $k_j$ denotes the number of operations up to and including job $j$. In this example, you only add terms for the following operations:
#
# $$ \{O_2, O_5, O_9\} $$
#
# $$ \text{with } k_0 = 3, k_1 = 6, k_2 = 10 $$
#
# Next, you can find a lower bound for the makespan and only penalize makespans that are greater than this minimum. A simple lower bound is given by the longest job, as each operation within a job must execute sequentially. You can express this lower bound as follows:
#
# $$ M_{lb} = \max\limits_{0 \leq j \lt n} \{ \sum_{i = k_j}^{k_{j+1}-1} p_i \} \leq M $$
#
# For the processing times given in this example, you get:
#
# $$
# \begin{align}
# J_{0} &: ~~ p_0 + p_1 + p_2 = 2 + 1 + 3 = 6 \\
# J_{1} &: ~~ p_3 + p_4 + p_5 = 2 + 2 + 3 = 7 \\
# J_{2} &: ~~ p_6 + p_7 + p_8 + p_9 = 1 + 2 + 3 + 2 = 8 \\
# \\
# &\Rightarrow M_{lb} = 8
# \end{align}
# $$
#
# Finally, the makespan is upper-bounded by the sequential execution time of all jobs, 6 + 7 + 8 = 21 in this case. The simulation time T should never exceed this upper bound. Regardless of whether this is the case or not, you need to include penalties for all time steps up to T, or else larger time steps without a penalty will be favored over smaller ones!
#
# To summarize:
#
# - Makespan penalty terms are only added for the last operation in every job $\{O_{k_0-1}, O_{k_1-1}, \dots, O_{k_{n-1}-1}\}$
# - The makespan is lower-bounded by the longest job $\Rightarrow$ only include terms for time steps $M_{lb} < t < T$
#
#
# ### Implementing the penalty terms
#
# You are now ready to add the makespan terms to the cost function. Recall that all terms contain a coefficient and one (or multiple) binary decision variables $x_{i,t}$. Contrary to the coefficients $w_t$ defined above, where $t$ refers to the completion time of an operation, the variables $x_{i,t}$ determine if an operation $i$ is *scheduled* at time t. To account for this difference, you'll have to shift the variable index by the operation's processing time $p_i$. All makespan terms can then be expressed as follows:
#
# $$ k(x) = \sum_{i \in \{k_0-1, \dots, k_{n-1}-1\}} \left( \sum_{M_{lb} < t < T+p_i} w_t \cdot x_{i, ~t-p_i} \right) $$
#
# Lastly, you need to make a small modification to the coefficient function so that the first value $w_{M_{lb}+1}$ always equals one. With $\epsilon = 1$ and $t_0 = M_{lb}$ you get:
#
# $$ w_{t} = \frac{m^{t-t_0}-1}{m-1} $$
#
# ### Code
# The code below implements the ideas discussed above by generating the necessary `Term` objects required by the solver.
# +
"""
# Reminder of the relevant parameters
## Allowed time (jobs can only be scheduled below this limit)
T = 21
## Processing time for each operation
processing_time = {0: 2, 1: 1, 2: 3, 3: 2, 4: 2, 5: 3, 6: 1, 7: 2, 8: 3, 9: 2}
## Assignment of operations to jobs (job ID: [operation IDs])
jobs_ops_map = {
0: [0, 1, 2], # Pay electricity bill
1: [3, 4, 5], # Plan camping trip
2: [6, 7, 8, 9] # Book dentist appointment
}
"""
def calc_penalty(t:int, m_count:int, t0:int):
    """
    Compute the makespan penalty coefficient w_t = (m^(t-t0) - 1) / (m - 1).

    The coefficients grow geometrically with t so that completing a single
    operation at time t+1 always costs more than completing m_count
    operations at time t. With the offset t0, w_{t0} == 0 and w_{t0+1} == 1.

    Keyword arguments:
    t (int): Time step the operation completes at
    m_count (int): Number of machines (base of the geometric growth)
    t0 (int): Offset (makespan lower bound) at which the penalty starts

    Returns:
    float: Penalty coefficient for completion time t
    """
    # Guard the division below: with one machine the formula degenerates
    # (the limit would be t - t0), which this sample does not support.
    assert m_count > 1, "calc_penalty requires at least two machines (m_count > 1)"
    return (m_count**(t - t0) - 1)/float(m_count - 1)
def makespan_objective(T:int, processing_time:dict, jobs_ops_map:dict, m_count:int, weight:float):
    """
    Construct the makespan-minimization terms of the cost function.

    Only the final operation of each job can define the makespan, so a
    weighted penalty term is generated for every time step — beyond the
    lower bound set by the longest job — at which such an operation could
    still be completing.

    Keyword arguments:
    T (int): Allowed time (jobs can only be scheduled below this limit)
    processing_time (dict): Operation processing times
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    m_count (int): Number of machines
    weight (float): Relative importance of this constraint
    """
    # The makespan can never be shorter than the longest single job
    lower_bound = max(sum(processing_time[op] for op in ops) for ops in jobs_ops_map.values())
    terms = []
    for ops in jobs_ops_map.values():
        final_op = ops[-1]
        duration = processing_time[final_op]
        # Penalize every completion time t in (lower_bound, T + duration)
        for t in range(lower_bound + 1, T + duration):
            coefficient = weight * calc_penalty(t, m_count, lower_bound)
            # Shift index by the processing time: x_{i,t} marks when the operation *starts*
            terms.append(Term(c=coefficient, indices=[final_op*T + (t - duration)]))
    return terms
# -
# ## Putting it all together
#
# As a reminder, here are the penalty terms:
#
# | Constraint | Penalty condition |
# |---|---|
# |**Precedence constraint**<br>Operations in a job must take place in order.|Assign penalty every time $O_{i+1}$ starts before $O_{i}$ has finished (they start out of order).|
# |**Operation-once constraint**<br>Each operation is started once and only once.|Assign penalty if an operation isn't scheduled within the allowed time.<br>**Assumption:** if an operation starts, it runs to completion.|
# |**No-overlap constraint**<br>Machines can only do one thing at a time.|Assign penalty every time two operations on a single machine are scheduled to run at the same time.|
#
# - **Precedence constraint**:
#
# $$f(x) = \sum_{k_{n-1} \leq i < k_n, s < t + p_{i}}x_{i,t}\cdot x_{i+1,s} \text{ for each job } \textit{n}$$
#
# - **Operation-once constraint**:
#
# $$g(x) = \sum_{i} \left(\left(\sum_{0\leq t < T} x_{i,t}\right) - 1\right)^2$$
#
# - **No-overlap constraint**:
#
# $$h(x) = \sum_{i,t,k,s} x_{i,t}\cdot x_{k,s} = 0 \text{ for each machine } \textit{m}$$
#
# - **Makespan minimization**:
#
# $$k(x) = \sum_{i \in \{k_0-1, \dots, k_{n-1}-1\}} \left( \sum_{M_{lb} < t < T+p_i} w_t \cdot x_{i, ~t-p_i} \right)$$
#
# As you saw earlier, combining the penalty functions is straightforward - all you need to do is assign each term a weight and add all the weighted terms together, like so:
#
# $$H(x) = \alpha \cdot f(x) + \beta \cdot g(x) + \gamma \cdot h(x) + \delta \cdot k(x) $$
#
# $$\text{where }\alpha, \beta, \gamma \text{ and } \delta \text{ represent the different weights assigned to the penalties.}$$
#
# The weights represent how important each penalty function is, relative to all the others.
#
# > **NOTE:**
# > Along with modifying your cost function (how you represent the penalties), tuning these weights will define how much success you will have solving your optimization problem. There are many ways to represent each optimization problem's penalty functions and many ways to manipulate their relative weights, so this may require some experimentation before you see success. The end of this sample dives a little deeper into parameter tuning.
# ### Code
#
# As a reminder, below you again see the code representation of the problem parameters: the maximum allowed time `T`, the operation processing times `processing_time`, the mapping of operations to jobs (`jobs_ops_map` and `ops_jobs_map`), the assignment of operations to machines (`machines_ops_map`), and the helper function `process_config`.
#
# +
def process_config(jobs_ops_map:dict, machines_ops_map:dict, processing_time:dict, T:int):
    """
    Validate the problem configuration and build the inverse operation-to-job map.

    Clamps T to the sequential-execution upper bound, sorts each machine's
    operation list in place (by rebinding), and asserts that operation IDs are
    unique and ascending within every job.

    Keyword arguments:
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    machines_ops_map(dict): Mapping of operations to machines, e.g.:
        machines_ops_map = {
            0: [0,1],  # Operations 0 & 1 assigned to machine 0
            1: [2,3]   # Operations 2 & 3 assigned to machine 1
        }
    processing_time (dict): Operation processing times
    T (int): Allowed time (jobs can only be scheduled below this limit)
    """
    # Problem cannot take longer to complete than all operations executed sequentially
    sequential_total = sum(processing_time.values())
    T = min(sequential_total, T)
    # Normalize machine assignments to ascending operation order
    for machine, assigned in machines_ops_map.items():
        machines_ops_map[machine] = sorted(assigned)
    ops_jobs_map = {}
    for job, ops in jobs_ops_map.items():
        # Fail if operation IDs within a job are out of order
        assert (ops == sorted(ops)), f"Operation IDs within a job must be in ascending order. Job was: {job}: {ops}"
        for op in ops:
            # Fail if there are duplicate operation IDs
            assert (op not in ops_jobs_map.keys()), f"Operation IDs must be unique. Duplicate ID was: {op}"
            ops_jobs_map[op] = job
    return ops_jobs_map, T
# Set problem parameters
## Allowed time (jobs can only be scheduled below this limit)
T = 21
## Processing time for each operation
processing_time = {0: 2, 1: 1, 2: 3, 3: 2, 4: 2, 5: 3, 6: 1, 7: 2, 8: 3, 9: 2}
## Assignment of operations to jobs (job ID: [operation IDs])
### Operation IDs within a job must be in ascending order
jobs_ops_map = {
    0: [0, 1, 2],    # Pay electricity bill
    1: [3, 4, 5],    # Plan camping trip
    2: [6, 7, 8, 9]  # Book dentist appointment
}
## Assignment of operations to machines
### Three jobs, three machines
machines_ops_map = {
    0: [0, 1, 3, 4, 6, 7], # Operations 0, 1, 3, 4, 6 and 7 are assigned to machine 0 (the computer)
    1: [2, 5, 8], # Operations 2, 5 and 8 are assigned to machine 1 (the printer)
    2: [9] # Operation 9 is assigned to machine 2 (the tooth floss)
}
## Inverse mapping of jobs to operations
### process_config also clamps T to the sequential-execution upper bound (sum of all processing times)
ops_jobs_map, T = process_config(jobs_ops_map, machines_ops_map, processing_time, T)
# -
# The following code snippet shows how you assign weight values and assemble the penalty terms by summing the output of the penalty and objective functions, as was demonstrated mathematically earlier in this sample. These terms represent the cost function and they are what you will submit to the solver.
# +
# Generate terms to submit to solver using functions defined previously
## Assign penalty term weights:
alpha = 1 # Precedence constraint
beta = 1 # Operation once constraint
gamma = 1 # No overlap constraint
delta = 0.00000005 # Makespan minimization (objective function)
## Build terms
### Constraints:
c1 = precedence_constraint(jobs_ops_map, T, processing_time, alpha)
c2 = operation_once_constraint(ops_jobs_map, T, beta)
c3 = no_overlap_constraint(T, processing_time, ops_jobs_map, machines_ops_map, gamma)
### Objective function
c4 = makespan_objective(T, processing_time, jobs_ops_map, len(machines_ops_map), delta)
### Combine terms:
terms = []
terms = c1 + c2 + c3 + c4
# -
# > **NOTE**:
# > You can find the full Python script for this sample [here](TODO)
# ## Submit problem to Azure Quantum
#
# This code submits the terms to the Azure Quantum `SimulatedAnnealing` solver. You could also have used the same problem definition with any of the other Azure Quantum Optimization solvers available (for example, `ParallelTempering`). You can find further information on the various solvers available through the Azure Quantum Optimization service [here](TODO).
#
# The job is run synchronously in this instance, however this could also be submitted asynchronously as shown in the next subsection.
# +
from azure.quantum.optimization import Problem, ProblemType
from azure.quantum.optimization import SimulatedAnnealing # Change this line to match the Azure Quantum Optimization solver type you wish to use
# Problem type is PUBO in this instance. You could also have chosen to represent the problem in Ising form.
problem = Problem(name="Job shop sample", problem_type=ProblemType.pubo, terms=terms)
# Provide details of your workspace, created at the beginning of this tutorial
# Provide the name of the solver you wish to use for this problem (as imported above)
# NOTE(review): `workspace` must already be defined in an earlier cell — confirm it is created before this cell runs.
solver = SimulatedAnnealing(workspace, timeout = 100) # Timeout in seconds
# Run job synchronously (blocks until the solver returns)
result = solver.optimize(problem)
# The returned configuration maps each binary variable index (as a string) to its 0/1 value
config = result['configuration']
print(config)
# -
# ## Run job asynchronously
#
# Alternatively, a job can be run asynchronously, as shown below:
#
# ```python
# # Submit problem to solver
# job = solver.submit(problem)
# print(job.id)
#
# # Get job status
# job.refresh()
# print(job.details.status)
#
# # Get results
# result = job.get_results()
# config = result['configuration']
# print(config)
# ```
# ## Map variables to operations
#
# This code snippet contains several helper functions which are used to parse the results returned from the solver and print them to screen in a user-friendly format.
# +
def create_op_array(config: dict):
    """
    Create the flat array of x_{i,t} values from the config dict returned by the solver.

    The solver returns a dict keyed by the *string* variable index. Sorting by
    the numeric value of each key makes the result independent of the dict's
    insertion order. The previous implementation used list.insert(int(key), val),
    whose index is clamped to the current list length, so out-of-order keys
    could silently misplace values.

    Keyword arguments:
    config (dictionary): config returned from solver, {str(index): 0 or 1}
    """
    return [value for _, value in sorted(config.items(), key=lambda item: int(item[0]))]
def print_problem_details(ops_jobs_map:dict, processing_time:dict, machines_ops_map:dict):
    """
    Print a summary of the problem: each operation's job, runtime and machine.

    Keyword arguments:
    ops_jobs_map (dict): Map of operations to jobs {operation: job}
    processing_time (dict): Operation processing times
    machines_ops_map(dict): Mapping of machines to operations
    """
    # Invert machines_ops_map so machines[op] is the machine running operation op
    machines = [None] * len(ops_jobs_map)
    for machine, assigned_ops in machines_ops_map.items():
        for op in assigned_ops:
            machines[op] = machine
    print(f" Job ID: {list(ops_jobs_map.values())}")
    print(f" Operation ID: {list(ops_jobs_map.keys())}")
    print(f"Operation runtime: {list(processing_time.values())}")
    print(f" Assigned machine: {machines}")
    print()
def split_array(T:int, array:List[int]):
    """
    Split the flat variable array into rows of length T, one row per operation.

    Keyword arguments:
    T (int): Time allowed to complete all operations
    array (List[int]): array of x_i,t values generated from config returned by solver
    """
    return [array[start:start + T] for start in range(0, len(array), T)]
def print_matrix(T:int, matrix:List[List[int]]):
    """
    Print the final operation matrix: a time-step header followed by one
    labelled row of x_i,t values per operation.

    Keyword arguments:
    T (int): Time allowed to complete all operations
    matrix (List[List[int]]): Matrix of x_i,t values
    """
    header = " t:"
    for t in range(0, T):
        header += f" {t}"
    print(header)
    for op_id, row in enumerate(matrix):
        print("x_" + str(op_id) + ",t: ", end="")
        print(' '.join(map(str, row)))
    print()
def extract_start_times(jobs_ops_map:dict, matrix:List[List[int]]):
    """
    Extract operation start times from the matrix and group them into jobs.

    A start time of -1 marks an operation that was never scheduled
    (its matrix row contains no 1).

    Keyword arguments:
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    matrix (List[List[int]]): Matrix of x_i,t values
    """
    jobs = [None] * len(jobs_ops_map)
    op_start_times = []
    for job_id, op_ids in jobs_ops_map.items():
        starts = []
        for op_id in op_ids:
            try:
                start = matrix[op_id].index(1)
            except ValueError:
                start = -1  # operation never scheduled
            starts.append(start)
            op_start_times.append(start)
        jobs[job_id] = starts
    return jobs, op_start_times
# -
# ## Results
#
# Finally, you take the config returned by the solver and read out the results.
# +
# Produce 1D array of x_i,t = 0, 1 representing when each operation starts
op_array = create_op_array(config)
# Print config details:
print(f"Config dict:\n{config}\n")
print(f"Config array:\n{op_array}\n")
# Print problem setup
print_problem_details(ops_jobs_map, processing_time, machines_ops_map)
# Print final operation matrix, using the returned config
print("Operation matrix:")
matrix = split_array(T, op_array)
print_matrix(T, matrix)
# Find where each operation starts (when x_i,t = 1) and return the start time
print("Operation start times (grouped into jobs):")
jobs, op_start_times = extract_start_times(jobs_ops_map, matrix)
print(jobs)
# Calculate makespan (time taken to complete all operations - the objective you are minimizing)
op_end_times = [op_start_times[i] + processing_time[i] for i in range(len(op_start_times))]
makespan = max(op_end_times)
print(f"\nMakespan (time taken to complete all operations): {makespan}")
# -
# For this small problem instance, the solver quickly returned a solution. For bigger, more complex problems you may need to run the job asynchronously, as shown earlier in this sample.
# ## Validate the solution
#
# In this instance, it is possible to visually verify that the solution does not violate any constraints:
# - Operations belonging to the same job happen in order
# - Operations are started once and only once
# - Each machine only has one operation running at a time
#
# In this particular instance, you can also tell that the solver scheduled the repair tasks in such a way that the **total time to complete them all (the makespan) was minimized** - both machines are continuously in operation, with no time gaps between scheduled operations. This is the solution with the lowest possible cost, also known as the global minimum for the cost function. However, you must remember that these solvers are heuristics and are therefore not guaranteed to find the best solution possible, particularly when the problem definition becomes more complex.
#
# Depending on how well the cost function is defined and the weights are tuned, the solver will have varying degrees of success. This reinforces the importance of verifying and evaluating returned solutions, to enable tuning of the problem definition and parameters (such as weights/coefficients) in order to improve solution quality.
#
# For larger or more complex problems, it will not always be possible to verify the solution by eye. It is therefore common practice to implement some code to verify that solutions returned from the optimizer are valid, as well as evaluating how good the solutions are (at least relative to solutions returned previously). This capability is also useful when it comes to tuning weights and penalty functions.
#
# You can perform this validation using the following code snippet, which checks the solution against all three constraints before declaring the solution valid or not. If any of the constraints are violated, the solution will be marked as invalid. An example of an invalid solution has also been included, for comparison.
# +
def check_precedence(processing_time, jobs):
    """
    Check if the solution violates the precedence constraint.
    Returns True if the constraint is violated (an operation starts before
    its predecessor in the same job has finished).

    Keyword arguments:
    processing_time (dict): Operation processing times
    jobs (List[List[int]]): List of operation start times, grouped into jobs
    """
    op_id = 0
    for job in jobs:
        for this_start, next_start in zip(job, job[1:]):
            # The next operation may only start once the current one has finished
            if next_start - this_start < processing_time[op_id]:
                return True
            op_id += 1
        op_id += 1  # step past the job's final operation
    return False
def check_operation_once(matrix):
    """
    Check if the solution violates the operation-once constraint.
    Returns True if the constraint is violated (some operation is scheduled
    zero times or more than once).

    Keyword arguments:
    matrix (List[List[int]]): Matrix of x_i,t values
    """
    # Each row must contain exactly one scheduled time step (a single 1)
    return any(sum(row) != 1 for row in matrix)
def check_no_overlap(op_start_times:list, machines_ops_map:dict, processing_time:dict):
    """
    Check if the solution violates the no-overlap constraint.
    Returns True if the constraint is violated.

    Keyword arguments:
    op_start_times (list): Start times for the operations, indexed by operation ID
    machines_ops_map(dict): Mapping of machines to operations
    processing_time (dict): Operation processing times
    """
    # NOTE(review): assumes processing_time keys are 0..n-1 in insertion order,
    # so list(values())[op] is the runtime of operation op — confirm upstream.
    pvals = list(processing_time.values())
    # For each machine
    for ops in machines_ops_map.values():
        # A machine with fewer than two operations cannot overlap. This also
        # fixes a crash: the zip(*sorted(...)) unpack below raises ValueError
        # for a machine with an empty operation list.
        if len(ops) < 2:
            continue
        machine_start_times = [op_start_times[i] for i in ops]
        machine_pvals = [pvals[i] for i in ops]
        # Two operations start at the same time on the same machine
        if len(machine_start_times) != len(set(machine_start_times)):
            return True
        # There is overlap in the runtimes of two operations assigned to the same machine
        machine_start_times, machine_pvals = zip(*sorted(zip(machine_start_times, machine_pvals)))
        for i in range(len(machine_pvals) - 1):
            if machine_start_times[i] + machine_pvals[i] > machine_start_times[i+1]:
                return True
    return False
def validate_solution(matrix:dict, machines_ops_map:dict, processing_time:dict, jobs_ops_map:dict):
    """
    Check that solution has not violated any constraints and print the verdict.

    Keyword arguments:
    matrix (List[List[int]]): Matrix of x_i,t values
    machines_ops_map(dict): Mapping of machines to operations
    processing_time (dict): Operation processing times
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}

    NOTE(review): this function prints its result and returns None rather than
    True/False. It also reads the module-level globals `ops_jobs_map` and `T`
    for the printouts — confirm those are defined before calling.
    """
    jobs, op_start_times = extract_start_times(jobs_ops_map, matrix)
    # Check if constraints are violated
    precedence_violated = check_precedence(processing_time, jobs)
    operation_once_violated = check_operation_once(matrix)
    no_overlap_violated = check_no_overlap(op_start_times, machines_ops_map, processing_time)
    if not precedence_violated and not operation_once_violated and not no_overlap_violated:
        print("Solution is valid.\n")
    else:
        print("Solution not valid. Details:")
        print(f"\tPrecedence constraint violated: {precedence_violated}")
        print(f"\tOperation once constraint violated: {operation_once_violated}")
        print(f"\tNo overlap constraint violated: {no_overlap_violated}\n")
    print_problem_details(ops_jobs_map, processing_time, machines_ops_map)
    print("Azure Quantum solution:")
    print_matrix(T, matrix)
    print("Operation start times (grouped into jobs):")
    print(jobs)
    print()

# Validate the solver's answer against all three constraints
validate_solution(matrix, machines_ops_map, processing_time, jobs_ops_map)
# -
# As you can see, the result returned by the Azure Quantum solver has been confirmed as valid (it does not violate any of the constraints).
# ## Tune parameters
#
# Great! You've learned how to model a cost function, run a solver, and verify the solution of an optimization problem using Azure Quantum. Using your knowledge, you successfully repaired your ship! However, you may have been wondering how exactly the weights that appear in the cost function were chosen. Let's take a look at a general method that can help you balance the different components that make up a cost function.
#
# If you recall, the cost function is made up of four components, one for each constraint and one to minimize the makespan:
#
# $$ H(x) = \alpha \cdot f(x) + \beta \cdot g(x) + \gamma \cdot h(x) + \delta \cdot k(x) $$
#
# The importance attributed to each term can be adjusted using the weights (coefficients) $\alpha, \beta, \gamma, \text{ and } \delta$. The process of adjusting these weights is referred to as *parameter tuning*. In general, there's no absolute rule to determine the optimal value for each weight, and you might have to use some trial and error to figure out what works best for your problem. However, the guidelines below can help you get a good starting point.
#
# #### Adjusting the optimization term weight
#
# Intuitively, it should be clear that satisfying the constraints is more important than minimizing the makespan. An invalid solution, even with a very small makespan, would be useless to you. The weights of the cost function can be used to reflect this fact. As a rule of thumb, breaking a single constraint should be around 5-10x more expensive than any valid solution.
#
# Let's start with an upper bound on the value of the cost function for any valid solution. At worst, a valid solution (meaning that $f(x) = g(x) = h(x) = 0$) contributes at most $m \cdot w_{T-1+max(p_i)}$ to the cost function. This is the case when $m$ operations, all taking $max(p_i)$ to complete, are scheduled at the last time step $T-1$. For convenience, let's say that this should result in a cost function value of $1$. You can compute what the value of $\delta$ should be to achieve this value. The code example you've been working with uses the following parameters:
#
# $$ m = 3, ~ T = 21, ~ max(p_i) = 3, ~ M_{lb} = 8, ~ w_t = \frac{m^{t-M_{lb}}}{m-1} $$
#
# First, calculate the latest time an operation could finish. This is given by the max time $T$ (minus one because you are using 0-based indexing), plus the longest processing time for any operation ($max(p_i)$):
# $$t_{max} = T - 1 + max(p_i) = 21 - 1 + 3 = 23$$
#
# Then, calculate $w_{t_{max}}$:
# $$ w_{t_{max}} = \frac{m ^ {t_{max} - M_{lb}}}{m - 1} = \frac{3^{23 - 8}}{3 - 1} = \frac{3^{15}}{2} = 7,174,453.5 $$
#
# The upper bound is then:
#
# $$ m \cdot w_{t_{max}} = 3 \times 7,174,453.5 = 21,523,360.5 $$
#
# To obtain the desired value of $1$, you can approximately set the weight to:
#
# $$ \delta = \frac{1}{m \cdot w_{t_{max}}} = \frac{1}{21,523,360.5} = 0.00000005 $$
#
# #### Adjusting the constraint weights
#
# As mentioned in the previous section, breaking a single constraint should incur a penalty roughly 5-10x higher than that of the worst valid solution. Assuming that breaking one constraint adds a value of $1$ to the cost function, you can set the remaining weights to:
#
# $$ \alpha = \beta = \gamma = 5 $$
#
# Now, you can run a problem instance and use the verifier to check if any constraints are being broken. If all constraints are satisfied, congratulations! You should have obtained a good solution from the optimizer.
#
# If instead one constraint is consistently broken, you probably need to increase its weight compared to the others.
#
# #### Further adjustments
#
# You may also come across situations in which constraints are being broken without a particular preference for which. In this case, make sure the time $T$ is given a large enough value. If $T$ is too small, there may not even exist a valid solution, or the solver could be too constrained to feasibly find one.
#
# Optionally, if you're looking for better solutions than the ones obtained so far, you may always try to lower the value of $T$, or increase the importance of the makespan component $\delta$. A tighter bound on the makespan can help the solver find a more optimal solution, as can increasing the weight $\delta$. You may also find that doing so increases the speed at which a solution is found. If any problems pop up with broken constraints, you went too far and need to change the parameters in the other direction again.
# ## Next steps
#
# Now that you understand the problem scenario and how to define the cost function, there are a number of experiments you can perform to deepen your understanding and improve the solution defined above:
#
# - Modify the problem definition:
# - Change the number of jobs, operations, and/or machines
# - Vary the number of operations in each job
# - Change operation runtimes
# - Change machine assignments
# - Add/remove machines
# - Rewrite the penalty functions to improve their efficiency
# - Tune the parameters
# - Try using a different solver (such as `ParallelTempering`)
#
| samples/job-shop-scheduling/job-shop-sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Selecting multiple columns in a pandas dataframe
import pandas as pd
# Load the Titanic-style training data (path relative to the notebook)
train = pd.read_csv('train.csv')
train.head(1)
# #### Method 1
#Select 2 Columns - Name and Age
names = train[['Name','Age']]
names.head()
# #### Method 2
#Extract Categorical Variables (columns with object dtype)
categorical = [var for var in train.columns if train[var].dtype=='O']
#Extract Numerical Variables (columns with any non-object dtype)
numerical = [var for var in train.columns if train[var].dtype!='O']
print('Categorical variables - {}'.format(len(categorical)))
print('Numerical - {}'.format(len(numerical)))
categorical
df = train[categorical].head()
numerical
train[numerical].head()
| Python Pandas Smart Tricks/4_Selecting multiple columns in a pandas dataframe 17Oct2019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ***
# # Observations or Inferences
# ***
# #### 1. By removing duplicates the total number of mice reduced by only one from 249 to 248 mice.
# #### 2. The bar graph showed that the Drug Regimen Capomulin has the maximum number of mice with a total of 230, followed very closely by Ramicane with 228, while Propriva has the smallest number with 148 mice.
# #### 3. The data showed a slight difference in the quantity of mice by gender, with a total of 123 female mice and 125 male mice.
# #### 4. From the selected treatments Capomulin and Ramicane are more effective reducing the size of tumors.
# #### 5. With treatment Capomulin the correlation between mouse weight, and average tumor volume is 0.84. It is a strong positive correlation, when the mouse weight increases the average tumor volume also increases.
# #### 6. The regression analysis helped to understand how much the average tumor volume (dependent variable) will change when the weight of the mice changes (independent variable) with the Capomulin treatment. The R-squared value is 0.71, which means the model explains 71% of the variability in the data, which is fairly good for predicting data from the model. *(If you analyze a physical process and have very good measurements, you might expect R-squared values over 90%).*
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files (paths relative to the notebook's working directory)
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# mouse_metadata
# study_results
# -
# Getting the column names for mouse_metadata
mouse_metadata.columns
# Getting the column names for study_results
study_results.columns
# +
# Combine the data into a single dataset (outer join keeps every Mouse ID from both tables)
merge_data = pd.merge(mouse_metadata, study_results, on="Mouse ID", how="outer")
# Display the data table for preview
merge_data
# -
# Checking the number of mice.
mice = merge_data['Mouse ID'].nunique()
# mice
pd.DataFrame({'Mouse ID': [mice]})
# Renaming the columns with extra characters (units moved into the name suffix)
merge_data.rename(columns = {'Weight (g)':'Weight_g', 'Tumor Volume (mm3)':'Tumor_Volume_mm3'}, inplace = True)
merge_data
# Checking unique values
mouse_metadata.nunique()
# merge_data.nunique()
# +
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_mouse = merge_data.loc[merge_data.duplicated(subset=['Mouse ID', 'Timepoint',]), 'Mouse ID'].unique()
duplicate_mouse
# duplicate_mouse = merge_data[merge_data.duplicated()]
# duplicate_mouse
# +
# Optional: Get all the data for the duplicate mouse ID. - all rows that match the one above
duplicate_mouse_all = merge_data[merge_data.duplicated(['Mouse ID', 'Timepoint'])]
duplicate_mouse_all
# duplicate_mouse = merge_data[merge_data.duplicated('Mouse ID', 'Timepoint')]
# print("Duplicate Rows based on Mouse ID :")
# duplicate_mouse
# +
# See only mouse id g989 duplicate
midg989 = merge_data[merge_data['Mouse ID'] == 'g989']
midg989
# Visualize range where duplicate rows are
#merge_data.loc[905:920]
# +
# Average duplicate mouse ID and Timepoint to get single values to drop others next
# midg989 = midg989.groupby(['Mouse ID', "Timepoint"]).agg({"Tumor_Volume_mm3":['mean']})
# midg989
# -
# Get column # for Tumor_Volume_mm3
merge_data.columns
merge_data.columns.get_loc('Tumor_Volume_mm3')
# +
# Duplicate index 909,911,913,915,917
# Replace index 908,910,912,916,918
# Replace mouse id duplicate g989 values with avg. above only for Timepoint 0,5,10,15,20
# merge_data_avg = merge_data
# merge_data_avg.iloc[908,6] = 45.000000
# # merge_data_avg.iloc[908,6]
# merge_data_avg.iloc[910,6] = 48.178597
# # merge_data_avg.iloc[910,6]
# merge_data_avg.iloc[912,6] = 50.812842
# # merge_data_avg.iloc[912,6]
# merge_data_avg.iloc[914,6] = 52.383936
# # merge_data_avg.iloc[914,6]
# merge_data_avg.iloc[916,6] = 54.991886
# # merge_data_avg.iloc[916,6]
# merge_data_avgb = merge_data_avg.drop([909,911,913,915,917])
# merge_data_avgb.loc[905:920]
# mice_avg = merge_data_avgb['Mouse ID'].nunique()
# mice
# +
# Create a clean DataFrame by dropping the duplicate mouse by its ID. -- dropped the mouse completely
# isin in pandas = Equality test for DataFrame (True=dupl / False=unq)
merge_data_new = merge_data[merge_data['Mouse ID'].isin(duplicate_mouse)==False]
merge_data_new
# -
# See new data for duplicate mouse midg989
midg989_drop = merge_data_new[merge_data_new['Mouse ID'] == 'g989']
midg989_drop
# Checking the number of mice in the clean DataFrame.
clean_mouse=merge_data_new["Mouse ID"].value_counts()
clean_number_mouse=len(clean_mouse)
clean_number_mouse
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
regimen_mean = merge_data_new.groupby('Drug Regimen').mean()["Tumor_Volume_mm3"]
regimen_mean
regimen_median = merge_data_new.groupby('Drug Regimen').median()["Tumor_Volume_mm3"]
regimen_median
regimen_variance = merge_data_new.groupby('Drug Regimen').var()["Tumor_Volume_mm3"]
regimen_variance
regimen_std = merge_data_new.groupby('Drug Regimen').std()["Tumor_Volume_mm3"]
regimen_std
regimen_sem = merge_data_new.groupby('Drug Regimen').sem()["Tumor_Volume_mm3"]
regimen_sem
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen: mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line
summary_stats_df = pd.DataFrame({"Mean": regimen_mean, "Median":regimen_median, "Variance":regimen_variance, "Standard Deviation": regimen_std, "SEM": regimen_sem})
summary_stats_df
# +
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen: mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line ->This is the method I chose since it does all at once
#I chose this method and kept it all at once
sts_data_df = merge_data_new
sts_data_df = sts_data_df.groupby(['Drug Regimen']).agg({'Tumor_Volume_mm3' : ['mean', 'median', "var", 'std', 'sem']})
sts_data_df
# Rename the MultiIndex columns produced by the aggregation to flat names
sts_data_df.columns = ['TumorV_mean', 'TumorV_median', 'TumorV_variance', 'TumorV_stdev', 'TumorV_sem']
sts_data_df
# sts_data_df.columns
# -
# ## Bar and Pie Charts
count_mouse_regimen = merge_data_new.groupby(['Drug Regimen']).count()['Mouse ID']
count_mouse_regimen
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas.
plot_pandas = count_mouse_regimen.plot.bar(color = 'blue', rot = 45, alpha=0.8, figsize = (15, 10))
count_mouse_regimen
# plot_pandas = count_mouse_regimen.DataFrema.plot(color = 'blue', rot = 45, figsize = (15, 10))
# count_mouse_regimen
plt.title('Number of Mice per Treatment', fontsize = 20)
plt.xlabel('Drug Regimen', fontsize = 14)
plt.ylabel('Number of Mice',fontsize = 14)
count_mouse_regimen
plt.savefig("../Images/mice_per_treatment_df.png", bbox_inches = "tight")
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplo
#https://www.geeksforgeeks.org/how-to-convert-pandas-dataframe-into-a-list/
#https://stackoverflow.com/questions/35523635/extract-values-in-pandas-value-counts
# Same per-regimen measurement counts as above, as a plain list for plt.bar.
# NOTE(review): despite the cell comment mentioning a pie plot, this cell
# draws the pyplot *bar* chart counterpart of the pandas bar chart above.
mouse_list =(merge_data_new.groupby(["Drug Regimen"])["Mouse ID"].count()).tolist()
mouse_list
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
#plt.xticks(rotation=45) #https://www.kite.com/python/answers/how-to-rotate-axis-labels-in-matplotlib-in-python
#https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.subplots.html
#https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.xticks.html
#https://stackoverflow.com/questions/34100209/how-can-i-set-the-x-axis-tick-locations-for-a-bar-plot-created-from-a-pandas-dat
x_axis = np.arange(len(count_mouse_regimen))
fig1, ax1 = plt.subplots(figsize=(15, 10))
plt.bar(x_axis, mouse_list, color='blue', alpha=0.8, align='center', width = 0.5)
tick_locations = [value for value in x_axis]
#Manual tick_locations
# Tick labels are hard-coded; they rely on groupby sorting regimens
# alphabetically in the same order as this list.
plt.xticks(tick_locations, ['Capomulin', 'Ceftamin', 'Infubinol', 'Ketapril', 'Naftisol', 'Placebo', 'Propriva', 'Ramicane', 'Stelasyn', 'Zoniferol'], rotation='45')
plt.xlim(-0.75, len(x_axis)-0.25)
plt.ylim(0, max(mouse_list)+10)
plt.title("Number of Mice per Treatment",fontsize = 20)
plt.xlabel("Drug Regimen",fontsize = 14)
plt.ylabel("Number of Mice",fontsize = 14)
count_mouse_regimen
plt.savefig("../Images/mice_per_treatment_plt.png", bbox_inches = "tight")
# +
# Group by gender first
# Count distinct mice per sex: group by (Mouse ID, Sex) first so each mouse
# contributes one row, then count rows per Sex.
groupby_gender = merge_data_new.groupby(['Mouse ID', 'Sex'])
# groupby_gender
# Create new DataFrame for groupby_gender groupby (tested: nunique, count, size) - size (Return an int representing the number of elements in this object.)
#https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.size.html
gender_df = pd.DataFrame(groupby_gender.size())
# gender_df
# Create a DataFrame from the groupby_gender to ge the total mice by gender
mouse_gender = pd.DataFrame(gender_df.groupby(['Sex']).count())
# mouse_gender
mouse_gender.columns = ['Total Count']
mouse_gender
# HW Question-> just Generate a pie plot showing the distribution of female versus male mice using Pandas's DataFrame.plot()
#https://markhneedham.com/blog/2018/09/18/matplotlib-remove-axis-legend/
colors = ['green', 'blue']
explode = (0.1, 0)  # pull the first (Female) wedge out slightly
plot = mouse_gender.plot.pie(y='Total Count', figsize=(15,10), colors = colors, startangle=90, explode = explode, shadow = True, autopct="%1.1f%%", fontsize = 16, legend=None)
plt.title('Male vs Female Mouse Population',fontsize = 20)
# Blank out the automatic y-axis label ("Total Count").
plot.yaxis.set_label_text("")
plt.savefig('../Images/mouse_gender_df.png')
plot.plot()
# -
mouse_gender
# +
# HW Question-> just Generate a pie plot showing the distribution of female versus male mice using Matplotlib's pyplot
#https://stackoverflow.com/questions/7082345/how-to-set-the-labels-size-on-a-pie-chart-in-python
# Create labels for the sections of the pie.
labels = ["Female", "Male"]
# IMPROVEMENT: use the actual per-sex mouse counts instead of the previously
# hard-coded percentages [49.6, 50.4].  plt.pie normalizes its input, so the
# wedge percentages shown by autopct stay correct even if the data changes.
# mouse_gender is indexed by 'Sex' in alphabetical order (Female, Male),
# which matches `labels`.  (The redundant re-assignment of
# mouse_gender.columns, already done in the previous cell, was dropped.)
sizes = mouse_gender['Total Count']
colors = ['green', 'blue']
explode = (0.1, 0)  # pull the Female wedge out slightly
fig1, ax1 = plt.subplots(figsize=(15, 10))
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct="%1.1f%%", textprops={'fontsize': 16}, shadow=True, startangle=90)
plt.title('Male vs Female Mouse Population',fontsize = 20)
plt.savefig('../Images/mouse_gender_plt.png')
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Slice the merged data down to the four regimens of interest; each
# <Drug>_df keeps all timepoints for mice on that regimen.
Capomulin_df = merge_data_new.loc[merge_data_new["Drug Regimen"] == "Capomulin",:]
Capomulin_df
Ramicane_df = merge_data_new.loc[merge_data_new["Drug Regimen"] == "Ramicane", :]
Ramicane_df
Infubinol_df = merge_data_new.loc[merge_data_new["Drug Regimen"] == "Infubinol", :]
Infubinol_df
Ceftamin_df = merge_data_new.loc[ merge_data_new["Drug Regimen"] == "Ceftamin", :]
Ceftamin_df
# +
# Final tumor volume of each mouse for the four regimens of interest
# (Capomulin, Ceftamin, Infubinol, Ramicane), plus quartile/IQR outlier
# screening.  DEDUPLICATION: the original four copy-pasted per-drug cells
# are collapsed into one helper + loop.  The print wording, which was
# inconsistent between cells ("tumors:" vs "tumors is:"), is unified to
# the "is:" form.

def _final_tumor_volumes(drug_df):
    # Last (greatest) timepoint per mouse, merged back onto the full data
    # to recover the tumor volume recorded at that final timepoint.
    last_tp = drug_df.groupby('Mouse ID').max()['Timepoint']
    merged = pd.merge(pd.DataFrame(last_tp), merge_data_new,
                      on=("Mouse ID", "Timepoint"), how="left")
    return merged["Tumor_Volume_mm3"]

# Insertion order matches the original cell order, so printed output keeps
# the same drug order.
_final_vols = {
    "Capomulin": _final_tumor_volumes(Capomulin_df),
    "Ceftamin": _final_tumor_volumes(Ceftamin_df),
    "Infubinol": _final_tumor_volumes(Infubinol_df),
    "Ramicane": _final_tumor_volumes(Ramicane_df),
}

for _drug, _tumors in _final_vols.items():
    quartiles = _tumors.quantile([.25, .5, .75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq - lowerq
    print(f"The lower quartile of {_drug} tumors is: {lowerq}")
    print(f"The upper quartile of {_drug} tumors is: {upperq}")
    print(f"The interquartile range of {_drug} tumors is: {iqr}")
    print(f"The median of {_drug} tumors is: {quartiles[0.5]} ")
    # Tukey's rule: points beyond 1.5 * IQR outside the quartiles are
    # flagged as potential outliers.
    lower_bound = lowerq - (1.5 * iqr)
    upper_bound = upperq + (1.5 * iqr)
    print(f"Values below {lower_bound} could be outliers.")
    print(f"Values above {upper_bound} could be outliers.")

# Later cells (box plot) reference these per-drug Series by name.
Capomulin_tumors = _final_vols["Capomulin"]
Ceftamin_tumors = _final_vols["Ceftamin"]
Infubinol_tumors = _final_vols["Infubinol"]
Ramicane_tumors = _final_vols["Ramicane"]
# +
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# Put treatments into a list for for loop (and later for plot labels) -> couldn't make it work, made it manual sorry :(
# Create empty list to fill with tumor vol data (for plotting)
# Collect the four final-tumor-volume Series (and matching labels) for the
# box plot below.  Order here determines box order on the x-axis.
treatments_to_plot = [Capomulin_tumors, Ramicane_tumors, Infubinol_tumors, Ceftamin_tumors]
treatments_to_plot
Regimen= ['Capomulin', 'Ramicane', 'Infubinol','Ceftamin']
Regimen
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
#https://stackoverflow.com/questions/41997493/python-matplotlib-boxplot-color
# def box_plot(treatments_to_plot, edge_color, fill_color):
# bp = ax.boxplot(treatments_to_plot, patch_artist=True)
# for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
# plt.setp(bp[element], color=edge_color)
# for patch in bp['boxes']:
# patch.set(facecolor=fill_color)
# fig, ax = plt.subplots()
# box_plot(treatments_to_plot, 'blue', 'cyan')
#------
#https://matplotlib.org/3.3.3/gallery/pyplots/boxplot_demo_pyplot.html#sphx-glr-gallery-pyplots-boxplot-demo-pyplot-py
# green_diamond = dict(markerfacecolor='g', marker='D')
# fig1, ax1 = plt.subplots()
# ax1.set_title('Tumor Volume at Selected Mouse')
# ax1.boxplot(treatments_to_plot, flierprops=green_diamond)
#---------
# Box plot of final tumor volume per regimen; outlier fliers drawn as
# yellow squares.
yellow_square = dict(markerfacecolor='y', marker='s')
fig1, ax1 = plt.subplots(figsize=(15, 10))
ax1.boxplot(treatments_to_plot, labels=Regimen, widths = 0.5,vert=True, flierprops=yellow_square) # patch_artist=True: function to fulfill the box
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
ax1.set_title('Tumor Volume at Max.',fontsize =25)
ax1.set_ylabel('Final Tumor Volume (mm3)',fontsize = 14)
ax1.set_xlabel('Drug Regimen',fontsize = 14)
# plt.show()
# Add horizontal line
# Reference line at 45 mm3 plus a manual annotation pointing at the
# Infubinol outlier (position hand-tuned for this dataset).
reference = [45]
left, right = plt.xlim()
plt.hlines(reference, xmin=left, xmax=right, color='y', linestyle= '--')
plt.text(3.1, 36, 'Outlier', fontsize= 14)
plt.savefig('../Images/Tumor Volume at Max.png', bbox_inches = "tight")
plt.show()
# -
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Get all list of mice
# List all mouse IDs, then pick a single Capomulin mouse ("b128") for the
# tumor-volume-over-time line plot.
merge_data_new['Mouse ID'].unique().tolist()
# Choose 1 mouse
Capomulin_last = Capomulin_df.groupby('Mouse ID').max()['Timepoint']
Capomulin_last
forline_df = Capomulin_df.loc[Capomulin_df["Mouse ID"] == "b128",:]
forline_df
# +
#https://matplotlib.org/3.1.0/api/markers_api.html
#https://matplotlib.org/3.3.3/gallery/pyplots/boxplot_demo_pyplot.html#sphx-glr-gallery-pyplots-boxplot-demo-pyplot-py
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Line plot: tumor volume vs. timepoint for mouse b128 (Capomulin).
x_axis = forline_df["Timepoint"]
tumor_vol = forline_df["Tumor_Volume_mm3"]
fig1, ax1 = plt.subplots(figsize=(15, 10))
ax1.grid(True)
plt.title('Capomulin treatmeant of mouse b128',fontsize =25)
plt.plot(x_axis, tumor_vol, linewidth= 2, markersize= 15, marker= "h", color="blue")
plt.xlabel('Timepoint (Days)',fontsize= 16)
plt.ylabel('Tumor Volume (mm3)',fontsize= 16)
plt.savefig("../Images/capomulin_b128_line.png", bbox_inches = "tight")
plt.show()
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# Scatter plot: per-mouse average tumor volume vs. mouse weight for the
# Capomulin regimen.  `avg_capomulin_vol` is reused by the correlation and
# regression cells below.
fig1, ax1 = plt.subplots(figsize=(15, 10))
avg_capomulin_vol= Capomulin_df.groupby(['Mouse ID']).mean()
ax1.grid(True)
# (Removed unused local `marker_size`; the scatter uses s=175 directly.)
plt.scatter(avg_capomulin_vol['Weight_g'],avg_capomulin_vol['Tumor_Volume_mm3'], marker= "h", s=175, color="blue")
plt.title('Mouse Weight Vs. Average Tumor Volume',fontsize =20)
plt.xlabel('Weight (g)',fontsize =16)
# TYPO FIX: axis label read "Averag Tumor Volume".
plt.ylabel('Average Tumor Volume (mm3)',fontsize =16)
# plt.savefig("../Images/scatterplot.png", bbox_inches = "tight")
plt.savefig("../Images/mouse_wgt_avg_tun_vol.png", bbox_inches = "tight")
plt.show()
# -
# ## Correlation and Regression
# +
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
#03-06-Ins_Correlation_Conundrum / Pearson's Model
# Pearson correlation between mouse weight and average tumor volume.
# NOTE(review): `st` is presumably `scipy.stats` imported in an earlier
# cell not shown here — confirm the alias.
corr=round(st.pearsonr(avg_capomulin_vol['Weight_g'],avg_capomulin_vol['Tumor_Volume_mm3'])[0],2)
print(f"The correlation between mouse weight and average tumor volume is {corr}")
# +
#Method one
#0-09-Stu_Fits_and_Regression
# Least-squares fit of average tumor volume on weight; slope/intercept are
# reused by the regression-plot cell below.
x_values = avg_capomulin_vol['Weight_g']
y_values = avg_capomulin_vol['Tumor_Volume_mm3']
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
print(f"slope:{slope}")
print(f"intercept:{intercept}")
print(f"rvalue (Correlation coefficient):{rvalue}")
print(f"pandas (Correlation coefficient):{corr}")
print(f"stderr:{stderr}")
# +
#Method one
#https://stackoverflow.com/questions/332289/how-do-you-change-the-size-of-figures-drawn-with-matplotlib-> method two
#03-09-Stu_Fits_and_Regression
# Regression plot: scatter of weight vs. average tumor volume with the
# fitted line and its equation annotated.
fig1, ax1 = plt.subplots(figsize=(15, 10))
# plt.figure(figsize=(15, 10))
# BUG FIX: the original rebound the name `ax1` to the fitted-value Series,
# silently clobbering the Axes handle created on the line above.  The fitted
# values now use their own name.
fit_line = avg_capomulin_vol['Weight_g']*slope+intercept
plt.scatter(avg_capomulin_vol['Weight_g'],avg_capomulin_vol['Tumor_Volume_mm3'], marker= "h", s=175, color="blue")
plt.plot(avg_capomulin_vol['Weight_g'],fit_line,color="red")
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.annotate(line_eq,(22, 40),fontsize=20,color="red")
print(f"The correlation between mouse weight and average tumor volume is {corr}")
print(f"The r-squared is: {rvalue**2}")
plt.title('Regression Plot Mouse Weight Vs Avg.Tumor Vol.',fontsize =22)
plt.xlabel('Weight (g)', fontsize= 16)
plt.ylabel('Average Tumor Volume (mm3)', fontsize =16)
plt.grid()
plt.savefig("../Images/linear_regression_model_02", bbox_inches = "tight")
plt.show()
# +
# #Method two -> exploring different ways
# #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html
# model=st.linregress(avg_capomulin_vol['Weight_g'],avg_capomulin_vol['Tumor_Volume_mm3'])
# model
# -
# #Method two
# # avg_capomulin_vol['Weight_g'] - Method two
# cslope=0.9544396890241045
# cintercept=21.552160532685015
# +
# #Method two -> exploring different ways
# #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html
# #https://stackoverflow.com/questions/332289/how-do-you-change-the-size-of-figures-drawn-with-matplotlib-method one
# fig1, ax1 = plt.subplots(figsize=(15, 10))
# # plt.figure(figsize=(15, 10))
# ax1 = avg_capomulin_vol['Weight_g']*cslope+cintercept
# plt.scatter(avg_capomulin_vol['Weight_g'],avg_capomulin_vol['Tumor_Volume_mm3'], marker= "h", s=175, color="blue")
# plt.plot(avg_capomulin_vol['Weight_g'],ax1,color="red")
# line_eq = "y = " + str(round(cslope,2)) + "x + " + str(round(cintercept,2))
# plt.annotate(line_eq,(22, 40),fontsize=20,color="red")
# print(f"The correlation between mouse weight and average tumor volume is {corr}")
# print(f"The r-squared is: {rvalue**2}")
# plt.title('Regression Plot Mouse Weight Vs Avg.Tumor Vol.',fontsize =22)
# plt.xlabel('Weight (g)', fontsize= 16)
# plt.ylabel('Average Tumor Volume (mm3)', fontsize =16)
# plt.grid()
# plt.savefig("../Images/linear_regression_model_01", bbox_inches = "tight")
# plt.show()
# -
| matplotlib_challenge/Homework-matplotlib/Pymaceuticals/pymaceuticals_starter-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import math as math
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from numpy import sin,cos,pi,exp,sqrt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from ipywidgets import interact, interactive, fixed, interact_manual, FloatSlider
import ipywidgets as widgets
from IPython.display import display, Latex, Markdown
#this is the library that contains all the rigid body tranforms functions
import transforms as tm
import robot_kinematics as rk
import allfunc as fn
# +
# q1
# q1: chain homogeneous transforms frame 0 -> 1.
# NOTE(review): `tm`/`rk` are project modules (transforms, robot_kinematics);
# tm.hom presumably builds a homogeneous transform from a rotation and a
# translation — confirm against the module docs.
t0a = rk.translate([0., 2., 2.])
tab = tm.hom(tm.Rx(np.pi/2+0.73) , [0., 0., 0.])
# tbc = rk.translate([0., 0., -.45])
tb1 = rk.translate([0., 2., 0.])
t01 = t0a@tab@tb1
print(np.array_repr(t01))
# +
# q2
# q2: longer kinematic chain of alternating rotations (about x, then z)
# and translations, composed left-to-right with @.
# NOTE(review): `theta` is defined but unused in this cell — the joint
# angles appear to be baked into the Rx/Rz arguments instead.
theta = np.array([[0.18, 0.06, 0.50]])
t0a = rk.translate([0., -2., 0.])
tab = tm.hom(tm.Rx(-0.36) , [0., 0., 0.])
tbc = rk.translate([0., 0., 2.])
tcd = tm.hom(tm.Rx(-0.7), [0., 0., 0.])
tde = rk.translate([0., -2., 2.])
tef = tm.hom(tm.Rz(-0.37+np.pi), [0., 0., 0.])
tf1 = rk.translate([0., 0., 2.])
t01 = t0a@tab@tbc@tcd@tde@tef@tf1
print(np.array_repr(t01))
# +
# q3
# q3: a 6x1 twist/Jacobian column with a single nonzero entry.
j = np.array([[0], [0], [0], [0], [1], [0]])
print(np.array_repr(j))
# +
# q4
# q4: scale the same column by a joint velocity of -0.1.
j = np.array([[0], [0], [0], [0], [1], [0]])
dtheta = np.array([-0.1])
print(np.array_repr(j*dtheta))
# +
# q5
import numpy as np
# q5: express the end-effector velocity (jacobian @ dtheta) in frame 1 by
# transforming with the adjoint of T_0in1 = inv(T_1in0).
theta = np.array([[-0.40000000], [-0.72000000], [-0.53000000], [-0.14000000], [0.23000000], [0.16000000], [0.10000000], [-0.04000000]])
dtheta = np.array([[-0.30000000], [0.99000000], [0.42000000], [-0.30000000], [0.44000000], [0.75000000], [-0.50000000], [-0.35000000]])
jacobian = np.array([[0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 1.00000000, 0.00000000, 0.99500417], [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, -0.98722728, -0.01590528], [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, -0.15931821, 0.09855827], [0.00000000, -1.00000000, 0.00000000, 1.00000000, 1.00000000, 0.00000000, 0.28084855, -0.58930916], [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.93000000, 0.44768416, -0.93681766], [-1.00000000, 0.00000000, -1.00000000, 0.00000000, 0.00000000, -4.00000000, -2.77410867, 5.79824209]])
T_1in0 = np.array([[0.09975356, 0.99500417, -0.00399227, 4.80000833], [0.11891692, -0.01590528, -0.99277681, -5.90408340], [-0.98788056, 0.09855827, -0.11990944, -0.46606552], [0.00000000, 0.00000000, 0.00000000, 1.00000000]])
print(np.array_repr(tm.adjoint(np.linalg.inv(T_1in0))@jacobian@dtheta))
# -
| RajPatel/HW4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp engineering.nbdev
# %reload_ext autoreload
# %autoreload 2
# -
# # ๆๆกฃ็นๅพๆๅ
# ๆๆกฃ็นๅพๆๅ็็ฎ็ๆฏ็จ็นๅพๆฅ่กจ็คบๆๆกฃใ
#
# ๅธธ็จ็ๆๆกฃ็นๅพๆๅๆนๆณๆ:
# ## bag-of-words model
# This model transforms each document to a fixed-length vector of integers. For example, given the sentences:
#
# John likes to watch movies. Mary likes movies too.
#
# John also likes to watch football games. Mary hates football.
#
# The model outputs the vectors:
#
# [1, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0]
#
# [1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 1]
#
# Bag-of-words models are surprisingly effective, but have several weaknesses.
#
# ้ฆๅ๏ผไปไปฌไผไธขๅคฑๆๆๆๅณๅ่ฏ้กบๅบ็ไฟกๆฏ๏ผโ็บฆ็ฟฐๅๆฌข็ไธฝโๅโ็ไธฝๅๆฌข็บฆ็ฟฐโๅฏนๅบไบ็ธๅ็ๅ้ใ There is a solution: bag of n-grams models consider word phrases of length n to represent documents as fixed-length vectors to capture local word order but suffer from data sparsity and high dimensionality.
#
# ๅถๆฌก๏ผ่ฏฅๆจกๅไธไผๅฐ่ฏๅญฆไน ๅบ็กๅ่ฏ็ๅซไน๏ผๅ ๆญค๏ผๅ้ไน้ด็่ท็ฆปๅนถไธๆปๆฏๅๆ ๅบๅซไนไธ็ๅทฎๅผใ
# ## TF-IDF
# TF-IDFๆฏๅจ่ฏ่ขๅบ็กไธ็ๆน่ฟ๏ผๅฏนๅบ็ฐ้ข็ๆฏ่พ้ซ็่ฏ่ฟ่ก้ๆ(้คไปฅIDF)
#
# TF-IDF๏ผterm frequencyโinverse document frequency๏ผๆฏไธ็ง็ป่ฎกๆนๆณ๏ผ็จไปฅ่ฏไผฐไธๅญ่ฏๅฏนไบไธไธชๆไปถ้ๆไธไธช่ฏญๆๅบไธญ็ๅถไธญไธไปฝๆไปถ็้่ฆ็จๅบฆใ
#
# ๅญ่ฏ็้่ฆๆง้็ๅฎๅจๆไปถไธญๅบ็ฐ็ๆฌกๆฐๆๆญฃๆฏๅขๅ ๏ผไฝๅๆถไผ้็ๅฎๅจ่ฏญๆๅบไธญๅบ็ฐ็้ข็ๆๅๆฏไธ้ใ
#
# $$TFIDF=TF*IDF$$
# ### ่ฏ้ข (term frequency, TF)
#
# ๆ็ๆฏๆไธไธช็ปๅฎ็่ฏ่ฏญๅจ่ฏฅๆไปถไธญๅบ็ฐ็ๆฌกๆฐใ่ฟไธชๆฐๅญ้ๅธธไผ่ขซๅฝไธๅ๏ผๅๅญไธ่ฌๅฐไบๅๆฏ ๅบๅซไบIDF๏ผ๏ผไปฅ้ฒๆญขๅฎๅๅ้ฟ็ๆไปถใ๏ผๅไธไธช่ฏ่ฏญๅจ้ฟๆไปถ้ๅฏ่ฝไผๆฏ็ญๆไปถๆๆด้ซ็่ฏ้ข๏ผ่ไธ็ฎก่ฏฅ่ฏ่ฏญ้่ฆไธๅฆใ๏ผ
# ่ฏ้ข๏ผTF๏ผ=ๆไธช่ฏๅจๆ็ซ ไธญ็ๅบ็ฐๆฌกๆฐ
# ่่ๅฐๆ็ซ ๆ้ฟ็ญไนๅ๏ผไธบไบไพฟไบไธๅๆ็ซ ็ๆฏ่พ๏ผ่ฟ่กโ่ฏ้ขโๆ ๅๅใ
# $$TF=\frac{ๆไธช่ฏๅจๆ็ซ ไธญ็ๅบ็ฐๆฌกๆฐ}{ๆ็ซ ็ๆป่ฏๆฐ}$$
# ๆ่
# $$TF=\frac{ๆไธช่ฏๅจๆ็ซ ไธญ็ๅบ็ฐๆฌกๆฐ}{่ฏฅ่ฎญ็ปๆๆฌไธญๅบ็ฐๆๅคๆฌก็่ฏๆฐ}$$
# ### ้ๅๆไปถ้ข็ (inverse document frequency, IDF)
#
# ๆฏไธไธช่ฏ่ฏญๆฎ้้่ฆๆง็ๅบฆ้ใๆไธ็นๅฎ่ฏ่ฏญ็IDF๏ผๅฏไปฅ็ฑๆปๆไปถๆฐ็ฎ้คไปฅๅๅซ่ฏฅ่ฏ่ฏญไนๆไปถ็ๆฐ็ฎ๏ผๅๅฐๅพๅฐ็ๅๅๅฏนๆฐๅพๅฐใ
# $$IDF=log(\frac{ๆปๆๆกฃๆฐ}{ๅๅซ่ฏฅ่ฏ็ๆๆกฃๆฐ+1})$$
# ๅฆๆไธไธช่ฏ่ถๅธธ่ง๏ผ้ฃไนๅๆฏๅฐฑ่ถๅคง๏ผ้ๆๆกฃ้ข็ๅฐฑ่ถๅฐ่ถๆฅ่ฟ0ใๅๆฏไนๆไปฅ่ฆๅ 1๏ผๆฏไธบไบ้ฟๅๅๆฏไธบ0๏ผๅณๆๆๆๆกฃ้ฝไธๅๅซ่ฏฅ่ฏ๏ผใ
# ## Embeding
# ไธ่ฌ็จdocไธญๆๆword็embedding็ๅๅผ๏ผๆฅ่กจ็คบ่ฟไธชdoc็็นๅพ
# # sklearnไธญ็จไบๆๆกฃ็นๅพๅ็็ฑป
#
# CountVectorizerไธTfidfVectorizer๏ผ่ฟไธคไธช็ฑป้ฝๆฏ็นๅพๆฐๅผ่ฎก็ฎ็ๅธธ่งๆนๆณใๅฏนไบๆฏไธไธช่ฎญ็ปๆๆฌ๏ผCountVectorizerๅช่่ๆฏ็ง่ฏๆฑๅจ่ฏฅ่ฎญ็ปๆๆฌไธญๅบ็ฐ็้ข็๏ผ่TfidfVectorizer้คไบ่้ๆไธ่ฏๆฑๅจๅฝๅ่ฎญ็ปๆๆฌไธญๅบ็ฐ็้ข็ไนๅค๏ผๅๆถๅณๆณจๅๅซ่ฟไธช่ฏๆฑ็ๅถๅฎ่ฎญ็ปๆๆฌๆฐ็ฎ็ๅๆฐใ็ธๆฏไนไธ๏ผ่ฎญ็ปๆๆฌ็ๆฐ้่ถๅค๏ผTfidfVectorizer่ฟ็ง็นๅพ้ๅๆนๅผๅฐฑๆดๆไผๅฟใ
# ## CountVectorizer
# ๅฎ็ฐไบ่ฏ่ข
#
# Convert a collection of text documents to a matrix of token counts
#
# This implementation produces a sparse representation of the counts using
# `scipy.sparse.csr_matrix`.
# ### ้ป่ฎคๅๆฐ
# +
# NOTE(review): this cell originally *executed* the constructor call below to
# document CountVectorizer's default parameters, but `dtype=<class
# 'numpy.int64'>` is the repr of a type (not valid Python syntax) and
# CountVectorizer is only imported in a later cell — so this cell could never
# run.  The defaults are kept here as a comment instead (dtype default is
# numpy.int64).
# CountVectorizer(
#     input='content',
#     encoding='utf-8',
#     decode_error='strict',
#     strip_accents=None,
#     lowercase=True,
#     preprocessor=None,
#     tokenizer=None,
#     stop_words=None,
#     token_pattern='(?u)\\b\\w\\w+\\b',
#     ngram_range=(1, 1),
#     analyzer='word',
#     max_df=1.0,
#     min_df=1,
#     max_features=None,
#     vocabulary=None,
#     binary=False,
#     dtype=numpy.int64,
# )
# \w: ๅน้
ๅญๆฏใๆฐ็ปใไธๅ็บฟใๆณจๆไธไผๅน้
ๅฐๆ ็น็ฌฆๅท
# -
from sklearn.feature_extraction.text import CountVectorizer
# BUG FIX: the second corpus string was split across two physical lines in
# the middle of the literal (an encoding-mangling artifact), which is a
# syntax error; it is rejoined onto one line here.  NOTE(review): the corpus
# text itself is mojibake — confirm the intended Chinese strings upstream.
X_test = ['ๆฒกๆ ไฝ ็ ๅฐๆน ้ฝๆฏ ไปไนก ๆฒกๆ ใ . , : ',
          'ๆฒกๆ ไฝ ็ ๆ ่ก ้ฝๆฏ ๆตๆตช']
count_vec = CountVectorizer()
# ### count_vec.fit(raw_documents, y=None)
# Learn a vocabulary dictionary of all tokens in the raw documents.
# Learn the vocabulary from the toy corpus, then inspect it and the sparse
# document-term matrix.
count_vec.fit(X_test)
# BUG FIX: several mojibake-damaged markdown lines in this cell had lost
# their leading '#' (the encoding error inserted line breaks inside
# multi-byte characters), making the cell invalid Python.  They are
# commented out below; the code itself is unchanged.
# ๅฏไปฅ็ๅฐ๏ผ้ป่ฎคๆ
# ๅตไธๅชๆไธไธชๅญๆฏ็ 'I' ่ขซ่ฟๆปคๆไบ๏ผ
count_vec.vocabulary_
# ### count_vec.transform(X_test)
# Transform documents to document-term matrix.
count_vec.transform(X_test)
# (index1,index2) countไธญ๏ผ
# * index1่กจ็คบไธบ็ฌฌๅ ไธชๅฅๅญๆ่
# ๆๆกฃ๏ผ
# * index2ไธบๆๆ่ฏญๆๅบไธญ็ๅ่ฏ็ปๆ็่ฏๅ
# ธ็ๅบๅทใ
# * countไธบๅจ่ฟไธชๆๆกฃไธญ่ฟไธชๅ่ฏๅบ็ฐ็ๆฌกๆฐใ
# * ๆณจๆ๏ผ่ฟๆ ท็ป่ฎกๆถไธขๅคฑไบwordๅจtextไธญ็ไฝ็ฝฎไฟกๆฏ!!!
print(count_vec.transform(X_test))
# doc-token็ฉ้ต๏ผ
# ๆฏไธ่ก่กจ็คบไธไธชๆๆกฃ๏ผๆฏไธๅ่กจ็คบ็ธๅบ็ผๅท็tokenใๅผไธบtokenๅจdocไธญๅบ็ฐ็้ขๆฐใ
# ่ฟไธๆญฅๅทฒ็ปๅฐdoc่ฝฌๅๆไบ็ฑ่ฏ้ข่กจ็คบ็็นๅพ
count_vec.transform(X_test).toarray()
# ## TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
# TfidfVectorizer(
# input='content',
# encoding='utf-8',
# decode_error='strict',
# strip_accents=None,
# lowercase=True,
# preprocessor=None,
# tokenizer=None,
# analyzer='word',
# stop_words=None,
# token_pattern='(?u)\\b\\w\\w+\\b',
# ngram_range=(1, 1),
# max_df=1.0,
# min_df=1,
# max_features=None,
# vocabulary=None,
# binary=False,
# dtype=<class 'numpy.float64'>,
# norm='l2',
# use_idf=True,
# smooth_idf=True,
# sublinear_tf=False,
# )
#
# * norm : 'l1', 'l2' or None, optional (default='l2')
# ๆ่พๅบๅ้็้ฟๅบฆๅฝไธๅ
# Each output row will have unit norm, either:
# * 'l2': Sum of squares of vector elements is 1. The cosine
# similarity between two vectors is their dot product when l2 norm has been applied.
# * 'l1': Sum of absolute values of vector elements is 1.
# See :func:`preprocessing.normalize`
# * token_pattern='(?u)\\b\\w\\w+\\b'
# ๆณจๆ้ป่ฎคๅผไผ่ฟๆปคๆๅไธชๆฑๅญ
# TF-IDF on the same toy corpus; the custom token_pattern keeps
# single-character tokens (the sklearn default drops them).
tfidf = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
tfidf.fit(X_test)
# ### Inspect the learned vocabulary.
tfidf.vocabulary_
tfidf.stop_words_
# NOTE(review): get_feature_names() is deprecated since scikit-learn 1.0 and
# removed in 1.2 — newer versions require get_feature_names_out().
tfidf.get_feature_names()
tfidf.transform(['a b ไฝ ไปไนก']).toarray()
# ### ไธไธช็ป่้ฎ้ข
# BUG FIX: the four lines below were bare (non-comment) text left over from
# encoding-mangled markdown — commented out so the cell is valid Python.
# \wๅน้
# ็ๆฏๅญๆฏๆฐๅญใไธๅ็บฟ
# ๅฏไปฅ็ๅฐ ๅ่ฏไปฅๅๆ 'ๆฒก.ๆ'ๅไธบไธไธช่ฏ๏ผไฝๆฏtoken_pattern=r"(?u)\b\w+\b" ๆฒกๆ'.', ๅฎ้
# ็ๅค็ๆฏๆ'ๆฒก' 'ๆ'ไฝไธบไธคไธช่ฏๆฅๅค็
# BUG FIX: the second corpus string was split mid-literal (same mangling);
# rejoined onto one line.  NOTE(review): text is mojibake — confirm upstream.
X_test = ['ๆฒก.ๆ ไฝ ็ ๅฐๆน ้ฝๆฏ ไปไนก ๆฒกๆ ใ . , : ',
          'ๆฒกๆ ไฝ ็ ๆ ่ก ้ฝๆฏ ๆตๆตช']
tfidf = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b")
tfidf.fit(X_test)
tfidf.vocabulary_
# ## ไฝฟ็จGensim่ฟ่กTfIdf
# +
from gensim.models import TfidfModel
from gensim import corpora
from collections import defaultdict
"""
"""
# Classic gensim walkthrough: tokenize, drop stopwords and hapaxes, build a
# Dictionary, convert documents to bag-of-words, then fit a TfidfModel.
raw_corpus = ["Human machine interface for lab abc computer applications",
              "A survey of user opinion of computer system response time",
              "The EPS user interface management system",
              "System and human system engineering testing of EPS",
              "Relation of user perceived response time to error measurement",
              "The generation of random binary unordered trees",
              "The intersection graph of paths in trees",
              "Graph minors IV Widths of trees and well quasi ordering",
              "Graph minors A survey"]
# Create a set of frequent words
stoplist = set('for a of the and to in'.split(' '))
# Lowercase each document, split it by white space and filter out stopwords
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in raw_corpus]
# Count word frequencies
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
# Only keep words that appear more than once
processed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]
print(processed_corpus)
dictionary = corpora.Dictionary(processed_corpus)
print(dictionary)  # Dictionary(12 unique tokens: ['computer', 'human', 'interface', 'response', 'survey']...)
print(dictionary.token2id)
"""
{'computer': 0, 'human': 1, 'interface': 2, 'response': 3, 'survey': 4, 'system': 5, 'time': 6, 'user': 7, 'eps': 8, 'trees': 9, 'graph': 10, 'minors': 11}
"""
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
print(new_vec)  # [(0, 1), (1, 1)]
"""
The first entry in each tuple corresponds to the ID of the token in the dictionary, the second corresponds to the count of this token.
Note that "interaction" did not occur in the original corpus and so it was not included in the vectorization.
"""
bow_corpus = [dictionary.doc2bow(text) for text in processed_corpus]
print(bow_corpus)
# train the model
tfidf = TfidfModel(bow_corpus)
# transform the "system minors" string
print(tfidf[dictionary.doc2bow("system minors".lower().split())])  # [(5, 0.5898341626740045), (11, 0.8075244024440723)]
# -
# # nb_export
# Export this notebook's tagged cells to a module and rebuild the docs.
# NOTE(review): star import is required by nbdev's documented workflow here.
from nbdev.export import *
notebook2script()
# !nbdev_build_docs
| algo_seq_tfidf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3.7
# language: python
# name: python3.7
# ---
import numpy as np
import pandas as pd
# Load the WRDS panel (space-separated), parse YYYYMM dates, and sort by
# firm id then date so per-firm time series are contiguous.
fulldata = pd.read_table("./datasets/wrds_top_2000_3mo_lag.dat", sep=" ")
fulldata['date'] = pd.to_datetime(fulldata['date'],format="%Y%m")
fulldata = fulldata.sort_values(by=['gvkey','date'])
fulldata
fulldata.info()
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import time
# +
# Fundamentals/momentum columns used both as predictors and (shifted one
# year forward) as targets.
y_list = ['mom1m', 'mom3m', 'mom6m', 'mom9m', 'saleq_ttm',
          'cogsq_ttm', 'xsgaq_ttm',
          'oiadpq_ttm', 'niq_ttm', 'cheq_mrq', 'rectq_mrq',
          'invtq_mrq', 'acoq_mrq', 'ppentq_mrq', 'aoq_mrq',
          'dlcq_mrq', 'apq_mrq', 'txpq_mrq', 'lcoq_mrq',
          'ltq_mrq']
# 1970 - 1999 : Training
# 2000 - 2000 : Testing
data_train = fulldata[fulldata['year'] < 2000]
data_test = fulldata[fulldata['year'] >= 2000]
# store error for each fundamental
train_mse_list = []
test_mse_list = []
rffit_list = []
X_train_list = []
y_train_list = []
X_test_list = []
y_test_list = []
# Fit one random forest per target column.  The one-year-forward target is
# built by shifting the target's date back a year and inner-joining on
# (date, gvkey), so each row pairs features at t with the value at t+1.
for i, item in enumerate(y_list):
    print("Handling the ", i, "th target: ", item)
    start =time.time()
    # Construct proper dataframe for training & testing
    X_train = data_train.set_index('date')
    y_train = X_train[['gvkey', item]]
    y_train = y_train.reset_index()
    y_train['date'] = y_train['date'] - pd.DateOffset(years=1)
    y_train.columns = ['date', 'gvkey', item+'_forward_1']
    X_train = X_train.reset_index()
    df_train_item = X_train.merge(y_train, how='inner', on=['date', 'gvkey'])
    X_test = data_test.set_index('date')
    y_test = X_test[['gvkey', item]]
    y_test = y_test.reset_index()
    y_test['date'] = y_test['date'] - pd.DateOffset(years=1)
    y_test.columns = ['date', 'gvkey', item+'_forward_1']
    X_test = X_test.reset_index()
    df_test_item = X_test.merge(y_test, how='inner', on=['date', 'gvkey'])
    # split predictors from targets
    y_train = df_train_item[item+'_forward_1']
    X_train = df_train_item[y_list]
    y_test = df_test_item[item+'_forward_1']
    X_test = df_test_item[y_list]
    print("Processing complete")
    rffit = RandomForestRegressor(n_estimators = 10).fit(X_train, y_train)
    y_train_pred = rffit.predict(X_train)
    y_test_pred = rffit.predict(X_test)
    train_mse = mean_squared_error(y_train, y_train_pred)
    test_mse = mean_squared_error(y_test, y_test_pred)
    print("training loss: ", train_mse)
    print("testing loss: ", test_mse)
    # store all
    # Keep per-target models and splits for the export / comparison cells.
    train_mse_list.append(train_mse)
    test_mse_list.append(test_mse)
    rffit_list.append(rffit)
    X_train_list.append(X_train)
    y_train_list.append(y_train)
    X_test_list.append(X_test)
    y_test_list.append(y_test)
    print("Take time: ", time.time()-start, "s")
# +
# generate factor dataframe
# Shift test dates forward one year so the factor matrix is stamped with
# the date the prediction applies to.
# NOTE(review): `df_test_item` here is whatever the previous loop left
# behind, i.e. the LAST target's test frame; using its date/gvkey columns
# for every factor assumes all targets share identical test rows — verify.
date = df_test_item['date'] + pd.DateOffset(years=1)
gvkey = df_test_item['gvkey']
for i in range(len(X_train_list)):
    print("Generate ", i, "th fundamentals dataframe: ", y_list[i])
    X_test = X_test_list[i]
    y_test = y_test_list[i]
    model = rffit_list[i]
    y_test_pred = model.predict(X_test)
    df_long = pd.DataFrame({'date':date, 'gvkey':gvkey, 'factor_val': y_test_pred})
    df_wide = df_long.pivot(index='date', columns='gvkey', values='factor_val')
    # Only fundamentals past the four momentum columns (i > 3) are saved.
    if i > 3:
        name = "factor_" + y_list[i] + '.csv'
        df_wide.to_csv("./random_forest/"+name)
# -
df_long.pivot(index='date', columns='gvkey', values='factor_val')
gvkey = df_test_item['gvkey']
gvkey
# +
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
lm_list = []
ridge_list=[]
LASSO_list = []

def _report_and_save(model, X_train, y_train, X_test, y_test, i, out_dir):
    # Print train/test MSE for a fitted model and, for fundamentals past the
    # momentum columns (i > 3), persist its test predictions as a
    # date x gvkey factor matrix under out_dir.
    y_train_pred = model.predict(X_train)
    y_test_pred = model.predict(X_test)
    train_mse = mean_squared_error(y_train, y_train_pred)
    test_mse = mean_squared_error(y_test, y_test_pred)
    print("training loss: ", train_mse)
    print("testing loss: ", test_mse)
    df_long = pd.DataFrame({'date':date, 'gvkey':gvkey, 'factor_val': y_test_pred})
    df_wide = df_long.pivot(index='date', columns='gvkey', values='factor_val')
    if i > 3:
        name = "factor_" + y_list[i] + '.csv'
        df_wide.to_csv(out_dir + name)

# DEDUPLICATION: the three near-identical evaluate/print/save sections for
# OLS, ridge and LASSO were extracted into _report_and_save; printed output
# and files written are identical to the original cell.
for i in range(len(X_train_list)):
    X_train = X_train_list[i]
    X_test = X_test_list[i]
    y_train = y_train_list[i]
    y_test = y_test_list[i]
    lmfit = LinearRegression().fit(X_train, y_train)
    ridgefit = RidgeCV(cv=10).fit(X_train, y_train)
    lassofit = LassoCV(cv=10).fit(X_train, y_train)
    lm_list.append(lmfit)
    _report_and_save(lmfit, X_train, y_train, X_test, y_test, i, "./linear_regression/")
    print("+"*50)
    ridge_list.append(ridgefit)
    _report_and_save(ridgefit, X_train, y_train, X_test, y_test, i, "./ridge_regression/")
    print("+"*50)
    LASSO_list.append(lassofit)
    _report_and_save(lassofit, X_train, y_train, X_test, y_test, i, "./lasso_regression/")
    print(" ")
    print(" ")
    print(" ")
    print(" ")
# +
import pandas as pd
import numpy as np
def list_to_mat(id_list, date_list, factor_val):
    """Pivot three parallel lists into an id x date matrix of factor values.

    Dates are parsed from 'YYYYMM' strings; rows are ids, columns are dates.
    """
    long_frame = pd.DataFrame(
        {'id': id_list, 'date': date_list, 'factor_val': factor_val}
    )
    long_frame['date'] = pd.to_datetime(long_frame['date'], format='%Y%m')
    ordered = long_frame.sort_values(by=['date', 'id'])
    return ordered.pivot(index='id', columns='date', values='factor_val')
# Parse forecasts_mlp.txt into per-factor prediction/target matrices.
# NOTE(review): `predictions` and `targets` are never used below -- the
# parsed values go into factor_val / target_val instead.
predictions = {}
targets = {}
# Pre-sized slots: the forecast file is assumed to hold 337583 records
# -- TODO confirm this count against the actual file.
id_list = [None] * 337583
date_list = [None] * 337583
# One row of slots per factor; each record carries 16 factor values.
factor_val = [[None] * 337583 for i in range(16)]
target_val = [[None] * 337583 for i in range(16)]
count = 0
with open("forecasts_mlp.txt", "r") as input_file:
    iterator = enumerate(input_file)
    for i, line in iterator:
        # Skip the 19-line file header.
        if i < 19:
            continue
        elif (i - 19) % 9 == 0:
            # Each record spans 9 lines; the first looks like:
            # 200704 120794 mse=0.1496 \n
            date, gvkey, _ = line.split()
            # Consume the 5 intermediate lines of the record. next() on the
            # shared enumerate also advances the outer for-loop -- the
            # statement order here is load-bearing.
            next(iterator)
            next(iterator)
            next(iterator)
            next(iterator)
            next(iterator)
            i, line = next(iterator)
            # output[t+1]: 124.73 31.52 34.79 35.14 4.90 19.82 41.51 -1.05 6.44 81.57 526.91 28.23 2.06 1.49 13.39 378.02 \n
            pred = line[13:].split()  # 13 chars skips the "output[t+1]: " prefix
            i, line = next(iterator)
            # target[t+1]: 27.20 8.30 2.00 12.60 89.00 6.80 0.00 0.00 4.20 134.10 1347.50 6.00 4.80 0.00 3.00 592.50
            target = line[13:].split()
            id_list[count] = (gvkey)
            date_list[count] = (date)
            # Values stay as strings here; conversion happens downstream.
            for j in range(16):
                factor_val[j][count]=pred[j]
                target_val[j][count]=target[j]
            count += 1
            if count % 10000 ==0:
                print('Finished processing line ', count)
# Write one date x gvkey matrix per factor, for predictions and targets.
for j in range(16):
    result_df = list_to_mat(id_list, date_list, factor_val[j])
    result_df.to_csv(r'factor-model-mlp/prediction_' + str(j) + '.csv')
    target_df = list_to_mat(id_list, date_list, target_val[j])
    target_df.to_csv(r'factor-model-mlp/target_' + str(j) + '.csv')
# -
# !ls
# Quick sanity check of the raw WRDS extract (space-delimited .dat file).
df = pd.read_table('./datasets/wrds_top_2000_3mo_lag.dat', sep = " ")
df['year']
| Other Simple Models and Predictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/minche0l/-/blob/main/Untitled3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="LdtBQlIvXVDC" outputId="332393d8-2657-4187-fc70-953e60447464"
# https://www.geeksforgeeks.org/sieve-of-eratosthenes/
def SieveOfEratosthenes(n):
    """Print every prime number p with 2 <= p <= n, one per line."""
    # is_prime[k] stays True until k is crossed off as a multiple of a
    # smaller prime.
    is_prime = [True] * (n + 1)
    p = 2
    while p * p <= n:
        if is_prime[p]:
            # Start crossing off at p*p: smaller multiples of p were
            # already removed by smaller prime factors.
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
        p += 1
    # Emit the survivors.
    for candidate in range(2, n + 1):
        if is_prime[candidate]:
            print(candidate)
# Driver code
if __name__ == '__main__':
    n = 100
    print("Following are the prime numbers smaller")
    print("than or equal to", n)
    SieveOfEratosthenes(n)
| Untitled3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# # Proving Universality
# -
# What does it mean for a computer to do everything that it could possibly do? This was a question tackled by Alan Turing before we even had a good idea of what a computer was.
#
# To ask this question for our classical computers, and specifically for our standard digital computers, we need to strip away all the screens, speakers and fancy input devices. What we are left with is simply a machine that converts input bit strings into output bit strings. If a device can perform any such conversion, taking any arbitrary set of inputs and converting them to an arbitrarily chosen set of outputs, we call it *universal*.
#
# It turns out that the requirements for universality on these devices are quite reasonable. The gates we needed to perform addition in 'The atoms of computation' are also sufficient to implement any possible computation. In fact, just the classical NAND gate is enough, when combined together in sufficient quantities.
#
# Though our current computers can do everything in theory, some tasks are too resource-intensive in practice. In our study of how to add, we saw that the required resources scaled linearly with the problem size. For example, if we double the number of digits in the numbers, we double the number of small scale additions we need to make.
#
# For many other problems, the required resources scale exponentially with the input size. Factorization is a prominent example. In a recent study [1], a 320-digit number took CPU years to factorize. For numbers that are not much larger, there aren't enough computing resources in the world to tackle them -- even though those same numbers could be added or multiplied on just a smartphone in a much more reasonable time.
#
# Quantum computers will alleviate these problems by achieving universality in a fundamentally different way. As we saw in 'The unique properties of qubits', the variables of quantum computing are not equivalent to those of standard computers. The gates that we use, such as those in the last section, go beyond what is possible for the gates of standard computers. Because of this, we can find ways to achieve results that are otherwise impossible.
#
# So how to define what universality is for a quantum computer? We can do this in a way that mirrors the definition discussed above. Just as digital computers convert sets of input bit strings to sets of output bit strings, unitary operations convert sets of orthogonal input states into orthogonal output states.
#
# As a special case, these states could describe bit strings expressed in quantum form. If we can achieve any unitary, we can therefore achieve universality in the same way as for digital computers.
#
# Another special case is that the input and output states could describe real physical systems. The unitary would then correspond to a time evolution. When expressed in an exponential form using a suitable Hermitian matrix, that matrix would correspond to the Hamiltonian. Achieving any unitary would therefore correspond to simulating any time evolution, and engineering the effects of any Hamiltonian. This is also an important problem that is impractical for classical computers, but is a natural application of quantum computers.
#
# Universality for quantum computers is then simply this: the ability to achieve any desired unitary on any arbitrary number of qubits.
#
# As for classical computers, we will need to split this big job up into manageable chunks. We'll need to find a basic set of gates that will allow us to achieve this. As we'll see, the single- and two-qubit gates of the last section are sufficient for the task.
#
# Suppose we wish to implement the unitary
#
# $$
# U = e^{i(aX + bZ)},
# $$
#
# but the only gates we have are $R_x(\theta) = e^{i \frac{\theta}{2} X}$ and $R_z(\theta) = e^{i \frac{\theta}{2} Z}$. The best way to solve this problem would be to use Euler angles. But let's instead consider a different method.
#
# The Hermitian matrix in the exponential for $U$ is simply the sum of those for the $R_x(\theta)$ and $R_z(\theta)$ rotations. This suggests a naive approach to solving our problem: we could apply $R_z(2b) = e^{i bZ}$ followed by $R_x(2a) = e^{i a X}$. Unfortunately, because we are exponentiating matrices that do not commute, this approach will not work.
#
# $$
# e^{i a X} e^{i b Z} \neq e^{i(aX + bZ)}
# $$
#
# However, we could use the following modified version:
#
# $$
# U = \lim_{n\rightarrow\infty} ~ \left(e^{iaX/n}e^{ibZ/n}\right)^n.
# $$
#
# Here we split $U$ up into $n$ small slices. For each slice, it is a good approximation to say that
#
# $$
# e^{iaX/n}e^{ibZ/n} = e^{i(aX + bZ)/n}
# $$
#
# The error in this approximation scales as $1/n^2$. When we combine the $n$ slices, we get an approximation of our target unitary whose error scales as $1/n$. So by simply increasing the number of slices, we can get as close to $U$ as we need. Other methods of creating the sequence are also possible to get even more accurate versions of our target unitary.
#
# The power of this method is that it can be used in more complex cases than just a single qubit. For example, consider the unitary
#
# $$
# U = e^{i(aX\otimes X\otimes X + bZ\otimes Z\otimes Z)}.
# $$
#
# We know how to create the unitary $e^{i\frac{\theta}{2} X\otimes X\otimes X}$ from a single qubit $R_x(\theta)$ and two controlled-NOTs.
#
# ```python
# qc.cx(0,2)
# qc.cx(0,1)
# qc.rx(theta,0)
# qc.cx(0,1)
# qc.cx(0,2)
# ```
#
# With a few Hadamards, we can do the same for $e^{i\frac{\theta}{2} Z\otimes Z\otimes Z}$.
#
# ```python
# qc.h(0)
# qc.h(1)
# qc.h(2)
# qc.cx(0,2)
# qc.cx(0,1)
# qc.rx(theta,0)
# qc.cx(0,1)
# qc.cx(0,2)
# qc.h(2)
# qc.h(1)
# qc.h(0)
# ```
#
# This gives us the ability to reproduce a small slice of our new, three-qubit $U$:
#
# $$
# e^{iaX\otimes X\otimes X/n}e^{ibZ\otimes Z\otimes Z/n} = e^{i(aX\otimes X\otimes X + bZ\otimes Z\otimes Z)/n}.
# $$
#
# As before, we can then combine the slices together to get an arbitrarily accurate approximation of $U$.
#
# This method continues to work as we increase the number of qubits, and also the number of terms that need simulating. Care must be taken to ensure that the approximation remains accurate, but this can be done in ways that require reasonable resources. Adding extra terms to simulate, or increasing the desired accuracy, only require the complexity of the method to increase polynomially.
#
# Methods of this form can reproduce any unitary $U = e^{iH}$ for which $H$ can be expressed as a sum of tensor products of Paulis. Since we have shown previously that all matrices can be expressed in this way, this is sufficient to show that we can reproduce all unitaries. Though other methods may be better in practice, the main concept to take away from this chapter is that there is certainly a way to reproduce all multi-qubit unitaries using only the basic operations found in Qiskit. Quantum universality can be achieved.
# ### References
#
# [1] ["Factorization of a 1061-bit number by the Special Number Field Sieve"](https://eprint.iacr.org/2012/444.pdf) by <NAME>.
import qiskit
qiskit.__qiskit_version__
| content/ch-gates/proving-universality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# %pylab inline
# + deletable=true editable=true
# Constants (CGS units: grams / centimetres / seconds)
density_air = 0.856e-3 # g cm^-3
density_water = 1 # g cm^-3
density_mist = 1e-6 # g cm^-3
viscosity = 0.206 # cm^2 s^-1 This is the kinematic viscosity
# NOTE(review): standard gravity is ~981 cm s^-2; 0.0981 matches neither
# cm s^-2 nor m s^-2 -- looks like a units slip, confirm intended scaling.
g = 0.0981 #cm s^-2
# + deletable=true editable=true
# Parameters
# epsilon: half the mist-to-water density ratio (used as the radius
# growth rate per unit velocity in the accretion update below).
epsilon = (density_mist/density_water)/2
# sigma: scale constant consumed by f() below, built from g, the
# kinematic viscosity and the water/air density ratio.
sigma = ((8/81)*(g**2/viscosity)*(density_water/density_air)**2)**(1/3)
# + [markdown] deletable=true editable=true
# Consider the raindrop's behaviour without the mist drag.
# + deletable=true editable=true
# Initial Conditions
init_radius = 0.01 # cm
init_velocity = 0 # cm s^-1
time_step = 0.001  # integration step (s)
end_time = 10.     # total simulated time (s)
# arange / empty_like come from the `%pylab inline` namespace above.
time_list = arange(0,end_time,time_step)
vel_list = empty_like(time_list) # Initially at rest
vel_list[0] = init_velocity
# + deletable=true editable=true
def f(velocity):
    """Acceleration dv/dt for a fixed-radius drop (no mist-drag term).

    NOTE(review): reads module-level g, sigma and init_radius.
    """
    scale = g / (sigma ** 2 * init_radius)
    ratio = velocity / init_radius
    return scale * (1 - ratio ** (3 / 2))
# + deletable=true editable=true
# Forward-Euler integration of dv/dt = f(v) for the fixed-radius drop.
for index in range(1,vel_list.shape[0]):
    vel_list[index] = vel_list[index-1] + f(vel_list[index-1])*time_step
# + deletable=true editable=true
# Velocity vs. time (pylab's plot).
plot(time_list,vel_list)
# + deletable=true editable=true
# Redefinition: this f shadows the earlier one-argument f and adds the
# mist-drag term -3*epsilon*v^2/r for a drop of (growing) radius `radius`.
# NOTE(review): the prefactor still divides by init_radius while the
# ratio term uses `radius` -- confirm this asymmetry is intended.
def f(velocity,radius):
    return (g/(sigma**2*init_radius))*(1-(velocity/radius)**(3/2)) - 3*epsilon*velocity**2/radius
# Eleven drops with initial radii 0.01..0.11 cm, all starting at rest.
init_radius_list = linspace(0.01,0.11,11)
radius_list = empty((time_list.shape[0],init_radius_list.shape[0]))
radius_list[0] = init_radius_list
time_list = arange(0,end_time,time_step)
init_vel_list = np.zeros_like(init_radius_list)
vel_list = empty_like(radius_list) # Initially at rest
vel_list[0] = init_vel_list
# + deletable=true editable=true
# Coupled forward-Euler update: velocity via f, radius grows by
# epsilon * v * dt (mist accretion).
for index in range(1,vel_list.shape[0]):
    vel_list[index,:] = vel_list[index-1,:] + f(vel_list[index-1,:],radius_list[index-1,:])*time_step
    radius_list[index,:] = radius_list[index-1,:] + epsilon*vel_list[index-1,:]*time_step
# + deletable=true editable=true
# Velocity vs. radius for the smallest initial radius...
plot(radius_list[:,0],vel_list[:,0])
# + deletable=true editable=true
# ...and for the largest.
plot(radius_list[:,10],vel_list[:,10])
# + deletable=true editable=true
radius_list
| raindrop_physics/raindrop_physics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %autosave 2
# Warm-up: plain Python arithmetic; a bare expression is echoed by Jupyter.
x = 10
y = 5
z = x + y
z
z * 3
import numpy as np
# Building arrays from lists; dtype is inferred unless given explicitly.
li = [1, 2, 3, 0, 5, 77]
a = np.array(li)
a.dtype
# Mixed element types force a common (object) dtype.
np.array([1, 2, 'abc', None])
a = np.array([1, 2, 3, 4, 5], dtype=int)
a
# Overflow demos: 128 does not fit in int8, 256 does not fit in uint8.
# NOTE(review): NumPy >= 1.24 raises on these out-of-range conversions
# instead of silently wrapping -- these cells are version-sensitive.
a = np.array([1, 2, 3, 4, 5, 128], dtype=np.int8)
a
a = np.array([1, 2, 3, 4, 5, 128, 256], dtype=np.uint8)
a
# Float, reduced-precision float, and complex dtypes.
a = np.array([1.1, 2, 3])
a.dtype
a = np.array([1.123456789, 2, 3], dtype=np.float16)
a
a = np.array([1.1 + 3j, 2, 3])
a.dtype
# Unicode vs. byte strings get '<U..' / '|S..' dtypes.
a = np.array(['abc', 'xyz', 'a'])
a
a = np.array([b'abc', b'xyz', b'a'])
a
# Booleans, and truthiness-based conversion to bool.
a = np.array([True, False, True, True])
a.dtype
a = np.array([1, 'abc', '', None, True], dtype=bool)
a
type(a)
# Array constructors: zeros / ones / empty / full with assorted dtypes.
np.zeros(5, dtype=bool)
np.zeros(10, dtype=str)
a = np.zeros((10, 2))
a
a = np.ones(5, dtype=np.uint8)
a
np.ones(6, dtype=str)
np.empty(7, dtype=np.float32)
# Fixed: np.complex / np.float were deprecated aliases of the builtin
# types and were removed in NumPy 1.24 -- use the builtins directly.
np.full(10, 2, dtype=complex)
2 * np.ones(10, dtype=complex)
# List multiplication repeats; array arithmetic is elementwise.
5 * [1, 2]
5 * np.array([1, 2, 3])
5 + np.ones(4)
np.array([-1, -2, -3]) + np.array([3, 2, 1])
np.arange(10, dtype=float)
np.arange(2, 10, dtype=float)
np.arange(10, 0, -1)
np.arange(0.5, 1.5, 0.1)
# +
# for (int i = 0; i < 10; i++)
#     double x = i * 0.1
# for (double i = 0; i < 1.0; i += 0.1)
# -
# Float steps accumulate rounding error, so the endpoint is unreliable:
np.arange(0, 2 + 1/3, 1/3)
np.arange(0, 2.00000001, 1/3)
# Half-step trick to include the intended endpoint robustly.
a = 0
b = 2
step = 1 / 3
np.arange(a, b + 0.5 * step, step)
# linspace / logspace fix the point count instead of the step.
np.linspace(0, 2, 7)
np.logspace(0, 2, 3)
10 ** np.linspace(0, 2, 3)
np.logspace(0, 2, 3, base=2)
np.logspace(0, 2, 3, dtype=int)
# Vectorized math ufuncs.
np.sin(np.linspace(0, 2 * np.pi, 10))
np.log(np.arange(1, 10))
np.log10(np.arange(1, 11))
np.log2([2, 4, 8, 16, 0.5])
np.round(np.logspace(0, 1, 5))
import math
# math.sqrt raises ValueError on negative input (demo -- this line halts
# plain script execution; in the notebook each cell ran independently).
math.sqrt(-1)
# np.sqrt returns nan (with a RuntimeWarning) instead of raising.
np.sqrt(-1)
# NOTE(review): np.math (an alias for the stdlib math module) was removed
# in NumPy 1.25; on older versions this raises like math.sqrt(-1).
np.math.sqrt(-1)
# Complex square roots work via cmath or a complex-typed input.
import cmath
cmath.sqrt(-1)
np.sqrt(-1 + 0j)
# Reductions and statistics.
np.sum(np.arange(10))
sum(range(10))
np.prod(np.arange(3, 7))
np.cumprod(np.arange(1, 10))
np.median([1, -2, 3, -5, 7, 9])
np.mean([1, 2, 3, -5, -7])
np.std([1, 2, 3, -4, -7])
# nan-aware variants skip NaNs; the plain reductions propagate them.
np.nansum([1, 2, 3, np.nan, float('nan')])
np.sum([1, 2, 3, np.nan, float('nan')])
np.nanmean([1, 2, 3, np.nan, float('nan')])
# Masked arrays hide selected elements from computations.
ma = np.ma.array([1,2,3,4], mask=[True, False, True, False],
                 fill_value=-100500)
ma
# Indexing and slicing basics.
a = np.arange(10)
a
a[5]
a[0:5]
a[:5]
s = '0123456789'
s[5:len(s)]
s[5:]
a[-1]
s[-len(s)]
# Out-of-range SINGLE indexing raises IndexError (slices never do).
s[-len(s) - 1]
s[len(s)]
a[2:8:2]
a[::2]
a[::-1]
a[10:3:-1]
# List slicing COPIES: mutating the slice leaves the original intact.
spisok1 = [1,2,3,4,5]
spisok2 = spisok1[::2]
spisok2[-1] = 100
spisok1
# Extended-slice assignment needs a matching-length iterable.
li = [1,2,3,4,5]
li[::2] = (-1, -3, -5)
li
# NumPy slices are VIEWS; .copy() detaches from the parent buffer.
a = np.arange(10)
b = a[::2].copy()
b[-1] = 100
a
b.flags.owndata
a.flags.owndata
b.flags.owndata
| misc/jupyter_notebooks/19.10.07/numpy_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ArcGISPro
# language: Python
# name: python3
# ---
# #CATEGORIZE MAIN BREAKS AND LEAKS
#
# based on the exported main-break data - value mapping for charts and maps
#
# MBlayer = "Main_breaks_Not_Repaired_2021_5"
#
# categories:
# Age; Age
# Aged Pipe; Age
# Bad Compression Fitting; Install or Material Failure
# Contractor Hit; Contractor Damage
# Corrosion; Corrosion
# Corrosion-Differential Settlement; Settlement and Corrosion
# Corrosion - Differential Settlement; Settlement and Corrosion
# Corrosion & Settlement; Settlement and Corrosion
# Damaged by Contractor; Contractor Damage
# Defective Material; Install or Material Failure
# Defective Pipe; Install or Material Failure
# Differential Settlement; Settlement
# Improper Installation; Install or Material Failure
# Joint Failure; Install or Material Failure
# Main Break; unknown
# Pipe - Aged; Age
# Water Main Leak Repair; unknown
# Water Valve Repair; unknown
#
# +
#MainBreakLayer = "MainBreaks2021_5"
MainBreakLayer = "Main_breaks_Not_Repaired_2021_5"
# +
# Bucket raw WO_CAUS_TY / WO_ACTN_TY values into a MainBreakReason field.
# Each Select/Calculate pair selects the matching rows (NEW_SELECTION each
# time) and stamps the category text. Order only matters for the final two
# "MainBreakReason IS NULL" fallback buckets.
# NOTE(review): requires an ArcGIS Pro session (`arcpy` in scope).
# Age-related causes.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY LIKE '%Age%' Or WO_CAUS_TY IN ('Age', 'Aged Pipe', 'Pipe - Aged')", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Age"', "PYTHON3", '', "TEXT")
# Pure corrosion.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY IN ('Corrosion')", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Corrosion"', "PYTHON3", '', "TEXT")
# Corrosion combined with settlement (three spelling variants in the data).
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY IN ('Corrosion - Differential Settlement', 'Corrosion & Settlement', 'Corrosion-Differential Settlement')", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Corrosion and Settlement"', "PYTHON3", '', "TEXT")
# Settlement only.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY = 'Differential Settlement'", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Differential Settlement"', "PYTHON3", '', "TEXT")
# Material / installation failures.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY IN ('Bad Compression Fitting', 'Improper Installation', 'Defective Pipe', 'Defective Material', 'Joint Failure')", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Material - Install Fail"', "PYTHON3", '', "TEXT")
# Contractor damage.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY IN ('Contractor Hit', 'Damaged by Contractor')", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Contractor Damage"', "PYTHON3", '', "TEXT")
# Other leak / valve repair work orders.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "WO_CAUS_TY IN ('Water Valve Repair', 'Water Main Leak Repair')", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Other Leaks Repaired"', "PYTHON3", '', "TEXT")
# Fallbacks: anything still uncategorized, split by the action type.
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "MainBreakReason IS NULL And WO_ACTN_TY LIKE '%Repair%'", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Other Break Repaired"', "PYTHON3", '', "TEXT")
arcpy.management.SelectLayerByAttribute(MainBreakLayer, "NEW_SELECTION", "MainBreakReason IS NULL And WO_ACTN_TY LIKE '%Replace%'", None)
arcpy.management.CalculateField(MainBreakLayer, "MainBreakReason", '"Other Pipe Replaced"', "PYTHON3", '', "TEXT")
# -
| MainBreaks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BW Project Title
#
# ## Team Members
# - Name #1
# - ...
#
# ## Project Description
# Insert a short 1-2 sentence description of what you plan to build.
# +
# Import required packages
import numpy as np
import tensorflow as tf
print(f'Numpy Version: {np.__version__}')
print(f'Tensorflow Version: {tf.__version__}')
# -
# ## Load Data
# +
# Load and pre-process data
# -
# ## Data Exploration & Pre-Processing
# +
# Descriptive statistics for key fields
# Add visualizations (where applicable)
# -
# ## Baseline Model (Benchmark)
# +
# Simple baseline model
# -
# ## Modeling Approach #1
# ## Modeling Approach #2
# ## Hyper-parameter Tuning for Best Model
# ## Save Final Model
# +
import joblib
# Only applicable to scikit-learn models
joblib.dump(model, '../models/model_name.joblib')
# Use tensorflow's .save() API for neural network models
model.save(...)
| notebooks/01_Model_Development.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="AksIKBzZ-nre"
# # Using Vertex Vizier to Optimize Multiple Objectives
#
# ## Overview
#
# In this lab, you will use [Vertex Vizier](https://cloud.google.com/vertex-ai/docs/vizier/overview) to perform multi-objective optimization. Multi-objective optimization is concerned with mathematical optimization problems involving more than one objective function to be optimized simultaneously
#
# ## Objective
#
# The goal is to __`minimize`__ the objective metric:
# ```
# y1 = r*sin(theta)
# ```
#
# and simultaneously __`maximize`__ the objective metric:
# ```
# y2 = r*cos(theta)
# ```
#
# that you will evaluate over the parameter space:
#
# - __`r`__ in [0,1],
#
# - __`theta`__ in [0, pi/2]
#
# ## Introduction
#
# In this notebook, you will use [Vertex Vizier](https://cloud.google.com/vertex-ai/docs/vizier/overview) multi-objective optimization. Multi-objective optimization is concerned with mathematical optimization problems involving more than one objective function to be optimized simultaneously.
#
# Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/gapic-vizier-multi-objective-optimization.ipynb).
#
# **Make sure to enable the Vertex AI API**
# + [markdown] id="iMHz63rPbq6P"
# #### Install Vertex AI library
#
# Download and install Vertex AI library.
# + id="b6f3dc43494b"
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# Google Cloud Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
# + id="g7WlujNxbq6Q"
# Upgrade the specified package to the newest available version
# ! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
# + id="O8AIwN0abq6U"
import os
if not os.getenv("IS_TESTING"):
# Restart the kernel after pip installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
# + [markdown] id="Dax2zrpTi2Xy"
# ### Import libraries and define constants
# + id="xD60d6Q0i2X0"
# Import necessary libraries
import datetime
import json
from google.cloud import aiplatform_v1beta1
# + [markdown] id="CWuu4wmki2X3"
# ## Tutorial
#
# + [markdown] id="KyEjqIdnad0w"
# This section defines some parameters and util methods to call Vertex Vizier APIs. Please fill in the following information to get started.
# + id="8HCgeF8had77"
# Fill in your project ID and region
REGION = "[region]" # @param {type:"string"}
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
# These will be automatically filled in.
STUDY_DISPLAY_NAME = "{}_study_{}".format(
PROJECT_ID.replace("-", ""), datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
) # @param {type: 'string'}
ENDPOINT = REGION + "-aiplatform.googleapis.com"
PARENT = "projects/{}/locations/{}".format(PROJECT_ID, REGION)
print("ENDPOINT: {}".format(ENDPOINT))
print("REGION: {}".format(REGION))
print("PARENT: {}".format(PARENT))
# If you don't know your project ID, you might be able to get your project ID
# using gcloud command by executing the second cell below.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
# shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="8NBduXsEaRKr"
# ### Create the study configuration
#
# The following is a sample study configuration, built as a hierarchical python dictionary. It is already filled out. Run the cell to configure the study.
# + id="s-AHfPOASXXW"
# Parameter Configuration
param_r = {"parameter_id": "r", "double_value_spec": {"min_value": 0, "max_value": 1}}
param_theta = {
"parameter_id": "theta",
"double_value_spec": {"min_value": 0, "max_value": 1.57},
}
# Objective Metrics
metric_y1 = # TODO -- Your code goes here
# Objective Metrics
metric_y2 = # TODO -- Your code goes here
# Put it all together in a study configuration
study = {
"display_name": STUDY_DISPLAY_NAME,
"study_spec": {
"algorithm": "RANDOM_SEARCH",
"parameters": [
param_r,
param_theta,
],
"metrics": [metric_y1, metric_y2],
},
}
print(json.dumps(study, indent=2, sort_keys=True))
# + [markdown] id="uyXG_RKha7Kb"
# ### Create the study
#
# Next, create the study, which you will subsequently run to optimize the two objectives.
# + id="jgskzqZX0Mkt"
# Create the study using study configuration and send request through VizierServiceClient
vizier_client = # TODO -- Your code goes here(
client_options=dict(api_endpoint=ENDPOINT)
)
study = vizier_client.create_study(parent=PARENT, study=study)
STUDY_ID = study.name
print("STUDY_ID: {}".format(STUDY_ID))
# + [markdown] id="dKOMBKmtkcJb"
# ### Metric evaluation functions
#
# Next, define some functions to evaluate the two objective metrics.
# + id="Xnl1uqnyz3Qp"
import math

# Objective y1 = r * sin(theta)
def Metric1Evaluation(r, theta):
    """Evaluate the first objective metric, r * sin(theta), for the trial."""
    sine = math.sin(theta)
    return r * sine

# Objective y2 = r * cos(theta)
def Metric2Evaluation(r, theta):
    """Evaluate the second objective metric, r * cos(theta), for the trial."""
    cosine = math.cos(theta)
    return r * cosine
# Student exercise cell: the two `# TODO` lines are intentional placeholders
# (they should call Metric1Evaluation / Metric2Evaluation), so this function
# is NOT runnable until the lab is completed.
def CreateMetrics(trial_id, r, theta):
    print(("=========== Start Trial: [{}] =============").format(trial_id))
    # Evaluate both objective metrics for this trial
    y1 = # TODO -- Your code goes here(r, theta)
    y2 = # TODO -- Your code goes here(r, theta)
    print(
        "[r = {}, theta = {}] => y1 = r*sin(theta) = {}, y2 = r*cos(theta) = {}".format(
            r, theta, y1, y2
        )
    )
    # Package each value as a Vizier measurement metric dict.
    metric1 = {"metric_id": "y1", "value": y1}
    metric2 = {"metric_id": "y2", "value": y2}
    # Return the results for this trial
    return [metric1, metric2]
# + [markdown] id="Qzn5lVpRq05U"
# ### Set configuration parameters for running trials
#
# __`client_id`__: The identifier of the client that is requesting the suggestion. If multiple SuggestTrialsRequests have the same `client_id`, the service will return the identical suggested trial if the trial is `PENDING`, and provide a new trial if the last suggested trial was completed.
#
# __`suggestion_count_per_request`__: The number of suggestions (trials) requested in a single request.
#
# __`max_trial_id_to_stop`__: The number of trials to explore before stopping. It is set to 4 to shorten the time to run the code, so don't expect convergence. For convergence, it would likely need to be about 20 (a good rule of thumb is to multiply the total dimensionality by 10).
#
# + id="5usXaZA5qvUZ"
client_id = "client1" # @param {type: 'string'}
suggestion_count_per_request = 5 # @param {type: 'integer'}
max_trial_id_to_stop = 4 # @param {type: 'integer'}
print("client_id: {}".format(client_id))
print("suggestion_count_per_request: {}".format(suggestion_count_per_request))
print("max_trial_id_to_stop: {}".format(max_trial_id_to_stop))
# + [markdown] id="UnV2SJNskm7V"
# ### Run Vertex Vizier trials
#
# Run the trials.
# + id="opmuTntW4-eS"
trial_id = 0
while int(trial_id) < max_trial_id_to_stop:
suggest_response = vizier_client.suggest_trials(
{
"parent": STUDY_ID,
"suggestion_count": suggestion_count_per_request,
"client_id": client_id,
}
)
for suggested_trial in suggest_response.result().trials:
trial_id = suggested_trial.name.split("/")[-1]
trial = vizier_client.get_trial({"name": suggested_trial.name})
if trial.state in ["COMPLETED", "INFEASIBLE"]:
continue
for param in trial.parameters:
if param.parameter_id == "r":
r = param.value
elif param.parameter_id == "theta":
theta = param.value
print("Trial : r is {}, theta is {}.".format(r, theta))
# Store your measurement and send the request
# TODO -- Your code goes here(
{
"trial_name": suggested_trial.name,
"measurement": {
"metrics": # TODO -- Your code goes here(suggested_trial.name, r, theta)
},
}
)
response = vizier_client.complete_trial(
{"name": suggested_trial.name, "trial_infeasible": False}
)
# + [markdown] id="i5ZTqgqBiRsq"
# ### List the optimal solutions
#
# list_optimal_trials returns the pareto-optimal trials for a multi-objective study, or the optimal trials for a single-objective study. Since we defined multiple objectives in the previous steps, pareto-optimal trials will be returned.
# + id="Or2PL1YxTr33"
# List all the pareto-optimal trials
optimal_trials = # TODO -- Your code goes here({"parent": STUDY_ID})
print("optimal_trials: {}".format(optimal_trials))
# + [markdown] id="KAxfq9Fri2YV"
# ## Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. You can also manually delete resources that you created by running the following code.
# + id="zQlLDfvlzYde"
vizier_client.delete_study({"name": STUDY_ID})
# -
| courses/machine_learning/deepdive2/art_and_science_of_ml/labs/gapic-vizier-multi-objective-optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from datetime import datetime
data = pd.read_excel("./Data/Ranked_data/BusEq_CFPranked_weekly_data.xlsx")
data.head()
# +
def _welch_t_by_group(frame, group_col, group_values, return_col):
    """Compute group means/stds/counts of `return_col` split by `group_col`.

    Returns (mean_values, std_values, sample_num, t_stat), where t_stat is a
    Welch-style t statistic comparing the LAST group in `group_values`
    against the FIRST.  Also prints the group means, matching the original
    notebook cells.

    NOTE(review): np.std here is the population std (ddof=0), and the
    quintile/decile ranges below start at 2 rather than 1 -- both reproduce
    the original cells; confirm they are intended.
    """
    mean_values = []
    std_values = []
    sample_num = []
    for group in group_values:
        ind = frame[group_col] == group
        mean_values.append(np.mean(frame[return_col][ind]))
        std_values.append(np.std(frame[return_col][ind]))
        sample_num.append(np.sum(ind))
    t_stat = (mean_values[-1] - mean_values[0]) / np.sqrt(((std_values[-1] ** 2) / sample_num[-1]) + ((std_values[0] ** 2) / sample_num[0]))
    print("mean_values are: [%s]" % ', '.join(map(str, mean_values)))
    return mean_values, std_values, sample_num, t_stat


# Same-period returns across quintiles 2..5.
mean_values, std_values, sample_num, t_stat = _welch_t_by_group(data, "quintile", np.arange(2, 6), "return_t")
t_stat
# +
# Next-period returns across quintiles 2..5.
mean_values, std_values, sample_num, t_stat = _welch_t_by_group(data, "quintile", np.arange(2, 6), "return_t+")
t_stat
# +
# Same-period returns across deciles 2..10.
mean_values, std_values, sample_num, t_stat = _welch_t_by_group(data, "decile", np.arange(2, 11), "return_t")
t_stat
# +
# Next-period returns across deciles 2..10.
mean_values, std_values, sample_num, t_stat = _welch_t_by_group(data, "decile", np.arange(2, 11), "return_t+")
t_stat
# +
# Same-period returns for the High_CF_t = 0 / 1 split.
mean_values, std_values, sample_num, t_stat = _welch_t_by_group(data, "High_CF_t", range(2), "return_t")
t_stat
# +
# Next-period returns for the High_CF_t = 0 / 1 split.
mean_values, std_values, sample_num, t_stat = _welch_t_by_group(data, "High_CF_t", range(2), "return_t+")
t_stat
# -
| Average_ranked_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="65vQsMRLuVcf"
import pandas as pd
from datetime import date
# + colab={"base_uri": "https://localhost:8080/"} id="Nky5REaKuebY" outputId="7e80b82c-12b2-40d6-fd53-b1efb9ad5575"
# Mount Google Drive so the CSV inputs/outputs referenced below are reachable.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="OCcLGXoHqyqH"
# Data Prep for USA_Facts confirmed cases at county level
# + [markdown] id="3XhuNPwdq8y8"
# Read file
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="WwqKGnxSvTia" outputId="9f5b7e69-7923-4189-f016-3be4656de1cb"
# County-level population table from USA Facts.
df_county_population = pd.read_csv('/content/drive/MyDrive/Covid Dataset/data_prep/input/covid_county_population_usafacts.csv')
df_county_population
# + [markdown] id="CvFL0JJCwCc2"
# Remove rows with 0 in countyFIPS
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="prL-vBi2v_8P" outputId="16dfda84-9834-41c3-d95d-419af13ba323"
# Drop aggregate/unknown rows (countyFIPS == 0) and key by string FIPS.
df_non_zero_county_population = df_county_population[(df_county_population['countyFIPS'] > 0)].astype({'countyFIPS': str})
df_non_zero_county_population
# + [markdown] id="v8QA3C4e6lX6"
# Compute state population by adding county population
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DL7Ujc5TQB0B" outputId="f1fb0288-7db7-4424-ac07-1d0d0856ec1f"
# NOTE(review): countyFIPS was just cast to str, so .sum() over the group may
# concatenate the FIPS strings (pandas-version dependent) -- confirm the
# resulting frame only uses the numeric population columns.
df_population_by_state = df_non_zero_county_population.groupby(['State']).sum().reset_index()
df_population_by_state
# + [markdown] id="OZSshxKqzkj6"
# Read partial analytics_base_table with confirmed cases and deaths at state level
#
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="EkIb85umas5r" outputId="98258f95-b4df-4d2f-f808-59e8d53393d4"
df_partial_abt_by_state = pd.read_csv('/content/drive/MyDrive/Covid Dataset/data_prep/output/partial_abt_by_state.csv')
df_partial_abt_by_state
# + [markdown] id="BvJGCIDRatf0"
# Read partial analytics_base_table with confirmed cases and deaths at county level
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="GlxHDdBjztR2" outputId="dde3d017-1a2b-4fb7-b7e7-eabd4cc376d2"
df_partial_abt_by_county = pd.read_csv('/content/drive/MyDrive/Covid Dataset/data_prep/output/partial_abt_by_county.csv')
df_partial_abt_by_county
# + [markdown] id="4X01dvKObCEd"
# Merge abt at state level with state population data
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="Tv1B_EqJbHHC" outputId="9db83351-2bde-40d7-d11c-72ac986138c2"
# Inner-join on State; drop the duplicated right-hand columns via the _DROP
# suffix plus a negative-lookahead filter.
df_partial_abt_by_state_2 = pd.merge(df_partial_abt_by_state, df_population_by_state, on=['State'], suffixes=('', '_DROP'), how='inner').filter(regex='^(?!.*_DROP)')
df_partial_abt_by_state_2
# + id="9dl3-MXK3xYq"
df_partial_abt_by_state_2.to_csv('/content/drive/MyDrive/Covid Dataset/data_prep/output/partial_abt_by_state_2.csv')
# + [markdown] id="BIw0CmNjb4KR"
# Merge abt at county level with county population data
#
# ---
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="3LlvhlwHb7Yo" outputId="b45e1db3-7004-4868-c8d0-7dfe573858be"
# BUG FIX: DataFrame.astype returns a NEW frame -- the original cell discarded
# both results, so countyFIPS kept its original dtype and the merge keyed on
# mismatched types. Assign the converted frames back before merging.
df_partial_abt_by_county = df_partial_abt_by_county.astype({'countyFIPS': str})
df_county_population = df_county_population.astype({'countyFIPS': str})
# Inner-join on countyFIPS; _DROP suffix + negative lookahead removes the
# duplicated right-hand columns.
df_partial_abt_by_county_2 = pd.merge(df_partial_abt_by_county, df_county_population, on=['countyFIPS'], suffixes=('', '_DROP'), how='inner').filter(regex='^(?!.*_DROP)')
df_partial_abt_by_county_2
# + id="grOw1tU9ryD3"
# Persist the county-level ABT merged with population data.
df_partial_abt_by_county_2.to_csv('/content/drive/MyDrive/Covid Dataset/data_prep/output/partial_abt_by_county_2.csv')
| Notebook-Class-exercises/.ipynb_checkpoints/data_prep_3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ะ ัะฐะนะปะต $outs$ ะฟะธัะตััั ะบััะฐ ะทะฐะฟััะบะพะฒ ะทะฐ ะดะตะฝั, ะบะฐะถะดัะน ะทะฐะฟััะบ ะดะปะธััั 2 ะผะธะฝััั, ะทะฐ ััะพ ะฒัะตะผั ััะธััะฒะฐะตััั ะพะบะพะปะพ 10000 ัะพะฑััะธะน. ะคะพัะผะฐั:
#
# ะะพะป-ะฒะพ ััะฐะฑะพัะฐะฒัะธั ะบะปะฐััะตัะพะฒ
#
# N ะบะปะฐััะตัะฐ N ัะพะฑััะธั ะฒัะตะผั (ัั:ะผะผ:ัั,ะผั.ะผะบั.ะฝั)
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB0 TB0 AM0 TM0 AB1 TB1 AM1 TM1 AB2 TB2 AM2 TM2 AB3 TB3 AM3 TM3
#
# AB28 TB28 AM28 TM28 AB29 TB29 AM29 TM29 AB30 TB30 AM30 TM30 AB31 TB31 AM31 TM31
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import gc
from matplotlib.patches import RegularPolygon #drawing hexagons
#import shapely #to attribute geometric properties for shapes
#from shapely.geometry import Polygon
#from matplotlib.patches import RegularPolygon
# +
fig, ax = plt.subplots(figsize=(10,5))
# Camera extent in the same units as the pixel coordinates.
xmin, xmax = 0, 20
ymin, ymax = 0, 10
n_cols=15
n_rows=25
w = (xmax-xmin)/n_cols # width of hexagon
d = w/np.sin(np.pi/3) #diameter of hexagon
array_of_hexes = []
# Hex grid layout: odd rows shifted half a cell right, rows packed
# vertically at 0.75 of the hex diameter.
for rows in range(0,n_rows):
    hcoord = np.arange(xmin,xmax,w) + (rows%2)*w/2
    vcoord = [ymax- rows*d*0.75]*n_cols
    for x, y in zip(hcoord, vcoord):#, colors):
        hexes = RegularPolygon((x, y), numVertices=6, radius=d/2, alpha=0.1, edgecolor='k')
        verts = hexes.get_path().vertices
        trans = hexes.get_patch_transform()
        points = trans.transform(verts)
        # NOTE(review): `Polygon` is undefined here -- the shapely import at
        # the top is commented out, so this line raises NameError at runtime.
        # Either re-enable `from shapely.geometry import Polygon` or store
        # `points` directly.
        array_of_hexes.append(Polygon(points))
        ax.add_patch(hexes)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
plt.show()
# +
#ะบะพะณะดะฐ-ะฝะธะฑัะดั ะทะดะตัั ะฑัะดะตั ะบะปะฐัั ะดะปั ัะพะฑััะธั
# -
# Run number as a zero-padded 3-digit string, e.g. '010'.
Nrun = '{:03}'.format(10)
# Per-cluster plotting colors (indexed modulo 7 in vizualize()).
COLORS = np.array(['r', 'y', 'g', 'c', 'b', 'm', 'k'])
def Peds(Nrun, pathped="./231119.01/peds/231119.ped_"):
    """Read the pedestal file for run `Nrun`.

    Nrun    : zero-padded run number string, appended to `pathped`.
    pathped : path prefix of the pedestal files (new optional parameter;
              defaults to the original hard-coded location, so existing
              callers are unaffected).

    Returns a 24x64 list: peds[cluster][channel] is the rounded pedestal of
    that channel for this run. Clusters are 1-indexed in the file, channels
    0-indexed.
    """
    # pedestal value of every channel of every cluster for this run
    peds = [[0 for i in range(64)] for j in range(24)]
    # `with` guarantees the file is closed even on a parse error
    # (the original opened/closed manually and had a comment line that had
    # lost its '#', breaking the .py form of the notebook).
    with open(pathped + Nrun, "r") as pedsfile:
        while True:
            line = pedsfile.readline().split()
            if not line:
                break
            # columns: cluster (1-based), channel (0-based), pedestal value
            peds[int(line[0]) - 1][int(line[1])] = round(float(line[2]))
    return peds
peds = Peds(Nrun)
# +
def dist(c1, c2):
    """Euclidean distance between two 2-D points (index 0 = x, index 1 = y)."""
    dx = c1[0] - c2[0]
    dy = c1[1] - c2[1]
    return np.sqrt(dx * dx + dy * dy)
class Event():
    """One camera event: raw per-cluster channel data plus derived pixel
    amplitudes and Hillas image parameters."""
    def __init__(self, Nevent = 0, eventtime = "12:34:56,789.101.112", clusters = None):
        if clusters is None:
            clusters = []
        # map: cluster number -> list of 64 [amplitude, flag] pairs
        self.clusters = dict()
        for cluster in clusters:
            self.clusters[cluster[0]] = cluster[1]
        self.Nclusters = len(clusters)
        self.Nevent = '{:06}'.format(Nevent)
        self.time = eventtime
        self.size = 0    # total amplitude over accepted pixels (set by recount)
        self.vmax = 0    # largest single-pixel amplitude
        self.pixels = None   # map: PMT id -> (x, y, amplitude); filled by recount
        self.xm = None
        self.ym = None
        self.x2m = None
        self.y2m = None
        self.xym = None
        self.Hillas = {"a": None, "b": None, "width": None, "length": None, "dis": None, "miss": None}
    def __str__(self):
        return "#"+self.Nevent+' '+self.time
    def __repr__(self):
        return "#"+self.Nevent+' '+self.time
    def __len__(self):
        # number of accepted pixels; raises TypeError if recount() not yet run
        return len(self.pixels)
    def recount(self, factors, coords):
        """Convert raw amplitudes via calibration `factors` and attach pixel
        coordinates from `coords`; fills self.pixels/size/vmax, returns self.

        NOTE(review): `coords` is indexed with `cluster - 1` but `factors`
        with `cluster`; since cluster numbers are 1-based elsewhere in this
        file, the `factors[cluster]` lookup looks like an off-by-one --
        confirm against how cluster_factors is filled.
        """
        self.pixels = dict()
        for cluster in self.clusters:
            #print(self.clusters[cluster])
            for channel in range(64):
                if self.clusters[cluster][channel][0] > 0:
                    x = coords[cluster - 1][channel][0]
                    y = coords[cluster - 1][channel][1]
                    n = coords[cluster - 1][channel][2]
                    if factors[cluster][channel] is not None:
                        v = int(round(self.clusters[cluster][channel][0] / factors[cluster][channel]))
                    else:
                        v = None
                    if x is not None and y is not None and v is not None:
                        self.pixels[n]=(x, y, v)
                        self.size += v
                        if v > self.vmax: self.vmax = v
        return self
    def params(self):
        """Compute and cache the Hillas parameters; returns the dict, or
        False for an empty (size == 0) event."""
        if None in self.Hillas.values() and self.size > 0:
            self.xm, self.ym, self.x2m, self.y2m, self.xym = 0, 0, 0, 0, 0
            xsum, x2sum, ysum, y2sum, xysum = 0, 0, 0, 0, 0
            # amplitude-weighted first and second moments of the image
            for pixel in self.pixels:
                x, y, v = self.pixels[pixel]
                xsum += x * v
                x2sum += x * x * v
                ysum += y * v
                y2sum += y * y * v
                xysum += x * y * v
            self.xm = xsum / self.size
            self.ym = ysum / self.size
            self.x2m = x2sum / self.size
            self.y2m = y2sum / self.size
            self.xym = xysum / self.size
            sigmax = self.x2m - self.xm**2
            sigmay = self.y2m - self.ym**2
            sigmaxy = self.xym - self.xm*self.ym
            d = sigmay-sigmax
            # y = a*x + b is the major image axis fitted from the moments
            a = self.Hillas["a"] = (d+np.sqrt(d*d+4*sigmaxy**2))/(2*sigmaxy)
            b = self.Hillas["b"] = self.ym-self.Hillas["a"]*self.xm
            self.Hillas["width"] = np.sqrt((sigmay-2*a*sigmaxy+a*a*sigmax)/(1+a*a))
            self.Hillas["length"] = np.sqrt((sigmax+2*a*sigmaxy+a*a*sigmay)/(1+a*a))
            self.Hillas["dis"] = np.sqrt(self.xm**2+self.ym**2)
            self.Hillas["miss"] = abs(b/np.sqrt(1+a*a))
            self.Hillas["size"] = self.size
            self.Hillas["coords"] = (self.xm, self.ym)
            return self.Hillas
        elif self.size == 0:
            return False
        else:
            # already computed -- return the cached values
            return self.Hillas
    def vizualize(self, colors = COLORS, alpha = 0.3, pixel_coords = None, save = False):
        """Scatter-plot the camera image; point transparency scales with the
        pixel amplitude relative to vmax. If `pixel_coords` is given, the
        full camera is drawn with non-event pixels faint."""
        #alpha = 0.3 # base value used to normalise transparency
        fig, ax = plt.subplots(figsize=(7,7))
        ax.set_title("#"+self.Nevent+" "+str(self.time), fontsize=16)
        ax.set_xlim([-40, 40])
        ax.set_ylim([-40, 40])
        if pixel_coords is not None:
            for pixel in pixel_coords:
                if pixel not in self.pixels:
                    plt.scatter(pixel_coords[pixel][1], pixel_coords[pixel][2], color = colors[(pixel_coords[pixel][0]-1)%7], alpha = 0.1)
                else:
                    plt.scatter(pixel_coords[pixel][1], pixel_coords[pixel][2], color = "orange", alpha = alpha + (1-alpha)*self.pixels[pixel][2]/self.vmax)
                    ax.text(self.pixels[pixel][0], self.pixels[pixel][1], str(self.pixels[pixel][2]), fontsize = 10)
        else:
            for pixel in self.pixels:
                plt.scatter(self.pixels[pixel][0], self.pixels[pixel][1], color = "orange", alpha = alpha + (1-alpha)*self.pixels[pixel][2]/self.vmax)
                ax.text(self.pixels[pixel][0], self.pixels[pixel][1], str(self.pixels[pixel][2]), fontsize = 10)
        if save:
            plt.savefig("results/"+self.Nevent+".png", dpi = 200)
    def clean(self, A = 14, B = 7):
        """Two-threshold image cleaning; returns a cleaned deep copy.

        NOTE(review): the neighbour test checks `self.pixels[pixel][2] > B`
        (the pixel's OWN amplitude) and allows pixel1 == pixel (dist == 0 <
        3.1), so it is always satisfied for pixels that already passed the
        A cut -- the intent was probably `pixel1 != pixel and
        self.pixels[pixel1][2] > B`. Confirm before changing behavior.
        """
        b = deepcopy(self)
        b.size = 0
        for pixel in self.pixels:
            if self.pixels[pixel][2] < A:
                b.pixels.pop(pixel)
            else:
                n = False
                for pixel1 in self.pixels:
                    if dist(self.pixels[pixel], self.pixels[pixel1]) < 3.1 and self.pixels[pixel][2] > B:
                        n = True
                        b.size += b.pixels[pixel][2]
                        break
                if not n: b.pixels.pop(pixel)
        del self.clusters
        self.clusters = [] # free memory
        #if len(b.pixels): b.params()
        return b
# Example: raw data for a single cluster (number 17) -- 64 [amplitude, flag]
# pairs, as produced by the .out parser below.
c = [(17, [[4, 0], [0, 0], [0, 0], [0, 0], [1, 0], [0, 1], [149, 0], [26, 0], [0, 0], [2, 0], [0, 0], [0, 0], [3, 0], [4, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [1, 0], [0, 0], [0, 0], [0, 0], [0, 0], [182, 1], [20, 0], [79, 0], [17, 0], [82, 1], [6, 0], [3, 0], [1, 0], [11, 0], [0, 0], [19, 0], [0, 0], [0, 0], [0, 0], [25, 0], [4, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [150, 0], [12, 0], [50, 0], [0, 0], [0, 0], [0, 0], [2, 0], [0, 0], [130, 0], [13, 0], [19, 0], [2, 0], [0, 0], [0, 0], [4, 0], [0, 0], [0, 0], [0, 0]])]
a = Event(clusters = c)
#a.recount(cluster_factors, cluster_coords)
# pixels is still None here -- recount() (commented out above) is what fills it
a.pixels
# -
# Per-channel calibration factors. Fixes two issues in the original cell:
# a comment continuation line had lost its '#' (a syntax error in the .py
# form of the notebook), and the file was opened/closed manually.
cluster_factors = [[1 for j in range(64)] for i in range(24)]
with open("./231119.01/factors_051019.07fixed.txt", "r") as factor:
    # skip the 10 header lines (original: one readline + 9 in a loop)
    for i in range(10):
        factor.readline()
    while True:
        line = factor.readline().split()
        #print(line)
        if line == []:
            break
        if line[4] == "NaN" or line[5] == "NaN":
            cluster_factors[int(line[0])-1][int(line[1])] = None
        else:
            # number of d.c. counts corresponding to one photoelectron
            cluster_factors[int(line[0])-1][int(line[1])] = float(line[4]) * float(line[5])
#cluster_factors[7]
# Convert channels into the camera coordinate system.
xycoord = "xy_turn_2019j.txt"
coord = open("./231119.01/"+xycoord, "r")
pixel_coords = dict()  # map: PMT id -> (cluster, x, y)
# cluster_coords[cluster-1][channel] = [x, y, PMT id]
cluster_coords = [[[None, None, None] for i in range(64)] for j in range(24)]
while True:
    # read one line
    line = coord.readline().split()
    #print(line)
    if line == []:
        break
    #print(list(map(float, line[3:5])))
    # columns: cluster (1-based), ?, PMT id, x, y, channel (0-based)
    cluster_coords[int(line[0]) - 1][int(line[5])] = [float(line[3]), float(line[4]), int(line[2])]
    pixel_coords[int(line[2])] = (int(line[0]), float(line[3]), float(line[4]))
#print(len(coord.readlines()))
coord.close()
def outs(Nrun, peds = peds):
    """Parse the .out file for run `Nrun` into cleaned Event objects.

    NOTE: the default `peds = peds` captures the module-level pedestal table
    at definition time; callers should pass the run's own pedestals
    explicitly, as the main loop below does.

    Returns only events that survive Event.clean() with more than 4 pixels;
    the full `events` list is accumulated but never returned.
    """
    pathout = "./231119.01/outs/231119.out_"
    fin=open(pathout+Nrun, "r")
    events = []
    events_cleaned = []
    while True:
        # read one line: number of clusters in the next event
        line = fin.readline()
        if not line:
            break
        nclusters = int(line)
        clusters = []
        event = Event()
        for icluster in range(nclusters):
            cluster = [[0 for j in (1, 2)] for i in range(64)]
            Ncluster, Nevent, eventTime = fin.readline().split()
            Nchannel = 0
            # 8 lines x 8 (amplitude, flag) pairs = 64 channels per cluster
            for i in range(8):
                line = list(map(int, fin.readline().split()))
                for j in range(8):
                    # pedestal-subtract the amplitude, keep the flag as-is
                    cluster[Nchannel] = [line[2 * j] - peds[int(Ncluster) - 1][Nchannel], line[2 * j + 1]] #
                    #cluster[Nchannel][0] /= cluster_factors[int(Ncluster) - 1][Nchannel]
                    if cluster[Nchannel][0] < 0: cluster[Nchannel][0] = 0
                    Nchannel += 1
            clusters.append((int(Ncluster), cluster))
            #print("\n#", Nevent, "#", Ncluster)
            #for Nchannel in range(64):
            #if (not Nchannel%2) and (cluster[Nchannel][1]):
            #print(Nchannel // 2, cluster[Nchannel][0])
        # rebuild the event with its real number/time, calibrate the pixels
        event = Event(int(Nevent), eventTime, clusters).recount(cluster_factors, cluster_coords)
        events.append(event)
        z = event.clean()
        if len(z) > 4:
            events_cleaned.append(z)
        #print(clusters)
    fin.close()
    return events_cleaned
# Index of the cleaned event with the largest size.
n = 0
nmax = 0
for idx, ev in enumerate(events_cleaned):
    if ev.size > nmax:
        nmax = ev.size
        n = idx
n
# +
fig, ax = plt.subplots()
# Histogram of cleaned-event sizes on log-log axes.
sizes = np.zeros(len(events_cleaned))
for i in range(len(events_cleaned)):
    sizes[i] = events_cleaned[i].size
ax.hist(sizes, bins = 800)
ax.set_xlim(left = 10)
ax.set_xscale("log")
ax.set_yscale("log")
plt.savefig("Density after cleaning.png")
# +
# Process runs 001..004: read pedestals, parse and clean the events, and
# save an image of every very large (size > 20000) event.
events_cleaned = []
for nrun in range(1, 5):
    #events_cleaned += outs('{:03}'.format(nrun), peds = Peds(nrun))
    Nrun = '{:03}'.format(nrun)
    print(Nrun)
    peds = Peds(Nrun)
    o = outs(Nrun, peds)
    for e in o:
        if e.size > 20000:
            e.vizualize(pixel_coords = pixel_coords, save = True)
    events_cleaned += o
# -
import pandas as pd
# Previously exported Hillas parameters, one row per event.
df = pd.read_csv("Params01.csv", sep = '\t')
# Alpha angle derived from the Hillas miss/dis ratio, in degrees.
df['Alpha'] = np.degrees(np.arcsin(df['Miss']/df['Dis']))
df.describe()
# +
fig, ax = plt.subplots()
#ax.set_xlim(left = 10)
#ax.set_xscale("log")
time = 4.5*60*60  # observation time in seconds; NOTE(review): the label below says "5 hours" -- confirm which is right
x = np.log10(pd.Series(df['Size']))
hist, bins, _ = plt.hist(x, bins=500)
# histogram on log scale.
# Use non-equal bin sizes, such that they look equal on log scale.
logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))
# NOTE(review): dropping one entry makes len(logbins) == len(hist), but
# deleting index 1 (not the first/last edge) looks arbitrary -- confirm.
logbins = np.delete(logbins, 1, 0)
plt.plot(logbins, hist/time)
#plt.xscale('log')
ax.set_yscale("log")
ax.set_xlabel(r'log$_{10}$ Size')
ax.set_ylabel("Number of events in bin")
ax.text(3.5, 200, "5 hours, "+ str(len(df))+" events")
#ax.hist(df['Size'], bins = 600)
plt.show()
len(logbins),len( hist)
# -
plt.scatter(df["Width"], df["Length"], s = 5)
| TAIGA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os.path as op
import pickle
import tensorflow as tf
from tensorflow import keras
from keras.models import Model,Sequential,load_model
from keras.layers import Input, Embedding
from keras.layers import Dense, Bidirectional
from keras.layers.recurrent import LSTM
import keras.metrics as metrics
import itertools
from tensorflow.python.keras.utils.data_utils import Sequence
from decimal import Decimal
from keras import backend as K
from keras.layers import Conv1D,MaxPooling1D,Flatten,Dense
# -
# Inputs: 4 sensor columns; outputs: 6 pollutant columns from the same file.
inp=pd.read_csv("../PJ sensor.csv",usecols=[6,7,10,11])
out=pd.read_csv("../PJ sensor.csv",usecols=[2,3,4,5,8,9])
inp.shape
inp.head(5)
out.head(5)
inp=np.array(inp)
out=np.array(out)
# +
from sklearn.preprocessing import MinMaxScaler
import warnings
scaler_obj=MinMaxScaler()
# NOTE(review): the SAME scaler instance is re-fit on `out` below, so after
# this cell `scaler_obj` holds the output statistics only; any later
# inverse_transform of X1 with it would be wrong.
X1=scaler_obj.fit_transform(inp)
Y1=scaler_obj.fit_transform(out)
warnings.filterwarnings(action='ignore', category=UserWarning)
# +
# Splitting Data into training and testing dataset
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X1,Y1,test_size=0.2,random_state=42)
from sklearn.multioutput import MultiOutputRegressor
from sklearn.neighbors import KNeighborsRegressor
#creating object of sgboostregressor
# (NOTE(review): despite the comment, this is a KNN regressor, one per output)
model1=MultiOutputRegressor(KNeighborsRegressor(n_neighbors=7,weights='uniform',algorithm='auto',leaf_size=50,p=2))
model_fit1=model1.fit(x_train, y_train)
print("Model training is Done!!")
filename1 = 'knn.sav'
pickle.dump(model_fit1, open(filename1, 'wb'))
# +
from sklearn.metrics import r2_score
from sklearn import metrics
# NOTE(review): this 7-label list (includes 'PM1') is unused here and does
# not match the 6 output columns; it is redefined with 6 labels further down.
train_sizes=['NO2','O3','NO','CO','PM1','PM2.5','PM10']
#finding out the r2 score
y_train_pred1=model1.predict(x_train)
r2_train1=r2_score(y_train,y_train_pred1,multioutput='variance_weighted')
y_test_pred1=model1.predict(x_test)
r2_test1=r2_score(y_test,y_test_pred1,multioutput='variance_weighted')
print('r2 score on train data '+str(r2_train1))
print('r2 score on test data '+ str(r2_test1))
# NOTE(review): `xgboost_*` names are leftovers; these are KNN metrics.
xgboost_mae=metrics.mean_absolute_error(y_test, y_test_pred1)
xgboost_mse=metrics.mean_squared_error(y_test, y_test_pred1)
xgboost_rmse=np.sqrt(xgboost_mse)
print('Mean Absolute Error:',xgboost_mae)
print('Mean Squared Error:',xgboost_mse )
print('Root Mean Squared Error:',xgboost_rmse)
print(' \n')
# -
# Reload the pickled model and report per-output metrics on a fresh
# 75/25 split (different test_size than the training cell above).
import pickle
from sklearn.metrics import r2_score
from sklearn import metrics
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X1,Y1,test_size=0.25,random_state=42)
loaded_model_fit7 = pickle.load(open("knn.sav", 'rb'))
y_test_pred=loaded_model_fit7.predict(x_test)
print("Predicted :\n",y_test_pred)
print("\n")
r2_test=r2_score(y_test,y_test_pred,multioutput='variance_weighted')
print("R2 Score : ",r2_test)
train_sizes=['NO2','O3','NO','CO','PM2.5','PM10']
for i in range(0,6):
    temp=r2_score(y_test[:,i],y_test_pred[:,i])
    print("R2 Score of "+train_sizes[i]+":",temp)
train_sizes=['NO2','O3','NO','CO','PM2.5','PM10']
for i in range(0,6):
    temp=metrics.mean_squared_error(y_test[:,i],y_test_pred[:,i])
    print("MSE Value of "+train_sizes[i]+":",temp)
train_sizes=['NO2','O3','NO','CO','PM2.5','PM10']
for i in range(0,6):
    temp=np.sqrt(metrics.mean_squared_error(y_test[:,i],y_test_pred[:,i]))
    print("RMSE Value of "+train_sizes[i]+":",temp)
train_sizes=['NO2','O3','NO','CO','PM2.5','PM10']
for i in range(0,6):
    temp=metrics.mean_absolute_error(y_test[:,i],y_test_pred[:,i])
    print("MAE Value of "+train_sizes[i]+":",temp)
# +
#completed
| PJ_Sensor/KNN/PJ_knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from comch import *
# #### Parameters
# +
# Operad/arity parameters: p-th Steenrod structure in degree (n-1)(p-1).
n = 3
p = 3
arity = p
degree = (n-1)*(p-1)
torsion = p
convention = 'Berger-Fresse'
# -
# #### Using the E-infinity construction
surj = Surjection.steenrod_adem_structure(arity, degree, torsion, convention)
print(f'surj = {surj}.\n')
print(f'complexity = {surj.complexity}.\n')
print(f'd_surj = {surj.boundary()}.\n')
print(f'[d_surj] = {surj.boundary().orbit()}')
# #### Turchin representative
x = SurjectionElement({(1,2,1):1}, p, convention)
turchin = x.compose(x, 1)
print(f'{x} o_1 {x} = {turchin}')
print(f'its boundary is {turchin.boundary()}')
print(f'whose twisted orbit is {turchin.boundary().orbit("sign")}')
# #### All orbits arising from compositions
# +
from itertools import product
# Compose (2,1,2,1) with itself in all slot/rotation combinations and
# collect the distinct orbits.
comp = []
x = SurjectionElement({(2,1,2,1): 1}, p, convention)
rho = SymmetricRing.rotation_element(2, torsion=p)
for a, b, i in product([x, rho * x], [x, rho * x], {1,2}):
    comp.append(a.compose(b, i).orbit())
for surj in set(comp):
    print(surj)
# -
# #### Salvatore representative
salvatore = SurjectionElement({(1, 2, 1, 2, 3, 2, 3):1,
                               (1, 2, 3, 1, 3, 2, 3):1,
                               (1, 2, 3, 2, 1, 2, 3):1,
                               (1, 2, 1, 3, 1, 2, 3):2},
                              torsion, convention)
print(salvatore)
print(salvatore.boundary().orbit())
# #### Suspension
print(salvatore.suspension().boundary().orbit('sign'))
# #### All basis up to degree d
# Orbit bases of the arity-3, complexity-3 surjection operad, degree 0..d.
arity = 3
complexity = 3
bases_keys = {}
d = 7
for i in range(d+1):
    keys = Surjection.basis(arity, i, complexity)
    # set on a FreeModuleElement returns the set of keys
    bases_keys[i] = tuple(set(SurjectionElement({k:1 for k in keys}).orbit()))
    print(f'i={i}: {len(bases_keys[i])}')
# +
import scipy.sparse, numpy

def construct_boundary(bases_keys, d, rep='trivial'):
    '''Construct a sparse integer matrix representing the boundary map from
    chains of degree d to chains of degree d-1, in the given orbit bases.

    bases_keys : dict degree -> tuple of orbit-representative basis keys.
    d          : domain degree (target degree is d-1).
    rep        : orbit representative convention passed to .orbit().
    '''
    domain_keys, target_keys = bases_keys[d], bases_keys[d-1]
    target_keys_idx = {k: index for index, k in enumerate(target_keys)}
    m, n = len(target_keys), len(domain_keys)
    # lil_matrix allows cheap element-wise filling
    D = scipy.sparse.lil_matrix((m, n), dtype=int)
    for j, elmt in enumerate(domain_keys):
        bdry = SurjectionElement({elmt: 1}).boundary().orbit(rep)
        for k, v in bdry.items():
            D[target_keys_idx[k], j] = v
    return D

# Boundary matrices (twisted/sign convention) for every degree 1..d.
boundary = {}
for i in range(1, d+1):
    boundary[i] = (construct_boundary(bases_keys, i, 'sign'))

# Example
print(numpy.mod(boundary[3].todense(), 3))
# -
# #### From vector to chain
# +
def vec2chain(vector, basis, arity=3):
    """Transforms a vector in the given basis to the
    corresponding element in the surjection operad.

    NOTE(review): the `arity` parameter is passed as the element's
    `torsion` -- here arity == torsion == 3, but the name is misleading;
    confirm the intended meaning before reusing elsewhere.
    """
    idx2key = {idx: key for idx, key in enumerate(basis)}
    chain = SurjectionElement(torsion=arity)
    for idx, v in enumerate(vector):
        if v:
            chain += chain.create({idx2key[idx]: v})
    return chain

# Example
d = 6
bases_keys[d]
vector = (0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0)
chain = vec2chain(vector, bases_keys[d])
print(chain)
# +
# Basis for the kernel of boundary[6]
vectors = {
    0: (2, 1, 1, 0, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 0, 0, 0),
    1: (1, 2, 2, 0, 1, 2, 1, 2, 1, 1, 2, 0, 1, 0, 1, 0, 0),
    2: (2, 1, 2, 1, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 1, 0),
    3: (1, 0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1)}
cycles = []
for i in range(4):
    cycles.append(vec2chain(vectors[i], bases_keys[6]))
# +
from itertools import product
# All cycles suspending to Salvatore's rep
# (linear combinations of the kernel basis, coefficients mod 3).
good_cands = []
for c in product({0,1,2}, repeat=3):
    cand = cycles[0] + c[0] * cycles[1] + c[1] * cycles[2] + c[2] * cycles[3]
    susp = cand.suspension().orbit()
    if susp == salvatore:
        good_cands.append(cand)
# Example
print(good_cands[0])
| notebooks/dyer_lashof.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from rulevetting.projects.tbi_pecarn.dataset import Dataset
from rulevetting.projects.tbi_pecarn.dataset import AgeSplit
from sklearn import metrics
pd.set_option("display.max_colwidth",1000)
# # Read data
# 6 kidds of data (judgment calls)
# age <=2, >2, invariant
# include or exclude subvariables
# +
# The three age splits (old / young / age-invariant) went through identical
# load-and-prepare steps in three copy-pasted cells; factor them into one
# helper. The same module-level variables are produced as before.
def load_split(split_age):
    """Load one age split and return
    (df_train, df_tune, df_test, X_train, y_train, X_tune, y_tune).

    Train/tune frames have AgeinYears/Race/Gender dropped, exactly as the
    original cells did; the test frame is returned untouched.
    """
    df_train, df_tune, df_test = Dataset().get_data(split_age=split_age, load_csvs=False)
    drop_cols = ['AgeinYears', 'Race', 'Gender']
    df_train = df_train.drop(columns=drop_cols)
    df_tune = df_tune.drop(columns=drop_cols)
    X_train = df_train.drop(columns="outcome")
    y_train = df_train["outcome"].values
    X_tune = df_tune.drop(columns="outcome")
    y_tune = df_tune["outcome"].values
    return df_train, df_tune, df_test, X_train, y_train, X_tune, y_tune

(df_train_old, df_tune_old, df_test_old,
 X_train_old, y_train_old, X_tune_old, y_tune_old) = load_split(AgeSplit.OLD)

(df_train_young, df_tune_young, df_test_young,
 X_train_young, y_train_young, X_tune_young, y_tune_young) = load_split(AgeSplit.YOUNG)

(df_train_all, df_tune_all, df_test_all,
 X_train_all, y_train_all, X_tune_all, y_tune_all) = load_split(AgeSplit.AGEINVARIANT)
# +
# Same three age splits, but with explicit judgment calls overriding the
# defaults (all preprocess steps set to option 1, GCS feature excluded).
dset = Dataset()
# NOTE: This is just an example!
judg_calls = dset.get_judgement_calls_dictionary_default()
judg_calls["preprocess_data"]["step7_AMS"] = 1
judg_calls["preprocess_data"]["step8_OSI"] = 1
judg_calls["preprocess_data"]["step9_HEMA"] = 1
judg_calls["preprocess_data"]["step10_SFx"] = 1
judg_calls["preprocess_data"]["step11_SFxBas"] = 1
judg_calls["preprocess_data"]["step12_Clav"] = 1
judg_calls["preprocess_data"]["step13_NeuroD"] = 1
judg_calls["preprocess_data"]["step14_Vomit"] = 1
judg_calls["preprocess_data"]["step15_HA"] = 1
judg_calls["preprocess_data"]["step16_Seiz"] = 1
judg_calls["preprocess_data"]["step17_LOC"] = 1
judg_calls["extract_features"]["GCS"] = False
# NOTE(review): this cell uses `dset.get_data` here but fresh `Dataset()`
# instances below -- presumably equivalent; confirm Dataset is stateless.
df_train_old_parent, df_tune_old_parent, df_test_old_parent = dset.get_data(split_age=AgeSplit.OLD,load_csvs=False, **judg_calls)
df_train_old_parent = df_train_old_parent.drop(columns=['AgeinYears','Race','Gender'])
df_tune_old_parent = df_tune_old_parent.drop(columns=['AgeinYears','Race','Gender'])
X_train_old_parent = df_train_old_parent.drop(columns="outcome")
y_train_old_parent = df_train_old_parent["outcome"].values
X_tune_old_parent = df_tune_old_parent.drop(columns="outcome")
y_tune_old_parent = df_tune_old_parent["outcome"].values
df_train_young_parent, df_tune_young_parent, df_test_young_parent = Dataset().get_data(split_age=AgeSplit.YOUNG,load_csvs=False, **judg_calls)
df_train_young_parent = df_train_young_parent.drop(columns=['AgeinYears','Race','Gender'])
df_tune_young_parent = df_tune_young_parent.drop(columns=['AgeinYears','Race','Gender'])
X_train_young_parent = df_train_young_parent.drop(columns="outcome")
y_train_young_parent = df_train_young_parent["outcome"].values
X_tune_young_parent = df_tune_young_parent.drop(columns="outcome")
y_tune_young_parent = df_tune_young_parent["outcome"].values
df_train_all_parent, df_tune_all_parent, df_test_all_parent = Dataset().get_data(split_age=AgeSplit.AGEINVARIANT,load_csvs=False, **judg_calls)
df_train_all_parent = df_train_all_parent.drop(columns=['AgeinYears','Race','Gender'])
df_tune_all_parent = df_tune_all_parent.drop(columns=['AgeinYears','Race','Gender'])
X_train_all_parent = df_train_all_parent.drop(columns="outcome")
y_train_all_parent = df_train_all_parent["outcome"].values
X_tune_all_parent = df_tune_all_parent.drop(columns="outcome")
y_tune_all_parent = df_tune_all_parent["outcome"].values
# -
# # Fit models
# +
import os
import pickle as pkl
from os.path import join as oj
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier, plot_tree
import numpy as np
import pandas as pd
import imodels
from rulevetting.api import validation
MODELS_DIR = './models'
os.makedirs(MODELS_DIR, exist_ok=True)
outcome_def = 'outcome' # output
# -
def predict_and_save(model, model_name, X_train, X_tune, y_train, y_tune, MODELS_DIR = './models'):
    '''Compute sens/spec/... curves on the train and tune splits, plot the
    tune curve, pickle everything to MODELS_DIR/<model_name>.pkl, and return
    (stats, threshes) of the LAST split processed (i.e. the tune split).
    '''
    os.makedirs(MODELS_DIR, exist_ok=True)
    results = {'model': model}
    for x, y, suffix in zip([X_train, X_tune],
                            [y_train, y_tune],
                            ['_train', '_tune']):
        stats, threshes = validation.all_stats_curve(y, model.predict_proba(x)[:, 1],
                                                     plot=suffix == '_tune')
        for stat in stats.keys():
            results[stat + suffix] = stats[stat]
        results['threshes' + suffix] = threshes
    # NOTE(review): file handle is never closed explicitly; consider `with`.
    pkl.dump(results, open(oj(MODELS_DIR, model_name + '.pkl'), 'wb'))
    return stats, threshes
# Column-name lists used as feature names when fitting the rule models.
feature_names=list(X_train_old).copy()
feature_names_ageinvariant=list(X_train_all).copy()
feature_names_parent=list(X_train_old_parent).copy()
feature_names_ageinvariant_parent=list(X_train_all_parent).copy()
# +
def print_metrics(suffix, X_train, X_tune, y_train, y_tune, MODELS_DIR = './models'):
    """For every pickled model in MODELS_DIR (except 'rf'), recompute stats on
    the split selected by `suffix` ('_train' or '_tune'), pick the threshold
    maximizing 5*sens + spec from the SAVED curves, and return a rounded,
    transposed DataFrame of metrics indexed by model name."""
    os.makedirs(MODELS_DIR, exist_ok=True)
    vals = {s: [] for s in ['sens', 'spec', 'ppv', 'npv', 'lr+', 'lr-', 'brier_score', 'f1']}
    fnames = []
    for fname in sorted(os.listdir(MODELS_DIR)):
        if 'pkl' in fname:
            if not fname[:-4] == 'rf':
                r = pkl.load(open(oj(MODELS_DIR, fname), 'rb'))
                threshes = np.array(r['threshes' + suffix])
                m = r['model']
                # add more stats
                for x, y, suff in zip([X_train, X_tune],
                                      [y_train, y_tune],
                                      ['_train', '_tune']):
                    if suff == suffix:
                        stats, threshes = validation.all_stats_curve(y, m.predict_proba(x)[:, 1],
                                                                     plot=suffix == '_tune')
                        preds_proba = m.predict_proba(x)[:, 1]
                        brier_score = metrics.brier_score_loss(y, preds_proba)
                # pick best vals: sensitivity weighted 5x over specificity
                sens = np.array(r['sens' + suffix])
                spec = np.array(r['spec' + suffix])
                best_idx = np.argmax(5 * sens + spec)
                for k in vals.keys():
                    if not k == 'brier_score':
                        # print('k', k)
                        vals[k].append(stats[k][best_idx])
                vals['brier_score'].append(brier_score)
                fnames.append(fname[:-4])
    stats = pd.DataFrame.from_dict(vals)
    stats.index = fnames
    return (stats).round(2).transpose()
# -
# # greedy (CART) rule list
# +
#rulefit = imodels.RuleFitClassifier(max_rules=10, random_state=1)
#rulefit.fit(X_train_old, y_train_old, feature_names=feature_names)
#rules = rulefit.get_rules()
#rules = rules[rules.coef != 0].sort_values("support", ascending=False)
#rules[['rule', 'coef', 'support']].style.background_gradient(cmap='viridis')
# -
# tuning parameters
# Candidate positive-class weights: a coarse low range plus 300-1000 in steps of 100.
weights_list = np.concatenate((np.array([10,50,100,150,200]),
                    np.arange(300,1100,100)),axis=None)
weights_list
# ## Children old
# fit model
# Sweep the positive-class weight: heavier weights push the greedy rule list
# toward higher sensitivity at the cost of specificity; one fitted model per
# weight is saved under MODELS_DIR by predict_and_save.
for i in weights_list:
    class_weight = {0: 1, 1: i}
    grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
    grl.fit(X_train_old, y_train_old, feature_names=feature_names, verbose=False)
    stats, threshes = predict_and_save(grl,'grl_children_old_' + str(i), X_train_old, X_tune_old, y_train_old, y_tune_old,
                                      MODELS_DIR = './models/grl_children_old/')
suffixes = ['_train', '_tune'] # _train, _test1, _test2
stats_list = {}
# Summarize every saved model on both splits via print_metrics.
for i, suffix in enumerate(suffixes):
    stats_list[suffix] = print_metrics(suffix, X_train_old, X_tune_old, y_train_old, y_tune_old, MODELS_DIR = './models/grl_children_old/')
stats_list['_train']
stats_list['_tune']
## sort based on sens
# Rank tuned models by the first stats row (sensitivity) and show the 10 best columns.
stats_list['_tune'].iloc[:,(-np.array(stats_list['_tune'][0:1])).argsort().squeeze()[:10]]
# interpretation
# Refit a single rule list at weight 100 and print the learned rules.
class_weight = {0: 1, 1: 100}
grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
grl.fit(X_train_old, y_train_old, feature_names=feature_names, verbose=False)
print(grl)
# feature importance
# Hand-picked feature indices from the printed rule list, ordered high-to-low
# importance; the top feature is prepended by name.
lst = [feature_names[i] for i in [7,53,99,22,115,3,124,23,35,94,18]]
lst.insert(0, 'SFxBasHem_1')
lst
# ## Children young
# Same class-weight sweep as the "Children old" section, on the young cohort.
for i in weights_list:
    class_weight = {0: 1, 1: i}
    grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
    grl.fit(X_train_young, y_train_young, feature_names=feature_names, verbose=False)
    stats, threshes = predict_and_save(grl,'grl_children_young_' + str(i), X_train_young, X_tune_young, y_train_young, y_tune_young,
                                      MODELS_DIR = './models/grl_children_young/')
suffixes = ['_train', '_tune'] # _train, _test1, _test2
stats_list = {}
for i, suffix in enumerate(suffixes):
    stats_list[suffix] = print_metrics(suffix, X_train_young, X_tune_young, y_train_young, y_tune_young, MODELS_DIR = './models/grl_children_young/')
stats_list['_train']
stats_list['_tune']
# Rank tuned models by sensitivity and show the 10 best.
stats_list['_tune'].iloc[:,(-np.array(stats_list['_tune'][0:1])).argsort().squeeze()[:10]]
# NOTE(review): `i` here is the leftover value from the enumerate loop above
# (i.e. 1), not a deliberately chosen weight — the other sections refit at
# 100; confirm which weight was intended.
class_weight = {0: 1, 1: i}
grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
grl.fit(X_train_young, y_train_young, feature_names=feature_names, verbose=False)
print(grl)
# Hand-picked feature indices from the printed rules, ordered by importance.
lst = [feature_names[i] for i in [26,52,94,40,63,23,20,85,31,17,3]]
lst.insert(0, 'SFxPalp')
lst
# ## Children all
# Class-weight sweep on the age-invariant (all-children) feature set.
for i in weights_list:
    class_weight = {0: 1, 1: i}
    grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
    grl.fit(X_train_all, y_train_all, feature_names=feature_names_ageinvariant, verbose=False)
    stats, threshes = predict_and_save(grl,'grl_children_all_' + str(i), X_train_all, X_tune_all, y_train_all, y_tune_all,
                                      MODELS_DIR = './models/grl_children_all/')
suffixes = ['_train', '_tune'] # _train, _test1, _test2
stats_list = {}
for i, suffix in enumerate(suffixes):
    stats_list[suffix] = print_metrics(suffix, X_train_all, X_tune_all, y_train_all, y_tune_all, MODELS_DIR = './models/grl_children_all/')
stats_list['_train']
stats_list['_tune']
# Rank tuned models by sensitivity and show the 10 best.
stats_list['_tune'].iloc[:,(-np.array(stats_list['_tune'][0:1])).argsort().squeeze()[:10]]
# Refit a single interpretable model at weight 100 and print its rules.
class_weight = {0: 1, 1: 100}
grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
grl.fit(X_train_all, y_train_all, feature_names=feature_names_ageinvariant, verbose=False)
print(grl)
# Hand-picked feature indices from the printed rules, ordered by importance.
lst = [feature_names_ageinvariant[i] for i in [36,40,92,78,16,48,27,3,13,66,81]]
lst.insert(0, 'SFxBasHem_1')
lst
# ## Children old parent
# Class-weight sweep on the old cohort restricted to parent-reported features.
for i in weights_list:
    class_weight = {0: 1, 1: i}
    grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
    grl.fit(X_train_old_parent, y_train_old_parent, feature_names=feature_names_parent, verbose=False)
    stats, threshes = predict_and_save(grl,'grl_children_old_parent_' + str(i), X_train_old_parent, X_tune_old_parent, y_train_old_parent, y_tune_old_parent,
                                      MODELS_DIR = './models/grl_children_old_parent/')
suffixes = ['_train', '_tune'] # _train, _test1, _test2
stats_list = {}
for i, suffix in enumerate(suffixes):
    stats_list[suffix] = print_metrics(suffix, X_train_old_parent, X_tune_old_parent, y_train_old_parent, y_tune_old_parent, MODELS_DIR = './models/grl_children_old_parent/')
stats_list['_train']
stats_list['_tune']
# Rank tuned models by sensitivity and show the 10 best.
stats_list['_tune'].iloc[:,(-np.array(stats_list['_tune'][0:1])).argsort().squeeze()[:10]]
# Refit a single interpretable model at weight 100 and print its rules.
class_weight = {0: 1, 1: 100}
grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
grl.fit(X_train_old_parent, y_train_old_parent, feature_names=feature_names_parent, verbose=False)
print(grl)
# Hand-picked feature indices from the printed rules, ordered by importance.
lst = [feature_names_parent[i] for i in [1,0,10]]
lst.insert(0, 'AMS')
lst
# ## Children young parent
# ###### Cannot fit here.
# NOTE(review): per the heading above this sweep reportedly fails; the summary
# and interpretation cells present in the other sections are omitted here.
for i in weights_list:
    class_weight = {0: 1, 1: i}
    grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
    grl.fit(X_train_young_parent, y_train_young_parent, feature_names=feature_names_parent, verbose=False)
    stats, threshes = predict_and_save(grl,'grl_children_young_parent_' + str(i), X_train_young_parent, X_tune_young_parent, y_train_young_parent, y_tune_young_parent,
                                      MODELS_DIR = './models/grl_children_young_parent/')
# ## Children all parent
# Class-weight sweep on the age-invariant, parent-reported feature set.
for i in weights_list:
    class_weight = {0: 1, 1: i}
    grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
    grl.fit(X_train_all_parent, y_train_all_parent, feature_names=feature_names_ageinvariant_parent, verbose=False)
    stats, threshes = predict_and_save(grl,'grl_children_all_parent_' + str(i), X_train_all_parent, X_tune_all_parent, y_train_all_parent, y_tune_all_parent,
                                      MODELS_DIR = './models/grl_children_all_parent/')
suffixes = ['_train', '_tune'] # _train, _test1, _test2
stats_list = {}
for i, suffix in enumerate(suffixes):
    stats_list[suffix] = print_metrics(suffix, X_train_all_parent, X_tune_all_parent, y_train_all_parent, y_tune_all_parent, MODELS_DIR = './models/grl_children_all_parent/')
stats_list['_train']
stats_list['_tune']
# Rank tuned models by sensitivity; show up to 30 here (vs 10 elsewhere).
stats_list['_tune'].iloc[:,(-np.array(stats_list['_tune'][0:1])).argsort().squeeze()[:30]]
# Refit a single interpretable model at weight 100 and print its rules.
class_weight = {0: 1, 1: 100}
grl = imodels.GreedyRuleListClassifier(max_depth=12, class_weight=class_weight, criterion='neg_corr')
grl.fit(X_train_all_parent, y_train_all_parent, feature_names=feature_names_ageinvariant_parent, verbose=False)
print(grl)
# NOTE(review): this indexes feature_names_ageinvariant although the model was
# fit with feature_names_ageinvariant_parent — likely the wrong list; confirm.
lst = [feature_names_ageinvariant[i] for i in [3,0,11,8]]
lst.insert(0, 'AMS')
lst
# # Interpretation
# tuning does not help much.
# Weight is not important.
# maxdepth is not, either.
# ## age>=2, include subvariables
# sensitivity,specificity
#
# .95 .76
# Importance high to low:
# ['SFxBasHem_1',
# 'AMS',
# 'SFxPalpDepress_1',
# 'NeuroDSensory_0',
# 'LocLen_3',
# 'OSICut_1',
# 'Vomit',
# 'OSIAbdomen_1',
# 'LocLen_4',
# 'HASeverity_3',
# 'ClavTem_1',
# 'Amnesia_verb_1']
# ## age<2, include subvariables
# sensitivity,specificity
#
# .94 .59
# Importance high to low:
#
# ['SFxPalp',
# 'SeizLen_2',
# 'SFxPalpDepress_0',
# 'ClavTem_1',
# 'AMSSleep_0',
# 'SFxBasPer_92',
# 'LocLen_4',
# 'LocLen_1',
# 'ClavFro_1',
# 'HA_verb_1',
# 'Amnesia_verb_0',
# 'Vomit']
# ## age invariant, include subvariables
# sensitivity,specificity
#
# .95 .63
# Importance high to low:
# ['SFxBasHem_1',
# 'AMSOth_0',
# 'SFxPalpDepress_1',
# 'NeuroDReflex_0',
# 'ClavPar_1',
# 'LocLen_3',
# 'SFxBasPer_0',
# 'AMSSleep_0',
# 'Vomit',
# 'High_impact_InjSev_3',
# 'ClavFace_1',
# 'ClavTem_1']
# ## age >=2, no subvariables
# sensitivity,specificity
#
# 1.00 .24
#
# It seems bad. Do not use this to draw conclusions.
# Importance high to low:
# ['AMS', 'Seiz', 'LOCSeparate', 'Clav']
# ## age <2, no subvariables
# Cannot run
# ## age invariant, no subvariables
# sensitivity,specificity
#
# .89 .80 - use this
# Importance high to low:
#
# ['AMS', 'Vomit', 'LOCSeparate', 'High_impact_InjSev_1', 'Clav']
# ## Overall
# AMS is always important.
#
# Sfx and NeuroDeficit is important only when we include subvariables. Vomit is important only when we exclude subvariables. LOC is quite important when we exclude subvariables, and is a bit important when we include subvariables.
#
# For no subvariable case, age>=2 returns bad result, but age invariant result seems good.
| rulevetting/projects/tbi_pecarn/notebooks/Grl_Xin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 04_Feed-forward_Neural_Networks
# In this notebook, we will see how to define simple feed-foward neural networks.
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
# %matplotlib inline
torch.manual_seed(777) # reproducibility
# -
# ## Neural Networks
# A typical training procedure for a neural network is as follows:
#
# - Define the neural network that has some learnable parameters (or weights)
# - Iterate over a dataset of inputs
# - Process input through the network
# - Compute the loss (how far is the output from being correct)
# - Propagate gradients back into the networkโs parameters
# - Update the weights of the network, typically using an optimizer.
#
# We will look at all the above processes with a concrete example, MNIST.
#
# ### Define the network
# First of all, we need a new feed-foward neural network for performing image classification on MNIST.
# In PyTorch, you can build your own neural network using the `torch.nn`package:
# +
# Hyper-parameters
input_size = 784       # 28x28 MNIST images, flattened to a vector
hidden_size = 256
num_classes = 10       # digits 0-9
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# Device configuration (GPU line kept for reference; CPU is forced below)
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    """Two-layer MLP: input -> hidden (ReLU) -> raw class logits.

    No softmax is applied in forward(); nn.CrossEntropyLoss expects logits.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        # Layers are registered as attributes so they show up in
        # model.parameters(), state_dict() and print(model).
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Project to the hidden space, apply the non-linearity, then
        # map to one logit per class.
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
# Instantiate the network and move its parameters to the chosen device.
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
print(model)
# -
# You just have to define the `forward` function, and the `backward` function (where gradients are computed) is automatically defined for you using `autograd`.
#
# The architecture of the above `NeuralNet` is as follows:
# <img src="images/nn_architecture.png" width="500">
#
# Here, x and y are the input, target (true label) values, respectively.
#
# The learnable parameters of a model are returned by `model.parameters()`.
params = list(model.parameters())
print(len(params))        # 4 tensors: weight+bias for each of the two Linear layers
print(params[0].size())   # fc1's .weight: (hidden_size, input_size)
# ### Loss function and Optimizer
# A loss function takes the (output, target) pair of inputs, and computes a value that estimates how far away the output is from the target.
#
# There are several different loss functions under the nn package.
# We use `nn.CrossEntropyLoss()`.
# +
input = torch.randn(1, 784)  # a random input, for example (NOTE: shadows the builtin input())
output = model(input)  # output: (batch_size, num_classes)
print(output)
target = torch.tensor([0])  # a dummy target, for example. target: (batch_size) where 0 <= each element < num_classes
# CrossEntropyLoss combines log-softmax and negative log-likelihood,
# so the model's raw logits are passed in directly.
criterion = nn.CrossEntropyLoss()
loss = criterion(output, target)
print(loss)
# -
# Furthermore, PyTorch supports several optimizers from `torch.optim`.
# We use an Adam optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# ### DataLoader
# +
# MNIST dataset (downloaded to ./data on first run)
train_dataset = torchvision.datasets.MNIST(root='./data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='./data',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader: shuffle only the training stream; evaluation order stays fixed.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# plot one example
# FIX: .train_data / .train_labels are deprecated torchvision attributes;
# the supported names are .data and .targets (same tensors).
print(train_dataset.data.size())     # (60000, 28, 28)
print(train_dataset.targets.size())  # (60000)
idx = 0
plt.title('%d' % train_dataset.targets[idx].item())
plt.imshow(train_dataset.data[idx, :, :].numpy(), cmap='gray')
# -
# ### Train the network
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    running_loss = 0.0
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device and flatten 28x28 -> 784
        images = images.reshape(-1, input_size).to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # zero the parameter gradients (PyTorch accumulates them by default)
        optimizer.zero_grad()
        # backward + optimize
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # Report the average loss over each window of 100 mini-batches.
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, running_loss / 100))
            running_loss = 0.0
# ### Test the network
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class = index of the largest logit in each row.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# ### Save/Load the network parameters
# +
# Save the model checkpoint (parameters only, not the class definition)
torch.save(model.state_dict(), './data/nn_model.ckpt')
# Load the model checkpoint if needed: rebuild the architecture first,
# then restore the saved parameters into it.
# new_model = NeuralNet(input_size, hidden_size, num_classes).to(device)
# new_model.load_state_dict(torch.load('./data/nn_model.ckpt'))
# -
# ## Practice: CIFAR10
#
# <img src="images/cifar10.png" width="400">
#
# The CIFAR-10 dataset has the following specification:
# - The images in CIFAR-10 are of size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
# - CIFAR-10 has the ten classes: โairplaneโ, โautomobileโ, โbirdโ, โcatโ, โdeerโ, โdogโ, โfrogโ, โhorseโ, โshipโ, โtruckโ.
#
# You have to define a feed-foward neural network with two hidden layers for performing image classifcation on the CIFAR-10 dataset as well as train and test the network.
# +
# Hyper-parameters
input_size = 3*32*32   # CIFAR-10 images: 3 channels x 32 x 32, flattened
hidden1_size = 512
hidden2_size = 128
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001
# Device configuration
device = torch.device('cpu')
# transform images to tensors of normalized range [-1, 1]
# (ToTensor gives [0, 1]; Normalize with mean=std=0.5 maps that to [-1, 1])
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                          shuffle=True, num_workers=2)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
                                         shuffle=False, num_workers=2)
# Write the code to define a neural network with two hidden layers for CIFAR-10
class Net(nn.Module):
    """Three-layer MLP for CIFAR-10: input -> hidden1 -> hidden2 -> logits."""

    def __init__(self, input_size, hidden1_size, hidden2_size, num_classes):
        super(Net, self).__init__()
        # One stateless ReLU module is reused after each hidden projection.
        self.fc1 = nn.Linear(input_size, hidden1_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden1_size, hidden2_size)
        self.fc3 = nn.Linear(hidden2_size, num_classes)

    def forward(self, x):
        first_hidden = self.relu(self.fc1(x))
        second_hidden = self.relu(self.fc2(first_hidden))
        return self.fc3(second_hidden)
# Build the model, loss and optimizer for CIFAR-10 classification.
model = Net(input_size, hidden1_size, hidden2_size, num_classes).to(device)
print(model)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    running_loss = 0.0
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device and flatten 3x32x32 -> 3072
        images = images.reshape(-1, input_size).to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        # zero the parameter gradients (PyTorch accumulates them by default)
        optimizer.zero_grad()
        # backward + optimize
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # Report the average loss over each window of 100 mini-batches.
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, running_loss / 100))
            running_loss = 0.0
# Test the model (no gradients needed during evaluation)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class = index of the largest logit in each row.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# -
| _pages/AI/PyTorch/src/04_Feed-forward_Neural_Networks_answer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''OptimalTransport'': conda)'
# language: python
# name: python37764bitoptimaltransportconda38cd42aab3c640dc8d34473d4e53c23f
# ---
import os
import json
import networkx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
DATA_DIR = './Tweets'   # one twint JSON-lines file per seed user
OUT_DIR = "./data/"     # where the exported graph JSON files are written
# Characters allowed in a Twitter handle (used when parsing "RT @user" text).
ALLOWED_CHARACTERS = ['a','b','c','d','e','f','g','h','i','j','k','l','m',
                      'n','o','p','q','r','s','t','u','v','w','x','y','z',
                      '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_']
# Seed users together with their hand-labelled political orientation.
df = pd.read_csv('users.csv')
seed_users = df['Usuรกrio'].values
seed_users_orientation = df['Orientaรงรฃo Polรญtica'].values
def load_twint_json(text):
    """Parse newline-delimited twint JSON output into a list of tweet dicts.

    Blank lines (including the trailing one left by a final newline) are
    skipped rather than parsed.
    """
    return [json.loads(line) for line in text.split('\n') if line != '']
def get_who_user_retweeted(tweets):
    """Gets who user has retweeted on tweets

    Parameters
    ----------
    tweets : list
        List containing tweets formatted as dictionaries.

    Returns
    -------
    who_retweeted : list
        List of strings containing the username that a given user has retweeted.

    Notes
    -----
    The username is taken from a tweet's structured 'mentions' field when
    present; otherwise it is parsed out of the raw "RT @user ..." text.
    """
    # Characters that may legally appear in a Twitter handle.
    allowed = set('abcdefghijklmnopqrstuvwxyz0123456789_')
    who_retweeted = []
    for tweet in tweets:
        # Only retweets carry a retweeted-user reference.
        if not tweet['retweet']:
            continue
        username = None
        try:
            # Preferred source: the structured mentions list.
            username = tweet['mentions'][0]['screen_name']
        except IndexError:
            # Fallback: parse the handle out of the raw "RT @user: ..." text.
            text = tweet['tweet']
            if 'RT' in text:
                at_pos = text.find('@')
                if at_pos != -1:
                    chars = []
                    pos = at_pos + 1
                    # BUG FIX: the original scan raised IndexError when the
                    # mention ran to the end of the tweet, and discarded the
                    # parsed handle whenever a normal delimiter (space, ':')
                    # terminated it. Stop cleanly at either boundary instead.
                    while pos < len(text) and text[pos].lower() in allowed:
                        chars.append(text[pos])
                        pos += 1
                    if chars:
                        username = ''.join(chars)
        if username is not None:
            who_retweeted.append(username)
    return who_retweeted
def generate_adjacency_matrix(users, who_users_retweeted):
    """Build a directed retweet-count matrix.

    A[i, j] counts how many times users[i] retweeted users[j]; retweets of
    accounts outside `users` are ignored.
    """
    size = len(users)
    adjacency = np.zeros([size, size])
    for source, retweeted_list in zip(users, who_users_retweeted):
        row = np.where(users == source)[0]
        for target in retweeted_list:
            if target in users:
                col = np.where(users == target)[0]
                adjacency[row, col] += 1
    return adjacency
def generate_standard_graph_json(users, degrees, orientations, adjacency_matrix):
    """Build a node/link dict (d3-style graph JSON) from a retweet matrix.

    Parameters
    ----------
    users, degrees, orientations : sequences aligned by index.
    adjacency_matrix : 2-D array where [i, j] > 0 means users[i] retweeted
        users[j]; the count becomes the link weight.

    Returns
    -------
    dict with 'nodes' (username/Ideology/degree) and 'links'
    (source/target/weight) lists.
    """
    graph_json = {
        'nodes': [],
        'links': []
    }
    for i, (src_user, degree, orientation) in enumerate(zip(users, degrees, orientations)):
        graph_json['nodes'].append({
            'username': src_user,
            'Ideology': orientation,
            'degree': degree,
        })
        for j, tgt_user in enumerate(users):
            if adjacency_matrix[i, j] > 0:
                graph_json['links'].append({
                    'source': src_user,
                    'target': tgt_user,
                    # BUG FIX: previously read the notebook-global `A` instead
                    # of the adjacency_matrix argument, silently coupling this
                    # function to outside state.
                    'weight': adjacency_matrix[i, j]
                })
    return graph_json
def generate_hierarchical_graph_json(users, degrees, orientations, adjacency_matrix):
    """Build an edge-bundling hierarchy: each node is named
    'Orientation.<ideology>.<user>' and lists the nodes it retweeted in
    'imports' (labelled with the *target's* ideology).

    `degrees` is accepted for signature parity with
    generate_standard_graph_json but is not used here.
    """
    hierarchy = []
    for i, (src_user, degree, src_orientation) in enumerate(zip(users, degrees, orientations)):
        node = {
            'name': "Orientation.{}.{}".format(src_orientation, src_user),
            'Ideology': src_orientation,
            'imports': []
        }
        for j, (tgt_user, tgt_orientation) in enumerate(zip(users, orientations)):
            if adjacency_matrix[i, j] > 0:
                node['imports'].append("Orientation.{}.{}".format(tgt_orientation, tgt_user))
        hierarchy.append(node)
    return hierarchy
searched_users = []
who_searched_users_retweeted = []
number_of_times_users_retweeted = []
# Walk every scraped twint file, extract who each user retweeted, and
# print a per-user retweet-count table while collecting the raw lists.
print('-' * 43)
print("|{:^20}|{:^20}|".format("User", "Number of retweets"))
print('-' * 43)
total_retweets = 0
for file in os.listdir(DATA_DIR):
    user = file.split('.json')[0]
    with open(os.path.join(DATA_DIR, file), 'r') as f:
        data = f.read()
    tweets = load_twint_json(data)
    who_user_retweeted = get_who_user_retweeted(tweets)
    number_of_times_users_retweeted.append(len(who_user_retweeted))
    searched_users.append(user)
    who_searched_users_retweeted.append(who_user_retweeted)
    total_retweets += len(who_user_retweeted)
    print("|{:^20}|{:^20}|".format(user, len(who_user_retweeted)))
print("|{:^20}|{:^20}|".format("Total", total_retweets))
print('-' * 43)
# +
searched_users = np.array(searched_users)
# Align the hand-labelled orientations with the order of the searched users.
ind_used_in_search = [np.where(seed_users == u)[0][0] for u in searched_users if u in seed_users]
searched_orientation = seed_users_orientation[ind_used_in_search]
who_searched_users_retweeted = np.array(who_searched_users_retweeted)
number_of_times_users_retweeted = np.array(number_of_times_users_retweeted)
# NOTE(review): generate_adjacency_matrix returns A[i, j] = "i retweeted j";
# the .T flips the edge direction — confirm which orientation the stats below assume.
A = generate_adjacency_matrix(searched_users, who_searched_users_retweeted).T
degrees = np.sum(A, axis=1)
# -
# Drop users with no edges in either direction (isolated nodes).
users_to_exclude = np.intersect1d(np.where(np.sum(A, axis=1) == 0)[0], np.where(np.sum(A, axis=0) == 0)[0])
ind_to_maintain = [i for i in range(len(searched_users)) if i not in users_to_exclude]
users_for_analysis = [searched_users[i] for i in range(len(searched_users)) if i not in users_to_exclude]
degrees_for_analysis = [degrees[i] for i in range(len(degrees)) if i not in users_to_exclude]
orientation_for_analysis = np.array([searched_orientation[i] for i in range(len(searched_orientation))
                                    if i not in users_to_exclude])
_A = A[ind_to_maintain, :][:, ind_to_maintain]
plt.imshow(_A, cmap='binary', vmin=0, vmax=1)
# +
# Row indices for each ideology group (left / right / center).
L = np.where(orientation_for_analysis == 'Esquerda')[0]
D = np.where(orientation_for_analysis == 'Direita')[0]
C = np.where(orientation_for_analysis == 'Centro')[0]
# Cross-ideology retweet fractions: P averages all six cross-group flows;
# Pleft/Pright/Pcenter normalize the flows touching one group by that
# group's total outgoing retweets.
P = (np.sum(_A[L, :][:, D]) + np.sum(_A[L, :][:, C]) + np.sum(_A[D, :][:, L]) + np.sum(_A[D, :][:, C]) + \
     np.sum(_A[C, :][:, L]) + np.sum(_A[C, :][:, D]) ) / (6 * np.sum(_A))
Pleft = (
    np.sum(_A[L, :][:, D]) + np.sum(_A[L, :][:, C]) + np.sum(_A[D, :][:, L]) + np.sum(_A[C, :][:, L])
) / (4 * np.sum(_A[L, :]))
Pright = (
    np.sum(_A[D, :][:, C]) + np.sum(_A[D, :][:, L]) + np.sum(_A[L, :][:, D]) + np.sum(_A[C, :][:, D])
) / (4 * np.sum(_A[D, :]))
Pcenter = (
    np.sum(_A[C, :][:, D]) + np.sum(_A[C, :][:, L]) + np.sum(_A[D, :][:, C]) + np.sum(_A[L, :][:, C])
) / (4 * np.sum(_A[C, :]))
# Report each fraction as a percentage.
print(P * 100)
print(Pleft * 100)
print(Pright * 100)
print(Pcenter * 100)
# +
# Export the node/link graph for visualization.
standard_graph = generate_standard_graph_json(users_for_analysis,
                                              degrees_for_analysis,
                                              orientation_for_analysis, _A)
with open(os.path.join(OUT_DIR, 'standard_graph.json'), 'w') as f:
    f.write(json.dumps(standard_graph))
# +
# Export the edge-bundling hierarchy variant of the same graph.
hierarchical_graph = generate_hierarchical_graph_json(users_for_analysis,
                                                      degrees_for_analysis,
                                                      orientation_for_analysis, _A)
with open(os.path.join(OUT_DIR, 'hierarchical_graph.json'), 'w') as f:
    f.write(json.dumps(hierarchical_graph))
# -
# Per-user counts of retweets broken down by the target's ideology.
retweet_json = []
for node_i in hierarchical_graph:
    retweet_json.append({'name': node_i['name'],
                         'Esquerda': 0,
                         'Centro': 0,
                         'Direita': 0})
    user_i = node_i['name'].split('.')[2]   # NOTE(review): unused below
    ideo_i = node_i['name'].split('.')[1]   # NOTE(review): unused below
    for node_j in node_i['imports']:
        user_j = node_j.split('.')[2]       # NOTE(review): unused below
        ideo_j = node_j.split('.')[1]
        retweet_json[-1][ideo_j] += 1
with open(os.path.join(OUT_DIR, 'retweet_statistics.json'), 'w') as f:
    f.write(json.dumps(retweet_json))
| Preprocessing Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Read an integer from stdin and report whether it is a palindrome.
num = int(input())
# palindrome means num and reverse of that num should be same
temp = num
revNum = 0
# Peel off the last digit each pass and push it onto the reversed number.
while(temp > 0):
    rem = temp % 10
    temp = temp // 10
    revNum = revNum * 10 + rem
if(num == revNum):
    print("true")
else:
    print("false")
| 01.Python-Basics/03. Conditionals and Loops/Assignments/3.Palindrome-Number.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## TKX ํผํธ๋์ค์ ์ค์ ๊ฒ์ ํ์ํฉ๋๋ค!
#
# ์๋
ํ์ธ์! TKX ํผํธ๋์ค ๋ฐ์ดํฐ๋ถ์ํ์ ์ค์ ๊ฒ์ ํ์ํฉ๋๋ค.
#
# TKX๋ ๊ตญ๋ด ์ต๋์ ํ๋ ์ฐจ์ด์ฆ ํผํธ๋์ค ์ผํฐ๋ก์, ๋งค์ ์ ์ฒ๋ช
์ ๋ฌํ๋ ์ ๊ท ํ์์ ๋ฐ์ ๋ฐ์ดํฐ๋ฅผ ๋ถ์ํ๊ณ ์์ต๋๋ค.
#
# TKX ํผํธ๋์ค์ ์ฅ์ ์ ํ๋ก๊ทธ๋๋ฐ ์ธ์ด ํ์ด์ฌ(Python)๊ณผ ๋ฐ์ดํฐ ๋ถ์ ํ๋ ์์ํฌ ํ๋ค์ค(Pandas)๋ฅผ ํ์ฉํ ๋ฉด๋ฐํ ๋ฐ์ดํฐ ๋ถ์์
๋๋ค. ์ด ๋ฐ์ดํฐ ๋ถ์์ ๋ฐํ์ผ๋ก KTX์ ์คํผ๋ ์ด์
ํ / ์ฝ์นญ ํ์ ํผํธ๋์ค ์ผํฐ์ ๋ฐฉ๋ฌธํ๋ ๊ณ ๊ฐ ๋ถ๋ค, ๊ทธ๋ฆฌ๊ณ ๋ฐฉ๋ฌธํ์ง ์๋ ๊ณ ๊ฐ ๋ถ๋ค์๊ฒ๋ ์ต์ ์ ๋ง์ถค ์๋น์ค๋ฅผ ์ ๊ณตํด ๋๋ฆฌ๊ณ ์์ต๋๋ค.
#
# ์ค๋ ์ด ๋
ธํธ๋ถ์ ๋ฐ์ ๋ถ์ด ํด์ฃผ์
์ผ ํ๋ ์ผ์ 2016๋
๋ 1์๋ถํฐ 2017๋
๋ 12์๊น์ง์ ์ ๊ท ๊ฐ์
๊ณ ๊ฐ ๋ฐ์ดํฐ๋ฅผ ๋ฐ์์, ์ฐจํ์ ๋ฐ์ดํฐ๋ถ์์ ๋ ์ฉ์ดํ๊ฒ ํ ์ ์๋๋ก ๋ฐ์ดํฐ๋ฅผ ์ ๋ฆฌํด์ฃผ๋ ์์
, ์ผ๋ช
๋ฐ์ดํฐ ํด๋ฆฌ๋(Data Cleaning) ์์
์
๋๋ค.
#
# ์ ํฌ TKX ํผํธ๋์ค๋ ์ธ์ ๋ ์ ์ ๋ฆฌ๋์ด ์๋ ๊ณ ๊ฐ ์ ๋ณด๋ฅผ ๋ฐ์์ ๋ฐ์ดํฐ ๋ถ์ํ์๊ฒ ๋งก๊ธธ๋ ค๊ณ ๋
ธ๋ ฅํ๊ณ ์์ผ๋, (์ด ๋
ธํธ๋ถ์ ๋ฐ์ ๋ถ๋ค๋ ์์๊ฒ ์ง๋ง) ํ์ค์ ์ธ์ ๋ 100% ์ ์ ๋ฆฌ๋์ด์๋ ๋ฐ์ดํฐ๋ฅผ ๋ฐ์์ค๊ธฐ๊ฐ ์ด๋ ต์ต๋๋ค.
#
# ๋๋ฌธ์ ์ด๋ฒ ์๊ฐ์๋ ์ ๊ท ๊ฐ์
๊ณ ๊ฐ ๋ฐ์ดํฐ๋ฅผ ๋ถ์ํ์ฌ, ๋ฐ์ดํฐ์ ๋ช๋ช ๋๋ฝ๋ ๋ถ๋ถ์ด๋ ์๋ชป ๊ธฐ์
๋ ๋ถ๋ถ ๋ฑ์ ํ๋ค์ค๋ก ์ ๋ฆฌํ๊ณ ๊ทธ ๊ฒฐ๊ณผ๋ฅผ ๋ถ์ํ๋ ์ผ์ ํด์ฃผ์
จ์ผ๋ฉด ํฉ๋๋ค.
#
# ๋ฐ์ดํฐ๋ ๋ค์์ URL [https://goo.gl/8XGH4T](https://goo.gl/8XGH4T) ์์ ๋ค์ด๋ฐ์ ์ ์์ต๋๋ค. ๋ฐ์ดํฐ๋ฅผ ๋ค์ด๋ฐ์ ์ฝ์ด์จ ๋ค, ํ๊ธฐ์ ์ ์ด๋์ ๋ด์ฉ๋๋ก ๋ฐ์ดํฐ๋ฅผ ๋ถ์ ๋ฐ ์ ๋ฆฌ๋ฅผ ํด์ฃผ์ธ์.
# ## Import Libraries
import pandas as pd
import numpy as np
# ## Load Dataset
# +
# Raw member sign-up data to be cleaned in the cells below.
data = pd.read_csv('../../data/tkx-user-data.csv')
print(data.shape)
data.head()
# -
# ## Reshape Data 1
# ** 1. ํ
์คํธ๋ก ๋ ํก์ฐ ์ฌ๋ถ(์/์๋์ค)๋ฅผ True/False ๋ก ๋ฐ๊พธ๊ธฐ **
# +
# Map the smoking column's Korean yes/no labels to booleans.
data.loc[data['ํก์ฐ ์ฌ๋ถ'] == '์', 'ํก์ฐ ์ฌ๋ถ'] = True
data.loc[data['ํก์ฐ ์ฌ๋ถ'] == '์๋์ค', 'ํก์ฐ ์ฌ๋ถ'] = False
print(data.shape)
data.head()
# -
# ** 2. ๋์ด ์ปฌ๋ผ ์ ๋ฆฌํ๊ธฐ **
# - ํํ ํ์ ํ์คํ(ํต์ผ)
# > - '0์ธ' -> NaN
# > - ๋ค์ํ ํํ(oo'์ธ', 'oo') -> int
# > - ๋์ด๊ฐ ๋๋ฌด ์ ๊ฑฐ๋, ๋ง์ ์ฌ๋ -> NaN
#
# ** 2-1. ๋์ด์์ 0์ธ๋ผ๊ณ ๋์ด์๋ ๊ฒ์ NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ **
data['๋์ด'].dtypes
# +
# An age recorded as the string "0" means missing; replace it with NaN.
data.loc[data['๋์ด'] == "0", '๋์ด'] = np.nan
print(data.shape)
data.head()
# -
# ** 2-2. ๋ค์ํ ํํ์ ๋์ด ํต์ผ์ํค๊ธฐ **
# - ๋ค์ํ ํํ๋ก ํํ๋์ด ์๋ ๋์ด ์ปฌ๋ผ์ ์ซ์ํ(int)์ผ๋ก ํต์ผํด์ฃผ๊ธฐ
# +
# data['๋์ด'].value_counts()
# +
def convert_age_to_integer(age):
    """Normalize an age cell to an int; NaN values pass through unchanged."""
    if pd.isnull(age):
        return age
    if "์ธ" in age:
        # Strip the age-unit suffix before converting.
        age = age.replace("์ธ", "")
    return int(age)
# Apply the normalizer to the whole age column (strings -> ints, NaN kept).
data['๋์ด'] = data['๋์ด'].apply(convert_age_to_integer)
print(data.shape)
data.head()
# -
# ** 2-3. ๋์ด๊ฐ ๋๋ฌด ์ ๊ฑฐ๋ ๋ง์ ์ฌ๋ ์ ์ฒ๋ฆฌ **
# - ๊ธฐ์ค : 9์ธ ์ดํ, 80์ธ ์ด์ -> NaN
# +
data.loc[data['๋์ด'] <= 9, '๋์ด'] = np.nan
data.loc[data['๋์ด'] >= 80, '๋์ด'] = np.nan
print(data.shape)
data.head()
# -
# ** 3. ํ๊ท (mean) / ์ต์(min) / ์ต๋(max) ๋์ด ๊ตฌํ๊ธฐ **
# +
mean_age = data['๋์ด'].mean()
min_age = data['๋์ด'].min()
max_age = data['๋์ด'].max()
print(f" ํ๊ท : {mean_age:.1f}")
print(f" ์ต์ : {min_age:.1f}")
print(f" ์ต๋ : {max_age:.1f}")
print("[๋์ด] \n ํ๊ท = {0:.6f}, ์ต์ = {1}, ์ต๋ = {2}".format(mean_age, min_age, max_age))
# -
# ** 4. ์ปฌ๋ผ์ ์์๋ฅผ ์กฐ๊ธ ๋ ์ง๊ด์ ์ผ๋ก ๋ฐ๊พธ๊ธฐ **
# +
# Write code here!
new_columns = [
"์ด๋ฆ", "์ฑ๋ณ", "์ ํ๋ฒํธ", "๋์ด", "ํ์ฌ ์ฒด์ค", "๋ชฉํ ์ฒด์ค", "ํค",
"ํก์ฐ ์ฌ๋ถ", "์์ฃผ ์ฌ๋ถ", "๊ฐ์ธ์๋ด ์์ฒญ", "ํ์ ๊ฐ์
์ผ", "ํ์ ์ ๋ณด ๊ฐฑ์ ์ผ", "๊ฐ์
๊ฐ์ ์"
]
data = data[new_columns]
print(data.shape)
data.head()
# -
# ## Reshape Data 2
# - ๋ฐ์ดํฐ๋ฅผ ์ฌ์ฉํ๊ธฐ ์ฉ์ดํ๊ฒ ์ ๋ฆฌํ๊ธฐ
#
# **5. ์ปฌ๋ผ ์ด๋ฆ ๋ฐ๊พธ๊ธฐ**
#
# ๋ณดํต ํ๋ก๊ทธ๋๋ฐ์ ํ ๋ ํ๊ธ ์ปฌ๋ผ์ ์ฌ์ฉํ๋ฉด ๋ค์ํ ๋ฌธ์ ๋ก ๋ถํธ์ ๊ฒช์ ๋๊ฐ ๋ง์ต๋๋ค.
#
# ๊ทธ๋ฌ๋ฏ๋ก ์ปฌ๋ผ์ ์ ๋ถ ํ๊ธ์์ ์์ด๋ก ๋ฐ๊ฟ์ฃผ์ธ์. ์ปฌ๋ผ๋ช
์ ๋ค์๊ณผ ๊ฐ์ด ๋ฐ๊ฟ์ฃผ์๋ฉด ๋ฉ๋๋ค.
#
# * ์ด๋ฆ - Name
# * ์ฑ๋ณ - Gender
# * ์ ํ๋ฒํธ - Phone Number
# * ๋์ด - Age
# * ํ์ฌ ์ฒด์ค - Current Weight
# * ๋ชฉํ ์ฒด์ข
- Goal Weight
# * ํค - Height
# * ํก์ฐ ์ฌ๋ถ - Smoking
# * ์์ฃผ ์ฌ๋ถ - Drinking
# * ๊ฐ์ธ์๋ด ์์ฒญ - Request Counselling
# * ํ์ ๊ฐ์
์ผ - Joined At
# * ํ์ ์ ๋ณด ๊ฐฑ์ ์ผ - Updated At
# * ๊ฐ์
๊ฐ์ ์ - Paid Plan
data.columns
# +
english_name = ['Name', 'Gender', 'Phone Number', 'Age', 'Current Weight', 'Goal Weight',
               'Height', 'Smoking', 'Drinking', 'Request Counselling', 'Joined At', 'Updated At', 'Paid Plan']
# Replace the Korean column labels with English ones (order must match data.columns).
data.columns = english_name
print(data.shape)
data.head()
# -
# ** 6. '์๋ด ์์ฒญ' ์ปฌ๋ผ์ ์/์๋์ค ์์ True/False๋ก ๋ฐ๊พธ๊ธฐ **
# +
# Map the counselling-request column's Korean yes/no labels to booleans.
data.loc[data['Request Counselling'] == "์", 'Request Counselling'] = True
# NOTE(review): the negative label here ('์๋์') differs from the smoking
# column's ('์๋์ค') — confirm both spellings exist in the raw data.
data.loc[data['Request Counselling'] == '์๋์', 'Request Counselling'] = False
print(data.shape)
data.head()
# -
data['Request Counselling'].value_counts()
# **7. '๊ฐ์
๊ฐ์ ์' ์ปฌ๋ผ์ ๋ถ์ํด์, ๋ฌด๋ฃ(0๊ฐ์)์ธ ๊ฒฝ์ฐ๋ฅผ False๋ก, ์ ๋ฃ์ธ ๊ฒฝ์ฐ(3, 6, 9, 12๊ฐ์)๋ฅผ True๋ก ๋ณด์ฌ์ฃผ๋ ์๋ก์ด ์ปฌ๋ผ์ ๋ง๋ค๊ธฐ**
# +
# Derive a boolean 'Paid' flag: the 0-month (free) plan -> False, else True.
data.loc[data['Paid Plan'] == "0๊ฐ์", 'Paid'] = False
data.loc[data['Paid Plan'] != "0๊ฐ์", 'Paid'] = True
print(data.shape)
data.head()
# -
# ## Reshape Data 3
# - ์ปฌ๋ผ์ ํํ๋ฅผ ํ๋๋ก ํต์ผํ๊ณ , ๋น ๊ฐ์ด๋ outlier๋ค์ NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ
#
# ** 8. ์ ํ๋ฒํธ ์ปฌ๋ผ์ ๋ค์๊ณผ ๊ฐ์ด ์ ๋ฆฌํ๊ธฐ **
# * unknown ์ด๋ผ๊ณ ํ์๋์ด ์๋ ์ ํ๋ฒํธ๋ ๋น ์ ํ๋ฒํธ์ด๋ค. ์ด๋ฅผ NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ.
# +
# 'unknown' marks a missing phone number; normalize it to NaN.
data.loc[data['Phone Number'] == 'unknown', 'Phone Number'] = np.nan
print(data.shape)
data.head()
# -
# ** 9. ํค(cm) ์ปฌ๋ผ์ ๋ค์๊ณผ ๊ฐ์ด ์ ๋ฆฌํ๊ธฐ**
#
# * ๋ค์ํ ํํ๋ก ํํ๋์ด ์๋ ํค ์ปฌ๋ผ(?, ?cm, ? cm ๋ฑ)์ ์ซ์๋ก ํต์ผํ๊ธฐ.
# * ํค๊ฐ 0์ผ๋ก ๋์ด์์ผ๋ฉด NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ.
# * ํค๊ฐ ๋๋ฌด ์๊ฑฐ๋(140cm ์ดํ) ๋๋ฌด ํฌ๋ฉด(210cm ์ด์) ๋ง์ฐฌ๊ฐ์ง๋ก NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ.
# ** 9-1. ํค ์ปฌ๋ผ ์ ๋ฆฌ 1 **
# - 0์ NaN์ผ๋ก
# A height recorded as the string '0' means missing; replace it with NaN.
data.loc[data['Height'] == '0', 'Height'] = np.nan
print(data.shape)
data.head()
# ** 9-2. ํค ์ปฌ๋ผ ์ ๋ฆฌ 2**
# - cm ์ฒ๋ฆฌํ๋ ํจ์ ์์ฑ
# +
def convert_height_to_integer(height):
    """Normalize a height cell ('170', '170cm', '170 cm') to an int;
    NaN values pass through unchanged."""
    if pd.isnull(height):
        return height
    if 'cm' in height:
        # Drop the unit suffix; int() tolerates any leftover whitespace.
        height = height.replace("cm", "")
    return int(height)
# Apply the normalizer to the whole height column (strings -> ints, NaN kept).
data['Height'] = data['Height'].apply(convert_height_to_integer)
print(data.shape)
data.head()
# -
# ** 9-3. ํค ์ปฌ๋ผ ์ ๋ฆฌ 3**
# - ์ด์์น(140 ์ดํ, 210 ์ด์) NaN์ฒ๋ฆฌ
# Treat implausible heights (140cm or under, 210cm or over) as missing.
data.loc[data['Height'] <= 140, 'Height'] = np.nan
data.loc[data['Height'] >= 210, 'Height'] = np.nan
print(data.shape)
data.head()
# ** 10.'ํ์ฌ ์ฒด์ค'๊ณผ '๋ชฉํ ์ฒด์ค' ์ปฌ๋ผ์ ๋ค์๊ณผ ๊ฐ์ด ์ ๋ฆฌํ๊ธฐ **
#
# * ๋ค์ํ ํํ๋ก ํํ๋์ด ์๋ ๋ชธ๋ฌด๊ฒ ์ปฌ๋ผ(?, ?kg, ? kg ๋ฑ)์ ์ซ์๋ก ํต์ผํ๊ธฐ.
# * ๋ชธ๋ฌด๊ฒ๊ฐ 0์ผ๋ก ๋์ด์์ผ๋ฉด NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ.
# * ๋ชธ๋ฌด๊ฒ๊ฐ ๋๋ฌด ์๊ฑฐ๋(40kg ์ดํ) ๋๋ฌด ํฌ๋ฉด(150kg ์ด์) ๋ง์ฐฌ๊ฐ์ง๋ก NaN์ผ๋ก ๋ฐ๊พธ๊ธฐ.
#
# ** 10-1. ์ฒด์ค ์ปฌ๋ผ ์ ๋ ฌ 1 **
# - 0 ๊ฐ ์ ๋ฆฌ
# Weights recorded as the string '0' mean missing; replace them with NaN.
data.loc[data['Current Weight'] == '0', 'Current Weight'] = np.nan
data.loc[data['Goal Weight'] == '0', 'Goal Weight'] = np.nan
print(data.shape)
data.head()
# ** 10-2. ์ฒด์ค ์ปฌ๋ผ ์ ๋ ฌ 2 **
# - ๋ค์ํ ํํ๋ก ํํ๋์ด ์๋ ๊ฐ์ ์ฒ๋ฆฌํ๋ ํจ์ ์์ฑ
# +
def convert_weight_to_integer(weight):
    """Normalize a weight value ("70", "70kg", "70 kg") to an int.

    NaN is passed through unchanged. Mirrors convert_height_to_integer:
    the duplicated ``return int(weight)`` branch is collapsed by stripping
    "kg" unconditionally (int() tolerates surrounding whitespace).
    """
    if pd.isnull(weight):
        return weight
    return int(weight.replace("kg", ""))
data["Current Weight"] = data["Current Weight"].apply(convert_weight_to_integer)
data["Goal Weight"] = data["Goal Weight"].apply(convert_weight_to_integer)
print(data.shape)
data.head()
# -
# ** 10-3. ์ฒด์ค ์ปฌ๋ผ ์ ๋ ฌ 3 **
# - ์ด์์น(40kg ์ดํ, 150kg ์ด์) NaN ์ฒ๋ฆฌ
# +
data.loc[data['Current Weight'] <= 40, 'Current Weight'] = np.nan
data.loc[data['Current Weight'] >= 150, 'Current Weight'] = np.nan
data.loc[data['Goal Weight'] <= 40, 'Goal Weight'] = np.nan
data.loc[data['Goal Weight'] >= 150, 'Goal Weight'] = np.nan
print(data.shape)
data.head()
# -
# ## Basic Analysis
# ** 11. "์ ์ฒด ๋ฐ์ดํฐ์์ ํ๊ท /์ต์/์ต๋ ํค(cm), ๊ทธ๋ฆฌ๊ณ ํ๊ท /์ต์/์ต๋ 'ํ์ฌ ์ฒด์ค(kg)'๊ณผ '๋ชฉํ ์ฒด์ค(kg)'์ ์ฐพ์์ฃผ์ธ์." **
print(data['Height'].describe())
print()
print(data['Current Weight'].describe())
print()
print(data['Goal Weight'].describe())
# +
mean_height = data['Height'].mean()
min_height = data['Height'].min()
max_height = data['Height'].max()
mean_curr_weight = data['Current Weight'].mean()
min_curr_weight = data['Current Weight'].min()
max_curr_weight = data['Current Weight'].max()
mean_goal_weight = data['Goal Weight'].mean()
min_goal_weight = data['Goal Weight'].min()
max_goal_weight = data['Goal Weight'].max()
# -
print("[Height] Mean = {0:.2f} cm, Min = {1} cm, Max = {2} cm".format(mean_height, min_height, max_height))
print("[Current_weight] = {0:.2f} cm, Min = {1} cm , Max = {2} cm".format(mean_curr_weight, min_curr_weight, max_curr_weight))
print("[Goal_Weight] = {0:.2f} cm, Min = {1} cm, Max = {2} cm".format(mean_goal_weight, min_goal_weight, max_goal_weight))
# ** 12. "์ ์ฒด ๋ฐ์ดํฐ์์ ํก์ฐ์์ ๋นํก์ฐ์ ์ธ์์ ์ดํฉ์ ์ฐพ์์ฃผ์ธ์." **
data['Smoking'].value_counts()
# ** 13. "์ ์ฒด ๋ฐ์ดํฐ์์ ์ ๋ฃ ์ฌ์ฉ์์ ๋ฌด๋ฃ ์ฌ์ฉ์์ ์ธ์์ ์ดํฉ์ ์ฐพ์์ฃผ์ธ์." **
# - "๋ํ ์ ๋ฃ ์ฌ์ฉ์๋ง ํ์ ํด์, 3๊ฐ์ / 6๊ฐ์ / 12๊ฐ์ ๊ฒฐ์ ์์ ์ธ์์ ์ดํฉ์ ์ฐพ์์ฃผ์ธ์."
data.columns
data[['Paid Plan', 'Paid']]
# ** 13-1. ์ /๋ฌด๋ฃ ์ฌ์ฉ์ **
# - ์ ๋ฃ ์ฌ์ฉ์ : 31,884 ๋ช
# - ๋ฌด๋ฃ ์ฌ์ฉ์ : 74,955 ๋ช
data['Paid'].value_counts()
# ** 13-2. ๊ฐ ๊ฐ์ ์ ์ฌ์ฉ์ **
# - 3๊ฐ์ : 21,250 ๋ช
# - 6๊ฐ์ : 7,291 ๋ช
# - 12๊ฐ์ : 3,343 ๋ช
paid_user = data[data['Paid'] == True]
paid_user['Paid Plan'].value_counts()
# ** 14. "์ ์ฒด ๋ฐ์ดํฐ์์ ํก์ฐ์์ ์์ฃผ ์ฌ๋ถ๋ฅผ ๋ฐํ์ผ๋ก ๋ค์์ ๋ถ์ํด์ฃผ์ธ์." **
# 1. ํก์ฐ๊ณผ ์์ฃผ๋ฅผ ๋ ๋ค ์ ํ๋ ์ฌ๋์ ์ธ์ ์ดํฉ.
# 1. ํก์ฐ์ ํ์ง๋ง ์์ฃผ๋ ์ ํ๋ ์ฌ๋์ ์ธ์ ์ดํฉ.
# 1. ํก์ฐ์ ํ์ง ์์ง๋ง ์์ฃผ๋ ํ๋ ์ฌ๋์ ์ธ์ ์ดํฉ.
# 1. ํก์ฐ๊ณผ ์์ฃผ๋ฅผ ๋ ๋ค ํ๋ ์ฌ๋์ ์ธ์ ์ดํฉ.
# ** 14-1. ์์ฃผ์ฌ๋ถ ์ ์ฒ๋ฆฌ **
# +
data.loc[data['Drinking'] == '์ ๋ง์ฌ', 'Drinking(Bool)'] = False
data.loc[data['Drinking'] != '์ ๋ง์ฌ', 'Drinking(Bool)'] = True
print(data.shape)
data.head()
# -
# ** 14-2. ํก์ฐ-์์ฃผ ๊ฐ ํฉ์น๊ณ , ๊ฒฐ๊ณผ๊ฐ ๋ฝ๊ธฐ **
# +
data['Smoking-Drinking'] = data['Smoking'].astype('str') + "-" + data['Drinking(Bool)'].astype('str')
print(data.shape)
data['Smoking-Drinking'].value_counts()
# -
# # Intermediate Level
# ** 15. "์ฑ๋ณ ์ปฌ๋ผ์ ์ ๋ฆฌํด์ฃผ์ธ์." **
# - "The gender column appears in many mixed forms (e.g. Male, FEMALE and
#    assorted Korean spellings). Normalize all of them to male / female."
# +
# data.columns
# -
data['Gender'].value_counts()
# +
data.loc[data['Gender'].isin(["๋จ์ฑ", "๋จ", "male", "Male", "MALE"]), 'Gender'] = "male"
data.loc[data['Gender'].isin(["์ฌ์ฑ", "์ฌ", "female", "Female", "FEMALE"]), 'Gender'] = "female"
print(data['Gender'].unique())
print(data.shape)
data.head()
# -
# **16. "์ ์ฒด ์ธ์์ด ์๋, ๋จ์ฑ/์ฌ์ฑ ๊ฐ๊ฐ์ ์ต์/ํ๊ท /์ต๋ ํค/๋ชธ๋ฌด๊ฒ/๋์ด๋ฅผ ๊ตฌํด์ฃผ์ธ์."**
#
# ๊ฒฐ๊ณผ์ ์ผ๋ก ๋ค์์ ์์น๊ฐ ๋์์ผ ํฉ๋๋ค.
# * ์ ์ฒด ๋จ์ฑ์ ์ต์/ํ๊ท /์ต๋ ๋์ด
# * ์ ์ฒด ๋จ์ฑ์ ์ต์/ํ๊ท /์ต๋ ๋ชธ๋ฌด๊ฒ(kg)
# * ์ ์ฒด ๋จ์ฑ์ ์ต์/ํ๊ท /์ต๋ ํค(cm)
#
# * ์ ์ฒด ์ฌ์ฑ์ ์ต์/ํ๊ท /์ต๋ ๋์ด
# * ์ ์ฒด ์ฌ์ฑ์ ์ต์/ํ๊ท /์ต๋ ๋ชธ๋ฌด๊ฒ(kg)
# * ์ ์ฒด ์ฌ์ฑ์ ์ต์/ํ๊ท /์ต๋ ํค(cm)
female = data.loc[data['Gender'] == 'female']
male = data.loc[data['Gender'] == 'male']
male.describe()
female.describe()
# ** 17. ๊ฐ๋ ๋ชฉํ ๋ถ์ํ๊ธฐ **
#
# - "๋ฐ์ดํฐ์์ 'ํ์ฌ ์ฒด์ค' - '๋ชฉํ ์ฒด์ค' ์ ํ๋ฉด ๊ฐ๋ ๋ชฉํ๊ฐ ๋์ฌ ๊ฒ์
๋๋ค. ๊ฐ๋ ๋ชฉํ๋ฅผ ์ฐพ์์ ์๋ก์ด ์ปฌ๋ผ์ ๋ง๋ค์ด์ฃผ์ธ์."<br>
# - "๋ํ kg๋ณ๋ก ๊ฐ๋์ ์ํ๋ ์ฌ๋์ ์ด ์ธ์์ ๊ตฌํด์ฃผ์ธ์. ๊ฐ๋ น 1) 1kg ๊ฐ๋์ ์ํ๋ ์ด ์ธ์, 2) 2kg ๊ฐ๋์ ์ํ๋ ์ด ์ธ์, ... 10) 10kg ๊ฐ๋์ ์ํ๋ ์ด ์ธ์์ด ๋์์ผ ํฉ๋๋ค."
# +
data['Goal Loss'] = data['Current Weight'] - data['Goal Weight']
print(data.shape)
data[['Current Weight', 'Goal Weight', 'Goal Loss']].head()
# -
data['Goal Loss'].describe()
data['Goal Loss'].value_counts().sort_values(ascending=False)
# ** 18. '๊ฐ์
๊ฐ์ ์'๋ฅผ ์ซ์๋ก ํํํ๊ธฐ **
#
# - "'๊ฐ์
๊ฐ์ ์' ์ปฌ๋ผ์ ์ซ์๋ก ์ ๋ฆฌํด์ฃผ์ธ์. ํ์ฌ 3๊ฐ์, 6๊ฐ์, 12๊ฐ์๋ก ๋์ด์๋๋ฐ, ์ด๋ฅผ 3, 6, 12๋ก ์ ๋ฆฌํ์๋ฉด ๋ฉ๋๋ค."
# - "๋ํ 0๊ฐ์์ 0์ด ์๋ NaN์ผ๋ก ์ง์ด๋ฃ์ด ์ฃผ์ธ์."
#
data['Paid Plan'].unique()
# ** Method 1 **
# +
# data.loc[data['Paid Plan'] == '0๊ฐ์', 'Paid Plan'] = np.nan
# data.loc[data['Paid Plan'] == '3๊ฐ์', 'Paid Plan'] = 3
# data.loc[data['Paid Plan'] == '6๊ฐ์', 'Paid Plan'] = 6
# data.loc[data['Paid Plan'] == '12๊ฐ์', 'Paid Plan'] = 12
# print(data.shape)
# print(data['Paid Plan'].unique())
# data.head()
# -
# ** Method 2 **
# - lambda๋ฅผ ์ฌ์ฉํ์ฌ ์ ์ฒ๋ฆฌ
# +
# data['Paid Plan'] = data['Paid Plan'].apply(lambda month: int(month.replace("๊ฐ์", "")))
# data.loc[data['Paid Plan'] == 0, 'Paid Plan'] = np.nan
# print(data.shape)
# print(data['Paid Plan'].unique())
# data.head()
# -
# ** Method 3 **
# - ```str.replace("old", "new").astype('int')```
# +
data['Paid Plan'] = data['Paid Plan'].str.replace("๊ฐ์", "").astype('int')
data.loc[data['Paid Plan'] == 0, 'Paid Plan'] = np.nan
data.head()
# -
# ** 19. '์์ฃผ ์ฌ๋ถ'๋ฅผ ์ซ์๋ก ํํํ๊ธฐ**
#
# - "'์์ฃผ ์ฌ๋ถ' ์ปฌ๋ผ์ ์ซ์๋ก ์ ๋ฆฌํด์ฃผ์ธ์. ํ์ฌ 1) ์ฃผ 2ํ, 2) ์ฃผ 1ํ, 3) ์ 2ํ, 4) ์ 1ํ 5) ์ ๋ง์ฌ์ผ๋ก ๋์ด ์์ต๋๋ค๋ง, ์ด๋ฅผ ์ ๊ธฐ์ค ์์ฃผ ํ์๋ก ํต์ผํด์ฃผ์ธ์. ์ฌ๊ธฐ์ ์์ 30์ผ๋ก, ์ฃผ๋ 4์ฃผ๋ก ๊ณ ์ ํฉ๋๋ค."
# - "๊ฐ๋ น 1) ์ฃผ 2ํ๋ 8, 2) ์ฃผ 1ํ๋ 4, 3) ์ 2ํ๋ 2, 4) ์ 1ํ๋ 1, 5) ์ ๋ง์ฌ์ 0์ผ๋ก ํํํ๋ฉด ๋ฉ๋๋ค."
data['Drinking'].unique()
# +
# Convert the categorical drinking-frequency labels to drinks-per-month
# (a week is fixed at 4 per month, per the exercise statement):
# twice a week -> 8, once a week -> 4, twice a month -> 2,
# once a month -> 1, never -> 0.
# NOTE(review): the matched literals are mojibake of the Korean labels in
# the CSV; they must stay byte-identical to the raw data.
data.loc[data['Drinking'] == '์ฃผ 2ํ', 'Drinking'] = 8
data.loc[data['Drinking'] == '์ฃผ 1ํ', 'Drinking'] = 4
data.loc[data['Drinking'] == '์ 2ํ', 'Drinking'] = 2
data.loc[data['Drinking'] == '์ 1ํ', 'Drinking'] = 1
data.loc[data['Drinking'] == '์ ๋ง์ฌ', 'Drinking'] = 0
print(data["Drinking"].unique())
data.head()
# -
# # Advanced-Level
# ** 20.ํด๋ํฐ ๋ฒํธ ์ ๋ฆฌํ๊ธฐ **
#
# - "๋ค์ํ ํํ์ผ๋ก ๋์ด์๋ ํด๋ํฐ ๋ฒํธ๋ฅผ 010-xxxx-xxxx ๋ก ํต์ผํด์ฃผ์ธ์. ๊ฐ๋ น ํด๋ํฐ ๋ฒํธ์ ํ์ดํ(-)์ด ์์ผ๋ฉด ๋ฃ์ด์ฃผ์๋ฉด ๋ฉ๋๋ค."
#
# - "๋ํ unknown์ผ๋ก ๋์ด์๊ฑฐ๋ ๋น์ด์๋ ๊ฐ์ NaN์ผ๋ก ์ฒ๋ฆฌํด์ฃผ์ธ์."
data['Phone Number'].value_counts().head(10)
# +
data['Phone Number'] = data["Phone Number"].str.replace("-", "")
def clean_phone_number(number):
    """Format an 11-digit phone string as 010-xxxx-xxxx; NaN passes through."""
    if pd.isnull(number):
        return number
    # Re-insert hyphens after the 3-digit carrier prefix and the 4-digit
    # middle group.
    return "-".join((number[:3], number[3:7], number[7:]))
phone_number = "01077113553"
# phone_number = np.nan
clean_phone_number(phone_number)
# +
data['Phone Number'] = data['Phone Number'].str.replace("-", "")
data['Phone Number'] = data['Phone Number'].apply(clean_phone_number)
print(data.shape)
data.head()
# -
data['Phone Number'].value_counts().head(10)
# ** 21. ๋ ์ง๋ฅผ ์ฌ์ฉ ๊ฐ๋ฅํ๊ฒ ์ ๋ฆฌํ๊ธฐ **
#
# - "ํ์ฌ 'ํ์ ๊ฐ์
์ผ' ์ปฌ๋ผ๊ณผ 'ํ์ ์ ๋ณด ๊ฐฑ์ ์ผ' ์ปฌ๋ผ์ 20xx๋
xx์ xx์ผ ๊ณผ ๊ฐ์ ํ์์ผ๋ก ๋์ด์์ต๋๋ค."
#
# - "์ด ๋ฐ์ดํฐ๋ฅผ ํ์ฌ ํ๋ค์ค์์๋ ๋ ์ง ์ปฌ๋ผ์ด ์๋ ๋ฌธ์์ด(ํ
์คํธ) ์ปฌ๋ผ์ผ๋ก ์ธ์ํ๊ณ ์๋๋ฐ, ์ด ์ปฌ๋ผ์ ๋ ์ง ์ปฌ๋ผ์ผ๋ก ์ธ์ํ ์ ์๋๋ก ์์ ํด์ฃผ์ธ์."
# ** 22. ๋ ์ง๋ฅผ ๊ธฐ์ค์ผ๋ก ๋ถ์ํ๊ธฐ **
#
# "21๋ฒ์์ ๋ ์ง ์ปฌ๋ผ์ ๋ง๋ค์์ผ๋ฉด ๋ค์์ ๋ถ์ํด์ฃผ์ธ์."
#
# - 1) ์๋ณ ์ ์ฒด ํ์ ๊ฐ์
๋
# - 2) ์ ๋ณ ์ ๋ฃ/๋ฌด๋ฃ ํ์ ๊ฐ์
๋์ ์ฐจ์ด
# - 3) ์ ๋ณ ๋จ์ฑ/์ฌ์ฑ ํ์ ๊ฐ์
๋์ ์ฐจ์ด
# ** 23. ํ์ ์ ๋ณด๊ฐ ๋ง๋์ง ์ฌ๋ถ๋ฅผ ํ์ธํ๋ ์ปฌ๋ผ์ ๋ง๋ค๊ธฐ **
#
# "๋ ์ ํํ ๋ฐ์ดํฐ ๋ถ์์ ์ํด์๋, ํ์ฌ๊น์ง ๊ณ ๊ฐ๋์ด ๊ธฐ์
ํ ํ์ ์ ๋ณด๊ฐ ์ ํํ์ง๋ฅผ ํ์ธํ๋ ์์
์ด ํ์ํฉ๋๋ค. ๋ถ์ํ์์ ๋ฐ์ดํฐ๋ฅผ ๋ค์๊ณผ ๊ฐ์ด ์ ๋ฆฌํ๋ฉด, ์คํผ๋ ์ด์
ํ๊ณผ ์ฝ์นญ ํ์ด ํ์
์ ํตํด์ ๊ณ ๊ฐ ์ ๋ณด๋ฅผ ๊ฐ์ ํ๊ณ , ๋ ์ข์ ์๋น์ค๋ฅผ ์ ๊ณตํ ์ ์์ ๊ฒ ๊ฐ์ต๋๋ค. ๋ค์์ ๋ด์ฉ์ด ๋ด๊ฒจ์๋ ์๋ก์ด ์ปฌ๋ผ์ ํ๋ ๋ง๋ค์ด์ฃผ์ธ์. ํด๋น ์ปฌ๋ผ์๋ reject / counselling / duplicated / confirmed ๋ผ๋ ๊ฒฐ๊ณผ๊ฐ์ด ๋ค์ด๊ฐ์ผ ํฉ๋๋ค."
#
# ๋ค์์ ๊ฒฝ์ฐ์๋ ์ฌ๊ธฐ์
์ ์์ฒญํ๋ค. (reject)
# * ์ ํ๋ฒํธ๊ฐ ๋น์ด์๋ ๊ฒฝ์ฐ
#
# ๋ค์์ ๊ฒฝ์ฐ๋ ํธ๋ ์ด๋์ ์๋ด์ ์ ๋ํ๋ค. (counselling)
# * ์๋ด ์์ฒญ(counselling)์ด True์ธ ์ฌ๋.
#
# ๋ค์์ ๊ฒฝ์ฐ์๋ ์ค๋ณต์ ํ์ธํ๋ค. (duplicated)
# * ๋์ผํ ์ด๋ฆ์ ๋์ผํ ์ ํ๋ฒํธ๋ฅผ ์ฐพ์๋ธ๋ค. ์ด๋ ์ค๋ณต๋์๋ค๊ณ ๊ฐ์ ํ๋ค.
#
# ๋๋จธ์ง๋ ๋ฌธ์ ๊ฐ ์๋ค. (confirmed)
# **24. VIP ์ฐพ์๋ด๊ธฐ**
#
# "๋ค์์ ๊ณ ๊ฐ์ ํน๋ณ ๊ด๋ฆฌ ๋์์ผ๋ก ์ง์ ํฉ๋๋ค. ํน๋ณ ๊ด๋ฆฌ ๋์์ด๋ผ๊ณ ํจ์, TKX ์๋น์ค์ VIP ํ๋์ ๊ตฌ๋งคํ ํ๋ฅ ์ด ๋์ ๋ถ๋ค์ ์๋ฏธํฉ๋๋ค."
#
# 1. ํธ๋ ์ด๋์ ์๋ด์ ์์ฒญํ ์ฌ๋. (counselling)
# 2. ์๋ด์ ์์ฒญํ์ง ์์ ์ฌ๋ ์ค, (ํ์ฌ ์ฒด์ค - ๋ชฉํ ์ฒด์ค) ์ด ๊ฐ์ฅ ๋์ ์์ 1,000 ๋ช
.
#
# "์ด ์ฌ๋๋ค์ ํน๋ณ๊ด๋ฆฌ ๋์์ผ๋ก ์ง์ ํ๋ฉฐ, VIP๋ผ๋ ์ด๋ฆ์ ์ปฌ๋ผ์ True ๊ฐ์ ๋ฃ์ผ๋ฉด ๋ฉ๋๋ค. ์ดํ์๋ ์คํผ๋ ์ด์
/์ฝ์น ํ์ด ํด๋น ๊ณ ๊ฐ๋์ ๊ฐ๋ณ ์ปจํํ์ฌ, ์ ๊ทน์ ์ผ๋ก VIP ํ๋์ ๊ตฌ๋งคํ์ค ์ ์๋๋ก ๋
ธ๋ ฅํ ์๊ฐ์
๋๋ค. (ํน๋ณ๊ด๋ฆฌ ๋์์ด ์๋ ๋ถ๋ค์ VIP ์ปฌ๋ผ์ False ๊ฐ์ ๋ฃ์ผ๋ฉด ๋ฉ๋๋ค)"
#
# "๋ํ ์ ์ฒด ๋ฐ์ดํฐ์๋ ๋ณ๊ฐ๋ก, VIP ๊ณ ๊ฐ๋ค๋ง์ ๋ฐ๋ก ๋ฝ์๋ด์ CSVํ์ผ๋ก ์ ์ฅํ ์ ์๋ค๋ฉด ์ข๊ฒ ์ต๋๋ค."
| data-analysis/python/pandas/001_data-handling-in-membership(TLX Fitness).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sb
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import os
print(os.listdir("C:\\Users\\ajaohri\\Desktop\\all"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
url_train = 'C:\\Users\\ajaohri\\Desktop\\all/train.csv'
titanic = pd.read_csv(url_train)
titanic.head()
# + _uuid="665266dbd822395e107da631e7dd9e32c0bcc0b2"
#Checking if our target variable is binary or not
sb.countplot(x='Survived',data=titanic)
# + _uuid="81a0a927a84f1246d418d456bb3773d9140d9e41"
#Checking Null values
titanic.isnull().sum()
# + [markdown] _uuid="2024732dee32951982eef3722ced7da916dae66d"
# Dropping PassengerId, Name and Ticket because they are unique.
# Dropping Cabin because of too many null values.
# + _uuid="dd0a12115c8d68d3107ca3623765a5358f155621"
titanic_data = titanic.drop(['PassengerId','Name','Ticket'],1)
titanic_data.head()
# + [markdown] _uuid="36a374a4f72c82f8fb1a1502fae78c2f6982bfe2"
# Now need to take care of the missing data for Age variable. Need to approximate- one way, to take mean age for all the missing values.
# Or, find if age is related to Pclass, and assign respective means.
# + _uuid="1a1d216e2e893142d2c7d84ded41a1035745aa59"
sb.boxplot(x='Pclass',y='Age',data=titanic_data)
# + [markdown] _uuid="9d2ecd08127612821d04bebc2e1ab70aba90f07f"
# If Passenger belongs to Pclass 3, age assigned is 24, if 2, age is assigned 29, if 1 then 37.
# + _uuid="1d9761bd68df450c0f029e5957b05c471d7229ab"
def age_approx(cols):
    """Impute a missing Age from the row's Pclass.

    ``cols`` is an (Age, Pclass) pair; a non-null age is returned as-is.
    Imputation values (1st: 37, 2nd: 29, 3rd: 24) were read off the
    Pclass/Age boxplot earlier in the notebook.
    """
    age, pclass = cols[0], cols[1]
    if not pd.isnull(age):
        return age
    class_medians = {1: 37, 2: 29}
    return class_medians.get(pclass, 24)
# + _uuid="d9ca06d8daf5db8c4b930f62e6ed9e2ad6ecbc42"
titanic_data['Age'] = titanic_data[['Age', 'Pclass']].apply(age_approx, axis=1)
titanic_data.isnull().sum()
# + _uuid="72d5b1ee867b5a51bf4f9c374ff16a82d58fdeff"
def cabin_approx(cols):
    """Map a Cabin's deck letter to an ordinal score; missing cabins score 0.

    ``cols`` is a (Cabin, Pclass) pair (Pclass is unused here).

    Bug fix: the original tested ``cabin[0] == ('C' or 'B')``, which Python
    evaluates as ``cabin[0] == 'C'`` — so decks B, D, E, T and G silently
    fell through to 0. Proper membership tests restore the intended mapping:
    C/B -> 3, A/D/E/T -> 2, F/G -> 1, anything else -> 0.
    """
    cabin = cols[0]
    if pd.isnull(cabin):
        return 0
    deck = cabin[0]
    if deck in ('C', 'B'):
        return 3
    if deck in ('A', 'D', 'E', 'T'):
        return 2
    if deck in ('F', 'G'):
        return 1
    return 0
# + _uuid="f26f08dccce1e85fbf6079a505670188951a7ff7"
titanic_data['Cabin'] = titanic_data[['Cabin', 'Pclass']].apply(cabin_approx, axis=1)
#titanic_data.isnull().sum()
sb.boxplot(x='Cabin',y='Fare',data=titanic_data)
# + [markdown] _uuid="da008bd8b9170d6fd958360647a543fe41331875"
# There are two null values in Embarked, we can just drop them.
# + _uuid="91c5f9f21875dba391a3d1335cbcf3576c9c1394"
titanic_data.dropna(inplace=True)
titanic_data.isnull().sum()
# + [markdown] _uuid="443a9613cff934989a6b907b3fc400641bc36c1f"
# Getting dummy variables from categorical ones.
# + _uuid="f8a2cc2ddac42f0f65e6fce118d1543051278b54"
gender = pd.get_dummies(titanic_data['Sex'],drop_first=True)
gender.head()
# + _uuid="719cdef937d0f47072397ef48a53658cb28d60e0"
embark_location = pd.get_dummies(titanic_data['Embarked'],drop_first=True)
embark_location.head()
# + _uuid="c4152b56fb2017820195f5ff6d8361e9d0ad35f3"
titanic_data.drop(['Sex','Embarked'],axis=1,inplace=True)
titanic_data.head()
# + _uuid="5e118c2478367c0c4aa70a35ad1f080988e91e82"
titanic_dmy = pd.concat([titanic_data, gender, embark_location],axis=1)
titanic_dmy.tail()
# + _uuid="b4f7b389f0feaa78c2eb169a1bd77022e73941e6"
#Checking for correlation between variables.
sb.heatmap(titanic_dmy.corr(),square=True)
#print(titanic_dmy.corr())
# + _uuid="aeba535810a54167f98d0d613a55280e437636e6"
# `.ix` was removed in pandas 1.0 and `sklearn.cross_validation` was removed
# in scikit-learn 0.20 — this cell no longer runs on current libraries.
# Select the same columns positionally with `.iloc` and import the splitter
# from its modern home, `sklearn.model_selection`.
X = titanic_dmy.iloc[:, [1, 2, 3, 4, 5, 6, 7, 8, 9]].values
y = titanic_dmy.iloc[:, 0].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.1, random_state=2)
# + [markdown] _uuid="629f1165dd4b1029310f01907983996bb63f9486"
# The train test split is done for parameter tuning.
# We now deploy the models.
# -
# !pip install xgboost
# + _uuid="69dc2c308b861bbf497646ee8845b7e44353cce4"
from sklearn.ensemble import RandomForestClassifier
#from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import VotingClassifier
clf1 = SVC(kernel='linear',C=1.0,random_state=3)
clf2 = XGBClassifier(random_state=3)
clf3 = RandomForestClassifier(n_estimators=30, max_depth=10, random_state=300)
eclf = VotingClassifier(estimators=[('clf1', clf1), ('clf2', clf2),('clf3',clf3)], voting='hard')
eclf.fit(X_train, y_train)
y_pred = eclf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(eclf.score(X_test, y_test))
# + [markdown] _uuid="ed2f022815f109f5ba1f09ee7b94ef55dfd39079"
# **Now taking in Competition Data.**
# + _uuid="fef001be2ae538b372fc6f1f63f9dad75ad3a1e6"
url = 'C:\\Users\\ajaohri\\Desktop\\all/test.csv'
test = pd.read_csv(url)
test.head()
# + _uuid="600036b0cfdc33ae8c076dbafbce7435f51f8b31"
test.isnull().sum()
# + [markdown] _uuid="6c5f8a8a2c2862b2fc9a2591055179afb4695519"
# There are 86 null values in Age, so we approximate them like we did earlier.
# There are 327 null values in Cabin, so we drop it altogether.
# There is 1 null value in Fare, so we approximate it according to the median of each class of the null position.
# + _uuid="11e35742344457f95e8db9df1995f475dfef16d2"
test.describe()
# + _uuid="9fdd6e4b150c5b84bf24cd3270d3f7b844e4f7a6"
sb.set(rc={'figure.figsize':(11.7,8.27)})
ax = sb.boxplot(x='Pclass',y='Fare',data=test,width=0.9)
# + _uuid="72297e28e75e9c43ee77f90eead27f82e3f71ec0"
def fare_approx(cols):
    """Impute a missing Fare from the row's Pclass.

    ``cols`` is a (Fare, Pclass) pair; a non-null fare is returned as-is.
    Fill values (1st: 55, 2nd: 20, 3rd: 10) were read off the Pclass/Fare
    boxplot of the test set.
    """
    fare, pclass = cols[0], cols[1]
    if not pd.isnull(fare):
        return fare
    class_fares = {1: 55, 2: 20}
    return class_fares.get(pclass, 10)
# + [markdown] _uuid="33f25a4f557198526a07151fd434bd12f100647c"
# **Cleaning up the test data:**
# Dropping variables, approximating age and fare, dummy variables.
# + _uuid="a33c2016fda25797c804437522307bf154c3571e"
# Apply the same cleanup pipeline to the competition test set:
# drop unique identifiers, impute Age/Fare per class, score Cabin decks,
# then one-hot encode Sex and Embarked.
test_data = test.drop(['Name','Ticket'],1)
test_data['Age'] = test_data[['Age', 'Pclass']].apply(age_approx, axis=1)
test_data['Fare'] = test_data[['Fare','Pclass']].apply(fare_approx, axis=1)
test_data['Cabin'] = test_data[['Cabin','Pclass']].apply(cabin_approx, axis=1)
#
gender_test = pd.get_dummies(test_data['Sex'],drop_first=True)
embark_location_test = pd.get_dummies(test_data['Embarked'],drop_first=True)
test_data.drop(['Sex','Embarked'],axis=1,inplace=True)
test_dmy = pd.concat([test_data, gender_test, embark_location_test],axis=1)
#test_dmy.describe()
# NOTE(review): dropna runs on `test_data` AFTER `test_dmy` was built, so it
# cannot remove NaNs from `test_dmy`; dropping competition rows would also
# desync the submission's PassengerId column — confirm this is intentional.
test_data.dropna(inplace=True)
test_dmy.isnull().sum()
# + _uuid="db78c4f973b1f03f1d787695dd4a9cbb8722b949"
test_dmy.head()
# + _uuid="d9e72afd28cfa46f5e7fe00ad9f255b442cfcad0"
X_competition = test_dmy.ix[:,(1,2,3,4,5,6,7,8,9)].values
# + [markdown] _uuid="ad8b841b8692ac5904b749576659c9f165040c51"
# **Prediction for Competition Data**
# + _uuid="9d040b85cad2c58ebc515e8116284c6f643741a7"
y_comp = eclf.predict(X_competition)
# + _uuid="22be9f52992fc8d9078ffa6718c0a888007a1f00"
submission = pd.DataFrame({'PassengerId':test_data['PassengerId'],'Survived':y_comp})
submission.head()
# + _uuid="156ae1f3abb506e8c7bf4fb254543e6b230ec212"
filename = 'Titanic Predictions 1.csv'
submission.to_csv(filename,index=False)
print('Saved file: ' + filename)
# + _uuid="ca2b752b0d26dd1b2a2b2e4e3606aeeda81b1225"
os.getcwd()
# -
| titanic forked.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solr Client
# +
from ltr.client import SolrClient
client = SolrClient()
import numpy as np
# -
# # Download & Build Index (run once)
#
# If you don't already have the downloaded dependencies; if you don't have TheMovieDB data indexed run this
# +
from ltr import download
corpus='http://es-learn-to-rank.labs.o19s.com/tmdb.json'
judgments='http://es-learn-to-rank.labs.o19s.com/title_judgments_binary.txt'
download([corpus, judgments], dest='data/');
# +
from ltr.index import rebuild
from ltr.helpers.movies import indexable_movies
movies=indexable_movies(movies='data/tmdb.json')
rebuild(client, index='tmdb', doc_src=movies)
# -
# ## Features for movie titles
#
# We'll be searching movie titles (think searching for a specific movie on Netflix). And we have a set of judgments around the appropriatte movie to return. IE search for "Star Wars" return good star wars matches, in quality order...
#
# These cover various aspects of the problem (searching title by phrase, title bm25 score, release date, etc). We'll use this to explore and analyze a simple model
# +
client.reset_ltr(index='tmdb')
ftr_config = [
#1
{
"name" : "title_bm25",
"store": "title",
"class" : "org.apache.solr.ltr.feature.SolrFeature",
"params" : {
"q" : "title:(${keywords})"
}
},
#2
{
"name" : "overview_bm25",
"store": "title",
"class" : "org.apache.solr.ltr.feature.SolrFeature",
"params" : {
"q" : "overview:(${keywords})"
}
},
{#3
"name" : "release_year",
"store": "title",
"class" : "org.apache.solr.ltr.feature.SolrFeature",
"params" : {
"q" : "{!func}def(release_year,2000)"
}
}
]
client.create_featureset(index='tmdb', name='title', ftr_config=ftr_config)
# -
# ## Training Set Generation
#
# Log out features for each of the above queries out to a training set file
# +
from ltr.judgments import judgments_open
from ltr.log import FeatureLogger
from itertools import groupby
ftr_logger=FeatureLogger(client, index='tmdb', feature_set='title')
with judgments_open('data/title_judgments_binary.txt') as judgments:
for qid, query_judgments in groupby(judgments, key=lambda j: j.qid):
ftr_logger.log_for_qid(qid=qid,
judgments=query_judgments,
keywords=judgments.keywords(qid))
training_set=ftr_logger.logged
# +
import numpy as np
from ltr.judgments import judgments_from_file, judgments_to_nparray
def pairwise_transform(features, predictors):
    """Pairwise (RankSVM-style) transform of a pointwise training set.

    Informed by https://gist.github.com/agramfort/2071994

    For every ordered pair of samples from the same query whose grades
    differ, emit the feature difference and the grade difference. Both
    (i, j) and (j, i) are emitted, keeping the +/- classes balanced.
    Column 0 of ``predictors`` is the grade, column 1 the query id.
    """
    GRADE = 0
    QID = 1
    assert features.shape[0] == predictors.shape[0]
    assert predictors.shape[1] == 2
    assert features.shape[1] > 0

    diff_features = []
    diff_predictors = []
    sample_count = features.shape[0]
    for left in range(sample_count):
        for right in range(sample_count):
            same_query = predictors[left][QID] == predictors[right][QID]
            same_grade = predictors[left][GRADE] == predictors[right][GRADE]
            if same_query and not same_grade:
                diff_predictors.append([predictors[left][GRADE] - predictors[right][GRADE]])
                diff_features.append(features[left, :] - features[right, :])

    return np.array(diff_features), np.array(diff_predictors)
def samples_from_training_data(training_set):
    """Turn logged judgments into pairwise samples for a linear ranker.

    Returns ``(features, labels, scaler)``: features are standardized then
    pairwise-transformed, labels are the flattened grade differences, and
    the fitted StandardScaler is returned so the same normalization can be
    pushed into the Solr model definition later.
    """
    features, predictors = judgments_to_nparray(training_set)

    # Standardize feature columns before taking pairwise differences.
    print("Scaling")
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    scaler.fit(features)
    standardized = scaler.transform(features)

    print("Pairwise Transform")
    pair_features, pair_predictors = pairwise_transform(standardized, predictors)
    return pair_features, pair_predictors.ravel(), scaler
features, predictors, scaler = samples_from_training_data(training_set)
features
# -
from sklearn import svm, linear_model
model = svm.LinearSVC(max_iter=1000, verbose=1)
model.fit(features, predictors)
model.coef_
# +
linear_model = {
"store": "title",
"class": "org.apache.solr.ltr.model.LinearModel",
"name": "movie_titles",
"features": [
],
"params": {
"weights": {
}
}
}
import math
ftr_model = {}
ftr_names = [ftr['name'] for ftr in ftr_config]
for idx, ftr_name in enumerate(ftr_names):
config = {
"name": ftr_name,
"norm": {
"class": "org.apache.solr.ltr.norm.StandardNormalizer",
"params": {
"avg": str(scaler.mean_[idx]),
"std": str(math.sqrt(scaler.var_[idx]))
}
}
}
linear_model['features'].append(config)
linear_model['params']['weights'][ftr_name] = model.coef_[0][idx]
linear_model
# -
import json
client.submit_model(featureset='title',
index='tmdb',
model_name='movie_titles',
solr_model=linear_model)
from ltr import search
search(client, keywords='rambo', modelName='movie_titles')
| notebooks/solr/tmdb/svmrank.ipynb |