code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import yt
import numpy as np
# This notebook shows how to use yt to make plots and examine FITS X-ray images and events files.
# ## Sloshing, Shocks, and Bubbles in Abell 2052
# This example uses data provided by [<NAME>](http://hea-www.cfa.harvard.edu/~srandall/), presented originally in [<NAME>., <NAME>., <NAME>., et al. 2011, ApJ, 737, 99](https://ui.adsabs.harvard.edu/abs/2011ApJ...737...99B). They consist of two files, a "flux map" in counts/s/pixel between 0.3 and 2 keV, and a spectroscopic temperature map in keV.
# Load the flux map as the master file; the temperature map rides along as an
# auxiliary file so both images share one dataset and coordinate system.
ds = yt.load("xray_fits/A2052_merged_0.3-2_match-core_tmap_bgecorr.fits",
             auxiliary_files=["xray_fits/A2052_core_tmap_b1_m2000_.fits"])
# Since the flux and projected temperature images are in two different files, we had to use one of them (in this case the "flux" file) as a master file, and pass in the "temperature" file with the `auxiliary_files` keyword to `load`.
# Next, let's derive some new fields for the number of counts, the "pseudo-pressure", and the "pseudo-entropy":
# +
def _counts(field, data):
    """Counts per pixel: flux (counts/s/pixel) * pixel scale * exposure time.

    The exposure time is supplied by callers through the "exposure_time"
    field parameter (see the SlicePlot / sphere setup below).
    """
    exposure_time = data.get_field_parameter("exposure_time")
    return data["fits", "flux"]*data["fits", "pixel"]*exposure_time
ds.add_field(("gas","counts"), function=_counts, sampling_type="cell", units="counts", take_log=False)
def _pp(field, data):
    """Pseudo-pressure: sqrt(counts) * T.

    Counts stand in for density (S_X ~ rho^2, so sqrt is a density proxy),
    making this a stand-in for n * T.
    """
    return np.sqrt(data["gas", "counts"])*data["fits", "projected_temperature"]
ds.add_field(("gas","pseudo_pressure"), function=_pp, sampling_type="cell", units="sqrt(counts)*keV", take_log=False)
def _pe(field, data):
    """Pseudo-entropy: T * counts**(-1/3), analogous to T / n^(2/3)."""
    return data["fits", "projected_temperature"]*data["gas", "counts"]**(-1./3.)
ds.add_field(("gas","pseudo_entropy"), function=_pe, sampling_type="cell", units="keV*(counts)**(-1/3)", take_log=False)
# -
# Here, we're deriving a "counts" field from the "flux" field by passing it a `field_parameter` for the exposure time of the image and multiplying by the pixel scale. Second, we use the fact that the surface brightness is strongly dependent on density ($S_X \propto \rho^2$) to use the counts in each pixel as a "stand-in". Next, we'll grab the exposure time from the primary FITS header of the flux file and create a `YTQuantity` from it, to be used as a `field_parameter`:
exposure_time = ds.quan(ds.primary_header["exposure"], "s")
# Now, we can make the `SlicePlot` object of the fields we want, passing in the `exposure_time` as a `field_parameter`. We'll also set the width of the image to 250 pixels.
slc = yt.SlicePlot(ds, "z",
                   [("fits", "flux"), ("fits", "projected_temperature"), ("gas", "pseudo_pressure"), ("gas", "pseudo_entropy")],
                   origin="native", field_parameters={"exposure_time":exposure_time})
slc.set_log(("fits", "flux"),True)  # flux spans decades -> log scale
slc.set_log(("gas", "pseudo_pressure"),False)  # derived maps read better linearly
slc.set_log(("gas", "pseudo_entropy"),False)
slc.set_width(250.)  # width in native image pixels
slc.show()
# To add the celestial coordinates to the image, we can use `PlotWindowWCS`, if you have a recent version of AstroPy (>= 1.3) installed:
from yt.frontends.fits.misc import PlotWindowWCS
# Wrap the slice plot so the axes show celestial (RA/Dec) coordinates.
wcs_slc = PlotWindowWCS(slc)
wcs_slc.show()
# We can make use of yt's facilities for profile plotting as well.
v, c = ds.find_max(("fits", "flux")) # Find the maximum flux and its center
my_sphere = ds.sphere(c, (100.,"code_length")) # Radius of 100 pixels (code_length units)
# The sphere needs the same exposure_time field parameter as the plots above
# so the derived "counts" field can be evaluated inside it.
my_sphere.set_field_parameter("exposure_time", exposure_time)
# Such as a radial profile plot:
radial_profile = yt.ProfilePlot(my_sphere, "radius",
                                ["counts","pseudo_pressure","pseudo_entropy"],
                                n_bins=30, weight_field="ones")  # "ones" -> plain average per bin
radial_profile.set_log("counts", True)
radial_profile.set_log("pseudo_pressure", True)
radial_profile.set_log("pseudo_entropy", True)
radial_profile.set_xlim(3,100.)  # radii shown from 3 to 100 code_length units
radial_profile.show()
# Or a phase plot:
phase_plot = yt.PhasePlot(my_sphere, "pseudo_pressure", "pseudo_entropy", ["counts"], weight_field=None)
phase_plot.show()
# Finally, we can also take an existing [ds9](http://ds9.si.edu/site/Home.html) region and use it to create a "cut region", using `ds9_region` (the [pyregion](https://pyregion.readthedocs.io) package needs to be installed for this):
from yt.frontends.fits.misc import ds9_region
reg_file = ["# Region file format: DS9 version 4.1\n",
            "global color=green dashlist=8 3 width=3 include=1 source=1 fk5\n",
            "circle(15:16:44.817,+7:01:19.62,34.6256\")"]
# Write the region file with a context manager so the handle is closed (and
# the data flushed to disk) even if writelines() raises -- the original used
# a bare open()/close() pair with no error handling.
with open("circle.reg", "w") as f:
    f.writelines(reg_file)
# Turn the ds9 region into a yt cut region; it needs the same exposure_time
# field parameter so the derived "counts" field works inside it.
circle_reg = ds9_region(ds, "circle.reg", field_parameters={"exposure_time":exposure_time})
# This region may now be used to compute derived quantities:
print(circle_reg.quantities.weighted_average_quantity("projected_temperature", "counts"))
# Or used in projections:
# Projection of the same four fields, restricted to the ds9 circle region and
# summed along the line of sight (appropriate for image data, not density).
# Fixed a syntax error from the original: the last tuple was closed as
# `("gas", "pseudo_entropy"])` with the bracket and parenthesis swapped.
prj = yt.ProjectionPlot(ds, "z",
                        [("fits", "flux"), ("fits", "projected_temperature"),
                         ("gas", "pseudo_pressure"), ("gas", "pseudo_entropy")],
                        origin="native", field_parameters={"exposure_time":exposure_time},
                        data_source=circle_reg,
                        method="sum")
prj.set_log(("fits", "flux"),True)
prj.set_log(("gas", "pseudo_pressure"),False)
prj.set_log(("gas", "pseudo_entropy"),False)
prj.set_width(250.)
prj.show()
# ## The Bullet Cluster
# This example uses an events table file from a ~100 ks exposure of the "Bullet Cluster" from the [Chandra Data Archive](http://cxc.harvard.edu/cda/). In this case, the individual photon events are treated as particle fields in yt. However, you can make images of the object in different energy bands using the `setup_counts_fields` function.
from yt.frontends.fits.api import setup_counts_fields
# `load` will handle the events file as FITS image files, and will set up a grid using the WCS information in the file. Optionally, the events may be reblocked to a new resolution by setting the `"reblock"` parameter in the `parameters` dictionary in `load`. `"reblock"` must be a power of 2.
ds2 = yt.load("xray_fits/acisf05356N003_evt2.fits.gz", parameters={"reblock":2})
# `setup_counts_fields` will take a list of energy bounds (emin, emax) in keV and create a new field from each where the photons in that energy range will be deposited onto the image grid.
ebounds = [(0.1,2.0),(2.0,5.0)]  # (emin, emax) energy bands in keV
setup_counts_fields(ds2, ebounds)
# The "x", "y", "energy", and "time" fields in the events table are loaded as particle fields. Each one has a name given by "event\_" plus the name of the field:
dd = ds2.all_data()
print (dd["io", "event_x"])
print (dd["io", "event_y"])
# Now, we'll make a plot of the two counts fields we made, and pan and zoom to the bullet:
slc = yt.SlicePlot(ds2, "z", [("gas", "counts_0.1-2.0"), ("gas", "counts_2.0-5.0")], origin="native")
slc.pan((100.,100.))  # shift the view toward the bullet
slc.set_width(500.)
slc.show()
# The counts fields can take the field parameter `"sigma"` and use [AstroPy's convolution routines](https://astropy.readthedocs.io/en/latest/convolution/) to smooth the data with a Gaussian:
slc = yt.SlicePlot(ds2, "z", [("gas", "counts_0.1-2.0"), ("gas", "counts_2.0-5.0")], origin="native",
                   field_parameters={"sigma":2.}) # This value is in pixel scale
slc.pan((100.,100.))
slc.set_width(500.)
slc.set_zlim(("gas", "counts_0.1-2.0"), 0.01, 100.)  # fix color ranges so the two bands are comparable
slc.set_zlim(("gas", "counts_2.0-5.0"), 0.01, 50.)
slc.show()
| doc/source/cookbook/fits_xray_images.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # String formatting
// There are several ways to concatenate/format objects to strings in Java,
// mostly depending if there are a fixed number of values or
// if the values are in a list or any other data structures.
//
// Let say we have some friends
//
// A record is a concise, immutable data carrier for a friend's name.
record Friend(String name) {}
var bob = new Friend("bob");
var ana = new Friend("ana");
var jul = new Friend("jul");
// ## With a fixed number of values
// If there is a fixed number of values, the concatenation using '+' is the
// most readable (ok, when you are used to it) and the fastest
//
// ### Concatenation with +
// Just do a '+' between the different values,
// this code is heavily optimized and will allocate only one String
//
System.out.println(bob.name() + ", " + ana.name() + ", " + jul.name());
// ### Concatenation with String.format()
// If you want more control on the formatting, you can use `String.format`
// that reuses the C formatting style
// But the method `format()` is quite slow.
//
// Records get a generated toString(), so "%s" prints e.g. "Friend[name=bob]".
System.out.println(String.format("%s, %s, %s", bob, ana, jul));
// printf combines format() and print in a single call.
System.out.printf("%s, %s, %s\n", bob, ana, jul);
// ## with a variable number of values
// If there is a variable number of values, you have two cases,
// depending if it's a collection of String or not
//
// Sample inputs: a list of plain strings and a list of Friend records.
var strings = List.of("bob", "ana", "jul");
var friends = List.of(bob, ana, jul);
// ### Concatenation with a +
// Never use '+' in this case, because the compiler is not smart enough
// to reuse the same buffer of characters for the whole loop, so it will
// create a new String for each loop trip.
//
// Concatenation with '+': same algorithm, demonstrating why a naive loop
// should be avoided. Works for any element type because '+' goes through
// String.valueOf on the right-hand operand.
String concatenate(List<?> list) {
    var result = "";
    var sep = "";
    for (var element : list) {
        result = result + sep + element;
        sep = ", ";
    }
    return result;
}
System.out.println(concatenate(strings));
System.out.println(concatenate(friends));
// ### Concatenation with a StringBuilder
// A StringBuilder is a modifiable version of String with an expandable buffer
// of characters. There is no notion of separators
//
// StringBuilder variant: a single growable character buffer, with the
// separator handled manually exactly as in the '+' version.
String concatenate(List<?> list) {
    var buffer = new StringBuilder();
    var sep = "";
    for (var element : list) {
        buffer.append(sep).append(element);
        sep = ", ";
    }
    return buffer.toString();
}
System.out.println(concatenate(strings));
System.out.println(concatenate(friends));
// > Don't use '+' inside a `append()`, you already have a StringBuilder, so use append() instead
//
// ### Concatenation with String.join()
// If you have an array of strings or a collection of strings, `String.join`
// is the simplest way to concatenate the items with a separator
//
// One call: String.join places the separator between items, never trailing.
System.out.println(String.join(", ", strings));
// ### Concatenation with a StringJoiner
// If you don't have a list of strings but a list of objects, you can use the
// `StringJoiner` which let you specify a separator and is implemented
// using expandable buffer of strings (`StringJoiner.add` only accepts strings).
//
// StringJoiner variant: the joiner owns the separator, so there is no manual
// separator bookkeeping. add() only accepts CharSequence, hence the explicit
// toString() on each element.
String concatenate(List<?> list) {
    var joiner = new StringJoiner(", ");
    for (var element : list) {
        joiner.add(element.toString());
    }
    return joiner.toString();
}
System.out.println(concatenate(strings));
System.out.println(concatenate(friends));
// ### Concatenation with a Stream
// If you use a `Stream` and the collector `joining`, it will use a `StringJoiner` internally.
//
import java.util.stream.Collectors;
// Collectors.joining(sep) uses a StringJoiner internally.
System.out.println(strings.stream().collect(Collectors.joining(", ")));
// Non-String elements must be mapped to String first, since joining needs CharSequence.
System.out.println(friends.stream().map(Friend::toString).collect(Collectors.joining(", ")));
| jupyter/chapter10-string_formatting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from scipy.io import wavfile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import Conv2D, MaxPool2D, Flatten, LSTM
from keras.layers import Dropout, Dense, TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from tqdm import tqdm
from python_speech_features import mfcc
import pickle
from keras.callbacks import ModelCheckpoint
class Config:
    """Bundle of feature-extraction and model hyper-parameters.

    Derived attributes:
      step       -- samples per 1/10-second window at `rate`
      model_path -- where the trained model is saved (models/<mode>.model)
      p_path     -- where the pickled features live  (pickles/<mode>.p)
    """
    def __init__(self, mode='conv', nfilt=26, nfeat=13, nfft=512, rate=16000):
        self.mode = mode
        self.nfilt = nfilt
        self.nfeat = nfeat
        self.nfft = nfft
        self.rate = rate
        # one tenth of a second worth of audio samples
        self.step = int(rate / 10)
        # artefact locations are derived from the mode name
        self.model_path = os.path.join('models', f'{mode}.model')
        self.p_path = os.path.join('pickles', f'{mode}.p')
def check_data():
    """Return the previously pickled Config (with cached features) for the
    current mode, or None when no pickle exists yet.

    Reads the module-level `config` for the pickle location.
    """
    if not os.path.isfile(config.p_path):
        return None
    print('loading existing data for {} model'.format(config.mode))
    with open(config.p_path, 'rb') as handle:
        return pickle.load(handle)
def build_rand_feat():
    """Build (x, y) training arrays of random MFCC windows, or reuse a pickle.

    Draws `n_samples` tenth-of-a-second windows from random clips, choosing
    the class according to `prob_dist`, then min/max-normalises the MFCCs.
    Relies on module-level globals: config, df, classes, class_dist,
    prob_dist, n_samples.
    """
    tmp = check_data()
    if tmp:
        # A cached Config was unpickled -- reuse its stored (x, y) tuple.
        return tmp.data[0], tmp.data[1]
    x = []
    y = []
    _min, _max = float('inf'), -float('inf')
    for _ in tqdm(range(n_samples)):
        # Pick a class in proportion to its share of audio, then a random
        # file of that class and a random window inside that file.
        rand_class = np.random.choice(class_dist.index, p=prob_dist)
        file = np.random.choice(df[df.label == rand_class].index)
        rate, wav = wavfile.read('clean/'+file)
        label = df.at[file, 'label']
        rand_index = np.random.randint(0, wav.shape[0]-config.step)
        sample = wav[rand_index:rand_index+config.step]
        X_sample = mfcc(sample, rate, numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
        # Track the global feature min/max for normalisation below.
        _min = min(np.amin(X_sample), _min)
        _max = max(np.amax(X_sample), _max)
        x.append(X_sample)
        y.append(classes.index(label))
    config.min = _min
    config.max = _max
    x, y = np.array(x), np.array(y)
    x = (x - _min) / (_max - _min)  # scale all features into [0, 1]
    if(config.mode == 'conv'):
        # Conv2D expects a trailing channel axis.
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2], 1)
    elif config.mode == 'time':
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2])
    # NOTE(review): num_classes is hard-coded to 10 -- presumably
    # len(classes) == 10 for this dataset; confirm before reusing.
    y = to_categorical(y, num_classes=10)
    config.data = (x,y)
    # Cache the whole Config (including the data) so the next run can skip
    # the feature extraction entirely.
    with open(config.p_path, 'wb') as handle:
        pickle.dump(config, handle, protocol=2)
    return x,y
def get_conv_model():
    """Build and compile the 2-D CNN used for the 'conv' mode.

    Expects the module-level `input_shape` of the form (time, mfcc, 1) and
    outputs a 10-way softmax over the instrument classes.
    """
    model = Sequential()
    model.add(Conv2D(16, (3,3), activation='relu', strides=(1,1), padding='same', input_shape=input_shape))
    model.add(Conv2D(32, (3,3), activation='relu', strides=(1,1), padding='same'))
    model.add(Conv2D(64, (3,3), activation='relu', strides=(1,1), padding='same'))
    model.add(Conv2D(128, (3,3), activation='relu', strides=(1,1), padding='same'))
    model.add(MaxPool2D(2,2))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.summary()
    # Labels are one-hot (to_categorical) over 10 mutually exclusive classes,
    # so the correct loss is categorical_crossentropy. The original used
    # binary_crossentropy, which treats each class output independently and
    # inflates the reported accuracy. This also matches get_recurrent_model().
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    return model
def get_recurrent_model():
    """Build and compile the LSTM model used for the 'time' mode.

    Expects the module-level `input_shape` of the form (time, mfcc) and
    outputs a 10-way softmax over the instrument classes.
    """
    model = Sequential()
    model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.5))
    # Per-timestep dense stack, flattened before the final classifier.
    model.add(TimeDistributed(Dense(64, activation='relu')))
    model.add(TimeDistributed(Dense(32, activation='relu')))
    model.add(TimeDistributed(Dense(16, activation='relu')))
    model.add(TimeDistributed(Dense(8, activation='relu')))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    return model
df= pd.read_csv('instruments.csv')
df.set_index('fname', inplace=True)
# Record each clip's duration in seconds.
for f in df.index:
    rate, signal = wavfile.read('clean/'+f)
    df.at[f, 'length'] = signal.shape[0]/rate
classes = list(np.unique(df.label))
class_dist = df.groupby(['label'])['length'].mean()
# Number of 0.1 s windows to sample: twice the total audio length / 0.1 s.
n_samples = 2 * int(df['length'].sum()/0.1)
prob_dist = class_dist / class_dist.sum()
# NOTE(review): `choices` is assigned but never used below.
choices = np.random.choice(class_dist.index, p=prob_dist)
# Pie chart of the mean clip length per class.
fig, ax = plt.subplots()
ax.set_title('Class Distribution', y=1.08)
ax.pie(class_dist, labels=class_dist.index, autopct='%1.1f%%',
       shadow=False, startangle=90)
ax.axis('equal')
plt.show()
config =Config(mode='conv')
# Build features and the matching model for the selected mode.
if config.mode == 'conv':
    x,y = build_rand_feat()
    y_flat = np.argmax(y, axis=1)  # back to integer labels for class weighting
    input_shape = (x.shape[1], x.shape[2],1)
    model = get_conv_model()
elif config.mode == 'time':
    x,y = build_rand_feat()
    y_flat = np.argmax(y, axis=1)
    input_shape = (x.shape[1], x.shape[2])
    model = get_recurrent_model()
# NOTE(review): positional arguments to compute_class_weight were removed in
# scikit-learn 0.24+ (now class_weight=..., classes=..., y=...). Also, Keras
# expects `class_weight` as a dict, not an array -- confirm against the
# installed library versions.
class_weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
# Keep only the best checkpoint by validation accuracy.
checkpoint = ModelCheckpoint(config.model_path, monitor='val_acc', verbose=1, mode='max', save_best_only=True, save_weights_only=False,
                             period=1)
print('model trainig started....')
model.fit(x, y, epochs=15, batch_size=32, shuffle=True, class_weight=class_weight, validation_split=0.1, callbacks=[checkpoint])
print('saving model..')
model.save(config.model_path)
print('model saved!')
| Audio Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Basic personal data for the f-string formatting examples.
nome = 'Demetrius'
idade = 30
altura = 1.66
peso = 79
ano_atual = 2020

# Derived values: birth year and body-mass index.
nascimento = ano_atual - idade
imc = peso / altura ** 2

# A single print call emitting three lines -- output is byte-identical to
# three separate prints.
print(
    f'{nome} tem {idade} anos e {altura} de altura.\n'
    f'{nome} pesa {peso} kg e seu Imc é {imc:.2f}\n'
    f'{nome} nasceu em {nascimento}'
)
# -
| Teste final 1 modulo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.9 64-bit
# name: python389jvsc74a57bd0303f4f94c3b299c215f3066923e10ebd3683e42f3be4d2a62ff11651779c3d00
# ---
# ## Transfer Learning using `ResNet50` on `CIFAR-10` imageset.
# ### 1. Importing libraries
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
# ### 2. Load data & preprocess.
(train_image, train_label),(val_image, val_label) = tf.keras.datasets.cifar10.load_data()
def preprocess_img(input_images):
    """Cast images to float32 and apply ResNet50's channel preprocessing
    (required for the pretrained ImageNet weights)."""
    input_images = input_images.astype('float32')
    output_images = tf.keras.applications.resnet50.preprocess_input(input_images)
    return output_images
train_x = preprocess_img(train_image)
val_x = preprocess_img(val_image)
# ### 3. Build and Compile the Model.
def construct_model():
    """ResNet50 feature extractor (ImageNet weights) plus a dense classifier.

    CIFAR-10's 32x32 inputs are upsampled 7x to the 224x224 resolution the
    pretrained ResNet50 expects.
    """
    inputs = tf.keras.layers.Input(shape=(32,32,3))
    resize = tf.keras.layers.UpSampling2D(size=(7,7))(inputs)
    feature_extractor = ResNet50(input_shape=(224,224,3), include_top=False, weights='imagenet')(resize)
    x = tf.keras.layers.GlobalAveragePooling2D()(feature_extractor)
    # NOTE(review): Flatten after GlobalAveragePooling2D looks redundant
    # (GAP already collapses the spatial axes); harmless but worth confirming.
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(1024, activation='relu')(x)
    x = tf.keras.layers.Dense(512, activation='relu', name='classification')(x)
    outputs = tf.keras.layers.Dense(10, activation='softmax')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model
model = construct_model()
# sparse_categorical_crossentropy: labels stay as integer class ids.
model.compile(optimizer='SGD', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
print(model.summary())
# ### 4. Model Train
#
# - Training is slow, so only 1 epoch is used here. Feel free to change.
history = model.fit(train_x, train_label, epochs=1, validation_data=(val_x, val_label), batch_size=64)
# ### Visualize Loss and Accuracy
# - With only one epoch of history, the plots below are not very informative.
import matplotlib.pyplot as plt
def plot_metrics(metric_name, title, ylim=5):
    """Plot a training metric and its validation counterpart from the
    module-level `history` returned by model.fit().

    Parameters
    ----------
    metric_name : str  -- key in history.history, e.g. 'loss' or 'accuracy'
    title       : str  -- plot title
    ylim        : num  -- upper y-axis limit (lower limit is 0)
    """
    plt.title(title)
    plt.ylim(0, ylim)
    plt.plot(history.history[metric_name], color='blue', label=metric_name)
    # Label fixed to match the history key ('val_loss', not 'valloss').
    plt.plot(history.history['val_' + metric_name], color='red', label='val_' + metric_name)
    # The labels above are only visible once a legend is drawn -- the
    # original set them but never called legend().
    plt.legend()
| cifar10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Abdel-Moussaoui/GAN-Master/blob/master/AutoEncoder1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ZsyTKQHIq2mP" colab_type="text"
#
#
# * We'll start simple, with a single fully-connected neural layer as encoder and as decoder:
#
#
# + id="TJXCTfoKrGhD" colab_type="code" colab={}
from keras.layers import Input, Dense
from keras.models import Model
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
# (sigmoid keeps outputs in [0, 1], matching the normalised pixel values)
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# + [markdown] id="WRclAlsIreYv" colab_type="text"
# * Let's also create a separate encoder model:
# + id="bafm2Svxrq8x" colab_type="code" colab={}
# this model maps an input to its encoded representation
# (it shares layers -- and therefore weights -- with `autoencoder`)
encoder = Model(input_img, encoded)
# + [markdown] id="CdTx5x1ervCN" colab_type="text"
# * As well as the decoder model:
# + id="abH39arHr0tb" colab_type="code" colab={}
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
# (so the standalone decoder reuses the trained weights)
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
# + [markdown] id="TmHjgFz_r2A2" colab_type="text"
#
#
# * Now let's train our autoencoder to reconstruct MNIST digits.
#
# * First, we'll configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer:
#
# + id="9iJR_lIot2HK" colab_type="code" colab={}
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# + [markdown] id="gdtUARdot5jB" colab_type="text"
# * Let's prepare our input data. We're using MNIST digits, and we're discarding the labels (since we're only interested in encoding/decoding the input images).
# + id="w3YzFA05sKWT" colab_type="code" colab={}
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
# + [markdown] id="PkVDBIdLsPBE" colab_type="text"
# * We will normalize all values between 0 and 1 and we will flatten the 28x28 images into vectors of size 784.
# + id="SboYDTeqr9Ce" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c144295d-898f-4412-ea1f-91e61191438a"
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print (x_train.shape)
print (x_test.shape)
# + [markdown] id="_SDN37q3sVaY" colab_type="text"
# * Now let's train our autoencoder for 50 epochs:
# + id="hX6paRoxsa9Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f5e66589-2ce6-47ad-fd7e-0877553fab49"
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
# + [markdown] id="V8qRwm5Xsdlh" colab_type="text"
# * After 50 epochs, the autoencoder seems to reach a stable train/test loss value of about 0.11. We can try to visualize the reconstructed inputs and the encoded representations. We will use Matplotlib.
# + id="9MAIDxUjs91r" colab_type="code" colab={}
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# + id="witMfLy_shuE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="48ae558c-21a6-4ede-8ca0-7d90e86b7b08"
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
| AutoEncoder1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Gather the user's data interactively; the extra fields below are currently
# disabled (commented out) but kept for the exercise.
nome = str(input("Nome: "))  # str() around input() is redundant but harmless
sobreNome = str(input("Sobrenome: "))
#ddd = float(input("DDD: "))
#tel = str(input("Telefone: "))
#idade = int(input("Idade"))
#cidade = str(input("Cidade: "))
def usuario(nome, sobreNome, ddd, tel, idade, cidade):
    """Print the user's full name when the first name contains 'joão'.

    Bug fix: the original tested `"joão" in usuario`, i.e. membership on the
    function object itself, which raises TypeError at runtime; the check
    belongs on `nome`. The remaining parameters are accepted but unused.
    """
    if "joão" in nome:
        print("Nome: ",nome, " ", sobreNome)
    else:
        print("Usuario Incorreto, tente outra vez: ")
# -
| Aulasatualizada/aula do jupter/.ipynb_checkpoints/teste-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Чтение данных из файла (Версия для Python 3)
# ?open  (IPython help lookup for the built-in open)
# Open a file for reading with the built-in open() function
# (comments translated from Russian).
file_obj = open('example_utf8.txt', 'r')
type(file_obj)
print(file_obj.read())
# readline() returns one line at a time, including its trailing newline.
file_obj = open('example_utf8.txt')
print(file_obj.readline())
print(file_obj.readline())
# A file object is itself an iterator over its lines.
file_obj = open('example_utf8.txt')
for line in file_obj:
    print(line.strip())
# list(file) materialises all lines at once...
file_obj = open('example_utf8.txt')
data_list = list(file_obj)
for line in data_list: print(line.strip())
# ...as does readlines().
file_obj = open('example_utf8.txt')
data_list = file_obj.readlines()
for line in data_list: print(line.strip())
# Attempting to read from a closed file raises an error!
# (deliberate error demonstration)
file_obj = open('example_utf8.txt')
file_obj.close()
file_obj.read()
# Opening a koi8-r file without specifying the codec mangles the text.
file_obj = open('example_koi_8.txt')
# Print the file that is encoded in koi8-r
print(file_obj.read())
# # Codecs
import codecs
# Open the file for reading with codecs.open, explicitly specifying the
# koi8-r encoding (comment translated from Russian).
file_obj = codecs.open('example_koi_8.txt', 'r', encoding='koi8-r')
print(file_obj.read())
| notebook/ipython_files_data_reading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import ngspyce
# +
# Load the NFET testbench netlist into ngspice.
with open("./nfet_tb.net", "r") as f:
    netlist = f.read().split("\n")
ngspyce.circ(netlist)
# Sweep channel length and bulk (body) bias, recording Vth at each point.
length_list = np.arange(0.15, 3.0, 0.15)
v_bulk_list = np.arange(0, 1.8, 0.2)
vthn_all = []
for vg in v_bulk_list:  # NOTE(review): despite the name, vg is the *bulk* voltage here
    vthn_list = []
    for l in length_list:
        ngspyce.alterparams(len=l, v_bulk=vg)
        ngspyce.cmd("OP")  # operating-point analysis
        ngspyce.cmd(f'save @m.x1.msky130_fd_pr__nfet_01v8[vth]')
        vthn, = map(ngspyce.vector, [f'@m.x1.msky130_fd_pr__nfet_01v8[vth]'])
        vthn_list.append(vthn[0])
    vthn_all.append(vthn_list)
# Plot Vth vs L, one curve per bulk voltage.
plt.rcParams['figure.figsize'] = [16, 10]
for i in range(0,v_bulk_list.size):
    plt.plot(length_list, vthn_all[i])
legend_list=[]
for vb in v_bulk_list:
    legend_list.append(f"vb = {format(vb, '0.2f')}")
plt.grid()
plt.legend(legend_list)
plt.xlabel("L(um)")
plt.ylabel("vth (v)")
plt.title("Vth vs L")
plt.show()
# +
# Same sweep as above, for the PFET testbench.
with open("./pfet_tb.net", "r") as f:
    netlist = f.read().split("\n")
ngspyce.circ(netlist)
length_list = np.arange(0.15, 3.0, 0.15)
v_bulk_list = np.arange(0, 1.8, 0.2)
vthp_all = []
for vg in v_bulk_list:  # NOTE(review): vg is the bulk voltage despite the name
    vthp_list = []
    for l in length_list:
        ngspyce.alterparams(len=l, v_bulk=vg)
        ngspyce.cmd("OP")
        ngspyce.cmd(f'save @m.x1.msky130_fd_pr__pfet_01v8[vth]')
        # NOTE(review): the local is still named `vthn` although this is the PFET.
        vthn, = map(ngspyce.vector, [f'@m.x1.msky130_fd_pr__pfet_01v8[vth]'])
        vthp_list.append(vthn[0])
    vthp_all.append(vthp_list)
plt.rcParams['figure.figsize'] = [16, 10]
for i in range(0,v_bulk_list.size):
    plt.plot(length_list, vthp_all[i])
legend_list=[]
for vb in v_bulk_list:
    legend_list.append(f"vb = {format(vb, '0.2f')}")
plt.grid()
plt.legend(legend_list)
plt.xlabel("L(um)")
plt.ylabel("vth (v)")
plt.title("Vth vs L")
plt.show()
# -
# Scratch check of an arange length (unused elsewhere).
np.arange(0,1,0.1).size
| pfet_vth_issue/.ipynb_checkpoints/run_tests-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="o__O07cOB1P5" outputId="8d5118bf-3793-40e5-da1b-f4474615d52c"
# !wget "https://storage.googleapis.com/kaggle-data-sets/1049650/1765896/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20210224%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20210224T075352Z&X-Goog-Expires=259199&X-Goog-SignedHeaders=host&X-Goog-Signature=9e7cc6ab4c719e6bf90e8d9e737f61065bf5072b66989188d1be57cb2296e41c5c84d45504869828d5b7c877b75a3c19fb99e98dba77bea9535c37b5a7ac4e9a6fd2f4b6205740863f016d62278c182edb5ab4fde0a67140c974cacf7bb8458b1a5beac8530580ba55c7b416693fb0239a98f9c3647c11f8b2c24bff35deda930ee21c9d72340963c0488da88853c259067dd6be4f8025f68cb82c568e24af9c87d7f31a6412b05e2f6af549242cb927579c32f72c6f7d2df7f443e1f44d5233372f7bf260241f35b2f525b8d73a1ed1ed16363c0e283f075c641af9547d36788b62dacb4808ed61987ca0937c9c8ef844d7ef196cb0b73d34a35ca20e83d79f"
# + colab={"base_uri": "https://localhost:8080/"} id="0DxaOVRbCBrF" outputId="b06e5b25-1813-47a4-8363-cd5f358b6867"
# !unzip "/content/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=<EMAIL>%2F20210224%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20210224T075352Z&X-Goog-Expires=259199&X-Goog-SignedHeaders=hos" -d "/content/dataset"
# + id="hEfx4vG3CJSF"
from spacy.lang.en import English
import pandas as pd
import numpy as np
import spacy
from spacy.lemmatizer import Lemmatizer
from spacy import displacy
from IPython.display import clear_output
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn import metrics
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
import copy
# + [markdown] id="MKyRch52CP8y"
# ## Importing the dataset
# + id="pBUMKGLGCN3E"
# The news TSV has no header row; name the columns explicitly.
data=pd.read_csv('/content/dataset/news.tsv',header=None,sep='\t')
data.columns=['News ID',
              "Category",
              "SubCategory",
              "Title",
              "Abstract",
              "URL",
              "Title Entities",
              "Abstract Entities "]
# Copying the dataset
# (deep copy keeps `df` with the raw titles while `data` is tokenised below)
df=copy.deepcopy(data)
# + colab={"base_uri": "https://localhost:8080/", "height": 527} id="bsw9zfjsCTWB" outputId="48f86adf-6949-4b3c-fea4-025df9a923bb"
data.head()
# + [markdown] id="9G-w6e6oCXBE"
# ## Tokenizing the data
# + id="PzqCMS_7CWX0"
nlp = English()  # tokenizer-only pipeline; no statistical model required
texts=data['Title'].values
# "nlp" Object is used to create documents with linguistic annotations.
end_text=[]
# Create list of word tokens
for text in texts:
    my_doc = nlp(text)
    token_list = []
    for token in my_doc:
        token_list.append(token.text)
    end_text.append(token_list)
# Titles are now lists of token strings rather than raw strings.
data['Title']=end_text
# + colab={"base_uri": "https://localhost:8080/"} id="Bjs02S07CYUc" outputId="ff1596a6-5636-47fc-9ff1-0a52c8b5141b"
data['Title']
# + id="xvbsnbdECb4Z"
spacy_stopwords = spacy.lang.en.stop_words.STOP_WORDS
# + id="yQzqU0thCcyT"
# Drop stop words (matched case-insensitively) from every tokenised title.
final_text=[]
texts=data['Title'].values
for text in texts:
    sentence=[]
    for i in text:
        if i.lower() in spacy_stopwords:
            continue
        else:
            sentence.append(i)
    final_text.append(sentence)
data['Title']=final_text
# + colab={"base_uri": "https://localhost:8080/"} id="g2JtV8HKCeTp" outputId="3f58111a-2630-4905-8006-869465aa365f"
data['Title']
# + [markdown] id="ZNYSVn9_ChWd"
# ## Lemmatizing the Words
# + id="nh-aQ2TBCfmh"
# Making a function to lemmatize all the words
lemmatizer = WordNetLemmatizer()
def lemmatize_all(data, name):
    """Replace every token in data[name] with its lemma, in place.

    Each token gets an adjective-pass lemmatization first, then the default
    (noun) pass -- the same two successive lemmatize() calls as before.
    """
    data[name] = [
        [lemmatizer.lemmatize(lemmatizer.lemmatize(tok, pos='a')) for tok in tokens]
        for tokens in data[name]
    ]
# + colab={"base_uri": "https://localhost:8080/"} id="M7dX4hmrCkYT" outputId="893660cc-6d21-496f-e58b-788437d73bba"
# Titles after lemmatizing
import nltk
nltk.download('wordnet')  # WordNetLemmatizer needs the wordnet corpus
lemmatize_all(data,'Title')
data['Title']
# + [markdown] id="Fh6b7Db3CrcJ"
# ## Entity detection using Spacy
# + colab={"base_uri": "https://localhost:8080/"} id="L25oSXVHCluC" outputId="73b3e41b-54b7-4f01-fed3-0972543fd35b"
# NOTE(review): the 'en' shortcut was removed in spaCy v3 -- newer installs
# need spacy.load('en_core_web_sm'). This code appears to target spaCy v2.
nlp = spacy.load('en')
news=nlp(df['Title'][51278])  # raw (untokenised) title from the deep copy
entities=[(i, i.label_, i.label) for i in news.ents]
entities
# + colab={"base_uri": "https://localhost:8080/", "height": 52} id="jJiahPmHCt1k" outputId="cef6b7cd-f7d7-468d-8436-6fe6a23d9a51"
displacy.render(news, style = "ent",jupyter = True)
# + colab={"base_uri": "https://localhost:8080/", "height": 545} id="fKECa83XCvgE" outputId="d330e72e-ce19-4f4d-b431-636701899150"
news=df['Title'][51278]
print('The news headline is :',news)
news=nlp(news)
displacy.render(news, style="dep", jupyter= True)
# + [markdown] id="qpvDtl5tC0Ra"
# ## Making classification Pipeline
# + id="FZJJ4LUiCyJW"
def make_to_sentence(data,name):
    """Rebuild ``data[name]`` as lower-cased, space-separated strings.

    Each token list becomes a single string; every token contributes
    ``token.lower() + ' '``, so each result keeps a trailing space exactly
    like the original character-by-character concatenation.
    """
    joined = []
    for tokens in data[name].values:
        joined.append(''.join(tok.lower() + ' ' for tok in tokens))
    data[name] = joined
# + id="eNIXzXcfC2oh"
# Collapse token lists back into plain strings for the vectorizers below.
make_to_sentence(data,'Title')
# + id="BILDFCtfC39O"
# Features are the cleaned title strings; labels are the article categories.
X=data['Title'].values
y=data['Category'].values
# + id="p9JGW3k3C5RY"
# Hold out a third of the data; fix the seed so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,shuffle=True, random_state=42)
# + id="w0D63-gZC63g"
classifier = LogisticRegression()
# + id="5HIRq4WnC74E"
tfidf_vector = TfidfVectorizer()
# + id="J_rFf5EGC94s"
# TF-IDF vectorization feeding a logistic-regression classifier.
pipe = Pipeline([('vectorizer', tfidf_vector),
                 ('classifier', classifier)])
# + colab={"base_uri": "https://localhost:8080/"} id="D-TAc09mC_I7" outputId="39192b3a-63d6-472c-be90-ff981ecfeb5a"
pipe.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="G2GraL3xDAY5" outputId="9747ee7c-1de5-4008-f088-3bf6f9341219"
# Checking The score of the model
predicted = pipe.predict(X_test)
# Model Accuracy
# Precision/recall are support-weighted since the categories are imbalanced.
print("Logistic Regression Accuracy:",metrics.accuracy_score(y_test, predicted))
print("Logistic Regression Precision:",metrics.precision_score(y_test, predicted,average='weighted'))
print("Logistic Regression Recall:",metrics.recall_score(y_test, predicted,average='weighted'))
# + [markdown] id="y_EGztyzDDSV"
# ## Making XGB Classifier Pipeline
# + id="DBLH1zdfDBqZ"
xgb=XGBClassifier()
# + id="o2t77uLRDHXr"
# Same TF-IDF front end, gradient-boosted trees as the classifier.
pipe_xgb = Pipeline([('vectorizer', tfidf_vector),
                 ('classifier', xgb)])
# + colab={"base_uri": "https://localhost:8080/"} id="SVgxECH-DImp" outputId="2e71ee9a-77f4-4000-fb16-2abcb92ba502"
pipe_xgb.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="dd_0puo4DJuR" outputId="4fecfeb1-0736-437c-8029-b197b4b5d951"
# Checking The score of the model
predicted = pipe_xgb.predict(X_test)
# Model Accuracy
print("XGBClassifier Accuracy:",metrics.accuracy_score(y_test, predicted))
print("XGBClassifier Precision:",metrics.precision_score(y_test, predicted,average='weighted'))
print("XGBClassifier Recall:",metrics.recall_score(y_test, predicted,average='weighted'))
# + [markdown] id="zGf18mHEDM81"
# ## LGBM Classifier
# + id="3IjOChrcDLQX"
lgbm=LGBMClassifier()
# + id="UA3BVcBTDONv"
# Same TF-IDF front end, LightGBM as the classifier.
pipe_lgb = Pipeline([('vectorizer', tfidf_vector),
                 ('classifier', lgbm)])
# + colab={"base_uri": "https://localhost:8080/"} id="OU2Ic7twDPmu" outputId="b15de68d-2641-4e86-9932-e6d80f171915"
pipe_lgb.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="5h9s2WP4DQmB" outputId="1b41de90-ca86-4c1f-b6a5-5f94ad748c92"
# Checking The score of the model
predicted = pipe_lgb.predict(X_test)
# Model Accuracy
# FIX: printed label typo "LGBMClassifer" -> "LGBMClassifier".
print("LGBMClassifier Accuracy:",metrics.accuracy_score(y_test, predicted))
print("LGBMClassifier Precision:",metrics.precision_score(y_test, predicted,average='weighted'))
print("LGBMClassifier Recall:",metrics.recall_score(y_test, predicted,average='weighted'))
# + [markdown] id="EzDA_v4fDTV9"
# ## RandomForest Classifier
# + id="6JN35ZZkDRwc"
rf=RandomForestClassifier()
# + id="tj5ZwBzsDVOw"
# Same TF-IDF front end, random forest as the classifier.
pipe_rf = Pipeline([('vectorizer', tfidf_vector),
                 ('classifier', rf)])
# + colab={"base_uri": "https://localhost:8080/"} id="YrQ5ap0QDWX0" outputId="3a986b7f-b626-47ff-c1d0-3ddc8c29ff34"
pipe_rf.fit(X_train,y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="DDKpLzQxDXZq" outputId="c7aa0fc8-d0cb-41ff-a03e-3a5a64a8218b"
# Checking The score of the model
predicted = pipe_rf.predict(X_test)
# Model Accuracy
print("Random Forest Accuracy:",metrics.accuracy_score(y_test, predicted))
print("Random Forest Precision:",metrics.precision_score(y_test, predicted,average='weighted'))
print("Random Forest Recall:",metrics.recall_score(y_test, predicted,average='weighted'))
# + id="XY_HXPlLDYg0"
| Notebooks/MIND Data Analysis and Classification/MIND_Analysis_and_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # General Purpose Exp Notebook
#
# This notebook has sections to train models, create uncertainty wrappers, and test the models. Experiment specific details are assumed to be contained in `config.py` in the experiment folder below.
# +
# %load_ext autoreload
# %autoreload 2

# Folder holding the experiment-specific config.py plus model/result outputs.
EXP_FOLDER = 'cifar10'
# +
import sys
import os
sys.path.append(os.path.abspath(EXP_FOLDER))
import config # imported from EXP_FOLDER
import cProfile
import torch
# -
# ## Train and save models
# Trains an ensemble of models as specified in config
# +
from nn_ood.utils.train import train_ensemble

# Train config.N_MODELS independent models on the experiment's dataset.
models = train_ensemble(config.N_MODELS,
                        config.make_model,
                        config.dataset_class,
                        config.dist_constructor,
                        config.opt_class,
                        config.opt_kwargs,
                        config.sched_class,
                        config.sched_kwargs,
                        config.device,
                        num_epochs=config.N_EPOCHS,
                        batch_size=config.BATCH_SIZE)

## SAVE MODEL
print("saving models")
save_folder = os.path.join(EXP_FOLDER, 'models')
# exist_ok avoids the race between the existence check and the mkdir.
os.makedirs(save_folder, exist_ok=True)

for i, model in enumerate(models):
    # One checkpoint per ensemble member: <FILENAME>_0, <FILENAME>_1, ...
    filename = os.path.join(save_folder, config.FILENAME + "_%d" % i)
    torch.save(model.state_dict(), filename)
# -

# clear memory
del models
# ## Process Data to create uncertainty wrappers
# Loops over data to create uncertainty wrappers, and saves them
# +
save_folder = os.path.join(EXP_FOLDER, 'times')
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

## SET UP MODEL
model = config.make_model()

## LOAD MODEL
# The uncertainty wrappers here are single-model: wrap ensemble member 0 only.
filename = os.path.join(EXP_FOLDER, "models", config.FILENAME + "_0" )
model.load_state_dict(torch.load(filename))
model = model.to(config.device)
model.eval()

## SETUP DATASET
dataset = config.dataset_class("train", N=5000)

## SET UP UNC WRAPPERS
for name, info in config.prep_unc_models.items():
    print(name)
    # Reset all parameters to trainable, then re-freeze per this wrapper's spec.
    config.unfreeze_model(model)
    if 'freeze' in info:
        # info['freeze'] is either a bool (freeze default fraction) or a float fraction.
        if type(info['freeze']) is bool:
            freeze_frac = None
        else:
            freeze_frac = info['freeze']
        config.freeze_model(model, freeze_frac=freeze_frac)
    if 'apply_fn' in info:
        model.apply(info['apply_fn'])
    unc_model = info['class'](model, config.dist_constructor, info['kwargs'])
    # Profile the (slow) dataset pass and dump the timing report to disk.
    cProfile.run("""\n
unc_model.process_dataset(dataset)
""", os.path.join(EXP_FOLDER, "times", name+"_process.timing") )
    filename = os.path.join(EXP_FOLDER, "models", name+"_"+config.FILENAME)
    torch.save(unc_model.state_dict(), filename)
# -
# # calibrate hyperparms on val dataset
# +
from nn_ood.utils.train import minimize_val_nll

save_folder = os.path.join(EXP_FOLDER, 'times')
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

## SET UP MODEL
model = config.make_model()

## LOAD MODEL
filename = os.path.join(EXP_FOLDER, "models", config.FILENAME + "_0" )
model.load_state_dict(torch.load(filename))
model = model.to(config.device)
model.eval()

## SETUP DATASET
# Hyperparameters are tuned on the validation split, not the training split.
dataset = config.dataset_class("val", N=5000)

## SET UP UNC WRAPPERS
for name, info in config.prep_unc_models.items():
    print(name)
    config.unfreeze_model(model)
    if 'freeze' in info:
        if type(info['freeze']) is bool:
            freeze_frac = None
        else:
            freeze_frac = info['freeze']
        config.freeze_model(model, freeze_frac=freeze_frac)
    if 'apply_fn' in info:
        model.apply(info['apply_fn'])
    unc_model = info['class'](model, config.dist_constructor, info['kwargs'])
    # Start from the wrapper state fitted on the training split.
    filename = os.path.join(EXP_FOLDER, "models", name+"_"+config.FILENAME)
    print(filename)
    unc_model.load_state_dict(torch.load(filename))
#     try:
    hp = unc_model.hyperparameters  # NOTE(review): unused; leftover from the commented-out try block
    # Minimize validation NLL over the wrapper's hyperparameters.
    unc_model.optimize_nll(dataset) #minimize_val_nll(unc_model, dataset)
    print(unc_model.hyperparameters)
#     except Exception as e:
#         print(e)
#         print("no hyperpameters for this model")
#         pass
    # Save under a separate "calibrated" name so the uncalibrated state is kept.
    filename = os.path.join(EXP_FOLDER, "models", name+"_"+"calibrated_"+config.FILENAME)
    torch.save(unc_model.state_dict(), filename)
# -

# clear memory
del model
del unc_model
# ## Test Uncertainty Wrappers
# Evaluates prediction and uncertainty estimate on various datasets
# +
from nn_ood.utils.test import process_datasets

# LOAD UNC_WRAPPERS
# Load every ensemble member; multi-model wrappers consume the whole list,
# single-model wrappers use models[0].
print("Loading models")
models = []
for i in range(config.N_MODELS):
    print("loading model %d" % i)
    filename = os.path.join(EXP_FOLDER, 'models', config.FILENAME + "_%d" % i)
    state_dict = torch.load(filename)
    model = config.make_model()
    model.load_state_dict(state_dict)
    model.eval()
    model.to(config.device)
    models.append(model)

model = models[0]
# -
# ### Test against OoD datasets
# + tags=[]
save_folder = os.path.join(EXP_FOLDER, 'results')
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

save_folder = os.path.join(EXP_FOLDER, 'times')
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

for name, info in config.test_unc_models.items():
    print(name)
    config.unfreeze_model(model)
    if 'freeze' in info:
        if type(info['freeze']) is bool:
            freeze_frac = None
        else:
            freeze_frac = info['freeze']
        config.freeze_model(model, freeze_frac=freeze_frac)
    if 'apply_fn' in info:
        model.apply(info['apply_fn'])
    # Ensemble-style wrappers take the full model list; others wrap one model.
    if 'multi_model' in info:
        unc_model = info['class'](models, config.dist_constructor, info['kwargs'])
    else:
        unc_model = info['class'](model, config.dist_constructor, info['kwargs'])
    if info['load_name'] is not None:
        filename = os.path.join(EXP_FOLDER, "models", info['load_name']+"_"+config.FILENAME)
        print(filename)
        unc_model.load_state_dict(torch.load(filename))
    unc_model.cuda()
    # Profile evaluation over all test datasets; a failure in one wrapper
    # should not abort the sweep, so errors are printed and the loop continues.
    try:
        cProfile.run("""\n
results = process_datasets(config.dataset_class,
                   config.test_dataset_args,
                   unc_model,
                   config.device,
                   N=1000,
                   **info['forward_kwargs'])
""", os.path.join(EXP_FOLDER, "times", name) )

        savepath = os.path.join(EXP_FOLDER, "results", name)
        torch.save(results, savepath)
    except Exception as e:
        print(e)
# -
# ### Test against noise
# +
from nn_ood.utils.test import transform_sweep

if "transforms" not in dir(config):
    raise NameError("No transforms to test for this experiment")

save_folder = os.path.join(EXP_FOLDER, 'results_transforms')
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

save_folder = os.path.join(EXP_FOLDER, 'times_transforms')
if not os.path.exists(save_folder):
    os.makedirs(save_folder)

for name, info in config.test_unc_models.items():
    print(name)
    config.unfreeze_model(model)
    if 'freeze' in info:
        if type(info['freeze']) is bool:
            freeze_frac = None
        else:
            freeze_frac = info['freeze']
        config.freeze_model(model, freeze_frac=freeze_frac)
    # BUG FIX: was `if 'apply_fn' is info:` — an identity comparison that is
    # always False, so apply_fn was never applied during the transform sweep.
    # Every sibling loop in this file uses membership (`in`).
    if 'apply_fn' in info:
        model.apply(info['apply_fn'])
    # Ensemble-style wrappers take the full model list; others wrap one model.
    if 'multi_model' in info:
        unc_model = info['class'](models, config.dist_constructor, info['kwargs'])
    else:
        unc_model = info['class'](model, config.dist_constructor, info['kwargs'])
    if info['load_name'] is not None:
        filename = os.path.join(EXP_FOLDER, "models", info['load_name']+"_"+config.FILENAME)
        print(filename)
        unc_model.load_state_dict(torch.load(filename))
    unc_model.cuda()
    dataset = config.dataset_class(config.in_dist_splits[0],N=1000)
    # BUG FIX: write into the *_transforms folders created above; the original
    # saved into "times"/"results" and clobbered the OoD sweep's outputs.
    cProfile.run("""\n
results = transform_sweep(dataset,
              config.transforms,
              unc_model,
              config.device,
              **info['forward_kwargs'])
""", os.path.join(EXP_FOLDER, "times_transforms", name) )

    savepath = os.path.join(EXP_FOLDER, "results_transforms", name)
    torch.save(results, savepath)
# -
| experiments/run_experiments.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# # ETL
#
# We are going to do the `Transform` step of an Extract-Transform-Load.
#
# ### ETL
#
# Extract-Transform-Load (ETL) is a fancy way of saying, "We have some crufty, legacy data over in this system, and now we need it in this shiny new system over here, so
# we're going to migrate this."
#
# (Typically, this is followed by, "We're only going to need to run this
# once." That's then typically followed by much forehead slapping and
# moaning about how stupid we could possibly be.)
#
# ### The goal
#
# We're going to extract some scrabble scores from a legacy system.
#
# The old system stored a list of letters per score:
#
# - 1 point: "A", "E", "I", "O", "U", "L", "N", "R", "S", "T",
# - 2 points: "D", "G",
# - 3 points: "B", "C", "M", "P",
# - 4 points: "F", "H", "V", "W", "Y",
# - 5 points: "K",
# - 8 points: "J", "X",
# - 10 points: "Q", "Z",
#
# The shiny new scrabble system instead stores the score per letter, which
# makes it much faster and easier to calculate the score for a word. It
# also stores the letters in lower-case regardless of the case of the
# input letters:
#
# - "a" is worth 1 point.
# - "b" is worth 3 points.
# - "c" is worth 3 points.
# - "d" is worth 2 points.
# - Etc.
#
# Your mission, should you choose to accept it, is to transform the legacy data
# format to the shiny new format.
#
# ### Notes
#
# A final note about scoring, Scrabble is played around the world in a
# variety of languages, each with its own unique scoring table. For
# example, an "E" is scored at 2 in the Māori-language version of the
# game while being scored at 4 in the Hawaiian-language version.
#
# ## Source
#
# The Jumpstart Lab team [http://jumpstartlab.com](http://jumpstartlab.com)
#
# ## Version compatibility
# This exercise has been tested on Julia versions >=1.0.
#
# ## Submitting Incomplete Solutions
# It's possible to submit an incomplete solution so you can see how others have completed the exercise.
# ## Your solution
# +
# submit
function transform(input::AbstractDict)
    # Invert the legacy `score => letters` table into `lowercase(letter) => score`,
    # e.g. Dict(1 => ['A', 'E']) becomes Dict('a' => 1, 'e' => 1).
    Dict(lowercase(letter) => score for (score, letters) in input for letter in letters)
end
# -
# ## Test suite
# +
using Test

# include("etl.jl")

# Each testset feeds a legacy `score => letters` table through `transform`
# and checks the resulting `lowercase letter => score` table.
@testset "a single letter" begin
    input = Dict(1=>['A'])
    output = Dict('a'=>1)
    @test transform(input) == output
end

@testset "single score with multiple letters" begin
    input = Dict(1=>['A', 'E', 'I', 'O', 'U'])
    output = Dict('a'=>1, 'e'=>1, 'i'=>1, 'o'=>1, 'u'=>1)
    @test transform(input) == output
end

@testset "multiple scores with multiple letters" begin
    input = Dict(1=>['A', 'E'], 2=>['D', 'G'])
    output = Dict('g'=>2, 'e'=>1, 'a'=>1, 'd'=>2)
    @test transform(input) == output
end

# Full English Scrabble table: every letter appears exactly once in the output.
@testset "multiple scores with differing numbers of letters" begin
    input = Dict(1=>[ 'A', 'E', 'I', 'O', 'U', 'L', 'N', 'R', 'S', 'T' ],
                 2=>[ 'D', 'G' ], 3=>[ 'B', 'C', 'M', 'P' ],
                 4=>[ 'F', 'H', 'V', 'W', 'Y' ], 5=>[ 'K' ],
                 8=>[ 'J', 'X' ], 10=>[ 'Q', 'Z' ])
    output = Dict('a'=>1, 'b'=>3, 'c'=>3, 'd'=>2, 'e'=>1,
                  'f'=>4, 'g'=>2, 'h'=>4, 'i'=>1, 'j'=>8,
                  'k'=>5, 'l'=>1, 'm'=>3, 'n'=>1, 'o'=>1,
                  'p'=>3, 'q'=>10, 'r'=>1, 's'=>1, 't'=>1,
                  'u'=>1, 'v'=>4, 'w'=>4, 'x'=>8, 'y'=>4,
                  'z'=>10)
    @test transform(input) == output
end
# -
# ## Prepare submission
# To submit your exercise, you need to save your solution in a file called `etl.jl` before using the CLI.
# You can either create it manually or use the following functions, which will automatically write every notebook cell that starts with `# submit` to the file `etl.jl`.
#
# +
# using Pkg; Pkg.add("Exercism")
# using Exercism
# Exercism.create_submission("etl")
| exercises/etl/etl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework #8
# ### <NAME>
# ### 2021
# # 1. LTCM
#
# ## 1.1
# ### Describe LTCM’s investment strategy with regard to the following aspects:
# - Securities traded
# - Trading frequency
# - Skewness (Do they seek many small wins or a few big hits?)
# - Forecasting (What is behind their selection of trades?)
#
# #### Solution:
# - LTCM traded across a wide range of securities. They are heavily involved in fixed income and credit, and they also have sizeable positions in equities. In all these asset classes, they trade a large number of securities, across global markets.
#
# - LTCM's trading frequencies vary given that they house a wide range of strategies. Their largest bucket, the convergence trades, typically take weeks and months to converge. Overall, they are something of a medium-term frequency. Largely, they are not trying to arbitrage intraday movements, nor are they making long-term directional bets.
#
# - LTCM largely is trying to pick up small premia in the convergence trades, which limits the upside of any individual trade, and leaves it substantial downside, given all the leverage. Of course, aggregating all their trades together can lead to positive skewness if they are properly diversified. But we would expect that their typical trade has a small positive mean with substantial negative skewness.
#
# - For most their trades, LTCM is not making directional forecasts. Rather, they are trading spreads and pairs, forecasting relative convergence rather than absolute levels. In this sense, they do not rely on sophisticated forecasting models of the overall market but rather rely on models to forecast the size and speed of relative convergence in particular markets.
#
# ## 1.2
# ### What are LTCM’s biggest advantages over its competitors?
#
# #### Solution:
# Several advantages.
# - Efficient financing. LTCM got very favorable terms on all financing—sometimes even zero haircut! Typically had small, if any, outlay.</li>
# - Fund size. Have market power even in the large market of institutional wholesale.</li>
# - Liquidity. LTCM has in place many mechanisms to ensure liquidity.</li>
# - Long-term horizon. In financing and assessing trades, LTCM takes a relatively long-term view.
# - Hedged. LTCM avoids taking too much default risk or explicit directional bets.
#
# Then again, LTCM went bust in Aug. 1998, so maybe these advantages were not as strong as it
# seemed!
#
# ## 1.3
# ### The case discusses four types of funding risk facing LTCM:
# - collateral haircuts
# - repo maturity
# - equity redemption
# - loan access
# The case discusses specific ways in which LTCM manages each of these risks. Briefly discuss
# them.
#
# #### Solution:
# The case discusses steps LTCM took to manage four types of funding risks.
# - Collateral haircuts. For most trades, LTCM obtains 100% financing on a fully collateralized
# basis. Furthermore, LTCM stress tests the haircuts across its asset classes.
# - Repo. LTCM goes against the norm by entering into relatively long-maturity repo. While much of it is overnight, LTCM uses contracts that typically have maturity of 6-12 months. Furthermore, LTCM manages their aggregate repo maturity.
# - Equity redemption. The firm is highly levered, so equity funding risk is especially important. LTCM restricts redemptions of equity year by year. The restriction is particularly strong in that unredeemed money is re-locked.
# They also spread the redemption windows across the year to ensure there is never a possibility of immediate withdrawal of a large portion of equity.
# - For debt funding, LTCM negotiated a revolving loan that has no Material Adverse Change clause. Thus, the availability of debt funding is not so highly correlated with fund performance.
#
# ## 1.4
# ### LTCM is largely in the business of selling liquidity and volatility. Describe how LTCM accounts for liquidity risk in their quantitative measurements.
#
# #### Solution:
# LTCM attempts to account for liquidity risk quantitatively by adjusting security correlations. For short-term horizons, LTCM assumes positive correlation between all trade categories. Even if their net exposure to a strategy flips sides, they still assume positive correlation to the new net position.
# Given the efforts of LTCM to hedge out obvious market risks, there are many strategies which would seem to have zero correlation. However, LTCM feels that liquidity concerns can cause the effective trading to be positively correlated.
#
# ## 1.5
# ### Is leverage risk currently a concern for LTCM?
#
# #### Solution:
# It would seem that leverage is not particularly dangerous at the moment. The fund’s volatility is relatively low, its VaR is relatively low, nor is it particularly high relative to the rest of the industry.
# Moreover, the firm actively manages its funding risk which theoretically means it should be able to handle the natural risks of high leverage.
# At the time of the case, the firm is trying to determine whether to further increase leverage. Subsequently, at the end of 1997 the fund returned about a third of its 7.5 billion equity capital to investors.
# Of course, less than a year later, the fund blew up, but from the time of the case it’s hard to see the leverage risk.
#
# ## 1.6
# ### Many strategies of LTCM rely on converging spreads. LTCM feels that these are almost win/win situations because of the fact that if the spread converges, they make money. If it diverges, the trade becomes even more attractive, as convergence is still expected at a future date.
#
# ### What is the risk in these convergence trades?
#
# #### Solution:
# About a year after the time of the case, the fund loses most of its value due to non-converging trades. So clearly there is some risk!
# Positions are subject to liquidity risk. If market liquidity dries up or the markets become segmented, the divergent spreads can persist for a long time. This indeed happens later to LTCM. The trades that get them in trouble ultimately pay off, but not before LTCM blows up.
# LTCM believes it can exit these convergence trades if they become too unprofitable. However, a stop-loss order is not the same as a put option. If the price jumps discontinuously through the stop-loss, then it is ineffective. Or a market may be paralyzed/illiquid when trying to execute the stop-loss. A put option does not need to worry about price impact, whereas a stop-loss does. Finally, a stop-loss ensures that an investor sells as soon as a security price hits a worst-case scenario, ensuring unfavorable market timing.
#
#
#
# # 2. LTCM Risk Decomposition
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option("display.precision", 4)

import sys
# Make the course's shared helper module (portfolio.py) importable.
sys.path.insert(0, '../cmds')
from portfolio import *
# -
DATAPATH_ltcm = '../data/ltcm_exhibits_data.xlsx'
SHEET = 'Exhibit 2'

# Load Exhibit 2 (AUM plus gross/net returns) and tidy it up.
ltcm_raw = pd.read_excel(DATAPATH_ltcm, sheet_name=SHEET)
ltcm_raw.columns = ['date', 'AUM', 'Gross', 'Net', 'TRInet', 'drop']
ltcm_raw = ltcm_raw.set_index('date').drop(columns=['drop'])
# Keep only rows with a valid date label, then drop all-NaN rows.
ltcm_raw = ltcm_raw.loc[ltcm_raw.index.dropna()]
ltcm_raw = ltcm_raw.dropna(axis=0, how='all')
ltcm_raw.tail()
# ### Timing of LTCM Data
#
# The Exhibit data is coded into Excel poorly. It is listed as monthly data, but Excel automatically converts that to the first day of the month. The data corresponds to the End-of-Month!
#
# For instance, LTCM chose to return \$2.7bn of capital on Dec 31, 1997. The drop in AUM is coded to Dec 1, 1997 when the casewriters simply intended to code it to Dec 1997, meaning the end-of-december.
#
# Similarly, LTCM saw large drawdowns in May and June 1998, but these are coded to be May 1 and June 1, when they should be coded as the end-of-month date.
#
# Thus, we need to shift these dates to end-of-month. If we instead make the May 1 date correspond to April 30 returns, we will get incorrect correlations and regression results.
# +
ltcm_raw.index = pd.to_datetime(ltcm_raw.index)
# Re-stamp the (incorrectly first-of-month) dates to month-end so they align
# with SPY's end-of-month return dates (see the discussion above).
# NOTE(review): the 'M' alias is deprecated in pandas >= 2.2 ('ME') — confirm version.
ltcm = ltcm_raw.resample('M').last()
ltcm.tail()
# -
# ## Optional: Add in August 1998?
#
# August 1998 is when LTCM was shocked with massive losses. These were so large that investors then pulled their money in September, to the point that a reorganization of LTCM was implemented in late September.
#
# - The return for August 1998 was roughly -45\%
# - And SPY was also down massively in that month.
#
#
# Try setting `INCLUDE_AUGUST` to `True` to see how different the statistical results below are when this disaster month is included!
#
# ## Optional: Evaluate it through December 1997
#
# - This corresponds closely to the time of the case, when they are deciding whether to return capital.
# - You will see that the returns through that time are incredible, with hardly any losses.
# - 1998 has some bad months before the crash in August.
# +
INCLUDE_AUGUST98 = False
STOP_DEC97 = False

if INCLUDE_AUGUST98:
    DATE_JULY = pd.to_datetime('1998-07-31')
    DATE_AUGUST = pd.to_datetime('1998-08-31')
    # Approximate August 1998 loss in $bn, applied against July's AUM.
    LOSS = -1.85

    ltcm.loc[DATE_AUGUST,'Gross'] = LOSS/ltcm.loc[DATE_JULY,'AUM']
    # assuming that the netting of this loss is negligible given already down YTD
    ltcm.loc[DATE_AUGUST,'Net'] = LOSS/ltcm.loc[DATE_JULY,'AUM']
    ltcm.loc[DATE_AUGUST,'AUM'] = ltcm.loc[DATE_JULY,'AUM'] + LOSS
    ltcm.loc[DATE_AUGUST,'TRInet'] = ltcm.loc[DATE_JULY,'TRInet'] * (1+ltcm.loc[DATE_AUGUST,'Net'])
    display(ltcm.tail())

elif STOP_DEC97:
    # Truncate the sample at Dec 1997, the approximate time of the case.
    ltcm = ltcm.loc[:'1997',:]
# -
# ### Market Data
#
# Get SPY and Risk-free data.
#
# Align it to the LTCM data, and combine into a dataframe of returns
# +
DATAPATH_GMO = '../data/gmo_analysis_data.xlsx'
SHEET = 'returns (total)'
mkt = pd.read_excel(DATAPATH_GMO,sheet_name=SHEET).set_index('Date')[['SPY']]
SHEET = 'risk-free rate'
# 3-month T-bill as the risk-free rate, aligned to the same dates.
mkt['RF'] = pd.read_excel(DATAPATH_GMO,sheet_name=SHEET).set_index('Date')[['US3M']]

# Inner join restricts everything to dates where LTCM returns exist.
rets = pd.concat([mkt[['SPY','RF']], ltcm[['Gross','Net']]], join='inner', axis=1)
rets.tail()
# -
# ## 2.1 Summary Stats
#
# The homework was not explicit in whether you should calculate these summary stats using excess returns or total returns. Given that it requires excess returns for 2.2-2.4, we convert to excess returns here.
# Convert to excess returns by subtracting the risk-free rate from every column.
retsx = rets.subtract(rets['RF'],axis=0).drop(columns=['RF'])

display(performanceMetrics(retsx,annualization=12))
display(tailMetrics(retsx))
# ### The summary stats
#
# The summary stats are great for the data in the exhibits, and even better if cut off at the end of 1997, at the time of the case.
#
# - Sharpe is very high relative to what we usually see.
# - Volatility is sized similarly to SPY, and minimum return is not too bad.
#
# Still, note that
# - SPY does amazing during this period,
# - so LTCM's returns while fantastic, are not an outlier relative to SPY.
# - In fact, if we include the 1998 data through July, the tail-risk statistics look worse for LTCM than for SPY.
#
# If we include the August crash,
# - All the performance looks bad relative to SPY, particularly the tail-risk.
#
# The difference between Gross and Net is not too large.
# - Obviously, the net returns are lower, but given how high LTCM returns are (especially through) 1997, the net returns still deliver a fantastic Sharpe and overall profile. Though one can note they are a bit lower than SPY.
# ## 2.2 Market Regression
# Regress each excess-return series on SPY (annualized alpha, beta, R-squared).
get_ols_metrics(retsx['SPY'],retsx,annualization=12).style.format('{:,.2%}'.format)
# ### The regression stats make LTCM look even better.
#
# While the univariate stats like Sharpe Ratio were not particularly impressive relative to SPY, the regression results show that LTCM has very little correlation to SPY! Thus, these large returns are
# - nearly all alpha, not beta
# - generating a massive Information Ratio
#
# Thus, this investment looks extremely attractive as an addition to equity-oriented portfolios.
#
# ### Optional:
# If August 1998 is included, the regression picks up substantial SPY correlation, making all the stats worse.
# ## 2.3 Quadratic Regression
# +
# Regress each series on SPY and SPY^2 to pick up symmetric non-linear exposure.
X = retsx[['SPY']].copy()
X['SPY-squared'] = X['SPY'] ** 2

table_quad = pd.DataFrame(index=retsx.columns,
                          columns=['alpha'] + list(X.columns) + ['R-squared'])
for series in retsx.columns:
    fit = LinearRegression().fit(X, retsx[series])
    table_quad.loc[series, 'alpha'] = fit.intercept_
    table_quad.loc[series, X.columns] = fit.coef_
    table_quad.loc[series, 'R-squared'] = fit.score(X, retsx[series])

table_quad
# -
# ### The Quadratic Regression
# - does not add to the R-squared to a significant degree.
# - Thus, quadratic movements of SPY are not significant in explaining LTCM variation
# - To the degree it has quadratic exposure, it is negative. This indicates LTCM underperforms particularly large SPY movements (whether on the upside or downside).
#
# ### Optional
# - With August 1998 included, the regression picks up massive negative exposure to the quadratic term, due to the severe underperformance of SPY's down month in August 1998. This alone raises the R-squared to a high level.
# ## 2.4 Asymmetric Regression
# +
STRIKE = .03

# Piecewise-linear ("hockey-stick") regressors: option-like payoffs above
# +STRIKE and below -STRIKE let up- and down-market exposure differ.
X = retsx[['SPY']].copy()
X['UP'] = (retsx[['SPY']]-STRIKE).clip(0)
X['DOWN'] = (-STRIKE - retsx[['SPY']]).clip(0)
X.tail()

table_asymmetric = pd.DataFrame(index=retsx.columns,
                                columns=['alpha'] + list(X.columns) + ['R-squared'])
for series in retsx.columns:
    fit = LinearRegression().fit(X, retsx[series])
    table_asymmetric.loc[series, 'alpha'] = fit.intercept_
    table_asymmetric.loc[series, X.columns] = fit.coef_
    table_asymmetric.loc[series, 'R-squared'] = fit.score(X, retsx[series])

table_asymmetric
# -
# ### The Asymmetric regression
# - again shows LTCM as having very little exposure to SPY, even with these nonlinear considerations. That is, the R-squared is still tiny.
# - Though the coefficients are not significant, we can interpret the SPY UP coefficient as saying LTCM is underperforming large (positive) SPY returns. This is similar to what we learned from the quadratic regression.
# - The SPY DOWN coefficient indicates LTCM overperforms large negative SPY movements. This differs from the Quadratic regression result, which could not distinguish asymmetry in the response to up and down movements.
# - Still, the number of such UP and DOWN months is small, and the effect is statistically weak.
#
# ### Optional
# But if we re-do this including the optional August 1998 data, the answer changes to show substantial negative DOWN exposure, meaning LTCM underperforms in a very down month (like Aug 1998!)
# # 3. The FX Carry Trade
# ## Two Data Sources
#
# The original data set for the homework, `fx_carry_data.xlsx`, reported the **annualized** risk-free rate.
# - The Homework instructions did not tell you to divide the risk-free rates by 12, to change them to monthly rates. While I think this should have been apparent, it is fine if you used the data in its annualized form--but your results will be **very** different, and very unrealistic.
#
# - If you did make this adjustment, you just divided the risk-free rates by 12. Note that LIBOR rates are reported using simple compounding, so we are fine to just multiply the rate by 1/12. This will be a bit different than the true, compounded, return. For our purposes it is not a big deal.
#
# If you would like a clean set of data, use `fx_carry_data_v2.xlsx`.
# - It reports the risk-free rates in a monthly scaling.
# - It also pulls the LIBOR data from 1-month LIBOR instead of the original 3-month LIBOR. Thus, this set is more appropriate for our one-month trade-horizon considerations.
# +
DATAPATH_FX = '../data/fx_carry_data.xlsx'
# DATAPATH_FX = '../data/fx_carry_data_v2.xlsx'

# The v2 workbook reports monthly-scaled 1-month LIBOR; the original reports
# annualized 3-month LIBOR. Pick the matching USD risk-free column.
USDRF = 'USD1M' if 'v2' in DATAPATH_FX else 'USD3M'
# +
SHEET = 'fx rates'
fx = pd.read_excel(DATAPATH_FX, sheet_name=SHEET).set_index('DATE')
# Work in log FX levels so growth rates are simple differences.
logFX = np.log(fx)
logFX
# +
SHEET = 'risk-free rates'
rf = pd.read_excel(DATAPATH_FX,sheet_name=SHEET).set_index('DATE')
# Convert simple rates to log (continuously compounded) rates.
logRF = np.log(rf+1)
# Split out the USD rate; the remaining columns are the foreign legs.
logRFusd = logRF[[USDRF]]
logRF.drop(columns=[USDRF],inplace=True)
logRF
# -
# Timing of the risk-free rate
# - The data is defined such that the March value of the risk-free rate corresponds to the rate beginning in March and ending in April.
# - In terms of the class notation, $r^{f,i}_{t,t+1}$ is reported at time $t$. (It is risk-free, so it is a rate from $t$ to $t+1$ but it is know at $t$.
# ## 3.1 The Static Carry Trade
#
# Calculate excess log returns using,
# $$\tilde{r}^i_{t+1} \equiv s^i_{t+1} - s^i_t + r^{f,i}_{t,t+1} - r^{f,\$}_{t,t+1}$$
# For convenience, rewrite this as
# $$\tilde{r}^i_{t+1} \equiv s^i_{t+1} - s^i_t - (r^{f,\$}_{t,t+1} - r^{f,i}_{t,t+1})$$
#
# 1. Build the spread in risk-free rates: $r^{f,\$}_{t,t+1} - r^{f,i}_{t,t+1}$.
# - Lag this variable, so that the March-to-April value is stamped as April.
#
# 2. Build the FX growth rates: $ s^i_{t+1} - s^i_t$
# - These are already stamped as April for the March-to-April FX growth.
#
# Then the excess log return is simply the difference of the two objects.
# +
# USD-minus-foreign log rate spread: r^{f,$} - r^{f,i}, known at time t.
logRFspread = -logRF.subtract(logRFusd.values,axis=0)
# Lag one month so the March->April rate is stamped April, matching FX growth.
logRFspread = logRFspread.shift(1)

# FX log growth s_{t+1} - s_t, already stamped at t+1 by diff().
logFXgrowth = logFX.diff(axis=0)

# Excess log return of the carry trade: FX appreciation minus the rate spread.
# (.values bypasses column-name alignment; columns are in the same order.)
logRX = logFXgrowth - logRFspread.values
logRX
# -
# ### Understanding the two components of excess log returns
# Plot the two components of the excess return separately.
logFXgrowth.plot(title='FX Growth', figsize=(10,5));
logRFspread.plot(title='RF Spread (USD-Other)', figsize=(10,5));
# Decompose the mean excess return into FX-growth and rate-spread contributions.
rx_components = logFXgrowth.mean().to_frame()
rx_components.columns=['FX effect']
# Negated because the spread enters the excess return with a minus sign.
rx_components['RF effect'] = -logRFspread.mean().values
rx_components['Total'] = rx_components.sum(axis=1)
# Annualize the monthly means.
rx_components *= 12
rx_components
# ### Performance Metrics
#
# Remember these are log returns, not level returns. But the distinction is minor for the quantitative results here.
performanceMetrics(logRX,annualization=12)
# ## 3.2 Implications for UIP
#
# The results above are evidence against UIP, but not strong evidence.
# - UIP implies that the mean excess return should be zero. The USD and JPY trade seems to have a mean excess return significantly different from zero.
#
# - The evidence from the other currencies is not clear. Their mean excess return is not exactly zero, but they are so close to zero that they may not be statistically significant.
#
# - Note that UIP makes no implication about the volatility of these excess returns.
#
# A long position in three of the currencies had negative mean excess (log) returns!
# - Being long CHF would have delivered a small positive mean excess log return, but a poor Sharpe.
# - The best trade would have been to be short JPY and long USD. This delivered a small but positive mean return and Sharpe.
# ## 3.3 Predicting FX
#
# Run a forecasting regression for the log FX Growth, as implied by UIP.
# - UIP implies that the interest rate spread is the optimal predictor of FX growth.
# - Thus, it implies the forecasting beta should be 1.
# +
# UIP forecasting regressions: log FX growth on the lagged rate spread,
# run separately for each currency. UIP implies Beta = 1.
olstab = pd.DataFrame(index=logFXgrowth.columns, columns=['alpha','Beta','r-squared'])
for i, curncy in enumerate(logFXgrowth.columns):
    # Keep only alpha/slope/R^2 from the regression stats row.
    temp = get_ols_metrics(logRFspread.iloc[:,i],logFXgrowth.iloc[:,i],annualization=1).drop(columns=['Treynor Ratio','Info Ratio'])
    # The slope column is named after the regressor; rename it to a generic 'Beta'.
    temp.rename(columns={logRFspread.columns[i]:'Beta'}, inplace=True)
    olstab.loc[curncy,:] = temp.values
olstab.T
# -
# #### Predicting Appreciation or Depreciation?
#
# Look at whether the betas are positive or negative.
#
# If the regressor (USD minus foreign risk-free rate) goes DOWN, then we expect
# - decreased UK and JP relative to US.
# - increased EU and SZ relative to US. (That is, the expected FX growth is higher.)
#
# Thus, if UK risk-free rate increases relative to US risk-free rate, we expect USD will appreciate.
# - This means FX growth is partially offsetting the interest-rate differential. But not fully. Beta is positive, but less than one.
#
# For EU and SZ,
# - this means that FX growth is exacerbating the returns from the interest rate differentials. Beta is negative, so in a sense, investors expect to gain twice: immediately from the risk-free rate differential, and again from the FX movement at the close of the position.
#
# In fact, the statements above are just about the point estimates, and may not be statistically significant.
#
# #### Strength of Predictability?
# These effects are extremely small! The R-squared stats for all four currencies are nearly 0.
# ## 3.4 The Dynamic Carry Trade
# +
# Forecasted excess log return implied by the regression:
# E[rx] = alpha + beta*spread - spread = alpha + (beta - 1)*spread,
# because the excess return subtracts the (USD - foreign) spread itself.
forecast = pd.DataFrame(columns=logFXgrowth.columns, index=logFXgrowth.index)
for i, curncy in enumerate(logFXgrowth.columns):
    forecast[curncy] = olstab.loc[curncy,'alpha'] + (olstab.loc[curncy,'Beta']-1) * logRFspread.iloc[:,i]
forecast
# -
# ### Forecast timing
# We do not need to lag the forecasts with `.shift()` because we already use the lagged calculation of the interest rate spread.
#
# Thus, the regressor is lagged, and thus is generating forecasts stamped with the date of their targeted value, as desired.
# Fraction of months, per currency, with a positive forecasted excess return.
forecast_positive = ((forecast.dropna() > 0).sum() / forecast.dropna().shape[0]).to_frame().T
# Fixed label typo: "forecastium" -> "forecasts".
forecast_positive.index = ['fraction with positive forecasts']
forecast_positive
forecast.plot(title='Forecasted Excess Log Returns', figsize=(10,6));
# #### Positive or Negative Forecasts?
#
# Though the static carry-trade of section 3.3 found that UK and EU have negative mean excess (log) returns, the dynamic carry trade finds that the time-varying forecast is often positive.
#
# For JP, the forecast is negative throughout the sample due to the much higher interest rate in USD not being offset by appreciation of the Yen to the USD.
#
# #### Improving the Trade?
#
# Just as in HW7, we can use forecasts of excess returns to construct trading positions, (weights,) which vary with the forecasts. The resulting trading strategy should be better than the static carry trade.
#
# That is, we could time the magnitude and direction of the currency trades instead of being passively 100\% invested in a currency according to the evidence in 3.3.
#
# It is analogous to timing the trading of SPY according to the Earnings-Price Ratio instead of being passively 100\% invested in SPY.
#
# Of course, it is an open question as to whether this will work given the weak R-squared in-sample, let alone out-of-sample.
# ## 3.5 Building the Dynamic Trade
#
# ### This is an optional extension. You were not asked to do this in the homework.
#
# Try implementing the forecasts with a simple proportional weighting of each forecast.
# Similar to the implementation in HW#7.
# +
# Passive benchmark: the unscaled excess log returns.
passive = logRX
# NOTE(review): this expanding-mean baseline is computed but not used in the
# lines below — confirm whether it belongs to a later cell or can be removed.
forecast_baseline = logRX.expanding().mean().shift(1)
# Simple proportional sizing: position equals 100x the forecasted excess return.
wts = 100 * forecast
forecast_returns = wts * passive.values
forecast_returns
# -
# ### Check the performance
#
# #### Also compare to the equally-weighted combination of the currency trades
#
# All stats are **much** better than the static carry trade of Section 3.1.
# - But this is an in-sample performance.
# - Open question whether the OOS version can beat the static table of Section 3.1.
#
# Note that currencies have low means and low vols, so in no case are we expecting to get equity-like return statistics.
# +
# Equal-weight average across the currency strategies, levered 10x so its
# scale is comparable to the individual strategies.
forecast_returns['Equally-Weighted'] = forecast_returns.mean(axis=1) * 10
performanceMetrics(forecast_returns,annualization=12)
# -
# ### How correlated are the active (forecasted) implementations to the passive returns?
#
# - Not all that correlated.
# - Note that we're interested in the magnitude, not the sign.
# - The active version is shorting JP, so no surprise that the correlation is negative.
# Correlation of each active (forecast-scaled) strategy with its passive
# counterpart. `drop` is a boolean parameter of DataFrame.corrwith; the
# original passed the string 'missing', which only behaved as intended
# because any non-empty string is truthy. Use the explicit boolean.
corr_tab = forecast_returns.corrwith(passive,drop=True,axis=0).to_frame().T
corr_tab.index = ['Corr. Passive to Active']
corr_tab
| discussions/Discussion_HW_8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Service-account key for the Firebase project; the JSON file must be
# present in the working directory.
cred = credentials.Certificate('crp-backend-server-b67475fc5e44.json')
# Initialize the default Firebase app with those credentials.
# NOTE(review): re-running this cell in the same kernel will likely fail
# because the default app would already be initialized — restart if needed.
firebase_admin.initialize_app(cred)
# Firestore client handle used for subsequent database reads/writes.
db = firestore.client()
# -
| crp-db-manager/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ishitha2003/-fmml20211052/blob/main/svm_classifier_tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="YqyQ5AzvSRtk"
# # Lecture 7 Project
# # SVM Classifier
#
# Coordinator: <NAME>
# + [markdown] id="TCXFmefqVzRP"
# # **Support Vector Machines Classifier**
#
# Hello friends,
#
# Support Vector Machines (SVMs in short) are supervised machine learning algorithms that are used for classification and regression purposes. In this kernel, we are going to build a Support Vector Machines classifier to classify a Pulsar star. The dataset used for this project is **Predicting a Pulsar Star**.
#
# So, let's get started.
# + [markdown] id="E8OzGWacVzRU"
# # Dataset description<a class="anchor" id="5"></a>
#
#
#
# I have used the **Predicting a Pulsar Star** dataset for this project.
#
# Pulsars are a rare type of Neutron star that produce radio emission detectable here on Earth. They are of considerable scientific interest as probes of space-time, the inter-stellar medium, and states of matter. Classification algorithms in particular are being adopted, which treat the data sets as binary classification problems. Here the legitimate pulsar examples form minority positive class and spurious examples form the majority negative class.
#
# The data set shared here contains 16,259 spurious examples caused by RFI/noise, and 1,639 real pulsar examples. Each row lists the variables first, and the class label is the final entry. The class labels used are 0 (negative) and 1 (positive).
#
#
# ### Attribute Information:
#
#
# Each candidate is described by 8 continuous variables, and a single class variable. The first four are simple statistics obtained from the integrated pulse profile. The remaining four variables are similarly obtained from the DM-SNR curve . These are summarised below:
#
# 1. Mean of the integrated profile.
#
# 2. Standard deviation of the integrated profile.
#
# 3. Excess kurtosis of the integrated profile.
#
# 4. Skewness of the integrated profile.
#
# 5. Mean of the DM-SNR curve.
#
# 6. Standard deviation of the DM-SNR curve.
#
# 7. Excess kurtosis of the DM-SNR curve.
#
# 8. Skewness of the DM-SNR curve.
#
# 9. Class
# + [markdown] id="lF8HwxYfVzRW"
# # **Import libraries** <a class="anchor" id="6"></a>
#
# + id="Cw-KMHBpVzRW"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # for data visualization
import seaborn as sns # for statistical data visualization
from sklearn.decomposition import PCA
# # Input data files are available in the "../input/" directory.
# # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# # Any results you write to the current directory are saved as output.
# + [markdown] id="IV<KEY>"
# # **Import dataset** <a class="anchor" id="7"></a>
#
# + id="cE8ynLpnVzRY"
# Path to the pulsar-star dataset (Colab-local); adjust if running elsewhere.
data = '/content/pulsar_stars.csv'
df = pd.read_csv(data)
# + [markdown] id="rhttS4DYVzRZ"
# # **Exploratory data analysis** <a class="anchor" id="8"></a>
#
# + colab={"base_uri": "https://localhost:8080/"} id="S-pGJWXaVzRZ" outputId="0ebdf3b9-3250-4a41-e3a5-fe9556855e01"
# view dimensions of dataset
df.shape
# + [markdown] id="V3c5I7DsVzRa"
# We can see that there are 17898 instances and 9 variables in the data set.
# + id="thtLz9iXVzRb"
# let's preview the dataset
df.head()
# Drop exact duplicate rows in place before any further analysis.
df.drop_duplicates(inplace=True)
# + [markdown] id="5FnHB5P8VzRb"
# We can see that there are 9 variables in the dataset. 8 are continuous variables and 1 is discrete variable. The discrete variable is `target_class` variable. It is also the target variable.
#
#
# Now, I will view the column names to check for leading and trailing spaces.
# + colab={"base_uri": "https://localhost:8080/"} id="Z9Dy5cqqVzRc" outputId="22ae3fb5-4ca0-4996-df39-c5e360d21a71"
# view the column names of the dataframe
col_names = df.columns
col_names
# + [markdown] id="Z6qeVQ2lVzRc"
# We can see that there are leading spaces (spaces at the start of the string name) in the dataframe. So, I will remove these leading spaces.
# + id="GWVur9UcVzRc"
# remove leading spaces from column names
# (str.strip also removes any trailing whitespace)
df.columns = df.columns.str.strip()
# + [markdown] id="idJjEK1xVzRd"
# I have removed the leading spaces from the column names. Let's again view the column names to confirm the same.
# + colab={"base_uri": "https://localhost:8080/"} id="JadJlM_pVzRd" outputId="56c98b3a-1972-4be6-ec54-1f1b2e3a517f"
# view column names again
df.columns
# + [markdown] id="COzp4k6jVzRd"
# We can see that the leading spaces are removed from the column name. But the column names are very long. So, I will make them short by renaming them.
# + id="R1Qrk7DyVzRd"
# rename column names to short forms:
# IP = integrated profile, DM-SNR = delta modulation / signal-to-noise ratio curve
df.columns = ['IP Mean', 'IP Sd', 'IP Kurtosis', 'IP Skewness',
              'DM-SNR Mean', 'DM-SNR Sd', 'DM-SNR Kurtosis', 'DM-SNR Skewness', 'target_class']
# + colab={"base_uri": "https://localhost:8080/"} id="6veeTdBGVzRd" outputId="1ca2fd82-4c9d-4b0b-dd65-a87091ea9d3b"
# view the renamed column names
df.columns
# + [markdown] id="N-deGsseVzRe"
# We can see that the column names are shortened. IP stands for `integrated profile` and DM-SNR stands for `delta modulation and signal to noise ratio`. Now, it is much more easy to work with the columns.
# + [markdown] id="de9Ai_plVzRe"
# Our target variable is the `target_class` column. So, I will check its distribution.
# + colab={"base_uri": "https://localhost:8080/"} id="MrDWeC51VzRe" outputId="f2f78f8c-7fb7-47cb-e7b7-f81f54c72987"
# check distribution of target_class column
df['target_class'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="Vt5NdI-GVzRe" outputId="dedc65d7-8838-455c-8429-2ebdf803d9ec"
# view the percentage (fraction) distribution of target_class column.
# `np.float` was a deprecated alias of the builtin `float` and was removed
# in NumPy 1.24; dividing by len(df) directly is equivalent and portable.
df['target_class'].value_counts()/len(df)
# + [markdown] id="mE_OokUuVzRe"
# We can see that percentage of observations of the class label `0` and `1` is 90.84% and 9.16%. So, this is a class imbalanced problem. I will deal with that in later section.
# + colab={"base_uri": "https://localhost:8080/"} id="6fhxu150VzRe" outputId="9ec79c92-52c6-497e-b21f-55bf08068362"
# view summary of dataset
df.info()
# + [markdown] id="5EOjnD14VzRf"
# We can see that there are no missing values in the dataset and all the variables are numerical variables.
# + [markdown] id="b0BQHl8cVzRf"
# ### Explore missing values in variables
# + colab={"base_uri": "https://localhost:8080/"} id="tz8yV3CKVzRf" outputId="74c55c62-e5fc-4950-d1d3-7abba5d6da98"
# check for missing values in variables
df.isnull().sum()
# + [markdown] id="iXuUjeqxVzRf"
# We can see that there are no missing values in the dataset.
# + [markdown] id="rYrQ8y8HVzRf"
# ### Outliers in numerical variables
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="jS--XX4WVzRf" outputId="c29d5a91-cd01-4296-d775-5a2ba231992c"
# view summary statistics in numerical variables
round(df.describe(),2)
# + [markdown] id="Tl5hgaWPVzRf"
# On closer inspection, we can suspect that all the continuous variables may contain outliers.
#
#
# I will draw boxplots to visualise outliers in the above variables.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="JIh_uJ4JVzRg" outputId="7e0078e6-83a5-4cd7-cb78-617bdcf45726"
# Boxplot of every continuous variable on a 4x2 grid, to visualise the
# outliers suspected from the summary statistics above.
plt.figure(figsize=(24,20))
box_cols = ['IP Mean', 'IP Sd', 'IP Kurtosis', 'IP Skewness',
            'DM-SNR Mean', 'DM-SNR Sd', 'DM-SNR Kurtosis', 'DM-SNR Skewness']
for slot, col in enumerate(box_cols, start=1):
    plt.subplot(4, 2, slot)
    fig = df.boxplot(column=col)
    fig.set_title('')
    fig.set_ylabel(col)
# + [markdown] id="tfN5e9pZVzRg"
# The above boxplots confirm that there are lot of outliers in these variables.
# + [markdown] id="czGeqkA6VzRg"
# ### Handle outliers with SVMs
#
#
# There are 2 variants of SVMs. They are `hard-margin variant of SVM` and `soft-margin variant of SVM`.
#
#
# The `hard-margin variant of SVM` does not deal with outliers. In this case, we want to find the hyperplane with maximum margin such that every training point is correctly classified with margin at least 1. This technique does not handle outliers well.
#
#
# Another version of SVM is called `soft-margin variant of SVM`. In this case, we can have a few points incorrectly classified or
# classified with a margin less than 1. But for every such point, we have to pay a penalty in the form of `C` parameter, which controls the outliers. `Low C` implies we are allowing more outliers and `high C` implies less outliers.
#
#
# The message is that since the dataset contains outliers, so the value of C should be high while training the model.
# + [markdown] id="NAYEs1koVzRg"
# ### Check the distribution of variables
#
#
# Now, I will plot the histograms to check distributions to find out if they are normal or skewed.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="gx7vhptZVzRg" outputId="4337f062-b6a9-4f39-ef7e-3a399e71317a"
# Histogram of each continuous variable on a 4x2 grid, to judge whether the
# distributions are normal or skewed.
plt.figure(figsize=(24,20))
hist_cols = ['IP Mean', 'IP Sd', 'IP Kurtosis', 'IP Skewness',
             'DM-SNR Mean', 'DM-SNR Sd', 'DM-SNR Kurtosis', 'DM-SNR Skewness']
for slot, col in enumerate(hist_cols, start=1):
    plt.subplot(4, 2, slot)
    fig = df[col].hist(bins=20)
    fig.set_xlabel(col)
    fig.set_ylabel('Number of pulsar stars')
# + [markdown] id="hl4aho1tVzRh"
# We can see that all the 8 continuous variables are skewed.
# + [markdown] id="XjLyLX1eVzRh"
# # **Declare feature vector and target variable** <a class="anchor" id="9"></a>
#
# + id="VbOJYqURVzRh"
# Feature matrix: the 8 continuous variables; target: the binary class label.
X = df.drop(['target_class'], axis=1)
y = df['target_class']
# + id="a5Y9wGYq6DYy"
# NOTE(review): PCA is fit on the full, unscaled dataset before the
# train/test split. Fine for exploration, but it would leak test-set
# information if the components were used for modelling — confirm intent.
pca = PCA(n_components=5)
principalComponents = pca.fit_transform(X)
# + [markdown] id="u3JY6yZsVzRh"
# # **Split data into separate training and test set** <a class="anchor" id="10"></a>
#
# + id="Xb2KrlxNVzRh"
# split X and y into training and testing sets
from sklearn.model_selection import train_test_split
# 80/20 split; the fixed random_state makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# + colab={"base_uri": "https://localhost:8080/"} id="FqThvr54VzRh" outputId="4bda52e4-61f6-408e-e0d0-4b9032ef599b"
# check the shape of X_train and X_test
X_train.shape, X_test.shape
# + id="6ZZlx3pg50Zw"
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2' , 'principal component 3','principal component 4' , "principal component 5"])
# + [markdown] id="VT94tITmVzRh"
# # **Feature Scaling** <a class="anchor" id="11"></a>
#
# + id="at1Nm2hTVzRh"
# Remember the feature names before scaling returns bare ndarrays.
cols = X_train.columns
# + id="blP2XPOyVzRh"
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training data only, then apply the same
# transformation to the test set (avoids test-set leakage).
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# + id="QTqhpvZ9VzRi"
# Rebuild DataFrames with the saved labels. Pass `cols` directly: the
# original `columns=[cols]` wrapped the Index in a list, which produces a
# one-level MultiIndex instead of a flat column Index.
X_train = pd.DataFrame(X_train, columns=cols)
# + id="sHVe2Nr2VzRi"
X_test = pd.DataFrame(X_test, columns=cols)
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="VC0_X67iVzRi" outputId="feafc7a4-d78f-4982-a769-42e6a0d1c131"
X_train.describe()
# + [markdown] id="9C7jHTbPVzRi"
# We now have `X_train` dataset ready to be fed into the Logistic Regression classifier. I will do it as follows.
# + [markdown] id="Y9lEFseZVzRi"
# # **Run SVM with default hyperparameters** <a class="anchor" id="12"></a>
#
#
# Default hyperparameter means C=1.0, kernel=`rbf` and gamma=`auto` among other parameters.
# + colab={"base_uri": "https://localhost:8080/"} id="xsVEJuG4VzRi" outputId="45bd2b2c-9eea-46e3-ded5-63ae19cb60e2"
# import SVC classifier
from sklearn.svm import SVC
# import metrics to compute accuracy
from sklearn.metrics import accuracy_score
# instantiate classifier with default hyperparameters
# NOTE(review): recent scikit-learn defaults to gamma='scale', not 'auto'
# as the text above states — confirm the library version in use.
svc=SVC()
# fit classifier to training set
svc.fit(X_train,y_train)
# make predictions on test set
y_pred=svc.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with default hyperparameters: {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="4iYROWRsVzRi"
# ### Run SVM with rbf kernel and C=100.0
#
#
# We have seen that there are outliers in our dataset. So, we should increase the value of C as higher C means fewer outliers.
# So, I will run SVM with kernel=`rbf` and C=100.0.
# + colab={"base_uri": "https://localhost:8080/"} id="qCrz3H-NVzRi" outputId="09a8f268-9493-429e-8249-b93eef2e25ac"
# instantiate classifier with rbf kernel and C=100
# (larger C penalizes margin violations more, i.e. allows fewer outliers)
svc=SVC(C=100.0)
# fit classifier to training set
svc.fit(X_train,y_train)
# make predictions on test set
y_pred=svc.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with rbf kernel and C=100.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="u-lRBzyCVzRj"
# We can see that we obtain a higher accuracy with C=100.0 as higher C means less outliers.
#
# Now, I will further increase the value of C=1000.0 and check accuracy.
# + [markdown] id="61ODZoO8VzRj"
# ### Run SVM with rbf kernel and C=1000.0
#
# + colab={"base_uri": "https://localhost:8080/"} id="nsgFlsNhVzRj" outputId="f5a0bbaf-4e43-4cff-e104-9a7bb8218b0b"
# instantiate classifier with rbf kernel and C=1000
# (note: `svc` is rebound, overwriting the C=100 model from the previous cell)
svc=SVC(C=1000.0)
# fit classifier to training set
svc.fit(X_train,y_train)
# make predictions on test set
y_pred=svc.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with rbf kernel and C=1000.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="Afr_FZRAVzRj"
# In this case, we can see that the accuracy had decreased with C=1000.0
# + [markdown] id="A73_eHu9VzRj"
# # **Run SVM with linear kernel** <a class="anchor" id="13"></a>
#
#
#
# ### Run SVM with linear kernel and C=1.0
# + colab={"base_uri": "https://localhost:8080/"} id="54ZCfo78VzRj" outputId="46467730-fad0-4e38-9a0f-1ce04d937789"
# instantiate classifier with linear kernel and C=1.0
linear_svc=SVC(kernel='linear', C=1.0)
# fit classifier to training set
linear_svc.fit(X_train,y_train)
# make predictions on test set
# (kept as y_pred_test: later cells reuse it for the confusion matrix
# and classification report)
y_pred_test=linear_svc.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with linear kernel and C=1.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred_test)))
# + [markdown] id="OxERZacBVzRj"
# ### Run SVM with linear kernel and C=100.0
# + colab={"base_uri": "https://localhost:8080/"} id="2m5AElfpVzRk" outputId="d22f7ab9-83de-4f13-b247-f26f17959402"
# instantiate classifier with linear kernel and C=100.0
linear_svc100=SVC(kernel='linear', C=100.0)
# fit classifier to training set
linear_svc100.fit(X_train, y_train)
# make predictions on test set
y_pred=linear_svc100.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with linear kernel and C=100.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="xz97rTjeVzRk"
# ### Run SVM with linear kernel and C=1000.0
# + colab={"base_uri": "https://localhost:8080/"} id="xMk7mjQzVzRk" outputId="94e5e57a-c28f-45db-d4d4-383ef201fdd4"
# instantiate classifier with linear kernel and C=1000.0
linear_svc1000=SVC(kernel='linear', C=1000.0)
# fit classifier to training set
linear_svc1000.fit(X_train, y_train)
# make predictions on test set
y_pred=linear_svc1000.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with linear kernel and C=1000.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="GqBOjPk5VzRk"
# We can see that we can obtain higher accuracy with C=100.0 and C=1000.0 as compared to C=1.0.
# + [markdown] id="0lSfan5vVzRk"
# Here, **y_test** are the true class labels and **y_pred** are the predicted class labels in the test-set.
# + [markdown] id="FDoYqxzWVzRk"
# ### Compare the train-set and test-set accuracy
#
#
# Now, I will compare the train-set and test-set accuracy to check for overfitting.
# + colab={"base_uri": "https://localhost:8080/"} id="UDH4E8flVzRk" outputId="af504d7b-9ffa-40cd-906b-cc0d7938a535"
# Predictions on the training set for the linear (C=1.0) model, used to
# compare train vs. test accuracy below.
y_pred_train = linear_svc.predict(X_train)
y_pred_train
# + colab={"base_uri": "https://localhost:8080/"} id="Dh5zugwVVzRl" outputId="346f83c2-90d8-4e57-9d66-0a7159377244"
print('Training-set accuracy score: {0:0.4f}'. format(accuracy_score(y_train, y_pred_train)))
# + [markdown] id="qjCpexCcVzRl"
# We can see that the training set and test-set accuracy are very much comparable.
# + [markdown] id="uVmGsb8xVzRl"
# ### Check for overfitting and underfitting
# + colab={"base_uri": "https://localhost:8080/"} id="odTpv7WxVzRl" outputId="61af4512-b462-4881-9b65-a3a5cd4a43a4"
# print the scores on training and test set
# (comparable scores indicate neither over- nor under-fitting)
print('Training set score: {:.4f}'.format(linear_svc.score(X_train, y_train)))
print('Test set score: {:.4f}'.format(linear_svc.score(X_test, y_test)))
# + [markdown] id="HTd2CzuVVzRl"
# The training-set accuracy score is 0.9783 while the test-set accuracy is 0.9830. These two values are quite comparable. So, there is no question of overfitting.
#
# + [markdown] id="t_Y41A0fVzRl"
# ### Compare model accuracy with null accuracy
#
#
# So, the model accuracy is 0.9832. But, we cannot say that our model is very good based on the above accuracy. We must compare it with the **null accuracy**. Null accuracy is the accuracy that could be achieved by always predicting the most frequent class.
#
# So, we should first check the class distribution in the test set.
# + colab={"base_uri": "https://localhost:8080/"} id="tMD67A0fVzRl" outputId="b0622fcd-b7ad-44f5-a248-d7752c2265ee"
# check class distribution in test set
y_test.value_counts()
# + [markdown] id="I1D2fVBsVzRl"
# We can see that the occurences of most frequent class `0` is 3306. So, we can calculate null accuracy by dividing 3306 by total number of occurences.
# + colab={"base_uri": "https://localhost:8080/"} id="QUYLhdrCVzRl" outputId="cd2b549e-24da-4571-87a5-f8a0d3990f22"
# check null accuracy score: the accuracy achieved by always predicting the
# most frequent class. Computed from y_test rather than the hard-coded
# counts (3306 / (3306 + 274)) so it stays correct if the split changes.
null_accuracy = y_test.value_counts().max() / len(y_test)
print('Null accuracy score: {0:0.4f}'. format(null_accuracy))
# + [markdown] id="_TTyb5D5VzRl"
# We can see that our model accuracy score is 0.9830 but null accuracy score is 0.9235. So, we can conclude that our SVM classifier is doing a very good job in predicting the class labels.
# + [markdown] id="SEpwhJ_OVzRm"
# # **Run SVM with polynomial kernel** <a class="anchor" id="14"></a>
#
#
# ### Run SVM with polynomial kernel and C=1.0
# + colab={"base_uri": "https://localhost:8080/"} id="1SRNjmZdVzRm" outputId="7b09d0c3-2042-4afe-c1bc-109bd3fd3d23"
# instantiate classifier with polynomial kernel and C=1.0
poly_svc=SVC(kernel='poly', C=1.0)
# fit classifier to training set
poly_svc.fit(X_train,y_train)
# make predictions on test set
y_pred=poly_svc.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with polynomial kernel and C=1.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="aHnioBzvVzRm"
# ### Run SVM with polynomial kernel and C=100.0
# + colab={"base_uri": "https://localhost:8080/"} id="tKvJ0G0mVzRm" outputId="d8e5311a-b1a5-420f-f283-918e78b3a88a"
# instantiate classifier with polynomial kernel and C=100.0
poly_svc100=SVC(kernel='poly', C=100.0)
# fit classifier to training set
poly_svc100.fit(X_train, y_train)
# make predictions on test set
y_pred=poly_svc100.predict(X_test)
# compute and print accuracy score
# (fixed: the message previously said "C=1.0" although this model uses C=100.0)
print('Model accuracy score with polynomial kernel and C=100.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="hqfdvaH1VzRm"
# Polynomial kernel gives poor performance. It may be overfitting the training set.
# + [markdown] id="s9lp9WTLVzRn"
# # **Run SVM with sigmoid kernel** <a class="anchor" id="15"></a>
#
#
# ### Run SVM with sigmoid kernel and C=1.0
# + colab={"base_uri": "https://localhost:8080/"} id="6Su-j-dnVzRn" outputId="297e879f-731d-44e3-e97e-3<PASSWORD>"
# instantiate classifier with sigmoid kernel and C=1.0
sigmoid_svc=SVC(kernel='sigmoid', C=1.0)
# fit classifier to training set
sigmoid_svc.fit(X_train,y_train)
# make predictions on test set
y_pred=sigmoid_svc.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with sigmoid kernel and C=1.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="LsM8E42LVzRn"
# ### Run SVM with sigmoid kernel and C=100.0
# + colab={"base_uri": "https://localhost:8080/"} id="tGjVAmtAVzRn" outputId="b027227e-a0a0-4354-ce6d-fa1c3029c206"
# instantiate classifier with sigmoid kernel and C=100.0
sigmoid_svc100=SVC(kernel='sigmoid', C=100.0)
# fit classifier to training set
sigmoid_svc100.fit(X_train,y_train)
# make predictions on test set
y_pred=sigmoid_svc100.predict(X_test)
# compute and print accuracy score
print('Model accuracy score with sigmoid kernel and C=100.0 : {0:0.4f}'. format(accuracy_score(y_test, y_pred)))
# + [markdown] id="qVEbNTL7VzRn"
# We can see that sigmoid kernel is also performing poorly just like with polynomial kernel.
# + [markdown] id="Kqr5Dv_qVzRn"
# ### Comments
#
#
# We get maximum accuracy with `rbf` and `linear` kernel with C=100.0. and the accuracy is 0.9832. Based on the above analysis we can conclude that our classification model accuracy is very good. Our model is doing a very good job in terms of predicting the class labels.
#
#
# But, this is not true. Here, we have an imbalanced dataset. The problem is that accuracy is an inadequate measure for quantifying predictive performance in the imbalanced dataset problem.
#
#
# So, we must explore alternative metrics that provide better guidance in selecting models. In particular, we would like to know the underlying distribution of values and the type of errors our classifier is making.
#
#
# One such metric to analyze the model performance in imbalanced classes problem is `Confusion matrix`.
# + [markdown] id="HiZLfAvGVzRo"
# # **Confusion matrix** <a class="anchor" id="16"></a>
#
# + colab={"base_uri": "https://localhost:8080/"} id="6knUPl5lVzRo" outputId="b7fb4787-5a3c-4a72-b305-87228549a3b9"
# Print the Confusion Matrix and slice it into four pieces
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred_test)
print('Confusion matrix\n\n', cm)
# NOTE(review): with sklearn's default label order [0, 1], cm[0,0] counts
# actual-0 / predicted-0 pairs. Labelling cm[0,0] "True Positives" therefore
# treats class 0 (non-pulsar) as the positive class — confirm that is intended.
print('\nTrue Positives(TP) = ', cm[0,0])
print('\nTrue Negatives(TN) = ', cm[1,1])
print('\nFalse Positives(FP) = ', cm[0,1])
print('\nFalse Negatives(FN) = ', cm[1,0])
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="OyvV0rHwVzRo" outputId="65333eef-5ecb-4190-b2dd-31b050fcaf1e"
# visualize confusion matrix with seaborn heatmap
# NOTE(review): sklearn's confusion_matrix places actual classes on rows and
# predicted classes on columns, but these labels mark rows as "Predict" and
# columns as "Actual" — the axis labels appear swapped; verify before relying
# on the plot.
cm_matrix = pd.DataFrame(data=cm, columns=['Actual Positive:1', 'Actual Negative:0'],
                         index=['Predict Positive:1', 'Predict Negative:0'])
sns.heatmap(cm_matrix, annot=True, fmt='d', cmap='YlGnBu')
# + [markdown] id="ozQdMqakVzRo"
# # **Classification metrics**
# + [markdown] id="EZLmr4gzVzRo"
# ### Classification Report
#
#
# **Classification report** is another way to evaluate the classification model performance. It displays the **precision**, **recall**, **f1** and **support** scores for the model. I have described these terms in later.
#
# We can print a classification report as follows:-
# + colab={"base_uri": "https://localhost:8080/"} id="317MbFiSVzRp" outputId="3b6d0b13-421c-4ae0-9124-75707244a512"
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_test))
# + [markdown] id="erfBsYXlVzRp"
# ### Classification accuracy
# + [markdown] id="JFM3PAQgSD45"
#
# + id="P9YaymdWVzRp"
# Slice the confusion matrix into its four cells.
# NOTE(review): this mirrors the cell above and assumes class 0 is the
# positive class (cm[0,0] = actual 0 predicted 0) -- confirm.
TP = cm[0,0]
TN = cm[1,1]
FP = cm[0,1]
FN = cm[1,0]
# + colab={"base_uri": "https://localhost:8080/"} id="T-_xKYKDVzRp" outputId="9d7d51b6-0deb-45eb-eed5-135620186b4f"
# print classification accuracy: fraction of all predictions that are correct
classification_accuracy = (TP + TN) / float(TP + TN + FP + FN)
print('Classification accuracy : {0:0.4f}'.format(classification_accuracy))
# + [markdown] id="gbBfUXt8VzRp"
# ### Classification error
# + colab={"base_uri": "https://localhost:8080/"} id="x92-r8isVzRp" outputId="c9a7d921-e1fe-466a-bc7c-816054750477"
# print classification error
classification_error = (FP + FN) / float(TP + TN + FP + FN)
print('Classification error : {0:0.4f}'.format(classification_error))
# + [markdown] id="yuNPKjirVzRq"
# ### Precision
#
# + colab={"base_uri": "https://localhost:8080/"} id="otOcLBsdVzRq" outputId="51bc3116-7e69-4789-b2c5-becb301ccb45"
# print precision score
precision = TP / float(TP + FP)
print('Precision : {0:0.4f}'.format(precision))
# + [markdown] id="QuIUjKatVzRt"
# ### Recall
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="YiX2IaGqVzRt" outputId="73aa5be8-fbb7-4286-afc6-42cbdf01954c"
recall = TP / float(TP + FN)
print('Recall or Sensitivity : {0:0.4f}'.format(recall))
# + [markdown] id="iwom3iJ3VzRt"
# ### True Positive Rate
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="h0j6VfWiVzRt" outputId="311744d5-b5b1-4756-8330-cef84ee57389"
true_positive_rate = TP / float(TP + FN)
print('True Positive Rate : {0:0.4f}'.format(true_positive_rate))
# + [markdown] id="GwbMZQr0VzRt"
# ### False Positive Rate
# + colab={"base_uri": "https://localhost:8080/"} id="hMCZ-AAqVzRu" outputId="9ca8acb0-dcda-48ba-809d-2311c46f59a7"
false_positive_rate = FP / float(FP + TN)
print('False Positive Rate : {0:0.4f}'.format(false_positive_rate))
# + [markdown] id="2kql-K4lVzRu"
# ### Specificity
# + colab={"base_uri": "https://localhost:8080/"} id="USwUdHDbVzRu" outputId="b074d970-8953-41b7-ea34-2a742e7511fb"
specificity = TN / (TN + FP)
print('Specificity : {0:0.4f}'.format(specificity))
# + [markdown] id="jjdW8PfePM6f"
# # **CLASSIFICATION USING OTHER CLASSIFIERS**
# + id="u5lAvYYAPHuZ"
# Train and score three alternative classifiers on the same split.
# NOTE(review): DecisionTreeClassifier, KNeighborsClassifier,
# RandomForestClassifier and accuracy_score must be imported earlier in the
# notebook (not visible in this section) -- confirm the imports exist.
# Shallow tree (depth 4) with entropy impurity.
DT = DecisionTreeClassifier(max_depth = 4, criterion = 'entropy')
DT.fit(X_train, y_train)
dt_yhat = DT.predict(X_test)
# + id="fd-Ro0NHPS5A"
# Caveat from the discussion above: plain accuracy is misleading on this
# imbalanced dataset; compare with the confusion-matrix metrics too.
print('Accuracy score of the Decision Tree model is {}'.format(accuracy_score(y_test, dt_yhat)))
# + id="Ii_VGbavPXgJ"
# k-nearest neighbours with k = 7.
n = 7
KNN = KNeighborsClassifier(n_neighbors = n)
KNN.fit(X_train, y_train)
knn_yhat = KNN.predict(X_test)
# + id="FTmHKsVCPaeh"
print('Accuracy score of the K-Nearest Neighbors model is {}'.format(accuracy_score(y_test, knn_yhat)))
# + id="huqz2B8EPdhF"
# Random forest with the same depth cap as the single tree.
rf = RandomForestClassifier(max_depth = 4)
rf.fit(X_train, y_train)
rf_yhat = rf.predict(X_test)
# + id="f3w1dRK4PieO"
print('Accuracy score of the Random Forest model is {}'.format(accuracy_score(y_test, rf_yhat)))
# + [markdown] id="B3TUOJVfVzRu"
# # **Results and conclusion**
#
#
#
# 1. There are outliers in our dataset. So, as we increase the value of C to limit fewer outliers, the accuracy increased. This is true with different kinds of kernels.
#
# 2. We get maximum accuracy with `rbf` and `linear` kernel with C=100.0 and the accuracy is 0.9832. So, we can conclude that our model is doing a very good job in terms of predicting the class labels. But, this is not true. Here, we have an imbalanced dataset. Accuracy is an inadequate measure for quantifying predictive performance in the imbalanced dataset problem. So, we must explore `confusion matrix` that provide better guidance in selecting models.
# + [markdown] id="3_9LmOItVzRu"
# # **References**
#
# The work done in this project is inspired from following books and websites:-
#
# 1. Hands on Machine Learning with Scikit-Learn and Tensorflow by <NAME>
#
# 5. https://en.wikipedia.org/wiki/Support-vector_machine
#
# 6. https://www.datacamp.com/community/tutorials/svm-classification-scikit-learn-python
#
# 7. http://dataaspirant.com/2017/01/13/support-vector-machine-algorithm/
#
# 8. https://www.ritchieng.com/machine-learning-evaluate-classification-model/
#
# 9. https://en.wikipedia.org/wiki/Kernel_method
#
# 10. https://en.wikipedia.org/wiki/Polynomial_kernel
#
# 11. https://en.wikipedia.org/wiki/Radial_basis_function_kernel
#
# 12. https://data-flair.training/blogs/svm-kernel-functions/
| svm_classifier_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Getting the mapping between genetic and genomic features is done separately from annotating the genetic features since the annotation of the genetic features has to happen before genomic features, though we need the genomic features to get the mapping between genomic and genetic. This work therefore comes after getting the genomic and genetic features. In summary, the following describes the necessary order:
# 1) genetic features
# 2) genomic features
# 3) mapping between genomic and genetic features
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pandas as pd
import seaborn as sns
import copy
from IPython.display import display
import os, sys, itertools, csv
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from mutil.gene import get_coding_genetic_target_len_d, get_intergenic_len_d
from mutil.genome import get_feature_hit_set
pd.options.display.max_columns = 100
all_muts_df = pd.read_pickle("./data/2_2_df.pkl")
display(all_muts_df.shape, all_muts_df.head())
# +
# # # DEBUG
# all_muts_df = all_muts_df[all_muts_df.exp=="SSW_GLU_XYL"].copy()
# +
def get_genetic_links_d(mut_row):
    """Map each genetic feature of one mutation row to the genomic features
    it overlaps.

    The mapping is done purely by coordinate range (via get_feature_hit_set)
    because there is no explicit mapping between genomic and genetic
    features. Returns {genetic RegulonDB ID: [genomic RegulonDB IDs]}.
    """
    # Build the genomic-feature frame once; it is reused for every genetic
    # feature's range query.
    genomic_df = pd.DataFrame(mut_row["genomic features"])
    return {
        feat["RegulonDB ID"]: list(
            get_feature_hit_set(feat["range"], genomic_df, "range", "RegulonDB ID")
        )
        for feat in mut_row["genetic features"]
    }
all_muts_df["genetic feature links"] = all_muts_df.apply(get_genetic_links_d, axis=1)
all_muts_df.head()
# +
# I think I'm only going to have to implement the remove of a genetic feature function once.
def filter_feats(feat_json, feats_to_remove):
    """Return a copy of *feat_json* without the features whose "RegulonDB ID"
    is in *feats_to_remove*.

    The input list is not modified and the order of surviving features is
    preserved.
    """
    # Single pass instead of the original nested loop of list.remove() calls
    # (O(n) instead of O(n*m)); set membership is O(1).
    return [d for d in feat_json if d["RegulonDB ID"] not in feats_to_remove]
i = [{
    'name': 'g1',
    'RegulonDB ID': '1',
    'range': (1, 2),
    'feature type': 'gene'},
    {'name': 'g2',
     'RegulonDB ID': '2',
     'range': (2, 3),
     'feature type': 'gene'}]
e = [{'name': 'g1', 'RegulonDB ID': '1', 'range': (1, 2), 'feature type': 'gene'}]
o = filter_feats(i, {'2'})
assert(o == e)
# -
gene_df = pd.read_pickle("./data/gene_df.pkl")
gene_df.head()
# +
# fix for ASW-1628
# remove links to coding regions for non-coding features that overlap in both coding and non-coding regions
# assuming that all overlapping features are non-coding.
# want to filter out non-coding features from coding links.
# The overlap between of a cis-reg feature with an intergenic and coding region is real
# though its not the relationship we want to model.
def _filter_coding_noncoding_overlapping_links(gen_link_d):
    """Remove links between coding genetic features and non-coding (cis-reg)
    genomic features that also overlap an intergenic region.

    *gen_link_d* maps genetic-feature RegulonDB ID -> list of linked genomic
    RegulonDB IDs. Intergenic (non-coding) genetic features are identified by
    a '/' in their ID ("geneA/geneB").

    Returns (filtered_link_d, gen_feats_to_remove), where the second element
    is the set of genetic-feature IDs whose only link was removed (the
    caller drops those features entirely).
    """
    # Work on a deep copy so the input row's dict/lists are never mutated.
    ret_d = copy.deepcopy(gen_link_d)
    gen_feats_to_remove = set()
    # using these for iteration
    coding_links = dict()
    noncoding_links = dict()
    for k, v in gen_link_d.items():
        if '/' in k:
            noncoding_links[k] = v
        else:
            coding_links[k] = v
    # changing the given gen_link_d to return
    for ncl_geno_feats in noncoding_links.values():
        for ncl_geno_feat in ncl_geno_feats:
            # There's the potential to filter for overlapping genes here as well
            for cl_gen_feat, cl_geno_feats in coding_links.items():
                # Check if non-coding feature linked with intergenic region (ncl_geno_feats) overlaps (linked) with coding region.
                # NOTE(review): gene_df is a module-level DataFrame loaded
                # from gene_df.pkl; genes are skipped so only cis-reg
                # features are filtered out of coding links.
                if ncl_geno_feat not in set(gene_df["GENE_ID"]):  # Ensuring feat to filter is cis-reg, otherwise don't filter
                    if ncl_geno_feat in cl_geno_feats:
                        if len(cl_geno_feats) == 1:
                            # Last remaining link: drop the whole coding entry.
                            gen_feats_to_remove.add(cl_gen_feat)
                            del ret_d[cl_gen_feat]
                        else:
                            # remove geno_feat link to the coding gen_feat (ncl_geno_feat == to feat in cl_geno_feats that want to remove)
                            if ncl_geno_feat in ret_d[cl_gen_feat]:
                                ret_d[cl_gen_feat].remove(ncl_geno_feat)
    return ret_d, gen_feats_to_remove
i = {'ECK120001080': ['ECK125229290', 'ECK125229291', 'ECK120001080'],
'ECK125229290': ['ECK125229290', 'ECK125229291', 'ECK120001080'],
'ECK125229291': ['ECK125229290', 'ECK125229291', 'ECK120001080'],
'ECK125229291/ECK125256962': ['ECK120001080']} # This should be removed since ECK120001080 == crl gene, though will be done in a filtering step below
e = {'ECK120001080': ['ECK125229290', 'ECK125229291', 'ECK120001080'],
'ECK125229290': ['ECK125229290', 'ECK125229291', 'ECK120001080'],
'ECK125229291': ['ECK125229290', 'ECK125229291', 'ECK120001080'],
'ECK125229291/ECK125256962': ['ECK120001080']}
o, _ = _filter_coding_noncoding_overlapping_links(i)
assert(o == e)
i = {'ECK120000854': ['ECK120000854', 'ECK125144791'],
'ECK120000799/ECK120000854': ['ECK125144791']}
e = {'ECK120000854': ['ECK120000854'],
'ECK120000799/ECK120000854': ['ECK125144791']}
o, _ = _filter_coding_noncoding_overlapping_links(i)
assert(o == e)
# Remove the coding region entry if the only item linking is removed
i = {'ECK120000854': ['ECK125144791'],
'ECK120000799/ECK120000854': ['ECK125144791']}
e = {'ECK120000799/ECK120000854': ['ECK125144791']}
o, _ = _filter_coding_noncoding_overlapping_links(i)
assert(o == e)
# +
def _f(r):
link_d, gen_feats_to_remove = _filter_coding_noncoding_overlapping_links(r["genetic feature links"])
gen_feats = filter_feats(r["genetic features"], gen_feats_to_remove)
return pd.Series([gen_feats, link_d])
# replicates the necessary parts of a dataframe row
# have to use RegulonDB IDs in testing for the gene_df condition in _filter_overlapping_genes_link above to work.
i = {
"genetic features": [{'name': 'g1',
'RegulonDB ID': '1'},
{'name': 'g1/g2',
'RegulonDB ID': '1/2'}, # Intergenic region
{'name': 'f3',
'RegulonDB ID': '3'},
],
"genetic feature links": {'1': ['3'],
'1/2': ['3']}
}
e = pd.Series([
[{'name': 'g1/g2', 'RegulonDB ID': '1/2'}, {'name': 'f3', 'RegulonDB ID': '3'}],
{'1/2': ['3']}
])
o = _f(i)
assert(o.equals(e))
all_muts_df[["genetic features", "genetic feature links"]] = all_muts_df.apply(_f, axis=1)
# +
# fix for ASW-1628
# 1) Check if genomic feature is a gene
# 2) If genetic feature it links to isn't the same gene, remove the link
# 2.1) If the length of links is one, just remove the linked genetic feature altogether like I had with overlapping non-coding features.
# filters out links due to overlapping genes
# and when genes for some reason get linked with integenic regions
def _filter_gene_link_only_to_itself(link_d):
    """Restrict gene-to-gene links so a gene only links to itself, and drop
    any link from a gene to an intergenic genetic feature.

    *link_d* maps genetic-feature RegulonDB ID -> list of linked genomic
    RegulonDB IDs; intergenic genetic features contain '/' in their ID.

    Returns (filtered_link_d, gen_feats_to_remove), the second element being
    the genetic-feature IDs whose last link was removed.
    """
    ret_d = copy.deepcopy(link_d)
    gen_feats_to_remove = set()
    # Perf fix: gene_df.GENE_ID.unique() was recomputed on every inner-loop
    # iteration, and `x in ndarray` is a linear scan; one frozen set makes
    # each membership test O(1) with identical results.
    gene_ids = set(gene_df.GENE_ID.unique())
    coding_links = dict()
    noncoding_links = dict()
    for k, v in link_d.items():
        if '/' in k:
            noncoding_links[k] = v
        else:
            coding_links[k] = v
    # filter out links due to overlapping genes
    for cl_gen_feat, cl_geno_feats in coding_links.items():
        for cl_geno_feat in cl_geno_feats:
            # if both gen and geno feat are genes.
            if (cl_gen_feat in gene_ids) & (cl_geno_feat in gene_ids):
                # geno feat isn't gen feat, remove link.
                if cl_gen_feat != cl_geno_feat:
                    if len(cl_geno_feats) == 1:
                        gen_feats_to_remove.add(cl_gen_feat)
                        del ret_d[cl_gen_feat]
                    else:
                        ret_d[cl_gen_feat].remove(cl_geno_feat)
    # filters for when genes for some reason get linked with intergenic regions
    # the simplest thing to do is just remove any link of a gene to an intergenic region.
    for ncl_gen_feat, ncl_geno_feats in noncoding_links.items():
        for ncl_geno_feat in ncl_geno_feats:
            if (ncl_gen_feat not in gene_ids) & (ncl_geno_feat in gene_ids):
                if len(ncl_geno_feats) == 1:
                    gen_feats_to_remove.add(ncl_gen_feat)
                    del ret_d[ncl_gen_feat]
                else:
                    ret_d[ncl_gen_feat].remove(ncl_geno_feat)
    return ret_d, gen_feats_to_remove
i = {'ECK120001080': ['ECK120001080', 'ECK125229291', 'ECK125229290'],
'ECK125229290': ['ECK120001080', 'ECK125229291', 'ECK125229290'],
'ECK125229291': ['ECK120001080', 'ECK125229291', 'ECK125229290'],}
e = {'ECK120001080': ['ECK120001080'],
'ECK125229290': ['ECK125229290'],
'ECK125229291': ['ECK125229291']}
o, _ = _filter_gene_link_only_to_itself(i)
assert(o == e)
i = {'ECK120002224': ['ECK120010816', 'ECK120002224', 'ECK120000393'],
'ECK120000393': ['ECK120002224', 'ECK120000393']}
e = {'ECK120002224': ['ECK120010816', 'ECK120002224'],
'ECK120000393': ['ECK120000393']}
o, _ = _filter_gene_link_only_to_itself(i)
assert(o == e)
i = {'ECK120003617': ['ECK120023915', 'ECK120003617'],
'ECK120023915': ['ECK120003616', 'ECK120023915'],
'ECK120003616': ['ECK120003616']}
e = {'ECK120003617': ['ECK120003617'],
'ECK120023915': ['ECK120023915'],
'ECK120003616': ['ECK120003616']}
o, _ = _filter_gene_link_only_to_itself(i)
assert(o == e)
i = {'ECK120001080': ['ECK120001080'],
'ECK125229291/ECK125256962': ['ECK120001080']}
e = {'ECK120001080': ['ECK120001080']}
es = {'ECK125229291/ECK125256962'}
o, os = _filter_gene_link_only_to_itself(i)
assert(o == e)
assert(os == es)
def _f(row):
    """Apply the gene-only-links filter to one mutation row; return the
    cleaned (genetic features, genetic feature links) pair as a Series."""
    filtered_links, dropped_ids = _filter_gene_link_only_to_itself(
        row["genetic feature links"])
    kept_feats = filter_feats(row["genetic features"], dropped_ids)
    return pd.Series([kept_feats, filtered_links])
all_muts_df[["genetic features", "genetic feature links"]] = all_muts_df.apply(_f, axis=1)
# +
# fix for ASW-1628
# remove links between cis-regulatory features and overlapping genes
def _filter_cis_reg_overlapping_genes(link_d):
    """Let each cis-regulatory genomic feature be linked by only the first
    gene that claims it; later genes overlapping the same cis-reg feature
    lose that link.

    Returns (filtered_link_d, gen_feats_to_remove), the second element being
    the genetic-feature IDs whose last link was removed.
    """
    ret_d = copy.deepcopy(link_d)
    gen_feats_to_remove = set()
    # This issue likely is only happening with genes and not intergenic regions.
    coding_links = dict()
    for k, v in link_d.items():
        if '/' not in k:
            coding_links[k] = v
    # Perf: one set instead of recomputing gene_df.GENE_ID.unique() per test.
    gene_ids = set(gene_df.GENE_ID.unique())
    assigned_geno_feats = set()
    for gen_feat, geno_feats in coding_links.items():
        for geno_feat in geno_feats:
            # ensuring that it's a cis-regulatory feature
            if (geno_feat not in gene_ids) & ("/" not in geno_feat):
                if geno_feat in assigned_geno_feats:  # If the geno feat has already been assigned, remove it
                    if len(geno_feats) == 1:
                        # BUGFIX: this branch referenced `gen_feats`, an
                        # undefined name, and raised NameError whenever a
                        # single-link genetic feature had to be dropped.
                        gen_feats_to_remove.add(gen_feat)
                        del ret_d[gen_feat]
                    else:
                        ret_d[gen_feat].remove(geno_feat)
                else:
                    assigned_geno_feats.add(geno_feat)
    return ret_d, gen_feats_to_remove
i = {'ECK120003617': ['ECK120003617'],
'ECK120023915': ['ECK120013472', 'ECK120023915', 'ECK125095446'],
'ECK120003616': ['ECK120013472', 'ECK120003616', 'ECK125095446'],
'ECK120000314/ECK120023915': ['ECK120017150'],
'ECK120003617/ECK120003618': ['ECK120034252']}
e = {'ECK120003617': ['ECK120003617'],
'ECK120023915': ['ECK120013472', 'ECK120023915', 'ECK125095446'],
'ECK120003616': ['ECK120003616'],
'ECK120000314/ECK120023915': ['ECK120017150'],
'ECK120003617/ECK120003618': ['ECK120034252']}
o, _ = _filter_cis_reg_overlapping_genes(i)
assert(o == e)
def _f(r):
link_d, gen_feats_to_remove = _filter_cis_reg_overlapping_genes(
r["genetic feature links"])
gen_feats = filter_feats(r["genetic features"], gen_feats_to_remove)
return pd.Series([gen_feats, link_d])
all_muts_df[["genetic features", "genetic feature links"]] = all_muts_df.apply(_f, axis=1)
# -
all_muts_df.to_pickle("./data/2_2_1_df.pkl")
| 2_2_1_get_genetic_links.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scipy.cluster.hierarchy as shc
from sklearn.preprocessing import normalize
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ### GLOBAL VARIABLES
DATAPATH = '../../../data/'
MODELPATH = '../../../models/'
NJOBS = -1   # use all cores (not used in this notebook)
SEED = 10    # RNG seed (not used in this notebook)
# ### LOAD DATASET
train_features = pd.read_pickle(DATAPATH + 'processed/X.pkl')
train_features.shape
# Replace missing values with 0 before scaling.
train_features.fillna(0, inplace=True)
# sklearn normalize() scales each ROW (sample) to unit L2 norm.
data_scaled = normalize(train_features)
# ### TRAINING
# Dendrogram of Ward-linkage hierarchical clustering; the horizontal line at
# y=5.5 marks a candidate cut height (presumably chosen by eye -- confirm).
plt.figure(figsize=(14, 7))
plt.title("Dendrograms")
plt.axhline(y=5.5, color='r', linestyle='--')
dend = shc.dendrogram(shc.linkage(data_scaled, method='ward'))
| notebooks/models/cluster/hierarchical_cluster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Flatness v.s. Generalization - part 2
import numpy as np
from matplotlib import pyplot as plt
from keras.datasets import mnist
from keras.layers import *
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras import backend as K
# ### Data Preprocessing
# Load Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize
x_train = x_train / 255
x_test = x_test / 255
# Get One-Hot Labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
# ### Build Model
def build_model(print_summary=False):
    """Construct the small MNIST MLP: 28x28 input -> 16 -> 16 -> 10 softmax.

    Args:
        print_summary: bool, whether or not to print the model summary,
            default: False

    Returns:
        model: an (uncompiled) keras Sequential model
    """
    layers = [
        Flatten(input_shape=(28, 28), name="input"),
        Dense(16, activation="relu", name="fc1"),
        Dense(16, activation="relu", name="fc2"),
        Dense(10, activation="softmax", name="output"),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    if print_summary:
        model.summary()
    return model
# ### Train Model
# #### Batch size=8
model8 = build_model(True)
model8.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history8 = model8.fit(x_train[:3000], y_train[:3000], batch_size=8, epochs=100, validation_data=(x_test[:1000], y_test[:1000]))
# BUGFIX: this experiment compares training batch sizes, but every fit() call
# below used batch_size=8; each model now trains with the batch size its
# section header advertises.
# #### Batch size=64
model64 = build_model(True)
model64.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history64 = model64.fit(x_train[:3000], y_train[:3000], batch_size=64, epochs=100, validation_data=(x_test[:1000], y_test[:1000]))
# #### Batch size=128
model128 = build_model(True)
model128.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history128 = model128.fit(x_train[:3000], y_train[:3000], batch_size=128, epochs=100, validation_data=(x_test[:1000], y_test[:1000]))
# #### Batch size=512
model512 = build_model(True)
model512.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history512 = model512.fit(x_train[:3000], y_train[:3000], batch_size=512, epochs=100, validation_data=(x_test[:1000], y_test[:1000]))
# #### Batch size=1024
model1024 = build_model(True)
model1024.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history1024 = model1024.fit(x_train[:3000], y_train[:3000], batch_size=1024, epochs=100, validation_data=(x_test[:1000], y_test[:1000]))
# ### Calculate Sensitivity
def cal_sensitivity(model, x, y):
    """Return the 2-norm of the gradient of the model's loss w.r.t. its
    input ("sensitivity") evaluated at the batch (x, y).

    NOTE(review): relies on tf1-era Keras internals (model.sample_weights,
    model.targets, model.total_loss, K.function), which only exist after
    compile() and were removed in TF2 eager mode -- confirm the Keras
    version this notebook targets.
    """
    # Define tensorflow placeholder
    input_tensors = [
        model.inputs[0],          # input
        model.sample_weights[0],  # sample weights
        model.targets[0],         # labels
        K.learning_phase()        # train or test mode
    ]
    # d(loss)/d(input): how strongly the loss reacts to input perturbation.
    sensitivity = K.gradients(model.total_loss, model.inputs[0])
    # Define K.function()
    get_gradients = K.function(inputs=input_tensors, outputs=sensitivity)
    inputs = [
        x,                        # X input data
        np.ones((x.shape[0],)),   # sample weights (uniform)
        y,                        # y labels
        0                         # learning phase in TEST mode
    ]
    # Call K.function()
    g = get_gradients(inputs)
    # Apply 2-norm over the whole gradient tensor.
    g_0 = np.sum(g[0]**2)
    g_all = np.sqrt(g_0)
    return g_all
sensitivity = []
sensitivity.append(cal_sensitivity(model8, x_train[0].reshape(1,28,28), y_train[0].reshape(1,10)))
| 0709.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Anaconda (Python 3)
# language: python
# name: anaconda3
# ---
import numpy as np
import numba as nb #uncomment for numba
import matplotlib.pyplot as plt
# %matplotlib inline
# +
def julia(c):
    """Return a vectorized escape-time function for the Julia set of *c*.

    The returned function maps a complex point z to the first iteration n at
    which |z| exceeds 2 under z -> z**2 + c, or 0 if it does not escape
    within 100 iterations.
    """
    @np.vectorize #comment for numba
    #@nb.vectorize #uncomment for numba
    def j(z):
        for n in range(100):
            z = z**2 + c
            if abs(z) > 2:
                return n
        return 0
    return j
j = julia(0.345 + 0.45j)
#@<EMAIL> #uncomment for numba
def cplane(min=-1.5, max=1.5, points=4000):
    """Return a points x points grid of complex numbers covering
    [min, max] x [min, max].

    BUGFIX: the parameters were previously ignored (bounds and grid size
    were hard-coded inside the body). The default for *points* now matches
    the old hard-coded 4000, so the zero-argument call below behaves exactly
    as before. NOTE: min/max shadow the builtins but the parameter names are
    kept for interface compatibility.
    """
    r = np.linspace(min, max, points)
    x, y = np.meshgrid(r, r)
    return x + y * 1j
# %time z = cplane()
# %time jset = j(z)
#plt.figure(1, (20,15))
#plt.imshow(jset, cmap=plt.cm.bone)
#plt.xticks([])
#plt.yticks([])
#plt.title("Julia Set : c = 0.345 + 0.45j")
#plt.show()
# +
def julia(c):
    """Escape-time function for the Julia set of *c* (numba-vectorized)."""
    #@np.vectorize #comment for numba
    @nb.vectorize #uncomment for numba
    def j(z):
        # Iterate z -> z**2 + c; return the iteration at which |z| escapes 2,
        # or 0 if it stays bounded for 100 iterations.
        for n in range(100):
            z = z**2 + c
            if abs(z) > 2:
                return n
        return 0
    return j
j = julia(0.345 + 0.45j)
@nb.jit #uncomment for numba
def cplane(min=-1.5, max=1.5, points=10000):
    # NOTE(review): the parameters are ignored -- bounds and grid size (5000
    # here vs 4000 in the numpy cell above) are hard-coded, and min/max
    # shadow the builtins; the timing comparison is therefore not
    # like-for-like -- confirm intended grid sizes.
    r = np.linspace(-1.5, 1.5, 5000)
    x, y = np.meshgrid(r,r)
    z = x + y * 1j
    return z
# %time z = cplane()
# %time jset = j(z)
#plt.figure(1, (20,15))
#plt.imshow(jset, cmap=plt.cm.bone)
#plt.xticks([])
#plt.yticks([])
#plt.title("Julia Set : c = 0.345 + 0.45j")
#plt.show()
# -
# We commented out the figure plots because our kernels kept dying when we tried to plot both figures. So for uniformity of testing, we just got rid of both figures.<br/>
# <br/>
# The first block of code is for testing the speed of numpy for computing the julia sets that we had been working on earlier in the semester. Calculations run through numpy and hence the regular compiler path of python took about 48 seconds.<br/>
# But then once we inserted the numba decorators and changed the compiler path to go through the LLVM for the second block, we saw a drastic speed-up: the total calculation took about 3 seconds.<br/>
# A speed up of more than 15 times just for this fairly small computation is very impressive. Overall this is just a very useful little decorator.
#
# It failed to speed up the Runge-Kutta approximation, possibly due to incorrect use, but also possibly due to the way that Runge-Kutta needs to be implemented.
| cw-13-juliasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
print (type(3))
print (type('hola'))
print (type(13.5))
print (type(True))
def mul(a, b, c=None):
    """Return a*b, or a*b*c when a third factor is given.

    Idiom fix: the original used the string 'None' as a sentinel default and
    compared it with ==; a real None compared with `is` avoids accidentally
    treating the literal string 'None' as "no third argument".
    """
    if c is None:
        return a*b
    return a*b*c
mul(3,5)
mul(2,3,4)
print(type(mul))
var1=mul
var1(4,8)
var1(3,6,10)
var2=mul
var2(2,5)
var1(2,4,2)
var1(3,2,6)
t=(1,"2",3,"a")
for item in t:
print(item)
l=[1,2,3,4,5]
i=0
while(i!=len(l)):
print(l[i])
i=i+1
print(min(l))
print(max(l))
l.append(16)
print(l)
print(l[0])
cadena = "<NAME>"
cadena.split('a')
| ExamenU1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RobAltena/AdventOfCode2020/blob/main/day5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="n9TJMYEol0-L"
import base64
import requests
import numpy as np
req = requests.get("https://raw.githubusercontent.com/RobAltena/AdventOfCode2020/main/advent_day5_input.txt").text
# + id="ILjFlPWtmPFr"
# Convert the silly code to a binary number.
bin_lines = req.replace('B', '1')\
.replace('F', '0')\
.replace('R', '1')\
.replace('L', '0').split()
# Parse the binaruy numbers to decimal.
dec_list = []
for line in bin_lines:
dec = int(line, 2)
dec_list.append(dec)
# Get the max with numpy:
seats = np.array(dec_list)
print('highest seat ID on a boarding pass: ', seats.max())
# + id="FeSPans4nXb7"
all_seats = np.arange(seats.min(), seats.max())
mask = np.isin(all_seats, seats, invert=True)
# There should be only one seat unassigned:
print('My seat: ', all_seats[mask])
| day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: zipline
# language: python
# name: zipline
# ---
# If you want to skip ahead to other streaming examples, you can go to any of the following links:
# - https://plot.ly/python/multiple-trace-streaming/
# - https://plot.ly/python/geo-streaming/
# - https://plot.ly/python/subplot-streaming
# <hr>
# Check which version is installed on your machine and please upgrade if needed.
import plotly
plotly.__version__
# Now let's load the dependencies/packages that we need in order to get a simple stream going.
import numpy as np
import plotly.plotly as py
import plotly.tools as tls
import plotly.graph_objs as go
# #### Getting Set Up
# Before you start streaming, you're going to need some [stream tokens](https://plot.ly/settings/api). You will need **one unique stream token for every `trace object` ** you wish to stream to. Thus if you have two traces that you want to plot and stream, you're going to require two unique stream tokens. Notice that more tokens can be added via the settings section of your Plotly profile: https://plot.ly/settings/api
# 
# Now in the same way that you set your credentials, as shown in [Getting Started](https://plot.ly/python/getting-started/), you can add stream tokens to your credentials file.
stream_ids = tls.get_credentials_file()['stream_ids']
print(stream_ids)
# You'll see that `stream_ids` will contain a list of the stream tokens we added to the credentials file.
# #### An Example to Get You Started
# Now that you have some stream tokens to play with, we're going to go over how we're going to put these into action.
# There are two main objects that will be created and used for streaming:
# - Stream Id Object
# - Stream link Object
#
# We're going to look at these objects sequentially as we work through our first streaming example. For our first example, we're going to be streaming random data to a single scatter trace, and get something that behaves like the following:
#
# 
# ##### Stream Id Object
# The `Stream Id Object` comes bundled in the `graph_objs` package. We can then call help to see the description of this object:
help(go.Stream)
# As we can see, the `Stream Id Object` is a dictionary-like object that takes two parameters, and has all the methods that are assoicated with dictionaries.
# We will need one of these objects for each of trace that we wish to stream data to.
# We'll now create a single stream token for our streaming example, which will include one scatter trace.
# +
# Get stream id from stream id list
stream_id = stream_ids[0]
# Make instance of stream id object
stream_1 = go.Stream(
token=stream_id, # link stream id to 'token' key
maxpoints=80 # keep a max of 80 pts on screen
)
# -
# The `'maxpoints'` key sets the maxiumum number of points to keep on the plotting surface at any given time.
# More over, if you want to avoid the use of these `Stream Id Objects`, you can just create a dictionary with at least the token parameter defined, for example:
stream_1 = dict(token=stream_id, maxpoints=60)
# Now that we have our `Stream Id Object` ready to go, we can set up our plot. We do this in the same way that we would any other plot, the only thing is that we now have to set the stream parameter in our trace object.
# +
# Initialize trace of streaming plot by embedding the unique stream_id
trace1 = go.Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream_1 # (!) embed stream id, 1 per trace
)
data = go.Data([trace1])
# -
# Then, add a title to the layout object and initialize your Plotly streaming plot:
# +
# Add title to layout object
layout = go.Layout(title='Time Series')
# Make a figure object
fig = go.Figure(data=data, layout=layout)
# Send fig to Plotly, initialize streaming plot, open new tab
py.iplot(fig, filename='python-streaming')
# -
# #### Stream Link Object
# The Stream Link Object is what will be used to communicate with the Plotly server in order to update the data contained in your trace objects. This object is in the `plotly.plotly` object, an can be reference with `py.Stream`
help(py.Stream) # run help() of the Stream link object
# You're going to need to set up one of these stream link objects for each trace you wish to stream data to.
# <br>Below we'll set one up for the scatter trace we have in our plot.
# +
# We will provide the stream link object the same token that's associated with the trace we wish to stream to
s = py.Stream(stream_id)
# We then open a connection
s.open()
# -
# We can now use the Stream Link object `s` in order to `stream` data to our plot.
# <br>As an example, we will send a time stream and some random numbers:
# +
# (*) Import module keep track and format current time
import datetime
import time
i = 0 # a counter
k = 5 # some shape parameter
# Delay start of stream by 5 sec (time to switch tabs)
time.sleep(5)
# BUGFIX: the loop was `while True` and never incremented `i`, so the stream
# ran forever (the s.close() below was unreachable) and the cos(k*i/50.)
# terms never changed. Stream 200 points, advancing the counter each second,
# matching the "just 200 points" description in the text below.
while i < 200:
    # Current time on x-axis, random numbers on y-axis
    x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    y = (np.cos(k*i/50.)*np.cos(i/50.)+np.random.randn(1))[0]
    # Send data to your plot
    s.write(dict(x=x, y=y))
    # Write numbers to stream to append current data on plot,
    # write lists to overwrite existing data on plot
    time.sleep(1)  # plot a point every second
    i += 1
# Close the stream when done plotting
s.close()
# -
# Below you can see an example of the same plot, but streaming indefinitely instead of just 200 points.
# <br>Note that the time points correspond to the internal clock on the servers, which is in UTC time.
# Embed never-ending time series streaming plot
tls.embed('streaming-demos','12')
# Anyone can view your streaming graph in real-time. All viewers will see the same data simultaneously (try it! Open up this notebook up in two different browser windows and observer
# that the graphs are plotting identical data!).
# #### Summmary
# In summary, these are the steps required in order to start streaming to a trace object:
#
# 1. Make a `stream id object` (`Stream` in the `plotly.graph_objs` module) containing the `streaming token`(which is found in the **settings** of your Plotly account) and the maximum number of points to be keep on screen (which is optional).
# 2. Provide the `stream id object` as the key value for the `stream` attribute in your trace object.
# 3. Make a `stream link object` (`py.Stream`) containing the same stream token as the `stream id object` and open the stream with the `.open()` method.
# 4. Write data to the plot/your trace with the `.write()` method. When done, close the stream with the `.close()` method.
#
#
| docs/plotting/plotly/python_streaming.ipynb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Given n points on a 2D plane, find the maximum number of points that lie on the same straight line.
#
# Example 1:
# ```
# Input: [[1,1],[2,2],[3,3]]
# Output: 3
#
# Explanation:
# ^
# |
# | o
# | o
# | o
# +------------->
# 0 1 2 3 4
# ```
# Example 2:
# ```
# Input: [[1,1],[3,2],[5,3],[4,1],[2,3],[1,4]]
# Output: 4
#
# Explanation:
# ^
# |
# | o
# | o o
# | o
# | o o
# +------------------->
# 0 1 2 3 4 5 6
# ```
# 这里我们可以用斜率来记录两条边是否在同一条直线。如果考虑再细一点,由于double有精度的问题,斜率最后用分数来表示。
# +
from decimal import *
# Definition for a point.
class Point(object):
    def __init__(self, a=0, b=0):
        self.x = a
        self.y = b

class Solution(object):
    def maxPoints(self, points):
        """
        :type points: List[Point]
        :rtype: int

        For each anchor point, bucket every other point by the exact slope
        of the line through the anchor; the answer is the largest bucket
        plus any duplicates of the anchor itself.

        BUGFIX: Decimal division at its default 28-digit precision rounds
        near-identical slopes (e.g. 94911150/94911151 vs 94911151/94911152)
        to the same value, causing the Wrong Answer recorded below. Fraction
        keeps slopes exact for integer coordinates. Debug prints removed.
        """
        from fractions import Fraction
        size = len(points)
        if size < 3:
            return size
        ans = 0
        for i in range(size):
            d = {'inf': 0}      # 'inf' bucket holds vertical lines
            same_point = 1      # the anchor counts for itself
            for j in range(size):
                if i == j:
                    continue
                elif points[i].x == points[j].x and points[i].y != points[j].y:
                    d['inf'] += 1
                elif points[i].x != points[j].x:
                    # Exact rational slope; assumes integer coordinates
                    # (as in the problem statement).
                    k = Fraction(points[i].y - points[j].y,
                                 points[i].x - points[j].x)
                    if k in d:
                        d[k] += 1
                    else:
                        d[k] = 1
                else:
                    same_point += 1
            ans = max(ans, max(d.values()) + same_point)
        return ans
# -
# [[0,0],[94911151,94911150],[94911152,94911151]]
if __name__ == '__main__':
    # LeetCode regression case: three nearly collinear large-integer points.
    sample = [Point(0, 0), Point(94911151, 94911150), Point(94911152, 94911151)]
    print(Solution().maxPoints(sample))
# Submission Result:
# ## <span style="color:blue"> <em> Wrong Answer </em> </span>
# ```
# Input:
# [[0,0],[94911151,94911150],[94911152,94911151]]
# Output:
# 3
# Expected:
# 2
# ```
| 149.MaxPointsonaLine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3.7 TF-2.3.0
# language: python
# name: python3.7-tf2.3.0
# ---
# +
# %load_ext autoreload
# %autoreload 2
# Notebook-wide setup: generic scientific-python imports plus the
# cluster-specific axion/GMF packages used in the cells below.
import sys, os, pickle, h5py
import numpy as np
from scipy.stats import norm
from scipy import stats, optimize, ndimage, signal
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from astropy import units as u
from astropy.coordinates import SkyCoord
from IPython.display import clear_output
import copy
##############
#For plotting
import matplotlib.cm as cm
from matplotlib import rc
from matplotlib import rcParams
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rcParams['text.usetex'] = False#True
#rcParams['text.latex.unicode'] = True
#rcParams['text.fontsize'] = 18
print("starting the python script!")
########################
kev_to_erg = 1.60218e-9  # keV -> erg conversion factor
os.sys.path.append("../python")
#import plotting
import copy
from scipy.interpolate import interp1d
from scipy.optimize import minimize
# Cluster-specific location of the Galactic magnetic field (gmf) package.
sys.path.append("/clusterfs/heptheory/brsafdi/brsafdi/transfer/bsafdi/github/gmf") #"/global/cfs/cdirs/m3166/bsafdi/gmf")
import gmf
import pygedm
import healpy as hp
#data_dir = "/nfs/turbo/bsafdi/bsafdi/github/SSCaxion/data/"
# -
# # Methods
#
# ## Methods related to the magnetic field profile
# +
# Different B-field models
GMF0=gmf.GMF()  # default Galactic magnetic-field model from the gmf package
GMFP = gmf.GMF_Pshirkov(mode="ASS")  # Pshirkov model; mode "ASS" (alternative: "BSS")
def arctanM(x, y):
    """Return np.arctan2(x, y) mapped onto the range [0, 2*pi).

    The first argument is the numerator (sine-like) component, exactly as
    for np.arctan2; callers in this notebook pass (y, x).
    """
    angle = np.arctan2(x, y)
    return angle if angle >= 0 else angle + 2 * np.pi
def B_GMF(x_vec):
    """Galactic magnetic field from the GMF0 model at a given position.

    x_vec: position in pc, origin at the GC; the Sun sits at roughly
    [-8.5e3, 0.0, 0.0] (check!!).  Returns the Cartesian field vector,
    assembled from the model's disk, halo and "X" components, which are
    returned in cylindrical (rho, phi, z) components.
    """
    x, y, z = x_vec * 1e-3  # pc -> kpc, the units the gmf package expects
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = arctanM(y, x)
    # Cylindrical unit vectors expressed in Cartesian coordinates.
    rho_hat = np.array([x, y, 0.0]) / rho
    phi_hat = np.array([-y, x, 0.0]) / rho
    z_hat = np.array([0.0, 0.0, 1.0])
    rho_a, phi_a, z_a = np.array([rho]), np.array([phi]), np.array([z])
    disk, _ = GMF0.Bdisk(rho_a, phi_a, z_a)
    halo, _ = GMF0.Bhalo(rho_a, z_a)
    Bx, _ = GMF0.BX(rho_a, z_a)
    b_rho = disk[0] + halo[0] + Bx[0]
    b_phi = disk[1] + halo[1] + Bx[1]
    b_z = disk[2] + halo[2] + Bx[2]
    return rho_hat * b_rho + phi_hat * b_phi + z_hat * b_z
def B_Psh(x_vec):
    """Pshirkov Galactic magnetic field (GMFP) at a given position.

    x_vec: position in pc, origin at the GC; the Sun sits at roughly
    [-8.5e3, 0.0, 0.0] (check!!).  Same construction as B_GMF, but this
    model has only disk and halo components (no "X" component).
    """
    x, y, z = x_vec * 1e-3  # pc -> kpc
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = arctanM(y, x)
    rho_hat = np.array([x, y, 0.0]) / rho
    phi_hat = np.array([-y, x, 0.0]) / rho
    z_hat = np.array([0.0, 0.0, 1.0])
    rho_a, phi_a, z_a = np.array([rho]), np.array([phi]), np.array([z])
    disk, _ = GMFP.Bdisk(rho_a, phi_a, z_a)
    halo, _ = GMFP.Bhalo(rho_a, z_a)
    b_rho = disk[0] + halo[0]
    b_phi = disk[1] + halo[1]
    b_z = disk[2] + halo[2]
    return rho_hat * b_rho + phi_hat * b_phi + z_hat * b_z
# -
# ## Methods related to the magnetic field geometry for axion conversion (given LOS)
class conversion:
    """Geometry of a single line of sight (LOS) for axion-photon conversion.

    Given a target position ``xf`` and an observer position ``xi`` (both
    in pc, Galactocentric Cartesian coordinates), precompute the LOS
    length, direction, and two orthogonal transverse unit vectors
    (_k1, _k2) onto which the magnetic field ``Bfunc`` is projected.
    """

    def __init__(self, xf, Bfunc, xi=np.array([-8.5e3, 0.0, 0.00])):
        # see https://arxiv.org/pdf/1704.05063.pdf for height
        # consider later using Zi = 0.025, for example
        self._xf = xf
        self._xi = xi
        self._Bfunc = Bfunc
        self._compute_geometry()

    def _compute_geometry(self):
        """Cache the LOS distance, unit direction, and transverse basis."""
        separation = self._xf - self._xi
        self._d = np.linalg.norm(separation)
        self._rhat = separation / self._d
        # Use the representative of the LOS direction with positive z so
        # the transverse basis below is well defined for either sign.
        N = self._rhat if self._rhat[2] > 0 else -self._rhat
        r1, r2 = N[0], N[1]
        root = np.sqrt(1 - r1**2 - r2**2)
        self._k1 = np.array([-root, 0, r1]) / np.sqrt(1 - r2**2)
        self._k2 = np.sqrt(1 - r2**2) * np.array(
            [-r1 * r2 / (1 - r2**2), 1, -r2 * root / (1 - r2**2)])

    def _return_x(self, r):
        """Position (pc) a distance ``r`` (pc) from the observer along the LOS."""
        return self._xi + r * self._rhat

    def _return_Bs(self, r):
        """Transverse field components (B.k1, B.k2) at LOS distance ``r`` (pc)."""
        Bf = self._Bfunc(self._return_x(r))
        return np.dot(Bf, self._k1), np.dot(Bf, self._k2)
# ## The method that computes the conversion probability
# +
import numpy as np
# SciPy renamed `cumtrapz` to `cumulative_trapezoid` and removed the old
# name in SciPy 1.14; try the new name first so this cell runs on both.
try:
    from scipy.integrate import cumulative_trapezoid as cumtrapz
except ImportError:  # older SciPy only exposes the original name
    from scipy.integrate import cumtrapz

def return_prob(z_array,B_x,B_y,m_a=1e-15,E=10.0,g_agg = 1e-12,**kwargs):
    '''
    return_prob: Axion-photon conversion probability calculation as given in https://arxiv.org/pdf/1903.05088.pdf Eq. (5).
    Input:
    ----
    z_array: units [pc]. array of z-values going from the source to the Earth.
    B_x, B_y: units [\mu G]. arrays of the orthogonal components of the magnetic field at the z values corresponding to z_array
    m_a: units [eV]. mass of the axion.
    g_agg: units [GeV^{-1}]. axion-photon coupling constant
    E: energy of axion in keV
    n_e: units [cm^{-3}]. free electron density at z values in z_array. if empty then set to zero
    Returns
    ----
    prob: unitless, axion-photon conversion probability
    '''
    ## First we check to see if an array of `n_e` values was given
    if 'n_e' in kwargs.keys():
        n_e = kwargs['n_e']
    else:
        n_e = np.zeros(len(z_array)) #else set to zeros
    ## Here we precompute some useful things
    omega_pl = 1.19e-12*np.sqrt((n_e/1e-3)) #$\omega_{pl}$ [units of eV]
    Delta_parr = -0.0781*(omega_pl/1e-10)**2/(E/10.0) # $\Delta_{||}$ [units of pc^{-1}]
    axion_mass_factor = -0.0781*(m_a/1e-10)**2 /(E/10.0) #$\Delta_a$ [units pc^{-1}]
    norm_factor =(g_agg/1e-12)**2*8.8e12*(1e-12)**2/4. #units conversion factor that accounts for g_agg [dimensionless]
    Delta_parr_int = cumtrapz(Delta_parr,z_array) #The integral of $\Delta_{||}$ appearing in exponential of Eq. 5 [dimensionless]
    z_centers = (z_array[1:]+z_array[:-1])/2. #We have to integrate twice, so going to downbin the z_array once
    B_x_centers = (B_x[1:]+B_x[:-1])/2. #down-binned B_x's
    B_y_centers = (B_y[1:]+B_y[:-1])/2. #down-binned B_y's
    prob_x_integrand = B_x_centers*np.exp(1j*axion_mass_factor*z_centers -1j*Delta_parr_int ) #The B_x part of the integral
    prob_y_integrand = B_y_centers*np.exp(1j*axion_mass_factor*z_centers -1j*Delta_parr_int ) #The B_y part of integral
    prob_x_complex = np.trapz(prob_x_integrand,z_centers) #Do the integral
    prob_y_complex = np.trapz(prob_y_integrand,z_centers) #Do the integral
    # Below, we apply the normalization and compute the conversion probability. The np.real is just there to convert back to real number type
    prob = np.real(norm_factor*(prob_x_complex*np.conj(prob_x_complex)+prob_y_complex*np.conj(prob_y_complex)))
    return prob
# -
# ## The main class
#
# This compute the conversion probability given and $\ell$ and a $b$. `n_prec` is a precision factor for the numerical integration, and `dist` controls the distance out to which you integrate. `prob_masses` is an array in eV of the axion masses to consider, and `prob_energies` are the energies in keV to compute the conversion probabilities over.
class make_conv_map:
    """Axion-photon conversion probabilities along one line of sight.

    For a sky direction (ell, b) this samples the Galactic magnetic field
    (B_GMF) and the free-electron density (pygedm) along the line of sight,
    then evaluates return_prob for every (mass, energy) pair.  Results are
    stored in self.conv_probs, shape (len(prob_masses), len(prob_energies)).
    """
    def __init__(self,prob_masses,prob_energies,ell,b,n_prec = 500,x_sun = -8.5,dist=50):
        '''
        prob_masses: axion masses in eV
        prob_energies: axion energies in keV
        ell, b in degrees
        n_prec: number of sample points along the line of sight
        dist: kpc, distance out to integrate
        x_sun: x-coordinate of the Sun in kpc
        '''
        self._prob_masses = prob_masses
        self._prob_energies = prob_energies
        self._ell = ell*np.pi/180. #radians
        self._b = b*np.pi/180. #radians
        self._n_prec = n_prec
        self._x_sun = x_sun
        self._d = dist
        self._make_vecs()
        self._do_conv_prob()
    def _make_vecs(self):
        '''
        compute the coordinates of the target in Galactic coordinates centered at the GC
        '''
        x = 1e3*(self._x_sun+self._d*np.cos(self._b)*np.cos(self._ell))
        y = 1e3*(self._d*np.cos(self._b)*np.sin(self._ell))
        z = 1e3*(self._d*np.sin(self._b))
        xvec= np.array([x,y,z]) #in pc
        #print(xvec)
        # now do B-field: sample the two transverse components at n_prec
        # points between the Sun and the target.
        cv = conversion(xvec,B_GMF,xi=np.array([self._x_sun*1e3,0.0,0.00]))
        z_vec = np.linspace(0,cv._d,self._n_prec)
        B1_array = np.zeros(len(z_vec))
        B2_array = np.zeros(len(z_vec))
        for i in range(len(z_vec)):
            z = z_vec[i]
            B1,B2 = cv._return_Bs(z)
            B1_array[i] = B1
            B2_array[i] = B2
        self._z_vec = z_vec
        self._B1_array = B1_array
        self._B2_array = B2_array
        # now do ne-profile
        Rsun_16 = 8.3e3  # pc; solar radius used for the rescaled LOS below
        ne_arr = np.zeros(len(z_vec))
        #ne_arr_2001_GC = np.zeros(len(z_vec_GC))
        #ne_arr_2020_GC = np.zeros(len(z_vec_GC))
        # NOTE(review): the LOS is rescaled by Rsun_16/|x_sun| — presumably
        # to match the electron-density model's assumed R_sun = 8.3 kpc;
        # confirm against the pygedm conventions.
        z_vec_16 = np.linspace(0,cv._d/(np.abs(self._x_sun)*1e3)*Rsun_16,self._n_prec)
        for i in range(len(ne_arr)):
            x = cv._return_x(z_vec_16[i])+np.array([-Rsun_16+np.abs(self._x_sun)*1e3,0,0])
            # Axis swap (x,y,z) -> (y,-x,z): presumably converts to the
            # coordinate convention pygedm expects — TODO confirm.
            x_coppy = np.zeros(np.shape(x))
            x_coppy[0] = +x[1]
            x_coppy[1] = -x[0]
            x_coppy[2] = x[2]
            #print x
            ne_arr[i] = pygedm.calculate_electron_density_xyz(x_coppy[0],x_coppy[1],x_coppy[2]).value
        self._ne_arr = ne_arr
    def _do_conv_prob(self):
        # Evaluate the conversion probability on the (mass, energy) grid.
        self.conv_probs = np.zeros((len(self._prob_masses),len(self._prob_energies)))
        for i in range(len(self._prob_masses)):
            for j in range(len(self._prob_energies)):
                self.conv_probs[i,j] = return_prob(self._z_vec,self._B1_array,self._B2_array,E=self._prob_energies[j],m_a=self._prob_masses[i], n_e=self._ne_arr)
# ### Run an example
# +
prob_masses = np.geomspace(1e-13, 1e-9, 2) # axion masses in eV
prob_energies = np.linspace(1.0, 10.0, 2) # axion energies in keV
nside=8
npix = hp.nside2npix(nside)  # nside=8 -> 768 pixels over the full sky
# conversion probability per (mass, energy, sky pixel)
res_array = np.zeros((len(prob_masses),len(prob_energies),npix))
for i in range(npix):
    print("i = ",i," of ", npix)
    print("frac done is ", i / float(npix))
    theta,phi = hp.pix2ang(nside,i) #theta,phi in rad
    # Convert healpix angles to Galactic longitude/latitude in degrees.
    ell = phi*180./np.pi
    b = (np.pi/2.-theta)*180./np.pi
    mcp = make_conv_map(prob_masses,prob_energies,ell,b)
    res_array[::,::,i] =mcp.conv_probs #make_conv_map(prob_masses,prob_energies,ell,b)
# -
hp.mollview(res_array[0,0],title="E = "+ str(prob_energies[0]) + " keV")
hp.mollview(res_array[0,-1],title="E = "+ str(prob_energies[-1]) + " keV")
| ipython/GAC_clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: aws
# language: python
# name: aws
# ---
import boto3
from boto3 import client
import json
s3 = boto3.resource('s3')
# +
bucket_name = 'canopy-production-ml'
pc_bucket = s3.Bucket(bucket_name)
# -
print(pc_bucket)
# Collect every bucket object whose key contains 'yes' (the positive-class chips).
chips = []
for obj in pc_bucket.objects.all():
    if 'yes' in obj.key:
        chips.append(obj)
# Exploratory: peek at the 5th path segment of the first few keys.
# NOTE(review): `id` shadows the builtin and is unused after this loop.
for chip in chips[0:4]:
    id = chip.key.split("/")[4]
# +
j_file = "train_test_polygons.json"
with open(j_file, 'r') as j:
train_test = json.loads(j.read())
# -
len(train_test["train"])
len(train_test["test"])
chips[0]
type(chips[0].key)
chips[0].key
chips[0].key.split('/')[-1]
dir(pc_bucket)
help(pc_bucket.copy)
# +
def train_test_s3_copy(chips, j_file, bucket_name='canopy-production-ml',
                       base_path='chips/cloudfree-merge-polygons/split/'):
    """Copy chip objects into train_val/ or test/ prefixes within the bucket.

    chips: iterable of S3 ObjectSummary objects (keys contain the polygon
        id as the 6th path segment).
    j_file: path to a JSON file with "train"/"test" lists of polygon ids.
    bucket_name: destination (and source) S3 bucket.
    base_path: key prefix under which the split folders are created.
    """
    bucket = s3.Bucket(bucket_name)
    with open(j_file, 'r') as j:
        train_test_file = json.load(j)
    # Set lookup: O(1) membership test per chip instead of scanning the list.
    test_ids = set(train_test_file["test"])
    length = len(chips)
    for i, chip in enumerate(chips, 1):
        print(f'Processing chip {i} of {length}', end='\r', flush=True)
        copy_source = {
            'Bucket': bucket_name,
            'Key': chip.key
        }
        polygon_id = int(chip.key.split("/")[5])
        filename = chip.key.split('/')[-1]
        split = 'test' if polygon_id in test_ids else 'train_val'
        # Bug fix: the destination key must end with the chip's filename;
        # previously `filename` was computed but never used.
        new_key = f'{base_path}{split}/{polygon_id}/{filename}'
        bucket.copy(copy_source, new_key)
# -
j_file = "train_test_polygons.json"
train_test_s3_copy(chips, j_file)
print('s3://canopy-production-ml/' + 'chips/cloudfree-merge-polygons/split/')
| data-prep/train_test_s3_David.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zg02FZzDyEqd"
# ##### Copyright 2019 The TensorFlow Authors.
#
# + cellView="form" id="2mapZ9afGJ69"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="sMYQvJuBi7MS"
# # Keras 前処理レイヤーを使って構造化データを分類する
# + [markdown] id="8FaL4wnr22oy"
# <table class="tfo-notebook-buttons" align="left">
# <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/structured_data/preprocessing_layers"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.org で表示</a> </td>
# <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/structured_data/preprocessing_layers.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a> </td>
# <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/structured_data/preprocessing_layers.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png"> GitHub でソースを表示</a> </td>
# <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/structured_data/preprocessing_layers.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a> </td>
# </table>
# + [markdown] id="Nna1tOKxyEqe"
# このチュートリアルでは、構造化データ(CSV のタブ区切りデータ)を分類する方法を実演します。モデルの定義には [Keras](https://www.tensorflow.org/guide/keras) を使用し、CSV の列からモデルのトレーニングに使用する特徴量にマッピングするための懸け橋として[前処理レイヤー](https://www.tensorflow.org/guide/keras/preprocessing_layers)を使用します。このチュートリアルに含まれるコードは、次のことを行います。
#
# - [Pandas](https://pandas.pydata.org/) を使って CSV ファイルを読み込みます。
# - [tf.data](https://www.tensorflow.org/guide/datasets) を使用して、行をバッチ化してシャッフルする入力パイプラインを構築します。
# - Keras 前処理レイヤーを使ってモデルをトレーニングするために使用する特徴量に、CSV のカラムをマッピングします。
# - Keras を使用して、モデルを構築、トレーニング、および評価します。
# + [markdown] id="h5xkXCicjFQD"
# 注意: このチュートリアルは、「[特徴量カラムを使って構造化データを分類する](https://www.tensorflow.org/tutorials/structured_data/feature_columns)」に類似しています。このバージョンでは、新しい実験的 Keras [前処理レイヤー](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing)を使用しており、`tf.feature_column` を使っていません。Keras 前処理レイヤーはより直感的であり、デプロイを単純化できるようにモデル内に簡単に含めることができます。
# + [markdown] id="ZHxU1FMNpomc"
# ## Dataset
#
# PetFinder [データセット](https://www.kaggle.com/c/petfinder-adoption-prediction)の簡易バージョンを使用します。CSV には数千行のデータが含まれており各行にペットに関する記述、各列にその属性が含まれています。この情報を使用して、ペットが引き取り可能であるかどうかを予測します。
#
# 以下は、このデータセットの説明です。数値とカテゴリカルのカラムがあることに注意してください。自由テキストのカラムもありますが、このチュートリアルでは使用しません。
#
# カラム | 説明 | 特徴量タイプ | データ型
# --- | --- | --- | ---
# Type | 動物の種類(犬、猫) | カテゴリカル | 文字列
# Age | ペットの年齢 | 数値 | 整数
# Breed1 | ペットの主な品種 | カテゴリカル | 文字列
# Color1 | ペットの毛色 1 | カテゴリカル | 文字列
# Color2 | ペットの毛色 2 | カテゴリカル | 文字列
# MaturitySize | 成獣時のサイズ | カテゴリカル | 文字列
# FurLength | 毛の長さ | カテゴリカル | 文字列
# Vaccinated | 予防接種済み | カテゴリカル | 文字列
# Sterilized | 不妊手術済み | カテゴリカル | 文字列
# Health | 健康状態 | カテゴリカル | 文字列
# Fee | 引き取り料 | 数値 | 整数
# Description | ペットのプロフィール | テキスト | 文字列
# PhotoAmt | アップロードされたペットの写真数 | 数値 | 整数
# AdoptionSpeed | 引き取りまでの期間 | 分類 | 整数
# + [markdown] id="vjFbdBldyEqf"
# ## TensorFlow とその他のライブラリをインポートする
#
# + id="S_BdyQlPjfDW"
# !pip install -q sklearn
# + id="LklnLlt6yEqf"
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# + id="TKU7RyoQGVKB"
tf.__version__
# + [markdown] id="UXvBvobayEqi"
# ## Pandas を使用してデータフレームを作成する
#
# [Pandas](https://pandas.pydata.org/) は、構造化データの読み込みと処理を支援するユーティリティが多数含まれる Python ライブラリです。Pandas を使用し、URL からデータセットをダウンロードしてデータフレームに読み込みます。
# + id="qJ4Ajn-YyEqj"
import pathlib
dataset_url = 'http://storage.googleapis.com/download.tensorflow.org/data/petfinder-mini.zip'
csv_file = 'datasets/petfinder-mini/petfinder-mini.csv'
tf.keras.utils.get_file('petfinder_mini.zip', dataset_url,
extract=True, cache_dir='.')
dataframe = pd.read_csv(csv_file)
# + id="3uiq4hoIGyXI"
dataframe.head()
# + [markdown] id="C3zDbrozyEqq"
# ## ターゲット変数を作成する
#
# Kaggle コンペティションでは、ペットが引き取られるまでの期間(1 週目、1 か月目、3 か月目など)を予測することがタスクとなっていますが、このチュートリアルでは、このタスクを単純化しましょう。ここでは、このタスクを二項分類問題にし、単にペットが引き取られるかどうかのみを予測します。
#
# ラベルカラムを変更すると、0 は引き取られなかった、1 は引き取られたことを示すようになります。
# + id="wmMDc46-yEqq"
# In the original dataset "4" indicates the pet was not adopted.
dataframe['target'] = np.where(dataframe['AdoptionSpeed']==4, 0, 1)
# Drop un-used columns.
dataframe = dataframe.drop(columns=['AdoptionSpeed', 'Description'])
# + [markdown] id="sp0NCbswyEqs"
# ## データフレームを train、validation、および test に分割する
#
# ダウンロードしたデータセットは単純な CSV ファイルです。これを train、validation、および test セットに分割します。
# + id="qT6HdyEwyEqt"
train, test = train_test_split(dataframe, test_size=0.2)
train, val = train_test_split(train, test_size=0.2)
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
# + [markdown] id="C_7uVu-xyEqv"
# ## tf.data を使用して入力パイプラインを作成する
#
# 次に、データをシャッフルしてバッチ化するために、データフレームを [tf.data](https://www.tensorflow.org/guide/datasets) でラップします。非常に大型(メモリに収まらないほどの規模)の CSV ファイルを処理している場合は、tf.data を使用してディスクから直接読み取ります。この方法は、このチュートリアルでは説明していません。
# + id="7r4j-1lRyEqw"
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
    """Wrap a Pandas DataFrame in a batched (optionally shuffled) tf.data.Dataset.

    The 'target' column is popped off the (copied) frame and used as labels.
    """
    features = dataframe.copy()
    labels = features.pop('target')
    ds = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(features))
    return ds.batch(batch_size).prefetch(batch_size)
# + [markdown] id="PYxIXH579uS9"
# 入力パイプラインを作成したので、それを呼び出して、戻されるデータのフォーマットを確認しましょう。出力の可読性を維持するために、小さなバッチを使用しました。
# + id="tYiNH-QI96Jo"
batch_size = 5
train_ds = df_to_dataset(train, batch_size=batch_size)
# + id="nFYir6S8HgIJ"
[(train_features, label_batch)] = train_ds.take(1)
print('Every feature:', list(train_features.keys()))
print('A batch of ages:', train_features['Age'])
print('A batch of targets:', label_batch )
# + [markdown] id="geqHWW54Hmte"
# ご覧のとおり、データセットは、データフレームの行からカラムの値にマップしているカラム名の(データフレームのカラム名)のディクショナリを返しています。
# + [markdown] id="-v50jBIuj4gb"
# ## 前処理レイヤーの使用を実演する
#
# Keras Preprocessing Layers API を使うと、Keras ネイティブの入力処理パイプラインを構築することができます。特徴量の前処理コードを実演するために、3 つの前処理レイヤーを使用します。
#
# - [`Normalization`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Normalization) - データの特徴量方向の正規化。
# - [`CategoryEncoding`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/CategoryEncoding) - カテゴリのエンコーディングレイヤー。
# - [`StringLookup`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/StringLookup) - ボキャブラリから整数インデックスに文字列をマッピングします。
# - [`IntegerLookup`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/IntegerLookup) - ボキャブラリから整数インデックスに整数をマッピングします。
#
# 使用できる前処理レイヤーのリストは、[こちら](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing)をご覧ください。
# + [markdown] id="twXBSxnT66o8"
# ### 数値カラム
#
# 数値特徴量ごとに、各特徴量の平均が 0、標準偏差が 1 となるように Normalization() レイヤーを使用します。
# + [markdown] id="OosUh4kTsK_q"
# `get_normalization_layer` 関数は、特徴量方向の正規化を数値特徴量に適用するレイヤーを返します。
# + id="D6OuEKMMyEq1"
def get_normalization_layer(name, dataset):
    """Return a Normalization layer adapted to feature ``name`` of ``dataset``.

    The returned layer applies feature-wise normalization (zero mean,
    unit variance) with statistics learned from the dataset.
    """
    normalizer = preprocessing.Normalization(axis=None)
    # Adapt on a view of the dataset that yields only this feature.
    normalizer.adapt(dataset.map(lambda x, y: x[name]))
    return normalizer
# + id="MpKgUDyk69bM"
photo_count_col = train_features['PhotoAmt']
layer = get_normalization_layer('PhotoAmt', train_ds)
layer(photo_count_col)
# + [markdown] id="foWY00YBUx9N"
# 注意: 多数の特徴量(数百個以上)がある場合は、先にそれらを連結してから単一の [normalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/experimental/preprocessing/Normalization) レイヤーを使用するとより効率的です。
# + [markdown] id="yVD--2WZ7vmh"
# ### カテゴリカルカラム
#
# このデータセットでは、Type は文字列として表現されています('Dog' または 'Cat')。文字列を直接モデルに注入することはできないため、前処理レイヤーを使って文字列をワンホットベクトルとして表現します。
# + [markdown] id="LWlkOPwMsxdv"
# `get_category_encoding_layer` 関数は、ボキャブラリの値を整数インデックスにマッピングして特徴量をワンホットエンコーディングするレイヤーを返します。
# + id="GmgaeRjlDoUO"
def get_category_encoding_layer(name, dataset, dtype, max_tokens=None):
    """Return a callable that one-hot encodes categorical feature ``name``.

    Strings (dtype == 'string') are indexed with StringLookup, anything
    else with IntegerLookup; the vocabulary is learned from ``dataset``.
    """
    lookup_cls = (preprocessing.StringLookup if dtype == 'string'
                  else preprocessing.IntegerLookup)
    index = lookup_cls(max_tokens=max_tokens)
    # Learn the set of possible values and assign each a fixed integer index.
    index.adapt(dataset.map(lambda x, y: x[name]))
    # One-hot encode the integer indices; the closure captures both layers
    # so they can be reused inside a functional model later.
    encoder = preprocessing.CategoryEncoding(num_tokens=index.vocabulary_size())
    return lambda feature: encoder(index(feature))
# + id="X2t2ff9K8PcT"
type_col = train_features['Type']
layer = get_category_encoding_layer('Type', train_ds, 'string')
layer(type_col)
# + [markdown] id="j6eDongw8knz"
# 数値を直接モデルに注入せずに、それらの入力のワンホットエンコーディングを使用することがよくあります。ペットの年齢を表す未加工のデータを考察しましょう。
# + id="7FjBioQ38oNE"
type_col = train_features['Age']
category_encoding_layer = get_category_encoding_layer('Age', train_ds,
'int64', 5)
category_encoding_layer(type_col)
# + [markdown] id="SiE0glOPkMyh"
# ## 使用するカラムを選択する
#
# さまざまな種類の前処理レイヤーが使用される様子を見てきましたが、今後は、それを使ってモデルをトレーニングする方法を見てみましょう。[Keras-functional API](https://www.tensorflow.org/guide/keras/functional) を使用して、モデルを構築します。Keras functional API は、[tf.keras.Sequential](https://www.tensorflow.org/api_docs/python/tf/keras/Sequential) API より柔軟なモデルを作成するのに適しています。
#
# このチュートリアルでは、前処理レイヤーを使用するために必要な全コード(mechanic)などを示すことを目的としています。モデルをトレーニングするためにいくつかのカラムが任意に選択されています。
#
# 重要ポイント: 正確なモデルの構築を目的としている場合は、より大きなデータセットを独自に用意し、どの特徴量を含めるのが最も意義が高く、どのように表現すべきかについてよく考えましょう。
# + [markdown] id="Uj1GoHSZ9R3H"
# 最初の方で、入力パイプラインを実演するために小さなバッチを使用しました。今度はより大きなバッチサイズで新しい入力パイプラインを作成してみましょう。
#
# + id="Rcv2kQTTo23h"
batch_size = 256
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
# + id="Q3RBa51VkaAn"
all_inputs = []
encoded_features = []
# Numeric features.
for header in ['PhotoAmt', 'Fee']:
numeric_col = tf.keras.Input(shape=(1,), name=header)
normalization_layer = get_normalization_layer(header, train_ds)
encoded_numeric_col = normalization_layer(numeric_col)
all_inputs.append(numeric_col)
encoded_features.append(encoded_numeric_col)
# + id="1FOMGfZflhoA"
# Categorical features encoded as integers.
age_col = tf.keras.Input(shape=(1,), name='Age', dtype='int64')
encoding_layer = get_category_encoding_layer('Age', train_ds, dtype='int64',
max_tokens=5)
encoded_age_col = encoding_layer(age_col)
all_inputs.append(age_col)
encoded_features.append(encoded_age_col)
# + id="K8C8xyiXm-Ie"
# Categorical features encoded as string.
categorical_cols = ['Type', 'Color1', 'Color2', 'Gender', 'MaturitySize',
'FurLength', 'Vaccinated', 'Sterilized', 'Health', 'Breed1']
for header in categorical_cols:
categorical_col = tf.keras.Input(shape=(1,), name=header, dtype='string')
encoding_layer = get_category_encoding_layer(header, train_ds, dtype='string',
max_tokens=5)
encoded_categorical_col = encoding_layer(categorical_col)
all_inputs.append(categorical_col)
encoded_features.append(encoded_categorical_col)
# + [markdown] id="YHSnhz2fyEq3"
# ## モデルを作成、コンパイル、およびトレーニングする
#
# + [markdown] id="IDGyN_wpo0XS"
# エンドツーエンドのモデルを作成できるようになりました。
# + id="6Yrj-_pr6jyL"
all_features = tf.keras.layers.concatenate(encoded_features)
x = tf.keras.layers.Dense(32, activation="relu")(all_features)
x = tf.keras.layers.Dropout(0.5)(x)
output = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(all_inputs, output)
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=["accuracy"])
# + [markdown] id="f6mNMfG6yEq5"
# 接続性グラフを視覚化しましょう。
#
# + id="Y7Bkx4c7yEq5"
# rankdir='LR' is used to make the graph horizontal.
tf.keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
# + [markdown] id="CED6OStLyEq7"
# ### モデルをトレーニングする
#
# + id="OQfE3PC6yEq8"
model.fit(train_ds, epochs=10, validation_data=val_ds)
# + id="T8N2uAdU2Cni"
loss, accuracy = model.evaluate(test_ds)
print("Accuracy", accuracy)
# + [markdown] id="LmZMnTKaCZda"
# ## 新しいデータの推論
#
# 重要ポイント: 開発したモデルには前処理コードが含まれているため、直接 CSV ファイルから行を分類できるようになりました。
#
# + [markdown] id="4xkOlK8Zweeh"
# Keras モデルを保存して、再読み込みすることができます。TensorFlow モデルの詳細については、[こちら](https://www.tensorflow.org/tutorials/keras/save_and_load)のチュートリアルをご覧ください。
# + id="QH9Zy1sBvwOH"
model.save('my_pet_classifier')
reloaded_model = tf.keras.models.load_model('my_pet_classifier')
# + [markdown] id="D973plJrdwQ9"
# `model.predict()` を呼び出すだけで、新しいサンプルの予測を得ることができます。以下の 2 つを行ってください。
#
# 1. バッチに次元を持たせるために、スカラーをリストにラップします(モデルは、単一のサンプルではなく、データのバッチのみを処理します)。
# 2. 各特徴量で `convert_to_tensor` を呼び出します。
# + id="rKq4pxtdDa7i"
sample = {
'Type': 'Cat',
'Age': 3,
'Breed1': 'Tabby',
'Gender': 'Male',
'Color1': 'Black',
'Color2': 'White',
'MaturitySize': 'Small',
'FurLength': 'Short',
'Vaccinated': 'No',
'Sterilized': 'No',
'Health': 'Healthy',
'Fee': 100,
'PhotoAmt': 2,
}
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = reloaded_model.predict(input_dict)
prob = tf.nn.sigmoid(predictions[0])
print(
"This particular pet had a %.1f percent probability "
"of getting adopted." % (100 * prob)
)
# + [markdown] id="XJQQZEiH2FaB"
# 重要ポイント: 通常、データベースの規模が大きく複雑であるほど、ディープラーニングの結果がよくなります。このチュートリアルのデータセットのように、小さなデータセットを使用する場合は、決定木またはランダムフォレストを強力なベースラインとして使用することをお勧めします。このチュートリアルでは、構造化データとの連携の仕組みを実演することが目的であるため、コードは将来的に独自のデータセットを使用する際の出発点として使用することができます。
# + [markdown] id="k0QAY2Tb2HYG"
# ## 次のステップ
#
# 構造化データの分類をさらに学習するには、自分で試すのが最善です。別のデータセットを使用し、上記に似たコードを使用し、モデルのトレーニングと分類をおこなうと良いでしょう。精度を改善するには、モデルに含める特徴量とその表現方法を吟味してください。
| site/ja/tutorials/structured_data/preprocessing_layers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:metal]
# language: python
# name: conda-env-metal-py
# ---
# # S21 simulation of a resonator
#
# Authors: <NAME>, <NAME> Andersen
#
# +
# Import useful packages
import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, open_docs
from qiskit_metal.toolbox_metal import math_and_overrides
from qiskit_metal.qlibrary.core import QComponent
from collections import OrderedDict
# To create plots after geting solution data.
import matplotlib.pyplot as plt
import numpy as np
# Packages for the simple design
from qiskit_metal.qlibrary.tlines.meandered import RouteMeander
from qiskit_metal.qlibrary.tlines.pathfinder import RoutePathfinder
from qiskit_metal.qlibrary.terminations.launchpad_wb_driven import LaunchpadWirebondDriven
from qiskit_metal.qlibrary.terminations.open_to_ground import OpenToGround
from qiskit_metal.qlibrary.terminations.short_to_ground import ShortToGround
# Analysis
# from qiskit_metal.renderers.renderer_gds.gds_renderer import QGDSRenderer
# from qiskit_metal.analyses.quantization import EPRanalysis
from qiskit_metal.analyses.quantization import EPRanalysis
from qiskit_metal.analyses.simulation import ScatteringImpedanceSim
from qiskit_metal.analyses.sweep_and_optimize.sweeping import Sweeping
import pyEPR as epr
# -
# # Set up the design
# +
# Set up chip dimensions
design = designs.DesignPlanar()
design._chips['main']['size']['size_x'] = '9mm'
design._chips['main']['size']['size_y'] = '9mm'
# Resonator and feedline gap width (W) and center conductor width (S) from reference 2
design.variables['cpw_width'] = '15 um' #S from reference 2
design.variables['cpw_gap'] = '9 um' #W from reference 2
design.overwrite_enabled = True
hfss = design.renderers.hfss
# Open GUI
gui = MetalGUI(design)
# -
# Define for renderer
eig_qres = EPRanalysis(design, "hfss")
hfss = design.renderers.hfss
hfss = eig_qres.sim.renderer
q3d = design.renderers.q3d
# # Define the geometry
#
# Here we will have a single feedline couple to a single CPW resonator.
#
# The lauchpad should be included in the driven model simulations.
#
# For that reason, we use the LaunchpadWirebondDriven component which has an extra pin for input/output
# +
###################
# Single feedline #
###################
# Driven Lauchpad 1
x = '-1.5mm'
y = '2.0mm'
launch_options = dict(chip='main', pos_x=x, pos_y=y, orientation='360', lead_length='30um')
LP1 = LaunchpadWirebondDriven(design, 'LP1', options = launch_options)
# Driven Launchpad 2
x = '1.5mm'
y = '2.0mm'
launch_options = dict(chip='main', pos_x=x, pos_y=y, orientation='180', lead_length='30um')
LP2 = LaunchpadWirebondDriven(design, 'LP2', options = launch_options)
# Using path finder to connect the two launchpads
TL_LP1_LP2 = RoutePathfinder(design, 'TL_LP1_LP2', options = dict(chip='main', trace_width ='15um',
trace_gap ='9um',
fillet='99um',
hfss_wire_bonds = True,
lead=dict(end_straight='1.972mm'),
pin_inputs=Dict(
start_pin=Dict(
component='LP1',
pin='tie'),
end_pin=Dict(
component='LP2',
pin='tie')
)))
# Rebuild the GUI
gui.rebuild()
# +
######################
# lambda/2 resonator #
######################
# First we define the two end-points
otg1 = OpenToGround(design, 'otg1s', options=dict(chip='main', pos_x='-0.3mm', pos_y='1.968mm', orientation='180'))
otg2 = OpenToGround(design, 'otg1e', options=dict(chip='main', pos_x='0.0mm', pos_y='0.0mm', orientation='270'))
# Use RouteMeander to fix the total length of the resonator
rt_meander = RouteMeander(design, 'meander', Dict(
trace_width ='12um',
trace_gap ='5um',
total_length='8.0mm',
hfss_wire_bonds = True,
fillet='99 um',
lead = dict(start_straight='250um'),
pin_inputs=Dict(
start_pin=Dict(component='otg1s', pin='open'),
end_pin=Dict(component='otg1e', pin='open')), ))
# rebuild the GUI
gui.rebuild()
# -
gui.autoscale()
gui.screenshot()
# # Scattering Analysis
from qiskit_metal.analyses.simulation import ScatteringImpedanceSim
em1 = ScatteringImpedanceSim(design, "hfss")
# +
design_name= "Sweep_DrivenModal"
qcomp_render = [] # Means to render everything in qgeometry table.
open_terminations = []
# Here, pin LP1_in and LP2_in are converted into lumped ports,
# each with an impedance of 50 Ohms. <br>
port_list = [('LP1', 'in', 50),
('LP2', 'in', 50)]
box_plus_buffer = True
# -
# We use HFSS as the renderer
hfss = em1.renderer
hfss.start()
# Here we activate the design for a drivenmodal solution
hfss.activate_ansys_design("HangingResonator", 'drivenmodal')
setup_args = Dict(max_delta_s=0.001)
setup_args.name = 'Setup'
hfss.edit_drivenmodal_setup(setup_args)
# set buffer
hfss.options['x_buffer_width_mm'] = 0.1
hfss.options['y_buffer_width_mm'] = 0.1
# clean the design if needed
hfss.clean_active_design()
# render the design
hfss.render_design(selection=[],
open_pins=open_terminations,
port_list=port_list,
box_plus_buffer = box_plus_buffer)
# For accurate simulations, make sure the mesh is fine enough for the meander
hfss.modeler.mesh_length(
'cpw_mesh',
['trace_meander'],
MaxLength='0.01mm')
# # Broad sweep to find the resonance
hfss.add_sweep(setup_name="Setup",
name="Sweep",
start_ghz=4.0,
stop_ghz=8.0,
count=2001,
type="Interpolating")
hfss.analyze_sweep('Sweep', 'Setup')
hfss.plot_params(['S11', 'S21'])
# extract the S21 parameters
freqs, Pcurves, Pparams = hfss.get_params(['S21'])
# find armin
f_res = freqs[np.argmin(np.abs(Pparams.S21.values))]
f_res
# # Narrow sweep around the resonance found above
#fine sweep
hfss.add_sweep(setup_name="Setup",
name="Sweep_narrow",
start_ghz=np.round(f_res/1e9,3)-0.01,
stop_ghz=np.round(f_res/1e9,3)+0.01,
count=1001,
type="Fast") #slow but precise
hfss.analyze_sweep('Sweep_narrow', 'Setup')
hfss.plot_params(['S11', 'S21'])
# # Close connections
em1.close()
hfss.disconnect_ansys()
gui.main_window.close()
| tutorials/4 Analysis/B. Advanced - Direct use of the renderers/4.16 Analyze S21 of Hange Geometry with WirebondLunchpadDriven.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# From the previous tutorial we learned that the oracle is assumed to encode the function $f$
# by a quantum circuit that acts on $n+1$ qubits. The first $n$ qubits are the input register.
# The last qubit is the output register.
#
# <img src='images/grov_circ.png' width=400 >
#
# The circuit that implements $f$ does not modify the input register, and it performs the modulo 2 addition between the output register and the result of $f$ applied to the input register.
# Separating the input and the output registers is standard in the quantum world because it allows reversibility of the function.
#
# ## 3-SAT
# We are given a 3SAT (three satisfiability) formula that is the logical AND of a series of clauses and Boolean variables, where each clause is the logical OR of three Boolean variables or their negation. We want to find an assignment of the Boolean variables that makes the formula evaluate to **true**, ensuring that there is _exactly one true literal **per clause**_. This is an NP-hard problem.
#
# $$f(x_1, x_2, x_3) = (x_1\lor x_2\lor \neg x_3)\land (\neg x_1\lor \neg x_2\lor \neg x_3) \land (\neg x_1\lor x_2\lor x_3)$$
#
# where
# $$(...) - \mathrm{clause} $$
# $$ x_k, \neg x_k - \mathrm{literal}$$
# $$\lor - \mathrm{or}$$
# $$\land - \mathrm{and}$$
# $$\neg - \mathrm{not}$$
#
# We will use the instance of **exactly one** three-satisfiability, specified by the formula above.
#
# ## 1. The building blocks of Grover's algorithm: INITIALIZATION
# Remember that Grover's algorithm has three steps,
#
# - initialization,
# - sign flip, and
# - inversion about the average.
#
# Let's see how to implement these steps. We will use the first three qubits lines for the input, the fourth as output, and the remaining qubits will be auxiliary qubits. For the initialization, we apply Hadamard gates on the input lines. And on the output line, we apply an $X$-gate and then a Hadamard.
#
# <img src='images/alg_init.png' width=200 >
#
#
from pyquil.quil import Program
import pyquil.api as api
from pyquil.gates import *
from pyquil.api import get_qc
qc = get_qc("8q-qvm")
# +
# initialize state (in |0000000>)
init = Program()
# create equal superposition
init += H(0)
init += H(1)
init += H(2)
# setup the oracle qubit in |->=(|0>-|1>)/sqrt(2) state
init += X(3)
init += H(3)
#uncomment to see the measurement distribution.
#to later add the oracle and inversion we should not make measurement at this stage
# declare classical registers to store measurement results in
#cr = 7
#ro = init.declare('ro', 'BIT', cr)
#init += [MEASURE(i, ro[i]) for i in range(7)]
#init.wrap_in_numshots_loop(50)
#compiled_program = qc.compile(init)
#results = qc.run(compiled_program)
#print(results)
# -
# At this point, the state of the input qubits is in the uniform superposition.
# All amplitudes are equal.
#
# <img src='images/uniform_sup.png' width=400 >
#
# ## 2. The building blocks of Grover's algorithm: THE ORACLE
# Now let's start with the **Oracle** that takes care of the sign flip.
# To construct the function $f$, we need a quantum circuit that acts on qubits encoding the Boolean variables, and determines if this formula is satisfied.
# Notice that the circuit that implements $f$ should simply be able to decide if the Boolean assignment corresponding to the binary string it is given as input satisfies the formula. Determining and returning the satisfying assignment will be the goal of Grover's algorithm, not of the function $f$.
# The example formula has three Boolean variables, $x_1$, $x_2$, and $x_3$. There are eight possible assignments.
#
# ||||||
# |-|-|-|-|---|
# |$x_1$|$x_2$|$x_3$|$f$ True/False|comment|
# |0|0|0|0|second clause has 3 true literals|
# |0|0|1|0|second clause has 2 true literals|
# |0|1|0|0|second clause has 2 true literals|
# |0|1|1|0|third clause has 3 true literals|
# |1|0|0|0|second clause has 2 true literals|
# |1|0|1|1|Bingo!|
# |1|1|0|0|first clause has 2 true literals|
# |1|1|1|0|second clause is not satisfied|
# Let's call $U_f$ the unitary matrix that implements $f$. We can implement $U_f$ in several ways.
# For simplicity, we have composed the problem of computing $U_f$ by introducing _three auxiliary qubits, one for each clause_.
# For each clause, we construct a circuit that sets the corresponding zero qubit to one if and only if the clause has exactly one true term.
# Finally, the output register of $U_f$ is set to $1$ if and only if all three auxiliary qubits are $1$.
# For example, the circuit sets the bottom qubit $y_1$ to one for the clause $(x_1 \lor x_2 \lor \neg x_3)$.
#
# The $X$ gate flips the qubit corresponding to $x_3$ because $x_3$ appears negated in the clause.
# Using three CNOT gates, we set $y_1 = (x_1 \lor x_2 \lor \neg x_3)$, implying that $y_1$ is equal to one if an odd number of literals is satisfied.
#
# <img src='images/clause_circ_y1.png' width=400 >
#
# Since we want $y_1 = 1$ if and only if exactly one literal is satisfied, we use a triply-controlled not gate to finally implement the desired formula. The last $X$ gate simply resets the state of the qubit $x_3$.
#
# In a similar way, we can implement the circuit that checks whether the second clause
# $(\neg x_1 \lor \neg x_2 \lor \neg x_3)$ is satisfied,
#
# <img src='images/clause_circ_y2.png' width=400 >
#
# and one for the third clause -- $(\neg x_1 \lor x_2 \lor x_3)$.
#
# <img src='images/clause_circ_y3.png' width=400 >
#
# To implement this circuit there is a small obstacle, the triply-controlled not gate is not part of the basic gate set. Such a gate can be implemented in several ways. For simplicity, we choose to do it
# using 3 doubly-controlled not gates and one auxiliary qubit, as we show in the image.
#
# <img src='images/triply_c_gate.png' width=300 >
#
#
# In the circuit, we can quickly verify that $q[4]$ is set to $1$ if and only if $q[0]$, $q[1]$, $q[2]$ are $1$.
# The final doubly-controlled-not **resets** the state of the auxiliary qubit $q[3]$. Remember that if you plan to re-use the auxiliary qubits, you should leave them in the same state as they begin.
#
# With this blocks, we can construct the full circuit that implements $U_f$ using four auxiliary qubits,
# one for each of the three clauses, and one for the triply-controlled not. For each of the three clauses, we set the corresponding auxiliary qubit to $1$ if the clause is satisfied (blue). Then we apply a logical AND between these three auxiliary qubits (green).
#
# <img src='images/final_func_circ.png' width=800 >
#
# Finally, we run the same circuit in reverse to reset the state of the auxiliary qubits (red).
#
#
# Let's implement the Oracle.
# +
clause_1_oracle = Program()
# Clause 1: (x1 OR x2 OR NOT x3). Qubits 0..2 hold x1..x3, qubit 4 is the
# clause-result ancilla, qubit 7 is scratch space for the triply-controlled NOT.
# Flip x3 so that every literal of the clause reads as a plain qubit value.
clause_1_oracle += X(2)
# Parity of the three literals -> qubit 4 (set iff an odd number are true).
clause_1_oracle += CNOT(0,4)
clause_1_oracle += CNOT(1,4)
clause_1_oracle += CNOT(2,4)
# logical AND1
# Triply-controlled NOT built from two ancilla-assisted CCNOTs: flips qubit 4
# back when all three literals are true, leaving 1 iff exactly one literal holds.
clause_1_oracle += CCNOT(0,1,7)
clause_1_oracle += CCNOT(2,7,4)
clause_1_oracle += CCNOT(0,1,7)
# Undo the X on x3 to restore the input register.
clause_1_oracle += X(2)
# +
clause_2_oracle = Program()
clause_2_oracle += X(0)
clause_2_oracle += X(1)
clause_2_oracle += X(2)
clause_2_oracle += CNOT(0,5)
clause_2_oracle += CNOT(1,5)
clause_2_oracle += CNOT(2,5)
# logical AND2
clause_2_oracle += CCNOT(0,1,7)
clause_2_oracle += CCNOT(2,7,5)
clause_2_oracle += CCNOT(0,1,7)
clause_2_oracle += X(0)
clause_2_oracle += X(1)
clause_2_oracle += X(2)
# +
clause_3_oracle = Program()
clause_3_oracle += X(0)
clause_3_oracle += CNOT(0,6)
clause_3_oracle += CNOT(1,6)
clause_3_oracle += CNOT(2,6)
# logical AND3
clause_3_oracle += CCNOT(0,1,7)
clause_3_oracle += CCNOT(2,7,6)
clause_3_oracle += CCNOT(0,1,7)
clause_3_oracle += X(0)
# -
forward_oracle = clause_1_oracle + clause_2_oracle + clause_3_oracle
# Logical AND between 3 auxiliaries
readout_oracle = Program()
readout_oracle += CCNOT(4,5,7)
readout_oracle += CCNOT(7,6,3)
readout_oracle += CCNOT(4,5,7)
# And finally, the overall oracle is a concatenation of the forward, readout, and reversing circuits; the reversing circuit is the same as the forward oracle
oracle = forward_oracle + readout_oracle + forward_oracle
# ### 3. Inversion About the Average
#
# The last step is the inversion about the average. It is implemented by this simple circuit, which we can easily verify applies the matrix shown below.
#
# <img src='images/inversion.png' width=400 >
#
# $$\begin{pmatrix}
# -3/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 \\
# 1/4 & -3/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 \\
# 1/4 & 1/4 & -3/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 \\
# 1/4 & 1/4 & 1/4 & -3/4 & 1/4 & 1/4 & 1/4 & 1/4 \\
# 1/4 & 1/4 & 1/4 & 1/4 & -3/4 & 1/4 & 1/4 & 1/4 \\
# 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & -3/4 & 1/4 & 1/4 \\
# 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & -3/4 & 1/4 \\
# 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & 1/4 & -3/4
# \end{pmatrix} = \frac{1}{4}\mathbf{1}^T\mathbf{1} - I
# $$
#
# The matrix has the effect of mapping each amplitude to twice the average coefficient minus the amplitude itself. Because the average is smaller than most of the coefficients, all amplitudes except the negative one gets reduced. And the negative one gets amplified. **This is one iteration of Grover's algorithm.**
# The optimal number of iterations, $k$, can be found using the formula
#
# $$k \arcsin \frac{2\sqrt{2^n-1}}{2^n} \approx \frac{\pi}{2} $$
#
# from which we derive that we must perform one iteration. This simply means that we should append to the overall circuit another copy of the circuit that performs a sign flip, and another copy of the circuit that performs the inversion about the average. And that's it.
#
# +
inversion = Program()
# Inversion about the average (Grover diffusion) on the three input qubits:
# D = H^3 . X^3 . CCZ . X^3 . H^3
# BUG FIX: the first gate was X(0); it must be H(0) so that all three inputs
# are Hadamard-transformed before the phase flip about |000>. (With X(0) the
# two X gates on qubit 0 cancel and qubit 0 never enters the H/X basis.)
inversion += H(0)
inversion += H(1)
inversion += H(2)
inversion += X(0)
inversion += X(1)
inversion += X(2)
# the equivalent of controlled-controlled-Z (CCNOT conjugated by H on the target)
inversion += H(2)
inversion += CCNOT(0,1,2)
inversion += H(2)
inversion += X(0)
inversion += X(1)
inversion += X(2)
inversion += H(0)
inversion += H(1)
inversion += H(2)
# -
# We can now run this circuit on the simulator and look at the results.
# +
grover_iters = 1
grover_iter = oracle + inversion
# the final Grover algorithm
grover = init + [grover_iter for _ in range(grover_iters)]
# -
# Finally, let's measure first 3 registers (1000 times, to plot histogram to see which is the solution $x_1x_2x_3$)
cr = 8
# BUG FIX: declare the classical readout register on `grover`, the program that
# is actually compiled. The original called `init.declare(...)`, but `grover`
# was already built as a copy of `init` before this point, so the compiled
# program would be missing the DECLARE instruction for 'ro'.
ro = grover.declare('ro', 'BIT', cr)
# Measure only the 3 input qubits (the candidate solution x1 x2 x3).
grover += [MEASURE(i, ro[i]) for i in range(3)]
grover.wrap_in_numshots_loop(1000)
compiled_program = qc.compile(grover)
results = qc.run(compiled_program)
# +
import matplotlib.pyplot as plt
import numpy as np
import itertools
def plot_histogram(result):
    """Bar-chart the frequency of every classical bitstring in `result`.

    `result` is either a dict of per-bit measurement lists (stacked into a
    trials x bits array) or an array already shaped (trials, bits).
    """
    if isinstance(result, dict):
        outcomes = np.vstack(result.values()).T
    else:
        outcomes = result
    n_trials, n_bits = outcomes.shape
    # Seed every possible bitstring with zero so unseen outcomes still appear.
    stats = {"".join(combo): 0 for combo in itertools.product('01', repeat=n_bits)}
    for row in outcomes:
        stats["".join(str(bit) for bit in row)] += 1
    positions = np.arange(len(stats))
    plt.bar(positions, stats.values())
    plt.xticks(positions, stats.keys())
    plt.show()
# -
plot_histogram(results)
| Grover/.ipynb_checkpoints/Grover_3SAT_example-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SETUP
# +
import numpy as np
import pandas as pd
from matplotlib import rcParams
# %matplotlib inline
rcParams['font.family'] = 'DejaVu Sans'
# -
file = '/Users/quartz/GoogleDrive/ewp/data_whole_02.pkl'
data_raw = pd.read_pickle(file)
data_raw.tail()
data_raw.insolation.hist()
data_raw.insolation.plot()
data_raw.dtypes
data_raw.describe()
data_raw.info()
data_raw.corr()
# ### 노트 (TODO)
#
# - 전처리
# - 모델링
# - 논문에서 다른 파생변수를 사용하지는 않았을까?
#
# - 실측 데이터
# - 일사량(자기회귀 파생변수, 평균 및 1~3사분위), 운량(하늘 상태로 예측), 자외선지수(평균 및 1~3사분위), 태양고도
# - 일사량 자기회귀 파생변수는 가능.
# - 평균 및 1~3사분위 데이터는 어떻게 넣는지 잘 모르겠음.
# - 운량은 하늘상태 데이터 부재로 사용 어려움.
# - 태양고도는 샘플 데이터가 6일 데이터이기 때문에, 샘플 간 변별력이 없어 사용 X
#
# > 일단 위 데이터로 모델링 시도
# ---
# ### 전처리
# +
# X, y
y = data_raw.iloc[:, -1:]
X = data_raw.drop(['insolation'], axis=1)
X.shape, y.shape
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# +
from sklearn.preprocessing import MinMaxScaler

# Scale features and target to [0, 1].
# Use *separate* scaler objects for X and y: the original re-fit a single
# scaler on y after transforming X, which only worked because of statement
# order (X_test had to be transformed before the re-fit) and discarded the X
# statistics, making it impossible to inverse-transform predictions later.
x_scaler = MinMaxScaler()
y_scaler = MinMaxScaler()
X_train_sc = x_scaler.fit_transform(X_train)
X_test_sc = x_scaler.transform(X_test)
y_train_sc = y_scaler.fit_transform(y_train)
y_test_sc = y_scaler.transform(y_test)
# Backward-compat alias: in the original code `scaler` ended up fitted on y.
scaler = y_scaler
X_train_df = pd.DataFrame(data=X_train_sc, columns=X_train.columns)
X_test_df = pd.DataFrame(data=X_test_sc, columns=X_test.columns)
y_train_df = pd.DataFrame(data=y_train_sc, columns=y_train.columns)
y_test_df = pd.DataFrame(data=y_test_sc, columns=y_test.columns)
# -
# ---
# ### 모델링
# +
from sklearn.linear_model import LinearRegression
model_linear = LinearRegression()
model_linear = model_linear.fit(X_train_df, y_train_df)
y_pred = model_linear.predict(X_test_df)
np.mean(np.power(y_pred - y_test_df.values, 2))
# +
import statsmodels.api as sm
model = sm.OLS(y_train_df, X_train_df)
result = model.fit()
print(result.summary())
# +
from sklearn.ensemble import RandomForestRegressor
model_rfr = RandomForestRegressor()
model_rfr = model_rfr.fit(X_train_df, y_train_df)
y_pred = model_rfr.predict(X_test_df)
np.mean(np.power(y_pred - y_test_df.values, 2))
# -
y_pred - y_test_df.values
# ### 노트
#
# - linear regression, random forest regressor의 성능이 높지 않게 나온다. (Adj. R-squared 0.331 / mse : 0.117 / 0.161 정도)
| 03_modeling(insolation).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
df = pd.read_csv('term_clust',sep = '\t',header = None,names = ['term','clust_id'])
df.head()
def terms_list(x):
    """Return the elements of *x* collected into a plain list."""
    return list(x)
def terms_num(x):
    """Return how many elements *x* contains."""
    return len(x)
# Aggregate the terms belonging to each cluster.
# BUG FIX: the aggregator referenced an undefined name `term_list`; the
# function defined above is `terms_list`, so the original raised NameError.
dfpt = df.pivot_table(index='clust_id', aggfunc={'term': [terms_list, terms_num]})
dfpt.columns = ['term_list', 'terms_num']
# Human-readable cluster labels: clust_1 .. clust_N.
dfpt['clust_name'] = ['clust_' + str(i) for i in range(1, len(dfpt) + 1)]
dflast = dfpt[['clust_name', 'terms_num', 'term_list']]
dflast.to_excel('term_clust_merge.xlsx', index=False)
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/24ta/2021dataSeminar/blob/master/05/05.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # 第05回データ分析勉強会(2020/01/17)
# ## データの可視化 ~ kickstart project 2回目~
# ### 使用データ:[Kickstarter Projects](https://www.kaggle.com/kemical/kickstarter-projects) <br>
# 参考URL:<br>
# [テービーテックのデータサイエンス "Kaggleに挑戦しよう! ~コード説明1~"](https://ds-blog.tbtech.co.jp/entry/2019/04/19/Kaggle%E3%81%AB%E6%8C%91%E6%88%A6%E3%81%97%E3%82%88%E3%81%86%EF%BC%81_%EF%BD%9E%E3%82%B3%E3%83%BC%E3%83%89%E8%AA%AC%E6%98%8E%EF%BC%91%EF%BD%9E)<br>
# [テービーテックのデータサイエンス "Kaggleに挑戦しよう! ~コード説明2~"](https://ds-blog.tbtech.co.jp/entry/2019/04/27/Kaggle%E3%81%AB%E6%8C%91%E6%88%A6%E3%81%97%E3%82%88%E3%81%86%EF%BC%81_%EF%BD%9E%E3%82%B3%E3%83%BC%E3%83%89%E8%AA%AC%E6%98%8E%EF%BC%92%EF%BD%9E)
# # import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# #%matplotlib inline #matplotlibのグラフを表示する
import seaborn as sns #matplotlibより美しいグラフの描画
from sklearn.preprocessing import StandardScaler #preprocessing:前処理 StandardScaler:標準化
from sklearn.model_selection import train_test_split #データを訓練データとテストデータに分割する
from sklearn.linear_model import SGDClassifier #クラス分類をする
from sklearn.metrics import log_loss, accuracy_score, precision_recall_fscore_support, confusion_matrix
#log_loss:対数尤度 ,accuracy_score:正答率 ,precision_recall_fscore_support:適合率,再現率,F1値 ,confusion_matrix:クロス集計表
from sklearn.metrics import mean_absolute_error #平均絶対誤差
import datetime as dt #日時を扱う
import requests
from io import StringIO
# # read data
import codecs
with codecs.open('../06/ks-projects-201612.csv', 'r', 'utf-8', 'ignore') as file:
df = pd.read_csv(file, delimiter=",")
df.columns = df.columns.str.replace(" ", "")
df = df.loc[:, ~df.columns.str.match('Unnamed')]
# url = 'https://raw.githubusercontent.com/24ta/2021dataSeminar/master/06/data1.csv'
# res = requests.get(url).content
# df = pd.read_csv(StringIO(res.decode('utf-8')), header=0, index_col=0)
# # 前処理
# +
# Raw dates such as "deadline" and "launched" are not useful as-is; what
# matters is how many days a project ran, so derive an elapsed-days feature
# ("period") and drop the original datetime columns.
df['deadline'] = pd.to_datetime(df['deadline'], errors = 'coerce')
df['launched'] = pd.to_datetime(df['launched'], errors = 'coerce')
df['period'] = (df['deadline'] - df['launched']).dt.days
df = df.drop(['deadline', 'launched'], axis=1)
# Coerce numeric columns (unparsable values become NaN and are dropped below)
df['goal'] = pd.to_numeric(df['goal'], errors ='coerce')
df['pledged'] = pd.to_numeric(df['pledged'], errors ='coerce')
df['backers'] = pd.to_numeric(df['backers'], errors ='coerce')
df['usdpledged'] = pd.to_numeric(df['usdpledged'], errors ='coerce')
# Binarize the target: keep only finished projects, successful=1 / failed=0
df = df[(df['state'] == 'successful') | (df['state'] == 'failed')]
df['state'] = list(map(lambda x: 1 if x == "successful" else 0, df['state']))
# Drop rows with missing values and rebuild a contiguous index
df = df.dropna()
df.reset_index(inplace=True, drop=True)
# -
# # 可視化
# +
# 円グラフ
plt.pie(df['state'].value_counts(),labels=['failed','successful'], autopct="%.1f%%")
plt.show()
# ヒストグラム
plt.hist(df['main_category'])
plt.show()
# 散布図
plt.plot(df['pledged'], df['usdpledged'],"o")
plt.show()
# -
# ### 問題1:main_catagoryについて円グラフを表示してください。
# +
plt.figure(figsize=[12,12])
plt.hist(df['main_category'])
plt.title('Main Category Histgram', size=36)
plt.xlabel('Main Category',size=24)
plt.ylabel('Frequency', size=24)
plt.xticks(rotation=90)
plt.show()
freq = list(df['main_category'].value_counts())
label = list(df['main_category'].value_counts().index)
plt.figure(figsize=[12,12])
plt.bar(label, freq)
plt.title('Main Category Histgram', size=36)
plt.xlabel('Main Category',size=24)
plt.ylabel('Frequency', size=24)
plt.xticks(rotation=90, size=16)
plt.yticks(size=16)
plt.show()
plt.figure(figsize=[12,12])
plt.barh(label ,freq)
plt.title('Main Category Histgram', size=36)
plt.xlabel('Main Category',size=24)
plt.ylabel('Frequency', size=24)
plt.xticks(size=16)
plt.yticks(size=16)
plt.show()
# -
# ### 問題2:currencyについてヒストグラムを作成してください
# 条件:x軸のラベル名は「Currency」、y軸のラベルは「Freq」、それぞれラベルの大きさは26としてください。
# +
# 変数を選択
df = df.drop(['ID','name','category','country'], axis=1)
# 相関係数
print(df.corr())
# 相関係数の可視化
sns.heatmap(df.corr(), cmap='Blues', annot=True, fmt='1.3f')
plt.show()
plt.grid(which='major',color='black',linestyle=':')
plt.grid(which='minor',color='black',linestyle=':')
plt.plot(df['pledged'], df['usdpledged'], 'o')
plt.show()
# 説明変数の相関をなくす
df_pledged = pd.DataFrame({'pledged' : df['pledged'], 'usdpledged' : df['usdpledged']})
df_pledged.reset_index(inplace=True, drop=True)
cov = np.cov(df_pledged, rowvar=0)
_, S = np.linalg.eig(cov)
pledged_decorr = np.dot(S.T, df_pledged.T).T
print('相関係数: {:.3f}'.format(np.corrcoef(pledged_decorr[:, 0], pledged_decorr[:, 1])[0,1]))
plt.grid(which='major',color='black',linestyle=':')
plt.grid(which='minor',color='black',linestyle=':')
plt.plot(pledged_decorr[:, 0], pledged_decorr[:, 1], 'o')
plt.show()
# 無相関化した変数を元のデータセットに返す。
pledged_decorr = pd.DataFrame(pledged_decorr, columns=['pledged','uspledged'])
print(pledged_decorr)
df['pledged'] = pledged_decorr.loc[:,'pledged']
df['usdpledged'] = pledged_decorr.loc[:,'uspledged']
sns.heatmap(df.corr(), cmap='Blues', annot=True, fmt='1.3f')
plt.show()
df_dummy = pd.get_dummies(df['main_category'])
df = pd.concat([df.drop(['main_category'],axis=1),df_dummy],axis=1)
df_dummy = pd.get_dummies(df['currency'])
df = pd.concat([df.drop(['currency'],axis=1),df_dummy],axis=1)
# +
# Hold-out split: 70% train / 30% test
y = df['state'].values
X = df.drop('state', axis=1).values
test_size = 0.3
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1234)
# Standardization: fit on the training set only, apply the same transform to test
stdsc = StandardScaler()
X_train = stdsc.fit_transform(X_train)
X_test = stdsc.transform(X_test)
# Prediction with a logistic-regression SGD classifier
# NOTE(review): recent scikit-learn renamed loss='log' to 'log_loss' and
# expects penalty=None instead of 'none' — confirm against the installed version.
clf = SGDClassifier(loss='log', penalty='none', max_iter=10000, fit_intercept=True)
# loss: loss function, max_iter: maximum training iterations, fit_intercept: fit an intercept term
clf.fit(X_train, y_train)
y_prd_train = clf.predict(X_train)
# NOTE(review): all metrics below are computed on the *training* data, which
# inflates the scores (relevant to 問題3 below).
print('対数尤度 = {:.3f}'.format(- log_loss(y_train, y_prd_train))) # show log-likelihood
print('正答率(Accuracy) = {:.3f}%'.format(100 * accuracy_score(y_train, y_prd_train))) # show accuracy
precision, recall, f1_score, _ = precision_recall_fscore_support(y_train, y_prd_train) # compute precision / recall / F1
print('適合率(Precision) = {:.3f}%'.format(100 * precision[0])) # show precision
print('再現率(Recall) = {:.3f}%'.format(100 * recall[0])) # show recall
print('F1値(F1-score) = {:.3f}%'.format(100 * f1_score[0])) # show F1
# -
# ### 問題3:なぜこんなに良い数値が出たのか調べてみましょう
# #### データの変数
#
# | 変数名 | 詳細 |
# | ------------- | ---------------------------------------- |
# | ID | クラウドファンディングの個別ID |
# | name | クラウドファンディングの名前 |
# | category | 詳細なカテゴリー |
# | main_category | 大まかなカテゴリー |
# | currency | 使用された通貨 |
# | deadline | 締め切り日時 |
# | goal | 目標調達資金額 |
# | launched | 開始した日時 |
# | pledged | 集まった資金 |
# | state | プロジェクトの状態(成功、失敗、キャンセルなど) |
# | backer | 集まった支援者 |
# | country | プロジェクトが開かれた国 |
# | usd pledged | 集まった資金の米ドル換算 |
#
| 05/05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import blib
import array
import numpy as np
# ## Load an iRadar Colormap
# +
# Read the raw iRadar colormap blob. Use a context manager so the file handle
# is closed promptly instead of leaking until garbage collection.
# with open('blob/z5.0.map', 'rb') as fid: data = fid.read()
with open('blob/d1.0.map', 'rb') as fid:
    data = fid.read()
# with open('blob/r5.0.map', 'rb') as fid: data = fid.read()
# with open('blob/k5.0.map', 'rb') as fid: data = fid.read()
# The blob is a flat stream of RGBA byte quadruplets; unpack the unsigned
# bytes and normalize 0-255 to 0.0-1.0.
nums = array.array('B', data)
rgba = np.array(nums).reshape(-1, 4) / 255.0
# Display the first 90 colors of the map.
blib.colorspace(rgba[:90])
# -
# ## Synthesize a Colormap Using `blib`
cmap = blib.dmap()
blib.colorspace(cmap[:90])
| Regenerate iRadar Colormaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # EL ENTORNO DE APRENDIZAJE
# ### [ANACONDA](https://www.anaconda.com/)
# Anaconda es una distribución libre y abierta de los lenguajes Python y R, utilizada en ciencia de datos, y aprendizaje automático (machine learning). Esto incluye procesamiento de grandes volúmenes de información, análisis predictivo y cómputos científicos. Está orientada a simplificar el despliegue y administración de los paquetes de software.
#
# Las diferentes versiones de los paquetes se administran mediante el sistema de gestión de paquetes conda, el cual lo hace bastante sencillo de instalar, correr, y actualizar software de ciencia de datos y aprendizaje automático como Scikit-learn, TensorFlow y SciPy.
# 
# #### INSTALANDO ANACONDA
# 1. Puedes descargar Anaconda con la última versión de Python 3.x desde este [enlace](https://www.anaconda.com/products/individual). **Instalar la última paquete disponible**
#
# https://www.anaconda.com/products/individual
# 
# 2. Una vez descargado le ejecutamos el archivo .exe. Una vez cargado seleccionamos el boton <code>**Just Me**</code> y damos click en siguiente.
# <center><img src='./img/anaconda/instalacion_anaconda_1.png' width="500" height="600"></center>
# 3. Nos aparecerá la ruta de instalación por defecto, damos siguiente. En la siguiente pantalla selecionamos la opción de la imagen.
# <center><img src='./img/anaconda/instalacion_anaconda_2.png' width="500" height="600"></center>
# 4. Por último presionamos instalar y esperamos a que termine la instalación del programa.
# 5. Una vez instalado reiniciamos el equipo.
# 6. Una vez que el equipo se encuentre reiniciado, iniciamos Jupyter Notebook.
# <center><img src='./img/anaconda/Jupyter.png' width="500" height="600"></center>
# <center><img src='./img/anaconda/Jupyter2.png' width="500" height="600"></center>
# 7. Validar que podamos abrir terminal **Anaconda Prompt**
# <img src="./img/anaconda/anaconda_prompt.png">
# <img src="./img/anaconda/anaconda_prompt2.png">
# ### [VISUAL STUDIO CODE](https://code.visualstudio.com/)
# Visual Studio Code es un editor de código fuente desarrollado por Microsoft para Windows, Linux y macOS. Incluye soporte para la depuración, control integrado de [Git](https://git-scm.com/), resaltado de sintaxis, finalización inteligente de código, fragmentos y refactorización de código.
# <center><img src='https://code.visualstudio.com/assets/updates/1_37/icons.gif' width="700" height="500"></center>
# #### INSTALANDO VSCODE
# 1. Puedes descargar VSCode desde el siguiente [enlace](https://code.visualstudio.com/)
# 2. Una vez descargado le damos a ejecutar y nos aparecera la siguiente pantalla
# <center><img src='./img/vscode/instalar_vscode_1.png' width="500" height="300"></center>
# 3. Seleccionamos las casilas mostradas en la imagen y damos siguiente
# <center><img src='./img/vscode/instalar_vscode_2.png' width="500" height="300"></center>
# 4. Una vez instalado inicializamos vscode y marcamos el cuadro rojo
# <center><img src='./img/vscode/vscode_1.png' width="700" height="500"></center>
# 5. Escribimos <code>python</code> y damos a instalar.
# <center><img src='./img/vscode/vscode_2.png' width="700" height="500"></center>
# 6. Una vez terminada la instalación podemos cerrar el programa.
# ### Configuración Python CMD
# Para poder realizar la configuración de python sobre windows CMD
#
# Debemos agregar la ruta donde fue instalado anaconda sobre las variables del sistema de windows
#
# Para esto por favor seguir las instrucciones del siguiente video
#
# https://www.youtube.com/watch?v=xSwzZiEKuOY&t=65s&ab_channel=Mart%C3%ADnK%C3%B6stner
# Comando usado en video sobre **Anaconda Prompt**: <code>python -c "import sys; print('\n'.join(sys.path))"</code>
#
# Abrir **CMD Windows**: <code> tecla Windows + R </code> y escribir sobre la ventana abierta <code> cmd</code>
#
| CONFIGURANDO EL ENTORNO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''jovian'': conda)'
# name: python3
# ---
# +
# analyze the data update record for API Server schedule optimization
# imports
import pandas as pd
import sys
# set up file paths and other data globals
import config
import modify
# local imports
from covid_package.data_funcs.store_data import read_json_data
from covid_package.data_funcs.datetime_funcs import convert_datetime_str_to_obj
from covid_package.plots.plot_results import scatter_plot
print("Imports complete")
# +
# read the update_record.json file
updata = read_json_data(config.UPDATE_FILE_STR)
# get the update datetime list
update_list = updata['update_list']
# organize the pandas column data
# Build one column per datetime component; hour and minute are cast to int so
# they can be grouped numerically later.
update_dict = {'datetime_strings': update_list}
update_dict['day_nums'] = [convert_datetime_str_to_obj(s, 'day_num') for s in update_list]
update_dict['days'] = [convert_datetime_str_to_obj(s, 'day') for s in update_list]
update_dict['hours'] = [int(convert_datetime_str_to_obj(s, 'hour')) for s in update_list]
update_dict['minutes'] = [int(convert_datetime_str_to_obj(s, 'minute')) for s in update_list]
# create the dataframe
df = pd.DataFrame(update_dict)
df
# -
# boil down the data for analysis by day of the week and hours of the day
hc = df.groupby(['day_nums', 'days', 'hours']).size().reset_index(name='counts')
hc
# +
labels = {
'title': 'Days vs. Hours',
'x_label': 'Days',
'y_label': 'Hours'
}
results = {
'x_axis': hc['days'],
'y_axis': hc['hours'],
'legend_label': 'Update hours'
}
params = {
'y_lim': (0, 24),
'area': hc['counts'],
'colors': None,
'alpha': None
}
scatter_plot(labels, results, params)
# -
# boil down the data for analysis by day of the week and hours of the day
mc = df.groupby(['hours', 'minutes']).size().reset_index(name='counts')
mc
# +
labels = {
'title': 'Hours vs. Minutes',
'x_label': 'Hours',
'y_label': 'Minutes'
}
results = {
'x_axis': mc['hours'],
'y_axis': mc['minutes'],
'legend_label': 'Update minutes'
}
params = {
'y_lim': (0, 59),
'area': mc['counts'],
'colors': None,
'alpha': None
}
scatter_plot(labels, results, params)
# -
| analyze_update_record.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In-Class Coding Lab: Data Analysis with Pandas
#
# In this lab, we will perform a data analysis on the **RMS Titanic** passenger list. The RMS Titanic is one of the most famous ocean liners in history. On April 15, 1912 it sank after colliding with an iceberg in the North Atlantic Ocean. To learn more, read here: https://en.wikipedia.org/wiki/RMS_Titanic
#
# Our goal today is to perform a data analysis on a subset of the passenger list. We're looking for insights as to which types of passengers did and didn't survive. Women? Children? 1st Class Passengers? 3rd class? Etc.
#
# I'm sure you've heard the expression often said during emergencies: "Women and Children first" Let's explore this data set and find out if that's true!
#
# Before we begin you should read up on what each of the columns mean in the data dictionary. You can find this information on this page: https://www.kaggle.com/c/titanic/data
#
#
# ## Loading the data set
#
# First we load the dataset into a Pandas `DataFrame` variable. The `sample(10)` method takes a random sample of 10 passengers from the data set.
# +
import pandas as pd
import numpy as np
# this turns off warning messages
import warnings
warnings.filterwarnings('ignore')
# Load the Titanic passenger manifest into a DataFrame and preview 10
# randomly chosen rows (notebook cell output).
passengers = pd.read_csv('CCL-titanic.csv')
passengers.sample(10)
# -
# ## How many survived?
#
# One of the first things we should do is figure out how many of the passengers in this data set survived. Let's start with isolating just the `'Survived'` column into a series:
# Preview 10 random survival flags (0/1) from the 'Survived' column.
passengers['Survived'].sample(10)
# There's too many to display so we just display a random sample of 10 passengers.
#
# - 1 means the passenger survived
# - 0 means the passenger died
#
# What we really want is to count the number of survivors and deaths. We do this by querying the `value_counts()` of the `['Survived']` column, which returns a `Series` of counts, like this:
# Absolute counts per outcome (0 = died, 1 = survived).
passengers['Survived'].value_counts()
# Only 342 passengers survived, and 549 perished. Let's observe this same data as percentages of the whole. We do this by adding the `normalize=True` named argument to the `value_counts()` method.
# Same counts expressed as fractions of the whole (they sum to 1).
passengers['Survived'].value_counts(normalize=True)
# **Just 38% of passengers in this dataset survived.**
# ### Now you Try it!
#
# **FIRST** Write a Pandas expression to display counts of males and female passengers using the `Sex` variable:
# Counts of male and female passengers.
passengers.Sex.value_counts()
# **NEXT** Write a Pandas expression to display male /female passenger counts as a percentage of the whole number of passengers in the data set.
# Same counts, as fractions of all passengers.
passengers.Sex.value_counts(normalize=True)
# If you got things working, you now know that **35% of passengers were female**.
# ## Who survives? Men or Women?
#
# We now know that 35% of the passengers were female, and 65% were male.
#
# **The next thing to think about is how do survival rates affect these numbers?**
#
# If the ratio is about the same for survivors only, then we can conclude that your **Sex** did not play a role in your survival on the RMS Titanic.
#
# Let's find out.
# Keep only the rows for passengers who survived.
survivors = passengers.loc[passengers['Survived'] == 1]
survivors['PassengerId'].count()
# Still **342** like we discovered originally. Now let's check the **Sex** split among survivors only:
survivors['Sex'].value_counts()
# WOW! That is a huge difference! But you probably can't see it easily. Let's represent it in a `DataFrame`, so that it's easier to visualize:
# +
# Per-sex counts for the full passenger list and for survivors only.
all_by_sex = passengers['Sex'].value_counts()
survivors_by_sex = survivors['Sex'].value_counts()
# Side-by-side comparison frame plus the derived per-sex survival rate.
sex_comparision_df = pd.DataFrame({'AllPassengers': all_by_sex, 'Survivors': survivors_by_sex})
sex_comparision_df['SexSurvivialRate'] = sex_comparision_df['Survivors'] / sex_comparision_df['AllPassengers']
sex_comparision_df
# -
# **So, females had a 74% survival rate. Much better than the overall rate of 38%**
#
# We should probably briefly explain the code above.
#
# - The first two lines get a series count of all passengers by Sex (male / female) and count of survivors by sex
# - The third line creates DataFrame. Recall a pandas dataframe is just a dict of series. We have two keys 'AllPassengers' and 'Survivors'
# - The fourth line creates a new column in the dataframe which is just the survivors / all passengers to get the rate of survival for that Sex.
#
# ## Feature Engineering: Adults and Children
#
# Sometimes the variable we want to analyze is not readily available, but can be created from existing data. This is commonly referred to as **feature engineering**. The name comes from machine learning where we use data called *features* to predict an outcome.
#
# Let's create a new feature called `'AgeCat'` as follows:
#
# - When **Age** <=18 then 'Child'
# - When **Age** >18 then 'Adult'
#
# This is easy to do in pandas. First we create the column and set all values to `np.nan` which means 'Not a number'. This is Pandas way of saying no value. Then we set the values based on the rules we set for the feature.
# Derive the AgeCat feature: 'Child' when Age <= 18, 'Adult' when Age > 18;
# passengers with a missing Age stay NaN.
passengers['AgeCat'] = np.nan # Not a number
# Use .loc (single indexing step) rather than chained indexing like
# passengers['AgeCat'][mask] = ..., which raises SettingWithCopyWarning and
# can silently fail to write back to the frame (it is a no-op under
# copy-on-write in recent pandas).
passengers.loc[passengers['Age'] <= 18, 'AgeCat'] = 'Child'
passengers.loc[passengers['Age'] > 18, 'AgeCat'] = 'Adult'
passengers.sample(5)
# Let's get the count and distributions of Adults and Children on the passenger list.
passengers['AgeCat'].value_counts()
# And here's the percentage as a whole:
passengers['AgeCat'].value_counts(normalize=True)
# So close to **80%** of the passengers were adults. Once again let's look at the ratio of `AgeCat` for survivors only. If your age has no bearing on survival, then the rates should be the same.
#
# Here's the counts of Adult / Children among the survivors only:
# Re-filter survivors so the frame includes the AgeCat column added above.
survivors = passengers[passengers['Survived'] ==1]
survivors['AgeCat'].value_counts()
# ### Now You Try it!
#
# Calculate the `AgeCat` survival rate, similar to how we did for the `SexSurvivalRate`.
# +
# Per-age-category counts for all passengers vs. survivors only.
agecat_all_series = passengers['AgeCat'].value_counts()
agecat_survivor_series = survivors['AgeCat'].value_counts()
# Build the comparison frame and derive the per-category survival rate,
# mirroring the SexSurvivialRate computation above. (The previous version
# labelled the survivor column 'Agecat' and indexed a non-existent ''
# column, which raises a KeyError.)
agecat_survivalrate_df = pd.DataFrame({'AllPassengers': agecat_all_series, 'Survivors': agecat_survivor_series})
agecat_survivalrate_df['AgeCatSurvivialRate'] = agecat_survivalrate_df['Survivors'] / agecat_survivalrate_df['AllPassengers']
agecat_survivalrate_df
# -
# **So, children had a 50% survival rate, better than the overall rate of 38%**
#
# ## So, women and children first?
#
# It looks like the RMS really did have the motto: "Women and Children First."
#
# Here's our insights. We know:
#
# - If you were a passenger, you had a 38% chance of survival.
# - If you were a female passenger, you had a 74% chance of survival.
# - If you were a child passenger, you had a 50% chance of survival.
#
#
# ### Now you try it for Passenger Class
#
# Repeat this process for `Pclass` The passenger class variable. Display the survival rates for each passenger class. What does the information tell you about passenger class and survival rates?
#
# I'll give you a hint... "Money Talks"
#
# +
# todo: repeat the analysis in the previous cell for Pclass
# Per-class counts for all passengers vs. survivors only.
Pclass_all_series = passengers['Pclass'].value_counts()
Pclass_survivor_series = survivors['Pclass'].value_counts()
# Comparison frame: name the second column 'Survivors' (it holds survivor
# counts, not class labels) and add the per-class survival rate requested
# by the TODO, matching the Sex/AgeCat cells above.
Pclass_survivalrate_df = pd.DataFrame({'AllPassengers': Pclass_all_series, 'Survivors': Pclass_survivor_series})
Pclass_survivalrate_df['PclassSurvivialRate'] = Pclass_survivalrate_df['Survivors'] / Pclass_survivalrate_df['AllPassengers']
Pclass_survivalrate_df
# -
| content/lessons/12/Class-Coding-Lab/CCL-Data-Analysis-With-Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Section 1: Bayesian refresher and introduction to ArviZ
# _What does an end-to-end Bayesian workflow look like?_
# ## Learning Objectives
# * Refresh our understanding of Bayes' Theorem
# * Fit a small binomial model
# * Show how a full statistical workflow, even outside of Bayesian methods, requires more steps than just model fitting
# * Introduce ArviZ
# ## Bayes' Theorem
#
# ### The most common formulation
#
# $$
# \Large
# P(\theta \mid y) = \frac{ P(y \mid \theta)p(\theta)}{p(y)}
# $$
#
# This comes from a simple rearranging of terms for joint probabilities:
#
# $$
# P(\theta, y) = P(\theta)P(y | \theta) = P(y)P(\theta | y)
# $$
#
# This formula becomes interesting when we interpret $y$ as _data_ and $\theta$ as _parameters_ for a model.
#
# ### Breaking it down
# #### $P(\theta)$ -> Prior
# _"What is the probability of parameters given no observations"_
# Before we've observed any data what is a plausible probability distribution of parameters? This may come from physical constraints (temperatures are above 0 Kelvin), or domain expertise (high temperatures in Austin in summer are between 80 and 110).
#
# #### $P(y \mid \theta )$ -> Likelihood
# _"What is the probability of the observed data given a model parameter value"_
#
# Likelihood functions tell us how "likely" the observed data is, for all the possible parameter values. Likelihoods perform roughly the same role as loss functions from "machine learning": evaluating how "good" of a set of model parameters are at explaining the data. Indeed, many common loss functions are derived from likelihoods.
#
# #### $P(\theta \mid y)$ -> Posterior distribution
# _"What is the distribution of parameters given the observed data?"_
#
# After obtaining data, or making observations, what is our belief regarding the parameters of the underlying statistical model?
#
# * Estimating the posterior distribution is the goal of Bayesian analysis.
# * The process of estimating the posterior distribution often referred to as **Inference**
# * There are numerous ways to perform inference, [each with their own pros and cons](http://canyon289.github.io/pages/InferenceCheatsheet.html)
# * In this tutorial we will only be using Markov Chain Monte Carlo (MCMC)
#
# #### $P(y)$ -> Marginal Probability of Evidence
# _"What is the probability distribution of data?_
#
# In most cases this term is difficult or impossible to calculate, so much so that most inference techniques cleverly get around their calculations. MCMC is one of those techniques
#
# ## Alternative formulations
#
# ### Likelihood notation
# I particularly like this formulation because it clearly demarcates the difference between the likelihood and probability terms
#
# $$ P(\theta | y) = \frac{ L(\theta | y)p(\theta)}{p(y)} $$
#
# ### Defined as a proportion
# While the posterior, likelihood, and prior are usually *distributions*, the denominator is a scalar that normalizes the numerator. In many modern Bayesian inference methods we try to avoid calculating this term.
#
# $$ P(\theta | y) \propto P(y | \theta)p(\theta) $$
#
# ### Defined with puppies
# Even if you hate math, you'd have to be a monster to hate puppies. This pictorial formula is taken from <NAME>'s excellent book [Doing Bayesian Data Analysis](https://www.amazon.com/Doing-Bayesian-Data-Analysis-Tutorial/dp/0124058884) Do note the lazy puppy on the right. The laziness is an indication of how little work this puppy does in most Bayesian Inference methods.
# 
#
| notebooks/1_BayesianWorkflow/1_Ins_BayesRefresher.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Similarity analysis with influence functions (Fig. 8)
#
# The aim of this notebook is to reproduce the Fig. 8 from the paper 'Unsupervised machine learning of topological phase transitions from experimental data' by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> as well as give a possibility of analyzing other test points than chosen for the mentioned figure.
# ## Labelling the data accordingly to boundaries found via anomaly detection scheme (section III E)
# We have:
# - a file 'boundaries.npz' in folder 'data'
# - the post-processed experimental data gathered across the full range of frequencies and shaking phases, 'phase_diagram_56_rephased_final.h5' in folder 'data'
#
# We will:
# - label the dataset accordingly to the anomaly-detected phases
# - generate the training, validation, and test sets
# +
import numpy as np
import h5py
from lib.utility_general import keys, is_equal
# Master switch: when True, the derived labels/masks are written to disk.
# Kept False so re-running the notebook cannot clobber the published files.
SAVE = False
## Load experimental data with the fixed micromotion phase and the boundaries from the anomaly detection
data = h5py.File('./data/phase_diagram_rephased.h5', 'r')
boundaries = np.load('./influence/data_and_masks/boundaries.npz', allow_pickle=True)["boundaries"].item()
print(keys(data['parameter']))
# Extract chosen properties from the h5 file
images = np.array(data['images'])
freq = np.array(data['parameter/freq']) # from 5.1 to 7.8 kHz, every 0.1
phase = np.array(data['parameter/phase']) # from -180 to 180, every 5
## Choose datasets' sizes
dataset_size = images.shape[0]
# NOTE(review): training_size is informational only -- the training set is
# actually whatever remains after the test and validation masks are drawn.
training_size = 8000
validation_size = 2000
testing_size = 436
## Generate and save labels according to the boundaries from anomaly detection scheme
# Training 1 and 2: three labels
# Each boundary entry unpacks as (phases, freqs, dy): for every shaking
# phase, the frequency at which the detected boundary sits.
(phases1, freqs1, dy1) = boundaries["0"][0]
(phases2, freqs2, dy2) = boundaries["1"][0]
labels = np.array([])
# Training 1 + 2: lower bound from 1, upper bound from 2
for i in np.arange(images.shape[0]):
    current_frequency = freq[i]
    current_phase = phase[i]
    # Look up the two boundary frequencies for this sample's shaking phase.
    # NOTE(review): if no phases1/phases2 entry matches within 0.01, the
    # bounds silently carry over from the previous sample (and are undefined
    # for the very first one) -- verify every phase value has a match.
    for phase_index in np.arange(len(phases1)):
        if is_equal(current_phase, phases1[phase_index], 0.01):
            lower_bound = freqs1[phase_index]
        if is_equal(current_phase, phases2[phase_index], 0.01):
            upper_bound = freqs2[phase_index]
    # Three-way label: 0 below the first boundary, 1 between, 2 above.
    if (current_frequency < lower_bound):
        label = 0
    elif (current_frequency < upper_bound):
        label = 1
    else:
        label = 2
    labels = np.append(labels, label)
# For inner peace
"""if (i == 0 or i == 6 or i == 15 or i == 1000 or i == 2000 or i == 3000):
print("Frequency: ", current_frequency)
print("Phase: ", current_phase)
print("Label is: ", label)"""
if SAVE is True:
    np.save('influence/data_and_masks/phase_diagram_anomalydetected_labels.npy', labels.astype(int))
## Create the test, validation, and training sets
# Fix the random seed, if you want to get the same datasets as in the paper
np.random.seed(0)
# Choose test points
# Test indices are sampled without replacement from the full dataset.
test_indices = np.random.choice(dataset_size, testing_size, replace=False).astype(int)
if SAVE is True:
    np.save('influence/data_and_masks/phase_diagram_test_mask.npy', test_indices)
# Training and validation masks
# Everything that is not a test point is shuffled, then split into
# validation (first validation_size indices) and training (the rest).
indices = np.arange(dataset_size)
mask = np.delete(indices, test_indices).astype(int)
np.random.shuffle(mask)
validation_mask = mask[:validation_size]
training_mask = mask[validation_size:]
print(len(validation_mask))
print(len(training_mask))
if SAVE is True:
    np.save('influence/data_and_masks/phase_diagram_training_mask.npy', training_mask.astype(int))
    np.save('influence/data_and_masks/phase_diagram_validation_mask.npy', validation_mask.astype(int))
# Means and std deviations for normalization
# Statistics are computed on training images only, so validation/test sets
# are later normalized with training-set statistics (no leakage).
mean = np.mean(images[training_mask.astype(int)])
std = np.std(images[training_mask.astype(int)])
print("Mean: ", mean)
print("Standard deviation: ", std)
# Sanity check for the normalization
# After standardization the training set should have mean ~0 and std ~1.
training_images = images[training_mask.astype(int)]
training_images = (training_images - mean) / std
print("After normalization:")
print("Mean: ", np.mean(training_images))
print("Standard deviation: ", np.std(training_images))
# -
# ## Training the model
# We have:
# - a prepared training and validation sets which we can load with Downloader from data_loader.py
# - a proposed architecture of 2D CNN in architectures.py
#
# We will:
# - train the model (set PUBLISHED_MODEL to False)
# OR
# - skip this step and use in next steps the published and trained model 'published_model_rephased_anomaly_detected_phase_diagram' (set PUBLISHED_MODEL to True)
# +
import torch
import torch.nn as nn
from torch.nn import Sequential, CrossEntropyLoss
import torch.optim as optim
from matplotlib import pyplot as plt
from lib.data import Downloader
from lib.CNN_for_IF import CNN2D
folder_model = 'networks/influence'
folder_influence = 'influence'
PUBLISHED_MODEL = True # do you want to use a published model?
if PUBLISHED_MODEL is True:
    folder_influence = 'influence/published_influence_similarity_analysis'
    # NOTE(review): later cells override model_name with the differently
    # ordered 'published_model_anomaly_detected_phase_diagram_rephased' --
    # confirm which checkpoint file actually exists on disk.
    model_name = 'published_model_rephased_anomaly_detected_phase_diagram'
    # Regularization strength the published model was trained with; reused
    # as λ by the evaluation cells below.
    λ_published = 0.05
else:
    model_name = 'experimental_model_rephased_anomaly_detected_phase_diagram'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Hyper-parameters
input_size = 56*56 # original: 151*151
num_classes = 3 # three phases from anomaly detection scheme
# + tags=[]
# To reproduce the training from the paper
np.random.seed(0)
torch.manual_seed(17)
λ = 0.05 # Regularization
learning_rate = 0.01
num_epochs = 125
batch_size = 4000
# Loading experimental data
dataset = Downloader('phase_diagram_rephased', batch_size)
train_loader = dataset.train_loader()
# Validation is served as a single batch covering the whole validation set.
validation_loader = dataset.validation_loader(batch_size=2000)
training_set_size = dataset.training_samples_no()
print("Data loaded.")
model = CNN2D(no_classes=3)
model.to(device)
print("Model built.")
print("Number of parameters of the net: ", sum(p.numel() for p in model.parameters() if p.requires_grad))
# 1741 - 3class, 1540 - 2class (expected parameter counts, sanity check)
# Train only when not reusing the published checkpoint.
if PUBLISHED_MODEL is False:
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate, momentum=0.9)
    #optimizer = torch.optim.Adam(model.parameters()) # lr = 0.001
    #scheduler = None
    # Step the learning rate down by 10x at epochs 75 and 125.
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[75, 125], gamma=0.1)
    #scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2)
    # Train the model
    hold_loss=[]
    hold_val_loss = []
    for epoch in range(num_epochs):
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train() # Set model to training mode
                for i, (eigenvectors, labels) in enumerate(train_loader):
                    # Reshape eigenvectors to (batch_size, # of RGB channels, size, size)
                    eigenvectors = eigenvectors.reshape(-1, 1, 56, 56)
                    eigenvectors, labels = eigenvectors.to(device), labels.to(device)
                    # Forward pass
                    outputs = model(eigenvectors)
                    #print(outputs.shape)
                    loss = criterion(outputs, labels)
                    # We manually add L2 regularization
                    if λ != 0:
                        l2_reg = 0.0
                        for param in model.parameters():
                            l2_reg += torch.norm(param)**2
                        loss += 1/training_set_size * λ/2 * l2_reg
                    # Backward and optimize
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                # NOTE: only the last batch's loss is reported/stored per epoch.
                print ('Epoch [{}/{}], Train loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
                hold_loss.append(loss.item())
            if phase == 'val':
                model.eval() # Set model to evaluating mode
                with torch.no_grad():
                    correct = 0
                    for eigenvectors, labels in validation_loader:
                        eigenvectors = eigenvectors.reshape(-1, 1, 56, 56)
                        eigenvectors, labels = eigenvectors.to(device), labels.to(device)
                        outputs = model(eigenvectors)
                        _, predicted = torch.max(outputs.data, 1) # classification
                        #print ("Prediction is: ", predicted) # classification
                        val_loss = criterion(outputs, labels)
                        # We manually add L2 regularization
                        if λ != 0:
                            l2_reg = 0.0
                            for param in model.parameters():
                                l2_reg += torch.norm(param)**2
                            val_loss += 1/training_set_size * λ/2 * l2_reg
                        correct = (predicted == labels).sum().item() # classification
                    hold_val_loss.append(val_loss.item())
                    print("Accuracy of the model on the", len(labels), "validation images:", 100 * correct / len(labels), "%")
                    print ('Epoch [{}/{}], Validation loss: {:.4f}'.format(epoch+1, num_epochs, val_loss.item()))
        if scheduler is not None:
            scheduler.step() # here for milestones scheduler
            #scheduler.step(val_loss) # here for Plateau scheduler
    plt.plot(hold_loss, label="training loss")
    plt.plot(hold_val_loss, label="validation loss")
    plt.xlabel('epochs')
    plt.title("Losses plots")
    plt.legend()
    plt.show()
    # Save the model checkpoint
    torch.save(model.state_dict(), folder_model + '/' + model_name + '.pt')
# -
# ## Testing the model
# We have:
# - a prepared test set which we can load with Downloader from data_loader.py
# - either the trained model from the previous step OR we chose to use the published model
#
# We will:
# - test the model and save all test losses
# +
## Test the model on the testing data and save all test losses (needed for influence functions' calculations)
from lib.utility_general import save_to_file
# Loading CDW data
# Batch size covers the whole test set, so one pass yields all losses at once.
batch_size = 8000
dataset = Downloader('phase_diagram_rephased', batch_size)
test_loader = dataset.test_loader()
training_set_size = dataset.training_samples_no()
model = CNN2D(no_classes=3)
if PUBLISHED_MODEL is True:
    # NOTE(review): this name differs in word order from the one set when the
    # published model was configured above -- confirm the checkpoint file
    # name on disk.
    model_name = 'published_model_anomaly_detected_phase_diagram_rephased'
    λ = λ_published
model.load_state_dict(torch.load(folder_model + '/' + model_name + '.pt'))
model.to(device)
model.eval()
# reduction='none' keeps a per-sample loss vector, which the influence
# function calculations below need.
criterion = nn.CrossEntropyLoss(reduction='none')
with torch.no_grad():
    correct = 0
    for eigenvectors, labels in test_loader:
        eigenvectors = eigenvectors.reshape(-1, 1, 56, 56)
        eigenvectors, labels = eigenvectors.to(device), labels.to(device)
        outputs = model(eigenvectors)
        _, predicted = torch.max(outputs.data, 1) # classification
        test_loss = criterion(outputs, labels)
        # We manually add L2 regularization
        if λ != 0:
            l2_reg = 0.0
            for param in model.parameters():
                l2_reg += torch.norm(param)**2
            test_loss += 1/training_set_size * λ/2 * l2_reg
        correct = (predicted == labels).sum().item() # classification
        predicted_labels_storage = np.array(predicted.cpu()) # it flattens a tuple of tensors to 1D tensor (and then converts tensor to numpy array)
        predicted_loss_storage = np.array(test_loss.cpu())
print("Accuracy of the model on the", len(labels), "test images:", 100 * correct / len(labels), "%")
# Save the test losses
save_to_file(np.array(predicted_loss_storage), 'all_test_losses_' + model_name + '.txt', folder_influence)
# -
# ## Calculating the hessian
# We have:
# - either the trained model from the previous step OR we chose to use the published model
#
# We will:
# - calculate the Hessian of the training loss with respect to the model's parameters
# OR
# - skip this step and use the calculated Hessian for the published model
# +
## Calculate the hessian (or skip this step if you use the published model)
from lib.influence_function import find_heigenvalues, find_hessian
if PUBLISHED_MODEL is True:
    model_name = 'published_model_anomaly_detected_phase_diagram_rephased'
    final_folder_influence = folder_influence
    λ = λ_published
    # Load the precomputed Hessian shipped with the repository.
    # NOTE(review): this path uses a Windows-style '\\' separator while the
    # rest of the notebook uses '/'; on POSIX the backslash becomes a literal
    # character in the file name -- confirm on the target platform.
    hessian = np.load(folder_influence + '\\' + model_name + '_hessian.npy')
    heigenvalues = np.sort(np.linalg.eigvalsh(hessian))
    print("Largest negative eigenvalue of the Hessian of the training loss: ", np.amin(heigenvalues))
else:
    # NOTE(review): word order differs from the name the training cell saves
    # the experimental checkpoint under -- confirm before relying on it.
    model_name = 'experimental_model_anomaly_detected_phase_diagram_rephased'
    final_folder_influence = folder_influence + '/influence_similarity_analysis'
    criterion = nn.CrossEntropyLoss()
    train_loader = dataset.train_loader()
    # Compute and save Hessian and its eigenvalues
    # The batch size covers the full training set, so this loop runs once
    # over all training samples.
    for i, (eigenvectors, labels) in enumerate(train_loader):
        # Reshape images to (batch_size, # of RGB channels, size, size)
        eigenvectors = eigenvectors.reshape(-1, 1, 56, 56) #(batch_size, 25, 50)
        eigenvectors, labels = eigenvectors.to(device), labels.to(device)
        # Forward pass
        outputs = model(eigenvectors)
        loss = criterion(outputs, labels)
        # We manually add L2 regularization
        if λ != 0:
            l2_reg = 0.0
            for param in model.parameters():
                l2_reg += torch.norm(param)**2
            loss += 1/training_set_size * λ/2 * l2_reg
        # Hessian of the regularized training loss w.r.t. the model parameters.
        hessian = find_hessian(loss, model)
        #hessian = torch.autograd.functional.hessian(criterion)
    np.save(final_folder_influence + '\\' + model_name + '_hessian', hessian)
    heigenvalues = np.sort(np.linalg.eigvalsh(hessian))
    print("Largest negative eigenvalue of the Hessian of the training loss: ", np.amin(heigenvalues))
    save_to_file(heigenvalues, model_name + '_heigenvalues.txt', final_folder_influence)
# -
# ## Calculating the influence functions
# We have:
# - either the trained model from the previous step OR we chose to use the published model
# - the Hessian of the training loss w.r.t. the model's parameters
# - test losses for all test points
#
# We will:
# - calculate the influence functions between the chosen test points and the whole training set
# OR
# - skip this step and use the calculated influence functions for the published model
# +
## Calculate influence functions for chosen test points and all training points
from lib.utility_general import save_to_file, flatten_grad
from lib.influence_function import grad_z, find_heigenvalues, find_hessian
from tqdm.notebook import trange # for the progress bar
damping = 0.2 # needs to be a little larger than the absolute value of the largest negative eigenvalue of the hessian
model_params_no = 1741 # 1540 for 2 classes, 1741 for 3 classes
model = CNN2D(no_classes=3)
if PUBLISHED_MODEL is True:
    model_name = 'published_model_anomaly_detected_phase_diagram_rephased'
    final_folder_influence = folder_influence + '/published_influence_similarity_analysis'
    λ = λ_published
else:
    # NOTE(review): 'experimenal' is missing a 't' -- confirm it matches the
    # file name the experimental checkpoint/Hessian were actually saved under.
    model_name = 'experimenal_model_anomaly_detected_phase_diagram_rephased'
    final_folder_influence = folder_influence + '/influence_similarity_analysis'
model.load_state_dict(torch.load(folder_model + '/' + model_name + '.pt'))
# Single pass over the full training batch to materialize eigenvectors/labels
# for the per-training-point gradients below.
for _, (eigenvectors, labels) in enumerate(train_loader):
    # Reshape images to (batch_size, # of RGB channels, size, size)
    eigenvectors = eigenvectors.reshape(-1, 1, 56, 56)
    # Forward pass
    hessian_outputs = model(eigenvectors)
    hessian_loss = criterion(hessian_outputs, labels)
    # We manually add L2 regularization
    if λ != 0:
        l2_reg = 0.0
        for param in model.parameters():
            l2_reg += torch.norm(param)**2
        hessian_loss += 1/training_set_size * λ/2 * l2_reg
# Damp the Hessian so it is positive definite before inversion.
hessian = np.load(final_folder_influence + '\\' + model_name + '_hessian.npy')
damped_hessian = hessian + np.identity(model_params_no)*damping
#print(np.linalg.eigvalsh(damped_hessian))
inv_hessian = torch.inverse(torch.from_numpy(damped_hessian)).float()
print("The full Hessian got inverted.")
np.save(final_folder_influence + '\\' + model_name + '_invhessian.npy', inv_hessian)
#inv_hessian = torch.from_numpy(np.load(folder_influence + '/' + model_name + '_invhessian.npy')).float()
# Choose test points, e.g.:
chosen_test_examples = np.array([5,46,7]) #np.arange(50)
for _, (test_eigenvectors, test_labels) in enumerate(test_loader):
    test_eigenvectors = test_eigenvectors.reshape(-1, 1, 56, 56) #(-1, 25, 50)
    for i in trange(len(chosen_test_examples), desc='calculating influence for chosen test examples'):
        test_example = chosen_test_examples[i]
        print("Looking at the test example no. ", test_example, " right now.")
        # s_test = H^-1 · grad L(test point): the influence direction.
        grad_test_loss = grad_z(test_eigenvectors[test_example:test_example+1], test_labels[test_example:test_example+1], model, criterion, training_set_size, λ=λ)
        grad_test_loss = flatten_grad(grad_test_loss)
        s_test = torch.mv(inv_hessian, grad_test_loss)
        print("s_test for the test example no. ", test_example, " will be multiplied by gradients right now.")
        influence = []
        # Influence of every training point on the chosen test point.
        for train_example in trange(training_set_size, desc='calculating influence for all training points'):
            grad_train_loss = grad_z(eigenvectors[train_example:train_example+1], labels[train_example:train_example+1], model, criterion, training_set_size, λ=λ)
            grad_train_loss = flatten_grad(grad_train_loss)
            influence_function = - torch.dot(s_test, grad_train_loss) * (- 1 / training_set_size)
            influence.append(influence_function.item())
        save_to_file(influence, 'exact_influence_test' + str(test_example) + '.txt', final_folder_influence)
print("Done.")
# -
# ## Final plot
# We have:
# - either the trained model from the previous step OR we chose to use the published model
# - influence functions for the chosen test points and all training points
#
# We will:
# - reproduce the fig. 8 from the paper with results produced within this notebook (set PUBLISHED_RESULTS to False) OR
# - reproduce the fig. 8 from the paper with published results (set PUBLISHED_RESULTS to True)
#
# You can play with chosen three test points. The ones that are in the paper have indices 5, 46, and 7.
# +
from lib.utility_plots import make_similarity_analysis_plot
# Toggle between the results shipped with the repository and the ones
# produced by running the cells above.
PUBLISHED_RESULTS = True
folder_influence = (
    'influence/published_influence_similarity_analysis'
    if PUBLISHED_RESULTS
    else 'influence/influence_similarity_analysis'
)
# Test points used for Fig. 8 of the paper.
chosen_3_test_points = np.array([5, 46, 7])
make_similarity_analysis_plot(chosen_3_test_points, folder_influence, folder_model)
# -
| 3f_similarity_analysis_figure_08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# NumPy basics: the same element-wise vector arithmetic done twice --
# first with plain Python lists and loops, then with numpy arrays.
# --- Vector addition with plain lists ---
V1 = [1, 2, 4.5, 6.7, 9, 1.6, 7]
V2 = [4, 7, 11, 67, 7, 7.8, 6]
print('Vector 1:\n', V1)
print('\nVector 2:\n', V2)
# Element-wise sum of the two lists (comprehension instead of an append loop).
AddResult = [n + m for n, m in zip(V1, V2)]
print('\n Result vector 1 + vector 2= Vector:')
print(AddResult)
# Fresh integer vectors for the next examples.
V1 = [1, 2, 20, 71, 0, -7]
V2 = [4, 5, 25, 51, 5, 33]
V3 = [11, 25, 5, 1, 5, 23]
print('Vector 1:\n', V1)
print('\nVector 2:\n', V2)
print('\nVector 3:\n', V3)
# Both results intentionally add V1 and V2 (the computation is duplicated,
# as in the original notebook).
z1 = [n + m for n, m in zip(V1, V2)]
z2 = [n + m for n, m in zip(V1, V2)]
print('\n Result vector 1 + vector 2= ZVector1:')
print(z1)
print('\n Result vector 1 + vector 2= ZVector2:')
print(z2)
# --- The same addition with numpy arrays: one expression, no loop ---
import numpy as vect
vector1 = vect.array([1, 2, 20, 71, 0, -7])
vector2 = vect.array([4, 5, 25, 51, 5, 33])
print('\n Result vector 1 + vector 2= Vector:')
z = vector1 + vector2
print(z)
# --- Subtraction ---
print('Subtraction')
import numpy as e212
vector1 = e212.array([1, 2, 20, 71, 0, -7])
vector2 = e212.array([4, 5, 25, 51, 5, 33])
print('Vector 1 - Array:\n', vector1)
print('\nVector 2 - Array:\n', vector2)
# Element-wise difference via a comprehension over the two arrays.
z = [n - m for n, m in zip(vector1, vector2)]
print('\n Result vector 1 - vector 2= Vector:')
print(z)
# And the one-line numpy equivalent.
vector1 = vect.array([1, 2, 20, 71, 0, -7])
vector2 = vect.array([4, 5, 25, 51, 5, 33])
print('\n Result vector 1 - vector 2= Vector:')
z = vector1 - vector2
print(z)
# +
# Element-wise sum of four vectors; V1-V3 become numpy arrays, V4 stays a list.
V1 = [1, 2, 20, 71, 0, -7]
V2 = [4, 5, 25, 51, 5, 33]
V3 = [11, 25, 5, 1, 5, 23]
V4 = [2.5, 5.6, 7, 8, 8.4, 6]
print('Vector 1:\n', V1)
print('\nVector 2:\n', V2)
print('\nVector 3:\n', V3)
import numpy as s1
V1, V2, V3 = s1.array(V1), s1.array(V2), s1.array(V3)
# Sum the four vectors position by position.
z = [n + m + i + j for n, m, i, j in zip(V1, V2, V3, V4)]
print('\n Result vector 1 + vector 2+ vector 3 + vector 4 :')
print(z)
# +
# Element-wise difference of four vectors; V1-V3 become numpy arrays, V4 stays a list.
V1 = [1, 2, 20, 71, 0, -7]
V2 = [4, 5, 25, 51, 5, 33]
V3 = [11, 25, 5, 1, 5, 23]
V4 = [2.5, 5.6, 7, 8, 8.4, 6]
print('Vector 1:\n', V1)
print('\nVector 2:\n', V2)
print('\nVector 3:\n', V3)
import numpy as s1
V1, V2, V3 = s1.array(V1), s1.array(V2), s1.array(V3)
# Subtract V2, V3, and V4 from V1 position by position.
z = [n - m - i - j for n, m, i, j in zip(V1, V2, V3, V4)]
print('\n Result vector 1 - vector 2 - vector 3 - vector 4 :')
print(z)
# +
# Mixed element-wise arithmetic over four vectors (add, subtract, add).
V1 = [1, 2, 20, 71, 0, -7]
V2 = [4, 5, 25, 51, 5, 33]
V3 = [11, 25, 5, 1, 5, 23]
V4 = [2.5, 5.6, 7, 8, 8.4, 6]
print('Vector 1:\n', V1)
print('\nVector 2:\n', V2)
print('\nVector 3:\n', V3)
import numpy as s1
V1, V2, V3 = s1.array(V1), s1.array(V2), s1.array(V3)
# Compute V1 + V2 - V3 + V4 position by position.
z = [n + m - i + j for n, m, i, j in zip(V1, V2, V3, V4)]
print('\n Result vector 1 + vector 2 - vector 3 + vector 4 :')
print(z)
| Python-Numpy-1DArrays2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning 101++ in Python
#
# #### Created by:
#
# <NAME> ([@PieterButeneers](https://twitter.com/pieterbuteneers)), CTO at [chatlayer.ai](https://www.chatlayer.ai/)
#
# <NAME>, Senior Research Engineer at [Google DeepMind](https://deepmind.com/)
#
# ##### Updated and presented to you by:
#
# <NAME> ([@JeroenBoeye](https://twitter.com/JeroenBoeye)), Senior Machine Learning Engineer at [Faktion](https://www.faktion.com/)
#
# <NAME> ([@JorisBoeye](https://twitter.com/JorisBoeye)), Senior Data Scientist at [ZF Wind Power](https://www.zf.com/products/en/wind/home/wind.html)
#
#
#
# ## 1. Imports
#
# Let's first start with importing all the necessary packages. Some imports will be repeated in the exercises but if you want to skip some parts you can just execute the imports below and start with any exercise.
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Default figure size for every plot in this notebook.
plt.rcParams["figure.figsize"] = (13.0, 8.0)
# %matplotlib inline
import pickle
import sklearn
import sklearn.linear_model
import sklearn.preprocessing
import sklearn.gaussian_process
import sklearn.ensemble
# -
# ## 2. Linear Regression
#
# Linear Regression assumes a linear relationship between 2 variables.
#
# As an example we'll consider the historical page views of a web server and compare it to its CPU usage. We'll try to predict the CPU usage of the server based on the page views of the different pages.
#
# ### 2.1 Data import and inspection
#
# Let's import the data and take a look at it.
# +
import pickle

# Unpickle the monitoring arrays; encoding='latin1' handles the Python-2 pickle.
with open('data/cpu_page_views.pickle', 'rb') as fh:
    cpu_usage, page_views, page_names, total_page_views = pickle.load(fh, encoding='latin1')

print('Array shapes:')
print('-' * 25)
print(f'cpu_usage\t {cpu_usage.shape}')
print(f'page_views\t {page_views.shape}')
print(f'page_names\t {page_names.shape}')
print(f'total_page_views {total_page_views.shape}')
# -

# Plot both series against time to eyeball how they move together.
plt.figure(figsize=(13, 6))
plt.plot(total_page_views, label='Total page views')
plt.plot(cpu_usage, label='CPU %')
plt.legend()
plt.show()
# The orange line on the plot above is the number of page views and the blue line is the CPU load that viewing this pages generates on the server.
#
# ### 2.2 Simple linear regression
#
# First, we're going to work with the total page views on the server, and compare it to the CPU usage. We can make use of a [PyPlot's scatter plot](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter) to understand the relation between the total page views and the CPU usage:
# + deletable=false nbgrader={"cell_type": "code", "checksum": "39d638331c51df5f39d212dfe8ba027d", "grade": false, "grade_id": "scatter", "locked": false, "schema_version": 3, "solution": true}
# One dot per sample: total page views (x) against CPU usage (y).
plt.figure(figsize=(13, 6))
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
plt.scatter(total_page_views, cpu_usage)
plt.show()
# -
# There clearly is a strong correlation between the page views and the CPU usage. Because of this correlation we can build a model to predict the CPU usage from the total page views. If we use a linear model we get a formula like the following:
#
# $$ \text{cpu_usage} = c_0 + c_1 \text{total_page_views} $$
#
# Since we don't know the exact values for $c_0$ and $c_1$ we will have to compute them. For that we'll make use of the [scikit-learn](http://scikit-learn.org/stable/) machine learning library for Python and use [least-squares linear regression](http://scikit-learn.org/stable/modules/linear_model.html#ordinary-least-squares)
import sklearn.linear_model

# Ordinary least squares: fitting will learn c0 (intercept) and c1 (slope).
simple_lin_model = sklearn.linear_model.LinearRegression()
# Now we need to feed the data to the model to fit it.
# ```
# X = [[x_11, x_12, x_13, ...], y = [y_1,
# [x_21, x_22, x_23, ...], y_2,
# [x_31, x_32, x_33, ...], y_3,
# ...] ...]
#
# ```
#
# In general, the [model.fit(X,y)](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit) method takes a matrix X and vector y as arguments and tries to find coefficients that allow to predict the `y_i`'s from the `x_ij`'s. In our case the matrix X will consist of only one column containing the total page views. Our `total_page_views` variable however, is still only a one-dimensional vector, so we need to [`np.reshape()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html) it into a two-dimensional array. Since there is only one feature the second dimension should be `1`. You can leave one dimension unspecified by passing -1, it will be determined from the size of the data.
#
# Then we fit our model using the the total page views and cpu. The coefficients found are automatically stored in the ```simple_lin_model``` object.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "2c08a2f0ad70a94236a4ee1d99c162ff", "grade": false, "grade_id": "model_fit", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# fit() wants a 2-D X: one row per sample, one column for our single feature.
X = total_page_views.reshape((-1, 1))
y = cpu_usage
simple_lin_model.fit(X, y)
# -
# We can now inspect the coefficient $c_1$ and constant term (intercept) $c_0$ of the model:
# Raw dump of each learned coefficient, then a formatted two-line summary.
for coefficient in simple_lin_model.coef_:
    print(coefficient)
print(f"Coefficient = {simple_lin_model.coef_[0]:.2f}\nConstant term = {simple_lin_model.intercept_:.2f}")
# So this means that each additional page view adds about 0.11% CPU load to the server and all the other processes running on the server consume on average 0.72% CPU.
#
# Once the model is trained we can use it to [```predict```](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.predict) the outcome for a given input (or array of inputs). Note that the predict function requires a 2-dimensional array similar to the ```fit``` function.
#
# What is the expected CPU usage when we have 880 page views per second?
# + deletable=false nbgrader={"cell_type": "code", "checksum": "92d234073f5e5f61c371dfbbfae30040", "grade": false, "grade_id": "predict_100", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# predict() also wants a 2-D array: one row, one feature (880 views/sec).
simple_lin_model.predict([[880]])
# -
# Now we plot the linear model together with our data to verify it captures the relationship correctly (the predict method can accept the entire ```total_page_views``` array at once).
# +
# Overlay the fitted line on the raw scatter as a visual sanity check.
plt.figure(figsize=(13, 6))
plt.scatter(total_page_views, cpu_usage, color='black')
fitted_line = simple_lin_model.predict(total_page_views.reshape((-1, 1)))
plt.plot(total_page_views, fitted_line, color='blue', linewidth=3)
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
plt.show()
# -
# Our model can calculate the R2 [`score`](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.score) indicating how well the linear model captures the data. A score of 1 means there is perfect linear correlation and the model can fit the data perfectly, a score of 0 (or lower) means that there is no correlation at all (and it does not make sense to try to model it that way). The score method takes the same arguments as the fit method.
R2 = simple_lin_model.score(total_page_views.reshape((-1, 1)), cpu_usage)
print(f'R2 = {R2:.3f}')
# ### 2.3 Extrapolation
#
# Now let's repeat this experiment with similar but different data. We will try to predict what the CPU usage will be if there will be 8 page views (per second).
# +
# Second dataset: the same server metrics over a different time window.
with open('data/cpu_page_views_2.pickle', 'rb') as fh:
    cpu_usage, total_page_views = pickle.load(fh, encoding='latin1')

print('Array shapes:')
print('-' * 25)
print(f'cpu_usage\t {cpu_usage.shape}')
print(f'total_page_views {total_page_views.shape}')
# + deletable=false nbgrader={"cell_type": "code", "checksum": "42c17b94601aecd1aa9198080ae01a77", "grade": false, "grade_id": "qwerqwer", "locked": false, "schema_version": 3, "solution": true}
# Refit on the new data (total_page_views is already 2-D here), then
# extrapolate down to 8 page views per second.
simple_lin_model = sklearn.linear_model.LinearRegression()
simple_lin_model.fit(total_page_views, cpu_usage)
##### Implement this part of the code #####
#raise NotImplementedError("Code not implemented, follow the instructions.")
prediction = simple_lin_model.predict([[8]])
print(f'The predicted value is: {prediction}')

assert prediction < 25
# -
# Now let's plot what you have done.
# +
# Append the query point (8 views/sec) so the fitted line extends to it.
all_page_views = np.concatenate((total_page_views, [[8]]))
plt.figure(figsize=(13, 6))
plt.scatter(total_page_views, cpu_usage, color='black')
plt.plot(all_page_views, simple_lin_model.predict(all_page_views), color='blue', linewidth=3)
# Red vertical line marks where the prediction was requested.
plt.axvline(8, color='r')
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
plt.show()
# -
# Is this what you would expect? Can you see what's wrong?
#
# Let's plot the time series again to get a different view at the data.
# Time-series view of the same data; the CPU spikes turn out to be backups.
plt.figure(figsize=(16, 5))
plt.plot(total_page_views, label = 'Total page views')
plt.plot(cpu_usage, label= 'CPU %')
plt.legend()
plt.show()
# The spikes of CPU usage are actually backups that run at night and they can be ignored. So repeat the exercise again but ignore these data points.
#
# Hint: The selection variable should contain `True` where there is no backup going on and `False` when the backup occurs. This is an easy shortcut to do a selection of specific data points in numpy arrays.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "7de0328f7131b3f8b628dd11d458c1e4", "grade": false, "grade_id": "qwerqwe", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# Boolean mask: True where CPU < 25% (normal traffic), False during backups.
selection = cpu_usage < 25
assert selection.dtype == np.dtype('bool'), 'The selection variable should be an array of True/False values'
assert len(selection) == len(total_page_views)
# Refit using only the non-backup samples.
simple_lin_model = sklearn.linear_model.LinearRegression()
simple_lin_model.fit(total_page_views[selection], cpu_usage[selection])
prediction = simple_lin_model.predict([[8]])
print(f'The predicted value is: {prediction}')
# Colour the scatter by the mask (green = kept, red = excluded backups).
all_page_views = np.concatenate((total_page_views, [[8]]))
plt.figure(figsize=(13, 6))
plt.scatter(total_page_views, cpu_usage, c=selection, cmap='RdYlGn')
plt.plot(all_page_views, simple_lin_model.predict(all_page_views), color='blue', linewidth=3)
plt.axvline(8, color='r')
plt.xlabel("Total page views")
plt.ylabel("CPU usage")
plt.show()
assert prediction > 23
# -
# So what you should have learned from the previous exercise is that you should always look at your data and/or write scripts to inspect your data. Additionally extrapolation does not always work because there are no training examples in that area.
#
# ## 3. Multiple linear regression
#
# A server can host different pages and each of the page views will generate load on the CPU. This load will however not be the same for each page.
#
# Now let us consider the separate page views and build a linear model for that. The model we try to fit takes the form:
#
# $$\text{cpu_usage} = c_0 + c_1 \text{page_views}_1 + c_2 \text{page_views}_2 + \ldots + c_n \text{page_views}_n$$
#
# where the $\text{page_views}_i$'s correspond the our different pages:
#
# +
# Reload the full dataset: per-page view counts plus their page names.
with open('data/cpu_page_views.pickle', 'rb') as fh:
    cpu_usage, page_views, page_names, total_page_views = pickle.load(fh, encoding='latin1')

print('Array shapes:')
print('-' * 25)
print(f'cpu_usage\t {cpu_usage.shape}')
print(f'page_views\t {page_views.shape}')
print(f'page_names\t {page_names.shape}')
print(f'total_page_views {total_page_views.shape}\n')

print(page_names)
# -
# We start again by creating a [```LinearRegression```](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression) model.
multi_lin_model = sklearn.linear_model.LinearRegression()
# Next we fit the model on the data, using `multi_lin_model.fit(X,y)`. In contrast to the case above our `page_views` variable already has the correct shape to pass as the X matrix: it has one column per page.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "7e95bd2b512228ec440de2dbc8e7e3c8", "grade": false, "grade_id": "multi_lin_model_fit", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# page_views already has one column per page, so it can be used as X directly.
X = page_views
y = cpu_usage
multi_lin_model.fit(X, y)
# -
# Now, given the coefficients calculated by the model, which capture the contribution of each page view to the total CPU usage, we can start to answer some interesting questions. For example,
# which page view causes most CPU usage, on a per visit basis?
#
# For this we can generate a table of page names with their coefficients in descending order:
# +
# Some quick and dirty code to print the most consuming pages first
print('Index\tCPU (%)\t Page')
print('-'*41)
# argsort on the negated coefficients gives indices in descending order.
indices = np.argsort(-multi_lin_model.coef_)
for i in indices:
    print(f"{i}\t{ multi_lin_model.coef_[i]:4.2}\t {page_names[i]}")
# -
# From this table we see that 'resources/js/basket.js' consumes the most per CPU per view. It generates about 0.30% CPU load for each additional page view. 'products/science.html' on the other hand is much leaner and only consumes about 0.04% CPU per view.
#
# Now let us investigate the constant term again.
# The intercept is the baseline CPU load attributed to everything else.
print(f'The other processes on the server consume {multi_lin_model.intercept_:.2f}%')
# As you can see this term is very similar to the result achieved in single linear regression, but it is not entirely the same. This means that these models are not perfect. However, they seem to be able to give a reliable estimate.
#
# ## 4. Non-linear Regression
#
# Sometimes linear relations don't cut it anymore, so you might want a more complex method. There are 2 approaches to this:
# * Use a non-linear method (such as Neural Networks, Support Vector Machines, Random Forests and Gaussian Processes)
# * Use non-linear features as pre-processing for a linear method
#
# Actually both methods are in essence identical and there is not always a clear distinction between the two. We will use the second approach in this section since it is easier to understand what is going on.
#
# Please note that it is very often not even necessary to use non-linear methods, since the linear methods can be extremely powerful on their own and they are quite often very stable and reliable (in contrast to non-linear methods).
#
# ### 4.1. Fitting a sine function with linear regression
#
# As an example task, we'll try to fit a sine function. We will use the [`np.sin()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.sin.html) function to compute the sine of the elements in a numpy array.
# +
# Dense grid on [0, 6) as a column vector. NOTE: this `x` is reused by the
# plotting cell further below, so its name must not change.
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.figure(figsize=(13, 6))
plt.plot(x, np.sin(x))
plt.show()
# -
# For our training set, we will calculate 10 _y_ values from evenly spaced _x_ values using this function.
# helper function to generate the data
def sine_train_data():
    """Return 10 evenly spaced x values on [0, 6] and their sines, as (10, 1) columns."""
    xs = np.linspace(0, 6, 10).reshape((-1, 1))
    return xs, np.sin(xs)
# +
# Scatter the 10 training samples drawn from the sine curve.
x_train, y_train = sine_train_data()
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
plt.show()
# -
# Now let's try to fit a model to this data with linear regression.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "d0b990debec632693543df6593e36d85", "grade": false, "grade_id": "qwerq2", "locked": false, "schema_version": 3, "solution": true}
x_train, y_train = sine_train_data()
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# A plain linear model on x alone: it can only draw a straight line.
X = x_train
y = y_train
model = sklearn.linear_model.LinearRegression()
model.fit(X, y)

print(f'The R2 score of this model is: {model.score(x_train, y_train):.3}')

# Plot the (poor) straight-line fit over the dense grid from the cell above.
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
plt.plot(x, model.predict(x))
plt.show()
# -
# As you can see this fit is not optimal.
#
# ### 4.2. Fitting a sine function using polynomial expansion
#
# One of the easiest ways to make your machine learning technique more *intelligent* is to extract relevant features from the data. These features can be anything that you can find that will make it easier for the method to be able to fit the data. This means that as a machine learning engineer it is best to know and understand your data.
#
# As some of you might remember from math class, you can create an approximation of any function (including a sine function) using a polynomial function with the [Taylor expansion](https://en.wikipedia.org/wiki/Taylor_series). So we will use that approach to learn a better fit.
#
# In this case we will create what we call features using a [polynomial expansion](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html). If you set the degree to 3 it will generate data of the 0d, 1st, 2nd and 3rd order (including cross products) as shown in the example below (change `x` and `degree` to see the different expansions of `x` to a certain `degree`).
# +
import sklearn.preprocessing

# Degree-3 expansion of a single value: [1, x, x^2, x^3].
x = [[2]]
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
pol_exp.fit_transform(x)
# -
# As you can see above this function transforms $x$ into [$x^0$, $x^1$, $x^2$, $x^3$] with $x^0=1$ and $x^1 = x$. If you have 2 inputs it will also take the cross products so that [$x_1$, $x_2$] is transformed into: [1, $x_1$, $x_2$, $x_1^2$, $x_1x_2$, $x_2^2$, $x_1^3$, $x_1^2x_2$, $x_1x_2^2$, $x_2^3$] as shown below.
# With two inputs the expansion also includes all cross products.
x = [[2, 3]]
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
pol_exp.fit_transform(x)
# In this example we only have 1 input so the number of features is always the `degree + 1`.
#
# Because of this polynomial features extraction finding of the coefficients of the polynomial becomes a linear problem, so similar to the previous exercise on multiple linear regression you can find the optimal weights as follows:
#
# $$y = c_0 + c_1 x + c_2 x^2 + c_3 x^3 + \cdots + c_n x^n$$
#
# So for multiple values of $x$ and $y$ you can minimize the error of this equation using linear regression. How this is done in practice is shown below.
# +
x_train, y_train = sine_train_data()

# Expand x to [1, x, x^2, x^3], then fit a linear model on those features.
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)

model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this model is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')

# Predict on a dense grid (expanded the same way) to draw a smooth curve.
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
# -
# The more relevant these features are the better your model can fit the data.
#
# Now play with the degree of the polynomial expansion function below to create better features. Search for the optimal degree.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "3752f06005eae427e3ea49f57a6aa5d9", "grade": false, "grade_id": "qwerq", "locked": false, "schema_version": 3, "solution": true}
x_train, y_train = sine_train_data()
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# Degree 7 is enough to drive the train R2 above the .99999 threshold checked below.
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= 7 )

model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
# train_score is asserted in a later cell; keep the name unchanged.
train_score = model.score(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this model is: {train_score:.6f}')

plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
# -
# Now let's test this on new and unseen data.
def sine_test_data():
    """Return 6 unseen points x = 0.5, 1.5, ..., 5.5 and their sines, as (6, 1) columns."""
    xs = (np.arange(6) + 0.5).reshape((-1, 1))
    return xs, np.sin(xs)
# + deletable=false nbgrader={"cell_type": "code", "checksum": "147ec8d6cfdba0d03780cc03f753232b", "grade": false, "grade_id": "qwer", "locked": false, "schema_version": 3, "solution": true}
assert train_score > .99999, 'Adjust the degree parameter 2 cells above until the train_score > .99999'
x_test, y_test = sine_test_data()

# Plot train and (unseen) test points together with the fitted curve.
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0, 6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# BUG FIX: the original scored on (x_train, y_train) while printing and
# asserting a *test*-set score; score on the test data instead.
test_score = model.score(pol_exp.fit_transform(x_test), y_test)

print(f'The R2 score of the model on the test set is: {test_score:.3f}')
assert test_score > 0.99
# -
# If everything is correct your score is very close to 1. Which means that we have built a model that can fit this data (almost) perfectly.
#
# ### 4.3. Add noise to the equation
#
# Sadly all the data that we measure or gather doesn't have the mathematical precision of the data we used here. Quite often our measurements contain noise.
#
# So let us repeat this process for data with more noise. Similarly as above, you have to choose the optimal degree of the polynomials.
# a helper function to create the sine train set that can also add noise to the data
def noisy_sine_train_data(noise=None):
    """Return the 10-point sine training set with Gaussian jitter on x.

    With noise='fixed' the RNG is seeded first, so the jitter is identical
    on every call; otherwise it is freshly random each time.
    """
    xs = np.linspace(0, 6, 10).reshape((-1, 1))
    # Targets are computed from the clean grid, *before* x is perturbed.
    ys = np.sin(xs)
    if noise == 'fixed':
        np.random.seed(1)
    jitter = np.random.randn(len(xs)).reshape((-1, 1)) / 5
    return xs + jitter, ys
# +
# NOTE(review): these "test" points are drawn from the *train* data generator
# (noisy_sine_train_data), not sine_test_data — presumably intentional for the
# illustration, but worth confirming.
x_test, y_test = noisy_sine_train_data()
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0, 6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
# + deletable=false nbgrader={"cell_type": "code", "checksum": "108fff88dfd8a0836eef11ae471ef905", "grade": false, "grade_id": "asdqet", "locked": false, "schema_version": 3, "solution": true}
# Fixed-seed noise so the exercise is reproducible.
x_train, y_train = noisy_sine_train_data(noise='fixed')
##### Implement this part of the code #####
#raise NotImplementedError("Code not implemented, follow the instructions.")
# Degree 9 pushes the train score past 0.99 — deliberately overfitting the noise.
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= 9 )

model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
train_score = model.score(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this method on the train set is {train_score:.3f}')

assert train_score > 0.99
# -
# Now let's see what this results to in the test set.
# Score the overfit model on clean test points — expect a poor result.
x_test, y_test = sine_test_data()
print(f'The R2 score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
# As you can clearly see, this result is not that good. Why do you think this is?
#
# Now plot the result to see the function you created.
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
# Is this what you expect?
#
# Now repeat the process below a couple of times for random noise.
# +
x_train, y_train = noisy_sine_train_data()
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=9)
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this method on the train set is {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(x_train[0], x_train[-1], 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
# -
# What did you observe? And what is the method learning? And how can you avoid this?
#
# Try to figure out a solution for this problem without changing the noise level.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "4ed652490853774f912c9893671ea87a", "grade": false, "grade_id": "qwe", "locked": false, "schema_version": 3, "solution": true}
x_train, y_train = noisy_sine_train_data(noise='fixed')
x_test, y_test = sine_test_data()
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
# A lower degree (3) has too few parameters to memorise the noise.
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)

model = sklearn.linear_model.LinearRegression()
model.fit( pol_exp.fit_transform(x_train), y_train )
print(f'The score of this method on the train set is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')

plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train)
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()

# Generalisation check on the clean test points.
test_score = model.score(pol_exp.fit_transform(x_test), y_test)
print(f'The score of the model on the test set is: {test_score:.3f}')

assert test_score > 0.99, 'Adjust the degree parameter until test_score > 0.99'
# -
# ## 5. Over-fitting and Cross-Validation
#
# What you have experienced above is called over-fitting and happens when your model learns the noise that is inherent in the data.
#
# This problem was caused because there were to many parameters in the model. So the model was too advanced so that it became capable of learning the noise in the data by heart. Reducing the number of parameters solves this problem. But how do you know how many parameters is optimal?
#
# (Another way to solve this problem is to use more data. Because if there are more data points in the data and if there is more noise, your model isn't able to learn all that noise anymore and you get a better result. Since it's often not possible to gather more data we will not take this approach.)
#
# In the exercise above you had to set the number of polynomial functions to get a better result, but how can you estimate this in a reliable way without manually selection the optimal parameters?
#
# ### 5.1. Validation set
#
# A common way to solve this problem is through the use of a validation set. This means that you use a subset of the training data to train your model on, and another subset of the training data to validate your parameters. Based on the score of your model on this validation set you can select the optimal parameter.
#
# So use this approach to select the best number of polynomials for the noisy sine function.
# +
# create the data in case you skipped the previous exercise
# a helper function to create the sine train set that can also add noise to the data
def noisy_sine_train_data(noise=None):
    """Return 10 sine samples whose x values carry Gaussian jitter.

    noise='fixed' seeds numpy's RNG first, making the jitter reproducible;
    any other value leaves the RNG state alone, so the jitter varies per call.
    """
    grid = np.linspace(0, 6, 10).reshape((-1, 1))
    # Compute the targets from the clean grid before perturbing x.
    targets = np.sin(grid)
    if noise == 'fixed':
        np.random.seed(1)
    return grid + np.random.randn(len(grid)).reshape((-1, 1)) / 5, targets
def sine_test_data():
    """Return six noise-free test points lying halfway between the train grid steps."""
    inputs = np.arange(6).reshape((-1, 1)) + 0.5
    outputs = np.sin(inputs)
    return inputs, outputs
# + deletable=false nbgrader={"cell_type": "code", "checksum": "a0dc19a1878966ec9d2e18b3a6ddb6ff", "grade": false, "grade_id": "qw", "locked": false, "schema_version": 3, "solution": true}
x_train, y_train = noisy_sine_train_data(noise='fixed')
# we randomly pick 3 data points to get a nice validation set
train_i = [0, 1, 3, 4, 6, 7, 9]
val_i = [2, 5, 8]
# create the train and validation sets
x_train_i = x_train[train_i, :]
y_train_i = y_train[train_i]
x_val_i = x_train[val_i, :]
y_val_i = y_train[val_i]
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= 3 )
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train_i), y_train_i)
##### Implement this part of the code #####
# raise NotImplementedError("Code not implemented, follow the instructions.")
train_score = model.score( pol_exp.fit_transform(x_train_i), y_train_i )
validation_score = model.score( pol_exp.fit_transform(x_val_i), y_val_i )
print(f'The R2 score of this model on the train set is: {train_score:.3f}')
print(f'The R2 score of this model on the validation set is: {validation_score:.3f}')
# -
# Now test this result on the test set with the following code.
# +
assert pol_exp.degree < 5, 'Select a polynomial degree < 5'
x_test, y_test = sine_test_data()
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print(f'The score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
# -
# As you can see this approach works to select the optimal degree. Usually the test score is lower than the validation score, but in this case it is not because the test data doesn't contain noise.
#
# ### 5.2. Cross-Validation
#
# To improve this procedure you can repeat the process above for different train and validation sets so that the optimal parameter is less dependent on the way the data was selected.
#
# One basic strategy for this is **leave-one-out** cross validation, where each data point is left out of the train set once, and the model is then validated on this point. Now let's implement this. First make a 2-dimensional array `results` to store all your results using the [`np.ones()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html) function: 1 dimension (row) for each validation set and 1 dimension (column) for each degree of the `PolynomialFeatures()` function. Then you loop over all the validation sets followed by a loop over all the degrees of the `PolynomialFeatures()` function you want to try out. Then set the result for that experiment in the right element of the `results` array.
#
# We will use the [mean squared error (MSE)](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html) instead of R2 because that is more stable. Since the MSE measures the error, smaller values are better.
#
# Once you have your results, average them over all validation sets (using the [`np.mean()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html) function over the correct axis) so that you know the average error for each degree over all validation sets. Now find the degree with the smallest error using the [`np.argmin()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html) function.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "f79d24faa8f07c5b92a98620474818b8", "grade": false, "grade_id": "q", "locked": false, "schema_version": 3, "solution": true}
x_train, y_train = noisy_sine_train_data(noise='fixed')
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# results = np.inf * np.ones( ? )
# multiplied with a very large number, np.inf, since we are looking for the smallest error
# for i in range( ? ):
train_i = np.where(np.arange(10) != i)[0]
x_train_i = x_train[train_i, :]
y_train_i = y_train[train_i]
x_val_i = x_train[i:i+1, :]
y_val_i = y_train[i:i+1]
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# for degree in range(?):
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train_i), y_train_i)
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# results[ ? ] = sklearn.metrics.mean_squared_error(model.predict(pol_exp.fit_transform(x_val_i)), y_val_i)
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# average the results over all validation sets
# average_results = np.mean(results, axis= ? )
# find the optimal degree
# degree = np.argmin( ? )
print(f'The optimal degree for the polynomials is: {degree}')
# -
# Now let's have a look at the result.
# +
assert degree == 3
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=degree)
model = sklearn.linear_model.LinearRegression()
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The score of this method on the train set is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13, 6))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print(f'The score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
# -
# As you can see this automatic way of selecting the optimal degree has resulted in a good fit for the sine function.
#
# ### 5.3 Regularisation
#
# When you have too many parameters in your model, there is a risk of over-fitting, i.e. your model learns the noise. To avoid this, techniques have been developed to make an estimation of this noise.
#
# One of these techniques is Ridge Regression. This linear regression technique has an additional parameter called the regularisation parameter. This parameter basically sets the standard deviation of the noise you want to remove. The effect in practice is that it makes sure the weights of linear regression remain small and thus less over-fitting.
#
# Since this is an additional parameter that needs to be set, it needs to be set using cross-validation as well. Luckily sklearn developed a method that does this for us in a computational efficient way called [`sklearn.linear_model.RidgeCV()`](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html)
# + deletable=false nbgrader={"cell_type": "code", "checksum": "9a7da74fa39fee9d63a93da0fe6b584b", "grade": false, "grade_id": "asdfasdf", "locked": false, "schema_version": 3, "solution": true}
x_train, y_train = noisy_sine_train_data(noise='fixed')
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=9)
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.linear_model. ?
model.fit(pol_exp.fit_transform(x_train), y_train)
print(f'The R2 score of this method on the train set is: {model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
plt.figure(figsize=(13,8))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test, y_test, color='r', label='test')
plt.legend()
x = np.arange(0,6, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print(f'The R2 score of the model on the test set is: {model.score(pol_exp.fit_transform(x_test), y_test):.3f}')
# -
# As you can see above, the result of Ridge Regression is not as good as reducing the number of features in this example. However it works a lot better than without regularisation (try that). In the example above you will notice that it makes the result a lot smoother and removes the unwanted spikes. It will actually make sure that if you have too many features you still get a reasonable result. So this means that it should be in your standard toolkit.
#
# The removal of the extra features can be automated using feature selection. A very short introduction to sklearn on the topic can be found [here](http://scikit-learn.org/stable/modules/feature_selection.html).
#
# Another method that is often used is [`sklearn.linear_model.LassoCV()`](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoCV.html#sklearn.linear_model.LassoCV) which actually combines removal of features and estimation of the noise. It is however very dependent on the dataset which of the two methods performs best.
#
# Cross-validation should be applied to any parameter you set in your function and that without looking at the test set.
#
# Over-fitting is one of the biggest issues in machine learning and a lot of the research that is currently being done in machine learning is a search for techniques to avoid over-fitting. As a starting point we list a few of the techniques that you can use to avoid over-fitting:
# * Use more data
# * Artificially generate more data based on the original data
# * Use a smaller model (with less parameters)
# * Use less features (and thus less parameters)
# * Use a regularisation parameter
# * Artificially add noise to your model
# * Only use linear models or make sure that the non-linearity in your model is closer to a linear function
# * Combine multiple models that each over-fit in their own way into what is called an ensemble
#
# ### 5.4 Extrapolation
#
# Now let's extend the range of the optimal plot you achieved from -4 to 10. What do you see? Does it look like a sine function?
# +
x_train, y_train = noisy_sine_train_data(noise='fixed')
pol_exp = sklearn.preprocessing.PolynomialFeatures(degree=3)
model = sklearn.linear_model.RidgeCV()
model.fit(pol_exp.fit_transform(x_train), y_train)
print('The R2 score of this method on the train set is:',
f'{model.score(pol_exp.fit_transform(x_train), y_train):.3f}')
# Now test outside the area of the training
x_test_extended = np.array([-3,-2,-1,7,8,9]).reshape((-1, 1))
y_test_extended = np.sin(x_test_extended)
plt.figure(figsize=(13, 8))
plt.scatter(x_train, y_train, label='train')
plt.scatter(x_test_extended, y_test_extended, color='r', label='test')
plt.legend()
x = np.arange(-4,10, 0.01).reshape((-1, 1))
plt.plot(x, model.predict(pol_exp.fit_transform(x)))
plt.show()
print('The R2 score of the model on the test set outside the area used for training is:',
f'{model.score(pol_exp.fit_transform(x_test_extended), y_test_extended):.3f}')
# -
# As you can see, the extrapolation results for non-linear regression are even worse than for those of linear regression. This is because models only work well in the input space they have been trained in.
#
# A possible way to be able to extrapolate and to use a non-linear method is to use forecasting techniques. This is handled in part 7, an optional part for those interested and going through the tutorial quite fast. Otherwise continue to the section on classification in exercise 6.
# ## 6. Classification
#
# In classification the purpose is to separate 2 classes. As an example we will use the double spiral. It is a very common toy example in machine learning and allows you to visually show what is going on.
#
# As shown in the graph below the purpose is to separate the blue from the red dots.
# +
# Some code to generate spirals. You can ignore this for now.
# To comply with standards in machine learning we use x1 and x2 as opposed to x and y for this graph
# because y is reserved for the output in Machine Learning (= 0 or 1 in this case)
# Radii sampled densely from 0.1 to 1.5; each radius maps to an angle,
# tracing out an Archimedean spiral.
r = np.arange(0.1, 1.5, 0.0001)
theta = 2 * np.pi * r
# Class 0 spiral in Cartesian coordinates.
x1_0 = r * np.cos(theta)
x2_0 = r * np.sin(theta)
# Class 1 spiral: the same curve mirrored through the origin.
x1_1 = - r * np.cos(theta)
x2_1 = - r * np.sin(theta)
# Draw 1000 random points per class and jitter them with Gaussian noise
# (standard deviation 0.2) so the two classes overlap slightly.
perm_indices = np.random.permutation(range(len(x1_0)))
x1_0_rand = x1_0[perm_indices[ : 1000]] + np.random.randn(1000) / 5
x2_0_rand = x2_0[perm_indices[ : 1000]] + np.random.randn(1000) / 5
x1_1_rand = x1_1[perm_indices[1000 : 2000]] + np.random.randn(1000) / 5
x2_1_rand = x2_1[perm_indices[1000 : 2000]] + np.random.randn(1000) / 5
# Scatter the noisy samples and overlay the noise-free spirals.
plt.figure(figsize=(8, 8))
plt.scatter(x1_0_rand, x2_0_rand, color = 'b', alpha=0.6, linewidth=0)
plt.scatter(x1_1_rand, x2_1_rand, color = 'r', alpha=0.6, linewidth=0)
plt.plot(x1_0, x2_0, color = 'b', lw=3)
plt.plot(x1_1, x2_1, color='r', lw=3)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
# -
# In a colored image this is easy to do, but when you remove the color it becomes much harder. Can you do the classification in the image below?
#
# In black the samples from the train set are shown and in yellow the samples from the validation set.
# +
# Create a train and validation set
# First 800 noisy samples of each spiral form the train set; labels are
# 0 for the first spiral and 1 for the second. Features are (x1, x2) pairs.
x_train_0 = np.concatenate((x1_0_rand[ : 800].reshape((-1,1)), x2_0_rand[ : 800].reshape((-1,1))), axis=1)
y_train_0 = np.zeros((len(x_train_0),))
x_train_1 = np.concatenate((x1_1_rand[ : 800].reshape((-1,1)), x2_1_rand[ : 800].reshape((-1,1))), axis=1)
y_train_1 = np.ones((len(x_train_1),))
# The remaining 200 samples per class are held out for validation.
x_val_0 = np.concatenate((x1_0_rand[800 : ].reshape((-1,1)), x2_0_rand[800 : ].reshape((-1,1))), axis=1)
y_val_0 = np.zeros((len(x_val_0),))
x_val_1 = np.concatenate((x1_1_rand[800 : ].reshape((-1,1)), x2_1_rand[800 : ].reshape((-1,1))), axis=1)
y_val_1 = np.ones((len(x_val_1),))
# Stack both classes into single train and validation arrays.
x_train = np.concatenate((x_train_0, x_train_1), axis=0)
y_train = np.concatenate((y_train_0, y_train_1), axis=0)
x_val = np.concatenate((x_val_0, x_val_1), axis=0)
y_val = np.concatenate((y_val_0, y_val_1), axis=0)
# Plot the train and test data
# Train samples in black, validation samples in yellow (colors hide the labels).
plt.figure(figsize=(8, 8))
plt.scatter(x_train[:, 0], x_train[:, 1], color='k', alpha=0.6, linewidth=0)
plt.scatter(x_val[:, 0], x_val[:, 1], color='y', alpha=0.6, linewidth=0)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.show()
# -
# As you can see, classifying is very hard to do when you are not given the answer, even if you saw the solution earlier. But you will see that machine learning algorithms can solve this quite well if they can learn from examples.
#
# ### 6.1 Linear classifier
#
# Let's try to do this with a linear classifier.
#
# Logistic regression, despite its name, is a linear model for classification rather than regression. Its sklearn implementation is [`sklearn.linear_model.LogisticRegression()`](https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression).
# + deletable=false nbgrader={"cell_type": "code", "checksum": "719de52ed0ea836a7a2eb11fd27393a0", "grade": false, "grade_id": "asdfasd", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.linear_model. ?
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
assert val_score > 0.5
# -
# Now let's plot the result.
# +
# A quick and dirty helper function to plot the decision boundaries
def plot_decision_boundary(model, pol_exp=None):
    """Plot the model's predicted class over a 250x250 grid on [-2, 2]^2.

    Blue squares mark grid points predicted below 0.5, red squares above.
    The noise-free spirals (module-level globals x1_0/x2_0 and x1_1/x2_1)
    are overlaid for reference.

    Args:
        model: fitted estimator with a predict() method.
        pol_exp: optional PolynomialFeatures transformer applied to the
            grid before predicting (must match how the model was trained).
    """
    n = 250
    lin = np.linspace(-2, 2, num=n).reshape((-1, 1))
    # Build the flattened meshgrid via outer products.
    g1 = np.dot(lin, np.ones((1, n))).reshape((-1, 1))
    g2 = np.dot(np.ones((n, 1)), lin.T).reshape((-1, 1))
    grid = np.concatenate((g1, g2), axis=1)
    feats = grid if pol_exp is None else pol_exp.fit_transform(grid)
    labels = model.predict(feats)
    blue = np.where(labels < 0.5)
    red = np.where(labels > 0.5)
    plt.figure(figsize=(8, 8))
    plt.scatter(grid[blue, 0], grid[blue, 1], color='b', s=2, alpha=0.5, linewidth=0, marker='s')
    plt.scatter(grid[red, 0], grid[red, 1], color='r', s=2, alpha=0.5, linewidth=0, marker='s')
    plt.plot(x1_0, x2_0, color='b', lw=3)
    plt.plot(x1_1, x2_1, color='r', lw=3)
    plt.xlim(-2, 2)
    plt.ylim(-2, 2)
# Call the function
plot_decision_boundary(model)
# -
# As you can see a linear classifier returns a linear decision boundary.
#
# ### 6.2 Non-linear classification
#
# Now let's do this better with a non-linear classifier using polynomials. Play with the degree of the polynomial expansion and look for the effect on the validation set accuracy of the [`LogisticRegressionCV()`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html) model. This is a more advanced version of the default [`LogisticRegression()`](https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression) that uses cross validation to tune its hyper-parameters. What gives you the best results?
#
# _If you get a lot of "failed to converge" warnings consider increasing the `max_iter` parameter to 1000 or so. Getting some warnings is normal._
# + deletable=false nbgrader={"cell_type": "code", "checksum": "337a33597f779486845ffe7de85186bc", "grade": false, "grade_id": "asdfas", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.linear_model. ?
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ? )
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_train)), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_val)), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model, pol_exp=pol_exp)
assert val_score > 0.8
# -
# If everything went well you should get a validation/test accuracy very close to 0.8.
#
# ### 6.3 Random Forests
#
# An often used technique in machine learning are random forests. Basically they are [decision trees](https://en.wikipedia.org/wiki/Decision_tree_learning), or in programmers terms, if-then-else structures, like the one shown below.
#
# <img src="Images/tree.png" width=70%>
#
# Decision trees are known to over-fit a lot because they just learn the train set by heart and store it. Random forests on the other hand combine multiple different (randomly initialized) decision trees that all over-fit in their own way. But by combining their output using a voting mechanism, they tend to cancel out each other's mistakes. This approach is called an [ensemble](https://en.wikipedia.org/wiki/Ensemble_learning) and can be used for any combination of machine learning techniques. A schema representation of how such a random forest works is shown below.
#
# <img src="Images/random_forest.jpg">
#
# Now let's try to use a random forest to solve the double spiral problem. (see [`sklearn.ensemble.RandomForestClassifier()`](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html))
# + deletable=false nbgrader={"cell_type": "code", "checksum": "816d1a617a4380e8a88cfd3c9d853294", "grade": false, "grade_id": "asdfa", "locked": false, "schema_version": 3, "solution": true}
import sklearn.ensemble
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = ?
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.7
# -
# As you can see they are quite powerful right out of the box without any parameter tuning. But we can get the results even better with some fine tuning.
#
# Try changing the `min_samples_leaf` parameter for values between 0 and 0.5.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "c8b29b6867ff2d516dd718c356a58fda", "grade": false, "grade_id": "asdf", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.ensemble.RandomForestClassifier(min_samples_leaf= ? )
model.fit(x_train, y_train)
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.5
# -
# The `min_samples_leaf` parameter sets the number of data points that can create a new branch/leaf in the tree. So in practice it limits the depth of the decision tree. The bigger this parameter is, the less deep the tree will be and less likely each tree will over-fit.
#
# For this parameter you can set integer numbers to set the specific number of samples, or you can use values between 0 and 0.5 to express a percentage of the size of the dataset. Since you might experiment with a smaller dataset to roughly tune your parameters, it is best to use values between 0 and 0.5 so that the value you chose is not as dependent on the size of the dataset you are working with.
#
# Now that you have found the optimal `min_samples_leaf` run the code again with the same parameter. Do you get the same result? Why not?
#
# Another parameter to play with is the `n_estimators` parameter. Play with only this parameter to see what happens.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "9f58861bad508fc36f8921ad3876f478", "grade": false, "grade_id": "asd", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.ensemble.RandomForestClassifier(n_estimators= ? )
model.fit(x_train, y_train)
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.7
# -
# As you can see increasing the number of estimators improves the model and reduces over-fitting. This parameter actually sets the number of trees in the random forest. The more trees there are in the forest the better the result is. But obviously it requires more computing power so that is the limiting factor here.
#
# This is the basic idea behind ensembles: if you combine more tools you get a good result on average.
#
# Now try combining the `n_estimators` and `min_samples_leaf` parameter below.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "fdab44f96434d8e74c1e9adda21c8382", "grade": false, "grade_id": "as", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.ensemble.RandomForestClassifier(n_estimators= ? , min_samples_leaf= ? )
model.fit(x_train, y_train)
train_score = sklearn.metrics.accuracy_score(model.predict(x_train), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(x_val), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model)
assert val_score > 0.7
# -
# As you have noticed by now it seems that random forests are less powerful than linear regression with polynomial feature extraction. This is because these polynomials are ideally suited for this task. This also means that you could get a better result if you would also apply polynomial expansion for random forests. Try that below.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "dabda2076fed6efec7df60956e69c10b", "grade": false, "grade_id": "a", "locked": false, "schema_version": 3, "solution": true}
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model = sklearn.ensemble.RandomForestClassifier(n_estimators= ? , min_samples_leaf= ? )
# pol_exp = sklearn.preprocessing.PolynomialFeatures(degree= ?)
# model.fit( ? )
train_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_train)), y_train)
print(f'The train accuracy is: {train_score:.3f}')
val_score = sklearn.metrics.accuracy_score(model.predict(pol_exp.fit_transform(x_val)), y_val)
print(f'The validation accuracy is: {val_score:.3f}')
plot_decision_boundary(model, pol_exp=pol_exp)
assert val_score > 0.7
# -
# As you have may have noticed, it is hard to get results that are better than the ones obtained using logistic regression. This illustrates that linear techniques are very powerful and often underrated. But in some situations they are not powerful enough and you need something stronger like a random forest or even neural networks (check [this](https://playground.tensorflow.org/#dataset=spiral&noise=45) simulator if you want to play with the latter).
#
# There is one neat trick that can be used for random forests. If you set the `n_jobs` it will use more than 1 core to compute. Set it to -1 to use all the cores (including hyper-threading cores). But don't do that during this tutorial because that would block the machine you are all working on.
#
# To avoid over-fitting, you can set the `max_depth` parameter for random forests which limits the maximum depth of each tree. Alternatively, you can set the `min_samples_split` parameter which determines how many data points you need at least before you create another split (this is an additional if-else structure) while building the tree. Or the `min_samples_leaf` that sets the minimum amount of data points you have in each leaf. All 3 parameters are dependent on the number of data points in your dataset especially the last 2 so don't forget to adapt them if you have been playing around with a small subset of the data. (A good trick to solve this might be to use a range similar to `[0.0001, 0.001, 0.01, 0.1] * len(x_train)`. Feel free to extend the range in any direction. It is generally good practice to construct them using a log scale like in the example, or better like this: `10.0**np.arange(-5, 0, 0.5) * len(x_train)`.) In my experience `min_samples_split` or `min_samples_leaf` give slightly better results and it usually doesn't make sense to combine more than 1 of these parameters.
#
# In the previous exercises we have done a lot of the optimizations on the test set. This should of course be avoided. What you should do instead is to optimize and select your model using a validation set and of course you should automate this process as shown in one of the earlier exercises. One thing to take into account here is that you should use multiple initialisations of a random forest because the decision trees are randomly generated.
# ## 7. Forecasting (Optional)
#
# We are going to forecast page views data, very similar to the data used in the anomaly detection section. The data contains 1 sample per hour.
# +
with open('data/train_set_forecasting.pickle', 'rb') as file:
train_set = pickle.load(file, encoding='latin1')
print(f'Shape of the train set = {train_set.shape}')
plt.figure(figsize=(20,4))
plt.plot(train_set)
plt.show()
# -
# In the graph above you can clearly see that there is a rising trend in the data.
#
# ### 7.1 One-step ahead prediction
#
# This forecasting section will describe the one-step ahead prediction. In this case, this means that we will only predict the next data point i.e. the number of page views in the next hour.
#
# Now let's first build a model that tries to predict the next data point from the previous one.
# +
import sklearn
import sklearn.linear_model
import sklearn.gaussian_process
model = sklearn.linear_model.LinearRegression()
# the input x_train contains all the data except the last data point
x_train = train_set[ : -1].reshape((-1, 1)) # the reshape is necessary since sklearn requires a 2 dimensional array
# the output y_train contains all the data except the first data point
y_train = train_set[1 : ]
# this code fits the model on the train data
model.fit(x_train, y_train)
# this score gives you how well it fits on the train set
# higher is better and 1.0 is perfect
print(f'The R2 train score of the linear model is {model.score(x_train, y_train):.3f}')
# -
# As you can see from the score above, the model is not perfect but it seems to get a relatively high score. Now let's make a prediction into the future and plot this.
#
# To predict the data point after that we will use the predicted data to make a new prediction. The code below shows how this works for this data set using the linear model you used earlier. Don't forget to fill out the missing code.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "b75d67ba8bb3a239b23cf020a017f221", "grade": false, "grade_id": "nof_predictions", "locked": false, "schema_version": 3, "solution": true}
n_predictions = 100
import copy
# use the last data point as the first input for the predictions
x_test = copy.deepcopy(train_set[-1]) # make a copy to avoid overwriting the training data
prediction = []
for i in range(n_predictions):
# predict the next data point
y_test = model.predict([[x_test]])[0] # sklearn requires a 2 dimensional array and returns a one-dimensional one
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# prediction.append( ? )
# x_test = ?
prediction = np.array(prediction)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction)), 'g')
plt.plot(train_set, 'b')
plt.show()
# -
# As you can see from the image above the model doesn't quite seem to fit the data well. Let's see how we can improve this.
#
# ### 7.2 Multiple features
#
# If your model is not smart enough there is a simple trick in machine learning to make your model more intelligent (but also more complex). This is by adding more features.
#
# To make our model better we will use more than 1 sample from the past. To make your life easier there is a simple function below that will create a data set for you. The ```width``` parameter sets the number of hours in the past that will be used.
def convert_time_series_to_train_data(ts, width):
    """Turn a 1-D time series into a sliding-window regression data set.

    Each row of the returned x holds `width` consecutive samples and the
    matching entry of y is the sample immediately following that window.

    NOTE(review): the loop runs to len(ts) - width - 1, which drops the last
    usable window; kept as-is to preserve the original data-set size.

    Args:
        ts: 1-D array-like time series.
        width: number of past samples per input row.

    Returns:
        (x, y): arrays of shape (len(ts) - width - 1, width) and
        (len(ts) - width - 1,).
    """
    n_windows = len(ts) - width - 1
    windows = [ts[start : start + width] for start in range(n_windows)]
    targets = [ts[start + width] for start in range(n_windows)]
    return np.array(windows), np.array(targets)
# +
width = 5
x_train, y_train = convert_time_series_to_train_data(train_set, width)
print(x_train.shape, y_train.shape)
# -
# As you can see from the print above both `x_train` and `y_train` contain 303 data points. For `x_train` you see that there are now 5 features which contain the page views from the 5 past hours.
#
# So let's have a look what the increase from 1 to 5 features results to.
width = 5
x_train, y_train = convert_time_series_to_train_data(train_set, width)
model = sklearn.linear_model.LinearRegression()
model.fit(x_train, y_train)
print(f'The R2 score of the linear model with width={width} is {model.score(x_train, y_train):.3f}')
# Now change the ```width``` parameter to see if you can get a better score.
#
# ### 7.3 Over-fitting
#
#
# Now execute the code below to see the prediction of this model.
# +
import copy
# this is a helper function to make the predictions
def predict(model, train_set, width, n_points):
    """Forecast ``n_points`` future values with a sliding input window.

    The model is first fed the last ``width`` observations of
    ``train_set``; each newly predicted value is then pushed into the
    window so the forecast feeds on its own output (recursive
    multi-step prediction).

    Returns an array of shape ``(n_points, 1)``.
    """
    # work on a copy so the caller's data is never modified
    window = copy.deepcopy(train_set[-width : ])
    forecasts = []
    for _ in range(n_points):
        # one-step-ahead prediction for the current window
        forecasts.append(model.predict(window.reshape((1, -1))))
        # slide the window: drop the oldest value, append the newest forecast
        window[ : -1] = window[1 : ]
        window[-1] = forecasts[-1]
    return np.array(forecasts)
# +
# Forecast 200 hours ahead and plot the forecast (green) after the
# training data (blue).
n_predictions = 200
prediction = predict(model, train_set, width, n_predictions)
plt.figure(figsize=(20,4))
# prediction has shape (n_predictions, 1); take the single column to concatenate
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
# -
# As you can see in the image above the prediction is not what you would expect from a perfect model. What happened is that the model learned the training data by heart without 'understanding' what the data is really about. This phenomenon is called over-fitting and will always occur if you make your model too complex.
#
# Now play with the width variable below to see if you can find a more sensible width.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "d8feb79fbf075823932c57521919ac55", "grade": false, "grade_id": "jkl", "locked": false, "schema_version": 3, "solution": true}
# Student exercise: choose a window width that generalises instead of
# over-fitting (the assert at the bottom demands width > 1).
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# width = ?
x_train, y_train = convert_time_series_to_train_data(train_set, width)
model = sklearn.linear_model.LinearRegression()
model.fit(x_train, y_train)
print(f'The R2 score of the linear model with width={width} is {model.score(x_train, y_train):.3f}')
# forecast 200 hours ahead and show it against the training data
prediction = predict(model, train_set, width, 200)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
assert width > 1
# -
# As you will have noticed by now is that it is better to have a non-perfect score which will give you a much better outcome. Now try the same thing for the following models:
# * ```sklearn.linear_model.RidgeCV()```
# * ```sklearn.linear_model.LassoCV()```
# * ```sklearn.ensemble.RandomForestRegressor()```
#
# The first 2 models also estimate the noise that is present in the data to avoid over-fitting. `RidgeCV()` will keep the weights that are found small, but it won't put them to zero. `LassoCV()` on the other hand will put several weights to 0. Execute ```model.coef_``` to see the actual coefficients that have been found.
#
# `RandomForestRegressor()` is the regression variant of the `RandomForestClassifier()` and is therefore a non-linear method. This makes this method a lot more complex and therefore it will be able to represent more complex shapes than the linear method. This also means that it is much more capable of learning the data by heart (and thus of over-fitting). In many cases however this additional complexity allows it to better understand the data given the correct parameter settings (try a couple of times `width=25` (since it is random) and see what the results are; set the `n_estimators` parameter to a higher number to get more stable results).
#
# ### 7.4 Automation
#
# What we have done up to now is manually selecting the best outcome based on the test result. This can be considered cheating because you have just created a self-fulfilling prophecy. Additionally it is not only cheating it is also hard to find the exact `width` that gives the best result by just visually inspecting it. So we need a more objective approach to solve this.
#
# To automate this process you can use a validation set. In this case we will use the last 48 hours of the training set to validate the score and select the best parameter value. This means that we will have to use a subset of the training set to fit the model.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "878a9a1b67102633fe609f2ecc2891b7", "grade": false, "grade_id": "find_best_model", "locked": false, "schema_version": 3, "solution": true}
# Candidate models: plain OLS, two regularised linear models that tune
# their regularisation strength via cross-validation, and a random forest.
model_generators = [sklearn.linear_model.LinearRegression(),
                    sklearn.linear_model.RidgeCV(cv=3),
                    sklearn.linear_model.LassoCV(cv=3),
                    sklearn.ensemble.RandomForestRegressor(n_estimators=10)]
best_score = 0
# Student exercise: loop over every candidate model and a range of
# window widths, keeping the combination with the best validation score.
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# for model_gen in ? :
#     for width in range( ? , ? ):
        x_train, y_train = convert_time_series_to_train_data(train_set, width)
        # train the model on the first 48 hours
        x_train_i, y_train_i = x_train[ : -48, :], y_train[ : -48]
        # use the last 48 hours for validation
        x_val_i, y_val_i = x_train[-48 : ], y_train[-48 : ]
        # there is a try except clause here because some models do not converge for some data
        try:
            # Constructs a new, untrained, model with the same parameters
            model = sklearn.base.clone(model_gen, safe=True)
            # Student exercise: fit on the training split and score on the
            # validation split.
            ##### Implement this part of the code #####
            raise NotImplementedError("Code not implemented, follow the instructions.")
            # model.fit( ? , ? )
            # this_score = ?
            if this_score > best_score:
                best_score = this_score
                # Constructs a new, untrained, model with the same parameters
                best_model = sklearn.base.clone(model, safe=True)
                best_width = width
        except:
            pass
print(f'{best_model.__class__.__name__} was selected as the best model with a width of {best_width}',
      f'and a validation R2 score of {best_score:.3f}')
# -
# If everything is correct, the LassoCV method was selected.
#
# Now we are going to train this best model on all the data. In this way we use all the available data to build a model.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "0b18aa7ae6392940841e9b8821f77a4a", "grade": false, "grade_id": "best_model_gen_plot", "locked": false, "schema_version": 3, "solution": true}
# Student exercise: re-use the best width/model found by the validation
# search above, then retrain on ALL the available training data.
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# width = ?
# model = ?
x_train, y_train = convert_time_series_to_train_data(train_set, width)
##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")
# model.fit( ? , ? )
# forecast 200 hours ahead with the fully-trained model
n_predictions = 200
prediction = predict(model, train_set, width, n_predictions)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
# -
# Although the optimal result found here might not be the best visually, it is a far better result than the one you selected manually just because there was no cheating involved ;-).
#
# Some additional info:
# * The noise level of `RidgeCV()` and `LassoCV()` is estimated by automatically performing train and validation within the method itself. This will make them much more robust against over-fitting. The actual method used is [Cross-validation](https://en.wikipedia.org/wiki/Cross-validation_(statistics)) which is a better approach than what we do here because it repeats the training and validation multiple times for different training and validation sets. The parameter that is set for these methods is often called the regularization parameter in literature and is well suited to avoid over-fitting.
# # 8. Main take home messages
#
# Because we can't cover everything, we listed all the basics of what you should take home before working on your own machine learning project below.
#
# ### 8.1 The basic rules of machine learning
#
# Any good club has its own set of rules. The rules for machine learning club are the following:
#
# * First rule of ML is: Over-fitting is a real problem and try anything to avoid it
# * Second rule of ML is: You are probably over-fitting. Are you sure you are not fitting on your test data?
# * Third rule of ML is: You think over-fitting will not happen to you, but it is happening right now!
# * Fourth rule of ML is: Talk about it with your peers because over-fitting is a real issue.
#
# ### 8.2 My winning strategy
#
# Although I'd like to claim it as mine, it is a general (non-written) consensus amongst data scientists to use the following approach. Even experts should not skip any of the steps below.
#
# 1. Create a train set and a test set
# * Rescale your train set to zero-mean-unit-variance (most methods assume gaussian distributed data)
# * Don't look at the test set
# * Implement a cross-validation framework
# * Try **linear regression with regularisation** for regression (`RidgeCV` or `LassoCV`) and classification (`LogisticRegressionCV`).
# * Try techniques to avoid over-fitting
# * Check the validation score
# * If the results are not optimal and there is no over-fitting going on try **adding features** else go to step 17
# * Rescale your features to zero-mean-unit-variance (most methods assume gaussian distributed data) or select those features that have this property
# * Try techniques to avoid over-fitting (including removing features for more info see [feature selection techniques](http://scikit-learn.org/stable/modules/feature_selection.html))
# * Check the validation score
# * If the results are not optimal and there is no over-fitting going on try **random forests** else go to step 17
# * Try techniques to avoid over-fitting (such as feature selection, to rank the features you can use [this approach](http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html))
# * Check the validation score
# * If the results are not optimal and there is no over-fitting going on try **neural networks** or deep learning else go to step 17
# * Try techniques to avoid over-fitting
# * Only in the end check the score on the test set and make sure it is similar to the validation score. Otherwise you have been over-fitting and you need to take a couple steps back.
# * Make an ensemble of your best (but significantly different) methods
# * Finally build the model using all the data available and run it in production
#
# You can try other machine learning techniques, but usually the difference is quite small. So don't waste too much time on getting to know them because they all have their own quirks and specific ways of over-fitting. Besides maybe most important of all, Kaggle competitions are usually won with one of these techniques.
#
# If you do want to dive into other methods or if you want more details on the methods discussed here, the [sklearn website](http://scikit-learn.org/stable/) is a good starting point.
#
# ### 8.3 How to avoid over-fitting
#
# As you should know by now over-fitting is one of the biggest issues in machine learning. So pay attention for it.
#
# Below you can find some of the most common techniques to avoid over-fitting:
#
# * Use more data
# * Artificially generate more data based on the original data
# * Use a smaller model (with fewer parameters)
# * Use fewer features (and thus fewer parameters)
# * Use a regularisation parameter
# * Artificially add noise to your model (can be random noise or can be on/of noise in neural networks so that you get dropout)
# * Only use linear models (or in neural networks make sure that the non-linearity in your model is closer to a linear function)
# * Combine multiple models that each over-fit in their own peculiar way into what is called an ensemble
#
#
# ### 8.4 Most common features
#
# Although there is no general rule to which features you should use, there are a couple of features that come back regularly:
#
# * Log: Take the log of the data to make it more Gaussian. This works best for data that is exponentially or log-normally distributed
# * Polynomials: The square is quite common but higher orders are often used as well
# * Differentials: The first and sometimes the second derivative are used (see [`numpy.diff()`](https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.diff.html))
# * Integrals (use [`numpy.sum()`](https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.sum.html) for example to implement it)
# * Mean: Often used to smooth the data
# * Median: Same as the mean but this ignores outliers
# * Standard deviation or variance
# * Skewness and kurtosis: These are rarely used but sometimes they contain valuable information
# * Fourier transform: If your data contains a frequency spectrum. Typically used when processing speech and sound. (see [`numpy.fft.fft()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fft.html#numpy.fft.fft))
# * Frequency filtering: Similar to the fourier transform (see [`scipy.signal.butter()`](https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.butter.html#scipy.signal.butter))
# * Spatial filters: Are often used for images. Edge detectors for example
# * Any other feature that seems to make sense regarding your data
#
#
# ## Feedback
#
# If you have any feedback regarding this tutorial, feel free to share it with us. You can mail to <a href="mailto:<EMAIL>"><EMAIL></a> or <a href="mailto:<EMAIL>"><EMAIL></a>.
| solution/machine_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=false editable=false
# Initialize OK
from client.api.notebook import Notebook
ok = Notebook('lab05.ok')
# -
# # Lab 5: Simulations
#
# Welcome to Lab 5!
#
# We will go over [iteration](https://www.inferentialthinking.com/chapters/09/2/Iteration.html) and [simulations](https://www.inferentialthinking.com/chapters/09/3/Simulation.html), as well as introduce the concept of [randomness](https://www.inferentialthinking.com/chapters/09/Randomness.html).
#
# The data used in this lab will contain salary data and other statistics for basketball players from the 2014-2015 NBA season. This data was collected from the following sports analytic sites: [Basketball Reference](http://www.basketball-reference.com) and [Spotrac](http://www.spotrac.com).
#
# First, set up the tests and imports by running the cell below.
# +
# Run this cell, but please don't change it.
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do some fancy plotting magic
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# Don't change this cell; just run it.
from client.api.notebook import Notebook
ok = Notebook('lab05.ok')
# -
# ## 1. Nachos and Conditionals
# In Python, the boolean data type contains only two unique values: `True` and `False`. Expressions containing comparison operators such as `<` (less than), `>` (greater than), and `==` (equal to) evaluate to Boolean values. A list of common comparison operators can be found below!
#
# <img src="comparisons.png">
# Run the cell below to see an example of a comparison operator in action.
3 > 1 + 1
# We can even assign the result of a comparison operation to a variable.
result = 10 / 2 == 5
result
# Arrays are compatible with comparison operators. The output is an array of boolean values.
make_array(1, 5, 7, 8, 3, -1) > 3
# One day, when you come home after a long week, you see a hot bowl of nachos waiting on the dining table! Let's say that whenever you take a nacho from the bowl, it will either have only **cheese**, only **salsa**, **both** cheese and salsa, or **neither** cheese nor salsa (a sad tortilla chip indeed).
#
# Let's try and simulate taking nachos from the bowl at random using the function, `np.random.choice(...)`.
# ### `np.random.choice`
#
# `np.random.choice` picks one item at random from the given array. It is equally likely to pick any of the items. Run the cell below several times, and observe how the results change.
nachos = make_array('cheese', 'salsa', 'both', 'neither')
np.random.choice(nachos)
# To repeat this process multiple times, pass in an int `n` as the second argument to return `n` different random choices. By default, `np.random.choice` samples **with replacement** and returns an *array* of items.
#
# Run the next cell to see an example of sampling with replacement 10 times from the `nachos` array.
np.random.choice(nachos, 10)
# To count the number of times a certain type of nacho is randomly chosen, we can use `np.count_nonzero`
# ### `np.count_nonzero`
#
# `np.count_nonzero` counts the number of non-zero values that appear in an array. When an array of boolean values are passed through the function, it will count the number of `True` values (remember that in Python, `True` is coded as 1 and `False` is coded as 0.)
#
# Run the next cell to see an example that uses `np.count_nonzero`.
np.count_nonzero(make_array(True, False, False, True, True))
# + [markdown] deletable=false editable=false
# **Question 1.** Assume we took ten nachos at random, and stored the results in an array called `ten_nachos` as done below. Find the number of nachos with only cheese using code (do not hardcode the answer).
#
# *Hint:* Our solution involves a comparison operator (e.g. `=`, `<`, ...) and the `np.count_nonzero` method.
#
# <!--
# BEGIN QUESTION
# name: q11
# -->
# -
ten_nachos = make_array('neither', 'cheese', 'both', 'both', 'cheese', 'salsa', 'both', 'neither', 'cheese', 'both')
number_cheese = ...
number_cheese
# + deletable=false editable=false
ok.grade("q11");
# -
# **Conditional Statements**
#
# A conditional statement is a multi-line statement that allows Python to choose among different alternatives based on the truth value of an expression.
#
# Here is a basic example.
#
# ```
# def sign(x):
# if x > 0:
# return 'Positive'
# else:
# return 'Negative'
# ```
#
# If the input `x` is greater than `0`, we return the string `'Positive'`. Otherwise, we return `'Negative'`.
#
# If we want to test multiple conditions at once, we use the following general format.
#
# ```
# if <if expression>:
# <if body>
# elif <elif expression 0>:
# <elif body 0>
# elif <elif expression 1>:
# <elif body 1>
# ...
# else:
# <else body>
# ```
#
# Only the body for the first conditional expression that is true will be evaluated. Each `if` and `elif` expression is evaluated and considered in order, starting at the top. As soon as a true value is found, the corresponding body is executed, and the rest of the conditional statement is skipped. If none of the `if` or `elif` expressions are true, then the `else body` is executed.
#
# For more examples and explanation, refer to the section on conditional statements [here](https://www.inferentialthinking.com/chapters/09/1/conditional-statements.html).
# + [markdown] deletable=false editable=false
# **Question 2.** Complete the following conditional statement so that the string `'More please'` is assigned to the variable `say_please` if the number of nachos with cheese in `ten_nachos` is less than `5`.
#
# *Hint*: You should be using `number_cheese` from Question 1.
#
# <!--
# BEGIN QUESTION
# name: q12
# -->
# + for_assignment_type="student"
say_please = '?'
if ...:
say_please = 'More please'
say_please
# + deletable=false editable=false
ok.grade("q12");
# + [markdown] deletable=false editable=false
# **Question 3.** Write a function called `nacho_reaction` that returns a reaction (as a string) based on the type of nacho passed in as an argument. Use the table below to match the nacho type to the appropriate reaction.
#
# <img src="nacho_reactions.png">
#
# *Hint:* If you're failing the test, double check the spelling of your reactions.
#
# <!--
# BEGIN QUESTION
# name: q13
# -->
# + for_assignment_type="student"
def nacho_reaction(nacho):
    # Student exercise: return the reaction string matching each nacho
    # type ('cheese', 'salsa', 'both', 'neither') per the table above.
    if nacho == "cheese":
        return ...
    ... :
        ...
    ... :
        ...
    ... :
        ...
# quick sanity check on one nacho type
spicy_nacho = nacho_reaction('salsa')
spicy_nacho
# + deletable=false editable=false
ok.grade("q13");
# + [markdown] deletable=false editable=false
# **Question 4.** Create a table `ten_nachos_reactions` that consists of the nachos in `ten_nachos` as well as the reactions for each of those nachos. The columns should be called `Nachos` and `Reactions`.
#
# *Hint:* Use the `apply` method.
#
# <!--
# BEGIN QUESTION
# name: q14
# -->
# + for_assignment_type="student"
ten_nachos_tbl = Table().with_column('Nachos', ten_nachos)
...
ten_nachos_reactions
# + deletable=false editable=false
ok.grade("q14");
# + [markdown] deletable=false editable=false
# **Question 5.** Using code, find the number of 'Wow!' reactions for the nachos in `ten_nachos_reactions`.
#
# <!--
# BEGIN QUESTION
# name: q15
# -->
# -
number_wow_reactions = ...
number_wow_reactions
# + deletable=false editable=false
ok.grade("q15");
# -
# ## 2. Simulations and For Loops
# Using a `for` statement, we can perform a task multiple times. This is known as iteration.
# One use of iteration is to loop through a set of values. For instance, we can print out all of the colors of the rainbow.
# +
rainbow = make_array("red", "orange", "yellow", "green", "blue", "indigo", "violet")
for color in rainbow:
print(color)
# -
# We can see that the indented part of the `for` loop, known as the body, is executed once for each item in `rainbow`. The name `color` is assigned to the next value in `rainbow` at the start of each iteration. Note that the name `color` is arbitrary; we could easily have named it something else. The important thing is we stay consistent throughout the `for` loop.
for another_name in rainbow:
print(another_name)
# In general, however, we would like the variable name to be somewhat informative.
# + [markdown] deletable=false editable=false
# **Question 1.** In the following cell, we've loaded the text of _Pride and Prejudice_ by <NAME>, split it into individual words, and stored these words in an array `p_and_p_words`. Using a `for` loop, assign `longer_than_five` to the number of words in the novel that are more than 5 letters long.
#
# *Hint*: You can find the number of letters in a word with the `len` function.
#
# <!--
# BEGIN QUESTION
# name: q21
# -->
# + for_assignment_type="student"
austen_string = open('Austen_PrideAndPrejudice.txt', encoding='utf-8').read()
p_and_p_words = np.array(austen_string.split())
longer_than_five = ...
# a for loop would be useful here
longer_than_five
# + deletable=false editable=false
ok.grade("q21");
# + [markdown] deletable=false editable=false
# **Question 2.** Using a simulation with 10,000 trials, assign num_different to the number of times, in 10,000 trials, that two words picked uniformly at random (with replacement) from Pride and Prejudice have different lengths.
#
# *Hint 1*: What function did we use in section 1 to sample at random with replacement from an array?
#
# *Hint 2*: Remember that `!=` checks for non-equality between two items.
#
# <!--
# BEGIN QUESTION
# name: q22
# -->
# + for_assignment_type="student"
trials = 10000
num_different = ...
for ... in ...:
...
num_different
# + deletable=false editable=false
ok.grade("q22");
# + [markdown] deletable=false editable=false
# We can also use `np.random.choice` to simulate multiple trials.
#
# **Question 3.** Allie is playing darts. Her dartboard contains ten equal-sized zones with point values from 1 to 10. Write code that simulates her total score after 1000 dart tosses.
#
# *Hint:* First decide the possible values you can take in the experiment (point values in this case). Then use `np.random.choice` to simulate Allie's tosses. Finally, sum up the scores to get Allie's total score.
#
# <!--
# BEGIN QUESTION
# name: q23
# -->
# + for_assignment_type="student"
possible_point_values = ...
num_tosses = 1000
simulated_tosses = ...
total_score = ...
total_score
# + deletable=false editable=false
ok.grade("q23");
# -
# ## 3. Sampling Basketball Data
#
# We will now introduce the topic of sampling, which we’ll be discussing in more depth in this week’s lectures. We’ll guide you through this code, but if you wish to read more about different kinds of samples before attempting this question, you can check out [section 10 of the textbook](https://www.inferentialthinking.com/chapters/10/Sampling_and_Empirical_Distributions.html).
#
# Run the cell below to load player and salary data that we will use for our sampling.
# +
# Load the 2014-2015 NBA season data sets.
player_data = Table().read_table("player_data.csv")
salary_data = Table().read_table("salary_data.csv")
# Join on the player's name: "PlayerName" in salary_data matches "Name" in player_data.
full_data = salary_data.join("PlayerName", player_data, "Name")
# The show method immediately displays the contents of a table.
# This way, we can display the top of two tables using a single cell.
player_data.show(3)
salary_data.show(3)
full_data.show(3)
# -
# Rather than getting data on every player (as in the tables loaded above), imagine that we had gotten data on only a smaller subset of the players. For 492 players, it's not so unreasonable to expect to see all the data, but usually we aren't so lucky.
#
# If we want to make estimates about a certain numerical property of the population (known as a statistic, e.g. the mean or median), we may have to come up with these estimates based only on a smaller sample. Whether these estimates are useful or not often depends on how the sample was gathered. We have prepared some example sample datasets to see how they compare to the full NBA dataset. Later we'll ask you to create your own samples to see how they behave.
# To save typing and increase the clarity of your code, we will package the analysis code into a few functions. This will be useful in the rest of the lab as we will repeatedly need to create histograms and collect summary statistics from that data.
# We've defined the `histograms` function below, which takes a table with columns `Age` and `Salary` and draws a histogram for each one. It uses bin widths of 1 year for `Age` and $1,000,000 for `Salary`.
# +
def histograms(t):
    """Draw age and salary histograms for a table with 'Age' and 'Salary' columns.

    Salaries are rescaled to millions of dollars; bin widths are 1 year
    for age and $1,000,000 for salary.
    """
    ages = t.column('Age')
    salaries = t.column('Salary')/1000000
    # replace the raw Salary column with the rescaled (millions) one
    t1 = t.drop('Salary').with_column('Salary', salaries)
    # "+ 2" so the maximum age still falls inside the last full-width bin
    age_bins = np.arange(min(ages), max(ages) + 2, 1)
    salary_bins = np.arange(min(salaries), max(salaries) + 1, 1)
    t1.hist('Age', bins=age_bins, unit='year')
    plt.title('Age distribution')
    t1.hist('Salary', bins=salary_bins, unit='million dollars')
    plt.title('Salary distribution')
histograms(full_data)
print('Two histograms should be displayed below')
# + [markdown] deletable=false editable=false
# **Question 1**. Create a function called `compute_statistics` that takes a table containing ages and salaries and:
# - Draws a histogram of ages
# - Draws a histogram of salaries
# - Returns a two-element array containing the average age and average salary (in that order)
#
# You can call the `histograms` function to draw the histograms!
#
# *Note:* More charts will be displayed when running the test cell. Please feel free to ignore the charts.
#
# <!--
# BEGIN QUESTION
# name: q31
# -->
# +
def compute_statistics(age_and_salary_data):
    # Student exercise: draw both histograms (the `histograms` helper
    # does this) and return a two-element array with the average age
    # and average salary, in that order.
    ...
    age = ...
    salary = ...
    ...
full_stats = compute_statistics(full_data)
full_stats
# + deletable=false editable=false
ok.grade("q31");
# + [markdown] deletable=false editable=false
# ### Convenience sampling
# One sampling methodology, which is **generally a bad idea**, is to choose players who are somehow convenient to sample. For example, you might choose players from one team who are near your house, since it's easier to survey them. This is called, somewhat pejoratively, *convenience sampling*.
#
# Suppose you survey only *relatively new* players with ages less than 22. (The more experienced players didn't bother to answer your surveys about their salaries.)
#
# **Question 2.** Assign `convenience_sample` to a subset of `full_data` that contains only the rows for players under the age of 22.
#
# <!--
# BEGIN QUESTION
# name: q32
# -->
# -
convenience_sample = ...
convenience_sample
# + deletable=false editable=false
ok.grade("q32");
# + [markdown] deletable=false editable=false
# **Question 3.** Assign `convenience_stats` to an array of the average age and average salary of your convenience sample, using the `compute_statistics` function. Since they're computed on a sample, these are called *sample averages*.
#
# <!--
# BEGIN QUESTION
# name: q33
# -->
# -
convenience_stats = ...
convenience_stats
# + deletable=false editable=false
ok.grade("q33");
# -
# Next, we'll compare the convenience sample salaries with the full data salaries in a single histogram. To do that, we'll need to use the `bin_column` option of the `hist` method, which indicates that all columns are counts of the bins in a particular column. The following cell does not require any changes; **just run it**.
# +
def compare_salaries(first, second, first_title, second_title):
    """Compare the salaries in two tables.

    Both tables must have a 'Salary' column; salaries are rescaled to
    millions of dollars, binned with $1M-wide bins over a shared range,
    and drawn as one overlaid histogram so the distributions can be
    compared directly.
    """
    first_salary_in_millions = first.column('Salary')/1000000
    second_salary_in_millions = second.column('Salary')/1000000
    first_tbl_millions = first.drop('Salary').with_column('Salary', first_salary_in_millions)
    second_tbl_millions = second.drop('Salary').with_column('Salary', second_salary_in_millions)
    # shared bins over the combined salary range so both histograms align
    max_salary = max(np.append(first_tbl_millions.column('Salary'), second_tbl_millions.column('Salary')))
    bins = np.arange(0, max_salary+1, 1)
    first_binned = first_tbl_millions.bin('Salary', bins=bins).relabeled(1, first_title)
    second_binned = second_tbl_millions.bin('Salary', bins=bins).relabeled(1, second_title)
    # bin_column='bin' tells hist() the other columns are per-bin counts
    first_binned.join('bin', second_binned).hist(bin_column='bin', unit='million dollars')
    plt.title('Salaries for all players and convenience sample')
compare_salaries(full_data, convenience_sample, 'All Players', 'Convenience Sample')
# -
# **Question 4.** Does the convenience sample give us an accurate picture of the salary of the full population? Would you expect it to, in general? Before you move on, write a short answer in English below. You can refer to the statistics calculated above or perform your own analysis.
# + [markdown] deletable=false manual_problem_id="convenience_3_5"
# *Write your answer here, replacing this text.*
# -
# ### Simple random sampling
# A more justifiable approach is to sample uniformly at random from the players. In a **simple random sample (SRS) without replacement**, we ensure that each player is selected at most once. Imagine writing down each player's name on a card, putting the cards in a box, and shuffling the box. Then, pull out cards one by one and set them aside, stopping when the specified sample size is reached.
# ### Producing simple random samples
# Sometimes, it’s useful to take random samples even when we have the data for the whole population. It helps us understand sampling accuracy.
#
# ### `sample`
#
# The table method `sample` produces a random sample from the table. By default, it draws at random **with replacement** from the rows of a table. It takes in the sample size as its argument and returns a **table** with only the rows that were selected.
#
# Run the cell below to see an example call to `sample()` with a sample size of 5, with replacement.
# +
# Just run this cell
salary_data.sample(5)
# -
# The optional argument `with_replacement=False` can be passed through `sample()` to specify that the sample should be drawn without replacement.
#
# Run the cell below to see an example call to `sample()` with a sample size of 5, without replacement.
# +
# Just run this cell
salary_data.sample(5, with_replacement=False)
# -
# **Question 5.** Produce a simple random sample of size 44 from `full_data`. Run your analysis on it again. Run the cell a few times to see how the histograms and statistics change across different samples.
#
# - How much does the average age change across samples?
# - What about average salary?
my_small_srswor_data = ...
my_small_stats = ...
my_small_stats
# + [markdown] deletable=false manual_problem_id="q_3_7_samples"
# *Write your answer here, replacing this text.*
# -
# **Question 6.** As in the previous question, analyze several simple random samples of size 100 from `full_data`.
# - Do the histogram shapes seem to change more or less across samples of 100 than across samples of size 44?
# - Are the sample averages and histograms closer to their true values/shape for age or for salary? What did you expect to see?
my_large_srswor_data = ...
my_large_stats = ...
my_large_stats
# + [markdown] deletable=false manual_problem_id="large_srs_q"
# *Write your answer here, replacing this text.*
# -
# Congratulations, you're done with Lab 5! Be sure to
# - **Run all the tests** (the next cell has a shortcut for that).
# - **Save and Checkpoint** from the `File` menu.
# - **Run the cell at the bottom to submit your work**.
# - And ask one of the staff members to check you off.
# For your convenience, you can run this cell to run all the tests at once!
import os
# os.path.splitext is safer than q[:-3], which silently mangles the name if a
# test file does not end in a 3-character extension; sorted() makes the
# grading order deterministic across platforms (os.listdir order is arbitrary).
_ = [ok.grade(os.path.splitext(q)[0]) for q in sorted(os.listdir("tests")) if q.startswith('q')]
_ = ok.submit()
| materials/sp20/lab/lab05/lab05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
from IPython.display import display
from sklearn.model_selection import train_test_split
# %matplotlib inline
# Compare the two most common MLP activation functions over [-3, 3].
line = np.linspace(-3, 3, 100)
plt.plot(line, np.tanh(line), label="tanh")  # fixed legend typo: was "lanh"
plt.plot(line, np.maximum(line, 0), label="relu")
plt.legend(loc="best")
plt.xlabel('x')
plt.ylabel('relu(x), tanh(x)')
# +
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_moons
# Two-class "moons" toy dataset: 100 points with moderate label noise.
X, y = make_moons(n_samples=100, noise=.25, random_state=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
# Default MLP architecture (per sklearn docs: one hidden layer of 100 relu units).
mlp = MLPClassifier(solver='lbfgs', random_state=0).fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel('Признак 0')
plt.ylabel('Признак 1')
# -
# Single hidden layer of only 10 units: a coarser decision boundary.
mlp = MLPClassifier(solver='lbfgs', random_state=0, hidden_layer_sizes=[10]).fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel('Признак 0')
plt.ylabel('Признак 1')
# Two hidden layers of 10 units each.
mlp = MLPClassifier(solver='lbfgs', random_state=0, hidden_layer_sizes=[10, 10]).fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel('Признак 0')
plt.ylabel('Признак 1')
# Same two-layer architecture with tanh activation instead of the default relu.
mlp = MLPClassifier(solver='lbfgs', random_state=0, activation='tanh', hidden_layer_sizes=[10, 10]).fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel('Признак 0')
plt.ylabel('Признак 1')
# Grid of models: rows vary the hidden-layer width, columns vary the L2
# penalty. NOTE: the loop variable `alpha` is the MLP regularization
# strength; the alpha=.3 passed to the plot helpers is just transparency.
fig, axes = plt.subplots(2, 4, figsize=(20, 8))
for axx, n_hidden_nodes in zip(axes, [10, 100]):
    for ax, alpha in zip(axx, [0.0001, 0.01, 0.1, 1]):
        mlp = MLPClassifier(solver='lbfgs', random_state=0,
                            hidden_layer_sizes=[n_hidden_nodes, n_hidden_nodes], alpha=alpha)
        mlp.fit(X_train, y_train)
        mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3, ax=ax)
        mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, ax=ax)
        ax.set_title('n_hidden=[{}, {}]\nalpha={:.4f}'.format(n_hidden_nodes, n_hidden_nodes, alpha))
# Same architecture trained with 8 different random seeds: the learned
# boundary depends on the random weight initialization.
fig, axes = plt.subplots(2, 4, figsize=(20, 8))
for i, ax in enumerate(axes.ravel()):
    mlp = MLPClassifier(solver='lbfgs', random_state=i, hidden_layer_sizes=[100, 100])
    mlp.fit(X_train, y_train)
    mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3, ax=ax)
    mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, ax=ax)
# +
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
# The feature scales differ by orders of magnitude, which hurts neural nets.
print('Максимальные значения характеристик:\n{}'.format(cancer.data.max(axis=0)))
# -
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, random_state=0)
# Baseline: MLP trained on the raw, unscaled data.
mlp = MLPClassifier(random_state=42).fit(X_train, y_train)
print("Правильность на обучающем наборе: {:.3f}".format(mlp.score(X_train, y_train)))
print("Правильность на тестовом наборе: {:.3f}".format(mlp.score(X_test, y_test)))
# +
# Standardize using the training-set statistics only (the same mean/std are
# applied to the test set to avoid leakage), then retrain with more iterations.
mean_on_train = X_train.mean(axis=0)
std_on_train = X_train.std(axis=0)
X_train_scaled = (X_train - mean_on_train) / std_on_train
X_test_scaled = (X_test - mean_on_train) / std_on_train
mlp = MLPClassifier(random_state=42, max_iter=1000).fit(X_train_scaled, y_train)
print("Правильность на обучающем наборе: {:.3f}".format(mlp.score(X_train_scaled, y_train)))
print("Правильность на тестовом наборе: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
# +
# Same pipeline with stronger L2 regularization (alpha=1).
mean_on_train = X_train.mean(axis=0)
std_on_train = X_train.std(axis=0)
X_train_scaled = (X_train - mean_on_train) / std_on_train
X_test_scaled = (X_test - mean_on_train) / std_on_train
mlp = MLPClassifier(random_state=42, max_iter=1000, alpha=1).fit(X_train_scaled, y_train)
print("Правильность на обучающем наборе: {:.3f}".format(mlp.score(X_train_scaled, y_train)))
print("Правильность на тестовом наборе: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
# -
# Heat map of the first-layer weight matrix: 30 input features (rows) by
# hidden units (columns) of the last model trained above.
plt.figure(figsize=(20, 5))
plt.imshow(mlp.coefs_[0], interpolation='none', cmap='viridis')
plt.yticks(range(30), cancer.feature_names)
plt.xlabel('Столбцы матрицы весов')
plt.ylabel('Входная характеристика')
plt.colorbar()
| jnotebook/03_neural_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rigongora/Carbonate_system_Environmental_Chemistry/blob/main/Sistema_de_carbonato.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="ut-yzEDVvEZ6" outputId="8e64a2d2-4bd1-4907-c212-ba59751e86bf"
### <NAME>
### Date: December 9, 2020
### Plotting the Carbonate System
#Import needed modules
from matplotlib import pyplot as plt
import numpy as np
# Define the required functions.
# Each function returns the alpha (molar fraction) of one carbon species as a function of the proton concentration.
def f1(pH):
    """Molar fraction (alpha) of bicarbonate, HCO3-, at the given pH."""
    h = 10.0 ** (-pH)        # proton concentration [H+]
    ka1 = 10.0 ** (-6.3)     # first dissociation constant of carbonic acid
    ka2 = 10.0 ** (-10.3)    # second dissociation constant
    return 1.0 / (1.0 + (h / ka1) + (ka2 / h))
def f2(pH):
    """Molar fraction (alpha) of carbonate, CO3^2-, at the given pH."""
    h = 10.0 ** (-pH)        # proton concentration [H+]
    ka1 = 10.0 ** (-6.3)     # first dissociation constant of carbonic acid
    ka2 = 10.0 ** (-10.3)    # second dissociation constant
    return 1.0 / (1.0 + (h / ka2) * (1.0 + (h / ka1)))
def f3(pH):
    """Molar fraction (alpha) of dissolved CO2 (H2CO3*) at the given pH."""
    h = 10.0 ** (-pH)        # proton concentration [H+]
    ka1 = 10.0 ** (-6.3)     # first dissociation constant of carbonic acid
    ka2 = 10.0 ** (-10.3)    # second dissociation constant
    return 1.0 / (1.0 + (ka1 / h) * (1.0 + (ka2 / h)))
# X-axis values: pH from 0 to 14 in 0.1 steps.
pH = np.arange(0,14,0.1)
# Plot the molar fraction of each carbon species across the pH range.
plt.plot(pH, [f1(i) for i in pH], label = 'alphaHCO3') # Remember HCO3-
plt.plot(pH, [f2(i) for i in pH], label = 'alphaCO3') # Remember CO3-2
plt.plot(pH, [f3(i) for i in pH], label = 'alphaCO2')
# Defining axis limits.
plt.xlim(0, 14)
plt.ylim(0,1.1) # Remember, Y-axis shows the molar fraction or 'alpha', and this value can not exceed one
plt.grid()
# Axis labels
plt.xlabel('pH')
plt.ylabel('Molar Fraction-Alphas')
plt.title('Carbonate System')
# Show plots
plt.legend()
plt.show()
| Sistema_de_carbonato.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Collecting Ocean Water Temperature Near Alaska
#
# For a project on analyzing the effects of climate change on whales seen in Southern California, I wanted data on water temperatures in Alaska (where the gray whales live most of the year). This notebook shows how I collected this data from datasets available online at: http://www.ndbc.noaa.gov/station_history.php?station=46075.
import csv
import numpy as np
import matplotlib.pyplot as plt
# To run the next block of code, please go to the website: http://www.ndbc.noaa.gov/station_history.php?station=46075 and download the data from 2005-2017. The dataset gives way more information than we want, so below I just pull out the temperature of the water for each day at 12:30pm (or 12:50pm - some of the datasets are collected at different times).
# +
# FOR EACH YEAR YOU DOWNLOADED you need to run this code block each time for each year.
# Only two lines need to change when switching years; they are marked below.
with open("46075h2017.txt") as f:  #change the txt file name to change years
    temp = np.array([r for r in csv.reader(f)])
print('Here is a sample of the data you downloaded')
print(temp[0:5,0:5])
print()

# The NDBC files are whitespace-delimited inside a single CSV column and have
# two header rows; split each data row once and pick out the fields we need.
# (The original code re-split every row six times and also computed unused
# hr/min locals, shadowing the builtin min.)
year = []
month = []
day = []
hour = []
minute = []
water = []
for row in temp[2:]:
    fields = row[0].split()
    year.append(fields[0])
    month.append(fields[1])
    day.append(fields[2])
    hour.append(fields[3])
    minute.append(fields[4])
    water.append(fields[14])  # field 14 holds the water temperature in these files
print('Let us check if all our lengths of the data we picked out are the same:')
print(len(year)==len(month)==len(day)==len(hour)==len(minute)==len(water))
print()

# Keep only the reading taken at 12:30pm (or 12:50pm for some years).
table_noon30 = [['year','month','day','water_temp']]
for i in range(len(water)):
    if int(hour[i]) == 12 and int(minute[i]) in (30, 50):
        table_noon30.append([year[i], month[i], day[i], water[i]])
print('Let us see how many days worth of data this picked up:')
print(len(table_noon30), 'days found.')
print()
print('Here is a sample of what data we have pulled out and saved:')
print(table_noon30[0:4])

# newline='' is the documented way to open files for csv.writer (prevents
# blank rows on Windows); the with-statement guarantees the file is closed.
with open('temp2017_noon30or50.csv', 'w', newline='') as myFile:  #change the saving name when changing years
    writer = csv.writer(myFile)
    writer.writerows(table_noon30)
# -
# The data for 2016 is missing about 6 months of temperatures, also 2010 is missing Jan and Feb temps.
#
# I then cleaned the data up a little in Mac's Numbers program (unfound days I pulled up the downloaded file and filled in the temperature from a nearby time). Once I saved all that, I did the following lines to concatenate them all into one file, which I added to my whale dataset.
# +
#Run this line for each year to open all the files
with open("temp2017_noon30or50_filled.csv") as f:
    rows = [r for r in csv.reader(f)]
rows = np.array(rows)
# Skip the header row; column 3 holds the water temperature as text.
values = [float(v) for v in rows[1:, 3]]
temp17 = np.array(values) #change the name for different years
# +
# Chain all thirteen years (2005-2017) into one 1-D array. np.concatenate
# replaces the original fragile chain of np.append calls, whose nested-list
# arguments only flattened correctly when the year arrays happened to have
# matching lengths.
all_temps = np.concatenate([temp05, temp06, temp07, temp08, temp09, temp10,
                            temp11, temp12, temp13, temp14, temp15, temp16,
                            temp17])
print(len(all_temps))
# -
# Quick sanity plot: daily noon water temperatures, 2005-2017 concatenated.
plt.plot(all_temps)
plt.xlabel('Days')
plt.title('Temperatures')
plt.show()
# csv.writerows expects one row (a list) per value, so wrap each temperature
# in a single-element list.
temp_table = [[t] for t in all_temps]
print(temp_table)
# newline='' is the documented way to open files for csv.writer (prevents
# blank rows on Windows).
with open('all_temps.csv', 'w', newline='') as myFile:
    writer = csv.writer(myFile)
    writer.writerows(temp_table)
# ## Great, we have all the data!
| water_temp_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fairness in IBM FL: Scikitlearn Logistic Classification
# ## Outline:
# - [Add conda environment to Jupyter Notebook](#setup)
# - [FL and Fairness](#intro)
# - [Parties](#Parties)
# - [Party Configuration](#Party-Configuration)
# - [Party Setup](#Party-Setup)
# - [Register All Parties Before Starting Training](#Register-All-Parties-Before-Starting-Training)
# - [Visualize Results](#Visualize-Results)
# - [Shut Down](#Shut-Down)
# ## Add conda environment to Jupyter Notebook <a name="setup"></a>
#
# Please ensure that you have activated the `conda` environment following the instructions in the project README.
#
# Once done, run the following commands in your terminal to install your conda environment into the Jupyter Notebook:
#
# 1. Once you have activated the conda environment, install the `ipykernel` package: `conda install -c anaconda ipykernel`
#
# 2. Next, install the `ipykernel` module within Jupyter Notebook: `python -m ipykernel install --user --name=<conda_env>`
#
# 3. Finally, restart the jupyter notebook once done. Ensure that you are running this Notebook from `<project_path>/examples/sklearn_logclassification`, where project_path is the directory where the IBMFL repository was cloned.
#
# When the Notebook is up and running it may prompt you to choose the kernel. Use the drop down to choose the kernel name same as that chosen when running `conda activate <conda_env>`. If no prompt shows up, you can change the kernel by clicking _Kernel_ > _Change kernel_ > _`<conda_env>`_.
# ## Federated Learning (FL) <a name="intro"></a>
#
# **Federated Learning (FL)** is a distributed machine learning process in which each participant node (or party) retains their data locally and interacts with other participants via a learning protocol. In this notebook, we demonstrate the adaption and usage of popular bias mitigation techniques for FL. We examine bias from the perspective of social fairness, as opposed to contribution fairness.
#
# Bias mitigation approaches in machine learning mainly measure and reduce undesired bias with respect to a *sensitive attribute*, such as *age* or *race*, in the training dataset.
#
# We utilize [IBM FL](https://github.com/IBM/federated-learning-lib) to have multiple parties train a classifier to predict whether a person in the [Adult dataset](http://archive.ics.uci.edu/ml/datasets/Adult) makes over $50,000 a year. We have adapted 2 centralized fairness methods, Reweighing and Prejudice Remover, into 3 federated learning bias mitigation methods: Local Reweighing, Global Reweighing with Differential Privacy, and Federated Prejudice Removal.
#
# For a more technical dive into IBM FL, refer the whitepaper [here](https://arxiv.org/pdf/2007.10987.pdf).
# ## Fairness Techniques <a name="fairness"></a>
#
# We adapt a centralized pre-processing bias mitigation method [Reweighing](https://link.springer.com/article/10.1007/s10115-011-0463-8) into two federated learning techniques, Local Reweighing and Global Reweighing with Differential Privacy.
#
# **Local reweighing**: To fully protect parties' data privacy, each party computes reweighing weights locally based on its own training dataset during pre-processing and then uses the reweighing dataset for its local training. Therefore, parties do not need to communicate with the aggregator or reveal their sensitive attributes and data sample information.
#
# **Global Reweighing with Differential Privacy**: If parties agree to share sensitive attributes and noisy data statistics, parties can employ this fairness method. During the pre-processing phase, the aggregator will collect statistics such as the noisy number of samples with privileged attribute values, compute global reweighing weights based on the collected statistics, and share them with parties. By adjusting the amount of noise injected via epsilon, parties can control their data leakage while still mitigating bias.
#
# We also adapt an in-processing bias mitigation method into Federated Prejudice Remover.
#
# **Federated Prejudice Removal**: Each party applies the [Prejudice Remover algorithm](https://github.com/algofairness/fairness-comparison/tree/master/fairness/algorithms/kamishima) to train a less biased local model, and shares only the model parameters with the aggregator. The aggregator can then employ existing FL algorithms, like simple average and FedAvg, etc., to update the global model.
#
# Further details about the algorithms and datasets utilized, as well as experimental setup, are included in the paper titled [Mitigating Bias in Federated Learning](https://arxiv.org/abs/2012.02447).
# ## Fairness Metrics <a name="mnist"></a>
#
# In fairness evaluation, there is no single, all-inclusive metric. Literature uses multiple metrics to measure several aspects, painting a composition of fairness. We use four highly-utilized fairness metrics: Statistical Parity Difference, Equal Odds Difference, Average Odds Difference, and Disparate Impact.
#
# **Statistical Parity Difference**: Calculated as the ratio of the success rate between the unprivileged and privileged groups. The ideal value for this metric is 0, and the fairness range is between -0.1 and 0.1, as defined by [AI Fairness 360](https://aif360.mybluemix.net/).
#
# **Equal Odds Difference**: Calculated as the true positive rate difference between the unprivileged and privileged groups. The ideal value for this metric is 0, and the fairness range is between -0.1 and 0.1, similarly defined by AI Fairness 360.
#
# **Average Odds Difference**: Calculated as the mean of the false positive rate difference and the true positive rate difference, both between the unprivileged and privileged groups. The ideal value for this metric is 0, and the fairness range is between -0.1 and 0.1, similarly defined by AI Fairness 360.
#
# **Disparate Impact**: Calculated as the difference of the success rate between the unprivileged and privileged groups. The ideal value for this metric is 1, and the fairness range is between 0.8 and 1.2, similarly defined by AI Fairness 360.
# ### Getting things ready
# We begin by setting the number of parties that will participate in the federated learning run and splitting up the data among them.
# +
import sys
party_id = 1  # index of this party; each party notebook uses a different id
sys.path.append('../..')
import os
os.chdir("../..")  # run from the repository root so the relative data paths below resolve
num_parties = 2 ## number of participating parties
dataset = 'adult'  # UCI Adult census-income dataset
# -
# ## Parties
#
# Each party holds its own dataset that is kept to itself and used to answer queries received from the aggregator. Because each party may have stored data in different formats, FL offers an abstraction called Data Handler. This module allows for custom implementations to retrieve the data from each of the participating parties. A local training handler sits at each party to control the local training happening at the party side.
# ### Party Configuration
#
# **Note**: in a typical FL setting, the parties may have very different configurations from each other. However, in this simplified example, the config does not differ much across parties. So, we first setup the configuration common to both parties, in the next cell. We discuss the parameters that are specific to each, in subsequent cells.
# <img src="../images/arch_party.png" width="680"/>
# <figcaption><center>Image Source: <a href="https://arxiv.org/pdf/2007.10987.pdf">IBM Federated Learning: An Enterprise FrameworkWhite Paper V0.1</a></center></figcaption>
# ### Party Setup
# In the following cell, we setup configurations for parties, including network-level details, hyperparameters as well as the model specifications. Please note that if you are running this notebook in distributed environment on separate nodes then you need to split the data locally and obtain the model h5 generated by the Aggregator.
def get_party_config(party_id):
    """Assemble the IBM FL configuration dictionary for one party.

    The aggregator address is fixed at 127.0.0.1:5000; each party listens on
    port 8085 + party_id and reads its own CSV split of the Adult dataset.
    """
    connection = {
        'info': {
            'ip': '127.0.0.1',
            'port': 8085 + party_id,
            'id': 'party' + str(party_id),
            'tls_config': {
                'enable': False
            }
        },
        'name': 'FlaskConnection',
        'path': 'ibmfl.connection.flask_connection',
        'sync': False
    }
    data = {
        'info': {
            'txt_file': 'examples/data/adult/random/data_party' + str(party_id) + '.csv'
        },
        'name': 'AdultSklearnDataHandler',
        'path': 'ibmfl.util.data_handlers.adult_sklearn_data_handler'
    }
    model = {
        'name': 'SklearnSGDFLModel',
        'path': 'ibmfl.model.sklearn_SGD_linear_fl_model',
        'spec': {
            'model_definition': 'examples/configs/sklearn_logclassification_rw/model_architecture.pickle',
        }
    }
    return {
        'aggregator': {
            'ip': '127.0.0.1',
            'port': 5000
        },
        'connection': connection,
        'data': data,
        'local_training': {
            'name': 'ReweighLocalTrainingHandler',
            'path': 'ibmfl.party.training.reweigh_local_training_handler'
        },
        'model': model,
        'protocol_handler': {
            'name': 'PartyProtocolHandler',
            'path': 'ibmfl.party.party_protocol_handler'
        }
    }
# ### Running the Party
#
# Now, we invoke the `get_party_config` function to setup party and `start()` it.
#
# Finally, we register the party with the Aggregator.
# +
from ibmfl.party.party import Party
import tensorflow as tf  # NOTE(review): tf appears unused in this notebook — confirm before removing
party_config = get_party_config(party_id)
party = Party(config_dict=party_config)
party.start()  # starts this party's connection/server
party.register_party()  # registers with the aggregator at 127.0.0.1:5000
party.proto_handler.is_private = False ## allows sharing of metrics with aggregator
# -
# ## Register All Parties Before Starting Training
#
# Now we have started and registered this Party. Next, we will start and register rest of the parties. Once all the Parties have registered we will go back to the Aggregator's notebook to start training.
# ## Results
#
# We utilize these methods in our paper, and share below our experimental results for Local Reweighing and Federated Prejudice Remover on the Adult Dataset. Using 8 parties and a global testing set, we find both methods to be effective in reducing bias as measured by these four fairness metrics. Local reweighing is particularly effective.
#
# <img src="../images/adult8P.png" width="720"/>
# <figcaption><center>Fairness Metrics for Local Reweighing and Federated Prejudice Remover on 8 Party Adult Dataset Experiment</a></center></figcaption>
# ## Shut Down
#
# Invoke the `stop()` method on each of the network participants to terminate the service.
party.stop()  # terminate this party's service
| Notebooks/sklearn_logclassification_rw/sklearn_logclassification_rw_p1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Reading XRD spectra
# +
from XRDXRFutils import SpectraXRD
from XRDXRFutils import DataXRD,snip,convolve
from numpy import arange,linspace
from matplotlib.pyplot import plot,subplots,xlim,ylim,hlines,vlines,xlabel,ylabel,imshow,figure,legend
# -
path = '/home/zdenek/Projects/pyMaXRDXRF/M491/ProfiloXRD/'  # NOTE(review): machine-specific path
data = DataXRD().load_h5(path + 'data.h5').calibrate_from_file(path + 'calibration.ini')
spectra = SpectraXRD().from_array(data.data[50,50])  # spectrum at map pixel (50, 50)
spectra_f = SpectraXRD().from_file(path + 'Frame0128.dat')  # NOTE(review): loaded but never used below
spectra.opt = [-1185,1950,51]  # calibration parameters — semantics defined by XRDXRFutils; confirm
spectra.theta_range()
# +
# %matplotlib inline
x = spectra.theta
plot(x,spectra.counts,lw=1,label='spectra')  # raw counts vs angle
# Smoothed signal at two different kernel settings (window n, width std).
plot(x,convolve(spectra.counts,n=21,std=3),label='convolution 3')
plot(x,convolve(spectra.counts,n=40,std=5),label='convolution 5')
# Estimated background for the same two smoothing settings
# (presumably SNIP-based, given the snip import — confirm in XRDXRFutils).
plot(x,spectra.background(n=21,std=3,m=32),'--',label='background 3')
plot(x,spectra.background(n=40,std=5,m=32),'--',label='background 5')
legend(frameon=False)
xlim(x[0],x[-1])
xlabel(r'angle $\theta$')
ylabel(r'counts')
# -
spectra.plot(lw=1,label=r'spectra')  # library helper — presumably plots intensity vs theta
x = spectra.theta
xlim(x[0],x[-1])
legend(frameon=False)
xlabel(r'angle $\theta$')
ylabel(r'relative intensity')
| read_spectra_XRD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1> linear Regression </h1>
#
# Welcome to the 2nd day of the workshop. Today we are going to implement linear regression from scratch. In the first workshop you learned the use of numpy, pandas and matplotlib, we are going to use all these libraries for developing our model.
# There are mainly two classical problems in supervised machine learning: 1) Regression problems 2) Classification problems
#
# Regression Problem:- A regression problem is when the output variable is a real or continuous value, such as “salary” or “weight”. Many different models can be used; the simplest is linear regression. It tries to fit the data with the best hyper-plane that goes through the points. So whenever you see a real and continuous output variable, understand that it is a regression problem.
#
# Classification Problem :- A classification problem is when the output variable is a category, such as “red” or “blue” or “disease” and “no disease”. A classification model attempts to draw some conclusion from observed values. Given one or more inputs a classification model will try to predict the value of one or more outcomes. For example, when filtering emails “spam” or “not spam”, when looking at transaction data, “fraudulent”, or “authorized”. In short Classification either predicts categorical class labels or classifies data (construct a model) based on the training set and the values (class labels) in classifying attributes and uses it in classifying new data.
#
# So today we are going to implement linear regression from scratch for a regression problem, so let's get started.
#loading all the necessary libraries
import numpy as np
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
import pandas as pd
# We are going to use boston housing price dataset for a given task, it is a classical dataset for a regression task, It contains around 506 data entries containing 13 different features.
# +
dataset = load_boston()
# NOTE(review): load_boston is deprecated and was removed in scikit-learn 1.2;
# pin an older scikit-learn or switch datasets to keep this notebook runnable.
print(dataset.DESCR)
# +
x = dataset.data
y = dataset.target
y = np.expand_dims(y,axis=1)  # make y a column vector (n_samples, 1) for the matrix math below
print("total number of samples in dataset is: {}".format(x.shape[0]))
print("total features in dataset is: {}".format(x.shape[1]))
# -
# One of the most important tasks in data science and machine learning is the cleaning of data. Luckily for us, this dataset is already cleaned, so we can focus on the visualization and modelling parts. Data visualization is very necessary to understand the dependencies of the target values on the feature values, and also to gain some insight into what might be the best features to choose to fit the machine learning model. This dataset was prepared to contain only useful features, so we are going to use every feature for now — but in any case, visualization is a good habit in data science.
# Basic visualization: distribution of the target (median house price).
plt.figure(figsize=(5, 4))
plt.hist(dataset.target)
plt.title('Boston Housing Prices and Count Histogram')
plt.xlabel('price ($1000s)')
plt.ylabel('count')
plt.show()
# Plotting every feature relation with target
# Scatter each feature against the target to eyeball which features track price.
for col, name in enumerate(dataset.feature_names):
    plt.figure(figsize=(5, 4))
    plt.tight_layout()
    plt.title('Boston Housing Prices and ' + name)
    plt.scatter(dataset.data[:, col], dataset.target)
    plt.ylabel('Price', size=12)
    plt.xlabel(name, size=12)
    plt.show()
# <h1>Linear Model</h1>
#
# The idea of linear regression is to fit a line to a set of points. So let's use the line function given by:
# f(x)=y=mx+b
#
# where m is the slope and b is our y intercept, or for a more general form (multiple variables)
# h(x)=θ0x0+θ1x1+θ2x2+...+θnxn
#
# such that for a single variable where n = 1,
# h(x)=θ0+θ1x1
#
# when x0=1
#
# where theta is our parameters (slope and intercept) and h(x) is our hypothesis or predicted value
# <h1>Magic of Matices</h1>
#
# Also we are going to do computation in vectorized form not iterative for, so let's just brush up the matrix concepts:
#
# Suppose A is our feature matrix X and B as our parameter matrix theta, that is,
# X=[ 1 2 ] θ=[ 2 3 ]
# [ 1 3 ]
# [ 1 4 ]
#
# Remember that we have our linear model h(x)=θ0x0+θ1x1
#
# We know that X0=[ 1 ] X1=[ 2 ] θT=[ 2 ]
#
#          [ 1 ]  [ 3 ]  [ 3 ]
#
#          [ 1 ]  [ 4 ]
#
# then we can actually use matrix dot product to do the multiplication and addition at the same time (and faster)
#
# H=[ θ0X00+θ1X01 ]=[ θ0+θ1X01 ]=[ 2+3(2) ]=[ 8 ]
#
#   [ θ0X10+θ1X11 ]  [ θ0+θ1X11 ] [ 2+3(3) ] [ 11 ]
#
#   [ θ0X20+θ1X21 ]  [ θ0+θ1X21 ] [ 2+3(4) ] [ 14 ]
#
# can be as simple as
#
# H=X dot θ
#
# Yes, that is the power of Matrices!
# +
#initializing the weights
#Task :- initialize vector of zeros according for x dot w remember size of x is (506,13)
#and one bias vector of one will be add so size will be (506,14) so initialize vector accordingly
# CODE HERE
# normalize() prepends a bias column, so the design matrix has x.shape[1] + 1
# columns; w must match that, otherwise np.dot(x, w) fails during training.
w = np.zeros((x.shape[1] + 1, 1))
print(w.shape)
# -
# <h1>Importance of Normalization </h1>
#
# Normalization is very much important, consider a case: house price depends on area of house and rooms in the house, we know that area of houses is in range of 900-100 sqft. while number of bedrooms are at most 5-6, so obviously target values should depend more on house area but that's not the case, so we have to normalize data by mean and standard deviation to get every data points in comparable range.
#
# Normalization avoids these problems by creating new values that maintain the general distribution and ratios in the source data, while keeping values within a scale applied across all numeric columns used in the model.
#
# Also normalization makes convergence process fast.
def normalize(x, mode='std'):
    """Scale the columns of x and prepend a bias column of ones.

    Parameters
    ----------
    x : ndarray, shape (n_samples, n_features)
        Raw feature matrix.
    mode : str, default 'std'
        'std'     -> subtract the column mean and divide by the column
                     standard deviation (z-score scaling);
        anything else -> subtract the column mean and divide by the
                     column maximum.

    Returns
    -------
    ndarray, shape (n_samples, n_features + 1)
        Scaled features with a leading column of ones (the bias term x0 = 1).
    """
    n_samples = x.shape[0]  # original bound the whole shape tuple here, which was misleading
    x_mean = np.mean(x, axis=0)
    if mode == 'std':
        x_scale = np.std(x, axis=0)
    else:
        x_scale = np.max(x, axis=0)
    x = (x - x_mean) / x_scale
    bias = np.ones((n_samples, 1))
    return np.hstack((bias, x))
np.random.seed(0) # do not change seed otherwise answer will not match
x = np.random.rand(5,5)
x = normalize(x,mode = 'std')  # NOTE(review): rebinds the global x; the training cell below reloads it from the dataset
print(x.shape)
print(x)
# <h3> Answer should match below </h3>
# (5, 6)
#
#
# [[ 1. -0.20602098 0.78951027 -0.41884901 -1.72218407 0.17681922]
# [ 1. 0.11854592 -0.22998031 1.34645231 1.12080062 0.03565705]
# [ 1. 0.6060984 0.10534616 -0.6309152 0.86237971 -1.06099139]
# [ 1. -1.74955693 -1.76276226 0.98513859 -0.13855101 1.74368392]
# [ 1. 1.23093358 1.09788614 -1.28182669 -0.12244524 -0.8951688 ]]
# <h1> Cost Function </h1>
#
# It is a function that measures the performance of a Machine Learning model for given data. Cost Function quantifies the error between predicted values and expected values and presents it in the form of a single real number. Depending on the problem Cost Function can be formed in many different ways. The purpose of Cost Function is to be either:
#
# Minimized - then returned value is usually called cost, loss or error. The goal is to find the values of model parameters for which Cost Function return as small number as possible.
#
# Maximized - then the value it yields is named a reward. The goal is to find values of model parameters for which returned number is as large as possible.
#
# For algorithms relying on Gradient Descent to optimize model parameters, every function has to be differentiable.
#
# We are going to use mean squared error as a cost function here.
#
# <h1> Mean squared error </h1>
#
# Regression metric which measures the average magnitude of errors in a group of predictions, without considering their directions. In other words, it’s a mean of absolute differences among predictions and expected results where all individual deviations have even importance.
#
# <h3>MSE = 1/N ∑<sub>i=1</sub><sup>n</sup>(yi−(mxi+b))<sup>2</sup> </h3>
#
# where:
# i - index of sample,
# ŷ - predicted value,
# y - expected value,
# m - number of samples in dataset.
#
# Sometimes it is possible to see the form of formula with swapped predicted value and expected value, but it works the same.
def compute_cost(x, y, w):
    """Return the mean-squared-error cost J(w) = 1/(2n) * sum((x.w - y)^2).

    Parameters
    ----------
    x : ndarray, shape (n_samples, n_features + 1)
        Design matrix (bias column included).
    y : ndarray, shape (n_samples, 1)
        Target values.
    w : ndarray, shape (n_features + 1, 1)
        Parameter vector.
    """
    n = x.shape[0]
    y_hat = np.dot(x, w)  # predictions h(x) = x . w
    return np.sum((y_hat - y) ** 2) / (2 * n)
# +
np.random.seed(0) # do not change seed otherwise answer will not match
x_temp = np.random.rand(5,5)
y_temp = np.random.rand(5,1)
w_temp = np.random.rand(6,1) # using random weights for illustration purpose
x_temp = normalize(x_temp)  # adds the bias column -> shape (5, 6), matching w_temp
cost = compute_cost(x_temp,y_temp,w_temp)
print(cost)
# -
# <h2> The answer should match this </h2>
#
# 0.2911040132454303
# <h1> Gradient descent optimization algorithm </h1>
#
# Gradient Descent is the most common optimization algorithm in machine learning and deep learning. It is a first-order optimization algorithm. This means it only takes into account the first derivative when performing the updates on the parameters. On each iteration, we update the parameters in the opposite direction of the gradient of the objective function J(w) w.r.t the parameters where the gradient gives the direction of the steepest ascent. The size of the step we take on each iteration to reach the local minimum is determined by the learning rate α. Therefore, we follow the direction of the slope downhill until we reach a local minimum.
#
# <img src = "Cost-Function.jpg" />
# <br>
# <img src = "gradiant_descent.jpg" />
#
# <br>
# <h1> Illustration of gradient descent by valley descent example </h1>
# <br>
# <img src = "gd_illu.jpeg" />
def gradient_descent(x, y, w, iterations, learning_rate):
    """Optimize weights by batch gradient descent.

    Parameters
    ----------
    x : (n, m) feature matrix.
    y : (n, 1) column vector of targets.
    w : (m, 1) initial weights.
    iterations : number of update steps to perform.
    learning_rate : step size (alpha).

    Returns
    -------
    (w, j) : the final weights and the list of cost values; the list has
    iterations + 1 entries because the cost before the first update is
    also recorded.
    """
    num_samples = x.shape[0]
    history = [compute_cost(x, y, w)]
    for _ in range(iterations):
        # Gradient of the cost w.r.t. w is x.T (x w - y) / n.
        residuals = np.dot(x, w) - y
        w = w - (learning_rate / num_samples) * np.dot(x.T, residuals)
        history.append(compute_cost(x, y, w))
    return (w, history)
# +
# Sanity-check gradient_descent on the same seeded random problem.
np.random.seed(0) # do not change seed otherwise answer will not match
x_temp = np.random.rand(5,5)
y_temp = np.random.rand(5,1)
# 6 weights for 5 features: presumably normalize() appends a bias column -- TODO confirm
w_temp = np.random.rand(6,1) # using random weights for illustration purpose
x_temp = normalize(x_temp)
w_temp,j_temp = gradient_descent(x_temp,y_temp,w_temp,1000,0.001)
print("Weights after updation: {}".format(w_temp))
print("Initial cost: {}".format(j_temp[0]))
print("final cost after updation: {}".format(j_temp[-1]))
# -
# <h2> The answer should match this </h2>
#
# Weights after updation: [[ 0.43422511]
#
# [ 0.6546597 ]
#
# [ 0.31910619]
#
# [ 0.34074398]
#
# [-0.04576342]
#
# [ 0.6052419 ]]
#
# Initial cost: 0.2911040132454303
#
# final cost after updation: 0.1679955760288899
# <h3> Training time </h3>
# We are setting the learning rate to 0.001; you can tweak the learning rate and check the results for yourself. The learning rate is known as a hyperparameter in machine learning terms, and setting an optimum learning rate can sometimes be quite a challenging task.
#
# Generally we split data into a train set and a validation set using sklearn.model_selection.train_test_split(x, y), but here we have very few training examples, so we are going to use all the data for training.
# Train on the full dataset.
# NOTE(review): `w` on the right-hand side of the last line must already be
# initialized by an earlier cell (not shown here) -- verify it holds the
# initial weight vector.
iteration = 4000
lr = 0.001
x = dataset.data
y = dataset.target
y = np.expand_dims(y,axis=1)  # reshape targets into an (n, 1) column vector
x = normalize(x)
w,j = gradient_descent(x,y,w,iteration,lr)
# <h2> Cost convergence graph </h2>
#
# As we can see from the below graph, the cost is decreasing with each iteration, the graph below shows us that the training is stable and we have selected optimum learning rate, if cost in graph overshoots or increase with time in some iterations then we know that the learning rate is not optimum and we have to change it!
# +
# Plot cost against iteration number; j holds iteration + 1 values because
# the cost before the first update is also recorded.
# NOTE(review): assumes matplotlib.pyplot was imported as plt earlier in
# the notebook -- confirm.
t = np.arange(iteration + 1)
plt.figure(figsize = (16,12),dpi = 96)
plt.plot(t,j)
plt.xlabel('iterations')
plt.ylabel('cost')
plt.title('convergence graph of cost function')
# -
print('Initial cost = {}'.format(j[0]))
print('final cost = {}'.format(j[-1]))
# We cannot simply measure accuracy by checking how many predictions are correct as we can in a classification problem, so we have to devise some method to measure accuracy. We are going to use the equation 1 - (sum((y - y_hat)^2) / sum((y - mean(y))^2)) (the R² score) for the accuracy measurement.
def score(x, y, w):
    """Return the R^2 (coefficient of determination) of predictions x.w vs y.

    The features are normalized and y reshaped to a column vector before
    scoring. A value of 1.0 is a perfect fit; 0.0 is no better than always
    predicting the mean of y.
    """
    features = normalize(x)
    targets = np.expand_dims(y, axis=1)
    predictions = np.dot(features, w)
    ss_res = np.sum((targets - predictions) ** 2)
    ss_tot = np.sum((targets - np.mean(targets)) ** 2)
    return 1.0 - (ss_res / ss_tot)
# +
# NOTE(review): scoring on the same data used for training (no train/test
# split was made above), so this measures fit rather than generalization.
x_test = dataset.data
y_test = dataset.target
s = score(x_test,y_test,w)
print('score of the system is : {}'.format(s))
# -
# We have got quite a good accuracy with such a simple model on this multi featured dataset. quite an accomplishment cheer up!
def predict(x, w):
    """Return predicted target values for feature matrix x.

    The features are normalized (the same transform used during training)
    before applying the learned weights w.
    """
    # The original body computed an unused n_samples = x.shape[0]; removed.
    x = normalize(x)
    return np.dot(x, w)
# +
# Predict on the same data the model was trained on
x_test = dataset.data
y = predict(x_test,w)
print(y) #uncomment the commented value to see values
# -
# True target values, for visual comparison with the predictions above
print(dataset.target) #uncomment the commented value to see values
# So you can check predicted y values and also original target values they are very near to each other, so we have trained a good model with such a few lines of code! That's the power of machine learning and you are all set to implement this knowledge to tackle a real world problem!
#
# Cheers! see you next time!
| DRISHTI-workshop/LinearRegression/linear_regression_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="14HW9foK-jSk"
# # Node Classification
#
# In this notebook, we will be looking at node classification problems. In this setting, a graph is provided with some labeled nodes and some unlabeled nodes, and the task is to train a model that predicts the labels of the unlabeled nodes. Examples of this include predicting the research categories of publications in a citation network, or predicting interests of a user in a social network.
#
#
#
#
#
# + [markdown] id="yJXckpuqsr6y"
# ## Transductive Node Classification
# + [markdown] id="WVdiVYEGsupe"
# In a transductive node classification setting:
#
#
# * The features of all nodes in one graph are known
# * Some of the node labels are known
# * Goal: Predict unknown node labels in the same graph
#
# <center>
# <img src="https://ai.science/api/authorized-images/ZHAan1V7Hz7YV0%2BRVLeqR9qa1Eaam83R2A8TDhqP7ugb2T6SQZBujucyoNn8Cxr%2FJCIo%2BvdirgOq%2FFP%2Fp47GdfZaYXQe5bXZTfw0vsIsjGB2ZpJOfKYlNOq8bePoVFr4X4DN0bgMEoXB19hlk7KMFEljiu8PatOb3MKQjKYWBtrFcJCqFjOaWrGQ260G%2F9PnUTqF74r0BXT4mc6C50UizvrBhyJkoQbNdA6CnG62rB6TN0JTQjhE3%2Fo%2BLu2Te6Gpc5zUmPG5KEuN0aSNtlwml82uZAtV0srMU6YdsMlh5eJYRXPgQD0PHO3PpFcsKIsyAi%2BPkDLO4gx%2FMFedDaNdSkfRL01dycLpNHvg7YrtWi0TCMxwQF2D4i4ua3beHP0E7sfogTti%2BwaBkeE8xKZBNTxJd78ZQFIEmzEEbR%2FI%2FS8OxUDMTnI5lGL872nfT47KaZcW4BgQMpEmqluNdWtQEQifwUedi4XjEWBqewIN9rcaVOLyRbIDAtphrxS%2Be8AQt4tnkbnCxDQxsiOfb%2BIyVm4JzeMSjxO5jVBRgiAcKOx4UCoNbMtmVEjglJQS%2BrKODOIZOXJ%2BsVjJwOtnTvr%2FzDsbqZojvECAW98ITDKbuewrKBCr7cKy82mE8C8NARgwr5uSRzR8Euy3xEu7gJM%2BfhdXB5oRlzGGoBpCo3bm2Oo%3D" width="60%" > </center>
# + [markdown] id="kfumhme3tYNU"
# ## Inductive Node Classification
# + [markdown] id="MeGfy2pstbJ4"
# In an inductive node classification setting:
#
# * The features of nodes in one or more graphs are known
# * Some or all of the node labels in those graphs are known
# * Goal: Predict unknown node labels in unseen graphs
#
#
# <center>
# <img src="https://ai.science/api/authorized-images/cUEc1VHs2beXboMFOl3uQ8MyReaWjSTJbt7Xdc4evnlGiyIjfFbtgKX79az37YEGEOnSA%2BZiYsH%2Bexjqp4TheDH1cVsMZ4Y8pHwk%2FFt1KBox2AJ4syhJLq0DaY627JAMfo2T06rowYtPGNJjlvp%2BpznhQ39QCfuxNZGjCf2pO0j2zugxHm4YviF9Knods2J6MRiQIFg7wYEg5jWfstY0Q5AfQEu9BBNRke3ngG769tpixVkXRhU7hr37m38tFZhMqzZriRE7tyJ64HR%2F5ewJihu%2FRuh5Wz6uYEE%2FdK8bBX6%2FyLrk79oxA1roOURtVQO%2Bng04fIXu3GD%2FFfPDA%2BrQlryLRUSsvgaeeGJI95alp51MWoSRbivoFEqA4aCEZhuKYIA9GI1jqXScwgr3UvtEH6fDSNbghcwFggMFZNZ8l%2F2tixwWAVC6ibdfw3TcDAZIcByv%2BBZ2HCy%2BbEMaXboMLB7h5rlvimJhOXUYYwitz8S8IO3PpHvIHMYT6Yo6X319L1SYzHtcCJa5ZKkO4fjuGBtcsOJUCZQRp4c0kyTkhcMmdkFnnrUet5eEzBr6PoeSf4WgRaZNiEvbPf0fJ%2FRcfhAphn4TqIohrZcMlRDbC9hl4z3EK8h0DxYTCL1sBqIM8tNO1MDPGh4UtipdFIT9OjjuFJFvMvdcQcymOHJ03eM%3D" width="60%" > </center>
#
#
# + [markdown] id="_-zNP2RMJW8X"
# # Installation
#
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 102565, "status": "ok", "timestamp": 1623852590133, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="ESThntq91KrW" outputId="6f9e681f-00c7-4974-c3c9-1df6af61a7c4"
# Please visit https://github.com/rusty1s/pytorch_geometric#pip-wheels for lastest installation instruction
# !pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html -U
# !pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html -U
# !pip install torch-cluster -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html -U
# !pip install torch-spline-conv -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html -U
# !pip install torch-geometric -U
# + [markdown] id="9htnPB9fZ7xA"
# # Loading Datasets
# For our datasets, we will be using three citation networks; Pubmed, Cora and Citeseer. Nodes correspond to publications and edges correspond to citations. The citation networks are available through the Planetoid dataset of PyG.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9339, "status": "ok", "timestamp": 1623852599440, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="oArjAIk5Jdhd" outputId="922b90ea-3474-429b-f425-4f8289ee5544"
import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
#Load the Cora, CiteSeer and Pubmed citation networks
#(downloaded into ./tmp on the first run, cached afterwards)
#Note: T.NormalizeFeatures() creates a transform that normalizes the node features
dataset_cora = Planetoid(root="./tmp", name="Cora", transform=T.NormalizeFeatures())
dataset_citeseer = Planetoid(root="./tmp", name="CiteSeer", transform=T.NormalizeFeatures())
dataset_pubmed = Planetoid(root="./tmp", name="Pubmed",transform=T.NormalizeFeatures())
# Each Planetoid dataset contains a single graph; index 0 extracts it
data_cora = dataset_cora[0]
data_citeseer = dataset_citeseer[0]
data_pubmed = dataset_pubmed[0]
print("Citation network information")
print("Cora: ", data_cora)
print("Citeseer: ", data_citeseer)
print("Pubmed: ", data_pubmed)
# + [markdown] id="fTIYE2vyfnB3"
# # Generalizing Convolutional Operators to Graphs
# + [markdown] id="X8MB8fKYyJfL"
# ## Convolutional Operator on a 2-d grid
# + [markdown] id="oWMpKmPLfupz"
# Let's consider an image represented by numbers on a 2-d grid and review what happens in a convolutional layer typically used in convolutional neural networks (CNNs). A learnable filter (typically of size $3 \times 3$) convolves across the grid and an element-wise multiplication of the filter values by the image values is computed at all positions. The output of this operation is also numbers on a 2-d grid, which could be of the same size or not depending on whether padding was used. (For more details on CNNs see this [page](https://cs231n.github.io/convolutional-networks/). )
#
#
# <center>
# <img src="https://ai.science/api/authorized-images/ZbbQlWsB8df1DpFwUiY9AS7TapF%2BH9nv%2FX8vCzV9inQ7KYk4uS2WDm%2FAGXepIuwVXbrnn%2Bxyd8wcGv1KU0EYmqB6noUpulGGRJ2ycVSITeuh8gyGXq4huY7vyETRq3lGYaClBN6etDJuu5rFAe89nBALS8JNeAXKE2VTac8iHpwMnk%2FRtWnoReqL67ZK1TFg9fsWjwgs8t7XMD%2F5rzxFCJh4rqXoI6F4PuANumyYiAU8OiLW8LTjxHREsF4MV28zJCWHxVQLa%2BJzyIoHTQrkkCL0yExVz4sX%2Fxx9ZnbS7Vr14TLb2PqkyKPg%2BwHQ9wNjkTP74KsX3A1eSlEKBum0xa3xb5XdF2QHQDl9vMNDNxzCO2WbVW%2B2l3Vo%2BsZbZ3qu4Lw6QtgBdbRY%2B2%2FAJyo79W72M9OsGB70ZJJnW6%2FZEcYmtKuE37%2FeNqKKKOP0XcP6zqFobi69E9fTgUIB%2Bvh0TO%2F%2Bl%2BFoxbPvP3LJfeylPi1Xh2OJgB03o%2F2lm8Psw9SiCywKIF8z2IDYJ6zfPa0UUUp3WyiJGNSba8znBgH%2BgiCe4uyQKtQ6x8vxvDlMHZMzXqyj%2Bhed2CanR0JsrJhNeSUP09kkMorCvFyWIWK8%2B%2FwPqgB0CA4RonWgmFJiQfyNdH0YMILNs0QHFk2pEA34zo08r1jg15lYhqDCF7F3fL0%3D" width="60%" > </center>
#
# The convolutional filter in CNNs has some desirable properties that are suitable for images:
#
# * The number of parameters is independent of the input
# * Operates locally, extracting localized features
# * Translation invariant (i.e. the filter remains the same as it convolves)
# * Values of the filters depend on the relative position of neighboring pixels. (e.g. the 9 values in a $3 \times 3$ filter can in general be different)
#
# So what are the challenges in generalizing a convolutional operator to arbitrary graphs? Some of the difficulties that make such a generalization non-trivial is:
#
# * A node in a graph may have an arbitrary number of neighbors, while a pixel in an image has 8 neighbors (except in the case of edge and corner pixels)
# * A node in a graph may have an arbitrary number and types of attributes. E.g. whereas a pixel in a color image may have three values (RGB), a node in a social network representing a user may have different types of attributes such as current location, interests, etc.
# * In the case of a heterogeneous graph, there may be different types of nodes. E.g. in a movie-actor network.
#
#
#
#
#
# + [markdown] id="2fgvEKYwuo8L"
# # Extension to Homogeneous Graphs with Node Features
#
# + [markdown] id="s1nPidyQFDvR"
# Let's try to create a convolution-like operator on a homogeneous graph. Let's assume that each node $v_i$ has a feature vector $\mathbf{v}_i$ associated with it.
#
# <center><img src="https://ai.science/api/authorized-images/ly8YBPB1rsFZENZBrO47vYhzp5zBRXRO8%2FtHMMBtBm%2FkhYTNnnl7B2rHuVk0MoMOxY8WDh2R9uMBuqwkMjT1fGntAM2p2agcwmciMtSrIPBZiVWvRH%2FcSC%2FPRgyyLy0LRiBeqbNPvIetT6J7cKKRB6t3OUL9vXnlI9bMPSBrNIDH9pblYjIs48BtthsR4FLtXPp5H25nq4dyn0K6cvONLIebrOEoAmbtL8x3DxJHnmBfEoKdHU%2B6yxSCQJk14%2BUkUSiutxov2UCJnzCzPXQuyyFTieRFXe4zkzqcjhLljBLHwGDorWlDZTSAF2dqN6%2BVvl5Bu1JKk1iY7wLluejjS7lfxGaeeiLS0SE34ChjXe9XVvXJAnZ%2Fr7uOlgKhytDfdquuGtFJlHs4b0ODZ2AVuFFCS6h%2FVLAPt1FHHBcEgcAv5Gnk7Gs3hAPZ57beBqaLe7sWdzJA7RRWaedUnO%2FyxGQg8MvYSjDXyz0H3yZi8acLUtCkme8W73WO1io%2FaXTeWteXRHDvUI42kMrqjcyqfoy7gdGzyO6CsYk6BvzGeR7ralzvzjPVVSn8wvvsa3dDECAfXex%2BFyCsAv6HqurD%2FwNeqK%2FB9B1r4ydqTc6On2ZLx4db8JJq6zW8NVHUwfwTJRrVuJY5V2m8QlWr3LLu3Xz10Urt255yAgkXFvLfRaw%3D" width="30%" > </center>
#
#
# A very simple convolution like operator on graphs is just the uniform average of the node-features of each node’s neighborhood.
#
# Let's go through a single layer of this operation. We start with the initial feature vectors at each node as the input to the convolution layer:
# $$\mathbf{v}_i^{(0)}= \mathbf{v}_i$$
#
# Then we update each node’s features by the average of all neighboring node features. This can be written as:
#
#
# $$ \mathbf{v}_i^{(1)} =
# \sum_{v_j \in N(v_i) \cup v_i }{\frac{1}{d_i + 1} \mathbf{v}_j^{(0)}} $$
#
# This is visualized below
#
# <center><img src="https://ai.science/api/authorized-images/twlUnmkDyXflUxLUbaJlYVxoyPsrO9lCxMIwGcllts3fshttjk2otcAN%2F%2BYT%2BzCn%2B7zCcSTq73x08Sjx%2BuQw1XaZqeC6PEQJDT01QzpVHtvVN%2FtKMOfCiUmHYA2Sn%2FEOxTKY%2BDeIliDYlQM2efGAHfvumXEOynZJdAPbJdhrHo2y0j0oYdInscZkr5PHzHVrWqtxIaJCMriCJ5UlVxfIHvH8%2Frf3VOBW5H2Cwu%2F6vjww0wOhvrq60ygHE99Sghzo0pmkuk8Fq6OYw2t5MgL2azS0KZEnSOf%2F5zeKGYBwUE3czdz5BlPZAVZvV7UYGb4jlmohvMNWmwKY8e%2F4wvtG5iUtzUZpMvSTCsvdl0NIxbJ1He7gykW%2BfaAQRjiO1Nme%2B%2FD9o2BaVzGX61eu4vlPJZYGZh3JUgBGGtWMEFmOETMPMt5CYoHas0z5Dk2xIk5DkYS1F%2B4FIlc8ywacJoih7miZcUIPjeWFiVvGMtrv0zuofNZrV7gb%2Bw2mm3YPVSg6uCPslo2phKBFpkrLb7iy6OBGMGqElRa1ofK81zJ4EUMs%2FnAt0TkE6DOZtsQlEsXI4%2B4fuMoFs2fdlcgb%2B4rBU2efOjfO5nQptHlvbTXTccmI6IIU0AY%2BiSimB7QysH%2FqT%2F%2FY1iOI9WObaDfecUpK8OYLnyaOeZCp0NNP5ILbINk%3D" width="30%" > </center>
#
# In the illustration here, the neighborhood of each node consists of all nodes with incoming edges to that node. This definition of a neighborhood is to take into account the propagation of information. However other definitions of neighborhoods are possible, and the neighborhood doesn’t have to be limited to nodes that are only 1 hop away.
#
# If we write $D$ as the degree matrix, i.e. a diagonal matrix containing the number of neighbors of each node on its diagonals, $I$ as the identity matrix and $A$ as the adjacency matrix, then we can succinctly write this simple convolution operator as
#
# $$ V^{(1)} =(D+I)^{-1} (A+I) V^{(0)}, $$
#
# where $V$ is a $N^v \times N^{f_{nodes}}$ matrix containing the feature-vectors stacked vertically. Introducing $\tilde{S}=(D+I)^{-1} (A+I)$, this can be written as
#
# $$ V^{(1)} =\tilde{S}V^{(0)}.$$
# We can apply this step $k$ times to get
#
# $$ V^{(k)} =\tilde{S}^{k}V^{(0)}.$$
# After $k$ applications, the updated node features of a all nodes will have been influenced by the input feature-vectors of nodes that are up to $k$ hops away. There is however a limitation to how many times we can repeat this operation. For example if the graph has a diameter of 5, that is the largest shortest-path between any pair of nodes is 5, and we repeat this averaging operation 5 times, the node features in the final layer would be very similar. This would probably result in poor performance in downstream tasks like node classification.
#
#
# After computing $V^{(k)}$, we can then use these updated feature-vectors for downstream tasks. For example for node classification, one can pass $V^{(k)}$ into a softmax layer to predict the label of each node, i.e.
#
# $$ \hat{Y}= softmax(V^{(k)}\Theta ),$$
# where $\Theta$ is a matrix to be learned e.g. via back-propagation.
#
# While the above procedure is simple, it is still a good baseline for graph representations of data and is benchmarked in the paper [Simplifying Graph Convolutional Networks](https://arxiv.org/abs/1902.07153).
# + [markdown] id="PhZ0FJrxccNR"
# # Simple Graph Convolutional Network
#
# We will first implement a simple learning pipeline based on the graph convolutional operator from the paper [Simplifying Graph Convolutional Networks](https://arxiv.org/abs/1902.07153).
#
# The [*SGConv*](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.conv.SGConv) operator is linear. For each node, the operator essentially averages the features from neighboring nodes. Applying this averaging $K$ times allows features to propagate from nodes that are at most $K$ edges apart.
#
# *Note: We will look at the details of how the convolutonal operators are implemented in the next notebook. For now we will simply use the ones provided in PyG.*
#
# Let's apply a single SGConv layer to the node features of the Cora graph. We will define the layer to have $N^{f_{nodes}}$ input channels (i.e. the length of the feature vector of each node) and output $N^C$ channels (i.e. the number of unique labels).
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 559, "status": "ok", "timestamp": 1623852599975, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="F3rg-Szny4BM" outputId="d9e87473-e53d-4a34-924d-5cfda047856e"
from torch_geometric.nn import SGConv
# One output channel per class so the conv output can feed a softmax directly
num_classes = len(data_cora.y.unique())
conv = SGConv(in_channels=data_cora.num_features, out_channels=num_classes,
              K=1, cached=True)
x = data_cora.x
print("Shape before applying convoluton: ", x.shape)
#x contains the node features, and edge_index encodes the structure of the graph
x = conv(x, data_cora.edge_index)
print("Shape after applying convoluton: ", x.shape)
# + [markdown] id="NMdpv-Q63WBp"
# Let's define a network which uses SGConv to classify nodes on the Cora dataset
# + executionInfo={"elapsed": 7, "status": "ok", "timestamp": 1623852599976, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="i2isvLPqciVd"
class SGNet(torch.nn.Module):
    """Single-layer SGC node classifier: one SGConv followed by log-softmax."""

    def __init__(self, data, K=1):
        super().__init__()
        n_classes = len(data.y.unique())
        # K is the number of neighbourhood "averaging" hops; cached=True
        # stores the propagated features after the first forward pass.
        self.conv = SGConv(in_channels=data.num_features,
                           out_channels=n_classes,
                           K=K, cached=True)

    def forward(self, data):
        logits = self.conv(data.x, data.edge_index)
        # Negative log likelihood loss expects log-probabilities.
        return F.log_softmax(logits, dim=1)
# + [markdown] id="Wg1v_BfR5cG5"
# We will define a function to run one training cycle of the model
# + executionInfo={"elapsed": 7, "status": "ok", "timestamp": 1623852599977, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="PXbGwaNApF73"
def train(model, data, optimizer):
    """Run a single optimization step on the nodes selected by data.train_mask."""
    # Put the model into training mode (sets model.training = True).
    model.train()
    # Clear gradients accumulated by any previous step.
    optimizer.zero_grad()
    # Forward pass: the model returns log-probabilities per node.
    out = model(data)
    mask = data.train_mask
    # Negative log-likelihood computed on the training nodes only.
    loss = F.nll_loss(out[mask], data.y[mask])
    # Backward pass populates .grad on every parameter involved in the loss
    # (these can be listed by looking at model.parameters()).
    loss.backward()
    # The optimizer uses those gradients to update the parameters.
    optimizer.step()
# + [markdown] id="DCMhU2xurA48"
# In case you are not very familiar with Pytorch: To get a better sense of what the above function does (or anything you are not quite sure down the line), you can just run the code and see what is going on yourself! Here is an example:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 545, "status": "ok", "timestamp": 1623852600516, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="qldh-1t9pH1l" outputId="24e5e282-7bcf-4472-fceb-4debfb2ec9cd"
model_cora = SGNet(data_cora, K=1)
optimizer = torch.optim.Adam(model_cora.parameters(), lr=0.2)
optimizer.zero_grad()
print("="*80)
print("Gradients of model parameters right after zero_grad")
for i, parameter in model_cora.named_parameters():
print ("Parameter {}".format(i))
print ("Shape: ",parameter.shape )
print("Gradient")
print(parameter.grad)
# Get the output of the network. The output is a log probability of each
log_softmax = model_cora(data_cora)
print("="*80)
print("Output of model (log-softmax) \n Shape:{}"
" \n Values: {}".format(log_softmax.shape, log_softmax))
# Labels of each node
y_true = data_cora.y
# Use only the nodes specified by the train_mask to compute the loss.
train_mask = data_cora.train_mask
nll_loss = F.nll_loss(log_softmax[train_mask], y_true[train_mask])
print("="*80)
print("negative logloss {}".format(nll_loss))
#Computes the gradients of all model parameters used to compute the nll_loss
#Note: These can be listed by looking at model.parameters()
nll_loss.backward()
print("="*80)
print("Gradients of model parameters right after back propagation")
for i, parameter in model_cora.named_parameters():
print ("Parameter {}".format(i))
print ("Shape: ",parameter.shape )
print("Gradient")
print(parameter.grad)
# + [markdown] id="ZiT-OtSr_reu"
# Now let's define a function to test the accuracy of a trained model on the validation set
# + executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1623852600518, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="eHy8WPWB_Gmq"
def compute_accuracy(model, data, mask):
    """Fraction of correctly classified nodes among those selected by `mask`."""
    # Put the model into evaluation mode (model.training = False).
    model.eval()
    log_probs = model(data)
    # max(dim=1) returns (values, indices); the indices are the predictions.
    predicted = log_probs[mask].max(dim=1)[1]
    actual = data.y[mask]
    correct = predicted.eq(actual).sum()
    accuracy = correct / mask.sum().float()
    return accuracy.item()
@torch.no_grad()  # evaluation only -- no autograd bookkeeping needed
def test(model, data):
    """Return (train accuracy, validation accuracy) for the given model."""
    train_acc, val_acc = (compute_accuracy(model, data, m)
                          for m in (data.train_mask, data.val_mask))
    return train_acc, val_acc
# + [markdown] id="smaqQsciOZ8i"
# Putting it all together in a training loop
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2009, "status": "ok", "timestamp": 1623852602519, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "01706064865196246875"}, "user_tz": 240} id="dTkB2KhlMVGU" outputId="ed1b1c46-0582-4c32-c723-307cf6cdae0b"
# Create a model for the Cora dataset
model_cora = SGNet(data_cora, K=1)
# Create an Adam optimizer with learning rate and weight decay (i.e. L2 regularization)
optimizer = torch.optim.Adam(model_cora.parameters(), lr=0.001, weight_decay=5e-4)
# Train for 199 epochs, reporting train/validation accuracy every 10 epochs
for epoch in range(1, 200):
    train(model_cora, data_cora, optimizer)
    if epoch %10 ==0:
        log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}'
        print(log.format(epoch, *test(model_cora,data_cora)))
# + [markdown] id="cegWvG7VYD-d"
# # Optional Exercise
# * Experiment with the $K$ parameter of *SGNet*, as well as the learning rate and weight_decay parameters of the optimizer, to improve the results on the validation set. Finally, check the accuracy on the test set. How does it compare with the results provided in the paper? The results are reproduced below
#
# * Do the same for the Citeseer and Pubmed citation network datasets
#
# * Some things to try to improve the results:
#
# * Add another convolution layer
# * Add a dropout layer
# * Add nonlinearities in the model
#
# <center><img src="https://ai.science/api/authorized-images/fIA50%2BdS0pey1rqtswkEaKhrHMLSGANdEGtf5be8r4ghb15lOoeCRvY2zofkIKqEG5p0%2BwDF1yT2Jii0i%2B58Ffl5jCNDg4vWzlRB9VpDBR%2BI5YKqpeh1Wa7fauh4Y%2FxmNCAF1j8dYdlD%2BcOYSQMLHcZ33QXV1jci0FowIRbCDT0Ax7Jqi8DjR9%2Be9z8ULyDNFjIBb4yKMY3s2qKcHqmulYeTKv9aeYxdrvdD6FsSVaDAo41%2BppCaycng4y0E3j5B591SRLTHnwtjLUEqT3xKeTKJXdgPeOAboibccywv3Jgb3z0X4ztC6DjKOIbSLCWPYKeZMl9SEdhZPMdpROoeEz4aT8BGHdzZojlpO21W3%2BcwvkBGtrV6xH14Jyd6P%2Fcccn9H1pkZjLpNA1vSbhFzMupaHnkFeH8IeJSNMt03Xckj0MXjTBrQsQLy%2FBh2yR%2F4%2B0ZNe%2BvIWHEdbVFqR0j51DTQf2x7cu%2BNJLIQpo8%2F1ipBJRCxTrpWCPM76FODx8qxdZ9ToLDiV7nSuiYBnn9dcdcZMtuAU3LbUoSK8JAuV0wB4Y5d7zSV2w5nfPHZDy4FRy0DGahQjpWujcmsIYqqHl4WpV8pst0lwIcq6uW7%2B5TsbEOHlp11f4vzNGcGJSsErq%2FSBcA9Z7KW9pelss6P8qbcq8gn6B3xC2rs%2ByOorZA%3D" width="80%" > </center>
#
#
#
#
# + [markdown] id="u1PdH-ZcZby9"
# # References
#
# [Wu, Felix, et al. "Simplifying graph convolutional networks." arXiv preprint arXiv:1902.07153 (2019)](https://arxiv.org/abs/1902.07153)
#
#
# [Kipf, <NAME>., and <NAME>. "Semi-supervised classification with graph convolutional networks." arXiv preprint arXiv:1609.02907 (2016)](https://arxiv.org/abs/1609.02907)
| graph neural networks/2_node_classification_SGC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## EDA(탐색적분석)
#
# - naver와 brunch 블로그 크롤링 데이터를 기준으로 진행
# - 가정: 블로그에 언급된 글 건수가 해당 관광지에 대한 사람들의 긍정적인 관심을 의미하며 계절별로 게시되는 블로그 글의 비율은 해당 관광지의 계절별 특성을 의미한다.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Configure a Korean-capable font so Hangul labels render correctly
import platform
path = 'C:\\Windows\\Fonts\\malgun.ttf'
from matplotlib import font_manager, rc
if platform.system() == 'Windows':
    font_name = font_manager.FontProperties(fname=path).get_name()
    rc('font', family=font_name)
else:
    print('Unknown system')
# -
# Read the full preprocessed blog-post dataset
df = pd.read_parquet('after_preprocessing.parquet')
# ### 계절별 블로그 글 건수를 기준으로 분류
#
# - postdate를 기준으로 계절을 구분한다.
# Per tourist spot (source): number of posts in each season, by post month
mainF = df.groupby('source')['postdate'].agg([
    ('봄건수', lambda x: np.sum(pd.to_datetime(x).dt.month.isin([3, 4, 5]))),
    ('여름건수', lambda x: np.sum(pd.to_datetime(x).dt.month.isin([6, 7, 8]))),
    ('가을건수', lambda x: np.sum(pd.to_datetime(x).dt.month.isin([9, 10, 11]))),
    ('겨울건수', lambda x: np.sum(pd.to_datetime(x).dt.month.isin([1, 2, 12])))
]).reset_index()
# +
# Per tourist spot: fraction of posts falling in each season
f = df.groupby('source')['postdate'].agg([
    ('봄비율', lambda x: np.mean(pd.to_datetime(x).dt.month.isin([3, 4, 5]))),
    ('여름비율', lambda x: np.mean(pd.to_datetime(x).dt.month.isin([6, 7, 8]))),
    ('가을비율', lambda x: np.mean(pd.to_datetime(x).dt.month.isin([9, 10, 11]))),
    ('겨울비율', lambda x: np.mean(pd.to_datetime(x).dt.month.isin([1, 2, 12])))
]).reset_index()
mainF = mainF.merge(f, how='left')
mainF
# +
# Total number of posts per tourist spot
sizeF = df.groupby('source')['content'].agg([
    ('전체건수', np.size)
]).reset_index()
mainF = mainF.merge(sizeF, how='left')
mainF
# -
# Spots with too few posts overall are not meaningful to compare with the
# others, so we drop them. Candidate cut-off thresholds: median, mean.
print(f'중간값: {mainF["전체건수"].median()}')
print(f'평균값: {mainF["전체건수"].mean()}')
# Median and mean are close, so use the median as the cut-off and keep only
# spots whose total post count exceeds it
meanTotal = mainF[mainF['전체건수'] > mainF['전체건수'].median()]
meanTotal.sort_values(by=['전체건수'], axis=0)
totalSort = meanTotal.sort_values(by=['전체건수'], axis=0, ascending=False)[:10]
# +
# Seasonal profile of the 10 spots with the most posts overall
f, ax = plt.subplots(figsize = (40,50))

plt.subplot(2, 2, 1)
sns.set_color_codes('pastel')
sns.barplot(x = '봄비율', y = 'source', data = totalSort,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 봄 특성비율')

plt.subplot(2, 2, 2)
sns.set_color_codes('pastel')
sns.barplot(x = '여름비율', y = 'source', data = totalSort,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 여름 특성비율')

plt.subplot(2, 2, 3)
sns.set_color_codes('pastel')
sns.barplot(x = '가을비율', y = 'source', data = totalSort,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 가을 특성비율')

plt.subplot(2, 2, 4)
sns.set_color_codes('pastel')
sns.barplot(x = '겨울비율', y = 'source', data = totalSort,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 겨울 특성비율')
plt.show()
# -
# #### 결과:
# - 유명한 관광지들 중 대부분은 계절별 특성을 가지기보다 사계절내내 관심을 받음을 알 수 있다.
# ### 각 계절별 특성을 보이는 상위 10개의 관광지를 추출
# Top-10 spots by each seasonal ratio (spring, summer, autumn, winter)
meanTotal.sort_values(by=['봄비율'], axis=0, ascending=False)[:10]
meanTotal.sort_values(by=['여름비율'], axis=0, ascending=False)[:10]
meanTotal.sort_values(by=['가을비율'], axis=0, ascending=False)[:10]
meanTotal.sort_values(by=['겨울비율'], axis=0, ascending=False)[:10]
# ### 각 계절별로 관심비율 상위 10개의 관광지를 추합하여 시각화진행
# Collect the top-10 spots for every season into a single frame.
# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
# so the four pieces are built first and concatenated in one call.
seasonDf = pd.concat([
    meanTotal.sort_values(by=['봄비율'], axis=0, ascending=False)[:10],
    meanTotal.sort_values(by=['여름비율'], axis=0, ascending=False)[:10],
    meanTotal.sort_values(by=['가을비율'], axis=0, ascending=False)[:10],
    meanTotal.sort_values(by=['겨울비율'], axis=0, ascending=False)[:10],
])
seasonDf.info()
# Total post count per selected tourist spot
f, ax = plt.subplots(figsize = (10,15))
sns.set_color_codes('pastel')
sns.barplot(x = '전체건수', y = 'source', data = seasonDf,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 전체건수')
plt.show()
# +
# Seasonal profile of the top-10-per-season tourist spots
f, ax = plt.subplots(figsize = (40,50))

plt.subplot(2, 2, 1)
sns.set_color_codes('pastel')
sns.barplot(x = '봄비율', y = 'source', data = seasonDf,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 봄 특성비율')

plt.subplot(2, 2, 2)
sns.set_color_codes('pastel')
sns.barplot(x = '여름비율', y = 'source', data = seasonDf,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 여름 특성비율')

plt.subplot(2, 2, 3)
sns.set_color_codes('pastel')
sns.barplot(x = '가을비율', y = 'source', data = seasonDf,
            label = 'Total', palette = 'husl', edgecolor = 'w')
plt.title('관광지별 가을 특성비율')

plt.subplot(2, 2, 4)
sns.set_color_codes('pastel')
sns.barplot(x = '겨울비율', y = 'source', data = seasonDf,
            label = 'Total', palette = 'husl', edgecolor = 'w')
# Fix: this winter subplot was titled just '관광지별 특성비율', missing the
# season name that the other three subplots include.
plt.title('관광지별 겨울 특성비율')
plt.show()
# -
# Keep only the ratio columns, restricted to the four spots that are the
# single strongest representative of each season
seasonDf2 = seasonDf.loc[:, ['source', '봄비율', '여름비율', '가을비율', '겨울비율']]
seasonDf2 = seasonDf2[(seasonDf2['source'] == '동백포레스트') | (seasonDf2['source'] == '천아숲길 천아계곡') | (seasonDf2['source'] == '종달리수국길') | (seasonDf2['source'] == '녹산로유채꽃길')]
# +
# Stacked-bar comparison of the four most season-specific spots
seasonDf2.set_index('source').plot(kind='bar', stacked=True, color=['palegreen', 'palegoldenrod', 'darkgoldenrod', 'paleturquoise'])
plt.title('계절별 비율이 가장높은 관광지의 특성비교')
plt.show()
# -
# #### 결과:
# - 계절별 특성을 가지는 관광지의 경우 다른 계절에 비해 자신이 특성으로 가지는 계절의 비율이 현저히 높음을 보이며, 이는 전체 관광지를 대상으로 한 분류에 계절별 특성이 하나의 분류기준으로 활용될 수 있음을 의미한다.
| analysis/EDA/EDA(Season).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 8
#
#
# ## The final project is on the horizon
#
# Hey gang. It's the last of the lectures and we're getting ready to move into the project phase. The project is where you take all the stuff you've learned and analyze you own data. Just so you have it handy, here's the video explaing the final project
#
# [](https://www.youtube.com/watch?v=l8yah1L-mh0)
#
# And you can read all about the details here: https://github.com/suneman/socialdata2021/wiki/Final-Project
#
# **Course structure**
# Also, are you confused what’s going on in the course after lecture 8? Are you wondering what the important events are for the last few weeks of the course are? Well, then you’re in luck. Here’s a little video I made to explain the last 5 weeks
#
# [](https://www.youtube.com/watch?v=CwTF6N5FS84)
#
#
#
# ## The intro
#
# But before we get going on the final project, there's still one lecture to go. The purpose of today's lecture is twofold.
#
# **Topic 1: Interactive dataviz**
#
# The theory we started reading last week also draws on *interactive dataviz*. Being able to play with the data: Toggle the view. Zoom. Drag. Show more details. All those things. Those are a key part of modern data visualization.
#
# To really master interactive visualizations, you should check out methods in JavaScript, especially [D3](https://d3js.org). Given that we only have 5ECTS for this class, we don't have time for that. But luckily Python has some pretty good options for interactive visualizations.
#
# Today we'll explore [`Bokeh`](https://docs.bokeh.org/en/latest/), which provides lots of nice interactive functionality to Python.
#
# **Topic 2: Setting up a webpage**
#
# One of the deliverables for the final project is to create a webpage with your visualizations. There are many ways of doing that. This year - as we are always striving to make the course better, we're making web-page creation part of the course (until now, I thought that was something everyone just knew how to do), so that's what we'll focus on in the second half of today's lecture.
#
#
# **Video summary**
#
# And of course, I also deliver all of this information to you in a friendly video format.
#
# [](https://www.youtube.com/watch?v=YTRFco2CE-o)
#
#
# Thus the program is
#
# * Finish up reading on narrative dataviz (Part 1)
# * Create an interactive visualizaitons of our good old crime data (Part 2)
# * Learn how to set up a web-page on github (Part 3)
# ## Part 1 More on narrative data visualization
#
# *Reading*. More on Narrative Visualization.
# * <NAME> and <NAME>. [Narrative Visualization: Telling Stories with Data](http://vis.stanford.edu/files/2010-Narrative-InfoVis.pdf), section 4-6.
#
# > ***Exercise***: Questions to the text
# >
# > * What's the point of Figure 7?
# > * Use Figure 7 to find the most common design choice within each category for the Visual narrative and Narrative structure (the categories within visual narrative are 'visual structuring', 'highlighting', etc).
# > * Check out Figure 8 and section 4.3. What is your favorite genre of narrative visualization? Why? What is your least favorite genre? Why?
#
# ## Part 2: Bokeh and Toggling Histograms
#
# Ok. Let's get started. First a general announcement on the data.
#
# > **Announcement**
# > * *During this entire lecture we are going to work with the **Crime data***.
# > * *All part uses data for the **period 2010-2018** (remember to filter your data)*
# > * *For info on how to include Bokeh in your notebooks (instead of generating html), please scan through the guide [Bokeh: Using with Jupyter](https://docs.bokeh.org/en/latest/docs/user_guide/jupyter.html#userguide-jupyter-notebook). Come back to this one when you need it*.
# > * *We aim to give you a gentle start with Bokeh, but to get a sense of how things work, I suggest you surf the web, find a Bokeh tutorial and scan through it. Also click around a bit in [the official docs](https://docs.bokeh.org/en/latest/docs/user_guide.html#userguide).*
#
#
# Now, to get you in the mood here's a little gif to illustrate what the goal of this exercise is:
# 
#
# > ***Exercise***: Recreate the results from **Week 2** as an interactive visualisation (shown in the gif). To complete the exercise, follow the steps below and create your own version of the dataviz.
#
# ### Data prep
#
# A key step is to set up the data right. So for this one, we'll be pretty strict about the steps. The workflow is
#
# 1. Take the data for the period of 2010-2018 and group it by hour-of-the-day (see Week 2)
# 2. We would like to be able to easily compare how the distribution of crimes differ from each other, not absolute numbers, the focus here will be *normalized data*:
# * To normalise data within a crime category you simply divide the count for each hour by the total number of this crime type. (To give a concrete example in the `ASSAULT` category, take the number of assault-counts in 1st hour and divide by the total number of assaults, then you divide the number of assaults in 2nd hour by the total number of assaults and so on)
# * It will be easiest if you organize your dataframe as shown in [this helpful screenshot](https://github.com/suneman/socialdataanalysis2020/blob/master/files/week8_1_data.PNG?raw=true).
#
# If you've followed these steps, your data should be ready! Take a moment to celebrate. We now follow the [Bokeh guide for categorical data](https://docs.bokeh.org/en/latest/docs/user_guide/categorical.html):
#
# 1. First, let's convert our **Pandas Dataframe** to **Bokeh ColumnDataSource**:
# ```python
# source = ColumnDataSource(your_processed_dataframe)
# ## it is a standard way to convert your df to bokeh
# ```
# 2. We also need to create an empty figure (we will add our stuff here later on). Mini sub-exercise: Find the a guide how to define a figure in Bokeh online. Here is a little help:
# ```python
# p = figure(...., x_range = FactorRange(factors=hours), ...) #p is a standard way to call figures in Bokeh
# #make sure to add x_range. In my case hours is a list on the form ['1', '2', '3' ... , '24']
# #read up on the FactorRange in the guide
# #do not forget to add other attributes to the figure, e.g. title, axis names and so on
# ```
# 3. Now we are going to add the bars. In order to do so, we will use **vbar** (see the guide for help):
# ```python
# bar ={} # to store vbars
# ### here we will do a for loop:
# for indx,i in enumerate(focuscrimes):
# bar[i] = p.vbar(x='name_of_the_column_that_contain_hours', top=i, source= src,
# ### we will create a vbar for each focuscrime
# legend_label=i, muted_alpha=..., muted = ....)
# #i stands for a column that we use, top=y; we are specifying that our numbers comes from column i
# #read up what legend_label, muted and muted_alpha do... you can add more attributes (you HAVE TO)
# ```
# 4. The last thing to do is to make your legend interactive and display the figure:
# ```python
# p.legend.click_policy="mute" ### assigns the click policy (you can try to use ''hide'
# show(p) #displays your plot
# ```
# 5. You will notice that the legend appears in the middle of the figure (and it occludes some of the data). In order to fix this look into [this guide](https://stackoverflow.com/questions/26254619/position-of-the-legend-in-a-bokeh-plot) as a start. Below are some code snippets that you can use to deal with this problem (but read the guide first):
# ```python
# items = [] ### for the custom legend // you need to figure out where to add it
# items.append((i, [bar[i]])) ### figure where to add it
# legend = Legend(items=..., location=.....) ## figure where to add it
# p.add_layout(...., ...) ## figure where to add it
# ### if you read the guide, it will make sense
# ```
#
# Now you should be able to recreate this amazing visualisation :) Thanks to **Germans** for help creating this exercise.
#
#
# **EXTRA feature**: If you're interested in detailed instructions for more Bokeh visualizations for your final project, you can find more inspiration **[here](https://github.com/suneman/socialdata2021/blob/main/lectures/Week8_extra_bokeh.ipynb)**.
# ## Part 3: website creation.
# You've asked for it, we've heard you: we're going to look at how we can put all our beautiful visualizations on a website.
#
# As there are people with vastly different backgrounds in this course, the following contains two mutually exclusive parts:
#
# - If you are still struggling a bit with python and have no previous experience with web-dev, follow "Creating a Minimalist Website with Hugo and Markdown" for a nice walk-through
# - If you are ~~a nerd~~ comfortable with python, git and the command-line, and want something a bit more powerful: jump down to "Interactive dashboards with Dash"
#
# > ***Exercise***: Follow the instructions below to build your own website. Add you Bokeh visualization to it.
# ### Part 3, branch A: If you're new to this: Creating a Minimalist Website with Hugo and Markdown
#
# #### Preamble: A quick intro to Git
# [Git](https://git-scm.com/) is an essential tool for anyone who works with data or code. It makes it easy to keep various versions of your code, and to collaborate on code with other people by hosting code on, for example, [github](https://github.com/).
#
# Since we will also rely on github for hosting our website, it's a good idea to go through a brief introduction to git and github in case you're not familiar with them: [this tutorial should provide a good first exposure.](https://product.hubspot.com/blog/git-and-github-tutorial-for-beginners)
#
#
# #### Hugo
# [Hugo](https://gohugo.io/) is a framework for quickly creating beautiful websites using nothing more than the Markdown you are used to from Jupyter notebooks.
#
# Before going further, make a new directory, navigate to it in a terminal (for whose of you using windows, you should ~~stop using windows~~ use [git-bash](https://www.atlassian.com/git/tutorials/git-bash)), and turn it into a git repository by running:
#
# ```
# git init
# git branch -M main
# ```
#
# Within that directory, go through Hugo's quick-start tutorial [here](https://gohugo.io/getting-started/quick-start/). Pick a nice theme, and play around with your website (add some text, try some of the formatting options, etc.).
#
# Once you are done, commit your changes:
#
# ```
# git add --all
# git commit -m "Some message detailing what you did"
# ```
#
# #### Hosting your website on Github Pages
#
# [Github Pages](https://pages.github.com/) offers an easy way to host your static websites directly on github. We'll do that with the basic website we just created. You can find some information on how to do this [here](https://bwaycer.github.io/hugo_tutorial.hugo/tutorials/github-pages-blog/): we'll detail the first alternative from that tutorial, but if you're comfortable with git, we suggest trying the second alternative as it is more flexible.
#
# *Initialize a new github repository*: Go to github and create a [new repository](https://github.com/new). On the next page, follow the instructions under "…or push an existing repository from the command line", and push the repository containing your Hugo website. Follow the instructions on the tutorial above ([this one](https://bwaycer.github.io/hugo_tutorial.hugo/tutorials/github-pages-blog/)):
#
# - Setup github pages for your repository
# - Change hugo's configuration so that the build directory is "docs"
# - Re-build your website
# - Commit/push your changes, and check that you can see the website on github.
#
# For reference, mine is [here](https://ldorigo.github.io/dataviz_website_demo/).
#
# Note: you might have to change Hugo's configuration (`config.toml`) so that `base_url` points to the url of your page. So in my case:
#
# ```
# baseURL = "https://ldorigo.github.io/dataviz_website_demo"
#
# ```
# #### Adding some visualizations
#
# You now (should) have a website, but it's kind of empty. Let's populate it with a few different visualizations from previous weeks.
#
# *Simple matplotlib figures*: Matplotlib is easy, as it just exports plain images that you can include in your website. Choose a nice visualization form one of the previous weeks, save it to a file (hint: look into `pyplot.savefig`), and add it to a page on your website (see [here](https://www.austinfriday.com/tutorial/adding-images-to-your-hugo-pages/) for a simple walkthrough).
#
# **Note:** because we are taking the easy road and building our website in the docs folder, you'll probably have an issue when viewing the images once your site is online. See [here](https://discourse.gohugo.io/t/can-i-reference-baseurl-from-content/2686/2) for a fix:
#
# * Create a file `your_website_folder/layouts/shortcodes/baseurl.html` (you might have to create the `shortcodes` folder)
# * Put the following line in that file: `{{ .Page.Site.BaseURL }}`
# * You can then reference images in your markdown file as follows: ``
#
# Again, you can see my (very elaborate) page for an example of how it should look [here](https://ldorigo.github.io/dataviz_website_demo/pages/matplotlib/).
#
# *Bokeh Plots*: Now that we know how to put static images, let's notch it up a little bit and add those fantastic interactive plots from the previous exercises.
#
# First, see [here](https://docs.bokeh.org/en/latest/docs/user_guide/embed.html) for how to export bokeh plots as html.
# If you open the resulting html file in a text editor, you should see there's two parts: a `<head> ... </head>`, where the bokeh library is loaded, and a `<body> ... </body>`, where the plots are rendered.
#
# The easiest way to include those plots into a new hugo page is:
#
# * Create a blank markdown page
# * At the beginning of the page, copy-paste the two `<script>`s that are in the `<head>` of your html file:
#
# ```
# <script type="text/javascript" src="https://cdn.bokeh.org/bokeh/release/bokeh-2.3.0.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
# <script type="text/javascript">
# Bokeh.set_log_level("info");
# </script>
# ```
#
# * Where you want your plots to appear, copy-paste everything that's inside the `<body>` of your html file
# * To allow rendering html, you need to add the following two lines to your hugo configuration:
#
# ```
# [markup.goldmark.renderer]
# unsafe = true
# ```
#
# You should now be able to see your bokeh plot on your page. If you want to add more plots in the same page, you can repeat the above, but you **don't** need to add what's in the `<head>` again (that is, the `<script>`s in the `<head>` should only be on your page once).
#
# **Note:** _Just pasting all the html on top level isn't exactly coding best practice, but it's easy and it'll do for a small project like this. If you can stomach it, there are better ways, such as the one desribed [here](https://angelov.ai/post/2020/05/interactive-plots-with-bokeh/)._
#
#
# For an example of what the markdown should look like, see [here](https://raw.githubusercontent.com/ldorigo/dataviz_website_demo/main/content/pages/bokeh.md) - and see [here](https://ldorigo.github.io/dataviz_website_demo/pages/bokeh/) for a preview of what it should look like.
#
# #### Closing Comments
#
# Congrats, you can now make a website! We've only scratched the surface, but this should be enough to get started on your projects. Note that we haven't talked about plotly and/or folium, but the process is virtually identical to bokeh: export to html, and include the exported html in your pages.
#
# Godspeed!
#
#
# ### Part 3, branch B: If you feel comfortable: Interactive dashboards with Dash
# If you're reading this, we assume that you have solid basics of Python, Git, and the command line - so this part is more terse, and we don't hold your hand through it like before. You have been warned :-)
#
#
# [Dash](https://dash.plotly.com/) is an open-source framework made by the same team behind Plotly. It's great for making interactive data visualization web-apps/dashboards. You can take a look at the [gallery](https://dash-gallery.plotly.host/Portal/) to get a feel of what it is possible to do with it.
#
# To get acquainted with it, follow the tutorial [starting here](https://dash.plotly.com/installation), and up to at least [section 3](https://dash.plotly.com/basic-callbacks). Play around with it and try to make a small page with at least:
#
# - Basic page navigation and/or tabs
# - A simple interactive callback
# - A static image
#
# > **Note:** I recommend looking into [Dash Bootstrap Components](https://dash-bootstrap-components.opensource.faculty.ai/) as a quick and easy way to make a visually appealing website
#
# Although it is perfectly possible to add static images (like plots made in matplotlib) or to embed external html (such as a visualization exported from Bokeh), Dash really shines when you use it together with Plotly, as it has a rich interface to generate interactive graphs on-the fly.
#
# Read through [section 4](https://dash.plotly.com/interactive-graphing) of the tutorial and try to add an interactive plot to your website.
#
# Last but not least, we need to host our shiny new web-app. Unfortunately, github pages can't run python in the back-end. One solution is to host it on [Heroku](https://www.heroku.com/): you can do so by following [this tutorial](https://austinlasseter.medium.com/how-to-deploy-a-simple-plotly-dash-app-to-heroku-622a2216eb73). Note that the free version of Heroku is limited to 500Mb of RAM - for small(-ish) datasets, that should be more than enough. If you need more, I can recommend [Pythonanywhere](https://www.pythonanywhere.com/) as a paid, but cheap, alternative.
#
# ---
# Thanks to TA **Luca** for a great job putting together the website exercise!
#
| lectures/Week8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Iteration
#
# This notebook is intended to provide a introduction to the idea of iteration as a tool for addressing problems in physics and engineering that have no simple analytic or closed form solution. Almost every significant computational problem I have addressed in my learning has involved some form of an iterative model.
#
# There are a lot of descriptions of iteration as a computational technique out there but this [mathematical definition](https://www2.edc.org/makingmath/mathtools/iteration/iteration.asp) fits best with my experience. The previous link has some lovely examples from mathematics including the Mandlebrot set.
#
# <blockquote> Iteration is the repeated application of a function or process in which the output of each step is used as the input for the next iteration. </blockquote>
#
#
# ## Projectile Motion: Ideal Functions
#
# As an example I will use basic projectile motion which we have a fairly comfortable understanding of it's ideal characteristics and some ways in which air drag (which requires an iterative analysis) affects the trajectory.
#
# In the case of ideal projectile motion we have no air drag which leads to no acceleration in the x direction and $-9.81 \frac{m}{s^2}$ (the value used in the code below) in the y direction.
#
# $$ x(t) = x_0 + v_{0_x} \cdot t$$
# and
# $$ y(t) = y_0 + v_{0_y}\cdot t + \frac{1}{2} a_y \cdot t^2 $$
#
# Taking components of the inital velocity as $v_{0_x} = v_0 cos(\theta)$ and $v_{0_y} = v_0 sin(\theta)$ which leads to
#
# $$ x(t) = x_0 + v_0 cos(\theta) \cdot t$$
# and
# $$ y(t) = y_0 + v_0 sin(\theta) \cdot t + \frac{1}{2} a_y \cdot t^2 $$
#
# If we know the initial velocity and the angle we have a well defined function that describes both x(t) and y(t). Here's how that analytic solution can be presented in a notebook.
#
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# ## Initial Conditions and Constants
#
#
# +
# initial conditions
initialVelocity = 30.0  # launch speed in m/s
launchAngleDeg = 32.    # launch angle in degrees
launchAngRad = np.deg2rad(launchAngleDeg)  # np.deg2rad is clearer than *np.pi/180.
initialXposition = 0.0  # in m
initialYposition = 3.1  # in m
# constants
gravity = -9.81  # free-fall acceleration in m/s/s (negative = downward)
maxtime = 5.     # maximum time for model in s
numpoints = 100  # number of points on plot to calculate - an integer!
# resolve the launch velocity into horizontal and vertical components
initialXvel = initialVelocity*np.cos(launchAngRad)
initialYvel = initialVelocity*np.sin(launchAngRad)
# f-strings are the modern, more readable replacement for %-formatting
print(f"Launch Velocity: {initialVelocity:.2f} m/s; Vx: {initialXvel:.2f} m/s Vy: {initialYvel:.2f} m/s")
print(f"Launch Angle: {launchAngleDeg:.2f} degrees")
print(f"Launch Location x: {initialXposition:.2f} m y: {initialYposition:.2f} m")
# -
# ## Calculate x(t) and y(t):
#
# First create a set of times that we will use to calculate the x and y positions using the [np.linspace](https://numpy.org/doc/stable/reference/generated/numpy.linspace.html) function. Then apply the equations stated above. Because the time is in an array to square each time in the array I need the [np.square](https://numpy.org/doc/stable/reference/generated/numpy.square.html) function to be sure it squares each term not some other vector squaring operation. Turns out (I tried it) that np.square(modeltime) = modeltime\* modeltime so I didn't need to worry.
# Sample times from t = 0 to maxtime, inclusive (numpoints evenly spaced values).
modeltime = np.linspace(0.,maxtime,numpoints)
# Constant-velocity motion in x: x(t) = x0 + v0x*t (no drag, zero x acceleration).
xposition = initialXposition + initialXvel*modeltime
# Uniform acceleration in y: y(t) = y0 + v0y*t + (1/2)*g*t^2; np.square squares elementwise.
yposition = initialYposition + initialYvel*modeltime + 0.5*gravity*np.square(modeltime)
# ## Plot
#
# Now we plot it....
# +
# Plot the closed-form (analytic) trajectory on its own.
plt.rcParams.update({'font.size': 16})  # larger fonts so labels are easier to read
fig1, ax1 = plt.subplots(figsize=(10, 9))
ax1.plot(xposition, yposition, color='red', linestyle='-', linewidth=3.0,
         label="analytic")
ax1.set(xlabel='x position (m)', ylabel='y position (m)',
        title='Trajectory of Ideal Projectile')
ax1.grid()
plt.legend(loc=3)
plt.show()
# -
# ## What if Something Changes Along the Way?
#
# The previous solution to the problem doesn't work if any part of my solution changes in a complex way throughout the process. This next version of projectile motion I am going to take it one step at a time through the process which is what I will need to do when I have a more complex problem. For this next example I am not going to change any part of the problem I'm just going to get there by a more step by step process. At each point I'll calculate how far I go in each little dt of time and add it to my previous location. Very much like integrating by adding up rectangles or trapezoids.
#
# ### Important Note:
#
# In this version of the projectile motion I am assuming that the velocity is constant over each iteration in time. I know that it is not but the underlying concept for iterative solutions is to assume that everything is constant so we are making a linear projection of the behavior. This assumption of constant velocity takes place in lines 37-40 roughly (unless I added lines of code after I wrote this comment) in the next cell.
#
# ### Set up arrays
#
# When I did the previous calculation python assumed that because the modeltime was an array of some number of points that I wanted to create x(t) and y(t) and the same length of array and automatically filled in the values. In this case I don't know ahead of time where I am going to be at the 5th time point or nth time point so I need to create the arrays for the x(t) and y(t) myself. To do this I use the [np.full_like](https://numpy.org/doc/stable/reference/generated/numpy.full_like.html) function to create and array that has the same 'shape' as another array and fill in some placeholder values (I usually use 0. as I did in this case). After setting up the position arrays I need to set the first point to have the initial x and y values. This requires me to use and index to pick out a particular element in the array.
#
# The format for doing this is ```xpoints[0]``` is the first point in the array. ```xpoints[iterpoints]``` is the last point in the array.
#
# Because velocity and acceleration might both be changing I need to also create arrays to track their behavior.
# +
itertime = 5.0    # total simulated time in s
iterpoints = 10   # number of iteration points (keep this small at first!)
iterationTime = np.linspace(0., itertime, iterpoints)
deltaTime = iterationTime[1] - iterationTime[0]  # size of the time step between points

# Allocate state arrays shaped like iterationTime and zero-filled:
# position, velocity and acceleration for each axis.
xPos, yPos = np.full_like(iterationTime, 0), np.full_like(iterationTime, 0)
xVel, yVel = np.full_like(iterationTime, 0), np.full_like(iterationTime, 0)
xAccel, yAccel = np.full_like(iterationTime, 0), np.full_like(iterationTime, 0)

# Seed element 0 with the launch conditions.
xPos[0], yPos[0] = initialXposition, initialYposition
xVel[0], yVel[0] = initialXvel, initialYvel
xAccel[0], yAccel[0] = 0., gravity

# Euler iteration: treat velocity (and acceleration) as constant across each step.
for step in range(1, iterpoints):
    prev = step - 1
    # x direction: advance position; velocity and acceleration are unchanged here
    xPos[step] = xPos[prev] + xVel[prev]*deltaTime
    xVel[step] = xVel[prev]
    xAccel[step] = xAccel[prev]
    # y direction: position advances at the previous velocity,
    # then the velocity is updated by the (constant) acceleration
    yPos[step] = yPos[prev] + yVel[prev]*deltaTime
    yVel[step] = yVel[prev] + yAccel[prev]*deltaTime
    yAccel[step] = yAccel[prev]

## Print statements if needed to see what's happening for debugging
# print(yAccel)
# print(yPos)
# print(yVel)
# -
# ## Plot It Again and NOTICE!
#
# I have started this notebook with just a few iterations. If someone has 'turned up' the number of iterations above 10 put it back down to 10. Explain why the iterated data points go above the ideal trajectory of the projectile. Then, when you understand it, slowly increase the number of iterations to 100 or 1000. This is like doing integration with rectangles and slowly making the width of the rectangles smaller and smaller. This is a feature of iteration that you need to be aware of. Because we assume the velocity of the object is constant for the iteration time (deltaTime) there are inherent errors that will go away when we increase the number of iterations. It is however UNWISE to start with a large number of iterations until you are confident in your computational model.
# +
# Overlay the iterated points on the analytic curve to visualize the Euler error.
plt.rcParams.update({'font.size': 16})  # larger fonts so labels are easier to read
fig2, ax2 = plt.subplots(figsize=(10, 9))
ax2.plot(xposition, yposition, color='red', linestyle='-', linewidth=3.0,
         label="analytic")
ax2.scatter(xPos, yPos, color='blue', marker='x', linewidth=1.0,
            label="iteration")
ax2.set(xlabel='x position (m)', ylabel='y position (m)',
        title='Trajectory of Ideal Projectile/Iteration')
ax2.grid()
plt.legend(loc=3)
plt.show()
# -
# ## Now to Add Excitement!
#
# So what if the y acceleration of this object depended on the height above the ground. It doesn't but we can imagine that if the units were km and the earth was a smaller planet then it might make sense. Let's imagine that the acceleration of gravity decreases by 1% for every m above the starting height (which is how the code below implements it). Nothing happens to the x part of the problem. Here's how I implemented this idea.
#
# ```yPosE[i]-yPosE[0])``` is the number of meters above the starting point.
#
# ```yPosE[i]-yPosE[0])/100.``` is 1% if the object is 1 m above the starting point.
#
# ```deltaGravity = -gravity*(yPosE[i]-yPosE[0])/100.)``` This gives the change in the 'gravity' which will be positive when the object is above the starting point and negative when it is below the starting point. This is an important step and allows me to test my code in an interesting way. If I change the 100 to 1000 then gravity only changes by 0.1% for each m. If I make the 100 a 100000 then the new model should behave like the old model which helps me understand whether I wrote the code correctly.
#
# ```yAccelE[i]=gravity + deltaGravity``` adjusts the 'gravity' for the next step.
#
# Before we worry about how to do this think about what you expect to happen. Will it go higher? ...further? Sometimes one doesn't know but it's always worth thinking about.
#
# ## Because Notebooks Remember Everything:
#
# From an implementation perspective I am being careful to change the names of many of my arrays as I create each example. Remember that the notebook remembers what I've done before and I want to be able to compare each of my examples in my plots. This means I need arrays with different names.
#
# ## Things That Happen:
#
# You will notice a number of print statements buried in various parts of the code. This happened because when I first wrote this up and plotted it the second and third plots looked exactly the same and I knew that wasn't right but I couldn't figure out where my mistake was. Eventually, using the print statements, I discovered I had forgotten to rename one variable and it was screwing up the whole calculation. This is what happens and it teaches us to test our code before relying on it.
#
#
# +
# Same iteration times as before so the new data points line up with the
# previously calculated ones. The trailing E tag marks the "Excitement"
# (height-dependent gravity) version of every array.
xPosE, yPosE = np.full_like(iterationTime, 0), np.full_like(iterationTime, 0)
xVelE, yVelE = np.full_like(iterationTime, 0), np.full_like(iterationTime, 0)
xAccelE, yAccelE = np.full_like(iterationTime, 0), np.full_like(iterationTime, 0)

# Seed element 0 with the launch conditions.
xPosE[0], yPosE[0] = initialXposition, initialYposition
xVelE[0], yVelE[0] = initialXvel, initialYvel
xAccelE[0], yAccelE[0] = 0., gravity

for step in range(1, iterpoints):
    prev = step - 1
    # x direction is untouched by the new physics: constant velocity, zero acceleration
    xPosE[step] = xPosE[prev] + xVelE[prev]*deltaTime
    xVelE[step] = xVelE[prev]
    xAccelE[step] = xAccelE[prev]
    # y direction: Euler step for position, then velocity (constant-v assumption per step)
    yPosE[step] = yPosE[prev] + yVelE[prev]*deltaTime
    yVelE[step] = yVelE[prev] + yAccelE[prev]*deltaTime
    # 'gravity' weakens by 1% per metre above the starting height (strengthens below);
    # the correction is positive above yPosE[0] because gravity itself is negative.
    heightCorrection = -gravity*(yPosE[step] - yPosE[0])/100.
    yAccelE[step] = gravity + heightCorrection

## Print statements if needed to see what's happening for debugging
# print(yAccelE)
# print(yPosE)
# print(yVelE)
# -
# ## Plot All of Them:
# +
# All three models on one set of axes: analytic curve, plain Euler iteration,
# and the height-dependent-gravity ("excitement") iteration.
plt.rcParams.update({'font.size': 16})  # larger fonts so labels are easier to read
fig3, ax3 = plt.subplots(figsize=(10, 9))
ax3.plot(xposition, yposition, color='red', linestyle='-', linewidth=3.0,
         label="analytic")
ax3.scatter(xPos, yPos, color='blue', marker='x', linewidth=1.0,
            label="iteration")
ax3.scatter(xPosE, yPosE, color='green', marker='o', linewidth=1.0,
            label="excitement")
ax3.set(xlabel='x position (m)', ylabel='y position (m)',
        title='Trajectory of Ideal Projectile/Iteration')
ax3.grid()
plt.legend(loc=3)
plt.show()
# -
| IterationI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/holps-7/NLP-weekly/blob/master/NLP_week1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="7Xo_Qgr3pWFf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ca5e128f-5b88-4e1f-e0c7-7862be54218a"
import nltk
# nltk.download() accepts ONE identifier or a LIST of identifiers. The original
# call nltk.download('brown', 'inaugural', 'book', 'webtext', 'wordnet') passed
# the extra names positionally, so 'inaugural' was silently interpreted as the
# download_dir argument and only 'brown' was actually fetched.
nltk.download(['brown', 'inaugural', 'book', 'webtext', 'wordnet'])
# + id="xbxWs4MqwbUD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="56575993-5c65-473c-e7e8-5df7f621c1a5"
# Redundant now that 'webtext' is in the list above; harmless (cached downloads are skipped).
nltk.download('webtext')
# + id="xKDkIVyLtjmM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="98da2a90-48a3-4e27-80db-27d260517c52"
# List the genre categories available in the Brown corpus.
from nltk.corpus import brown
brown.categories()
# + id="jrVxRKSDuHaE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5c169f80-5414-4df6-e025-5875290201cb"
# Tokens from the 'reviews' genre.
brown.words(categories='reviews')
# + id="95ZispCqumqY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="25de73af-0602-4e00-887a-9625120cf38c"
# Inaugural-address corpus: one file per US presidential inauguration.
from nltk.corpus import inaugural
inaugural.fileids()
# + id="3PjZwshgvUPL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="8c2f7ca1-0b46-4515-a9e3-f055497ec990"
# First ten tokens of Obama's 2009 address.
inaugural.words(fileids = '2009-Obama.txt')[:10]
# + id="mQqpOSynvkDe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="2a6110ee-8143-4f3c-c37a-97e9689b5676"
# First ten tokens of Lincoln's 1861 address.
inaugural.words(fileids = '1861-Lincoln.txt')[:10]
# + id="kV0HpiCjv4Df" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="4210c2c1-d93f-4844-e271-5b44f82b2ad9"
# Re-join the first 50 tokens of Trump's 2017 address into a readable string.
' '.join(inaugural.words(fileids = '2017-Trump.txt')[:50])
# + id="3guSw9KRwNtt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="48fa73ee-e0c2-4f22-9c99-38f4a8d27497"
# Web-text corpus (forum, chat and movie-script samples).
from nltk.corpus import webtext
webtext.fileids()
# + id="O6FCsVFxwvvx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d7503f1a-51dc-44ab-fa6e-1a75e8ccacc8"
# Conditional frequency distribution keyed on word length:
# cfd[12] is the FreqDist of every 12-character token in the 2017 address.
text1 = ' '.join(inaugural.words(fileids = '2017-Trump.txt'))
from nltk.probability import ConditionalFreqDist
cfd = ConditionalFreqDist((len(word), word) for word in text1.split())
cfd[12]
| NLP_week1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Detecção de Pneumonia atraves de Raio-X
# ## Motivação
# ###### A Sociedade Brasileira de Pneumologia NÃO permite identificar pneumonia apenas na clinica. É necessário o exame de raio-x
# <img src="img/deep learning with python.jpg" width="300">
# ### Bibliotecas
# ###### PILL : manipulação de imagens
# ###### KERAS : deep learning e data aumentation
# ### Configuração Computador
# ###### i5 7ª 8GB 1050 2GB
# ### DataSet
# https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia<br>
# •Imagens <br>
# •RGB e GrayScale<br>
# •Full HD, HD, 480p
# <img src="img/dataset_train.png" width="600">
# <img src="img/dataset_test.png" width="600">
# ###### Data Aumentation
# ImageDataGenerator (Keras)
# ### Mediante a diferença no tamanho das imagens, torna-se necessário definir um tamanho padrão
# Reflete no consumo da memória principal RAM/GPU
IMG_SIZE = 200
# ### Importações
# +
#Manipulação de dados
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
#Keras
import keras
from keras import models
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
#To get the name of the images in a list
import glob
from random import shuffle
# import BatchNormalization
from keras.layers.normalization import BatchNormalization
# -
# # Exemplo de dado
# Load one sample x-ray and preprocess it the same way the training data will be.
imagem = Image.open("/home/franklyn/Downloads/chest_xray/chest_xray/train/NORMAL/IM-0115-0001.jpeg")
imagem = imagem.convert('L')  # convert image to grayscale (single channel)
# NOTE(review): Image.ANTIALIAS is a deprecated alias of Image.LANCZOS (removed in Pillow 10)
imagem = imagem.resize((IMG_SIZE, IMG_SIZE), Image.ANTIALIAS)
imagem2 = np.array(imagem)
# reshape to (batch, height, width, channels) = (1, IMG_SIZE, IMG_SIZE, 1),
# the 4-D input layout Keras Conv2D layers expect
imagem = np.array([imagem2]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
plt.imshow(imagem2)
# # Carregar Dados
# +
#gets train, validation and test data
def load_train_data():
    """Load training chest x-rays as shuffled [image_array, label] pairs.

    Each image is converted to grayscale and resized to IMG_SIZE x IMG_SIZE.
    Label 0 = normal, label 1 = pneumonia.
    """
    train_data = []
    # (glob pattern, class label) pairs replace the two duplicated loops;
    # the original also had a dead `label = 1` local, removed here.
    sources = [
        ("/home/franklyn/Downloads/chest_xray/chest_xray/train/NORMAL/*.jpeg", 0),
        ("/home/franklyn/Downloads/chest_xray/chest_xray/train/PNEUMONIA/*.jpeg", 1),
    ]
    for pattern, label in sources:
        for path in glob.glob(pattern):
            img = Image.open(path)
            img = img.convert('L')  # grayscale
            img = img.resize((IMG_SIZE, IMG_SIZE), Image.ANTIALIAS)
            train_data.append([np.array(img), label])
    shuffle(train_data)
    return train_data
def load_validation_data():
    """Load validation chest x-rays as shuffled [image_array, label] pairs.

    Each image is converted to grayscale and resized to IMG_SIZE x IMG_SIZE.
    Label 0 = normal, label 1 = pneumonia.
    """
    validation_data = []
    # (glob pattern, class label) pairs replace the two duplicated loops;
    # the original also had a dead `label = 1` local, removed here.
    sources = [
        ("/home/franklyn/Downloads/chest_xray/chest_xray/val/NORMAL/*.jpeg", 0),
        ("/home/franklyn/Downloads/chest_xray/chest_xray/val/PNEUMONIA/*.jpeg", 1),
    ]
    for pattern, label in sources:
        for path in glob.glob(pattern):
            img = Image.open(path)
            img = img.convert('L')  # grayscale
            img = img.resize((IMG_SIZE, IMG_SIZE), Image.ANTIALIAS)
            validation_data.append([np.array(img), label])
    shuffle(validation_data)
    return validation_data
def load_test_data():
    """Load test chest x-rays as shuffled [image_array, label] pairs.

    Each image is converted to grayscale and resized to IMG_SIZE x IMG_SIZE.
    Label 0 = normal, label 1 = pneumonia.
    """
    test_data = []
    # (glob pattern, class label) pairs replace the two duplicated loops;
    # the original also had a dead `label = 1` local, removed here.
    sources = [
        ("/home/franklyn/Downloads/chest_xray/chest_xray/test/NORMAL/*.jpeg", 0),
        ("/home/franklyn/Downloads/chest_xray/chest_xray/test/PNEUMONIA/*.jpeg", 1),
    ]
    for pattern, label in sources:
        for path in glob.glob(pattern):
            img = Image.open(path)
            img = img.convert('L')  # grayscale
            img = img.resize((IMG_SIZE, IMG_SIZE), Image.ANTIALIAS)
            test_data.append([np.array(img), label])
    shuffle(test_data)
    return test_data
# -
train_data = load_train_data()
test_data = load_test_data()
# ### Separate classes from the features (image)
# +
# Split the [image, label] pairs into 4-D image tensors of shape
# (n, IMG_SIZE, IMG_SIZE, 1) and flat label arrays — the shapes Keras
# fit()/evaluate() expect.
train_images = np.array([i[0] for i in train_data]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
train_labels = np.array([i[1] for i in train_data])
test_images = np.array([i[0] for i in test_data]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
test_labels = np.array([i[1] for i in test_data])
# -
#
# # Redes Neurais Convolucionais
#
# ## Camadas: Denso VS Convolucionais:
# Denso: Achar padroes globais
# Conv : Encontrar padrões locais na imagem
# ## Compilação
# Optimizer: <https://keras.io/optimizers/>
# loss -> model will try to minimize: <https://keras.io/losses/>
# loss='binary_crossentropy' for binary decision
# metrics() -> For any classification problem you will want to set this to metrics=['accuracy']
# ## Função de ativação
# A mais indicada para redes Convolucionais é a RELU
# ## Camadas
# Dropout: Evita overfitting
# Conv2D: Create N filter on image based in the tuple(Kernel)
# Flatten: Unifies the filter outputs into a single linear vector; the 2-D image structure is lost
# SpatialDropout2D: If adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers)
# SeparableConv2D ?
# MAXPOOLING: Simplify the information, just the max value in kernel will be outputed
# https://computersciencewiki.org/index.php/File:MaxpoolSample2.png
# BatchNormalization: to improve the training speed by normalizing the features from input and hidden layers
# # Criação do Modelo
# ## Ultima camada pode ser softmax ou sigmoid
# ### Exemplo com sigmoid
# +
# Baseline CNN: three conv/pool stages, then a small dense head ending in a
# single sigmoid unit for the binary (normal vs pneumonia) decision.
modelo_original = Sequential([
    Conv2D(10, (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(10, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(10, (3, 3), activation='relu'),
    Flatten(),
    Dropout(0.2),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(50, activation='relu'),
    Dense(1, activation='sigmoid'),
])
# +
# Same architecture as the baseline but with 12 filters in the first conv
# layer; this copy will be trained on the augmented data generators.
modelo_aumentado = Sequential([
    Conv2D(12, (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(10, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(10, (3, 3), activation='relu'),
    Flatten(),
    Dropout(0.2),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(50, activation='relu'),
    Dense(1, activation='sigmoid'),
])
# -
# ### Exemplo com softmax
# NOTE(review): this inert example ends in Dense(1, activation='softmax');
# softmax over a single unit always outputs 1.0, so it can never learn a
# binary decision — use Dense(2, 'softmax') with one-hot labels, or keep
# Dense(1, 'sigmoid') as in the models above.
'''
model = Sequential()
model.add(Conv2D(32, kernel_size = (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 1)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(96, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.3))
model.add(Dense(1, activation = 'softmax'))
'''
# ## Definições
modelo_original.summary()
# ## Compilation
# Binary classification: binary_crossentropy loss pairs with the sigmoid
# output unit; accuracy is tracked during training.
modelo_original.compile(loss='binary_crossentropy', optimizer='adam', metrics = ['accuracy'])
modelo_aumentado.compile(loss='binary_crossentropy', optimizer='adam', metrics = ['accuracy'])
# # Training the model (without data augmentation)
historia_original = modelo_original.fit(train_images, train_labels, batch_size=5, epochs=2, verbose=1)
# # Evaluating the model
loss, acc = modelo_original.evaluate(test_images, test_labels, verbose = 0)
print(acc * 100)  # accuracy as a percentage
# ## Keras NÃO é uma Caixa Preta
# +
# Visualize what the first conv layers of modelo_aumentado respond to for one
# sample x-ray ("Keras is NOT a black box").
imagem = Image.open("/home/franklyn/Downloads/chest_xray/chest_xray/train/NORMAL/IM-0115-0001.jpeg")
imagem = imagem.convert('L')  # grayscale
imagem = imagem.resize((IMG_SIZE, IMG_SIZE), Image.ANTIALIAS)
imagem2 = np.array(imagem)
# reshape to the (batch, height, width, channels) layout Keras expects
imagem = np.array([imagem2]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# Model that exposes the intermediate activations of the first 8 layers.
layer_outputs = [layer.output for layer in modelo_aumentado.layers[:8]]
activation_model = models.Model(inputs=modelo_aumentado.input, outputs=layer_outputs)
activations = activation_model.predict([imagem])
first_layer_activation = activations[0]
# BUG FIX: the original called plt.figure(i) before `i` was defined (NameError).
fig = plt.figure()
fig.set_figheight(15)
fig.set_figwidth(15)
# one subplot per filter of the first conv layer (12 filters)
for i in range(12):
    ax1 = fig.add_subplot(3, 4, i + 1)
    ax1.matshow(first_layer_activation[0, :, :, i], cmap='viridis')
plt.show()
# -
# # Data aumentation
# #### Exemplo
# +
from keras.preprocessing.image import ImageDataGenerator
# Augmentation pipeline: rescale pixels to [0, 1] plus small random
# shifts/shears/zooms; no rotation or horizontal flip.
datagen = ImageDataGenerator(
        rotation_range=0,
        rescale=1./255,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.05,
        zoom_range=0.05,
        horizontal_flip=False,
        fill_mode='nearest')
from keras.preprocessing import image
img = image.load_img(
    "/home/franklyn/Downloads/chest_xray/chest_xray/train/NORMAL/NORMAL2-IM-1360-0001.jpeg"
    , target_size=(IMG_SIZE, IMG_SIZE))
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)  # add the batch dimension expected by flow()
i=0
fig = plt.figure(i)##
fig.set_figheight(15)##
fig.set_figwidth(15)##
# datagen.flow() yields augmented batches forever, so break after 4 samples
for batch in datagen.flow(x, batch_size=1):
    #plt.figure(i)
    ax1 = fig.add_subplot(2,2,i+1)##
    ax1.imshow(image.array_to_img(batch[0]))##
    ##imgplot = plt.imshow(image.array_to_img(batch[0]))
    i+=1
    if i%4==0:
        break
plt.show()
# +
Bsize = 100  # generator batch size; also used to derive steps_per_epoch below
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.05,
        zoom_range=0.05,
        horizontal_flip=False,
        fill_mode='nearest')
# NOTE(review): the validation generator applies the same random
# shifts/shears/zooms; validation data usually gets only the rescale —
# confirm this is intentional.
test_datagen = ImageDataGenerator(rescale=1./255,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.05,
        zoom_range=0.05,
        horizontal_flip=False,
        fill_mode='nearest')
# Labels are inferred from the NORMAL/PNEUMONIA subdirectory names;
# grayscale matches the models' single input channel.
train_generator = train_datagen.flow_from_directory("/home/franklyn/Downloads/chest_xray/chest_xray/train/",
        target_size=(IMG_SIZE, IMG_SIZE),
        batch_size=Bsize,
        class_mode='binary',
        color_mode = "grayscale")
validation_generator = test_datagen.flow_from_directory("/home/franklyn/Downloads/chest_xray/chest_xray/val",
        target_size=(IMG_SIZE, IMG_SIZE),
        batch_size=Bsize,
        class_mode='binary',
        color_mode = "grayscale")
# -
# Train the augmented model from the directory generators.
# NOTE(review): steps_per_epoch=4000/Bsize is a float (40.0); presumably 4000
# approximates the training-set size — confirm against the actual image count.
history = modelo_aumentado.fit_generator(
        train_generator,
        steps_per_epoch=4000/Bsize,
        epochs=2,
        validation_data=validation_generator,
        validation_steps=1)
# # Checking results
import matplotlib.pyplot as plt
# NOTE(review): the history keys 'acc'/'val_acc' are Keras-1/2 names; newer
# tf.keras records 'accuracy'/'val_accuracy' — verify against the installed version.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
modelo.predict()
| Apresentacao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Spatial joins and plotting in Python and geopandas
import numpy as np, pandas as pd, geopandas as gpd, matplotlib.pyplot as plt, matplotlib.cm as cm
from matplotlib.collections import PatchCollection
from descartes import PolygonPatch
from shapely.geometry import Point, Polygon, MultiPolygon
# %matplotlib inline
# ## Load the rental listings and TAZ polygons
# load the shapefile of TAZ superdistrict polygons
taz = gpd.read_file('data/TAZ/bayarea_superdistricts.shp')
# load rental listings point data and build shapely Points from lon/lat columns
df = pd.read_csv('data/rents.csv')
geom = [Point(xy) for xy in zip(df['longitude'], df['latitude'])]
rentals = gpd.GeoDataFrame(df, geometry=geom)
# how many rows do we have in each data set?
print(len(taz))
print(len(rentals))
rentals['geometry'].head()
# Looks like the geometry we loaded is in lat-long. So, set the CRS of this geodataframe to lat-long, so that it knows what coordinate system this is initially.
# NOTE(review): the {'init': 'epsg:4326'} dict form is deprecated in pyproj 2+;
# the plain string 'EPSG:4326' is the modern equivalent.
original_crs = {'init':'epsg:4326'}
rentals.crs = original_crs
# ## Project the data so it's all in the same CRS
# the two layers start in different coordinate reference systems
print(rentals.crs)
print(taz.crs)
# The initial CRSs of these two data sets are not the same, because the saved data we loaded was in lat-long and UTM, respectively. We have two data sets: one point data set of rental listings, in lat-long, and one polygon data set of TAZs, in UTM-10. To work with these two data sets together, they need to be in the same CRS, so we need to project one. We'll project the rental listings from lat-long to the TAZs' UTM CRS.
# project the lat-long listing points into the TAZ layer's UTM CRS
rentals = rentals.to_crs(taz.crs)
print(rentals.crs)
print(taz.crs)
# Ok, now they're the same. To confirm, we can scatter plot the points over the polygons and make sure everything lines up.
# +
fig, ax = plt.subplots(figsize=(5,5))
# extract each polygon as a descartes patch, and add to a matplotlib patch collection...
patches = []
for geometry in taz['geometry']:
    if isinstance(geometry, Polygon):
        patches.append(PolygonPatch(geometry))
    elif isinstance(geometry, MultiPolygon):
        # NOTE(review): iterating a MultiPolygon directly was removed in
        # shapely 2.0 — use geometry.geoms there.
        for subpolygon in geometry: #if geometry is multipolygon, go through each constituent subpolygon
            patches.append(PolygonPatch(subpolygon))
pc = PatchCollection(patches, facecolor='#3399cc', linewidth=1, alpha=0.1)
ax.add_collection(pc)
# extract coordinates into separate numpy arrays and scatter plot
xy = rentals['geometry'].map(lambda point: point.xy)
x, y = zip(*xy)
ax.scatter(x=x, y=y, s=2, color='#3366cc', linewidth=0, alpha=0.7)
# set the figure bounds to the polygons' bounds
left, bottom, right, top = taz.total_bounds
ax.set_xlim((left,right))
ax.set_ylim((bottom,top))
plt.show()
# -
# ## Spatial join rental listings to TAZs
# doubly confirm their CRSs match before doing the spatial join
rentals.crs==taz.crs
# get the taz district for each rental listing, with spatial join
# use op='within' to use rtree spatial index for much faster operation
# NOTE(review): geopandas >= 0.10 renamed op= to predicate=; op= still works
# there but emits a deprecation warning.
taz_rentals = gpd.sjoin(rentals, taz, how='left', op='within')
len(taz_rentals)
# optionally drop all the listings outside of any TAZ, then convert the TAZ IDs to int
taz_rentals = taz_rentals.dropna(subset=['SUPERD'])
taz_rentals['SUPERD'] = taz_rentals['SUPERD'].astype(int)
len(taz_rentals)
# how many rental listings are in each taz?
ax = taz_rentals['SUPERD'].value_counts().sort_index().plot(kind='bar',
                                                            width=0.9,
                                                            color='g',
                                                            alpha=0.5,
                                                            linewidth=0)
ax.set_xlabel('TAZ super district')
ax.set_ylabel('Number of rental listings')
plt.show()
# ## Plot each rental listing colored according to TAZ
# get a color for each taz: one plasma colormap sample per TAZ row
color_list = [cm.get_cmap('plasma')(x) for x in np.linspace(0, 1, len(taz['SUPERD']))]
# district ids are keyed 1..n (the comprehension's `taz` variable only shadows
# the GeoDataFrame inside the comprehension scope)
taz_colors = {taz+1:color for taz, color in enumerate(color_list)}
colors = taz_rentals['SUPERD'].map(lambda x: taz_colors[x])
# +
fig, ax = plt.subplots(figsize=(5,5))
# extract each polygon as a descartes patch, and add to a matplotlib patch collection...
patches = []
for geometry in taz['geometry']:
    if isinstance(geometry, Polygon):
        patches.append(PolygonPatch(geometry))
    elif isinstance(geometry, MultiPolygon):
        for subpolygon in geometry: #if geometry is multipolygon, go through each constituent subpolygon
            patches.append(PolygonPatch(subpolygon))
pc = PatchCollection(patches, facecolor='#3399cc', linewidth=1, alpha=0.1)
ax.add_collection(pc)
# extract coordinates into separate numpy arrays and scatter plot,
# coloring each listing point by its TAZ district
xy = taz_rentals['geometry'].map(lambda point: point.xy)
x, y = zip(*xy)
ax.scatter(x=x, y=y, s=4, color=colors, linewidth=0, alpha=0.7)
# set the figure bounds to the polygons' bounds
left, bottom, right, top = taz.total_bounds
ax.set_xlim((left,right))
ax.set_ylim((bottom,top))
ax.axis('off')
plt.show()
# -
# Points are colored by TAZ, but you could color them by any other attribute, such as distance to job center or whatnot
# ## Color polygons by number of rental listings per acre
#
# ...or by median rent, etc.
# count the number of listings in each TAZ then merge the counts with the TAZ geodataframe
# (reset_index turns the value_counts index into a column; the rename maps that
# index column to 'SUPERD' and the counts column to 'count_listings')
counts = pd.DataFrame(taz_rentals['SUPERD'].value_counts()).reset_index().rename(columns={'index':'SUPERD',
                                                                                          'SUPERD':'count_listings'})
taz_counts = pd.merge(taz, counts, how='left', on='SUPERD')
# calculate the number of listings per acre in each TAZ, then sort by it
taz_counts['listings_acre'] = taz_counts['count_listings'] / taz_counts['LANDACRE']
taz_counts = taz_counts.sort_values(by='listings_acre')
# +
fig, ax = plt.subplots(figsize=(6,6))
# extract each polygon as a descartes patch, and add to a matplotlib patch collection...
patches = []
fc = []
# NOTE(review): taz_counts is sorted ascending by listings_acre and color_list
# is reversed, so colors encode density *rank*, not the listings_acre value
# itself — confirm this is the intended mapping.
for geometry, color in zip(taz_counts['geometry'], reversed(color_list)):
    if isinstance(geometry, Polygon):
        patches.append(PolygonPatch(geometry))
        fc.append(color)
    elif isinstance(geometry, MultiPolygon):
        for subpolygon in geometry: #if geometry is multipolygon, go through each constituent subpolygon
            patches.append(PolygonPatch(subpolygon))
            fc.append(color)  # every piece of the multipolygon shares one color
pc = PatchCollection(patches, facecolor=fc, linewidth=0.2, alpha=0.7)
ax.add_collection(pc)
# set the figure bounds to the polygons' bounds
left, bottom, right, top = taz_counts.total_bounds
ax.set_xlim((left,right))
ax.set_ylim((bottom,top))
ax.axis('off')
plt.show()
# -
| 19-Spatial-Analysis-and-Cartography/spatial-joins-plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] hideCode=false hidePrompt=false
# # Chapter 7: Building an OLS Regression Model
#
# Having built statistics functions, we are now ready to build a function for regression analysis. We will start by building an OLS regression, using linear algebra to estimate parameters that minimize the sum of the squared errors. This is an ordinary least squares regression.
#
# An OLS regression with one exogenous variable takes the form.
#
# $y = \alpha + \beta_1x_1 + \mu $
#
# $\beta_0 = \alpha + \mu$
#
# We merge the error term, which represents bias in the data, with alpha to yield the constant, $\beta_0$. This is necessary since OLS assumes an unbiased estimator where:
#
# $\sum_{i=0}^{n-1} e_{i}=0$
#
# Each estimate of a point created from a particular observation takes the form.
#
# $y_i = \beta_0 + \beta_1x_{1,i} + e_i$
#
# This can be generalized to include k exogenous variables:
#
# $y_i = \beta_0 + (\sum_{j=1}^{k} \beta_jx_{i,j}) + e_i$
#
# Ideally, we want to form a prediction where, on average, the right-hand side of the equation yields the correct value on the left-hand side. When we perform an OLS regression, we form a predictor that minimizes the sum of the distance between each predicted value and the observed value drawn from the data. For example, if the prediction for a particular value of y is 8, and the actual value is 10, the error of the prediction is -2 and the squared error is 4.
#
# To find the function that minimizes the sum squared errors, we will use matrix algebra, also known as linear algebra. For those unfamiliar, the next section uses the numpy library to perform matrix operations. For clarity, we will review the linear algebra functions that we will use with simple examples.
#
# ## Linear Algebra for OLS
#
# We solve the following function for a vector of beta values ($\beta$), constants whose values represent estimates of the effect of variables in the set **_X_** on the selected endogenously generate variable $y$. The matrix **_X_** also includes a vector of ones used to estimate the constant $\beta_0$.
#
# $\beta = (X'X)^{-1}X'Y$
#
# $Y =$ Observations for Endogenous Variable
#
# $X =$ Observations for Exogenous Variables
#
# $X' =$ $X$-transpose
#
# $(X'X)^{-1} =$ Inverse of $X'X$
#
# ### Inverting a Matrix
#
# In reviewing the linear equation for estimating $\beta$, we confront two unique operations worth understanding. Included in these are some key concepts in linear algebra, including the identity matrix $I$ and linear independence. The best way to understand these concepts is by working with some sample vectors. Consider the matrix $X$ consisting of vectors $x_0$,$x_1$,…,$x_{n-1}$,$x_n$. We must check that these vectors are linearly independent. We do this by joining $X$ with an identity matrix and thus create:
#
# $A = [XI]$
#
# We transform this to show that the product of $A$ and $X^{-1}$ is equal to the product of and an identity matrix, $I$ and $X^{-1}$
#
# $AX^{-1} = [XI]X^{-1}$
#
# $AX^{-1} = [IX^{-1}]$
#
# Let us solve for $AX^{-1}$ using the following vectors for $X$.
#
# $\begin{equation*}
# X = \begin{bmatrix}
# 1 & 2 & 1 \\
# 4 & 1 & 5 \\
# 6 & 8 & 6
# \end{bmatrix}
# \end{equation*}$
#
# Concatenate a 3 X 3 identity matrix on the left of $X$:
#
# $\begin{equation*}
# I = \begin{bmatrix}
# 1 & 0 & 0 \\
# 0 & 1 & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# \end{equation*}$
#
# $\begin{equation*}
# [XI] = \begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 4 & 1 & 5 & 0 & 1 & 0 \\
# 6 & 8 & 6 & 0 & 0 & 1
# \end{bmatrix}
# \end{equation*}$
#
# If we perform row operations on $A$ to transform $X$ in $[XI]$ into $I$, then we $I$ will be transformed into $X^{-1}$:
#
# $\begin{equation*}
# [XI] = \begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 4 & 1 & 5 & 0 & 1 & 0 \\
# 6 & 8 & 6 & 0 & 0 & 1
# \end{bmatrix}
# \end{equation*}$
#
#
#
#
# $\begin{equation*}
# r_2 - 4r_1:\begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 0 & -7 & 1 & -4 & 1 & 0 \\
# 6 & 8 & 6 & 0 & 0 & 1
# \end{bmatrix}
# \end{equation*}$
#
#
# $\begin{equation*}
# r_3 - 6r_1:\begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 0 & -7 & 1 & -4 & 1 & 0 \\
# 0 & -4 & 0 & -6 & 0 & 1
# \end{bmatrix}
# \end{equation*}$
#
#
# $\begin{equation*}
# r_2 \leftrightarrow r_3:\begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 0 & -4 & 0 & -6 & 0 & 1\\
# 0 & -7 & 1 & -4 & 1 & 0
# \end{bmatrix}
# \end{equation*}$
#
# $\begin{equation*}
# r_2/{-4}:\begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 0 & 1 & 0 & 3/2 & 0 & -1/4\\
# 0 & -7 & 1 & -4 & 1 & 0
# \end{bmatrix}
# \end{equation*}$
#
# $\begin{equation*}
# r_3 + 7r_2:\begin{bmatrix}
# 1 & 2 & 1 & 1 & 0 & 0 \\
# 0 & 1 & 0 & 3/2 & 0 & -1/4\\
# 0 & 0 & 1 & 13/2 & 1 & -7/4
# \end{bmatrix}
# \end{equation*}$
#
# $\begin{equation*}
# r_1 + -2r_2 - r_3:\begin{bmatrix}
# 1 & 0 & 0 & -17/2 & -1 & 9/4 \\
# 0 & 1 & 0 & 3/2 & 0 & -1/4\\
# 0 & 0 & 1 & 13/2 & 1 & -7/4
# \end{bmatrix}
# \end{equation*}$
#
# $\begin{equation*}
# IX^{-1}=\begin{bmatrix}
# 1 & 0 & 0 & -8.5 & -1 & 2.25 \\
# 0 & 1 & 0 & 1.5 & 0 & -0.25\\
# 0 & 0 & 1 & 6.5 & 1 & -1.75
# \end{bmatrix}
# \end{equation*}$
#
# $\begin{equation*}
# X^{-1}=\begin{bmatrix}
# -8.5 & -1 & 2.25 \\
# 1.5 & 0 & -0.25\\
# 6.5 & 1 & -1.75
# \end{bmatrix}
# \end{equation*}$
#
# By transforming $X$ in matrix $XI$ into an identity matrix, we transform the $I$ matrix into $X^{-1}$. This also confirms that the vectors comprising X are independent, meaning that one vector in the set comprising $X$ cannot be formed from the combination and or transformation of the others. A fundamental assumption of regression analysis is that data generated from factors believed to determine the y-values are independent of one another.
#
# ### Linear Algebra in _numpy_
#
# We can check this using linear algebra functions in numpy. We start by creating numpy arrays that we will transform into vectors in the second step.
# + hideCode=false hidePrompt=false
#invertMatrix.py
import numpy as np

# Three sample arrays; these become the row vectors of the matrix X below.
x1 = np.array([1, 2, 1])
x2 = np.array([4, 1, 5])
x3 = np.array([6, 8, 6])
for label, arr in (("Array 1:", x1), ("Array 2:", x2), ("Array 3:", x3)):
    print(label, arr, sep="\n")
# + [markdown] hideCode=false hidePrompt=false
# Next, transform these arrays into row vectors using _matrix()_.
# + hideCode=false hidePrompt=false
# NOTE(review): np.matrix is deprecated in favor of plain ndarrays with
# np.linalg; it is kept here because the chapter relies on .getT()/.getI().
x1 = np.matrix(x1)
x2 = np.matrix(x2)
x3 = np.matrix(x3)
print("Vector 1", x1, sep="\n")
print("Vector 2", x2, sep="\n")
print("Vector 3", x3, sep="\n")
# + [markdown] hideCode=false hidePrompt=false
# Join them using the _concatenate()_ function. We define _axis=0_ to stack each row upon the next:
# + hideCode=false hidePrompt=false
X = np.concatenate((x1, x2, x3), axis = 0)
print("X:", X, sep="\n")
# + [markdown] hideCode=false hidePrompt=false
# Finally, we can invert the matrix that we have made using _.getI()_.
# + hideCode=false hidePrompt=false
X_inverse = X.getI()
print("X Inverse:", X_inverse, sep = "\n")
# + [markdown] hideCode=false hidePrompt=false
# These values are not rounded, which makes the inverted matrix hard to read; rounding with the _round()_ method from the _numpy_ module to two places makes it easier to interpret.
# + hideCode=false hidePrompt=false
X_inverse = np.round(X.getI(), 2)
print("X Inverse:", X_inverse, sep = "\n")
# + [markdown] hideCode=false hidePrompt=false
# ## Building a Regression Function
#
# Now that we have learned the necessary operations, we can create a regression function. Recall that we estimate the vector of beta parameters for each variable with the equation:
#
# $\beta = (X'X)^{-1}X'Y$
#
# In order to estimate the parameters, we will need to import data, define the dependent variable and independent variables, and transform these into matrix objects. We will use one py file to write a regression function and another to write the script that calls the regression function. Let’s start by importing the data that we saved in the previous chapter.
# + hideCode=false hidePrompt=false
#econFreedomRegression.py
import pandas as pd
# country-level Heritage economic freedom data, indexed by country name
data = pd.read_csv("cleanedEconFreedomData.csv", index_col = "Country Name")
# + [markdown] hideCode=false hidePrompt=false
# After importing the data, we print it to be sure that we have imported correctly. The first part of the results should match the output below.
#
# In the console, enter:
# + hideCode=false hidePrompt=false
data
# + [markdown] hideCode=false hidePrompt=false
# Next we will create the _regression.py_ file. This will contain the regression program that we will call from _econFreedomRegression.py_. For now, import pandas and build the class as demonstrated below.
# + hideCode=false hidePrompt=false
#regression.py
# you may ignore import jdc, used to split class development
# other cells that edits a class will include the magic command %% add_to
import jdc
import pandas as pd
import copy
from stats import *
class Regression:
    """Ordinary least squares regression built on pandas.

    OLS() prepares the data and (optionally) appends a constant column;
    later notebook cells extend this class with the matrix-algebra
    estimation steps.
    """

    def __init__(self):
        # instance of the stats class from the companion stats.py module
        self.stats = stats()

    def OLS(self, reg_name, data, y_name, beta_names,
            min_val = 0,
            max_val = None, constant = True):
        """Set up an OLS regression of y_name on beta_names over rows
        [min_val, max_val) of data.

        reg_name   : label used in the regression summary output
        data       : pandas DataFrame holding all variables
        y_name     : single-element list with the dependent variable's column
        beta_names : list of exogenous variable column names
        constant   : if True, append a column of ones to estimate an intercept
        """
        self.min_val = min_val
        # default to the full sample when no end index is supplied
        self.max_val = max_val if max_val is not None else len(data)
        self.reg_name = reg_name
        self.y_name = y_name
        # copy so the caller's list is not mutated when the constant is appended
        self.beta_names = copy.copy(beta_names)
        self.data = data.copy()
        if constant:
            self.add_constant()

    def add_constant(self):
        """Append a column of ones used to estimate the intercept term."""
        self.data["Constant"] = 1
        self.beta_names.append("Constant")
# + [markdown] hideCode=false hidePrompt=false
# We start by importing pandas, and the stats py file that we have already saved in the same folder. We create two methods inside of the Regression class. First is the *\_\_init\_\_* method. This will create an instance of Stats that will be called later. Second is the regress method. This is our primary method, from which all the necessary steps for preparing data and running a regression will be called.
#
# The OLS method passes several objects. First is reg_name, which is a string that will be included in the regression summary output. Data is the pandas data frame used for the regression. Next are the names of the variables we wish to regress: *y_name* is the name of the dependent variable and *x_names* is a list that includes the names of variables that we wish to regress. *min_val* and *max_val* are the starting and ending index values for the regression.
#
# OLS includes the option to include a constant. If *constant = True*, then a column of ones is added to the data. This column will be used to estimate a constant that determines at what value the fitted line or curve crosses the y-axis. Increase or decrease in this value shift the line up or down, respectively.
# + hideCode=false hidePrompt=false tags=["nbcal-ignore-output", "outputs-hidden"]
#econFreedomRegression
import pandas as pd
from regression import Regression
# . . .
# -
# At the bottom of the code, be sure to create an instance of the Regression class
# + hideCode=false hidePrompt=false hide_input=true
reg = Regression()  # instance used for all regressions in this chapter
# + [markdown] hideCode=false hidePrompt=false
# Now that we have created an instance of the Regression class, we can use the stats
# + hideCode=false hidePrompt=false
reg  # notebook cell: display the instance's repr
# + [markdown] hideCode=false hidePrompt=false
# A standard OLS regression assumes that the equation it is estimating will include a constant. We must therefore include a the option to include a constant, or not, in the estimation. To do this, we add a column of ones that will be used to estimate a constant value for our equation. This column of ones is identified by the column name, _“Constant”_.
#
# To see the effect of this addition, we can print the data after we have called the regression function from our object that is an instance of the Regression class. We will choose to print the _“Constant”_ column.
#
# ### Selecting Variables
#
# We may ask how different types of freedom tend to affect prosperity within a nation. The Heritage Index of Economic Freedom provides different measures to rate freedom and the rule of law within a country. We use these to predict GDP per capita in each country. Below is the script that will be used to run a regression. One indicator of the quality of explanatory power provided by an exogenous variable is its ability to maintain a steady value in terms of its estimated effect on the endogenous variable as well as its statistical significance. We will return to this discussion once the regression class is completed.
# + hideCode=false hidePrompt=false
#econFreedomRegression.py
# . . .
# dependent variable (a list: Regression.OLS indexes y_name[0])
y_var = ["GDP per Capita (PPP)"]
# exogenous regressors drawn from the Heritage freedom indicators
x_vars = ["Trade Freedom", "Property Rights",
          "Inflation (%)",
          "Public Debt (% of GDP)"]
reg.OLS("GDP per Capita", data, y_var, x_vars)
# + [markdown] hideCode=false hidePrompt=false
# We will run the above script to check our progress as we develop the Regression class. For now, execute the script. In the console, print the data again and you will notice the last column, labeled _"Constant"_ , includes a column of ones:
# + hideCode=false hidePrompt=false
reg.data  # the appended "Constant" column of ones is now visible
# + [markdown] hideCode=false hidePrompt=false
# Next we will create the regression.py file. This will contain the regression program that we will call from _econFreedomRegression.py_. For now, import pandas and build the class as demonstrated below.
#
# First, we must estimate the beta vector, $\beta$ and use this to estimate predicted values of y:
#
# $y ̂ = X\beta$
# :
# + hideCode=false hidePrompt=false
# %%add_to Regression
#regression.py
import numpy as np
# . . .
def OLS(self, reg_name, data, y_name, beta_names, min_val = 0,
        max_val = None, constant = True):
    """Prepare and run an OLS regression of y_name on beta_names.

    reg_name : str label identifying this regression's results
    data : DataFrame holding both endogenous and exogenous variables
    y_name : single-element list naming the endogenous variable
    beta_names : list of exogenous variable names
    min_val, max_val : bounds of the observation window used for
        estimation; max_val defaults to len(data) (use every row)
    constant : if True, append a column of ones so an intercept
        is estimated
    """
    self.min_val = min_val
    # identity check against the None sentinel ("is not None") instead
    # of "!= None", which relies on the argument's __eq__
    if max_val is not None:
        self.max_val = max_val
    else:
        self.max_val = len(data)
    self.reg_name = reg_name
    self.y_name = y_name
    # copy so the caller's list is not mutated when a constant is added
    self.beta_names = copy.copy(beta_names)
    self.data = data.copy()
    if constant:
        self.add_constant()
    self.build_matrices()
    self.estimate_betas_and_yhat()
def build_matrices(self):
    """Build the matrix inputs (y, X, X', (X'X)**-1, X'y) used by OLS."""
    # Transform dataframes to matrices, restricted to the observation
    # window [min_val:max_val) so y and X always cover the same rows
    self.y = np.matrix(self.data[self.y_name][self.min_val:self.max_val])
    # n X k matrix with one column per exogenous variable; sliced with
    # the same bounds as y (the original sliced only y, which produced
    # a shape mismatch whenever min_val/max_val selected a subset)
    self.X = np.matrix(self.data[self.beta_names][self.min_val:self.max_val])
    self.X_transpose = np.matrix(self.X).getT()
    # (X'X)**-1
    X_transp_X = np.matmul(self.X_transpose, self.X)
    self.X_transp_X_inv = X_transp_X.getI()
    # X'y
    self.X_transp_y = np.matmul(self.X_transpose, self.y)
def estimate_betas_and_yhat(self):
    """Solve for the OLS coefficients and store the fitted values of y."""
    # betas = (X'X)**-1 * X'y
    self.betas = self.X_transp_X_inv @ self.X_transp_y
    # y-hat = X * betas
    self.y_hat = self.X @ self.betas
    # store the fitted values next to the observations they predict
    estimator_col = self.y_name[0] + " estimator"
    self.data[estimator_col] = [row.item(0) for row in self.y_hat]
    # table of coefficient estimates; later extended with standard
    # errors, t-stats, and p-values
    self.estimates = pd.DataFrame(self.betas, index = self.beta_names,
                                  columns = ["Coefficient"])
    # record the endogenous variable in the index name
    self.estimates.index.name = "y = " + self.y_name[0]
# + [markdown] hideCode=false hidePrompt=false
# From the _econFreedomRegression.py_ file, lets execute the regress function that we have extended. Executing it will generate the data frame of coefficient estimates. In the console enter:
# + hideCode=false hidePrompt=false hide_input=true
#econFreedomRegression.py
# . . .
reg = Regression()
# . . .
reg.OLS("GDP per Capita", data, y_var, x_vars)
# + hideCode=false hidePrompt=false
reg.estimates
# + [markdown] hideCode=false hidePrompt=false
# We have calculated beta values for each independent variable, meaning that we estimated the average effect of a change in each independent variable upon the dependent variable. While this is useful, we have not yet measured the statistical significance of these estimations; neither have we determined the explanatory power of our particular regression.
#
# Our regression has estimated predicted values for our dependent variable given the values of the independent variables for each observation. Together, these estimations form an array of predicted values that we will refer to as $\hat{y}$. We will refer to individual predicted values as $\hat{y}_i$. We will also refer to the mean value of observations of our dependent variable as $\bar{y}$ and individual observed values of our dependent variable as $y_i$. These values will be used to estimate the sum of squares due to regression ($SSR$), sum of squared errors ($SSE$), and the total sum of squares ($SST$). By comparing the estimated $y$ values, the observed $y$ values, and the mean of $y$, we will estimate the standard error for each coefficient and other values that convey the significance of the estimation.
#
# We define these values as follows:
#
# $SSR = \sum_{i=0}^{n} (\hat{y}_{i} - \bar{y})^2$
#
# $SSE = \sum_{i=0}^{n} (y_{i} - \hat{y}_{i})^2$
#
# $SST = \sum_{i=0}^{n} (y_{i} - \bar{y})^2$
#
# It happens that the sum of the squared distances between the estimated values and mean of observed values and the squared distances between the observed and estimated values add up to the sum of the squared distances between the observed values and the mean of observed values. We indicate this as:
#
# $SST = SSR + SSE$
#
# The script below will estimate these statistics. It calls the sum_square_stats method from within the calculate_regression_stats method.
# + hideCode=false hidePrompt=false
# %%add_to Regression
#regression.py
# . . .
def OLS(self, reg_name, data, y_name, beta_names, min_val = 0,
        max_val = None, constant = True):
    """Run the OLS estimation and then calculate summary statistics.

    Identical to the earlier version except that it finishes by
    calling calculate_regression_stats().
    """
    self.min_val = min_val
    # NOTE(review): prefer "max_val is not None" for the sentinel check
    if max_val != None:
        self.max_val = max_val
    else:
        # default: use every observation in the data
        self.max_val = len(data)
    self.reg_name = reg_name
    self.y_name = y_name
    # copy so the caller's list is not mutated if a constant is appended
    self.beta_names = copy.copy(beta_names)
    self.data = data.copy()
    if constant:
        self.add_constant()
    self.build_matrices()
    self.estimate_betas_and_yhat()
    self.calculate_regression_stats()
def calculate_regression_stats(self):
    """Aggregate the post-estimation statistics (extended in later cells)."""
    self.sum_square_stats()
def sum_square_stats(self):
    """Calculate SSR, SSE, and SST for the fitted regression."""
    ssr_list = []
    sse_list = []
    sst_list = []
    # self.stats is a helper object created in __init__; presumably
    # stats.mean / stats.total behave like np.mean / np.sum -- confirm
    mean_y = self.stats.mean(self.y).item(0)
    for i in range(len(self.y)):
        # ssr is sum of squared distances between the estimated y values
        # (y-hat) and the average of y values (y-bar)
        yhat_i = self.y_hat[i]
        y_i = self.y[i]
        r = yhat_i - mean_y
        e = y_i - yhat_i
        t = y_i - mean_y
        ssr_list.append((r) ** 2)
        sse_list.append((e) ** 2)
        sst_list.append((t) ** 2)
    # call item - call value instead of matrix
    self.ssr = self.stats.total(ssr_list).item(0)
    self.sse = self.stats.total(sse_list).item(0)
    self.sst = self.stats.total(sst_list).item(0)
# + hideCode=false hidePrompt=false hide_input=true
#econFreedomRegression.py
# . . .
reg = Regression()
# . . .
reg.OLS("GDP per Capita", data, y_var, x_vars)
# + [markdown] hideCode=false hidePrompt=false
# The elements of the lists created are actually matrices, so we select the element in the matrix by calling _.item(0)_ after summing each list with _total()_.
#
# Now, the regression method will allow us to call the *SSR*, *SSE*, and *SST* values. These will be used to calculate the measures commonly associated with a regression such as r-squared and estimator variance. In the console enter:
#
# + hideCode=false hidePrompt=false
print(reg.ssr, reg.sse, reg.sst)
# + [markdown] hideCode=false hidePrompt=false
# With the sum of squared errors calculated, the next step is to calculate the estimator variance and use this to construct the covariance matrix. The covariance matrix is used to derive the standard errors and related statistics for each estimated coefficient.
#
# We estimate the variance of the error term of the estimator for the dependent variable.
#
# $\sigma^2 = \frac{SSE}{n-k}$
#
# $n = $number of observations
#
# $k = $number of independent variables
#
# An increase in the number of exogenous variables tends to increase the fit of a model. By dividing the $SSE$ by degrees of freedom, $n-k$ , improvements in fit that result from increases in the number of variables are offset in part by a reduction in degrees of freedom.
#
# Finally, we calculate the covariance matrix:
#
# $\sigma^2 (X'X)^{-1}$
#
# + hideCode=false hidePrompt=false
# %%add_to Regression
#regression.py
# . . .
def calculate_regression_stats(self):
    """Aggregate post-estimation statistics for the fitted regression."""
    self.sum_square_stats()
    self.calculate_degrees_of_freedom()
    self.calculate_estimator_variance()
    self.calculate_covariance_matrix()
def calculate_degrees_of_freedom(self):
    # Degrees of freedom compares the number of observations to the number
    # of exogenous variables used to form the prediction
    self.lost_degrees_of_freedom = len(self.estimates)
    # NOTE(review): with the defaults (min_val=0, max_val=len(data)) this
    # evaluates to len(data) + 1, one more than the rows build_matrices
    # actually uses; later code (e.g. joint_f_test's "dfd = dofu - 1")
    # appears to offset this, so confirm the intent before changing it
    self.num_obs = self.max_val + 1 - self.min_val
    self.degrees_of_freedom = self.num_obs - self.lost_degrees_of_freedom
def calculate_estimator_variance(self):
    """Store the SSE normalized by the degrees of freedom.

    Because adding exogenous variables lowers the degrees of freedom,
    the estimator variance rises as more variables are used in
    estimation.
    """
    self.estimator_variance = self.sse / self.degrees_of_freedom
def calculate_covariance_matrix(self):
    """Build the variance-covariance matrix of the coefficient estimates."""
    # estimator variance * (X'X)**-1, labelled by variable name so the
    # diagonal can later be read off as squared standard errors
    scaled = float(self.estimator_variance) * self.X_transp_X_inv
    self.cov_matrix = pd.DataFrame(scaled, index = self.beta_names,
                                   columns = self.beta_names)
# + hideCode=false hidePrompt=false hide_input=true
reg = Regression()
reg.OLS("GDP per Capita", data, y_var, x_vars)
# + [markdown] hideCode=false hidePrompt=false
# In the final method, calculate_covariance_matrix, the estimator variance is used to calculate the estimator covariance matrix. We will view this table by creating a csv. Enter the following command into the console:
# + hideCode=false hidePrompt=false
reg.cov_matrix.to_csv("regCovMatrix.csv")
reg.cov_matrix
# + [markdown] hideCode=false hidePrompt=false
# The diagonals of the covariance matrix represent the squared standard errors for each exogenous variable. The standard errors are what we will use to calculate t-statistics and p-values. The t-statistic of a coefficient is found by comparing the size of the estimated coefficient to its standard error:
#
# $tstat_{\beta_i}=\frac{\beta_i}{SE_{\beta_i}}$
#
# The larger the coefficient compared to the error, the more reliable is the statistic, as implied by a large t-stat. We draw the p-value associated with a particular t-stat from a table in light of the degrees of freedom associated with the regression. The p-values provides a rating of the estimate in light of the t-stat together with the number of degrees of freedom.
# -
#regression.py
from scipy.stats import t, f
# . . .
# + hideCode=false hidePrompt=false
# %%add_to Regression
# . . .
def calculate_regression_stats(self):
    """Aggregate post-estimation statistics, now including SEs, t-stats,
    and p-values for each coefficient.
    """
    self.sum_square_stats()
    self.calculate_degrees_of_freedom()
    self.calculate_estimator_variance()
    self.calculate_covariance_matrix()
    self.calculate_t_p_error_stats()
def calculate_t_p_error_stats(self):
    """Add standard errors, t-stats, p-values, and significance stars
    to the coefficient estimates table (self.estimates).
    """
    # thresholds used to award significance stars (*, **, ***)
    ratings = [.05, .01, .001]
    results = self.estimates
    stat_sig_names = ["SE", "t-stat", "p-value"]
    # create space in data frame for SE, t, and p
    for stat_name in stat_sig_names:
        results[stat_name] = np.nan
    # generate statistic for each variable
    for var in self.beta_names:
        # SE ** 2 of coefficient is found in the diagonal of cov_matrix.
        # .loc[row, col] assignment is used throughout because chained
        # indexing (results.loc[var][col] = ...) writes to a temporary
        # copy and can silently fail to update the frame
        results.loc[var, "SE"] = self.cov_matrix.loc[var, var] ** (1/2)
        # t-stat = Coef / SE
        results.loc[var, "t-stat"] = \
            results.loc[var, "Coefficient"] / results.loc[var, "SE"]
        # p-values is estimated using a table that transforms t-value in
        # light of degrees of freedom
        results.loc[var, "p-value"] = np.round(t.sf(np.abs(
            results.loc[var, "t-stat"]), self.degrees_of_freedom + 1) * 2, 5)
    # values for significances will be blank unless p-values < .05
    significance = ["" for i in range(len(self.beta_names))]
    for i in range(len(self.beta_names)):
        var = self.beta_names[i]
        for val in ratings:
            if results.loc[var, "p-value"] < val:
                significance[i] = significance[i] + "*"
    # column label fixed: was previously misspelled "signficance"
    results["significance"] = significance
# + hideCode=false hidePrompt=false hide_input=true
reg = Regression()
reg.OLS("GDP per Capita", data, y_var, x_vars)
# + [markdown] hideCode=false hidePrompt=false
# The standard errors, t-statistics and p-values are saved in the same dataframe as the coefficient estimates. Call them with the command:
# + hideCode=false hidePrompt=false
reg.estimates.to_csv("estimates.csv")
reg.estimates
# + [markdown] hideCode=false hidePrompt=false
# Next we will use the statistics that we have calculated to build the mean squared error (MSE), the square root of the mean squared error, R2, and F-stat.
#
# The variance term will be used to help us calculate other values. First we estimate the square root of the mean squared error. Since the mean squared error is the variance of the estimator, this means we simply take the square root the variance term
#
# $rootMSE = \sqrt{\sigma^2}$
#
# The square-root of the MSE provides a more readily interpretable estimate of the estimator variance, showing the average distance of predicted values from actual values, corrected for the number of independent variables.
#
# We also estimate the R2 value. This value indicates the explanator power of the regression
#
# $R^2 = \frac{SSR}{SST}$
#
# This compares the average squared distance between the predicted values and the average value against the average squared distance between observed values and average values. Ordinary least squares regression minimizes the squared distance between the predicted value and the average value. If values are perfectly predicted, then the SSR would equal the SST. Usually, the SSR is less than the SST. It will never be greater than the SST.
#
# Finally we calculate the F-statistic, commonly referred to as the F-stat:
#
# $ F =\frac{\frac{SST - SSE}{K - 1}}{\frac{SSE}{N - K}}$
#
# The F-statistic tests the likelihood of whether or not the values of our estimated parameters are all zero:
#
# $\beta_1 = \beta_2 = . . . = \beta_{n-1} = \beta_n$
#
# We check the difference between the SST and SSE divided by the number of independent variables used in the regression less one. We divide this value by the mean squared error.
# -
# + hideCode=false hidePrompt=false
# %%add_to Regression
#regression.py
# . . .
def calculate_regression_stats(self):
    """Aggregate every post-estimation statistic and build the summary table."""
    self.sum_square_stats()
    self.calculate_degrees_of_freedom()
    self.calculate_estimator_variance()
    self.calculate_covariance_matrix()
    self.calculate_t_p_error_stats()
    self.calculate_root_MSE()
    self.calculate_rsquared()
    self.calculate_fstat()
    # build_stats_DF must run last: it collects the values computed above
    self.build_stats_DF()
#. . .
def calculate_root_MSE(self):
    """Store the square root of the mean squared error."""
    self.root_mse = self.estimator_variance ** 0.5
def calculate_rsquared(self):
    """Store r-squared: the share of total variation explained."""
    self.r_sq = self.ssr / self.sst
def calculate_fstat(self):
    """Store the F-statistic testing that all coefficients are zero."""
    explained = self.sst - self.sse
    restrictions = self.lost_degrees_of_freedom - 1
    self.f_stat = explained / restrictions / self.estimator_variance
def build_stats_DF(self):
    """Collect the summary statistics in a single labelled column."""
    stats_dict = {"r**2": [self.r_sq],
                  "f-stat": [self.f_stat],
                  "Est Var": [self.estimator_variance],
                  "rootMSE": [self.root_mse],
                  "SSE": [self.sse],
                  "SSR": [self.ssr],
                  "SST": [self.sst],
                  "Obs.": [int(self.num_obs)],
                  "DOF": [int(self.degrees_of_freedom)]}
    self.stats_DF = pd.DataFrame(stats_dict,
                                 index = ["Estimation Statistics"]).T
# + hideCode=false hidePrompt=false hide_input=true
reg = Regression()
reg.OLS("GDP per Capita", data, y_var, x_vars)
# + [markdown] hideCode=false hidePrompt=false
# Be sure to call the function at the end of the *calculate_regression_stats()* method. Then, save *stats_DF* as a csv with the command:
# + hideCode=false hidePrompt=false
reg.stats_DF.to_csv("reg_stats.csv")
reg.stats_DF
# -
# You have successfully created a program that runs completes OLS regression and organizes statistics from this regression!
#
# ## Tests and Adjustments
#
# In addition to the essential elements that you have included in the regression method, some other evaluative criteria are in order. We will calculate the adjusted r-squared as well as joint f-tests. The first of these is used to offset the increase in the r-squared value that otherwise occurs when exogenous variables are added to a regression. It is possible that the addition of irrelevant variables appears to increase goodness-of-fit.
#
# For similar purposes, we will include the joint F-test. This compares the explanatory power of two regressions, revealing whether or not the inclusion of additional variables is actually improving explanatory power of the regression.
#
# ### Adjusted R-Squared
# Although the R2 is a useful measure to understand the quality of the explanation provided by the selected exogenous variables. Recall that:
#
# $R^2 = \frac{SSR}{SST}$
#
# $R^2 = 1 - \frac{\frac{SSE}{n - k}}{\frac{SST}{n-1}}$
#
# Notice that as the degrees of freedom decrease, the numerator necessarily decreases as well. Although it is not always appropriate to use the adjusted $R^2$, it is often useful to help gauge whether or not a marginal addition of a variable improves explanatory power of a regression.
# +
# %%add_to Regression
#regression.py
# . . .
def calculate_rsquared(self):
    """Store r-squared and the degrees-of-freedom-adjusted r-squared."""
    self.r_sq = self.ssr / self.sst
    # the adjusted r**2 penalizes additional exogenous variables by
    # normalizing SSE and SST by their respective degrees of freedom
    mean_sse = self.sse / self.degrees_of_freedom
    mean_sst = self.sst / (self.num_obs - 1)
    self.adj_r_sq = 1 - mean_sse / mean_sst
def build_stats_DF(self):
    """Collect summary statistics, including adjusted r**2, in one column."""
    stats_dict = {"r**2": [self.r_sq],
                  "Adj. r**2": [self.adj_r_sq],
                  "f-stat": [self.f_stat],
                  "Est Var": [self.estimator_variance],
                  "rootMSE": [self.root_mse],
                  "SSE": [self.sse],
                  "SSR": [self.ssr],
                  "SST": [self.sst],
                  "Obs.": [int(self.num_obs)],
                  "DOF": [int(self.degrees_of_freedom)]}
    self.stats_DF = pd.DataFrame(stats_dict,
                                 index = ["Estimation Statistics"]).T
# -
reg = Regression()
reg.OLS("GDP per Capita", data, y_var, x_vars)
# Now *stats_DF* includes the adjusted $R^2$:
reg.stats_DF
# ### Joint F-test
#
# Just as the adjusted r-squared allows for a more effective comparison of regressions that have varying numbers of variables, so too does the joint f-test. In order to compare regressions, we must save results from at least two comparable regressions. To save results, we create a dictionary named *reg_history* and save this using the method, *save_output*.
# +
# %%add_to Regression
#regression.py
# . . .
def __init__(self):
    # stats() is a helper class defined elsewhere in regression.py;
    # presumably it provides the mean/total used by sum_square_stats
    self.stats = stats()
    # results of every regression run, keyed by reg_name
    self.reg_history = {}
def OLS(self, reg_name, data, y_name, beta_names, min_val = 0,
        max_val = None, constant = True):
    """Run the OLS estimation, calculate statistics, and save the output
    under reg_name in self.reg_history.
    """
    self.min_val = min_val
    # NOTE(review): prefer "max_val is not None" for the sentinel check
    if max_val != None:
        self.max_val = max_val
    else:
        self.max_val = len(data)
    self.reg_name = reg_name
    self.y_name = y_name
    # copy so the caller's list is not mutated if a constant is appended
    self.beta_names = copy.copy(beta_names)
    self.data = data.copy()
    if constant:
        self.add_constant()
    self.build_matrices()
    self.estimate_betas_and_yhat()
    self.calculate_regression_stats()
    self.save_output()
def save_output(self):
    """Store copies of this regression's results under its name."""
    # copies are stored so later regressions cannot mutate saved results
    self.reg_history[self.reg_name] = {"Reg Stats": self.stats_DF.copy(),
                                       "Estimates": self.estimates.copy(),
                                       "Cov Matrix":self.cov_matrix.copy(),
                                       "Data":self.data.copy()}
# -
# By saving the regression statistics, estimates, and covariance matrix in a dictionary with a unique key that is the string passed as *self.reg_name* for the regression, results from multiple regressions can be called. This is required for running a *joint_f_test()*, which is supposed to estimate whether or not the addition of an exogenous variable in a regression actually improves the explanatory power of the regression.
#
# The joint f-test compares a restricted and unrestricted regression. The unrestricted regression uses the same exogenous variables as the restricted regression, and adds at least one more exogenous variable to be used to estimate values of y. The joint f-test checks whether the beta values of the additional exogenous variables included in the unrestricted regression are different than zero.
#
# To check, we calculate the joint F-statistic:
#
# $ F = \frac{\frac{SSE_r - SSE_u}{k_r - k_u}}{\frac{SSE_u}{n - k_u}}$
#
# If the p-values associated with this f-statistic indicates statistical significance, then at least one of the additional variables improve the explanatory power of the regression.
# +
# %%add_to Regression
def joint_f_test(self, reg1_name, reg2_name):
    """Compare a restricted and an unrestricted regression with a joint F-test.

    reg1_name, reg2_name : keys into self.reg_history identifying the two
        saved regressions. Which regression is restricted is detected
        automatically: it is the one with fewer variables, all of which
        must also appear in the other regression.
    Returns a DataFrame holding the f-stat, p-value, and degrees of
    freedom, or None (after printing a warning) when the regressions
    are not comparable.
    """
    # identify data for each regression
    reg1 = self.reg_history[reg1_name]
    reg2 = self.reg_history[reg2_name]
    # identify beta estimates for each regression to draw variables
    reg1_estimates = reg1["Estimates"]
    reg2_estimates = reg2["Estimates"]
    # name of y_var is saved as estimates index name
    reg1_y_name = reg1_estimates.index.name
    reg2_y_name = reg2_estimates.index.name
    # .iloc[0] reads the first (only) column positionally; integer
    # indexing on a labelled Series is deprecated in modern pandas
    num_obs1 = reg1["Reg Stats"].loc["Obs."].iloc[0]
    num_obs2 = reg2["Reg Stats"].loc["Obs."].iloc[0]
    # the test measures a restriction, so both regressions must use the
    # same observations; previously execution continued after the error
    if num_obs1 != num_obs2:
        return self.joint_f_error()
    if reg1_y_name != reg2_y_name:
        # different endogenous variables cannot be compared
        return None
    # the restricted regression is the one with fewer variables
    restr_reg = reg1 if \
        len(reg1_estimates.index) < len(reg2_estimates.index) else reg2
    unrestr_reg = reg2 if restr_reg is reg1 else reg1
    restr_var_names = restr_reg["Estimates"].index
    unrestr_var_names = unrestr_reg["Estimates"].index
    # every restricted variable must appear in the unrestricted model,
    # otherwise the models are not nested and the test is invalid
    if not all(key in unrestr_var_names for key in restr_var_names):
        return self.joint_f_error()
    sser = restr_reg["Reg Stats"].loc["SSE"].iloc[0]
    sseu = unrestr_reg["Reg Stats"].loc["SSE"].iloc[0]
    dofr = restr_reg["Reg Stats"].loc["DOF"].iloc[0]
    dofu = unrestr_reg["Reg Stats"].loc["DOF"].iloc[0]
    # degrees of freedom for the numerator and denominator of the F-stat
    dfn = dofr - dofu
    dfd = dofu - 1
    f_stat = ((sser - sseu) / (dfn)) / (sseu / (dfd))
    # survival function: numerically stable equivalent of 1 - f.cdf(...)
    f_crit_val = f.sf(f_stat, dfn = dfn, dfd = dfd)
    # label the null hypothesis: the added coefficients are all zero
    f_test_label = "h_0:"
    for key in unrestr_var_names:
        if key not in restr_var_names:
            f_test_label = f_test_label + str(key) + " == "
    f_test_label = f_test_label + "0"
    res_dict = {"f-stat": [f_stat],
                "p-value": [f_crit_val],
                "dfn": [dfn],
                "dfd": [dfd]}
    res_DF = pd.DataFrame(res_dict)
    res_DF = res_DF.rename(index = {0: ""})
    res_DF = res_DF.T
    res_DF.index.name = f_test_label
    return res_DF
def joint_f_error(self):
    """Warn that the two regressions cannot be compared; return None."""
    print("Regressions not comparable for joint F-test")
    return None
# -
# ### Call the Joint F-Test
#
# Having constructed a method to run the joint F-test, we first need to define the restricted and unrestricted regression. The restricted regression will omit *"Public Debt (% of GDP)"*, so we can test whether adding that variable improves the regression. We will compare the regression that we ran, the unrestricted regression, with this restricted regression. Name each regression accordingly, then call the *joint_f_test()* using the names for each regression. The program automatically distinguishes which is restricted and which is unrestricted. Since the results of the test are saved in a dataframe, we can save the results as a csv.
#EconFreedomRegression.py
# . . .
reg = Regression()
# +
y_var = ["GDP per Capita (PPP)"]
# the unrestricted model includes every candidate exogenous variable
x_vars_unrestricted = ["Trade Freedom", "Property Rights", "Inflation (%)",
                       "Public Debt (% of GDP)"]
# the restricted model drops "Public Debt (% of GDP)"
x_vars_restricted = ["Trade Freedom", "Property Rights", "Inflation (%)"]
reg.OLS("GDP Per Capita Unrestricted", data, y_var, x_vars_unrestricted)
reg.OLS("GDP Per Capita Restricted", data, y_var, x_vars_restricted)
# joint_f_test detects which saved regression is the restricted one
joint_f_test = reg.joint_f_test("GDP Per Capita Unrestricted",
                                "GDP Per Capita Restricted")
# the file name records the endogenous variable and the null hypothesis
joint_f_test.to_csv("Joint F_test; y = " + reg.y_name[0] + "; " +\
                    joint_f_test.index.name + ".csv")
joint_f_test
# -
# The test indicates that the null hypothesis — that the coefficient on the variable *"Public Debt (% of GDP)"* is equal to zero — is rejected, since the p-value generated by the test is less than 0.05.
#
# ## Visualizing Regression Results
#
# To get the most value out of a regression function requires not only tables for statistics. Visualizations that compare observations to estimates form the regression are a powerful means of presenting results. Since we have already generated estimations for y_hat, this simply requires the plotting of y values against values of exogenous variables. On the same plot, do the same for estimator (*yhat*) values.
#
# In the next section we will use a for loop to compare the predicted values of the y variable with observed values in a scatter plot. Each plot will include the y-values on the vertical axis and the values of an exogenous variable on the horizontal axis.
# +
#econFreedomRegression.py
# . . .
import matplotlib.pyplot as plt
def plot_scatter_with_estimator(title, data, x_vars, y_var):
    """Plot observed vs estimated y values against each exogenous variable.

    title : str used as the title of every figure
    x_vars : list of exogenous variable names; one figure per variable
    y_var : single-element list naming the endogenous variable; data is
        expected to contain a "<y> estimator" column added by
        Regression.estimate_betas_and_yhat
    """
    # set default font size
    plt.rcParams.update({"font.size": 19})
    # use a for loop to call each exogenous variable
    y = y_var[0]
    for x in x_vars:
        # prepare a figure with the predictor. We will use ax to specify that
        # the plots are in the same figure
        fig, ax = plt.subplots(figsize = (12, 8))
        # labels will be in a legend
        y_label1 = "Estimate"
        y_label2 = "Observation"
        # plot the estimated value
        data.plot.scatter(x = x, y = y + " estimator", ax = ax, c = "r",
                          s = 10, label = y_label1, legend = False)
        # erase the y-axis label so that "estimator" is not present
        # the y-label will reappear when the observations are plotted
        plt.ylabel("")
        data.plot.scatter(x = x, y = y, ax = ax, s = 10, label = y_label2,
                          legend = False)
        # call the legend, place atop the image on the left
        # bbox_to_anchor used to specify exact placement of label
        plt.legend(loc = "upper left", labels = [y_label1, y_label2],
                   bbox_to_anchor = (0, 1.17))
        plt.title(title)
        # remove lines marking units on the axis
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')
        plt.show()
        # close each figure so repeated calls don't accumulate open figures
        plt.close()
# . . .
y_var = ["GDP per Capita (PPP)"]
x_vars_unrestricted = ["Trade Freedom", "Property Rights", "Inflation (%)",
                       "Public Debt (% of GDP)"]
x_vars_restricted = ["Trade Freedom", "Property Rights", "Inflation (%)"]
reg.OLS("GDP Per Capita Unrestricted", data, y_var, x_vars_unrestricted)
reg.OLS("GDP Per Capita Restricted", data, y_var, x_vars_restricted)
# plot each regression against the variable list actually used to fit it;
# the original passed the name "x_vars", which is not defined in this cell
# and only worked if an earlier cell had left it in the notebook namespace
reg_name = "GDP Per Capita Unrestricted"
plot_scatter_with_estimator("Unrestricted", reg.reg_history[reg_name]\
                            ["Data"], x_vars_unrestricted, y_var)
reg_name = "GDP Per Capita Restricted"
plot_scatter_with_estimator("Restricted", reg.reg_history[reg_name]\
                            ["Data"], x_vars_restricted, y_var)
# -
# The visualization generated by this script allow for a comparison of estimates of y-variable generated from a set of observed values to the actual values that were part of the observations.
# ### Exercise:
#
# 1. Run an OLS regression using a different set of data. Use the regression class created in this chapter. Print the results.
# 2. Create scatter plots of the observation and predicted values as demonstrated at the end of this chapter.
# 3. Use the numpy libraries log function to log some or all value in your data. Print the columns of data that have been logged. (hint: pass the appropriate list of keys to the dataframe, data[[key1,key2,key3…]])
# 4. Run the same regression again. Print the results. How has the significance changed?
# 5. Plot the new results using scatter plots as in question 2.
# 6. Create 2 unique visualizations of the results using matplotlib (e.g., time series predicted values and observations, plots with more than 2 variables represented such as 3D plane or changing size of dots, a plot comparing results of the logged and unlogged regression, etc…). For visualization ideas visit the [matplotlib website](https://matplotlib.org/examples/).
#
| Textbook/.ipynb_checkpoints/Chapter 7 - Building an OLS Regression Model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"outputs_hidden": false}
#Taking Input
# + jupyter={"outputs_hidden": false}
# Python 2: input() evaluates the typed expression; raw_input() returns str
x=input()
y=raw_input()
# + jupyter={"outputs_hidden": false}
# Python 2 print statements (no parentheses); this notebook targets 2.7
print type(x)
print type(y)
# + jupyter={"outputs_hidden": true}
#Descision Making
# + jupyter={"outputs_hidden": false}
# basic if/else; x came from input() so it may already be an int
if x==1:
    print 'x is equal to 1'
else:
    print 'x is not equal to 1'
# + jupyter={"outputs_hidden": false}
# NOTE(review): y is a string from raw_input(), so y==1 is always False
# and this always prints the "not equal" message (and says "x", not "y")
if y==1:
    print 'x is equal to 1'
else:
    print 'x is not equal to 1'
# + jupyter={"outputs_hidden": true}
# Read about elif
# + jupyter={"outputs_hidden": true}
# Loops
# + jupyter={"outputs_hidden": false}
#while loop
# counts down from 10 and stops once j reaches 5
j=10
while j>5:
    print j
    j=j-1
# + jupyter={"outputs_hidden": false}
#for loop
# range(5) yields 0 through 4
for j in range(5):
    print j
# + jupyter={"outputs_hidden": false}
# iterating directly over a list literal
for j in [1,2,3]:
    print j
# + jupyter={"outputs_hidden": true}
# Functions
# + jupyter={"outputs_hidden": true}
def increment(a):
    """Return the value one greater than a."""
    return a + 1
# + jupyter={"outputs_hidden": false}
print increment(10)
# + jupyter={"outputs_hidden": true}
# Lists
# + jupyter={"outputs_hidden": false}
a=[1,2,3]
a.append(4)
a.append(5)
# remove the element at index 1 (the value 2)
del a[1]
print a
print a[0]
#negative indexing
print a[-1]
# + jupyter={"outputs_hidden": true}
# Dictionaries
# + jupyter={"outputs_hidden": false}
x={"a":1,"b":10,"c":11}
# look up a single value by key, then show the whole dict
print x['a']
print x
# + jupyter={"outputs_hidden": false}
#inserting a new key
x['d']=21
print x
# + jupyter={"outputs_hidden": false}
# remove key 'c' and its value
del x['c']
print x
| Deep Learning-SEMICOLON/2. Data Analytics/Python 2.7 Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DIY Redaction Art
#
# **Redactions are a way of restricting access, of witholding information – they're dead ends. But here you can recycle redactions into something interesting, something creative, perhaps even something beautiful.**
#
# 
#
# The redactions used here were extracted from surveillance files created by the Australian Security Intelligence Organisation (ASIO). The files recorded information about the activities of people who were deemed of interest to the government – due to their background, their beliefs, or perhaps just their friends. We don't know how many surveillance files have been created by ASIO over the years, but there are currently more than 7,800 files on individuals available in [series A6119](http://recordsearch.naa.gov.au/scripts/AutoSearch.asp?Number=A6119) at the National Archives of Australia. Of these, 2,606 have been digitised.
#
# Most of these files are 'Open with exception', which means that the public versions have [pages removed and redactions applied](https://www.naa.gov.au/explore-collection/intelligence-and-security/asio-records/asio-records-exempt-information) – many, many redactions. In April 2021, I downloaded all the digitised files from A6119, comprising 280,134 page images. Using a machine learning model based on [YOLOv5](https://github.com/ultralytics/yolov5), I found [404,653 redactions](https://github.com/GLAM-Workbench/recordsearch/blob/master/data/A6119-redactions.csv) in the images. Of the 280,134 pages, 151,102 (54%) included redactions. The redaction finding model isn't perfect, but the number of false positives seems very small (probably less than 1 percent). I'll be sharing more information about the process shortly.
#
# To make your own redaction art collages, just set the desired size of your final image and click on the button below. A random sample of redactions will be obtained from the dataset and packed into the image dimensions. Once it's finished you'll be able to download both the finished collage, and a CSV dataset containing metadata that describes all the redactions used, including original file references. If you're not happy with the result, try again. Every piece of redaction art is unique. Please share your creations using the #redactionart tag.
#
# Keep a look out for an assortment of redaction art critters and doodles which I found living in the files. There should be at least one in every collage.
#
# ----
# Import what we need
from PIL import Image, ImageOps
from pathlib import Path
from rectpack import *
from datetime import datetime
from IPython.display import display, FileLink, HTML
from IPython.display import Image as DisplayImage
import random
import pandas as pd
import requests
import io
import ipywidgets as widgets
from tqdm.auto import tqdm
# +
def choose_art():
    '''
    Pick one piece of redactionart at random for inclusion in the composite.
    Returns a (file name, width, height) tuple read from data/redactionart.txt,
    where each line holds "name width height".
    '''
    art_file = Path('data') / 'redactionart.txt'
    entries = art_file.read_text().split('\n')
    name, width, height = random.choice(entries).split()
    return (name, int(width), int(height))
def get_image_data(width, height, max_size):
    '''
    Get a randomly selected list of redactions to be packed into the composite.
    Insert the citation image and a piece of redactionart.
    Returns a list of (width, height, image name) tuples; each width/height
    is padded by 2px to leave room for a 1px border on every side.
    '''
    # Make an estimate of how many redactions are needed
    # This might need to be changed if the image max_size is reduced
    # by increasing the final factor.
    # "out" is presumably an ipywidgets Output widget created at notebook
    # level -- status messages render inside it; confirm it exists before
    # reusing this function elsewhere
    with out:
        print('Gathering data...')
    sample_size = round(((width * height) / 1000000) * 70)
    images = []
    # Open the redactions dataset as a dataframe
    redactions = pd.read_csv(Path('data', 'A6119-redactions.csv'))
    # Select a random sample of redactions and loop through them
    for red in redactions.sample(sample_size).itertuples():
        img_w = red.img_width
        img_h = red.img_height
        # Only incude redactions smaller than the max_size
        if img_w < max_size and img_h < max_size:
            images.append((img_w+2, img_h+2, red.img_name))
    # Select a random point in the first half of the list to insert the citation.
    # We put it in the first half to try and make sure it gets included.
    # 402 x 202 = the 400 x 200 citation image plus the 2px border padding
    ref_loc = random.choice(range(1, round(sample_size / 2)))
    images.insert(ref_loc, (402, 202, 'redactions-citation.jpg'))
    # Select a random point in the first half of the list to insert the redactionart.
    art, art_w, art_h = choose_art()
    art_loc = random.choice(range(1, round(sample_size / 2)))
    images.insert(art_loc, (art_w+2, art_h+2, art))
    return images
def pack_images(width, height, max_size):
    '''
    Arrange a random selection of redaction images within a width x height
    canvas using the rectpack bin-packing library.

    Returns (number_of_candidate_images, packed_rectangle_list).
    '''
    candidates = get_image_data(width, height, max_size)
    with out:
        print('Packing images...')
    # SORT_NONE preserves our insertion order (keeping the citation/art
    # early in the queue); rotation is disabled so images stay upright.
    packer = newPacker(sort_algo=SORT_NONE, rotation=False)
    for rect in candidates:
        packer.add_rect(*rect)
    packer.add_bin(width, height)
    packer.pack()
    return len(candidates), packer.rect_list()
def create_file_list(rectangles):
    '''
    Build a DataFrame describing every redaction used in the composite,
    with links to the source redaction image and the originating file
    in RecordSearch.
    '''
    # Packed rectangles are (bin, x, y, w, h, image_name); collect the names.
    used_names = [rect[5] for rect in rectangles]
    all_redactions = pd.read_csv(Path('data', 'A6119-redactions.csv'))
    used = all_redactions.loc[all_redactions['img_name'].isin(used_names)]
    items = pd.read_csv(Path('data', 'A6119-items.csv'))
    refs = pd.merge(used, items, how='left', left_on='item_id', right_on='identifier')
    refs['redaction_url'] = refs['img_name'].apply(lambda x: f'https://asiodata.s3.amazonaws.com/a6119-redactions//a6119-redactions/{x}')
    refs['recordsearch_url'] = refs['item_id'].apply(lambda x: f'https://recordsearch.naa.gov.au/scripts/AutoSearch.asp?O=I&Number={x}')
    return refs
def create_composite(width=3840, height=2400, max_size=600, bg_colour=(0, 0, 0), img_path='', output_file=None):
    '''
    Build the collage image and an accompanying CSV listing its sources.

    width, height: size of the composite in pixels.
    max_size: redactions larger than this in either dimension are excluded.
    bg_colour: background and border colour.
    img_path: optional local directory holding the redaction images; if
        empty, each image is downloaded from S3.
    output_file: basename for the .jpg/.csv outputs; defaults to a
        timestamped name.
    '''
    num_images, rectangles = pack_images(width, height, max_size)
    comp = Image.new('RGB', (width, height), bg_colour)
    with out:
        print('Downloading images...')
    # Paste each packed rectangle into the composite at its packed position.
    for rect in tqdm(rectangles):
        b,x,y,w,h,rid = rect
        # print(x,y, w, h, rid)
        # Get the citation image from the current directory
        if rid == 'redactions-citation.jpg':
            red = Image.open(Path('redactions-citation.jpg'))
        # Get image from local path if set
        elif img_path:
            red_path = Path(img_path, rid)
            red = Image.open(red_path)
        # Otherwise get image from s3
        else:
            img_url = f'https://asiodata.s3.amazonaws.com/a6119-redactions/{rid}'
            data = requests.get(img_url).content
            red = Image.open(io.BytesIO(data))
        red = red.convert('RGB')
        # 1px border matches the +2 padding added when the rects were sized.
        red_with_border = ImageOps.expand(red, border=1, fill=bg_colour)
        comp.paste(red_with_border, (x, y, x+w, y+h))
    # Default output name is timestamped so repeated runs don't clobber files.
    if not output_file:
        timestamp = int(datetime.now().timestamp())
        output_file = f'redactions-{timestamp}-{width}-{height}'
    output_image = Path(f'{output_file}.jpg')
    output_csv = Path(f'{output_file}.csv')
    comp.save(output_image)
    # Save the CSV of sources alongside the image.
    refs = create_file_list(rectangles)
    refs.to_csv(output_csv, index=False)
    files_used = refs['item_id'].nunique()
    out.clear_output()
    with out:
        display(HTML(f'{len(rectangles) - 1} redactions used from {files_used} files – <a href="{str(output_csv)}" download="{output_csv.name}">download CSV</a>'))
        display(HTML(f'<a href="{str(output_image)}" download="{output_image.name}">Download image</a>'))
        display(comp)
# Shared ipywidgets style: give the control labels enough room.
style = {'description_width': 'initial'}
# Composite width in pixels (clamped to 500-5000 for performance).
width = widgets.BoundedIntText(
    value=3200,
    min=500,
    max=5000,
    step=10,
    description='Width:',
    disabled=False,
    style=style
)
# Composite height in pixels.
height = widgets.BoundedIntText(
    value=1800,
    min=500,
    max=5000,
    step=10,
    description='Height:',
    disabled=False,
    style=style
)
# Redactions larger than this in either dimension are filtered out.
max_size = widgets.BoundedIntText(
    value=400,
    min=100,
    max=1000,
    step=10,
    description='Max redaction size:',
    disabled=False,
    style=style
)
# Background / border colour of the composite.
bg_colour = widgets.ColorPicker(
    concise=False,
    description='Background colour',
    value='black',
    disabled=False,
    style=style
)
# Button that kicks off composite creation (wired to start() below).
go = widgets.Button(
    description='Go!',
    disabled=False,
    button_style='primary', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me to make art',
    icon=''
)
# Output area used for progress messages and the final image/links.
out = widgets.Output()
def start(b):
    '''
    Click handler for the Go! button: clears any previous output and
    builds a new composite from the current widget values.
    '''
    out.clear_output()
    create_composite(width=width.value, height=height.value, max_size=max_size.value, bg_colour= bg_colour.value)
go.on_click(start)
display(width, height, max_size, bg_colour, go , out)
# -
# ----
#
# ### Notes
#
# <ul class="browser-default">
# <li>Some of the redactions are just very big black boring boxes. To prevent them filling up your collage there's a maximum size value to filter redactions by size. The default value should produce good results in most cases.</li>
# <li>If your image size is large (greater than 4,000-ish), you might find that the packing algorithm becomes quite slow. It's still working, just be patient.</li>
# <li>I've set a limit of 5,000 x 5,000 pixels on the image size, just for performance reasons. But this is a Jupyter notebook, so if you want bigger you can always <a href="https://github.com/GLAM-Workbench/recordsearch/blob/master/diy_redaction_collage.ipynb">grab the code</a> and modify it.</li>
# </ul>
# ----
#
# Created by [<NAME>](https://timsherratt.org) for the [GLAM Workbench](https://glam-workbench.net/).
# Support me by becoming a [GitHub sponsor](https://github.com/sponsors/wragge)!
| diy_redaction_collage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Visualize segmentations generated on different localization methods
# %load_ext autoreload
# %autoreload 2
from plot_helper import *
import ipyplot
# Localization tasks (pathology labels) available for visualization;
# the `task` parameter below must be one of these.
LOCALIZATION_TASKS = ["Enlarged Cardiomediastinum",
                      "Cardiomegaly",
                      "Lung Lesion",
                      "Airspace Opacity",
                      "Edema",
                      "Consolidation",
                      "Atelectasis",
                      "Pneumothorax",
                      "Pleural Effusion",
                      "Support Devices"
                     ]
# ## Test Set
# Red is Stanford annotation and yellow is one of gradcam, ig and vietnam
#
# *IG CAMS not ready*
# ### Parameters to change
# +
# Task to visualize; must be one of LOCALIZATION_TASKS above.
task = 'Airspace Opacity' # choose from the localization tasks to avoid error
pos_only = False # true if only show positive instances; change to false to visualize all images
# change the indices to your own liking to visualize as many images as needed
start_idx = 0
end_idx = 10
# opacity of the segmentation overlay
w = 0.92 # 1 if wants segmentation to be very light, 0 otherwise
img_width = 300 # width of image to display
# -
# ### Plot
# +
# get image ids; can choose to load positive instances only
img_ids = load_all_ids(phase='test',task=task,pos_only = pos_only)
ids = img_ids[start_idx:end_idx] # select images to visualize
print(f"Showing positive only is {pos_only} with {len(img_ids)} images to visualize; task is {task} \n\n")
# For each image, plot single-model Grad-CAM, ensemble Grad-CAM, ensemble
# Integrated Gradients, and the human ("vietnam") annotation side by side.
for img_id in ids:
    print(f"{img_id}\n")
    img1 = visualize_segmentations(img_id,task,phase='test',method='gradcam',model='single',w=w)
    img2 = visualize_segmentations(img_id,task,phase='test',method='gradcam',model='ensemble',w=w)
    img3 = visualize_segmentations(img_id,task,phase='test',method='ig',model='ensemble',w=w)
    img4 = visualize_segmentations(img_id,task,phase='test',method='vietnam',w=w)
    ipyplot.plot_images([img1,img2,img3,img4], labels = ['gradcam_single','gradcam_ensemble','ig_ensemble','human'],img_width=img_width)
# -
# ### Validation set (not relevant for now)
# +
# Directories of saved CAMs (saliency maps) on the shared filesystem.
ig_dir = '/deep/u/ashwinagrawal/result/ensemble_results/cams/'
gradcam_dir = '/deep/u/asaporta/densenet_ckpts/willow/ensemble_results/cams/'
gt_dir = '/deep/group/aihc-bootcamp-spring2020/localize/annotations/gt_imgs/'
# Original x-ray images (validation and test splits).
CXR_VALID_PATH = '/deep/group/CheXpert/CheXpert-v1.0/valid'
CXR_TEST_PATH = '/deep/group/anujpare/CheXpert_original_test500/test'
# +
# vietnam v.s. localization, shown side by side:
# left: choose from vietnam, gradcam_single, gradcam_ensemble and ig with stanford overlay
# right: choose from vietnam, gradcam, and IG with stanford overlay
# Change patientid/task to see a different patient and pathology.
patientid = 'patient64593' # on validation set 64541:64740
task = 'Cardiomegaly'
# Integrated gradients overlay with the chosen opacity.
w = 0.92
visualize_cam_segmentation(ig_dir,CXR_VALID_PATH,gt_dir,patientid,task,w)
# -
dataset = 'valid'
path_group = '/deep/group/aihc-bootcamp-spring2020/localize'
gt_file = f'{path_group}/annotations/{dataset}_annotations_merged.json'
# `json` is not imported explicitly anywhere in this notebook (it may have
# been re-exported by plot_helper's star import); import it here so this
# cell stands on its own.
import json
# Load the merged ground-truth annotations and peek at the first entry.
with open(gt_file) as f:
    gt = json.load(f)
len(gt.keys())
gt[list(gt.keys())[0]]
| chexpert-model/localization_eval/segmentation_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ecpaperenv]
# language: python
# name: conda-env-ecpaperenv-py
# ---
# Read in fluxnet data for stations over a specified longitude and latitude range and only for stations that have >= 10 years worth of data.
# +
import importlib
import xarray as xr
import numpy as np
import sys
import matplotlib.pyplot as plt
import pandas as pd
from math import nan
import math
from glob import glob
from CASutils import mapplot_utils as maps
importlib.reload(maps)
# -
# set locations of fluxnet data and output directory
datdir="/project/mojave/observations/FLUXNET2015/"
outdir="/project/cas/islas/python_savs/fluxnet/"
# read in station info
stationinfo = pd.read_csv(datdir+"SiteList.csv")
# Keep only stations in the two longitude/latitude boxes of interest:
# north of 40N and west of 55W, or north of 45N and east of 30E.
stationinfonh = stationinfo.loc[ ((stationinfo.LOCATION_LAT > 40) & (stationinfo.LOCATION_LONG < -55)) | ((stationinfo.LOCATION_LAT > 45) & (stationinfo.LOCATION_LONG > 30))]
stationuse = []
lonuse = []
latuse = []
count=0
# Loop over the candidate stations; keep those with at least 10 years of
# daily data and with all of the required variables present.
# NOTE: Series.iteritems() was removed in pandas 2.0; .items() is the
# long-standing equivalent.
for index, station in stationinfonh['SITE_ID'].items():
    fname = glob(datdir+'*'+station+'*/*DD*')
    try:
        data = pd.read_csv(fname[0])
        # TIMESTAMP is YYYYMMDD; slice out the year of the first/last record.
        ybeg = str(data[['TIMESTAMP']].iloc[0].values)[1:5]
        yend = str(data[['TIMESTAMP']].iloc[len(data[['TIMESTAMP']])-1].values)[1:5]
        nyears = int(yend)-int(ybeg)+1
        if (nyears >= 10):
            try:
                # Check for the necessary variables: this column selection
                # raises KeyError when any of them is missing.
                data = data[['TIMESTAMP','TA_F','TA_F_QC','H_F_MDS','H_F_MDS_QC','G_F_MDS','G_F_MDS_QC','NETRAD','NETRAD_QC']]
                print(station+' '+str(nyears))
                stationuse.append(station)
                lonuse.append(stationinfonh.loc[index, 'LOCATION_LONG'])
                latuse.append(stationinfonh.loc[index, 'LOCATION_LAT'])
            except KeyError:
                # Station lacks one or more required variables; skip it.
                pass
    except Exception:
        # No matching file (fname[0] -> IndexError) or an unreadable file;
        # skip the station. Kept deliberately broad, but no longer a bare
        # except so KeyboardInterrupt/SystemExit still propagate.
        pass
# +
# Map of the retained station locations over both continents.
fig = plt.figure(figsize=(16,16))
ax1 = maps.contourmap_bothcontinents_scatter_nh_pos(fig, np.ones([len(lonuse)]), lonuse, latuse, 0.1,-1,1,'Fluxnet stations',
                0.02,0.32,0.8,0.97)
# -
# Read in the data for stations that will be used.
count=0
for istation in stationuse:
    # Re-read the station's daily file and keep the variables of interest.
    fname = glob(datdir+'*'+istation+'*/*DD*')
    data = pd.read_csv(fname[0])
    data = data[['TIMESTAMP','TA_F','TA_F_QC','H_F_MDS','H_F_MDS_QC','G_F_MDS','G_F_MDS_QC','NETRAD','NETRAD_QC']]
    # First/last year present in the file (TIMESTAMP is YYYYMMDD).
    ybeg = str(data[['TIMESTAMP']].iloc[0].values)[1:5]
    yend = str(data[['TIMESTAMP']].iloc[len(data[['TIMESTAMP']])-1].values)[1:5]
    # Build a complete daily calendar covering those years, plus the same
    # dates as YYYYMMDD integers for matching against TIMESTAMP.
    timeout = pd.date_range(start=ybeg+"-01-01", end=yend+"-12-31")
    timeoutasdata = [str(timeout[i].year)+str(timeout[i].month).zfill(2)+str(timeout[i].day).zfill(2) for i in np.arange(0,len(timeout),1)]
    timeoutasdata = [int(i) for i in timeoutasdata]
    # Allocate output arrays over the full calendar, initialised to NaN so
    # days missing from the file stay missing.
    tas = np.empty([len(timeoutasdata)]) ; tas_qc = np.empty([len(timeoutasdata)])
    shflx = np.empty([len(timeoutasdata)]) ; shflx_qc = np.empty([len(timeoutasdata)])
    g = np.empty([len(timeoutasdata)]) ; g_qc = np.empty([len(timeoutasdata)])
    netrad = np.empty([len(timeoutasdata)]) ; netrad_qc = np.empty([len(timeoutasdata)])
    tas[:] = nan ; tas_qc[:] = nan
    shflx[:] = nan ; shflx_qc[:] = nan
    g[:] = nan ; g_qc[:] = nan
    netrad[:] = nan ; netrad_qc[:] = nan
    # find the dates that exist in the file and assign to the right place in the output array
    datesinds = dict()
    for i, j in enumerate(timeoutasdata):
        datesinds.setdefault(j,[]).append(i)
    res = [datesinds.get(i,[None]) for i in data['TIMESTAMP']]
    # Scatter each variable from the file into its calendar slot.
    shflx[np.array(res).squeeze()] = data['H_F_MDS']
    tas[np.array(res).squeeze()] = data['TA_F']
    tas_qc[np.array(res).squeeze()] = data['TA_F_QC']
    g[np.array(res).squeeze()] = data['G_F_MDS']
    g_qc[np.array(res).squeeze()] = data['G_F_MDS_QC']
    shflx_qc[np.array(res).squeeze()] = data['H_F_MDS_QC']
    netrad[np.array(res).squeeze()] = data['NETRAD']
    netrad_qc[np.array(res).squeeze()] = data['NETRAD_QC']
    # Wrap everything as named DataArrays on the daily time axis.
    lon_xr = xr.DataArray(np.array(lonuse[count]), name='lon')
    lat_xr = xr.DataArray(np.array(latuse[count]), name='lat')
    shflx_xr = xr.DataArray(np.array(shflx), coords=[timeout], dims=['time'], name='shflx')
    tas_xr = xr.DataArray(np.array(tas), coords=[timeout], dims=['time'], name='tas')
    g_xr = xr.DataArray(np.array(g), coords=[timeout], dims=['time'], name='g')
    netrad_xr = xr.DataArray(np.array(netrad), coords=[timeout], dims=['time'], name='netrad')
    shflx_qc_xr = xr.DataArray(np.array(shflx_qc), coords=[timeout], dims=['time'], name='shflx_qc')
    tas_qc_xr = xr.DataArray(np.array(tas_qc), coords=[timeout], dims=['time'], name='tas_qc')
    g_qc_xr = xr.DataArray(np.array(g_qc), coords=[timeout], dims=['time'], name='g_qc')
    netrad_qc_xr = xr.DataArray(np.array(netrad_qc), coords=[timeout], dims=['time'], name='netrad_qc')
    # Write one netCDF per station: first write creates the file, the rest
    # append their variable to it (order matters only for the initial write).
    shflx_xr.to_netcdf(path=outdir+istation+".nc")
    tas_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    g_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    netrad_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    netrad_qc_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    shflx_qc_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    tas_qc_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    g_qc_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    lon_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    lat_xr.to_netcdf(path=outdir+istation+".nc", mode="a")
    count=count+1
| examples/fluxnetread_allstations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. Requirement
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from art.attacks import FastGradientMethod
from art.attacks import DeepFool
from art.attacks import SaliencyMapMethod
from art.attacks import ProjectedGradientDescent
from art.classifiers import PyTorchClassifier
from art.utils import load_mnist
# -
# ## 2. Model
class MnistModel(nn.Module):
    """
    Small CNN classifier for MNIST (28x28 single-channel images).

    Two 5x5 conv layers (each followed by ReLU and 2x2 max-pooling) reduce
    the input to 64 feature maps of size 7x7; two fully-connected layers
    then map the flattened features to log-probabilities over 10 classes.
    """
    def __init__(self):
        super(MnistModel, self).__init__()
        # 1 input channel (grayscale); padding=2 keeps the 28x28 size.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
        # After the first pool the maps are 14x14; 32 channels in, 64 out.
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        # After the second pool the maps are 7x7, so the flattened
        # feature vector fed to the fully-connected layers is 64*7*7 long.
        self.fc1 = nn.Linear(64 * 7 * 7, 1024)
        self.fc2 = nn.Linear(1024, 10)

    def forward(self, inputs):
        out = F.max_pool2d(F.relu(self.conv1(inputs)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        # Flatten the feature maps for the fully-connected layers.
        out = out.view(-1, 64 * 7 * 7)
        out = F.relu(self.fc1(out))
        # Dropout (active only in training mode) to reduce overfitting.
        out = F.dropout(out, training=self.training)
        out = self.fc2(out)
        return F.log_softmax(out, dim=1)
# ## 3. Load MNIST dataset
# +
(x_train, y_train), (x_test, y_test), min_pixel_value, max_pixel_value = load_mnist()
# swapaxes(1, 3) moves channels first for PyTorch (and transposes the two
# spatial axes in the process — hence the rot90/flip applied when plotting
# the images later in this notebook).
x_train = np.swapaxes(x_train, 1, 3).astype(np.float32)
x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)
# -
# ## 4. Load the models
# +
# Use a GPU when available; models and tensors are moved to this device.
is_cuda = torch.cuda.is_available()
device = torch.device('cuda' if is_cuda else 'cpu')
if is_cuda: print("CUDA available!")
models = []  # the four classifiers under comparison, loaded below
# +
# The four checkpoints to compare: undefended, FGSM-trained, PGD-trained,
# and trained on all attacks. Loading is identical for each, so drive it
# from a list instead of four copy-pasted cells.
checkpoints = [
    ('./model/mnist_um_art.pth', 'model[0]: undefended model loaded'),
    ('./model/mnist_fgsm_art.pth', 'model[1]: Linf trained model1 loaded'),
    ('./model/mnist_pgd_art.pth', 'model[2]: Linf trained model2 loaded'),
    ('./model/mnist_all_art.pth', 'model[3]: All trained model loaded'),
]
for model_path, message in checkpoints:
    model = MnistModel().to(device)
    # map_location lets checkpoints saved on GPU load on CPU-only machines.
    model.load_state_dict(torch.load(model_path, map_location=device))
    models.append(model)
    print(message)
# -
# ## 5. Create ART classifier
# +
criterion = nn.CrossEntropyLoss()
# model order: 0-undefended, 1-fgsm, 2-pgd, 3-all
print('model #: 0-undefended, 1-fgsm, 2-pgd, 3-all')
classifiers = []
# Wrap each PyTorch model in an ART classifier so the attack and
# evaluation APIs can drive it.
for model in models:
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    classifier = PyTorchClassifier(model=model, clip_values=(min_pixel_value, max_pixel_value), loss=criterion,
                                   optimizer=optimizer, input_shape=(1, 28, 28), nb_classes=10)
    classifiers.append(classifier)
print("classifiers created")
# -
# ## 6. Generate adversarial test examples
# +
# Generate FGSM (Linf, eps=0.3) adversarial test examples, crafted against
# the FGSM-trained classifier (classifiers[1]).
adv_tests = []
fgsm_trained = classifiers[1]
Linf1_attack = FastGradientMethod(classifier=fgsm_trained, eps=0.3)
Linf1_x_test_adv = Linf1_attack.generate(x=x_test)
adv_tests.append(Linf1_x_test_adv)
print('FGSM example generated')
# -
# ## 7. Evaluate accuracy
# +
models_accuracy = []
examples = []
print('model #: 0-um, 1-fgsm, 2-pgd, 3-all')
print('test #: 0-fgsm: criterion-model[1]')
# test order: 0-fgsm
# For each classifier, record accuracy on the benign test set and on each
# adversarial test set, keeping the first 5 misclassified examples of each
# as (image, predicted_label, true_label) tuples for later visualization.
for i, (classifier) in enumerate(classifiers):
    print('\nmodel #{}'.format(i))
    model_accuracy = []
    example = []
    predictions = classifier.predict(x_test)
    accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
    print('Accuracy on benign test examples: {}%'.format(accuracy * 100))
    model_accuracy.append(accuracy)
    # Collect up to 5 benign misclassifications.
    # NOTE(review): if a classifier has fewer than 5 misclassifications,
    # this list is never appended to `example` — confirm downstream
    # indexing tolerates that.
    b_ex = []
    for idx, (pred) in enumerate(predictions):
        if np.argmax(pred) != np.argmax(y_test[idx]):
            b_ex.append((x_test[idx], np.argmax(pred), np.argmax(y_test[idx])))
            if len(b_ex) == 5:
                example.append(b_ex)
                break
    for j, (x_test_adv) in enumerate(adv_tests):
        adv_ex = []
        predictions = classifier.predict(x_test_adv)
        accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
        print('Accuracy on model with adversarial test #{} examples: {}%'.format(j, accuracy * 100))
        model_accuracy.append(accuracy)
        # Collect up to 5 adversarial misclassifications for this attack.
        for idx, (pred) in enumerate(predictions):
            if np.argmax(pred) != np.argmax(y_test[idx]):
                adv_ex.append((x_test_adv[idx], np.argmax(pred), np.argmax(y_test[idx])))
                if len(adv_ex) == 5:
                    example.append(adv_ex)
                    break
    models_accuracy.append(model_accuracy)
    examples.append(example)
# -
# ## 8. Result
# +
import pandas as pd
from pandas import DataFrame
# Accuracy table: rows are the test sets (benign, FGSM); each column holds
# one model's [benign, fgsm] accuracies from models_accuracy.
data = {
    'clean': models_accuracy[0],
    'Linf1': models_accuracy[1],
    'Linf2': models_accuracy[2],
    'ALL': models_accuracy[3],
}
columns = ['clean', 'Linf1', 'Linf2', 'ALL']
idx = ['Benign', 'FGSM']
DataFrame(data, columns=columns, index=idx)
# +
from torchvision.utils import save_image
import matplotlib.pyplot as plt
# %matplotlib inline
SAVE_PATH = './examples/08/'
# Plot and save every collected misclassified example, one figure per
# (model, test-set) pair, titled "true -> predicted".
# The flip/rot90 presumably undoes the axis transposition introduced by
# the swapaxes at load time so digits display upright — TODO confirm.
for i, (example) in enumerate(examples):
    for j, (images) in enumerate(example):
        plt.figure(figsize=(10,10))
        cnt = 0
        for k, (image, pred, orig) in enumerate(images):
            cnt+=1
            plt.subplot(5, len(images), cnt)
            if k == 0:
                plt.ylabel(str(j))
            plt.title('{} -> {}'.format(orig, pred))
            plt.imshow(np.rot90(np.flip(image.reshape(28, 28, order='C'), axis=1)), cmap='gray')
            title = SAVE_PATH + str(i) +'_'+ str(j) + '_[' + str(orig) + '_' + str(pred) +'].png'
            save_image(torch.from_numpy(np.rot90(np.flip(image.reshape(28, 28, order='C'), axis=1))), title)
        plt.tight_layout()
        plt.show()
# -
| art-lib/08fgsm_grad_criterion_trained.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Device identifiers used throughout the analysis.
ANDROID = "android"
BROWSER = "browser" # habitlab goal, i.e. facebook/spend_less_time or custom/spend_less_time_developers.slashdot.org
BROWSER_DOMAIN = "browser_domain"
SHARED = "shared"
PACKAGES = "packages"
SPEND_LESS_TIME_LENGTH = len("custom/spend_less_time_")
# Associate users with domain name which will function as our key.
# Top-level-domain-names that are not pertinent to the application.
TLDs = ['aaa', 'abb', 'abc', 'ac', 'aco', 'ad', 'ads', 'ae', 'aeg', 'af', 'afl', 'ag', 'ai', 'aig', 'al', 'am', 'anz', 'ao', 'aol', 'app', 'aq', 'ar', 'art', 'as', 'at', 'au', 'aw', 'aws', 'ax', 'axa', 'az', 'ba', 'bar', 'bb', 'bbc', 'bbt', 'bcg', 'bcn', 'bd', 'be', 'bet', 'bf', 'bg', 'bh', 'bi', 'bid', 'bio', 'biz', 'bj', 'bm', 'bms', 'bmw', 'bn', 'bnl', 'bo', 'bom', 'boo', 'bot', 'box', 'br', 'bs', 'bt', 'buy', 'bv', 'bw', 'by', 'bz', 'bzh', 'ca', 'cab', 'cal', 'cam', 'car', 'cat', 'cba', 'cbn', 'cbs', 'cc', 'cd', 'ceb', 'ceo', 'cf', 'cfa', 'cfd', 'cg', 'ch', 'ci', 'ck', 'cl', 'cm', 'cn', 'co', 'com', 'cr', 'crs', 'csc', 'cu', 'cv', 'cw', 'cx', 'cy', 'cz', 'dad', 'day', 'dds', 'de', 'dev', 'dhl', 'diy', 'dj', 'dk', 'dm', 'dnp', 'do', 'dog', 'dot', 'dtv', 'dvr', 'dz', 'eat', 'ec', 'eco', 'edu', 'ee', 'eg', 'er', 'es', 'esq', 'et', 'eu', 'eus', 'fan', 'fi', 'fit', 'fj', 'fk', 'fly', 'fm', 'fo', 'foo', 'fox', 'fr', 'frl', 'ftr', 'fun', 'fyi', 'ga', 'gal', 'gap', 'gb', 'gd', 'gdn', 'ge', 'gea', 'gf', 'gg', 'gh', 'gi', 'gl', 'gle', 'gm', 'gmo', 'gmx', 'gn', 'goo', 'gop', 'got', 'gov', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu', 'gw', 'gy', 'hbo', 'hiv', 'hk', 'hkt', 'hm', 'hn', 'hot', 'how', 'hr', 'ht', 'hu', 'ibm', 'ice', 'icu', 'id', 'ie', 'ifm', 'il', 'im', 'in', 'inc', 'ing', 'ink', 'int', 'io', 'iq', 'ir', 'is', 'ist', 'it', 'itv', 'jcb', 'jcp', 'je', 'jio', 'jlc', 'jll', 'jm', 'jmp', 'jnj', 'jo', 'jot', 'joy', 'jp', 'ke', 'kfh', 'kg', 'kh', 'ki', 'kia', 'kim', 'km', 'kn', 'kp', 'kpn', 'kr', 'krd', 'kw', 'ky', 'kz', 'la', 'lat', 'law', 'lb', 'lc', 'lds', 'li', 'lk', 'llc', 'lol', 'lpl', 'lr', 'ls', 'lt', 'ltd', 'lu', 'lv', 'ly', 'ma', 'man', 'map', 'mba', 'mc', 'md', 'me', 'med', 'men', 'mg', 'mh', 'mil', 'mit', 'mk', 'ml', 'mlb', 'mls', 'mm', 'mma', 'mn', 'mo', 'moe', 'moi', 'mom', 'mov', 'mp', 'mq', 'mr', 'ms', 'msd', 'mt', 'mtn', 'mtr', 'mu', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'nab', 'nba', 'nc', 'ne', 'nec', 'net', 'new', 'nf', 'nfl', 'ng', 'ngo', 'nhk', 'ni', 'nl', 
'no', 'now', 'np', 'nr', 'nra', 'nrw', 'ntt', 'nu', 'nyc', 'nz', 'obi', 'off', 'om', 'one', 'ong', 'onl', 'ooo', 'org', 'ott', 'ovh', 'pa', 'pay', 'pe', 'pet', 'pf', 'pg', 'ph', 'phd', 'pid', 'pin', 'pk', 'pl', 'pm', 'pn', 'pnc', 'pr', 'pro', 'pru', 'ps', 'pt', 'pub', 'pw', 'pwc', 'py', 'qa', 'qvc', 're', 'red', 'ren', 'ril', 'rio', 'rip', 'ro', 'rs', 'ru', 'run', 'rw', 'rwe', 'sa', 'sap', 'sas', 'sb', 'sbi', 'sbs', 'sc', 'sca', 'scb', 'sd', 'se', 'ses', 'sew', 'sex', 'sfr', 'sg', 'sh', 'si', 'sj', 'sk', 'ski', 'sky', 'sl', 'sm', 'sn', 'so', 'soy', 'sr', 'srl', 'srt', 'st', 'stc', 'su', 'sv', 'sx', 'sy', 'sz', 'tab', 'tax', 'tc', 'tci', 'td', 'tdk', 'tel', 'tf', 'tg', 'th', 'thd', 'tj', 'tjx', 'tk', 'tl', 'tm', 'tn', 'to', 'top', 'tr', 'trv', 'tt', 'tui', 'tv', 'tvs', 'tw', 'tz', 'ua', 'ubs', 'ug', 'uk', 'uno', 'uol', 'ups', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vet', 'vg', 'vi', 'vig', 'vin', 'vip', 'vn', 'vu', 'wed', 'wf', 'win', 'wme', 'wow', 'ws', 'wtc', 'wtf', 'xin', 'xxx', 'xyz', 'ye', 'you', 'yt', 'yun', 'za', 'zip', 'zm', 'zw']
def get_name(name, device):
    """
    @param name: goal name (package name for Android)
    @param device: "android" or "browser" or "browser_domain"
    @return name of goal with subdomains removed and goal annotation removed (i.e. spend_less_time)
    """
    name = name.lower()
    if "custom" in name and device == BROWSER:
        # strip off the "custom/spend_less_time_"
        name = name[SPEND_LESS_TIME_LENGTH:]
    elif device == BROWSER:
        return name.split('/spend')[0]
    # Now we have to get the juicy part of the domain: drop common TLDs and
    # boilerplate labels ("android", "google", "apps"), keeping only the
    # meaningful components.
    subs = list(filter(lambda x: x != "android" and x != "google" and x != "apps" and x not in TLDs, name.split('.')))
    if device == ANDROID:
        if len(subs) > 0:
            return subs[0]
        return name
    else:
        if len(subs) > 0:
            return subs[len(subs) - 1]
        # Bug fix: previously this branch fell through and returned None when
        # every component was filtered out (e.g. "google.com"); fall back to
        # the full lower-cased name instead, mirroring the ANDROID branch.
        return name
# +
import sys
# Install runtime dependencies into the current kernel's environment.
# !{sys.executable} -m pip install pymongo
# !{sys.executable} -m pip install pyyaml
# !{sys.executable} -m pip install matplotlib
# !{sys.executable} -m pip install scipy
# !{sys.executable} -m pip install pandas
# !{sys.executable} -m pip install isoweek
import os
print(os.getcwd())
# Get Mongo database handles: the primary database plus an "ext" instance,
# with credentials pulled from the secrets store.
from yaml import load
from pymongo import MongoClient
from getsecret import getsecret
client = MongoClient(getsecret("MONGODB_URI"))
db = client[getsecret("DB_NAME")]
ext_client = MongoClient(getsecret("EXT_URI"))
ext_db = ext_client[getsecret("DB_NAME")]
# Get all synced accounts and their respective users from the local service.
import urllib.request as req
import json
accounts = json.loads(req.urlopen("http://localhost:5000/synced_emails").read().decode("utf-8"))
print(accounts)
# counter for figures
counter = 0
# -
# Collect every distinct session domain recorded for any synced Android user.
all_domains = set([])
for account in accounts:
    for user in account[ANDROID]:
        for session in db[user +'_sessions'].find():
            all_domains.add(session["domain"])
print(all_domains)
# Group the raw domains by their canonical name (per get_name) to check
# that variants of the same service collapse onto a single key.
name_to_domain = {}
for domain in all_domains:
    name = get_name(domain, ANDROID)
    if name not in name_to_domain:
        name_to_domain[name] = set([])
    name_to_domain[name].add(domain)
print(name_to_domain)
# +
# Browser version of this
# -
| Test get_name function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Discrete-Time Black Scholes
# Welcome to your 1st assignment in Reinforcement Learning in Finance. This exercise will introduce Black-Scholes model as viewed through the lens of pricing an option as discrete-time replicating portfolio of stock and bond.
#
# **Instructions:**
# - You will be using Python 3.
# - Avoid using for-loops and while-loops, unless you are explicitly told to do so.
# - Do not modify the (# GRADED FUNCTION [function name]) comment in some cells. Your work would not be graded if you change this. Each cell containing that comment should only contain one function.
# - After coding your function, run the cell right below it to check if your result is correct.
#
#
# Let's get started!
# ## About iPython Notebooks ##
#
# iPython Notebooks are interactive coding environments embedded in a webpage. You will be using iPython notebooks in this class. You only need to write code between the ### START CODE HERE ### and ### END CODE HERE ### comments. After writing your code, you can run the cell by either pressing "SHIFT"+"ENTER" or by clicking on "Run Cell" (denoted by a play symbol) in the upper bar of the notebook.
#
# We will often specify "(≈ X lines of code)" in the comments to tell you about how much code you need to write. It is just a rough estimate, so don't feel bad if your code is longer or shorter.
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from numpy.random import standard_normal, seed
import scipy.stats as stats
from scipy.stats import norm
import sys
sys.path.append("..")
import grading
import datetime
import time
import bspline
import bspline.splinelab as splinelab
# -
### ONLY FOR GRADING. DO NOT EDIT ###
submissions=dict()
assignment_key="<KEY>"
all_parts=["9jLRK","YoMns","Wc3NN","fcl3r"]
### ONLY FOR GRADING. DO NOT EDIT ###
# Coursera submission credentials — fill in from the assignment page.
COURSERA_TOKEN = '<KEY>' # the key provided to the Student under his/her email on submission page
COURSERA_EMAIL = '<EMAIL>' # the email
# +
# The Black-Scholes prices
def bs_put(t, S0, K, r, sigma, T):
    """
    Black-Scholes price of a European put at time t.

    S0: spot price, K: strike, r: risk-free rate, sigma: volatility,
    T: maturity — so the remaining lifetime is tau = T - t.
    """
    tau = T - t
    vol_sqrt_tau = sigma * np.sqrt(tau)
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * tau) / vol_sqrt_tau
    d2 = d1 - vol_sqrt_tau
    return K * np.exp(-r * tau) * norm.cdf(-d2) - S0 * norm.cdf(-d1)
def bs_call(t, S0, K, r, sigma, T):
    """
    Black-Scholes price of a European call at time t.

    S0: spot price, K: strike, r: risk-free rate, sigma: volatility,
    T: maturity — so the remaining lifetime is tau = T - t.
    """
    tau = T - t
    vol_sqrt_tau = sigma * np.sqrt(tau)
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * tau) / vol_sqrt_tau
    d2 = d1 - vol_sqrt_tau
    return S0 * norm.cdf(d1) - K * np.exp(-r * tau) * norm.cdf(d2)
def d1(S0, K, r, sigma, T):
    """Black-Scholes d1 term for spot S0, strike K, rate r, vol sigma, time-to-maturity T."""
    return (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
def d2(S0, K, r, sigma, T):
    """Black-Scholes d2 term (equals d1 minus sigma*sqrt(T))."""
    return (np.log(S0 / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
# -
# Simulate $N_{MC}$ stock price sample paths with $T$ steps by the classical Black-Sholes formula.
#
# $$dS_t=\mu S_tdt+\sigma S_tdW_t\quad\quad S_{t+1}=S_te^{\left(\mu-\frac{1}{2}\sigma^2\right)\Delta t+\sigma\sqrt{\Delta t}Z}$$
#
# where $Z$ is a standard normal random variable.
#
# MC paths are simulated by GeneratePaths() method of DiscreteBlackScholes class.
# ### Part 1
#
#
# Class DiscreteBlackScholes implements the above calculations with class variables to math symbols mapping of:
#
# $$\Delta S_t=S_{t+1} - e^{-r\Delta t} S_t\space \quad t=T-1,...,0$$
#
# **Instructions:**
# Some portions of code in DiscreteBlackScholes have been taken out. You are to implement the missing portions of code in DiscreteBlackScholes class.
#
# $$\Pi_t=e^{-r\Delta t}\left[\Pi_{t+1}-u_t \Delta S_t\right]\quad t=T-1,...,0$$
#
# - implement DiscreteBlackScholes.function_A_vec() method
# $$A_{nm}^{\left(t\right)}=\sum_{k=1}^{N_{MC}}{\Phi_n\left(X_t^k\right)\Phi_m\left(X_t^k\right)\left(\Delta\hat{S}_t^k\right)^2}\quad\quad$$
#
# - implement DiscreteBlackScholes.function_B_vec() method
# $$B_n^{\left(t\right)}=\sum_{k=1}^{N_{MC}}{\Phi_n\left(X_t^k\right)\left[\hat\Pi_{t+1}^k\Delta\hat{S}_t^k+\frac{1}{2\gamma\lambda}\Delta S_t^k\right]}$$
# - implement DiscreteBlackScholes.gen_paths() method using the following relation:
# $$S_{t+1}=S_te^{\left(\mu-\frac{1}{2}\sigma^2\right)\Delta t+\sigma\sqrt{\Delta t}Z}$$
# where $Z \sim N(0,1)$
# - implement parts of DiscreteBlackScholes.roll_backward()
# - DiscreteBlackScholes.bVals corresponds to $B_t$ and is computed as $$B_t = e^{-r\Delta t}\left[B_{t+1} + (u_{t+1} - u_t)S_{t+1}\right]\quad t=T-1,...,0$$
#
# DiscreteBlackScholes.opt_hedge corresponds to $\phi_t$ and is computed as
# $$\phi_t=\mathbf A_t^{-1}\mathbf B_t$$
class DiscreteBlackScholes:
    """
    Discrete-time Black-Scholes pricing and hedging.

    Prices and hedges a one-dimensional European option under the
    real-world measure, using Monte Carlo paths and cross-sectional
    least-squares regression on a B-spline basis (Eq. (52) of the
    QLBS Q-Learner in the Black-Scholes-Merton article).
    """

    def __init__(self,
                 s0,
                 strike,
                 vol,
                 T,
                 r,
                 mu,
                 numSteps,
                 numPaths):
        """
        :param s0: initial price of the underlying
        :param strike: option strike
        :param vol: volatility
        :param T: time to maturity, in years
        :param r: risk-free rate
        :param mu: real drift, asset drift
        :param numSteps: number of time steps
        :param numPaths: number of Monte Carlo paths
        """
        self.s0 = s0
        self.strike = strike
        self.vol = vol
        self.T = T
        self.r = r
        self.mu = mu
        self.numSteps = numSteps
        self.numPaths = numPaths
        self.dt = self.T / self.numSteps    # length of one time step
        self.gamma = np.exp(-r * self.dt)   # discount factor for one time step, i.e. gamma in the QLBS paper
        # matrix of stock values: one row per path, one column per time node 0..numSteps
        self.sVals = np.zeros((self.numPaths, self.numSteps + 1), 'float')
        # All paths start at s0.  (A dead `if False:` branch that spread half of
        # the starting values over [0.5*s0, 1.5*s0], following Grau (2010)
        # "Applications of Least-Squares Regressions to Pricing and Hedging of
        # Financial Derivatives", was removed: it could never execute and its
        # result was overwritten by the assignment below anyway.)
        self.sVals[:, 0] = s0 * np.ones(numPaths, 'float')
        self.optionVals = np.zeros((self.numPaths, self.numSteps + 1), 'float')     # matrix of option values
        self.intrinsicVals = np.zeros((self.numPaths, self.numSteps + 1), 'float')  # matrix of intrinsic (payoff) values
        self.bVals = np.zeros((self.numPaths, self.numSteps + 1), 'float')          # matrix of cash position values
        # matrix of optimal hedges calculated from cross-sectional information F_t
        self.opt_hedge = np.zeros((self.numPaths, self.numSteps + 1), 'float')
        self.X = None            # state variable, one value per (path, time)
        self.data = None         # basis-function expansion of self.X
        self.delta_S_hat = None  # de-meaned discounted stock increments
        # coef = 1.0/(2 * gamma * risk_lambda)
        # override it by zero to have pure risk hedge
        self.coef = 0.

    def gen_paths(self):
        """
        Generate geometric-Brownian-motion stock paths and build the
        B-spline basis expansion of the state variable X.
        """
        np.random.seed(42)  # fixed seed so runs are reproducible
        # Z ~ N(0,1); one standard-normal draw per (time step, path)
        Z = np.random.normal(0,
                             1,
                             size=(self.numSteps + 1, self.numPaths))
        # Exact GBM update applied column by column (vectorized over paths):
        # S_{t+1} = S_t * exp((mu - sigma^2/2)*dt + sigma*sqrt(dt)*Z)
        for t in range(self.numSteps):
            self.sVals[:, t + 1] = self.sVals[:, t] * np.exp(
                (self.mu - 0.5 * self.vol ** 2) * self.dt
                + self.vol * np.sqrt(self.dt) * Z.T[:, t + 1])
        # discounted stock price changes, like in QLBS
        delta_S = self.sVals[:, 1:] - np.exp(self.r * self.dt) * self.sVals[:, :self.numSteps]
        self.delta_S_hat = np.apply_along_axis(lambda x: x - np.mean(x), axis=0, arr=delta_S)
        # state variable; the deterministic drift term is subtracted
        # (delta_t here is due to the QLBS conventions)
        self.X = - (self.mu - 0.5 * self.vol ** 2) * np.arange(self.numSteps + 1) * self.dt + np.log(self.sVals)
        X_min = np.min(np.min(self.X))
        X_max = np.max(np.max(self.X))
        print('X.shape = ', self.X.shape)
        print('X_min, X_max = ', X_min, X_max)
        p = 4         # order of the spline
        ncolloc = 12  # number of collocation sites; need ncolloc >= p + 1 for meaningful results
        tau = np.linspace(X_min, X_max, ncolloc)  # sites to which we would like to interpolate
        # k is a knot vector that adds endpoint repeats as appropriate for a spline of order p
        k = splinelab.aptknt(tau, p)
        basis = bspline.Bspline(k, p)
        num_basis = ncolloc
        self.data = np.zeros((self.numSteps + 1, self.numPaths, num_basis))
        print('num_basis = ', num_basis)
        print('dim self.data = ', self.data.shape)
        # fill it: expand the state variable in the finite-dimensional basis
        # (in a neural-network approach the basis would be the network itself)
        t_0 = time.time()
        for ix in np.arange(self.numSteps + 1):
            x = self.X[:, ix]
            self.data[ix, :, :] = np.array([basis(el) for el in x])
        t_end = time.time()
        print('\nTime Cost of basis expansion:', t_end - t_0, 'seconds')

    def function_A_vec(self, t, reg_param=1e-3):
        """
        Compute the matrix A_{nm} from Eq. (52) (with a regularization!)
        of the QLBS Q-Learner in the Black-Scholes-Merton article.

        :param t: time index, a scalar, an index into the time axis of self.data
        :param reg_param: a scalar, regularization parameter
        :return: np.array, matrix A_{nm} of dimension num_basis x num_basis
        """
        X_mat = self.data[t, :, :]
        num_basis_funcs = X_mat.shape[1]
        this_dS = self.delta_S_hat[:, t]
        hat_dS2 = (this_dS ** 2).reshape(-1, 1)
        # A = X^T diag(dS_hat^2) X + reg * I; the ridge term keeps A invertible
        A_mat = np.dot(X_mat.T, X_mat * hat_dS2) + reg_param * np.eye(num_basis_funcs)
        return A_mat

    def function_B_vec(self, t, Pi_hat):
        """
        Compute vector B_{n} from Eq. (52) of the QLBS Q-Learner in the
        Black-Scholes-Merton article.

        :param t: time index, a scalar, an index into the time axis of delta_S_hat
        :param Pi_hat: de-meaned portfolio values at t+1, one per Monte Carlo path
        :return: B_vec - np.array() of dimension num_basis x 1
        """
        # self.coef is overridden to 0 in __init__, so the second term vanishes
        # (pure risk hedge)
        tmp = Pi_hat * self.delta_S_hat[:, t] + self.coef * (np.exp((self.mu - self.r) * self.dt)) * self.sVals[:, t]
        X_mat = self.data[t, :, :]  # matrix of dimension N_MC x num_basis
        B_vec = np.dot(X_mat.T, tmp)
        return B_vec

    def seed_intrinsic(self, strike=None, cp='P'):
        """
        Initialize the option value and the intrinsic value for each node.

        :param strike: optional new strike; when given it replaces self.strike
        :param cp: 'P' for a put payoff (default), 'C' for a call payoff
        :raises Exception: if cp is neither 'P' nor 'C'
        """
        if strike is not None:
            self.strike = strike
        if cp == 'P':
            # payoff function at maturity T: max(K - S(T), 0) for all paths
            self.optionVals = np.maximum(self.strike - self.sVals[:, -1], 0).copy()
            # payoff function for all paths, at all time slices
            self.intrinsicVals = np.maximum(self.strike - self.sVals, 0).copy()
        elif cp == 'C':
            # payoff function at maturity T: max(S(T) - K, 0) for all paths
            self.optionVals = np.maximum(self.sVals[:, -1] - self.strike, 0).copy()
            # payoff function for all paths, at all time slices
            self.intrinsicVals = np.maximum(self.sVals - self.strike, 0).copy()
        else:
            raise Exception('Invalid parameter: %s' % cp)
        # terminal cash position equals the terminal payoff
        self.bVals[:, -1] = self.intrinsicVals[:, -1]

    def roll_backward(self):
        """
        Roll the price and optimal hedge back in time starting from maturity.

        :return: tuple (optionVal, delta, optionValVar) - the mean initial
            portfolio value, the mean hedge ratio at t=0, and the standard
            deviation of the initial portfolio value across paths
        """
        for t in range(self.numSteps - 1, -1, -1):
            # determine the expected portfolio value at the next time node
            piNext = self.bVals[:, t + 1] + self.opt_hedge[:, t + 1] * self.sVals[:, t + 1]
            pi_hat = piNext - np.mean(piNext)
            A_mat = self.function_A_vec(t)
            B_vec = self.function_B_vec(t, pi_hat)
            # regression coefficients phi_t = A^{-1} B, projected onto the basis
            phi = np.dot(np.linalg.inv(A_mat), B_vec)
            self.opt_hedge[:, t] = np.dot(self.data[t, :, :], phi)
            # discounted self-financing update of the cash position
            self.bVals[:, t] = np.exp(-self.r * self.dt) * (
                self.bVals[:, t + 1]
                + (self.opt_hedge[:, t + 1] - self.opt_hedge[:, t]) * self.sVals[:, t + 1])
        # calculate the initial portfolio value per path
        initPortfolioVal = self.bVals[:, 0] + self.opt_hedge[:, 0] * self.sVals[:, 0]
        optionVal = np.mean(initPortfolioVal)
        optionValVar = np.std(initPortfolioVal)
        delta = np.mean(self.opt_hedge[:, 0])
        return optionVal, delta, optionValVar
# +
# Sanity check: run a single cross-sectional regression step at the last
# time node (t = numSteps - 1) with an arbitrary constant hedge of 0.1.
np.random.seed(42)
strike_k = 95
test_vol = 0.2
test_mu = 0.03
dt = 0.01
rfr = 0.05
num_paths = 100
num_periods = 252
hMC = DiscreteBlackScholes(100, strike_k, test_vol, 1., rfr, test_mu, num_periods, num_paths)
hMC.gen_paths()
t = hMC.numSteps - 1
piNext = hMC.bVals[:, t+1] + 0.1 * hMC.sVals[:, t+1]
pi_hat = piNext - np.mean(piNext)
A_mat = hMC.function_A_vec(t)
B_vec = hMC.function_B_vec(t, pi_hat)
phi = np.dot(np.linalg.inv(A_mat), B_vec)
opt_hedge = np.dot(hMC.data[t, :, :], phi)
# plot the results
fig = plt.figure(figsize=(12,4))
ax1 = fig.add_subplot(121)
ax1.scatter(hMC.sVals[:,t], pi_hat)
ax1.set_title(r'Expected $\Pi_0$ vs. $S_t$')
ax1.set_xlabel(r'$S_t$')
ax1.set_ylabel(r'$\Pi_0$')
# +
### GRADED PART (DO NOT EDIT) ###
part_1 = list(pi_hat)
try:
    part1 = " ".join(map(repr, part_1))
except TypeError:
    part1 = repr(part_1)
submissions[all_parts[0]]=part1
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:1],all_parts,submissions)
pi_hat
### GRADED PART (DO NOT EDIT) ###
# +
# input parameters
s0 = 100.0
strike = 100.0
r = 0.05
mu = 0.07 # 0.05
vol = 0.4
T = 1.0
# Simulation Parameters
numPaths = 50000 # number of Monte Carlo trials
numSteps = 6
# create the class object
hMC = DiscreteBlackScholes(s0, strike, vol, T, r, mu, numSteps, numPaths)
# calculation
hMC.gen_paths()
hMC.seed_intrinsic()
option_val, delta, option_val_variance = hMC.roll_backward()
# NOTE(review): despite the variable name, this is a PUT price (bs_put) -
# consistent with seed_intrinsic()'s default cp='P' above.
bs_call_value = bs_put(0, s0, K=strike, r=r, sigma=vol, T=T)
print('Option value = ', option_val)
print('Option value variance = ', option_val_variance)
print('Option delta = ', delta)
print('BS value', bs_call_value)
# -
### GRADED PART (DO NOT EDIT) ###
part2 = str(option_val)
submissions[all_parts[1]]=part2
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:2],all_parts,submissions)
option_val
### GRADED PART (DO NOT EDIT) ###
# Compare Monte Carlo prices and deltas against the closed-form
# Black-Scholes values over a range of strikes.
strikes = np.linspace(85, 110, 6)
results = [None] * len(strikes)
bs_prices = np.zeros(len(strikes))
bs_deltas = np.zeros(len(strikes))
numPaths = 50000
hMC = DiscreteBlackScholes(s0, strike, vol, T, r, mu, numSteps, numPaths)
hMC.gen_paths()
for ix, k_strike in enumerate(strikes):
    hMC.seed_intrinsic(k_strike)
    results[ix] = hMC.roll_backward()
    bs_prices[ix] = bs_put(0, s0, K=k_strike, r=r, sigma=vol, T=T)
    # European put delta: N(d1) - 1
    bs_deltas[ix] = norm.cdf(d1(s0, K=k_strike, r=r, sigma=vol, T=T)) - 1
bs_prices
mc_prices = np.array([x[0] for x in results])
mc_deltas = np.array([x[1] for x in results])
price_variances = np.array([x[-1] for x in results])
prices_diff = mc_prices - bs_prices
deltas_diff = mc_deltas - bs_deltas
# price_variances
# +
### GRADED PART (DO NOT EDIT) ###
part_3 = list(prices_diff)
try:
    part3 = " ".join(map(repr, part_3))
except TypeError:
    part3 = repr(part_3)
submissions[all_parts[2]]=part3
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:3],all_parts,submissions)
prices_diff
### GRADED PART (DO NOT EDIT) ###
# -
### GRADED PART (DO NOT EDIT) ###
part_4 = list(deltas_diff)
try:
    part4 = " ".join(map(repr, part_4))
except TypeError:
    part4= repr(part_4)
submissions[all_parts[3]]=part4
grading.submit(COURSERA_EMAIL, COURSERA_TOKEN, assignment_key,all_parts[:4],all_parts,submissions)
deltas_diff
### GRADED PART (DO NOT EDIT) ###
| Machine _Learning_and_Reinforcement_Learning_in_Finance/03_Reinforcement_Learning_in_Finance/01_Discrete_time_Black_Scholes_model/discrete_black_scholes_m3_ex1_v3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Maximum product of three
# Given a list of integers, return the maximum product that can be formed by multiplying any three integers.
#
# Sample input:
# ```
# [-10, -20, 5, 2, -7, 3]
# ```
#
# Output:
# ```
# 1000 (-10 * -20 * 5)
# ```
#
# Assume that the list has at least 3 integers.
# ## Approach
# If all the integers were positive, we'd simply take the three largest numbers of the array i.e sort and return the last three elements.
#
# However, we need to account for negative numbers in the array. If the largest product can be made by negative numbers, we'd need to have two so as to cancel out the negatives. So, we can take the larger of:
#
# - The three largest numbers
# - The two smallest (most negative) numbers, and the largest number
def product(array) -> int:
    """Return the maximum product of any three integers in ``array``.

    O(N log N) time, because of sorting.  Uses ``sorted()`` instead of an
    in-place ``array.sort()`` so the caller's list is left unmodified
    (the original implementation mutated its argument as a side effect).

    The answer is the larger of:
    - the product of the three largest values, or
    - the product of the two smallest (most negative) values and the
      largest value, which covers the case of two big negatives.
    """
    ordered = sorted(array)
    max1, max2, max3 = ordered[-1], ordered[-2], ordered[-3]
    min1, min2 = ordered[0], ordered[1]
    return max(max1 * max2 * max3, min1 * min2 * max1)


product([-10, -20, 5, 2, -7, 3])
# # 2nd Approach
# We can look for the largest elements manually and create a solution that runs in O(N) time.
# +
from math import inf

def max_product(array) -> int:
    """Return the largest product of any three integers in ``array``.

    Single O(N) pass: track the three largest values and the two
    smallest (most negative) values, then compare the two candidate
    products - top three, or the two most negative times the maximum.
    """
    top1 = top2 = top3 = -inf  # three largest, top1 >= top2 >= top3
    low1 = low2 = inf          # two smallest, low1 <= low2
    for value in array:
        # maintain the three largest values seen so far
        if value > top1:
            top1, top2, top3 = value, top1, top2
        elif value > top2:
            top2, top3 = value, top2
        elif value > top3:
            top3 = value
        # maintain the two smallest values seen so far
        if value < low1:
            low1, low2 = value, low1
        elif value < low2:
            low2 = value
    return max(top1 * top2 * top3, top1 * low1 * low2)
# -

max_product([-10, -20, 5, 2, -7, 3])
| arrays/max_product_of_three.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using Supplychainpy with Pandas
# ** *by <NAME>* **
#
# In workbook 0.0.4-Supplychainpy-Inventory-Analysis, we used standard Python types (dict, list, tuple, etc..). This analysis can also be conducted using a Pandas `DataFrame` giving us many more options for quick and easy analysis.
#
# To use Supplychainpy library with Pandas, we first need to import the right modules and read a CSV file to a Pandas DataFrame. The `%matplotlib inline` statement is used so we can see the matplotlib plots in the jupyter notebook.
# First, we read the raw data into the Pandas `DataFrame`, as shown below.
# +
# %matplotlib inline
import matplotlib
import pandas as pd
from supplychainpy.model_inventory import analyse
from supplychainpy.model_demand import simple_exponential_smoothing_forecast
from supplychainpy.sample_data.config import ABS_FILE_PATH
from decimal import Decimal
# read the bundled sample data set into a DataFrame
raw_df =pd.read_csv(ABS_FILE_PATH['COMPLETE_CSV_SM'])
# -
# Passing a Pandas `DataFrame` as a keyword argument (df=) returns a `DataFrame` with the inventory profile analysed. Excluding the import statements, this can be achieved in 3 lines of code. There are several columns, so the print statement has been limited to a few ('sku','quantity_on_hand', 'excess_stock', 'shortages', 'ABC_XYZ_Classification').
orders_df = raw_df[['Sku','jan','feb','mar','apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']]
#orders_df.set_index('Sku')
# keyword arguments for supplychainpy's analyse()
analyse_kv =dict(
    df=raw_df,
    start=1,
    interval_length=12,
    interval_type='months',
    z_value=Decimal(1.28),
    reorder_cost=Decimal(400),
    retail_price=Decimal(455),
    file_type='csv',
    currency='USD'
)
analysis_df = analyse( **analyse_kv)
print(analysis_df[['sku','quantity_on_hand', 'excess_stock', 'shortages', 'ABC_XYZ_Classification']])
# We can transpose the table if necessary using:
print(analysis_df.T)
# We can sort the DataFrame using:
analysis_rev = analysis_df[['sku', 'revenue']]
print(analysis_rev.sort_values(by='revenue', ascending=True))
analysis_trans = analysis_df.T
print(analysis_trans[0])
# Before we can make a forecast, we need to select an SKU from the `analysis_df` variable. After selection, we slice the row to retrieve only orders data and convert to a `Series`.
# squeeze() collapses the single-row selection into a pandas Series
row_ds = raw_df[raw_df['Sku']=='KR202-212'].squeeze()
print(row_ds[1:12])
# Now that we have a `series` of orders data for the SKU `KR202-212`, we can now perform a forecast using the `model_demand` module. We can perform a simple_exponential_smoothing_forecast by passing the forecasting function the orders data using the keyword parameter `ds=`.
ses_df = simple_exponential_smoothing_forecast(ds=row_ds[1:12], length=12, smoothing_level_constant=0.5)
print(ses_df)
print(ses_df.get('forecast', 'UNKNOWN'))
# If we check the statistics for the forecast we can see whether there is a linear trend and subsequently if the forecast is useful.
print(ses_df.get('statistics', 'UNKNOWN'),'\n mape: {}'.format(ses_df.get('mape', 'UNKNOWN')))
# The breakdown of the forecast is returned with the `forecast` and `statistics`.
print(ses_df.get('forecast_breakdown', 'UNKNOWN'))
# We can convert the `forecast_breakdown` back into a `DataFrame`.
forecast_breakdown_df = pd.DataFrame(ses_df.get('forecast_breakdown', 'UNKNOWN'))
print(forecast_breakdown_df)
# Let's look at the `demand` and the `one_step_forecast` in a chart.
forecast_breakdown_df.plot(x='t', y=['one_step_forecast','demand'])
# We can also create the data points for the regression line.
# regression line: y = slope * t + intercept, for t = 1..11
regression = {'regression': [(ses_df.get('statistics')['slope']* i ) + ses_df.get('statistics')['intercept'] for i in range(1,12)]}
print(regression)
# We can add the regression data points to the forecast breakdown DataFrame.
forecast_breakdown_df['regression'] = regression.get('regression')
print(forecast_breakdown_df)
forecast_breakdown_df.plot(x='t', y=['one_step_forecast','demand', 'regression'])
# We have a choice now; we can use another alpha and repeat the analysis to reduce the Standard Error or use supplychainpy's `optimise=True` parameter to use an evolutionary algorithm and get closer to an optimal solution.
opt_ses_df = simple_exponential_smoothing_forecast(ds=row_ds[1:12], length=12, smoothing_level_constant=0.4,optimise=True)
print(opt_ses_df)
print(opt_ses_df.get('statistics', 'UNKNOWN'),'\n mape: {}'.format(opt_ses_df.get('mape', 'UNKNOWN')))
print(opt_ses_df.get('forecast', 'UNKNOWN'))
optimised_regression = {'regression': [(opt_ses_df.get('statistics')['slope']* i ) + opt_ses_df.get('statistics')['intercept'] for i in range(1,12)]}
print(optimised_regression)
opt_forecast_breakdown_df = pd.DataFrame(opt_ses_df.get('forecast_breakdown', 'UNKNOWN'))
opt_forecast_breakdown_df['regression'] = optimised_regression.get('regression')
print(opt_forecast_breakdown_df)
opt_forecast_breakdown_df.plot(x='t', y=['one_step_forecast','demand', 'regression'])
| 0.0.4-Using-Supplychainpy-and-Pandas-v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <a href="http://cocl.us/NotebooksPython101"><img src = "https://ibm.box.com/shared/static/yfe6h4az47ktg2mm9h05wby2n7e8kei3.png" width = 750, align = "center"></a>
#
#
#
# <a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a>
#
# <h1 align=center><font size = 5>WRITING YOUR OWN FUNCTIONS IN PYTHON</font></h1>
# ## Table of Contents
#
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
#
# <li><a href="#ref1">What is a Function?</a></li>
# <li><a href="#ref3">Using if/else statements in functions</a></li>
# <li><a href="#ref4">Setting default argument values in your custom functions</a></li>
# <li><a href="#ref6">Global and local variables</a></li>
# <li><a href="#ref7">Scope of a Variable </a></li>
#
# </ol>
# <br>
# <p></p>
# Estimated Time Needed: <strong>40 min</strong>
# </div>
#
# <hr>
# <hr>
# <a id='ref1'></a>
# <center><h2>Defining a Function</h2></center>
#
# A function is a reusable block of code which performs operations specified in the function. They let you break down tasks and allow you to reuse your code in different programs.
#
# There are two types of functions :
#
#
# - **Pre-defined functions**
# - **User defined functions**
# <h3>What is a Function?</h3>
# You can define functions to provide the required functionality. Here are simple rules to define a function in Python:
# - Functions blocks begin **def** followed by the function **name** and parentheses **()**.
# - There are input parameters or arguments that should be placed within these parentheses.
# - You can also define parameters inside these parentheses.
# - There is a body within every function that starts with a colon (**:**) and is indented.
# - You can also place documentation before the body
# - The statement **return** exits a function, optionally passing back a value
#
# An example of a function that adds on to the parameter **a** prints and returns the output as **b**:
#
def add(a):
    """Add 1 to ``a``, print the computation, and return the result."""
    result = a + 1
    print(a, "if you add one", result)
    return result
# The figure below illustrates the terminology:
# <a ><img src = "https://ibm.box.com/shared/static/wsl6jcfld2c3171ob19vjr5chw9gyxrc.png" width = 500, align = "center"></a>
# <h4 align=center>
# A labeled function
# </h4>
# We can obtain help about a function :
# help() prints the function's docstring and signature
help(add)
# We can call the function:
add(1)
# If we call the function with a new input we get a new result:
add(2)
# We can create different functions. For example, we can create a function that multiplies two numbers. The numbers will be represented by the variables **a** and **b**:
def Mult(a, b):
    """Return the product of ``a`` and ``b``.

    Works for any pair of types that support ``*``: two numbers, or a
    sequence and an int (repetition).
    """
    return a * b
# The same function can be used for different data types. For example, we can multiply two integers:
#
# two ints -> int
Mult(2,3)
# Two Floats:
Mult(10,3.14)
# an int and a string -> string repetition
Mult(2,"<NAME> ")
# #### Come up with a function that divides the first input by the second input:
# Double-click __here__ for the solution.
# <!--
# def div(a,b):
# return(a/b)
# -->
# <h3>Variables </h3>
#
# The input to a function is called a formal parameter.
#
# A variable that is declared inside a function is called a local variable. The parameter only exists within the function (i.e. the point where the function starts and stops).
#
# A variable that is declared outside a function definition is a global variable, and its value is accessible and modifiable throughout the program. We will discuss more about global variables at the end of the lab.
#
#Function Definition
def square(a):
    """Return ``a`` squared plus one, printing the computation."""
    offset = 1  # local variable
    value = a * a + offset
    print(a, "if you square +1 ", value)
    return value
# The labels are displayed in the figure:
# <a ><img src = "https://ibm.box.com/shared/static/gpfa525nnfwxt5rhrvd3o6i8rp2iwsai.png" width = 500, align = "center"></a>
# <h4 align=center>
# Figure 2: A function with labeled variables
# </h4>
#
#
#
# We can call the function with an input of 3:
# +
# Initializes a global variable
x=3
# Makes the function call and assigns the returned value to z
z=square(x)
z
# -
# We can call the function with an input of 2 in a different manner:
square(2)
# If there is no **return** statement, the function returns **None**. The following two functions are equivalent:
#
# +
def MJ():
    """Print the artist's name; returns None implicitly."""
    print('<NAME>')


def MJ1():
    """Print the artist's name with an explicit ``return None``."""
    print('<NAME>')
    return None
# -
MJ()
MJ1()
# Printing the function after a call reveals a **None** is the default return statement:
# (both calls print the name first, then print() shows the returned None)
print(MJ())
print(MJ1())
# #### Create a function **con** that concatenates two strings using the addition operation:
# :
def con(a, b):
    """Combine ``a`` and ``b`` with ``+``.

    Concatenates strings, lists, and tuples; adds numbers.
    """
    return a + b
# Double-click __here__ for the solution.
# <!--
# def div(a,b):
# return(a+b)
# -->
# #### Can the same function be used to add to integers or strings?
# Double-click __here__ for the solution.
# <!--
# yes,for example:
# con(2,2)
# -->
# #### Can the same function be used to concatenate a list or tuple?
# Double-click __here__ for the solution.
# <!--
# yes,for example:
# con(['a',1],['b',1])
# -->
# <h3><b>Pre-defined functions</b></h3>
# There are many pre-defined functions in Python, so let's start with the simple ones.
# The **print()** function:
# a list of ratings, one per album
album_ratings = [10.0,8.5,9.5,7.0,7.0,9.5,9.0,9.5]
print(album_ratings)
# The **sum()** function adds all the elements in a list or tuple:
sum(album_ratings)
# The length function returns the length of a list or tuple:
len(album_ratings)
# <div class="alert alert-success alertsuccess" style="margin-top: 20px">
# <h4> [Tip] How do I learn more about the pre-defined functions in Python? </h4>
# <p></p>
# We will be introducing a variety of **pre-defined functions** to you as you learn more about Python. There are just too many functions, so there's no way we can teach them all in one sitting. But if you'd like to take a quick peek, here's a short reference card for some of the commonly-used pre-defined functions:
# http://www.astro.up.pt/~sousasag/Python_For_Astronomers/Python_qr.pdf
# </div>
# <h3>Functions Makes Things Simple </h3>
# Consider the two lines of code in **Block 1** and **Block 2**: the procedure for each block is identical. The only thing that is different is the variable names and values.
#
# ### Block 1:
# compute c1 = a1 + b1 + 2*a1*b1 - 1, then threshold: 0 if negative, else 5
a1=4;
b1=5;
c1=a1+b1+2*a1*b1-1
if(c1<0):
    c1=0;
else:
    c1=5;
c1
# ### Block 2:
# identical procedure to Block 1, only the variable names and values differ
a2=0;
b2=0;
c2=a2+b2+2*a2*b2-1
if(c2<0):
    c2=0;
else:
    c2=5;
c2
# We can replace the lines of code with a function. A function combines many instructions into a single line of code. Once a function is defined, it can be used repeatedly. You can invoke the same function many times in your program. You can save your function and use it in another program or use someone else’s function. The lines of code in code **block 1** and code **block 2** can be replaced by the following function:
#
#
def Equation(a, b):
    """Evaluate a + b + 2ab - 1 and threshold it: 0 if negative, else 5."""
    combined = a + b + 2 * a * b - 1
    return 0 if combined < 0 else 5
# This function takes two inputs, a and b, then applies several operations to return c.
# We simply define the function, replace the instructions with the function, and input the new values of **a1**,**b1** and **a2**,**b2** as inputs. The entire process is demonstrated in the figure:
# <a ><img src = "https://ibm.box.com/shared/static/efn4rii75bgytjdb5c8ek6uezch7yaxq.gif" width = 1100, align = "center"></a>
# <h4 align=center>
# Example of a function used to replace redundant lines of code
# </h4>
# Code **Blocks 1** and **Block 2** can now be replaced with code **Block 3** and code **Block 4**.
# ### Block 3:
# same result as Block 1, now via the Equation() function
a1=4;
b1=5;
c1=Equation(a1,b1)
c1
# ### Block 4:
# same result as Block 2, now via the Equation() function
a2=0;
b2=0;
c2=Equation(a2,b2)
c2
# <hr>
# <a id='ref3'></a>
# <center><h2>Using if/else statements and loops in functions</h2></center>
#
# The **return()** function is particularly useful if you have any IF statements in the function, when you want your output to be dependent on some condition:
# +
def type_of_album(artist, album, year_released):
    """Classify an album as "Modern" (after 1980) or "Oldie" (1980 or earlier).

    Prints the album details in either case before returning.
    """
    print(artist, album, year_released)
    return "Modern" if year_released > 1980 else "Oldie"


x = type_of_album("<NAME>", "Thriller", 1980)
print(x)
# -
# We can use a loop in a function. For example, we can **print** out each element in a list:
def PrintList(the_list):
    """Print every element of ``the_list``, one per line; returns None."""
    for item in the_list:
        print(item)


PrintList(['1', 1, 'the man', "abc"])
# <hr>
# <a id='ref4'></a>
# <center><h2>Setting default argument values in your custom functions</h2></center>
#
# You can set a default value for arguments in your function. For example, in the **`isGoodRating()`** function, what if we wanted to create a threshold for what we consider to be a good rating? Perhaps by default, we should have a default rating of 4:
#
def isGoodRating(rating=4):
    """Print a verdict on ``rating`` (default 4): good if 7 or above, bad otherwise."""
    verdict = ("this album sucks it's rating is" if rating < 7
               else "this album is good its rating is")
    print(verdict, rating)
# <hr>
# default rating of 4 -> "sucks"; explicit 10 -> "good"
isGoodRating()
isGoodRating(10)
# <a id='ref6'></a>
# <center><h2>Global variables</h2></center>
# <br>
# So far, we've been creating variables within functions, but we have not discussed variables outside the function. These are called global variables.
# <br>
# Let's try to see what **printer1** returns:
# +
artist = "<NAME>"


def printer1(artist):
    """Bind ``artist`` to a local variable and print it; returns None."""
    internal_var = artist  # local: not visible outside this function
    print(artist, "is an artist")


printer1(artist)
# -
# If we print **internal_var** we get an error.
# **We got a Name Error:** `name 'internal_var' is not defined`. **Why?**
#
# It's because all the variables we create in the function is a **local variable**, meaning that the variable assignment does not persist outside the function.
#
# But there is a way to create **global variables** from within a function as follows:
# +
artist = "<NAME>"


def printer(artist):
    """Print ``artist`` and, as a side effect, set the global ``internal_var``."""
    global internal_var
    internal_var = "<NAME>"
    print(artist, "is an artist")


printer(artist)
printer(internal_var)
# -
# <a id='ref7'></a>
# <center><h2>Scope of a Variable</h2></center>
# <hr>
# The scope of a variable is the part of that program where that variable is accessible. Variables that are declared outside of all function definitions, such as the **myFavouriteBand** variable in the code shown here, are accessible from anywhere within the program. As a result, such variables are said to have global scope, and are known as global variables.
# **myFavouriteBand** is a global variable, so it is accessible from within the **getBandRating** function, and we can use it to determine a band's rating. We can also use it outside of the function, such as when we pass it to the print function to display it:
# +
myFavouriteBand = "AC/DC"  # global variable, readable inside the function


def getBandRating(bandname):
    """Return 10.0 when ``bandname`` matches the global favourite, else 0.0."""
    return 10.0 if bandname == myFavouriteBand else 0.0


print("AC/DC's rating is:", getBandRating("AC/DC"))
print("Deep Purple's rating is:", getBandRating("Deep Purple"))
print("My favourite band is:", myFavouriteBand)
# -
# Take a look at this modified version of our code. Now the **myFavouriteBand** variable is defined within the **getBandRating** function. A variable that is defined within a function is said to be a local variable of that function. That means that it is only accessible from within the function in which it is defined. Our **getBandRating** function will still work, because **myFavouriteBand** is still defined within the function. However, we can no longer print **myFavouriteBand** outside our function, because it is a local variable of our **getBandRating** function; it is only defined within the **getBandRating** function:
# +
def getBandRating(bandname):
    """Return 10.0 when ``bandname`` equals the favourite, else 0.0.

    ``myFavouriteBand`` here is a local variable, only defined within
    this function.
    """
    myFavouriteBand = "AC/DC"
    return 10.0 if bandname == myFavouriteBand else 0.0
print("AC/DC's rating is: ", getBandRating("AC/DC"))
print("Deep Purple's rating is: ", getBandRating("Deep Purple"))
# NOTE(review): this still works here because myFavouriteBand was defined
# globally in an earlier cell; the variable inside getBandRating is local.
print("My favourite band is", myFavouriteBand)
# -
# Finally, take a look at this example. We now have two **myFavouriteBand** variable definitions. The first one of these has a global scope, and the second of them is a local variable within the **getBandRating** function. Within the **getBandRating** function, the local variable takes precedence. **Deep Purple** will receive a rating of 10.0 when passed to the **getBandRating** function. However, outside of the **getBandRating** function, the **getBandRating** s local variable is not defined, so the **myFavouriteBand** variable we print is the global variable, which has a value of **AC/DC**:
# +
myFavouriteBand = "AC/DC"  # global favourite


def getBandRating(bandname):
    """Return 10.0 when ``bandname`` matches the LOCAL favourite, else 0.0.

    The local ``myFavouriteBand`` ("Deep Purple") shadows the global
    one inside this function; the global keeps its value outside.
    """
    myFavouriteBand = "Deep Purple"
    return 10.0 if bandname == myFavouriteBand else 0.0


print("AC/DC's rating is:", getBandRating("AC/DC"))
print("Deep Purple's rating is: ", getBandRating("Deep Purple"))
print("My favourite band is:", myFavouriteBand)
# -
# <a href="http://cocl.us/NotebooksPython101bottom"><img src = "https://ibm.box.com/shared/static/irypdxea2q4th88zu1o1tsd06dya10go.png" width = 750, align = "center"></a>
#
#
#
# # About the Authors:
#
# [<NAME>]( https://www.linkedin.com/in/joseph-s-50398b136/) has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
#
# [<NAME>]( https://www.linkedin.com/in/reevejamesd/) <NAME> is a Software Engineering intern at IBM.
# <hr>
# Copyright © 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
| content/docs/data-science-with-python/labs/python-basics/3-3-Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="91SEnJ5CcBzL" colab_type="text"
# # Introdução
#
# Olá seja bem-vinda e bem-vindo ao notebook da **aula 02**, desça até o conteúdo da aula 02 e bons estudos (Se clicar na seta antes do título Aula 01 ela comprime todo o conteúdo da aula 1, deixando o layout mais agradável).
#
# **Não esqueça de rodar todos as células de códigos da aula 01, antes de iniciar a aula 02**
# + [markdown] id="sG6vp0eCNdi2" colab_type="text"
# #Aula 01
# + [markdown] id="f7ibSyNTXtv9" colab_type="text"
#
#
# Nós estaremos desenvolvendo nosso projeto aqui no google colaboratory, assim podemos mesclar células contendo textos em formato markdown e células de código, além disso você não precisar instalar nada na sua máquina. Então que tal começar testando algumas linhas de código.
#
# Nesta primeira célula estamos realizando um ```print()```, lembre-se que esta função python imprime a string que estamos passando como parâmetro, então o retorno é exibido logo abaixo da célula com código.
# + id="vC0a5z7IRFWT" colab_type="code" outputId="23eac06b-2cbd-4440-b1c6-4e3ff9c7197c" colab={"base_uri": "https://localhost:8080/"}
# simple "hello world" test: print() echoes its argument below the cell
print("<NAME>")
print("<NAME>")
# + [markdown] id="R2uQtgGLlaDs" colab_type="text"
# Agora vamos analisar a proxima célula de código.
#
# Aqui estamos fazendo uma atribuição de variável, conforme dito em aula, as atribuições não tem retorno, assim, diferente da célula anterior não temos um *output* logo abaixo do código.
# + id="i9-JUfxERKcN" colab_type="code" colab={}
# assignment has no output cell (no return value is displayed)
nome_do_filme = "Totoro, o filme"
# + [markdown] id="gQi3djrZmwZ8" colab_type="text"
# Agora que criamos a variável ```nome_do_filme```, podemos reutilizá-la, por exemplo na função ```print```, para assim imprimir a *string* "Totoro, o filme".
# + id="3aFMfB0ERUcF" colab_type="code" outputId="db2357ca-33cb-48fe-d54c-f58945b9d9b9" colab={"base_uri": "https://localhost:8080/"}
# reuse the variable: print() and the bare expression both display it
print(nome_do_filme)
# + id="Sg2NRrX9RXM-" colab_type="code" outputId="5cc741f3-e807-4fcf-9197-63e0a4c89214" colab={"base_uri": "https://localhost:8080/"}
nome_do_filme
# + [markdown] id="OuYA1aSIX0vJ" colab_type="text"
# ## Lendo os dados do MovieLens
#
# Nosso primeiro passo foi conhecer e realizar um "hello-world" no colab, agora chegou a hora de importar os dados para o notebook e começar as análises.
#
# Vamos importar a biblioteca [pandas](https://pandas.pydata.org/), um poderoso projeto open source para análise de manipulação de dados. O primeiro passo é ler uma base de dados e podemos fazer isso com o comando ```pd.read_csv()```.
#
# Estamos lendo um arquivo **CSV** (Comma-separated values), neste tipo de arquivo os valores são separados por vírgulas e podem ser abertos em outras ferramentas como excel e google-sheet. CSV não é o único formato lido pelo pandas, temos o ```pd.read_excel()``` que lê arquivos **xlsx** entre diversos outros formatos, você pode encontrar mais informações na seção de [input/output da documentação](https://pandas.pydata.org/pandas-docs/stable/reference/io.html).
#
# Depois de ler o dataset, nós trocamos os nomes das colunas pelos termos em português, logo em seguida utilizamos o método ```filmes.head()``` para visualizar as primeiras 5 linhas do nosso dataframe. Outra forma de visualizar as informações dos dados é utilizando o método ```filmes.sample()```, se você tentar, vai verificar que ele retorna uma linha aleatória do seus dados. Para escolher aleatoriamente mais de 1 linha, por exemplo 5, é só passar esse valor desejado como parâmetro (```filmes.sample(5)```).
# + id="fm9UvpjdRZjP" colab_type="code" colab={}
import pandas as pd
# + id="uIoEtYpDUSbk" colab_type="code" outputId="f9a7dc9d-e185-418a-ca67-780eb22ce6ae" colab={"base_uri": "https://localhost:8080/"}
# Load the MovieLens movies table straight from the course repository.
filmes = pd.read_csv("https://raw.githubusercontent.com/alura-cursos/introducao-a-data-science/master/aula0/ml-latest-small/movies.csv")
# filmes is a DataFrame
# Rename columns from English to Portuguese for the rest of the lesson.
filmes.columns = ["filmeId", "titulo", "generos"]
filmes.head()
# + [markdown] id="bZ46r5yAYCDO" colab_type="text"
# Há pouco falamos para consultar a documentação para obter mais informações, mas será que é realmente preciso sair do notebook para tirar algumas dúvidas mais simples?
#
# Os notebooks facilitam a nossa vida podendo consultar o docstring das funções e métodos, rodando a célula com um **?** na frente da chamada, uma view é aberta com as informações resumidas. Veja a seguir alguns exemplos.
# + id="sKew1Wy0UYbu" colab_type="code" colab={}
# lendo a documentação de um método/atributo
# ?filmes.head
# + id="i6F4_qpeUzOq" colab_type="code" colab={}
# lendo a documentação do tipo (docstring)
# ?filmes
# + [markdown] id="wwcenqbGYGAO" colab_type="text"
# A base de dados que usamos até o momento contém o nome do filme, ano de lançamento e gêneros. O MovieLens conta com outras informações que estão em bases separadas, uma delas é a de avaliações.
#
# Agora vamos analisar um pouco melhor o dataset de avaliações.
# + id="7A1UmccVVLeK" colab_type="code" outputId="731b8b8f-d8fd-4f44-ffee-e4cd47623ece" colab={"base_uri": "https://localhost:8080/"}
# Load the ratings table (one row per user/movie rating); ?raw=true makes
# GitHub serve the file contents instead of the HTML page.
avaliacoes = pd.read_csv("https://github.com/alura-cursos/introducao-a-data-science/blob/master/aula0/ml-latest-small/ratings.csv?raw=true")
avaliacoes.head()
# + [markdown] id="otNlLBcYE1gP" colab_type="text"
# Para visualizar algumas linhas estamos usando o ```.head()```, como ela mostra apenas as 5 primeiras linhas não sabemos qual é a quantidade de linhas que temos. Para descobrir a "forma" dos nossos dados podemos utilizar o ```avaliacoes.shape```, retornando uma [tupla](https://www.alura.com.br/artigos/conhecendo-as-tuplas-no-python), onde o primeiro termo indica o número de linhas e o segundo o número de colunas.
# + id="QRXxNO5-VyJD" colab_type="code" outputId="c16ef6ba-2753-47e2-8896-6bf02480d70a" colab={"base_uri": "https://localhost:8080/"}
# (rows, columns) tuple of the ratings DataFrame.
avaliacoes.shape
# + id="Wo5ZCjYdV6To" colab_type="code" outputId="a2d5e718-f448-4bae-96ae-f1cc5428c2d9" colab={"base_uri": "https://localhost:8080/"}
# len() of a DataFrame gives the row count only.
len(avaliacoes)
# + [markdown] id="5E9e-dkuGUaK" colab_type="text"
# Vamos substituir os nomes das colunas de inglês para português e entender o que são essas colunas.
#
# usuarioId => ID para o usuário que votou em determinado filme.
#
# filmeId => ID para identificar um filme votado.
#
# nota => A nota dada pelo usuário para o respectivo filme.
#
# momento => A data da votação que não está formatada como data
#
# Como cada linha contém um voto para o respectivo filme é de se esperar que um filme tenha diversos votos, mas repare que nas 5 primeiras linhas temos o filme **1, 3, 6, 47, 50**. Mas e se eu quiser analisar apenas as notas do filme 1, como posso separar essa informação?
#
#
# + id="QblDDv4SWBvC" colab_type="code" outputId="d1807337-f8cc-4d9e-cced-471a2f739c61" colab={"base_uri": "https://localhost:8080/"}
# Translate the rating columns (userId/movieId/rating/timestamp) to Portuguese.
avaliacoes.columns = ["usuarioId", "filmeId", "nota", "momento"]
avaliacoes.head()
# + [markdown] id="W7qCWhXVICxr" colab_type="text"
# Uma forma para "separar" as informações apenas do **filmeId 1** é chamando o método ```avaliacaoes.query("filmeId==1")```, esse método retornará apenas as linhas para quais a expressão booleana, "filmeId==1", for verdadeira.
#
# Tendo as informações do **filmeId 1** podemos chamar o ```avaliacoes_do_filme_1.describe()```, para analisar as estatísticas gerais dos dados.
# + id="t26TcAKMWLx8" colab_type="code" outputId="1cf00774-aac6-4b7f-aa50-7b5aa2073097" colab={"base_uri": "https://localhost:8080/"}
# Keep only the rows whose filmeId equals 1 (Toy Story).
avaliacoes_do_filme_1 = avaliacoes.query("filmeId==1")
avaliacoes_do_filme_1.head()
# + id="se3soadiWuG_" colab_type="code" outputId="be888bb9-007c-4bd2-e19c-12c86d17be6d" colab={"base_uri": "https://localhost:8080/"}
# Summary statistics (count, mean, quartiles, ...) for movie 1's columns.
avaliacoes_do_filme_1.describe()
# + [markdown] id="zUD1xulBKcou" colab_type="text"
# Caso queira uma estatística particular, podemos apenas chamar o método desejado, repare abaixo como calculamos apenas a média das avaliações do **filmeId 1**.
# + id="lf4dH46SW2ro" colab_type="code" outputId="5601f20e-14ba-4da8-e339-3631fb7587c4" colab={"base_uri": "https://localhost:8080/"}
avaliacoes_do_filme_1.mean()
# + [markdown] id="dylF75HzK_nI" colab_type="text"
# Calculamos as estatísticas apenas para o **filmeId 1**, mas também podemos chamar o método ```.describe()``` para a base completa (avaliações).
# + id="jZ9ZtovSWS0C" colab_type="code" outputId="6ce76b1d-d967-4f24-b0bf-fc26f4b83384" colab={"base_uri": "https://localhost:8080/"}
avaliacoes.describe()
# + [markdown] id="4Kq3eGVXLmca" colab_type="text"
# Ok, nós calculamos um tanto de coisa usando `.describe()` e `.mean()`, mas a informação que realmente queremos é a média da nota. Então o ponto é, como calcular a média apenas das notas?
#
# A primeira coisa que precisamos fazer é selecionar apenas as informações de notas. Usando uma estrutura muito parecida com a de [chave-valor dos dicionários python](https://www.alura.com.br/artigos/trabalhando-com-o-dicionario-no-python).
#
# Com o comando ```avaliacoes["nota"]```, obtemos os valores da coluna nota (repare que o tipo retornado é uma Série pandas, por isso o index de cada nota é mantido). Para calcular a média de todas as notas executamos ```avaliacoes["nota"].mean()```
#
# + id="OnltfnYqWi40" colab_type="code" outputId="f0d15da3-d163-4041-9ad9-d774ba6aa525" colab={"base_uri": "https://localhost:8080/"}
avaliacoes["nota"]
# + id="Xos6cYlmPWO6" colab_type="code" outputId="41b37660-a316-4e49-daa9-27c5bc68d08c" colab={"base_uri": "https://localhost:8080/"}
avaliacoes["nota"].mean()
# + [markdown] id="u03bvyYZPvza" colab_type="text"
# Podemos calcular também a nota média do **filmeId 1**, repare que o resultado é um pouco maior que a geral. Apenas com essa análise não dá para bater o martelo que o filme 1 é acima da média, mas apenas com essa análise conseguimos formular uma primeira hipótese!
# + id="h8JDwB_2W8yR" colab_type="code" outputId="56060734-e853-46cc-c604-e2a970733c4a" colab={"base_uri": "https://localhost:8080/"}
avaliacoes_do_filme_1["nota"].mean()
# + [markdown] id="Wv6npVXeRuVl" colab_type="text"
# Nós calculamos uma média geral, uma média para o filmeId 1. Agora eu quero calcular a média das notas para todos os filmes, podemos fazer isso usando o método ```.groupby(filmeId)```, o parâmetro passado é para indicar qual coluna ele deve utilizar para "agrupar" os dados. Depois só calcular a média como fizemos anteriormente.
#
#
# + id="78qsVDbWW-kf" colab_type="code" outputId="a2a9ddcb-b995-417b-c5ca-6d7710b41158" colab={"base_uri": "https://localhost:8080/"}
# Group ratings by movie and take the mean: one average rating per filmeId.
notas_medias_por_filme = avaliacoes.groupby("filmeId")["nota"].mean()
notas_medias_por_filme.head()
# + [markdown] id="gRyxEdyOWs4I" colab_type="text"
# Temos as notas médias calculadas, mas agora precisamos juntar as informações de notas médias com a base de dados **filmes**.
#
# Poderíamos criar uma nova coluna e atribuir a váriável `notas_medias_por_filme`, de forma direta:
#
# ```filmes["nota_media"] = notas_medias_por_filme```
#
# Como discutimos em aula, essa não é uma boa prática pois precisamos garantir que a nota média seja do respectivo filme.
#
# Para garantir essa condição vamos utilizar o ```.join()```, criando um novo dataframe (```filmes_com_media = filmes.join(notas_medias_por_filme, on="filmeId")```).
#
# Veja como fazer, nas células a seguir.
# + id="jPTL419OYjto" colab_type="code" outputId="e03c2476-07c3-4e76-cba7-cbfeb8959203" colab={"base_uri": "https://localhost:8080/"}
# Display the movies DataFrame.
filmes
# + id="AVlwJvZRZCQP" colab_type="code" outputId="df4851d7-138a-445b-b169-4551272ea3c7" colab={"base_uri": "https://localhost:8080/"}
# Display the per-movie mean ratings (Series indexed by filmeId).
notas_medias_por_filme
# + id="hoUllrZFZgsA" colab_type="code" outputId="c18b82fc-05cf-4eb7-9ece-b794c50131a9" colab={"base_uri": "https://localhost:8080/"}
# Join the averages onto the movies table keyed on filmeId, so each movie is
# guaranteed to receive its own mean (safer than positional assignment).
filmes_com_media = filmes.join(notas_medias_por_filme, on="filmeId")
filmes_com_media.head()
# + [markdown] id="IDLHWb9cYkuy" colab_type="text"
# Agora que temos as médias, que tal visualizar o nosso dataframe ordenado pela nota de forma decrescente?
# + id="MLNCjcgdaOb5" colab_type="code" outputId="1e71ad65-2766-492c-b59d-e7a4cbf621f0" colab={"base_uri": "https://localhost:8080/"}
filmes_com_media.sort_values("nota", ascending=False).head(15)
# + [markdown] id="D4Yw8FhlhDT1" colab_type="text"
# Fizemos um tanto de análise e manipulação de dados interessante, não é?
#
# Mas diz a verdade, você está sentindo falta daquele gráfico que todo cientista de dados adora =D, então bora plotar nosso primeiro gráfico!
#
# O pandas facilita muito o plot de alguns gráficos simples, apenas selecionamos a informação que gostaríamos de visualizar e chamamos o método ```.plot()```
# + id="usWDMMoUiG-l" colab_type="code" outputId="3db42bc9-88fb-4c81-f51a-ed782dc4b423" colab={"base_uri": "https://localhost:8080/"}
avaliacoes.query("filmeId == 1")["nota"].plot()
# + [markdown] id="AazOm8Xeipmf" colab_type="text"
# Por padrão o método plotou um gráfico de linhas, o que não é adequado para os dados que estamos analisando.
#
# Precisamos mudar o tipo de gráfico para realizar uma análise mais adequada, para fazer isso apenas alteramos o parâmetro **kind** do método ```.plot```. Vamos plotar um [histograma](https://pt.wikipedia.org/wiki/Histograma) rodando a célula a seguir.
# + id="W2s2pBHck1Zt" colab_type="code" outputId="849088f1-9be2-4ef8-849c-7466d2b9048f" colab={"base_uri": "https://localhost:8080/"}
avaliacoes.query("filmeId == 1")["nota"].plot(kind='hist')
# + [markdown] id="oYEI1fBOlEdk" colab_type="text"
# Legal, agora temos uma visualização muito mais agradavel de analisar. Compare com o gráfico de linhas, qual você acha melhor para análise?
#
# P.S: Deixar de usar o gráfico de linhas, não significa que sejá uma visualização ruim. Apenas quer dizer que nossos dados não tem características ideias para serem visualizados como um *line plot*, agora pense em uma [série temporal](https://pt.wikipedia.org/wiki/S%C3%A9rie_temporal). **Você acha que o gráfico de linhas ainda seria uma má ideia?**
#
# Antes de analisar o histograms de outros filmes, quero colocar um título na imagem. Vamos ver como podemos fazer isso!
#
# + id="C8KHmAMbmPJA" colab_type="code" outputId="ed350909-8e52-4136-81f7-966669fda6b6" colab={"base_uri": "https://localhost:8080/"}
avaliacoes.query("filmeId == 1")["nota"].plot(kind='hist',
title="Avaliações do filme Toy Story")
# + [markdown] id="yY9xg7-KnFpb" colab_type="text"
# Claro que python tem outras ferramentas muito poderosas para manipular gráficos, uma delas é o [matplotlib](https://matplotlib.org/).
#
# Que tal experimentar um pouquinho esta poderosa ferramenta?
#
# Vamos importar a lib e adicionar título no gráfico usando o matplotlib, veja como fica na célula a seguir.
# + id="kSJBUpCmamqK" colab_type="code" outputId="cf09b1b6-153a-47ba-8d18-2612b3c8277c" colab={"base_uri": "https://localhost:8080/"}
import matplotlib.pyplot as plt
# Same histogram, but the title is set via matplotlib instead of pandas.
avaliacoes.query("filmeId == 1")["nota"].plot(kind='hist')
plt.title("Avaliações do filme Toy Story")
plt.show()
# + [markdown] id="pagvE8ntpfzv" colab_type="text"
# Agora que aprendemos a criar um histograma e manipular os gráficos, vamos plotar informações de outros filmes e realizar uma análise desses gráficos?
#
# Vamos plotar o histograma do filme Jumanji e da animação Liga da justiça: Doom.
# + id="vEJ5uiDpdKzC" colab_type="code" outputId="ababb021-5180-421b-efd5-b9fcacf19caa" colab={"base_uri": "https://localhost:8080/"}
avaliacoes.query("filmeId == 2")["nota"].plot(kind='hist',
title="Avaliações do filme Toy Jumanji")
# + id="3qI60nGXdt3W" colab_type="code" outputId="36c50872-a71c-4ba2-8b25-cef8f6621725" colab={"base_uri": "https://localhost:8080/"}
avaliacoes.query("filmeId == 102084")["nota"].plot(kind='hist',
title="Avaliações do filme Justice League: Doom")
# + [markdown] id="wQ1VGLaFqI7z" colab_type="text"
# Agora que temos os gráficos, chegou a hora de analisar.
#
# A primeira coisa que preciso saber é o que cada eixo do meu gráfico significa. Então, eixo **x** mostra a nota, enquanto eixo **y** a frequência das notas (quantas vezes determinada nota foi dada).
#
# Entendido nosso gráfico, vamos contextualizar o cenário que estamos analisando:
#
# - Temos 3 filmes, dois muito populares (Toy story e Jumanji) e outro que nenhuma pessoa presente no momento da aula conhecia (animação da liga da justiça). O ponto que chamou a atenção, foi que a animação tinha média de nota maior que dois filmes, aparentemente mais populares, Jumanji e Toy Story. **Será que a animação é um filme tão bom assim?**
#
#
# Dado esse cenário a primeira coisa que me chama a atenção é a animação da liga da justiça ter média de nota igual a 5. Ao analisar o histograma do respectivo filme, verificamos que ele só teve uma avaliação igual a 5, logo, fica evidente que a **quantidade de votos é um aspecto importante na avaliação das médias**. Com apenas uma avaliação, não conseguimos garantir que o filme é realmente bom, tornando a avaliação muito "volátil". Imagina que Liga da Justiça receba mais uma avaliação, com nota 0, assim a média seria 2.5. Apenas com mais essa avaliação o filme passaria a ser considerada um "pior" que Jumanji e Toy Story.
#
# Outro ponto interessante é comparar o histograma de Toy Story e Jumanji, ambos tem médias "relativamente próximas". Mas repare que a distribuição de notas são diferentes, Toy Story recebe mais notas 5 e 4 que qualquer outra nota, enquanto Jumanji recebe mais notas 4 e 3, assim concluímos que a **distribuição das notas também é um fator importante na avaliação das médias**.(Se ficar alguma dúvida sobre esse tema reveja o exemplo que o instrutor apresenta no final na aula)
#
#
# Com isso nós fechamos a nossa primeira aula do **#quarentenadados**, viu quanta coisa aprendemos? Que tal colocar isso em prática?
#
#
# **Crie seu próprio notebook, reproduza nossa aula e resolva os desafios que deixamos para vocês**.
#
#
# Até a próxima aula!
#
#
#
#
#
#
#
# + [markdown] id="iJ0nFiyXZu8M" colab_type="text"
# ## Desafio 1 do [<NAME>](https://twitter.com/paulo_caelum)
#
# O Paulo fez uma análise rápida e disse que tem 18 filmes sem avaliações, será que ele acertou?
#
# Determine quantos filmes não tem avaliações e quais são esses filmes.
#
# + [markdown] id="3e8LMGJAaiXJ" colab_type="text"
# ## Desafio 2 do [<NAME>](https://twitter.com/guilhermecaelum)
#
# Mudar o nome da coluna nota do dataframe **filmes_com_media** para nota_média após o join.
# + [markdown] id="cXqu2wt0a2l6" colab_type="text"
# ## Desafio 3 do [<NAME>](https://twitter.com/guilhermecaelum)
#
# Colocar o número de avaliações por filme, isto é, não só a média mas o TOTAL de votos por filme.
# + [markdown] id="fZ5F6qwVeVWt" colab_type="text"
# ## Desafio 4 do [<NAME>](https://twitter.com/tgcsantos)
#
# Arredondar as médias (coluna de nota média) para duas casas decimais.
# + [markdown] id="mYzG73cSeigN" colab_type="text"
# ## Desafio 5 do [<NAME>](https://twitter.com/allanspadini)
#
# Descobrir os generos dos filmes (quais são eles, únicos). (esse aqui o bicho pega)
# + [markdown] id="enM3lF2textZ" colab_type="text"
# ## Desafio 6 da [<NAME>](https://twitter.com/thais_tandre)
#
# Contar o número de aparições de cada genero.
# + [markdown] id="SVVAZGGdfA_s" colab_type="text"
# ## Desafio 7 do [<NAME>](https://twitter.com/guilhermecaelum)
#
# Plotar o gráfico de aparições de cada genero. Pode ser um gráfico de tipo igual a barra.
# + [markdown] id="tSRJRLLwOBzF" colab_type="text"
# # Aula 02
# + [markdown] id="3Y4U0nYnOSGp" colab_type="text"
# Nesta aula vamos estudar com mais profundidade as técnicas de centralidade, conhecer algumas boas práticas de visualização de dados e o famoso Boxplot.
#
# Para inciar vamos precisar resolver alguns dos desafios deixados na **aula 01** (Caso não tenha tentado resolver os desafios, recomendo tentar algumas vezes antes de olhar as repostas). Começando pelo exercício 05, onde precisamos segregar os gêneros de cada um dos filmes contidos na base de dados do **Movie Lens**.
#
# Vamos relembrar como os dados estavam configurados.
# + id="I9-RWwoFQree" colab_type="code" outputId="f381e6ac-65ed-4bc3-ac96-825456d9ee97" colab={"base_uri": "https://localhost:8080/", "height": 206}
filmes.head()
# + [markdown] id="5z7PLpOiQxjK" colab_type="text"
# Temos os títulos e uma coluna com os respectivos gêneros, todos em uma única coluna, cada *label* é separada com um **|** (Adventure|Children|Fantasy) sendo do tipo *string*.
#
# Para solucionar nosso problema precisamos separar cada um dos gêneros para então realizar a contagem. Existem várias formas de resolver este problema, por exemplo, desde métodos embutidos das *strings* até as **regex**, mas como estamos usando o pandas já temos algo para facilitar nosso processamento dos dados.
#
# Vamos aplicar o método e logo em seguida explicar a saída gerada.
# + id="8KezXNtgTBZr" colab_type="code" outputId="471502d7-6352-4140-aac5-bde212fed6a2" colab={"base_uri": "https://localhost:8080/", "height": 479}
filmes["generos"].str.get_dummies('|')
# + [markdown] id="0Vs4BNTLTnnT" colab_type="text"
# Nossa, uma linha de código gerou essa tabelona cheia de linhas, colunas e números.
#
#
# Como você percebeu a saída é um [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html), cada linha corresponde a respectiva linha da coluna gênero, cada coluna corresponde a um gênero (repare que cada gênero **único** virou uma coluna no DF). O que você deve estar se perguntando é como os valores **0/1** são preenchidos?.
#
# Para explicar, vamos pegar os gêneros do filme **Jumanji**, **Adventure|Children|Fantasy**, na coluna dos respectivos gêneros (dataframe gerado por ```filmes["generos"].str.get_dummies('|')```) o valor será **1**, para todos os outros gêneros, que não são gêneros do filme Jumanji, vale **0**. Em suma, se o nome da coluna pertence a algum gênero do respectivo filme, o valor será **1** caso contrário 0 (Se ainda não ficou claro, pegue alguns filmes e confira os resultados na tabela anterior).
#
#
# Até aqui resolvemos uma parte do problema, agora precisamos somar quantos **1** cada coluna tem.
#
#
# + id="HQ4QBo2HTmQi" colab_type="code" outputId="848e25ed-da35-4035-b2b0-1da0c8f8d850" colab={"base_uri": "https://localhost:8080/", "height": 382}
filmes["generos"].str.get_dummies('|').sum()
# + [markdown] id="MhafVXhPhPwU" colab_type="text"
# Ótimo, resolvemos o desafio e agora temos quantas vezes cada gênero aparece. Assim, fica fácil responder perguntas como: qual o gênero com mais filmes produzidos? Qual o menos? Qual o segundo? (Lembrando que o dado está restrito às informações do movie lens)
#
# Se você tentou responder, deve ter notado que não foi tão fácil assim, as informações não estão ordenadas e toda hora você precisa percorrer a tabela para fazer comparações. Nós podemos melhorar isso ordenando as informações.
# + id="i0rfKG_chPAO" colab_type="code" outputId="ff5a95fa-1311-469c-e4c7-fd3fe1464c15" colab={"base_uri": "https://localhost:8080/", "height": 382}
filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False)
# + [markdown] id="_6LpA66ueW0P" colab_type="text"
# Maravilha, agora tudo ficou mais fácil!
#
# Conseguimos responder as perguntas anteriores sem grandes dificuldades. Mas ainda podemos melhorar ainda mais a forma de expor nossa informação, não acha?
#
# Que tal uma imagem para visualizar? (Desafio 07 da aula 01)
# + id="WSK9en3ngOD0" colab_type="code" outputId="436404b4-b437-4556-e0f1-33b0b6d61e60" colab={"base_uri": "https://localhost:8080/", "height": 282}
filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False).plot()
# + [markdown] id="pXViUEffgV5E" colab_type="text"
# Iniciamos com o plot padrão do pandas, e como percebemos não adianta só plotar uma imagem, é preciso que faça sentido para a informação que queremos analisar, um gráfico de linhas não está fazendo muito sentido!
#
# Temos um gráfico muito conhecido que sempre encontramos por aí, o famoso gráfico de pizza ou torta.
#
# Já que ele é tão famoso talvez seja uma boa ideia tentar!
# + id="wS6m52kJgTzj" colab_type="code" outputId="39bdf836-7171-48f6-dd0f-c564f6fde0bf" colab={"base_uri": "https://localhost:8080/", "height": 482}
filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False).plot(
kind='pie',
title='Categorias de filmes e suas presenças relativas',
figsize=(8,8))
plt.show()
# + [markdown] id="RJIsTxcDiy8n" colab_type="text"
# E aí o que você achou?
#
# Algo que fica evidente neste gráfico é que **Drama, Comedy, Thriller, e Action** tem proporções "grandes", mas qualquer outra análise fica complicada.
#
# Primeiro, as cores começa a se repetir e isso não é o ideial.
#
# Segundo, repare nos gêneros com menos filmes,consegue tirar alguma informação de lá? é muito difícil de analisar.
#
# Terceiro, vamos tentar comparar **Thriller e Action**, qual está presente em mais filmes? Difícil responder, quando estamos trabalhando com gráficos tipo esse fazemos comparações entre áreas, não somos bons nisso.
#
#
# Por fim, o importante de uma visualização é que ela seja **"transparente"** ao intuíto de nossa análise. Ou seja, estamos querendo analisar as informações de quantidade, comparando as labels de forma geral e evidênciando de maneira clara as diferenças entre elas (proporções).
#
# Portanto, o gráfico de pizza não torna as comparações claras, sendo assim uma má ideia.
#
# Vamos construir juntos uma solução mais adequada!
#
# + id="AnwRxXYFq1Uk" colab_type="code" outputId="a46eea5b-9741-4b29-da52-c0fb9610c175" colab={"base_uri": "https://localhost:8080/", "height": 577}
filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False).plot(
kind='bar',
title='Filmes por categoria',
figsize=(8,8))
plt.show()
# + [markdown] id="pvxXEAroq1w6" colab_type="text"
# Mudamos da pizza para a barra, alterando apenas o parâmetro kind do método.
#
# Veja como o gráfico de barra torna a análise mais simples, logo de cara a diferença entre **Drama e Comedy**, comparado aos demais gêneros fica evidênte. No gráfico de pizza era super difícil comparar **Thriller e Action**, agora a comparação ficou fácil e conseguimos perceber o quão perto estão uma da outra.
#
# A interpretação dos dados melhorou muito com essa visualização, mas podemos melhorar ainda mais. O que queremos é tornar evidentes os gêneros que têm a maior participação nos filmes em geral, ou seja, transparecer através da imagem uma visão geral de proporcionalidade. Para tornar evidente essa informação vamos utilizar algo "semelhante" a um [mapa de calor](https://en.wikipedia.org/wiki/Heat_map).
# + id="sN8whM-7q2Ax" colab_type="code" outputId="0ad59a9e-6a3c-4d9e-8379-4f36df6a47dd" colab={"base_uri": "https://localhost:8080/", "height": 538}
import seaborn as sns
# White background with horizontal gridlines, easing bar comparison.
sns.set_style("whitegrid")
filmes_por_genero = filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False)
plt.figure(figsize=(16,8))
# Sequential BuGn_r palette gives a heat-map effect: bigger counts, darker
# green. The +4 extra colors keep the smallest bar from being invisible.
sns.barplot(x=filmes_por_genero.index,
            y=filmes_por_genero.values,
            palette=sns.color_palette("BuGn_r", n_colors=len(filmes_por_genero) + 4))
plt.show()
# + [markdown] id="ydALPTERq2M2" colab_type="text"
# Já, já explicamos o que foi feito em toda imagem, por agora repare como a imagem passa muito mais informação. Conseguimos comparar de forma fácil entre os gêneros e através do **mapa de calor** (gêneros com maior número tem um verde muito mais forte, gêneros com menor número é praticamente transparente) evidênciamos quais são as labels com maior participação, médias e insignificantes. Toda essa informação em uma única imagem!
#
# Bom, agora vamos entender como foi o código.
#
# Primeiro, não plotamos mais a imagem com o `.plot()` do pandas, vamos precisar de uma biblioteca de visualização mais poderosa para configurar nossa imagem, utilizamos o [seaborn](https://seaborn.pydata.org/).
#
# Segundo, chamamos o barplot do **seaborn**, adicionando uma **paleta de cores** com efeito de mapa de calor (parâmetro pallette), no parâmetro `n_color` de `sns.color_palette()` adicionamos **+4** para que a última barra não seja totalmente transparente.
#
# Terceiro, também adicionamos o **sns.set_style("whitegrid")** para que todos os gráficos tenham a **linha de grade do eixo X** evidênte, facilitando a comparação entre as barras.
# + id="fCBxUBwkq2dS" colab_type="code" outputId="acb736a0-32bc-4b0f-b175-f68b578aa472" colab={"base_uri": "https://localhost:8080/", "height": 483}
import seaborn as sns
filmes_por_genero = filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False)
# Intentionally repeated with a smaller 8x8 figure to discuss figure sizing.
plt.figure(figsize=(8,8))
sns.barplot(x=filmes_por_genero.index,
            y=filmes_por_genero.values,
            palette=sns.color_palette("BuGn_r", n_colors=len(filmes_por_genero) + 4))
plt.show()
# + [markdown] id="loU0EYIn0HW7" colab_type="text"
# Por fim, mudamos o tamanho da imagem com o **figsize** do método`plt.figure()`. Assim, temos um gráfico com muitas informações e agradável de analisar.
#
# + id="sHLQQRD50F14" colab_type="code" outputId="671ec88a-2276-4aed-9305-a5bd753a7687" colab={"base_uri": "https://localhost:8080/", "height": 483}
import seaborn as sns
sns.set_style("whitegrid")
filmes_por_genero = filmes["generos"].str.get_dummies('|').sum().sort_values(ascending=False)
plt.figure(figsize=(16,8))
sns.barplot(x=filmes_por_genero.index,
y=filmes_por_genero.values,
palette=sns.color_palette("BuGn_r", n_colors=len(filmes_por_genero) + 4))
plt.show()
# + [markdown] id="hWbZmSux0_gx" colab_type="text"
# Conseguimos analisar e tirar diversas conclusões trabalhando com a visualização dos gêneros. Será que conseguimos utilizar visualizações para entender melhor as notas de um filme?
#
# Vamos relembrar alguns pontos que já discutimos e nos aprofundar nas análises de notas para tirar conclusões mais sofisticadas.
#
# Na **aula 01** calculamos as notas médias por filmes, vamos dar uma olhada no resultado.
# + id="WCpfME9dDUnC" colab_type="code" outputId="54e419d3-a458-48d5-cbd8-bf58b45a2ee6" colab={"base_uri": "https://localhost:8080/", "height": 206}
filmes_com_media.head()
# + [markdown] id="9nb5mvorDe6d" colab_type="text"
# Como vimos, olhar apenas as médias pode ser um problema e para interpretar um pouco melhor os dados usamos o histograma das notas para comparar alguns filmes. Por exemplo, **Toy Story e Jumanji**
# + id="ChEwRS45EEfM" colab_type="code" outputId="38dc5d75-547e-43df-fe19-d796f57203f2" colab={"base_uri": "https://localhost:8080/", "height": 300}
# Ratings of movie 1 (Toy Story): print the mean, then show the histogram.
notas_do_filme_1 = avaliacoes.query("filmeId==1")["nota"]
print(notas_do_filme_1.mean())
notas_do_filme_1.plot(kind='hist')
# + id="9cRMdMUYEXmd" colab_type="code" outputId="0f753e48-cd3c-4a3b-c9c6-881760e7f853" colab={"base_uri": "https://localhost:8080/", "height": 300}
# NOTE(review): this cell queries movie 2 (Jumanji) but reuses the name
# `notas_do_filme_1` — misleading; consider renaming to notas_do_filme_2.
notas_do_filme_1 = avaliacoes.query("filmeId==2")["nota"]
print(notas_do_filme_1.mean())
notas_do_filme_1.plot(kind='hist')
# + [markdown] id="wH4BW07r0GZF" colab_type="text"
# ToyStory e Jumanji tem médias relativamente próximas mas com comportamento de notas diferentes, então, para nosso exemplo, as médias ajudam mas esconde informações importântes sobre os dados.
#
# Lembra o exemplo que o <NAME> deu em aula comparando os sálarios de uma cidade? Olhando apenas para as médias dos salários não conseguimos evidênciar a desigualdade que havia entre as cidades.
#
# ```python
# #Cidade A
# populacao = 1000
# salario = 1100
#
# media = 1100
#
# #Cidade B
# populacao = 1000
# salario1 = 1000000
# salario999 = 100
#
# media = (salario1 * 1 + salario999 * 999) / 1000
# media = 1099.90
# ```
#
# P.S: Se tiver dúvidas reveja essa parte da aula e tente enteder o problema da média.
#
#
# Outra métrica que pode nos ajudar a interpretar melhor os dados são os quartis, principalmente a [mediana](https://pt.wikipedia.org/wiki/Mediana_(estat%C3%ADstica))
#
# Vamos buscar dois filmes com médias muito mais próximas que Toy Story e Jumanji, para analisar outras métricas além das médias.
# + id="fFPWMZTcH5Em" colab_type="code" outputId="8f7a0421-2e0b-4a10-8b60-d0e0dd8f75e1" colab={"base_uri": "https://localhost:8080/", "height": 1000}
filmes_com_media.sort_values("nota", ascending=False)[2450:2500]
# + [markdown] id="OPzfr_Xmq2pG" colab_type="text"
# Bom, ordenando os filmes pela nota média e [fatiando](https://www.alura.com.br/artigos/listas-no-python) os dados entre 2450 e 2500, temos uma região onde as médias são semelhantes e provavelmente não têm apenas um único voto. Vamos comparar o filme *Wizard of Oz*, **filmeId=919** e *Little Miss Sunshine* **filmeId=46578**.
#
# Para não precisar copiar e colar toda hora o plot dos gráficos vamos criar nossa primeira função, assim passamos apenas o FilmeId e temos as informações desejadas.
# + id="izXeKOEMI6ER" colab_type="code" colab={}
def plot_filme(n):
    """Plot a histogram of the ratings of movie id *n* and return describe() stats."""
    # Ratings column restricted to the requested movie.
    notas = avaliacoes.query(f"filmeId=={n}")["nota"]
    notas.plot(kind='hist')
    return notas.describe()
# + [markdown] id="0m79xtW9MNAq" colab_type="text"
# Definimos nossa [função plot em python](https://www.caelum.com.br/apostila-python-orientacao-objetos/funcoes/#parmetros-de-funo) e repare que estamos usando **F-string** para fazer a interpolação dos dados, se tiver dúvida veja essa [explicação no fórum da alura](https://cursos.alura.com.br/forum/topico-para-que-serve-o-print-f-no-python-77720).
#
# Agora precisamos chamar a função!
# + id="D0hq5bNXKyH7" colab_type="code" outputId="73bd088c-f174-4de9-e1f2-2e724839fb26" colab={"base_uri": "https://localhost:8080/", "height": 421}
# The Wizard of Oz
plot_filme(919)
# + [markdown] id="O5DXB0OFKy3b" colab_type="text"
#
#
# A função plot, além de gerar o histograma também retorna algumas estatísticas. Vamos chamar a função agora para o filme *Little Miss Sunshine*.
# + id="zkC0s7BnL_h0" colab_type="code" outputId="5c39d5af-2237-4c15-9e94-61d89bae35bf" colab={"base_uri": "https://localhost:8080/", "height": 424}
plot_filme(46578)
# + [markdown] id="d8Yme2yUYME5" colab_type="text"
# Ótimo, agora com essas informações conseguimos comparar melhor ambos os filmes. Analisando os histogramas vemos que muitas pessoas realmente amam **Wizard of Oz** (notas 5), mas também temos pessoas que não gostam de forma alguma (notas 1). Quando comparamos com o histograma do **Little Miss Sunshine**, percebemos que os resultados se concentram entre valores medianos (notas 2.5-4).
#
# O que confirma nossa análise aqui é comparar os **25% 50% e 75%**. 50% é o valor da mediana, e ambos filmes tem mesma mediana, mas 25% e 75% são diferentes. Se você lembra lá da estatísitca esses são os [**1° 2° e 3° quartis**](https://pt.wikipedia.org/wiki/Quartil).
#
# Olha, mesclar os gráficos com as estatísticas ajuda a interpretar melhor os dados. Mas o que precisamos é uma imagem que nos ajude a interpretar os dados ainda melhor, o gráfico que nos ajuda neste caso é o **Boxplot**. Vamos adaptar nossa função para conseguir plotar o boxplot e interpretá-lo.
# + id="GfgAVFoafdZA" colab_type="code" outputId="a6f7d0d1-7aef-465b-cf11-6ec8f9dd9bb2" colab={"base_uri": "https://localhost:8080/", "height": 704}
def plot_filme(n):
    """Show a histogram and a boxplot of the ratings for movie ``n``.

    Returns the descriptive statistics (``describe()``) of those ratings.
    Relies on the module-level ``avaliacoes`` DataFrame and ``plt``.
    """
    notas = avaliacoes.query(f"filmeId=={n}")["nota"]
    notas.plot.hist()
    plt.show()
    print('\n')
    notas.plot.box()
    plt.show()
    return notas.describe()
plot_filme(919)
# + [markdown] id="tQtVcphSh47m" colab_type="text"
# E aí, viu como é simples criar um [boxplot com o pandas](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.box.html)?
#
# Apenas chamamos o método ```.plot.box()```, agora o que precisamos fazer é interpretar este gráfico.
#
# Vamos focar primeiro na "caixa": a linha verde que divide a caixa em duas é a mediana (compare com as estatísticas geradas pelo describe()), a parte superior da caixa é o 3° Quartil (75%) e a parte inferior é o 1° Quartil (25%).
#
# Agora repare nos limites inferior e superior, representados pelas extremidades em preto. Por coincidência, nesta imagem os limites inferior e superior são equivalentes aos pontos de máximo e mínimo, mas nem sempre será assim, pois esses limites superior e inferior são calculados e dependem de Q1 e Q3. Algumas vezes os limites podem sobrepor os extremos das "caixas" e isso geralmente ocorre quando temos uma quantidade pequena de dados.
#
# Como tivemos sobreposição do limite superior vamos calcular o boxplot de outro filme, para analisar o resultado.
#
# + id="yi-HJ5sK17Kh" colab_type="code" outputId="ea317637-305e-4678-8dcf-1960d913c0b4" colab={"base_uri": "https://localhost:8080/", "height": 707}
plot_filme(46578)
# + [markdown] id="nmtgu9Q42UNv" colab_type="text"
# Olha que legal, diferente do primeiro boxplot, neste os limites superiores não se sobrepõe e temos uma informação a mais, no caso temos essa bolinha localizada em **y=1**. A "bolinha" chamamos de valor discrepante, por ir além dos limites inferior e superior (chamamos na aula de [outliers](https://pt.wikipedia.org/wiki/Outlier), existem várias formas de calcular os outliers, mas no nosso caso esses são os outliers do boxplot).
#
# Não vamos entrar em todos os detalhes do boxplot mas recomendo a explicação do [wikipedia](https://pt.wikipedia.org/wiki/Diagrama_de_caixa), ela é muito completa, cheias de exemplo e imagens para facilitar o entendimento.
#
# Agora comparando os boxplot dos dois filmes deixa muito mais evidente as diferenças entre elas, o que ficava complexo olhando só médias e outras informações separadas.
#
# Embora tenhamos melhorado muito nossa qualidade de análise, ainda temos mais um ponto. Estamos comparando os boxplots dos filmes, mas eles estão em imagens separadas; vamos juntar vários boxplots em uma imagem só. Veja como podemos fazer isso usando o **seaborn**, para aprendermos outra forma de plotar boxplot!
#
# + id="Lo-rlsK5vsBk" colab_type="code" outputId="3b792896-0981-429c-d16d-2aa58fea4671" colab={"base_uri": "https://localhost:8080/", "height": 296}
sns.boxplot(data = avaliacoes.query("filmeId in [1,2,919,46578]"), x ="filmeId", y="nota")
# + [markdown] id="Mfx1Vzy-Mbtq" colab_type="text"
# Chamamos o `sns.boxplot()` passando três parâmetros. O parâmetro data é um dataframe das notas dos filmes Toy Story, Jumanji, Wizard of Oz e Little Miss Sunshine (usamos o `.query()` para selecionar os dados), **x** é o Id do filme e **y** as respectivas notas. Agora conseguimos comparar as notas dos filmes de forma muito mais clara; tente realizar a análise aí na sua casa!
#
#
# Com isso nós fechamos nossa segunda aula do **#quarentenadados**, viu quanta coisa aprendemos? Que tal colocar isso em prática?
#
#
# **Crie seu próprio notebook, reproduza nossa aula e resolva os desafios que deixamos para vocês**.
#
#
# Até a próxima aula!
#
#
#
# + [markdown] id="97Gxt8RlPFEj" colab_type="text"
# ##Desafio 1 do [<NAME>](https://twitter.com/guilhermecaelum)
#
# Rotacionar os thicks (os nomes dos generos) do gráfico de barras verdes (o último), de forma a deixar as legendas mais legíveis.
# + [markdown] id="5uIsDpkAz8oa" colab_type="text"
# ## Desafio 2 do [Paulo Silveira](https://twitter.com/paulo_caelum)
#
# Encontar vários filmes com médias próximas e distribuições diferentes, use a função **plot_filmes(n)** para plotar.
# + [markdown] id="NStHNYlcQrhs" colab_type="text"
# ## Desafio 3 do [Paulo Silveira](https://twitter.com/paulo_caelum)
#
# Criar o boxplot dos 10 filmes com mais votos (não é com maior média, é com mais votos!). Não apenas plot mas também analise e tente tirar conclusões.
# + [markdown] id="0ogSopISSYqK" colab_type="text"
# ## Desafio 4 do [Gu<NAME>](https://twitter.com/guilhermecaelum)
#
# Configurar a visualização do boxplot gerado pelo seaborn (último boxplot plotado na aula). Configurar o tamanho e colocar o nome dos filmes nos thicks.
# + [markdown] id="xpVPzWlhTjiw" colab_type="text"
# ## Desafio 5 do [<NAME>](https://twitter.com/allanspadini)
#
# Calcular moda, média e mediana dos filmes. Explore filmes com notas mais próximas de 0.5, 3 e 5.
# + [markdown] id="E1X5ReBxUNZq" colab_type="text"
# ## Desafio 6 da [<NAME>](https://twitter.com/thais_tandre)
#
# Plotar o boxplot e o histograma um do lado do outro (na mesma figura ou em figuras distintas, mas um do lado do outro).
#
# + [markdown] id="9sm-sJIPUfte" colab_type="text"
# ## Desafio 7 do [<NAME>](https://twitter.com/tgcsantos)
#
# Criar um gráfico de notas médias por ano (média geral considerando todos os filmes lançados naquele ano).
# + [markdown] id="hhhZIxlUs1nk" colab_type="text"
# # Aula 3
# + [markdown] id="fAEWqh3otA2x" colab_type="text"
#
# + id="WT2SZi9DtBDu" colab_type="code" colab={}
# + [markdown] id="5ql55hUatBYQ" colab_type="text"
#
# + id="VWUFrQnOtBh4" colab_type="code" colab={}
# + [markdown] id="AQTY8dPEtBsY" colab_type="text"
#
# + id="yOj4OOvZtB1x" colab_type="code" colab={}
# + [markdown] id="3tePSIE3tB-B" colab_type="text"
#
# + id="cNfbYSOytCFT" colab_type="code" colab={}
# + [markdown] id="mOPRwR2RtCOU" colab_type="text"
#
# + id="5Ay0zINatCXy" colab_type="code" colab={}
# + [markdown] id="54vyj1tbtCgp" colab_type="text"
#
# + id="IkXcdji3tCpb" colab_type="code" colab={}
# + [markdown] id="qPG3UroitCyB" colab_type="text"
#
# + id="Sqw7Feg4tC7X" colab_type="code" colab={}
# + [markdown] id="Ixd4uZ3JtDUv" colab_type="text"
#
# + id="Y1pRr5DBtEFs" colab_type="code" colab={}
# + [markdown] id="NBodQDxqVKgN" colab_type="text"
# #Não esqueça de compartilhar a solução dos seus desafios com nossos instrutores, seja no twitter ou linkedin. Boa sorte!
| aula-02/QuarentenaDados_aula02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
data=pd.read_csv('./Dataset/german_credit_data_weka_dataset.csv')
data.head()
# customer_type indicate the credit worthy customer.
# 1. creditworthy
# 2. non creditworthy
data.shape
data.columns
data=data.drop(['telephone', 'personal','present_residence','other_installment_plans'],axis=1)
data.info()
data['savings'].unique()
from sklearn.preprocessing import LabelEncoder
savings_dict = {'A65':0, 'A61':1, 'A63':3, 'A64':4, 'A62':2}
data['savings'].replace(savings_dict,inplace=True)
data.head()
data=pd.get_dummies(data,columns=['checking_account_status','credit_history','purpose','present_employment','property','housing','other_debtors','job','foreign_worker'])
data.shape
data.head()
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
def navie_bays(x_train, y_train):
    """Fit a Gaussian Naive Bayes model on the training data and return it.

    NOTE(review): the name is a typo of "naive_bayes", but it is called by
    this name elsewhere in the notebook, so it is kept for compatibility.
    """
    model = GaussianNB()
    model.fit(x_train, y_train)
    return model
def k_nearest_neighbours(x_train, y_train):
    """Fit a 10-neighbour KNN classifier on the training data and return it."""
    model = KNeighborsClassifier(n_neighbors=10)
    model.fit(x_train, y_train)
    return model
def svc(x_train, y_train):
    """Fit an RBF-kernel support vector classifier and return it."""
    model = SVC(kernel='rbf', gamma='scale')
    model.fit(x_train, y_train)
    return model
def decision_tree(x_train, y_train):
    """Fit a depth-limited (max_depth=6) decision tree and return it."""
    model = DecisionTreeClassifier(max_depth=6)
    model.fit(x_train, y_train)
    return model
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
X=data.drop('customer_type',axis=1)
Y=data['customer_type']
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2)
def build_and_train_classifier(x_train, y_train, classification_fn, x_eval=None, y_eval=None):
    """Train a classifier and print its train/test accuracy.

    Parameters
    ----------
    x_train, y_train : training features and labels.
    classification_fn : callable ``(x_train, y_train) -> fitted estimator``.
    x_eval, y_eval : optional evaluation features/labels. When omitted they
        default to the module-level ``x_test`` / ``y_test`` split, which the
        original implementation read directly as globals (kept for backward
        compatibility).

    Returns
    -------
    tuple of (train_score, test_score). Existing callers ignore the return
    value, so returning the scores is backward-compatible.
    """
    if x_eval is None:
        x_eval = x_test
    if y_eval is None:
        y_eval = y_test
    model = classification_fn(x_train, y_train)
    y_pred = model.predict(x_eval)
    train_score = model.score(x_train, y_train)
    test_score = accuracy_score(y_eval, y_pred)
    print("Training Score : ", train_score)
    print("Testing Score : ", test_score)
    return train_score, test_score
build_and_train_classifier(x_train,y_train,k_nearest_neighbours)
build_and_train_classifier(x_train,y_train,navie_bays)
build_and_train_classifier(x_train,y_train,decision_tree)
x_train_1,x_train_2,y_train_1,y_train_2=train_test_split(x_train,y_train,test_size=0.5)
rfc=RandomForestClassifier(max_depth=4,n_estimators=2,warm_start=True)
rfc.fit(x_train_1,y_train_1)
y_pred=rfc.predict(x_test)
test_score=accuracy=accuracy_score(y_test,y_pred)
print('Testing Score : ',test_score)
rfc.n_estimators+=2
rfc.fit(x_train_2,y_train_2)
y_pred=rfc.predict(x_test)
test_score=accuracy=accuracy_score(y_test,y_pred)
print('Testing Score : ',test_score)
| Multiple_Type_of_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Using the SageMaker TensorFlow Serving Container
#
# The [SageMaker TensorFlow Serving Container](https://github.com/aws/sagemaker-tensorflow-serving-container) makes it easy to deploy trained TensorFlow models to a SageMaker Endpoint without the need for any custom model loading or inference code.
#
# In this example, we will show how deploy one or more pre-trained models from [TensorFlow Hub](https://www.tensorflow.org/hub/) to a SageMaker Endpoint using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk), and then use the model(s) to perform inference requests.
# ## Setup
#
# First, we need to ensure we have an up-to-date version of the SageMaker Python SDK, and install a few
# additional python packages.
# !pip install -U --quiet "sagemaker>=1.14.2"
# !pip install -U --quiet opencv-python tensorflow-hub
# Next, we'll get the IAM execution role from our notebook environment, so that SageMaker can access resources in your AWS account later in the example.
# +
from sagemaker import get_execution_role
sagemaker_role = get_execution_role()
# -
# ## Download and prepare a model from TensorFlow Hub
#
# The TensorFlow Serving Container works with any model stored in TensorFlow's [SavedModel format](https://www.tensorflow.org/guide/saved_model). This could be the output of your own training job or a model trained elsewhere. For this example, we will use a pre-trained version of the MobileNet V2 image classification model from [TensorFlow Hub](https://tfhub.dev/).
#
# The TensorFlow Hub models are pre-trained, but do not include a serving ``signature_def``, so we'll need to load the model into a TensorFlow session, define the input and output layers, and export it as a SavedModel. There is a helper function in this notebook's `sample_utils.py` module that will do that for us.
# +
import sample_utils
model_name = 'mobilenet_v2_140_224'
export_path = 'mobilenet'
model_path = sample_utils.tfhub_to_savedmodel(model_name, export_path)
print('SavedModel exported to {}'.format(model_path))
# -
# After exporting the model, we can inspect it using TensorFlow's ``saved_model_cli`` command. In the command output, you should see
#
# ```
# MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
#
# signature_def['serving_default']:
# ...
# ```
#
# The command output should also show details of the model inputs and outputs.
# !saved_model_cli show --all --dir {model_path}
# ## Optional: add a second model
#
# The TensorFlow Serving container can host multiple models, if they are packaged in the same model archive file. Let's prepare a second version of the MobileNet model so we can demonstrate this. The `mobilenet_v2_035_224` model is a shallower version of MobileNetV2 that trades accuracy for smaller model size and faster computation, but has the same inputs and outputs.
# +
second_model_name = 'mobilenet_v2_035_224'
second_model_path = sample_utils.tfhub_to_savedmodel(second_model_name, export_path)
print('SavedModel exported to {}'.format(second_model_path))
# -
# Next we need to create a model archive file containing the exported model.
# ## Create a model archive file
#
# SageMaker models need to be packaged in `.tar.gz` files. When your endpoint is provisioned, the files in the archive will be extracted and put in `/opt/ml/model/` on the endpoint.
# !tar -C "$PWD" -czf mobilenet.tar.gz mobilenet/
# ## Upload the model archive file to S3
#
# We now have a suitable model archive ready in our notebook. We need to upload it to S3 before we can create a SageMaker Model that. We'll use the SageMaker Python SDK to handle the upload.
# +
from sagemaker.session import Session
model_data = Session().upload_data(path='mobilenet.tar.gz', key_prefix='model')
print('model uploaded to: {}'.format(model_data))
# -
# ## Create a SageMaker Model and Endpoint
#
# Now that the model archive is in S3, we can create a Model and deploy it to an
# Endpoint with a few lines of python code:
# +
from sagemaker.tensorflow.serving import Model

# Use an env argument to set the name of the default model.
# This is optional, but recommended when you deploy multiple models
# so that requests that don't include a model name are sent to a
# predictable model.
env = {'SAGEMAKER_TFS_DEFAULT_MODEL_NAME': 'mobilenet_v2_140_224'}

# framework_version must be a string (e.g. '1.11'); the SageMaker SDK uses it
# to resolve the TensorFlow Serving container image URI.
model = Model(model_data=model_data, role=sagemaker_role, framework_version='1.11', env=env)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.c5.xlarge')
# -
# ## Make predictions using the endpoint
#
# The endpoint is now up and running, and ready to handle inference requests. The `deploy` call above returned a `predictor` object. The `predict` method of this object handles sending requests to the endpoint. It also automatically handles JSON serialization of our input arguments, and JSON deserialization of the prediction results.
#
# We'll use these sample images:
#
# <img src="kitten.jpg" align="left" style="padding: 8px;">
# <img src="bee.jpg" style="padding: 8px;">
# +
# read the image files into a tensor (numpy array)
kitten_image = sample_utils.image_file_to_tensor('kitten.jpg')
# get a prediction from the endpoint
# the image input is automatically converted to a JSON request.
# the JSON response from the endpoint is returned as a python dict
result = predictor.predict(kitten_image)
# show the raw result
print(result)
# -
# ### Add class labels and show formatted results
#
# The `sample_utils` module includes functions that can add Imagenet class labels to our results and print formatted output. Let's use them to get a better sense of how well our model worked on the input image.
# +
# add class labels to the predicted result
sample_utils.add_imagenet_labels(result)
# show the probabilities and labels for the top predictions
sample_utils.print_probabilities_and_labels(result)
# -
# ## Optional: make predictions using the second model
#
# If you added the second model (`mobilenet_v2_035_224`) in the previous optional step, then you can also send prediction requests to that model. To do that, we'll need to create a new `predictor` object.
#
# Note: if you are using local mode (by changing the instance type to `local` or `local_gpu`), you'll need to create the new predictor this way instead:
#
# ```
# predictor2 = Predictor(predictor.endpoint, model_name='mobilenet_v2_035_224',
# sagemaker_session=predictor.sagemaker_session)
# ```
# +
from sagemaker.tensorflow.serving import Predictor
# use values from the default predictor to set up the new one
predictor2 = Predictor(predictor.endpoint, model_name='mobilenet_v2_035_224')
# make a new prediction
bee_image = sample_utils.image_file_to_tensor('bee.jpg')
result = predictor2.predict(bee_image)
# show the formatted result
sample_utils.add_imagenet_labels(result)
sample_utils.print_probabilities_and_labels(result)
# -
# ## Additional Information
#
# The TensorFlow Serving Container supports additional features not covered in this notebook, including support for:
#
# - TensorFlow Serving REST API requests, including classify and regress requests
# - CSV input
# - Other JSON formats
#
# For information on how to use these features, refer to the documentation in the
# [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/deploying_tensorflow_serving.rst).
#
# ## Cleaning up
#
# To avoid incurring charges to your AWS account for the resources used in this tutorial, you need to delete the SageMaker Endpoint.
predictor.delete_endpoint()
| sagemaker-python-sdk/tensorflow_serving_container/tensorflow_serving_container.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm_notebook
# -
# For each corpus, split the training set per lemma into an initial labeled
# pool (first half) and an unlabeled pool (second half) for active learning,
# reshuffling so the labeled half sees at least two senses whenever possible.
for corpus in ('sensem', 'semeval'):
    input_file = '../../resources/hashed/%s/train_dataset.npz' % corpus
    output_file = '../../resources/active_learning/%s_indices.npz' % corpus

    dataset = np.load(input_file)
    target = dataset['target']
    lemmas = dataset['lemmas']

    initial_indices = []
    unlabeled_indices = []

    for lemma in np.unique(lemmas):
        indices = np.where(lemmas == lemma)[0]
        # np.int was removed in NumPy 1.20; the builtin int is the supported spelling.
        initial_size = int(indices.shape[0] / 2)

        lemma_initial_indices = indices[:initial_size]
        lemma_unlabeled_indices = indices[initial_size:]

        # If the lemma has >= 2 senses overall but the initial half covers
        # fewer than 2, reshuffle until the labeled pool is sense-diverse.
        while np.unique(target[indices]).shape[0] >= 2 and np.unique(target[lemma_initial_indices]).shape[0] < 2:
            np.random.shuffle(indices)
            lemma_initial_indices = indices[:initial_size]
            lemma_unlabeled_indices = indices[initial_size:]

        initial_indices.extend(lemma_initial_indices)
        unlabeled_indices.extend(lemma_unlabeled_indices)

    initial_indices = np.array(initial_indices, dtype=np.int32)
    unlabeled_indices = np.array(unlabeled_indices, dtype=np.int32)

    np.savez_compressed(output_file, initial_indices=initial_indices, unlabeled_indices=unlabeled_indices)
# For each corpus, build a class-stratified 50/50 split of the training set
# into an initial labeled pool and an unlabeled pool for active learning.
# NOTE(review): this overwrites the same output files written by the
# per-lemma split above — presumably only one strategy is meant per run.
for corpus in ('sensem', 'semeval'):
    input_file = '../../resources/hashed/%s/train_dataset.npz' % corpus
    output_file = '../../resources/active_learning/%s_indices.npz' % corpus
    dataset = np.load(input_file)
    target = dataset['target']
    initial_size = 0.5  # fraction of examples that start out labeled
    # Class labels and per-class example counts.
    classes, y_counts = np.unique(target, return_counts=True)
    n_cls = classes.shape[0]
    n_initial = target.shape[0] * initial_size
    n_unlabel = target.shape[0] - n_initial
    # Both pools must be able to hold at least one example of every class.
    assert n_initial >= n_cls and n_unlabel >= n_cls
    # Rounded per-class initial counts, forced to be at least 1 per class.
    initial_count = np.maximum(np.round(y_counts * initial_size), np.ones(n_cls)).astype(np.int32)
    unlabeled_count = (y_counts - initial_count).astype(np.int32)
    initial_indices = []
    unlabeled_indices = []
    for idx, cls in enumerate(classes):
        labels_for_class = np.where(target == cls)[0]
        # First slice of each class goes to the labeled pool, the rest to the unlabeled pool.
        initial_indices.extend(labels_for_class[:initial_count[idx]])
        unlabeled_indices.extend(labels_for_class[initial_count[idx]:initial_count[idx]+unlabeled_count[idx]])
    initial_indices = np.array(initial_indices, dtype=np.int32)
    unlabeled_indices = np.array(unlabeled_indices, dtype=np.int32)
    np.savez_compressed(output_file, initial_indices=initial_indices, unlabeled_indices=unlabeled_indices)
| notebooks/active_learning_corpus_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import datetime
import os
import glob, os
import time
# O que vou fazer amanhã:
# - Abrir todos os bancos do granular activity/opens com o código que gera uma coluna com o nome do arquivo.
# - Ordenar por data e dropar duplicados, assim estimarei a data de envio do email (com a proxy da data da primeira abertura)
# - Terei um banco com todas as campanhas com o nome delas (nome do arquivo) no mesmo formato que no aggregated activity
# - Depois tenho que juntar todos os bancos do aggregated activity/opened e aggregated activity/not_opened
# - Criar uma coluna em cada um desses que especifíque se é de aberto ou fechado
# - Em seguida, concatenar opened com not opened
# - Mergir com o banco de campanhas
# - Ordenar por email e data de envio da campanha(descendente)!
# - Daí crio uma contagem que reseta com emails, onde o último email recebido pela pessoa é 1, o segundo 2, assim por diante...
# - Depois é apagar com filtros compostos: Se o email 1 (mais recente) é não aberto e o 2, 3,4 e 5
files = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/granular_activity/opens/*.csv')
#df = pd.concat([pd.read_csv(fp, encoding='latin-1',nrows=1 ).assign(New=os.path.basename(fp) ) for fp in files], sort=False)
# Não quero me confiar no nrows, que o pressuposto é que a primeira linha de cada arquivo é o timestamp da primeira abertura
df = pd.concat([pd.read_csv(fp, encoding='latin-1').assign(New=os.path.basename(fp) ) for fp in files], sort=False)
df = df.sort_values(['New', 'Timestamp'])
df = df[['New', 'Timestamp']].drop_duplicates('New')
# Existem 6190 arquivos, mas só 4980 estão sendo lidos. Curiosamente, alguns deles são os que não tem. 2 horas pra descobrir que tudo isso era pq são arquivos vazios. A maior parte são testes ab de 100%, então óbvio que ninguém recebe o combo vencedor. Um caso que se eu fosse irresponsável, não ia dar em nada. Pq a perda não existe de vdd, é só a redução natural de categorias, já que algums não tem observações. O código abaixo (a versão limpa, claro), foi pra idenficcar isso.
# +
#paths = pd.DataFrame({'caminhos':files})
#paths['caminhos'] = paths['caminhos'].str[len('C:/Users/ander/Documents/Nossas_mailchimp/granular_activity/opens\\'):] #slicing
#erros = pd.merge(df, paths, left_on='New', right_on='caminhos',how='outer', indicator=True)
#erros[erros['_merge'] != 'both'].caminhos[5000]
#df[df['New'].str.contains('340630_-pdm-solidariedade-nas-ruas-e-ilera-con')]
# -
# pd.read_csv('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/opened/308509_-rioacess-vel.csv',
# encoding='latin-1', usecols = [0,1,2,34, 36]).columns
files_opened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/opened/*.csv')
# Tag each row with its source file name (column `New`) so the campaign can
# be recovered after concatenation.
opened = pd.concat([pd.read_csv(fp, encoding='latin-1',usecols = [0,1,2]
    ).assign(New=os.path.basename(fp) ) for fp in files_opened], sort=False)
# Fixed NameError: the frame is assigned to `opened`, not `opended`.
opened.shape

files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/not_opened/*.csv')
not_opened = pd.concat([pd.read_csv(fp, encoding='latin-1',usecols = [0,1,2]
    ).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
not_opened.shape

not_opened.to_csv('all_not_opened.csv', index=False)
opened.to_csv('all_opened.csv', index=False)
df.to_csv('all_emails.csv', index=False)

s_nopened = not_opened.sample(frac=0.01)
s_opened = opened.sample(frac=0.02)
s_opened.shape
# ### Dia 2 - Reiniciei o kernel e vou fazer as operações agora sem a memória pesada
# open_1 = s_opened[['Email', 'Nome', 'Sobrenome', 'New']]
# open_2 = s_opened[['Email Address', 'First Name', 'Last Name', 'New']]
# open_3 = s_opened[['E-mail', 'First Name do eleitor', 'New']]
# open_3['Sobrenome'] = ''
# open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
# open_3.columns = ['Email', 'Nome', 'New', 'Sobrenome']
# open_3 = open_3[['Email', 'Nome', 'Sobrenome', 'New']]
# opens = pd.concat([open_1, open_2, open_3])
#
# opens = opens.dropna(subset=['Email'])
# opens = opens.merge(df,on='New')
# opens['Atividade'] = 'abertura'
# n_open_1 = s_nopened[['Email', 'Nome', 'Sobrenome', 'New']]
# n_open_2 = s_nopened[['Email Address', 'First Name', 'Last Name', 'New']]
# n_open_3 = s_nopened[['E-mail', 'First Name do eleitor', 'New']]
# n_open_3['Sobrenome'] = ''
# n_open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
# n_open_3.columns = ['Email', 'Nome', 'New', 'Sobrenome']
# n_open_3 = open_3[['Email', 'Nome', 'Sobrenome', 'New']]
# n_opens = pd.concat([n_open_1, n_open_2, n_open_3])
#
# n_opens = n_opens.dropna(subset=['Email'])
# n_opens = n_opens.merge(df,on='New')
# n_opens['Atividade'] = 'não abertura'
start_time = time.time()
#not_opened = pd.read_csv('all_not_opened.csv')
#opened = pd.read_csv('all_opened.csv')
emails = pd.read_csv('all_emails.csv')
print("--- %s seconds ---" % (time.time() - start_time))
# opens
# +
open_1 = opened[['Email', 'Nome', 'Sobrenome', 'New']]
open_2 = opened[['Email Address', 'First Name', 'Last Name', 'New']]
open_3 = opened[['E-mail', 'First Name do eleitor', 'New']]
open_3['Sobrenome'] = ''
open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
open_3.columns = ['Email', 'Nome', 'New', 'Sobrenome']
open_3 = open_3[['Email', 'Nome', 'Sobrenome', 'New']]
opens = pd.concat([open_1, open_2, open_3])
opens = opens.dropna(subset=['Email'])
opens = opens.merge(emails,on='New')
opens['Atividade'] = 'abertura'
# -
# Materialize the 10M-row head once (the author noted head() was faster than
# the full frame) instead of computing it twice, then keep only activity
# after 2019-01-01.
_opens_head = opens.head(10000000)
recent_opens = _opens_head[_opens_head['Timestamp'] > '2019-01-01 12:12:48']
recent_opens.to_csv('recent_opens.csv', index=False)
# Já foi rodado
n_opened = not_opened.merge(emails, on='New')
# +
start_time = time.time()
n_opened.to_csv('n_opened.csv', index=False)
print("--- %s seconds ---" % (time.time() - start_time))
# -
# Recomeçar daqui: filtrar acima de 2019 e depois salvar (sábado). Na segunda volto e reorganizo, concateno com opens, ordeno e faço os cortes
# Estratégia nova: impossível utilizar todas as linhas de não aberturas, então criei uma pasta só pros arquivos a partir de 2019.
# Basicamnte recomecei tudo a partir daqui
# +
start_time = time.time()
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/not_opened_recentes/*.csv')
n_opens = pd.concat([pd.read_csv(fp, encoding='latin-1',usecols = [0,1,2]
).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
print("--- %s seconds ---" % (time.time() - start_time))
# +
start_time = time.time()
n_open_1 = n_opens[['Email', 'Nome', 'Sobrenome', 'New']].dropna(subset=['Email'])
n_open_2 = n_opens[['Email Address', 'First Name', 'Last Name', 'New']].dropna(subset=['Email Address'])
n_open_2.columns = ['Email', 'Nome', 'Sobrenome', 'New']
n_opens = pd.concat([n_open_1, n_open_2])
#n_opens = n_opens.dropna(subset=['Email'])
n_opens = n_opens.merge(emails,on='New')
n_opens['Atividade'] = 'não abertura'
print("--- %s seconds ---" % (time.time() - start_time))
# -
# Tática do opens abaixo. Mas pro n_opens, vai ser primeiro merge (já feito), depois corte de datas e só aí reorganizo e concateno. Depois, vou concatenar com o opens, reordenar e fazer os cortes
n_opens.to_csv('recent_n_opens.csv', index=False)
opens = pd.read_csv('recent_opens.csv')
opens.shape[0] + n_opens.shape[0]
# Build the combined activity log first, then inspect and persist it.
# (The original inspected `all_activities` one line before defining it,
# which raises NameError.)
all_activities = pd.concat([opens, n_opens])
all_activities = all_activities.sort_values(['Email','Timestamp'])
type(all_activities['Timestamp'])
all_activities.to_csv('all_recent_activities.csv', index=False)
# DataFrame.to_json only accepts index=False for orient='split'/'table';
# orient='records' omits the index and is valid for downstream consumers.
all_activities.to_json('all_recent_activities.json', orient='records')
# tirar média e desvio padrão do número de emails de cada um e do % de abertura de cada pessoa. Agora é só lazer.
# porra, agora ainda vai ter que juntar com as inscrições de cada pessoa. Se é meu rio, ms, mapa..
# ## Recomeço aqui
df = pd.read_csv('all_recent_activities.csv')
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/not_opened_recentes/*.csv')
n_opens = pd.concat([pd.read_csv(fp, encoding='latin-1'
).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
n_opens = n_opens[['Email', 'Nome', 'Sobrenome', 'Inscrições', 'Interesses', 'Member Rating', 'New', 'Email Address',
'First Name', 'Last Name']].dropna(subset=['Inscrições'])
files_notopened = glob.glob('C:/Users/ander/Documents/Nossas_mailchimp/aggregate_activity/opened_recentes/*.csv')
opens = pd.concat([pd.read_csv(fp, encoding='latin-1'
).assign(New=os.path.basename(fp) ) for fp in files_notopened], sort=False)
opens = opens[['Email', 'Nome', 'Sobrenome', 'Inscrições', 'Interesses', 'Member Rating', 'New', 'Email Address',
'First Name', 'Last Name']].dropna(subset=['Inscrições'])
opens.Email.nunique()
n_opens = n_opens[['Email', 'Inscrições', 'Member Rating', 'New']].drop_duplicates('Email', keep='last')
opens = opens[['Email', 'Inscrições', 'Member Rating', 'New']].drop_duplicates('Email', keep='last')
inscricoes = pd.concat([n_opens, opens])
inscricoes.shape
inscricoes = inscricoes.merge(emails, on='New')
inscricoes = inscricoes.sort_values(['Email', 'Timestamp'])
inscricoes = inscricoes.drop_duplicates("Email")
inscricoes.columns = ['Email', 'Inscrições', 'Menber Rating', 'New', 'Timestamp']
# Merge the activity log with the subscriptions sheet on Email.
# NOTE(review): 'Menber Rating' is the (misspelled) column name as it appears
# in the source data - do not "correct" it here or the merge breaks.
df = df.merge(inscricoes[['Email', 'Inscrições', 'Menber Rating']], on='Email', how='outer', indicator=True)
df.to_csv('all_recent_activities_inscricoes.csv', index=False)
# Round-trip through CSV, presumably to normalize dtypes - TODO confirm this
# is intentional; it also replaces the categorical '_merge' with plain strings.
df = pd.read_csv('all_recent_activities_inscricoes.csv')
df = df.drop('_merge', axis=1)
# Build a user-level table
# Per-user counts of "não abertura" (not opened) and "abertura" (opened) events.
user_nopen = df[df['Atividade'] == 'não abertura'].groupby('Email', as_index=False).agg({'Atividade': "count"})
user_open = df[df['Atividade'] == 'abertura'].groupby('Email', as_index=False).agg({'Atividade': "count"})
# First timestamp / "New" flag, and most recent subscriptions / rating per user.
user_geral = df.groupby('Email', as_index=False).agg({"Timestamp":"first","New" : "first" ,"Inscrições":"last","Menber Rating":"last"})
user_nopen.columns = ['Email','n_open']
user_open.columns = ['Email', 'open']
user_geral.columns = ['Email', 'First Email', 'New', 'Inscrições', 'Member Rating']
# Outer-join so users with only opens or only non-opens are kept;
# '_merge' records which side(s) each user came from.
user = pd.merge(user_nopen, user_open, on='Email', how='outer', indicator=True)
user = user.merge(user_geral, on='Email')
# Overall open rate
user.open.sum() /(user.open.sum() + user.n_open.sum())
user['Inscrições'] = user['Inscrições'].fillna('0')
# Users subscribed to any of these city lists are "imune" (exempt from
# deletion); everyone else is "elegível" (eligible for the cut).
user['corte'] = np.where((user['Inscrições'].str.contains('Meu Recife') |user['Inscrições'].str.contains('Minha Jampa')
| user['Inscrições'].str.contains('Minha Campinas') | user['Inscrições'].str.contains('Minha Porto Alegre'))
, "imune", "elegível")
imunes = user[user['corte'] == 'imune']
user = user[user['corte'] == 'elegível']
# +
#user[user['Email'] =='<EMAIL>']
# -
# First exclusions
# 'left_only' users appear only in the non-open table: they never opened anything.
nunca_abriu = user[user['_merge'] =='left_only']
apagar_1 = nunca_abriu[nunca_abriu['n_open'] >= 3] # never opened despite receiving 3+ emails
# never opened, received fewer than 3 emails, but is an old subscriber
apagar_2 = nunca_abriu[(nunca_abriu['n_open'] < 3) & (nunca_abriu['First Email'] < '2019-07-01 00:00:01')]
# opened nothing, but received only 1-2 emails and joined less than a year ago
alerta = nunca_abriu[(nunca_abriu['n_open'] < 3) & (nunca_abriu['First Email'] > '2019-07-01 00:00:01')]
apagar_1.to_csv('nunca_abriu_1.csv')
apagar_2.to_csv('nunca_abriu_2.csv')
alerta.to_csv('nunca_abriu_alerta.csv')
# NOTE(review): pandas is re-imported and df is rebound here - from this point
# df holds the 'nunca_abriu_1' export, not the activity table above. Verify
# this rebinding is intended.
import pandas as pd
df = pd.read_csv('nunca_abriu_1.csv')
df['Member Rating'].value_counts(dropna=False)
apagar_1.shape
apagar_2.shape
alerta.shape
nunca_abriu.n_open.sum()
# Drop users already covered by the "never opened" set (keep non-matches only).
df = df.merge(nunca_abriu[['Email']], on='Email', how='outer', indicator=True)
df = df[df['_merge'] != "both"]
# Re-apply the city-list exemption on this table.
df['corte'] = np.where((df['Inscrições'].str.contains('Meu Recife') |df['Inscrições'].str.contains('Minha Jampa')
| df['Inscrições'].str.contains('Minha Campinas') | df['Inscrições'].str.contains('Minha Porto Alegre'))
, "imune", "elegível")
df = df[df['corte'] == 'elegível']
# Handy trick: drop duplicates while keeping the last N rows per group
df =df.sort_values(['Email', 'Timestamp'])
df['Inscrições'] = df['Inscrições'].fillna('0')
df_3 = df.groupby('Email').tail(3) #last 3 rows
df_5 = df.groupby('Email').tail(5) #last 5 rows
df_10 = df.groupby('Email').tail(10) #last 10 rows
# +
# One-hot flags: 1 if the row is an open / non-open event, else 0.
df_3['abertura'] = np.where((df_3['Atividade'] =='abertura') ,1, 0)
df_3['não abertura'] = np.where((df_3['Atividade'] == 'não abertura'), 1, 0)
df_5['abertura'] = np.where((df_5['Atividade'] =='abertura') ,1, 0)
df_5['não abertura'] = np.where((df_5['Atividade'] == 'não abertura'), 1, 0)
df_10['abertura'] = np.where((df_10['Atividade'] =='abertura') ,1, 0)
df_10['não abertura'] = np.where((df_10['Atividade'] == 'não abertura'), 1, 0)
# -
# Per-user totals over the last 3/5/10 received emails.
df_3 = df_3.groupby(['Email', 'Inscrições'],as_index=False).agg({'Atividade': "count", 'abertura': 'sum', 'não abertura':'sum'})
df_5 = df_5.groupby(['Email', 'Inscrições'],as_index=False).agg({'Atividade': "count", 'abertura': 'sum', 'não abertura':'sum'})
df_10=df_10.groupby(['Email', 'Inscrições'],as_index=False).agg({'Atividade': "count", 'abertura': 'sum', 'não abertura':'sum'})
# Users who received exactly 3/5/10 emails and opened none of them.
apagar_3 = df_3[(df_3['Atividade'] == 3) & (df_3['não abertura'] == 3)]
apagar_5 = df_5[(df_5['Atividade'] == 5) & (df_5['não abertura'] == 5)]
apagar_10 = df_10[(df_10['Atividade'] == 10) & (df_10['não abertura'] == 10)]
apagar_3.to_csv('apagar_3.csv', index=False)
apagar_5.to_csv('apagar_5.csv', index=False)
apagar_10.to_csv('apagar_10.csv', index=False)
| mailchimp_1.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Hlg1y9CXgyNL" colab_type="text"
# ## Module 2.4: Working with Auto-Encoders in Keras (A Review)
#
# We implement a denoising auto-encoder in the Keras functional API. In this module we will pay attention to:
#
# 1. Using the Keras functional API for defining models.
# 2. Understanding denoising auto-encoders.
#
# A denoising auto-encoder is at base just a normal MLP with an unusual use. So this will be a relatively simple tutorial.
#
# Those students who are comfortable with all these matters might consider skipping ahead.
#
# Note that we will not spend time tuning hyper-parameters: The purpose is to show how different techniques can be implemented in Keras, not to solve particular data science problems as optimally as possible. Obviously, most techniques include hyper-parameters that need to be tuned for optimal performance.
# + [markdown] id="i5Dcz2UkDL7P" colab_type="text"
# First we import required libraries.
# + id="Ix_kQNNoPm3M" colab_type="code" colab={}
import numpy as np
from keras.datasets import mnist
from keras.backend import int_shape
from keras.models import Sequential
from keras import Model
from keras.layers import Dense,Dropout,Flatten,Activation,Input,Reshape, Conv2DTranspose
from keras.optimizers import Adam
from keras.layers.convolutional import Conv2D,MaxPooling2D
import matplotlib.pyplot as plt
# + [markdown] id="WnXhgPsJCTZR" colab_type="text"
# We load the MNIST data. These are 28 by 28 greyscale images of handwritten digits (0-9).
#
# We are going to want a few versions of the images:
# - Normalized
# - Normalized + Noise
#
# Of course we also want the image labels, and we will also make use of a vector of class names. Below we give a function that will get these things, the data components divided into training and test sets. We will then call the function.
# + id="HX0PngdmrAYY" colab_type="code" colab={}
def get_data():
    """Load MNIST, scale to [0, 1], build noisy copies, and return
    (x_train, x_train_noisy, y_train, x_test, x_test_noisy, y_test, class_names).

    Bug fix: the test images were previously reshaped from ``x_train_raw``
    instead of ``x_test_raw``, so ``x_test`` silently contained training
    images (and the train/test sets even had mismatched lengths).
    """
    (x_train_raw, y_train), (x_test_raw, y_test) = mnist.load_data()
    # Pre-process them into real numbers between 0 and 1
    x_train_raw = x_train_raw.astype('float32') / 255.
    x_test_raw = x_test_raw.astype('float32') / 255.
    # Reshape to channels-last (single channel, since greyscale)
    x_train = np.reshape(x_train_raw, (-1, x_train_raw.shape[1], x_train_raw.shape[2], 1))
    x_test = np.reshape(x_test_raw, (-1, x_test_raw.shape[1], x_test_raw.shape[2], 1))
    # Make noisy images by adding Gaussian noise with mean=0.5 and std=0.5
    noise = np.random.normal(loc=0.5, scale=0.5, size=x_train.shape)
    x_train_noisy = x_train + noise
    noise = np.random.normal(loc=0.5, scale=0.5, size=x_test.shape)
    x_test_noisy = x_test + noise
    # But clip values so they remain within 0 and 1
    x_train_noisy = np.clip(x_train_noisy, 0., 1.)
    x_test_noisy = np.clip(x_test_noisy, 0., 1.)
    class_names = np.array(["0","1","2","3","4","5","6","7","8","9"])
    return x_train,x_train_noisy,y_train,x_test,x_test_noisy,y_test,class_names
# + id="88ZRihRwwxb8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="64395284-0a87-4d2d-cc15-77f7b716c865" executionInfo={"status": "ok", "timestamp": 1570173792483, "user_tz": -120, "elapsed": 7973, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAaRQqpOuFHR3D_ZulW6qlXPomIq5vZ-wR4ZuIm=s64", "userId": "16725792548700883920"}}
x_train,x_train_noisy,y_train,x_test,x_test_noisy,y_test,class_names=get_data()
# + [markdown] id="XwcmQqFXDiiF" colab_type="text"
# Let's create a function that will let us look at the MNIST images, and then call it.
# + id="lUjj2WJAi_1L" colab_type="code" colab={}
def show_images(images,labels,class_names):
    """Display a 5x5 grid of 25 randomly chosen images, captioned with
    their class names."""
    plt.figure(figsize=(10,10))
    picks = np.random.randint(0, images.shape[0], 25)
    for slot, idx in enumerate(picks, start=1):
        plt.subplot(5, 5, slot)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(images[idx].reshape((28,28)), cmap=plt.cm.binary)
        plt.xlabel(class_names[labels[idx]])
    plt.show()
# + id="B65ILYnzrOxM" colab_type="code" outputId="40c881f1-9492-4e92-b00a-aedb82dbc9c9" executionInfo={"status": "ok", "timestamp": 1570173930727, "user_tz": -120, "elapsed": 1574, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAaRQqpOuFHR3D_ZulW6qlXPomIq5vZ-wR4ZuIm=s64", "userId": "16725792548700883920"}} colab={"base_uri": "https://localhost:8080/", "height": 592}
show_images(x_train,y_train,class_names)
# + [markdown] id="jMPNZkmAv7gR" colab_type="text"
# Now the interesting part: It is time to define our Denoising Autoencoder.
#
# It will consist of a number of convolution layers leading to a (flat) vector encoding of the image. We will then seek to reconstruct an image from this vector encoding by passing through a number of transposed (reversed) convolution layers. The idea is that we will encode the important information of a noisy image in the encoding, from which we will reconstruct a denoised version.
#
# We will see when we look at variational autoencoders later that it can be useful to be able to use the encoder and decoder components of some autoencoders as separate models. To that end, we will practice using the Keras functional API to construct several models from the same set of layers.
# + id="OWYU3uR4v7qJ" colab_type="code" colab={}
def get_models():
    """Build and return (encoder, decoder, autoencoder) Keras models.

    The encoder maps a 28x28x1 image through two strided convolutions to a
    16-dimensional vector; the decoder inverts the path with transposed
    convolutions; the autoencoder chains decoder(encoder(input)).
    """
    # ---- Encoder ----
    encoder_inputs = Input(shape=(28,28,1), name='Encoder_Input')
    feat = Conv2D(filters=32, kernel_size=(3,3), strides=2,
                  activation='relu', padding='same')(encoder_inputs)
    last_conv = Conv2D(filters=64, kernel_size=(3,3), strides=2,
                       activation='relu', padding='same')(feat)
    encoding = Dense(16, name='Encoding')(Flatten()(last_conv))
    encoder = Model(encoder_inputs, encoding)

    # ---- Decoder ----
    decoder_inputs = Input(shape=(16,), name='Decoder_Input')
    # Recover the pre-flatten feature-map shape from the encoder instead of
    # hard-coding it here.
    fmap = int_shape(last_conv)
    h = Dense(fmap[1] * fmap[2] * fmap[3])(decoder_inputs)
    h = Reshape((fmap[1], fmap[2], fmap[3]))(h)
    # Transposed convolutions undo the two strided convolutions.
    h = Conv2DTranspose(filters=64, kernel_size=(3,3), strides=2,
                        activation='relu', padding='same')(h)
    h = Conv2DTranspose(filters=32, kernel_size=(3,3), strides=2,
                        activation='relu', padding='same')(h)
    # A final single-filter layer rebuilds the (pre-activation) image matrix.
    h = Conv2DTranspose(filters=1, kernel_size=(3,3), padding='same')(h)
    decoder_outputs = Activation('sigmoid', name='Decoder_Output')(h)
    decoder = Model(decoder_inputs, decoder_outputs)

    # ---- Autoencoder: decoder applied to the encoder's output ----
    autoencoder = Model(encoder_inputs, decoder(encoder(encoder_inputs)))

    return encoder,decoder,autoencoder
# + [markdown] id="0O9w9mcYEllO" colab_type="text"
# Let's get our models.
# + id="30uKupPN4MnF" colab_type="code" colab={}
encoder,decoder,autoencoder=get_models()
# + [markdown] id="j1fFvSn-EnlM" colab_type="text"
# We will be training and using the autoencoder model. So we need to compile it with a chosen optimizer.
# + id="dct6OuF_4JXT" colab_type="code" outputId="74b28a63-0e18-4aa6-fc12-2879f5e66770" executionInfo={"status": "ok", "timestamp": 1570174052530, "user_tz": -120, "elapsed": 1045, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAaRQqpOuFHR3D_ZulW6qlXPomIq5vZ-wR4ZuIm=s64", "userId": "16725792548700883920"}} colab={"base_uri": "https://localhost:8080/", "height": 71}
# Mean-squared-error reconstruction loss, Adam optimizer.
autoencoder.compile(loss='mse', optimizer='adam')
# + [markdown] id="BgK9-gxd7Pdk" colab_type="text"
# Now we can train our autoencoder. We will use the test data as validation data.
# + id="uaAnEOih5aS1" colab_type="code" outputId="cb2c6730-11db-4a58-e5ac-3f84ca6ff264" executionInfo={"status": "ok", "timestamp": 1570176789805, "user_tz": -120, "elapsed": 2732684, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAaRQqpOuFHR3D_ZulW6qlXPomIq5vZ-wR4ZuIm=s64", "userId": "16725792548700883920"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train the autoencoder
# Note the target (Y) variables are just the original images
# and the input (X) variables are the noisy equivalents.
history=autoencoder.fit(x_train_noisy,
x_train,
validation_data=(x_test_noisy, x_test),
epochs=30,
batch_size=128)
# + [markdown] id="yK42nTxT7k16" colab_type="text"
# You can graph the training history here if you like. It is left as an exercise.
# + id="mTHX8n7Z7ree" colab_type="code" colab={}
# Graph the training history if desired
# + [markdown] id="uGGx4I4V7qD5" colab_type="text"
# Now we have a trained denoising autoencoder model, what can we do with it?
#
# We can denoise noisy data of this type - useful in the real world if data is sometimes noisy or corrupted. In our case we can denoise noisy handwritten digits. Let's have a look at an example. First we will create a (quick and rough) function to show the noisy image and compare it with the denoised version.
# + id="K7Dlnjy28Kf9" colab_type="code" colab={}
def denoiser_example(model,noisy_image,true_image):
    """Show a noisy image, the model's denoised reconstruction of it, and
    the original clean image, each in its own figure."""
    denoised = model.predict(np.expand_dims(noisy_image, axis=0))
    panels = [
        (noisy_image, "Noisy Image"),
        (denoised, "Denoised Image"),
        (true_image, "True Image"),
    ]
    for img, caption in panels:
        plt.imshow(np.reshape(img, (28,28)), cmap='gray')
        plt.xlabel(caption)
        plt.show()
# + [markdown] id="hkoGEi1YJJQo" colab_type="text"
# Now we can call that function with a random image from the test/validation data.
# + id="Tji7eZ-DJDHl" colab_type="code" outputId="1755b129-822e-4e7f-8161-26ef8eb69bb6" executionInfo={"status": "ok", "timestamp": 1570177883393, "user_tz": -120, "elapsed": 1403, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAaRQqpOuFHR3D_ZulW6qlXPomIq5vZ-wR4ZuIm=s64", "userId": "16725792548700883920"}} colab={"base_uri": "https://localhost:8080/", "height": 815}
denoiser_example(autoencoder,x_test_noisy[0],x_test[0])
| Module 2.4_ NAEs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/julianovale/pythonparatodos/blob/main/M%C3%B3dulo02Aula03.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="fjDz7Cm3FK0x" outputId="302d7f84-8698-424c-d425-168996f4693f"
# Integers 1 through 10 (step 1).
x = list(range(1,11,1))
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="SSxiV4u7Fxa-" outputId="cf3aefd1-6fb0-4dff-c109-4f134401e95a"
# Even numbers counting down from 12 to 2.
y = list(range(12,1,-2))
print(y)
# + colab={"base_uri": "https://localhost:8080/"} id="4RLuX3yNGC9d" outputId="05f858ae-3db8-44f7-ec1f-e0952bb8354d"
print("a soma é: ", sum(x))
# + colab={"base_uri": "https://localhost:8080/"} id="Ew3c3FXsGLg8" outputId="2f7ec81b-dbe3-4a94-f1dd-6e760aeac4ed"
# Fix: "náximo" was a typo for "máximo" (maximum).
print("o máximo é: ", max(x))
# + colab={"base_uri": "https://localhost:8080/"} id="e4TYaNU3GQDV" outputId="74b87149-a3e7-4c12-88a9-aa994d49d7b6"
print("o mínimo é: ", min(x))
# + colab={"base_uri": "https://localhost:8080/"} id="IAgp_twxGb61" outputId="2620e138-6f5d-4f3a-d226-8bbd90a861c6"
# How many times 8 appears in x (once).
count = x.count(8)
print("Count = ", count)
# + colab={"base_uri": "https://localhost:8080/"} id="6Q2QzszSITh1" outputId="5cda8142-0174-4214-eca4-a58fea65d4e1"
# 15 is not in y, so this is 0.
count2 = y.count(15)
print(count2)
# + colab={"base_uri": "https://localhost:8080/"} id="uLOLzTm3Ig7n" outputId="ed834f3b-3bc5-4111-8442-4fcc30ac1224"
# In-place sort: descending, then back to ascending.
x.sort(reverse = True)
print(x)
x.sort(reverse= False)
print(x)
| Módulo02Aula03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 1 : What is pyngsi ?
# [pyngsi](https://github.com/pixel-ports/pyngsi) is a Python framework that allows to write a data-acquisition pipeline for [Fiware](https://www.fiware.org).
#
# Documentation for old 1.x versions can be found [here](https://pixel-ports.github.io/pyngsi-tutorial-1.2.html)
# ## Introduction
#
# Datasources are versatile exposing various data in various ways using different protocols.<p>
#
# A few examples showing how datasources can be exposed :
#
# - IoT sensors : WiFi IP-based or using lightweight protocols such as Bluetooth, Zigbee, Lora, 5G, ...
#
# - HTTP REST API :
# - exposed on the datasource side : the agent requests the API to retrieve data (client mode)
# - exposed on the agent side : data are posted from the datasource to the agent (server mode)
#
# - files :
# - plain text, json, xml, maybe compressed (gz, tgz, zip, ...)
# - carried over HTTP, FTP, ...
# The framework enables developers to build custom [NGSI](https://fiware.github.io/specifications/ngsiv2/stable/) Agents that :
# - process data using a common interface regardless of the type of the datasource
# - convert custom data to NGSI entities based on Fiware datamodels
# - write NGSI entities to the [Orion](https://fiware-orion.readthedocs.io/en/master/) Context Broker
# It's up to you to use the whole framework or pick up some parts.<br>
# For example it's possible to build NGSI entities with the framework and deal with Orion on your own.
# Using the pyngsi framework provides several benefits :
#
# - **developers can focus on the data logic**
#
# - clean code structure separates custom logic from boilerplate
#
# - streaming-oriented : *stream incoming data vs store the whole dataset in memory*
#
# - well unit-tested
#
# - all agents have the same structure : *client and server modes*
#
# - get processing statistics
#
# - get agent status : *when possible i.e. server agent, long-live agent*
#
# - benefit from the Python ecosystem especially the availability of many scientific libraries
# In this tutorial we are going to explore the main features of *pyngsi* :
# - __Chapter 1__ : How to use the DataModel class to build NGSI entities for your custom data
# - __Chapter 2__ : How to use the SinkOrion class to write to the Orion Context Broker
# - __Chapter 3__ : Write your first Agent
# - __Chapter 4__ : More on datasources
# - __Chapter 5__ : How to schedule the execution of an agent
# - __Chapter 6__ : How to debug, display, troubleshoot
# - __Chapter 7__ : Tips for real use-case agents
# - __Chapter 8__ : How to extend the framework
# - __Appendix__ : How to run a local Docker-based Orion instance
# ## Pre-requisite
# ### Python 3.8+
#
# As of December 2020 the latest Python stable release is v3.9.1.<br>
# Python 3.8+ should be available on most platforms.<br>
# Depending on your OS it may already be installed.<br>
# If not you'll have to install it.
#
# You can check the version either from the terminal :
# ```bash
# $ python --version
# Python 3.8.6
# ```
#
# or in Python code :
#
# ```python
# import sys
#
# print(sys.version)
# 3.8.6 (default, Oct 6 2020, 03:22:36)
# [GCC 7.5.0]
# ```
# ## Check your pyngsi version
# + tags=[]
import pyngsi
print(pyngsi.__version__)
| chapter1_index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EXERCÍCIOS NO FIM DO NOTEBOOK!!
# + [markdown] colab_type="text" id="MxppnYXfVVLJ"
# # Carregando os dados
#
# Usaremos o data set do Iris para fazer nossas predições
# + colab={} colab_type="code" id="tHQ_1gDyVVLK"
# Read the Iris CSV by hand (no pandas): every non-blank line becomes a
# list of its comma-separated string fields.
iris = []
with open('iris.csv', 'r') as f:
    for line in f:
        if line.replace("\n", ""):  # skip empty lines
            iris.append(line.replace('\n', '').split(','))
iris
# + [markdown] colab_type="text" id="2OlXgOqlVVLO"
# # Separando em features e labels
# + colab={} colab_type="code" id="EhqTBdEvVVLP"
# Split each row into numeric features (all but the last field) and the
# class label (the last field).
data = []
label = []
for line in iris:
    data.append([float(x) for x in line[:-1]])
    label.append(line[-1])
label
# + [markdown] colab_type="text" id="kpqcLBoCVVLS"
# # Transformando em Numpy
# + colab={} colab_type="code" id="fQhA1EKCVVLS"
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 2567} colab_type="code" id="DXgk-OjsVVLU" outputId="81d36b5d-3f2e-42c7-a35d-0de46305751e"
data
# + colab={} colab_type="code" id="jCNzEviOVVLY"
# Convert the Python lists to numpy arrays for vectorized math.
data = np.array(data)
# + colab={} colab_type="code" id="kRd5FvoEVVLa"
label = np.array(label)
# + colab={} colab_type="code" id="h4cesWMGVVLd"
# The point we want to classify (4 Iris features).
query = np.array([5, 2.8, 1.8, 2])
# + [markdown] colab_type="text" id="shEOTspZVVLf"
# # Calculando a distância entre a query e todos os outros pontos
# + colab={} colab_type="code" id="q_39ivnRVVLg"
# Função de distância euclidiana
from math import sqrt
def eucli(a, b):
    """Euclidean distance between two equal-length numeric vectors."""
    squared_gaps = (a - b) ** 2
    return sqrt(sum(squared_gaps))
# + colab={} colab_type="code" id="l0HQVBaIVVLj"
# Distance from the query to every training point (Python-loop version).
dists = np.array([eucli(point, query) for point in data])
# + colab={"base_uri": "https://localhost:8080/", "height": 527} colab_type="code" id="k09lPzWNVVLn" outputId="b9ada6b7-e196-4a07-801c-be51dbcebbb3"
dists
# + colab={"base_uri": "https://localhost:8080/", "height": 527} colab_type="code" id="DM25-lDXVVLq" outputId="309a4f2b-18a1-4938-ef33-22a28d679c31"
# Same distances, fully vectorized in numpy (broadcast, square, row-sum, sqrt).
np.sum(((data - query) ** 2), axis = 1) ** (1/2)
# + [markdown] colab_type="text" id="2Yhc0im2VVLs"
# # Pontos mais próximos
# + colab={} colab_type="code" id="4jGlo-9bVVLt"
# Indices of the 3 nearest training points.
indx = np.argsort(dists)[:3]
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="nKW2HllaVVLv" outputId="42e39a3b-4f0e-4200-fd63-d67239b37d3d"
# Their class labels.
label[indx]
# + [markdown] colab_type="text" id="Kj09unjkVVLy"
# # Vendo a classe mais próxima
# + colab={} colab_type="code" id="MeY-nasUVVLz"
from collections import Counter
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="jqdNk7ABVVL1" outputId="f7371194-0f71-4067-d28b-f20adb066e84"
# Majority vote among the 3 nearest neighbours.
Counter(label[indx]).most_common(1)[0][0]
# + [markdown] colab_type="text" id="MTHd9ja-VVL4"
# # Criando uma função que faz a classificação
# + colab={} colab_type="code" id="79rTzC7PVVL6"
def knn(data, query, k=3, labels=None):
    """Classify `query` by majority vote among its k nearest neighbours.

    Parameters
    ----------
    data : (n, d) array of training points.
    query : (d,) array, the point to classify.
    k : number of neighbours that vote (default 3).
    labels : (n,) array of class labels. Defaults to the module-level
        `label` array for backward compatibility (the original version
        implicitly depended on that global).

    Returns
    -------
    The most common label among the k nearest points. The square root is
    deliberately omitted: squared distance is monotone in distance, so
    the neighbour ranking is unchanged.
    """
    if labels is None:
        labels = label  # fall back to the notebook's global, as before
    dists = np.sum((data - query) ** 2, axis=1)
    nearest = np.argsort(dists)[:k]
    return Counter(labels[nearest]).most_common(1)[0][0]
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="wy2oCaBkVVL8" outputId="2f911d13-92af-46c9-f783-f90ef1fe3dfc"
knn(data, query, k=3)
# + [markdown] colab_type="text" id="xk1Rq1V1XR8L"
# # Sugestões de exercícios para casa!
# + [markdown] colab_type="text" id="DV0BeyZFXlUx"
# ## 1)
#
# Entenda porque não precisamos da raiz no cálculo da função euclidiana;
#
# Implemente uma generalização da distância euclidiana, a [distância de Minkowski](https://en.wikipedia.org/wiki/Minkowski_distance).
#
# Teste se o resultado muda para valores diferentes de "p".
#
# DICA: Use o np.inf para verificar se precisamos do máximo/mínimo
#
#
# + colab={} colab_type="code" id="MnTZicKRXnUy"
def minkowski_distance(a, b, p=2):
# Code here
# + [markdown] colab_type="text" id="24pAxO6XYl1w"
# ## 2)
#
# Transforme a nossa função knn para aceitar mais de 1 query (isto é, para testar vários casos de teste).
#
# DICA: Faça o máximo modularizado possível! Da uma olhada [aqui](https://github.com/BrunoGomesCoelho/small-bang/blob/master/knn/knn.py#L55) se precisar.
# + colab={} colab_type="code" id="vsi2BRHQZt-O"
def knn_extended(data, queries, k=3):
# Your code here
# + [markdown] colab_type="text" id="nlf2Yf52Z8ja"
# ## 3)
#
# Implemente a versão ponderada pelo peso para o K-NN. Isto é, em vez de só pegar o voto majoritário, leve em consideração a distância de cada exemplo na hora de fazer a votação.
#
# DICA: Se precisar, leia mais sobre [aqui](http://www.data-machine.com/nmtutorial/distanceweightedknnalgorithm.htm) e se precisar se inspire [nesse código](https://github.com/FlonairLenz/weighted-knn/blob/master/knn_algorithm.py).
# + colab={} colab_type="code" id="NITQQWjBa9d6"
def knn_weighted(data, queries, k=3):
# Your code here
| Aula02/KNN_licao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/abhijeetraj22/TSF-GRIP_IOT_Tasks/blob/main/Task_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="1MwM3lrYJRh2"
# # Importing libraries
# + [markdown] id="HJQFpizLJRh8"
# Initially, the project will require tensorflow and numpy to get started. Tensorflow will be used to load the dataset
# and preprocess the image to fit into a numpy array. We will convert the image into a numpy array to make it memory efficient
# and to make the processing faster.
# + colab={"base_uri": "https://localhost:8080/"} id="Hl6ub7qTJwBO" outputId="0b2e7710-5171-4fa4-c6aa-75c81e578ce8"
from google.colab import drive
drive.mount('/content/drive')
# + id="7NUzUm18JRh-"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.applications import MobileNetV2
# + [markdown] id="U9Tbq459JRh_"
# # Loading the Dataset
# + [markdown] id="-upJK4dLJRiA"
# The dataset has two directories where images for each class are saved. The idea is to bring both the classes into the program with their respective images. And save them as training and validation data using the image_dataset_from_directory() of keras.preprocessing.
# + id="3O2qwoZuJRiB"
# MobileNetV2's standard input resolution is 224x224.
width = 224
height = 224
#height and width are (224,224) since we will use the mobileNetv2 network
batch_size = 32
data_dir = r"/content/drive/MyDrive/Grip/datasets"
# + colab={"base_uri": "https://localhost:8080/"} id="luyzsLb1JRiD" outputId="55774690-d130-47bf-a123-7da65bbdc308"
# 70/30 train/validation split; the shared seed keeps the subsets disjoint.
training = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.3,
    subset='training',
    seed=123,
    image_size=(height, width),
    batch_size=batch_size
)
# + colab={"base_uri": "https://localhost:8080/"} id="5U5JGgb8JRiF" outputId="d3682937-3d35-402a-ac9a-b9bcc171d1a7"
validation = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.3,
    subset='validation',
    seed=123,
    image_size=(height, width),
    batch_size=batch_size
)
# + colab={"base_uri": "https://localhost:8080/"} id="fhNqC2bTJRiH" outputId="e454957c-8f2c-4657-b31e-03c8eec432d8"
# Class names are inferred from the sub-directory names under data_dir.
classes = training.class_names
classes
# + colab={"base_uri": "https://localhost:8080/"} id="9UhgvE3MJRiJ" outputId="691f2417-1a80-46d7-f678-9726143c0532"
training
# + [markdown] id="o9zhc8cXJRiL"
# # Loading the MobileNetv2 model
# + [markdown] id="y9kqXdjyJRiN"
# The MobileNetv2 model is a pretrained model that can be used for object detection. We can load the network using the keras.application
# You can more details about the mobilenetv2 network here - <a href= 'https://keras.io/api/applications/mobilenet/'> Link</a>
# + id="EGGlYG-eJRiN"
# Pretrained MobileNetV2 with its full ImageNet classification head.
model = MobileNetV2(weights='imagenet')
# + [markdown] id="qfG36TXOJRiJ"
# # Visualizing images from the data set
# + [markdown] id="cg8mq_zVJRiK"
#
# After loading the dataset, the best way to verify the progress is by loading the dataset into an image using matplotlib. And using the corresponding labels with the images.
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="PNBUVOp3JRiL" outputId="e96d2117-a5ef-4b0a-dde1-461a3d673c92"
# Show the second image of each of the first two training batches.
for images, labels in training.take(2):
    plt.imshow(images[1].numpy().astype('uint8'))
    plt.title(classes[labels[1]])
# + [markdown] id="R_WD5SUaJRiN"
# # Compiling the model
# + [markdown] id="x4Fzsl6uJRiO"
# After modeling the mobilenetv2 architecture, the compilation process starts where the we are using the adam optimizer and SparCategoricalCrossentropy as loss. Whereas, metrics is accuracy.
# + id="Xx6ZGAUHJRiO"
# NOTE(review): MobileNetV2(weights='imagenet') ends in a softmax layer, yet
# from_logits=True tells the loss to expect raw logits - confirm this
# combination (and fine-tuning a 1000-way head on 2 classes) is intended.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="3sHqr1JDJRiP" outputId="85c4fa1e-1266-402b-9917-02c967285a94"
model.summary()
# + [markdown] id="p2TPEqUhJRiP"
# # Training the model
# + colab={"base_uri": "https://localhost:8080/"} id="L1g_dCtxJRiP" outputId="c50df1c7-343c-44a0-e756-2f43fc658db7"
face_mask_detection = model.fit(training,validation_data=validation,epochs=20)
# + [markdown] id="ANUybIb4JRiQ"
# # Predicting on a new unknown image
# + colab={"base_uri": "https://localhost:8080/"} id="3kvpXWgNJRiQ" outputId="072c4761-b990-4dff-f33d-84afc127a674"
#load the image
img_url='/content/drive/MyDrive/Grip/datasets/with_mask/0_0_0 copy 11.jpg'
img = tf.keras.preprocessing.image.load_img(img_url, target_size=(height, width))
#convert to array
image_array = tf.keras.preprocessing.image.img_to_array(img)
#expand dimensions to fit in the model (add a batch axis of size 1)
image_array = tf.expand_dims(image_array,0)
#check the shape of the image
image_array.shape
# + id="N08MQqjAJRiR"
predictions = model.predict(image_array)
#using softmax to get the results
# NOTE(review): if the model already outputs a softmax (see compile note),
# this applies softmax twice - verify before interpreting the scores.
score = tf.nn.softmax(predictions[0])
# + colab={"base_uri": "https://localhost:8080/"} id="TCLYNHGDJRiS" outputId="cf498462-4d3b-4fd0-9e7e-08c71bc1f104"
# Redundant re-import; numpy is already available as np at file level.
import numpy
print(classes[numpy.argmax(score)], 100*numpy.max(score))
# + [markdown] id="lGRj_cFKJRiT"
# # Save the model
# + [markdown] id="f_q_wQPaJRiT"
# After we have created a classifier, we can save it for later use.
# + id="SQGLDoWFJRiT"
# Persist the trained weights/architecture in HDF5 format.
model.save('/content/drive/MyDrive/Grip/dummy.model', save_format="h5")
# + [markdown] id="ob1VhCKPJRiT"
# # Evaluation
# + [markdown] id="CfJCM7dOJRiT"
# It is necessary to evaluate the model before we move to the next stage. The evaluation will give us insights about two things:
# 1. Accuracy
# 2. If there is any anomaly in the model - Underfitting or Overfitting.
#
# In any case, we must follow a different approach to train the model to get the best results. This can include, image augmentation and choosing our own layers and placing them over the MobileNetv2 network.
#
# <h3> How to Improve the Model?</h3>
#
# Sometimes, the accuracy of the model isn't what we have anticipated. So, there are a certain practices that can be followed to
# improve the performance of the model in order to get efficiency while working with the new data.
#
# The following are some of the practices that may improve the model's performance:
# 1. Add more training data
# 2. Data Augmentation can help increase the number of training samples.
# 3. There might be a chance of overfitting the model with increased number of samples, in that case you can try a different model or include a head over the base model with custom layers.
# + colab={"base_uri": "https://localhost:8080/", "height": 499} id="rsDJm07fJRiU" outputId="c17ea24b-ec29-4535-fc3f-1b2aaffa4d00"
# Pull the four learning curves recorded by Keras during fit().
acc = face_mask_detection.history['accuracy']
val_acc = face_mask_detection.history['val_accuracy']
loss = face_mask_detection.history['loss']
val_loss = face_mask_detection.history['val_loss']
epochs_range = range(20)

plt.figure(figsize=(8, 8))
# (panel, train curve, train label, val curve, val label, legend pos, title)
panels = [
    (1, acc, 'Training Accuracy', val_acc, 'Validation Accuracy',
     'lower right', 'Training and Validation Accuracy'),
    (2, loss, 'Training Loss', val_loss, 'Validation Loss',
     'upper right', 'Training and Validation Loss'),
]
for pos, train_curve, train_lbl, val_curve, val_lbl, legend_loc, title in panels:
    plt.subplot(1, 2, pos)
    plt.plot(epochs_range, train_curve, label=train_lbl)
    plt.plot(epochs_range, val_curve, label=val_lbl)
    plt.legend(loc=legend_loc)
    plt.title(title)
plt.show()
# + id="3-CbJjIpV9D8"
| Task_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Efficient Estimation using 1 simulation
# 1. This notebook shows how to **estimate** a simple model using Simulated Minimum Distance (SMD)
# 2. It illustrates how an **efficient** estimator can be constructed using only 1 simulation, following the idea proposed by [<NAME>](https://www.mit.edu/~kevdokim/ESMSM_sep16.pdf "Efficient Estimation with a Finite Number of Simulation Draws per Observation")
# ## Recap: Simulated Minimum Distance
# **Data:** We assume that we have data available for $N$ households over $T$ periods, collected in $\{w_i\}_i^N$.
#
# **Goal:** We wish to estimate the true, unknown, parameter vector $\theta_0$. We assume our model is correctly specified in the sense that the observed data stems from the model.
# The **Simulated Minimum Distance (SMD)** estimator is
#
# $$
# \hat{\theta} = \arg\min_{\theta} g(\theta)'Wg(\theta)
# $$
#
# where $W$ is a $J\times J$ positive semidefinite **weighting matrix** and
#
# $$
# g(\theta)=\Lambda_{data}-\Lambda_{sim}(\theta)
# $$
#
# is the distance between $J\times1$ vectors of moments calculated in the data and the simulated data, respectively. Concretely,
#
# $$
# \Lambda_{data} = \frac{1}{N}\sum_{i=1}^N m(\theta_0|w_i) \\
# \Lambda_{sim}(\theta) = \frac{1}{N_{sim}}\sum_{s=1}^{N_{sim}} m(\theta|w_s)
# $$
#
# are $J\times1$ vectors of moments calculated in the data and the simulated data, respectively.
# **Variance of the estimator:** Recall that the variance of the estimator was
# $$
# \begin{align}
# \text{Var}(\hat{\theta})&=(1+S^{-1})\Gamma\Omega\Gamma'/N \\
# \Gamma &= -(G'WG)^{-1}G'W \\
# \Omega & = \text{Var}(m(\theta_0|w_i))
# \end{align}
# $$
# where we implicitly used that $Var(m(\theta_0|w_i))=Var(m(\theta|w_s))$ and $Cov(m(\theta_0|w_i),m(\theta|w_s))=0$
#
# **Efficient Estimator:** Using the "optimal" weighting matrix, $W=\Omega^{-1}$, gives the *lowest variance* for a given number of simulations, $S$, as
# $$
# \begin{align}
# \text{Var}(\hat{\theta})&=(1+S^{-1})(G'\Omega^{-1}G)^{-1}/N
# \end{align}
# $$
#
# > **Observation:** Only as $S\rightarrow\infty$ does the minimum variance of the SMD estimator approach the minimum variance of the GMM estimator.
#
# > **Solution:** [Kirill Evdokimov](https://www.mit.edu/~kevdokim/ESMSM_sep16.pdf "Efficient Estimation with a Finite Number of Simulation Draws per Observation") shows how we can use an augmented set of moments related to the assumptions related to simulation to basically remove the factor $(1+S^{-1})$ on the asymptotic variance of the SMD estimator using only one(!) simulation, $S=1$!
# # Model and Estimators
# We use the same example as Kirill Evdokimov. Imagine the simple setup where we have the data-generating process (DGP):
# $$
# \begin{align}
# Y_i &= \theta_0 + \varepsilon_i \\
# \varepsilon_i &\sim N(0,1)
# \end{align}
# $$
# **SMD:** We can use the moment function with only $S=1$ simulation of $\varepsilon$ per individual
# $$
# g_i(\theta|w_i) = Y_i - \theta -\varepsilon_i
# $$
# to estimate $\theta$. We will call that $\hat{\theta}_{SMD}$. The moment vector would be
# $$
# g(\theta) =
# \bigg( \begin{array}{c}
# \overline{Y} - \theta -\overline{\varepsilon} \\
# \end{array} \bigg)
# $$
# where $\overline{Y} = \frac{1}{N}\sum_{i=1}^{N} Y_i$ and $\overline{\varepsilon} = \frac{1}{N}\sum_{i=1}^{N} \varepsilon_i$.
#
# **ES-SMD:** We can use the efficient SMD to augment the moment conditions with the fact that the simulated $\varepsilon$'s should have mean-zero and get the vector of moments in this augmented situation as
# $$
# g_{aug}(\theta) =
# \bigg( \begin{array}{c}
# \overline{Y} - \theta -\overline{\varepsilon} \\
# 0-\overline{\varepsilon} \\
# \end{array} \bigg)
# $$
# where we use the optimal weighting matrix $W=\Omega^{-1}$ where
# $$
# \Omega = Var(g_{i,aug}(\theta|w_i)) =
# \bigg( \begin{array}{cc}
# 2 & 1\\
# 1 & 1 \\
# \end{array} \bigg)
# $$
# and
# $$
# \Omega^{-1} = \bigg( \begin{array}{cc}
# 1 & -1\\
# -1 & 2 \\
# \end{array} \bigg)
# $$
#
# We will call this estimator $\hat{\theta}_{ES-SMD}$.
#
# **Asymptotic Variances:**
# 1. In the standard SMD estimator, the weighting matrix does not matter and we have
# $$
# \begin{align}
# AVar(\hat{\theta}_{SMD}) &= Var(g_i(\theta|w_i)) \\
# &= Var(Y_i - \theta -\varepsilon_i)\\
# &= Var(Y_i) +Var(\varepsilon_i) \\
# &= 2
# \end{align}
# $$
# 2. In the augmented ES-SMD estmator, we have
# $$
# \begin{align}
# AVar(\hat{\theta}_{ES-SMD}) &= Var((G'WG)^{-1}G'Wg_{i,aug}(\theta|w_i)) \\
# &= Var(-Y_i + \theta)\\
# &= 1
# \end{align}
# $$
# because
# $$
# (G'WG)^{-1}G'Wg_{i,aug}(\theta|w_i) = - (Y_i - \theta -\varepsilon) - \varepsilon.
# $$
# 3. We thus have that the asymptotic variance of the ES-SMD estimator is lower that the SMD estimator!
#
# We will now illustrate this result through a **Monte Carlo experiment** too!
# # Setup
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import numpy as np
from types import SimpleNamespace
import sys
sys.path.append('../')
from SimulatedMinimumDistance import SimulatedMinimumDistanceClass
# -
# # Model construction
class ModelClass():
    """Location model Y_i = theta + eps_i with eps_i ~ N(0,1)."""

    def __init__(self,**kwargs):
        """Initialize default parameters and apply keyword overrides."""
        self.par = SimpleNamespace()
        self.sim = SimpleNamespace()

        # baseline parameter values
        self.par.theta = 0.5
        self.par.simN = 5000

        # user-supplied overrides (e.g. theta=0.2, simN=100_000)
        for name, value in kwargs.items():
            setattr(self.par, name, value)

    def solve(self,do_print=False):
        """Nothing to solve in this simple model."""
        pass

    def simulate(self,seed=None,do_print=False):
        """Draw N(0,1) shocks and construct simulated outcomes Y."""
        if seed is not None:
            np.random.seed(seed)

        self.sim.e = np.random.normal(size=self.par.simN)
        self.sim.Y = self.par.theta + self.sim.e
# # Estimation choices
# +
# a. model settings
N = 100_000        # number of observations in the observed ("true") data
N_sim = N          # number of simulated individuals (S=1 draw per observation)
par = {'theta':0.2,'simN':N_sim}        # parameters for the model used in estimation
par_true = par.copy()                   # parameters of the data generating process
par_true['simN'] = N
# b. parameters to estimate
est_par = {
    'theta': {'guess':0.5,'lower':0.0,'upper':1.0,},   # starting value and bounds
}
# c. moment function used in estimation.
def mom_func(data,ids=None):
    """Moment function: the sample average of Y.

    data : object with an array attribute .Y
    ids  : optional index array selecting a subsample (e.g. a bootstrap draw)
    """
    Y = data.Y if ids is None else data.Y[ids]
    return np.array([np.mean(Y)])
# c. augmented moment function used in efficient estimation.
def mom_func_aug(data,ids=None):
    """Augmented moment function: average Y and average simulated shock.

    data : object with array attributes .Y (outcomes) and .e (simulated shocks)
    ids  : optional index array selecting a subsample (e.g. a bootstrap draw)

    Returns a length-2 array [mean(Y), mean(e)].
    """
    if ids is None:
        mean_Y_e = np.mean([data.Y,data.e],axis=1)
    else:
        # bug fix: index the arrays, not the container
        # (the original read `data[ids].e`, which raises TypeError because
        # `data` is a SimpleNamespace and is not subscriptable)
        mean_Y_e = np.mean([data.Y[ids],data.e[ids]],axis=1)
    return mean_Y_e
# -
# # Monte Carlo Estimation results
# +
num_boot = 1_000                  # number of Monte Carlo replications
theta_est = np.empty(num_boot)    # SMD estimates, one per replication
theta_est_aug = theta_est.copy()  # ES-SMD estimates, one per replication
model = ModelClass(**par)
for b in range(num_boot):
    # a. setup model to simulate data
    true = ModelClass(**par_true)
    true.simulate(seed=2050+b) # this seed is different from the default
    # b. data moments
    datamoms = mom_func(true.sim)
    datamoms_aug = np.array([datamoms[0],0.0])  # augment with the zero-mean moment for eps
    # c. setup estimators
    smd = SimulatedMinimumDistanceClass(est_par,mom_func,datamoms=datamoms)
    smd_aug = SimulatedMinimumDistanceClass(est_par,mom_func_aug,datamoms=datamoms_aug)
    # d. weighting matrix
    W = np.ones((datamoms.size,datamoms.size)) # does not matter here (just-identified)
    Omega = np.array([[2.0,1.0],[1.0,1.0]]) # covariance matrix of augmented moments
    W_aug = np.linalg.inv(Omega)            # optimal weighting matrix W = Omega^-1
    # e. estimate the model (can take several minutes)
    est = smd.estimate(model,W,do_print_initial=False)
    est_aug = smd_aug.estimate(model,W_aug,do_print_initial=False)
    # f. store the estimates
    theta_est[b] = est['theta']
    theta_est_aug[b] = est_aug['theta']
# -
# Variances scaled by N so they are comparable to the asymptotic variances (2 and 1).
print(f'Variance, SMD: {np.var(theta_est-par_true["theta"])*N:2.6f}')
print(f'Variance, ES-SMD: {np.var(theta_est_aug-par_true["theta"])*N:2.6f}')
| DynamicProgramming/extra/Efficient estimation with 1 simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Quantitative segmentation quality estimation
#
# Often we face the problem that we have an annotated image of some data and a segmented version of the same image, which was obtained with some segmentation pipeline (StarDist, EPySeg, PlantSeg, etc.). This notebook provides a method to compare the overlap of both the ground truth image and the achieved segmentation.
# +
import os
import biapol_utilities as biau
from skimage import io, measure, segmentation
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
# %matplotlib notebook
# -
# ## Input data
# First, let's generate some example data!
# Load the example "blobs" image shipped with biapol_utilities.
blobs = biau.data.blobs()
# Let's segment this and take it as a ground truth image:
# +
threshold = 128
imageA = (blobs > threshold).astype(np.uint8)   # binary foreground mask
ground_truth = measure.label(imageA)            # connected components -> label image
plt.imshow(ground_truth)
plt.title('Ground truth')
# -
# Next, we shuffle the labels and expand them a bit:
# First, shuffle randomly
label_shuffle = np.arange(1, ground_truth.max()+1, 1)
np.random.shuffle(label_shuffle)
label_shuffle = np.append(np.asarray([0]), label_shuffle) # append 0 at start of array - we don't want to shuffle background
segmented = label_shuffle[ground_truth]         # apply the permutation as a lookup table
# Second, expand the labels a bit
segmented = segmentation.expand_labels(segmented, 5)
# +
# Plot ground truth and the (shuffled + expanded) segmentation side by side
fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)
axes[0].imshow(ground_truth)
axes[0].set_title('Ground truth')
axes[1].imshow(segmented)
axes[1].set_title('Segmentation')
# -
# ## Re-match labels
#
# Next, use the label-matching from `biapol_utilities` to assign correct labels to both images
segmented = biau.label.match_labels(ground_truth, segmented)  # relabel `segmented` to agree with `ground_truth`
# +
# Plot side by side again - corresponding labels should now share colors
fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True)
axes[0].imshow(ground_truth)
axes[0].set_title('Ground truth')
axes[1].imshow(segmented)
axes[1].set_title('Segmentation')
# -
# ## Compare labels: Labelwise Jaccard-index
#
# Lastly, we calculate the *label-wise Jaccard index* to measure the intersection over union (IoU) between corresponding pairs of labels.
# Per-label overlap scores (Jaccard and Dice) between ground truth and segmentation.
result = biau.label.compare_labels(ground_truth, segmented)
result
# Let's also visualize this: To do this, we create a new image and assign the jaccard-index result value to every pixel depending on the label.
# +
# Use the per-label scores as lookup tables (index = label value).
LUT_jaccard = result['jaccard_score'].to_numpy()
LUT_dice = result['dice_score'].to_numpy()
# mask the background label with NaN so it is not colored in the score maps
LUT_jaccard[0] = np.nan
LUT_dice[0] = np.nan
# create score map: every pixel receives the score of its label
JI_map = LUT_jaccard[segmented]
DI_map = LUT_dice[segmented]
# +
# Plot side by side
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True, figsize=(10,5))
fig.subplots_adjust(left=0.05, bottom=0.06, right=0.85, top=0.95, wspace=0.05)
# Plot ground truth
axes[0, 0].imshow(ground_truth)
axes[0, 0].set_title('Ground truth')
# Plot segmentation
axes[0, 1].imshow(segmented)
axes[0, 1].set_title('Segmentation')
# Plot overlay
axes[0, 2].imshow(ground_truth)
axes[0, 2].imshow(segmented, alpha=0.5)
axes[0, 2].set_title('Overlay')
# Plot Jaccard index map
im = axes[1, 0].imshow(JI_map, cmap='inferno_r')
axes[1, 0].set_title('Jaccard score')
cbar = fig.colorbar(im, ax=axes[1, 0])
# Plot Dice score map
im2 = axes[1, 1].imshow(DI_map, cmap='inferno_r')
axes[1, 1].set_title('Dice score')
cbar2 = fig.colorbar(im2, ax=axes[1, 1])
axes[-1, -1].axis('off')  # hide the unused bottom-right panel
# -
# ## Compare-labels: Feature-wise
# It may be an interesting approach to not only check the pixel-wise agreement between segmentation and annotation but to also check whether certain features are preserved in the segmentation. For this, the (shape-) features are calculated in both the ground_truth annotation and the segmented image with scikit-image regionprops.
# +
# Compute region (shape) properties for both label images with scikit-image.
properties = ['label', 'area', 'eccentricity', 'orientation']
features_gt = measure.regionprops_table(ground_truth, properties=properties)
features_seg = measure.regionprops_table(segmented, properties=properties)
features_gt = pd.DataFrame(features_gt)
features_seg = pd.DataFrame(features_seg)
features_gt
# -
# Visualize as histograms
# +
# One histogram panel per feature; the 'label' column itself is skipped.
fig, axes = plt.subplots(nrows=1, ncols=len(properties)-1, figsize=(9,6))
ax_idx = 0
for idx, prop in enumerate(properties):
    if prop == 'label':
        continue
    axes[ax_idx].hist(features_gt[prop], label='ground_truth', bins=20, alpha=0.5)
    axes[ax_idx].hist(features_seg[prop], label='segmentation', bins=20, alpha=0.5)
    axes[ax_idx].set_xlabel(prop)
    ax_idx += 1
axes[0].legend()
axes[0].set_ylabel('# Occurrences')
# -
| docs/_include/notebooks/label/Compare_segmentations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import re
np.set_printoptions(threshold=np.inf)
# %matplotlib inline
# Containers for the edge lists written to CSV at the end of this cell.
edge_list_fused_afternorm = []
edge_list_ses1_afternorm = []
edge_list_ses2_afternorm = []
edge_list_ses1_normalized = []
edge_list_ses2_normalized = []
edge_list_property = []
# Subject IDs run from sub-0025864 to sub-0025920 (inclusive).
length = 25920 - 25864 + 1
for i in range(length):
    # NOTE(review): `nx.read_gpickle` and the `.edge` attribute are legacy
    # networkx 1.x API (removed in later major versions) - pin networkx accordingly.
    G0_1 = nx.read_gpickle("./BNU1_dMRI/sub-00%s_ses-1_dwi_desikan.gpickle"%(int(i+25864)))
    G0_2 = nx.read_gpickle("./BNU1_dMRI/sub-00%s_ses-2_dwi_desikan.gpickle"%(int(i+25864)))
    # Flatten both sessions' adjacency structures into [src, dst, weight] triples.
    edge_list_ses1 = []
    edge_list_ses2 = []
    edge_1 = G0_1.edge
    edge_2 = G0_2.edge
    for src in edge_1:
        for dst in edge_1[src]:
            edge_list_ses1.append([src, dst, edge_1[src][dst]['weight']])
    for src in edge_2:
        for dst in edge_2[src]:
            edge_list_ses2.append([src, dst, edge_2[src][dst]['weight']])
    # Session 1: rebuild a weighted graph and work on its dense adjacency matrix.
    G1 = nx.Graph()
    G1.add_weighted_edges_from(edge_list_ses1)
    A1 = nx.to_numpy_array(G1)
    # print(np.count_nonzero(A1))
    A1_degree = np.zeros(shape=(A1.shape))
    for k in range(A1.shape[0]):
        A1_degree[k][k] = list(G1.degree)[k][1]
    # print(-np.sqrt(A1_degree))
    # NOTE(review): the two minus signs cancel, so this computes
    # sqrt(D) @ A @ sqrt(D), NOT the symmetric normalization D^-1/2 @ A @ D^-1/2.
    # The division by the maximum below rescales the values to [0, 1], but
    # confirm this is the intended transform before thresholding at 0.4.
    A1_normalized = np.matmul(np.matmul((-np.sqrt(A1_degree)), A1), (-np.sqrt(A1_degree)))
    # A1_normalized = np.where(A1_normalized>0.5, 10*A1_normalized, 0)
    max_value = np.amax(A1_normalized)
    A1_normalized = A1_normalized/max_value
    A1_normalized = np.where(A1_normalized>0.4, A1_normalized, 0)
    # print(np.count_nonzero(A1_normalized))
    edge_list_ses1_normalized = list(zip(*np.nonzero(A1_normalized)))
    # print(edge_list_ses1_normalized)
    # Session 2: same pipeline as session 1.
    G2 = nx.Graph()
    G2.add_weighted_edges_from(edge_list_ses2)
    A2 = nx.to_numpy_array(G2)
    # print(np.count_nonzero(A2))
    A2_degree = np.zeros(shape=(A2.shape))
    for j in range(A2.shape[0]):
        A2_degree[j][j] = list(G2.degree)[j][1]
    # print(-np.sqrt(A1_degree))
    A2_normalized = np.matmul(np.matmul((-np.sqrt(A2_degree)), A2), (-np.sqrt(A2_degree)))
    # A1_normalized = np.where(A1_normalized>0.5, 10*A1_normalized, 0)
    max_value = np.amax(A2_normalized)
    A2_normalized = A2_normalized/max_value
    A2_normalized = np.where(A2_normalized>0.4, A2_normalized, 0)
    edge_list_ses2_normalized = list(zip(*np.nonzero(A2_normalized)))
    # print(np.count_nonzero(A2_normalized))
    # print(edge_list_ses2_normalized)
    # Tag every surviving edge with the graph (subject) index i.
    for e1 in edge_list_ses1_normalized:
        edge_list_ses1_afternorm.append([i, e1[0], e1[1]])
    for e2 in edge_list_ses2_normalized:
        edge_list_ses2_afternorm.append([i, e2[0], e2[1]])
    # Simply fusing two sessions (can be updated): union of the two edge sets.
    for e1 in edge_list_ses1_normalized:
        if e1 not in edge_list_ses2_normalized:
            edge_list_fused_afternorm.append([i, e1[0], e1[1]])
    for e2 in edge_list_ses2_normalized:
        edge_list_fused_afternorm.append([i, e2[0], e2[1]])
# Build per-graph labels from the phenotypic CSV (every second data row = one subject).
graph_idx = 0
f = csv.reader(open('./BNU1_fMRI/BNU1_phenotypic_data.csv','r'))
for j, row in enumerate(f):
    if j != 0 and j % 2 ==1:
        label = row[3]
        # NOTE(review): this `filter` call is a no-op - its result is discarded.
        # Presumably `label = ''.join(filter(str.isdigit, label))` was intended;
        # as written, `int(label)` below fails if row[3] contains non-digits.
        filter(str.isdigit, label)
        edge_list_property.append([graph_idx, int(label)-1, 70])
        graph_idx += 1
# Write the edge lists and graph properties to CSV.
# NOTE(review): `[iter for iter in ...]` shadows the builtin `iter` and merely
# copies the list - `pd.DataFrame(edge_list, columns=...)` would suffice.
df = pd.DataFrame([iter for iter in edge_list_fused_afternorm], columns=['graph_id', 'source', 'target'])
df.to_csv('./BNU1_dMRI/dMRI_brain_graph_edges_fused.csv', index=False)
df = pd.DataFrame([iter for iter in edge_list_ses1_afternorm], columns=['graph_id', 'source', 'target'])
df.to_csv('./BNU1_dMRI/dMRI_brain_graph_edges_ses1.csv', index=False)
df = pd.DataFrame([iter for iter in edge_list_ses2_afternorm], columns=['graph_id', 'source', 'target'])
df.to_csv('./BNU1_dMRI/dMRI_brain_graph_edges_ses2.csv', index=False)
df = pd.DataFrame([iter for iter in edge_list_property], columns=['graph_id', 'label', 'num_nodes'])
df.to_csv('./BNU1_dMRI/dMRI_brain_graph_properties.csv', index=False)
print('Finished preprocessing dMRI graph!')
# +
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import re
# %matplotlib inline
edge_list_2_downsampling = []
edge_list_2_property = []
# Subject IDs run from sub-0025864 to sub-0025920 (inclusive).
length = 25920 - 25864 + 1
for i in range(length):
    G00 = nx.read_gpickle("./BNU1_fMRI/sub-00%s_ses-1_bold_desikan_res-2x2x2_measure-correlation.gpickle"%(int(i+25864)))
    # Flatten the adjacency structure into [src, dst, weight] triples.
    edge_list_2 = []
    edge = G00.adj
    for src in edge:
        for dst in edge[src]:
            edge_list_2.append([src, dst, edge[src][dst]['weight']])
    # Keep only off-diagonal edges with correlation > 0.8; node IDs shifted to 0-based.
    # NOTE(review): `edge` is reused as the loop variable here, clobbering the
    # adjacency view bound above - harmless but confusing.
    edge_num = 0
    for edge in edge_list_2:
        if edge[0] == edge[1]:
            continue
        elif edge[2] <= 0.8:
            continue
        else:
            edge_num += 1
            edge_list_2_downsampling.append([i, edge[0]-1, edge[1]-1])
    print(edge_num)
# G2 = nx.Graph()
# G2.add_weighted_edges_from(edge_list_2)
# Build per-graph labels from the phenotypic CSV (every second data row = one subject).
graph_idx = 0
f = csv.reader(open('./BNU1_fMRI/BNU1_phenotypic_data.csv','r'))
for j, row in enumerate(f):
    if j != 0 and j % 2 ==1:
        label = row[3]
        # NOTE(review): this `filter` call is a no-op - its result is discarded.
        # Presumably `label = ''.join(filter(str.isdigit, label))` was intended.
        filter(str.isdigit, label)
        edge_list_2_property.append([graph_idx, int(label)-1, 70])
        graph_idx += 1
df = pd.DataFrame([iter for iter in edge_list_2_downsampling], columns=['graph_id', 'src', 'dst'])
df.to_csv('./BNU1_fMRI/fMRI_brain_graph_edges.csv', index=False)
df = pd.DataFrame([iter for iter in edge_list_2_property], columns=['graph_id', 'label', 'num_nodes'])
df.to_csv('./BNU1_fMRI/fMRI_brain_graph_properties.csv', index=False)
print('Finished preprocessing fMRI graph!')
# +
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import re
# %matplotlib inline
edge_list_2_downsampling = []
edge_list_2_property = []
# Subject IDs run from sub-0025864 to sub-0025920 (inclusive).
length = 25920 - 25864 + 1
for i in range(length):
    G00 = nx.read_gpickle("./BNU1_fMRI/sub-00%s_ses-1_bold_desikan_res-2x2x2_measure-correlation.gpickle"%(int(i+25864)))
    # Flatten the adjacency structure into [src, dst, weight] triples.
    edge_list_2 = []
    edge = G00.adj
    for src in edge:
        for dst in edge[src]:
            edge_list_2.append([src, dst, edge[src][dst]['weight']])
    # Same filtering as the unweighted fMRI cell above, but here the
    # correlation weight is kept as a fourth column.
    edge_num = 0
    for edge in edge_list_2:
        if edge[0] == edge[1]:
            continue
        elif edge[2] <= 0.8:
            continue
        else:
            edge_num += 1
            edge_list_2_downsampling.append([i, edge[0]-1, edge[1]-1, edge[2]])
# G2 = nx.Graph()
# G2.add_weighted_edges_from(edge_list_2)
graph_idx = 0
f = csv.reader(open('./BNU1_fMRI/BNU1_phenotypic_data.csv','r'))
for j, row in enumerate(f):
    if j != 0 and j % 2 ==1:
        label = row[3]
        # NOTE(review): this `filter` call is a no-op - its result is discarded.
        filter(str.isdigit, label)
        edge_list_2_property.append([graph_idx, int(label)-1, 70])
        graph_idx += 1
df = pd.DataFrame([iter for iter in edge_list_2_downsampling], columns=['graph_id', 'src', 'dst', 'weight'])
df.to_csv('./BNU1_fMRI/fMRI_brain_graph_weighted_edges_0.8.csv', index=False)
# df = pd.DataFrame([iter for iter in edge_list_2_property], columns=['graph_id', 'label', 'num_nodes'])
# df.to_csv('./BNU1_fMRI/fMRI_brain_graph_properties.csv', index=False)
print('Finished preprocessing weighted fMRI graph!')
| codes/Graph_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # T001 · Compound data acquisition (ChEMBL)
#
# **Note:** This talktorial is a part of TeachOpenCADD, a platform that aims to teach domain-specific skills and to provide pipeline templates as starting points for research projects.
#
# Authors:
#
# - <NAME>, CADD seminar 2017, Volkamer lab, Charité/FU Berlin
# - <NAME>, CADD seminar 2018, Volkamer lab, Charité/FU Berlin
# - <NAME>, 2019-2020, [Volkamer lab, Charité](https://volkamerlab.org/)
# - <NAME>, 2020, [Volkamer lab, Charité](https://volkamerlab.org/)
# - <NAME>, 2020, [Volkamer lab, Charité](https://volkamerlab.org/)
# __Talktorial T001__: This talktorial is part of the TeachOpenCADD pipeline described in the [first TeachOpenCADD paper](https://jcheminf.biomedcentral.com/articles/10.1186/s13321-019-0351-x), comprising talktorials T001-T010.
# ## Aim of this talktorial
#
# In this notebook, we will learn more about the ChEMBL database and how to extract data from ChEMBL, i.e. (compound, activity data) pairs for a target of interest. These data sets can be used for many cheminformatics tasks, such as similarity search, clustering or machine learning.
#
# Our work here will include finding compounds which were tested against a certain target and filtering available bioactivity data.
# ### Contents in *Theory*
#
# * ChEMBL database
# * ChEMBL web services
# * ChEMBL webresource client
# * Compound activity measures
# * IC50 measure
# * pIC50 value
# ### Contents in *Practical*
#
# **Goal: Get a list of compounds with bioactivity data for a given target**
#
# * Connect to ChEMBL database
# * Get target data (example: EGFR kinase)
# * Fetch and download target data
# * Select target ChEMBL ID
# * Get bioactivity data
# * Fetch and download bioactivity data for target
# * Preprocess and filter bioactivity data
# * Get compound data
# * Fetch and download compound data
# * Preprocess and filter compound data
# * Output bioactivity-compound data
# * Merge bioactivity and compound data, and add pIC50 values
# * Draw molecules with highest pIC50
# * Freeze bioactivity data to ChEMBL 27
# * Write output file
# ### References
#
# * ChEMBL bioactivity database: [Gaulton *et al.*, <i>Nucleic Acids Res.</i> (2017), 45(Database issue), D945–D954](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5210557/)
# * ChEMBL web services: [Davies *et al.*, <i>Nucleic Acids Res.</i> (2015), <b>43</b>, 612-620](https://academic.oup.com/nar/article/43/W1/W612/2467881)
# * [ChEMBL web-interface](https://www.ebi.ac.uk/chembl/)
# * GitHub [ChEMBL webresource client](https://github.com/chembl/chembl_webresource_client)
# * The EBI RDF platform: [Jupp *et al.*, <i>Bioinformatics </i> (2014), 30(9), 1338-9](https://www.ncbi.nlm.nih.gov/pubmed/24413672)
# * Info on half maximal inhibitory concentration: [(p)IC50](https://en.wikipedia.org/wiki/IC50)
# * [UniProt website](https://www.uniprot.org/)
# ## Theory
# ### ChEMBL database
# >"ChEMBL is a manually curated database of bioactive molecules with drug-like properties. It brings together chemical, bioactivity and genomic data to aid the translation of genomic information into effective new drugs." ([ChEMBL website](https://www.ebi.ac.uk/chembl/))
#
# * Open large-scale bioactivity database
# * **Current data content (as of 09.2020, ChEMBL 27):**
# * \>1.9 million distinct compounds
# * \>16 million activity values
# * Assays are mapped to ~13,000 targets
# * **Data sources** include scientific literature, PubChem bioassays, Drugs for Neglected Diseases Initiative (DNDi), BindingDB database, ...
# * ChEMBL data can be accessed via a [web-interface](https://www.ebi.ac.uk/chembl/), the [EBI-RDF platform](https://www.ncbi.nlm.nih.gov/pubmed/24413672) and the [ChEMBL webresource client](https://github.com/chembl/chembl_webresource_client)
# #### ChEMBL web services
#
# * RESTful web service
# * ChEMBL web service version 2.x resource schema:
#
# 
#
# *Figure 1:*
# "[ChEMBL web service schema diagram](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4489243/figure/F2/). The oval shapes represent ChEMBL web service resources and the line between two resources indicates that they share a common attribute. The arrow direction shows where the primary information about a resource type can be found. A dashed line indicates the relationship between two resources behaves differently. For example, the `Image` resource provides a graphical based representation of a `Molecule`."
# Figure and description taken from: [<i>Nucleic Acids Res.</i> (2015), <b>43</b>, 612-620](https://academic.oup.com/nar/article/43/W1/W612/2467881).
# #### ChEMBL webresource client
#
# * Python client library for accessing ChEMBL data
# * Handles interaction with the HTTPS protocol
# * Lazy evaluation of results -> reduced number of network requests
# ### Compound activity measures
# #### IC50 measure
#
# * [Half maximal inhibitory concentration](https://en.wikipedia.org/wiki/IC50)
# * Indicates how much of a particular drug or other substance is needed to inhibit a given biological process by half
#
# 
#
# *Figure 2:* Visual demonstration of how to derive an IC50 value:
# (i) Arrange inhibition data on y-axis and log(concentration) on x-axis. (ii) Identify maximum and minimum inhibition. (iii) The IC50 is the concentration at which the curve passes through the 50% inhibition level. Figure ["Example IC50 curve demonstrating visually how IC50 is derived"](https://en.wikipedia.org/wiki/IC50#/media/File:Example_IC50_curve_demonstrating_visually_how_IC50_is_derived.png) by JesseAlanGordon is licensed under [CC BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/).
# #### pIC50 value
#
# * To facilitate the comparison of IC50 values, which have a large value range and are given in different units (M, nM, ...), often pIC50 values are used
# * The pIC50 is the negative log of the IC50 value when converted to molar units:
# $ pIC_{50} = -log_{10}(IC_{50}) $, where $ IC_{50}$ is specified in units of M
# * Higher pIC50 values indicate exponentially greater potency of the drug
# * Note that the conversion can be adapted to the respective IC50 unit, e.g. for nM: $pIC_{50} = -log_{10}(IC_{50}*10^{-9})= 9-log_{10}(IC_{50}) $
#
# Other activity measures:
#
# Besides, IC50 and pIC50, other bioactivity measures are used, such as the equilibrium constant [KI](https://en.wikipedia.org/wiki/Equilibrium_constant) and the half maximal effective concentration [EC50](https://en.wikipedia.org/wiki/EC50).
# ## Practical
#
# In the following, we want to download all molecules that have been tested against our target of interest, the **epidermal growth factor receptor** ([**EGFR**](https://www.uniprot.org/uniprot/P00533)) kinase.
# ### Connect to ChEMBL database
# First, the ChEMBL webresource client as well as other Python libraries are imported.
# +
import math
from pathlib import Path
from zipfile import ZipFile
from tempfile import TemporaryDirectory
import numpy as np
import pandas as pd
from rdkit.Chem import PandasTools
from chembl_webresource_client.new_client import new_client
from tqdm.auto import tqdm
# -
HERE = Path(_dh[-1])
DATA = HERE / "data"
# Next, we create resource objects for API access.
targets_api = new_client.target
compounds_api = new_client.molecule
bioactivities_api = new_client.activity
type(targets_api)
# ### Get target data (EGFR kinase)
#
# * Get UniProt ID of the target of interest (EGFR kinase: [P00533](http://www.uniprot.org/uniprot/P00533)) from [UniProt website](https://www.uniprot.org/)
# * Use UniProt ID to get target information
#
# Select a different UniProt ID, if you are interested in another target.
uniprot_id = "P00533"
# #### Fetch target data from ChEMBL
# Get target information from ChEMBL but restrict it to specified values only
targets = targets_api.get(target_components__accession=uniprot_id).only(
"target_chembl_id", "organism", "pref_name", "target_type"
)
print(f'The type of the targets is "{type(targets)}"')
# #### Download target data from ChEMBL
#
# The results of the query are stored in `targets`, a `QuerySet`, i.e. the results are not fetched from ChEMBL until we ask for it (here using `pandas.DataFrame.from_records`).
#
# More information about the `QuerySet` datatype:
#
# > QuerySets are lazy – the act of creating a QuerySet does not involve any database activity. You can stack filters together all day long, and Django will actually not run the query until the QuerySet is evaluated.
# ([querysets-are-lazy](https://docs.djangoproject.com/en/3.0/topics/db/queries/#querysets-are-lazy))
targets = pd.DataFrame.from_records(targets)
targets
# #### Select target (target ChEMBL ID)
#
# After checking the entries, we select the first entry as our target of interest:
#
# `CHEMBL203`: It is a single protein and represents the human Epidermal growth factor receptor (EGFR, also named erbB1)
target = targets.iloc[0]
target
# Save selected ChEMBL ID.
chembl_id = target.target_chembl_id
print(f"The target ChEMBL ID is {chembl_id}")
# NBVAL_CHECK_OUTPUT
# ### Get bioactivity data
#
# Now, we want to query bioactivity data for the target of interest.
# #### Fetch bioactivity data for the target from ChEMBL
# In this step, we fetch the bioactivity data and filter it to only consider
#
# * human proteins,
# * bioactivity type IC50,
# * exact measurements (relation `'='`), and
# * binding data (assay type `'B'`).
# +
# Query bioactivities for the selected target, restricted to exact IC50
# binding assays; `.only(...)` limits which fields are downloaded.
bioactivities = bioactivities_api.filter(
    target_chembl_id=chembl_id, type="IC50", relation="=", assay_type="B"
).only(
    "activity_id",
    "assay_chembl_id",
    "assay_description",
    "assay_type",
    "molecule_chembl_id",
    "type",
    "standard_units",
    "relation",
    "standard_value",
    "target_chembl_id",
    "target_organism",
)
print(f"Length and type of bioactivities object: {len(bioactivities)}, {type(bioactivities)}")
# -
# Each entry in our bioactivity set holds the following information:
print(f"Length and type of first element: {len(bioactivities[0])}, {type(bioactivities[0])}")
bioactivities[0]
# #### Download bioactivity data from ChEMBL
# Finally, we download the `QuerySet` in the form of a `pandas` `DataFrame`.
#
# > **Note**: This step should not take more than 2 minutes, if so try to rerun all cells starting from _"Fetch bioactivity data for the target from ChEMBL"_ or read this message below:
#
# <details>
#
# <summary>Load a local version of the data (in case you encounter any problems while fetching the data)</summary>
#
# If you experience difficulties to query the ChEMBL database, we also provide the resulting dataframe you will construct in the cell below. If you want to use the saved version, use the following code instead to obtain `bioactivities_df`:
#
# ```python
# # replace first line in cell below with this other line
# bioactivities_df = pd.read_csv(DATA / "EGFR_bioactivities_CHEMBL27.csv.zip", index_col=0)
# ```
#
# </details>
bioactivities_df = pd.DataFrame.from_records(bioactivities)
print(f"DataFrame shape: {bioactivities_df.shape}")
bioactivities_df.head()
# Note that the first two rows describe the same bioactivity entry; we will remove such artifacts later during the deduplication step. Note also that we have columns for `standard_units`/`units` and `standard_values`/`values`; in the following, we will use the standardized columns (standardization by ChEMBL), and thus, we drop the other two columns.
#
# If we used the `units` and `values` columns, we would need to convert all values with many different units to nM:
bioactivities_df["units"].unique()
bioactivities_df.drop(["units", "value"], axis=1, inplace=True)
bioactivities_df.head()
# #### Preprocess and filter bioactivity data
#
# 1. Convert `standard_value`'s datatype from `object` to `float`
# 2. Delete entries with missing values
# 3. Keep only entries with `standard_unit == nM`
# 4. Delete duplicate molecules
# 5. Reset `DataFrame` index
# 6. Rename columns
# **1. Convert datatype of "standard_value" from "object" to "float"**
#
# The field `standard_value` holds standardized (here IC50) values. In order to make these values usable in calculations later on, convert values to floats.
bioactivities_df.dtypes
bioactivities_df = bioactivities_df.astype({"standard_value": "float64"})
bioactivities_df.dtypes
# **2. Delete entries with missing values**
#
# Use the parameter `inplace=True` to drop values in the current `DataFrame` directly.
bioactivities_df.dropna(axis=0, how="any", inplace=True)
print(f"DataFrame shape: {bioactivities_df.shape}")
# **3. Keep only entries with "standard_unit == nM"**
#
# We only want to keep bioactivity entries in `nM`, thus we remove all entries with other units.
print(f"Units in downloaded data: {bioactivities_df['standard_units'].unique()}")
print(
f"Number of non-nM entries:\
{bioactivities_df[bioactivities_df['standard_units'] != 'nM'].shape[0]}"
)
bioactivities_df = bioactivities_df[bioactivities_df["standard_units"] == "nM"]
print(f"Units after filtering: {bioactivities_df['standard_units'].unique()}")
print(f"DataFrame shape: {bioactivities_df.shape}")
# **4. Delete duplicate molecules**
#
# Sometimes the same molecule (`molecule_chembl_id`) has been tested more than once, in this case, we only keep the first one.
#
# Note other choices could be to keep the one with the best value or a mean value of all assay results for the respective compound.
bioactivities_df.drop_duplicates("molecule_chembl_id", keep="first", inplace=True)
print(f"DataFrame shape: {bioactivities_df.shape}")
# **5. Reset "DataFrame" index**
#
# Since we deleted some rows, but we want to iterate over the index later, we reset the index to be continuous.
bioactivities_df.reset_index(drop=True, inplace=True)
bioactivities_df.head()
# **6. Rename columns**
bioactivities_df.rename(
columns={"standard_value": "IC50", "standard_units": "units"}, inplace=True
)
bioactivities_df.head()
print(f"DataFrame shape: {bioactivities_df.shape}")
# We now have a set of **5575** molecule ids with respective IC50 values for our target kinase.
# ### Get compound data
#
# We have a `DataFrame` containing all molecules tested against EGFR (with the respective measured bioactivity).
#
# Now, we want to get the molecular structures of the molecules that are linked to respective bioactivity ChEMBL IDs.
# #### Fetch compound data from ChEMBL
#
# Let's have a look at the compounds from ChEMBL which we have defined bioactivity data for: We fetch compound ChEMBL IDs and structures for the compounds linked to our filtered bioactivity data.
compounds_provider = compounds_api.filter(
molecule_chembl_id__in=list(bioactivities_df["molecule_chembl_id"])
).only("molecule_chembl_id", "molecule_structures")
# #### Download compound data from ChEMBL
#
# Again, we want to export the `QuerySet` object into a `pandas.DataFrame`. Given the data volume, **this can take some time.** For that reason, we will first obtain the list of records through `tqdm`, so we get a nice progress bar and some ETAs. We can then pass the list of compounds to the DataFrame.
compounds = list(tqdm(compounds_provider))
compounds_df = pd.DataFrame.from_records(
compounds,
)
print(f"DataFrame shape: {compounds_df.shape}")
compounds_df.head()
# #### Preprocess and filter compound data
#
# 1. Remove entries with missing entries
# 2. Delete duplicate molecules (by molecule_chembl_id)
# 3. Get molecules with canonical SMILES
# **1. Remove entries with missing molecule structure entry**
compounds_df.dropna(axis=0, how="any", inplace=True)
print(f"DataFrame shape: {compounds_df.shape}")
# **2. Delete duplicate molecules**
compounds_df.drop_duplicates("molecule_chembl_id", keep="first", inplace=True)
print(f"DataFrame shape: {compounds_df.shape}")
# **3. Get molecules with canonical SMILES**
#
# So far, we have multiple different molecular structure representations. We only want to keep the canonical SMILES.
compounds_df.iloc[0].molecule_structures.keys()
# +
# Extract the canonical SMILES string from each compound's structure dict.
canonical_smiles = []

# NOTE: the loop variable is named `row` here; the original code reused the
# name `compounds`, shadowing the list of records fetched above.
for _, row in compounds_df.iterrows():
    try:
        canonical_smiles.append(row["molecule_structures"]["canonical_smiles"])
    except KeyError:
        # entry has no canonical SMILES; mark it with None (dropped below)
        canonical_smiles.append(None)

compounds_df["smiles"] = canonical_smiles
compounds_df.drop("molecule_structures", axis=1, inplace=True)
print(f"DataFrame shape: {compounds_df.shape}")
# -
# Sanity check: Remove all molecules without a canonical SMILES string.
compounds_df.dropna(axis=0, how="any", inplace=True)
print(f"DataFrame shape: {compounds_df.shape}")
# ### Output (bioactivity-compound) data
# **Summary of compound and bioactivity data**
print(f"Bioactivities filtered: {bioactivities_df.shape[0]}")
bioactivities_df.columns
print(f"Compounds filtered: {compounds_df.shape[0]}")
compounds_df.columns
# #### Merge both datasets
#
# Merge values of interest from `bioactivities_df` and `compounds_df` in an `output_df` based on the compounds' ChEMBL IDs (`molecule_chembl_id`), keeping the following columns:
#
# * ChEMBL IDs: `molecule_chembl_id`
# * SMILES: `smiles`
# * units: `units`
# * IC50: `IC50`
# +
# Merge DataFrames
output_df = pd.merge(
bioactivities_df[["molecule_chembl_id", "IC50", "units"]],
compounds_df,
on="molecule_chembl_id",
)
# Reset row indices
output_df.reset_index(drop=True, inplace=True)
print(f"Dataset with {output_df.shape[0]} entries.")
# -
output_df.dtypes
output_df.head(10)
# #### Add pIC50 values
# As you can see the low IC50 values are difficult to read (values are distributed over multiple scales), which is why we convert the IC50 values to pIC50.
def convert_ic50_to_pic50(IC50_value):
    """Convert an IC50 value to its pIC50 equivalent: pIC50 = 9 - log10(IC50).

    The offset of 9 corresponds to IC50 values given in nM
    (pIC50 = -log10(IC50 in M) = 9 - log10(IC50 in nM)).
    """
    return 9 - math.log10(IC50_value)
# Apply conversion to each row of the compounds DataFrame
output_df["pIC50"] = output_df.apply(lambda x: convert_ic50_to_pic50(x.IC50), axis=1)
output_df.head()
# #### Draw compound data
#
# Let's have a look at our collected data set.
#
# First, we plot the pIC50 value distribution
# + tags=["nbsphinx-thumbnail"]
output_df.hist(column="pIC50")
# -
# In the next steps, we add a column for RDKit molecule objects to our `DataFrame` and look at the structures of the molecules with the highest pIC50 values.
# Add molecule column (RDKit mol objects parsed from the SMILES strings)
PandasTools.AddMoleculeColumnToFrame(output_df, smilesCol="smiles")
# +
# Sort molecules by pIC50
output_df.sort_values(by="pIC50", ascending=False, inplace=True)
# Reset index
output_df.reset_index(drop=True, inplace=True)
# -
# Show the three most active molecules, i.e. molecules with the highest pIC50 values.
output_df.drop("smiles", axis=1).head(3)
# Prepare saving the dataset: Drop the ROMol column
output_df = output_df.drop("ROMol", axis=1)
print(f"DataFrame shape: {output_df.shape}")
# #### Freeze output data to ChEMBL 27
#
# This is a technical step: Usually, we would continue to work with the dataset that we just created (latest dataset).
#
# However, here on the TeachOpenCADD platform, we prefer to freeze the dataset to a certain ChEMBL releases (i.e. [ChEMBL 27](http://doi.org/10.6019/CHEMBL.database.27)),
# so that this talktorial and other talktorials downstream in our CADD pipeline do not change in the future (helping us to maintain the talktorials).
# <div class="alert alert-block alert-info">
#
# <b>Note:</b> If you prefer to run this notebook on the latest dataset or if you want to use it for another target, please comment the cell below.
#
# </div>
# Disable this cell to unfreeze the dataset
# NOTE: this overwrites the freshly built output_df with the frozen ChEMBL 27 snapshot;
# float_precision="round_trip" keeps the stored float values bit-exact on re-read.
output_df = pd.read_csv(
    DATA / "EGFR_compounds_ea055ef.csv", index_col=0, float_precision="round_trip"
)
output_df.head()
print(f"DataFrame shape: {output_df.shape}")
# NBVAL_CHECK_OUTPUT
# + [markdown] tags=[]
# #### Write output data to file
#
# We want to use this bioactivity-compound dataset in the following talktorials, thus we save the data as `csv` file.
# Note that it is advisable to drop the molecule column (which only contains an image of the molecules) when saving the data.
# -
output_df.to_csv(DATA / "EGFR_compounds.csv")
output_df.head()
print(f"DataFrame shape: {output_df.shape}")
# NBVAL_CHECK_OUTPUT
# ## Discussion
# In this tutorial, we collected bioactivity data for our target of interest from the ChEMBL database.
# We filtered the data set in order to only contain molecules with measured IC50 bioactivity values.
#
# Be aware that ChEMBL data originates from various sources. Compound data has been generated in different labs by different people all over the world. Therefore, we have to be cautious with the predictions we make using this data set. It is always important to consider the source of the data and consistency of data production assays when interpreting the results and determining how much confidence we have in our predictions.
#
# In the next tutorials, we will filter our acquired data by Lipinski's rule of five and by unwanted substructures. Another important step would be to *clean* the molecular data. As this is not shown in any of our talktorials (yet), we would like to refer to the [Standardiser library](https://github.com/flatkinson/standardiser) or [MolVS](https://molvs.readthedocs.io/en/latest/) as useful tools for this task.
# ## Quiz
# * We have downloaded in this talktorial molecules and bioactivity data from ChEMBL. What else is the ChEMBL database useful for?
# * What is the difference between IC50 and EC50?
# * What can we use the data extracted from ChEMBL for?
| teachopencadd/talktorials/T001_query_chembl/talktorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: GeoPython_3.6
# language: python
# name: geopython_3.6
# ---
# # Introduction to cartopy
# %matplotlib inline
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# High-resolution figure with an equirectangular (Plate Carree) projection.
plt.figure(figsize=(15, 9), dpi=300)
ax = plt.axes(projection=ccrs.PlateCarree())
# '10m' is the finest Natural Earth coastline resolution available.
ax.coastlines(resolution='10m')
ax.gridlines()
plt.savefig('coastlines.png')
| Chapter3/notebooks/03_cartopy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Datashader: Custom Transfer Functions for LandSat8
# ### Setup
# The following imports will be needed to complete the exercises or provide for an improved notebook display:
# +
from os import path
import numpy as np
import rasterio as rio
from bokeh.models import Range1d
from bokeh.plotting import Figure
from bokeh.io import output_notebook, show
from datashader.bokeh_ext import InteractiveImage
import datashader as ds
import datashader.transfer_functions as tf
from bokeh.tile_providers import STAMEN_TONER
output_notebook()
# -
# ### Load Landsat Data
#
# Bands Wavelength
# (micrometers) Resolution
# (meters)
# - Band 1 - Coastal aerosol 0.43 - 0.45 30
# - Band 2 - Blue 0.45 - 0.51 30
# - Band 3 - Green 0.53 - 0.59 30
# - Band 4 - Red 0.64 - 0.67 30
# - Band 5 - Near Infrared (NIR) 0.85 - 0.88 30
# - Band 6 - SWIR 1 1.57 - 1.65 30
# - Band 7 - SWIR 2 2.11 - 2.29 30
# - Band 8 - Panchromatic 0.50 - 0.68 15
# - Band 9 - Cirrus 1.36 - 1.38 30
# - Band 10 - Thermal Infrared (TIRS) 1 10.60 - 11.19 100 * (30)
# - Band 11 - Thermal Infrared (TIRS) 2 11.50 - 12.51 100 * (30)
# +
data_dir = './data'
# Open each LandSat8 band as a rasterio dataset (see the band/wavelength table above).
band1 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B1.TIF'))
band2 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B2.TIF'))
band3 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B3.TIF'))
band4 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B4.TIF'))
band5 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B5.TIF'))
band6 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B6.TIF'))
band7 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B7.TIF'))
band8 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B8.TIF'))
band9 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B9.TIF'))
band10 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B10.TIF'))
band11 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_B11.TIF'))
# BQA is the quality-assessment band, not a spectral band.
band12 = rio.open(path.join(data_dir, 'MERCATOR_LC80210392016114LGN00_BQA.TIF'))
# Notice the MERCATOR prefix which indicates the data was projected to the Mercator CRS
# Scene extent, taken from band 1.
xmin = band1.bounds.left
ymin = band1.bounds.bottom
xmax = band1.bounds.right
ymax = band1.bounds.top
# -
# ## Datashader Transfer Functions
def base_plot(tools='pan,wheel_zoom,reset', plot_width=900, plot_height=500, x_range=None, y_range=None, **plot_args):
    """Build a black, chrome-free Bokeh figure with a Stamen Toner tile layer.

    tools:       comma-separated Bokeh tool names to enable
    plot_width:  figure width in pixels
    plot_height: figure height in pixels
    x_range:     x extent of the view, or None for auto
    y_range:     y extent of the view, or None for auto
    plot_args:   extra keyword arguments forwarded to bokeh's Figure
    returns:     the configured Figure
    """
    fig = Figure(tools=tools, plot_width=plot_width, plot_height=plot_height,
                 x_range=x_range, y_range=y_range, outline_line_color=None,
                 background_fill_color='black',
                 min_border=0, min_border_left=0, min_border_right=0,
                 min_border_top=0, min_border_bottom=0, **plot_args)
    fig.add_tile(STAMEN_TONER)
    # Hide axes and grid lines so only the tiles/imagery are visible.
    fig.axis.visible = False
    fig.xgrid.grid_line_color = None
    fig.ygrid.grid_line_color = None
    return fig
# ### Just the Blue Band
# +
def update_image(x_range, y_range, w, h, how='log'):
    # Rasterize band 2 (blue) for the requested viewport and shade it in greyscale.
    # NOTE(review): the `how` parameter is ignored — shading is hard-coded to
    # 'linear' below; confirm whether `how` should be passed through instead.
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    blue_img = tf.interpolate(cvs.raster(band2),
                              cmap=['black','white'],
                              how='linear')
    return blue_img
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, update_image)
# +
from xarray import DataArray
from datashader.utils import ngjit
@ngjit
def normalize_data(agg):
    """Contrast-stretch a 16-bit raster band into the 0-255 range.

    Each value is first scaled to [0, 1] against the full 16-bit range, then
    passed through a sigmoid (steepness 40, midpoint 0.125) to boost contrast,
    and finally scaled to 0-255.  The explicit loops keep the function
    compatible with the ngjit (numba nopython) decorator.
    """
    result = np.zeros_like(agg)
    lo = 0
    hi = 2**16 - 1
    span = hi - lo
    contrast = 40
    threshold = .125
    dim0, dim1 = agg.shape
    for i in range(dim0):
        for j in range(dim1):
            scaled = (agg[i, j] - lo) / span
            # Sigmoid contrast stretch (the "bonus" step of the original).
            scaled = 1 / (1 + np.exp(contrast * (threshold - scaled)))
            result[i, j] = scaled * 255.0
    return result
# -
def combine_bands(r, g, b):
    """Combine three normalized raster bands into an RGBA datashader Image.

    r, g, b: 2-D arrays of equal shape holding the red, green and blue planes
             (raw band data; normalize_data() rescales each to 0-255).
    returns: tf.Image whose uint32 pixels pack the four uint8 channels.
    """
    r = normalize_data(r).astype(np.uint8)
    g = normalize_data(g).astype(np.uint8)
    b = normalize_data(b).astype(np.uint8)
    # Fully opaque alpha channel.
    a = (np.zeros_like(r) + 255).astype(np.uint8)
    # Stack the four uint8 planes and reinterpret each pixel as one uint32.
    img = np.dstack([r, g, b, a]).view(np.uint32).reshape(r.shape)
    return tf.Image(data=img)
# ### True Color (Red=Red, Green=Green, Blue=Blue)
# +
def true_color(x_range, y_range, w, h):
    """True color composite: R=band4 (red), G=band3 (green), B=band2 (blue)."""
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    r, g, b = [cvs.raster(b).data for b in (band4, band3, band2)]
    return combine_bands(r, g, b)
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, true_color)
# -
# ### Color Infrared (Vegetation) (Red=Near Infrared, Green=Red, Blue=Green)
# +
def color_infrared(x_range, y_range, w, h):
    """Vegetation composite: R=band5 (NIR), G=band4 (red), B=band3 (green)."""
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    r, g, b = [cvs.raster(b).data for b in (band5, band4, band3)]
    return combine_bands(r, g, b)
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, color_infrared)
# -
# ### False Color (Urban) (Red=SWIR 2, Green=SWIR 1, Blue=Red)
# +
def false_color_urban(x_range, y_range, w, h):
    """Urban composite: R=band7 (SWIR 2), G=band6 (SWIR 1), B=band4 (red)."""
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    r, g, b = [cvs.raster(b).data for b in (band7, band6, band4)]
    return combine_bands(r, g, b)
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, false_color_urban)
# -
# ### False Color 2 (Red=Near Infrared, Green=SWIR 1, Blue=Coastal)
# +
def false_color_veg(x_range, y_range, w, h):
    """False color composite: R=band5 (NIR), G=band7 (SWIR 2), B=band1 (coastal)."""
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    r, g, b = [cvs.raster(b).data for b in (band5, band7, band1)]
    return combine_bands(r, g, b)
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, false_color_veg)
# -
# ### Land vs. Water (Red=Near Infrared, Green=SWIR 1, Blue=Red)
# +
def land_vs_water(x_range, y_range, w, h):
    """Land/water composite: R=band5 (NIR), G=band6 (SWIR 1), B=band4 (red)."""
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    r, g, b = [cvs.raster(b).data for b in (band5, band6, band4)]
    return combine_bands(r, g, b)
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, land_vs_water)
# -
# ### Shortwave Infrared (Red=SWIR2, Green=Near Infrared, Blue=Red)
# +
def shortwave_infrared(x_range, y_range, w, h):
    """Shortwave-IR composite: R=band7 (SWIR 2), G=band5 (NIR), B=band4 (red)."""
    cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)
    r, g, b = [cvs.raster(b).data for b in (band7, band5, band4)]
    return combine_bands(r, g, b)
p = base_plot(x_range=(xmin, xmax), y_range=(ymin, ymax))
InteractiveImage(p, shortwave_infrared)
# -
| examples/landsat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:new_tsai]
# language: python
# name: conda-env-new_tsai-py
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/timeseriesAI/timeseriesAI/blob/master/tutorial_nbs/00_How_to_efficiently_work_with_very_large_numpy_arrays.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="s2UTqoddrEkY"
# created by <NAME> - email: <EMAIL>
# + [markdown] colab_type="text" heading_collapsed=true id="u7ikXU02rEkc"
# ## How to efficiently work with (very large) Numpy Arrays?
# + [markdown] hidden=true
# Sometimes we need to work with some very large numpy arrays that don't fit in memory. I'd like to share with you a way that works well for me.
# + [markdown] colab_type="text" heading_collapsed=true id="0C7goWDmrEke"
# ## Import libraries
# + colab={} colab_type="code" hidden=true id="9LqU8ovGrEkg"
import sys
import os
# Detect whether we are running inside Google Colab.
ISCOLAB = 'google.colab' in sys.modules
if ISCOLAB:
    # for bleeding edge
    # !pip install git+https://github.com/fastai/fastcore.git@master -q
    # !pip install git+https://github.com/fastai/fastai.git@master -q
    # !pip install git+https://github.com/timeseriesAI/timeseriesAI.git@master -q
    # for latest stable version
    # # !pip install tsai -q
import tsai
from tsai.all import *
display(HTML("<style>.container {width:95% !important; }</style>"))
# + hidden=true
# Report library versions and available hardware for reproducibility.
print('tsai :', tsai.__version__)
print('fastai :', fastai.__version__)
print('fastcore :', fastcore.__version__)
print('torch :', torch.__version__)
print('scipy :', sp.__version__)
print('numpy :', np.__version__)
print('pandas :', pd.__version__)
print(f'Total RAM : {bytes2GB(psutil.virtual_memory().total):5.2f} GB')
print(f'Used RAM : {bytes2GB(psutil.virtual_memory().used):5.2f} GB')
print('n_cpus :', cpus)
iscuda = torch.cuda.is_available()
if iscuda: print('device : {} ({})'.format(device, torch.cuda.get_device_name(0)))
else: print('device :', device)
# + [markdown] colab_type="text" heading_collapsed=true id="ZikgTzvBrEks"
# ## Introduction
# + [markdown] colab_type="text" hidden=true id="tINQJXXKrEku"
# I normally work with time series data. I made the decision to use numpy arrays to store my data since they can easily handle multiple dimensions, and are really very efficient.
#
# But sometimes datasets are really big (many GBs) and don't fit in memory. So I started looking around and found something that works very well: [**np.memmap**](https://docs.scipy.org/doc/numpy/reference/generated/numpy.memmap.html). Conceptually they work as arrays on disk, and that's how I often call them.
#
# np.memmap creates a map to numpy array you have previously saved on disk, so that you can efficiently access small segments of those (small or large) files on disk, without reading the entire file into memory. And that's exactly what we need with deep learning, be able to quickly create a batch in memory, without reading the entire file (that is stored on disk).
#
# The best analogy I've found are image files. You may have a very large dataset on disk (that far exceeds your RAM). In order to create your DL datasets, what you pass are the paths to each individual file, so that you can then load a few images and create a batch on demand.
#
# You can view np.memmap as the path collection that can be used to load numpy data on demand when you need to create a batch.
#
# So let's see how you can work with larger than RAM arrays on disk.
# + [markdown] colab_type="text" hidden=true id="MkJDdENPrEkw"
# On my laptop I have only 8GB of RAM.
# + colab={} colab_type="code" hidden=true id="udC2CWyerEky" outputId="ff20c1a3-2890-4123-fca5-3814ee2ee568"
print(f'Total RAM : {bytes2GB(psutil.virtual_memory().total):5.2f} GB')
print(f'Available RAM : {bytes2GB(psutil.virtual_memory().available):5.2f} GB\n')
# + [markdown] colab_type="text" hidden=true id="mDzcMZXIrEk_"
# I will try to demonstrate how you can handle a 10 GB numpy array dataset in an efficient way.
# + [markdown] colab_type="text" heading_collapsed=true id="q9S_h9CurElA"
# ## Create and save a larger than memory array
# + [markdown] colab_type="text" hidden=true id="GFKykgxdrElC"
# I will now create a large numpy array that doesn't fit in memory.
# Since I don't have enough RAM, I'll create an empty array on disk, and then load data in chunks that fit in memory.
#
# ⚠️ If you want to to experiment with large datasets, you may uncomment and run this code. **It will create a ~10GB on your disk**.
# If you do it, remember to delete it later.
# In my laptop it took me around **11 mins to run.**
# + colab={} colab_type="code" hidden=true id="9NDDenivrElE"
# # Save a small empty array
# X_temp_fn = './data/temp_X.npy'
# np.save(X_temp_fn, np.empty(1))
# # Create a np.memmap with desired dtypes and shape of the large array you want to save.
# # It's just a placeholder that doesn't contain any data
# X_fn = './data/X_on_disk.npy'
# X = np.memmap(X_temp_fn, dtype='float32', shape=(100000, 50, 512))
# # We are going to create a loop to fill in the np.memmap
# start = 0
# for i in range(20):
# # You now grab a chunk of your data that fits in memory
# # This could come from a pandas dataframe for example
# # I will simulate it with some random data
# data_chunk = np.random.rand(5000, 50, 512)
# end = start + data_chunk.shape[0]
# # I now fill a slice of the np.memmap
# X[start:end] = data_chunk
# start = end
# del data_chunk
# #I can now remove the temp file I created
# os.remove(X_temp_fn)
# # Once the data is loaded on the np.memmap, I save it as a normal np.array
# np.save(X_fn, X)
# # I will create a smaller array. Sinc this fits in memory, I don't need to use a memmap
# y_fn = 'y_on_disk.npy'
# y = np.random.randint(0, 10, X.shape[0])
# labels = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])
# np.save(y_fn, labels[y])
# del X, y
# + [markdown] colab_type="text" hidden=true id="pVWFDrjZrElN"
# Ok. So let's check the size of these files on memory.
# + colab={} colab_type="code" hidden=true id="eWrVL856rElP" outputId="f914bff6-cf73-4830-ad5c-0ab7d3a4f01f"
print(f'X array: {os.path.getsize("./data/X_on_disk.npy"):12} bytes ({bytes2GB(os.path.getsize("./data/X_on_disk.npy")):3.3f} GB)')
print(f'y array: {os.path.getsize("./data/y_on_disk.npy"):12} bytes ({bytes2GB(os.path.getsize("./data/y_on_disk.npy")):3.3f} GB)')
# + [markdown] colab_type="text" heading_collapsed=true id="BQ1_msZmrElZ"
# ## Load an array on disk (np.memmap)
# + [markdown] colab_type="text" hidden=true id="QEvaUCD9rEla"
# Remember I only have an 8 GB RAM on this laptop, so I couldn't load these datasets in memory.
#
# ☣️ Actually I accidentally loaded the "X_on_disk.npy" file, and my laptop crashed so I had to reboot it!
#
# So let's now load data as arrays on disk (np.memmap). The way to do it is super simple, and very efficient. You just do it as you would with a normal array, but add an mmap_mode.
#
# There are 4 modes:
#
# - ‘r’ Open existing file for reading only.
# - ‘r+’ Open existing file for reading and writing.
# - ‘w+’ Create or overwrite existing file for reading and writing.
# - ‘c’ Copy-on-write: assignments affect data in memory, but changes are not saved to disk. The file on disk is read-only.
#
# I normally use mode 'r' since I want to be able to make changes to data in memory (transforms for example), without affecting data on disk (same approach as with image data). This is the same thing you do with image files on disk, that are just read, and then modified in memory, without change the file on disk.
# + colab={} colab_type="code" hidden=true id="HdTrKEQjrElc" outputId="98b6fe19-2be7-4ded-a9d8-bd89df1b0d50"
# Memory-map the arrays: only a map to the on-disk data is loaded, not the data itself.
X_on_disk = np.load('./data/X_on_disk.npy', mmap_mode='r')
y_on_disk = np.load('./data/y_on_disk.npy', mmap_mode='r')
# + [markdown] colab_type="text" hidden=true id="idjgNgj0rEln"
# **Fast load**: it only takes a few ms to "load" a memory map to a 10 GB array on disk.
#
# In fact, the only thing that is loaded is a map to the array stored on disk. That's why it's so fast.
# + [markdown] colab_type="text" heading_collapsed=true id="Cfd4oMETrElp"
# ## Arrays on disk: main features
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="BlrhcMF6rElr"
# ### Very limited RAM usage
# + colab={} colab_type="code" hidden=true id="hQJSvWxerElt" outputId="fe9d5e47-5aad-4212-88bf-555ad38b4625"
print(X_on_disk.shape, y_on_disk.shape)
# + colab={} colab_type="code" hidden=true id="iyNgvLD4rEl1" outputId="33817483-cb99-47e0-fcbf-22b6b4d4ee9f"
print(f'X array on disk: {sys.getsizeof(X_on_disk):12} bytes ({bytes2GB(sys.getsizeof(X_on_disk)):3.3f} GB)')
print(f'y array on disk: {sys.getsizeof(y_on_disk):12} bytes ({bytes2GB(sys.getsizeof(y_on_disk)):3.3f} GB)')
# + [markdown] colab_type="text" hidden=true id="lkUAvbZMrEl-"
# **152 bytes of RAM for a 10GB array**. This is the great benefit of arrays on disk.
#
# Arrays on disk barely use any RAM until they're sliced and an element is converted into a np.array or a tensor.
#
# This is equivalent to the size of file paths in images (very limited) compared to the files themselves (actual images).
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="HkRnJDKhrEmA"
# ### Types
# + [markdown] colab_type="text" hidden=true id="U4ixDSaUrEmB"
# np.memmap is a subclass of np.ndarray
# + colab={} colab_type="code" hidden=true id="fOgZ-zNbrEmD" outputId="1385b49f-dd71-456b-b1b8-9a438614195b"
isinstance(X_on_disk, np.ndarray)
# + colab={} colab_type="code" hidden=true id="9dlvO5W-rEmS" outputId="0e77cdac-961c-44dc-dd88-fb3f88d5ce22"
type(X_on_disk)
# + [markdown] colab_type="text" heading_collapsed=true hidden=true id="z2AXmHLErEmb"
# ### Operations
# + [markdown] colab_type="text" hidden=true id="0z82CHnYrEmc"
# With np.memmap you can perform the same operations you would with a normal numpy array.
# The most common operations you will perform in deep learning are:
#
# - slicing
# - calculating stats: mean and std
# - scaling (using normalize or standardize)
# - transformation into a tensor
#
# Once you get the array on disk slice, you'll convert it into a tensor, move to a GPU and performs operations there.
# + [markdown] colab_type="text" hidden=true id="wdHMTGmRrEme"
#
# ⚠️ You need to be careful though not to convert the entire np.memmap to an array/ tensor if it's larger than your RAM. This will crash your computer unless you have enough RAM, so you would have to reboot!
#
# **DON'T DO THIS: torch.from_numpy(X) or np.array(X)** unless you have enough RAM.
#
# To avoid issues during test, I created a smaller array on disk (that I can store in memory). When I want to test something I test it with that array first. It's important to always verify that the type output of your operations is np.memmap, which means data is still in memory.
# + [markdown] colab_type="text" hidden=true id="qdYUHi2mrEmf"
# #### Slicing
# + [markdown] colab_type="text" hidden=true id="PvMz6dibrEmh"
# To ensure you don't bring the entire array in memory (which may crash your computer) you can always work with slices of data, which is by the way how fastai works.
#
# If you use mode 'r' you can grab a sample and make changes to it, but this won't modify data on disk.
# + colab={} colab_type="code" hidden=true id="8ZW_FP5trEmi" outputId="25566fd0-813d-4d54-9acd-8f1b486afae4"
x = X_on_disk[0]
x
# + [markdown] colab_type="text" hidden=true id="uV7MN7PYrEmr"
# It's important to note that **when we perform an math operation on a np.memmap (add, subtract, ...) the output is a np.array, and no longer a np.memmap.**
#
# ⚠️ Remember you don't want to run this type of operations with a memmap larger than your RAM!! That's why I do it with a slice.
# + colab={} colab_type="code" hidden=true id="NJXfcJXLrEms" outputId="69da09c6-158c-4601-e1c9-4be018aeab94"
x = X_on_disk[0] + 1
x
# + colab={} colab_type="code" hidden=true id="UIbacPBcrEm0" outputId="011861ba-48e9-4760-bfdd-9625cdd10d10"
x = torch.from_numpy(X_on_disk[0])
x2 = x + 1
x2
# + [markdown] colab_type="text" hidden=true id="N0L3ot1XrEnC"
# As you can see, this doesn't affect the original np.memmap
# + colab={} colab_type="code" hidden=true id="1IB51hJyrEnD" outputId="abacc8e3-58dd-4104-9b05-29f948d964c0"
X_on_disk[0]
# + [markdown] colab_type="text" hidden=true id="dl3glrYMrEnL"
# You can slice an array on disk by any axis, and it'll return a memmap. Slicing by any axis is very fast.
# + colab={} colab_type="code" hidden=true id="VJQoT7gcrEnN" outputId="dae52625-2473-44d8-fb06-121745c16567"
X_on_disk[0]
# + colab={} colab_type="code" hidden=true id="RwCJNNAFrEnU" outputId="a35f678f-c602-4e1d-9c08-83727499428f"
X_on_disk[:, 0]
# + [markdown] colab_type="text" hidden=true id="IFcP9IaPrEnb"
# However, bear in mind that if you use multiple indices, the output will be a regular numpy array. This is important as it will use more RAM.
# + colab={} colab_type="code" hidden=true id="6EGFxv-wrEnd" outputId="5e81fece-8d05-42eb-8682-5679c3297aff"
X_on_disk[[0,1]]
# + [markdown] colab_type="text" hidden=true id="HGyxjo3LrEnk"
# There's a trick we can use avoid this making use of the excellent new L class in fastai. It is to **itemify** the np.memmap/s.
# + colab={} colab_type="code" hidden=true id="WTJHYcfsrEnm"
def itemify(*x):
    """Pack the given array(s) into a fastai `L` of per-index tuples."""
    packed = L(*x)
    return packed.zip()
# + [markdown] colab_type="text" hidden=true id="TDvphuSmrEnt"
# To itemify one or several np.memmap/s is very fast. Let's see how long it takes with a 10 GB array.
# + colab={} colab_type="code" hidden=true id="hB2J6ppPrEnu"
X_on_disk_as_items = itemify(X_on_disk)
# + [markdown] colab_type="text" hidden=true id="6LB-mABtrEn2"
# 5 seconds to return individual records on disk! Bear in mind you only need to perform this once!
#
# So now, you can select multiple items at the same time, and they will all still be on disk:
# + colab={} colab_type="code" hidden=true id="hS4S5qrorEn3" outputId="a63b46ae-2b3c-4fd0-9025-9ce5a5a3db92"
X_on_disk_as_items[0,1]
# + [markdown] colab_type="text" hidden=true id="CCirERmarEoE"
# You can also itemify several items at once: X and y for example. When you slice the list, you'll get tuples.
# + colab={} colab_type="code" hidden=true id="CjHUUEfdrEoG"
Xy_on_disk_as_items = itemify(X_on_disk, y_on_disk)
# + colab={} colab_type="code" hidden=true id="9el77U5irEoN" outputId="25ae55d0-1738-4305-afea-1f5dee11c041"
Xy_on_disk_as_items[0, 1]
# + [markdown] colab_type="text" hidden=true id="TvDzO6EyrEoV"
# Slicing is very fast, even if there are 100.000 samples.
# + colab={} colab_type="code" hidden=true id="P5963Zv6rEoW" outputId="47b5d7e8-ba31-4197-b9dc-e51821405509"
# axis 0
# %timeit X_on_disk[0]
# + colab={} colab_type="code" hidden=true id="v7j5McgxrEof" outputId="c9554dcc-319c-4416-e09a-184d72c2b60f"
# axis 1
# %timeit X_on_disk[..., 0]
# + colab={} colab_type="code" hidden=true id="JVcctkAJrEon" outputId="c9e38edf-86ba-4bba-9c3f-a171e1d67ed9"
# axis 2
# %timeit X_on_disk[:, 0]
# + colab={} colab_type="code" hidden=true id="LtAejlbdrEov" outputId="59a9cd0a-9ab9-44e0-ed9f-276ad5d249ea"
# axis 0,1
# %timeit X_on_disk[0, 0]
# + [markdown] colab_type="text" hidden=true id="KcoAK8RFrEo3"
# To compare how fast you can slice a np.memmap, let's create a smaller array that I can fit in memory (X_in_memory). This is 10 times smaller (100 MB) than the one on disk.
# + colab={} colab_type="code" hidden=true id="EWv_K4A9rEo4"
X_in_memory_small = np.random.rand(10000, 50, 512)
# + colab={} colab_type="code" hidden=true id="_8ZuH-47rEpB" outputId="5d731bfd-7b52-4eb7-d2ad-d6054f72651b"
# %timeit X_in_memory_small[0]
# + [markdown] colab_type="text" hidden=true id="ZSsJE37ArEpI"
# Let's create the same array on disk. It's super simple:
# + colab={} colab_type="code" hidden=true id="KeGko-ITrEpJ"
np.save('./data/X_on_disk_small.npy', X_in_memory_small)
X_on_disk_small = np.load('./data/X_on_disk_small.npy', mmap_mode='r')
# + colab={} colab_type="code" hidden=true id="Pn0n7qyfrEpQ" outputId="f6ce2000-fd9e-4354-b723-11970a5f5bd5"
# %timeit X_on_disk_small[0]
# + [markdown] colab_type="text" hidden=true id="GNKn1ZDLrEpX"
# This is approx. 17x slower than slicing the array in memory, although it's still pretty fast.
#
# However, if we use the itemified version, it's much faster:
# + colab={} colab_type="code" hidden=true id="5ZbhCDwvrEpY" outputId="fb151d2a-d675-4ed1-89ac-232129331acf"
# %timeit X_on_disk_as_items[0]
# + [markdown] colab_type="text" hidden=true id="Z8TV_n2prEpg"
# This is much better! So now you can access 1 of multiple items on disk with a pretty good performance.
# + [markdown] colab_type="text" hidden=true id="ruK4tET0rEpj"
# #### Calculating stats: mean and std
# + [markdown] colab_type="text" hidden=true id="rVA8MDfhrEpl"
# Another benefit of using arrays on disk is that you can calculate the mean and std deviation of the entire dataset.
#
# It takes a considerable time since the array is very big (10GB), but it's feasible:
#
# - mean (0.4999966): 1 min 45 s
# - std (0.2886839): 11 min 43 s
#
# in my laptop.
# If you need them, you could calculate these stats once, and store the results (similar to ImageNet stats).
# However, you usually need to calculate these metrics for labeled (train) datasets, that tend to be smaller.
# + colab={} colab_type="code" hidden=true id="z_TcBSg9rEpn" outputId="70ca0b22-29ba-4978-96c0-9061addd468a"
# X_mean = X_on_disk.mean()
# X_mean
# + colab={} colab_type="code" hidden=true id="WnVrxtqorEpu" outputId="e94f16d9-5837-4f7e-f610-f0aa4b94b72e"
# X_std = X_on_disk.std()
# X_std
# + [markdown] colab_type="text" hidden=true id="v5SCxkElrEp3"
# #### Conversion into a tensor
# + [markdown] colab_type="text" hidden=true id="BPyHLzc-rEp5"
# Conversion from an array on disk slice into a tensor is also very fast:
# + colab={} colab_type="code" hidden=true id="gle3dCBCrEp6" outputId="9b6b4cf5-7861-4e76-ef34-53ef84cac0c9"
torch.from_numpy(X_on_disk[0])
# + colab={} colab_type="code" hidden=true id="F7KPKY36rEqF"
X_on_disk_small_0 = X_on_disk_small[0]
X_in_memory_small_0 = X_in_memory_small[0]
# + colab={} colab_type="code" hidden=true id="TIm7L3J3rEqN" outputId="6a292ec4-2be7-441e-96d7-673fae99710d"
# %timeit torch.from_numpy(X_on_disk_small_0)
# + colab={} colab_type="code" hidden=true id="tDbcscrkrEqa" outputId="131c66b1-04b9-4959-9cbd-a005d484925f"
# %timeit torch.from_numpy(X_in_memory_small_0 )
# + [markdown] colab_type="text" hidden=true id="dZrer2g_rEqh"
# So the conversion to a tensor takes the same time from a np.memmap as from a np.array in memory.
# + [markdown] colab_type="text" hidden=true id="QRxLvTA-rEqj"
# #### Combined operations: slicing plus conversion to tensor
# + [markdown] colab_type="text" hidden=true id="RWxLCweXrEqk"
# Let's now check performance of the combined process: slicing plus conversion to a tensor. Based on what we've seen there are 3 options:
#
# - slice np.array in memory + conversion to tensor
# - slice np.memamap on disk + conversion to tensor
# - slice itemified np.memmap + conversion to tensor
# + colab={} colab_type="code" hidden=true id="1cfxU07xrEql" outputId="42b7559a-4f51-4556-b9ae-623c3ebb3928"
# %timeit torch.from_numpy(X_in_memory_small[0])
# + colab={} colab_type="code" hidden=true id="5-ThwPEUrEqt" outputId="3149c62c-e567-4268-f5f7-4003898a6a71"
# %timeit torch.from_numpy(X_on_disk_small[0])
# + colab={} colab_type="code" hidden=true id="IH23iQlerEq0"
X_on_disk_small_as_items = itemify(X_on_disk_small)
# + colab={} colab_type="code" hidden=true id="PtMYAT2LrEq8" outputId="b92bcf63-3679-4e78-8cff-715366bf12e7"
# %timeit torch.from_numpy(X_on_disk_small_as_items[0][0])
# + [markdown] colab_type="text" hidden=true id="8oBpcpTNrErB"
# So this last method is **almost as fast as having the array in memory**!! This is an excellent outcome, since slicing arrays in memory is a highly optimized operation.
#
# And we have the benefit of having access to very large datasets if needed.
# + [markdown] colab_type="text" heading_collapsed=true id="c1vSEBd-rErD"
# ## Summary
# + [markdown] colab_type="text" hidden=true id="01SIAKb0rErE"
# We now have a very efficient way to work with very large numpy arrays.
#
# The process is very simple:
#
# - create and save the array on disk (as described before)
# - load it with a mmap_mode='r'
# - itemify the array/s
#
# So my recommendation would be:
#
# - use numpy arrays in memory when possible (if your data fits in memory)
# - use numpy memmap (arrays on disk) when data doesn't fit. You will still have a great performance.
| tutorial_nbs/00_How_to_efficiently_work_with_very_large_numpy_arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Total execution time
import sys
sys.path.append('..')
from panelctmc import panelctmc
# +
import numpy as np
# Load the demo panel data as raw objects (mixed string/date columns),
# skipping the CSV header row.
x = np.loadtxt('../data/demo1.csv', delimiter=',', skiprows=1, dtype=object)
# Rating buckets: each inner list of rating labels is collapsed into a
# single state for the panel CTMC fit below.
mapping = [['AAA', 'AA+', 'AA', 'AA-', 'A+', 'A', 'A-'],
           ['BBB+', 'BBB', 'BBB-'],
           ['BB+', 'BB', 'BB-'],
           ['B+', 'B', 'B-']]
# -
# %timeit panelctmc(x, mapping)
| profile/speed (timeit).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from numpy.linalg import inv
import scipy.linalg
import matplotlib.pyplot as plt
import math as m
# %matplotlib inline
# -
# Generate N noisy 1-D training points on [0, 0.9]:
# y = cos(10 x^2) + 0.1 sin(100 x), X shaped (N, 1).
N = 25
X = np.reshape(np.linspace(0, 0.9, N), (N, 1))
y = np.cos(10*X**2) + 0.1 * np.sin(100*X)
# Scatter-plot the raw training data.
plt.figure()
plt.plot(X, y, 'o', markersize=10)
plt.xlabel("$x$")
plt.ylabel("$y$");
def max_lik_estimate(X, y):
    """Maximum likelihood (ordinary least-squares) estimate for linear regression.

    X: N x D matrix of training inputs
    y: N x 1 vector of training targets/observations
    returns: maximum likelihood parameters theta_ml (D x 1)
    """
    # Solve the normal equations X^T X theta = X^T y directly; this avoids
    # forming an explicit inverse (the original also left a dead
    # zero-initialisation that was immediately overwritten).
    theta_ml = np.linalg.solve(X.T @ X, X.T @ y)
    return theta_ml
def predict_with_estimate(Xtest, theta):
    """Evaluate the linear model at the test inputs.

    Xtest: K x D matrix of test inputs
    theta: D x 1 vector of parameters
    returns: K x 1 vector of predictions f(Xtest) = Xtest @ theta
    """
    return Xtest @ theta
# +
# Build a denser test grid extending beyond the training interval.
N=100
Xtest=np.reshape(np.linspace(-0.3, 1.3, N), (N, 1))
plt.figure()
plt.plot(X, y, 'o', markersize=10)
# NOTE(review): plt.plot(Xtest) plots the grid values against their index,
# not against x — probably a leftover sanity check; confirm intent.
plt.plot(Xtest)
plt.xlabel("$x$")
plt.ylabel("$y$");
# -
## EDIT THIS FUNCTION
def poly_features(X, K):
    """Polynomial feature matrix.

    X: inputs of size N x 1 (or length-N vector)
    K: degree of the polynomial
    returns: Phi of size N x (K+1) with Phi[i, j] = X[i] ** j
    """
    X = X.flatten()
    # Broadcast the inputs (N x 1) against the exponents (K+1,) instead of
    # the original double Python loop — identical values, vectorised.
    return X[:, np.newaxis] ** np.arange(K + 1)
## EDIT THIS FUNCTION
def nonlinear_features_maximum_likelihood(Phi, y):
    """Maximum likelihood estimator for a linear model in features Phi.

    Phi: feature matrix for training inputs, size N x D
    y:   training targets, size N x 1
    returns: theta_ml of size D x 1
    """
    kappa = 1e-80  # jitter added to the Gram matrix
    # NOTE(review): 1e-80 is far below float64 machine epsilon, so this
    # "stability" term is effectively a no-op; something like 1e-8 would be
    # needed to actually regularise an ill-conditioned Phi.T @ Phi.
    D = Phi.shape[1]
    # Solve (Phi^T Phi + kappa I) theta = Phi^T y without an explicit
    # inverse (the original also had a dead zero-initialisation and an
    # unused duplicate of the column count).
    theta_ml = np.linalg.solve(Phi.T @ Phi + kappa * np.eye(D), Phi.T @ y)
    return theta_ml
# Fit polynomials of several degrees and plot the predictions on the
# extended test grid; K=11 illustrates overfitting.
Kmax=[0,1,2,3,11]
plot=[]
for K in Kmax:
    Phi = poly_features(X, K) # N x (K+1) feature matrix
    theta_ml = nonlinear_features_maximum_likelihood(Phi, y) # maximum likelihood estimator
    Phi_test = poly_features(Xtest, K)
    plot.append(Phi_test @ theta_ml) # predicted y-values
    #plot[K]=y_pred
    #plt.plot(Xtest, y_pred, label='%i,K')
# plt.plot(X, y, 'o')
# Phi = poly_features(X, 11) # N x (K+1) feature matrix
# theta_ml = nonlinear_features_maximum_likelihood(Phi, y) # maximum likelihood estimator
# Phi_test = poly_features(Xtest, 11)
# y_pred11 = Phi_test @ theta_ml # predicted y-values
# array.append(y_pred11)
# plot[i] holds the prediction for Kmax[i], in iteration order.
plt.plot(Xtest, plot[0],label='K=0')
plt.plot(Xtest, plot[1],label='K=1')
plt.plot(Xtest, plot[2],label='K=2')
plt.plot(Xtest, plot[3],label='K=3')
plt.plot(Xtest, plot[4],label='K=11')
plt.plot(X, y, 'o',label='data')
plt.ylim(-2,4)
plt.xlim(-0.1,1)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend()
plt.savefig('1a',dpi=1000)
# plt.show()
# #plt.ylim(-1.5,3)
# +
#######################B###########################
#######################B###########################
#######################B###########################
# Part B: trigonometric features — evaluate on a wider test interval.
Xtest=np.reshape(np.linspace(-1, 1.2, N), (N, 1))
def poly_features1(X, K):
    """Trigonometric (Fourier) feature matrix.

    X: inputs of size N x 1 (or length-N vector)
    K: number of sin/cos frequency pairs
    returns: Phi of size N x (2K+1) with columns
             [1, sin(2*pi*1*x), cos(2*pi*1*x), ..., sin(2*pi*K*x), cos(2*pi*K*x)]
    """
    X = X.flatten()
    N = X.shape[0]
    Phi = np.zeros((N, 2 * K + 1))
    Phi[:, 0] = 1.0  # bias column
    for k in range(1, K + 1):
        # Column 2k-1 (odd) holds sin, column 2k (even) holds cos — same
        # layout as the original per-element branching, but vectorised over
        # all N inputs for each frequency k.
        Phi[:, 2 * k - 1] = np.sin(2 * np.pi * k * X)
        Phi[:, 2 * k] = np.cos(2 * np.pi * k * X)
    return Phi
# +
# Fit trigonometric models with K = 0..3 frequency pairs plus K = 11, then
# plot two of them against the data.
Kmax=3
array=[]
for K in range (Kmax+1):
    Phi = poly_features1(X, K) # N x (K+1) feature matrix
    theta_ml = nonlinear_features_maximum_likelihood(Phi, y) # maximum likelihood estimator
    Phi_test = poly_features1(Xtest, K)
    y_pred = Phi_test @ theta_ml # predicted y-values
    array.append(y_pred)
# K = 11 fitted separately and appended as array[4].
Phi11 = poly_features1(X, 11) # N x (K+1) feature matrix
theta_ml = nonlinear_features_maximum_likelihood(Phi11, y) # maximum likelihood estimator
Phi_test11 = poly_features1(Xtest, 11)
y_pred11 = Phi_test11 @ theta_ml # predicted y-values
array.append(y_pred11)
#plt.plot(Xtest, array[0],label='K=0')
plt.plot(Xtest, array[1],label='K=1')
#plt.plot(Xtest, array[2],label='K=2')
#plt.plot(Xtest, array[3],label='K=3')
plt.plot(Xtest, array[4], 'r',label='K=11')
plt.plot(X, y, 'o',label='data')
plt.xlim(-0.2,1.2)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend()
plt.savefig('1b',dpi=1000)
#plt.show()
# -
# -
## EDIT THIS FUNCTION
def MSE(y, ypred):
    """Mean squared error between targets y and predictions ypred.

    Accepts array-likes of matching shape; averages over the first axis,
    matching the original elementwise accumulation loop.
    """
    y = np.asarray(y)
    ypred = np.asarray(ypred)
    # Vectorised replacement of the original Python loop.
    return ((y - ypred) ** 2).sum(axis=0) / len(y)
# +
# Leave-one-out cross-validation: for each K, hold out each point in turn,
# refit on the remaining 24, and accumulate the squared prediction error.
Kmax=10
mse_train=[]
err = np.zeros(11)
#print(len(x_new))
#ytest=np.cos(10*Xtest**2) + 0.1 * np.sin(100*Xtest)
for K in range (Kmax+1):
    e=0
    for l in range (len(y)):
        y_new= np.delete(y,l)
        x_new= np.delete(X,l)
        x_test=X[l]
        #print('x_new',x_new)
        #print('y_new',y_new)
        Phi = poly_features1(x_new, K) # N x (K+1) feature matrix
        theta_ml = nonlinear_features_maximum_likelihood(Phi, y_new) # maximum likelihood estimator
        y_pred = poly_features1(x_test, K) @ theta_ml # predicted y-values
        # Divide by 25 here so the sum over folds is already the mean.
        e+=(y_pred-y[l])**2/25
        #error=(((sum(y_pred - y_new))**2)/24)
        #error=MSE(y_pred,y_new)
    err[K]=e
plt.figure()
plt.plot(range(11), err,label='MSE')
#plt.plot(alpha,label='maximum likelihood value')
#plt.xlim(0,10)
plt.xlabel("Degree of polynomial")
plt.ylabel("MSE")
plt.legend()
plt.show()
# -
# Same leave-one-out loop, additionally tracking the average training MSE
# ("variance" curve) of each refitted model.
Kmax=10
mse_train=[]
err = np.zeros(11)
var= np.zeros(11)
#print(len(x_new))
#ytest=np.cos(10*Xtest**2) + 0.1 * np.sin(100*Xtest)
for K in range (Kmax+1):
    v=0
    e=0
    for l in range (len(y)):
        y_new= np.delete(y,l)
        x_new= np.delete(X,l)
        x_test=X[l]
        #print('x_new',x_new)
        #print('y_new',y_new)
        Phi = poly_features1(x_new, K) # N x (K+1) feature matrix
        theta_ml = nonlinear_features_maximum_likelihood(Phi, y_new) # maximum likelihood estimator
        y_pred = poly_features1(x_test, K) @ theta_ml # predicted y-values
        yp=Phi @ theta_ml
        e+=(y_pred-y[l])**2/25
        #v+=(((sum(y_pred - y_new))**2)/24)
        v+=MSE(yp,y_new)/25
        #var=nonlinear_features_maximum_likelihood(poly_features1(x_new, K),y_new)
    err[K]=e
    var[K]= v
plt.figure()
plt.plot(range(11), err,label='MSE')
plt.plot(range(11),var,label='maximum Variance')
#plt.xlim(0,10)
plt.xlabel("Degree of polynomial")
plt.ylabel("Value")
plt.legend()
plt.savefig('1c',dpi=1000)
#plt.title('MSE Vs MAL')
#plt.show()
# +
#######################2######################
# -
# Quick sanity check: arange(0, 1, 0.05) yields 20 sample points.
sampl = np.arange(0,1,0.05)
print(len(sampl))
def phi_map(y, phi, lamda):
    """Ridge-regularised (MAP) estimate: (phi^T phi + lamda I)^-1 phi^T y.

    y:     N x 1 targets
    phi:   N x K feature matrix
    lamda: regularisation strength
    """
    n_features = phi.shape[1]
    regulariser = lamda * np.eye(n_features)
    return inv(phi.T @ phi + regulariser) @ (phi.T @ y)
# +
# Compare MAP (ridge) fits with K = 20 trigonometric frequency pairs for
# three regularisation strengths.
N=100
Xtest=np.reshape(np.linspace(-0.3, 1.3, N), (N, 1))
lamda1=0.0000001
lamda2=10
lamda3=10000
K = 20
p = poly_features1(X, K)
theta_map1 = phi_map(y, p, lamda1)
theta_map2 = phi_map(y, p, lamda2)
theta_map3 = phi_map(y, p, lamda3)
p_test = poly_features1(Xtest, K)
y_predect1 = p_test @ theta_map1
y_predect2 = p_test @ theta_map2
y_predect3 = p_test @ theta_map3
plt.plot(Xtest, y_predect1, label='lamda = 0.0000001')
plt.plot(Xtest, y_predect2, label='lamda = 10')
plt.plot(Xtest, y_predect3, label='lamda = 10000 ')
# Fix: the original printed `theta_map.shape`, a name that was never
# defined (NameError), inside a single-iteration loop guarded by the
# always-true `if 50:`; the loop, dead variables (test/kmax/PLO) and the
# broken print are removed, reporting the fitted shapes instead.
print(p.shape)
print(theta_map1.shape)
print(p_test.shape)
plt.plot(X, y, 'o', label='data')
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend()
plt.savefig('2a', dpi=1000)
plt.show()
# -
# increase lamda => overfitting
# NOTE(review): the line above was a bare non-Python note and raised a
# SyntaxError; it is now a comment. Also, increasing lambda adds MORE
# smoothing (underfitting, not overfitting) — confirm the intended claim.
def loss(y, x):
    """Regularised squared-error loss, summed over all data points.

    y, x: parallel sequences of targets and inputs; each point gets its own
    K=20 trigonometric features and MAP fit with lambda = 0.0001.

    Fix: the original re-initialised `summ = 0` inside the loop, so it
    returned only the LAST point's contribution; the accumulator is now
    initialised once, before the loop.
    """
    summ = 0
    for i in range(len(x)):
        ph = poly_features1(x[i], 20)
        th = phi_map(y[i], ph, 0.0001)
        summ += ((y[i] - (ph @ th)) ** 2) + (0.0001 * (ph).T @ ph)
    return summ
# Degree-1 polynomial features [1, x] of the training inputs, used by the
# marginal-likelihood experiments below.
phi=poly_features(X,1)
def lml(alpha, beta, phi, y):
    """Log marginal likelihood of the linear-Gaussian model.

    alpha: prior variance scale
    beta:  observation noise variance
    phi:   N x K feature matrix
    y:     N x 1 targets
    returns: scalar log p(y | alpha, beta)
    """
    N, K = phi.shape
    # Marginal covariance: alpha * phi phi^T + beta * I  (N x N).
    m = (phi @ (alpha * np.eye(K)) @ phi.T) + (beta * np.eye(N))
    # Use slogdet instead of log(det(m)) so the determinant cannot
    # overflow/underflow for larger N.
    _, logdet = np.linalg.slogdet(m)
    g = y.T @ inv(m) @ y
    return (-N / 2) * np.log(2 * np.pi) - logdet / 2 - g[0][0] / 2
def det_lml(alpha, beta, phi, y):
    """Gradient of the log marginal likelihood w.r.t. (alpha, beta).

    Returns np.array([d_alpha, d_beta]) with the same shapes as the
    original implementation.
    """
    N, K = phi.shape
    # Marginal covariance and its inverse.
    m = (phi @ (alpha * np.eye(K)) @ phi.T) + (beta * np.eye(N))
    m_inv = np.linalg.inv(m)
    outer = phi @ phi.T
    # Data-fit term minus trace term for each hyperparameter.
    d_alpha = 0.5 * (y.T @ m_inv @ outer @ m_inv @ y) - 0.5 * np.trace(m_inv @ outer)
    d_beta = 0.5 * (y.T @ m_inv @ m_inv @ y) - 0.5 * np.trace(m_inv)
    return np.array([d_alpha, d_beta])
# Evaluate the gradient at (alpha, beta) = (1, 1), then unpack its two
# components for the hand-rolled optimisation loop below.
det_lml (1,1, phi, y)
d_alpha,d_beta=det_lml (1, 1, phi, y)
# NOTE(review): this grid/gradient loop is broken as written and appears
# superseded by the working ascent loop further below:
#  - `w_current - w_prev != 0.001` is an elementwise array comparison, so
#    using it as a `while` condition raises "truth value ... is ambiguous";
#  - `.all` is referenced without being called, so the `if` tests the bound
#    method object (always truthy), never the intended condition;
#  - d_alpha/d_beta are the fixed gradients from (1, 1) above and are never
#    refreshed from w_current inside the loop.
alpha_array=[]
beta_array=[]
lr=0.0001
beta=1
w_prev=np.array([[0,0]])
w_current=np.array([[1,1]])
for alpha in np.arange (0,1,0.1):
    for beta in np.arange (0,1,0.1):
        while (w_current-w_prev != 0.001):
            alpha=alpha-lr*d_alpha
            beta=beta-lr*d_beta
            w_current=det_lml(alpha,beta,phi, y)
            if ((w_current-w_prev).all == 0.001):
                alpha_array.append(w_current[0])
                print (alpha)
                beta_array.append(w_current[1])
                print (beta)
                #break
            w_prev=w_current
#alpha_array.append(w[0])
#beta_array.append(w[1])
def loss_mml(alpha, beta, phi, y, lr):
    """Take one gradient step on (alpha, beta) for the log marginal
    likelihood and return the updated pair.

    Fix: the original computed the update but returned nothing, so callers
    could never observe the new values.
    """
    d_alpha, d_beta = det_lml(alpha, beta, phi, y)
    alpha = alpha - lr * d_alpha
    beta = beta - lr * d_beta
    return alpha, beta
# +
def Circle(x, y):
    """Squared distance of the point (x, y) from the origin."""
    return x ** 2 + y ** 2
# Draw the unit circle as the level set x^2 + y^2 = 1 on a 400x400 grid.
# NOTE(review): this rebinds X and Y to meshgrid arrays, shadowing the
# training inputs defined earlier — later cells that use X, y still refer
# to the training data only because y (lowercase) is untouched.
xx=np.linspace(-2,2,400)
yy=np.linspace(-2,2,400)
[X,Y]=np.meshgrid(xx,yy)
Z=Circle(X,Y)
plt.figure()
plt.contour(X,Y,Z,[1])
plt.show()
# -
# Gradient ASCENT on the log marginal likelihood: theta = (alpha, beta) is
# moved along +gradient until the step size falls below epsilon or the
# iteration cap is reached; the trajectory is recorded in `thetas`.
lr = 0.00001
alpha_0 = 1
beta_0 = 1
theta = np.array([alpha_0, beta_0])
epsilon = 1e-9
err = 10 * epsilon  # seed larger than epsilon so the loop starts
i = 0
thetas = []
thetas.append([alpha_0,beta_0])
max_iters = 500000
while (err > epsilon) and i < max_iters:
    step = lr * det_lml(theta[0], theta[1] , Phi,y).squeeze()
    theta = theta + step
    thetas.append(theta.copy())
    err = max(abs(step))  # infinity-norm of the step as convergence test
    i += 1
# Evaluate the log marginal likelihood on an (alpha, beta) grid and overlay
# the gradient-ascent trajectory computed above.
xplot = np.linspace(0.001, 5, 100)
yplot = np.linspace(0.001, 5, 100)
xplot, yplot = np.meshgrid(xplot, yplot)
z = np.zeros(xplot.shape)
for i in range(xplot.shape[0]):
    for j in range(xplot.shape[1]):
        z[i, j] = lml(xplot[i, j], yplot[i, j], Phi, y).copy().squeeze()
z = np.clip(z, -70, 0)  # keep the contour range readable
plt.contour(xplot, yplot, z, levels=np.linspace(-70, -25, 50))
plt.colorbar()
thetas = np.array(thetas)
plt.plot(*(thetas.T))
print(thetas[-1])
plt.xlabel("$Alpha$")
plt.ylabel("$Beta$")
plt.plot(thetas[0, 0], thetas[0, 1], 'o')
# Fix: the end-of-path marker used thetas[-1, 0] for BOTH coordinates,
# plotting the 'x' off the trajectory; use (alpha, beta) of the last step.
plt.plot(thetas[-1, 0], thetas[-1, 1], 'x')
plt.legend(["data", "starting point", "global maxima "])
plt.legend()
plt.savefig('3a', dpi=1000)
| linear_regression_Assignment_Montaser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # External Files
# Notebooks often incorporate figures, data, or video. Any external files must be appropriately licensed for reuse. If that isn't possible, then that information should be accessed by linking to a resource showing the user how to properly access that information.
#
# To facilitate use of the notebooks, external resources should be referenced by an external link whenever possible. If the resource must be included within the repository, the link should be to the publicly accessible repository. This practice enables cross-platform use of notebooks and streamlines the import and use of notebooks by other users.
# ## Figures
#
# Figures included within the repository should be located within a common figures directory. This practice enables reuse of figures, and streamlines the editing and maintenance of figures. Figures should be `.png` or `.jpg` format as appropriate, and resized for use with the standard markdown `![]()` markup. Use of HTML image tags is discouraged and reported as 'lint'.
# ## Data
#
# Data files distributed with the repository should be located within a common data directory.
# ## Embedding YouTube video
# +
from IPython.display import YouTubeVideo
# Embed a YouTube video by its ID; renders an inline player in the notebook.
YouTubeVideo('2eDGHy5iu_M')
# -
| notebooks/01.03-External-Files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/john-s-butler-dit/Numerical-Analysis-Python/blob/master/Chapter%2002%20-%20Higher%20Order%20Methods/202_Taylor%20Method%20Error%20Example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="xyThqXAkOEHv"
# # 1st vs 2nd order Taylor methods
#
# ## Initial Value Problem
# Consider the linear first-order differential equation
# \begin{equation} y^{'}=t-y, \ \ (0 \leq t \leq 4), \end{equation}
# with the initial condition
# \begin{equation}y(0)=1, \end{equation}
# For N=4
# with the analytic (exact) solution
# \begin{equation} y= 2e^{-t}+t-1. \end{equation}
#
# ## Taylor Solution
#
# \begin{equation} f(t,y)=t-y, \end{equation}
# differentiate with respect to $t$,
# \begin{equation} f'(t,y)=1-y'=1-t+y, \end{equation}
# This gives the first order Taylor,
# \begin{equation}T^1(t_i,w,i)=f(t_i,w_i)=t_i-w_i, \end{equation}
# and the second order Taylor,
# \begin{equation}
# T^2(t_i,w,i)=f(t_i,w_i)+\frac{h}{2}f'(t_i,w_i)=t_i-w_i+\frac{h}{2}(1-t_i+w_i).\end{equation}
#
# The first order Taylor difference equation, which is identical to the Euler method, is
#
# \begin{equation}
# w_{i+1}=w_i+h(t_i-w_i). \end{equation}
# The second order Taylor difference equation is
# \begin{equation}
# w_{i+1}=w_i+h(t_i-w_i+\frac{h}{2}(1-t_i+w_i)). \end{equation}
# + id="ZQvUtvmZOEHy"
import numpy as np
import math
# %matplotlib inline
import matplotlib.pyplot as plt # side-stepping mpl backend
import matplotlib.gridspec as gridspec # subplots
import warnings
warnings.filterwarnings("ignore")
# + id="BzW5RnluOEHz"
def Second_order_taylor(N, IC):
    """Solve y' = t - y, y(0) = IC on [0, 4] with the 1st- and 2nd-order
    Taylor methods using N steps, and plot both against the analytic
    solution y = 2e^{-t} + t - 1, together with the absolute errors.

    N:  number of steps (h = 4 / N)
    IC: initial condition y(0)
    """
    x_end = 4
    x_start = 0
    initial_condition = IC
    h = x_end / N
    n_points = N + 1  # grid points including t = 0
    numerical_second = np.zeros(n_points)
    numerical_first = np.zeros(n_points)
    t = np.zeros(n_points)
    analytic = np.zeros(n_points)
    t[0] = x_start
    numerical_second[0] = initial_condition
    numerical_first[0] = initial_condition
    analytic[0] = initial_condition
    for i in range(1, n_points):
        # First-order Taylor (Euler): w_{i+1} = w_i + h f(t_i, w_i).
        numerical_first[i] = numerical_first[i-1] + h*(t[i-1] - numerical_first[i-1])
        # Second-order Taylor adds the (h^2/2) f'(t_i, w_i) correction.
        numerical_second[i] = numerical_second[i-1] + h*(t[i-1] - numerical_second[i-1] + h/2*(1 - t[i-1] + numerical_second[i-1]))
        t[i] = t[i-1] + h
        analytic[i] = 2*math.exp(-t[i]) + t[i] - 1
    fig = plt.figure(figsize=(10, 4))
    # --- left: the two numerical solutions
    ax = fig.add_subplot(1, 3, 1)
    plt.plot(t, numerical_second, color='blue', label='Second Order')
    plt.plot(t, numerical_first, color='red', label='First Order')
    plt.legend(loc='best')
    plt.title('Numerical Solution h=%s' % (h))
    # --- middle: the analytic solution
    ax = fig.add_subplot(1, 3, 2)
    plt.plot(t, analytic, color='blue')
    plt.title('Analytic Solution')
    # --- right: absolute errors of both methods
    ax = fig.add_subplot(1, 3, 3)
    plt.plot(t, np.abs(analytic - numerical_second), color='blue', label='Second Order Error')
    plt.plot(t, np.abs(analytic - numerical_first), color='red', label='First Order Error')
    plt.title('Error')
    plt.legend(loc='best')
    # Fix: the title previously read y' = y - t, which is not the ODE
    # actually integrated above (f(t, y) = t - y).
    fig.suptitle(r"$y'=t-y$", fontsize=20)
    plt.tight_layout()
    plt.subplots_adjust(top=0.85)
# + id="qCELqgOtOEH0" outputId="eec370ed-8c88-41e8-e4eb-8aa508f8fc8f" colab={"base_uri": "https://localhost:8080/", "height": 302}
# Run with 40 steps (h = 0.1) and initial condition y(0) = 1.
Second_order_taylor(40,1)
# + id="dmnDk7IuOEH1"
| Chapter 02 - Higher Order Methods/202_Taylor Method Error Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Introduction to single-cell multi-view profiler (scMVP)
# In this introductory tutorial, we present the different tasks of a scMVP workflow
# 1. Loading the multi-omics data
# 2. Training the multi-view model
# 3. Retrieving the common latent space and imputed multi-omics values
# 4. Perform cell clustering and differential expression
# 5. Visualize the common latent space and clustering with umap
# 6. The differential gene cluster identification
# + pycharm={"is_executing": true, "name": "#%%\n"}
# %matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scMVP.dataset import scienceDataset
from scMVP.models import VAE
from scMVP.inference import UnsupervisedTrainer
from scMVP.inference import MultiPosterior, MultiTrainer
import torch
from scMVP.models.multi_vae import Multi_VAE
## Visualizing the latent space with scanpy
import scanpy as sc
import anndata
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Loading data
#
# Loading the sci-CAR cell line dataset described in Junyue Cao et al. (2018).
#
# * <NAME>, et al. "Joint profiling of chromatin accessibility and gene expression in thousands of single cells." Science 361.6409 (2018): 1380-1385.
#
# Data url: https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE117089&format=file
# + pycharm={"is_executing": true, "name": "#%%\n"}
def allow_mmvae_for_test():
print("Testing the basic tutorial scMVP")
test_mode = False
save_path = "data/"
n_epochs_all = None
show_plot = True
if not test_mode:
save_path = "dataset/"
# Download/load the sci-CAR cell-line mixture dataset (RNA + binarised ATAC).
# NOTE(review): save_path must exist at module level before this runs — it is
# only assigned inside allow_mmvae_for_test(), which is never called in this
# notebook; confirm the intended setup cell.
dataset = scienceDataset(dataset_name="CellLineMixture", save_path=save_path, measurement_names_column=1, is_binary = True)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Data filtering
# + pycharm={"is_executing": true, "name": "#%%\n"}
def filter_dataset(dataset):
    """Filter low-quality genes and cells in place.

    Returns the filtered dataset together with the boolean mask of cells
    that were kept.
    """
    # Keep genes detected in more than 1% of cells, then cap at 10k genes.
    n_cells = dataset.X.shape[0]
    detected_per_gene = (dataset.X > 0).sum(axis=0).ravel()
    dataset.update_genes(detected_per_gene > 0.01 * n_cells)
    dataset.subsample_genes(new_n_genes=10000)
    # Keep cells with >50 detected genes and total ATAC signal at or above
    # the 1st percentile (originally experimented with the 10th).
    enough_genes = (dataset.X > 0).sum(axis=1).ravel() > 50
    atac_totals = dataset.atac_expression.sum(axis=1)
    enough_atac = atac_totals >= np.percentile(atac_totals, 1)
    keep_mask = np.logical_and(enough_genes, enough_atac)
    dataset.update_cells(keep_mask)
    return dataset, keep_mask
# NOTE(review): test_mode is only assigned inside allow_mmvae_for_test(),
# which is never called in this notebook, so this guard raises NameError as
# written — confirm which setup cell was meant to run first.
if test_mode is False:
    dataset, inds_to_keep = filter_dataset(dataset)
# -
# * __n_epochs__: Maximum number of epochs to train the model. If the likelihood change is small than a set threshold training will stop automatically.
# * __lr__: learning rate. Set to 0.001 here.
# * __use_batches__: If the value of true than batch information is used in the training. Here it is set to false because the cortex data only contains one batch.
# * __use_cuda__: Set to true to use CUDA (GPU required)
# * __n_centroids__: Set the number of cell types
# * __n_alfa__: Set the weight of KL loss
# + pycharm={"is_executing": true, "name": "#%%\n"}
n_epochs = 50 if n_epochs_all is None else n_epochs_all  # epoch cap unless overridden
lr = 1e-3            # learning rate
use_batches = False  # single-batch dataset, no batch correction
use_cuda = True      # GPU required when True
n_centroids = 5      # expected number of cell types (GMM components)
n_alfa = 1.0         # weight of the KL term
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Pre-training
# runing pre-train vae to initialize the Gaussian Mixture Model
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Pre-train a plain VAE on the RNA counts to initialise the GMM prior.
pre_vae = VAE(dataset.nb_genes, n_batch=256)
pre_trainer = UnsupervisedTrainer(
    pre_vae,
    dataset,
    train_size=0.75,
    use_cuda=use_cuda,
    frequency=5
)
is_test_pragram = False
if is_test_pragram:
    pre_trainer.train(n_epochs=n_epochs, lr=lr)
    torch.save(pre_trainer.model.state_dict(), '%s/pre_trainer6.pkl' % save_path)
# Reuse a cached checkpoint when available, otherwise train and save one.
if os.path.isfile('%s/pre_trainer6.pkl' % save_path):
    pre_trainer.model.load_state_dict(torch.load('%s/pre_trainer6.pkl' % save_path))
    pre_trainer.model.eval()
else:
    #pre_trainer.model.init_gmm_params(dataset)
    pre_trainer.train(n_epochs=n_epochs, lr=lr)
    torch.save(pre_trainer.model.state_dict(), '%s/pre_trainer6.pkl' % save_path)
# pretrainer_posterior:
full = pre_trainer.create_posterior(pre_trainer.model, dataset, indices=np.arange(len(dataset)))
latent, batch_indices, labels = full.sequential().get_latent()
batch_indices = batch_indices.ravel()
imputed_values = full.sequential().imputation()
sample_latents = torch.tensor([])
samples = torch.tensor([])
sample_labels = torch.tensor([])
# Feed the imputed values back through the encoder in chunks of 256 cells.
for tensors_list in range(int(len(imputed_values)/256)+1):
    # NOTE(review): this condition compares an int to a range object, so it
    # is ALWAYS False — the padded-final-chunk branch below never runs (and
    # if it did, `y[...,:]` would 2-D-index the 1-D tensor y and raise).
    # The unguarded path after it happens to work because numpy/torch clamp
    # out-of-range slices on the final partial chunk. Confirm intent.
    if tensors_list == range(int(len(imputed_values)/256)):
        x = torch.zeros((256,len(imputed_values[0])))
        x[0:len(x)-256*tensors_list,:] = torch.tensor(imputed_values[tensors_list * 256:len(imputed_values), :])
        y = torch.zeros((256))
        y[0:len(x)-256*tensors_list,:] = torch.tensor(dataset.labels[tensors_list * 256:len(imputed_values)].astype(int))
        temp_samples = pre_trainer.model.get_latents(x,y)
        for temp_sample in temp_samples:
            sample_latents = torch.cat((sample_latents, temp_sample[0:len(x)-256*tensors_list,:].float()))
    temp_samples = pre_trainer.model.get_latents(
        x=torch.tensor(imputed_values[tensors_list * 256:(1 + tensors_list) * 256, :]),
        y=torch.tensor(dataset.labels[tensors_list * 256:(1 + tensors_list) * 256].astype(int)))
    for temp_sample in temp_samples:
        sample_latents = torch.cat((sample_latents, temp_sample.float()))
# Visualize the pre-trained latent space with UMAP, coloured by cell type.
prior_adata = anndata.AnnData(X=dataset.X)
prior_adata.obsm["X_multi_vi"] = sample_latents.detach().numpy()
prior_adata.obs['cell_type'] = torch.tensor(dataset.labels[0:len(sample_latents)].astype(int))
sc.pp.neighbors(prior_adata, use_rep="X_multi_vi", n_neighbors=15)
sc.tl.umap(prior_adata, min_dist=0.1)
fig, ax = plt.subplots(figsize=(7, 6))
sc.pl.umap(prior_adata, color=["cell_type"], ax=ax, show=show_plot)
# -
# # Training scMVP
# We now create the scMVP model and the trainer object.
#
# If a pre-trained model already exist in the save_path then load the same model rather than re-training it. This is particularly useful for large datasets.
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Build the multi-view VAE over RNA + ATAC and its trainer.
multi_vae = Multi_VAE(dataset.nb_genes, len(dataset.atac_names), n_batch=256, n_centroids=n_centroids, n_alfa = n_alfa, mode="mm-vae") # should provide ATAC num, alfa, mode and loss type
trainer = MultiTrainer(
    multi_vae,
    dataset,
    train_size=0.75,
    use_cuda=use_cuda,
    frequency=5,
)
# Initialise the GMM prior from the pre-trained latent embedding.
clust_index_gmm = trainer.model.init_gmm_params(sample_latents.detach().numpy())
is_test_pragram = False
if is_test_pragram:
    trainer.train(n_epochs=n_epochs, lr=lr)
    torch.save(trainer.model.state_dict(), '%s/multi_vae_21.pkl' % save_path)
# Reuse a cached checkpoint when available, otherwise train and save one.
if os.path.isfile('%s/multi_vae_21.pkl' % save_path):
    trainer.model.load_state_dict(torch.load('%s/multi_vae_21.pkl' % save_path))
    trainer.model.eval()
else:
    trainer.train(n_epochs=n_epochs, lr=lr)
    torch.save(trainer.model.state_dict(), '%s/multi_vae_21.pkl' % save_path)
# + [markdown] pycharm={"name": "#%% md\n"}
# Plotting the likelihood change across the n epochs of training: blue for training error and orange for testing error.**
# + pycharm={"is_executing": true, "name": "#%%\n"}
## If you train your own model, you can plot the ELBO value during training.
## If you load a pre-trained model, the history (and so the plot) is empty.
elbo_train_set = trainer.history["elbo_train_set"]
elbo_test_set = trainer.history["elbo_test_set"]
x = np.linspace(0, 500, (len(elbo_train_set)))
plt.plot(x, elbo_train_set)
plt.plot(x, elbo_test_set)
plt.ylim(1150, 1600)
# + [markdown] pycharm={"name": "#%% md\n"}
# query the imputed values via the `imputation` method of the posterior object and get common latent embedding. **Note for advanced users:** imputation is an ambiguous term and there are two ways to perform imputation in scVI. The first way is to query the **mean of the negative binomial** distribution modeling the counts. This is referred to as `sample_rate` in the codebase and can be reached via the `imputation` method. The second is to query the **normalized mean of the same negative binomial** (please refer to the scVI manuscript). This is referred to as `sample_scale` in the codebase and can be reached via the `get_sample_scale` method. In differential expression for example, we of course rely on the normalized latent variable which is corrected for variations in sequencing depth.
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Query the joint posterior for imputed RNA/ATAC values and recover the
# common latent embedding in chunks of 256 cells.
full = trainer.create_posterior(trainer.model, dataset, indices=np.arange(len(dataset)),type_class=MultiPosterior)
imputed_values = full.sequential().imputation()
sample_latents = torch.tensor([])
sample_labels = torch.tensor([])
rna_imputation = imputed_values[0]   # imputed RNA matrix
atac_imputation = imputed_values[3]  # imputed ATAC matrix
temp_label = []
sample_latents = torch.tensor([])
samples = torch.tensor([])
sample_labels = torch.tensor([])
if len(imputed_values) >= 3:
    temp_label = imputed_values[2]  # per-cell labels, when provided
for tensors_list in range(int(len(imputed_values[0])/256)+1):
    # Pass real labels when available, otherwise zeros of chunk size.
    if temp_label.any():
        temp_samples = trainer.model.get_latents(x_rna=torch.tensor(rna_imputation[tensors_list*256:(1+tensors_list)*256,:]),
                                             x_atac=torch.tensor(atac_imputation[tensors_list*256:(1+tensors_list)*256,:]),
                                             y=torch.tensor(temp_label[tensors_list*256:(1+tensors_list)*256]))
    else:
        temp_samples = trainer.model.get_latents(x_rna=torch.tensor(rna_imputation[tensors_list*256:(1+tensors_list)*256,:]),
                                             x_atac=torch.tensor(atac_imputation[tensors_list*256:(1+tensors_list)*256,:]),
                                             y=torch.tensor(np.zeros(256)))
    for temp_sample in temp_samples:
        #sample_latents = torch.cat((sample_latents, temp_sample[2].float()))
        sample_latents = torch.cat((sample_latents, temp_sample[0][0].float()))
        sample_labels = torch.cat((sample_labels, torch.tensor(temp_label[tensors_list*256:(1+tensors_list)*256]).float()))
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Cell clustering
# Perform cell clustering and merging the rare clusters which less than 10 cells
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Cluster the joint latent space with the GMM, then merge rare clusters
# (<= 10 cells) into the first large cluster (> 100 cells) and re-number
# the remaining cluster ids to be contiguous from 0.
clust_index_gmm = trainer.model.init_gmm_params(sample_latents.detach().numpy())
gmm_clus_index = clust_index_gmm.reshape(-1,1)
for i in range(len(np.unique(gmm_clus_index))):
    if len(gmm_clus_index[gmm_clus_index == i]) <= 10:
        for j in range(len(np.unique(gmm_clus_index))):
            if len(gmm_clus_index[gmm_clus_index == j]) > 100:
                gmm_clus_index[gmm_clus_index == i] = j
                break
# Compact the surviving cluster labels to 0..k-1.
unique_gmm_clus_index = np.unique(gmm_clus_index)
for i in range(len(unique_gmm_clus_index)):
    gmm_clus_index[gmm_clus_index == unique_gmm_clus_index[i]] = i
# -
# Visualizing common latent embedding and cell clustering by scMVP
# + pycharm={"is_executing": true, "name": "#%%\n"}
# UMAP of the common latent space: first coloured by the GMM clusters,
# then by the imputed labels, and finally by Louvain communities.
posterior_adata = anndata.AnnData(X=rna_imputation)
posterior_adata.obsm["X_multi_vi"] = sample_latents.detach().numpy()
posterior_adata.obs['cell_type'] = torch.tensor(clust_index_gmm.reshape(-1,1))
sc.pp.neighbors(posterior_adata, use_rep="X_multi_vi", n_neighbors=15)
sc.tl.umap(posterior_adata, min_dist=0.1)
fig, ax = plt.subplots(figsize=(7, 6))
sc.pl.umap(posterior_adata, color=["cell_type"], ax=ax, show=show_plot)
# Same embedding, coloured by the labels recovered during imputation.
posterior_adata.obs['cell_type'] = torch.tensor(sample_labels.reshape(-1,1))
sc.pp.neighbors(posterior_adata, use_rep="X_multi_vi", n_neighbors=15)
sc.tl.umap(posterior_adata, min_dist=0.1)
#matplotlib.use('TkAgg')
fig, ax = plt.subplots(figsize=(7, 6))
sc.pl.umap(posterior_adata, color=["cell_type"], ax=ax, show=show_plot)
sc.tl.louvain(posterior_adata)
sc.pl.umap(posterior_adata, color=['louvain'])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## differential gene and peak analysis
# Identification differential genes and peaks in each cell cluster
# + pycharm={"is_executing": true, "name": "#%%\n"}
# Rank differential genes per GMM cluster (stored under the 'louvain' key)
# and pull out names, adjusted p-values and log fold-changes.
posterior_adata.obs['louvain'] = torch.tensor(gmm_clus_index.reshape(-1,1))
sc.tl.rank_genes_groups(posterior_adata, 'louvain')
sc.pl.rank_genes_groups(posterior_adata, n_genes=10, sharey=False)
diff_top_gene_set = posterior_adata.uns['rank_genes_groups']
diff_top_gene_set = (diff_top_gene_set['names'])
diff_top_gene_pvalue_set = posterior_adata.uns['rank_genes_groups']
diff_top_gene_pvalue_set = (diff_top_gene_pvalue_set['pvals_adj'])
diff_top_gene_foldchange_set = posterior_adata.uns['rank_genes_groups']
diff_top_gene_foldchange_set = (diff_top_gene_foldchange_set['logfoldchanges'])
# -
# Repeat the differential analysis on the imputed ATAC peaks, reusing the
# same cluster assignment.
atac_posterior_adata = anndata.AnnData(X=atac_imputation)
atac_posterior_adata.obs['louvain'] = posterior_adata.obs['louvain']
sc.tl.rank_genes_groups(atac_posterior_adata, 'louvain',n_genes=1000)
sc.pl.rank_genes_groups(atac_posterior_adata, n_genes=10, sharey=False)
atac_diff_top_gene_set = atac_posterior_adata.uns['rank_genes_groups']
atac_diff_top_gene_set = (atac_diff_top_gene_set['names'])
atac_diff_top_gene_pvalue_set = atac_posterior_adata.uns['rank_genes_groups']
atac_diff_top_gene_pvalue_set = (atac_diff_top_gene_pvalue_set['pvals_adj'])
atac_diff_top_gene_foldchange_set = atac_posterior_adata.uns['rank_genes_groups']
atac_diff_top_gene_foldchange_set = (atac_diff_top_gene_foldchange_set['logfoldchanges'])
| demos/scMVP_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the sky-survey observations table.
space=pd.read_csv('skyspace.csv')
# Peek at the last rows to sanity-check the load.
space.tail()
# ##### Observation:
# *objid = Object Identifier
# *ra = J2000 Right Ascension (r-band)
# *dec = J2000 Declination (r-band)
# *u = better of DeV/Exp magnitude fit
# *g = better of DeV/Exp magnitude fit
# *r = better of DeV/Exp magnitude fit
# *i = better of DeV/Exp magnitude fit
# *z = better of DeV/Exp magnitude fit
# *run = Run Number
# *rerun = Rerun Number
# *camcol = Camera column
# *field = Field number
# *specobjid = Object Identifier
# *redshift = Final Redshift
# *plate = plate number
# *mjd = MJD of observation
# *fiberid = fiber ID
# *Class = Galaxy,Star,Quasar
#
#
# Class is our target variable. Class has 3 values — GALAXY, STAR, QSO — hence this is a classification problem.
# Dataset dimensions (rows, columns).
space.shape
# Column dtypes and non-null counts.
space.info()
# ### Checking for missing or null values
# Per-column count of null values.
space.isnull().sum()
# Heatmap view of the same null mask — an all-dark map means no nulls.
import seaborn as sns
import matplotlib.pyplot as plt
sns.heatmap(space.isnull())
plt.show()
# ##### Observed:
# There are No null values
for column in space.columns:
print(f"{column}: Number of unique values {space[column].nunique()}")
print('------------------------------------------------------------')
# ##### Observation:
# In columns like objid,rerun only one unique value we can drop them .
# Final sanity check: confirm there are no missing values anywhere.
space.isnull().values.any()
# Drop 'objid' and 'rerun': each contains only a single unique value, so they
# carry no information for classification.
# BUG FIX: the original read `space1.drop(...)`, but `space1` was never
# defined and raised a NameError; the intended dataframe is `space`.
space = space.drop(['objid', 'rerun'], axis=1)
# ##### Now our data is ready for visualization without any missing values.
# (`drop` already returns a DataFrame, so re-wrapping with pd.DataFrame is unnecessary.)
space.head()
space.columns
space.dtypes
# ##### For Analyzing the data with target i.e class we to have class from object to numeric type
space['class'].unique()
space['class']=space['class'].map({'STAR':0,'GALAXY':1,'QSO':2})
space['class'].dtypes
space['class'].unique()
# ###### Observation:
# Here 0 stands for star,1 stands for Galaxy and 2 stands for QSO
# ### Summary Statistics
space.describe()
# ##### Observation:
# *There is a huge gap between the 75% and max values in columns like 'dec','run','field','specobjid','redshift','plate','mjd','fiberid', which means there are some outliers.
#
# *There is some difference between the mean and std in columns like 'ra','dec','u','g','r','i','z','run','rerun','field','plate','mjd','fiberid'.
# ### To check the correlation
sky=space.corr()
sky
plt.figure(figsize=(10,8))
sns.heatmap(sky,annot=True,cmap='Blues')
plt.show()
# ##### Obervation:
# *u,g,r,i,z columns are correlated to each other
# * class is less correlated with plate,mjd and highly correlated with redshift
plt.figure(figsize=(10,8))
space.drop('class',axis=1).corrwith(space['class']).plot(kind='bar',grid=True)
plt.xticks(rotation=20)
plt.title('Correlation with target variables')
# ### Univariate Analysis
space.head()
# +
# checking class count of ra
plt.subplots(figsize=(10,8))
sns.countplot(x='class',data=space)
plt.title('class count of ra')
plt.xlabel('star=0,galaxy=1,QSO=2')
plt.ylabel('class count')
plt.show()
print(space['class'].value_counts())
# -
# ##### Observation:
# Galaxy as highest number than star and QSO
plt.subplots(figsize=(30,10))
sns.countplot(x='run',data=space)
plt.show()
# ##### Observation:
# * 756 is repeated more number of times
# * 752 is second value which is repeated more number of times
# * 1350 is third value which is repeated more number of times and 1140 is fourth value
# +
# camcol plotting
plt.subplots(figsize=(10,6))
sns.countplot(x='camcol',data=space)
plt.title('camcol')
plt.xticks(rotation=20)
plt.show()
print(space['camcol'].value_counts())
# -
space['plate'].hist()
plt.xlabel('plate values')
plt.ylabel('count')
plt.title('Plotting plate')
plt.show()
# #### Obervation:
# There more plate values between 400 to 1100
space['i'].hist()
plt.xlabel('i values')
plt.ylabel('count')
plt.title('Plotting i')
plt.show()
# ##### Observation:
# i is like normal distribution
space['u'].hist()
plt.xlabel('u values')
plt.ylabel('count')
plt.title('Plotting u')
plt.show()
# #### Observation:
# u is left skewed
space['g'].hist()
plt.xlabel('g values')
plt.ylabel('count')
plt.title('Plotting g')
plt.show()
space['z'].hist()
plt.xlabel('z values')
plt.ylabel('count')
plt.title('Plotting z')
plt.show()
space['r'].hist()
plt.xlabel('r values')
plt.ylabel('count')
plt.title('Plotting r')
plt.show()
# ##### Observation:
# r is like normal distribution.
# ### Bivariate Analysis
facet=sns.FacetGrid(space,col='class')
facet.map(sns.distplot,'camcol')
plt.show()
plt.figure(figsize=(15,8))
sns.countplot(x='run',hue='class',data=space)
plt.show()
# ##### Observation:
# * Run 756 has the highest number of Galaxy, Star and QSO observations
# * Run 752 has the second highest number of Galaxy, Star and QSO observations
# * Run 1345 has the third highest number of Galaxy, Star and QSO observations
sns.barplot(x='class',y='dec',data=space)
plt.show()
sns.barplot(x='class',y='ra',data=space)
plt.show()
sns.barplot(x='class',y='u',data=space)
plt.show()
sns.barplot(x='class',y='i',data=space)
plt.show()
sns.barplot(x='class',y='g',data=space)
plt.show()
sns.barplot(x='class',y='r',data=space)
plt.show()
sns.barplot(x='class',y='z',data=space)
plt.show()
sns.barplot(x='class',y='field',data=space)
plt.show()
sns.barplot(x='class',y='mjd',data=space)
plt.show()
# ## Checking skewness
space.skew()
# ##### Observation:
# in columns like ra,u,camcol data is left skewed.
# in columns like dec,specobjid,plate,redshift,mjd data is right skewed
collist=space.columns.values
ncol=15
nrow=10
for i in space.columns:
sns.distplot(space[i])
plt.show()
#Treating skewness via log method
for col in space.columns:
if space[col].skew()>0.55:
space[col]=np.cbrt(space[col])
space.skew()
# ##### Observation:
# Skewness is reduced.
# ## Checking for outliers
space['dec'].plot.box()
space['r'].plot.box()
# ##### Observation:
# in 'dec' there no outliers.
# in 'r' column there are some outliers
# Draw a box plot for every column to check for outliers.
# BUG FIX: the original looped over range(1, len(col)) and plotted at subplot
# position i+1, which skipped the first column entirely and left the first
# subplot axes blank. Enumerate every column and place column i at position i+1.
col = space.columns.values
ncol = 10
nrow = 10
plt.figure(figsize=(15, 30))
for i in range(len(col)):
    plt.subplot(nrow, ncol, i + 1)
    sns.boxplot(space[col[i]], color='green', orient='v')
plt.tight_layout()
# ##### Observation:
# There are outliers in columns like u,g,r,i,z,redshift
# ## Removing Outliers
#Removing outliers
from scipy.stats import zscore
# Absolute z-score of every cell: |(value - column mean) / column std|.
z_score=abs(zscore(space))
print(space.shape)
# Keep a row only if every one of its features lies within 3 standard
# deviations of the column mean; rows with any |z| >= 3 are dropped as outliers.
spacesky=space.loc[(z_score<3).all(axis=1)]
print(spacesky.shape)
# ##### Observation:
# 712 rows were removed as outliers
# ## Model Training
spacesky.head()
#Seprating into input and output variables
df_x=spacesky.drop(['class'],axis=1)
y=pd.DataFrame(spacesky['class'])
df_x.head()
y.head()
#scaling the input variable
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x=sc.fit_transform(df_x)
x=pd.DataFrame(x,columns=df_x.columns)
x.shape
y.shape
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
import warnings
warnings.filterwarnings('ignore')
# ## Finding the Best Parameters
from sklearn import svm
svc=svm.SVC()
parameters={'kernel':['linear','rbf','poly'],'C':[1.0]}
gd=GridSearchCV(svc,parameters)
gd.fit(x,y)
sorted(gd.cv_results_.keys())
print(gd.best_params_)
# ##### Observation:
# c:1,kernel=Linear are best parameters for SVC
dtc=DecisionTreeClassifier()
parameters={'criterion':['gini','entropy']}
gd=GridSearchCV(dtc,parameters)
gd.fit(x,y)
sorted(gd.cv_results_.keys())
print(gd.best_params_)
# ##### observation:
# criterion=entropy is best parameter for decision tree classifier
# Candidate classifiers, using the best hyper-parameters found by the grid
# searches above (entropy criterion for the tree, linear kernel for SVC).
model = [DecisionTreeClassifier(criterion='entropy'), KNeighborsClassifier(),
         SVC(kernel='linear'), GaussianNB(), RandomForestClassifier(),
         AdaBoostClassifier(), GradientBoostingClassifier(),
         BaggingClassifier(), ExtraTreesClassifier()]
# The split is deterministic (random_state=42), so hoist it out of the loop:
# every iteration produced the exact same partition anyway, and all models
# should be compared on the same train/test split.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42, test_size=0.20)
for m in model:
    m.fit(x_train, y_train)
    print('Score of', m, 'is:', m.score(x_train, y_train))
    predm = m.predict(x_test)
    print('Score:')
    print('accuracy_score :', accuracy_score(y_test, predm))
    print('confusion_matrix :', confusion_matrix(y_test, predm))
    print('classification_report', classification_report(y_test, predm))
    print('*******************************************************************************************************')
    print('\n')
# ##### Observation:
# * score of DecisionTreeClassifier = 100% and accuracy score = 98%
# * score of KNeighborsClassifier = 98% and accuracy score = 96%
# * Score of SVC = 99% and accuracy score = 98%
# * Score of GaussianNB = 97% and accuracy score = 96%
# * Score of RandomForestClassifier = 100% and accuracy socre = 98%
# * Score of AdaBoostClassifier = 98% and accuracy score = 98%
# * Score of GradientBoostingClassifier= 99% and accuracy score = 98%
# * Score of BaggingClassifier = 99% and accuracy score = 98%
# * Score of ExtraTreesClassifier = 100% and accuracy score = 98%
# from above observation we can observe that RandomForestClassifier,DecisionTreeClassifier,ExtraTreesClassifier gives best result and accuracy score of them are equal
from sklearn.model_selection import cross_val_score
# Cross-validate every candidate model with 5 folds.
# CONSISTENCY FIX: the grid search above found criterion='entropy' to be the
# best DecisionTreeClassifier parameter, but this list used 'gini'; use
# 'entropy' so the cross-validation evaluates the tuned model.
model = [DecisionTreeClassifier(criterion='entropy'), KNeighborsClassifier(),
         SVC(kernel='linear'), GaussianNB(), RandomForestClassifier(),
         AdaBoostClassifier(), GradientBoostingClassifier(),
         BaggingClassifier(), ExtraTreesClassifier()]
for m in model:
    score = cross_val_score(m, x, y, cv=5)
    print('Score of', m, 'is:', score)
    print('Mean score:', score.mean())
    print('Standard deviation:', score.std())
    print('*******************************************************************************************************')
    print('\n')
# ##### Observation:
# Among all models, RandomForestClassifier gives the best result because its F1 score is also the highest. Hence we select RandomForestClassifier as our best model.
# RandomForestClassifier with best result
# Retrain the selected model on the train/test split created above and
# report its held-out performance.
rfc=RandomForestClassifier(random_state=42)
rfc.fit(x_train,y_train)
# Training accuracy (displayed as the cell output).
rfc.score(x_train,y_train)
predrfc=rfc.predict(x_test)
print(accuracy_score(y_test,predrfc))
print(confusion_matrix(y_test,predrfc))
print(classification_report(y_test,predrfc))
# ##### Observation:
# RandomForestClassifier gives accuracy score as 99% and f1 is also 99%
#plotting confusion matrix for RandomForestClassifier
cm=confusion_matrix(y_test,predrfc)
sns.heatmap(cm,annot=True,cbar=False,cmap='Blues')
plt.title("Confusion_matrix of Random Forest Classifier")
plt.show()
# ## Saving the Best Model
import joblib
#save the best model to a pickel in a file
joblib.dump(rfc,'Spacesky.pkl')
# ##### Conclusion:
# The RandomForestClassifier model is saved as a pickle file.
| Space (Project).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <center><img src="https://github.com/pandas-dev/pandas/raw/master/web/pandas/static/img/pandas.svg" alt="pandas Logo" style="width: 800px;"/></center>
#
# # Introduction to Pandas
# ---
# ## Overview
# 1. Introduction to pandas data structures
# 1. How to slice and dice pandas dataframes and dataseries
# 1. How to use pandas for exploratory data analysis
#
# ## Prerequisites
#
# | Concepts | Importance | Notes |
# | --- | --- | --- |
# | [Python Quickstart](../../foundations/quickstart) | Necessary | Intro to `dict` |
# | [Numpy basics](../numpy/numpy-basics) | Necessary | |
#
# * **Time to learn**: 60 minutes
# ---
# ## Imports
# You will often see the nickname `pd` used as an abbreviation for pandas in the import statement, just like `numpy` is often imported as `np`. Here we will also be importing `pythia_datasets`, our tool for accessing example data we provide for our materials.
import pandas as pd
from pythia_datasets import DATASETS
# ## The pandas [`DataFrame`](https://pandas.pydata.org/docs/user_guide/dsintro.html#dataframe)...
# ... is a **labeled**, two dimensional columnal structure similar to a table, spreadsheet, or the R `data.frame`.
#
# 
#
# The `columns` that make up our `DataFrame` can be lists, dictionaries, NumPy arrays, pandas `Series`, or more. Within these `columns` our data can be any texts, numbers, dates and times, or many other data types you may have encountered in Python and NumPy. Shown here on the left in dark gray, our very first `column` is uniquely referrred to as an `Index`, and this contains information characterizing each row of our `DataFrame`. Similar to any other `column`, the `index` can label our rows by text, numbers, `datetime`s (a popular one!), or more.
#
# Let's take a look by reading in some `.csv` data, which comes from the NCDC teleconnections database, including various El Niño Southern Oscillation (ENSO) indices! [[ref](https://www.ncdc.noaa.gov/teleconnections/enso/indicators/sst/)].
# <div class="admonition alert alert-info">
# <p class="admonition-title" style="font-weight:bold">Info</p>
# Here we're getting the data from Project Pythia's custom library of example data, which we already imported above with <code>from pythia_datasets import DATASETS</code>. The <code>DATASETS.fetch()</code> method will automatically download and cache our example data file <code>enso_data.csv</code> locally.
# </div>
filepath = DATASETS.fetch('enso_data.csv')
# Once we have a valid path to a data file that Pandas knows how to read, we can open it like this:
df = pd.read_csv(filepath)
# If we print out our dataframe, you will notice that is text based, which is okay, but not the "best" looking output
print(df)
# Instead, if we just use the pandas dataframe itself (without wrapping it in `print`), we have a nicely rendered table which is native to pandas and Jupyter Notebooks. See how much nicer that looks?
df
# The `index` within pandas is essentially a list of the unique row IDs, which by default, is a list of sequential integers which start at 0
df.index
# Our indexing column isn't particularly helpful currently. Pandas is clever! A few optional keyword arguments later, and...
# +
df = pd.read_csv(filepath, index_col=0, parse_dates=True)
df
# -
df.index
# ... now we have our data helpfully organized by a proper `datetime`-like object. Each of our multiple columns of data can now be referenced by their date! This sneak preview at the pandas `DatetimeIndex` also unlocks for us much of pandas most useful time series functionality. Don't worry, we'll get there. What are the actual columns of data we've read in here?
df.columns
# ## The pandas [`Series`](https://pandas.pydata.org/docs/user_guide/dsintro.html#series)...
#
# ... is essentially any one of the columns of our `DataFrame`, with its accompanying `Index` to provide a label for each value in our column.
#
# 
#
# The pandas `Series` is a fast and capable 1-dimensional array of nearly any data type we could want, and it can behave very similarly to a NumPy `ndarray` or a Python `dict`. You can take a look at any of the `Series` that make up your `DataFrame` with its label and the Python `dict` notation, or with dot-shorthand:
df["Nino34"]
# <div class="alert alert-block alert-info">
# <b>Tip:</b> You can also use the `.` (dot) notation, as seen below, but this is moreso a "convenience feature", which for the most part is interchangeable with the dictionary notation above, except when the column name is not a valid Python object (ex. column names beginning with a number or a space)</div>
df.Nino34
# ## Slicing and Dicing the `DataFrame` and `Series`
#
# We will expand on what you just saw, soon! Importantly,
#
# > **Everything in pandas can be accessed with its label**,
#
# no matter how your data is organized.
# ### Indexing a `Series`
#
# Let's back up a bit here. Once more, let's pull out one `Series` from our `DataFrame` using its column label, and we'll start there.
# +
nino34_series = df["Nino34"]
nino34_series
# -
# `Series` can be indexed, selected, and subset as both `ndarray`-like,
nino34_series[3]
# and `dict`-like, using labels
nino34_series["1982-04-01"]
# These two can be extended in ways that you might expect,
nino34_series[0:12]
# <div class="admonition alert alert-info">
# <p class="admonition-title" style="font-weight:bold">Info</p>
# Index-based slices are <b>exclusive</b> of the final value, similar to Python's usual indexing rules.
# </div>
# as well as potentially unexpected ways,
nino34_series["1982-01-01":"1982-12-01"]
# That's right, label-based slicing! Pandas will do the work under the hood for you to find this range of values according to your labels.
# <div class="admonition alert alert-info">
# <p class="admonition-title" style="font-weight:bold">Info</p>
# label-based slices are <b>inclusive</b> of the final value, different from above!
# </div>
# If you are familiar with [xarray](../xarray), you might also already have a comfort with creating your own `slice` objects by hand, and that works here!
nino34_series[slice("1982-01-01", "1982-12-01")]
# ### Using `.iloc` and `.loc` to index
#
# Let's introduce pandas-preferred ways to access your data by label, `.loc`, or by index, `.iloc`. They behave similarly to the notation introduced above, but provide more speed, security, and rigor in your value selection, as well as help you avoid [chained assignment warnings](https://pandas.pydata.org/docs/user_guide/indexing.html#returning-a-view-versus-a-copy) within pandas.
nino34_series.iloc[3]
nino34_series.iloc[0:12]
nino34_series.loc["1982-04-01"]
nino34_series.loc["1982-01-01":"1982-12-01"]
# ### Extending to the `DataFrame`
#
# These capabilities extend back to our original `DataFrame`, as well!
# + tags=["raises-exception"]
df["1982-01-01"]
# -
# <div class="admonition alert alert-danger">
# <p class="admonition-title" style="font-weight:bold">Danger</p>
# Or do they?
# </div>
# They do! Importantly however, indexing a `DataFrame` can be more strict, and pandas will try not to too heavily assume what you are looking for. So, by default we can't pull out a row within `df` by its label alone, and **instead labels are for identifying columns within `df`**,
df["Nino34"]
# and integer indexing will similarly get us nothing,
# + tags=["raises-exception"]
df[0]
# -
# Knowing now that we can pull out one of our columns as a series with its label, plus our experience interacting with the `Series` `df["Nino34"]` gives us, we can chain our brackets to pull out any value from any of our columns in `df`.
df["Nino34"]["1982-04-01"]
df["Nino34"][3]
# However, this is not a pandas-preferred way to index and subset our data, and has limited capabilities for us. As we touched on before, `.loc` and `.iloc` give us more to work with, and their functionality grows further for `df`.
df.loc["1982-04-01", "Nino34"]
# <div class="admonition alert alert-info">
# <p class="admonition-title" style="font-weight:bold">Info</p>
# Note the <code>[<i>row</i>, <i>column</i>]</code> ordering!
# </div>
# These allow us to pull out entire rows of `df`,
df.loc["1982-04-01"]
df.loc["1982-01-01":"1982-12-01"]
df.iloc[3]
df.iloc[0:12]
# Even further,
df.loc[
"1982-01-01":"1982-12-01", # slice of rows
["Nino12", "Nino3", "Nino4", "Nino34"], # list of columns
]
# <div class="admonition alert alert-info">
# <p class="admonition-title" style="font-weight:bold">Info</p>
# For a more comprehensive explanation, which includes additional examples, limitations, and compares indexing methods between DataFrame and Series see <a href="https://pandas.pydata.org/docs/user_guide/indexing.html">pandas' rules for indexing.</a>
# </div>
# ## Exploratory Data Analysis
#
# ### Get a Quick Look at the Beginning/End of your `Dataframe`
# Pandas also gives you a few shortcuts to quickly investigate entire `DataFrame`s.
df.head()
df.tail()
# ### Quick Plots of Your Data
# A good way to explore your data is by making a simple plot. Pandas allows you to plot without even calling `matplotlib`! Here, we are interested in the `Nino34` series. Check this out...
df.Nino34.plot();
# Before, we called `.plot()` which generated a single line plot. This is helpful, but there are other plots which can also help with understanding your data! Let's try using a histogram to understand distributions...
#
# The only part that changes here is we are subsetting for just two `Nino` indices, and after `.plot`, we include `.hist()` which stands for histogram
df[['Nino12', 'Nino34']].plot.hist();
# We can see some clear differences in the distributions, which is helpful! Another plot one might like to use would be a `boxplot`. Here, we replace `hist` with `box`
df[['Nino12', 'Nino34']].plot.box();
# Here, we again see a clear difference in the distributions. These are not the only plots you can use within pandas! For more examples of plotting choices, check out [the pandas plot documentation](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.plot.html)
# #### Customize your Plot
# These `plot()` methods are just wrappers to matplotlib, so with a little more work the plots can be customized just like any matplotlib figure.
df.Nino34.plot(
color='black',
linewidth=2,
xlabel='Year',
ylabel='ENSO34 Index (degC)',
figsize=(8, 6),
);
# This can be a great way to take a quick look at your data, but what if you wanted a more ***quantitative*** perspective? We can use the `describe` method on our `DataFrame`; this returns a table of summary statistics for all columns in the `DataFrame`
#
# ### Basic Statistics
#
# By using the `describe` method, we see some general statistics! Notice how calling this on the dataframe returns a table with all the `Series`
df.describe()
# You can look at specific statistics too, such as mean! Notice how the output is a `Series` (column) now
df.mean()
# If you are interested in a single column mean, subset for that and use `.mean`
df.Nino34.mean()
# ### Subsetting Using the Datetime Column
#
# You can use techniques besides slicing to subset a `DataFrame`. Here, we provide examples of using a couple other options.
#
# Say you only want the month of January - you can use `df.index.month` to query for which month you are interested in (in this case, 1 for the month of January)
# Uses the datetime column
df[df.index.month == 1]
# You could even assign this month to a new column!
df['month'] = df.index.month
# Now that it is its own column (`Series`), we can use `groupby` to group by the month, then taking the average, to determine average monthly values over the dataset
df.groupby('month').mean().plot();
# ### Investigating Extreme Values
# You can also use ***conditional indexing***, such that you can search where rows meet a certain criteria. In this case, we are interested in where the Nino34 anomaly is greater than 2
df[df.Nino34anom > 2]
# You can also sort columns based on the values!
df.sort_values('Nino34anom')
# Let's change the way that is ordered...
df.sort_values('Nino34anom', ascending=False)
# ### Resampling
# Here, we are trying to resample the timeseries such that the signal does not appear as noisy. This can be helpful when working with timeseries data! In this case, we resample to a yearly average (`1Y`) instead of monthly values
df.Nino34.plot();
df.Nino34.resample('1Y').mean().plot();
# ### Applying operations to a dataframe
#
# Often times, people are interested in applying calculations to data within pandas `DataFrame`s. Here, we setup a function to convert from degrees Celsius to Kelvin
def convert_degc_to_kelvin(temperature_degc):
    """Convert a temperature from degrees Celsius to Kelvin.

    Works on scalars as well as anything supporting elementwise addition
    (e.g. NumPy arrays, pandas Series), since it only adds the offset
    between the two scales.
    """
    kelvin_offset = 273.15  # 0 degC expressed in Kelvin
    return temperature_degc + kelvin_offset
# Now, this function accepts and returns a single value
# Convert a single value
convert_degc_to_kelvin(0)
# But what if we want to apply this to our dataframe? We can subset for Nino34, which is in degrees Celsius
nino34_series
# Notice how the object type is a pandas series
type(df.Nino12[0:10])
# If you call `.values`, the object type is now a numpy array. Pandas `Series` values include numpy arrays, and calling `.values` returns the series as a numpy array!
type(df.Nino12.values[0:10])
# Let's apply this calculation to this `Series`; this returns another `Series` object.
convert_degc_to_kelvin(nino34_series)
# If we include `.values`, it returns a `numpy array`
# <div class="admonition alert alert-warning">
# <p class="admonition-title" style="font-weight:bold">Warning</p>
# We don't usually recommend converting to NumPy arrays unless you need to - once you convert to NumPy arrays, the helpful label information is lost... so beware!
# </div>
convert_degc_to_kelvin(nino34_series.values)
# We can now assign our pandas `Series` with the converted temperatures to a new column in our dataframe!
df['Nino34_degK'] = convert_degc_to_kelvin(nino34_series)
df.Nino34_degK
# Now that our analysis is done, we can save our data to a `csv` for later - or share with others!
df.to_csv('nino_analyzed_output.csv')
pd.read_csv('nino_analyzed_output.csv', index_col=0, parse_dates=True)
# ---
# ## Summary
# * Pandas is a very powerful tool for working with tabular (i.e. spreadsheet-style) data
# * There are multiple ways of subsetting your pandas dataframe or series
# * Pandas allows you to refer to subsets of data by label, which generally makes code more readable and more robust
# * Pandas can be helpful for exploratory data analysis, including plotting and basic statistics
# * One can apply calculations to pandas dataframes and save the output via `csv` files
#
# ### What's Next?
# In the next notebook, we will look more into using pandas for more in-depth data analysis.
#
# ## Resources and References
# 1. [NOAA NCDC ENSO Dataset Used in this Example](https://www.ncdc.noaa.gov/teleconnections/enso/indicators/sst/)
# 1. [Getting Started with Pandas](https://pandas.pydata.org/docs/getting_started/index.html#getting-started)
# 1. [Pandas User Guide](https://pandas.pydata.org/docs/user_guide/index.html#user-guide)
| core/pandas/pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''real-estate-hub-z2lfiodh-py3.8'': poetry)'
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
from bs4 import BeautifulSoup
import requests
# +
# Browser-like request headers so zolo.ca serves the full page.
# (BUG FIX: the original had a confusing double assignment `headers = headers = {...}`.)
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0",
    "Accept-Language": "en-US,en;q=0.5",
}
# Session cookies copied from a logged-in browser session.
# NOTE(review): hard-coded session cookies expire quickly and leak account
# details if the notebook is shared — consider loading them from an
# untracked local file instead.
cookies = {
    "PHPSESSID": "052f1b49bc9b279d68d6911295199cfa",
    "_gid": "GA1.2.1842034191.1641867455",
    "emladr": "ashtontml%40yahoo.com",
    "__cf_bm": "bfQeNl60JfAE42ha8_bArpmCeR3j.kI_crnfcB8A390-1641873736-0-AUpfkEJzN4mwjMMGf6v4r6d0UEyiU5AVjE7rQygGBqYFa3/ZfrlLBMO+IrKNBVMXqJA2+tgo/cNRlC0zIoVFXBw=",
    "__cfruid": "298781a739b44b8470037ab5af4bd60b4bb2423e-1641600804",
    "BSID": "cb5e1fe0-7017-11ec-8aa0-bc764e102e1e",
    "_ga": "GA1.2.1201354367.1637730058",
    "BID": "c0554356-52b8-11ec-8aa0-bc764e102e1e",
}
r = requests.get("https://www.zolo.ca/toronto-real-estate/37-odonnell-avenue", headers=headers, cookies=cookies)
r
# +
import pandas as pd

# Parse every HTML table on the page whose text matches "Renting"
# (this targets the listing's price-history table).
dfs = pd.read_html(r.text, match="Renting")
# -
for df in dfs:
    print(df)
# BUG FIX: the original evaluated the bare name `history` here, *before*
# `history = dfs[0]` was assigned, which raised a NameError.
history = dfs[0]
# Keep only rows whose Price is a dollar amount (or missing): this filters
# out the header/junk rows that read_html picks up from the nested table.
history = history[history["Price"].astype("string").str.startswith("$") | history["Price"].isna()]
history
# Column 0 is an empty artifact column from the HTML table.
history = history.drop(columns=0)
history.columns = ["MLS #", "Date", "Event", "Price"]
# Forward-fill each MLS number down through its event rows.
history["MLS #"] = history["MLS #"].ffill()
| notebooks/2022-01-10-zolodata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 01-Preprocessing
# The first NLP exercise is about preprocessing.
#
# You will practice preprocessing using NLTK on raw data.
# This is the first step in most of the NLP projects, so you have to master it.
# We will play with the *coldplay.csv* dataset, containing all the songs and lyrics of Coldplay.
# As you know, the first step is to import some libraries. So import *nltk* as well as all the libraries you will need.
# Import NLTK and all the needed libraries
import nltk
nltk.download('punkt') #Run this line one time to get the resource
nltk.download('stopwords') #Run this line one time to get the resource
nltk.download('wordnet') #Run this line one time to get the resource
nltk.download('averaged_perceptron_tagger') #Run this line one time to get the resource
import numpy as np
import pandas as pd
# Load now the dataset using pandas.
# TODO: Load the dataset in coldplay.csv
df = pd.read_csv('coldplay.csv')
df.head()
# Now, check the dataset, play with it a bit: what are the columns? How many lines? Is there missing data?...
# TODO: Explore the data
df.info()
# Now select the song 'Every Teardrop Is A Waterfall' and save the Lyrics text into a variable. Print the output of this variable.
# TODO: Select the song 'Every Teardrop Is A Waterfall'
lyrics = df[df['Song'] == "Every Teardrop Is A Waterfall"]["Lyrics"]
print(lyrics)
# As you can see, there is some preprocessing needed here. So let's do it! What is usually the first step?
# Tokenization, yes. So do tokenization on the lyrics of Every Teardrop Is A Waterfall.
#
# So you may have to import the needed library from NLTK if you did not yet.
#
# Be careful, the output you have from your pandas dataframe may not have the right type, so manipulate it wisely to get a string.
# + jupyter={"outputs_hidden": true} tags=[]
# Tokenize the lyrics of the song and save the tokens into a variable.
# BUG FIX: `word_tokenize` was called without ever being imported, raising a
# NameError; qualify the call with the `nltk` module imported at the top.
# `lyrics` is a one-row pandas Series, so `.values[0]` extracts the raw string.
tokens = nltk.word_tokenize(lyrics.values[0])
tokens
# -
# It begins to look good. But still, we have the punctuation to remove, so let's do this.
# + jupyter={"outputs_hidden": true} tags=[]
# TODO: Remove the punctuation, then save the result into a variable and print it
no_punc_tokens = [t for t in tokens if t.isalpha()]
no_punc_tokens
# -
# We will now remove the stop words.
# + jupyter={"outputs_hidden": true} tags=[]
# Remove the stop words using NLTK, then put the result into a variable and print it.
# PERF FIX: the original called stopwords.words('english') once per token and
# scanned the resulting list each time (O(tokens * stopwords)); build the set
# once for O(1) membership tests.
from nltk.corpus import stopwords

english_stopwords = set(stopwords.words('english'))
no_stop_tokens = [t for t in no_punc_tokens if t not in english_stopwords]
no_stop_tokens
# -
# Okay we begin to have much less words in our song, right?
#
# Next step is lemmatization. But we had an issue in the lectures, you remember? Let's learn how to do it properly now.
#
# First let's try to do it naively. Import the WordNetLemmatizer and perform lemmatization with default options.
# + jupyter={"outputs_hidden": true} tags=[]
# TODO: Perform lemmatization using WordNetLemmatizer on our tokens
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
lemmatized_tokens = [wnl.lemmatize(t) for t in no_stop_tokens]
lemmatized_tokens
# -
# As you can see, it worked well on nouns (plural words are now singular for example).
#
# But verbs are not OK: we would 'is' to become 'be' for example.
#
# To do that, we need to do POS-tagging. So let's do this now.
# POS-tagging means Part of speech tagging: basically it will classify words into categories: like verbs, nouns, advers and so on...
#
# In order to do that, we will use NLTK and the function *pos_tag*. You have to do it on the step before lemmatization, so use your variable containing all the tokens without punctuation and without stop words.
#
# Hint: you can check on the internet how the *pos_tag* function works [here](https://www.nltk.org/book/ch05.html)
# + jupyter={"outputs_hidden": true} tags=[]
# TODO: use the function pos_tag of NLTK to perform POS-tagging and print the result
pos_tags = nltk.pos_tag(no_stop_tokens)
pos_tags
# -
# As you can see, it does not return values like 'a', 'n', 'v' or 'r' as the WordNet lemmatizer is expecting...
#
# So we have to convert the values from the NLTK POS-tagging to put them into the WordNet Lemmatizer. This is done in the function *get_wordnet_pos* written below. Try to understand it, and then we will reuse it.
# +
from nltk.corpus import wordnet

def get_wordnet_pos(pos_tag):
    """Map NLTK (Penn Treebank) POS tags to WordNet POS tags.

    Parameters
    ----------
    pos_tag : sequence of (word, tag) pairs, as returned by ``nltk.pos_tag``.

    Returns
    -------
    list of (word, wordnet_tag) pairs, where wordnet_tag is one of
    wordnet.ADJ / VERB / ADV / NOUN, ready for WordNetLemmatizer.

    The original implementation round-tripped through a NumPy string array
    and mutated it in place; a direct prefix lookup is simpler and avoids
    any fixed-width string truncation concerns.
    """
    # Penn tags start with 'J' (adjective), 'V' (verb), 'R' (adverb);
    # everything else defaults to noun, matching WordNetLemmatizer's default.
    prefix_map = {'J': wordnet.ADJ, 'V': wordnet.VERB, 'R': wordnet.ADV}
    return [(word, prefix_map.get(tag[:1], wordnet.NOUN))
            for word, tag in pos_tag]
# -
# So now you have all we need to perform properly the lemmatization.
#
# So you have to use the following to do so:
# * your tags from the POS-tagging performed
# * the function *get_wordnet_pos*
# * the *WordNetLemmatizer*
# TODO: Perform the lemmatization properly
tags = get_wordnet_pos(pos_tags)
lemmatized_tokens = [wnl.lemmatize(token, tag) for token, tag in tags]
print(lemmatized_tokens)
# What do you think?
#
# Still not perfect, but it's the best we can do for now.
# Now you can try stemming, with the help of the lecture, and see the differences compared to the lemmatization
# TODO: Perform stemming
from nltk.stem import PorterStemmer

# Porter stemming chops suffixes heuristically -- compare with the
# lemmatization output above.
stemmer = PorterStemmer()
stemmed_tokens = []
for token in no_stop_tokens:
    stemmed_tokens.append(stemmer.stem(token, to_lowercase=True))
print(stemmed_tokens)
# Do you see the difference? What would you use?
| Lab 5/Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.models import load_model
import pandas as pd
import cv2
import numpy as np
import glob
def rotate(model, image):
    """Use the orientation classifier to return an upright copy of *image*.

    model : trained Keras classifier whose argmax output encodes orientation
        (0=left, 1=right, 3=down, anything else upright -- inferred from the
        original branches; confirm against the training labels).
    image : HxWxC image as loaded by cv2.imread.
    """
    # Keras expects a batch dimension: (1, H, W, C). Derive the shape from
    # the image instead of hard-coding 64x64 so other input sizes work too.
    prediction = model.predict(image.reshape((1,) + image.shape))
    orientation = prediction.argmax()  # index of the one-hot encoded class
    # Counter-clockwise rotation (degrees) needed to bring each class upright.
    angle_by_class = {1: 90, 0: 270, 3: 180}
    angle = angle_by_class.get(orientation, 0)
    if angle == 0:
        # Already upright: skip the pointless warpAffine resampling pass.
        return image
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    return cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
if __name__ == '__main__':
    # Load the trained orientation classifier once, correct every training
    # image, then persist the whole stack as a single .npy file.
    model = load_model('model.h5')
    corrected = []
    for path in glob.glob("./train/*.jpg"):
        frame = cv2.imread(path, 1)
        corrected.append(rotate(model, frame))
    corrected = np.array(corrected)
    np.save('np_out', corrected)
| rotate_images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Developer Documentation
#
# Documentation for use of the ADnum, ADmath, and ADgraph modules outside of the web application.
#
# ## What are the ADnum, ADmath, and ADgraph modules?
# This collection of modules can be used to perform automatic differentiation of functions of one or more variables and to visualize the underlying computational graph and table for both the forward and reverse mode of automatic differentiation.
# ## Introduction to Automatic Differentiation
#
# Differentiation is a fundamental operation for computational science. Used in a variety of applications from optimization to sensitivity analysis, differentiation is most useful when two conditions are met: it must be exact (up to machine precision) and computationally efficient.
#
# Automatic differentiation (AD) (i.e. algorithmic differentiation, computational differentiation) computes the derivative of a function, unique for its ability to handle complex combinations of functions without sacrificing the accuracy. Regardless of how complex the function may be, AD takes advantage of the fact that the function can be decomposed into a sequence of elementary arithmetic operations (addition, subtraction, multiplication, division, etc.) and elementary functions (exp, log, sin, cos, etc.).
#
# Through computing the derivatives of these basic elementary functions and repeatedly applying the chain rule, AD meets the two aforementioned conditions, making it useful in a variety of applications including:
# - Machine learning (ability to understand data and make models/predictions), where backpropagation is used to parameterize neural nets among other parameter optimization techniques
# - Parameter optimization (ability to choose best parameter values under given conditions), where methods requiring derivatives may be used to find the optima
# - Sensitivity analysis (ability to understand different factors and their impact), which requires computing partial derivatives with respect to different inputs and parameters
# - Physical modeling (ability to visualize and depict data through models), where different physical properties are related through derivatives (for example, acceleration is the derivative of velocity)
# - Probabilistic inference, where many sampling methods (for example, Hamiltonian Monte Carlo) are derivative based
#
# For a more detailed exposition of AD and the underlying processes, we invite you to read our unit on [Read the Docs](https://auto-ed.readthedocs.io/en/latest/) and use the accompanying web application built from this package.
# ## Installation
#
# The files can be downloaded from the github repository using:
#
# git clone https://github.com/lindseysbrown/Auto-eD.git
#
# To install package dependencies, run the following command:
#
# pip install -r requirements.txt
#
# For those not interested in developing the code or working with functions of more than 5 input variables or more than 3 outputs, the computations performed by this package are available as a [web application](https://autoed.herokuapp.com/) without any installation required. Alternatively, to run the web application locally, the following command will launch the application from the cloned repository:
#
# python ADapp.py
# ## Importing the Modules
#
# For full functionality, the user should include the following imports to use the automatic differentiation package:
#
# from ADnum import ADnum
#
# import ADmath
#
# import ADgraph
#commands to change to the correct directory
# %pwd
# +
#import package functionality
from ADnum import ADnum
import ADmath
import ADgraph
#suggested imports
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Getting Started
# After importing the necessary modules, a user creates a class instance of an `ADnum` with the value to be used as input to a function.
#
# ### Steps for Instantiating Variables and Functions
#
# #### 1. Initialize an input variable (i.e. `x`) with the specific value where the function will be evaluated.
# - Either initialize the value and the derivative
#
# ```python
#
# x = ADnum(5, der = 1)
#
# ```
#
# - Or specify the number of input variables (ins) to the function and the index (ind) of each variable
#
# ```python
# x = ADnum(5, ins = 1, ind = 0)
# ```
#
# #### 2. Define a function (i.e. `f`) with the variable and any other elementary functions from the ADmath module
# - Either directly combine previously created ADnum objects
#
# ```python
# f = ADmath.sin(x)+3*ADmath.tan(x)
# ```
#
# -Or define a function and provide an ADnum object as input
# ```python
# def g(z):
# return ADmath.sin(z)+3*ADmath.tan(z)
# g(x) #an ADnum
# ```
#
# #### 3. `f.val` will return the value of the function evaluated at the specific value
# #### 4. `f.der` will return the derivative at the specific value
# #### 5. Visualize f and the computational process using the tools in the `ADgraph` module
# -See the section on function visualization for more on how to use these tools.
#
# Full code examples for scalar or vector inputs and scalar or vectored valued functions are given below.
# ### Scalar Function of a Scalar Variable
#
# Here we show several examples of scalar functions of scalar variables, including an example of using our package to implement Newton's Method.
# +
# Step 1: initialize x and y to a specific value
# (ins = total number of function inputs, ind = this variable's slot in the gradient)
x = ADnum(3, ins = 2, ind = 0)
y = ADnum(4, ins = 2, ind= 1)
# Step 2: define a function f
f = 2 * y + 2*x**2
# Steps 3 and 4: Use the class attributes to access the value and
# derivative of the function at the values of the inputs
print(f.val)  # 26.0 = 2*4 + 2*3**2
print(f.der)  # gradient [df/dx, df/dy] = [4*x, 2] = [12, 2]
print(x.val)
print(x.der)  # seed vector [1, 0]
print(y.val)
print(y.der)  # seed vector [0, 1]
# +
#another example with a trigonometric function, using the ADmath module
x = ADnum(np.pi, der = [1]) # Step 1: initialize x, this time at pi
f = ADmath.sin(x) # Step 2: create a function, using elementary functions from the ADmath module
#Steps 3 and 4: Use the class attributes to access the value and derivative
print(f.val) # should print 1.22e-16 due to floating point error in numpy implementation (should be 0)
print(f.der) # should print -1.0 (cos(pi))
print(x.val) # should print 3.14
print(x.der) # should print 1
# -
# Suppose we wanted to easily be able to access the value and derivative of a function at many different points. As an alternative to the method for defining `f` in the previous two examples, we could define `f` as a python function:
# +
#example to easily access value and derivative at multiple points by defining f as a function
def f(x):
    return x + ADmath.exp(x)
#get the value and derivative at 1
y = ADnum(1, der = [1])
print(f(y).val, f(y).der)  # note: f(y) is evaluated twice here, once per attribute
#an alternate approach to get the value and derivative at 1
print(f(ADnum(1, der = [1])).val, f(ADnum(1, der = [1])).der)
# -
# Notice that in the above example, we required the natural exponential, an elementary function, to be used from the ADmath package, so that f may take as input and return an ADnum object.
# ### Newton's Method for a Scalar Valued Function
# One basic application of differentiation is Newton's method for finding roots of a function. For demonstration of using our package for such an application, we will consider the function
# $$f(x) = x^2 + \sin(x)$$
# which we know has a root at $x=0$. The plot below also shows that the function has an additional root near -1.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot f(x) = x^2 + sin(x) to show its two roots (x = 0 and one near -0.9).
# NOTE(review): the upper limit 2.55 vs lower -2.5 looks like a typo for 2.5
# -- harmless for the plot, but confirm.
x = np.linspace(-2.5, 2.55, 1000)
f = x**2+np.sin(x)
plt.plot(x, f, linewidth = 2)
plt.plot(x, np.zeros((1000,)), '--')  # dashed y=0 line to make the roots visible
plt.xlabel('x', fontsize = 16)
plt.ylabel('f(x)', fontsize = 16)
plt.xticks(fontsize = 14)
plt.yticks(fontsize =14)
plt.title('Plot of f(x) Showing Two Roots', fontsize = 18)
# +
#implementation of Newton's method using our AD package, without hardcoding the derivative
#function that we wish to find the roots of
def f(x):
    """f(x) = x^2 + sin(x); built from ADnum/ADmath so f(x).der is exact."""
    return x**2+ADmath.sin(x)

#Newton's method
x = ADnum(1, der = [1]) #set an initial guess for the root
for i in range(1000):
    fx = f(x)                # evaluate f once per iteration instead of twice
    dx = -fx.val/fx.der      #Newton step using ADnum attributes
    if np.abs(dx) < .000001: #check if within some tolerance
        print('Root found at: ' + str(x.val))
        break
    x = x+dx #update the guess
# -
# In the above, we found the root at zero. Using a different initialization point, we can find the other root of the function.
# +
y = ADnum(-1, der = [1]) #set an initial guess near the negative root
for i in range(1000):
    fy = f(y)                # evaluate f once per iteration instead of twice
    dy = -fy.val/fy.der      #Newton step using ADnum attributes
    if np.abs(dy) < .000001: #check if within some tolerance
        print('Root found at: ' + str(y.val))
        break
    y = y+dy #update the guess
# -
# ## Functions of Multiple Inputs, Functions of Vector Valued Inputs, and Vector Valued Functions
#
#
# ### Functions of Multiple Variables
# For a function with more than one input variable, the sequence is similar except when creating ADnum objects, the user must specify the total number of input variables, and the index of each variable in the gradient (so the the constructor of the ADnum class can correctly assign the derivative of the input variable with the appropriate seed vector):
# #### 1. initialize each variable to a specific value where the function should be evaluated
# -In addition to the value, the user must provide the number of inputs using the keyword argument `ins` and the index of the input variable being created using the keyword argument `ind`
#
# #### 2. return the gradient as a numpy array when `f.der` is called
# -The gradient will be a numpy array, where `f.der[ind]` gives the partial derivative of f with respect to the variable created with the specified index `ind`.
# scalar function, multi variables
x = ADnum(2, ins = 2, ind = 0)
y = ADnum(3, ins = 2, ind = 1)
f = 3 * x**3 + 2 * y**3
print(f.val)  # 78.0 = 3*8 + 2*27
print(f.der)  # gradient [df/dx, df/dy] = [9*x**2, 6*y**2] = [36, 54]
print(x.val)
print(x.der)  # seed vector [1, 0]
print(y.val)
print(y.der)  # seed vector [0, 1]
# ### Vector-valued Functions
# Each component of a vector valued function is just a scalar valued function of one or more input variables. Thus, we can easily combine the previous results to get the Jacobian of a vector valued function.
# +
x = ADnum(2, ins = 2, ind = 0)
y = ADnum(3, ins = 2, ind = 1)
F = [x**2, x+y, 4*y] #define a vector valued function as a list of components
# Each component is itself an ADnum; its .der holds one row of the Jacobian.
print(F[0].val)
print(F[0].der) #derivative of the first component of F with respect to x and y
print(F[1].val)
print(F[1].der)
print(F[2].val)
print(F[2].der)
# -
# ### Functions of Vector Inputs
# For functions which take vectors as inputs, we can also define ADnum objects but by assigning the value as a list.
# +
#function of a single vector input
X = ADnum([1, 2, 3], ins = 1, ind = 0)
f = 3*X + ADmath.exp(X)  # applied elementwise to the vector
print(f.val)
print(f.der)
print(X.val)
print(X.der)  # seed: ones, one entry per component (see the ADnum constructor)
# +
#function of multiple vector inputs
X = ADnum([1, 2, 1], ins = 2, ind = 0)
Y = ADnum([4, 5, 6], ins =2, ind = 1)
G = X + Y  # elementwise sum
print(G.val)
print(G.der)
print(X.val)
print(X.der)  # seed: row `ind` of the (ins, len) derivative array is ones
print(Y.val)
print(Y.der)
# -
# # Implementation of Automatic Differentiation
# Automatic differentiation is implemented through the use of `ADnum` objects, which have both a value and derivative as attributes. We build the functions for which we want to take derivatives from these `ADnum` objects as well as the special elementary functions defined for `ADnum` objects in the `ADmath` module. Each of these functions returns an `ADnum` object so has an associated value and derivative.
#
# Each of these attributes is either a scalar or a numpy array for ease of computation. In the case of scalar input, the derivative is also a scalar. For vector valued input, the derivative is the gradient of the function, stored as a numpy array.
#
# In order to build and store computational graphs in the ADgraph module, we use a dictionary to represent the graph, where the keys are the nodes of the graph, stored as `ADnum` objects, and the values associated with each key are the children of that node, stored as lists of tuples of the form (ADnum object, string), where the string describes the function that forms the edge for each node.
#
# ### Implemented Methods
# The main class is the `ADnum` module, which is used to create `ADnum` objects. It takes as input a single scalar input or a vector input (as a numpy array) and outputs an `ADnum` object. The `ADnum` objects store the current value of the function and its derivative as attributes.
#
# These two attributes represent the two major functionalities desired of the class. The `val` attribute is the ADnum object evaluated at the given value and the `der` attribute is its derivative at the given value.
#
# In addition to these basic attributes, ADnum objects also have a `constant` attribute, which is set to 0 or 1 depending on whether the object is a constant or variable. This distinction is used in making computational graphs and tables, which also rely on the `graph` attribute. This attribute is a dictionary containing key-value pairs of ADnum objects and lists of the ADnum objects that were combined to build the ADnum object (essentially using a dictionary to store a graph of nodes and edges).
#
# The constructor for this class, using variable keyword arguments to set the value and derivative appropriately,
#
# ```python
# #ADnum.py
# class ADnum:
# """ Class to create ADnum objects on which to perform differentiation.
# ATTRIBUTES
# ==========
# val : scalar for scalar valued quantities or numpy array for vector valued functions, the value of the ADnum object for a set input value
# der : scalar for sclar functions of a single variable or numpy array for functions of multiple variables the derivative
# graph : dictionary containing the edges of the computational graph
# constant : 0 or 1 indicating whether the ADnum object is constant
# METHODS
# =======
# This class overloads the methods for basic arithmetic operations.
# EXAMPLES
# ========
# # >>> x = ADnum(2, der = 1)
# # >>> f = 2*x+3
# # >>> print(f.val)
# # 7.0
# # >>> print(f.der)
# # 2.0
# """
# def __init__(self, value, **kwargs):
# try:
# scalarinput = (isinstance(value, int) or isinstance(value, float))
# value = np.array(value)
# value = value.astype(float)
# if 'der' not in kwargs:
# try:
# ins = kwargs['ins']
# ind = kwargs['ind']
# if scalarinput:
# der = np.zeros(ins)
# der[ind] = 1.0
# else:
# if ins>1:
# der = np.zeros((ins, len(value)))
# der[ind, :] = 1.0 #np.ones(len(value))
# else:
# der = np.ones(len(value))
# except:
# raise KeyError('Must provide ins and ind if der not provided.')
# else:
# der = kwargs['der']
# der = np.array(der)
# der = der.astype(float)
# if 'ins' in kwargs:
# ins = kwargs['ins']
# if len(der) != ins:
# raise ValueError('Shape of derivative does not match number of inputs.')
# except:
# raise ValueError('Value and derivative of ADnum object must be numeric.')
# self.val = value
# self.der = der
# if 'graph' not in kwargs:
# self.graph = {}
# else:
# self.graph = kwargs['graph']
# if 'constant' not in kwargs:
# self.constant = 0
# else:
# self.constant = kwargs['constant']
# ```
#
# The `ADnum` class also includes methods to overload basic operations, __neg__(), __add__(), __radd__(), __mul__(), __rmul__(), __sub__(), __rsub__(), __truediv__(), __rtruediv__(), __pow__(), and __rpow__(). The result of overloading is that the adding, subtracting, multiplying, dividing, or exponentiating two `ADnum` objects returns an `ADnum` object as well as addition or multiplication by a constant. For example, Y1, Y2, and Y3 are all recognized as `ADnum` objects:
#
# ```python
# X1= ADnum(7, der = 1)
# X2 = ADnum(15, der = 1)
# Y1 = X1 + X2
# Y2 = X1 * X2 + X1
# Y3 = 5 * X1 + X2 + 100
# ```
#
# The resulting ADnum objects have both a value and derivative. An example overloaded function is the following:
#
#
# ```python
# #ADnum.py
# def __mul__(self,other):
# try:
# graph = merge_dicts(self.graph, other.graph)
# y = ADnum(self.val*other.val, der = self.val*other.der+self.der*other.val)
# y.graph = graph
# if self not in y.graph:
# y.graph[self] = []
# y.graph[self].append((y, 'multiply'))
# if other not in y.graph:
# y.graph[other] = []
# y.graph[other].append((y, 'multiply'))
# return y
# except AttributeError:
# other = ADnum(other*np.ones(np.shape(self.val)), der = np.zeros(np.shape(self.der)), constant = 1)
# return self*other
# ```
#
# By combining simple `ADnum` objects with basic operations and simple functions, we can construct any function we like.
#
# ```python
# X = ADnum(4, der = 1)
# F = X + ADmath.sin(4 - X)
# ```
# Where F is now an `ADnum` object, and ADmath.sin() is a specially defined sine function which takes as input an `ADnum` object and returns an `ADnum` object, which allows us to evaluate F and its derivative,
#
# ```python
# F.val = 4
# F.der = 0
# X.val = 4
# X.der = 1
# ```
#
# In addition to the sine function used in the example above, the `ADmath` module also implements the trigonometric functions: `sin()`, `cos()`, `tan()`, `csc()`, `sec()`, `cot()`, the inverse trigonometric functions: `arcsin()`, `arccos()`, `arctan()`, the hyperbolic trig functions: `sinh()`, `cosh()`, `tanh()`, and the natural exponential `exp()` and natural logarithm `log()`. For ease of use in teaching neural network applications, this module also contains the sigmoid (`sig()`) and `relu()` as common activation functions. All of the functions defined in the `ADmath` module define elementary functions of `ADnum` objects, so that the output is also an `ADnum` object with the `val` and `der` attributes updated appropriately. For example,
#
# ```python
# #ADmath.py
# def sin(X):
# try:
# y = ADnum(np.sin(X.val), der = np.cos(X.val)*X.der)
# y.graph = X.graph
# if X not in y.graph:
# y.graph[X] = []
# y.graph[X].append((y, 'sin'))
# return y
# except AttributeError:
# return np.sin(X)
# ```
#
# We also implement a module `ADgraph`, for visualization of ADnum objects and the corresponding computational graphs and tables. This class operates on ADnum objects by using the graph attribute. The main methods are `draw_graph` which draws the computational graph used to compute the derivative, and `gen_table` which generates a table corresponding to the graph including the traces and values and derivatives of each trace. The function `gen_graph` produces a `networkx` object from the dictionary stored in the `graph` attribute of the `ADnum` object, used for visualization. We also use the method `reverse_graph` so that we have the ability to map nodes both to and from their parents. This module also contains a number of utility functions for producing the graph (`get_labels`, `get_colors`, `get_sizes`).
#
# #### Reverse Mode
# Our `ADnum` objects also have the method `revder` which can be used to compute the gradient of a function using reverse mode. Our implementation uses the graph attribute to trace back through the edges of the graph to compute the derivative. Note that reverse mode gives the same results for the derivative as forward mode but in some cases may be computationally beneficial depending on the implementation.
#
# +
#example comparing forward and reverse mode
x = ADnum(1, ins=1, ind=0)
f = 2*ADmath.sin(x)
print(f.der) #call for forward mode
print(x.revder(f)[0]) #call for reverse mode; same derivative as forward mode
# NOTE(review): revder is evaluated twice here -- bind the result once if it
# is expensive for larger graphs.
print(x.revder(f)[1]) #revder also returns the trace of edges visited for benefit in debugging
# -
# ### External Dependencies
# In order to implement the elementary functions, our `ADmath` relies on `numpy`’s implementation of the trigonometric functions, exponential functions, and natural logarithms for evaluation of these special functions, as demonstrated in the definition of the sine function for `ADnum` objects above.
#
# For graph and web application creation, we used the `matplotlib`, `networkx`, `pandas`, and `flask` libraries. NetworkX is a Python package for the creation, manipulation, and study of the structure, dynamics, and functions of complex networks. Pandas is used for creating the computational tables. Flask is an interface to build web applications in Python.
# ## Web Application
#
# We used this process to create a pedagogical tool as a web application that can be used to help visualize the computational process that is used when combining different elementary operations and functions to compose more complicated functions and calculate the derivatives of these functions. Such a tool could be useful in the classroom for teaching students how automatic differentiation works. See the associated unit on [Read the Docs](https://auto-ed.readthedocs.io/en/latest/).
#
# The module `ADgraph` contains visualization tools for `ADnum` objects. For every operation we create an additional `ADnum` object which becomes a node in our graph, representing another trace in the program, where the edge labels display the corresponding operation. Correspondingly, we also develop the functionality to display a table showing the trace, elementary operation, value, and derivative at each step.
#
# Beyond the basic functionality of forward mode, this required modifying all of our methods to correctly add to the dictionary which contains the computational graph information for each operation that we have previously overloaded in addition to the functions for visualizing.
#
# The following examples demonstrate each of the three main visualization tools and describe how to use them.
# #### Visualizing Forward Computational Graphs
# The function `draw_graph` takes an `ADnum` object as input and outputs a plot of the computational graph. The graph is color coded to help the user better interpret the graph (magenta nodes represent the input variables, a green node represents the output, red nodes are used for intermediate traces, and blue nodes represent constants). Each edge is labelled according to the elementary operation that connects the nodes. The computational graph can be used for single or multiple inputs. We can compare the following computer generated graph to the hand produced graph in section 2.2.
# +
x = ADnum(1, ins =2, ind =0)
y = ADnum(np.pi/5, ins = 2, ind = 1)
f = x**3+ADmath.sin(5*y)
fig3 = ADgraph.draw_graph(f)  # forward computational graph (color legend above)
# -
# #### Generate the Corresponding Forward Computational Table
# The function `gen_table` takes an `ADnum` object as input and outputs a table of the computational traces. Labels correspond to the graph that was generated to improve the connection between the numeric computations and visual connections. As with the computational graph, this can be used for functions of single or multiple variables.
# Table of traces; labels match the computational graph generated above.
ADgraph.gen_table(f)
# #### Visualizing Reverse Mode
# Our module also allows us to visualize a static version of the graph for reverse mode. Comparing shows that the graph is the same as that of forward mode but with the edges reversed. Alternatively, we can dynamically visualize reverse mode to see the edges that are traversed in each step of the computation.
# Static reverse-mode graph: same nodes as forward mode, edges reversed.
fig4 = ADgraph.draw_graph_rev(f)
# TODO: dynamic computation - cleaner output of above graph
# (the line above was bare prose in the code cell, which is a SyntaxError in
# the exported script; it is now a comment)
# ## Future Work
# TO DO
#
# To contribute to this work, TO DO
| homework/HW4/Auto-eD/DeveloperDocumentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro to KNN with Scikit-learn
# ### Requirements for working with data in scikit-learn
#
# 1. Features and labels are separate objects
# 2. Features and labels should be numeric
# 3. Features and labels should be NumPy arrays
# 4. Features and labels should have specific shapes
from IPython.display import IFrame
# Preview the raw UCI iris data file inline in the notebook.
IFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200)
# +
# import load_iris function from datasets module
from sklearn.datasets import load_iris
# save "bunch" object containing iris dataset and its attributes
iris = load_iris()
# store feature matrix in "x"
x = iris.data
# store label vector in "y"
y = iris.target
#Show slices
x[:3],y[3:12]  # first 3 feature rows and labels 3..11
# -
# Show column + row count
# (converted from Python 2 print statements: the notebook kernel is Python 3,
# where `print y.shape` is a SyntaxError)
print(y.shape)
print(x.shape)
# ### K-Nearest Neighbors
# ### Steps
# 1. Select K. (How many neighbors you want to consider in your calculation)
# 2. Search for the K nearest neighbors
# 3. Set label for unknown observation to most popular label among K neighbors
# ### KNN classification map (K=5)
# 
#
# ### Build KNN Model
# +
from sklearn.neighbors import KNeighborsClassifier

#"Estimator" is scikit-learn's term for model
knn = KNeighborsClassifier(n_neighbors=5)
# print() call form: the declared kernel is Python 3, where `print knn` fails.
print(knn)
# -
#Fit the model with data (aka "model training")
knn.fit(x, y)  # trains on the full iris dataset loaded above
# +
#Predict the response for a new observation
test_data = [[3, 5, 4, 2], [5, 4, 3, 2]]
knn.predict(test_data) #Returns a NumPy array
actual_data = x
actual_labels = y
knn_predict = knn.predict(actual_data) #Returns a NumPy array
# print() call form for the Python 3 kernel (was a Python 2 print statement).
print(knn_predict)
# -
# ### Using a different classification model
# +
# import the class
from sklearn.linear_model import LogisticRegression

# instantiate the model (using the default parameters)
logreg = LogisticRegression()

# fit the model with data
logreg.fit(x, y)

# predict the response for new observations
logreg_predict = logreg.predict(actual_data)
# print() call form for the Python 3 kernel (was a Python 2 print statement).
print(logreg_predict)
# -
# # Comparing ML models in scikit-learn
# Used three classification models: KNN (K=1), KNN (K=5), logistic regression
# Need a way to choose between the models
#
#
# ### Classification accuracy
# 1. Proportion of correct predictions
# 2. count_correct / count_all
# ## Option #1: Train on entire dataset
# This is what we did above so we can reuse values knn_predict, logreg_predict
#compute classification accuracy for the logistic regression model
from sklearn import metrics
# ### Logistic regression
# +
# Training-set accuracy of logistic regression (evaluated on the data it was
# fit on -- see the overfitting caveat below).
predicted_labels = logreg_predict
print(len(predicted_labels))
logreg_accuracy = metrics.accuracy_score(actual_labels, predicted_labels)
print(logreg_accuracy)
# -
# ### KNN
# +
# Training-set accuracy of the KNN model, for comparison.
predicted_labels = knn_predict
print(len(predicted_labels))
accuracy = metrics.accuracy_score(actual_labels, predicted_labels)
print(accuracy)
# -
# ### But we are overfitting by training on entire data set!!!
# ## Evaluation procedure #2: Train/test split
#
# ### Steps
# 1. Split the dataset into two pieces
# 2. Train the model on the training set.
# 3. Test the model on the testing set
# ### Split into training and testing sets
# +
# train_test_split moved from sklearn.cross_validation (deprecated in 0.18,
# removed in 0.20) to sklearn.model_selection.
from sklearn.model_selection import train_test_split

# Splits data 40% test, 60% train; random_state fixed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=4)
# -
# ### Train Logistic Regression
# +
logreg = LogisticRegression()
# BUG FIX: was `logreg.fit(X_train, ...)` -- the split above defines
# lowercase `x_train`, so capital X_train raised NameError.
logreg.fit(x_train, y_train)
y_pred = logreg.predict(x_test)
print(metrics.accuracy_score(y_test, y_pred))
# -
#
# ### Train KNN (K=5)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
# print() call form for the Python 3 kernel (was a Python 2 print statement).
print(metrics.accuracy_score(y_test, y_pred))
# ### Train KNN (K=25 and K=6)
knn = KNeighborsClassifier(n_neighbors=25)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
# print() call form for the Python 3 kernel (was a Python 2 print statement).
print(metrics.accuracy_score(y_test, y_pred))
# +
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
#Looks like K=6-17 all return the same score
print(metrics.accuracy_score(y_test, y_pred))
# -
# ### Use a loop to try out different Ks
# try K=1 through K=25 and record testing accuracy
k_range = list(range(1, 26))
scores = []  # held-out (test-set) accuracy for each value of K
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(x_train, y_train)
    y_pred = knn.predict(x_test)
    scores.append(metrics.accuracy_score(y_test, y_pred))
# ### Plot the Range of Scores!
# +
# import Matplotlib (scientific plotting library)
import matplotlib.pyplot as plt

# allow plots to appear within the notebook
# %matplotlib inline

# plot the relationship between K and testing accuracy
plt.plot(k_range, scores)  # accuracy values gathered by the loop above
plt.xlabel('Value of K for KNN')
plt.ylabel('Testing Accuracy')
# -
# * Training accuracy rises as model complexity increases
# * Testing accuracy penalizes models that are too complex or not complex enough
# ### Downsides of train/test split?
#
# * Provides a high-variance estimate of out-of-sample accuracy
# * K-fold cross-validation overcomes this limitation
# * But, train/test split is still useful because of its flexibility and speed
| theory/KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # 3D Visualization of a Convex Hull with D3
# This notebook provides a simple example of convex hull visualization using D3.
# ### D3 Graph Methods
# See accompanying d3_lib.py and the js and css folders.
# %matplotlib inline
from IPython.core.display import HTML
import d3_lib
# Inject the stylesheets and the D3 library required by the 3D viewer below.
HTML(d3_lib.set_styles(['basic_axis','3d_viewer']))
HTML('<script src="lib/d3/d3.min.js"></script>')
# +
def points_d3(points):
    """Convert a sequence of 3D points into the dict form the D3 viewer expects."""
    converted = []
    for point in points:
        converted.append({"x": point[0], "y": point[1], "z": point[2]})
    return converted
def triangles_d3(points, triangles_vertices):
    """Expand triangle vertex indices into flat {x1..z3} dicts for the D3 viewer.

    points : indexable collection of 3D points.
    triangles_vertices : iterable of 3-tuples of indices into *points*.
    """
    triangles = []
    for tv in triangles_vertices:
        triangle = {}
        # Corner i of the triangle contributes the keys xi, yi, zi.
        for corner in (1, 2, 3):
            vertex = points[tv[corner - 1]]
            triangle["x%d" % corner] = vertex[0]
            triangle["y%d" % corner] = vertex[1]
            triangle["z%d" % corner] = vertex[2]
        triangles.append(triangle)
    return triangles
def graph_points_triangles(objs):
    """Render one or more (points, triangles_vertices) pairs in the 3D viewer."""
    data = [{"points": points_d3(obj[0]),
             "triangles": triangles_d3(obj[0], obj[1])}
            for obj in objs]
    return HTML(d3_lib.draw_graph('3d_viewer', {'data': data}))
# -
# ### Meshing and Volume Calculations
import numpy as np
import random
from scipy.spatial import ConvexHull
def compute_mesh(points):
    """Triangulate the convex hull of *points*.

    Returns (simplices, vertex_indices): the hull's triangular facets as
    index triples, and the indices of the input points on the hull.
    """
    hull = ConvexHull(points)
    return hull.simplices, hull.vertices
# ### Example: Randomly Sampled Points on a Cylinder
def cylinder_points_and_hull_given_sample_size(sample_size):
    """Randomly sample ~sample_size points on a unit-radius cylinder (z in [0,1])
    and return (points, hull_vertex_indices, triangle_vertex_indices).

    Half the points land on the curved side surface and a quarter on each of
    the two end caps.
    """
    points = []
    # Side surface: random x and z, y placed on the circle with a random sign.
    for i in range(sample_size // 2):
        x = random.uniform(-1, 1)
        z = random.uniform(0, 1)
        s = (-1.0, 1.0)[random.uniform(0, 1) < 0.5]
        y = s * (1 - x**2) ** (0.5)
        points.append(np.array([x, y, z]))
    # End caps at z = 0 and z = 1: random points inside the unit disk.
    for z in range(0, 2):
        # BUG FIX: this loop previously read the global `n` instead of the
        # sample_size parameter (it only worked because the caller happened to
        # define n); `//` also keeps integer semantics under Python 3.
        for i in range(sample_size // 4):
            x = random.uniform(-1, 1)
            s = (-1.0, 1.0)[random.uniform(0, 1) < 0.5]
            y = s * random.uniform(0, 1) * (1 - x**2) ** (0.5)
            points.append(np.array([x, y, z]))
    points = np.array(points)
    triangles_vertices, hull_points = compute_mesh(points)
    return points, hull_points, triangles_vertices
random.seed(42)  # make the random sampling reproducible
n = 100          # total number of sampled points
points, hull_vertices, triangles_vertices = cylinder_points_and_hull_given_sample_size(n)
points[:3]            # peek at the first few sampled points
triangles_vertices[:3]
graph_points_triangles([[points, triangles_vertices]])
| ex3_3d_meshing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''3.9'': conda)'
# name: python3
# ---
import pandas as pd
# Load the bond-market workbook and preview its contents and schema.
filename = 'leavemarket.xlsx'
df = pd.read_excel(filename)
df.head()
df.info()
import datetime
def get_remain_year(x):
    """Years (365-day years) between a row's 'stop' and 'end' timestamps."""
    remaining = x['end'] - x['stop']
    return remaining.days / 365
# Add the remaining-lifetime column (years until the clause deadline).
df['more_year']=df.apply(get_remain_year,axis=1)
df.head()
# NOTE(review): `cb_df` is used on the next line before it is assigned two
# lines below — these notebook cells were evidently executed out of order;
# run top-to-bottom this raises NameError.
cb_df[cb_df['more_year']<=1.2]
# Keep convertible bonds only (drop exchangeable bonds whose name ends in 'EB').
cb_df=df[~df['name'].str.endswith('EB')]
len(cb_df[cb_df['more_year']<=2.1])
len(cb_df[cb_df['more_year']<=1.1])
cb_df['result'].value_counts()
# NOTE(review): `cc_df_` is likewise referenced here before the rename below
# defines it — another out-of-order notebook cell.
cc_df_[cc_df_['结果']=='到期']
cc_df_[cc_df_['结果']=='不足3000万']
cc_df_[cc_df_['结果']=='低于3千万']
len(cb_df)
5/205
# Rename columns to Chinese display names for the final report view.
cc_df_=cb_df.rename(columns={'code':'代码','name':'转债名称','price':'最后价格','zgcode':'正股代码',
                'zg':'正股名','issuescale':'发行规模','recallscale':'回售规模','remain':'剩余规模',
                'issu':'发行时间','stop':'退市时间','end':'条款到期时间','stayyear':'存续年限','result':'结果','more_year':'剩余多少年'
               })
cc_df_
cc_df_[cc_df_['剩余多少年']<=1.0]
5/9
cc_df_[(cc_df_['剩余多少年']<=1.0)&(cc_df_['结果']=='到期')]
# | analysis/last_year_price.ipynb  (source-notebook separator; commented out — the bare pipe expression is not valid Python)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# +
# Toy k-means setup: 200 random 2-D points, 3 clusters, 20 iterations.
points_n = 200
clusters_n = 3
iteration_n = 20
points = tf.constant(np.random.uniform(0, 10, (points_n, 2)))
print(points.shape)
# Initial centroids: the first `clusters_n` rows of a shuffled copy of the points.
centroids = tf.constant(tf.slice(tf.compat.v1.random_shuffle(points), [0, 0], [clusters_n, -1]))
print(centroids.shape)
# Add a leading broadcast axis so point/centroid distances can be vectorized.
points_expanded = tf.expand_dims(points, 0)
print("points expanded shape {}".format(points_expanded.shape))
# -
# <EMAIL>  NOTE(review): redaction artifact — this bare token is not valid Python; the original line was presumably a decorator or an author tag. Confirm against the source notebook.
def update_centroids(points_expanded, centroids):
    """One (verbose) k-means step: assign each point to its nearest centroid,
    then recompute every centroid as the mean of its assigned points.

    points_expanded: the points with a leading broadcast axis, shape (1, N, D).
    centroids: the current centroids, shape (K, D).
    Returns (new_centroids, assignments).

    Reads the globals `clusters_n` (K) and `points` (the raw (N, D) point set
    used when gathering cluster members), so it is tied to the cells above.
    """
    centroids_expanded = tf.expand_dims(centroids, 1)
    print("points expanded shape {}".format(points_expanded.shape))
    print("centroids expanded shape {}".format(centroids_expanded.shape))
    # Squared euclidean distance of every point to every centroid -> (K, N).
    distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)
    print(distances.shape)
    # Index of the nearest centroid per point (argmin over the K axis).
    assignments = tf.argmin(distances, 0)
    print(assignments.shape)
    print(assignments.numpy())
    means = []
    for c in range(clusters_n):
        print(tf.where(tf.equal(assignments, c)).shape)
        # Row indices of the points currently assigned to cluster c.
        ruc = tf.reshape(tf.where(tf.equal(assignments, c)), [1,-1])
        print(ruc.shape)
        ruc = tf.gather(points, ruc)
        print(ruc.shape)
        # Mean over the gathered member points -> the new centroid for c.
        ruc = tf.reduce_mean(ruc, axis=[1])
        print(ruc.shape)
        means.append(ruc)
    new_centroids = tf.concat(means, 0)
    return new_centroids, assignments
# +
# Run the fixed number of k-means iterations, then plot the points colored
# by final cluster with the centroids marked as black crosses.
for _ in range(iteration_n):
    centroids, assignments = update_centroids(points_expanded, centroids)
plt.scatter(points[:, 0], points[:, 1], c=assignments, s=50, alpha=0.5)
plt.plot(centroids[:, 0], centroids[:, 1], 'kx', markersize=15)
plt.show()
# -
import cv2
image = cv2.imread("test_images/apple_01.JPG")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.show()
# +
# Flatten the image into a (50176, 3) pixel matrix for per-pixel k-means.
# NOTE(review): the hard-coded 50176 assumes a 224x224 image — confirm.
image_tensor = tf.convert_to_tensor(image)
print(image_tensor.shape)
image_tensor = tf.reshape(image_tensor, [50176, 3])
print(image_tensor.shape)
clusters_n = 3
iteration_n = 20
#centroids = tf.constant(tf.slice(tf.compat.v1.random_shuffle(image_tensor), [0, 0], [clusters_n, -1]))
# Seed the centroids with three randomly chosen pixels (TF2-style shuffle).
centroids = tf.constant(tf.slice(tf.random.shuffle(image_tensor), [0, 0], [clusters_n, -1]))
print(centroids.shape)
image_expanded = tf.expand_dims(image_tensor, 0)
print("points expanded shape {}".format(image_tensor.shape))
# Cast everything to float32 so the distances and means are well-defined.
image_tensor = tf.cast(image_tensor, tf.float32)
image_expanded = tf.cast(image_expanded, tf.float32)
centroids = tf.cast(centroids, tf.float32)
# -
@tf.function
def update_centroids(points_expanded, centroids):
    """Graph-compiled k-means step (same scheme as above, prints removed).

    NOTE(review): gathers members from the global `image_tensor` and reads
    the global `clusters_n`, so this redefinition only works with the image
    clustering cells above.
    """
    centroids_expanded = tf.expand_dims(centroids, 1)
    # (K, N) squared distances via broadcasting.
    distances = tf.reduce_sum(tf.square(tf.subtract(points_expanded, centroids_expanded)), 2)
    assignments = tf.argmin(distances, 0)
    means = []
    for c in range(clusters_n):
        # Gather the pixels of cluster c and average them into its new centroid.
        ruc = tf.reshape(tf.where(tf.equal(assignments, c)), [1,-1])
        ruc = tf.gather(image_tensor, ruc)
        ruc = tf.reduce_mean(ruc, axis=[1])
        means.append(ruc)
    new_centroids = tf.concat(means, 0)
    return new_centroids, assignments
for _ in range(iteration_n):
centroids, assignments = update_centroids(image_expanded, centroids)
centroids
colors = centroids.numpy()
colors[0][0]
def RGB2HEX(color):
    """Format an (R, G, B) triple as a lowercase '#rrggbb' hex string.

    Float channels are truncated toward zero before formatting.
    """
    r, g, b = int(color[0]), int(color[1]), int(color[2])
    return "#%02x%02x%02x" % (r, g, b)
# +
hex_colors = [RGB2HEX(c) for c in colors]
plt.figure(figsize = (8, 6))
plt.pie([3, 3, 3], colors=hex_colors)
plt.show()
# -
class KMeans:
    """Minimal eager-TF k-means: `predict` flattens an input tensor to
    (N, features) rows and returns the final (K, features) centroids."""
    def __init__(self, num_clusters, num_iterations):
        # num_clusters: K; num_iterations: fixed number of Lloyd iterations.
        self.num_clusters = num_clusters
        self.num_iterations = num_iterations
    def update_centroids(self, input_tensor, input_expanded, centroids):
        """One Lloyd step: nearest-centroid assignment + per-cluster mean."""
        centroids_expanded = tf.expand_dims(centroids, 1)
        # (K, N) squared distances via broadcasting.
        distances = tf.reduce_sum(tf.square(tf.subtract(input_expanded, centroids_expanded)), 2)
        assignments = tf.argmin(distances, 0)
        means = []
        for c in range(self.num_clusters):
            # Gather the rows of cluster c and average them into a centroid.
            ruc = tf.reshape(tf.where(tf.equal(assignments, c)), [1,-1])
            ruc = tf.gather(input_tensor, ruc)
            ruc = tf.reduce_mean(ruc, axis=[1])
            means.append(ruc)
        new_centroids = tf.concat(means, 0)
        return new_centroids, assignments
    def predict(self, input_tensor):
        """Flatten all leading dims to one sample axis, seed the centroids
        from a shuffled copy, iterate, and return the float32 centroids."""
        input_shape = input_tensor.shape.as_list()
        # Collapse every dimension except the last (feature) one.
        dims = 1
        for i in range(len(input_shape)-1):
            dims *= input_shape[i]
        input_tensor = tf.reshape(input_tensor, [dims, input_shape[-1]])
        input_shuffle = tf.random.shuffle(input_tensor)
        centroids = tf.constant(tf.slice(input_shuffle, [0, 0], [self.num_clusters, -1]))
        input_expanded = tf.expand_dims(input_tensor, 0)
        # Cast to float32 so distances/means are well-defined for uint8 input.
        input_tensor = tf.cast(input_tensor, tf.float32)
        input_expanded = tf.cast(input_expanded, tf.float32)
        centroids = tf.cast(centroids, tf.float32)
        for _ in range(self.num_iterations):
            centroids, assignments = self.update_centroids(input_tensor, input_expanded, centroids)
        return centroids
def test_image(image_path):
    """Show an image, run KMeans(3, 20) on its pixels, and display the three
    dominant colors as a pie chart."""
    image = cv2.imread(image_path)
    # OpenCV loads BGR; convert so the displayed colors are correct.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(image)
    plt.show()
    image_tensor2 = tf.convert_to_tensor(image)
    k_means = KMeans(3, 20)
    colors = k_means.predict(image_tensor2)
    colors = colors.numpy()
    print(colors)
    hex_colors = [RGB2HEX(c) for c in colors]
    plt.figure(figsize = (8, 6))
    # Equal slices: only the slice colors matter, not the proportions.
    plt.pie([3, 3, 3], colors=hex_colors)
    plt.show()
test_image("test_images/apple_01.JPG")
test_image("test_images/banana_01.JPG")
test_image("test_images/orange_01.JPG")
# +
t = tf.ones([2, 10, 10, 3])
def prueba(x):
    """Probe for tf.map_fn: report each element's shape, return a fixed zero tensor."""
    element_shape = x.shape
    print(element_shape)
    return tf.zeros([5, 5, 3])
tf.map_fn(prueba, t)
# -
from tensorflow.keras import layers
class ColorExtractor(layers.Layer):
    """Keras layer that runs k-means independently on every image of a batch
    and emits each image's dominant colors (the final centroids)."""
    def __init__(self, num_clusters, num_iterations):
        super(ColorExtractor, self).__init__()
        self.num_clusters = num_clusters
        self.num_iterations = num_iterations
    def build(self, input_shape):
        # Pixels per image (H*W) and channel count, fixed at build time.
        self.resize = input_shape[1] * input_shape[2]
        self.dims = input_shape[-1]
    def call(self, inputs):
        # Per-image k-means, mapped over the batch axis with tf.map_fn.
        def extract_color(input_tensor):
            input_tensor = tf.cast(input_tensor, tf.float32)
            input_shape = input_tensor.shape.as_list()
            # (H*W, channels) pixel matrix.
            input_tensor = tf.reshape(input_tensor, [self.resize, input_shape[-1]])
            input_shuffle = tf.random.shuffle(input_tensor)
            #centroids = tf.constant(tf.slice(input_shuffle, [0, 0], [self.num_clusters, -1]))
            # Seed the centroids with num_clusters randomly chosen pixels.
            centroids = tf.slice(input_shuffle, [0, 0], [self.num_clusters, -1])
            input_expanded = tf.expand_dims(input_tensor, 0)
            #input_tensor = tf.cast(input_tensor, tf.float32)
            #input_expanded = tf.cast(input_expanded, tf.float32)
            #centroids = tf.cast(centroids, tf.float32)
            for _ in range(self.num_iterations):
                centroids, assignments = self.update_centroids(input_tensor, input_expanded, centroids)
            #centroids = tf.reshape(centroids, [self.dims * self.num_clusters, 1])
            return centroids
        return tf.map_fn(extract_color, inputs, dtype=tf.float32)
    @tf.function
    def update_centroids(self, input_tensor, input_expanded, centroids):
        """One graph-compiled k-means step (same scheme as the KMeans class)."""
        centroids_expanded = tf.expand_dims(centroids, 1)
        distances = tf.reduce_sum(tf.square(tf.subtract(input_expanded, centroids_expanded)), 2)
        assignments = tf.argmin(distances, 0)
        means = []
        for c in range(self.num_clusters):
            ruc = tf.reshape(tf.where(tf.equal(assignments, c)), [1,-1])
            ruc = tf.gather(input_tensor, ruc)
            ruc = tf.reduce_mean(ruc, axis=[1])
            means.append(ruc)
        new_centroids = tf.concat(means, 0)
        return new_centroids, assignments
def load_image_tensor(image_path):
    """Read an image file into a (1, H, W, 3) RGB tensor scaled to [0, 1]."""
    image = cv2.imread(image_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_tensor = tf.convert_to_tensor(image)
    # NOTE(review): relies on `/ 255` promoting the uint8 tensor to float —
    # confirm, as in-place division of an integer tensor can raise in some
    # TensorFlow versions.
    image_tensor /= 255
    # Add the batch axis expected by the ColorExtractor layer.
    image_tensor = tf.expand_dims(image_tensor, 0)
    return image_tensor
# +
img01 = load_image_tensor("test_images/apple_01.JPG")
img02 = load_image_tensor("test_images/banana_01.JPG")
img03 = load_image_tensor("test_images/orange_01.JPG")
images = tf.concat([img01, img02, img03], 0)
print(images.shape)
# +
color_layer = ColorExtractor(3, 20)
res = color_layer(images)
# -
res
def plot_colors(colors):
    """Display the given RGB colors as a three-slice pie chart (equal slices)."""
    hex_colors = [RGB2HEX(c) for c in colors]
    plt.figure(figsize=(8, 6))
    plt.pie([3, 3, 3], colors=hex_colors)
    plt.show()
# +
res *= 255
images_colors = res.numpy()
plot_colors(images_colors[0])
plot_colors(images_colors[1])
plot_colors(images_colors[2])
# -
# | src/notebooks/tf_kmeans.ipynb  (source-notebook separator; commented out — the bare pipe expression is not valid Python)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 0.0 RedMetrics data preparation
# # Preparation
#
# Imports libraries
#
# Loads RedMetrics data
# +
# %matplotlib inline
print("0.0 RedMetrics data preparation")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
import re
import json
import datetime
import random
from random import randint
from ipywidgets import FloatProgress,IntProgress,IntText,Text,interact,interactive,IntSlider,FloatSlider
from IPython.display import display
from itertools import chain
from scipy.stats import ttest_ind
# -
pd.__version__
np.__version__
processRMDF1522 = not ('rmdf1522' in globals())
processRMDF160 = not ('rmdf160' in globals())
processRMDFTest = not ('rmrdftest' in globals())
# ### common variables
# +
last1522DataFilesNamesStem = "2018-04-27"
last160DataFilesNamesStem = "2018-07-05"
dataFilesNamesStem = "2018-07-05"
dataFolderPath = "../../data/"
version1522Suffix = '-1.52.2'
version160Suffix = '-1.60'
processedSuffix = "-processed"
testUsersSuffix = "-testUsers"
testSuffix = ".test"
csvEncoding = 'utf-8'
csvSuffix = '.csv'
tutorialStem = "tutorial"
tutorial1Stem = tutorialStem + "1"
tutorial2Stem = tutorialStem + "2"
checkpointStem = ".Checkpoint"
# +
chapterCount = 11
chapterArrayInt = [i for i in range(chapterCount)]
chapterArrayStr = sorted(['"' + str(i) + '"' for i in chapterArrayInt])
checkpointCount = 15
checkpointArrayInt = [i for i in range(checkpointCount)]
checkpointArrayStr = sorted([tutorialStem + checkpointStem + "{0:0=2d}".format(i) for i in checkpointArrayInt])
# +
processedRMDF1522Path = dataFolderPath + last1522DataFilesNamesStem + version1522Suffix + processedSuffix + csvSuffix
processedRMDF160Path = dataFolderPath + dataFilesNamesStem + version160Suffix + processedSuffix + csvSuffix
rmrdf1522Path = dataFolderPath + last1522DataFilesNamesStem + version1522Suffix + csvSuffix
rmrdf160Path = dataFolderPath + dataFilesNamesStem + version160Suffix + csvSuffix
rmrdfTestPath = dataFolderPath + dataFilesNamesStem + testSuffix + csvSuffix
testUsersPath = dataFolderPath + testUsersSuffix + csvSuffix
# -
rmdfLoadColumnNames = ['id', 'serverTime', 'userTime',\
'playerId', 'playerCustomData',\
'type', 'coordinates','section',\
'customData.biobrick', 'customData.devices',\
'customData.slot', 'customData.sound',\
'customData','customData.duration',\
'customData.nanobot', 'customData.language',\
'customData.controls', 'customData.chapter',\
'customData.life','customData.source',\
'customData.platform','customData.localplayerguid',\
'customData.sametab', 'customData.device',\
'customData.energy', 'customData.option',\
'customData.newtab','customData.dnabit',\
'customData.count', 'customData.plasmid',\
'customData.total', 'customData.message',\
'customData.graphics']
### columns
# In RedMetrics data, 'playerId' is actually a session ID.
# Permanent player IDs are stored as 'localplayerguid' in 'customdata' attached to 'start' events.
rmdfMinimalInitializationColumns = ['customData.localplayerguid']
rmdfInitializationColumns = ['customData.localplayerguid', 'playerId', 'type', 'serverTime', 'customData.platform']
# TODO check use
# rmdfRelevantColumns = ['sessionId', 'serverTime', 'userId', 'customData.platform']
rmdfPlayerFilteringColumns = ['sessionId', 'userId', 'customData.platform', 'serverTime']
rmdfCheckpointsRelevantColumns = ['sessionId', 'userId', 'type', 'section']
rmdfRelevantColumns = ['customData.localplayerguid', 'playerId', 'type']
rmdfRenamedRelevantColumns = ['userId', 'sessionId', 'type']
# # Functions
# +
## Loading
### Data format fixes
def userIdConverter(uId):
    """Normalize a raw user-id cell: '' for nan/null-ish values, else strip quotes."""
    text = str(uId)
    # pandas renders missing values as 'nan' (and the export uses 'null');
    # both start with 'n'.
    return '' if text.startswith('n') else text.replace('"', '')
def sectionConverter(section):
    """Collapse versioned section names ('tutorial1.', 'tutorial2.') onto the
    single 'tutorial.' namespace by dropping a 1 or 2 directly before a dot."""
    return re.sub(r'[12]\.', '.', section)
# date string to pd.Timestamp
# RedMetrics timestamps are always UTC according to doc
# https://github.com/CyberCRI/RedMetrics/blob/master/API.md
rmdfDateparse = lambda x: pd.to_datetime(x, utc=True)
def safeGetNormalizedRedMetricsCSV( df ):
    """Rename RedMetrics raw columns to their analysis names
    (customData.localplayerguid -> userId, playerId -> sessionId)."""
    renames = {'customData.localplayerguid': 'userId', 'playerId': 'sessionId'}
    return df.rename(index=str, columns=renames)
def getNormalizedRedMetricsCSV( df ):
    """Ensure the minimal initialization columns are present (deduplicated
    against df's own columns), then normalize the column names."""
    required = np.concatenate((rmdfMinimalInitializationColumns, df.columns.values))
    newColumns = np.unique(required)
    return safeGetNormalizedRedMetricsCSV(df.loc[:, newColumns])
def writeTestUsers(testUsers):
    """Persist the single-column 'userId' test-users frame to testUsersPath,
    sorted by id and reindexed; complains (without raising) otherwise."""
    try:
        columns = testUsers.columns
        if (len(columns) == 1) & (columns[0] == 'userId'):
            ordered = testUsers.sort_values(by='userId')
            ordered.index = range(len(ordered))
            ordered.to_csv(testUsersPath, encoding=csvEncoding)
        else:
            print("incorrect testUsers parameter")
    except Exception as e:
        print("writeTestUsers failed: " + str(e))
def writeRMDF(rmdf, rmdfPath):
    # Persist a processed RedMetrics dataframe to csv with the project encoding.
    rmdf.to_csv(rmdfPath, encoding=csvEncoding)
# -
# ## Filtering
# +
def getAllSessions( _rmDF, dropna ):
    """Return one (userId, sessionId) row per 'start' event of _rmDF.

    _rmDF must contain the columns listed in rmdfRenamedRelevantColumns
    (userId, sessionId, type). If dropna is True, rows with a missing
    userId or sessionId are discarded.
    """
    _result = _rmDF.loc[:, rmdfRenamedRelevantColumns]
    _result = _result[_result['type']=='start']
    # bug fix: the positional `axis` argument of drop() was deprecated in
    # pandas 1.0 and removed in 2.0 — name the column explicitly.
    _result = _result.drop(columns='type')
    if dropna:
        _result = _result.dropna(how='any')
    return _result
# gets sessions which either:
# - have 'android' or '...editor' as platform
# - are in the RedMetrics test channel
def getTestSessions(_rmDF, _rmTestDF, includeAndroid = True, includeEditor = True, includeTest = True):
    """Collect the sessionIds considered test traffic.

    A session counts as a test session when its platform is "android", when
    its platform string ends in editor", or when it appears in the
    RedMetrics test-channel dump (_rmTestDF). Each source can be toggled off
    through the corresponding keyword.
    """
    testSessions = set()
    if(includeAndroid):
        androidMask = _rmDF['customData.platform'].isin(['"android"'])
        testSessions |= set(_rmDF[androidMask]['sessionId'])
    if(includeEditor):
        editorMask = _rmDF['customData.platform'].apply(lambda s: str(s).endswith('editor"'))
        testSessions |= set(_rmDF[editorMask]['sessionId'])
    if(includeTest):
        testSessions |= set(_rmTestDF['sessionId'])
    return testSessions
# gets sessions which either:
# - have 'android' or '...editor' as platform
# - are in the RedMetrics test channel
# - belong to a user who has a session of the type above
def getTestUsersSessions(
        _rmDF,
        _rmTestDF,
        includeAndroid = True,
        includeEditor = True,
        includeTest = True,
        otherTestUsers = set(),
                        ):
    """Return (testUsers, allTestSessions).

    testUsers: users owning at least one test session (android/editor
    platform, RedMetrics test channel) plus any ids in otherTestUsers.
    allTestSessions: every explicit test session, plus every session
    belonging to a test user.

    NOTE: the mutable default `otherTestUsers=set()` is safe here because
    the set is only read (unioned), never mutated.
    """
    # user <-> session association table for the main dataframe
    rmDFUsersSessions = getAllSessions(_rmDF, False)
    testSessions = getTestSessions(_rmDF, _rmTestDF,
               includeAndroid = includeAndroid, includeEditor = includeEditor, includeTest = includeTest)
    ## users from _rmDF who have test sessions
    rmDFTestUsers = set(rmDFUsersSessions[rmDFUsersSessions['sessionId'].isin(testSessions)]['userId'].dropna())
    ## all the users from _rmTestDF
    rmTestDFTestUsers = set(_rmTestDF['userId'].dropna())
    # bug fix: remove('') raised KeyError whenever no empty id was present;
    # discard() is the no-op-safe equivalent.
    rmTestDFTestUsers.discard('')
    ## merge
    testUsers = otherTestUsers | rmDFTestUsers | rmTestDFTestUsers
    # every explicit test session, plus every session owned by a test user
    allTestSessions = testSessions | set(rmDFUsersSessions[rmDFUsersSessions['userId'].isin(testUsers)]['sessionId'].dropna())
    return (testUsers,allTestSessions)
# -
# ## Load and process
# if the processing of the rmrdfs has already been done,
# just load the preprocessed rmdfs
#if processRMDF1522:
def loadProcessedRMDFs():
    """Try to load the cached test-user set and both processed RedMetrics
    dataframes from disk.

    Returns (rmdfTestUsers, rmdf1522, rmdf160); any part that could not be
    read keeps its empty default (empty set / empty list), which signals the
    caller to rebuild it from the raw csvs.
    """
    ## Try loading the pre-processed dataframe
    rmdfTestUsers = set()
    rmdf1522 = []
    rmdf160 = []
    try:
        rmdfTestUsers = set(pd.read_csv(testUsersPath, dtype=str)['userId'])
        print("rmdfTestUsers read_csv success (1/3)")
        rmdf1522 = pd.read_csv(\
            processedRMDF1522Path,\
            dtype=str, parse_dates=['serverTime','userTime'],\
            date_parser=rmdfDateparse,\
        )
        # Drop the unnamed index column written by a previous to_csv round-trip.
        if rmdf1522.columns[0] == 'Unnamed: 0':
            rmdf1522 = rmdf1522.iloc[:,1:]
        print("rmdf1522 read_csv success (2/3)")
        rmdf160 = pd.read_csv(\
            processedRMDF160Path,\
            dtype=str, parse_dates=['serverTime','userTime'],\
            date_parser=rmdfDateparse,\
        )
        if rmdf160.columns[0] == 'Unnamed: 0':
            rmdf160 = rmdf160.iloc[:,1:]
        print("rmdf160 read_csv success (3/3)")
    except FileNotFoundError:
        print("rmdfs will be loaded, processed, saved")
    return (rmdfTestUsers, rmdf1522, rmdf160)
# +
### RMDFTest loading
# necessary variables for RMDFTest loading:
# dataFolderPath
# dataFilesNamesStem
# dateparse
# userIdConverter
# rmdfLoadColumnNames
# getNormalizedRedMetricsCSV
# raw redmetrics df loading
def loadRMRDF(rmdfPath):
    """Load a raw RedMetrics csv export and normalize it.

    Parses the two timestamp columns as UTC, cleans user ids and section
    names through the converters, restricts to rmdfLoadColumnNames, and
    renames the id columns (guid -> userId, playerId -> sessionId).
    """
    rmrdf = pd.read_csv(\
        rmdfPath,\
        dtype=str,\
        parse_dates=['serverTime','userTime'],\
        date_parser=rmdfDateparse,\
        converters={\
            'customData.localplayerguid':userIdConverter,\
            'section':sectionConverter,\
        }\
    )
    rmrdf = rmrdf.loc[:,rmdfLoadColumnNames]
    normalizedRMDF = getNormalizedRedMetricsCSV(rmrdf)
    return normalizedRMDF
# +
# processing of raw redmetrics dfs
# rmdfTestUsers is a set
# rmdf1522 is assumed to be set
# rmrdfPath raw df path for reading
# rmdfPath processed df path for writing
def processRMDF(rmrdfPath, rmdfPath, normalizedRMDFTest, rmdfTestUsers):
    """Load a raw RedMetrics csv, strip test traffic, propagate user ids to
    every event of each session, and cache the result to disk.

    rmrdfPath: raw csv path (read); rmdfPath: processed csv path (written).
    normalizedRMDFTest: test-channel dataframe; rmdfTestUsers: set of known
    test users, extended here and persisted via writeTestUsers.
    Returns (rmdf, rmdfTestUsers).
    """
    #print("processRMDF start")
    normalizedRMDF = loadRMRDF(rmrdfPath)
    #print("call to getTestUsersSessions...")
    (rmdfTestUsers, allTestSessions) = getTestUsersSessions(
        _rmDF = normalizedRMDF,
        _rmTestDF = normalizedRMDFTest,
        otherTestUsers = rmdfTestUsers,
    )
    #print("call to getTestUsersSessions done")
    writeTestUsers(pd.DataFrame(data=list(rmdfTestUsers), columns=['userId']))
    # Keep only non-test sessions.
    rmdf = normalizedRMDF[~normalizedRMDF['sessionId'].isin(allTestSessions)]
    #print("userSessions")
    # Distinct (userId, sessionId) pairs where the user id is known.
    userSessions = rmdf[rmdf['userId']!=''].loc[:,['userId','sessionId']].dropna(how='any').drop_duplicates()
    # Progress widgets for the (slow) per-session id propagation below.
    intProgress = IntProgress(min=0, max=len(userSessions.index))
    display(intProgress)
    intText = IntText(0)
    display(intText)
    #print("loop starting")
    # Propagate each session's user id to all rows of that session.
    # NOTE(review): `rmdf` is a filtered slice of normalizedRMDF; writing via
    # .loc here may trigger pandas' SettingWithCopyWarning — confirm whether
    # a .copy() is needed.
    for userSessionsIndex in userSessions.index:
        intProgress.value += 1
        intText.value += 1
        userId = userSessions.loc[userSessionsIndex, 'userId']
        sessionId = userSessions.loc[userSessionsIndex, 'sessionId']
        rmdf.loc[rmdf['sessionId']==sessionId,'userId'] = userId
    #rmdf1522['userId'].nunique(),userSessions['userId'].nunique(),\
    #rmdf1522[~rmdf1522['userId'].isin(userSessions['userId'].unique())],\
    #userSessions[~userSessions['userId'].isin(rmdf1522['userId'].unique())]
    #### Saving to csv
    #print("saving to csv")
    writeRMDF(rmdf, rmdfPath)
    #print("processRMDF done")
    return (rmdf, rmdfTestUsers)
# -
# # Execution
# Re-detect which dataframes still need to be produced in this session.
processRMDF1522 = not ('rmdf1522' in globals())
processRMDF160 = not ('rmdf160' in globals())
processRMDFTest = not ('normalizedRMDFTest' in globals())
processRMDF1522, processRMDF160, processRMDFTest
if processRMDF1522 or processRMDF160 or processRMDFTest:
    ## calls
    #print("STEP 1")
    # Prefer the cached, already-processed csvs; fall back to raw processing
    # for whichever dataframe came back empty.
    (testUsers, rmdf1522, rmdf160) = loadProcessedRMDFs()
    #print(type(rmdfTestUsers))
    process1522 = (len(rmdf1522) == 0)
    process160 = (len(rmdf160) == 0)
    normalizedRMDFTest = []
    if process1522 or process160:
        #print("STEP test")
        normalizedRMDFTest = loadRMRDF(rmrdfTestPath)
    if process1522:
        #print("STEP 1522")
        (rmdf1522, testUsers) = processRMDF(rmrdf1522Path, processedRMDF1522Path, normalizedRMDFTest, testUsers)
    if process160:
        #print("STEP 160")
        (rmdf160, testUsers) = processRMDF(rmrdf160Path, processedRMDF160Path, normalizedRMDFTest, testUsers)
    # concatenation of all redmetrics events pertaining to 1.52.2 survey: rmdf1522 and rmdf160
    rmdfConcat = pd.concat([rmdf1522, rmdf160])
    rmdfConcat.index = range(0, len(rmdfConcat.index))
    #print("STEP done")
#else:
    #print("all done")
# +
#rmdf1522['userId'].nunique(), rmdf160['userId'].nunique()
# +
#rmdf1522['userTime'].min()
# -
# # All versions
# rdf = pd.concat([part100,
# part131, part132, part133,
# part140,
# part150, part151, part1522])
#
# df = getNormalizedRedMetricsCSV(rdf)
# # Old versions
# rdf100 = pd.read_csv("../../data/1.0.csv")
# rdf131 = pd.read_csv("../../data/1.31.csv")
# rdf132 = pd.read_csv("../../data/1.32.csv")
# rdf133 = pd.read_csv("../../data/1.33.csv")
# rdf140 = pd.read_csv("../../data/1.40.csv")
# rdf150 = pd.read_csv("../../data/1.50.csv")
# rdf151 = pd.read_csv("../../data/1.51.csv")
#
# part100 = rdf100.loc[:,relevantColumns]
# part131 = rdf131.loc[:,relevantColumns]
# part132 = rdf132.loc[:,relevantColumns]
# part133 = rdf133.loc[:,relevantColumns]
# part140 = rdf140.loc[:,relevantColumns]
# part150 = rdf150.loc[:,relevantColumns]
# part151 = rdf151.loc[:,relevantColumns]
# # Tests
# rdftest = pd.read_csv("../../data/2017-10-11.test.csv")
# dftest = getNormalizedRedMetricsCSV(rdftest)
# TOD: get rid of warning
#
# DtypeWarning: Columns (18,22,28,32,38) have mixed types. Specify dtype option on import or set low_memory=False.
# interactivity=interactivity, compiler=compiler, result=result)
#
# using https://stackoverflow.com/questions/24251219/pandas-read-csv-low-memory-and-dtype-options
# | v1.52.2/Functions/0.0 RedMetrics data preparation.ipynb  (source-notebook separator; commented out — the bare pipe expression is not valid Python)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # The last ray of sunshine
# ### You are one of the most renowned investigators in the world. An email arrives asking your help:
# <img src="the_email.png" width=600 height=400 />
# ### You book the first ticket to the North of Canada and drive to the far north. The place is desolate and icy. In front of you there is a weird looking cave. You get out of the car and approach the entrance of the cave. Before you know it there is noise behind you and evil Dr. Cane and helpers are charging toward you with their bear cavalry. You get arrested and thrown in a dungeon deep in the cave.
#
#
# <img src="evil_dr_storm.png" width=200 height=200 />
#
# ### A screen lights up and a video starts playing. Dr. Cane appears on the screen laughing his evil laugh.
# Prompt for the player's name, then print Dr. Cane's taunts.
your_name = input('Please give me your name: ')
print('\n Welcome '+ your_name +
      """. You really think that you can save summers from me??? Dr. Cane? Dr. HARRY Cane!? Muahahaha!""")
print("""Just to prove you wrong I left some hints for you around the room.
They could help you escape and find my weather machine. But you will never make it on time! Muahahaha!""")
print("""I will release the eternal storms on April 28 at 12pm. Hope you like swimming!""")
# ## Problem 1. <br> Looking for a way out ( **10 points** )
print( your_name + """, let's save the summers together. To escape from this
room you will need to use the coding knowledge you got this semester.""")
print('\n')
print("""Don't forget that a good log is part of a successful mission! Start your log please! (3 points)""")
print('\n')
print("""You will need to import the necesary packages to do numerical problems and plot images. Start
by doing all your imports here. (5 points)""")
# +
#import your modules here:
import numpy as np
import math
import scipy as sp
from scipy.stats import chisquare
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import Axes3D
import os
# -
print("""You also need to create an empty list named *key_code* that you will slowly populate
with the keys you get as you solve the puzzles (2 points)""")
key_code = []
print("""Let's find our way out of here! """)
# ### You take a look around the dungeon.
# <img src="the_escape_room.png" width=500 height=300 />
#
# ### At the other side you see the door. Next to it there is something that looks like a key pad. The hints that evil Dr. Cane left all around the room are needed to get the code and escape out of this room...
#
# <img src="the_door.png" width=300 height=300 />
# ### Desks, books, paintings, chests, chairs are all over the place. This room is so full of possible hiding places. Where to start?
# ## Problem 2. <br> The encrypted note ( **15 points** )
# ### Walking across the dungeon you notice a weird looking painting. It smells of fresh paint. Dr. Cane must have just made this for some reason! You take a closer look. It depicts the dungeon you are in, but it looks slightly different. There is an open book on the floor. At first glance the book contains a weird collection of numbers. What could this mean? You suddenly realize it's a code.
#
# - Make a list *message* that contains all the numbers on the note (3 points).
# - Write a dictionary *letters_to_numbers* that has as keys letters of the alphabet and as values numbers 1 to 26 (5 points).
# - Make a code that loops over the numbers in the note and uses the dictionary to decode the message (5 points). Print the message out (2 points).
print( 'This is what is written on the book:')
print(""" 20 9 3 11 20 15 3 11 20 9 3 11 20 15 3 11 20 9 13 5 9 19 18 21 14 14 9 14 9 7 15 21 20  """)
# The numbers copied from the book, in order.
message = [20,9,3,11,20,15,3,11,20,9,3,11,20,15,3,11,20,9,13,5,9,19,18,21,14,14,9,14,9,7,15,21,20]
# Simple substitution cipher: A=1 ... Z=26.
letters_to_numbers = {'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'H':8, 'I':9, 'J':10, 'K':11, 'L':12, 'M':13, 'N':14, 'O':15, 'P':16, 'Q':17, 'R':18, 'S':19, 'T':20, 'U':21, 'V':22, 'W':23, 'X':24, 'Y':25, 'Z':26}
# Decode by reverse lookup: print the letter whose value matches each number.
for j in message:
    for i in letters_to_numbers:
        if j == letters_to_numbers[i]:
            print(i, end='')
# ## Problem 3. <br> The clock on the wall ( 15 points)
# ### Just as you decode the message a cuckoo clock goes off somewhere in the room. The time is ticking indeed....You walk toward the clock. It looks old, and has a big mirror at the bottom part. The cuckoo wears glasses?!?! You look around and notice a pair of glasses on the floor that looks exactly like the one the cuckoo wears. Can it be part of the solution? You put the glasses on and look at the clock. There is a secret message painted on the mirror of the clock!
# - Use your favorite numpy routine to read in the data from 'the_clock_mirror.dat' (3 points)
# - Make a contour plot using contouring levels 0, 1, 2. (4 points)
# - Make the plot publication ready. (3 points)
#
# - Populate your *key_code* with the number you see on the mirror (2 points). Print the *key_code* (1 points).
# Problem 3: read the mirror data and reveal the hidden digit as a filled contour.
clock_data = np.loadtxt('the_clock_mirror.dat')
plt.figure( figsize = (10,10))
plt.contourf(clock_data, cmap = 'Blues')
plt.yticks( np.arange( 0, 25, step = 10 ) ,fontsize=36 )
plt.xticks( np.arange( 0, 25, step = 10) ,fontsize=36 )
plt.xlabel( 'X', fontsize = 36 )
plt.ylabel( 'Y' , fontsize = 36, rotation=0 )
plt.title( 'The Clock Mirror Clue' , fontsize = 36 )
# The digit read off the mirror plot is the first key-pad number.
key_code.append(3)
print(key_code)
# ## Problem 4. <br> The wooden chest ( 20 points)
# ### You feel excited! You got the first number. However, you have no idea where to look at next...did you miss something? You put on the glasses again and look around. On the opposite wall you see another hidden message written.
# - Read the data from 'the_secret_message.dat' (1 point)
# - Make a contour plot using contouring levels 0, 1, 2 and 3. (2 points)
# - The message is flipped. Use the appropriate plt command to flip the y-axis (2 points).
secret_message = np.loadtxt('the_secret_message.dat')
# +
# The message is painted mirrored: flip it vertically before contouring.
secret_message = np.flip(secret_message,0)
plt.figure( figsize = (10,10))
plt.contourf(secret_message, cmap = 'Blues')
plt.yticks( np.arange( 0, 15, step = 5 ) ,fontsize=36)
plt.xticks( np.arange( 0, 55, step = 10) ,fontsize=36)
plt.xlabel( 'X', fontsize = 36)
plt.ylabel( 'Y' , fontsize = 36, rotation = 0)
plt.title( 'The Secret Message Clue' , fontsize = 36)
# -
# ### You look up. There is a big wooden chest on an elevated floor. You go running to the ladder and climb it. You approach the chest and open it. It is full of torn pages from a book. You remove them from the box and see a picture of <NAME> laughing his evil laughter. Interestingly, the first page has a hand-written 'muahaha' at the top. Can this be the next clue?
# - Write a code that reads the the_torn_book.dat line by line (5 points), and counts how many many times the word 'muahaha' appears in it (5 points).
# - The code should then print an informative statement to let you know how many evil laughters it found (2 points).
#
# ### This is the next number you need for the key pad.
# - Populate your *key_code* (2 points) and print the current *key_code* out (1 point) !
#
# #### Warning: DO NOT change the lower/upper case of the words in the book; you might get a wrong answer!
# Count the evil laughs line by line. Improvement: use a context manager so
# the file handle is closed deterministically (the original left it open).
with open('the_torn_book.dat', 'r', encoding='utf-8') as f:
    counter = 0
    for line in f:
        #print(line)
        # NOTE(review): this counts *lines* containing 'muahaha', not total
        # occurrences — kept as-is, since the puzzle key (7) was derived
        # from this counting rule.
        if 'muahaha' in line:
            counter = counter + 1
print("My investigation uncovered", counter, "evil laughs!")
key_code.append(7)
print(key_code)
# ## Problem 5. <br> But wait, there is more. (15 points)
# ### Dr. Cane left you a message on the last line of the book
# - Adapt your previous code to print it out (4 points).
# +
f = open('the_torn_book.dat','r',encoding='utf-8')
# NOTE(review): `for line in f` consumes the first line, readlines() then
# drains the rest so the loop body runs exactly once and prints the last
# line — it works, but only by accident; it also raises IndexError on a
# one-line file. A plain f.readlines()[-1] would be clearer.
for line in f:
    last_line = f.readlines()[-1]
    print(last_line)
# -
# ### What could this mean? You look at the book contents and suddenly it all makes sense! The text is full of numbers. You just need to add the counts of each number equal to one, two, three and seven, and then use the equation Dr. Storm gave you to calculate the next key.
#
# - Make a code that reads all the book (2 points) and counts how many 'one's, 'two's,'three's and 'seven's the book has (4 points).
# - Use these sums in the equation Dr. Cane gave you (2 point).
# - The integer part is the next number you need for the key pad (1 point).
#
# - Populate your *key_code* with this number (1 points).
# - Print the *key_code* (1 points).
f = open('the_torn_book.dat','r',encoding='utf-8')
ones = 0
twos = 0
threes = 0
sevens = 0
# NOTE(review): substring matching counts *lines* containing each word, and
# 'one' also matches inside words like 'money' or 'done' — presumably the
# puzzle was authored with exactly this rule in mind; confirm before changing.
for line in f:
    #print(line)
    if 'one' in line:
        ones = ones + 1
    if 'two' in line:
        twos = twos + 1
    if 'three' in line:
        threes = threes + 1
    if 'seven' in line:
        sevens = sevens + 1
print(ones, twos, threes, sevens)
# Dr. Cane's formula: ones/sevens + twos/threes, truncated to an integer.
integer = int(ones/sevens+twos/threes)
print(integer)
key_code.append(integer)
print(key_code)
# ## Problem 6 <br> Around, around spins the world (10 points)
# ### At the bottom of the chest there is a little holographic machine with a big red button. You feel the urge to press it. This ***must*** be the clue about where to look next.
#
# - Read data from file the_holograph.dat and assign the three columns to variables x1, y1, z1 (2 points).
# - Make a 3D scatter plot with your x1,y1,z1 data (5 points).
# - Save the 3D plot with an appropriate name (2 points).
# - What does it look like? Where is your next clue in the room? Print an informative statement with your answer. (1 point)
# Load the holograph data and pull out its first three columns as the
# x/y/z coordinates of the point cloud.
holograph = np.loadtxt('the_holograph.dat')
x1, y1, z1 = (holograph[:, col] for col in range(3))
# +
# Render the holograph as a 3-D scatter plot and save it to disk.
fig = plt.figure(figsize=(16, 16))
ax = fig.add_subplot(111, projection='3d')
x1 = holograph[:, 0]
y1 = holograph[:, 1]
z1 = holograph[:, 2]
ax.scatter(x1, y1, z1, c='blue', marker='d')
ax.set_xlabel('X1')
ax.set_ylabel('Y1')
ax.set_zlabel('Z1')
# FIX: save *after* labelling — the original called savefig() before
# setting the labels, so they never appeared in Holograph.png.
plt.savefig('Holograph.png', format='png', dpi=100, bbox_inches='tight', pad_inches=0.3)
# -
# Answer to the prompt: the 3-D scatter resembles a rotating storm.
print('It looks like a tornado or hurricane!')
# ## Problem 7 <br> The puzzles (24 points)
# ### You look around the room. There are some nature pictures, but none with a tornado or a hurricane. Nothing makes sense...As you wander around the room you notice something is odd. Vermeer's girl with the pearl earring is odd? You approach and see the girls face is replaced with Dr. <NAME>'s... Can this be it? You turn the painting around and find a paper with two puzzles:
# ### The first puzzle:
# - The original of this painting was made in what year? Google it! Sum the numbers of the year up. (so if it was made in 1111 the answer is 1 + 1 + 1 + 1 = 4) (1 point)
# - Use the appropriate numpy function to calculate the mean (2 points), median (2 points) and trace (2 points) of the what_is_the_key array (see below). Which one matches your previous sum?
# - If the mean : u = 2, if the median: u = 9, if the trace: u = 3 (1 point)
# ### The second puzzle:
# - An alien at Proxima Centauri b looks toward our Sun with their telescope and records a transit (a planet crosses in front of the Sun disk). The planet caused a dip in the light the alien saw of $\Delta$f=0.007456. The event repeated after 10,740 days. Using Doppler spectroscopy the alien found that the planet causes the Sun to wobble with a velocity semi-amplitude of 2.76m/s. Which planet did the alien see? If Mercury is 1, Venus is 2, this planet is N.
#
# - Find the Radius and Mass of the planet using the information from above and these tips:
# - Tip for the transit: you know that $\Delta$f $\sim \frac{R^2_{planet}}{R^2_{star}}$. Use this to find the radius of the planet in km, if you know that the Sun has a radius of 696,000km (5 points)
# - Tip for the wobbling: you know that the velocity semi-amplitude is given by $K = \frac{28.43 [m/s]}{\sqrt{1-e^2}} \frac{m_{planet}\sin i}{m_{Jupiter}}(\frac{m_{planet}+m_{Sun}}{m_{Sun}})^{-2/3} (\frac{P}{1 year})^{-1/3}$. Use this to find the mass of the planet. You can assume that e = 0 and sini = 1. Also, note that $m_{planet} << m_{Sun}$ (5 points)
#
# - Now that you know the Radius, Mass and Period of the planet, compare against this NASA database: https://nssdc.gsfc.nasa.gov/planetary/factsheet/
# - Which is the planet the alien saw? (3 points)
# - What is the number N of the planet (if Mercury=1, Venus =2, etc)? Print an informative statement. (1 point)
# - Remember that the Moon is *not* a planet!
#
#
# ### The next key:
# - What is the number that is equal to N/u ? Populate your *key_code* with the number (1 points).
# - Print the *key_code* (1 points).
"""The first puzzle"""
what_is_the_key = np.array( ([ 7, 23, 45, 37, -2, 12, 34 ],
[ 22, 1, 34, 18, 54, 33, 9 ],
[ 8, 19, 2, 42, 32, 2, 17 ],
[ 98, 9, 14, 2, 12, 33, 7 ],
[ 12, 10, 4, 54, 2, -8, 7 ],
[ 77, 22, -2, 24, -8, 3, 7 ],
[ 122, 33, 3, 3, 4, 33, 1 ] ) )
vermeer = 1+6+6+5
print(vermeer)
mean = np.mean(what_is_the_key)
print(mean)
median = np.median(what_is_the_key)
print(median)
trace = np.trace(what_is_the_key)
print(trace)
u = 3
print('u = 3')
"""The second puzzle"""
radius_N = np.sqrt(0.007456*(696000**2))
print('The radius of Planet N is:',radius_N,'Km')
P = 10740
e = 0
sini = 1
M_Jupiter = 1898
M_Sun = 1988500
K = 0.007456
# +
#Equation becomes a Quadratic--> finding coefficients a,b,c
#a = M_Planet**2
#b= M_Planet*M_Sun
#c = (M_Jupiter/sini)*M_Sun*((K/(P/365)**(-1/3))*(((np.sqrt(1-(e**2)))/28.43))**(-3/2)
# Rearranging the radial-velocity relation (with m_planet << m_Sun) into
# the quadratic a*m^2 + b*m + c = 0 solved below.
a = 1
b = M_Sun
# NOTE(review): the exponent grouping here does not match the commented
# derivation above term-for-term — verify against
# K = 28.43/sqrt(1-e^2) * (m_p sin i / m_Jup) * ((m_p+m_Sun)/m_Sun)^(-2/3) * (P/1yr)^(-1/3).
c = ((((K/(P/365))**(-1/3))*((np.sqrt(1-(e**2)))/28.43))**(-3/2))*M_Sun*(M_Jupiter/sini)
coeff = [a,b,c]
# -
# np.roots returns both solutions; the physical (positive) root is the
# planet-mass estimate.
x = np.roots(coeff)
print(x)
# +
#Calculator check of answers
#a2 = 1
#b2 = 1988500
#c2 = -0.5213276
#coeff2 = [a2,b2,c2]
#y = np.roots(coeff2)
#print(y)
# +
#https://nssdc.gsfc.nasa.gov/planetary/factsheet/
#Period of planet = 10740
#Mass of planet =
#Radius of planet =
# -
# Saturn is the 6th planet (Mercury = 1, ..., Saturn = 6).
N = 6
print('The planet N is Saturn with the corresponding value', N)
# FIX: the key pad needs the digit 2, but true division appended the
# float 2.0; integer division keeps it an int (6 // 3 == 2).
code_number = N // u
print(code_number)
key_code.append(code_number)
print(key_code)
# ## Problem 8. <br> The bug exterminator (15 points)
# On the other side of the room you see a chair in front of a computer. The chair has an 8 scratched on it. This must be where the next clue lies. You walk to the chair and look at the computer screen. There is some code, but it is full of bugs. Does Dr. Cane think that this can stop you?
#
# Debug the code to find the next number(s). (8 points)
#
# Run the code for wheel radii: r1 = 0.5 m , r2 = 0.8 m, times the cars moved: t1 = 12 s, t2 = 8 s, and wheel rotations of N1 = 8 rotations and N2 = 5 rotations. The integer part of the speed that the code returns is the next part of the key. (2 points)
#
# If you get 2 digit speed make a code that will split it in two numbers: e.g., if you get a speed of 42 m/s the code will append to the key a 4, and then a 2. Don't split it manually. (3 points)
#
# Populate your key_code with the number(s) (1 points).
#
# Print the key_code (1 points).
# +
# Code that calculates the distance traveled by a car knowing how much time it moves and
# how many rotations its wheels, with a known perimeter, made.
# Code then will compare the distance that two cars moved based on the time they move
# and their wheels perimeters and tells you which one moved further and for that car
# at what speed it moved.
# Ask the user for the two radii of the car wheels:
radius1 = input( 'Give me the first radius please' )
radius2 = input( 'Give me the second radius please' )
# Calculate the perimeter of the two wheels (convert to float first):
radius_1 = float(radius1)
radius_2 = float(radius2)
perimeter1 = radius_1*2*np.pi  # circumference = 2*pi*r
perimeter2 = radius_2*2*np.pi
print(perimeter1)
print(perimeter2)
# Ask the user for how much time the two cars move:
time1 = float(input( 'How much time did the first car move?' ))
time2 = float(input( 'How much time did the second car move?' ))
# Ask the user how many full circles their wheels turned:
N1 = float(input( 'How many circles did the wheels of car 1 turn?' ))
N2 = float(input( 'How many circles did the wheels of car 2 turn?' ))
# Calculate the total distance each car moved:
# NOTE(review): distance = rotations * perimeter; the extra time factor is
# kept from the original "debugged" version, so this is really
# rotations * perimeter * time — confirm against the intended key.
distance1 = N1 * perimeter1 * time1
distance2 = N2 * perimeter2 * time2
# Compare the two distances and, for the car that moved further, print
# which one it is and what its speed is:
if distance1 > distance2:
    print( 'Car 1 moved further than Car 2' )
    speed1 = distance1 / time1
    print( 'Car 1 moved with a speed of: ', speed1 , 'm/s.' )
elif distance2 > distance1:
    print( 'Car 2 moved further than Car 1' )
    speed2 = distance2 / time2
    print( 'Car 2 moved with a speed of: ', speed2 , 'm/s.' )
else:
    # FIX: the original message was garbled ("... with Car 1").
    print( 'Car 1 and Car 2 moved an equal distance' )
    speed1 = distance1 / time1
    speed2 = distance2 / time2
    # FIX: the two strings were adjacent with no comma between them, so
    # Python concatenated them into "m/sand Car 2 ...".
    print( 'Car 1 moved with a speed of: ', speed1 , 'm/s',
           'and Car 2 moved with a speed of: ', speed2, 'm/s.')
# +
#Debugging:
#1. There was a syntax error in the radius2 input, originally there were two radius1 inputs
#2. I converted radius1 and radius2 into floats so I could perform the maths
#3. To calculate perimeter the correct equation is 2*pi*radius
#4. I converted time1 and time2 into floats so I could perform the maths
#5. I converted N1 and N2 into floats so that I could perform the maths
#6. I changed the equation to find the total distance from diameter to perimeter
#7. For distance2, I ensured the values all matched '2'
#8. The speeds for the else statement were reversed ie. car 1 was matched with speed 2
#9. The first if statement of the loop had a syntax error (missing 1 for speed1 calculation), the speed for the print statement was also missing the integer notation
#10.The last elif statement of the loop had a syntax error (missing 2 for speed2 calculation), the speed for the print statement was also missing the integer notation
#11. The elif statement had the signs of the equation it was checking reversed (distance2 < distance1 has already been checked by the if loop)
#12. There was a typo in the print statement of the elif statement (Car1 instead of Car2)
# +
#r1 = 0.5 m
#r2 = 0.8 m
#t1 = 12 s
#t2 = 8 s
#N1 = 8 rotations
#N2 = 5 rotations
# -
# Split the integer speed into its decimal digits and add each digit to
# the key code individually.
integer_speed = int(speed1)
numbers = [int(digit) for digit in str(integer_speed)]
print(numbers)
# FIX: append() would add the whole list as ONE element (e.g. [2, 5]);
# the problem requires appending the digits one by one, so use extend().
key_code.extend(numbers)
print(key_code)
# ## Problem 9. <br> The model derivation (35 points)
# ### Looking across the room you see a big 9 next to a usb stick. This must be the next thing to look at. You plug the stick into the computer in the room. There is a single file with GPS data: timestamps, distances from some location and GPS errors. (10 points total)
#
# - Get the data from the table and assign them to numpy arrays named gps_time, gps_distance and gps_error. (3 points)
# - Plot the distance of the object as a function of time making a publication-ready plot *with errorbars*, axis labels and plot title. (5 points)
# - Use the appropriate python command to store the plot as a pdf. (2 points)
# time | distance | error
# --|:---------:|:-----------:
# 12.00 | 1137.61 | 45.0
# 12.54 | 1174.34 | 42.1
# 13.07 | 1209.87 | 34.0
# 13.61 | 1244.19 | 44.0
# 14.15 | 1277.31 | 48.0
# 14.68 | 1309.21 | 35.0
# 15.22 | 1339.93 | 25.0
# 15.76 | 1369.39 | 35.0
# 16.29 | 1397.67 | 41.0
# 16.83 | 1424.74 | 42.0
# 17.37 | 1450.61 | 45.0
# 17.90 | 1475.25 | 35.0
# 18.44 | 1498.69 | 45.0
# 18.98 | 1520.90 | 45.0
# 19.51 | 1541.94 | 45.0
# 20.05 | 1561.75 | 45.0
# 20.58 | 1580.35 | 35.0
# 21.12 | 1597.72 | 25.0
# 21.66 | 1613.93 | 15.0
# 22.21 | 1628.91 | 45.0
# 22.73 | 1642.67 | 35.0
# 23.27 | 1655.22 | 25.0
# 23.80 | 1666.57 | 41.0
# 24.34 | 1676.71 | 42.0
# 24.88 | 1685.63 | 45.0
# 25.41 | 1693.35 | 45.0
# 25.95 | 1699.92 | 45.0
# 26.48 | 1705.16 | 45.0
# 27.02 | 1709.25 | 45.0
# 27.56 | 1712.14 | 45.0
# 28.11 | 1713.81 | 45.0
# 28.63 | 1714.27 | 45.0
# 29.17 | 1713.48 | 45.0
# 29.71 | 1711.57 | 45.0
# 30.24 | 1708.41 | 45.0
# 30.78 | 1704.04 | 45.0
# 31.32 | 1698.45 | 45.0
# 31.85 | 1691.66 | 45.0
# 32.39 | 1683.66 | 45.0
# 32.93 | 1674.44 | 25.0
# 33.46 | 1664.02 | 45.0
# 34.00 | 1652.40 | 15.0
# GPS samples transcribed from the table above (42 points per array).
# gps_time: timestamps; gps_distance: distance from reference location;
# gps_error: per-point measurement uncertainty.
gps_time = np.array([12.00,12.54,13.07,13.61,14.15,14.68,15.22,15.76,16.29,16.83,17.37,17.90,18.44,18.98,19.51,20.05,20.58,21.12,21.66,22.21,22.73,23.27,23.80,24.34,24.88,25.41,25.95,26.48,27.02,27.56,28.11,28.63,29.17,29.71,30.24,30.78,31.32,31.85,32.39,32.93,33.46,34.00])
gps_distance = np.array([1137.61,1174.34,1209.87,1244.19,1277.31,1309.21,1339.93,1369.39,1397.67,1424.74,1450.61,1475.25,1498.69,1520.90,1541.94,1561.75,1580.35,1597.72,1613.93,1628.91,1642.67,1655.22,1666.57,1676.71,1685.63,1693.35,1699.92,1705.16,1709.25,1712.14,1713.81,1714.27,1713.48,1711.57,1708.41,1704.04,1698.45,1691.66,1683.66,1674.44,1664.02,1652.40])
gps_error = np.array([45.0,42.1,34.0,44.0,48.0,35.0,25.0,35.0,41.0,42.0,45.0,35.0,45.0,45.0,45.0,45.0,35.0,25.0,15.0,45.0,35.0,25.0,41.0,42.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,45.0,25.0,45.0,15.0])
# +
# Publication-style plot of GPS distance vs. time with error bars.
plt.figure( figsize = (12, 12) )
# FIX: the original drew the data twice — once with plt.plot and again
# with a bare plt.errorbar (which draws its own line) — overlaying two
# curves; a single errorbar call draws the line and the bars together.
plt.errorbar(gps_time, gps_distance, yerr=gps_error, color='blue',
             linestyle='-', linewidth=4, ms=2)
plt.xlabel( 'GPS time', fontsize = 35)
plt.ylabel( 'GPS distance' , fontsize = 35)
plt.title( ' GPS Time vs Distance' , fontsize = 35 )
plt.yticks( np.arange( 1130, 1700, step = 150) ,fontsize=35)
plt.xticks( np.arange( 10, 40, 5 ) ,fontsize=35 )
plt.savefig( 'GPS Time vs Distance.pdf', format = 'pdf', dpi = 300, bbox_inches = 'tight', pad_inches = 0.3 )
# -
# ### The model (25 points total):
#
# - Use the data to fit a model of the form $s = u * t$ and a model of the form $s = 0.5 * a * t^2 + u_o * t$. Print an informative statement informing the user about which model best describes the data (11 points) and why (3 points)?
#
# - tip: limit your scan of the u parameter in the 0.1 to 150 space for the first model with a step of 2; and the a in the -10 to +10 with a step of 0.1 and u0 in the 0.1 to 140 with a step of 10 for the second model
#
# - Is the object moving with a constant speed, or accelerating? (1 point)
# - What is the speed u or acceleration a of the best-fit model (8 points)?
# - Populate your *key_code* with the integer part of this number (best-fit speed or best-fit acceleration) (1 points).
# - Print the *key_code* (1 points).
# +
# Model 1: constant speed, s = u * t.  Grid-scan candidate speeds and keep
# the one with the smallest residual statistic.
# NOTE(review): the residual is divided by the model value, not by
# gps_error**2 as a true chi-square would be — confirm this is intended.
t = gps_time
s = gps_distance
u = np.arange(0.1, 150, 0.1)
r = np.zeros(len(u))
for i in range(len(u)):
    s_theory = u[i]*t
    r[i] = np.sum((s - s_theory)**2/s_theory)
# Index (as a np.where tuple) of the best-fitting speed.
q = np.where(r == np.min(r))
# FIX: removed two dead assignments (s_mod, u_mod) that always used the
# first grid value and were never read.
print(u[q[0]])
# +
# Model 2: constant acceleration, s = 0.5*a*t**2 + u0*t.
# Grid-scan both parameters; q holds the (row, col) indices of the minimum.
t = gps_time
s = gps_distance
a = np.arange(-10, 10, 0.1)
u0 = np.arange(0.1, 140, 10)
r1 = np.zeros((len(a), len(u0)))
for i in range(len(a)):
    for j in range(len(u0)):
        s2_theory = 0.5*a[i]*(t**2) + u0[j]*t
        r1[i, j] = np.sum((s - s2_theory)**2/s2_theory)
q = np.where(r1 == np.min(r1))
# FIX: removed dead assignments (s2_mod, a_mod, u0_mod) that were never used.
print(a[q[0]], u0[q[1]])
# -
print('The model that fits the data the best is model s2_theory=0.5*a[i]*(t**2)+u0[j]*t as the chi square value is the closest to 1')
# FIX: a[q[0]] is a one-element numpy array, so the original appended an
# array to the key code; the problem asks for the *integer part* of the
# best-fit acceleration, so append that as a plain int.
key_code.append(int(a[q[0]][0]))
print(key_code)
# ## Problem 10. <br> The data is the truth (15 points)
# ### You are getting close to the end. This is getting exciting! Where can the final clue be? You look around. What else is there? Dr. Cane sure has a fascination with nature....On the floor there is a nature painting showing waves and a Sun..But wait! There is something wrong. This is not just a painting. There are numbers across the 'waves'.
# - Read in the data from the_data.dat (1 point)
# The data contain the numbers you see on the painting. Years and some value associated with them.
#
# - What is the period of the data (i.e., the time it takes to go from one peak to the next)? Make a code that scans the data and checks a window of 6 values at a time. If a value is larger than 3 points before it and 3 points after it, it is stored as a peak. (12 points)
# - Tip: start your scanning from point 3 so that the edge of the window is at point 0
#
#
# - Populate your *key_code* with the number (1 points).
# - Print the *key_code* (1 points).
# Count peaks: a point is a peak when it exceeds both the value 3 steps
# back and the value 3 steps ahead.
# FIX: the original looped over *every* index, so data_t[i+3] raised an
# IndexError for the last three points (and data_t[i-3] silently wrapped
# around for the first three).  Restrict the scan so the +/-3 window
# always stays in bounds, starting at 3 as the tip suggests.
# (Also removed the dead `i = 2` pre-assignment.)
data = np.loadtxt('the_data.dat')
t = data[:, 0]
data_t = data[:, 1]
peak = 0
for i in range(3, len(data_t) - 3):
    if (data_t[i] > data_t[i+3]) and (data_t[i] > data_t[i-3]):
        print(data_t[i])
        peak = peak + 1
key_code.append(peak)
print(key_code)
# ## Problem 11. <br> Escape the room! (3 points)
#
# ### You did it! You have the 8 digits that you need to exit the room! You run to the key pad and enter the code. Did it work? Open the pdf ***summer_saved.pdf*** using the 8 digits you got (no spaces or commas in between).
# The assembled 8-digit code.
print(key_code)
# Try to open the reward PDF.  os.system runs the filename as a shell
# command, which only works if the OS maps .pdf files to a viewer.
# NOTE(review): `os` must be imported earlier in the notebook — confirm.
done = 'summer_saved.pdf'
os.system(done)
# ## Problem 12. <br> The final push (10 points)
#
# ### You run around the cave and see the weather machine! You did it! You quickly open a terminal and access its main code. Dr. Cane is not that smart after all....You delete the running program and stop the eternal hurricane season from ever starting!
#
#
# ### Now prepare and submit your homework. Write what you will do to make and submit the zip file into your log. Don’t forget to also commit your finalized log and push it to GitHub. When satisfied, close the log, copy it to your homework directory, and run the commands to make and submit the zip file. Turn the file in on WebCourses.
# # Have a great summer!
# <img src="congrats_summer.gif" width=200 height=200 />
| Final_project_sp22/Final_CSmith.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1. 데이터의 기본 정보 파악하기
# - 데이터셋 리스트
# - 1. olist_customers_dataset.csv
# - 2. olist_geolocation_dataset.csv
# - 3. olist_order_items_dataset.csv
# - 4. olist_order_payments_dataset.csv
# - 5. olist_order_reviews_dataset.csv
# - 6. olist_orders_dataset.csv
# - 7. olist_products_dataset.csv
# - 8. olist_sellers_dataset.csv
# - 9. product_category_name_translation.csv
# - <img src="../../img/relation.png" width=800>
# ### 1) 데이터의 크기 및 기본 정보
# ----
# +
# -*- coding: utf-8 -*-
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Root directory holding the nine Olist e-commerce CSV files.
data_dir = "../../dataset/"
# -
# -----
# #### [1] olist_customers_dataset.csv
# ##### [유저 정보 데이터셋]
# - customer_id : 유저 식별 번호(id) - 주문 발생에 매핑하기 위한 번호.
# - customer_unique_id : 유저 고유 식별 번호(id) - 고유 식별을 위한 번호, 재구매 추적을 위한 케이스를 예시로 생각할 수 있음.
# - customer_zip_code_prefix : 유저 우편번호(주소정보)
# - customer_city : 유저 도시정보
# - customer_state : 유저 도시정보(한 단계 상위 개념)
# Load the customer table and peek at it.
olist_customers_dataset = pd.read_csv(data_dir + "olist_customers_dataset.csv")
olist_customers_dataset.head()
# Distinct per-order customer ids.
olist_customers_dataset['customer_id'].nunique()
# Same count computed via unique(); nunique() is the direct spelling.
len(olist_customers_dataset['customer_id'].unique())
# Distinct *people* — smaller than customer_id when users reorder.
len(olist_customers_dataset['customer_unique_id'].unique())
# -----
# #### [2] olist_geolocation_dataset.csv
# ##### [주소 정보 데이터셋]
# - geolocation_zip_code_prefix : 우편번호(주소정보)
# - geolocation_lat : 위도 정보
# - geolocation_lng : 경도 정보
# - geolocation_city : 도시정보
# - geolocation_state : 도시정보(한 단계 상위 개념)
# Load the geolocation table (zip prefix -> lat/lng/city/state).
olist_geolocation_dataset = pd.read_csv(data_dir + "olist_geolocation_dataset.csv")
olist_geolocation_dataset.head()
# Distinct zip prefixes; the raw table has many rows per prefix.
olist_geolocation_dataset['geolocation_zip_code_prefix'].nunique()
# -----
# #### [3] olist_order_items_dataset.csv
# ##### [주문 정보 데이터셋]
# - order_id : 주문 아이디
# - order_item_id : 한 주문에 몇 개를 주문했는지에 관한 정보
# - product_id : 상품 아이디
# - seller_id : 판매자 아이디
# - shipping_limit_date : 판매자 배송 제한 날짜
# - price : 상품 가격
# - freight_value : 배송비
# Load the order-item table (one row per item within an order).
olist_order_items_dataset = pd.read_csv(data_dir + "olist_order_items_dataset.csv")
olist_order_items_dataset.head()
olist_order_items_dataset.shape
# -----
# #### [4] olist_order_payments_dataset.csv
# ##### [결제 정보 데이터셋]
# - order_id : 주문 아이디
# - payment_sequential : 결제한 방법의 수
# - payment_type : 결제 종류
# - payment_installments : 할부 개월 수
# - payment_value : 결제 가격
# Load the payments table; a single order can have several payment rows.
olist_order_payments_dataset = pd.read_csv(data_dir + "olist_order_payments_dataset.csv")
olist_order_payments_dataset.head()
olist_order_payments_dataset['order_id'].nunique()
# Payment methods present in the data.
olist_order_payments_dataset['payment_type'].unique()
# -----
# #### [5] olist_order_reviews_dataset.csv
# ##### [리뷰 정보 데이터셋]
# - review_id : 댓글의 고유 식별 번호(id)
# - order_id : 주문 아이디
# - review_score : 리뷰 점수
# - review_comment_title : 리뷰 제목
# - review_comment_message : 리뷰 내용
# - review_creation_date : 리뷰 단 시간
# - review_answer_timestamp : 만족도 설문조사 답변 시간
# Load the reviews table and compare review vs. order cardinality.
olist_order_reviews_dataset = pd.read_csv(data_dir + "olist_order_reviews_dataset.csv")
olist_order_reviews_dataset.head()
olist_order_reviews_dataset['review_id'].nunique()
olist_order_reviews_dataset['order_id'].nunique()
# Score distribution (1-5 stars).
olist_order_reviews_dataset['review_score'].value_counts()
# -----
# #### [6] olist_orders_dataset.csv
# ##### [배송 정보 데이터셋]
# - order_id : 주문 아이디
# - customer_id : 유저 아이디
# - order_status : 주문 상태
# - order_purchase_timestamp : 주문 시간
# - order_approved_at : 주문 확정 시간
# - order_delivered_carrier_date : 물류센터 도착 시간
# - order_delivered_customer_date : 유저에게 배송 완료 시간
# - order_estimated_delivery_date : 유저가 보는 예상 배송일
# Load the orders/shipping table and check the order-status breakdown.
olist_orders_dataset = pd.read_csv(data_dir + "olist_orders_dataset.csv")
olist_orders_dataset.head()
olist_orders_dataset.shape
olist_orders_dataset['order_status'].value_counts()
# -----
# #### [7] olist_products_dataset.csv
# ##### [상품 정보 데이터셋]
# - product_id : 상품 아이디
# - product_category_name : 상품 카테고리명
# - product_name_lenght : 상품명 텍스트 길이
# - product_description_lenght : 상품 상세설명 텍스트 길이
# - product_photos_qty : 상품 상세설명 이미지 갯수
# - product_weight_g : 상품 무게 (그램 단위)
# - product_length_cm : 상품 길이 (cm 단위)
# - product_height_cm : 상품 높이 (cm 단위)
# - product_width_cm : 상품 너비 (cm 단위)
# Load the product table and inspect category counts.
olist_products_dataset = pd.read_csv(data_dir + "olist_products_dataset.csv")
olist_products_dataset.head()
olist_products_dataset['product_id'].nunique()
olist_products_dataset['product_category_name'].value_counts()
# -----
# #### [8] olist_sellers_dataset.csv
# ##### [판매자 정보 데이터셋]
# - seller_id : 판매자 아이디
# - seller_zip_code_prefix : 판매자 우편번호(주소정보)
# - seller_city : 판매자 도시정보
# - seller_state : 판매자 도시정보(한 단계 상위 개념)
# Load the seller table.
olist_sellers_dataset = pd.read_csv(data_dir + "olist_sellers_dataset.csv")
olist_sellers_dataset.head()
olist_sellers_dataset['seller_id'].nunique()
# -----
# #### [9] product_category_name_translation.csv
# ##### [상품 카테고리 정보 영문명 매핑]
# - product_category_name : 브라질 카테고리명
# - product_category_name_english : 영문 카테고리명
# Load the Portuguese -> English category-name mapping.
product_category_name = pd.read_csv(data_dir + "product_category_name_translation.csv")
product_category_name.head()
# ----
# ## 2. 데이터 병합 준비하기
# ### 1) 데이터셋 세부 정보 파악하기
# Reload the customer table fresh for the merge-preparation walkthrough.
olist_customers_dataset = pd.read_csv(data_dir + "olist_customers_dataset.csv")
olist_customers_dataset.info()
# Demo: adding rows to an (initially empty) DataFrame.
# FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
# pandas 2.0; pd.concat is the supported way to add rows.
empty_df = pd.DataFrame(columns=['a', 'b', 'c', 'd'])
new_rows = pd.DataFrame([{'a': 1, 'b': 9, 'c': 9, 'd': 2},
                         {'a': 0, 'b': 6, 'c': 1, 'd': 3}])
empty_df = pd.concat([empty_df, new_rows], ignore_index=True)
empty_df.head()
# +
info_df = pd.DataFrame(columns=['row_num', 'col_num', 'name', 'pk'])

def info_df_row(name, pk, file_name):
    """Read one dataset and summarise it as a row dict (shape, name, primary key)."""
    frame = pd.read_csv(data_dir + file_name)
    n_rows, n_cols = frame.shape
    return {'row_num': n_rows, 'col_num': n_cols, 'name': name, 'pk': pk}
# -
# Summarise all nine datasets into info_df.
# FIX: DataFrame.append() was removed in pandas 2.0; collect the row dicts
# and concatenate once (this also avoids re-copying the frame on every
# append).
dataset_specs = [
    ("customer_data", "customer_id", "olist_customers_dataset.csv"),
    ("location_data", "geolocation_zip_code_prefix", "olist_geolocation_dataset.csv"),
    ("order_data", "order_id", "olist_order_items_dataset.csv"),
    ("payment_data", "order_id", "olist_order_payments_dataset.csv"),
    ("review_data", "review_id", "olist_order_reviews_dataset.csv"),
    ("shipping_data", "order_id", "olist_orders_dataset.csv"),
    ("product_data", "product_id", "olist_products_dataset.csv"),
    ("seller_data", "seller_id", "olist_sellers_dataset.csv"),
    ("category_data", "product_category_name", "product_category_name_translation.csv"),
]
info_rows = [info_df_row(name, pk, fname) for name, pk, fname in dataset_specs]
info_df = pd.concat([info_df, pd.DataFrame(info_rows)], ignore_index=True)
info_df
# ----
# ### 2) 데이터셋의 열 정보 파악하기
# Reload the customer table and inspect dtypes/memory usage.
olist_customers_dataset = pd.read_csv(data_dir + "olist_customers_dataset.csv")
olist_customers_dataset.info()
# Convert every column to the 'category' dtype to shrink memory usage.
olist_customers_dataset = pd.DataFrame(olist_customers_dataset).astype('category')
olist_customers_dataset.info()
# Small demo: build a DataFrame from a list of (number, letter) records.
data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
records_df = pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
records_df.head()
# Per-column cardinality of the customer table, one (col_name, n_unique)
# row per column.
cardinality_records = [(col, olist_customers_dataset[col].nunique())
                       for col in olist_customers_dataset.columns]
unique_counts = pd.DataFrame.from_records(cardinality_records,
                                          columns=['col_name', 'num_category'])
unique_counts.head()
# ----
# ### 3) 데이터셋의 결측치 및 이상치 탐색하기
# Load the product table and examine missing values.
olist_products_dataset = pd.read_csv(data_dir + "olist_products_dataset.csv")
print(olist_products_dataset.shape)
print(olist_products_dataset.info())
# Boolean mask of missing cells.
olist_products_dataset.isnull()
# Missing-value count per column.
olist_products_dataset.isnull().sum()
# Drop rows with any missing value (fillna alternative shown below).
olist_products_dataset = olist_products_dataset.dropna()
olist_products_dataset.info()
# +
# Alternative to dropna(): replace missing cells with a sentinel value.
# olist_products_dataset = olist_products_dataset.fillna("some_value")
# -
# Distribution / outlier checks on the product-name-length column.
olist_products_dataset['product_name_lenght'].hist(bins=50)
olist_products_dataset.boxplot(column=['product_name_lenght'])
# ----
# ## 3. 데이터셋 병합 실행하기
# ### 1) merge() 함수로 데이터 병합하기
# Minimal merge demo: left-join df2 onto df1 by the shared key column 'a'.
df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
df1
df2
# 'bar' has no match in df2, so its 'c' value becomes NaN.
df1 = pd.merge(df1, df2, how='left', on='a')
df1.head()
# ----
# ### 2) 주문 데이터 기준으로 데이터 병합하기
# Load the six tables that will be merged into one order-level frame.
order_df = pd.read_csv(data_dir + "olist_order_items_dataset.csv")
print(order_df.shape)
shipping_df = pd.read_csv(data_dir + "olist_orders_dataset.csv")
print(shipping_df.shape)
product_df = pd.read_csv(data_dir + "olist_products_dataset.csv")
print(product_df.shape)
user_df = pd.read_csv(data_dir + "olist_customers_dataset.csv")
print(user_df.shape)
location_df = pd.read_csv(data_dir + "olist_geolocation_dataset.csv")
print(location_df.shape)
product_category_df = pd.read_csv(data_dir + "product_category_name_translation.csv")
print(product_category_df.shape)
# Normalise the join key to str on both sides, then left-join shipping
# information onto the order items.
order_df['order_id'] = order_df['order_id'].astype(str)
shipping_df['order_id'] = shipping_df['order_id'].astype(str)
order_df = order_df.merge(shipping_df, how='left', on='order_id')
order_df.shape
order_df.head()
# Attach product attributes by product_id.
order_df['product_id'] = order_df['product_id'].astype(str)
product_df['product_id'] = product_df['product_id'].astype(str)
order_df = order_df.merge(product_df, how='left', on='product_id')
order_df.shape
# Attach customer attributes by customer_id.
order_df['customer_id'] = order_df['customer_id'].astype(str)
user_df['customer_id'] = user_df['customer_id'].astype(str)
order_df = order_df.merge(user_df, how='left', on='customer_id')
order_df.shape
location_df['geolocation_zip_code_prefix'].nunique()
location_df.shape
# The geolocation table has many rows per zip prefix; collapse to one row
# per prefix (mean coordinates, first city/state) before joining so the
# merge does not multiply the row count.
order_df['customer_zip_code_prefix'] = order_df['customer_zip_code_prefix'].astype(str)
location_df['geolocation_zip_code_prefix'] = location_df['geolocation_zip_code_prefix'].astype(str)
location_df.columns = ['customer_zip_code_prefix', 'lat', 'lng', 'city', 'state']
location_df = location_df.groupby('customer_zip_code_prefix').agg({'lat': 'mean', 'lng': 'mean', 'city': 'first', 'state': 'first'}).reset_index()
order_df = order_df.merge(location_df, how='left', on='customer_zip_code_prefix')
order_df.shape
order_df.columns
# Finally attach the English category names.
order_df['product_category_name'] = order_df['product_category_name'].astype(str)
product_category_df['product_category_name'] = product_category_df['product_category_name'].astype(str)
order_df = order_df.merge(product_category_df, how='left', on='product_category_name')
order_df.shape
| 02/chapter2/01_ecommerce_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from fenics import *
from ufl import nabla_div
import matplotlib.pyplot as plt
# Material / model parameters (SI units).
mu = 3e10        # shear modulus [Pa]
rho = 2700       # density [kg/m^3]
width = 10e3     # half-width of the rectangular domain [m]
lambda_ = mu     # first Lame parameter, taken equal to mu here
g = 9.81         # gravitational acceleration [m/s^2]
top = 0          # y-coordinate of the top boundary
# Calculate strain
def epsilon(u):
    """Symmetric (small-strain) gradient of the displacement field u."""
    return 0.5*(nabla_grad(u) + nabla_grad(u).T)
# Calculate stress
def sigma(u):
    """Isotropic linear-elastic stress for displacement u.

    NOTE(review): relies on the module-level `d` (space dimension), which
    is only assigned further down, just before sigma() is first used.
    """
    return lambda_*nabla_div(u)*Identity(d) + 2*mu*epsilon(u)
# Boundary conditions
def clamped_boundary(x, on_boundary):
    """Mark the entire boundary as clamped (zero displacement)."""
    return on_boundary
# Create mesh and define function space
# NOTE(review): Point is given a single tuple argument here rather than
# Point(x, y) — confirm this overload is accepted by the dolfin version in use.
mesh = RectangleMesh(Point((-width, top - 2 * width)), Point((width, top)), 100, 100)
# +
V = VectorFunctionSpace(mesh, 'P', 1)
bc = DirichletBC(V, Constant((0, 0)), clamped_boundary)
# Define variational problem
u = TrialFunction(V)
d = u.geometric_dimension()  # space dimension (used inside sigma above)
v = TestFunction(V)
f = Constant((0, -rho*g))    # body force: gravity acting in -y
T = Constant((0, 0))         # zero surface traction
a = inner(sigma(u), epsilon(v))*dx
L = dot(f, v)*dx + dot(T, v)*ds
# Compute solution (rebinding u from TrialFunction to the solution Function)
u = Function(V)
solve(a == L, u, bc)
# Evaluate the solution at an arbitrary point in the domain
u_eval = u(0, top - width)
print(u_eval)
# -
# Displacement components and magnitude as UFL expressions for plotting.
ux = dot(u,Constant((1.0, 0.0)))
uy = dot(u,Constant((0.0, 1.0)))
umag = sqrt(ux ** 2 + uy ** 2)
# +
# Side-by-side plots of x-displacement, y-displacement and magnitude.
# FIX: the original passed colobar=True — a misspelled, nonexistent
# keyword — to plot(); the colorbars are created explicitly with
# plt.colorbar below, so the stray keyword is simply dropped.
plt.figure(figsize=(20, 15))
plt.subplot(1, 3, 1)
c = plot(ux)
plt.colorbar(c, fraction=0.046, pad=0.06)
plt.subplot(1, 3, 2)
c = plot(uy)
plt.colorbar(c, fraction=0.046, pad=0.06)
plt.subplot(1, 3, 3)
c = plot(umag)
plt.colorbar(c, fraction=0.046, pad=0.06)
plt.tight_layout()
| fem_bem/py_baseline_dirichlet.ipynb |