code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit ('.venv')
# metadata:
# interpreter:
# hash: a6730fb0a06c18af636869813f8eb69d9f4a51b49b9b87b39aeaf81c3d64a67b
# name: python3
# ---
import pandas as pd
# First look: without header=None the first data row is misread as column names.
log = pd.read_csv('log.csv')
log
# Re-read treating every row as data; columns become integer positions.
log = pd.read_csv('log.csv', header=None)
log
sample = pd.read_csv('sample.csv')
columns = sample.columns
sample, columns
# Lower-case the column labels (returns a new frame; `sample` itself is unchanged).
sample.rename(str.lower, axis='columns')
# Assign meaningful names to the header-less log columns.
log = pd.read_csv("log.csv", header=None)
log.columns = ['user_id', 'time', 'bet', 'win']
log
# users.csv is KOI8-R encoded and tab-separated.
users = pd.read_csv('users.csv', encoding='koi8_r', sep='\t')
users
users = pd.read_csv('users.csv', encoding='koi8_r', sep='\t')
users.columns = ['user_id', 'email', 'geo']
users
# Distinct regions present in the data.
users.geo.unique()
sample.info()
sample.Name.unique()
sample.info()
# Rows whose user_id is the literal sentinel string '#error'.
log[log.user_id == '#error']
sample = pd.read_csv("sample.csv")
# Boolean-mask filtering: keep people younger than 30.
sample2 = sample[sample.Age < 30]
sample2
log = pd.read_csv("log.csv",header=None)
log.columns = ['user_id','time', 'bet','win']
# Winning rows only, plus how many there are.
log_win = log[log.win > 0]
win_count = log_win.win.count()
log_win, win_count
sample = pd.read_csv("sample.csv")
# Same age filter expressed with DataFrame.query.
sample2 = sample.query('Age<30')
sample2
sample.query('Age>20')
sample.query('Age==25')
sample.query('City in ["Рига","Сочи"]')
# Combined membership + range + inequality conditions in one query string.
sample.query('City in ["Рига", "Сочи","Чебоксары", "Сургут"] & 21<Age<50 & Profession!="Менеджер"')
log = pd.read_csv("log.csv",header=None)
log.columns = ['user_id','time', 'bet','win']
log
# Combined numeric condition inside a single query string.
log2 = log.query('bet<2000 & win>0')
log2
# na=False treats missing names as "no match" instead of propagating NaN.
sample.Name.str.match("К", na=False)
sample[sample.Name.str.match("К", na=False)]
sample[~sample.Name.str.match("К", na=False)]
sample = pd.read_csv("sample.csv")
# Cities containing the letter 'о' / not containing it.
sample3 = sample[sample.City.str.contains('о', na=False)]
sample3
sample4 = sample[~sample.City.str.contains('о', na=False)]
sample4
log = pd.read_csv("log.csv",header=None)
log.columns = ['user_id','time', 'bet','win']
new_log = log.query('user_id!="#error"')
new_log
# Element-wise transformation with apply.
sample.Age.apply(lambda x:x**2)
def func(x):
    """Keep values below 20 unchanged; map everything else to 0."""
    return x if x < 20 else 0
sample.Age.apply(func)
sample = pd.read_csv("sample.csv")
# NOTE(review): `sample2 = sample` is an alias, not a copy — the assignment
# below mutates the original `sample` too; `.copy()` was probably intended.
sample2 = sample
sample2.Age = sample.Age.apply(lambda x: x+1)
sample2
sample = pd.read_csv("sample.csv")
sample2 = sample
# str(x) guards against non-string (e.g. NaN) city values before lower-casing.
sample2.City = sample.City.apply(lambda x: str(x).lower())
sample2
def profession_code(s):
    """Encode a profession name as an int: "Рабочий" -> 0, "Менеджер" -> 1,
    anything else -> 2."""
    codes = {"Рабочий": 0, "Менеджер": 1}
    return codes.get(s, 2)
profession_code("Менеджер")
sample = pd.read_csv("sample.csv")
sample2 = sample
sample2.Profession = sample.Profession.apply(profession_code)
sample2
def age_category(age):
    """Bucket an age: <23 -> "молодой", 23..35 -> "средний", >35 -> "зрелый".

    Falls through (implicitly returns None) for values that satisfy none of
    the comparisons, e.g. NaN.
    """
    if age < 23:
        return "молодой"
    if age <= 35:
        return "средний"
    if age > 35:
        return "зрелый"
age_category(35)
sample = pd.read_csv("sample.csv")
# New derived column with the age bucket for every row.
sample['Age_category'] = sample.Age.apply(age_category)
sample
log = pd.read_csv('log.csv', header=None)
log.columns = ['user_id','time','bet','win']
def normalize_user_id(uid):
    """Clean one raw user_id value.

    '#error' becomes an empty string; verbose 'Запись пользователя № - <id>'
    entries are reduced to the trailing id; anything else passes through.
    """
    if uid == "#error":
        return ""
    if "Запись пользователя № - " in uid:
        return uid.split()[-1]
    return uid
# Normalize every user_id, then reload the raw log for the time-cleaning step.
log.user_id = log.user_id.apply(normalize_user_id)
log
log = pd.read_csv("log.csv",header=None)
log.columns = ['user_id','time','bet','win']
def filter_time(t):
    """Drop the leading character of string values; non-strings (e.g. NaN)
    pass through unchanged."""
    return t[1:] if type(t) is str else t
# Strip the stray leading character from every timestamp.
log.time = log.time.apply(filter_time)
log
# Full cleaning pipeline for log.csv: drop '#error' rows, strip the verbose
# user_id prefix, and remove the leading character from string timestamps.
log = pd.read_csv('log.csv', header=None)
log.columns = ['user_id','time','bet','win']
log = log.query('user_id!="#error"')
def normalize_user_id(uid):
    """Strip the 'Запись пользователя № - ' prefix, keeping only the id."""
    if "Запись пользователя № - " in uid:
        return uid.split()[-1]
    else:
        return uid
log.user_id = log.user_id.apply(normalize_user_id)
# BUG FIX: the original re-assigned a whole query() result (a DataFrame) into
# the user_id column here; the '#error' rows were already dropped above, so
# that line was both redundant and broken, and has been removed.
def filter_time(t):
    """Drop the leading character of string timestamps; pass other values through."""
    if type(t) is str:
        return t[1:]
    else:
        return t
log.time = log.time.apply(filter_time)
log
| DATA_ANALYST/PYTHON-8/8-Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Minimization function
# +
from biorefineries import lipidcane2g as lc
import biosteam as bst
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from biosteam.utils import colors
from math import floor, ceil
from biosteam import plots
from biosteam.utils import CABBI_colors
from thermosteam.units_of_measure import format_units
from biosteam.plots.utils import style_axis, style_plot_limits, fill_plot, set_axes_labels
# Shared plot colors: (R, G, B, alpha) tuples built from the CABBI palette.
shadecolor = (*colors.neutral.RGBn, 0.30)
linecolor = (*colors.neutral_shade.RGBn, 0.85)
markercolor = (*colors.CABBI_blue_light.RGBn, 1)
edgecolor = (*colors.CABBI_black.RGBn, 1)
def tickmarks(data, accuracy=50, N_points=5):
    """Return N_points evenly spaced tick positions spanning data's range,
    with the endpoints rounded outward to multiples of `accuracy`.

    A degenerate (zero-span) range falls back to [0, 1].
    """
    lo = floor(data.min() / accuracy) * accuracy
    hi = ceil(data.max() / accuracy) * accuracy
    step = (hi - lo) / (N_points - 1)
    if step == 0:
        return [0, 1]
    return [lo + step * i for i in range(N_points)]
def create_inflection_plot(ax, name='1g', load=False, save=True):
    """Plot MFPP-inflection extraction efficiency vs. lipid retention for one
    biorefinery configuration `name` on axes `ax`.

    load=True reads cached results from a .npy file and recurses with a fresh
    computation if the cache is missing; save=True writes the cache after
    computing.
    """
    lipid_retention = np.linspace(0.5, 1.0, 10)
    if load:
        try:
            efficiency_inflection = np.load(f'lipid_extraction_efficiency_inflection{name}.npy')
        except:
            # Cache miss (or unreadable file): recompute without loading.
            return create_inflection_plot(ax, name, False, save)
    else:
        lc.load(name)
        efficiency_inflection = np.array([
            lc.lipid_extraction_specification.solve_MFPP_inflection(i)
            for i in lipid_retention
        ])
        # Short-circuit save: only writes when save is truthy.
        save and np.save(f'lipid_extraction_efficiency_inflection{name}', efficiency_inflection)
    # NaNs mark retentions with no inflection found; plot only valid points.
    mask = ~np.isnan(efficiency_inflection)
    ax.plot(100 * lipid_retention[mask], 100 * efficiency_inflection[mask], label=str(name))
# -
# Compare the inflection curves of configurations 0-3 on one figure.
fig, ax = plt.subplots()
create_inflection_plot(ax, 0, load=False)
create_inflection_plot(ax, 1, load=False)
create_inflection_plot(ax, 2, load=False)
create_inflection_plot(ax, 3, load=False)
plt.xlabel('Lipid retention [%]')
plt.ylabel('MFPP-Extraction efficiency inflection')
plt.legend()
plt.show()
# Recompute the curve for configuration 0 and inspect which points are valid.
lc.load(0)
lipid_retention = np.linspace(0.5, 1.0, 10)
efficiency_inflection = np.array([
    lc.lipid_extraction_specification.solve_MFPP_inflection(i)
    for i in lipid_retention
])
# BUG FIX: `mask` was referenced below without ever being defined at this
# scope (it only existed inside create_inflection_plot); define it first.
mask = ~np.isnan(efficiency_inflection)
mask
| BioSTEAM 2.x.x/biorefineries/oilcane/.ipynb_checkpoints/lipid_extraction_efficiency_inflection-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_extraction.text import CountVectorizer
#nltk.download('stopwords')
# -
# Importing the dataset
# quoting=3 (csv.QUOTE_NONE) leaves embedded quotes in the reviews untouched.
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
# +
# Cleaning the texts: strip non-letters, lower-case, drop stopwords, stem.
corpus = []
ps = PorterStemmer()                          # hoisted: one stemmer for all reviews
stop_words = set(stopwords.words('english'))  # hoisted: original rebuilt this set per word (O(n^2))
for i in range(len(dataset)):                 # generalized from the hard-coded range(0, 1000)
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if not word in stop_words]
    review = ' '.join(review)
    corpus.append(review)
# +
# Creating the Bag of Words model
# Keep the 1000 most frequent tokens as binary word-presence features.
cv = CountVectorizer(max_features = 1000)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
# +
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# +
# Training: Naive Bayes (Bernoulli variant suits binary presence features)
classifier = BernoulliNB(alpha=0.1)
classifier.fit(X_train, y_train)
# +
# Testing
y_pred = classifier.predict(X_test)
# View classification report
print(classification_report(y_test, y_pred))
| Machine Learning/Reviews Sentiment Analysis/Reviews Sentiment Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Slug Test
# **This test is taken from examples of AQTESOLV.**
# %matplotlib inline
from ttim import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Set background parameters:
rw = 0.127 # well radius
rc = 0.0508 # well casing radius
L = 4.20624 # screen length
b = -9.9274 # bottom elevation of the aquifer (used as the lowest z in Model3D below)
zt = -0.1433 # depth to top of the screen
H0 = 0.4511 # initial displacement in the well
zb = zt - L # bottom of the screen
# Slug:
# Slug volume = casing cross-section area times initial displacement.
Q = np.pi * rc ** 2 * H0
print('Slug:', round(Q, 5), 'm^3')
# Load data:
data = np.loadtxt('data/falling_head.txt', skiprows = 2)
t = data[:, 0] / 60 / 60 / 24 #convert time from seconds to days
h = (10 - data[:, 1]) * 0.3048 #convert drawdown from ft to meters
# Create single layer conceptual model:
ml_0 = Model3D(kaq=10, z=[0, zt, zb, b], Saq=1e-4, tmin=1e-5, tmax=0.01)
w_0 = Well(ml_0, xw=0, yw=0, rw=rw, rc=rc, tsandQ=[(0, -Q)], layers=1, wbstype='slug')
ml_0.solve()
# Calibrate conductivity and storage against the observed heads.
ca_0 = Calibrate(ml_0)
ca_0.set_parameter(name='kaq0_2', initial=10)
ca_0.set_parameter(name='Saq0_2', initial=1e-4)
ca_0.series(name='obs', x=0, y=0, t=t, h=h, layer=1)
ca_0.fit(report=True)
display(ca_0.parameters)
print('RMSE:', ca_0.rmse())
# Plot normalized head (h/H0): observations vs. the fitted model.
hm_0 = ml_0.head(0, 0, t, layers=1)
plt.figure(figsize = (8, 5))
plt.semilogx(t, h/H0, '.', label='obs')
plt.semilogx(t, hm_0[0]/H0, label='ttim')
plt.xlabel('time(d)')
plt.ylabel('h/H0')
plt.legend();
# Try multilayer conceptual model:
#Determine elevation of each layer.
#Thickness of each layer is set to be 0.5 m.
z0 = np.arange(zt, zb, -0.5)
z1 = np.arange(zb, b, -0.5)
zlay = np.append(z0, z1)
zlay = np.append(zlay, b)
zlay = np.insert(zlay, 0, 0)
nlay = len(zlay) - 1 #number of layers
# Uniform storage, except a large value in the top (phreatic) layer.
Saq_1 = 1e-4 * np.ones(nlay)
Saq_1[0] = 0.1
ml_1 = Model3D(kaq=10, z=zlay, Saq=Saq_1, kzoverkh=1, \
tmin=1e-5, tmax=0.01, phreatictop=True)
# Screen spans layers 1-8 in this discretization.
w_1 = Well(ml_1, xw=0, yw=0, rw=rw, tsandQ=[(0, -Q)], layers=[1,2,3,4,5,6,7,8], rc=rc, \
wbstype='slug')
ml_1.solve()
ca_1 = Calibrate(ml_1)
ca_1.set_parameter(name='kaq0_21', initial=10, pmin=0)
ca_1.set_parameter(name='Saq0_21', initial=1e-4, pmin=0)
ca_1.series(name='obs', x=0, y=0, layer=[1,2,3,4,5,6,7,8], t=t, h=h)
ca_1.fit(report = True)
display(ca_1.parameters)
print('RMSE:', ca_1.rmse())
hm_1 = ml_1.head(0, 0, t, layers=8)
plt.figure(figsize = (8, 5))
plt.semilogx(t, h/H0, '.', label='obs')
plt.semilogx(t, hm_1[0]/H0, label='ttim')
plt.xlabel('time(d)')
plt.ylabel('h/H0')
plt.legend();
# Try adding well screen resistance:
ml_2 = Model3D(kaq=10, z=zlay, Saq=Saq_1, kzoverkh=1, \
tmin=1e-5, tmax=0.01, phreatictop=True)
w_2 = Well(ml_2, xw=0, yw=0, rw=rw, tsandQ=[(0, -Q)], layers=[1,2,3,4,5,6,7,8], \
rc=rc, res=0.1, wbstype='slug')
ml_2.solve()
ca_2 = Calibrate(ml_2)
ca_2.set_parameter(name='kaq0_21', initial=10, pmin=0)
ca_2.set_parameter(name='Saq0_21', initial=1e-4, pmin=0)
# Also fit the well-skin resistance (bounded below by 0).
ca_2.set_parameter_by_reference(name='res', parameter=w_2.res, initial=0, pmin=0)
ca_2.series(name='obs', x=0, y=0, layer=[1,2,3,4,5,6,7,8], t=t, h=h)
ca_2.fit(report = True)
display(ca_2.parameters)
print('RMSE:', ca_2.rmse())
hm_2 = ml_2.head(0, 0, t, layers=8)
plt.figure(figsize = (8, 5))
plt.semilogx(t, h/H0, '.', label='obs')
plt.semilogx(t, hm_2[0]/H0, label='ttim')
plt.xlabel('time(d)')
plt.ylabel('h/H0')
plt.legend();
# ## Summary of values presented in AQTESOLV:
t = pd.DataFrame(columns=['k [m/d]', 'Ss [1/m]'], \
index=['AQTESOLV', 'ttim-single', 'ttim-multi'])
t.loc['AQTESOLV'] = [2.616, 7.894E-5]
t.loc['ttim-single'] = ca_0.parameters['optimal'].values
t.loc['ttim-multi'] = ca_1.parameters['optimal'].values
t['RMSE'] = [0.001197, round(ca_0.rmse(), 6), round(ca_1.rmse(), 6)]
t
| pumpingtest_benchmarks/12_falling-head_slug_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### I have a GOT dataset for season 1 and 2
# Firstly lets preprocess it, before feeding to our Keras Model
import pickle
# Raw text corpus plus the pickled character vocabulary from a previous run
# (reusing the pickle keeps the index<->char mapping consistent with the saved weights).
data = open("datasets/got.txt","r",encoding="utf-8").read()
chars = pickle.load(open("Pickles and model Weights/chars.pickle","rb")) #total unique characters
#chars = list(set(data))
VOCAB_SIZE = len(chars)
print(len(data),VOCAB_SIZE)
#initialize Mapping
idx_to_char = {i: char for i, char in enumerate(chars)}
char_to_idx = {char: i for i, char in enumerate(chars)}
# +
import numpy as np
"""
number_of_features = VOCAL_SIZE
length_of_sequence = how many chars, model will look at a time
number_of_sequence = len(data)/length_of_sequence
"""
SEQ_LENGTH = 60 #input sequence length
N_FEATURES = VOCAB_SIZE #one hot encoding here, that's why, but deduplicated for clarity
N_SEQ = int(np.floor((len(data) - 1) / SEQ_LENGTH))
X = np.zeros((N_SEQ, SEQ_LENGTH, N_FEATURES))
y = np.zeros((N_SEQ, SEQ_LENGTH, N_FEATURES))
# -
# Build one-hot input/target pairs; each target sequence is its input shifted by one character.
for i in range(N_SEQ):
    X_sequence = data[i * SEQ_LENGTH: (i + 1) * SEQ_LENGTH]
    X_sequence_ix = [char_to_idx[c] for c in X_sequence]
    input_sequence = np.zeros((SEQ_LENGTH, N_FEATURES))
    for j in range(SEQ_LENGTH):
        input_sequence[j][X_sequence_ix[j]] = 1. #one-hot encoding of the input characters
    X[i] = input_sequence
    y_sequence = data[i * SEQ_LENGTH + 1: (i + 1) * SEQ_LENGTH + 1] #shifted by 1 to the right
    y_sequence_ix = [char_to_idx[c] for c in y_sequence]
    target_sequence = np.zeros((SEQ_LENGTH, N_FEATURES))
    for j in range(SEQ_LENGTH):
        target_sequence[j][y_sequence_ix[j]] = 1. #one-hot encoding of the target characters
    y[i] = target_sequence
# #### Ok Now lets create a keras model
# 1. Model is described below
# +
from keras.models import Sequential
from keras.layers import LSTM, TimeDistributed, Dense, Activation
# constant parameters for the model
HIDDEN_DIM = 700 #size of each hidden layer, "each layer has 700 hidden states"
LAYER_NUM = 2 #number of stacked LSTM layers
NB_EPOCHS = 50 #max number of epochs to train, "200 epochs"
BATCH_SIZE = 128
VALIDATION_SPLIT = 0.1 #proportion of the data used for validation at each epoch
def createModel():
    """Build and compile the character-level LSTM: LAYER_NUM stacked LSTM
    layers of HIDDEN_DIM units, then a per-timestep Dense + softmax over the
    vocabulary, trained with categorical cross-entropy and RMSprop."""
    net = Sequential()
    # First LSTM declares the input shape: (any sequence length, one-hot vocab).
    net.add(LSTM(HIDDEN_DIM, input_shape=(None, VOCAB_SIZE), return_sequences=True))
    for _ in range(LAYER_NUM - 1):
        net.add(LSTM(HIDDEN_DIM, return_sequences=True))
    net.add(TimeDistributed(Dense(VOCAB_SIZE)))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return net
# -
def generate_text(model, length, start_char=None):
    """Sample `length` characters from the model, greedily taking the argmax
    prediction at each step.

    start_char: optional seed character.  BUG FIX — the parameter existed but
    was silently ignored; the seed was always random.  A random character is
    still used when start_char is None, so existing callers are unaffected.
    """
    if start_char is None:
        ix = [np.random.randint(VOCAB_SIZE)]
    else:
        ix = [char_to_idx[start_char]]
    y_char = [idx_to_char[ix[-1]]]
    X = np.zeros((1, length, VOCAB_SIZE))
    for i in range(length):
        X[0, i, :][ix[-1]] = 1.
        # Re-predict on the prefix generated so far; keep the argmax per step.
        ix = np.argmax(model.predict(X[:, :i+1, :])[0], 1)
        y_char.append(idx_to_char[ix[-1]])
    return ''.join(y_char)
# +
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
# callback to save the model if better
filepath="tgt_model.hdf5"
save_model_cb = ModelCheckpoint(filepath, monitor='val_acc', verbose=2, save_best_only=True, mode='max')
# callback to stop the training if no improvement
early_stopping_cb = EarlyStopping(monitor='val_loss', patience=10)
# callback to generate text at epoch end
class generateText(Callback):
    def on_epoch_end(self, batch, logs={}):
        # NOTE(review): mutable default for `logs` is a classic pitfall, but
        # it is never mutated here so it is harmless.
        print(generate_text(self.model, 100))
generate_text_cb = generateText()
# Only checkpointing is active; early-stopping/generation callbacks are unused.
callbacks_list = [save_model_cb]
def train(model):
    """Fit on the full one-hot dataset and save the final weights."""
    model.fit(X, y, batch_size=BATCH_SIZE, verbose=2,
              epochs=NB_EPOCHS, callbacks=callbacks_list,
              validation_split=VALIDATION_SPLIT)
    model.save_weights('Pickles and model Weights/text_gen_got1.hdf5')
def load_weigths(model):
    """Load previously trained weights.  (Name keeps the original typo;
    the caller below depends on it.)"""
    model.load_weights('Pickles and model Weights/text_gen_got1.hdf5')
# -
model = createModel();
load_weigths(model)
# Sample from the trained model at increasing lengths.
generate_text(model,100)
#Model has learned where to use '?' and where to use capital letters
#lets print some more
generate_text(model,300)
#less meaningful, but at least most of the words are correct
#Lets print a full sequence
generate_text(model,600)
# # Model Architecture
# 
# The input shape of the text data is ordered as follows : (batch size, number of time steps, hidden size). In other words, for each batch sample and each word in the number of time steps, there is a 500 length embedding word vector to represent the input word. These embedding vectors will be learnt as part of the overall model learning. The input data is then fed into two “stacked” layers of LSTM cells (of 500 length hidden size) – in the diagram above, the LSTM network is shown as unrolled over all the time steps. The output from these unrolled cells is still (batch size, number of time steps, hidden size).
#
# This output data is then passed to a Keras layer called TimeDistributed, which will be explained more fully below. Finally, the output layer has a softmax activation applied to it. This output is compared to the training y data for each batch, and the error and gradient back propagation is performed from there in Keras. The training y data in this case is the input x words advanced one time step – in other words, at each time step the model is trying to predict the very next word in the sequence. However, it does this at every time step – hence the output layer has the same number of time steps as the input layer. This will be made more clear later.
#
# There is a special Keras layer for use in recurrent neural networks called TimeDistributed. This function adds an independent layer for each time step in the recurrent model. So, for instance, if we have 10 time steps in a model, a TimeDistributed layer operating on a Dense layer would produce 10 independent Dense layers, one for each time step. The activation for these dense layers is set to be softmax in the final layer of our Keras LSTM model.
| Keras LSTM | Character Generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Loop Recovery Method
#
# ## Imports
import sys
# Make the project src/ importable from this notebook.
sys.path.append("../../src")
# %reload_ext autoreload
# %autoreload 2
from helpers import graph
from path_invariants import *
from math import sqrt
# A desirable property of mergings is that path and loops can be recovered. If $A$ is a path, $C$ merging between $A$ and any trace $B$, **what conditions do we need in the merging to recover $A$?**. This is the same that proves that the successor graph of $C$ has an f-clique $V_f$ such that $G^S_C \cap V_f$ verifies the path condition for $A$
#
# $G^S_C \cap V_f = G^S_{A}$
#
# *(Transfer to the paper)* We also saw, elsewhere, that the upper layer of a loop is a path. It therefore makes sense to search for cliques in f-layers and verify whether they form a path. To that end, the f-clique set of a graph is defined as the clique set of its f-layer.
#
# Here we use the **lemma** that a 2-loop of a merge is a full-merge (see [Merging](Mergings.ipynb)), and will develop a procedure to extract the paths by merely duplicating C and then take $r/2$
# ## Example
# As we saw in [Merging](Mergings.ipynb), $C=xAy$ is a merging (not full) of $A=pq$ and $B=xy$. But, now lets loop $A$.
A = "pq"
B = "xy"
C = "x" + A*4 + "y"
C2 = C+C
sgC = successor_graph(C)
sgC2 = successor_graph(C2)
display("C=%s" % C)
display("C2=%s" % C2)
graph( sgC2 )
# Here we see that duplicate the merge is a full merge
# Is C a full merge? (all True)
[ ( (a,b), (b,a), (a, b) in sgC.edges, (b, a) in sgC.edges ) for a in A for b in B]
# A valid question is: does the subgraph $G^S_C \cap A$ where $C=(x) rA (y)$ verify the loop condition?
# +
# Restrict the doubled-trace successor graph to the vertices {p, q}.
H = sgC2.copy()
for node in set(H.nodes).difference( set(["p", "q"]) ):
    H.remove_node(node)
# NOTE(review): H is built here but loop_condition is evaluated on sgC —
# possibly loop_condition(H) was intended; confirm against the narrative.
loop_condition(sgC)
# -
# We have shown one example where the loop cannot be extracted from a merge. But, if we duplicate the trace $C$ we obtain a full merge:
# Is C2 a full merge? (all True)
[ ( (a,b), (b,a), (a, b) in sgC2.edges, (b, a) in sgC2.edges ) for a in A for b in B]
# We are searching for the behavior of $A=pq$ in the successor graph. Look at f-layers in $C2$, $f=36, 28$; they verify the loop condition for a loop of 8 $pq$.
graph( f_layer(36, sgC2) )
graph( f_layer(28, sgC2) )
sqrt(36 + 28)
# We take the subgraph $G^S_{C2} \cap V_q = \{p, q\}$
# +
# For {p,q}
H = sgC2.copy()
for node in set(H.nodes).difference( set(["p", "q"]) ):
    H.remove_node(node)
graph(H)
# -
is_loop, w1, w2 = loop_condition(H)
is_loop, w1, w2
# Recover the path order from in-degrees; repetition count r = sqrt(w1+w2)/2.
path = [ u for u, InDeg in sorted( H.in_degree() , key=lambda u: u[1], reverse=False)]
path, sqrt(w1+w2)/2
# Then in $C$ there were 4 $pq$ as we already knew.
# ## General Loop Recovery procedure
# Given a trace $C$, we search f-cliques in the successor graph of $C_2=CC$:
# Collect every distinct edge weight (the f values) in the successor graph.
weights = set( [sgC2[u][v]["weight"] for u, v in sgC2.edges() ] )
for f in weights:
    display( (f, list( cliques( f_layer(f, sgC2).to_undirected() )) ) )
# Then, we need to take the set of unique cliques
unique_cliques = set()
for f in weights:
    for clique in cliques( f_layer(f, sgC2).to_undirected() ):
        # Sorted tuples make equal cliques hashable and deduplicable.
        unique_cliques.add(tuple( sorted(clique)))
unique_cliques
# For each clique $V_q$, we build the graph restricted to those vertices $G^S_{C2} \cap V_q$ and verify the loop condition
# +
paths_found = []
for Vq in unique_cliques:
    H = sgC2.copy()
    for node in set(H.nodes).difference( set(Vq) ):
        H.remove_node(node)
    display( "searching in %s" % H.nodes )
    is_loop, w1, w2 = loop_condition(H)
    if is_loop:
        path = [ u for u, InDeg in sorted( H.in_degree() , key=lambda u: u[1], reverse=False)]
        r = sqrt(w1+w2)/2
        paths_found.append( (r, path) )
display(paths_found)
# -
# We obtained the paths $A$ and $B$ with their respective repetitions.
# The function paths_from_trace implements the ideas above.
paths_from_trace(C)
# ## Example to be fully explained
# An undecidable problem: $pxqypxyq$ is a merge from $pq$, $xy$ or from $py$, $xq$?
paths_from_trace("pxqypxyq") # A merge from pq xy
# The problem here is that the paths are not disjoint, so we have to make a decision on how to build such a disjoint set.
# (Observation: if the sets that are candidates for paths are not disjoint then the cliques have common elements and here we present... The Artifact!)
#
# If we decide that the generator paths are $pq$, $xy$ then :
# * $pxy$ has the extra $p$
# * $pxq$ has an extra $x$
#
# And they cannot be path invariants because we have a contradiction: as $(y,q)$ belongs to different paths, we cannot have both $x \overset{A}{\rightarrow} y \land x \overset{A}{\rightarrow} q$ (because there would be a path with $\{y,q,x\}$). But the trace allows both kinds of combinations:
#
# * pq xy: **p** x **q** y **p** x y **q**
# * py xq: **p** x q **y p** x **y** q
#
# Then the $pxq$ $pxy$ are not path invariants. To elucidate the problem we would need another example to decide which of $x \overset{A}{\rightarrow} y \land x \overset{A}{\rightarrow} q$ should remain.
paths_from_trace( "pqxy" + "pxqypxyq") #A merge from pq xy
# We added the example that chooses $xy$. But now ... $pq$ or $px$?
paths_from_trace( "xpqy" + "pqxy" + "pxqypxyq") #A merge from pq xy
# Here we face another scenario. We could add the missing example to answer our ultimate question, but if you look at the set above, the **disjoint** elements of it form the invariant we were looking for. More elegantly... we are looking for a **partition of all vertices in the union of paths**.
#
# Last example to obtain the invariant
paths_from_trace( "xypq" + "xpqy" + "pqxy" + "pxqypxyq") #A merge from pq xy
# Another weird example xpqypqpxqpqy <- pq xy
paths_from_trace("xpqypqpxqpqy")
# ## All pairs completeness property
# With the method above we have described the all-pairs trace. Any repetition of single symbols is not a pair, so those are discarded. The remaining trace is either a single long path, or loops.
A = "PQR"
B = "LMNO"
# Noise characters ('.', '+') interleaved with repeated paths A and B.
C = "ab.++..+++cd." + A*9 + B + A + B + "z.x....++..y."
paths_from_trace(C)
# ## Equivalence with multiple paths combination
# Note that with this method we can verify the multi-path graph equivalence (strong claim in the paper) that corresponds to the same discovered paths
# +
A ="xy"; sgA = successor_graph(A)
B="pq" ; sgB = successor_graph(B)
multipathG = add_graphs( sgA, sgB, sgB, sgB, sgB)
graph(multipathG)
# -
paths_in_components( multipathG )
C = A + B*4
paths_from_trace(C)
| notebooks/section-2-paths-invariants/Mergings Loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HISTORY
# ## 0.3.1
#
# | Products under test | Versions |
# |-----------------------------|--------------------------|
# | Jupyter Notebook Classic | 6.3.0 |
# | JupyterLab | 1.2.16, 2.3.1, 3.0.14 |
#
#
# | Products under review | Versions |
# |-----------------------------|--------------------------|
# | JupyterLab Classic | 0.1.10 |
# | Voila | 0.2.9 |
#
# - Several JupyterLab keywords now accept an `${n}` argument to handle multiple documents on the page.
# - Many JupyterLab keywords that wait for certain events can be configured with `${timeout}` and `${sleep}` to suit.
# - Properly pass library initialization options to `SeleniumLibrary`
# ## 0.3.0
#
# | Products under test | Versions |
# |-----------------------------|--------------------------|
# | Jupyter Notebook Classic | 6.1.5 |
# | JupyterLab | 1.2.16, 2.2.9, 3.0.0rc10 |
#
# - Require SeleniumLibrary 4.5 and RobotFramework 3.2
# - Expanded support for newer Notebook Classic and JupyterLab versions in keywords
# - Dropped support for `nteract_on_jupyter`
# ## 0.2.0
# - Require SeleniumLibrary 3.3.0 and remove backport of `Press Keys`
# - `Start New Jupyter Server` now has a default `command` of `jupyter-notebook` (instead of `jupyter`)
# - `Build Jupyter Server Arguments` no longer returns `notebook` as the first argument
# - Fix homepage URL for PyPI
# - Test on Chrome/Windows
# ## 0.1.0
# - Initial Release
| docs/HISTORY.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Wisawasi/100-Days-Of-ML-Code/blob/master/Data_Wrangling_By_INVESTIC_Part_I.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="dKyTbKJaciAw"
# # Syntax
# + [markdown] id="WOTlkwPLcpph"
# ## Creating DataFrames
# + [markdown] id="i31I80R3emhq"
# ### Specify values for each column
# + id="6Ag5xy0kcZUl"
df = pd.DataFrame(
{"a" : [4, 5, 6],
"b" : [7, 8 ,9],
"c" : [10, 11, 12]},
index = [1, 2, 3])
# + [markdown] id="aVF-f433msjQ"
# สร้าง DataFrame เป็นตารางของตัวเลขออกมาก่อน จะแบ่งเป็นแถวและหลัก ซึ่งในชุดนี้จะมี
# column เป็น a,b,c
# rows เป็น 1,2,3
# + [markdown] id="gaHbod8oevpO"
# ### Specify values for each row
# + id="ZeSAYIF1eGj8"
df = pd.DataFrame(
[[4, 7, 10],
[5, 8, 11],
[6, 9, 12]],
index = [1, 2, 3],
columns = ['a', 'b', 'c']
)
# + [markdown] id="6_ckF9y_nkYP"
# ชุดนี้จะได้ output เหมือนกับ DataFrame ชุดแรก แต่แค่มีการกำหนดตัว Column ด้านล่างแทน
# + [markdown] id="AmBobaO9e2qw"
# ### Create DataFame with a MultiIndex
# + id="RIMj5FzRdHza"
df = pd.DataFrame(
{ "a" : [4, 5, 6],
"b" : [7, 8, 9],
"c" : [10, 11, 12]},
index = pd.MultiIndex.from_tuples(
[('d',1),('d',2),('e',2)],
names=['n','v'])
)
# + [markdown] id="5cggUNgJoWK0"
# ในส่วนนี้จะมีการสร้าง DataFrame แบบ หลายๆ Index
# + [markdown] id="snmD3tsxfETt"
# # Method Chaining
# + id="N8rPbBXTfKrg"
df = (pd.melt(df)
.rename(columns = {
'variable' : 'var',
'value' : 'val'})
.query('val >= 200')
)
# + [markdown] id="DzGDVNFppKs4"
# Method ของ pandas ส่วนมากจะส่งข้อมูล Output ออกมาเป็น DataFrame
# + [markdown] id="trYdbp6TfpuR"
# # Reshaping Data
#
# + [markdown] id="ywM4EKQoDbZP"
# ข้ามไปทำ Gather columns in to row ก่อนนะ
# + [markdown] id="eR3-SzTgfuhB"
# ## Change the layout of a data set
# + id="sLjdn5JO4UZq"
df = df.sort_values('mpg') #Order rows by values of a column(low to high)
# + id="YYL4G8SM4ZrO"
df = df.sort_values('mpg',ascending=False) #Order rows by values of a column(high to low)
# + id="vn785ejv4gpu"
df = df.rename(columns = {'y':'year'}) #Rename the columns of DataFrame
# + id="rrzGKUjo4ofm"
df = df.sort_index() #Sort the index of a DataFrame
# + id="RfytEmZM4rZv"
df = df.reset_index() #Reset index of DataFrame to row numbers, moving index to columns
# + id="7aDjr2OO4uB8"
df = df.drop(['Length','Height'], axis=1) #Drop columns from DataFrame
# + [markdown] id="X1-UzLtJf3n9"
# ### Gather columns in to rows
# + id="kaIdt7mtfuAM"
pd.melt(df)
# + [markdown] id="p9XLNUrXgCzs"
# ### Spread rows into columns
# + id="-gSt_tR8f8sn"
df.pivot(columns='var', values='val')
# + [markdown] id="JzFRSWX_gKHY"
# ### Append rows of DataFrames
# + id="7unpzRYOgQS-"
pd.concat([df1, df2])
# + [markdown] id="VA280v4fgUBt"
# ### Append columns of DataFrames
# + id="wzD7cuOwgX_E"
pd.concat([df1,df2], axis=1)
# + [markdown] id="STAdbyipgmT6"
# # Subset Observations (Rows)
# + id="FQTfnuXWg0rB"
df[df.length > 7] #Extarct rows that meet logical criteria
# + id="T-CSyrIug7cQ"
df.drop_dlupicates() #Remove duplicate rows (only considers columns)
# + id="FU7cYLlUg76o"
df.head(n) #Select first n rows
# + id="gh6FxOuwhAFf"
df.tail(n) #Select last n rows
# + id="JNShBxwuhCQf"
df.sample(frac=0.5) #Randomly select fraction of rows
# + id="-SbAMF0KhFDE"
df.sample(n=10) #Randomly select n rows
# + id="5GVha24dhJZ7"
df.iloc[10:20] #Select rows by position
# + id="q8j06e6whOjU"
df.nlargest(n, 'value') #Select and order top n entries
# + id="ImRYZ8p2hTuN"
df.nsmallest(n, 'value') #Select and order bottom n entries
# + [markdown] id="S0ZRTHlTgttp"
# # Subset Variables (Columns)
# + id="AyG3756HhdxL"
df[['width','length','species']] #Select multiple columns with specific names
# + id="Ik4buooahj6j"
df['width'] #Select single column with specific name
# + id="PA1dVk-chmya"
df.filter(regex='regex') #Select columns ahose name matches regular expression regex
# + id="-KvTSr6EhsJL"
df.loc[:,'x2':'x4'] #Select all columns between x2 and x4 (inclusive)
# + id="-dtLTUR_hvwj"
df.iloc[:,[1,2,5]] #Select columns in positions 1,2 and 5 (first column us 0)
# + id="Ah2WS7jShzZi"
df.loc[df['a'] > 10, ['a','c']] #Select rows meeting logical condition, and only the specific columns
| Data_Wrangling_By_INVESTIC_Part_I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.10 ('s2search397')
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
import numpy as np, sys, os, pandas as pd,json, os
# Make the project root importable so getting_data can be found.
sys.path.insert(1, '../../')
from getting_data import read_conf
# +
# Load the sample configuration for this experiment directory.
_, samples, __ = read_conf('.')
def print_metrics(key, arr):
    """Print length, sum, mean, and sample std of `arr` on one aligned line
    (mean/std are shown as 0 when arr has fewer than two elements)."""
    n = len(arr)
    total = round(np.sum(arr), 4)
    mean = round(np.mean(arr), 4) if n > 1 else 0
    std = round(np.std(arr, ddof=1), 4) if n > 1 else 0
    print(f'''[{key:11}] len: {n:<6} sum: {total:<10}, mean: {mean:<10}, std: {std:<10}''')
# Aggregate per-feature anchor metrics across all sample tasks.
metrics = dict(
    title=[],
    abstract=[],
    venue=[],
    authors=[],
    year=[],
    n_citations=[],
)
for sample_key in samples.keys():
    sample_tasks = samples[sample_key]['anchor']['task']
    for task in sample_tasks:
        rg = task['range']
        m_file = os.path.join('scores', f"{sample_key}_anchor_metrics_{rg[0]}_{rg[1]}.npz")
        # Skip tasks whose score file has not been produced yet.
        if os.path.exists(m_file):
            ld = np.load(m_file)
            title = ld['title']
            abstract = ld['abstract']
            venue = ld['venue']
            authors = ld['authors']
            year = ld['year']
            n_citations = ld['n_citations']
            idx = ld['idx']
            # print(idx)
            # print_metrics('title', title)
            # print_metrics('abstract', abstract)
            # print_metrics('venue', venue)
            # print_metrics('authors', authors)
            # print_metrics('year', year)
            # print_metrics('n_citations', n_citations)
            metrics['title'].extend(title)
            metrics['abstract'].extend(abstract)
            metrics['venue'].extend(venue)
            metrics['authors'].extend(authors)
            metrics['year'].extend(year)
            metrics['n_citations'].extend(n_citations)
# Pad every feature to a fixed length with zeros, then report summary stats.
# NOTE(review): 92938 appears to be the expected total sample count for this
# experiment — confirm against the dataset before reusing.
for key in ['title', 'abstract', 'venue', 'authors', 'year', 'n_citations']:
    if len(metrics[key]) < 92938:
        zeros = np.zeros([92938 - len(metrics[key])])
        metrics[key].extend(list(zeros))
    print_metrics(key, metrics[key])
# +
from s2search_score_pdp import pdp_based_importance
std_fi = []
pdp_based_fi = []
sv_global_fi = []
all_data = []
for key in ('title', 'abstract', 'venue', 'authors', 'year', 'n_citations'):
    feature_vals = metrics[key]
    if len(feature_vals) > 0:
        all_data.append(feature_vals)
        # Importance proxy: sample standard deviation of the pooled values.
        std_fi.append(np.std(feature_vals, ddof=1))
    else:
        # No data for this feature: keep all lists index-aligned with zeros.
        all_data.append([0])
        std_fi.append(0)
        pdp_based_fi.append(0)
        sv_global_fi.append(0)
# sv_global_fi
# +
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5), dpi=200)
# Left panel: violin plot (median line only); right panel: box plot
# (means shown, outliers hidden).
axs[0].violinplot(all_data, showmeans=False, showmedians=True)
axs[0].set_title('Violin plot')
axs[1].boxplot(all_data, showfliers=False, showmeans=True)
axs[1].set_title('Box plot')
# Shared cosmetics: horizontal grid, one tick per feature.
feature_labels = ['title', 'abstract', 'venue', 'authors', 'year', 'n_citations']
for ax in axs:
    ax.yaxis.grid(True)
    ax.set_xticks(list(range(1, len(all_data) + 1)), labels=feature_labels)
    ax.set_xlabel('Features')
    ax.set_ylabel('Shapley Value')
plt.show()
# +
plt.rcdefaults()
fig, ax = plt.subplots(figsize=(12, 4), dpi=200)
# Horizontal bar chart of the std-based importance, one bar per feature.
feature_names = ('title', 'abstract', 'venue', 'authors', 'year', 'n_citations')
y_pos = np.arange(len(feature_names))
ax.barh(y_pos, std_fi, align='center', color='#008bfb')
ax.set_yticks(y_pos, labels=feature_names)
ax.invert_yaxis()  # first feature appears at the top
ax.set_xlabel("Sample Standard Deviation of Anchor Explain's Aggregation")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
_, xmax = plt.xlim()
plt.xlim(0, xmax + 1)
# Annotate each bar with its rounded value just past the bar's end.
for i, v in enumerate(std_fi):
    margin = 0.05
    ax.text(v + margin if v > 0 else margin, i, str(round(v, 4)),
            color='black', ha='left', va='center')
plt.show()
| pipelining/exp-cslg/anchor_metrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/StaticAndKineticFriction/static-and-kinetic-friction.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + [markdown] slideshow={"slide_type": "-"}
# (Click **Cell** > **Run All** before proceeding.)
# +
# %matplotlib inline
#----------
#Import modules and packages
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import random
#----------
#import ipywidgets as widgets
#import random
#This function produces a multiple choice form with four options
def multiple_choice(option_1, option_2, option_3, option_4):
    """Render a shuffled four-option multiple-choice question with feedback buttons.

    The first argument is always the correct answer; the four options are
    shuffled before display.  Clicking a button prints "Correct!" or
    "Try again." and highlights the clicked button (Moccasin when correct,
    Lightgray when wrong).
    """
    option_list = [option_1, option_2, option_3, option_4]
    answer = option_list[0]
    letters = ["(A) ", "(B) ", "(C) ", "(D) "]
    # Boldface the letter at the beginning of each option.
    start_bold = "\033[1m"; end_bold = "\033[0;0m"
    # Randomly shuffle the options.
    random.shuffle(option_list)
    # Print the letters (A) to (D) in sequence with the shuffled options and
    # remember which letter carries the correct answer.
    letter_answer = None
    for i in range(4):
        option_text = option_list.pop()
        print(start_bold + letters[i] + end_bold + option_text)
        if option_text == answer:
            letter_answer = letters[i]
    # One button per letter; "(A) " -> button labelled "(A)".
    buttons = [widgets.Button(description=letter.strip()) for letter in letters]
    for btn in buttons:
        btn.style.button_color = 'Whitesmoke'
    container = widgets.HBox(children=buttons)
    display(container)
    print(" ", end='\r')
    # The original duplicated four near-identical handlers; a closure factory
    # produces each one instead.  `idx` is bound per handler (avoids the
    # late-binding-closure pitfall).
    def make_handler(idx):
        def on_clicked(b):
            if letters[idx] == letter_answer:
                print("Correct!  ", end='\r')
                chosen_color = 'Moccasin'
            else:
                print("Try again.", end='\r')
                chosen_color = 'Lightgray'
            # Highlight only the clicked button; reset the rest.
            for j, btn in enumerate(buttons):
                btn.style.button_color = chosen_color if j == idx else 'Whitesmoke'
        return on_clicked
    for i, btn in enumerate(buttons):
        btn.on_click(make_handler(i))
# + [markdown] slideshow={"slide_type": "-"}
# # Static and Kinetic Friction
#
# ## Introduction
#
# **Friction** is the name given to the force that resists the relative motion of one object as it slides against another. Most surfaces are rough and uneven. Even smooth surfaces, such as a polished mirror, can appear bumpy under a microscope. The bumps on these surfaces interlock as they rub against one another, which prevents them from sliding freely.
#
# <img src="Images/rough_surfaces.svg" width="35%"/>
#
# The more an object presses into a surface, the more the surface pushes back against the object. The force pushing back against the object is called the **normal force**, **$F_{n}$** and it is always perpendicular to the surface. From the image above, we can imagine that the more two objects are pressed together, the more difficult it is for the interlocking points to lift up and slide past one another. As a consequence, the friction force increases. A relationship therefore exists between the magnitude of the friction force and that of the normal force. This relationship is expressed by the following equation:
#
# $$F_{f}=\mu F_{n}$$
#
# where $F_{f}$ and $F_{n}$ are the magnitudes of the friction and normal forces respectively, and $\mu$ is the **coefficient of friction**. Here $\mu$ is the Greek letter $mu$. The direction of the force of friction is parallel to the surface and opposite to the other forces acting on the object.
# + [markdown] slideshow={"slide_type": "-"}
# Use the slider below to observe the relationship between the normal force and the friction force. Pick any value for the coefficient of friction. Move the slider for the normal force back and forth to calculate the corresponding force of friction.
# +
#import ipywidgets as widgets
# Interactive demo: friction force = coefficient X normal force,
# recomputed whenever either slider moves.
coeff = widgets.FloatSlider(description="Coefficient", min=0.1, max=0.9)
normal_force = widgets.IntSlider(description="Normal force", min=5, max=50)
# ANSI escapes to boldface the formula line.
start_bold = "\033[1m"; end_bold = "\033[0;0m"
def f(coeff, normal_force):
    """Print the friction-force calculation for the current slider values."""
    result = round(coeff * normal_force, 1)
    print(start_bold + "Friction force = (coefficient of friction) X (normal force)" + end_bold)
    print(f"Friction force = {coeff} X {normal_force} = {result} N")
out = widgets.interactive_output(f, {'coeff': coeff, 'normal_force': normal_force})
widgets.HBox([widgets.VBox([coeff, normal_force]), out])
# + [markdown] slideshow={"slide_type": "-"}
# **Question:** *What happens to the friction force when the normal force increases?*
# +
#import ipywidgets as widgets
# Multiple-choice check; multiple_choice() treats its first argument as the
# correct answer and shuffles the display order.
option_1, option_2, option_3, option_4 = (
    "The friction force increases.",
    "The friction force decreases.",
    "The friction force remains constant.",
    "The friction force equals zero.",
)
multiple_choice(option_1, option_2, option_3, option_4)
# + [markdown] slideshow={"slide_type": "-"}
# The force of friction felt by an object depends on whether the objects are in motion with respect to each other. In general the force of friction felt by an object which is not moving with respect to another is stronger than the force of friction felt once it begins moving.
#
# **Static friction** describes the friction force that acts between an object and a surface to prevent it from sliding when a force is applied to it. Static friction applies to objects that are stationary with respect to one another.
#
# **Kinetic friction** describes the friction force that acts between an object and the surface it slides upon when a force is applied to it. Kinetic friction applies to objects that are in motion with respect to one another.
# + [markdown] slideshow={"slide_type": "-"}
# ### Coefficients of Friction
#
# Values for the coefficients of friction have been derived experimentally for various materials as they interact with one another. When we describe the friction acting on a stationary object, we must use the **coefficient of static friction, $\mu_{s}$**. When we describe the friction of a sliding object, we must use the **coefficient of kinetic friction, $\mu_{k}$**. Some values for the coefficients of static and kinetic friction are shown in the table below:
#
# Materials | Coefficients of static friction ($\mu_{s}$)| Coefficients of kinetic friction ($\mu_{k}$)
# --- | --- | ---
# Steel on steel | 0.7 | 0.6
# Glass on glass | 0.9 | 0.4
# Wood on wood | 0.4 | 0.2
# Rubber on concrete | 1.0 | 0.8
# + [markdown] slideshow={"slide_type": "-"}
# Use the slider again to observe the relationship between the coefficient of friction and the friction force. Pick any value for the normal force. Move the slider for the coefficient of friction back and forth to calculate the corresponding force of friction.
# +
#import ipywidgets as widgets
# Interactive demo (repeat of the earlier cell): friction force =
# coefficient X normal force, recomputed whenever either slider moves.
coeff = widgets.FloatSlider(description="Coefficient", min=0.1, max=0.9)
normal_force = widgets.IntSlider(description="Normal force", min=5, max=50)
# ANSI escapes to boldface the formula line.
start_bold = "\033[1m"; end_bold = "\033[0;0m"
def f(coeff, normal_force):
    """Print the friction-force calculation for the current slider values."""
    result = round(coeff * normal_force, 1)
    print(start_bold + "Friction force = (coefficient of friction) X (normal force)" + end_bold)
    print(f"Friction force = {coeff} X {normal_force} = {result} N")
out = widgets.interactive_output(f, {'coeff': coeff, 'normal_force': normal_force})
widgets.HBox([widgets.VBox([coeff, normal_force]), out])
# + [markdown] slideshow={"slide_type": "-"}
# **Question:** *What happens to the friction force when the coefficient of friction increases?*
# +
#import ipywidgets as widgets
# Multiple-choice check; multiple_choice() treats its first argument as the
# correct answer and shuffles the display order.
option_1, option_2, option_3, option_4 = (
    "The friction force increases.",
    "The friction force decreases.",
    "The friction force remains constant.",
    "The friction force equals zero.",
)
multiple_choice(option_1, option_2, option_3, option_4)
# -
# It is to be noted that the amount $\mu_s F_{n}$ is actually the **maximal value** for the magnitude of **static** friction $F_{f}$. Assuming the forces acting on an object are insufficient to overcome this maximal value, the object will be stationary. In this case, the actual magnitude of the force of static friction, $F_{f}$, will typically be less than maximal; it is exactly that amount required to have a net force of zero so that the object does not move.
# + [markdown] slideshow={"slide_type": "-"}
# ### Solving Friction Problems
#
# When solving a friction problem, it is useful to construct a free-body diagram. A **free-body diagram** is a simple graphical representation of an object and all the relevant forces acting upon it. An example of a free-body diagram is shown below:
#
# <img src="Images/free_body_diagram.svg" width="45%"/>
#
# * $F_{a}$ represents the **applied force**: the force acting in the direction of motion of the object.
# * $F_{f}$ represents the **friction force**: the force acting in the direction opposite to the motion of the object.
# * $mg$ represents the weight: the force of gravity acting on the object. Here, $m$ is the mass of the object, and $g$ is the gravitational acceleration directed downwards and of magnitude $g=9.8\:m/s^2$.
# * $F_{n}$ represents the normal force: the force perpendicular to the surface pressing up against the object.
#
# ### Example
# A 10 kg object is pushed across a flat horizontal surface with an applied force of 25 N. The same object is later pushed again with an applied force of 50 N. The coefficients of static and kinetic frictions are 0.40 and 0.30, respectively. Calculate the friction force ($F_{f}$) at each of the applied forces. Use the formula: $F_{f}=\mu F_{n}$.
#
# **Step 1:** *Construct a free-body diagram*. The free-body diagram shown above may be used for this example.
#
# **Step 2:** *Calculate the known forces*: **Recall:** The weight of the object, $mg$, is equal to the mass of the object multiplied by the acceleration due to gravity.
#
# $$mg = (10 \times 9.8)\:N = 98\:N$$
#
# Since there is no vertical acceleration on the object, the total vertical force must vanish so the magnitude of the normal force must be equal to that of the weight of the object ($F_{n} = mg$). Therefore,
#
# $$F_{n} = mg = (10 \times 9.8)\:N = 98\:N$$
#
# **Step 3:** *Determine the maximum static friction force*: Now that the normal force is known, we can calculate the maximum static friction force using the following equation:
#
# $$F_{s}=\mu_{s} F_{n} = (0.40 \times 98)\:N = 39\:N$$
#
# This means that the object will resist up to 39 N of applied force before it begins to move.
#
# **Step 4:** *Determine the friction force at 25 N*: Since the applied force of 25 N is less than the maximum static friction force, the object will remain stationary. The friction will oppose the applied force up to 25 N. Therefore,
#
# $$F_{f} = 25\:N$$
#
# The direction of $F_{f}$ will be opposite to the direction of the applied force.
#
# **Step 5:** *Determine the friction force at 50 N*: Since the applied force of 50 N is greater than the maximum static friction force, the object will begin to move. When in motion, the object is no longer being opposed by the static friction force. Instead it is being opposed by the kinetic friction force. We can calculate the kinetic friction force using the following equation:
#
# $$F_{k}=\mu_{k} F_{n} = (0.30 \times 98)\:N = 29\:N$$
#
# Therefore, the magnitude of the friction force will be:
#
# $$F_{f} = 29\:N$$
#
# Once again, the direction of $F_{f}$ will be opposite to the direction of the applied force.
#
# **Answer:** At an applied force of 25 N, the object is stationary and the magnitude of the friction force is 25 N. At an applied force of 50 N, the object is in motion and the magnitude of the friction force is 29 N. In both cases, the force of friction is directed parallel to the surface and opposite to the direction of the applied force.
# + [markdown] slideshow={"slide_type": "-"}
# ## Practice Problems
# (Click **Cell** > **Run Cells** to generate new random values for each question. Refer to the previous table to get the coefficients of friction for specific materials.)
# +
#import random
#import ipywidgets as widgets
# Practice problem 1: compute the kinetic friction force F = coeff * m * g
# for a randomly generated mass and friction coefficient.
#Randomize mass and friction coefficient
mass = random.randint(20,50)
coeff = (random.randint(10,100))/100
#Print question
question = "A " + str(mass) +" kg object is pushed across a flat horizontal surface. The coefficient of kinetic friction between the moving object and the surface is " + str(coeff) +". What is the magnitude of the friction force?"
print(question)
#Answer calculation
#Friction force = (friction coefficient) X (normal force)
answer = coeff*(mass*9.8)
answer = round(answer)
#Define range of values for random multiple choices
# NOTE(review): `min`/`max` shadow the builtins within this cell.
min = int(coeff*((mass-15)*9.8))
max = int(coeff*((mass+15)*9.8))
#Create three choices that are unique (and not equal to the answer)
# Re-draw until no distractor equals the correct answer.
choice_list = random.sample(range(min,max),3)
while choice_list.count(answer) >= 1:
    choice_list = random.sample(range(min,max),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = str(answer) + " N"
option_2 = str(choice_list[0]) + " N"
option_3 = str(choice_list[1]) + " N"
option_4 = str(choice_list[2]) + " N"
multiple_choice(option_1, option_2, option_3, option_4)
# +
#import random
#import ipywidgets as widgets
# Practice problem 2: compare the applied force against the maximum static
# friction; below the static limit the friction simply balances the applied
# force, above it the object moves and kinetic friction applies.
#Randomize mass and applied force
mass = random.randint(20,50)
applied_force = random.randint(150,300)
#Randomize material
material_options = ['steel', 'glass', 'wood']
material =random.choice(material_options)
#Define friction coefficients based on selected material
if material == 'steel':
    us = 0.7; uk = 0.6
elif material == 'glass':
    us = 0.9; uk = 0.4
elif material == 'wood':
    us = 0.4; uk = 0.2
#Print question
question = "If a %d kg %s object is pushed across a flat horizontal %s surface with an applied force of %d N, what is the magnitude of the friction force?" %(mass, material, material, applied_force)
print(question)
#Answer calculation
#Friction force = (friction coefficient) X (normal force)
static_friction = us*(mass*9.8)   # maximum static friction, us * m * g
kinetic_friction = uk*(mass*9.8)  # kinetic friction, uk * m * g
#If the applied force is less than or equal to the maximum static friction force, the object will remain stationary
#The friction force equals the applied force
if applied_force <= static_friction:
    answer = applied_force
else:
    answer = kinetic_friction
answer = round(answer)
#Define range of values for random multiple choices
# NOTE(review): `min`/`max` shadow the builtins within this cell.
if applied_force <= static_friction:
    min = applied_force-15
    max = applied_force+15
else:
    min = int(uk*((mass-15)*9.8))
    max = int(uk*((mass+15)*9.8))
#Create three choices that are unique (and not equal to the answer)
# Re-draw until no distractor equals the correct answer.
choice_list = random.sample(range(min,max),3)
while choice_list.count(answer) >= 1:
    choice_list = random.sample(range(min,max),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = str(answer) + " N"
option_2 = str(choice_list[0]) + " N"
option_3 = str(choice_list[1]) + " N"
option_4 = str(choice_list[2]) + " N"
multiple_choice(option_1, option_2, option_3, option_4)
# +
#import random
#import ipywidgets as widgets
# Practice problem 3: given the applied and (kinetic) friction forces on a
# moving object, recover the normal force via F_n = F_f / uk.
#Randomize applied and friction forces
applied_force = random.randint(51,75)
friction_force = random.randint(25,50)
#Randomize material
material_options = ['steel', 'glass', 'wood']
material =random.choice(material_options)
#Define friction coefficients based on selected material
if material == 'steel':
    uk = 0.6; us = 0.7
elif material == 'glass':
    uk = 0.4; us = 0.9
elif material == 'wood':
    uk = 0.2; us = 0.4
#Print question
question = "If a %s object is pushed across a flat horizontal %s surface with an applied force of %d N, and a friction force of %d N occurs, what is the magnitude of the normal force?" %(material, material, applied_force, friction_force)
print(question)
#Answer calculation
#weight = normal force = (friction force)/(kinetic friction coefficient)
answer = friction_force/uk
answer = round(answer)
#Define range of values for random multiple choices
min = int((friction_force-15)/uk); max = int((friction_force+15)/uk)
#Create three choices that are unique (and not equal to the answer)
choice_list = random.sample(range(min,max),3)
while choice_list.count(answer) >= 1:
    choice_list = random.sample(range(min,max),3)
#Assign each option to these four variables
#Option1 contains the answer
# BUG FIX: the original drew options 2-4 with fresh random.randint calls,
# discarding choice_list, so distractors could duplicate each other or the
# correct answer.  Use the de-duplicated choice_list like the sibling cells.
option_1 = str(answer) + " N"
option_2 = str(choice_list[0]) + " N"
option_3 = str(choice_list[1]) + " N"
option_4 = str(choice_list[2]) + " N"
multiple_choice(option_1, option_2, option_3, option_4)
# +
#import random
#import ipywidgets as widgets
#import numpy as np
# Practice problem 4: recover the coefficient of kinetic friction,
# uk = F_f / (m * g), from randomized forces and mass.
#Randomize mass and applied force
mass = random.randint(5,10)
applied_force = random.randint(46,80)
friction_force = random.randint(10,45)
#Print question
question = "A %d kg object is pulled across a flat horizontal surface by a force of %d N. The friction force is %d N. What is the coefficient of kinetic friction?" %(mass, applied_force, friction_force)
print(question)
#Answer calculation
#friction coefficient = (friction force) / (normal force)
answer = friction_force/(mass*9.8)
answer = round(answer,2)
#Define range of values for random multiple choices
# NOTE(review): `min`/`max` shadow the builtins within this cell.
min = 0.1
max = 0.9
unique = 0
#Create three choices that are unique (and not equal to the answer)
# Keep sampling three distractors until answer + distractors form 4 unique values.
while unique < 4:
    choice_list = np.random.uniform(min, max, size=(3,))
    for i in range(3):
        choice_list[i] = round(choice_list[i],2)
    choice_list = np.append(choice_list, answer)
    list_unique = np.unique(choice_list)
    unique = list_unique.size
#Assign each multiple choice to these four variables
#Option_1 contains the answer
option_1 = str(answer)
option_2 = str(choice_list[0])
option_3 = str(choice_list[1])
option_4 = str(choice_list[2])
multiple_choice(option_1, option_2, option_3, option_4)
# + [markdown] slideshow={"slide_type": "-"}
# Use the diagram below to answer the following question:
#
# <img src="Images/free_body_diagram_pulley.svg" width="45%"/>
# +
#import random
#import ipywidgets as widgets
# Practice problem 5 (pulley): the hanging mass pulls with m*g; the surface
# object holds it via static friction coeff * (m_surface * g), so the minimum
# surface mass is m_surface = (m_hanging * g) / (coeff * g).
#Randomize mass and friction coefficient
mass = random.randint(20,50)
coeff = (random.randint(10,100))/100
#Print question
question = "A " + str(mass) +" kg object is suspended by a pulley and connected to an object resting on a flat surface. The friction coefficient between the object and the surface is " + str(coeff) +". What is the minimum mass required to keep this suspended object stationary?"
print(question)
#Answer calculation
#Friction force = applied force = (mass) X (9.8)
friction_force = mass*9.8
normal_force = friction_force/coeff
answer = normal_force/9.8
answer = round(answer)
#Define range of values for random multiple choices
min = int((normal_force-50)/9.8)
max = int((normal_force+50)/9.8)
#Create three choices that are unique (and not equal to the answer)
choice_list = random.sample(range(min,max),3)
while choice_list.count(answer) >= 1:
    choice_list = random.sample(range(min,max),3)
#Assign each multiple choice to these four variables
#Option_1 contains the answer
# BUG FIX: the question asks for a mass, so the options are labelled in
# kilograms; the original mislabelled them in newtons.
option_1 = str(answer) + " kg"
option_2 = str(choice_list[0]) + " kg"
option_3 = str(choice_list[1]) + " kg"
option_4 = str(choice_list[2]) + " kg"
multiple_choice(option_1, option_2, option_3, option_4)
# + [markdown] slideshow={"slide_type": "-"}
# ## Experiment
#
# As mentioned above, the coefficients of static and kinetic friction are determined experimentally for different materials.
#
# **Determination of Static Friction**
#
# Try the following experiment to determine the coefficient of static friction.
#
# **Step 1:** Find an object and a flat horizontal surface. Record the material type for the object and surface.
#
# **Step 2:** Measure the mass of the object using a mass balance (kg).
#
# **Step 3:** Calculate the magnitude of the normal force using the following formula: $F_{n} = mg$
#
# **Step 4:** Using a spring scale, gradually apply a force to the object. Record the value on the scale the moment the object begins to move. At this moment, the applied force is equal to the maximum static friction force.
#
# <img src="Images/free_body_diagram_spring.svg" width="55%"/>
#
# **Step 5:** Calculate the coefficient of static friction using the following formula: $\mu_{s} = F_{f} \div F_{n}$
# -
# **Determination of Kinetic Friction**
#
# Continue the experiment to determine the coefficient of kinetic friction.
#
# **Step 6:** Using a spring scale, apply enough force to drag the object across the surface at a constant velocity (acceleration = 0). Record the value on the scale while the object is moving. So long as the object moves at a constant velocity, the applied force is equal to the kinetic friction force.
#
# **Step 7:** Calculate the coefficient of kinetic friction using the following formula: $\mu_{k} = F_{f} \div F_{n}$
# + [markdown] slideshow={"slide_type": "-"}
# (**Double click** this cell and update the table with your own data. Click **Cell** > **Run Cells** when done.)
#
# Materials | Object mass (g) | Normal force (N) | Applied force (N) | Static friction coefficient | Kinetic friction coefficient
# --- | --- | --- | --- | --- | ----
# ? | ? | ? | ? | ? | ?
# ? | ? | ? | ? | ? | ?
# ? | ? | ? | ? | ? | ?
# ? | ? | ? | ? | ? | ?
# ? | ? | ? | ? | ? | ?
# + [markdown] slideshow={"slide_type": "-"}
# ## Force Diagrams
#
# A useful way to visualize the friction forces is to construct a force diagram, as shown below. This diagram depicts the magnitudes of the forces acting on an object as it is pushed across a flat horizontal surface with increasing force. The y-axis depicts the friction force ($F_{f}$), and the x-axis depicts the applied force ($F_{a}$). The sliders can be manipulated to change the normal force ($F_{n}$), the coefficient of static friction ($\mu_{s}$), and the coefficient of kinetic friction ($\mu_{k}$).
# +
#import ipywidgets as widgets
#import matplotlib.pyplot as plt
def f(Fn=50,μs=0.75,μk=0.25):
    """Plot friction force vs. applied force for a given normal force (Fn)
    and static (μs) / kinetic (μk) friction coefficients.

    Friction tracks the applied force up to the static peak at Fn*μs, then
    drops to the kinetic plateau at Fn*μk.
    """
    plt.figure()
    xs = Fn*μs  # peak of static friction
    xk = Fn*μk  # kinetic friction plateau
    plt.plot([0,xs,xs,100],[0,xs,xk,xk])
    plt.plot([xs,xs],[xk,0], linestyle="dotted")
    plt.ylim(0, 100)
    plt.xlim(0,100)
    plt.ylabel('Friction force (N)')
    plt.xlabel('Applied force (N)')
    # BUG FIX: the `s=` keyword of annotate() was removed in Matplotlib 3.3
    # (renamed to `text`); pass the annotation text positionally instead.
    plt.annotate("Static friction (max)", xy=[xs-15,xs+5])
    plt.annotate("Kinetic friction", xy=[(xs+(100-xs)/2)-10,xk+5])
    plt.show()
interactive_plot = widgets.interactive(f,Fn=(35,75,5),μs=(0.5, 1.0),μk=(0.1, 0.5))
output = interactive_plot.children[-1]
output.layout.height = '280px'
interactive_plot
# + [markdown] slideshow={"slide_type": "-"}
# **Interpreting the graph**
#
# Read the graph from left to right. As the applied force gradually increases along the x-axis, the friction force also increases. Notice that the magnitude of the friction force is equal to the applied force ($F_{f} = F_{a}$) until it reaches the point of maximum static friction. This point is shown on the graph as a **peak**. The point of maximum static friction is determined by the following equation: $F_{f}=\mu_{s} F_{n}$. As the applied force continues to increase beyond the peak of maximum static friction, the object begins to move. The friction force is now described by the kinetic friction equation: $F_{f}=\mu_{k} F_{n}$.
# + [markdown] slideshow={"slide_type": "-"}
# Move the slider for the normal force back and forth. As the normal force increases, what happens to the static and kinetic friction forces?
# +
#import ipywidgets as widgets
# Multiple-choice check; multiple_choice() treats its first argument as the
# correct answer and shuffles the display order.
option_1, option_2, option_3, option_4 = (
    "The static and kinetic friction forces both increase.",
    "The static and kinetic friction forces both decrease.",
    "The static friction force increases and the kinetic friction force decreases.",
    "The static friction force decreases and the kinetic friction force increases.",
)
multiple_choice(option_1, option_2, option_3, option_4)
# + [markdown] slideshow={"slide_type": "-"}
# Move the slider for the static friction coefficient back and forth. As the static friction coefficient increases, what happens to the static and kinetic friction forces?
# +
#import ipywidgets as widgets
# Multiple-choice check; multiple_choice() treats its first argument as the
# correct answer and shuffles the display order.
option_1, option_2, option_3, option_4 = (
    "The static friction force increases and the kinetic friction force remains constant.",
    "The static friction force decreases and the kinetic friction force remains constant.",
    "The static friction force increases and the kinetic friction force decreases.",
    "The static friction force decreases and the kinetic friction force increases.",
)
multiple_choice(option_1, option_2, option_3, option_4)
# + [markdown] slideshow={"slide_type": "-"}
# Move the slider for the kinetic friction coefficient back and forth. As the kinetic friction coefficient increases, what happens to the static and kinetic friction forces?
# +
#import ipywidgets as widgets
# Multiple-choice check; multiple_choice() treats its first argument as the
# correct answer and shuffles the display order.
option_1, option_2, option_3, option_4 = (
    "The kinetic friction force increases and the static friction force remains constant.",
    "The kinetic friction force decreases and the static friction force remains constant.",
    "The kinetic friction force increases and the static friction force decreases.",
    "The kinetic friction force decreases and the static friction force increases.",
)
multiple_choice(option_1, option_2, option_3, option_4)
# + [markdown] slideshow={"slide_type": "-"}
# ## Conclusions
#
# In this notebook, the concepts of static and kinetic friction were examined. In summary:
#
# * **Friction** describes the force that resists the relative motion of an object as it slides across a surface. Friction forces are proportional to the normal force:
#
# $$F_{f} = \mu F_{n}$$
#
# * **Static friction** describes the friction force that acts between an object and the surface to prevent it from sliding. Static friction must be overcome for an object to move.
#
# * **Kinetic friction** describes the friction force that acts between an object and the surface it slides upon. Kinetic friction is used when describing objects in motion.
#
# * **Coefficients of static and kinetic friction** are determined experimentally for different materials. Once determined, these values can be tabulated and used to solve friction problems. An experimental method for determining the coefficients of static and kinetic friction was presented.
#
# * **Friction problems** can be solved using free-body diagrams, the friction formula, and the tabulated values for the coefficients of friction.
#
# * **Force diagrams** can be used to visualize the relationship between the friction force, the applied force, the normal force, and the coefficients of static and kinetic friction.
#
# Images in this notebook represent original artwork.
# + language="html"
#
# <script>
# function code_toggle() {
# if (code_shown){
# $('div.input').hide('500');
# $('#toggleButton').val('Show Code')
# } else {
# $('div.input').show('500');
# $('#toggleButton').val('Hide Code')
# }
# code_shown = !code_shown
# }
#
# $( document ).ready(function(){
# code_shown=false;
# $('div.input').hide()
# });
# </script>
# <form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| Science/StaticAndKineticFriction/static-and-kinetic-friction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Naive Bayes Classification with TREC Dataset
# <hr>
#
# We will build a text classification model using Naive Bayes on the TREC question Dataset. We will use the default size of train/test split from the original source.
# ## Load the library
# +
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
import nltk
import random
from nltk.corpus import stopwords, twitter_samples
# from nltk.tokenize import TweetTokenizer
from sklearn.model_selection import KFold
from nltk.stem import PorterStemmer
from string import punctuation
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.preprocessing.text import Tokenizer
import time
# %config IPCompleter.greedy=True
# %config IPCompleter.use_jedi=False
# nltk.download('twitter_samples')
# -
# ## Load the Dataset
# Load the pickled TREC question corpus: one row per question with its
# integer label and its 'train'/'test' split assignment.
corpus = pd.read_pickle('../0_data/TREC/TREC.pkl')
corpus.label = corpus.label.astype(int)
print(corpus.shape)
corpus
corpus.info()
# Class balance per split (row counts per (split, label) pair).
corpus.groupby( by=['split','label']).count()
np.array(corpus[corpus.split=='train'].label)
# +
# Separate the sentences and the labels for training and testing
train_x = list(corpus[corpus.split=='train'].sentence)
train_y = np.array(corpus[corpus.split=='train'].label)
print(len(train_x))
print(len(train_y))
test_x = list(corpus[corpus.split=='test'].sentence)
test_y = np.array(corpus[corpus.split=='test'].label)
print(len(test_x))
print(len(test_y))
# -
# ## Raw Number of Vocabulary
# Build the raw vocabulary for a first inspection (no cleaning applied yet)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus.sentence)
vocab_raw = tokenizer.word_index
print('\nThe vocabulary size: {}\n'.format(len(vocab_raw)))
print(vocab_raw)
# <!--## Split Dataset-->
# # Data Preprocessing
# <hr>
# ## Define `clean_doc` function
# +
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
stemmer = PorterStemmer()
def clean_doc(doc):
    """Tokenize *doc*: strip punctuation, drop stop words and empty tokens,
    then Porter-stem each surviving token."""
    # Whitespace tokenization.
    words = doc.split()
    # One regex pass removes every punctuation character from each token.
    strip_punct = re.compile('[%s]' % re.escape(punctuation))
    words = [strip_punct.sub('', token) for token in words]
    # Discard stop words and tokens emptied by the punctuation strip.
    words = [token for token in words if token not in stopwords and len(token) >= 1]
    # Reduce each remaining token to its Porter stem.
    return [stemmer.stem(token) for token in words]
# -
# ## Define `count_docs` function
def count_docs(data, docs, ys):
    '''
    Input:
        data: dict mapping word -> per-class count vector (updated in place)
        docs: a list of sentences
        ys: array-like of integer class labels, one per sentence
    Output:
        data, with each word mapped to the sum of the one-hot label vectors
        of the documents containing it (i.e. per-class occurrence counts)
    '''
    # One-hot encode the labels so each document contributes a unit vector
    # in its own class column.
    encoder = OneHotEncoder( sparse=False)
    onehot_labels = encoder.fit_transform(np.reshape(ys, (-1,1)))
    for sentence, label_vec in zip(docs, onehot_labels):
        for token in clean_doc(sentence):
            if token in data:
                # Accumulate onto the existing per-class counts.
                data[token] = data[token] + label_vec
            else:
                # First sighting of this word.
                data[token] = label_vec
    return data
# ## Build Frequencies Dictionary
# Build the freqs dictionary for later uses
freqs = count_docs({}, train_x, train_y)
freqs
# convert the freqs dictionary to nested list
def freqs_to_df(freqs, train_y):
    '''
    input:
        freqs: a frequencies dictionary mapping word -> per-class count array
               (ex: {'simplist': array([15., 4.]), 'silli': array([64., 20.])})
        train_y: labels for the training data; its sorted unique values
                 become the count columns
    output:
        a DataFrame indexed by word with one count column per class label
    '''
    # Column names: 'word' plus one column per unique class label.
    column_names = ['word'] + list(np.unique(train_y))
    # Iterate the dict items directly instead of re-indexing through a
    # separate keys list with range(len(...)).
    freqs_list = [[word, *counts] for word, counts in freqs.items()]
    df = pd.DataFrame(freqs_list, columns=column_names)
    df.set_index('word', inplace=True)
    return df
freqs_df = freqs_to_df(freqs, train_y)
freqs_df
freqs_df[5].sum()
# # Training and Testing the Model
# ## Build Training Function
def train_naive_bayes(freq_df, train_x, train_y):
    '''
    Input:
        freq_df: pandas DataFrame of per-class word counts, indexed by word,
                 one column per class label (left unmodified)
        train_x: list of training sentences (unused here; kept so existing
                 call sites keep working)
        train_y: array-like of integer class labels for the training docs
    Output:
        logprior: np.ndarray of log prior probabilities, one per class
        loglikelihood: DataFrame of log P(word | class) with add-one
                       (Laplace) smoothing, same shape as freq_df
    '''
    # Work on a copy: the previous version aliased freq_df and overwrote the
    # caller's count columns with probabilities.
    freqs = freq_df.copy()
    # V: vocabulary size, used by the add-one smoothing denominator.
    V = len(freqs.index)

    # ---- Part 1: log prior per class -----------------------------------
    D = len(train_y)
    int_labels = np.asarray(train_y).astype(int)
    labels = np.unique(int_labels)
    # Count documents per class without assuming labels run 0..K-1.
    count = np.array([(int_labels == lab).sum() for lab in labels], dtype=float)
    prior = count / D
    logprior = np.log(prior)

    # ---- Part 2: smoothed log likelihood per word and class ------------
    for column in freqs.columns:
        freqs[column] = (freqs[column] + 1) / (freqs[column].sum() + V)
    loglikelihood = np.log(freqs)
    return logprior, loglikelihood
# Fit the model on the prepared training split. (The previous version
# referenced the undefined names `sentences` and `labels`, raising a
# NameError; the defined `train_x`/`train_y` are the intended arguments.)
logprior, loglikelihood = train_naive_bayes(freqs_df, train_x, train_y)
print(logprior)
loglikelihood
# ## Build Testing Function
def naive_bayes_predict(tweet, logprior, loglikelihood):
    '''
    input:
        tweet: a document string
        logprior: array of per-class log prior probabilities
        loglikelihood: DataFrame of per-word, per-class log likelihoods
    output:
        probs: per-class total log score (prior + summed word likelihoods)
        y_hat: index of the highest-scoring class
    '''
    # Clean/stem the document into tokens.
    tokens = clean_doc(tweet)
    vocab = loglikelihood.index
    scores = []
    for column in loglikelihood.columns:
        # Sum log likelihoods of every in-vocabulary token; out-of-vocabulary
        # tokens contribute nothing to the score.
        total = 0
        for token in tokens:
            if token in vocab:
                total += loglikelihood.loc[token, column]
        scores.append(total)
    probs = logprior + scores
    y_hat = np.argmax(probs)
    return probs, y_hat
# +
def test_naive_bayes(test_x, logprior, loglikelihood):
    """
    input:
        test_x: a list of document strings
        logprior: the per-class log priors from training
        loglikelihood: per-word, per-class log likelihoods from training
    output:
        np.ndarray of predicted class indices, one per document
    """
    # Keep only the argmax class index returned by the per-document scorer.
    predictions = [naive_bayes_predict(doc, logprior, loglikelihood)[1]
                   for doc in test_x]
    return np.array(predictions)
# -
# ## Train and Test!
# +
###################################################
# Training and Testing using the Train/Test Split #
###################################################
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score
# Separate the sentences and the labels for training and testing
train_x = list(corpus[corpus.split=='train'].sentence)
train_y = np.array(corpus[corpus.split=='train'].label)
print(len(train_x))
print(len(train_y))
test_x = list(corpus[corpus.split=='test'].sentence)
test_y = np.array(corpus[corpus.split=='test'].label)
print(len(test_x))
print(len(test_y))
# Build the freqs dictionary (word -> per-class counts) for later uses
freqs = count_docs({}, train_x, train_y)
# Turn the frequencies dictionary into a word-indexed dataframe
freqs_df = freqs_to_df(freqs, train_y)
print(freqs_df.head())
# Total word occurrences per class (sanity check before training)
print('\nWord sum: {} | {} | {} | {} | {} | {} '.format(freqs_df[0].sum(),
                                                        freqs_df[1].sum(),
                                                        freqs_df[2].sum(),
                                                        freqs_df[3].sum(),
                                                        freqs_df[4].sum(),
                                                        freqs_df[5].sum()) )
# Retrieve the logprior and loglikelihood
logprior, loglikelihood = train_naive_bayes(freqs_df, train_x, train_y)
print('\nlogprior: ', logprior)
print('\nloglikelihood: ', loglikelihood)
# Predict on the held-out test split and report confusion matrix + accuracy
y_hats = test_naive_bayes(test_x, logprior, loglikelihood)
print('y_test: ', test_y)
print('y_predict: ', y_hats)
CM = confusion_matrix(test_y, y_hats)
print(ConfusionMatrixDisplay(CM).plot())
print('\nNaive Bayes accuracy: ', accuracy_score(test_y, y_hats))
| 1_Naive_Bayes/Naive_Bayes_TREC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="N8prLEJutaC5"
# !nvidia-smi -L
# %tensorflow_version 2.x
# + id="uvCrG7b_tqzF"
# !pip3 install --upgrade tensorflow-gpu==2.1.0
# + id="mAkD2MXzuDeP"
# !pip3 install -r /content/drive/MyDrive/sign_language/requirements.txt
# + colab={"base_uri": "https://localhost:8080/"} id="QbIuI4pxuFS_" executionInfo={"status": "ok", "timestamp": 1610579897517, "user_tz": -60, "elapsed": 5110802, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02556879631367095259"}} outputId="3d79e05b-07a6-4b69-dfb3-d22bf63daf9e"
# %cd /content/drive/MyDrive/sign_language
# !python -m signjoey train configs/sign_body3d.yaml
| notebooks_run/notebook_run_3d/results_body3d_earlyadd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Loading the data
data = pd.read_csv('data/C2A2_data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')
# Getting data during the period of 2005-2014
data = data[(data["Date"] >= "2005-01-01") & (data["Date"] <= "2014-12-31")]
# Removing Leap days
data = data[~data.Date.str.endswith('02-29')].copy()
# Sorting the data by Date
data = data.sort_values("Date")
# Converting the "Date" column to datetime
data["Date"] = list(map(pd.to_datetime, data["Date"]))
# Dividing the data into two dataframes: daily highs and daily lows
high = data[data["Element"] == "TMAX"]
low = data[data["Element"] == "TMIN"]
# +
# Record high and low temperature values for each day of the year over
# 2005-2014 (day-of-year computed in a common non-leap year, 2015)
record_high = high.copy()
record_high['dayofyear'] = record_high['Date'].map(lambda x: x.replace(year=2015).dayofyear)
record_high = record_high.groupby("dayofyear").max()
record_low = low.copy()
record_low['dayofyear'] = record_low['Date'].map(lambda x: x.replace(year=2015).dayofyear)
record_low = record_low.groupby("dayofyear").min()
# +
data = pd.read_csv('data/C2A2_data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')
# Getting data during the year 2015 (same cleaning as for 2005-2014)
data_2015 = data[(data["Date"] >= "2015-01-01") & (data["Date"] <= "2015-12-31")]
data_2015 = data_2015[~data_2015.Date.str.endswith('02-29')].copy()
data_2015 = data_2015.sort_values("Date")
data_2015["Date"] = list(map(pd.to_datetime, data_2015["Date"]))
high_2015 = data_2015[data_2015["Element"] == "TMAX"]
low_2015 = data_2015[data_2015["Element"] == "TMIN"]
record_high_2015 = high_2015.copy()
record_high_2015["dayofyear"] = record_high_2015["Date"].dt.dayofyear
record_high_2015 = record_high_2015.groupby("dayofyear").max()
record_low_2015 = low_2015.copy()
record_low_2015["dayofyear"] = record_low_2015["Date"].dt.dayofyear
record_low_2015 = record_low_2015.groupby("dayofyear").min()
# -
# Reset indexes so the 2015 records align positionally with 2005-2014 records
record_low = record_low.reset_index()
record_high = record_high.reset_index()
record_low_2015 = record_low_2015.reset_index()
record_high_2015 = record_high_2015.reset_index()
# Getting indexes of days whose 2005-2014 highs/lows were broken in 2015
broken_lows = (record_low_2015[record_low_2015["Data_Value"] < record_low['Data_Value']]).index.tolist()
broken_highs = (record_high_2015[record_high_2015['Data_Value'] > record_high['Data_Value']]).index.tolist()
# +
plt.figure(figsize=(20,7))
# 2005-2014 record envelope plus the 2015 days that broke it
plt.plot(record_high["Data_Value"], c="r", alpha=0.8, label = 'Record High 2005-2014')
plt.plot(record_low["Data_Value"], c="b", alpha=0.8, label = 'Record Low 2005-2014')
plt.scatter(broken_lows, record_low_2015['Data_Value'].iloc[broken_lows], s=20, c = 'black', label = 'Record Low broken in 2015')
# NOTE(review): these broken-high markers use c='b', the same blue as the
# record-low line; a distinct colour was probably intended -- confirm.
plt.scatter(broken_highs, record_high_2015['Data_Value'].iloc[broken_highs], s=20, c = 'b', alpha=0.8, label = 'Record High broken in 2015')
plt.legend()
plt.title("2015's temperature breaking points against 2005-2014 in Ann Arbor, Michigan, US")
# Shade the band between the record-low and record-high envelopes
plt.fill_between(range(len(record_low)),
                 record_low["Data_Value"], record_high["Data_Value"],
                 facecolor='lime',
                 alpha=0.11);
# Aligning plot (y axis is in tenths of a degree C)
plt.gca().axis([-1, 365, -400, 450])
# Hiding plot spines
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
# Changing Vertical and Horizontal Ticks labels
month_ticks = [0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330]
divs = [i+15 for i in month_ticks]
month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
plt.xticks(divs, month_names)
# Convert tick values from tenths of a degree to degrees Celsius
temp = [str(tick/10)+str(' °C') for tick in plt.gca().get_yticks()]
plt.gca().set_yticklabels(temp);
plt.savefig('Temp_Plot.png');
# -
| .ipynb_checkpoints/Submission-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
import pylab as plt
import numpy as np
from PIL import Image
import glob
import os
def log_progress(sequence, every=None, size=None):
    """Wrap *sequence* in a Jupyter progress-bar generator (ipywidgets).

    sequence: any iterable; items are yielded through unchanged.
    every: update the bar every *every* items (default: ~0.5% of *size*).
    size: length hint; required (via *every*) for pure iterators.
    NOTE: uses `unicode`, so this function is Python 2-only.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # No len(): treat as an open-ended iterator.
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = size / 200     # every 0.5%
    else:
        assert every is not None, 'sequence is iterator, set every'
    if is_iterator:
        # Unknown total: show an indeterminate ("info") bar.
        progress = IntProgress(min=0, max=1, value=1)
        progress.bar_style = 'info'
    else:
        progress = IntProgress(min=0, max=size, value=0)
    label = HTML()
    box = VBox(children=[label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = '{index} / ?'.format(index=index)
                else:
                    progress.value = index
                    label.value = u'{index} / {size}'.format(
                        index=index,
                        size=size
                    )
            yield record
    except:
        # Turn the bar red on any failure, then propagate the exception.
        progress.bar_style = 'danger'
        raise
    else:
        progress.bar_style = 'success'
        progress.value = index
        label.value = unicode(index or '?')
# Output directory for the modified projections; input directory holds the
# original .tif images.
out_dir = '/home/makov/diskmnt/big/yaivan/test_data/Sand/RS_4_1_zero/'
in_dir = '/home/makov/diskmnt/big/yaivan/test_data/Sand/RS_4_1/'
os.mkdir(out_dir)
in_images = glob.glob(os.path.join(in_dir,'*.tif'))
in_images
# +
# For every input image: overwrite a 700-column band (starting at column
# 100) with a linear horizontal ramp, then save a copy preserving the
# original TIFF tags.
for im in log_progress(in_images):
    name = os.path.split(im)[-1]
#     print(name)
    imt=Image.open(im,'r')
    data = plt.imread(im)
#     print(data.dtype)
# # #     plt.figure(figsize=(15,15))
# # #     plt.imshow(data, interpolation='nearest', cmap=plt.cm.gray)
# # #     plt.show()
    # Column 100+i is filled with the constant value i.
    for i in range(0,700):
        data[:,100+i:100+i+1] = i
#     data[:,1:10] = 30000
#     data[:,-12:-2] = 30000
# # #     plt.figure(figsize=(15,15))
# # #     plt.imshow(data, interpolation='nearest', cmap=plt.cm.gray)
# # #     plt.show()
    new_im = Image.fromarray(data)
    new_im.tag = imt.tag  # keep the source TIFF metadata
    new_im.save(os.path.join(out_dir, name))
# -
# (Removed stray incomplete expression `Image.f`, which raised AttributeError.)
| tomo/yaivan/zero.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 第4章 拡散方程式
# ## 4.4 楕円型方程式を解く:ポテンシャル方程式を例に
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ## 初期化
# +
nmax = 400                 # number of Gauss-Seidel sweeps
M = 0.1                    # free-stream Mach number
alpha2 = 1 - M ** 2        # Prandtl-Glauert compressibility factor (1 - M^2)
Uinf = 0.1                 # free-stream velocity
dx = dy = 0.05             # grid spacing
# xs, xe = -5.0, 6.0
# ys, ye = 0.0, 5.0
xs, xe = -1.0, 2.0         # x extent of the domain
ys, ye = 0.0, 1.0          # y extent of the domain
x_le, x_te = 0.0, 1.0      # airfoil leading/trailing edge positions
jmax = int((xe - xs) / dx) + 1
kmax = int((ye - ys) / dy) + 1
j_le = int((x_le - xs) / dx)
j_te = int((x_te - xs) / dx) + 1
x = np.linspace(xs, xe, jmax)
y = np.linspace(ys, ye, kmax)
phi = np.zeros([jmax, kmax])   # perturbation velocity potential
u = np.zeros([jmax, kmax])
v = np.zeros([jmax, kmax])
# Surface slope dy/dx of the airfoil, nonzero only for j_le <= j < j_te.
dydx = np.array([ 0.4 * (1.0 - 2.0 * x[j]) if j_le <= j < j_te else 0.0 for j in range(jmax)])
X, Y = np.meshgrid(x, y)  # for visualization
# -
# ## メイン
# Iterative solve of the potential equation by Gauss-Seidel sweeps.
residual = np.zeros(nmax)
for step in range(nmax):
    phi_prev = phi.copy()
    # Boundary conditions: zero potential on the left, right and top edges.
    phi[0, :] = 0.0
    phi[jmax - 1, :] = 0.0
    phi[:, kmax - 1] = 0.0
    # Bottom wall: Neumann condition from the surface slope dydx.
    for j in range(jmax):
        phi[j, 0] = phi[j, 1] - dydx[j] * dy
    # Gauss-Seidel update (in place, so freshly updated neighbours are used).
    for k in range(1, kmax - 1):
        for j in range(1, jmax - 1):
            phi[j, k] = 1.0 / (2.0 * alpha2 + 2.0) * \
                (alpha2 * (phi[j - 1, k] + phi[j + 1, k]) + phi[j, k - 1] + phi[j, k + 1])
    # RMS change over the grid for this sweep.
    residual[step] = np.sqrt(((phi - phi_prev) ** 2).sum() / (jmax * kmax))
# +
# Recover velocities from the potential by central differences
# (one-sided differences at the domain edges).
for j in range(1, jmax - 1):
    u[j, :] = Uinf * (1.0 + (phi[j + 1, :] - phi[j - 1, :]) / (2 * dx))
u[0,:] = Uinf * (1.0 + (phi[1, :] - phi[0, :]) / dx)
u[-1,:] = Uinf * (1.0 + (phi[-1, :] - phi[-2, :]) / dx)
for k in range(1, kmax - 1):
    v[:, k] = Uinf * (phi[:, k + 1] - phi[:, k - 1]) / (2 * dy)
v[:,0] = Uinf * (phi[:, 1] - phi[:, 0]) / dy
v[:,-1] = Uinf * (phi[:, -1] - phi[:, -2]) / dy
# Velocity magnitude.
va = np.sqrt(u ** 2 + v ** 2)
# -
# ## 残差
# Convergence history: RMS change of phi per sweep, on a log scale.
fig, ax1 = plt.subplots(figsize=(7,4), dpi=100)  # figure size
plt.rcParams["font.size"] = 22  # font size for plot text
cnt = plt.plot(range(nmax), residual)
plt.xlabel('n')
plt.ylabel('residual')
plt.yscale("log")
# Matplotlib renamed Axes.grid's `b` parameter to `visible` (the old name
# was removed in 3.6); `visible=` keeps the same behaviour.
plt.grid(visible=True, linestyle='--')
plt.show()
# ## 可視化
# Potential field phi over the domain.
fig, ax1 = plt.subplots(figsize=(9,3), dpi=100)  # figure size
plt.rcParams["font.size"] = 22  # font size for plot text
# cnt = plt.pcolormesh(X,Y,phi.transpose(1,0), cmap="Greys", vmin = -0.22, vmax = 0.22)
cnt = plt.pcolormesh(X,Y,phi.transpose(1,0), cmap="Greys")
cb = fig.colorbar(cnt, ax=ax1)
cb.set_label('$\phi$')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([xs,0,1,xe])
plt.yticks([ys, ye])
plt.show()
# Horizontal velocity u.
fig, ax1 = plt.subplots(figsize=(9,3), dpi=100)  # figure size
plt.rcParams["font.size"] = 22  # font size for plot text
cnt = plt.pcolormesh(X,Y,u.transpose(1,0), cmap="Greys")
cb = fig.colorbar(cnt, ax=ax1)
cb.set_label('$u$')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([xs,0,1,xe])
plt.yticks([ys, ye])
plt.show()
# Potential along the wall (k = 0).
plt.plot(phi[:,0], marker=".")
plt.grid(color='black', linestyle='dashed', linewidth=0.5)
# Wall-normal velocity compared against the imposed surface slope dydx.
plt.plot(v[:,0] / Uinf, marker=".")
plt.plot(dydx)
plt.grid(color='black', linestyle='dashed', linewidth=0.5)
np.min(v[:,0]), np.max(v[:,0])
# Streamwise velocity along the wall.
plt.plot(u[:,0], marker = '.')
np.min(u[:,0]), np.max(u[:,0])
plt.grid(color='black', linestyle='dashed', linewidth=0.5)
# Vertical velocity v.
fig, ax1 = plt.subplots(figsize=(9,3), dpi=100)  # figure size
plt.rcParams["font.size"] = 22  # font size for plot text
cnt = plt.pcolormesh(X,Y,v.transpose(1,0), cmap="Greys")
cb = fig.colorbar(cnt, ax=ax1)
cb.set_label('$v$')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([xs,0,1,xe])
plt.yticks([ys, ye])
plt.show()
# Velocity-magnitude contours.
fig, ax1 = plt.subplots(figsize=(9,3), dpi=100)  # figure size
plt.rcParams["font.size"] = 22  # font size for plot text
cnt = plt.contourf(X,Y,va.transpose(1,0), cmap="Greys", levels = 200)
cb = fig.colorbar(cnt, ax=ax1)
cb.set_label('$\sqrt{u^2+v^2}$')
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([xs,0,1,xe])
plt.yticks([ys, ye])
plt.show()
# +
# Velocity-magnitude contours with streamlines seeded at the inflow boundary.
fig, ax1 = plt.subplots(figsize=(9,3), dpi=100)  # figure size
plt.rcParams["font.size"] = 22  # font size for plot text
cnt = plt.contourf(X,Y,va.transpose(1,0), cmap="Greys", levels=200)
cb = fig.colorbar(cnt, ax=ax1)
cb.set_label('$\sqrt{u^2+v^2}$')
# Streamline seed points: a vertical column at x = -1.
sty = np.arange(0.02, ye, 0.05)
stx = np.full(len(sty), -1.0)
startpoints = np.array([stx, sty]).transpose(1,0)
plt.streamplot(X,Y,u.transpose(1,0), v.transpose(1,0), color = 'red', start_points=startpoints, linewidth = 0.5)
plt.xlabel('x')
plt.ylabel('y')
plt.xticks([xs,0,1,xe])
plt.show()
# -
| chapter4-4-PotentialEquation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from scipy import misc
import os.path
#find nearest pixel
from scipy import spatial
from scipy.ndimage.filters import gaussian_filter1d
import nibabel as nb
import numpy as np
import skfmm
from matplotlib import pyplot as plt
from skimage.draw import line
from PIL import Image
Image.MAX_IMAGE_PIXELS = 472700252
#function to calculate distance along path
def calculate_path_distance(mask,seed):
    """Fast-marching (geodesic) distance from *seed* to every unmasked voxel.

    mask: np.ma.masked_array as produced by convert2mask; modified in place
          so the seed voxel becomes the zero contour.
    seed: (row, col) index of the starting voxel.
    Returns the skfmm distance field over the unmasked region.
    """
    # Zero out and unmask the seed so fast marching starts there.
    mask.data[seed[0],seed[1]]=0
    mask.mask[seed[0],seed[1]]=False
    wmdistances=skfmm.distance(mask)
    return wmdistances;
#convert binary to appropriate mask
def convert2mask(wm_mask):
"""convert tissue classification into correct format for skfmm distance function"""
Base=np.ones_like(wm_mask)
mask=~wm_mask.astype(bool)
input_mask=np.ma.masked_array(Base,mask)
return input_mask;
def colour2mask(im, colour):
    """Return a 2-D binary mask (float array) of the pixels in *im* that
    equal *colour*."""
    # Renamed the local from `bin`, which shadowed the builtin of that name.
    mask = np.zeros(np.shape(im)[0:2])
    mask[im == colour] = 1
    return mask
def shrink_coordinates(coords, idx, size):
    """Return the rows of *coords* lying within +/- *size* of *idx* on both
    axes (inclusive), to shrink the search window for nearest-neighbour
    queries."""
    rows, cols = coords[:, 0], coords[:, 1]
    inside = ((rows >= idx[0] - size) & (rows <= idx[0] + size)
              & (cols >= idx[1] - size) & (cols <= idx[1] + size))
    return coords[inside]
def crop_image(image):
    """Crop *image* to the bounding box of its non-zero pixels plus a
    500-pixel margin on each side.

    Returns:
        (cropped, min_x, min_y): the cropped array and the (x, y) offset of
        its top-left corner in the original image, used by callers to map
        coordinates back to the uncropped frame.
    """
    y, x = np.where(image != 0)
    # Clamp the margins at 0: the previous unclamped values could go
    # negative, which still sliced the whole image (negative start clamps
    # to 0) but returned offsets that corrupted the callers' coordinate
    # un-cropping arithmetic.
    min_y = max(np.min(y) - 500, 0)
    max_y = np.max(y) + 500
    min_x = max(np.min(x) - 500, 0)
    max_x = np.max(x) + 500
    image = image[min_y:max_y, min_x:max_x]
    return image, min_x, min_y
# +
#profile separation is ~ 1 pixel in 20um space
profile_separation=2
resolution=0.005
smoothing_level=5
#expansion beyong L1/white surface
expansions=[50,100,150]
layer1=1#red
layer4=5#pink
white_c=7#blue
Slices=['1582','1600','3380','4080','5431','6316']
Slices=['3380']
Regions=['1','2','7','8']
for Slice in Slices:
for Region in Regions:
if os.path.isfile('/data1/users/kwagstyl/bigbrain/Slice_Verification/new_sections_03-18/Slice_'+Slice+'/Region'+Region+'_alllines.tif'):
print('running Slice: ' + Slice+ ' Region:' + Region)
whitename='Slice_'+Slice+'/Region_'+Region+'_coordinates_white.txt'
grayname='Slice_'+Slice+'/Region_'+Region+'_coordinates_gray.txt'
im=misc.imread('/data1/users/kwagstyl/bigbrain/Slice_Verification/new_sections_03-18/Slice_'+Slice+'/Region'+Region+'_alllines.tif')
#get mask of just layer 4
#crop image due to memory problems
im,crop_x,crop_y = crop_image(im)
L4=colour2mask(im,layer4)
L4y,L4x=np.where(L4==1)
#get mask of just layer 1
L1=colour2mask(im,layer1)
dumy,dumx=np.where(L1==1)
L1coords=np.transpose(np.vstack([dumy,dumx]))
#get mask of just white
white=colour2mask(im,white_c)
dumy,dumx=np.where(white==1)
whitecoords=np.transpose(np.vstack([dumy,dumx]))
seed=[L4y[0],L4x[0]]
mask=convert2mask(L4)
distances=calculate_path_distance(mask,seed)
start_index=np.unravel_index(np.argmax(distances),np.shape(distances))
mask=convert2mask(L4)
distances=calculate_path_distance(mask,start_index)
#set masked out values to huge so that they're never sampled
distances.data[distances.mask]=100000000000
fardistance=np.max(distances)
#create vector of steps along line eg 0, 10, 20, 30,
distance_steps=np.arange(0,fardistance,profile_separation)
L1distance,L1index = spatial.KDTree(L1coords).query(start_index)
Wdistance,Windex = spatial.KDTree(whitecoords).query(start_index)
imline=im[:]
L1_exp=np.zeros([len(distance_steps),2])
White_exp=np.zeros([len(distance_steps),2])
indices=np.zeros([len(distance_steps),2])
AllNormVectors=np.zeros([len(distance_steps),2])
L1distance=np.zeros([len(distance_steps),1])
Wdistance=np.zeros([len(distance_steps),1])
idx=start_index
print("generating profile lines...")
for c,step in enumerate(distance_steps):
#shrink search area for speed
search=np.int(np.round(profile_separation*1.5))
tmp=distances.data[idx[0]-search:idx[0]+search,idx[1]-search:idx[1]+search]
#search for next start index, closest to distance step
tmpidx = np.unravel_index(np.argmin(np.abs(tmp - step)),np.shape(tmp))
#reset coordinates
idx=tmpidx+np.array([idx[0]-search,idx[1]-search])
indices[c]=idx
#shrink L1 and white coords for search
tmpL1=shrink_coordinates(L1coords,idx,700)
L1distance[c],L1index = spatial.KDTree(tmpL1).query(idx)
tmpwhite=shrink_coordinates(whitecoords,idx,700)
Wdistance[c],Windex = spatial.KDTree(tmpwhite).query(idx)
# find vector between nearest white/L1 coordinates
Vec=tmpL1[L1index]-tmpwhite[Windex]
# normalise
AllNormVectors[c]=Vec/np.linalg.norm(Vec)
perc=(float(c)/len(distance_steps))*100.0
if perc % 10==0:
print(str(perc)+'% complete')
print(tmp)
Coordinates=[[0]]
SmoothNormVectors=gaussian_filter1d(AllNormVectors,smoothing_level,axis=0)
SmoothNormVectors=SmoothNormVectors/np.linalg.norm(SmoothNormVectors,axis=1).reshape([len(SmoothNormVectors),1])
for gexpansion in expansions:
for wexpansion in expansions:
L1_exp=np.round(SmoothNormVectors*(L1distance+gexpansion)+indices).astype(int)
White_exp=np.round(SmoothNormVectors*-(Wdistance+wexpansion)+indices).astype(int)
if Coordinates[0][0] == 0:
Coordinates=np.hstack([L1_exp,White_exp])
else:
Coordinates=np.hstack([Coordinates,np.hstack([L1_exp,White_exp])])
Coordinates=np.reshape(Coordinates,(np.size(Coordinates)//4,4))
break
if crop_x:
Coordinates=Coordinates+np.array([crop_y,crop_x,crop_y,crop_x])
Slicepng='Slice_'+Slice+'/Slice'+Slice+'.mnc'
image=nb.load(Slicepng)
dum,leny,lenx=np.shape(image.get_data())
xstart=0.5*lenx*resolution
ystart=-0.5*leny*resolution
#invert y
Coordinates=np.vstack((-Coordinates[:,0],Coordinates[:,1],-Coordinates[:,2],Coordinates[:,3])).transpose()
#change step size
Coordinates=Coordinates*resolution
Coordinates=Coordinates-np.array([ystart,xstart,ystart,xstart])
# +
# %matplotlib notebook
plt.figure()
im,crop_x,crop_y = crop_image(im)
plt.imshow(distances)
Coordinates=[[0]]
for gexpansion in expansions:
for wexpansion in expansions:
L1_exp=np.round(SmoothNormVectors*(L1distance+gexpansion)+indices).astype(int)
White_exp=np.round(SmoothNormVectors*-(Wdistance+wexpansion)+indices).astype(int)
print(L1_exp[0],White_exp[0])
if Coordinates[0][0] == 0:
Coordinates=np.hstack([L1_exp,White_exp])
else:
Coordinates=np.hstack([Coordinates,np.hstack([L1_exp,White_exp])])
Coordinates=np.reshape(Coordinates,(np.size(Coordinates)//4,4))
#for profile in Coordinates:
# plt.plot([profile[1],profile[0]],[profile[3],profile[2]])
#plt.scatter(L1_exp[0,1],L1_exp[0,0])
#plt.scatter(White_exp[0,1],White_exp[0,0])
#plt.scatter(indices[0,1],indices[0,0])
#for Coordinate in Coordinates:
# plt.plot([Coordinate[1],Coordinate[3]],[Coordinate[0],Coordinate[2]])
#for index in indices:
# plt.scatter(index[1],index[0])
Coordinate=Coordinates[-1]
plt.plot([Coordinate[1],Coordinate[3]],[Coordinate[0],Coordinate[2]])
# +
print([crop_x,crop_y,crop_x,crop_y])
Coordinates=Coordinates+np.array([crop_y,crop_x,crop_y,crop_x])
plt.figure()
im=misc.imread('/data1/users/kwagstyl/bigbrain/Slice_Verification/new_sections_03-18/Slice_'+Slice+'/Region'+Region+'_alllines.tif')
#get mask of just layer 4
plt.imshow(im)
plt.plot([Coordinates[0,1],Coordinates[0,3]],[Coordinates[0,0],Coordinates[0,2]])
# -
[Coordinates[0,1],Coordinates[0,0]],[Coordinates[0,3],Coordinates[0,2]]
indices[0,1]
Coordinates=Coordinates+np.array([crop_x,crop_y,crop_x,crop_y])
[Coordinates[0,1],Coordinates[0,0]],[Coordinates[0,3],Coordinates[0,2]]
np.array([crop_x,crop_y,crop_x,crop_y])
start_index
indices
| scripts/notebooks/check_translation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
seed_ = 2
from numpy.random import seed
seed(seed_)
from tensorflow.random import set_seed
set_seed(seed_)
import tensorflow as tf, numpy as np, pandas as pd
import tensorflow_docs as tfdocs
import tensorflow_docs.plots, tensorflow_docs.modeling
from matplotlib import pyplot as plt
from sklearn.utils.class_weight import compute_sample_weight
import tensorflow.keras.backend as K
# Load the clinical data, keep complete cases, and shuffle reproducibly.
df = pd.read_excel('../../../Desktop/新冠数据/新冠数据_协和.xlsx', sheet_name='data', index_col=0)
df = df.dropna().sample(frac=1, random_state=seed_)
X = df[['lymphocyte(%)', 'Neutrophil', 'LDH', 'CRP']]
# Standardize features: the z-score divides by the standard deviation, not
# the variance (the previous `/ X.var()` over-shrank wide-ranged features).
X = (X - X.mean()) / X.std()
Y = (df['Status'] == 'died').astype(int)
#Y = pd.get_dummies(df['Status'], prefix='Status')
# -
sample_weight = compute_sample_weight(y=Y, class_weight='balanced')
size_histories = {}
stopper = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=1000, verbose=1,
mode='auto', restore_best_weights=True)
for n in ['']: #
model = tf.keras.Sequential([])
for i in range(len(n)):
model.add(tf.keras.layers.Dense(2**int(n[i]), activation='relu',
kernel_initializer=tf.keras.initializers.HeUniform(seed=seed_)))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
metrics=['acc', tf.keras.metrics.AUC(name='auc')],
weighted_metrics=['acc'])
size_histories[n] = model.fit(X, Y, batch_size=16, epochs=500, validation_split=0.1,
sample_weight=sample_weight,
#callbacks=[stopper],
verbose=1)
loss_plotter = tfdocs.plots.HistoryPlotter(metric='loss', smoothing_std=1)
plt.figure(figsize=(12, 10), dpi=120)
loss_plotter.plot({key: value for key, value in size_histories.items() if key in size_histories.keys()})
loss_plotter = tfdocs.plots.HistoryPlotter(metric='loss', smoothing_std=1)
plt.figure(figsize=(12, 10), dpi=120)
loss_plotter.plot({key: value for key, value in size_histories.items() if key in size_histories.keys()})
#plt.ylim(0.25, 0.4)
#plt.xlim(800, 1000)
acc_plotter = tfdocs.plots.HistoryPlotter(metric='acc', smoothing_std=1)
plt.figure(figsize=(12, 10), dpi=120)
acc_plotter.plot({key: value for key, value in size_histories.items() if key in size_histories.keys()})
#plt.ylim(0.8, 1)
#plt.xlim(800, 1000)
auc_plotter = tfdocs.plots.HistoryPlotter(metric='auc', smoothing_std=1)
plt.figure(figsize=(12, 10), dpi=120)
auc_plotter.plot({key: value for key, value in size_histories.items() if key in size_histories.keys()})
#plt.ylim(0.8, 1)
#plt.xlim(800, 1000)
| explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PmodTMP2 Sensor example
#
# In this example, the Pmod temperature sensor is initialized and set to log a reading every 1 second.
#
# This example requires the PmodTMP2 sensor, and assumes it is attached to PMODB.
# ### 1. Simple TMP2 read() to see current room temperature
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
# +
from pynq.lib import Pmod_TMP2
mytmp = Pmod_TMP2(base.PMODB)
temperature = mytmp.read()
print(str(temperature) + " C")
# -
# ### 2. Starting logging temperature once every second
mytmp.start_log()
# ### 3. Try to modify temperature reading by touching the sensor
#
# The default interval between samples is 1 second. So wait for at least 10 seconds to get enough samples.
#
# During this period, try to press finger on the sensor to increase its temperature reading.
#
# Stop the logging whenever done trying to change sensor's value.
mytmp.stop_log()
log = mytmp.get_log()
# ### 5. Plot values over time
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(len(log)), log, 'ro')
plt.title('TMP2 Sensor log')
plt.axis([0, len(log), min(log), max(log)])
plt.show()
| boards/Pynq-Z2/base/notebooks/pmod/pmod_tmp2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# %matplotlib inline
# NOTE(review): `reload` is a Python-2 builtin; on Python 3 this needs
# `from importlib import reload` — confirm target interpreter.
import utils; reload(utils)
from utils import *
from vgg16 import Vgg16
# Build VGG16, drop its final (1000-way ImageNet) layer, freeze the rest,
# and add a fresh 2-way softmax head for the dogs-vs-cats task.
vgg = Vgg16()
model = vgg.model
model.pop()
for layer in model.layers:
    layer.trainable = False
model.add(Dense(2, activation='softmax'))
path = 'data/dogscats/'
path = path + 'sample/'  # work on the small sample subset
model_path = path + 'models/'
if not os.path.exists(model_path): os.mkdir(model_path)
batch_size = 32
# +
#tr_data = get_data(path + 'train')
#val_data = get_data(path + 'valid')
# -
# batch_size=1, shuffle=False so classes/labels line up with file order.
batches = get_batches(path + 'train',shuffle=False,batch_size=1)
val_batches = get_batches(path + 'valid',shuffle=False,batch_size=1)
tr_classes = batches.classes
val_classes = val_batches.classes
tr_labels = onehot(tr_classes)
val_labels = onehot(val_classes)
gen = image.ImageDataGenerator()
# NOTE(review): `flow_from_directory`'s second positional parameter is not a
# labels array in Keras — verify these calls against the Keras version in use.
batches = gen.flow_from_directory(path + 'train',tr_labels,batch_size=batch_size,shuffle=True)
val_batches = gen.flow_from_directory(path + 'valid',val_labels,batch_size=batch_size,shuffle=False)
# FIX: these assignments had no right-hand side, which is a SyntaxError.
# Placeholders keep the names defined; fill them with precomputed features
# (e.g. the commented-out get_data results above) when available.
tr_features = None
val_features = None
def fit_model(model, batches, val_batches, nb_epoch=1):
    """Train *model* from generator *batches* for *nb_epoch* epochs, validating on *val_batches*."""
    # Keras 1.x API: samples_per_epoch / nb_val_samples count samples, and
    # batch generators expose their total size via the `.N` attribute.
    model.fit_generator(batches,samples_per_epoch = batches.N, nb_epoch=nb_epoch,
                        validation_data = val_batches, nb_val_samples = val_batches.N)
# Compile the 2-way head with RMSprop and train for one epoch.
opt = RMSprop(lr = 0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy',metrics=['accuracy'])
# Free memory from earlier cells before training.
import gc
gc.collect()
fit_model(model,batches,val_batches)
model.summary()
| deeplearning1/nbs/Tstlesson2-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys, os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.stats import bayes_mvs as bayesest
import time
sys.path.insert(0, '../../../PyEcoLib')
from PopSimulator import PopSimulator
from simulator import Simulator
# %matplotlib inline
# -
# Simulation parameters for the population (nu=2: both daughters tracked) run.
meanbirthsize = 1 # micron
doubling_time = 18 #min
tmax = 6*doubling_time #min
sample_time = 2 #min
div_steps = 10        # division steps per cycle
ncells = 200          # initial number of cells
gr = np.log(2)/doubling_time  # exponential growth rate
# Output directories for data and figures.
os.makedirs('./data', exist_ok=True)
os.makedirs('./figures', exist_ok=True)
v0=meanbirthsize*np.ones(ncells)
sim = PopSimulator(ncells=ncells,gr = gr, sb=meanbirthsize, steps = div_steps,nu=2,V0array=v0) #Initializing the simulator
start = time.time()
sim.szdyn(tmax = 6*doubling_time, sample_time = 0.1*doubling_time, FileName= "./data/data2Pop.csv", DivEventsFile="./data/DivEvents2.csv")
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use builtin int.
print('It took', int(time.time()-start), 'seconds.')
data1=pd.read_csv("./data/data2Pop.csv")
fig, ax = plt.subplots(1,1, figsize=(7,5))
# One grey curve per sample: cell count (rows per time) vs time; the black
# curve is the per-initial-cell average over all samples.
smparr=data1.Sample.unique()
smparr.sort()
for smp in smparr:
    df=data1[data1.Sample==smp]
    tm,N=np.unique(df['Time'],return_counts=True)
    plt.plot(tm/doubling_time,N,c="#AAAAAA")
tm,N=np.unique(data1['Time'],return_counts=True)
plt.plot(tm/doubling_time,N/ncells,lw=3,c='k')
plt.yscale('log')
#plt.ylabel("$Population$",size=20)
# FIX: this xlabel call was duplicated on two consecutive lines.
plt.xlabel(r"$t/\tau$",size=20)
plt.ylim([0,100])
plt.xlim([0,tmax/doubling_time])
taqui=np.arange(0,(tmax+1)/doubling_time,step=1)
plt.xticks(np.array(taqui))
# Powers of two on the log-scaled y axis.
taqui=2**np.arange(1,7,step=1)
plt.yticks(np.array(taqui))
plt.grid()
plt.tick_params(axis='x', labelsize=15)
plt.tick_params(axis='y', labelsize=15)
# Manually drawn tick labels and rotated axis title.
for l in range(len(taqui)):
    plt.text(-.5,taqui[l],str(taqui[l]),fontsize=15)
plt.text(-1,20,'Population',fontsize=20,rotation=90)
for axis in ['bottom','left']:
    ax.spines[axis].set_linewidth(2)
    ax.tick_params(axis='both', width=2,length=6)
for axis in ['top','right']:
    ax.spines[axis].set_linewidth(0)
    ax.tick_params(axis='both', width=0,length=6)
import random
def bootstrap(arr, nboot=1000):
    """Bootstrap estimate of the mean of *arr*.

    Resamples *arr* with replacement *nboot* times (default 1000, as before;
    now parameterized) and returns a tuple
    (mean of resampled means, half-width of the 5%-95% quantile interval).
    """
    mnar=np.empty(nboot)
    for l in range(nboot):
        mnar[l]=np.mean(random.choices(arr,k=len(arr)))
    mn=np.mean(mnar)
    up=np.quantile(mnar,0.95)
    down=np.quantile(mnar,0.05)
    return(mn,0.5*(-down+up))
import random
def bootstrapCV2(arr, nboot=1000):
    """Bootstrap estimate of the squared coefficient of variation of *arr*.

    Resamples *arr* with replacement *nboot* times (default 1000, as before;
    now parameterized) and returns a tuple
    (mean of resampled CV^2 = var/mean^2, half-width of the 5%-95% interval).
    """
    mnar=np.empty(nboot)
    for l in range(nboot):
        v=random.choices(arr,k=len(arr))
        mnar[l]=np.var(v)/np.mean(v)**2
    mn=np.mean(mnar)
    up=np.quantile(mnar,0.95)
    down=np.quantile(mnar,0.05)
    return(mn,0.5*(-down+up))
# +
data1=pd.read_csv("./data/data2Pop.csv")
timearray1=data1.Time.unique()
mnszarray=[]
cvszarray=[]
errcv2sz=[]
errmnsz=[]
# For each sampled time, bootstrap mean size and CV^2 over all cells.
# FIX: bootstrap()/bootstrapCV2() were each called twice per time point, so an
# estimate and its error bar came from *different* resample sets (and the work
# was doubled). Each is now called once and both return values are kept.
for t in timearray1:
    df=data1[data1.Time==t]
    szs=df.Size.values.tolist()
    mn, mn_err = bootstrap(szs)
    mnszarray.append(mn)
    errmnsz.append(mn_err)
    cv2, cv2_err = bootstrapCV2(szs)
    cvszarray.append(cv2)
    errcv2sz.append(cv2_err)
# Mean size vs time (units of the doubling time) with bootstrap error band.
plt.plot(timearray1/doubling_time,mnszarray)
plt.fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray)-np.array(errmnsz),np.array(mnszarray)+np.array(errmnsz),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
timearray2=data1.Time.unique()
mnsz2array=[]
cvszarray2=[]
errcv2sz2arr=[]
errmnsz2arr=[]
# Per-sample mean/CV^2 of size first, then bootstrapped across samples.
# FIX: bootstrap() was called twice per list, giving the estimate and its
# error bar from different resample sets; it is now called once per list.
for t in timearray2:
    df=data1[data1.Time==t]
    mntemp=[]
    vartemp=[]
    meanerrmn=[]
    meanerrvar=[]
    smparray=df.Sample.unique()
    for s in smparray:
        dft=df[df.Sample==s]
        if len(dft)>1:
            conc=dft.Size.tolist()
            mntemp.append(np.mean(conc))
            vartemp.append(np.var(conc)/np.mean(conc)**2)
        else:
            # A single cell in this sample: its size is the mean, CV^2 -> 0.
            conc=dft.Size
            mntemp.append(conc.tolist()[0])
            vartemp.append(0)
            meanerrmn.append(0)
            meanerrvar.append(0)
    mn, mn_err = bootstrap(mntemp)
    mnsz2array.append(mn)
    errmnsz2arr.append(mn_err)
    cv2, cv2_err = bootstrap(vartemp)
    cvszarray2.append(cv2)
    errcv2sz2arr.append(cv2_err)
# +
# Compare mean size from population snapshots (blue band) against the
# lineage-tracking estimate (orange band), both vs time in doubling times.
plt.plot(timearray2/doubling_time,mnszarray)
plt.fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray)-np.array(errmnsz),np.array(mnszarray)+np.array(errmnsz),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
plt.plot(timearray2/doubling_time,mnsz2array)
plt.fill_between(np.array(timearray2)/doubling_time,np.array(mnsz2array)-np.array(errmnsz2arr),np.array(mnsz2array)+np.array(errmnsz2arr),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0,label="SSA")
# -
# Mother-machine style run: nu=1 keeps a single lineage per initial cell.
meanbirthsize = 1 # micron
doubling_time = 18 #min
tmax = 6*doubling_time #min
sample_time = 2 #min
div_steps = 10
ncells = 2000
v0=meanbirthsize*np.ones(ncells)
sim = PopSimulator(ncells=ncells,gr = gr, sb=meanbirthsize, steps = div_steps,nu=1,V0array=v0) #Initializing the simulator
start = time.time()
sim.szdyn(tmax = 6*doubling_time, sample_time = 0.1*doubling_time, FileName= "./data/dataPopMM.csv", DivEventsFile="./data/DivEventsMM.csv")
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use builtin int.
print('It took', int(time.time()-start), 'seconds.')
# +
data1=pd.read_csv("./data/dataPopMM.csv")
timearrayMM=data1.Time.unique()
mnszarrayMM=[]
cvszarrayMM=[]
errcv2szMM=[]
errmnszMM=[]
# FIX: hoisted duplicated bootstrap()/bootstrapCV2() calls so each estimate and
# its error bar come from the same resample set (and half the work is done).
for t in timearrayMM:
    df=data1[data1.Time==t]
    szs=df.Size.tolist()
    mn, mn_err = bootstrap(szs)
    mnszarrayMM.append(mn)
    errmnszMM.append(mn_err)
    cv2, cv2_err = bootstrapCV2(szs)
    cvszarrayMM.append(cv2)
    errcv2szMM.append(cv2_err)
# +
# Two-panel figure: mean size (left) and CV^2 of size (right) for the three
# statistics computed above (single lineage, population snapshots, lineage
# tracking), each with its bootstrap error band.
fig, ax = plt.subplots(1,2, figsize=(15,5))
ax[0].plot(timearray2/doubling_time,mnszarrayMM,lw=3,label="Single Lineage")
ax[0].fill_between(np.array(timearrayMM)/doubling_time,np.array(mnszarrayMM)-np.array(errmnszMM),np.array(mnszarrayMM)+np.array(errmnszMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label='_nolegend_')
ax[0].plot(timearray2/doubling_time,mnszarray,lw=3,label='Population Snapshots (PBE)')
ax[0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray)-np.array(errmnsz),np.array(mnszarray)+np.array(errmnsz),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0,label='_nolegend_')
ax[0].plot(timearray2/doubling_time,mnsz2array,lw=3,label='Lineage Tracking')
ax[0].fill_between(np.array(timearray2)/doubling_time,np.array(mnsz2array)-np.array(errmnsz2arr),np.array(mnsz2array)+np.array(errmnsz2arr),
                 alpha=1, edgecolor='#47D200', facecolor='#47D200',linewidth=0,label='_nolegend_')
ax[1].plot(timearray2/doubling_time, cvszarrayMM,lw=3)
ax[1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarrayMM)-np.array(errcv2szMM),np.array(cvszarrayMM)+np.array(errcv2szMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[1].plot(timearray2/doubling_time, cvszarray,lw=3)
ax[1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray)-np.array(errcv2sz),np.array(cvszarray)+np.array(errcv2sz),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0)
ax[1].plot(timearray2/doubling_time, cvszarray2,lw=3)
ax[1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2sz2arr),np.array(cvszarray2)+np.array(errcv2sz2arr),
                 alpha=1, edgecolor='#47D200', facecolor='#47D200',linewidth=0)
ax[0].set_ylabel("$s$ $(\mu m)$",size=20)
ax[1].set_ylabel("$C_V^2(s)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
ax[0].legend(fontsize=15)
#ax[0].set_ylim([1,1.7])
#ax[1].set_ylim([0,0.15])
# Shared cosmetic styling for both panels.
for l in [0,1]:
    ax[l].set_xlim([0,6])
    taqui=np.arange(0,6.5,step=1)
    ax[l].set_xticks(np.array(taqui))
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
plt.savefig('./figures/size_population.eps',bbox_inches='tight')
plt.savefig('./figures/size_population.svg',bbox_inches='tight')
# -
# Division events from the population run drive the gene-expression replay below.
data=pd.read_csv("./data/DivEvents2.csv")
mean_size=1.44*meanbirthsize  # mean cell size for an adder-like cycle
sampling_time = sample_time
rprom = 10 # RNA mean concentration (RNA/um)
pprom = 100 # prot mean concentration (RNA/um)
gammar = 5*gr # RNA Active degradation rate
kr = rprom*(gr+gammar) # RNA transcription rate
kp = pprom*gr/rprom # Protein translation rate
class Bacteria:
    """One cell in the gene-expression replay: tracks size, RNA/protein counts
    and the times of its next stochastic reactions.

    NOTE(review): CalcTimeEvent reads the module-level rates gr, kr, gammar and
    kp rather than per-instance values — confirm that is intended.
    """
    def __init__(self, idx, V0, num_steps, gr, divpar, k,rna,prot):
        self.dp = divpar        # division parameter
        self.gr = gr            # growth rate
        self.idx = idx          # cell index
        self.popidx = idx       # sample (lineage) index; may be reassigned
        self.V = V0             # current volume
        self.Vb = V0            # volume at birth
        self.Vd = V0            # volume at division
        self.ndiv = 0           # divisions so far
        self.rna = rna          # RNA copy number
        self.prot = prot        # protein copy number
        self.k = k
        self.nextrb=0           # time to next RNA birth
        self.nextrd=0           # time to next RNA degradation
        self.nextp=0            # time to next protein birth
    def CalcTimeEvent(self,Nr,Np,s):#estimating reaction times from propensity function
        # Draws exponential-like waiting times; RNA birth accounts for
        # exponential volume growth (size-proportional transcription).
        if Nr>0 and Np>0:
            self.nextrb=(1/gr)*np.log(1-(gr/(kr*s))*np.log(np.random.rand())) #time to thenext rna creation
            self.nextrd=-np.log(np.random.rand())/(gammar*Nr)
            self.nextp=-np.log(np.random.rand())/(kp*Nr)
        elif Nr<=0:
            # No RNA: degradation/translation pushed far into the future.
            self.nextrb=(1/gr)*np.log(1-(gr/(kr*s))*np.log(np.random.rand())) #time to thenext rna creation
            self.nextrd=1000000
            self.nextp=100000000
        # NOTE(review): the case Nr>0 and Np<=0 leaves all times unchanged;
        # protein counts never decrease here, so it may be unreachable — verify.
# +
# Initial RNA/protein copy numbers for every founder cell.
mnR=5
mnP=50
pop = []
df= data[data.BirthTime==0]
for m in range(len(df)):
    # NOTE(review): this rebinds the module-level `gr` to the last founder's
    # growth rate, which CalcTimeEvent also reads — confirm intended.
    gr=df.iloc[m].GrowthRate
    sz=df.iloc[m].Sb
    dp=df.iloc[m].DivPar
    rna=mnR
    prot=mnP
    bct = Bacteria(idx=m, V0=sz, num_steps=div_steps, gr=gr, divpar=dp, k = gr,rna=rna,prot=prot)
    bct.CalcTimeEvent(rna,prot,sz)
    pop.append(bct)
#-------------------------------------------------------------------------------
t=0
tmax=6*doubling_time
# Keep only post-t=0 division events; m walks through them in order.
data=data[data.BirthTime>0]
m=0
m=0
sample_time=0.1*doubling_time
tref=sample_time
GeneFile=open("./data/PopGene.csv","w")
output="Time,Sample,Cell,Nr,Np,Size\n"
# `times` row 0 holds the next division event; rows 1..3 per cell hold the
# next RNA-birth (Type 1), RNA-death (Type 2) and protein-birth (Type 3) times.
nextdiv=data.iloc[m].BirthTime
popidx=data.iloc[m].Sample
idx=data.iloc[m].Cell
idx2=data.iloc[m+1].Cell
times=pd.DataFrame([[popidx,idx,nextdiv,0]],columns=['Sample','Cell','time','Type'])
for cell in pop:
    output+="0.00,"+str(cell.popidx)+","+str(cell.idx)+','+str(cell.rna)+','+str(cell.prot)+','+str(cell.V)+"\n"
    times=pd.concat([times,pd.DataFrame([[cell.popidx,cell.idx,cell.nextrb,1],\
                    [cell.popidx,cell.idx,cell.nextrd,2],\
                    [cell.popidx,cell.idx,cell.nextp,3]],columns=['Sample','Cell','time','Type'])])
# NOTE(review): reset_index is not in-place and its result is discarded here.
times.reset_index(drop=True)
GeneFile.write(output)
# +
# Event-driven replay: advance to the earlier of the next sampling time `tref`
# or the next stochastic event; division events (Type 0) consume two rows of
# `data` (daughter replacing the mother, plus the new sister cell).
while m<len(data)-2 and t>=0:
    #print(t)
    nextt=np.min(times.time.tolist())
    if tref<nextt:
        # Sampling point comes first: grow all cells to tref and log state.
        dt=tref-t
        output=''
        for cell in pop:
            cell.V=cell.V*np.exp(cell.gr*dt)
            output+=str(tref)+','+str(int(cell.popidx))+','+str(int(cell.idx))+','+str(cell.rna)+','+str(cell.prot)+','+str(cell.V)+'\n'
        GeneFile.write(output)
        t=tref
        tref+=sample_time
        print(t)
    else:
        # Stochastic event comes first: grow all cells to its time, then fire it.
        dt=nextt-t
        for cell in pop:
            cell.V=cell.V*np.exp(cell.gr*dt)
        t=nextt
        loc=np.argmin(times.time.tolist())
        if times.iloc[loc].Type==0:
            # Division: split molecules binomially by daughter/mother volume.
            df=data.iloc[m]
            idx=int(data.iloc[m].Mother)
            cell=pop[idx]
            cell.V=df.Sb
            Nr=cell.rna
            Np=cell.prot
            newnr=np.random.binomial(Nr,df.Sb/df.MotherSize)
            newnp=np.random.binomial(Np,df.Sb/df.MotherSize)
            cell.rna=newnr
            cell.prot=newnp
            cell.CalcTimeEvent(newnr,newnp,cell.V)
            cell.gr=df.GrowthRate
            df=data.iloc[m+1]#The sister cell
            bct = Bacteria(idx=df.Cell, V0=df.Sb, num_steps=div_steps, gr=df.GrowthRate, divpar=df.DivPar, k = df.GrowthRate,\
                           rna=Nr-newnr,prot=Np-newnp)
            bct.popidx=cell.popidx
            bct.CalcTimeEvent(Nr-newnr,Np-newnp,cell.V)
            pop.append(bct)
            times=pd.concat([times,pd.DataFrame([[bct.popidx,bct.idx,bct.nextrb+t,1],\
                            [bct.popidx,bct.idx,bct.nextrd+t,2],\
                            [bct.popidx,bct.idx,bct.nextp+t,3]],columns=['Sample','Cell','time','Type'])])
            # NOTE(review): reset_index result discarded (not in-place).
            times.reset_index(drop=True)
            times.iloc[0]=[data.iloc[m+2].Sample,data.iloc[m+2].Cell,data.iloc[m+2].BirthTime,0]
            m+=2
        else:
            # Chemical event on one cell: update counts, redraw its event times.
            idx=int(times.iloc[loc].Cell)
            cell=pop[idx]
            if times.iloc[loc].Type==1:
                cell.rna+=1
            elif times.iloc[loc].Type==2:
                cell.rna-=1
            else:
                cell.prot+=1
            cell.CalcTimeEvent(cell.rna,cell.prot,cell.V)
            if len(times[times.Cell==idx])==3:
                times.loc[times.Cell==idx,'time']=[cell.nextrb+t,cell.nextrd+t,cell.nextp+t]
            else:
                # This cell's row set also includes the division row 0.
                times.loc[times.Cell==idx,'time']=[times.iloc[0].time,cell.nextrb+t,cell.nextrd+t,cell.nextp+t]
GeneFile.close()
# +
data1=pd.read_csv("./data/PopGene.csv")
timearray=data1.Time.unique()
mnprotarray=[]
cvprotarray=[]
errcv2prot=[]
errmnprot=[]
# Protein *concentration* (Np/Size) statistics over all cells per time point.
# FIX: removed an unused bayesest() call and hoisted duplicated bootstrap calls
# so each estimate and its error bar come from the same resample set.
for t in timearray:
    df=data1[data1.Time==t]
    szs=np.array(df.Np/df.Size)
    mn, mn_err = bootstrap(szs)
    mnprotarray.append(mn)
    errmnprot.append(mn_err)
    cv2, cv2_err = bootstrapCV2(szs)
    cvprotarray.append(cv2)
    errcv2prot.append(cv2_err)
plt.plot(timearray/doubling_time,mnprotarray)
plt.fill_between(np.array(timearray)/doubling_time,np.array(mnprotarray)-np.array(errmnprot),np.array(mnprotarray)+np.array(errmnprot),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
# +
timearray=data1.Time.unique()
mnprotarray2=[]
cvprotarray2=[]
errcv2protarr2=[]
errmnprotarr2=[]
# Lineage-tracking view: per-sample mean/CV^2 of protein concentration, then
# bootstrapped across samples.  FIX: hoisted duplicated bootstrap() calls.
for t in timearray:
    df=data1[data1.Time==t]
    mntemp=[]
    meanerrmn=[]
    vartemp=[]
    meanerrvar=[]
    smparray=df.Sample.unique()
    for s in smparray:
        dft=df[df.Sample==s]
        if len(dft)>1:
            conc=np.array(dft.Np/dft.Size)
            mntemp.append(np.mean(conc))
            vartemp.append(np.var(conc)/np.mean(conc)**2)
        else:
            # Single cell in this sample: variance undefined -> 0.
            conc=dft.Np/dft.Size
            mntemp.append(conc.tolist()[0])
            vartemp.append(0)
    mn, mn_err = bootstrap(mntemp)
    mnprotarray2.append(mn)
    errmnprotarr2.append(mn_err)
    cv2, cv2_err = bootstrap(vartemp)
    cvprotarray2.append(cv2)
    errcv2protarr2.append(cv2_err)
plt.plot(timearray/doubling_time,mnprotarray)
#plt.fill_between(np.array(timearray)/doubling_time,np.array(mnprotarray)-np.array(errmnprot),np.array(mnprotarray)+np.array(errmnprot),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
plt.plot(timearray/doubling_time,mnprotarray2)
#plt.fill_between(np.array(timearray)/doubling_time,np.array(mnprotarray2)-np.array(errmnprotarr2),np.array(mnprotarray2)+np.array(errmnprotarr2),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
# -
# Append the final time point (tmax = 6*18 = 108 min) to the MM time axis so
# array lengths match the replay output below — TODO confirm against MMGene.csv.
timearrayMM= np.concatenate([timearrayMM,[108]])
data=pd.read_csv("./data/DivEventsMM.csv")
mean_size=1.44*meanbirthsize
sampling_time = sample_time
rprom = 10 # RNA mean concentration (RNA/um)
pprom = 100 # prot mean concentration (RNA/um)
gammar = 5*gr # RNA Active degradation rate
kr = rprom*(gr+gammar) # RNA transcription rate
kp = pprom*gr/rprom # Protein translation rate
# +
# Same initialization as the population replay, but fed by the MM (single
# lineage) division events and writing to MMGene.csv.
mnR=5
mnP=50
pop = []
df= data[data.BirthTime==0]
for m in range(len(df)):
    # NOTE(review): rebinds module-level `gr` (read by CalcTimeEvent) — verify.
    gr=df.iloc[m].GrowthRate
    sz=df.iloc[m].Sb
    dp=df.iloc[m].DivPar
    rna=mnR
    prot=mnP
    bct = Bacteria(idx=m, V0=sz, num_steps=div_steps, gr=gr, divpar=dp, k = gr,rna=rna,prot=prot)
    bct.CalcTimeEvent(rna,prot,sz)
    pop.append(bct)
#-------------------------------------------------------------------------------
t=0
tmax=6*doubling_time
data=data[data.BirthTime>0]
m=0
m=0
sample_time=0.1*doubling_time
tref=sample_time
GeneFile=open("./data/MMGene.csv","w")
output="Time,Sample,Cell,Nr,Np,Size\n"
# Row 0 of `times` tracks the next division; three rows per cell track its
# RNA-birth (1), RNA-death (2) and protein-birth (3) event times.
nextdiv=data.iloc[m].BirthTime
popidx=data.iloc[m].Sample
idx=data.iloc[m].Cell
idx2=data.iloc[m+1].Cell
times=pd.DataFrame([[popidx,idx,nextdiv,0]],columns=['Sample','Cell','time','Type'])
for cell in pop:
    output+="0.00,"+str(cell.popidx)+","+str(cell.idx)+','+str(cell.rna)+','+str(cell.prot)+','+str(cell.V)+"\n"
    times=pd.concat([times,pd.DataFrame([[cell.popidx,cell.idx,cell.nextrb,1],\
                    [cell.popidx,cell.idx,cell.nextrd,2],\
                    [cell.popidx,cell.idx,cell.nextp,3]],columns=['Sample','Cell','time','Type'])])
# NOTE(review): reset_index result discarded (not in-place).
times.reset_index(drop=True)
GeneFile.write(output)
# +
# Single-lineage replay: as the population loop above, but a division keeps
# only the tracked daughter (no sister cell is added), so each Type-0 event
# consumes one row of `data`.
while m<len(data)-1 and t>=0:
    #print(t)
    nextt=np.min(times.time.tolist())
    if tref<nextt:
        # Sampling point first: grow cells to tref and log their state.
        dt=tref-t
        output=''
        for cell in pop:
            cell.V=cell.V*np.exp(cell.gr*dt)
            output+=str(tref)+','+str(int(cell.popidx))+','+str(int(cell.idx))+','+str(cell.rna)+','+str(cell.prot)+','+str(cell.V)+'\n'
        GeneFile.write(output)
        t=tref
        tref+=sample_time
        print(t)
    else:
        dt=nextt-t
        for cell in pop:
            cell.V=cell.V*np.exp(cell.gr*dt)
        t=nextt
        loc=np.argmin(times.time.tolist())
        if times.iloc[loc].Type==0:
            # Division: binomial partitioning of molecules to the daughter.
            df=data.iloc[m]
            idx=int(data.iloc[m].Mother)
            cell=pop[idx]
            cell.V=df.Sb
            Nr=cell.rna
            Np=cell.prot
            newnr=np.random.binomial(Nr,df.Sb/df.MotherSize)
            newnp=np.random.binomial(Np,df.Sb/df.MotherSize)
            cell.rna=newnr
            cell.prot=newnp
            cell.CalcTimeEvent(newnr,newnp,cell.V)
            cell.gr=df.GrowthRate
            m+=1
            times.iloc[0]=[data.iloc[m].Sample,data.iloc[m].Cell,data.iloc[m].BirthTime,0]
        else:
            # Chemical event on one cell: update counts, redraw event times.
            idx=int(times.iloc[loc].Cell)
            cell=pop[idx]
            if times.iloc[loc].Type==1:
                cell.rna+=1
            elif times.iloc[loc].Type==2:
                cell.rna-=1
            else:
                cell.prot+=1
            cell.CalcTimeEvent(cell.rna,cell.prot,cell.V)
            if len(times[times.Cell==idx])==3:
                times.loc[times.Cell==idx,'time']=[cell.nextrb+t,cell.nextrd+t,cell.nextp+t]
            else:
                # This cell's rows include the division row 0.
                times.loc[times.Cell==idx,'time']=[times.iloc[0].time,cell.nextrb+t,cell.nextrd+t,cell.nextp+t]
GeneFile.close()
# +
data1=pd.read_csv("./data/MMGene.csv")
timearrayMM=data1.Time.unique()
mnprotarrayMM=[]
cvprotarrayMM=[]
errcv2protMM=[]
errmnprotMM=[]
# Mother-machine protein concentration statistics.
# FIX: hoisted the duplicated bootstrapCV2() calls so the CV^2 estimate and
# its error bar come from the same resample set.
for t in timearrayMM:
    df=data1[data1.Time==t]
    prots=np.array(df.Np/df.Size)
    mnprotarrayMM.append(np.mean(prots))
    errmnprotMM.append(bootstrap(prots)[1])
    cv2, cv2_err = bootstrapCV2(prots)
    cvprotarrayMM.append(cv2)
    errcv2protMM.append(cv2_err)
# +
# Re-draw the size comparison curves.  NOTE(review): `ax` here is whatever
# axes pair was created in an earlier cell — confirm a fresh figure is not
# intended before this cell.
ax[0].plot(timearray2/doubling_time,mnszarrayMM,lw=3,label="Single Lineage")
ax[0].fill_between(np.array(timearrayMM)/doubling_time,np.array(mnszarrayMM)-np.array(errmnszMM),np.array(mnszarrayMM)+np.array(errmnszMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label='_nolegend_')
ax[0].plot(timearray2/doubling_time,mnszarray,lw=3,label='Population Snapshots (PBE)')
ax[0].fill_between(np.array(timearray1)/doubling_time,np.array(mnszarray)-np.array(errmnsz),np.array(mnszarray)+np.array(errmnsz),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0,label='_nolegend_')
ax[0].plot(timearray2/doubling_time,mnsz2array,lw=3,label='Lineage Tracking')
ax[0].fill_between(np.array(timearray2)/doubling_time,np.array(mnsz2array)-np.array(errmnsz2arr),np.array(mnsz2array)+np.array(errmnsz2arr),
                 alpha=1, edgecolor='#47D200', facecolor='#47D200',linewidth=0,label='_nolegend_')
ax[1].plot(timearray2/doubling_time, cvszarrayMM,lw=3)
ax[1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarrayMM)-np.array(errcv2szMM),np.array(cvszarrayMM)+np.array(errcv2szMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0)
ax[1].plot(timearray2/doubling_time, cvszarray,lw=3)
ax[1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray)-np.array(errcv2sz),np.array(cvszarray)+np.array(errcv2sz),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0)
ax[1].plot(timearray2/doubling_time, cvszarray2,lw=3)
ax[1].fill_between(np.array(timearray2)/doubling_time,np.array(cvszarray2)-np.array(errcv2sz2arr),np.array(cvszarray2)+np.array(errcv2sz2arr),
                 alpha=1, edgecolor='#47D200', facecolor='#47D200',linewidth=0)
# +
# Two-panel figure for protein concentration: mean (left) and CV^2 (right)
# for single-lineage, population-snapshot and lineage-tracking statistics.
fig, ax = plt.subplots(1,2, figsize=(15,5))
ax[0].plot(timearrayMM/doubling_time,mnprotarrayMM,lw=3,label="Single Lineage")
ax[0].fill_between(np.array(timearrayMM)/doubling_time,np.array(mnprotarrayMM)-np.array(errmnprotMM),np.array(mnprotarrayMM)+np.array(errmnprotMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label='_nolegend_')
ax[0].plot(timearrayMM/doubling_time,mnprotarray,lw=3,label="Population Snapshots")
ax[0].fill_between(np.array(timearrayMM)/doubling_time,np.array(mnprotarray)-np.array(errmnprot),np.array(mnprotarray)+np.array(errmnprot),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0,label='_nolegend_')
ax[0].plot(timearrayMM/doubling_time,mnprotarray2,lw=3,label="Lineage Tracking")
ax[0].fill_between(np.array(timearrayMM)/doubling_time,np.array(mnprotarray2)-np.array(errmnprotarr2),np.array(mnprotarray2)+np.array(errmnprotarr2),
                 alpha=1, edgecolor='#47D200', facecolor='#47D200',linewidth=0,label='_nolegend_')
ax[1].plot(timearrayMM/doubling_time,cvprotarrayMM,lw=3)
#ax[1].plot(timearrayMM/doubling_time,cvprotarray,lw=3)
ax[1].fill_between(np.array(timearray)/doubling_time,np.array(cvprotarray)-np.array(errcv2prot),np.array(cvprotarray)+np.array(errcv2prot),
                 alpha=1, edgecolor='#FFA035', facecolor='#FFA035',linewidth=0,label="SSA")
ax[1].fill_between(np.array(timearrayMM)/doubling_time,np.array(cvprotarrayMM)-np.array(errcv2protMM),np.array(cvprotarrayMM)+np.array(errcv2protMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1].plot(timearray/doubling_time,cvprotarray2,lw=3,c="#319000")
ax[1].fill_between(np.array(timearray)/doubling_time,np.array(cvprotarray2)-np.array(errcv2protarr2),np.array(cvprotarray2)+np.array(errcv2protarr2),
                 alpha=1, edgecolor='#47D200', facecolor='#47D200',linewidth=0,label="SSA")
ax[0].legend(fontsize=15)
ax[0].set_ylabel("$p$",size=20)
ax[1].set_ylabel("$C_V^2(p)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
#ax[0].set_ylim([1,1.7])
#ax[1].set_ylim([0,0.15])
# Shared cosmetic styling for both panels.
for l in [0,1]:
    ax[l].set_xlim([0,6])
    taqui=np.arange(0,6.5,step=1)
    ax[l].set_xticks(np.array(taqui))
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
plt.savefig('./figures/Gene_population.eps',bbox_inches='tight')
plt.savefig('./figures/Gene_population.svg',bbox_inches='tight')
# +
data1=pd.read_csv("./data/PopGene.csv")
timearray=data1.Time.unique()
mnrnaarray=[]
cvrnaarray=[]
errcv2rna=[]
errmnrna=[]
# RNA concentration (Nr/Size) statistics over all cells per time point.
# FIX: hoisted duplicated bootstrap()/bootstrapCV2() calls so each estimate
# and its error bar come from the same resample set.
for t in timearray:
    df=data1[data1.Time==t]
    szs=np.array(df.Nr/df.Size)
    mn, mn_err = bootstrap(szs)
    mnrnaarray.append(mn)
    errmnrna.append(mn_err)
    cv2, cv2_err = bootstrapCV2(szs)
    cvrnaarray.append(cv2)
    errcv2rna.append(cv2_err)
plt.plot(timearray/doubling_time,mnrnaarray)
plt.fill_between(np.array(timearray)/doubling_time,np.array(mnrnaarray)-np.array(errmnrna),np.array(mnrnaarray)+np.array(errmnrna),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
# +
timearray=data1.Time.unique()
mnrnaarray2=[]
cvrnaarray2=[]
errcv2rnaarr2=[]
errmnrnaarr2=[]
# Lineage-tracking RNA concentration: per-sample mean/CV^2, bootstrapped
# across samples.  FIX: hoisted duplicated bootstrap() calls.
for t in timearray:
    df=data1[data1.Time==t]
    mntemp=[]
    meanerrmn=[]
    vartemp=[]
    meanerrvar=[]
    smparray=df.Sample.unique()
    for s in smparray:
        dft=df[df.Sample==s]
        if len(dft)>1:
            conc=np.array(dft.Nr/dft.Size)
            mntemp.append(np.mean(conc))
            vartemp.append(np.var(conc)/np.mean(conc)**2)
        else:
            # Single cell in this sample: variance undefined -> 0.
            conc=dft.Nr/dft.Size
            mntemp.append(conc.tolist()[0])
            vartemp.append(0)
    mn, mn_err = bootstrap(mntemp)
    mnrnaarray2.append(mn)
    errmnrnaarr2.append(mn_err)
    cv2, cv2_err = bootstrap(vartemp)
    cvrnaarray2.append(cv2)
    errcv2rnaarr2.append(cv2_err)
plt.plot(timearray/doubling_time,mnrnaarray)
#plt.fill_between(np.array(timearray)/doubling_time,np.array(mnrnaarray)-np.array(errmnrna),np.array(mnrnaarray)+np.array(errmnrna),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
plt.plot(timearray/doubling_time,mnrnaarray2)
#plt.fill_between(np.array(timearray)/doubling_time,np.array(mnrnaarray2)-np.array(errmnrnaarr2),np.array(mnrnaarray2)+np.array(errmnrnaarr2),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
# +
fig, ax = plt.subplots(1,2, figsize=(15,5))
data1=pd.read_csv("./data/MMGene.csv")
timearrayMM=data1.Time.unique()
mnrnaarrayMM=[]
cvrnaarrayMM=[]
errcv2rnaMM=[]
errmnrnaMM=[]
# Mother-machine RNA concentration statistics.
# FIX: removed an unused bayesest() call and hoisted duplicated bootstrap
# calls so each estimate and its error bar share one resample set.
for t in timearrayMM:
    df=data1[data1.Time==t]
    rnas=np.array(df.Nr/df.Size)
    mn, mn_err = bootstrap(rnas)
    mnrnaarrayMM.append(mn)
    errmnrnaMM.append(mn_err)
    cv2, cv2_err = bootstrapCV2(rnas)
    cvrnaarrayMM.append(cv2)
    errcv2rnaMM.append(cv2_err)
# RNA concentration figure: mean (left) and CV^2 (right) for the three views.
ax[0].plot(timearrayMM/doubling_time,mnrnaarrayMM,lw=3,label="Single Lineage")
#ax[0].fill_between(np.array(timearrayMM)/doubling_time,np.array(mnrnaarrayMM)-np.array(errmnrnaMM),np.array(mnrnaarrayMM)+np.array(errmnrnaMM),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[0].plot(timearrayMM/doubling_time,mnrnaarray,lw=3,label="Population Snapshots")
#ax[0].fill_between(np.array(timearray1)/doubling_time,np.array(mnrnaarray)-np.array(errmnrna),np.array(mnrnaarray)+np.array(errmnrna),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[0].plot(timearrayMM/doubling_time,mnrnaarray2,lw=3,label="Mean Population")
#ax[0].fill_between(np.array(timearray1)/doubling_time,np.array(mnrnaarray2)-np.array(errmnrnaarr2),np.array(mnrnaarray2)+np.array(errmnrnaarr2),
#                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1].plot(timearrayMM/doubling_time,cvrnaarray,lw=3)
ax[1].fill_between(np.array(timearray)/doubling_time,np.array(cvrnaarray)-np.array(errcv2rna),np.array(cvrnaarray)+np.array(errcv2rna),
                 alpha=1, edgecolor='#FF6800', facecolor='#FF6800',linewidth=0,label="SSA")
ax[1].plot(timearrayMM/doubling_time,cvrnaarrayMM,lw=3)
ax[1].fill_between(np.array(timearrayMM)/doubling_time,np.array(cvrnaarrayMM)-np.array(errcv2rnaMM),np.array(cvrnaarrayMM)+np.array(errcv2rnaMM),
                 alpha=1, edgecolor='#4db8ff', facecolor='#4db8ff',linewidth=0,label="SSA")
ax[1].plot(timearray/doubling_time,cvrnaarray2,lw=3)
ax[1].fill_between(np.array(timearray)/doubling_time,np.array(cvrnaarray2)-np.array(errcv2rnaarr2),np.array(cvrnaarray2)+np.array(errcv2rnaarr2),
                 alpha=1, edgecolor='#00C917', facecolor='#00C917',linewidth=0,label="SSA")
ax[0].legend(fontsize=15)
# NOTE(review): axis labels say $p$ but these panels plot RNA — confirm.
ax[0].set_ylabel("$p$",size=20)
ax[1].set_ylabel("$C_V^2(p)$",size=20)
ax[0].set_xlabel(r"$t/\tau$",size=20)
ax[1].set_xlabel(r"$t/\tau$",size=20)
#ax[0].set_ylim([1,1.7])
#ax[1].set_ylim([0,0.15])
# Shared cosmetic styling for both panels.
for l in [0,1]:
    ax[l].set_xlim([0,6])
    taqui=np.arange(0,6.5,step=1)
    ax[l].set_xticks(np.array(taqui))
    ax[l].grid()
    ax[l].tick_params(axis='x', labelsize=15)
    ax[l].tick_params(axis='y', labelsize=15)
    for axis in ['bottom','left']:
        ax[l].spines[axis].set_linewidth(2)
        ax[l].tick_params(axis='both', width=2,length=6)
    for axis in ['top','right']:
        ax[l].spines[axis].set_linewidth(0)
        ax[l].tick_params(axis='both', width=0,length=6)
plt.subplots_adjust(hspace=0.3,wspace=0.3)
# -
# Export all computed statistics as one tidy CSV: one row per time point per
# estimator type (Single_Lineage, Population_Snapshots, Lineage_Tracking).
GeneFile=open("./data/GeneStatistics.csv","w")
output="Time,Type,MeanSz,ErrorMeanSz,CV2Sz,ErrorCV2Sz,MeanProt,ErrorMeanProt,CV2Prot,ErrorCV2Prot,MeanRNA,ErrorMeanRNA,CV2RNA,ErrorCV2RNA\n"
for l in range(len(timearrayMM)):
    output+=str(timearrayMM[l])+',Single_Lineage,'+str(mnszarrayMM[l])+','+str(errmnszMM[l])+','+str(cvszarrayMM[l])\
    +','+str(errcv2szMM[l])+','+str(mnprotarrayMM[l])+','+str(errmnprotMM[l])+','+str(cvprotarrayMM[l])+','+str(errcv2protMM[l])\
    +','+str(mnrnaarrayMM[l])+','+str(errmnrnaMM[l])+','+str(cvrnaarrayMM[l])+','+str(errcv2rnaMM[l])+'\n'
for l in range(len(timearray)):
    output+=str(timearray[l])+',Population_Snapshots,'+str(mnszarray[l])+','+str(errmnsz[l])+','+str(cvszarray[l])\
    +','+str(errcv2sz[l])+','+str(mnprotarray[l])+','+str(errmnprot[l])+','+str(cvprotarray[l])+','+str(errcv2prot[l])\
    +','+str(mnrnaarray[l])+','+str(errmnrna[l])+','+str(cvrnaarray[l])+','+str(errcv2rna[l])+'\n'
for l in range(len(timearray)):
    output+=str(timearray[l])+',Lineage_Tracking,'+str(mnsz2array[l])+','+str(errmnsz2arr[l])+','+str(cvszarray2[l])\
    +','+str(errcv2sz2arr[l])+','+str(mnprotarray2[l])+','+str(errmnprotarr2[l])+','+str(cvprotarray2[l])+','+str(errcv2protarr2[l])\
    +','+str(mnrnaarray2[l])+','+str(errmnrnaarr2[l])+','+str(cvrnaarray2[l])+','+str(errcv2rnaarr2[l])+'\n'
GeneFile.write(output)
GeneFile.close()
| development/examples/PopSimulator/GeneExpression/.ipynb_checkpoints/PopSizeGene-SingleStep-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Download Shakespeare's sonnets from Project Gutenberg as one string.
import urllib.request
sonnetsUrl = "http://www.gutenberg.org/cache/epub/1041/pg1041.txt"
sonnetsString = urllib.request.urlopen(sonnetsUrl).read().decode()
import re, os
# Trim the Gutenberg preamble/licence, then split on the roman-numeral
# titles (e.g. " I\r\n\r\n") into one string per sonnet.
# NOTE(review): the regex patterns here should be raw strings (r"...").
filteredSonnetsStart = sonnetsString.find(" I\r\n") # title of first sonnet
filteredSonnetsEnd = sonnetsString.find("End of Project Gutenberg's") # end of sonnets
filteredSonnetsString = sonnetsString[filteredSonnetsStart:filteredSonnetsEnd].rstrip()
sonnetsList = re.split(" [A-Z]+\r\n\r\n", filteredSonnetsString)
# Write each non-empty sonnet to sonnets/NNN.txt.
sonnetsPath = 'sonnets' # this subdirectory will be relative to the current notebook
if not os.path.exists(sonnetsPath):
    os.makedirs(sonnetsPath)
for index, sonnet in enumerate(sonnetsList): # loop through our list as enumeration to get index
    if len(sonnet.strip()) > 0: # make sure we have text, not empty after stripping out whitespace
        filename = str(index).zfill(3)+".txt" # create filename from index
        pathname = os.path.join(sonnetsPath, filename) # directory name and filename
        # FIX: use a context manager so the file is closed even if write fails.
        with open(pathname, "w") as f:
            f.write(sonnet.rstrip()) # write out our sonnet into the file
# Build an NLTK corpus from the per-sonnet text files written above.
from nltk.corpus import PlaintextCorpusReader
sonnetsCorpus = PlaintextCorpusReader("sonnets", ".*\.txt")
print(len(sonnetsCorpus.fileids()))
# One token list per file in the corpus.
tokens = [sonnetsCorpus.words(fileid) for fileid in sonnetsCorpus.fileids()]
type(tokens)
print(tokens)
def multiply(left, right=1):
    """Return *left* times *right*; *right* defaults to 1 (identity)."""
    product = left * right
    return product
multiply(5)  # default right=1 -> 5
multiply(5, 5)  # explicit second argument -> 25
# +
import nltk
def get_lists_of_words(corpus, **kwargs): # the ** in front of kwargs does the magic of keyword arguments
    """Return one list of lowercase word tokens per file in *corpus*.

    Optional keyword arguments:
        minLen    -- keep only words at least this long
        stopwords -- drop any word in this collection
        pos       -- keep only words whose NLTK POS tag is in this collection
    """
    min_len = kwargs.get("minLen")
    stop_words = kwargs.get("stopwords")
    pos_tags = kwargs.get("pos")
    documents = []
    for fileid in corpus.fileids():
        # Lowercase, and keep only tokens that start with a letter.
        words = [token.lower() for token in corpus.words(fileid) if token[0].isalpha()]
        if min_len:
            words = [w for w in words if len(w) >= min_len]
        if stop_words:
            words = [w for w in words if w not in stop_words]
        if pos_tags:
            words = [w for w, tag in nltk.pos_tag(words) if tag in pos_tags]
        documents.append(words)
    return documents
# -
get_lists_of_words(sonnetsCorpus)
# +
# Standard English stopwords plus a few archaic pronouns common in the sonnets.
sonnetsStopwords = nltk.corpus.stopwords.words('english') # load the default stopword list
sonnetsStopwords += ["thee", "thou", "thy"] # append a few more obvious words
sonnetsWords = get_lists_of_words(sonnetsCorpus, stopwords=sonnetsStopwords, minLen=3)
# have a peek:
for i in range(0,2): # first two documents
    print("document", str(i), sonnetsWords[i][0:6])
# +
from gensim import corpora, models
def get_lda_from_lists_of_words(lists_of_words, **kwargs):
    """Build a gensim LDA topic model from lists of word tokens.

    Any extra keyword arguments (e.g. num_topics, passes) are forwarded
    to gensim's LdaModel constructor.
    """
    # map every distinct term to an integer id
    id_map = corpora.Dictionary(lists_of_words)
    # bag-of-words representation of each document
    bows = [id_map.doc2bow(doc) for doc in lists_of_words]
    # weight terms by tf-idf significance per document before modelling
    tfidf_model = models.TfidfModel(bows)
    weighted_corpus = tfidf_model[bows]
    kwargs["id2word"] = id_map  # LdaModel needs the term dictionary
    return models.LdaModel(weighted_corpus, **kwargs)  # do the LDA topic modelling
# -
sonnetsLda = get_lda_from_lists_of_words(sonnetsWords, num_topics=10, passes=20) # small corpus, so more passes
print(sonnetsLda)
def print_top_terms(lda, num_terms=10):
    """Print the top *num_terms* terms for every topic in *lda*."""
    for i in range(0, lda.num_topics):
        terms = [term for val, term in lda.show_topic(i, num_terms)]
        # report the number of terms actually requested
        # (the message was previously hard-coded to "Top 10")
        print("Top", str(num_terms), "terms for topic #", str(i), ": ", ", ".join(terms))
print_top_terms(sonnetsLda)
import networkx as nx
import matplotlib.pyplot as plt
# %matplotlib inline
# Toy graph: students connected to the schools they attended.
G = nx.Graph()
G.add_edge("A", "X") # student A went to school X
G.add_edge("A", "Y") # student A went to school Y
G.add_edge("B", "X") # student B went to school X
G.add_edge("C", "Y") # student C went to school Y
nx.draw(G)
# Redraw with an explicit layout so labels and edges line up.
pos = nx.spring_layout(G)
nx.draw_networkx_labels(G, pos, font_color='r') # font colour is "r" for red
nx.draw_networkx_edges(G, pos, alpha=0.1) # set the line alpha transparency to .1
plt.axis('off') # don't show the axes for this plot
plt.show()
# +
import networkx as nx
import matplotlib.pyplot as plt
# %matplotlib inline
def graph_terms_to_topics(lda, num_terms=10):
    """Draw a graph linking each LDA topic node to its top terms."""
    # create a new graph and size the figure
    graph = nx.Graph()
    plt.figure(figsize=(10, 10))
    # one edge per (topic, term) pair
    for topic_idx in range(0, lda.num_topics):
        topic_label = "topic " + str(topic_idx)
        for _, term in lda.show_topic(topic_idx, num_terms):
            graph.add_edge(topic_label, term)
    layout = nx.spring_layout(graph)  # positions for all nodes
    # topic labels are drawn in red, term labels in the default colour,
    # so we plot the two node subsets separately
    topic_nodes = [node for node in layout if "topic " in node]
    nx.draw_networkx_labels(graph.subgraph(topic_nodes), layout, font_color='r')
    term_nodes = [node for node in layout if "topic " not in node]
    nx.draw_networkx_labels(graph.subgraph(term_nodes), layout)
    # faint edges keep the labels readable
    nx.draw_networkx_edges(graph, layout, edgelist=graph.edges(), alpha=0.1)
    plt.axis('off')
    plt.show()
graph_terms_to_topics(sonnetsLda)
# -
| SonnetsCorpus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Presentation: https://tinyurl.com/yyuw6jf2
# This repo:
import GoogleApiSupport.slides as slides

# Google Slides presentation used as the template for this demo.
PRESENTATION_ID = '1p0ouOWDZxX7QemDeNk976KiDp6ooWYEc9DhMcuOpY7U'

#gslides.presentation_info(PRESENTATION_ID)
#gslides.presentation_info(PRESENTATION_ID)['slides']
#gslides.presentation_info(PRESENTATION_ID)['slides'][0]
# Inspect the first page element of the first slide.
slides.presentation_info(PRESENTATION_ID)['slides'][0]['pageElements'][0]

# Copy the template presentation, then move the copy into a Drive folder.
output_file = slides.copy_file(PRESENTATION_ID, 'Copy_test_1')
print ('Output file id:', output_file)

folder_id = '129SE0R7kDx2peFNxGFj2GB2cbb875WRU'
slides.move_file(output_file, folder_id)

#gslides.text_replace('main_title', 'This is my new title', output_file)
# Replace several text placeholders in one batch request.
slides.batch_text_replace(
    {
        'placeholder_1': 'Hola que tal',
        'placeholder_2': 'Hola k ase',
        'total_visits': '9000.00'
    },
    output_file
)

# Replace placeholder shapes with images fetched from URLs.
slides.batch_replace_shape_with_image(
    {
        'image_1': 'http://i.stack.imgur.com/e8nZC.gif',
        'image_2': 'https://s2.eestatic.com/2019/04/11/mundo/europa/Reino_Unido-Julian_Assange-Wikileaks-Europa_390222487_120175811_1024x576.jpg'
    },
    output_file
)

# Upload a local image to Drive, insert it into the deck, then delete the upload.
upload_response = slides.upload_image_to_drive('graph', 'img/local_image_graph.png', folder_id)
print(upload_response)
slides.replace_shape_with_image(upload_response['image_url'], output_file, 'graph_1')
slides.delete_file(file_id=upload_response['file_id'])

# Find a specific slide by its object id.
slide_by_id = [s for s in slides.presentation_info(output_file)['slides'] if (s['objectId'] == 'g5b626a4cca_2_142')][0]
# +
notes_from_slide = slides.get_slide_notes(slide_by_id).strip()
print(notes_from_slide)
# +
# WARNING: exec() runs the slide's speaker notes as Python code. Only do this
# with presentations you fully control -- notes are effectively untrusted input.
exec(notes_from_slide)
print(page_id, text_to_print)
# -
slides.text_replace('text_in_notes', text_to_print, output_file)

# Execute the notes of every slide; a slide's notes may set `to_delete = True`
# to have that slide removed from the copied presentation.
to_delete = False
for slide in slides.presentation_info(output_file)['slides']:
    try: # Can explode easily
        exec(slides.get_slide_notes(slide))
    except Exception as e:
        print(e)
    if to_delete:
        print('Deleting page ' + slide['objectId'] + '...')
        slides.delete_object(output_file, slide['objectId'])
        to_delete = False

# Fill in the closing slides.
slides.batch_text_replace(
    {
        'questions': 'Questions?',
        'final_text': 'Thanks!',
        'contact_info': 'Contact info:',
        'twitter_ac': '@vperezb',
        'github': '@vperezb',
        'linkedin': 'https://www.linkedin.com/in/vperezb-/',
        'medium': 'https://medium.com/@victor.perez.berruezo'
    },
    output_file
)

slides.replace_shape_with_image('https://steamuserimages-a.akamaihd.net/ugc/835829308656967578/61ECB5961DD40FA06965A5FDA96F56D61634CAF8/', output_file, 'final_image')
| Documentation/Examples/bcn_python_meetup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#      
#      
#      
#      
#      
#    
# [Home Page](../Start_Here.ipynb)
#
#
# [Previous Notebook](Manipulation_of_Image_Data_and_Category_Determination_using_Text_Data.ipynb)
#      
#      
#      
#      
# [1](The_Problem_Statement.ipynb)
# [2](Approach_to_the_Problem_&_Inspecting_and_Cleaning_the_Required_Data.ipynb)
# [3](Manipulation_of_Image_Data_and_Category_Determination_using_Text_Data.ipynb)
# [4](Countering_Data_Imbalance.ipynb)
# [5]
#      
#      
#      
#      
#
# # Competition :
#
# In this exercise, participants need to tune the model and work on improving its overall accuracy.
#
# To help you get started, here are some obvious ways in which you can make the model more efficient:
#
# - Epochs
# - Batch Size
# - Optimizers : We have used SGD as a optimizer. Participant can try applying other optimizer and test to obtain quick convergence.
# - Data Augmentation : Remember, we mentioned we have an imbalanced dataset. You could try different augmentation techniques for the minority classes.
# - Model : If you have exploited all the above methods to improve your model, you can change the model by adding more layers to it and see if that improves the accuracy.
#
# Note, before you start tweaking and training your model, it would be worthwhile to refer to these to see how they affect your model :
#
# [Epochs impact on Overfitting](https://datascience.stackexchange.com/questions/27561/can-the-number-of-epochs-influence-overfitting )
#
#
# [Effect of Batch Size on Training Dynamics](https://medium.com/mini-distill/effect-of-batch-size-on-training-dynamics-21c14f7a716e)
#
# [Introduction to Optimizers](https://algorithmia.com/blog/introduction-to-optimizers)
# # Training the Model with Data Augmentation :
#
#
# We created a new function called `augmentation(name,category,filenames,labels,i)` and here you can add more samples to Category which have imbalanced data.
# +
import sys
sys.path.append('/workspace/python/source_code')
from utils import *
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def augmentation(name,category,filenames,labels,i):
    """Generate augmented copies of one image and register them in the dataset.

    Arguments:
        name      -- path of the source image to load
        category  -- integer class label (0-7) of the image
        filenames -- running list of dataset file paths (appended to in place)
        labels    -- running list of dataset labels (appended to in place)
        i         -- next free index used to name augmented image files

    Returns the updated index (original i plus the number of images written).
    """
    # Important Constants
    file_path = "Dataset/Aug/"  # directory the augmented images are written to
    images = []  # augmented variants produced for this one source image
    (h, w) = (232,232)  # assumed image size -- TODO confirm it matches the dataset
    center = (w / 2, h / 2)  # centre point, provided for rotation augmentations
    # ready-made rotation angles and scale for participants to use (unused by default)
    angle90 = 90
    angle180 = 180
    angle270 = 270
    scale = 1.0
    img = load_image(name , interpolation = cv2.INTER_LINEAR)
    ## ~~ Add Augmentations here ~~
    # Only categories 0 and 7 get a vertical flip by default; the remaining
    # branches are placeholders for participants to add their own augmentations.
    if category == 0 :
        images.append(cv2.flip(img,0))
    elif category == 1 :
        pass
    elif category == 2 :
        pass
    elif category == 3 :
        pass
    elif category == 4 :
        pass
    elif category == 5 :
        pass
    elif category == 6 :
        pass
    elif category == 7 :
        images.append(cv2.flip(img,0))
    ## ~~ Augmentation ends here ~~
    # write each augmented image to disk and record its path and label
    for j in range(len(images)):
        cv2.imwrite(file_path+str(i+j)+'.jpeg',images[j])
        filenames.append(file_path+str(i+j)+'.jpeg')
        labels.append(category)
    i = i + len(images)
    return i
# -
# ##### We pass this function to our `load_dataset()` function to generate these augmentations.
#
# Kindly wait for a couple of minutes while it takes to augment the images.
# Build the augmented dataset; augmentation() is applied per image.
filenames,labels = load_dataset(augment_fn = augmentation)
# Set the Size of the Validation set
val_filenames , val_labels = make_test_set(filenames,labels,val=0.1)
#Make train test set
test = 0.1  # fraction of data held out for testing
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(filenames, labels, test_size=test, random_state=1)
import tensorflow as tf
# one-hot encode the 8 class labels for categorical cross-entropy
y_train = tf.one_hot(y_train,depth=8)
y_test = tf.one_hot(y_test,depth=8)
val_labels = tf.one_hot(val_labels,depth=8)
# +
# Make Dataset compatible with Tensorflow Data Pipelining.
# ~~ Change the batch Size here ~~
batch_size = 64
# ~~ Change the batch Size here ~~
# NOTE(review): this rebinds `test`, shadowing the 0.1 split fraction above.
train,test,val = make_dataset((x_train,y_train,batch_size),(x_test,y_test,32),(val_filenames,val_labels,32))
# -
# -
# # Model Architecture :
#
#
# +
import numpy as np
tf.random.set_seed(1337)  # fixed seed for reproducible weight initialisation
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten ,Dropout, MaxPooling2D
from tensorflow.keras import backend as K
#Reset Graphs and Create Sequential model
K.clear_session()
model = Sequential()
## ~~ Change Model or Parameters Here
#Convolution Layers
# Stack of conv/pool layers: a large strided kernel first, then 3x3 convs.
model.add(Conv2D(64, kernel_size=10,strides=3, activation='relu', input_shape=(232,232,3)))
model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
model.add(Conv2D(256, kernel_size=5,strides=1,activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
model.add(Conv2D(288, kernel_size=3,strides=1,padding='same',activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2),strides=1))
model.add(Conv2D(272, kernel_size=3,strides=1,padding='same',activation='relu'))
model.add(Conv2D(256, kernel_size=3,strides=1,activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3),strides=2))
model.add(Dropout(0.5))  # regularisation before the dense layers
model.add(Flatten())
#Linear Layers
model.add(Dense(3584,activation='relu'))
model.add(Dense(2048,activation='relu'))
model.add(Dense(8, activation='softmax'))  # 8 cyclone-intensity classes
## ~~ Change Model or Parameters Here
# Print Model Summary
model.summary()
# +
import functools
#Define Number of Epochs
## ~~ Change Number of Epochs Here ~~
epochs = 24
## ~~ Change Number of Epochs Here ~~
# Include Top-2 Accuracy Metrics
top2_acc = functools.partial(tensorflow.keras.metrics.top_k_categorical_accuracy, k=2)
top2_acc.__name__ = 'top2_acc'  # Keras uses __name__ to label the metric in logs
## ~~ Change Optimizer or Parameters Here
# Optimizer
# NOTE(review): `lr` is deprecated in newer tf.keras in favour of
# `learning_rate` -- confirm against the installed TensorFlow version.
sgd = tensorflow.keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9)
## ~~ Change Optimizer or Parameters Here
#Compile Model with Loss Function , Optimizer and Metrics
model.compile(loss=tensorflow.keras.losses.categorical_crossentropy,
              optimizer=sgd,
              metrics=['accuracy',top2_acc])
# Train the Model
trained_model = model.fit(train,
                          epochs=epochs,
                          verbose=1,
                          validation_data=val)
# Test Model Against the held-out test set
score = model.evaluate(test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
# ### Visualisations
# +
import matplotlib.pyplot as plt
# Accuracy and loss curves for train vs validation, side by side.
f = plt.figure(figsize=(15,5))
ax = f.add_subplot(121)
ax.plot(trained_model.history['accuracy'])
ax.plot(trained_model.history['val_accuracy'])
ax.set_title('Model Accuracy')
ax.set_ylabel('Accuracy')
ax.set_xlabel('Epoch')
ax.legend(['Train', 'Val'])
ax2 = f.add_subplot(122)
ax2.plot(trained_model.history['loss'])
ax2.plot(trained_model.history['val_loss'])
ax2.set_title('Model Loss')
ax2.set_ylabel('Loss')
ax2.set_xlabel('Epoch')
ax2.legend(['Train', 'Val'],loc= 'upper left')
plt.show()
# +
import seaborn as sn
from sklearn.metrics import confusion_matrix
import pandas as pd
# Plot a heatmap of the confusion matrix over the validation set.
pred = model.predict(val)
p = np.argmax(pred, axis=1)  # predicted class index per sample
y_valid = np.argmax(val_labels, axis=1, out=None)  # true class index per sample
results = confusion_matrix(y_valid, p)
# 8 cyclone-intensity classes. Was ['NC','TD','TC','H1','H3','H3','H4','H5'],
# which duplicated 'H3' and omitted 'H2', mislabelling four heatmap rows/columns.
classes = ['NC', 'TD', 'TC', 'H1', 'H2', 'H3', 'H4', 'H5']
df_cm = pd.DataFrame(results, index=list(classes), columns=list(classes))
plt.figure(figsize = (15,15))
sn.heatmap(df_cm, annot=True, cmap="Blues")
# -
# Let us now save our Model and the trained Weights for Future usage :
#Save Our Model
model.save('cyc_pred_comp.h5')
# ### Other Bootcamps
# The contents of this Bootcamp originates from [OpenACC GPU Bootcamp Github](https://github.com/gpuhackathons-org/gpubootcamp). Here are some additional Bootcamp which might be of interest:
#
# - [Physics Informed Neural Network](https://github.com/gpuhackathons-org/gpubootcamp/tree/master/hpc_ai/PINN)
#
# ## License
# This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
#
# [Previous Notebook](Manipulation_of_Image_Data_and_Category_Determination_using_Text_Data.ipynb)
#      
#      
#      
#      
# [1](The_Problem_Statement.ipynb)
# [2](Approach_to_the_Problem_&_Inspecting_and_Cleaning_the_Required_Data.ipynb)
# [3](Manipulation_of_Image_Data_and_Category_Determination_using_Text_Data.ipynb)
# [4](Countering_Data_Imbalance.ipynb)
# [5]
#      
#      
#      
#      
#
#      
#      
#      
#      
#      
#    
# [Home Page](../Start_Here.ipynb)
#
| hpc_ai/ai_science_climate/English/python/jupyter_notebook/Tropical_Cyclone_Intensity_Estimation/Competition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2><u><center>CARD CLASS:</center><u></h2>
# +
# Card Class
# Suit, Rank, Value
# Defining at global level
import random
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':11,
'Queen':12, 'King':13, 'Ace':14}
# -
class Card:
    """A single playing card with a suit, a rank, and a numeric value."""

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank
        # numeric value comes from the module-level `values` lookup table
        self.value = values[rank]

    def __str__(self):
        return f"{self.rank} of {self.suit}"
two_hearts = Card("Hearts", "Two")
print (two_hearts)
print (two_hearts.rank)
print (two_hearts.suit)
print (values[two_hearts.rank])
# <b> After making values dictionary:
three_of_clubs = Card("Clubs", "Three")
print (three_of_clubs.suit)
print (three_of_clubs.rank)
print (three_of_clubs.value)
# <b> Comparison:
two_hearts.value < three_of_clubs.value
# <hr style="border:1px solid black"> </hr>
# <h2><u><center>DECK CLASS:</center><u></h2>
class Deck:
    """A 52-card deck built from the module-level suits/ranks tables."""

    def __init__(self):
        # one Card per (suit, rank) combination, in declaration order
        self.all_cards = [Card(suit, rank) for suit in suits for rank in ranks]

    def shuffle(self):
        """Randomise the order of the cards in place."""
        random.shuffle(self.all_cards)

    def deal_one(self):
        """Remove and return the card at the end of the deck."""
        return self.all_cards.pop()
new_deck = Deck() # Creating an instance
new_deck.all_cards
first = new_deck.all_cards[0] # Calling 1st item
first
print (first.value)
print (first.rank)
print (first.suit)
print (first)
# +
# Card object of deck gives output in order:
for card_object in new_deck.all_cards:
print (card_object)
# -
bottom_card = new_deck.all_cards[-1]
print (bottom_card)
new_deck.shuffle()
print (new_deck.all_cards[-1])
# +
# Grabbing 1 out of 52 cars:
new_deck.shuffle()
# -
mycard = new_deck.deal_one()
print (mycard)
len(new_deck.all_cards)
# <hr style="border:1px solid black"> </hr>
# <h2><u><center>PLAYER CLASS:</center><u></h2>
# +
# A class to hold a player's current list of class
# Translating the deck with top-botton to python list
# Able to add a single or multiple cards to their list
# Able to remove or add cards from list of card objects
# -
class Player:
    """A war-game player holding an ordered hand of cards (index 0 = top)."""

    def __init__(self, name):
        self.name = name
        self.all_cards = []

    def remove_one(self):
        """Remove and return the top card of the hand."""
        return self.all_cards.pop(0)

    def add_cards(self, new_cards):
        """Add one card object, or a list of card objects, to the bottom."""
        # isinstance is the idiomatic check (was: type(new_cards) == type([]))
        if isinstance(new_cards, list):
            # list of multiple card objects
            self.all_cards.extend(new_cards)
        else:
            # a single card object
            self.all_cards.append(new_cards)

    def __str__(self):
        return f"Player {self.name} has {len(self.all_cards)} cards."
new_player = Player("Jose")
print (new_player) # No card available for now
mycard
print (mycard)
new_player.add_cards(mycard) # Adding a card from the deck
print (new_player)
print (new_player.all_cards[0])
new_player.add_cards([mycard,mycard,mycard]) # Multiple add
print(new_player)
print (new_player.remove_one()) # Removing 1 card from new
print (new_player)
# <hr style="border:1px solid black"> </hr>
# <h2><u><center>GAME LOGIC:</center><u></h2>
# ### Game set-up:
# +
player_one = Player("One")
player_two = Player("Two")
new_deck = Deck()
new_deck.shuffle()
for x in range(26):
player_one.add_cards(new_deck.deal_one())
player_two.add_cards(new_deck.deal_one())
game_on = True
# -
# Main game loop: each round both players reveal a card; ties trigger "war",
# where each player commits 5 more cards and compares the last one drawn.
round_num = 0
while game_on:

    round_num += 1
    print(f"Round {round_num}")

    # Check to see if a player is out of cards:
    if len(player_one.all_cards) == 0:
        print("Player One out of cards! Game Over")
        print("Player Two Wins!")
        game_on = False
        break

    if len(player_two.all_cards) == 0:
        print("Player Two out of cards! Game Over")
        print("Player One Wins!")
        game_on = False
        break

    # Otherwise, the game is still on!
    # Start a new round and reset current cards "on the table"
    player_one_cards = []
    player_one_cards.append(player_one.remove_one())

    player_two_cards = []
    player_two_cards.append(player_two.remove_one())

    at_war = True
    while at_war:

        if player_one_cards[-1].value > player_two_cards[-1].value:
            # Player One gets the cards
            player_one.add_cards(player_one_cards)
            player_one.add_cards(player_two_cards)
            # No longer at "war", time for next round
            at_war = False

        # Player Two has the higher card
        elif player_one_cards[-1].value < player_two_cards[-1].value:
            # Player Two gets the cards
            player_two.add_cards(player_one_cards)
            player_two.add_cards(player_two_cards)
            # No longer at "war", time for next round
            at_war = False

        else:
            print('WAR!')
            # The face-up cards are equal. Each player must commit 5 more
            # cards; first check each player has enough cards to go to war.
            if len(player_one.all_cards) < 5:
                print("Player One unable to play war! Game Over at War")
                print("Player Two Wins! Player One Loses!")
                game_on = False
                break

            elif len(player_two.all_cards) < 5:
                print("Player Two unable to play war! Game Over at War")
                # BUG FIX: this branch previously printed "Player One Loses!"
                print("Player One Wins! Player Two Loses!")
                game_on = False
                break

            # Otherwise, we're still at war, so we'll add the next cards
            else:
                for num in range(5):
                    player_one_cards.append(player_one.remove_one())
                    player_two_cards.append(player_two.remove_one())
print (player_one.all_cards[0])
len(player_one.all_cards)
# <h2><u><center>END</center><u></h2>
| Section 11 - Milestone Project 2/Step wise game.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hello! Welcome to GMT/Python.
# This is a Jupyter notebook. It's an interactive computing environment where you can mix text (like this), code, and figures. The notebook is organized into cells. This is a Markdown cell (double click on it to see the source) and it can contain text, hyperlinks, images, and even Latex equations.
#
# To execute any cell, click on it and type Shift + Enter or click on the "Run" button in the menu bar. Executing a Markdown cell will render its content. Code execution can happen non-linearly, so you can change and rerun a cell without running all of the ones that came before it. But you'll still need to run cells that define a variable/import a module before you can use the variable/module in another cell. You can restart and clear the notebook at any time using the options in the "Kernel" menu.
#
# This is an example of what you can currently do with GMT/Python. Feel free to experiment, change the code, and create new cells.
#
# If you run into any problems or bugs, please create a Github issue explaining what went wrong.
#
# For more information and if you'd like to get involved, visit the official website: https://www.gmtpython.xyz
# ## Loading the library
# You can load GMT/Python by importing the gmt Python package. Most GMT processing modules will be available as functions in this package. The plotting modules are methods of the gmt.Figure class.
import gmt
# ## Making a map
# All figure generation in GMT/Python is handled by the gmt.Figure class. It has methods to add layers to your figure, like a basemap, coastlines, etc.
#
# We start a new figure by creating an instance of gmt.Figure:
fig = gmt.Figure()
# We add elements to the figure using its methods. For example, lets add the coastlines of Central America to a 6 inch wide map using the Mercator projection (M). Our figure will also have a nice frame with automatic ticks.
fig.coast(region=[-90, -70, 0, 20], projection='M6i', land='chocolate',
frame=True)
# You can see a preview of the figure directly in the Jupyter notebook using fig.show().
fig.show()
# ## Saving the figure
# Unlike the GMT command-line interface, no figure file was generated until you ask for one.
# That means that fig.show won't produce a figure file.
#
# Use method fig.savefig (based on the matplotlib function) to save your figure:
fig.savefig('central-america.png')
# !ls
# ## Sample Data
# GMT includes sample data that are downloaded automatically to a custom folder (usually ~/.gmt/cache). You can load these datasets into Python using the functions in the gmt.datasets package.
quakes = gmt.datasets.load_usgs_quakes()
# For tabular data, the data are loaded as a pandas.DataFrame.
quakes.head()
# ## Plotting point data
# Use the gmt.Figure.plot method to add points and lines to your map. By default, it will use the same projection that you used previously to setup your map. Let's setup a map using a Mollweide projection (W) to plot our earthquake locations. The point style will be 0.1 inch circles (c0.1i) colored yellow with black outlines.
fig = gmt.Figure()
fig.coast(region="g", projection="W0/10i", land="grey", frame=True)
fig.plot(x=quakes.longitude, y=quakes.latitude,
style="c0.1i", color="yellow", pen="black")
fig.show()
# We can make the size of the markers follow the earthquake magnitude by passing in the argument sizes to Figure.plot. We'll need to scale the magnitude so that it will reflect the size in inches.
fig = gmt.Figure()
fig.coast(region="g", projection="W0/10i", land="grey", frame=True)
fig.plot(x=quakes.longitude, y=quakes.latitude,
sizes=0.005*2**quakes.mag,
style="ci", color="yellow", pen="black")
fig.show()
# We can also map the colors of the markers to the depths by passing an array to the color argument and providing a colormap name (cmap). We can even use the new matplotlib colormap "viridis".
fig = gmt.Figure()
fig.coast(region="g", projection="W0/10i", land="grey", frame=True)
fig.plot(x=quakes.longitude, y=quakes.latitude,
sizes=0.005*2**quakes.mag, color=quakes.depth/quakes.depth.max(),
style="ci", pen="black", cmap="rainbow")
fig.show()
# We still don't have support for adding a colorbar or customizing the colormaps. We're working on it.
# ## Plotting Grids
# GMT uses netCDF as its default grid format. In GMT/Python, we adopted the xarray DataArray to represent grids. Let's load a grid of Earth relief at 30 arc-minute resolution.
grid = gmt.datasets.load_earth_relief(resolution="10m")
grid
# Regular grids can be plotted using the Figure.grdimage method.
fig = gmt.Figure()
fig.basemap(region="g", projection="W0/10i", frame=True)
fig.grdimage(grid, cmap="geo")
fig.show()
# We can also layer on the earthquakes on this map.
fig = gmt.Figure()
fig.basemap(region="g", projection="W0/10i", frame=True)
fig.grdimage(grid, cmap="geo")
fig.plot(x=quakes.longitude, y=quakes.latitude,
sizes=0.005*2**quakes.mag, color=quakes.depth/quakes.depth.max(),
style="ci", pen="black", cmap="viridis")
fig.show()
# ### Using the special file names
# The Earth relief data can be accessed using GMT's special file names: @earth_relief_XX. We can give this file name to grdimage instead of a grid.
fig = gmt.Figure()
fig.basemap(region="g", projection="W0/10i", frame=True)
fig.grdimage("@earth_relief_30m", cmap="geo")
fig.show()
# ### Automatic hillshading
# GMT can do automatic hillshading based on the input grid. Use shading=True to get the default shading.
fig = gmt.Figure()
fig.basemap(region="g", projection="W0/10i", frame=True)
fig.grdimage("@earth_relief_30m", cmap="geo", shading=True)
fig.show()
# ## Interactive visualization
# You can visualize the GMT/Python generated figure in an interactive virtual globe using NASA's Web WorldWind. In this case, we don’t need the frame or color in the continents. We must also use a Cartesian projection (X) and degrees (d) for plot units so that the figure can be aligned with the globe.
fig = gmt.Figure()
fig.grdimage("@earth_relief_10m", cmap="magma", shading=True,
region="BR", projection="X10id/10id")
fig.show(method="globe")
fig = gmt.Figure()
fig.grdimage("@earth_relief_10m", cmap="geo", shading=True,
projection="X10id/10id")
fig.plot(x=quakes.longitude, y=quakes.latitude,
sizes=0.005*2**quakes.mag, color=quakes.depth/quakes.depth.max(),
style="ci", pen="black", cmap="plasma")
fig.show(method="globe")
| GMT/StudentProjects/2018/GuopingLi/try-gmt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # I manually created this file by watch youtube
#
# https://www.youtube.com/watch?v=ZYdCDUfnVtk
# https://docs.python.org/3/reference/datamodel.html
#
# ## Yes, at first glance it looks intimidating, but a little effort will return handsome rewards
# +
# %matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
# -
# ## A simple object to work with
class Region():
    """
    A class to generate a number of random points on a specified interval.

    Arguments:
        region -- a tuple specifying the low and high sampling bounds; inclusive
        cnt -- number of points to generate

    Points are selected randomly (uniform distribution) between interval bounds.
    """

    def __init__(self, region, cnt):
        """How to build one."""
        self._region = region
        self._cnt = cnt
        self._samples = []
        lo, hi = region
        for i in range(cnt):
            self._samples.append(random.uniform(lo, hi))

    def __iter__(self):
        """What to do for things like a 'for ...' loop."""
        for s in self._samples:
            # https://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do
            yield s
        return

    def __str__(self):
        """What to do when asked to display one."""
        rep = "-- region --\n"
        rep += str(self._region) + "\n"
        rep += "-- samples --\n"
        rep += str(self._samples) + "\n"
        rep += "-- point count --\n"
        rep += str(self._cnt) + "\n"
        return(rep)

    def __len__(self):
        """What to do when 'len()' is called on one."""
        return(self._cnt)

    def __call__(self):
        """What to do when r = Region(..); r()."""
        return(self._samples)

    def __getitem__(self, idx):
        """What to do when: r = Region(..); r[idx]."""
        # valid indices are 0 .. cnt-1; the original check used `idx > self._cnt`,
        # an off-by-one that let idx == cnt slip past the custom bounds error
        if ((idx < 0) or (idx >= self._cnt)):
            raise IndexError("Region: index out of bounds")
        return(self._samples[idx])
# ## Through the magic of the double underscore or dunder methods we can specify what python should do (meaning; semantics) when it encounters certain common syntactical elements. I'll show what I mean by this.
#
# ## BTW, I've only defined a few and there are lots of the 'dunder' methods available but time is short ...
help(Region)
# ## Also, let's make a default object to see what the 'out of box' behavior is.
class Thing():
    """An empty class used to demonstrate default ('out of box') object behaviour."""
    pass
# ## Now, let's make a couple of these objects
t = Thing()
r = Region((0,2), 3)
# ## Now, what? Let's try and print them.
print(t)
print(r)
# ## This is the '__str()__' method at works.
#
# ## It's up to a composite to object to specify it's length by defining the __len__() method; for some objects it makes no sense to do so. But Region does define it so ...
len(t)
len(r)
# ## Since Region has a length, it would seem to make sense to be able to iterate over it. Iteration is enabled by defining the __iter__() method.
for p in r:
print(p)
# ## Region also specifies __getitem__(), we can index it.
r[0]
r[4]
# ## BTW, sometimes you want to leverage indexing semantics (e.g. obj[idx]) for something that doesn't have anything to index.
class PowerTwo():
    """Index an instance with any exponent to get the matching power of two."""

    def __getitem__(self, idx):
        return 2 ** idx
p2 = PowerTwo()
print(p2[1], p2[3], p2[10], p2[16])
# ## But I disagree ...
#
# ## By specifying a __call__() method we can specify what it means to call an instance.
r()
# ## Here we just return the sample lists.
# ## OK. Fine. Nice. So What?
#
# ### let's define a simple function to compute the mean. The sum() function expects its argument to be an iterable.
def mean(x):
    """Return the arithmetic mean of a sized iterable."""
    total = sum(x)
    return total / len(x)
l = [1,2,3]
mean(l)
# ## Region instances are iterable too because they have the iter() method. So mean() will also work for them.
r = Region((0,1), 400)
#print(r)
mean(r)
| .ipynb_checkpoints/Kevin_IndyPy-DataModel-Feb18-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from sklearn.linear_model import LogisticRegression

# Load the precomputed feature matrix; the last column is the 0/1 label.
with np.load('new_result_1000.npz') as fd:
    a = fd["result"]
print(a[0])
np.random.shuffle(a)  # shuffle rows in place before fitting
print(a[0])
# +
# Split into features (all columns but the last) and labels (last column).
X_train=a[:,0:len(a[0])-1]
Y_train=a[:,len(a[0])-1]
# y= Y_train.reshape((len(Y_train),1))
clf = LogisticRegression()
clf.fit(X_train, Y_train)
# -
print (len(a[0]))
print (Y_train[0:100])
# +
# Load the test feature matrix.
with np.load('new_test_features.npz') as fd:
    testmatrix= fd["testresult"]

# Count how many test samples are predicted as the positive class.
counter = 0
for i in clf.predict(testmatrix):
    if i==1:
        counter+=1
print (counter)

# Write per-sample positive-class probabilities to a Kaggle-style CSV.
temp = clf.predict_proba(testmatrix).tolist()
import csv
with open('new_final_result.csv', 'w',newline ='') as csvfile:
    writer = csv.writer(csvfile)
    # Id, Prediction
    line_title = []
    line_title.append('Id')
    line_title.append('Prediction')
    writer.writerow(line_title)
    index = 0
    for item in temp:
        index += 1
        temp_list = []
        temp_list.append(index)
        temp_list.append("%.9f" % item[1])  # probability of class 1
        writer.writerow(temp_list)
    csvfile.close()  # redundant: the `with` block already closes the file
print('finishing!')
# +
from sklearn.neural_network import MLPClassifier

# Small MLP: one hidden layer of 3 logistic units, trained with SGD.
mlp = MLPClassifier(solver='sgd', activation='logistic',alpha=1e-3,hidden_layer_sizes=(3), random_state=1,max_iter=50,verbose=10,learning_rate_init=.001)
mlp.fit(X_train, Y_train)
print (mlp.n_layers_)
print (mlp.n_iter_)
print (mlp.loss_)
print (mlp.out_activation_)
# +
# Write the MLP's positive-class probabilities to a second CSV.
temp = mlp.predict_proba(testmatrix).tolist()
import csv
with open('nn_final_result.csv', 'w',newline ='') as csvfile:
    writer = csv.writer(csvfile)
    # Id, Prediction
    line_title = []
    line_title.append('Id')
    line_title.append('Prediction')
    writer.writerow(line_title)
    index = 0
    for item in temp:
        index += 1
        temp_list = []
        temp_list.append(index)
        temp_list.append("%.9f" % item[1])  # probability of class 1
        writer.writerow(temp_list)
    csvfile.close()  # redundant: the `with` block already closes the file
print('finishing!')
# +
from sklearn import svm
# probability=True is required for the predict_proba() call in the next cell;
# the default SVC(probability=False) raises when probability estimates are
# requested. (It enables internal cross-validation, so fitting is slower.)
clf = svm.SVC(probability=True)
clf = clf.fit(X_train, Y_train)
# +
# Write the SVM's positive-class probabilities to a third CSV.
# NOTE(review): this requires clf to support predict_proba; see the SVC cell above.
temp = clf.predict_proba(testmatrix).tolist()
import csv
with open('new_final_result_dt.csv', 'w',newline ='') as csvfile:
    writer = csv.writer(csvfile)
    # Id, Prediction
    line_title = []
    line_title.append('Id')
    line_title.append('Prediction')
    writer.writerow(line_title)
    index = 0
    for item in temp:
        index += 1
        temp_list = []
        temp_list.append(index)
        temp_list.append("%.9f" % item[1])  # probability of class 1
        writer.writerow(temp_list)
    csvfile.close()  # redundant: the `with` block already closes the file
print('finishing!')
# -
print (temp)
| 82.868/Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: academy
# language: python
# name: academy
# ---
# Academy project
# ===============
# Objectifs:
# -----------
#
# - [ ] Valider la qualité du dataset (valeurs manquantes, dupliquées)
# - [ ] Décrire les informations contenues dans le dataset : Nombre de colonnes et de lignes
# - [ ] Sélection des données pertinentes pour répondre à la problématique.
# - [ ] Déterminer des ordres de grandeurs des indicateurs statistiques classiques pour les différentes zones géographiques et pays du monde (moyenne/médiane/écart-type par pays et continent ou bloc géographique)
#
# +
import os
import bs4 as bs
import contextily as ctx # Geo base map
import geopandas as gpd # Geo data processing
from IPython.display import display
import ipywidgets as widgets
from ipywidgets import interact
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pandas as pd
from requests import HTTPError
import requests
import seaborn as sns
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
sns.set(rc={'figure.figsize':(24, 12)})
# -
# ### Chargement des données
#
# Les données sont téléchargées sous forme d'une archive `.zip`. L'archive contient 5 fichiers `.csv`
# +
# Folder holding the raw csv files.
DATA_DIR = os.path.abspath('../data/raw')
# Build a dict of dataframes keyed by file name (without extension).
all_files = {
    fname.split('.')[0]: pd.read_csv(os.path.join(DATA_DIR, fname))
    for fname in os.listdir(DATA_DIR)
    if fname.endswith('.csv')
}
# Each file contains one or more empty columns that must be removed.
for name, frame in all_files.items():
    print('File : %s' % name)
    print(f"File's shape {frame.shape}")
    # Columns whose name contains 'Unnamed' are empty artefacts.
    frame.drop([c for c in frame.columns if 'Unnamed' in c], axis=1, inplace=True)
    # Strip spaces from the remaining column names.
    frame.columns = [c.replace(' ', '') for c in frame.columns]
# -
# On crée un fichier propres contenant les codes ISO des pays et les noms ainsi que les régions associées. Cela sera utile pour la suite.
# (Save a clean table of country ISO codes, names and regions for later use.)
countries = all_files['EdStatsCountry'][['CountryCode', 'ShortName', 'Region']]
countries.to_hdf('../data/processed/data.hdf5', key='Countries')
# Le fichier contenant les données à proprement parlé est `EdStatsData`.
data = all_files['EdStatsData']
data.head()
data.columns
# Conversion des année au format str vers int
# (Year column labels are 4-character strings; turn them into ints.)
data.columns = data.columns.map(lambda x : int(x) if len(x) == 4 else x)
indicators = data['IndicatorCode'].drop_duplicates().sort_values()
print(f"Nombre d'indicateurs disponibles {len(indicators)}")
# nombre d'indicateurs disponibles par pays
# (Non-null counts per year, per country, across all indicators.)
data_count = data.drop(['CountryCode', 'IndicatorCode', 'IndicatorName'], axis=1).groupby('CountryName').count()
display(data_count.head())
print(f"Nombre d'années disponibles : {data_count.shape[1]} et nombre de zones/pays disponibles : {data_count.shape[0]}")
# On a donc 3665 indicateurs différents pour chaque zone/pays. Le dataframe `data_count` contient alors le nombre d'indicateurs disponibles (sur les 3665) et cela pour chaque année.
title = "Nombre total d'indicateurs en fonction de l'année"
plt.figure(figsize=(14, 6))
ax = sns.lineplot(data=data_count.sum(axis=0))
plt.xlabel("Année")
plt.ylabel("Nombre total d'indicateurs disponibles")
plt.title(title)
plt.savefig('../reports/figures/total_indic_per_year.png')
# Nombre de zones/pays couverts en fonction des années
# (Binarise the counts: 1 where a country has any data that year, NaN elsewhere.)
_data = data_count.copy()
_data[_data == 0] = np.nan
_data[~_data.isna()] = 1
title = "Nombre total de pays/zones couverts en fonction de l'année"
plt.figure(figsize=(14, 6))
ax = sns.lineplot(data=_data.sum(axis=0))
plt.xlabel("Année")
plt.ylabel("Nombre de pays documentés")
plt.title(title)
plt.savefig('../reports/figures/total_countries_per_year.png')
# Ci-dessous, on voit les 5 années qui contiennent le moins de données (2017, ..., 1972) ainsi que les 5 années où l'on a le plus de données.
data_count.sum(axis=0).sort_values()
# Ci-dessous, on voit les 20 pays pour lesquels on a le moins de données:
data_count.sum(axis=1).sort_values().head(20)
# Ci-dessous, on voit les 20 pays pour lesquels on a le plus de données:
display(data_count.sum(axis=1).sort_values().tail(20))
# On remarque que les pays/zones les moins monitorés sont des petites îles ou encore le Groenland ce qui parait cohérent.
# Voyons graphiquement les pays pour lesquels on a peu de données.
total_per_country = data_count.sum(axis=1)
total_per_country.name = 'total'
country_name = data[['CountryCode', 'CountryName']].drop_duplicates().set_index('CountryName')
# Pour représenter graphiquement les données, on produit dans un premier temps des cartes.
#
# En raison de la nature géographique des données, une carte semble être une bonne représentation.
#
# **Note : Les cartes suivantes ne prennent pas en compte les zones géographiques mais seulement les pays**
# Pour croiser les données avec les données géographiques, on a
# besoin d'une clée commune. Hors 6 pays non pas officiellement d'iso_a3
map_data = gpd.read_file('../data/external/world.json') # Carte vierge contenant les frontières.
map_data[map_data['iso_a3'] != map_data['adm0_a3']][['adm0_a3', 'iso_a3', 'name']]
# Les codes iso_a3 manquants sont remplacés par les codes adm0_a3.
#
# Sauf le kosovo, les données dont on dispose utilisent un code non officiel XKX
def make_map(df, col, title, base_map=False, axis=False, save=None, legend=True, alpha=1):
    """Draw a choropleth world map of column `col` of `df`.

    Parameters
    ----------
    df : DataFrame indexed by ISO alpha-3 country code.
    col : name of the column of `df` used to colour the map.
    title : figure title.
    base_map : overlay the countries on a web tile base map (reprojects to EPSG:3395).
    axis : keep the matplotlib axes visible.
    save : if given, file name (without extension) written under ../reports/figures/.
    legend : show the colour bar.
    alpha : polygon opacity.
    """
    map_data = gpd.read_file('../data/external/world.json')  # blank map with borders
    # Some entries have no official iso_a3 ('-99'); fall back to adm0_a3.
    adm_a3 = map_data[map_data['iso_a3'] != map_data['adm0_a3']]['adm0_a3']
    map_data.loc[map_data[map_data['iso_a3'] == '-99'].index, 'iso_a3'] = adm_a3
    # Kosovo: our dataset uses the unofficial XKX code instead of KOS.
    # BUG FIX: .at only accepts a single scalar row label and fails on a
    # boolean mask; .loc is the correct indexer for mask-based assignment.
    map_data.loc[map_data['iso_a3'] == 'KOS', 'iso_a3'] = 'XKX'
    map_data.set_index('iso_a3', inplace=True, drop=True)
    map_data = pd.concat([map_data, df], axis=1, sort=False)
    # Drop countries with no value or no geometry before plotting.
    map_data = map_data[~map_data[col].isna()]
    map_data = map_data[~map_data['geometry'].isna()]
    if base_map:
        try:
            map_data = map_data.to_crs(epsg=3395)  # mercator-like CRS matching the tiles
        except AttributeError:
            raise ValueError(f'No data ? dataframe shape : {map_data.shape}')
    fig, ax = plt.subplots(1, figsize=(24, 12))
    map_data.plot(column=col, ax=ax, legend=legend, edgecolor='black', cmap="RdYlGn", alpha=alpha)
    if base_map:
        try:
            ctx.add_basemap(ax, url=ctx.providers.Stamen.TonerLite)
        except HTTPError as e:
            # Tile server errors should not kill the whole notebook.
            print(e)
    if not axis:
        ax.axis('off')
    plt.title(title)
    plt.show()
    if save:
        fig.savefig(f'../reports/figures/{save}.png')
# Choropleth of the total number of indicator-years available per country.
make_map(pd.concat([total_per_country, country_name], axis=1).set_index('CountryCode'), 'total',
         "total du nombre d'indicateurs/an en fonction du pays", save='data_availability')
total_future_per_country = data_count[[x for x in data_count.columns if x > 2019]].sum(axis=1)
# Normalise to a flag: x/x is 1.0 where forecast data exists, NaN (0/0) otherwise.
total_future_per_country = total_future_per_country / total_future_per_country
total_future_per_country[total_future_per_country.isna()] = 0
make_map(pd.concat([total_future_per_country, country_name], axis=1).set_index('CountryCode'), 0,
         'pays pour lesquels on dispose de données prévisionnelles', legend=False, save='forecast_availability')
# Analyse des indicateurs
# ------------------------
#
# Est-ce qu'il y a un indicateur mesuré commun à toute les zones et disponible chaque année ?
print(f'dimension du tableau si on drop les lignes ayant des NaN {data.dropna(axis=0).shape}')
print(f'dimension du tableau si on drop les colonnes ayant des NaN {data.dropna(axis=1).shape}')
# Non, il n'y a pas d'indicateur disponible pour toutes les années et toutes les zones.
# +
def get_available_indic_for_year(year):
    """Return the sorted list of indicator names that have data for `year`."""
    indics = data[['IndicatorName', year]].dropna(axis=0)['IndicatorName'].drop_duplicates()
    # BUG FIX: the original called sort_values on an undefined name `indic`
    # (NameError); sort the series we just built instead.
    indics = indics.sort_values()
    return list(indics)
def get_available_years_for_indic(indic):
    """Return the list of year columns for which indicator `indic` has data."""
    sub_data = data[data['IndicatorName'] == indic]
    # BUG FIX: the original dropped a hard-coded column list containing
    # names that do not exist ('forecast', and 'IndicateurClass' /
    # 'IndicateurSubClass' vs the 'IndicatorClass' / 'IndicatorSubClass'
    # columns created later), which raises KeyError. Selecting the
    # int-typed year columns is robust to whatever metadata columns exist.
    sub_data = sub_data[[c for c in sub_data.columns if isinstance(c, int)]]
    count_per_y = sub_data.count(axis=0)
    years = count_per_y[count_per_y != 0].index
    return list(years)
def handle_slider_change(change):
    """When the year slider moves, restrict the indicator dropdown options."""
    i_widget.options = get_available_indic_for_year(change.new)
def handle_dropdown_change(change):
    """When an indicator is picked, restrict the year slider options."""
    y_widget.options = get_available_years_for_indic(change.new)
def handle_reset(b):
    """Reset both widgets to their full option lists and default values."""
    i_widget.options = list(indicators_df['IndicatorName'])
    i_widget.value = 'Population, total'
    y_widget.options = years
    y_widget.value = 1970
# Year columns are the int-typed ones after the header conversion above.
years = [x for x in data.columns if type(x) == int]
indicators_df = data[['IndicatorName', 'IndicatorCode']].drop_duplicates()
reset_widget = widgets.Button(description='Reset',
                              disabled=False,
                              button_style='',
                              tooltip='reset',
                              icon='check')
s_widget = widgets.Text(value=f'Pas de données sélectionnées.')
reset_widget.on_click(handle_reset)
# +
y_widget = widgets.SelectionSlider(options=years,
                                   description="Année : ",
                                   continuous_update=False)
i_widget = widgets.Dropdown(options=list(indicators_df['IndicatorName']),
                            description="Indicateur : ",
                            continuous_update=True,
                            value='Population, total')
i_widget.observe(handle_dropdown_change, names='value')
display(reset_widget, s_widget)
@interact
def map_indic(indic=i_widget, year=y_widget):
    """Interactive choropleth of the selected indicator for one year."""
    indic_name = indic
    # Translate the displayed indicator name back to its code.
    indic = indicators_df.loc[indicators_df['IndicatorName'] == indic]['IndicatorCode'].values[0]
    interest_data = data[data['IndicatorCode'] == indic]
    interest_data.set_index(interest_data['CountryCode'], inplace=True, drop=True)
    interest_data = interest_data[year].dropna()
    s_widget.value = f'Nombre de pays: {interest_data.shape[0]}'
    interest_data.name = 'var'
    # graph
    try:
        make_map(interest_data, 'var', f"{indic_name} year {year}",
                 base_map=True, alpha=0.6)
    except ValueError as e:
        # make_map raises ValueError when nothing is plottable; just report it.
        print(e)
# -
# #### Est-ce que l'on peut supprimer certains indicateurs?
#
# Pour explorer les indices de manière plus simple, on hiérarchise les codes d'indicateurs. Les widgets ci-dessous permettent de voir les différents indicateurs disponibles classe par classe.
# Class = first dotted segment of the indicator code; subclass = first two segments.
data['IndicatorClass'] = data['IndicatorCode'].map(lambda x: x.split('.')[0])
data['IndicatorSubClass'] = data['IndicatorCode'].map(lambda x : '.'.join(x.split('.')[:2]))
# +
def get_available_subclass(clas):
    """Return the subclasses belonging to indicator class `clas`."""
    return list(data[data['IndicatorClass'] == clas]['IndicatorSubClass'].drop_duplicates())
def handle_class_change(change):
    """Keep the subclass dropdown consistent with the chosen class."""
    subclass_widget.options = get_available_subclass(change.new)
class_widget = widgets.Dropdown(description='Catégorie indicateur :', options=list(data['IndicatorClass'].drop_duplicates()), continuous_update=True)
subclass_widget = widgets.Dropdown(description='Sous cat. :', options=get_available_subclass(class_widget.value), continuous_update=True)
class_widget.observe(handle_class_change, names='value')
#display(class_widget, subclass_widget)
@interact
def data_explorer(clas=class_widget, subclass=subclass_widget):
    """List the indicators of the selected subclass in a wide table."""
    with pd.option_context('max_colwidth', 800, 'display.max_rows', None):
        display(data[data['IndicatorSubClass'] == subclass][['IndicatorCode', 'IndicatorName']].drop_duplicates())
# -
# **Quels sont les 100 indicateurs les plus mesurés?**
# Total number of (country, year) records available per indicator.
total_records_by_indic = data.groupby('IndicatorCode').count().drop(['CountryName', 'CountryCode', ], axis=1).sum(axis=1).sort_values(ascending=False)
tot_years = len(data.groupby('IndicatorName').count().drop(['CountryName', 'CountryCode', 'IndicatorCode'], axis=1).columns)
# 242: number of zones/countries -- presumably data_count.shape[0]; TODO confirm.
availability_by_indic = (total_records_by_indic / (tot_years * 242)) * 100
# Truncate long indicator codes for readable tick labels.
availability_by_indic.index = availability_by_indic.index.map(lambda x : x[:40]+'...' if len(x)>43 else x)
# +
availability_by_indic.head(100).plot(kind='bar', rot=90, figsize=(15, 6))
plt.title('Les 100 indicateurs les plus disponibles')
plt.ylabel('disponibilité [%]')
plt.xticks(fontsize=8)
plt.show()
print('='*40)
# Name lookup table indexed by indicator code.
indicators = data[['IndicatorName', 'IndicatorCode']].drop_duplicates()
indicators.set_index(indicators['IndicatorCode'], drop=True, inplace=True)
indicators.drop('IndicatorCode', axis=1, inplace=True)
with pd.option_context('display.max_rows', None,
                       'display.max_columns', 1,
                       'max_colwidth', 200): # more options can be specified also
    display(indicators.loc[availability_by_indic.head(100).index])
# -
# ### Données pertinentes:
#
# #### Archives
#
# * Démographie
# * SP.POP.GROW : Population growth (annual %)
# * SP.POP.TOTL : Population, total
# * SP.POP.0014.TO : Population, ages 0-14, total
# * SP.POP.1018.TO.UN : Population, ages 10-18, total
# * SP.POP.1524.TO.UN : Population, ages 15-24, total
#
# * Economique
# * NY.GDP.MKTP.KD : GDP at market prices (constant 2005 US$)
#
# * Enseignement secondaire
# * UIS.NERA.2 : Adjusted net enrolment rate, lower secondary, both sexes (%)
# * UIS.NERA.3 : Adjusted net enrolment rate, upper secondary, both sexes (%)
# * UIS.E.2 : Enrolment in lower secondary education, both sexes (number)
# * UIS.E.3 : Enrolment in upper secondary education, both sexes (number)
# * UIS.E.4 : Enrolment in post-secondary non-tertiary education, both sexes (number)
#
# * Enseignement supérieur
# * UIS.E.5.B : Enrolment in tertiary education, ISCED 5 programmes, both sexes (number)
#
# #### Prévisionnel
#
# * PRJ.MYS.0T19.MF : Wittgenstein Projection: Mean years of schooling. Age 0-19. Total
#
# Indicator codes retained for the rest of the analysis.
interest_indic = [#Demographie
                  'SP.POP.1524.TO.UN',
                  # Economic
                  'NY.GDP.PCAP.CD',
                  #secondary education
                  'UIS.NERA.3',# too many countries are missing
                  'UIS.E.3',
                  'SE.SEC.ENRR.UP', # net enrolment in secondary
                  #tertiary education
                  'UIS.E.5.B',
                  #Internet
                  'IT.NET.USER.P2'
                  #projection
                  ]
# One trimmed dataframe per indicator (all-NaN rows and columns removed).
interest_data = dict()
for indic in interest_indic:
    interest_data[indic] = data[data['IndicatorCode'] == indic].dropna(axis=1, how='all', inplace=False).dropna(axis=0, how='all', inplace=False)
# taux de remplissage :
# -----------------
# One bar chart per selected indicator: share of countries documented per year.
fig = plt.figure(figsize=(12, 40))
for idx, item in enumerate(interest_data.items()):
    key, _data = item
    # NOTE: a dead `_data.dropna()` statement (its result was discarded,
    # so it had no effect) was removed here.
    availability = _data[[x for x in _data.columns if type(x) == int]].count(axis=0) / len(_data.index)
    plt.subplot(len(interest_indic), 1, idx + 1)
    plt.bar([x for x in _data.columns if type(x) == int], availability)
    plt.subplots_adjust(hspace=0.5)
    plt.ylim(0, 1)
    plt.title(indicators.loc[key]['IndicatorName'])
plt.show()
# Restructuration des données:
# ------------------------------
#
# Pour chaque indicateur, on va créer un dataframe contenant les entrées suivantes:
#
# ```
# | Pays | Année | Valeur de l'indicateur | Source de la donnée |
# ```
#
# On sauvegarde alors le tout dans un fichier `hdf5`
#
# On drop les zones qui ne sont pas des pays (on utilise simplement les codes ISO des pays https://fr.wikipedia.org/wiki/ISO_3166-1). Le fait de garder les zones n'a pas un grand intérêt pour le reste de l'analyse, on cherche précisément les pays.
# +
def get_iso_from_wiki():
    """Scrape the ISO 3166-1 alpha-3 country codes from French Wikipedia."""
    response = requests.get('https://fr.wikipedia.org/wiki/ISO_3166-1')
    # BUG FIX: BeautifulSoup takes the parser name as its second positional
    # argument (`features`); the original passed an unsupported `parser=`
    # keyword which bs4 does not accept.
    table = bs.BeautifulSoup(response.content, 'html.parser')
    table = table.find_all('table')[0]
    rows = table.find_all('tr')
    iso = list()
    for row in rows:
        td = row.find_all('td')
        if td:
            iso.append(td[1].text)  # second cell of each row holds the alpha-3 code
    return iso
iso = get_iso_from_wiki()
# Drop every zone whose code is not a real country (regions, aggregates, ...).
for country_code in data['CountryCode'].drop_duplicates().values:
    if country_code not in iso:
        print(country_code)
        data.drop(data[data['CountryCode'] == country_code].index, axis=0, inplace=True)
# +
def save():
    """Persist each indicator of interest into the hdf5 store.

    For every indicator, a long-format (CountryCode, year, value) dataframe
    is written under the indicator code as key.
    """
    for indic in tqdm(interest_indic, total=len(interest_indic)):
        # FIX: work on an explicit copy -- the in-place dropna calls below
        # previously operated on a slice of `data` (SettingWithCopyWarning,
        # and the effect on the parent frame is undefined).
        _data = data[data['IndicatorCode'] == indic].copy()
        _data.dropna(axis=1, inplace=True, how='all')
        _data.dropna(axis=0, inplace=True, how='all')
        _data.set_index('CountryCode', inplace=True, drop=True)
        # Keep only the int-typed year columns.
        _data.drop([x for x in _data.columns if type(x) == str], axis=1, inplace=True)
        _data.columns.name = 'year'
        _data.dropna(axis=0, inplace=True, how='all')
        _data = _data.stack()  # wide years -> long (country, year) rows
        _data = _data.reset_index()
        _data.to_hdf('../data/processed/data.hdf5', key=indic)
save()
| notebooks/academy_explo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: geo_env
# language: python
# name: geo_env
# ---
import pandas as pd
import numpy as np
import geopandas as gpd
import pyproj
import random
import uuid
# Pyproj Correction
# NOTE(review): hard-coded, machine-specific proj data path -- only works on
# the author's machine.
pyproj.datadir.set_data_dir('/Users/shai/anaconda3/envs/geo_env/share/proj')
pd.options.display.max_columns=100
# ## Parameters
arnonaSqM = 5.43  # city tax (arnona) per square metre
rentPPM = 70  # rent price per square metre
buyPPM = 20000  # purchase price per square metre
owners=0.65  # share of owners among agents
jewishPopDemo2019 = pd.read_excel('israel_population_jewish_lamas_groups_of_10_2019.xlsx')
jewishPopDemo2019.reset_index(inplace=True,drop=True)
jewishPopDemo2019.drop([0,1],inplace=True)  # drop header artefact rows
tot = jewishPopDemo2019['Total'].sum()
jewishPopDemo2019['ratio'] = jewishPopDemo2019['Total']/tot
# Midpoints of the 10-year age buckets: 25, 35, ..., 95.
mid = [ 10*i+25 for i in range(8)]
jewishPopDemo2019['middle'] = mid
ageDist = [0.2, 0.2, 0.18, 0.15, 0.14, 0.08, 0.04, 0.01]
ageMiddle = jewishPopDemo2019['middle'].values.tolist()
# https://www.cbs.gov.il/he/Pages/search/TableMaps.aspx?CbsSubject=אוכלוסייה
jewishPopDemo2019.plot.bar(x='Age Group 10',y='ratio')
jewishPopDemo2019['middlexDist'] = jewishPopDemo2019['middle'] * jewishPopDemo2019['ratio']
jewishPopDemo2019['middlexDist'].sum()  # expected (mean) age under this distribution
bldgs = pd.read_excel('BuildingInSimulationAndStatsApril7.xlsx') # buildings data
originalAgents = pd.read_excel('OrigianlAgentsApril7_2021.xlsx') # original agents - can be generated many times for sensitivity analysis
newApartments = pd.read_excel('newApartmentsDataSetApril_7.xlsx') # new apartments
newApartments.drop(columns='Unnamed: 0',inplace=True)
# ## Function
def mortgageCal(houseValue, downPaymentPercent_range=(0.25, 0.5), years_pay=25, intrest=3.46):
    """Return the monthly mortgage payment for a house purchase.

    A down-payment fraction is drawn uniformly from
    `downPaymentPercent_range`; the remainder of `houseValue` is financed
    over `years_pay` years at the yearly `intrest` rate (in percent),
    using the standard annuity formula.
    """
    low, high = downPaymentPercent_range
    down_fraction = random.uniform(low, high)
    # Amount actually borrowed after the down payment.
    principal = houseValue - houseValue * down_fraction
    monthly_rate = (intrest / 100.0) / 12.0
    n_months = years_pay * 12
    growth = np.power(1 + monthly_rate, n_months)
    # Annuity payment: P * i * (1+i)^n / ((1+i)^n - 1)
    return (principal * monthly_rate * growth) / (growth - 1)
# ## Move all agents to new apartments and tag them with the iteration number -- this is the template for the Agents Time Series
AgentsTimeSeries = originalAgents.copy()
AgentsTimeSeries.drop(columns=['Unnamed: 0'],inplace=True)
AgentsTimeSeries['prjectType'] = 0
AgentsTimeSeries['tic'] = 0  # tic 0 = initial state, before any project
AgentsTimeSeries['status'] = 'stay'
AgentsTimeSeries['noDiscount'] = AgentsTimeSeries['noDiscount'].fillna(0)
AgentsTimeSeriesOriginal = AgentsTimeSeries.copy()
# Process projects in construction order; each OrderA value is one tic.
bldgs.sort_values(by='OrderA',inplace=True)
for tic in bldgs['OrderA'].values:
    currentProject = bldgs.query('OrderA=='+str(tic))['ProjNumber'].values[0]
    projectType = bldgs.query('OrderA=='+str(tic))['ProjType'].values[0].astype(int)
    newApartmentsSlice = newApartments.query('ProjNumber=="'+currentProject+'"').copy()
    newApartmentsSlice.reset_index(inplace=True,drop=True)
    # Agents originally living in this project's buildings.
    CurrentAgents = AgentsTimeSeriesOriginal.query('ProjNumber=="'+currentProject+'"').copy().reset_index(drop=True)
    CurrentAgents.drop(columns=['bldCode','doorIndex','bldCodeDoorIndex','ProjNumber','aprtmentSize','tic','prjectType'],inplace=True)
    # Pair each agent (row-wise) with a new apartment.
    CurrentAgentsNewApartments = pd.concat([newApartmentsSlice,CurrentAgents],axis=1).reset_index(drop=True)
    CurrentAgentsNewApartments['tic'] = tic
    AgentsTimeSeries = pd.concat([AgentsTimeSeries,CurrentAgentsNewApartments]).reset_index(drop=True)
#AgentsTimeSeries.reset_i
# GIS layer with the simulated buildings; keep only post-construction ones.
bldg_reference = gpd.read_file('Json/simulationBldgs_Process_march1_1729.geojson')
bldAfter = bldg_reference[bldg_reference['status']=='Building after']
# ## Dropped duplicates in Colab !!!
bldAfter.query('bld_addres=="210_30_210_32_rr"')
# Rows 116-118 are known duplicates of the address queried above.
bldAfter = bldAfter.drop([116,117,118])
bldAfter.reset_index(drop=True,inplace=True)
bld_floor = bldAfter[['bld_addres','floors']]
def getBldHeight(bldCode):
    """Return the number of floors of building `bldCode` from the GIS layer."""
    Floors = bld_floor.query(f"bld_addres=='{bldCode}'")['floors']
    return Floors.values[0].astype(int)
# # add floors from GIS :)
# Attach the GIS floor count to every post-redevelopment (tic>0) row.
AgentsTimeSeries.loc[AgentsTimeSeries.query("tic>0").index,"Floors"] \
    = AgentsTimeSeries.loc[AgentsTimeSeries.query("tic>0").index,"bldCode"]\
    .apply(lambda x:bld_floor.query(f"bld_addres=='{x}'")['floors'].values[0].astype(int))
# Monthly maintenance cost brackets by building height.
newBldgMaintenance = pd.DataFrame({'floor_min':[0,5,9,13],'floor_max':[4,8,12,100],'cost':[250,320,400,450]})
newBldgMaintenance
def getConstFromFloor(floor):
    """Return the monthly maintenance cost bracket for a `floor`-storey building."""
    level = int(floor)
    in_range = (newBldgMaintenance['floor_min'] <= level) & (newBldgMaintenance['floor_max'] >= level)
    return newBldgMaintenance.loc[in_range, 'cost'].values[0]
getConstFromFloor(7)  # sanity check: 7 floors falls in the 5-8 bracket -> 320
AgentsTimeSeries.loc[AgentsTimeSeries.query("tic>0").index,'MainCost'] = AgentsTimeSeries.loc[AgentsTimeSeries.query("tic>0").index,'Floors'].apply(getConstFromFloor)
# City tax scales with apartment size; staying cost = tax + maintenance.
AgentsTimeSeries.loc[(AgentsTimeSeries['tic']>0),'cityTax'] = AgentsTimeSeries.loc[(AgentsTimeSeries['tic']>0),'aprtmentSize']*arnonaSqM
AgentsTimeSeries.loc[(AgentsTimeSeries['tic']>0),'CostForStaying'] = AgentsTimeSeries.loc[(AgentsTimeSeries['tic']>0),'cityTax']+AgentsTimeSeries.loc[(AgentsTimeSeries['tic']>0),'MainCost']
# ## Only rows with a known income get a burden ratio
con = ((AgentsTimeSeries['income'].notna())) & (AgentsTimeSeries['tic']>0)
AgentsTimeSeries.loc[con,'ratioCostForStaying'] = AgentsTimeSeries.loc[con,'CostForStaying']/AgentsTimeSeries.loc[con,'income']
# Renters always leave after redevelopment.
AgentsTimeSeries.loc[AgentsTimeSeries.query("tic>0 and rent==1.0").index,'status'] = 'leave'
AgentsTimeSeries.loc[AgentsTimeSeries.query("tic>0 and rent==1.0").index,'reason_leave'] = 'Rent'
# Agents older than 65 leave as well.
AgentsTimeSeries.loc[AgentsTimeSeries.query("(tic>0) and (age>65) and (status=='stay')").index,'status'] = 'leave'
# BUG FIX: the original compared the *literal string* 'reason_leave' to
# 'income' (always True); compare the column itself. engine='python' is
# used for consistency with the other object-column queries below.
AgentsTimeSeries.loc[AgentsTimeSeries.query("(tic>0) and (age>65) and (status=='leave') and (reason_leave!='income')", engine='python').index,'reason_leave'] = 'Age'
# Households whose staying cost exceeds 8% of income leave (housing burden).
AgentsTimeSeries.loc[AgentsTimeSeries.query("(tic>0) and (ratioCostForStaying>0.08) and (status=='stay')").index,'status'] = 'leave'
# engine='python' is required for the .isnull() method call inside query.
AgentsTimeSeries.loc[AgentsTimeSeries.query("(tic>0) and (ratioCostForStaying>0.08) and (status=='leave') and (reason_leave.isnull())", engine='python').index,'reason_leave'] = 'Burden'
# ## No original tenant left because of housing burden -- only Age and Rent!!!
AgentsTimeSeries['reason_leave'].value_counts()
type(AgentsTimeSeries.loc[0,'ratioCostForStaying'])
AgentsTimeSeries[AgentsTimeSeries['ratioCostForStaying'].notna()]['ratioCostForStaying'].hist()
AgentsTimeSeries['mortgage'] = 0
AgentsTimeSeries['rentPrice'] = 0
# Agents that left (or brand-new empty units) become slots for new agents.
newAgents = AgentsTimeSeries.query("status.isnull() or status=='leave'", engine='python').copy()
newAgents.loc[newAgents.query("status.isnull()", engine='python').index,'group']='existing'
newAgents.loc[newAgents.query("status=='leave'", engine='python').index,'group']='add'
newAgents['yearsInBldg'] = 0
num = len(newAgents)
op = ageMiddle  # age-bucket midpoints
p = ageDist  # probability of each age bucket
num
# Draw ages and tenure type for the incoming agents.
ages = np.random.choice(op, size = num, p=p)
# NOTE(review): p=[owners, 1-owners] assigns the `owners` share (0.65) to
# 'rent' -- verify the intended order of the probabilities.
rent_own = np.random.choice(['rent','own'],size=num,p=[owners,1-owners])
rent_own
rent_filter = rent_own=='rent'
own_filter = rent_own=='own'
newAgents['age'] = ages
newAgents['age'] = newAgents['age'].apply(lambda x: np.random.randint(x-5,x+5)) # add variation (noise) to the ages
newAgents.loc[:,['lowDiscount','highDiscount','noDiscount']] = [0,0,1]
newAgents.loc[:,['rent','own']] = [0,0]
newAgents.loc[rent_filter,'rent'] =1
newAgents.loc[own_filter,'own'] =1
# uuid1 gives every new agent a fresh unique id.
newAgents['agentID'] =newAgents['agentID'].apply(lambda x:uuid.uuid1())
newAgents['status'] = 'New Comers'
newAgents['reason_leave'] = np.nan
# +
newAgents['HouseValue'] = 0
newAgents['rentPrint'] = 0  # NOTE(review): likely a typo for 'rentPrice'; the column is dropped later, so it is dead data
rentFilter = newAgents.query('rent==1').index
newAgents.loc[rentFilter,'rentPrice'] = rentPPM*newAgents.loc[rentFilter,'aprtmentSize']
ownFilter = newAgents.query('own==1').index
rentFilter = newAgents.query('rent==1').index
newAgents.loc[ownFilter,'HouseValue'] = newAgents.loc[ownFilter,'aprtmentSize'] * buyPPM
# Owners finance with a ~25% down payment.
newAgents.loc[ownFilter,'mortgage'] = newAgents.loc[ownFilter,'HouseValue'].apply(lambda x: mortgageCal(houseValue=x,downPaymentPercent_range=(0.25, 0.26))).astype(int)
# Total monthly cost, and an income drawn to keep the burden near 38%.
newAgents['CostForStaying'] = newAgents['MainCost'] + newAgents['cityTax'] + newAgents['rentPrice'] + newAgents['mortgage']
newAgents['baseIncome'] = newAgents['CostForStaying']/0.38 # base is 38% for burden
newAgents['income'] = newAgents['baseIncome'] + newAgents['baseIncome'].apply(lambda x:np.random.randint(0,5000)).astype(int)
newAgents['ratioCostForStaying'] = (newAgents['CostForStaying']/newAgents['income'])
# -
newAgents['ratioCostForStaying'].hist()
newAgents['ratioCostForStaying'].max()
newAgents['CostForStaying'].hist()
# Split new agents into replacements for existing rows vs. extra rows.
newAgents_Merge = newAgents.query('group=="existing"').drop(columns=['HouseValue','rentPrint','baseIncome','group'])
newAgents_Add = newAgents.query('group=="add"').drop(columns=['HouseValue','rentPrint','baseIncome','group'])
newAgents_Merge.index
AgentsTimeSeries.loc[newAgents_Merge.index] = newAgents_Merge.copy() # Insert new Comers
# FIX: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat is the supported equivalent (already used elsewhere in this file).
AgentsTimeSeries = pd.concat([AgentsTimeSeries, newAgents_Add])
AgentsTimeSeries.reset_index(inplace=True, drop=True)
AgentsTimeSeries
# ## Now I need to:
# 1. create new agents for the displaced ones (and append them)
# 2. create new agents in general and place them in the same places.
#
# # Generating new agents (All cases)
# 1. Israel Age Distribution
# 2. Mortgage Calculator for income
# 3. No Discount
# 4. Rent/Own random if Rent Income is lower
# 5. Status New Comers
# Quick look at where the new agents were placed.
newAgents[['bldCode','bldCodeDoorIndex','status']]
| Data collection and engineering/simulation_April6_2021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import plotly
from plotly import __version__
import matplotlib.pyplot as plt
# BUG FIX: this notebook used `sns` and `tips` without ever defining them;
# import seaborn and load its example tips dataset explicitly.
import seaborn as sns
tips = sns.load_dataset('tips')
plt.figure(figsize=(10, 4))
sns.countplot(x='sex', data=tips)
# 'notebook' is the default context; font_scale=1 keeps the default sizes.
sns.set_context('notebook', font_scale=1)
sns.countplot(x='sex', data=tips)
# Regression plot of tip vs. total bill, coloured by sex.
sns.lmplot(x='total_bill', y='tip', data=tips, hue='sex', palette='cool')
| notebooks/Python3-DataScience/05-Seaborn/06-Plots styles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SpotifyWatch
# language: python
# name: spotifywatch
# ---
# +
# default_exp albumcover
# -
# # Album Cover Retrieval
#
# <br>
#
# ### Imports
# +
#exports
import pandas as pd
import numpy as np
import os
import requests
from ipypb import track
from IPython.display import Image
# -
# +
# Discover Weekly playlist export with one row per track.
df_dw = pd.read_csv('../data/playlists/discover_weekly.csv')
df_dw.head()
# -
# +
# Preview the first track's album cover inline.
img_url = df_dw.loc[0, 'album_art_url']
Image(url=img_url, width=250)
# -
# +
# Download every album cover that is not already in the local image folder.
img_dir = '../img/album_covers'
img_already_downloaded = os.listdir(img_dir)
for _, (album_cover_url, album_uri) in track(df_dw[['album_art_url', 'album_uri']].iterrows(), total=df_dw.shape[0]):
    album_id = album_uri.split(':')[-1]  # last segment of the spotify URI
    img_fp = f'{img_dir}/{album_id}.png'
    # Rows without a cover URL hold NaN (a float), hence the str check.
    has_album_cover = isinstance(album_cover_url, str)
    already_downloaded = f'{album_id}.png' in img_already_downloaded
    if has_album_cover and not already_downloaded:
        r = requests.get(album_cover_url)
        if r.status_code == 200:
            with open(img_fp, 'wb') as f:
                for chunk in r:
                    f.write(chunk)
# -
| nbs/04-album-covers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gumdropsteve/intro_to_python/blob/main/day_08/01_expanding_data_with_pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="wbl9EBLZJJ2J"
# # Going from 8 Columns to 33 Columns and then Joining the Datas Together with Pandas
#
# #### Where we are
# How data collected then looks...
# + id="UBkM8d1nJJ22" colab={"base_uri": "https://localhost:8080/", "height": 203} outputId="7592a998-66b2-4ecf-da34-00cf03db504d"
import pandas as pd
# Old (December) scrape: the raw 8-column layout.
pd.read_csv('https://github.com/gumdropsteve/intro_to_python/raw/main/day_08/data/las_vegas_2020-12-26.csv').sample(3)
# + [markdown] id="e3U8-epfJJ3C"
# #### Where we want to go
# How data collected now looks...
# + id="UPZiiucXJJ3D" colab={"base_uri": "https://localhost:8080/", "height": 240} outputId="a63af3e2-4cee-448f-fa4c-da48a33117db"
# New (January) scrape: the expanded layout we aim to reproduce.
pd.read_csv('https://github.com/gumdropsteve/intro_to_python/raw/main/day_08/data/las_vegas_2021-01-11.csv').sample(3)
# + id="fsH4FH0YJJ3F" colab={"base_uri": "https://localhost:8080/"} outputId="14273dac-469c-43d0-80a5-f0204f9217e5"
pd.read_csv('https://github.com/gumdropsteve/intro_to_python/raw/main/day_08/data/las_vegas_2021-01-11.csv').columns
# + id="6DFtfCw8JJ3G" colab={"base_uri": "https://localhost:8080/"} outputId="c25e8fd2-a13e-4e42-92e2-5dd521127d3b"
pd.read_csv('https://github.com/gumdropsteve/intro_to_python/raw/main/day_08/data/las_vegas_2021-01-11.csv').n_reviews
# + [markdown] id="b3cLH40OJJ3H"
# ## Let's get started
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.split.html
# + id="gRFzExkKJJ3H" colab={"base_uri": "https://localhost:8080/", "height": 157} outputId="494ddc9d-47a2-432f-ae6c-573b0ecfd6c4"
df = pd.read_csv('https://github.com/gumdropsteve/intro_to_python/raw/main/day_08/data/las_vegas_2020-12-26.csv')
df.tail(2)
# + [markdown] id="iQRECbUXJJ3I"
# #### Turning c into type & location
# + id="n52R2XdNJJ3I" colab={"base_uri": "https://localhost:8080/"} outputId="a03dd4de-4261-4aa9-e004-c2e9f9d8f40f"
# Default split is on whitespace -- too many pieces.
df['c'].str.split()
# + id="1flrOdqIJJ3J" colab={"base_uri": "https://localhost:8080/"} outputId="74da6166-a9d3-4d63-e282-ca9e75c0ccaa"
# I only wanted 2 things
df['c'].str.split(' in ')
# + id="gPsk_JvlJJ3J" colab={"base_uri": "https://localhost:8080/"} outputId="1804b73a-5cc7-4ff5-8ef5-6b806cfede82"
# [0] here selects the first row's list, not a new column.
df['c'].str.split(' in ')[0]
# + id="LcXwel1jJJ3K" colab={"base_uri": "https://localhost:8080/"} outputId="0cff90ac-7b91-42a5-ca30-84a3a54bbb87"
# But I wanted 2 columns
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.split.html#pandas.Series.str.split
df['c'].str.split(' in ', expand=True)[0]
# + id="DNP5PCHgJJ3L" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="1860d7bc-a6f1-4267-b578-ee8757b5ab9e"
df['c'].str.split(' in ', expand=True)
# + id="y6PWk6spJJ3L" colab={"base_uri": "https://localhost:8080/"} outputId="cdf01f8f-1783-46df-e27b-845dd513c1ac"
df['c'].str.split(' in ', expand=True)[1]
# + id="22Xqvn1oJJ3M" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c33a941b-1b94-40c8-d24a-f5198dacce76"
# With expand=True, columns [0]/[1] become the new type and location columns.
type_location_values = df['c'].str.split(' in ', expand=True)
df['type'] = type_location_values[0]
df['location'] = type_location_values[1]
df
# + id="Wkm1JvWfJJ3U" colab={"base_uri": "https://localhost:8080/", "height": 140} outputId="913264e6-5ee7-4424-de2e-9d0b38b320d9"
# we no longer need the c column
df.drop('c', inplace=True, axis=1) # inplace for in place (updates the og dataframe), axis=1 for columns
df.sample(2)
# + [markdown] id="E63rgUAsJJ3X"
# #### Turning h into n_reviews
# + id="aubnChXOJJ3X" colab={"base_uri": "https://localhost:8080/"} outputId="25d87352-f9db-45f4-b763-aee515d8103b"
df.h
# + id="5szL6mpuJJ3Y" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="b1d67ab6-7598-4269-c6aa-75d1eb972707"
df.h.str.split(expand=True)
# + id="4mCtMfOBJJ3Z" colab={"base_uri": "https://localhost:8080/"} outputId="4bdf04d6-a62a-44fc-9721-12510ef4aa5b"
# take just the number of reviews
df.h.str.split(expand=True)[0]
# + id="ufJawCZvJJ3a" colab={"base_uri": "https://localhost:8080/"} outputId="3849c0e1-28c1-404e-c784-2d59742e5036"
import numpy as np
# BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented replacement (same result: float64).
df.h.str.split(expand=True)[0].astype(float)
# + id="ocMsHzUoJJ3f" colab={"base_uri": "https://localhost:8080/", "height": 237} outputId="3b79ef52-9499-404b-ecb9-1aca500d5d69"
df['n_reviews'] = df.h.str.split(expand=True)[0].astype(float)
# drop without inplace returns a dataframe, so overwrite the og
df = df.drop('h', axis=1)
df.sample(3)
# + [markdown] id="ELWydknEJJ3l"
# #### Turn g into avg_rating
# + id="eJ9Ml55eJJ3n" colab={"base_uri": "https://localhost:8080/"} outputId="52976c8e-c73b-48e5-d358-adca9f3c4ab9"
# column g holds ';'-separated text whose right side starts with the rating
df['g']
# + id="B-cVn2avJJ3t" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="234ea720-e8c7-4f70-b22c-92ce1d59cf5f"
df['g'].str.split(';', expand=True)
# + id="jKsYWVF2JJ3w" colab={"base_uri": "https://localhost:8080/"} outputId="6ac2707f-34c4-42da-c55c-46989a898b8b"
# take the right side because the number is longer
df['g'].str.split(';', expand=True)[1]
# + id="38Hs4hovJJ30" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="87aa54be-aab7-4ab6-ae56-6c3d3872f286"
df['g'].str.split(';', expand=True)[1].str.split(expand=True)
# + id="ZHmzJjzkJJ33" colab={"base_uri": "https://localhost:8080/"} outputId="3920ed7f-3366-4809-b96a-6d6991e9f7f9"
# second split on whitespace isolates the leading rating token
df['g'].str.split(';', expand=True)[1].str.split(expand=True)[0]
# + id="01M2i-HvJJ34" colab={"base_uri": "https://localhost:8080/"} outputId="c93dbeb7-f97b-4ed8-d9ed-97a7a098182f"
df['g'].str.split(';', expand=True)[1].str.split(expand=True)[0].astype(np.float32)
# + id="MERU1iOwJJ35" colab={"base_uri": "https://localhost:8080/", "height": 186} outputId="90224d41-855a-4afe-fe2b-362c5fc38797"
# make the avg_rating
df['avg_rating'] = df['g'].str.split(';', expand=True)[1].str.split(expand=True)[0].astype(np.float32)
# forget the column it came from
df.drop('g', axis=1, inplace=True)
# show me a bit of the data
df.sample(3)
# + id="gLs10A6oJJ39" colab={"base_uri": "https://localhost:8080/"} outputId="8a46594d-e0d1-4155-85d4-5b765e09e139"
# are they all 4.902?
df.avg_rating.unique()
# + [markdown] id="dytCOZsIJJ3-"
# #### Turn a into url
# + id="NiFyM2giJJ4A" colab={"base_uri": "https://localhost:8080/", "height": 237} outputId="8b349402-ce18-4fca-9373-5beebec4cbf9"
# naive approach: copy the column, then drop the copy below in favor of rename
df['url'] = df['a']
df.sample(3)
# + id="DyKkexEbJJ4B"
df.drop('url', axis=1, inplace=True)
# + [markdown] id="9QURtmNUJJ4E"
# Or try [.rename()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rename.html)
# + id="OlLuW-r1JJ4G" colab={"base_uri": "https://localhost:8080/", "height": 186} outputId="5689d88d-1dc4-4e59-edde-2c5de352d969"
# first argument of .rename() is mapper which should be a dict
df = df.rename({'a':'url'}, axis=1) # you could turn on inplace=True for this
df.sample(3)
# + [markdown] id="Peb-z-REJJ4H"
# #### Turning b into title
# + id="3yfj7PqYJJ4I" colab={"base_uri": "https://localhost:8080/"} outputId="34ce71b1-ea06-4a97-85e3-8084b5f0f1e8"
# column b looks like "<title> - null - <location>"
df.b
# + id="3VwEcIupJJ4b" colab={"base_uri": "https://localhost:8080/"} outputId="3e9039eb-d679-400f-94ea-dfd6a507de22"
# split of the - null -
df.b.str.split(' - null - ')
# + id="RiEUEMIjJJ4d" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="e8a51afc-0c77-4387-9036-28d512ba61b8"
df.b.str.split(' - null - ', expand=True)
# + id="NTypFQDzJJ4e" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="f44e0d27-b06e-45d5-c700-b7d418ac8d7b"
# another way to rename columns
thing = df.b.str.split(' - null - ', expand=True)
thing.columns = ['title', 'location']
thing
# + id="gri8x_CLJJ4i" colab={"base_uri": "https://localhost:8080/"} outputId="20ebf391-5010-468c-a5cb-fa3dbfea6854"
# how you'd do it in 1 line
df['obnoxious_title'] = df.b.str.split(' - null - ', expand=True).rename({0:'title', 1:'location'}, axis=1)['title']
df['obnoxious_title']
# + id="7XDmJE_iJh4b"
df.drop('obnoxious_title', axis=1, inplace=True)
# + id="A7B_aRcFJJ4n" colab={"base_uri": "https://localhost:8080/", "height": 140} outputId="e03ce9b2-4dd5-4179-e284-4ee73cb1d3be"
# a more readable way to do it
thing = df.b.str.split(' - null - ', expand=True)
thing.columns = ['title', 'location']
df['b'] = thing['title']
df.rename({'b':'title'}, axis=1, inplace=True)
df.sample(2)
# + [markdown] id="0Iss3QDfXx2z"
# #### Turning e into price
# + id="90kq2_bHYPJP" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="72805c5a-9cd4-490e-97c2-5f5dbc4ae72d"
# column e looks like "$<amount> ..." -- first token carries the price
df.e.str.split(' ', expand=True)
# + id="aOXTcOQdX1Ic" colab={"base_uri": "https://localhost:8080/"} outputId="566c7446-701b-4ad2-d2c7-bb3f22209b56"
df.e.str.split(' ', expand=True)[0].str.slice(1) # use slice to skip the $
# + id="6oaXZAPVYlqY" colab={"base_uri": "https://localhost:8080/"} outputId="f08a5114-dd61-4ee1-d703-408f8e23aa90"
# np.float was removed in NumPy 1.24; the builtin float is the same type
df.e.str.split(' ', expand=True)[0].str.slice(1).astype(float)
# + id="-WVBsUixYKVK" colab={"base_uri": "https://localhost:8080/", "height": 704} outputId="31452463-29b4-4663-aa50-1ce7edb487ab"
df['e'] = df.e.str.split(' ', expand=True)[0].str.slice(1).astype(float)
df = df.rename({'e':'price'}, axis=1)
df
# + [markdown] id="W6C3eXbTJJ4p"
# #### Now what about f?
# Needs to transform into 19 bool columns...
# + id="6dBWgV4CJJ4q" colab={"base_uri": "https://localhost:8080/"} outputId="fa907a04-5b1b-4677-f35e-42e6310f812e"
# column f is a '·'-separated amenity list
df.f.str.split('·')
# + id="8CMrvTGFZgnV" colab={"base_uri": "https://localhost:8080/"} outputId="d412acd2-0af5-498b-dcd8-3884e7c56b79"
# quick check for one amenity (bug fix: 'Freeparking' was a typo -- the
# amenity is spelled 'Free parking', so the old check always returned False)
df.f.str.split('·').apply(lambda x: 'Free parking' in x)
# + id="mjjlMQMUfxXU" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="d37f783b-e020-4cb9-9044-f513cdc0051f"
# replace f column with list version of f column
df['f'] = df.f.str.split('·')
# lists of possible amenities and their boolean columns (kept in lockstep order)
possible_amenities = ['Gym', 'Wifi', 'Self check-in', 'Air conditioning', 'Pets allowed', 'Indoor fireplace',
                      'Hot tub', 'Free parking', 'Pool', 'Kitchen', 'Breakfast', 'Elevator', 'Washer', 'Dryer',
                      'Heating', 'Waterfront', 'Dishwasher', 'Beachfront', 'Ski-in/Ski-out']
bool_column_amenities = ["gym_bool", "wifi_bool", "self_check_in_bool", "air_conditioning_bool", "pets_allowed_bool",
                         "indoor_fireplace_bool", "hot_tub_bool", "free_parking_bool", "pool_bool", "kitchen_bool",
                         "breakfast_bool", "elevator_bool", "washer_bool", "dryer_bool", "heating_bool", "waterfront_bool",
                         "dishwasher_bool", "beachfront_bool", "ski_in_ski_out_bool"]
# one boolean column per amenity; zip pairs each amenity with its column name
# (apply runs inside the loop iteration, so the closure sees the right amenity)
for amenity, bool_col in zip(possible_amenities, bool_column_amenities):
    df[bool_col] = df['f'].apply(lambda x: amenity in x)
df
# + id="ZvaSFlIBfyX5" colab={"base_uri": "https://localhost:8080/"} outputId="e95cae80-0066-48e1-9234-9aaca98b78f2"
# sanity check: how many listings have each amenity
for c in bool_column_amenities:
    print(f"{np.sum(df[c])} {c}")
# + id="X3543DiVaV1U" colab={"base_uri": "https://localhost:8080/", "height": 442} outputId="d8904a35-189b-4af1-bd0f-03a4b304f2ff"
df = df.drop('f', axis=1)
df.sample(3)
# + [markdown] id="1GmH4_VGhH21"
# #### Turning d into guests, bedrooms, beds, is_studio, baths, half_baths,shared_baths
# + id="b22s7Pn1gPxJ" colab={"base_uri": "https://localhost:8080/"} outputId="e016ea94-e8be-437e-ebcf-ffde557b4200"
# column d looks like "2 guests · 1 bedroom · 1 bed · 1 bath"
df.d
# + id="zwQquYxKgbo8" colab={"base_uri": "https://localhost:8080/"} outputId="2d4f7962-1a53-470d-d7e5-f8f34c3c8c90"
# how's it look as a list?
df.d.str.split(' · ')
# + id="2ylEyTgmjAi8" colab={"base_uri": "https://localhost:8080/"} outputId="ac78cb26-a2e9-402f-f341-a23a1e823f7b"
df.d.str.split(' · ').apply(lambda x: '2 guests' in x) # WIP
# + [markdown] id="iT3WK2vQ9swl"
# Too much of a headache, maybe next time...
#
# #### this is the current scrape extraction method...
# + id="j1FLcN_89vZP"
def get_room_info(listing):
    """
    Extract room info from a scraped listing element.

    `listing` is expected to be a parsed HTML element (e.g. bs4 Tag) whose
    div with class '_kqh46o' contains text like
    '2 guests · 1 bedroom · 1 bed · 1 bath'.

    Returns a dict with keys: guests, bedrooms, beds, is_studio, baths,
    half_baths, shared_baths. Counts are the original string tokens
    (e.g. '2'); missing fields are None, except is_studio which is a bool.
    """
    room_info = listing.find('div', {'class', '_kqh46o'}).text
    split_info = [i.split() for i in room_info.split(' · ')]
    room_dict = {}
    for part in split_info:
        if part in (['Studio'], ['Half-bath']):
            # entries with no leading count
            if part[0] == 'Studio':
                room_dict['is_studio'] = True
            room_dict[part[0]] = 0
        elif len(part) == 2:
            # normal '<count> <label>' pair, e.g. '2 guests'
            room_dict[part[1]] = part[0]
        elif len(part) == 3:
            # e.g. '2 shared baths' -> key 'shared-baths'
            room_dict['-'.join(part[1:])] = part[0]
        else:
            print(f'unexpected room_info | unexpected split_info len(i)=={len(part)}!=2!=3\n{part}')
            room_dict[' '.join(part)] = part[0]
    # normalise singular / studio variants onto the plural keys
    if 'bedrooms' not in room_dict:
        if 'bedroom' in room_dict:
            room_dict['bedrooms'] = room_dict['bedroom']
        elif 'Studio' in room_dict:
            # a studio counts its 'Studio' entry (0) as the bedroom count
            room_dict['bedrooms'] = room_dict['Studio']
        else:
            print('weird bedrooms 1')
            room_dict['bedrooms'] = None
    if 'baths' not in room_dict:
        room_dict['baths'] = room_dict.get('bath')
    room_dict['half_baths'] = room_dict.get('Half-bath')
    room_dict['shared_baths'] = room_dict.get('shared-baths')
    room_dict['is_studio'] = room_dict.get('is_studio', False)
    # bug fix: guests previously copied the beds count; also fall back to
    # the singular 'bed'/'guest' keys like bedroom/bath above
    room_dict['beds'] = room_dict.get('beds', room_dict.get('bed'))
    room_dict['guests'] = room_dict.get('guests', room_dict.get('guest'))
    keep = ('guests', 'bedrooms', 'beds', 'is_studio', 'baths', 'half_baths', 'shared_baths')
    return {key: value for key, value in room_dict.items() if key in keep}
# + [markdown] id="eXm6rFEQ_IYq"
# #### just switch it to something useful...
# + id="ZYEfs0gz-Zd5"
def get_room_info(room_info):
    """
    Parse a room-info string like '2 guests · 1 bedroom · 1 bed · 1 bath'
    into a dict with keys: guests, bedrooms, beds, is_studio, baths,
    half_baths, shared_baths.

    Counts are returned as the original string tokens (e.g. '2'); missing
    fields are None, except is_studio which is a bool.
    """
    split_info = [i.split() for i in room_info.split(' · ')]
    room_dict = {}
    for part in split_info:
        if part in (['Studio'], ['Half-bath']):
            # entries with no leading count
            if part[0] == 'Studio':
                room_dict['is_studio'] = True
            room_dict[part[0]] = 0
        elif len(part) == 2:
            # normal '<count> <label>' pair, e.g. '2 guests'
            room_dict[part[1]] = part[0]
        elif len(part) == 3:
            # e.g. '2 shared baths' -> key 'shared-baths'
            room_dict['-'.join(part[1:])] = part[0]
        else:
            print(f'unexpected room_info | unexpected split_info len(i)=={len(part)}!=2!=3\n{part}')
            room_dict[' '.join(part)] = part[0]
    # normalise singular / studio variants onto the plural keys
    if 'bedrooms' not in room_dict:
        if 'bedroom' in room_dict:
            room_dict['bedrooms'] = room_dict['bedroom']
        elif 'Studio' in room_dict:
            # a studio counts its 'Studio' entry (0) as the bedroom count
            room_dict['bedrooms'] = room_dict['Studio']
        else:
            print('weird bedrooms 1')
            room_dict['bedrooms'] = None
    if 'baths' not in room_dict:
        room_dict['baths'] = room_dict.get('bath')
    room_dict['half_baths'] = room_dict.get('Half-bath')
    room_dict['shared_baths'] = room_dict.get('shared-baths')
    room_dict['is_studio'] = room_dict.get('is_studio', False)
    # bug fix: guests previously copied the beds count; also fall back to
    # the singular 'bed'/'guest' keys like bedroom/bath above
    room_dict['beds'] = room_dict.get('beds', room_dict.get('bed'))
    room_dict['guests'] = room_dict.get('guests', room_dict.get('guest'))
    keep = ('guests', 'bedrooms', 'beds', 'is_studio', 'baths', 'half_baths', 'shared_baths')
    return {key: value for key, value in room_dict.items() if key in keep}
# + [markdown] id="Z6N18GVk_Mr9"
# #### do data science
# + id="UDP0KvA1-dKr" outputId="80f77611-6c90-4644-a642-1afa29f99bb9" colab={"base_uri": "https://localhost:8080/", "height": 391}
new_values = []
# go through values (rows) from d column
for v in df['d'].values:
    # covert to list then to bool dict based on known possible values
    room_info = get_room_info(v)
    # add dict to new values list
    new_values.append(room_info)
# list of new columns column d will become
new_d_columns = ['guests', 'bedrooms', 'beds', 'is_studio', 'baths', 'half_baths', 'shared_baths']
# add new columns to dataframe
# (concat here is only a preview; the dataframe is actually updated below)
pd.concat([df, pd.DataFrame(new_values, columns=new_d_columns)], axis=1).sample(3)
# + id="bIdzwn_pAf1X" outputId="336c98d6-8a69-4156-e15d-32779812b39c" colab={"base_uri": "https://localhost:8080/", "height": 606}
# it worked! add the new columns and drop the d column
df = pd.concat([df, pd.DataFrame(new_values, columns=new_d_columns)], axis=1)
df = df.drop('d', axis=1)
df
# + [markdown] id="vx05sNvqBIdF"
# ## Final Testing
# Looks like we've got 33 columns. Time to see if we can join...
# + id="oIN4oSzTBT86" outputId="af9af35e-547a-485c-b742-e1f194ac1519" colab={"base_uri": "https://localhost:8080/", "height": 623}
updated_old_df = df.copy()
new_style_df = pd.read_csv('https://github.com/gumdropsteve/intro_to_python/raw/main/day_08/data/las_vegas_2021-01-11.csv')
# try to concat them
# axis=0 stacks the two scrapes row-wise (schemas must line up)
pd.concat([updated_old_df, new_style_df], axis=0)
# + id="cquPY1ZLB7gl" outputId="92195822-852c-4548-d3a3-a334de387aa1" colab={"base_uri": "https://localhost:8080/"}
concat_df = pd.concat([updated_old_df, new_style_df], axis=0)
# count distinct listings across both scrapes
len(concat_df['url'].unique())
| stack_3/day_08/01_expanding_data_with_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !pip install brewer2mpl
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.filterwarnings(action='once')

# shared font sizes for all plots in this notebook
large = 22; med = 16; small = 12
# bug fix: 'axes.titlesize' appeared twice in this dict literal; the first
# value (large) was silently overwritten, so only the effective one is kept
params = {'axes.titlesize': med,
          'legend.fontsize': med,
          'figure.figsize': (16, 10),
          'axes.labelsize': med,
          'xtick.labelsize': med,
          'ytick.labelsize': med,
          'figure.titlesize': large}
plt.rcParams.update(params)
# NOTE(review): the 'seaborn-whitegrid' style name was removed in
# matplotlib 3.6 -- fine for the pinned 3.0.0 printed below
plt.style.use('seaborn-whitegrid')
sns.set_style("white")
# %matplotlib inline
# Version
print(mpl.__version__) #> 3.0.0
print(sns.__version__) #> 0.9.0
# +
# Import dataset
midwest = pd.read_csv("https://raw.githubusercontent.com/selva86/datasets/master/midwest_filter.csv")
# Prepare Data
# Create as many colors as there are unique midwest['category']
categories = np.unique(midwest['category'])
colors = [plt.cm.tab10(i/float(len(categories)-1)) for i in range(len(categories))]
# Draw Plot for Each Category
plt.figure(figsize=(16, 10), dpi= 80, facecolor='w', edgecolor='k')
# one scatter call per category so each gets its own color and legend entry
for i, category in enumerate(categories):
    plt.scatter('area', 'poptotal',
                data=midwest.loc[midwest.category==category, :],
                s=20, c=colors[i], label=str(category))
# Decorations
plt.gca().set(xlim=(0.0, 0.1), ylim=(0, 90000),
              xlabel='Area', ylabel='Population')
plt.xticks(fontsize=12); plt.yticks(fontsize=12)
plt.title("Scatterplot of Midwest Area vs Population", fontsize=22)
plt.legend(fontsize=12)
plt.show()
# +
from matplotlib import patches
from scipy.spatial import ConvexHull
import warnings; warnings.simplefilter('ignore')
sns.set_style("white")
# Step 1: Prepare Data
midwest = pd.read_csv("https://raw.githubusercontent.com/selva86/datasets/master/midwest_filter.csv")
# As many colors as there are unique midwest['category']
categories = np.unique(midwest['category'])
colors = [plt.cm.tab10(i/float(len(categories)-1)) for i in range(len(categories))]
# Step 2: Draw Scatterplot with unique color for each category
# s='dot_size' reads marker sizes from the 'dot_size' column of the data
fig = plt.figure(figsize=(16, 10), dpi= 80, facecolor='w', edgecolor='k')
for i, category in enumerate(categories):
    plt.scatter('area', 'poptotal', data=midwest.loc[midwest.category==category, :], s='dot_size', c=colors[i], label=str(category), edgecolors='black', linewidths=.5)
# Step 3: Encircling
# https://stackoverflow.com/questions/44575681/how-do-i-encircle-different-data-sets-in-scatter-plot
def encircle(x,y, ax=None, **kw):
    # draw a polygon through the convex hull of the (x, y) points;
    # extra kwargs (ec, fc, alpha, ...) are passed to plt.Polygon
    if not ax: ax=plt.gca()
    p = np.c_[x,y]
    hull = ConvexHull(p)
    poly = plt.Polygon(p[hull.vertices,:], **kw)
    ax.add_patch(poly)
# Select data to be encircled
midwest_encircle_data = midwest.loc[midwest.state=='IN', :]
# Draw polygon surrounding vertices
encircle(midwest_encircle_data.area, midwest_encircle_data.poptotal, ec="k", fc="gold", alpha=0.1)
encircle(midwest_encircle_data.area, midwest_encircle_data.poptotal, ec="firebrick", fc="none", linewidth=1.5)
# Step 4: Decorations
plt.gca().set(xlim=(0.0, 0.1), ylim=(0, 90000),
              xlabel='Area', ylabel='Population')
plt.xticks(fontsize=12); plt.yticks(fontsize=12)
plt.title("Bubble Plot with Encircling", fontsize=22)
plt.legend(fontsize=12)
plt.show()
# -
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example 2: One-dimensional heat flow (exs2.py)
# This example is from the CALFEM manual.
#
# **Purpose:**
#
# Analysis of one-dimensional heat flow.
#
# **Description:**
#
# Consider a wall built up of concrete and thermal insulation. The outdoor
# temperature is −17 ◦C and the temperature inside is 20 ◦C. At the inside of
# the thermal insulation there is a heat source yielding $10 ~W/m^2$.
#
# 
# The wall is subdivided into five elements and the one-dimensional spring
# (analogy) element `spring1e` is used. Equivalent spring stiffnesses are
# $k_i = λ A/L$ for thermal conductivity and $k_i = A/R$ for thermal
# surface resistance. Corresponding spring stiffnesses per $m^2$ of the wall
# are:
# \begin{align}
# k_1 &= 1/0.04 = 25.0 ~W/K \\
# k_2 &= 1.7/0.070 = 24.3 ~W/K \\
# k_3 &= 0.040/0.100 = 0.4 ~W/K \\
# k_4 &= 1.7/0.100 = 17.0 ~W/K \\
# k_5 &= 1/0.13 = 7.7 ~W/K
# \end{align}
# A global system matrix K and a heat flow vector f are defined. The heat source
# inside the wall is considered by setting $f_4 = 10$. The element matrices
# `Ke` are computed using `spring1e`, and the function `assem` assembles the
# global stiffness matrix.
#
# The system of equations is solved using `solveq` with considerations to the
# boundary conditions in `bc` and `bcVal`. The prescribed temperatures are
# $T_1 = −17 ~^{\circ}C$ and $T_2 = 20~^{\circ}C$.
# Necessary modules are first imported.
import numpy as np
import calfem.core as cfc
# Next, the element topology is defined
# each row is one element's two degrees of freedom (wall layers 1..5)
Edof = np.array([
    [1,2],
    [2,3],
    [3,4],
    [4,5],
    [5,6]
])
# Create stiffness matrix K and load vector f
# NOTE(review): np.mat is a deprecated alias; plain np.zeros arrays would
# likely work with cfc.assem/solveq -- confirm before changing
K = np.mat(np.zeros((6,6)))
f = np.mat(np.zeros((6,1)))
# heat source of 10 W/m^2 at node 4 (inside of the thermal insulation)
f[3] = 10.0
# Define element properties (ep) and create element matrices for the different material layers.
# equivalent spring stiffnesses k_i in W/K (see derivation above)
ep1 = 25.0
ep2 = 24.3
ep3 = 0.4
ep4 = 17.0
ep5 = 7.7
# Element stiffness matrices
Ke1 = cfc.spring1e(ep1)
Ke2 = cfc.spring1e(ep2)
Ke3 = cfc.spring1e(ep3)
Ke4 = cfc.spring1e(ep4)
Ke5 = cfc.spring1e(ep5)
# Assemble all element matrices into the global stiffness matrix
# +
cfc.assem(Edof[0,:], K, Ke1)
cfc.assem(Edof[1,:], K, Ke2)
cfc.assem(Edof[2,:], K, Ke3)
cfc.assem(Edof[3,:], K, Ke4)
cfc.assem(Edof[4,:], K, Ke5)
print("Stiffness matrix K:")
print(K)
# -
# Define the boundary conditions and solve the system of equations
# +
# prescribed temperatures: T1 = -17 C (outdoor), T6 = 20 C (indoor)
bc = np.array([1,6])
bcVal = np.array([-17.0, 20.0])
a,r = cfc.solveq(K, f, bc, bcVal)
print("Displacements a:")
print(a)
print("Reaction forces r:")
print(r)
# -
#
| examples/.ipynb_checkpoints/exs2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm.autonotebook import tqdm
from joblib import Parallel, delayed
import umap
import pandas as pd
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
from avgn.signalprocessing.create_spectrogram_dataset import flatten_spectrograms
# ### Collect data
DATASET_ID = 'macaque_coo'
from avgn.visualization.projections import (
    scatter_projections,
    draw_projection_transitions,
)
df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'macaque.pickle'
syllable_df = pd.read_pickle(df_loc)
syllable_df[:3]
# quick distribution sanity checks on the spectrograms
fig, axs = plt.subplots(ncols=4, figsize=(24,6))
axs[0].hist([np.max(i) for i in syllable_df.spectrogram.values], bins=50);axs[0].set_title('max')
axs[1].hist([np.sum(i) for i in syllable_df.spectrogram.values], bins=50);axs[1].set_title('sum')
axs[2].hist((syllable_df.end_time - syllable_df.start_time).values, bins = 50); axs[2].set_title('len')
axs[3].hist([np.min(i) for i in syllable_df.spectrogram.values], bins=50);axs[3].set_title('min')
# ### cluster
# normalise each spectrogram to its own max, then flatten for UMAP
specs = list(syllable_df.spectrogram.values)
specs = [i / np.max(i) for i in specs]
specs_flattened = flatten_spectrograms(specs)
np.shape(specs_flattened)
# GPU-accelerated UMAP (RAPIDS cuml)
from cuml.manifold.umap import UMAP as cumlUMAP
cuml_umap = cumlUMAP(min_dist=0.25)
z = np.vstack(list(cuml_umap.fit_transform(specs_flattened)))
# ### variation across populations
fig, ax = plt.subplots(figsize=(15,15))
scatter_projections(projection=z, alpha=.5, labels = syllable_df.indv.values, s=10, ax = ax)
#ax.set_xlim([-15,15])
from avgn.visualization.projections import scatter_spec
np.shape(z), np.shape(specs)
from avgn.utils.general import save_fig
from avgn.utils.paths import FIGURE_DIR, ensure_dir
scatter_spec(
    z,
    specs,
    column_size=15,
    #x_range = [-5.5,7],
    #y_range = [-10,10],
    pal_color="hls",
    color_points=False,
    enlarge_points=20,
    figsize=(10, 10),
    scatter_kwargs = {
        'labels': syllable_df.indv.values,
        'alpha':1.0,
        's': 3,
        "color_palette": 'Set2',
        'show_legend': False
    },
    matshow_kwargs = {
        'cmap': plt.cm.Greys
    },
    line_kwargs = {
        'lw':1,
        'ls':"solid",
        'alpha':0.25,
    },
    draw_lines=True
);
save_fig(FIGURE_DIR / 'macaque_coo', dpi=300, save_jpg=True)
| notebooks/02.1-project-UMAP/macaque-syllable-umap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pulp as p
# +
# problem 1
# minimise -4*x1 - y1 subject to the two linear constraints below
Lp_prob1 = p.LpProblem('Problem', p.LpMinimize)
x1 = p.LpVariable('x1',lowBound = 0)
y1 = p.LpVariable('y1',lowBound = 0)
# Objective function
Lp_prob1 += -4*x1 - y1
# Constraints
Lp_prob1 += 3*x1 + 6*y1 <= 15
Lp_prob1 += 8*x1 + 2*y1 <= 12
# non-negativity (redundant with lowBound=0, kept for clarity)
Lp_prob1 += x1 >= 0
Lp_prob1 += y1 >= 0
print(Lp_prob1)
status = Lp_prob1.solve()
print(p.LpStatus[status])
print(p.value(x1), p.value(y1), p.value(Lp_prob1.objective))
# +
# Problem 2
# maximise x2 + 2*y2 - z2 subject to the constraints below
Lp_prob2 = p.LpProblem('Problem', p.LpMaximize)
x2 = p.LpVariable('x2',lowBound = 0)
y2 = p.LpVariable('y2',lowBound = 0)
z2 = p.LpVariable('z2',lowBound = 0)
# Objective function
# bug fix: `z` was undefined (NameError) -- the third decision variable is z2
Lp_prob2 += x2 + 2*y2 - z2
# Constraints
Lp_prob2 += 4*y2 + z2 <= 40
Lp_prob2 += x2 - y2 <= 20
Lp_prob2 += 2*x2 + 4*y2 + 3*z2 <= 60
# non-negativity (redundant with lowBound=0, kept for clarity)
Lp_prob2 += x2 >= 0
Lp_prob2 += y2 >= 0
Lp_prob2 += z2 >= 0
# Produce the output
print(Lp_prob2)
status = Lp_prob2.solve()
print(p.LpStatus[status])
print(p.value(x2), p.value(y2), p.value(z2), p.value(Lp_prob2.objective))
# -
| .ipynb_checkpoints/Homework_2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>
# ## Line regression
# ### Exercise 1
# +
import random
import numpy as np
# draw n random integer points in [1, n] for the correlation exercises below
n = 10
x_list, y_list = [], []
for i in range(n):
    # NOTE: rebinds the loop variable i with the random draw (works, but shadows)
    i = random.randint(1,n)
    x_list.append(i)
for i in range(n):
    i = random.randint(1,n)
    y_list.append(i)
# sample means used by pearson()/factors() below
x_avg = sum(x_list)/len(x_list)
y_avg = sum(y_list)/len(y_list)
# #### Pearson's correlation coefficient
# +
def pearson(x, y, n, xavg, yavg):
    """Pearson correlation coefficient of the first n points of x and y,
    given their precomputed means xavg and yavg."""
    covariance = 0
    var_x = 0
    var_y = 0
    for xi, yi in zip(x[:n], y[:n]):
        dx = xi - xavg
        dy = yi - yavg
        covariance += dx * dy
        var_x += dx**2
        var_y += dy**2
    return covariance / ((var_x**0.5)*(var_y**0.5))
print("Współczynnik korelacji Pearsona: ",pearson(x_list, y_list, n, x_avg, y_avg), "dla n =", n)
# -
# ### Exercise 2
# #### Regression coefficient
# +
def factors(x, y, n):
    """
    Least-squares line fit over the first n points of x and y.

    Returns (a, b) for the regression line y = a*x + b.

    Bug fix: the original body ignored its x/y parameters entirely and read
    the module-level x_list/y_list/x_avg/y_avg globals instead; it now uses
    its arguments (identical result for the original call site).
    """
    xavg = sum(x[:n]) / n
    yavg = sum(y[:n]) / n
    up_sum = 0
    down_sum = 0
    for i in range(n):
        up_sum += (x[i] - xavg) * (y[i] - yavg)
        down_sum += (x[i] - xavg)**2
    a_factor = up_sum / down_sum
    b_factor = yavg - (a_factor * xavg)
    return a_factor, b_factor
print("Współczynniki prostej regresji a i b:",factors(x_list, y_list, n))
# -
# ### Exercise 3
# +
import pandas as pd
# Anscombe's quartet scraped from Wikipedia; table index 1 holds the data
dane_anscombe = pd.read_html("https://pl.wikipedia.org/wiki/Kwartet_Anscombe%E2%80%99a")
dane_anscombe[1]
# +
# convert every column of the table into a list of floats
column_names = dane_anscombe[1].columns.to_list()
values = ['x1', 'y1', 'x2', 'y2', 'x3', 'y3', 'x4', 'y4']
kolumny = []
cols = []
for j in range(len(column_names)):
    vals = dane_anscombe[1][column_names[j]].to_list()
    vals.pop(0)  # drop the repeated header row
    kolumny.append(vals)
    # bug fix: `cols` was never initialised (its init was commented out) and
    # indexed assignment on an empty list raises IndexError -- append instead
    cols.append([float(i) for i in kolumny[j]])
print(cols[2])
# +
import matplotlib.pyplot as plt
import numpy as np
# the four Anscombe quartets as (x, y) column pairs from `cols`
datasets = {
    'I': (cols[2], cols[1]),
    'II': (cols[2], cols[3]),
    'III': (cols[4], cols[5]),
    'IV': (cols[6], cols[7])
}
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(6, 6),
                        gridspec_kw={'wspace': 0.08, 'hspace': 0.08})
axs[0, 0].set(xlim=(0, 20), ylim=(2, 14))
axs[0, 0].set(xticks=(0, 10, 20), yticks=(4, 8, 12))
for ax, (label, (x, y)) in zip(axs.flat, datasets.items()):
    ax.text(0.1, 0.9, label, fontsize=20, transform=ax.transAxes, va='top')
    ax.tick_params(direction='in', top=True, right=True)
    ax.plot(x, y, 'ro')
    # degree-1 polyfit gives the least-squares line (slope p1, intercept p0)
    p1, p0 = np.polyfit(x, y, deg=1)
    ax.axline(xy1=(0, p0), slope=p1, color='b', lw=2)
plt.show()
# -
# ### Exercise 4
# +
# means for the pearson() calls below; datasets I-III share the same x column
x123_avg = sum(cols[2])/len(cols[2])
y1_avg = sum(cols[1])/len(cols[1])
y2_avg = sum(cols[3])/len(cols[3])
y3_avg = sum(cols[5])/len(cols[5])
x4_avg = sum(cols[6])/len(cols[6])
y4_avg = sum(cols[7])/len(cols[7])
# NOTE(review): n=10 uses only the first 10 of the 11 Anscombe points -- confirm intent
print('Współczynnik korelacji Pearsona dla I kwarty:', pearson(cols[2], cols[1], 10, x123_avg, y1_avg))
print('Współczynnik korelacji Pearsona dla II kwarty:', pearson(cols[2], cols[3], 10, x123_avg, y2_avg))
print('Współczynnik korelacji Pearsona dla III kwarty:', pearson(cols[2], cols[5], 10, x123_avg, y3_avg))
# bug fix: quartet IV correlated cols[6] with itself (trivially 1.0); y4 is cols[7]
print('Współczynnik korelacji Pearsona dla IV kwarty:', pearson(cols[6], cols[7], 10, x4_avg, y4_avg))
# -
# ### Exercise 5
# #### I set
# +
import statsmodels.api as sm
import statsmodels.formula.api as smf
# OLS fit for quartet I: rows are [x, y], so transpose before naming columns
dane = pd.DataFrame(data = [cols[2], cols[1]])
dane_ = dane.T
dane_.columns=['x', 'y']
mod = smf.ols(formula='y ~ x', data=dane_)
res = mod.fit()
print(res.summary())
# -
# #### II set
# +
dane = pd.DataFrame(data = [cols[2], cols[3]])
dane_ = dane.T
dane_.columns=['x', 'y']
mod = smf.ols(formula='y ~ x', data=dane_)
res = mod.fit()
print(res.summary())
# -
# #### III set
# +
dane = pd.DataFrame(data = [cols[4], cols[5]])
dane_ = dane.T
dane_.columns=['x', 'y']
mod = smf.ols(formula='y ~ x', data=dane_)
res = mod.fit()
print(res.summary())
# -
# #### IV set
# +
dane = pd.DataFrame(data = [cols[6], cols[7]])
dane_ = dane.T
dane_.columns=['x', 'y']
mod = smf.ols(formula='y ~ x', data=dane_)
res = mod.fit()
print(res.summary())
# -
# #### Seaborn
# +
import seaborn as sns
# regplot draws the scatter plus its fitted regression line for each quartet
sns.regplot(x=cols[2], y=cols[1], fit_reg=True)
plt.show()
# -
sns.regplot(x=cols[2], y=cols[3], fit_reg=True)
plt.show()
sns.regplot(x=cols[4], y=cols[5], fit_reg=True)
plt.show()
sns.regplot(x=cols[6], y=cols[7], fit_reg=True)
plt.show()
| Regression_and_statsmodels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Magnetic Field Profiles
#
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Load magnetic field profiles for dipole (d) and guassian (g) field prescriptions.
g_mag = np.genfromtxt('files/gauss.mag')
d_mag = np.genfromtxt('files/dipole.mag')
d2_mag = np.genfromtxt('files/dipole_r50.mag')
# Now we can load other files containing the density structures.
g_grad = np.genfromtxt('files/gauss.grad')
d_grad = np.genfromtxt('files/dipole.grad')
# For particular types of instabilities, we are concerned with the radial variation of $B/\rho$.
# +
d2_B = (d2_mag[:,-1] + np.log10(8.0*np.pi))/2.0  # dipole peaking at R = 0.5 Rstar
# Gaussian profile: peak value chosen so the field matches at the tachocline
btach = 5.0e3*np.exp(0.5*(0.5/0.2)**2)
g2_B = np.empty(g_mag.shape[0])
for i in range(g2_B.shape[0]):
    g2_B[i] = btach * np.exp(-0.5*((0.5 - 10**(g_grad[i,0] - 10.26))/0.2)**2) # Gaussian peaking at R = 0.5 Rstar
# +
# plot log10(B / rho) against fractional radius for the four prescriptions
fig, ax = plt.subplots(1, 1, figsize=(7., 5.))
ax.tick_params(which='major', axis='both', labelsize=16., length=14.)
ax.set_xlabel('Fractional Radius', fontsize=16.)
ax.set_ylabel('$\\log_{10}( B / \\rho )$', fontsize=18.)
ax.plot(10**(g_grad[:,0] - 10.26), g_mag[:,1] - g_grad[:,1], lw=3, c='#555555', label='Gaussian, 0.15$R_{\\star}$')
ax.plot(10**(d_grad[:,0] - 10.25), d_mag[:,1] - d_grad[:,1], lw=3, c='#b22222', label='Dipole, 0.15$R_{\\star}$')
ax.plot(10**(d2_mag[:,0] - 10.26), d2_B - d2_mag[:,2], lw=3, c='#0094b2', label='Dipole, 0.50$R_{\\star}$')
ax.plot(10**(g_grad[:,0] - 10.26), np.log10(g2_B) - g_grad[:, 1], lw=3, c='#000080', label='Gaussian, 0.50$R_{\\star}$')
ax.legend(loc=4)
# -
# Regions where $B/\rho$ has a negative slope are unstable to interchange modes. This is a significant problem for the Guassian radial field profile, which has should be unstable to interchange modes between 75% of the star, by radius. By constrast, however, the dipole magnetic field profile is only unstable over about 35% of the stellar interior, when the peak magnetic field is placed at $R = 0.15R_{\star}$.
#
# For both the Gaussian and the dipole radial profiles, this can be mitigated by shifting the peak field strength to $R = 0.50R_{\star}$, shown by the light and dark blue lines. We adopted this peak magnetic field location in Feiden & Chaboyer ([2014, A&A, 571, A70](http://adsabs.harvard.edu/abs/2014A%26A...571A..70F)) and found that we were able to generate necessary structrual changes to the models to reproduce the properties of CM Dra. Peak magnetic field strengths, in these instances, were around 40 kG and 113 kG for the dipole and Gaussian profiles, repsectively. Although this was more of an aside in the text, we have shown that field configurations with sub-MG magnetic fields and seeming stability to interchange modes can induce required radius changes.
#
# Estimate growth rate of instability, $\tau \propto H_p/v_a$.
Hp = 1.0e10  # rough approximation for deep interior
# Alfven velocities: v_a = B / sqrt(4 pi rho) (columns hold log10 values)
va_d = 10**d_mag[:,1]/np.sqrt(4.0*np.pi*10**d_grad[:,1])
va_g = 10**g_mag[:,1]/np.sqrt(4.0*np.pi*10**g_grad[:,1])
# Plot as a function of depth in the star, based on magnetic field profiles.
# +
fig, ax = plt.subplots(1, 1, figsize=(7., 5.))
ax.tick_params(which='major', axis='both', labelsize=16., length=14.)
ax.set_xlabel('Fractional Radius', fontsize=16.)
ax.set_ylabel('Linear Growth Rate [day]', fontsize=16.)
ax.semilogy(10**(g_grad[:,0] - 10.26), Hp/va_g/86400., lw=3, c='#555555', label='Gaussian')
ax.semilogy(10**(d_grad[:,0] - 10.26), Hp/va_d/86400., lw=3, c='#b22222', label='Dipole')
ax.legend(loc=1)
# -
# Growth of instabilities are slower in the Dipole configuration due to an increase in the Alfvén crossing time resulting from weaker magnetic fields, as compared to the Guassian profile. Instabilities grow rapidly in the deeper interior for the Gaussian profile, on timesales of about 1000 - 10,000 s (hours). In the dipole case, instabilities grow within about 100,000 s (days).
#
# One should also compare the local Alfvén velocity to convective velocities.
# +
# ratio of Alfven to convective velocity (convective velocity is the last .grad column)
fig, ax = plt.subplots(1, 1, figsize=(7., 5.))
ax.tick_params(which='major', axis='both', labelsize=16., length=14.)
ax.set_xlabel('Fractional Radius', fontsize=16.)
ax.set_ylabel('$v_a / v_c$', fontsize=22.)
ax.semilogy(10**(g_grad[:,0] - 10.26), va_g/g_grad[:,-1], lw=3, c='#555555', label='Gaussian')
ax.semilogy(10**(d_grad[:,0] - 10.26), va_d/d_grad[:,-1], lw=3, c='#b22222', label='Dipole')
ax.legend(loc=1)
# -
# Based on our prescriptions, the local Alfvén velocity is always greater than the convective velocity in regions susceptible to instabilities. Therefore, instabilities should not be affected too significantly by the presence of convective modes. There may be coupling of the modes in some regions of the dipole profile model, where $v_a \sim v_c$ at locations between $0.3 < R/R_{\star} 0.6$.
| Daily/20150912_mag_profiles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# These pickles were engineered in the previous notebook (see BX-EDA for details)
# Load the dataframes produced by the EDA notebook.
books = pd.read_pickle('books_cleaned.pkl')
user_reviews = pd.read_pickle('user_reviews.pkl')
# Quick reminder of what each table looks like.
books.head()
user_reviews.head()
print(user_reviews.shape)
print(books.shape)
# Combine the datasets on the shared book identifier.
combined = pd.merge(user_reviews, books, on = 'book_id')
combined.head()
# The image URL columns are of no use to the recommender, so drop them.
combined.drop(['image_small', 'image_medium', 'image_large'], axis = 1, inplace = True)
# +
# Drop all zero-rated rows: a score of 0 is an implicit rating we cannot rely on.
combined = combined.loc[combined['score'] != 0].copy()
# Only ~200,000 of the original ~1,200,000 rows survive this filter.
# -
# Also remove books that were only ever rated 0 (no rows left in `combined`).
books = books[books['book_id'].isin(combined.book_id.unique())].copy()
# Normalise titles for case-insensitive matching later on.
books['title'] = books['title'].apply(lambda x: x.lower())
# ##### prefers
# +
# Map each publisher to the median score of its books...
by_publisher = dict(combined.groupby('publisher')['score'].median())
# ...and attach that median as a numeric feature on every row, replacing the
# need to feed the categorical publisher column into the model.
combined['avg_publisher_score'] = combined['publisher'].apply(lambda x: by_publisher[x])
# +
# Same idea for authors.
by_author = dict(combined.groupby('author')['score'].median())
combined['avg_author_score'] = combined['author'].apply(lambda x: by_author[x])
# +
# Keep the text columns out of the model input; they are only needed again
# when presenting recommendations back to the user.
df = combined.drop(['title', 'author', 'publisher'], axis = 1).copy()
# -
# Final modelling dataframe.
df.tail(6)
df['user_id'].nunique()
df.shape
df['book_id'].nunique()
# Remove sparse data we cannot trust: books rated by fewer than 5 users and
# users who rated fewer than 10 books.
by_book = pd.DataFrame(df.groupby('book_id')['user_id'].count().sort_values(ascending = False)).reset_index()
by_book = by_book.rename(columns={'user_id': 'count'})
by_book = by_book[by_book['count'] >= 5]
by_user = pd.DataFrame(df.groupby('user_id')['book_id'].count().sort_values(ascending = False)).reset_index()
by_user = by_user.rename(columns={'book_id': 'count'})
by_user = by_user[by_user['count'] >= 10]
df = df[(df['user_id'].isin(by_user['user_id'].unique())) & (df['book_id'].isin(by_book['book_id'].unique()))].copy()
df
# Users x books rating matrix; missing ratings filled with 0.
user_book_pivot = df.pivot_table(columns='book_id',index='user_id', values='score').fillna(0)
user_book_pivot.head()
user_book_pivot.shape
user_book_pivot.iloc[0]
# +
# Sparse representation keeps the mostly-zero rating matrix memory-friendly.
from scipy.sparse import csr_matrix
user_book_sparse=csr_matrix(user_book_pivot)
# +
# Sanity check: cosine distance of a row to itself must be 0.
from scipy import spatial
spatial.distance.cosine(user_book_sparse[0].todense(), user_book_sparse[0].todense())
# -
# Euclidean distances from user 0 to the next ten users.
for i in range(1, 11):
    print(spatial.distance.euclidean(user_book_sparse[0].todense(), user_book_sparse[i].todense()))
user_book_sparse[0].todense()
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
cosine_similarity(user_book_sparse)
# Fit a KNN model over users; 22 neighbours include the query user itself.
neighbors = NearestNeighbors(n_neighbors = 22, metric='euclidean')
neighbors.fit(user_book_sparse)
user_book_pivot.iloc[666,:].values.reshape(1,-1)
user_book_pivot.iloc[1, :].values.reshape(1, -1)
# Nearest neighbours of user at positional index 666.
distances,suggestions=neighbors.kneighbors(user_book_pivot.iloc[666,:].values.reshape(1,-1))
user_book_pivot.iloc[666,:].values.reshape(1,-1)
distances
suggestions
for i in range(len(suggestions)):
    print(user_book_pivot.index[suggestions[i]])
# NOTE(review): `ff` is never used afterwards -- looks like leftover scratch work.
ff = pd.DataFrame(user_book_pivot.iloc[54])
user_book_pivot.head()
# +
# Aggregate the neighbours' ratings; drop the query user from its own
# suggestion list first.
recommended_books = {}
curr = 666
suggestions = suggestions[0]
suggestions = suggestions[suggestions != curr]
# -
suggestions
# +
# For every similar user, record each book they scored as
# [number of neighbours who rated it, running sum of their scores].
for i in suggestions:
    sim_user = pd.DataFrame(user_book_pivot.iloc[i])
    to_rename = sim_user.columns[0]
    sim_user = sim_user.rename(columns={to_rename: 'score'})
    sim_user = sim_user.reset_index()
    only_scored = sim_user[sim_user['score'] > 0].copy()
    for book in only_scored['book_id'].unique():
        if book in recommended_books.keys():
            recommended_books[book][0] += 1
            recommended_books[book][1] += only_scored.loc[only_scored['book_id'] == book, 'score'].values[0]
        else:
            recommended_books[book] = [1, only_scored.loc[only_scored['book_id'] == book, 'score'].values[0]]
# -
list(recommended_books.keys())[0]
books[books['book_id'] == '0446532231']
# Turn the accumulator into a table and convert the score sum into a mean.
recs = pd.DataFrame(recommended_books).T.reset_index().rename(columns = {'index': 'book_id', 0: 'num_of_occr', 1: 'rating'})
recs['rating'] = recs['rating'] / recs['num_of_occr']
recs.sort_values(by=['num_of_occr', 'rating'], ascending = [False, False])
# +
# A hand-picked preference list (lower-cased titles, matching `books.title`).
prefer = [
    "cocktail classics",
    "from one to one hundred",
    "one hundred years of solitude",
    "fahrenheit 451",
    "memoirs of a geisha",
    "life of pi",
    "the picture of dorian gray (modern library (paperback))",
    "the little prince",
    "alice's adventures in wonderland and through the looking glass",
    "the adventures of tom sawyer",
    "war and peace (wordsworth classics)",
    "old man and the sea",
    "julie and romeo",
    "fight club",
    "jurassic park"
]
# A title can map to several editions, hence a list of book_ids per title.
prefers_dict = {}
for book in prefer:
    prefers_dict[book] = books.loc[books.title == book, 'book_id'].values.tolist()
prefers = pd.DataFrame(columns=['book', 'id', 'score'])
# -
# +
# Interactively collect a score for each preferred title and fan it out to
# every edition of that title.
prefer = {}
c = 0
for book in prefers_dict.keys():
    score = int(input(f'How would you rate {book} book? '))
    for unique_id in prefers_dict[book]:
        prefers.loc[c] = [book, unique_id, score]
        c+=1
# -
prefers.drop('book', axis=1, inplace = True)
prefers = prefers.set_index('id').T
prefers
# Build a ratings vector aligned with the pivot's columns, zero everywhere
# except the books the user just scored.
new_vals = pd.DataFrame(columns = user_book_pivot.columns)
new_vals['0060929790']
new_vals.loc[0] = 0
for column in prefers.columns.values:
    if column in new_vals.columns.values:
        new_vals[column] = prefers[column].values[0]
new_vals = np.array(new_vals).reshape(1, -1)
prefers[column].values[0]
# NOTE(review): `suggestions` was flattened to a 1-D array above, so
# `suggestions[0]` is a scalar here -- this loop raises a TypeError as-is.
for i in suggestions[0]:
    print(i)
def create_recommendation(array):
    """Recommend books for a user described by a ratings vector.

    Finds the nearest users in the user/book rating matrix via the fitted
    `neighbors` model and aggregates the books those neighbours scored.

    Parameters
    ----------
    array : array-like of shape (1, n_books)
        Ratings vector aligned with ``user_book_pivot``'s columns.

    Returns
    -------
    pandas.DataFrame
        Columns ``book_id``, ``num_of_occr`` (how many neighbours rated the
        book) and ``rating`` (their mean score), sorted best-first.
    """
    distances, suggestions = neighbors.kneighbors(array)
    recommended_books = {}
    for i in suggestions[0]:
        # One neighbour's ratings as a (book_id, score) frame.
        sim_user = pd.DataFrame(user_book_pivot.iloc[i])
        sim_user = sim_user.rename(columns={sim_user.columns[0]: 'score'}).reset_index()
        only_scored = sim_user[sim_user['score'] > 0].copy()
        # Accumulate [occurrence count, score sum] per book.
        for book in only_scored['book_id'].unique():
            score = only_scored.loc[only_scored['book_id'] == book, 'score'].values[0]
            if book in recommended_books:
                recommended_books[book][0] += 1
                recommended_books[book][1] += score
            else:
                recommended_books[book] = [1, score]
    recs = pd.DataFrame(recommended_books).T.reset_index().rename(columns = {'index': 'book_id', 0: 'num_of_occr', 1: 'rating'})
    # Convert the accumulated score sum into a mean rating.
    recs['rating'] = recs['rating'] / recs['num_of_occr']
    # BUG FIX: sort_values returns a new DataFrame; the previous version
    # discarded it and returned the table unsorted.
    recs = recs.sort_values(by=['rating', 'num_of_occr'], ascending = [False, False])
    return recs
# NOTE(review): `df` has no 'age' or 'economy' columns at this point, so this
# line raises a KeyError -- it looks like a leftover pasted from another notebook.
df.apply(lambda x: x['age'] * x['economy'], axis=1)
books
# Run the recommender on the freshly-built preference vector.
check = create_recommendation(new_vals)
books
# Translate the recommended book ids back into human-readable titles.
for book_id in check.book_id.values:
    print(books.loc[books['book_id'] == book_id, 'title'].values[0])
| Books_recommender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="JSjG64ra4aFu" outputId="8d1d5d00-6406-436b-ef98-526fc8c893ab" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Mount Google Drive so checkpoints can be saved under /content/drive.
from google.colab import drive
drive.mount('/content/drive')
# + id="V8-7SARDZErK"
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# + id="acRFqJNrZErV" outputId="fbcac922-c789-4554-effa-3e0285c4f7af" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Normalise CIFAR-10 images to [-1, 1] per channel.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
# + id="ygZ-VSs6j-hf" outputId="c16c14b9-6298-4fcf-c980-7c7f0289aedd" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Classes car/bird/cat (labels 1/2/3) act as "foreground"; the rest are background.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
foreground_classes = {'car', 'bird', 'cat'}
fg_used = '123'
fg1, fg2, fg3 = 1,2,3
all_classes = {'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
background_classes = all_classes - foreground_classes
background_classes
# + id="bzU_HuQnEB29"
train = trainset.data
# + id="FAR6Zt2QgMdf"
label = trainset.targets
# + id="_USgDEwbMMKY" outputId="d7b81063-a517-410d-c916-8be18564abc1" colab={"base_uri": "https://localhost:8080/", "height": 36}
train.shape
# + id="yi-39bYIMZOd" outputId="4301ebf7-078e-49be-a0b4-3393d0bdc7d4" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Flatten each 32x32x3 image into a 3072-vector for the SVD.
train = np.reshape(train, (50000,3072))
train.shape
# + id="3qMpDn-xMleE"
# SVD of the whole training matrix; rows of vh are the right-singular
# directions in pixel space, ordered by decreasing singular value.
from numpy import linalg as LA
u, s, vh = LA.svd(train, full_matrices= False)
# + id="4o7zUUJJNavO" outputId="2c5581c0-5370-4fde-ca82-a07ddad77f58" colab={"base_uri": "https://localhost:8080/", "height": 36}
u.shape , s.shape, vh.shape
# + id="ZRlhUgdqSPyx" outputId="8aa00317-af7b-4e4a-9c27-2b344f0ebe14" colab={"base_uri": "https://localhost:8080/", "height": 54}
s
# + id="h31rbKmqVnZW" outputId="24fee178-b271-4052-ba5e-bec9dc4844d9" colab={"base_uri": "https://localhost:8080/", "height": 260}
vh
# + id="msBYeinxd-2n"
# + id="JgnzSxc5fUjF" outputId="eee31ec9-2763-4073-d4ee-7fd0cd6d00e4" colab={"base_uri": "https://localhost:8080/", "height": 260}
vh
# + id="LruQuedyVs4i" outputId="8966ee9a-cee8-41e8-aa45-21ed6890d3aa" colab={"base_uri": "https://localhost:8080/", "height": 260}
# Pick ten singular directions from the middle of the spectrum (rows
# 1062-1071 of vh); `dir` shadows the builtin of the same name.
dir = vh[1062:1072,:]
dir
# + id="m260DTW6V-Ka"
# One perturbation direction per foreground class.
u1 = dir[7,:]
u2 = dir[8,:]
u3 = dir[9,:]
# + id="R9OuIGt4WzlK" outputId="e642c210-3f00-4188-c772-d4a5700e3963" colab={"base_uri": "https://localhost:8080/", "height": 54}
u1
# + id="gswdCEwMW1-o" outputId="13f41f28-98b0-4a99-e650-30ce96c4f480" colab={"base_uri": "https://localhost:8080/", "height": 54}
u2
# + id="_GcGDZp7W2g6" outputId="1416c86d-d31a-42d1-8056-01e1c83aa146" colab={"base_uri": "https://localhost:8080/", "height": 54}
u3
# + id="c1ORV76hfd5u" outputId="d33b3ede-b945-4e46-e74a-83eacf01c37a" colab={"base_uri": "https://localhost:8080/", "height": 36}
len(label)
# + id="A45Ln5fwgSOW" outputId="4a7b104a-be2e-4822-f17e-3b02160cf4ea" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Shift every foreground-class image by 10% of its norm along its class's
# singular direction, planting a class-specific signal in pixel space.
cnt=0
for i in range(50000):
    if(label[i] == fg1):
        train[i] = train[i] + 0.1 * LA.norm(train[i]) * u1
        cnt+=1
    if(label[i] == fg2):
        train[i] = train[i] + 0.1 * LA.norm(train[i]) * u2
        cnt+=1
    if(label[i] == fg3):
        train[i] = train[i] + 0.1 * LA.norm(train[i]) * u3
        cnt+=1
    if(i%10000 == 9999):
        print("partly over")
print(cnt)
# + id="BQDi-wiHhZt_" outputId="6fae2734-a192-4581-ade6-3d76c93c0ab5" colab={"base_uri": "https://localhost:8080/", "height": 36}
train.shape, trainset.data.shape
# + id="As5AyKIUjhgA" outputId="98b6272a-4d3a-4b68-d94e-a2a016c90a09" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Restore the image shape and write the perturbed data back into the dataset.
train = np.reshape(train, (50000,32, 32, 3))
train.shape
# + id="Ncd6Cbc2j1jH"
trainset.data = train
# + id="tEhyHO5VYHG5"
# Apply the identical perturbation to the test split so train and test come
# from the same (shifted) distribution. `label` is rebound to the test targets.
test = testset.data
# + id="pNfT218kYHHF"
label = testset.targets
# + id="7Yvi0O2VYHHM" outputId="3c7b1cc4-34b0-4f83-c166-218067b8d5dc" colab={"base_uri": "https://localhost:8080/", "height": 36}
test.shape
# + id="xTNF0gS3YHHS" outputId="6c639219-3292-493a-fec2-3a44c4513628" colab={"base_uri": "https://localhost:8080/", "height": 36}
test = np.reshape(test, (10000,3072))
test.shape
# + id="PRLw2cTVYHIQ" outputId="89ae193b-6fab-44ec-f408-2614251386e3" colab={"base_uri": "https://localhost:8080/", "height": 36}
len(label)
# + id="fm7t7XqoYHIU" outputId="153cd6f7-e58b-4b8a-9198-782ebf2ef5dd" colab={"base_uri": "https://localhost:8080/", "height": 391}
# Same 10%-of-norm shift along u1/u2/u3 as done for the training images.
cnt=0
for i in range(10000):
    if(label[i] == fg1):
        test[i] = test[i] + 0.1 * LA.norm(test[i]) * u1
        cnt+=1
    if(label[i] == fg2):
        test[i] = test[i] + 0.1 * LA.norm(test[i]) * u2
        cnt+=1
    if(label[i] == fg3):
        test[i] = test[i] + 0.1 * LA.norm(test[i]) * u3
        cnt+=1
    if(i%1000 == 999):
        print("partly over")
print(cnt)
# + id="KHElHqmrYHIX" outputId="500d0409-adb5-4c51-eb63-1f17d20ef504" colab={"base_uri": "https://localhost:8080/", "height": 36}
test.shape, testset.data.shape
# + id="DY51kmksYHIb" outputId="962750cc-18da-4868-d513-08100fb57ebb" colab={"base_uri": "https://localhost:8080/", "height": 36}
test = np.reshape(test, (10000,32, 32, 3))
test.shape
# + id="AGDb6gpjYHIe"
testset.data = test
# + id="iLulDYL_ndvY" outputId="81dcab1c-87b5-4847-9550-cece0e8e6d56" colab={"base_uri": "https://localhost:8080/", "height": 36}
# Label lists of foreground vs. background classes, used in report tables.
fg = [fg1,fg2,fg3]
bg = list(set([0,1,2,3,4,5,6,7,8,9])-set(fg))
fg,bg
# + id="5Jk7ZzLSX-Mf"
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
# + id="gLiZ8Y0EkGE5"
# Split the (perturbed) training set into foreground- and background-class
# pools that the mosaic generator samples tiles from.
dataiter = iter(trainloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(5000):
    # FIX: use the builtin next(); the `.next()` iterator method is
    # Python 2 style and is gone from modern DataLoader iterators.
    images, labels = next(dataiter)
    for j in range(batch_size):
        if(classes[labels[j]] in background_classes):
            img = images[j].tolist()
            background_data.append(img)
            background_label.append(labels[j])
        else:
            img = images[j].tolist()
            foreground_data.append(img)
            foreground_label.append(labels[j])
# Convert the accumulated Python lists to tensors in one shot.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
# + id="seziBl0rkH0Y"
def imshow(img):
    """Display a (C, H, W) image, undoing the (0.5, 0.5) normalisation."""
    unnormalized = img / 2 + 0.5
    # matplotlib expects channels last, so move C from axis 0 to axis 2.
    plt.imshow(np.transpose(unnormalized, (1, 2, 0)))
    plt.show()
# + id="DmxEx0N3kOxZ" outputId="e138a252-7694-4caf-e997-932ab7ec0c8c" colab={"base_uri": "https://localhost:8080/", "height": 789}
# Visual sanity check: stitch three background tiles and three foreground
# tiles side by side, then combine the two strips into a 2x3 grid.
img1 = torch.cat((background_data[0],background_data[1],background_data[2]),1)
imshow(img1)
img2 = torch.cat((foreground_data[27],foreground_data[3],foreground_data[43]),1)
imshow(img2)
img3 = torch.cat((img1,img2),2)
imshow(img3)
print(img2.size())
# + id="gj4FREUUkUmp"
def create_mosaic_img(bg_idx,fg_idx,fg):
    """Assemble one 9-tile mosaic sample.

    bg_idx : indices into background_data for the eight background tiles
    fg_idx : index into foreground_data for the single foreground tile
    fg     : position (0-8) at which the foreground tile is placed

    Returns (tiles, label): a stack of 9 double tensors and the foreground
    class shifted down to 0/1/2.
    """
    tiles = []
    next_bg = iter(bg_idx)
    for pos in range(9):
        if pos == fg:
            tiles.append(foreground_data[fg_idx].type("torch.DoubleTensor"))
        else:
            tiles.append(background_data[next(next_bg)].type("torch.DoubleTensor"))
    # Foreground classes are fg1..fg3, so subtract fg1 to store labels as 0,1,2.
    label = foreground_label[fg_idx] - fg1
    return torch.stack(tiles), label
# + id="KGlWwKd0kfNh"
desired_num = 30000
mosaic_list_of_images =[]  # list of mosaic images, each saved as a stack of 9 tiles
fore_idx =[]  # position (0-8) of the foreground tile in each mosaic
mosaic_label=[]  # label of mosaic image = foreground class present in that mosaic
list_set_labels = []  # set of CIFAR labels appearing in each mosaic (diagnostics)
for i in range(desired_num):
    set_idx = set()
    # Eight random background tiles and one random foreground tile.
    bg_idx = np.random.randint(0,35000,8)
    set_idx = set(background_label[bg_idx].tolist())
    fg_idx = np.random.randint(0,15000)
    set_idx.add(foreground_label[fg_idx].item())
    fg = np.random.randint(0,9)
    fore_idx.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    mosaic_list_of_images.append(image_list)
    mosaic_label.append(label)
    list_set_labels.append(set_idx)
# + id="c3VamXHgkkk4"
class MosaicDataset(Dataset):
    """Torch dataset of mosaic images.

    Each item is a triple ``(mosaic, label, fore_idx)``: the stacked tiles,
    the foreground class, and the position (0-8) of the foreground tile.
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        """Store the parallel lists of mosaics, labels and foreground positions."""
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        # One entry per label; all three lists are the same length.
        return len(self.label)

    def __getitem__(self, idx):
        sample = (self.mosaic[idx], self.label[idx], self.fore_idx[idx])
        return sample
# Wrap the generated mosaics in a shuffled DataLoader; `batch` is also read
# by Module2.forward further down.
batch = 250
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
# + id="dgQ0htWqkqzo"
class Module1(nn.Module):
    """LeNet-style CNN scoring a single 32x32 tile.

    The final layer emits one scalar per tile, later softmaxed across the
    nine tiles of a mosaic to form attention weights.
    """

    def __init__(self):
        super(Module1, self).__init__()
        # Layer creation order is significant: it fixes the parameter
        # initialisation sequence under a seeded RNG.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10, 1)

    def forward(self, x):
        # Two conv/pool stages, then flatten into the fully-connected head.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 16 * 5 * 5)
        for fc in (self.fc1, self.fc2, self.fc3):
            x = F.relu(fc(x))
        return self.fc4(x)
# + id="XElkdct-kvQB"
class Module2(nn.Module):
    """Attention-style mosaic classifier.

    Module1 scores each of the 9 tiles; a softmax over the scores yields
    per-tile weights (alphas); the attention-weighted average image is then
    classified by a second LeNet-style head into the 3 foreground classes.

    forward returns (logits, alphas, averaged_image).
    """

    def __init__(self):
        super(Module2, self).__init__()
        self.module1 = Module1().double()   # shared per-tile "focus" scorer
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10,3)

    def forward(self,z): #z: (batch, 9, 3, 32, 32) batch of 9-tile mosaics
        # FIX: derive batch size and device from the input instead of relying
        # on the global `batch` and hard-coded "cuda" (which crashed on
        # CPU-only hosts and on partial final batches).
        n = z.shape[0]
        x = torch.zeros([n, 9], dtype=torch.float64, device=z.device)
        # Score every tile with the shared scorer.
        for i in range(9):
            x[:,i] = self.module1.forward(z[:,i])[:,0]
        # Softmax over tiles -> attention weights (alphas).
        x = F.softmax(x,dim=1)
        # Attention-weighted average of the 9 tiles. (A dead, discarded
        # torch.mul(...) statement from the original was removed here.)
        y = torch.zeros([n, 3, 32, 32], dtype=torch.float64, device=z.device)
        for i in range(9):
            x1 = x[:,i]
            y = y + torch.mul(x1[:,None,None,None],z[:,i])
        y = y.contiguous()
        # Classify the averaged image.
        y1 = self.pool(F.relu(self.conv1(y)))
        y1 = self.pool(F.relu(self.conv2(y1)))
        y1 = y1.contiguous()
        y1 = y1.reshape(-1, 16 * 5 * 5)
        y1 = F.relu(self.fc1(y1))
        y1 = F.relu(self.fc2(y1))
        y1 = F.relu(self.fc3(y1))
        y1 = self.fc4(y1)
        return y1 , x, y
# + id="TuIb2Y29kxWT"
# Instantiate the full model in double precision and move it to the GPU.
fore_net = Module2().double()
fore_net = fore_net.to("cuda")
# + id="hWsL2UojlNln"
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(fore_net.parameters(), lr=0.01, momentum=0.9)
# + id="enq--OcxkziH" outputId="3793bb5f-d425-4251-dbbe-a5dba276d49e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train until the average mini-batch loss of an epoch drops to 0.05,
# capped at 600 epochs.
nos_epochs = 600
for epoch in range(nos_epochs):  # loop over the dataset multiple times
    running_loss = 0.0
    cnt=0
    mini_loss = []
    # NOTE(review): `iteration` is computed but never used.
    iteration = desired_num // batch
    #training data set
    for i, data in enumerate(train_loader):
        inputs , labels , fore_idx = data
        inputs, labels, fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        # zero the parameter gradients
        optimizer.zero_grad()
        outputs, alphas, avg_images = fore_net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        mini = 40
        if cnt % mini == mini - 1:  # print every 40 mini-batches
            print('[%d, %5d] loss: %.3f' %(epoch + 1, cnt + 1, running_loss / mini))
            mini_loss.append(running_loss / mini)
            running_loss = 0.0
        cnt=cnt+1
    # Early stop once the epoch's average 40-batch loss is small enough.
    if(np.average(mini_loss) <= 0.05):
        break
print('Finished Training')
# + id="nkyMi1VBpq9a"
# Persist the trained weights to Drive, tagged by epoch and foreground set.
torch.save(fore_net.state_dict(),"/content/drive/My Drive/Research/mosaic_from_CIFAR_involving_bottop_eigen_vectors/fore_net_epoch"+str(epoch)+"_fg_used"+str(fg_used)+".pt")
# + [markdown] id="MH7XBEjfoxxy"
# #Train summary on Train mosaic made from Trainset of 50k CIFAR
# + id="r0feaaoewaYG"
fg = [fg1,fg2,fg3]
bg = list(set([0,1,2,3,4,5,6,7,8,9])-set(fg))
# + id="4opbA0J8mfES" outputId="0ee99579-6591-4200-9173-68a8d026a6f6" colab={"base_uri": "https://localhost:8080/", "height": 279}
# Evaluate on the training mosaics, cross-tabulating two binary outcomes:
# "focus true/false" = did the argmax attention weight land on the
# foreground tile; "pred true/false" = was the class prediction correct.
from tabulate import tabulate
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in train_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        outputs, alphas, avg_images = fore_net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            count += 1
            focus = torch.argmax(alphas[j])
            # Track how often the dominant tile weight exceeds 0.5.
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 30000 train images: %d %%' % ( 100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half",argmax_more_than_half)
print("argmax_less_than_half",argmax_less_than_half)
print(count)
print("="*100)
# Collect the same statistics as a row of the summary table.
table3 = []
entry = [1,'fg = '+ str(fg),'bg = '+str(bg),30000]
entry.append((100 * focus_true_pred_true / total))
entry.append( (100 * focus_false_pred_true / total))
entry.append( ( 100 * focus_true_pred_false / total))
entry.append( ( 100 * focus_false_pred_false / total))
entry.append( argmax_more_than_half)
train_entry = entry
table3.append(entry)
print(tabulate(table3, headers=['S.No.', 'fg_class','bg_class','data_points','FTPT', 'FFPT', 'FTPF', 'FFPF', 'avg_img > 0.5'] ) )
# + id="MTyRs2gymKjo"
# Build a 10k mosaic "test" set -- note it still samples from the TRAIN
# tile pools, so it measures generalisation to new mosaics, not new images.
test_images =[]  # list of mosaic images, each saved as a stack of 9 tiles
fore_idx_test =[]  # position of the foreground tile in each mosaic
test_label=[]  # label of mosaic image = foreground class present in that mosaic
test_set_labels = []
for i in range(10000):
    set_idx = set()
    bg_idx = np.random.randint(0,35000,8)
    set_idx = set(background_label[bg_idx].tolist())
    fg_idx = np.random.randint(0,15000)
    set_idx.add(foreground_label[fg_idx].item())
    fg = np.random.randint(0,9)
    fore_idx_test.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    test_images.append(image_list)
    test_label.append(label)
    test_set_labels.append(set_idx)
# + id="cIkBP9Q0mLFS"
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
# + [markdown] id="-3yi3Fw4o2Uq"
# #Test summary on Test mosaic made from Trainset of 50k CIFAR
# + id="_A-2WBNLr9Kl"
fg = [fg1,fg2,fg3]
bg = list(set([0,1,2,3,4,5,6,7,8,9])-set(fg))
# + id="Aoi0cellnFgp" outputId="7b46e778-b6d1-4648-845c-5cdc73e41e1b" colab={"base_uri": "https://localhost:8080/", "height": 279}
# Same FTPT/FFPT/FTPF/FFPF breakdown as for training, on the held-out
# mosaics built from training-set tiles.
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in test_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        outputs, alphas, avg_images = fore_net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half",argmax_more_than_half)
print("argmax_less_than_half",argmax_less_than_half)
print("="*100)
# Append this run as row 2 of the shared summary table.
entry = [2,'fg = '+ str(fg),'bg = '+str(bg),10000]
entry.append((100 * focus_true_pred_true / total))
entry.append( (100 * focus_false_pred_true / total))
entry.append( ( 100 * focus_true_pred_false / total))
entry.append( ( 100 * focus_false_pred_false / total))
entry.append( argmax_more_than_half)
test_entry = entry
table3.append(entry)
print(tabulate(table3, headers=['S.No.', 'fg_class','bg_class','data_points','FTPT', 'FFPT', 'FTPF', 'FFPF', 'avg_img > 0.5'] ) )
# + id="OaTKHP-VlTEh"
# Split the (perturbed) CIFAR TEST set into foreground/background tile
# pools; these rebind the names used by create_mosaic_img, so mosaics built
# after this point use genuinely unseen images.
dataiter = iter(testloader)
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(1000):
    # FIX: use the builtin next(); the `.next()` iterator method is
    # Python 2 style and is gone from modern DataLoader iterators.
    images, labels = next(dataiter)
    for j in range(batch_size):
        if(classes[labels[j]] in background_classes):
            img = images[j].tolist()
            background_data.append(img)
            background_label.append(labels[j])
        else:
            img = images[j].tolist()
            foreground_data.append(img)
            foreground_label.append(labels[j])
# Convert the accumulated Python lists to tensors in one shot.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
# + id="GInpsUGkoRUa"
# Build 10k mosaics from the UNSEEN test-set tile pools (7000 background /
# 3000 foreground tiles in the 10k CIFAR test split).
test_images =[]  # list of mosaic images, each saved as a stack of 9 tiles
fore_idx_test =[]  # position of the foreground tile in each mosaic
test_label=[]  # label of mosaic image = foreground class present in that mosaic
test_set_labels = []
for i in range(10000):
    set_idx = set()
    bg_idx = np.random.randint(0,7000,8)
    set_idx = set(background_label[bg_idx].tolist())
    fg_idx = np.random.randint(0,3000)
    set_idx.add(foreground_label[fg_idx].item())
    fg = np.random.randint(0,9)
    fore_idx_test.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    test_images.append(image_list)
    test_label.append(label)
    test_set_labels.append(set_idx)
# + id="CQ431ADEoRUn"
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
unseen_test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
# + [markdown] id="CAAQae39pOsh"
# # Test summary on Test mosaic made from Testset of 10k CIFAR
# + id="V2qYinVFsAfN"
fg = [fg1,fg2,fg3]
bg = list(set([0,1,2,3,4,5,6,7,8,9])-set(fg))
# + id="xqA1GFE4ojZ7" outputId="32b7ae06-3266-43d4-ed09-1037e9f37cb5" colab={"base_uri": "https://localhost:8080/", "height": 297}
# Same FTPT/FFPT/FTPF/FFPF breakdown, now on mosaics built from tiles the
# network has never seen (CIFAR test split).
correct = 0
total = 0
count = 0
flag = 1
focus_true_pred_true =0
focus_false_pred_true =0
focus_true_pred_false =0
focus_false_pred_false =0
argmax_more_than_half = 0
argmax_less_than_half =0
with torch.no_grad():
    for data in unseen_test_loader:
        inputs, labels , fore_idx = data
        inputs, labels , fore_idx = inputs.to("cuda"),labels.to("cuda"), fore_idx.to("cuda")
        outputs, alphas, avg_images = fore_net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        for j in range(labels.size(0)):
            focus = torch.argmax(alphas[j])
            if alphas[j][focus] >= 0.5 :
                argmax_more_than_half += 1
            else:
                argmax_less_than_half += 1
            if(focus == fore_idx[j] and predicted[j] == labels[j]):
                focus_true_pred_true += 1
            elif(focus != fore_idx[j] and predicted[j] == labels[j]):
                focus_false_pred_true += 1
            elif(focus == fore_idx[j] and predicted[j] != labels[j]):
                focus_true_pred_false += 1
            elif(focus != fore_idx[j] and predicted[j] != labels[j]):
                focus_false_pred_false += 1
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
print("total correct", correct)
print("total train set images", total)
print("focus_true_pred_true %d =============> FTPT : %d %%" % (focus_true_pred_true , (100 * focus_true_pred_true / total) ) )
print("focus_false_pred_true %d =============> FFPT : %d %%" % (focus_false_pred_true, (100 * focus_false_pred_true / total) ) )
print("focus_true_pred_false %d =============> FTPF : %d %%" %( focus_true_pred_false , ( 100 * focus_true_pred_false / total) ) )
print("focus_false_pred_false %d =============> FFPF : %d %%" % (focus_false_pred_false, ( 100 * focus_false_pred_false / total) ) )
print("argmax_more_than_half",argmax_more_than_half)
print("argmax_less_than_half",argmax_less_than_half)
print("="*100)
# Append this run as row 3 of the shared summary table.
entry = [3,'fg = '+ str(fg),'bg = '+str(bg),10000]
entry.append((100 * focus_true_pred_true / total))
entry.append( (100 * focus_false_pred_true / total))
entry.append( ( 100 * focus_true_pred_false / total))
entry.append( ( 100 * focus_false_pred_false / total))
entry.append( argmax_more_than_half)
test_entry = entry
table3.append(entry)
print(tabulate(table3, headers=['S.No.', 'fg_class','bg_class','data_points','FTPT', 'FFPT', 'FTPF', 'FFPF', 'avg_img > 0.5'] ) )
# + id="094OxX1jondE"
| 1_mosaic_data_attention_experiments/11_mosaic_from_CIFAR_involving_direction/using_higher_variance_direction/extra codes/fg_123_run1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# NOTE(review): a bare `pip install` is only valid as a notebook magic cell;
# in plain Python use `%pip install pyzx` or install beforehand.
pip install pyzx
import pyzx as zx
qubit_amount = 4
gate_count = 10
#Generate a random circuit of Clifford+T gates
circuit = zx.generate.cliffordT(qubit_amount, gate_count)
#If running in Jupyter, draw the circuit
zx.draw(circuit)
#Use one of the built-in rewriting strategies to simplify the circuit in place
zx.simplify.full_reduce(circuit)
#See the result
zx.draw(circuit)
| transpile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # cw 1
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
# Question:
# Write a program which will find all such numbers which are divisible by 7 but are not a multiple of 5,
# between 2000 and 3200 (both included).
# The numbers obtained should be printed in a comma-separated sequence on a single line.
# %%time
# better to use a Python list than a NumPy array for appending
import numpy as np

# cw 1: all numbers in [2000, 3200] divisible by 7 but not multiples of 5,
# printed as one comma-separated line.
numbers = [str(candidate) for candidate in range(2000, 3201)
           if np.mod(candidate, 7) == 0 and np.mod(candidate, 5) != 0]
print(','.join(numbers))
# # cw 2
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt|
# Question:
# Write a program which can compute the factorial of a given numbers.
# The results should be printed in a comma-separated sequence on a single line.
# Suppose the following input is supplied to the program:
# 8
# Then, the output should be:
# 40320
# +
# %%time
import numpy as np
# kept for compatibility with the original cell; no longer used
factorials = []


def factorial_finder(input_number):
    """Print and return input_number! (the factorial).

    Fix: the original collected *divisors* of input_number into the
    global list (and hit a modulo-by-zero on the first loop iteration)
    instead of computing the factorial; the exercise expects 8 -> 40320.
    """
    result = 1
    for factor in range(2, input_number + 1):
        result *= factor
    print(result)
    return result
# -
factorial_finder(100)
# # cw 3
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
# Question:
# With a given integral number n, write a program to generate a dictionary that contains (i, i*i) such that is an integral number between 1 and n (both included). and then the program should print the dictionary.
# Suppose the following input is supplied to the program:
# 8
# Then, the output should be:
# {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64}
# +
# %%time
import numpy as np

numbers = dict()


def number_dictionary(input_number):
    """Fill the module-level dict with {i: i*i} for 1 <= i <= input_number
    and print the whole dict."""
    numbers.update({value: np.power(value, 2)
                    for value in range(1, input_number + 1)})
    print(numbers)
# -
number_dictionary(8)
# # cw 6
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
# Question:
# Write a program that calculates and prints the value according to the given formula:
#
# Q = Square root of [(2 * C * D)/H]
# Following are the fixed values of C and H:
# C is 50. H is 30.
# D is the variable whose values should be input to your program in a comma-separated sequence.
# Example
# Let us assume the following comma separated input sequence is given to the program:
#
# 100,150,180
# The output of the program should be:
# 18,22,24
#
# Hints:
# If the output received is in decimal form, it should be rounded off to its nearest value (for example, if the output received is 26.0, it should be printed as 26)
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
import numpy as np


def cw6_fun(D):
    """For each comma-separated value d in D, print the integer part of
    Q = sqrt((2 * C * d) / H), with fixed C = 50 and H = 30."""
    C = 50.
    H = 30.
    results = []
    for token in D.split(","):
        q_squared = np.divide(np.multiply(2 * C, int(token)), H)
        results.append(str(int(np.sqrt(q_squared))))
    print(','.join(results))
# %%time
cw6_fun("100,150,180")
# # cw 7
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Write a program which takes 2 digits, X,Y as input and generates a 2-dimensional array. The element value in the i-th row and j-th column of the array should be i*j.
# Note: i=0,1,...,X-1; j=0,1,...,Y-1.
# Example
# Suppose the following inputs are given to the program:
# 3,5
# Then, the output of the program should be:
# [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 6, 8]]
#
# Hints:
# Note: In case of input data being supplied to the question, it should be assumed to be a console input in a comma-separated form.
# +
# %%time
import numpy as np


def cw7(in_number):
    """Print an X-by-Y table whose [i][j] element is i*j, for input "X,Y"."""
    parts = in_number.split(",")
    num_rows = int(parts[0])
    num_cols = int(parts[1])
    table = [[row * col for col in range(num_cols)] for row in range(num_rows)]
    print(table)
# -
# %%time
cw7("3,5")
# # cw 8
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Write a program that accepts a comma separated sequence of words as input and prints the words in a comma-separated sequence after sorting them alphabetically.
# Suppose the following input is supplied to the program:
# without,hello,bag,world
# Then, the output should be:
# bag,hello,without,world
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
import numpy as np


def cw8(words):
    """Sort the comma-separated words alphabetically and print them."""
    print(','.join(sorted(words.split(","))))
cw8("dupa,kurwa,pizda,huj")
# # cw 9
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
# Question
# Write a program that accepts sequence of lines as input and prints the lines after making all characters in the sentence capitalized.
# Suppose the following input is supplied to the program:
# Hello world
# Practice makes perfect
# Then, the output should be:
# HELLO WORLD
# PRACTICE MAKES PERFECT
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
import numpy as np


def cw9(words):
    """Print every space-separated word converted to upper case."""
    print(' '.join(word.upper() for word in words.split(" ")))
cw9("Hello world Practice makes perfect")
# # cw 10
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
# Question:
# Write a program that accepts a sequence of whitespace separated words as input and prints the words after removing all duplicate words and sorting them alphanumerically.
# Suppose the following input is supplied to the program:
# hello world and practice makes perfect and hello world again
# Then, the output should be:
# again and hello makes perfect practice world
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# We use set container to remove duplicated data automatically and then use sorted() to sort the data.
# +
# %%time
import numpy as np
from collections import Counter


def cw10(words):
    """Print the unique words of a space-separated sentence, sorted
    alphanumerically (iterating a Counter yields its distinct keys)."""
    unique_words = sorted(Counter(words.split(" ")))
    print(' '.join(unique_words))
# -
# %%time
cw10("hello world and practice makes perfect and hello world again")
# # cw 11
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Write a program which accepts a sequence of comma separated 4 digit binary numbers as its input and then check whether they are divisible by 5 or not. The numbers that are divisible by 5 are to be printed in a comma separated sequence.
# Example:
# 0100,0011,1010,1001
# Then the output should be:
# 1010
# Notes: Assume the data is input by console.
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
import numpy as np


def cw11(numbers):
    """Print (in decimal) the comma-separated binary inputs divisible by 5."""
    decimals = [int(token, 2) for token in numbers.split(",")]
    print(','.join(str(value) for value in decimals if value % 5 == 0))
cw11('0100,0011,1010,1001')
# # cw 12
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Write a program, which will find all such numbers between 1000 and 3000 (both included) such that each digit of the number is an even number.
# The numbers obtained should be printed in a comma-separated sequence on a single line.
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
def cw12(numbers):
    """Print every even number in the inclusive range given as "low,high"."""
    bounds = numbers.split(",")
    if len(bounds) != 2:
        print("huj dawaj 2 liczby stara kurwo")
        return
    low, high = int(bounds[0]), int(bounds[1])
    evens = [str(value) for value in range(low, high + 1) if value % 2 == 0]
    print(','.join(evens))
# %%time
cw12("30,60")
# # cw 13
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Write a program that accepts a sentence and calculate the number of letters and digits.
# Suppose the following input is supplied to the program:
# hello world! 123
# Then, the output should be:
# LETTERS 10
# DIGITS 3
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
def cw13(thing):
    """Count letters and digits in a sentence; print "LETTERS n DIGITS m".

    Fixes: the original used the `mark is " "` identity comparison
    (implementation-dependent for strings) and counted every non-space,
    non-digit character (e.g. '!') as a letter, so "hello world! 123"
    reported 11 letters instead of the expected 10.  str.isalpha() /
    str.isdigit() classify characters correctly.
    """
    letters = sum(1 for mark in thing if mark.isalpha())
    digits = sum(1 for mark in thing if mark.isdigit())
    print("LETTERS",letters, "DIGITS",digits)
cw13("dupa huj 1234")
# # cw 14
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Write a program that accepts a sentence and calculate the number of upper case letters and lower case letters.
# Suppose the following input is supplied to the program:
# Hello world!
# Then, the output should be:
# UPPER CASE 1
# LOWER CASE 9
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# %%time
def cw14(words):
    """Count upper- and lower-case letters; print both counts.

    Fixes: the original used the `letter is " "` identity comparison and
    counted every non-space, non-lowercase character (digits,
    punctuation) as upper case, so "Hello world!" reported 2 upper-case
    letters instead of 1.  str.isupper()/str.islower() only count
    cased letters.
    """
    lower_case = sum(1 for letter in words if letter.islower())
    upper_case = sum(1 for letter in words if letter.isupper())
    print("UPPER CASE ",upper_case,"LOWER CASE",lower_case)
cw14("dupa huj DUPA")
# # cw 15
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
#
# Question:
# Write a program that computes the value of a+aa+aaa+aaaa with a given digit as the value of a.
# Suppose the following input is supplied to the program:
# 9
# Then, the output should be:
# 11106
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# # cw 16
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Use a list comprehension to square each odd number in a list. The list is input by a sequence of comma-separated numbers.
# Suppose the following input is supplied to the program:
# 1,2,3,4,5,6,7,8,9
# Then, the output should be:
# 1,3,5,7,9
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
#
def cw16(numbers):
    """Print the odd values from a comma-separated list of integers."""
    odd_values = [str(int(token)) for token in numbers.split(",")
                  if int(token) % 2 == 1]
    print(','.join(odd_values))
cw16("1,2,3,4,5,6,7,8,9")
# # cw 18
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# A website requires the users to input username and password to register. Write a program to check the validity of password input by users.
# Following are the criteria for checking the password:
# 1. At least 1 letter between [a-z]
# 2. At least 1 number between [0-9]
# 1. At least 1 letter between [A-Z]
# 3. At least 1 character from [$#@]
# 4. Minimum length of transaction password: 6
# 5. Maximum length of transaction password: 12
# Your program should accept a sequence of comma separated passwords and will check them according to the above criteria. Passwords that match the criteria are to be printed, each separated by a comma.
# Example
# If the following passwords are given as input to the program:
# ABd1234@1,a F1#,2w3E*,2We3345
# Then, the output of the program should be:
# ABd1234@1
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
def cw18(passwords):
    """Print the comma-separated passwords that satisfy all rules: at
    least one lowercase letter, one digit, one uppercase letter, one of
    "$#@", and a length between 6 and 12 inclusive.

    Fixes vs the original:
    - `letter is '$'` identity comparisons replaced by membership tests
      (identity on string literals is implementation-dependent);
    - length bounds corrected to the stated spec (6 <= len <= 12; the
      original's strict `>`/`<` rejected lengths exactly 6 and 12);
    - the five per-character scans collapsed into any() expressions.
    """
    good_passwords = []
    for password in passwords.split(","):
        has_lower = any(ch.islower() for ch in password)
        has_digit = any(ch.isdigit() for ch in password)
        has_upper = any(ch.isupper() for ch in password)
        has_special = any(ch in "$#@" for ch in password)
        length_ok = 6 <= len(password) <= 12
        if has_lower and has_digit and has_upper and has_special and length_ok:
            good_passwords.append(password)
    print(good_passwords)
cw18("ABd1234@1,a F1#,2w3E*,2We3345")
# # cw 19
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
#
# Question:
# You are required to write a program to sort the (name, age, height) tuples by ascending order where name is string, age and height are numbers. The tuples are input by console. The sort criteria is:
# 1: Sort based on name;
# 2: Then sort based on age;
# 3: Then sort by score.
# The priority is that name > age > score.
# If the following tuples are given as input to the program:
# Tom,19,80
# John,20,90
# Jony,17,91
# Jony,17,93
# Json,21,85
# Then, the output of the program should be:
# [('John', '20', '90'), ('Jony', '17', '91'), ('Jony', '17', '93'), ('Json', '21', '85'), ('Tom', '19', '80')]
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
# We use itemgetter to enable multiple sort keys.
from operator import itemgetter
peoples = []


def cw19(people):
    """Append one "name,age,score" record (kept as strings) to the
    module-level list, then print all records sorted by name, age,
    score (string comparison on every field)."""
    peoples.append(people.split(","))
    print(sorted(peoples, key=itemgetter(0, 1, 2)))
cw19("Tom,19,80")
cw19("John,20,90")
cw19("Jony,17,91")
cw19("Jony,17,93")
cw19("Json,21,85")
# # cw 20
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
#
# Question:
# Define a class with a generator which can iterate the numbers, which are divisible by 7, between a given range 0 and n.
#
# Hints:
# Consider use yield
def cw20(number):
    """Generator yielding the multiples of 7 in the range [0, number)."""
    yield from range(0, number, 7)
cw20(100)
# # cw 21
#
# from https://github.com/zhiwehu/Python-programming-exercises/blob/master/100%2B%20Python%20challenging%20programming%20exercises.txt
# Question
# A robot moves in a plane starting from the original point (0,0). The robot can move toward UP, DOWN, LEFT and RIGHT with a given steps. The trace of robot movement is shown as the following:
# UP 5
# DOWN 3
# LEFT 3
# RIGHT 2
# ...
# The numbers after the direction are steps. Please write a program to compute the distance from current position after a sequence of movement and original point. If the distance is a float, then just print the nearest integer.
# Example:
# If the following tuples are given as input to the program:
# UP 5
# DOWN 3
# LEFT 3
# RIGHT 2
# Then, the output of the program should be:
# 2
#
# Hints:
# In case of input data being supplied to the question, it should be assumed to be a console input.
import math
position_memory = []


def cw21(move):
    """Record one robot move ("DIRECTION steps"), then print the
    accumulated position ([row, col]: UP/DOWN change row, RIGHT/LEFT
    change col) and its Euclidean distance from the origin (0, 0)."""
    parts = move.split(" ")
    direction = parts[0]
    steps = int(parts[1])
    deltas = {'UP': (1, 0), 'DOWN': (-1, 0), 'RIGHT': (0, 1), 'LEFT': (0, -1)}
    d_row, d_col = deltas.get(direction, (0, 0))
    position_memory.append([d_row * steps, d_col * steps])
    # replay every stored move to obtain the current position
    position = [sum(step[0] for step in position_memory),
                sum(step[1] for step in position_memory)]
    print("position: ",position)
    # distance formula (http://matematyka.pisz.pl/strona/1248.html):
    # d = sqrt((x2-x1)^2 + (y2-y1)^2), measured from the origin
    d = math.sqrt(position[0] ** 2 + position[1] ** 2)
    print("How far we go: ",d)
cw21("LEFT 10")
# # cw 22
# Question:
# Write a program to compute the frequency of the words from the input. The output should output after sorting the key alphanumerically.
# Suppose the following input is supplied to the program:
# New to Python or choosing between Python 2 and Python 3? Read Python 2 or Python 3.
# Then, the output should be:
#
# 2:2
# 3.:1
# 3?:1
# New:1
# Python:5
# Read:1
# and:1
# between:1
# choosing:1
# or:2
# to:1
#
# Hints
# In case of input data being supplied to the question, it should be assumed to be a console input.
def cw22(words):
    """Print "word:count" for every word of the sentence, one per line,
    with the words sorted alphanumerically."""
    counts = {}
    for word in words.split(" "):
        counts[word] = counts.get(word, 0) + 1
    for word in sorted(counts):
        print("%s:%d" % (word, counts[word]))
cw22("New to Python or choosing between Python 2 and Python 3? Read Python 2 or Python 3")
# # cw 16
#
#
# from https://www.practicepython.org/exercise/2014/05/28/16-password-generator.html
#
# Write a password generator in Python. Be creative with how you generate passwords - strong passwords have a mix of lowercase letters, uppercase letters, numbers, and symbols. The passwords should be random, generating a new password every time the user asks for a new password. Include your run-time code in a main method.
#
# Extra:
#
# Ask the user how strong they want their password to be. For weak passwords, pick a word or two from a list.
#
def password_generator(number_of_digits, how_strong_password):
    """Return a random password of the requested length.

    Strengths: "weak" = lowercase letters only, "mid" = digits +
    lowercase, "strong" = upper + digits + lowercase.  Any other
    strength returns None (as the original did by falling through).

    Fixes: removed the unreachable print() after each return and
    collapsed the three duplicated branches into one alphabet lookup.
    """
    import random
    import string
    alphabets = {
        "weak": string.ascii_lowercase,
        "mid": string.digits + string.ascii_lowercase,
        "strong": string.ascii_uppercase + string.digits + string.ascii_lowercase,
    }
    alphabet = alphabets.get(how_strong_password)
    if alphabet is None:
        return None
    return ''.join(random.choices(alphabet, k=int(number_of_digits)))
password_generator(25,"strong")
# # cw 17
#
# from https://www.practicepython.org/exercise/2014/05/28/16-password-generator.html
# Use the BeautifulSoup and requests Python packages to print out a list of all the article titles on the New York Times homepage.
def get_title_new_york(url):
    """Fetch *url*, then print and return the NYT homepage headlines
    (the text of every <h2 class="story-heading"> element)."""
    import requests
    from bs4 import BeautifulSoup
    page_html = requests.get(url).text
    soup = BeautifulSoup(page_html, "html5lib")
    headings = soup.findAll('h2', attrs={'class' : 'story-heading'})
    output_titles = [heading.text.strip() for heading in headings]
    print(output_titles)
    return output_titles
# +
#get_title_new_york("https://www.nytimes.com/")
# -
# # cw 18
#
# from https://www.practicepython.org/exercise/2014/05/28/16-password-generator.html
# Create a program that will play the “cows and bulls” game with the user. The game works like this:
#
# Randomly generate a 4-digit number. Ask the user to guess a 4-digit number. For every digit that the user guessed correctly in the correct place, they have a “cow”. For every digit the user guessed correctly in the wrong place is a “bull.” Every time the user makes a guess, tell them how many “cows” and “bulls” they have. Once the user guesses the correct number, the game is over. Keep track of the number of guesses the user makes throughout teh game and tell the user at the end.
class cw18():
    """Cows-and-bulls against a random 4-digit code.

    A *cow* is a digit guessed in the correct position; a *bull* is a
    correct digit in the wrong position.  Fix: the original counted
    every digit match (any position, with duplicates) as a cow and
    derived bulls as 4 - cows, which contradicts the game's own rules.
    """
    def __init__(self):
        import random
        import string
        # the secret code, as a list of four digit characters
        self.password = random.choices(string.digits, k=4)
        self.attempts = 0
    def check_num(self, user_number):
        """Score one guess; return (cows, bulls), None on a win, or an
        error string when the guess is not 4 digits long."""
        from collections import Counter
        self.attempts += 1
        user_number = list(str(user_number))
        if len(user_number) != 4:
            return "del liczbe z 4 cyframi"
        if self.password == user_number:
            print("win w: ", self.attempts)
            return None
        # cows: same digit in the same position
        num_cows = sum(g == p for g, p in zip(user_number, self.password))
        # bulls: shared digits (respecting multiplicity) that are not cows
        shared = sum((Counter(user_number) & Counter(self.password)).values())
        num_bulls = shared - num_cows
        print("attempts: ",self.attempts)
        print("password: ",self.password)
        print("num_cows: ",num_cows)
        print("num_bulls: ",num_bulls)
        return num_cows, num_bulls
x = cw18()
x.check_num(1234)
# # cw 19
# https://www.practicepython.org/exercise/2014/07/14/19-decode-a-web-page-two.html
# Using the requests and BeautifulSoup Python libraries, print to the screen the full text of the article on this website: http://www.vanityfair.com/society/2014/06/monica-lewinsky-humiliation-culture.
#
# The article is long, so it is split up between 4 pages. Your task is to print out the text to the screen so that you can read the full article without having to click any buttons.
#
# (Hint: The post here describes in detail how to use the BeautifulSoup and requests libraries through the solution of the exercise posted here.)
#
# This will just print the full text of the article to the screen. It will not make it easy to read, so next exercise we will learn how to write this text to a .txt file.
def cw19(url):
    """Fetch *url*, then print and return the stripped text of every
    <p> element on the page."""
    import requests
    from bs4 import BeautifulSoup
    page_html = requests.get(url).text
    soup = BeautifulSoup(page_html, "html5lib")
    output_text = [paragraph.text.strip() for paragraph in soup.findAll('p')]
    print(output_text)
    return output_text
# +
#cw19("https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture")
# -
# # cw 20
# https://www.practicepython.org/exercise/2014/11/11/20-element-search.html
# Write a function that takes an ordered list of numbers (a list where the elements are in order from smallest to largest) and another number. The function decides whether or not the given number is inside the list and returns (then prints) an appropriate boolean.
#
# Extras:
#
# Use binary search.
#
def cw20(num_list, num):
    """Print and return whether *num* occurs in the sorted list
    *num_list*, located with a binary search.

    Fix: the original tested num_list[0] < num < num_list[-1]
    ("strictly between the endpoints"), which is not membership -- it
    reported True for absent values inside the range and False for the
    endpoints themselves (and crashed on an empty list).
    """
    import bisect
    index = bisect.bisect_left(num_list, num)
    bol_value = index < len(num_list) and num_list[index] == num
    print("inside: ", bol_value)
    return bol_value
# binary search: return the index of num in the sorted num_list, or None
def binary_search(num_list, num):
    """Iterative binary search.

    Fixes vs the original (which was marked as not working):
    - integer midpoint with //: the float midpoint raised TypeError
      when used as a list index;
    - shrink the *right* bound when the middle element is too large
      (the original moved the left bound, breaking the invariant);
    - return None when absent, instead of the undefined name
      `unsuccessful` (a NameError).
    """
    left_side = 0
    right_side = len(num_list) - 1
    while left_side <= right_side:
        middle = (left_side + right_side) // 2
        if num_list[middle] < num:
            left_side = middle + 1
        elif num_list[middle] > num:
            right_side = middle - 1
        else:
            return middle
    return None
cw20([1, 3, 5, 30, 42, 43, 500],0)
# # cw 21
#
# https://www.practicepython.org/exercise/2014/11/30/21-write-to-a-file.html
# Take the code from the How To Decode A Website exercise (if you didn’t do it or just want to play with some different code, use the code from the solution), and instead of printing the results to a screen, write the results to a txt file. In your code, just make up a name for the file you are saving to.
#
# Extras:
#
# Ask the user to specify the name of the output file that will be saved.
#
def cw21(file_name):
    """Download the article text via cw19 and save it to *file_name*."""
    text = str(' '.join(cw19("https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture")))
    print(text)
    with open(file_name, 'w', encoding='utf-8') as open_file:
        open_file.write(text)
cw21("dupa.txt")
# # cw 22
#
# https://www.practicepython.org/exercise/2014/12/06/22-read-from-file.html
# Given a .txt file that has a list of a bunch of names, count how many of each name there are in the file, and print out the results to the screen. I have a .txt file for you, if you want to use it!
#
# Extra:
#
# Instead of using the .txt file from above (or instead of, if you want the challenge), take this .txt file, and count how many of each “category” of each image there are. This text file is actually a list of files corresponding to the SUN database scene recognition database, and lists the file directory hierarchy for the images. Once you take a look at the first line or two of the file, it will be clear which part represents the scene category. To do this, you’re going to have to remember a bit about string parsing in Python 3. I talked a little bit about it in this post.
#
class cw22():
    """Persist a list of names to a text file and count the occurrences
    of each name on read-back."""
    def __init__(self):
        # remembered for symmetry with the original; not otherwise used
        self.file_name = ""
    def save_file(self, file_name, text):
        """Write the names joined by single spaces to *file_name*."""
        with open(file_name, 'w', encoding='utf-8') as open_file:
            open_file.write(str(' '.join(text)))
    def count_names(self, file_name):
        """Read *file_name* back and print a Counter of its
        space-separated names."""
        from collections import Counter
        with open(file_name, 'r+', encoding='utf-8') as open_file:
            print(Counter(open_file.read().split(" ")))
names = ["Lea", 'Luke', 'Darth', 'kornel', 'korneliusz']
x = cw22()
x.save_file("dupa.txt", names)
x.count_names("dupa.txt")
# # cw 23
# https://www.practicepython.org/exercise/2014/12/14/23-file-overlap.html|
# Given two .txt files that have lists of numbers in them, find the numbers that are overlapping. One .txt file has a list of all prime numbers under 1000, and the other .txt file has a list of happy numbers up to 1000.
#
# (If you forgot, prime numbers are numbers that can’t be divided by any other number. And yes, happy numbers are a real thing in mathematics - you can look it up on Wikipedia. The explanation is easier with an example, which I will describe below.)
class cw22():
    """Numbers below 1000 that are both prime and 'happy'.

    Fixes vs the original: save_files referenced the module-level
    instance `x` instead of `self` (a latent NameError when used on any
    other instance), and overlapping_numbers was a quadratic nested
    loop -- now a linear set-membership scan that preserves the
    original result order and duplicates.
    """
    def prime_gen(self, n):
        """Compact odd-only sieve of Eratosthenes; returns the primes below n."""
        s=[True]*int(n/2)
        for i in range(int((n/2-1)/2) >> 1):
            for j in range((i*(i+3)<<1)+3,int(n/2),(i<<1)+3): s[j]=False
        return [2] + [((i<<1)+3) for i in range(int(n/2)) if (s[i])]
    def happy_gen(self):
        """Hard-coded list of the happy numbers up to 1000."""
        return[1, 7, 10, 13, 19, 23, 28, 31, 32, 44, 49, 68, 70, 79,
               82, 86, 91, 94, 97, 100, 103, 109, 129, 130, 133, 139,
               167, 176, 188, 190, 192, 193, 203, 208, 219, 226, 230,
               236, 239, 262, 263, 280, 291, 293, 301, 302, 310, 313,
               319, 320, 326, 329, 331, 338, 356, 362, 365, 367, 368,
               376, 379, 383, 386, 391, 392, 397, 404, 409, 440, 446,
               464, 469, 478, 487, 490, 496, 536, 556, 563, 565, 566,
               608, 617, 622, 623, 632, 635, 637, 638, 644, 649, 653,
               655, 656, 665, 671, 673, 680, 683, 694, 700, 709, 716,
               736, 739, 748, 761, 763, 784, 790, 793, 802, 806, 818,
               820, 833, 836, 847, 860, 863, 874, 881, 888, 899, 901,
               904, 907, 910, 912, 913, 921, 923, 931, 932, 937, 940,
               946, 964, 970, 973, 989, 998, 1000]
    def save_files(self):
        """Write prim.txt and happy.txt as comma-separated number lists."""
        primes = ','.join(str(e) for e in self.prime_gen(1000))
        with open("prim.txt", 'w', encoding='utf-8') as open_file:
            open_file.write(primes)
        happies = ','.join(str(e) for e in self.happy_gen())
        with open("happy.txt", 'w', encoding='utf-8') as open_file:
            open_file.write(happies)
    def overlapping_numbers(self):
        """Return (as strings) the numbers present in both saved files,
        in prim.txt order."""
        with open("prim.txt", 'r+', encoding='utf-8') as open_file:
            primes = open_file.read().split(",")
        with open("happy.txt", 'r+', encoding='utf-8') as open_file:
            happy = set(open_file.read().split(","))
        return [value for value in primes if value in happy]
x = cw22()
x.overlapping_numbers()
# # cw 24
# https://www.practicepython.org/exercise/2014/12/27/24-draw-a-game-board.html
# This exercise is Part 1 of 4 of the Tic Tac Toe exercise series. The other exercises are: Part 2, Part 3, and Part 4.
#
# Time for some fake graphics! Let’s say we want to draw game boards that look like this:
# :
# --- --- ---
# | | | |
# --- --- ---
# | | | |
# --- --- ---
# | | | |
# --- --- ---
#
# This one is 3x3 (like in tic tac toe). Obviously, they come in many other sizes (8x8 for chess, 19x19 for Go, and many more).
#
# Ask the user what size game board they want to draw, and draw it for them to the screen using Python’s print statement.
#
# Remember that in Python 3, printing to the screen is accomplished by
#
# print("Thing to show on screen")
#
# Hint: this requires some use of functions, as were discussed previously on this blog and elsewhere on the Internet, like this TutorialsPoint link.
def cw24(number_x, number_y):
    """Draw a number_x-wide by number_y-tall ASCII game board made of
    " ---" rules and "| " cell walls."""
    horizontal = ''.join(" ---" for _ in range(number_x))
    vertical = ''.join("| " for _ in range(number_x + 1))
    for row in range(number_y):
        print(horizontal)
        print(vertical)
        # close the board with one final rule after the last row
        if row == number_y - 1:
            print(horizontal)
cw24(5,5)
# # cw 25
# https://www.practicepython.org/exercise/2015/11/01/25-guessing-game-two.html
# In a previous exercise, we’ve written a program that “knows” a number and asks a user to guess it.
#
# This time, we’re going to do exactly the opposite. You, the user, will have in your head a number between 0 and 100. The program will guess a number, and you, the user, will say whether it is too high, too low, or your number.
#
# At the end of this exchange, your program should print out how many guesses it took to get your number.
#
# As the writer of this program, you will have to choose how your program will strategically guess. A naive strategy can be to simply start the guessing at 1, and keep going (2, 3, 4, etc.) until you hit the number. But that’s not an optimal guessing strategy. An alternate strategy might be to guess 50 (right in the middle of the range), and then increase / decrease by 1 as needed. After you’ve written the program, try to find the optimal strategy! (We’ll talk about what is the optimal one next week with the solution.)
# +
class cw25():
    """Skeleton state for the reverse guessing game (the machine
    guesses, the human answers): tracks the number of attempts and the
    pool of candidate numbers 0..100."""
    def __init__(self):
        # kept from the original cell even though currently unused
        import random
        import string
        self.attempts = 0
        self.prediction = list(range(0, 101))
    def pred(self):
        """Register one guess attempt (guessing strategy not implemented)."""
        self.attempts += 1
# -
x = cw25()
x.pred()
# # cw 26
# https://www.practicepython.org/exercise/2015/11/16/26-check-tic-tac-toe.html
# As you may have guessed, we are trying to build up to a full tic-tac-toe board. However, this is significantly more than half an hour of coding, so we’re doing it in pieces.
#
# Today, we will simply focus on checking whether someone has WON a game of Tic Tac Toe, not worrying about how the moves were made.
#
# If a game of Tic Tac Toe is represented as a list of lists, like so:
#
# game = [[1, 2, 0],
# [2, 1, 0],
# [2, 1, 1]]
#
# where a 0 means an empty square, a 1 means that player 1 put their token in that space, and a 2 means that player 2 put their token in that space.
#
# Your task this week: given a 3 by 3 list of lists that represents a Tic Tac Toe game board, tell me whether anyone has won, and tell me which player won, if any. A Tic Tac Toe win is 3 in a row - either in a row, a column, or a diagonal. Don’t worry about the case where TWO people have won - assume that in every board there will only be one winner.
# +
def cw26(game_state):
    """Return the winner (1 or 2) of a 3x3 tic-tac-toe position, or None.

    A player wins with three equal non-zero tokens in a row, column or
    diagonal.  Fix: the original returned the shared value even when a
    line was all zeros (empty squares), so an empty row reported a
    bogus "winner" 0 and masked real wins checked later; it also had
    no explicit no-winner result.
    """
    lines = list(game_state)                                            # rows
    lines += [[game_state[r][c] for r in range(3)] for c in range(3)]   # columns
    lines.append([game_state[i][i] for i in range(3)])                  # main diagonal
    lines.append([game_state[i][2 - i] for i in range(3)])              # anti-diagonal
    for line in lines:
        if line[0] != 0 and line[0] == line[1] == line[2]:
            return line[0]
    return None
# -
game_state = [[1, 0, 2],
              [2, 1, 0],
              [2, 1, 1]]
cw26(game_state)
# # cw 27
# https://www.practicepython.org/exercise/2015/11/26/27-tic-tac-toe-draw.html
class cw27():
    """Records tic-tac-toe moves: odd-numbered moves place player 1's
    token, even-numbered moves player 2's."""
    def __init__(self):
        # 3x3 board of zeros (0 = empty) and a count of moves attempted
        self.game_state = [[0, 0, 0] for _ in range(3)]
        self.move_count = 0
    def game_move(self, player_move):
        """Apply one move given as "row,col".  Returns an error string
        when the square is taken (the attempt still counts a turn)."""
        self.move_count += 1
        move = player_move.split(",")
        move[0] = int(move[0])
        move[1] = int(move[1])
        print(move)
        row, col = move[0], move[1]
        if self.game_state[row][col] != 0:
            return "not free position"
        self.game_state[row][col] = 1 if self.move_count % 2 == 1 else 2
        print(self.game_state)
x = cw27()
x.game_move("1,2")
# # cw 28
# https://www.practicepython.org/exercise/2016/03/27/28-max-of-three.html
def cw28(var1, var2, var3):
    """Return whichever of the three values has the longest str() form.

    Bug fix: the original if/else chain returned after comparing only var1
    and var2 — the var1/var3 and var2/var3 comparisons were unreachable, so
    the true longest value was often missed.  Ties keep the earliest
    argument.
    """
    return max((var1, var2, var3), key=lambda value: len(str(value)))
cw28(1235,"12aa54674","324325afeasgtwery26")
# # cw 29
# https://www.practicepython.org/exercise/2016/08/03/29-tic-tac-toe-game.html
class cw29_tic_tac_toe_game():
    """Interactive two-player tic-tac-toe on a 3x3 board.

    Cell values: 0 = empty, 1 = player 1, 2 = player 2.  Moves are read
    from stdin as "row,col".
    """

    def __init__(self):
        self.game_state = [[0, 0, 0],
                           [0, 0, 0],
                           [0, 0, 0]]
        # Parity of this counter decides whose turn it is (odd = player 1).
        self.turn_count = 0

    def game(self):
        """Main loop: read up to 9 moves, draw the board, stop on a win."""
        print(" game start we weit for muve ")
        for game_turn in range(9):
            player_move = input()
            self.turn_count += 1
            if self.game_move(player_move) == "not free position":
                # Bug fix: a rejected move must not consume the player's turn.
                self.turn_count -= 1
            self.scratch_board(self.game_state)
            winner = self.win_checker(self.game_state)
            if winner:
                # Bug fix: the original computed the winner but never
                # reported it and kept playing.
                print("player", winner, "wins")
                return winner

    def game_move(self, player_move):
        """Place the current player's token at "row,col"; error string if taken."""
        row, col = (int(part) for part in player_move.split(","))
        if self.game_state[row][col] != 0:
            return "not free position"
        self.game_state[row][col] = 1 if self.turn_count % 2 == 1 else 2

    def scratch_board(self, game_state):
        """Pretty-print the board, one row per line, cells separated by |."""
        for row in game_state:
            print(" | ".join(str(cell) for cell in row))

    def win_checker(self, game_state):
        """Return the winning player (1 or 2) on `game_state`, or None.

        Bug fixes vs. the original: a line of zeros is not treated as a win,
        and the `game_state` argument is actually used instead of always
        reading self.game_state.
        """
        for row in game_state:
            if row[0] != 0 and row[0] == row[1] == row[2]:
                return row[0]
        for col in range(3):
            if game_state[0][col] != 0 and game_state[0][col] == game_state[1][col] == game_state[2][col]:
                return game_state[0][col]
        if game_state[1][1] != 0 and (game_state[0][0] == game_state[1][1] == game_state[2][2]
                                      or game_state[0][2] == game_state[1][1] == game_state[2][0]):
            return game_state[1][1]
        return None
x = cw29_tic_tac_toe_game()
x.game()
# # cw 30
# https://www.practicepython.org/solution/2016/10/15/30-pick-word-solutions.html
def cw30():
    """Print a random word (one line) from the sowpods.txt dictionary file."""
    import random
    with open('sowpods.txt') as handle:
        print(random.choice(handle.readlines()))
cw30()
# # Python Pandas Data Series [4 exercises with solution]
# https://www.w3resource.com/python-exercises/pandas/index.php
# cw1 Write a Python program to create and display a one-dimensional array-like object containing an array of data using Pandas module.
def cw1(number):
    """Build, print and return a pandas Series of `number` standard-normal samples."""
    import numpy as np
    import pandas as pd
    series = pd.Series(np.random.randn(number))
    print(series)
    return series
x = cw1(10)
# cw2 Write a Python program to convert a Panda module Series to Python list and it's type
def cw2(pn_module_series):
    """Convert a pandas Series to a plain Python list; print and return it."""
    import numpy as np
    import pandas as pd
    as_list = pn_module_series.tolist()
    print(as_list)
    return as_list
y = cw2(x)
# 3. Write a Python program to add, subtract, multiple and divide two Pandas Series.
def cw3(pn_series1, pn_series2, operator):
    """Combine two pandas Series element-wise; print and return the result.

    operator: one of "add", "subtract", "multiply", "divide".
    Bug fix: the original compared strings with `is` — an identity check that
    only worked by accident of CPython string interning; `==` is the correct
    comparison.  The resulting Series is now also returned (was always None).
    """
    import pandas as pd
    operations = {
        "add": pn_series1.add,
        "subtract": pn_series1.subtract,
        "multiply": pn_series1.multiply,
    }
    if operator == "divide":
        # fill_value=0 preserved from the original divide branch.
        result = pn_series1.divide(pn_series2, fill_value=0)
    elif operator in operations:
        result = operations[operator](pn_series2)
    else:
        return None
    print(result)
    return result
# we do pandas series form list
import pandas as pd
pn_series1 = [2, 4, 6, 8, 10]
pn_series1 = pd.Series(pn_series1)
pn_series2 = [1, 3, 5, 7, 9]
pn_series2 = pd.Series(pn_series2)
x = cw3(pn_series1, pn_series2, "divide")
# 4. Write a Python program to get the largest integer smaller or equal to the division of the inputs
def cw4(series1, series2):
    """Return the element-wise floor of series1 / series2.

    The task asks for the largest integer smaller than or equal to the
    quotient, i.e. the floor.  Bug fix: the original used round(), which
    rounds to the nearest integer instead (e.g. 3/2 -> 2 rather than 1).
    """
    import numpy as np
    return np.floor(series1.divide(series2, fill_value=0))
# we do pandas series form list
import pandas as pd
pn_series1 = [2, 4, 6, 8, 10]
pn_series1 = pd.Series(pn_series1)
pn_series2 = [1, 3, 5, 7, 9]
pn_series2 = pd.Series(pn_series2)
cw4(pn_series1, pn_series2)
# # Naprężenia w reaktorze zbiornikowym z mieszadłem
# ZADANIE
# Aglomeraty komórek o wielkości 120 μm hodowane są w reaktorze o objętości
# 3,5 L wyposażonym w mieszadło Rushtonao średnicy 6 cm. Aglomeraty maja gęstość 1010 kg/m3 oraz lepkość 1,3x10-3Pas.
# Oszacuj maksymalną dopuszczalna szybkość mieszania zapobiegającą niszczeniu komórek.
def cw_bio_1(d, v, D):
    """Estimate the stirring power at which Kolmogorov eddies reach the cell size.

    Parameters (units as supplied by the caller; no conversion is done here):
        d: aggregate diameter — caller passes 120, presumably micrometres; TODO confirm units
        v: reactor volume in litres — NOTE(review): currently unused in the calculation
        D: impeller diameter — caller passes 6, presumably centimetres; TODO confirm units
    Prints and returns the computed mixing power.
    """
    # Kolmogorov microscale smaller than 1/2 - 2/3 of the particle diameter damages cells.
    # The critical eddy size is taken as 2/3 of the aggregate diameter:
    lambdaa = (2/3)*d
    # Power producing eddies of that size, from the Kolmogorov dissipation relation:
    lepkosc_dynamiczna = 1.3*(10**(-3))  # dynamic viscosity [Pa*s]
    gestosc = 1010  # density [kg/m^3]
    lepkosc_kinematyczna = lepkosc_dynamiczna/gestosc  # kinematic viscosity [m^2/s]
    # NOTE(review): d and D are not converted to metres, so the magnitude of the
    # result depends on the caller passing consistent units — verify before use.
    dysypacja_mocy = (lepkosc_kinematyczna**3)/(lambdaa**4)
    moc_mieszania = dysypacja_mocy*gestosc*(D**3)
    print(moc_mieszania)
    return moc_mieszania
cw_bio_1(120,3.5,6)
# # Python Web Scraping [25 exercises with solution]
# 1. Write a Python program to test if a given page is found or not on the server
def cw1(url):
    """Report whether `url` can be fetched; prints the body on success."""
    import requests
    from bs4 import BeautifulSoup
    from urllib.request import urlopen
    from urllib.error import HTTPError
    from urllib.error import URLError
    try:
        response = urlopen(url)
    except HTTPError:
        print("HTTP error")
    except URLError:
        print("Server not found!")
    else:
        print(response.read())
cw1("https://www.google.pl/")
def cw2(url):
    """Fetch `url` and return its parsed BeautifulSoup document."""
    import requests
    from bs4 import BeautifulSoup
    page = requests.get(url)
    return BeautifulSoup(page.text, "html5lib")
cw2("https://en.wikipedia.org/wiki/Robots_exclusion_standard")
def cw3(url):
    """Return the non-empty <p> paragraph texts of the page at `url`."""
    import requests
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    stripped = (tag.text.strip() for tag in soup.findAll('p'))
    return [text for text in stripped if len(text) > 0]
cw3("https://www.data.gov/")
# 4. Write a Python program to convert an address (like "1600 Amphitheatre Parkway, Mountain View, CA") into geographic coordinates (like latitude 37.423021 and longitude -122.083739)
def cw4(address):
    """Geocode `address` via the Google Maps API; return (lat, lng)."""
    import requests
    url = 'https://maps.googleapis.com/maps/api/geocode/json'
    response = requests.get(url, params={'sensor': 'false', 'address': address})
    first_hit = response.json()['results'][0]
    location = first_hit['geometry']['location']
    return location['lat'], location['lng']
cw4('1600 Amphitheatre Parkway, Mountain View, CA')
# 5. Write a Python program to display the name of the most recently added dataset on data.gov.
def cw5(url):
    """Return the second non-empty <h2> heading on the page — the most
    recently added dataset name on data.gov's front page."""
    import requests
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    headings = [tag.text.strip() for tag in soup.findAll('h2') if tag.text.strip()]
    return headings[1]
cw5("https://www.data.gov/")
# 6. Write a Python program to extract h1 tag from example.com.
def cw6(url):
    """Return the non-empty <h1> heading texts of the page at `url`."""
    import requests
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    return [tag.text.strip() for tag in soup.findAll('h1') if tag.text.strip()]
cw6("http://www.example.com/")
# 7. Write a Python program to extract and display all the header tags from en.wikipedia.org/wiki/Main_Page.
def cw7(url):
    """Return all non-empty h1/h2/h3 heading texts from the page at `url`,
    grouped by heading level (all h1 first, then h2, then h3)."""
    import requests
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    collected = []
    for level in ('h1', 'h2', 'h3'):
        for tag in soup.findAll(level):
            text = tag.text.strip()
            if text:
                collected.append(text)
    return collected
cw7("https://en.wikipedia.org/wiki/Main_Page")
# 8. Write a Python program to extract and display all the image links from en.wikipedia.org/wiki/Peter_Jeffrey_(RAAF_officer).
def cw8(url):
    """Print the src attribute of every .jpg image on the page at `url`.

    Bug fixes: the regex '.jpg' left the dot unescaped, so any character
    before 'jpg' matched (e.g. 'xjpg'); and the response object shadowed the
    `requests` module name.
    """
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    for img in soup.findAll('img', {'src': re.compile(r'\.jpg')}):
        print(img['src'])
cw8("https://en.wikipedia.org/wiki/Peter_Jeffrey_(RAAF_officer)")
# 9. Write a Python program to get 90 days of visits broken down by browser for all sites on data.gov.
def cw9(url):
    """Return the per-browser visit totals from the analytics.usa.gov JSON feed.

    Cleanup: removed unused bs4/re imports; the response object no longer
    shadows the `requests` module.
    """
    import requests
    response = requests.get(url)
    return response.json()['totals']['browser']
cw9("https://analytics.usa.gov/data/live/browsers.json")
# 10. Write a Python program to that retrieves an arbitary Wikipedia page of "Python" and creates a list of links on that page
def cw10(url):
    """Print the href of every anchor tag on the page at `url`."""
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    for anchor in soup.findAll('a'):
        if 'href' in anchor.attrs:
            print(anchor.attrs['href'])
cw10("https://en.wikipedia.org/wiki/Main_Page")
# 11. Write a Python program to check whether a page contains a title or not.
def cw11(url):
    """Print whether the page at `url` contains a <title> element."""
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    if not soup.findAll('title'):
        print("no title on page")
    else:
        print("title on page")
cw11("https://pl.wikipedia.org/wiki/Wikipedia:Strona_g%C5%82%C3%B3wna")
# 12. Write a Python program to list all language names and number of related articles in the order they appear in wikipedia.org.
def cw12(url):
    """Scrape wikipedia.org's landing page for the top-10 language entries.

    NOTE(review): the implementation looks unfinished — `class_adress` is
    built but never used, the same findAll('strong') result is appended ten
    times, and `output` is then truncated to its first element.  Verify the
    intended behavior before relying on this.
    """
    import requests
    from bs4 import BeautifulSoup
    import re
    requests = requests.get(url)  # shadows the requests module from here on
    requests = requests.text
    requests = BeautifulSoup(requests, "html5lib")
    # Article counts are rendered in <bdi dir="ltr"> elements; keep the first 10.
    numbers = requests.findAll('bdi', {'dir' : "ltr"})[:10]
    output =[]
    for i in range(1,11):
        # Built but unused — presumably meant to select each language block.
        class_adress = 'central-featured-lang lang'+str(i)
        text = requests.findAll('strong')
        output.append(text)
    # Only the first (repeated) list of <strong> tags is kept.
    output = output[:1]
    return output ,numbers
cw12("https://www.wikipedia.org/")
# 13. Write a Python program to get the number of people visiting a U.S. government website right now.
def cw13(url):
    """Return the 'query' payload (current visitor data) from the
    analytics.usa.gov realtime JSON feed.

    Cleanup: removed unused bs4/re imports; the response object no longer
    shadows the `requests` module.
    """
    import requests
    response = requests.get(url)
    return response.json()['query']
source = "https://analytics.usa.gov/data/live/realtime.json"
cw13(source)
# 14. Write a Python program get the number of security alerts issued by US-CERT in the current year.
def cw14(url):
    """Print the text of every div.item-list on the US-CERT alerts page."""
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    for item in soup.findAll('div', {'class' : "item-list"}):
        print(item.text.strip())
cw14("https://www.us-cert.gov/ncas/alerts/2018")
# 15. Write a Python program to get the number of Pinterest accounts maintained by U.S. State Department embassies and missions.
def cw15(url):
    """Print the text of every anchor tag on the page at `url`."""
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    for anchor in soup.findAll('a'):
        print(anchor.text.strip())
cw15("https://www.state.gov/r/pa/ode/socialmedia/#fb")
# 16. Write a Python program to get the number of followers of a given twitter account
def cw16(url):
    """Print the follower count shown on a Twitter profile page.

    NOTE(review): relies on Twitter's legacy server-rendered markup
    (ProfileNav-* classes), which current twitter.com no longer serves.
    """
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    follow_box = soup.find('li', {'class': 'ProfileNav-item ProfileNav-item--followers'})
    count = follow_box.find('a').find('span', {'class': 'ProfileNav-value'})
    print(count.text.strip())
cw16("https://twitter.com/elonmusk")
# 17. Write a Python program to get the number of following on Twitter
def cw17(url):
    """Print the 'following' count from a Twitter profile page.

    Bug fix: the original was a verbatim copy of cw16 and scraped the
    *followers* box; the task asks for the *following* count, which lives in
    the ProfileNav-item--following list item.
    NOTE(review): relies on Twitter's legacy server-rendered markup, which
    current twitter.com no longer serves.
    """
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    follow_box = soup.find('li', {'class': 'ProfileNav-item ProfileNav-item--following'})
    count = follow_box.find('a').find('span', {'class': 'ProfileNav-value'})
    print(count.text.strip())
cw17("https://twitter.com/elonmusk")
# 18. Write a Python program to get the number of post on Twitter liked by a given account.
def cw18(url):
    """Print the number of liked tweets from a Twitter profile page.

    NOTE(review): relies on Twitter's legacy server-rendered markup.
    """
    import requests
    from bs4 import BeautifulSoup
    import re
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    likes_box = soup.find('li', {'class': 'ProfileNav-item ProfileNav-item--favorites'})
    count = likes_box.find('a').find('span', {'class': 'ProfileNav-value'})
    print(count.text.strip())
# cw18("https://twitter.com/elonmusk")
# 21. Write a Python program to find the live weather report (temperature, wind speed, description and weather) of a given city.
def cw23(city):
    """Print the live temperature, wind speed and weather description for `city`.

    Bug fix: the original's bare `except:` swallowed every exception
    (including typos in the body and KeyboardInterrupt); only network
    failures and missing/renamed JSON fields now fall through to the
    "not found" message.
    NOTE(review): the API key is hard-coded — move it to configuration.
    """
    import requests
    try:
        query = 'q=' + city
        response = requests.get('http://api.openweathermap.org/data/2.5/weather?' + query + '&APPID=b35975e18dc93725acb092f7272cc6b8&units=metric')
        payload = response.json()
        print("temp:", payload['main']['temp'])
        print("wind speed:", payload['wind']['speed'])
        print("weather:", payload['weather'][0]['description'])
        print("Weather:", payload['weather'][0]['main'])
    except (requests.RequestException, KeyError, IndexError, ValueError):
        print('City name not found...')
cw23('Warszawa')
# # Python Challenges
# 1. Write a Python program to check if a given positive integer is a power of two
def ch1(integer):
    """Print and return whether `integer` is a power of two.

    Bug fixes: the original probed `math.pow(integer, 1/i)` in a loop, which
    is float-imprecise and never terminates for integer == 0; the standard
    bit trick (n & (n-1) == 0 for n > 0) is exact and O(1).  Note that
    1 == 2**0 is now correctly reported as True.
    """
    result = integer > 0 and integer & (integer - 1) == 0
    print(result)
    return result
ch1(1024)
# 2. Write a Python program to check if a given positive integer is a power of three.
def ch2(integer):
    """Print and return whether `integer` is a power of three.

    Replaces float-based math.pow probing (imprecise, with an arbitrary
    iteration cap) with exact integer division.  1 == 3**0 counts as True.
    """
    n = integer
    while n > 1 and n % 3 == 0:
        n //= 3
    result = integer > 0 and n == 1
    print(result)
    return result
ch2(59049)
# 3. Write a Python program to check if a given positive integer is a power of four
def ch3(integer):
    """Print and return whether `integer` is a power of four.

    Replaces float-based math.pow probing (imprecise, with an arbitrary
    iteration cap) with exact integer division.  1 == 4**0 counts as True.
    """
    n = integer
    while n > 1 and n % 4 == 0:
        n //= 4
    result = integer > 0 and n == 1
    print(result)
    return result
ch3(17)
# 4. Write a Python program to check if a number is a perfect square.
def ch4(integer):
    """Print and return whether `integer` is a perfect square.

    Uses math.isqrt, which is exact for arbitrarily large ints, instead of
    math.sqrt whose float result can misclassify very large squares (and
    raised ValueError for negative input — negatives now print False).
    """
    import math
    result = integer >= 0 and math.isqrt(integer) ** 2 == integer
    print(result)
    return result
ch4(16)
# 5. Write a Python program to check if an integer is the power of another integer.
def ch5(integer):
    """Print and return whether `integer` equals base**exp for some integer
    base >= 2 and exponent >= 2.

    Bug fixes: the original used float math.pow (imprecise) and printed
    nothing at all for integer <= 3; this version is exact and always
    prints a result.
    """
    for base in range(2, integer):
        power = base * base  # start at exponent 2
        while power < integer:
            power *= base
        if power == integer:
            print(True)
            return True
    print(False)
    return False
ch5(7)
# 6. Write a Python program to check if a number is a power of a given base
def ch6(power, base):
    """Print and return whether `power` equals `base` raised to some
    exponent in 0..999.

    Behavior preserved (base**0 == 1, so power == 1 is always True); the
    boolean is now also returned for programmatic use.
    """
    import math
    result = any(math.pow(base, exponent) == power for exponent in range(1000))
    print(result)
    return result
ch6(0,2)
# 7. Write a Python program to find a missing number from a list.
def ch7(list_num):
    """Print and return the first missing value from an ascending run that
    should start at 1 (e.g. [1, 2, 4] -> 3); return None if there is no gap.

    The missing value is now returned as well as printed (previously the
    function always returned None).
    """
    for expected, actual in enumerate(list_num, start=1):
        if expected != actual:
            print(expected)
            return expected
    return None
ch7([1,2,3,4,6,7,8,9,10])
# 8. Write a Python program to create and display a DataFrame from a specified dictionary data which has the index labels.
# +
import pandas as pd
import numpy as np
exam_data = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],
'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(exam_data, index=labels)
df
# -
def cw1(url):
    """Fetch `url` once and discard the response — a simple connectivity
    check for the local camera stream.

    Cleanup: removed unused bs4/re imports; the response object no longer
    shadows the `requests` module.
    """
    import requests
    requests.get(url)
cw1("http://192.168.1.101/stream")
# # python/tf/keras exercises basic mnist
# MNIST Dataset
import numpy as np
import keras
from keras.datasets import mnist
from keras.layers.core import Dropout
# Load the datasets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# +
# What is the type of X_train?
# traing data set
# +
# What is the type of y_train?
# label for dataset
# -
# What is the dimension of X_train?. What does that mean?
# 60000 x 28 x 28 x 1
x_train.shape
from matplotlib import pyplot
import matplotlib as mpl
# %matplotlib inline
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
imgplot = ax.imshow(x_train[10], cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization
from keras.datasets import mnist
from keras.utils import np_utils
# Load the datasets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# make arreys
x_train= x_train.reshape(60000,784)
x_test= x_test.reshape(10000,784)
x_train= x_train.astype("float32")
x_test= x_test.astype("float32")
# grey scaling
x_train /=255
x_test /=255
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train)
n_classes = 10
batch_size = 128
epochs = 10
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001),
metrics=['accuracy'])
network_history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_val, y_val))
# +
import matplotlib.pyplot as plt
# %matplotlib inline
def plot_history(network_history):
    """Plot training/validation loss and accuracy curves from a Keras History.

    NOTE(review): reads the 'acc'/'val_acc' history keys used by older Keras;
    newer versions record 'accuracy'/'val_accuracy' — confirm against the
    installed Keras before reuse.  Uses the module-level `plt` import.
    """
    # Loss curves.
    plt.figure()
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.plot(network_history.history['loss'])
    plt.plot(network_history.history['val_loss'])
    plt.legend(['Training', 'Validation'])
    # Accuracy curves.
    plt.figure()
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.plot(network_history.history['acc'])
    plt.plot(network_history.history['val_acc'])
    plt.legend(['Training', 'Validation'], loc='lower right')
    plt.show()
plot_history(network_history)
# -
model.summary()
print('Model Input Tensors: ', model.input, end='\n\n')
print('Layers - Network Configuration:', end='\n\n')
for layer in model.layers:
print(layer.name, layer.trainable)
print('Layer Configuration:')
print(layer.get_config(), end='\n{}\n'.format('----'*10))
print('Model Output Tensors: ', model.output)
# +
model_truncated = Sequential()
model_truncated.add(Dense(512, activation='relu', input_shape=(784,)))
model_truncated.add(Dropout(0.2))
model_truncated.add(Dense(512, activation='relu'))
for i, layer in enumerate(model_truncated.layers):
layer.set_weights(model.layers[i].get_weights())
model_truncated.compile(loss='categorical_crossentropy', optimizer=SGD(),
metrics=['accuracy'])
# -
np.all(model_truncated.layers[0].get_weights()[0] == model.layers[0].get_weights()[0])
hidden_features = model_truncated.predict(x_train)
hidden_features.shape
# +
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2)
X_tsne = tsne.fit_transform(hidden_features[:1000]) ## Reduced for computational issues
# -
colors_map = np.argmax(y_train, axis=1)
X_tsne.shape
np.where(colors_map==6)
colors = np.array([x for x in 'b-g-r-c-m-y-k-purple-coral-lime'.split('-')])
colors_map = colors_map[:1000]
plt.figure(figsize=(10,10))
for cl in range(n_classes):
indices = np.where(colors_map==cl)
plt.scatter(X_tsne[indices,0], X_tsne[indices, 1], c=colors[cl], label=cl)
plt.legend()
plt.show()
# # python/tf/keras exercises basic Matrix Factorisation in Keras
# https://nipunbatra.github.io/blog/2017/recommend-keras.html
# imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# WE LOAD DATASET
dataset = pd.read_csv(r'C:\Users\tensorflow\Desktop\iloraz_inteligencji\python cwiczenia\data\ml-100k\u.data',sep='\t',names="user_id,item_id,rating,timestamp".split(","))
dataset.head()
len(dataset.user_id.unique()), len(dataset.item_id.unique())
dataset.user_id = dataset.user_id.astype('category').cat.codes.values
dataset.item_id = dataset.item_id.astype('category').cat.codes.values
dataset.head()
# split train test data 20 %
from sklearn.model_selection import train_test_split
x_train, x_test = train_test_split(dataset, test_size=0.2)
import keras
from IPython.display import SVG
from keras.optimizers import Adam
from keras.utils.vis_utils import model_to_dot
import tensorflow as tf
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
n_users, n_movies = len(dataset.user_id.unique()), len(dataset.item_id.unique())
n_latent_factors = 5
movie_input = keras.layers.Input(shape=[1], name='Item')
movie_embedding = keras.layers.Embedding(n_movies+1, n_latent_factors, name='Movie-Embedding')(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
user_input = keras.layers.Input(shape=[1],name='User')
user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='User-Embedding')(user_input))
user_input
prod = keras.layers.merge.dot([movie_vec, user_vec],axes=-1,name='DotProduct')
model = keras.Model([user_input, movie_input], prod)
model.compile('adam', 'mean_squared_error')
SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='HB').create(prog='dot', format='svg'))
model.summary()
history = model.fit([x_train.user_id, x_train.item_id], x_train.rating, epochs=100, verbose=0)
pd.Series(history.history['loss']).plot(logy=True)
plt.xlabel("Epoch")
plt.ylabel("Train Error")
y_hat = np.round(model.predict([x_test.user_id, x_test.item_id]),0)
y_true = x_test.rating
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_true, y_hat)
# n_latent_factors = 10 0.77525
# n_latent_factors = 5 0.7122
# n_latent_factors = 3 0.6915
movie_embedding_learnt = model.get_layer(name='Movie-Embedding').get_weights()[0]
pd.DataFrame(movie_embedding_learnt).describe()
user_embedding_learnt = model.get_layer(name='User-Embedding').get_weights()[0]
pd.DataFrame(user_embedding_learnt).describe()
from keras.constraints import non_neg
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors, name='NonNegMovie-Embedding', embeddings_constraint=non_neg())(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
user_input = keras.layers.Input(shape=[1],name='User')
user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors,name='NonNegUser-Embedding',embeddings_constraint=non_neg())(user_input))
prod = keras.layers.merge.dot([movie_vec, user_vec], axes=-1,name='DotProduct')
model = keras.Model([user_input, movie_input], prod)
model.compile('adam', 'mean_squared_error')
history_nonneg = model.fit([x_train.user_id,x_train.item_id], x_train.rating, epochs=10, verbose=0)
movie_embedding_learnt = model.get_layer(name='NonNegMovie-Embedding').get_weights()[0]
pd.DataFrame(movie_embedding_learnt).describe()
# Now we use that to build recomendation system
#
n_latent_factors_user = 10
n_latent_factors_movie = 10
# +
movie_input = keras.layers.Input(shape=[1],name='Item')
movie_embedding = keras.layers.Embedding(n_movies + 1, n_latent_factors_movie, name='Movie-Embedding')(movie_input)
movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
movie_vec = keras.layers.Dropout(0.2)(movie_vec)
user_input = keras.layers.Input(shape=[1],name='User')
user_vec = keras.layers.Flatten(name='FlattenUsers')(keras.layers.Embedding(n_users + 1, n_latent_factors_user,name='User-Embedding')(user_input))
user_vec = keras.layers.Dropout(0.2)(user_vec)
concat = keras.layers.concatenate([movie_vec, user_vec], axis=-1,name='Concat')
concat_dropout = keras.layers.Dropout(0.2)(concat)
dense = keras.layers.Dense(200,name='FullyConnected')(concat)
dropout_1 = keras.layers.Dropout(0.2,name='Dropout')(dense)
dense_2 = keras.layers.Dense(100,name='FullyConnected-1')(concat)
dropout_2 = keras.layers.Dropout(0.2,name='Dropout')(dense_2)
dense_3 = keras.layers.Dense(50,name='FullyConnected-2')(dense_2)
dropout_3 = keras.layers.Dropout(0.2,name='Dropout')(dense_3)
dense_4 = keras.layers.Dense(20,name='FullyConnected-3', activation='relu')(dense_3)
result = keras.layers.Dense(1, activation='relu',name='Activation')(dense_4)
adam = Adam(lr=0.005)
model = keras.Model([user_input, movie_input], result)
model.compile(optimizer=adam,loss= 'mean_absolute_error')
# -
SVG(model_to_dot(model, show_shapes=True, show_layer_names=True, rankdir='HB').create(prog='dot', format='svg'))
model.summary()
history = model.fit([x_train.user_id, x_train.item_id], x_train.rating, epochs=250, verbose=0)
y_hat_2 = np.round(model.predict([x_test.user_id, x_test.item_id]),0)
print(mean_absolute_error(y_true, y_hat_2))
print(mean_absolute_error(y_true, model.predict([x_test.user_id, x_test.item_id])))
# +
# n_latent_factors_user = 5
# n_latent_factors_movie = 8
# 0.6913
# 0.7127672422289848
# n_latent_factors_user = 10
# n_latent_factors_movie = 10
# 0.69375
# 0.7059811423778534
# -
# keras rnn #https://github.com/tertiarycourses/KerasTraining/blob/master/exercises/module5_RNN/module5_2_keras_rnn_imdb.py
# +
import os
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense
# -
from keras.datasets import imdb
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=20000)
print(X_train)
# +
#print(y_train)
# -
X_train = sequence.pad_sequences(X_train,maxlen=80)
X_test = sequence.pad_sequences(X_test,maxlen=80)
model = Sequential()
model.add(Embedding(20000,128))
model.add(LSTM(32))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.fit(X_train,y_train,epochs=2)
loss,accuracy = model.evaluate(X_test,y_test)
print('Loss = ',loss)
print('Accuracy = ',accuracy)
# keras cifar CNN https://github.com/leriomaggio/deep-learning-keras-tensorflow/blob/master/4.%20Convolutional%20Neural%20Networks/4.3%20CIFAR10%20CNN.ipynb
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import generic_utils
n_classes = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, n_classes)
y_test = np_utils.to_categorical(y_test, n_classes)
x_train
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
x_train
generated_images = ImageDataGenerator(
featurewise_center=True, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=True, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
generated_images.fit(x_train)
x_train.shape
# we push 500 photos to photo generator
gen = generated_images.flow(x_train, y_train, batch_size=500, shuffle=True)
x_batch, y_batch = next(gen)
x_batch.shape
n_epochs = 2
for e in range(n_epochs):
print('Epoch', e)
print('Training...')
progbar = generic_utils.Progbar(x_train.shape[0])
for x_batch, y_batch in generated_images.flow(x_train, y_train, batch_size=500, shuffle=True):
loss = model.train_on_batch(x_batch, y_batch)
progbar.add(x_batch.shape[0], values=[('train loss', loss[0])])
# VGG16
from keras.applications import VGG16
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
import os
vgg16 = VGG16(include_top=True, weights='imagenet')
vgg16.summary()
IMAGENET_FOLDER = 'imgs/imagenet' #in the repo
# +
from keras.preprocessing import image
import numpy as np
img_path = os.path.join(IMAGENET_FOLDER, 'strawberry_1157.jpeg')
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
preds = vgg16.predict(x)
print('Predicted:', decode_predictions(preds))
# -
| cwiczenia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Build
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Overview:
# - **Teaching:** 10 min
# - **Exercises:** 0 min
#
# **Questions**
# - How do I build a lesson?
#
# **Objectives**
# - Understand the 4 key steps in a build
# -
# The main program for NBfancy is the command line program `nbfancy`, we have seen it in use in the previous section. Typical usage is `nbfancy [verb] [options]`, where `[verb]` is some action, and `[options]` are additional options for that action.
#
# For available actions, we can use the `--help` flag.
# !nbfancy --help
# A typical build consists of four steps:
# * Initialisation with `init`
# * Re-execution with `rerun`
# * Rendering with `render`
# * Publishing to website with `html`
# + [markdown] slideshow={"slide_type": "slide"}
# ## Initialisation
# To start making training material a directory must be initialised, which is done with the `init` command. This works in a similar way to `git init` a directory is provided and all the files we need to create a new lesson are added to the directory. If no directory is provided, the current directory is used.
# -
# !nbfancy init example
# As part of the initialisation, template files are added to the `nbplain` directory to help you get started. For more information about `init` we can look at the help.
# !nbfancy init --help
# ## Re-execution
# In order to try and reduce issues associated with out of order execution, unexecuted cells and other quirks of notebooks, it is strongly recommended that all notebooks that form a lesson are cleared and re-executed. This can be done quickly and easily with the `rerun` command.
# %%bash2
# cd example
nbfancy rerun
# ## Information: What's this magic?
# As part of NBfancy we have implemented a thin wrapper around the `%%bash` magic available in notebooks, which we have called `%%bash2`. This just keeps track of what directory we are in, so as to keep the content close to what you would type at the command line. `%%bash2` magic is available as part of NBfancy, to find out more, see [this lesson](10_magic.ipynb).
# Re-executing all the notebooks in order has the added feature of resetting the execution count for cells, and ensuring that someone following along in a lesson won't encounter errors. Think of this like automated testing for the code in a lesson.
#
# Further options for the `rerun` command are listed in the help.
# !nbfancy rerun --help
# ## Rendering
# Keywords in notebooks can be processed and marked up by running the `render` command. By default this creates a new directory `nbfancy` containing a copy of all of the notebooks in `nbplain`, but with keywords replaced with the respective environments (see [environments](05_environments.ipynb) section).
# %%bash2
nbfancy render
# As always further options are available in the help.
# !nbfancy render --help
# ## Publishing
# Once you have checked the contents of the rendered notebooks, the whole lesson can be built into a website by running the `html` command.
# %%bash2
nbfancy html
# Additional options are given by help.
# !nbfancy html --help
# + [markdown] slideshow={"slide_type": "slide"}
# ## Key Points:
# - A directory is initialised using `init`
# - The contents of a notebook can be cleared and re-executed using `rerun`
# - `render` provides additional rich features to markdown cells
# - A website of the material is generated using `html`
| nbfancy/tutorial/03_build.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
# Figure/layout parameters for the stacked pulse-profile plot.
vertical_margin = 20      # extra y-range padding above and below the traces
horizontal_margin = 100   # extra x-range padding left and right
x_size = 28               # figure width (inches)
y_size = 25               # figure height (inches)
linewidth = 4
# pulsar pulse-profile data: one profile per CSV row
src = 'https://gist.githubusercontent.com/borgar/31c1e476b8e92a11d7e9/raw/0fae97dab6830ecee185a63c1cee0008f6778ff6/pulsar.csv'
plot_name = 'plot.png'
# +
# The CSV has no header row, so header=None.  The original header=-1 was
# never a documented value and raises ValueError in modern pandas.
df = pd.read_csv(src, header=None)
plt.style.use('dark_background')
fig, ax = plt.subplots(figsize=(x_size, y_size))
n_lines = df.shape[0]
x = range(df.shape[1])
# Draw each row as a white trace, vertically offset by its row number, and
# fill black underneath so lower (nearer) traces occlude the ones behind.
for row in df.iterrows():
    line = row[1].values/3 + (n_lines - row[0])   # scale amplitude, offset vertically
    ax.plot(x, line, lw=linewidth, c='white', alpha=1, zorder=row[0]/n_lines)
    ax.fill_between(x, -5, line, facecolor='black', zorder=row[0]/n_lines)
ax.set_yticks([])
ax.set_xticks([])
ax.set_xlim(min(x)-horizontal_margin, max(x)+horizontal_margin)
ax.set_ylim(-vertical_margin, df.shape[0] + vertical_margin)
plt.savefig(plot_name)
# -
| plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from sklearn.metrics import multilabel_confusion_matrix as skmultilabel_confusion_matrix
def multilabel_confusion_matrix(y_true, y_pred):
    """Per-class 2x2 confusion matrices for 1-D integer label arrays.

    Returns an array of shape (n_labels, 2, 2) where entry [i] is
    [[tn, fp], [fn, tp]] for class i — a numpy-only equivalent of
    sklearn.metrics.multilabel_confusion_matrix.  Labels are assumed to be
    the integers 0..n_labels-1.
    """
    n_labels = len(set(y_true) | set(y_pred))
    # per-class totals of actual and predicted occurrences
    actual_counts = np.bincount(y_true, minlength=n_labels)
    predicted_counts = np.bincount(y_pred, minlength=n_labels)
    # true positives: occurrences where prediction matched the truth
    true_pos = np.bincount(y_true[y_true == y_pred], minlength=n_labels)
    false_pos = predicted_counts - true_pos
    false_neg = actual_counts - true_pos
    true_neg = len(y_true) - true_pos - false_pos - false_neg
    # one row [tn, fp, fn, tp] per class, then fold each row into a 2x2 block
    return np.stack([true_neg, false_pos, false_neg, true_pos], axis=1).reshape(-1, 2, 2)
# binary: our implementation must agree with scikit-learn on random labels
for seed in range(10):
    rng = np.random.RandomState(seed)
    labels_true = rng.randint(2, size=10)
    labels_pred = rng.randint(2, size=10)
    ours = multilabel_confusion_matrix(labels_true, labels_pred)
    reference = skmultilabel_confusion_matrix(labels_true, labels_pred)
    assert np.array_equal(ours, reference)
# multiclass: the same check with three classes
for seed in range(10):
    rng = np.random.RandomState(seed)
    labels_true = rng.randint(3, size=10)
    labels_pred = rng.randint(3, size=10)
    ours = multilabel_confusion_matrix(labels_true, labels_pred)
    reference = skmultilabel_confusion_matrix(labels_true, labels_pred)
    assert np.array_equal(ours, reference)
| metrics/multilabel_confusion_matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from grid.shake import ShakeGrid
from mpl_toolkits.basemap import Basemap
from collections import OrderedDict
from datetime import datetime
import os.path
# The Grid class hierarchy can be used for reading, writing and manipulating various kinds of 2D grid formats (GMT grids), or multi-layer 2D grid formats (ShakeMap).
#######MODIFY THIS TO REFLECT THE LOCATION OF A GLOBAL GRID ON YOUR SYSTEM####
shakefile = '/Users/mhearne/data/shakemaps/northridge.xml'
##############################################################################
def map2DGrid(ax,grid,tstr,isLeft=False):
    """
    Draw one 2D grid layer as a pcolormesh on a Lambert conformal Basemap.

    ax     -- matplotlib Axes to draw into
    grid   -- Grid2D object (must provide getBounds() and getData())
    tstr   -- subplot title
    isLeft -- when True, put latitude labels on the left edge (used for the
              left-most panel in a row so labels are not repeated)
    """
    xmin,xmax,ymin,ymax = grid.getBounds()
    pdata = grid.getData()
    nr,nc = pdata.shape
    # build a lon/lat coordinate mesh matching the grid shape
    lonrange = np.linspace(xmin,xmax,num=nc)
    latrange = np.linspace(ymin,ymax,num=nr)
    lon,lat = np.meshgrid(lonrange,latrange)
    # center the Lambert conformal projection on the grid
    latmean = np.mean([ymin,ymax])
    lonmean = np.mean([xmin,xmax])
    m = Basemap(llcrnrlon=xmin,llcrnrlat=ymin,urcrnrlon=xmax,urcrnrlat=ymax,\
                rsphere=(6378137.00,6356752.3142),\
                resolution='i',area_thresh=1000.,projection='lcc',\
                lat_1=latmean,lon_0=lonmean,ax=ax)
    # draw coastlines and political boundaries.
    m.drawcoastlines()
    m.drawcountries()
    m.drawstates()
    # graticule at one-degree spacing
    lons = np.arange(xmin,xmax,1.0)
    lats = np.arange(ymin,ymax,1.0)
    if isLeft:
        labels = labels=[1,0,0,0]     # (redundant chained assignment, kept as-is)
    else:
        labels = labels=[0,0,0,0]
    m.drawparallels(lats,labels=labels,color='white',fmt='%.1f') # draw parallels
    m.drawmeridians(lons,labels=[0,0,0,1],color='white',fmt='%.1f') # draw meridians
    # flipud: the grid data appears to be stored top-down relative to the
    # latitude order produced by meshgrid above — TODO confirm
    pmesh = m.pcolormesh(lon,lat,np.flipud(grid.getData()),latlon=True)
    # NOTE(review): plt.hold() was removed in matplotlib 3.0; this notebook
    # targets a Python 2 / older matplotlib stack — confirm before upgrading.
    plt.hold(True)
    ax.set_title(tstr)
    m.colorbar(pmesh)
#ignore warnings that can reveal directory structure
import warnings
warnings.simplefilter("ignore")
#
# Load the full ShakeMap and plot each layer (PGA, PGV, MMI) side by side.
shakegrid = ShakeGrid.load(shakefile)
pgagrid = shakegrid.getLayer('pga')
pgvgrid = shakegrid.getLayer('pgv')
mmigrid = shakegrid.getLayer('mmi')
fig,(ax0,ax1,ax2) = plt.subplots(nrows=1,ncols=3,figsize=(12,6))
fig.tight_layout()
map2DGrid(ax0,pgagrid,'Full PGA',isLeft=True)
map2DGrid(ax1,pgvgrid,'Full PGV')
map2DGrid(ax2,mmigrid,'Full MMI')
# Python 2 print statement (this notebook's kernel is python2)
print pgagrid.getGeoDict()
# Shrink the bounds by a quarter of the span on each side, then reload only
# that subset of the ShakeMap.
geodict = ShakeGrid.getFileGeoDict(shakefile)
#bring in the shakemap by a half dimension (quarter on each side)
lonrange = geodict['xmax'] - geodict['xmin']
latrange = geodict['ymax'] - geodict['ymin']
geodict['xmin'] = geodict['xmin'] + lonrange/4.0
geodict['xmax'] = geodict['xmax'] - lonrange/4.0
geodict['ymin'] = geodict['ymin'] + latrange/4.0
geodict['ymax'] = geodict['ymax'] - latrange/4.0
shakegrid = ShakeGrid.load(shakefile,samplegeodict=geodict)
pgagrid = shakegrid.getLayer('pga')
pgvgrid = shakegrid.getLayer('pgv')
mmigrid = shakegrid.getLayer('mmi')
fig,(ax0,ax1,ax2) = plt.subplots(nrows=1,ncols=3,figsize=(12,6))
fig.tight_layout()
map2DGrid(ax0,pgagrid,'Trimmed PGA',isLeft=True)
map2DGrid(ax1,pgvgrid,'Trimmed PGV')
map2DGrid(ax2,mmigrid,'Trimmed MMI')
print pgagrid.getGeoDict()
# Sample an arbitrary explicit lat/lon box; cell sizes are taken from the
# file's own geodict.
fdict = ShakeGrid.getFileGeoDict(shakefile)
newdict = {'xmin':-120.0,
           'xmax':-118.0,
           'ymin':33.0,
           'ymax':35.0,
           'xdim':fdict['xdim'],
           'ydim':fdict['ydim']}
shakegrid = ShakeGrid.load(shakefile,samplegeodict=newdict)
pgagrid = shakegrid.getLayer('pga')
pgvgrid = shakegrid.getLayer('pgv')
mmigrid = shakegrid.getLayer('mmi')
fig,(ax0,ax1,ax2) = plt.subplots(nrows=1,ncols=3,figsize=(12,6))
fig.tight_layout()
map2DGrid(ax0,pgagrid,'Partial PGA',isLeft=True)
map2DGrid(ax1,pgvgrid,'Partial PGV')
map2DGrid(ax2,mmigrid,'Partial MMI')
print pgagrid.getGeoDict()
# Creating a ShakeMap
# Build a tiny 4x4 synthetic ShakeMap from scratch: three data layers plus
# the event/shakemap/uncertainty metadata dictionaries the format requires.
pga = np.arange(0,16,dtype=np.float32).reshape(4,4)
pgv = np.arange(1,17,dtype=np.float32).reshape(4,4)
mmi = np.arange(2,18,dtype=np.float32).reshape(4,4)
geodict = {'xmin':0.5,'ymax':3.5,'ymin':0.5,'xmax':3.5,'xdim':1.0,'ydim':1.0,'nrows':4,'ncols':4}
layers = OrderedDict()
layers['pga'] = pga
layers['pgv'] = pgv
layers['mmi'] = mmi
shakeDict = {'event_id':'usabcd1234',
             'shakemap_id':'usabcd1234',
             'shakemap_version':1,
             'code_version':'4.0',
             'process_timestamp':datetime.utcnow(),
             'shakemap_originator':'us',
             'map_status':'RELEASED',
             'shakemap_event_type':'ACTUAL'}
eventDict = {'event_id':'usabcd1234',
             'magnitude':7.6,
             'depth':1.4,
             'lat':2.0,
             'lon':2.0,
             'event_timestamp':datetime.utcnow(),
             'event_network':'us',
             'event_description':'sample event'}
uncDict = {'pga':(0.0,0),
           'pgv':(0.0,0),
           'mmi':(0.0,0)}
shake = ShakeGrid(layers,geodict,eventDict,shakeDict,uncDict)
# round-trip: save, reload, then delete the temporary file
shake.save('grid.xml',version=1)
shake2 = ShakeGrid.load('grid.xml')
os.remove('grid.xml')
| notebooks/ShakeMap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # References:
# 1. Deep Learning for Predictive Maintenance https://github.com/Azure/lstms_for_predictive_maintenance/blob/master/Deep%20Learning%20Basics%20for%20Predictive%20Maintenance.ipynb
#
# 2. Predictive Maintenance: Step 2A of 3, train and evaluate regression models https://gallery.cortanaintelligence.com/Experiment/Predictive-Maintenance-Step-2A-of-3-train-and-evaluate-regression-models-2
#
# 3. <NAME> and <NAME> (2008). "Turbofan Engine Degradation Simulation Data Set", NASA Ames Prognostics Data Repository (https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/#turbofan), NASA Ames Research Center, Moffett Field, CA
#
# 4. Understanding LSTM Networks http://colah.github.io/posts/2015-08-Understanding-LSTMs/
#
# This notebook was forked from https://github.com/umbertogriffo/Predictive-Maintenance-using-LSTM
# +
import keras
import keras.backend as K
from keras.layers.core import Activation
from keras.models import Sequential,load_model
from keras.layers import Dense, Dropout, LSTM
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn import preprocessing
# Setting seed for reproducibility
np.random.seed(1234)
# NOTE(review): this only assigns an ordinary Python variable; it does NOT
# set the PYTHONHASHSEED environment variable, so Python's string hash
# randomization is unaffected (that would require os.environ before startup).
PYTHONHASHSEED = 0
# define path to save model
model_path = './Output/regression_model.h5'
##################################
# Data Ingestion
##################################
# read training data - It is the aircraft engine run-to-failure data.
train_df = pd.read_csv('./Dataset/PM_train.txt', sep=" ", header=None)
# drop the two trailing all-NaN columns (presumably artifacts of a trailing
# separator on each line of the raw txt files — confirm against the data)
train_df.drop(train_df.columns[[26, 27]], axis=1, inplace=True)
# 1 engine id + 1 cycle counter + 3 operational settings + 21 sensor channels
train_df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                    's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                    's15', 's16', 's17', 's18', 's19', 's20', 's21']
train_df = train_df.sort_values(['id','cycle'])
# read test data - It is the aircraft engine operating data without failure events recorded.
test_df = pd.read_csv('./Dataset/PM_test.txt', sep=" ", header=None)
test_df.drop(test_df.columns[[26, 27]], axis=1, inplace=True)
test_df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
# read ground truth data - It contains the information of true remaining cycles for each engine in the testing data.
truth_df = pd.read_csv('./Dataset/PM_truth.txt', sep=" ", header=None)
truth_df.drop(truth_df.columns[[1]], axis=1, inplace=True)
# +
##################################
# Prepare training data
##################################
# Data Labeling - generate column RUL (Remaining Useful Life or Time to Failure)
rul = pd.DataFrame(train_df.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
train_df = train_df.merge(rul, on=['id'], how='left')
# RUL = cycles remaining until this engine's last recorded cycle
train_df['RUL'] = train_df['max'] - train_df['cycle']
train_df.drop('max', axis=1, inplace=True)
# generate label columns for training data
# we will only make use of "label1" for binary classification,
# while trying to answer the question: is a specific engine going to fail within w1 cycles?
w1 = 30
w0 = 15
train_df['label1'] = np.where(train_df['RUL'] <= w1, 1, 0 )
train_df['label2'] = train_df['label1']
train_df.loc[train_df['RUL'] <= w0, 'label2'] = 2
# MinMax normalization (from 0 to 1)
# 'cycle' itself is excluded from scaling below (it is used for labeling);
# 'cycle_norm' is a copy that will be scaled and used as a feature
train_df['cycle_norm'] = train_df['cycle']
cols_normalize = train_df.columns.difference(['id','cycle','RUL','label1','label2'])
min_max_scaler = preprocessing.MinMaxScaler()
norm_train_df = pd.DataFrame(min_max_scaler.fit_transform(train_df[cols_normalize]),
                             columns=cols_normalize,
                             index=train_df.index)
join_df = train_df[train_df.columns.difference(cols_normalize)].join(norm_train_df)
train_df = join_df.reindex(columns = train_df.columns)
#train_df.to_csv('../../Dataset/PredictiveManteinanceEngineTraining.csv', encoding='utf-8',index = None)
# +
##################################
# Prepare test data
##################################
# MinMax normalization (from 0 to 1)
# NOTE: reuses the scaler fitted on the training data (transform, not fit_transform)
test_df['cycle_norm'] = test_df['cycle']
norm_test_df = pd.DataFrame(min_max_scaler.transform(test_df[cols_normalize]),
                            columns=cols_normalize,
                            index=test_df.index)
test_join_df = test_df[test_df.columns.difference(cols_normalize)].join(norm_test_df)
test_df = test_join_df.reindex(columns = test_df.columns)
test_df = test_df.reset_index(drop=True)
print(test_df.head())
# We use the ground truth dataset to generate labels for the test data.
# generate column max for test data
rul = pd.DataFrame(test_df.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
truth_df.columns = ['more']
# truth file row i holds the remaining cycles for engine id i+1
truth_df['id'] = truth_df.index + 1
truth_df['max'] = rul['max'] + truth_df['more']
truth_df.drop('more', axis=1, inplace=True)
# generate RUL for test data
test_df = test_df.merge(truth_df, on=['id'], how='left')
test_df['RUL'] = test_df['max'] - test_df['cycle']
test_df.drop('max', axis=1, inplace=True)
# generate label columns w0 and w1 for test data
test_df['label1'] = np.where(test_df['RUL'] <= w1, 1, 0 )
test_df['label2'] = test_df['label1']
test_df.loc[test_df['RUL'] <= w0, 'label2'] = 2
#test_df.to_csv('../../Dataset/PredictiveManteinanceEngineValidation.csv', encoding='utf-8',index = None)
# pick a large window size of 50 cycles
# (test engines with fewer than 50 cycles are dropped at evaluation time)
sequence_length = 50
# function to reshape features into (samples, time steps, features)
def gen_sequence(id_df, seq_length, seq_cols):
    """Yield fixed-length sliding windows over one engine's records.

    id_df      -- DataFrame holding the rows of a single engine id
    seq_length -- window length in cycles
    seq_cols   -- feature columns to include, in order

    Only windows that fit entirely are produced — no padding — so a frame
    with R rows yields R - seq_length arrays, each of shape
    (seq_length, len(seq_cols)).  Frames shorter than the window yield
    nothing; callers must drop such engines at test time.
    """
    feature_matrix = id_df[seq_cols].values
    total_rows = feature_matrix.shape[0]
    # window k covers rows [k, k + seq_length); e.g. for 192 rows and a
    # window of 50 this produces 142 windows: rows 0-49, 1-50, ..., 141-190
    for first_row in range(total_rows - seq_length):
        yield feature_matrix[first_row:first_row + seq_length, :]
# pick the feature columns
sensor_cols = ['s' + str(i) for i in range(1,22)]   # s1..s21
sequence_cols = ['setting1', 'setting2', 'setting3', 'cycle_norm']
sequence_cols.extend(sensor_cols)
# TODO for debug
# val is a list of 192 - 50 = 142 bi-dimensional array (50 rows x 25 columns)
val=list(gen_sequence(train_df[train_df['id']==1], sequence_length, sequence_cols))
print(len(val))
# generator for the sequences
# transform each id of the train dataset in a sequence
# NOTE(review): `id` shadows the builtin of the same name inside this genexp.
seq_gen = (list(gen_sequence(train_df[train_df['id']==id], sequence_length, sequence_cols))
           for id in train_df['id'].unique())
# generate sequences and convert to numpy array
seq_array = np.concatenate(list(seq_gen)).astype(np.float32)
print(seq_array.shape)
# function to generate labels
def gen_labels(id_df, seq_length, label):
    """Return the targets aligned with the windows from gen_sequence.

    id_df      -- DataFrame holding the rows of a single engine id
    seq_length -- window length used by gen_sequence
    label      -- list of target column name(s), e.g. ['RUL']

    The first seq_length rows have no complete window ending at them, so
    their labels are dropped; the result has shape
    (num_rows - seq_length, len(label)) and lines up row-for-row with the
    windows gen_sequence yields for the same frame.
    """
    target_matrix = id_df[label].values
    # discard the first seq_length labels, keep the remainder in order
    return target_matrix[seq_length:, :]
# generate labels
# one (n_windows, 1) array of RUL targets per engine, stacked in engine order
label_gen = [gen_labels(train_df[train_df['id']==id], sequence_length, ['RUL'])
             for id in train_df['id'].unique()]
label_array = np.concatenate(label_gen).astype(np.float32)
label_array.shape
# +
##################################
# Modeling
##################################
def r2_keras(y_true, y_pred):
    """Coefficient of determination (R^2), usable as a Keras metric.

    Computed as 1 - SS_res / SS_tot, with K.epsilon() added to the
    denominator to guard against a zero-variance batch.
    """
    residual = K.sum(K.square(y_true - y_pred))
    total = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - residual / (total + K.epsilon())
# +
# Next, we build a deep network.
# The first layer is an LSTM layer with 100 units followed by another LSTM layer with 50 units.
# Dropout is also applied after each LSTM layer to control overfitting.
# Final layer is a Dense output layer with single unit and linear activation since this is a regression problem.
nb_features = seq_array.shape[2]   # features per timestep
nb_out = label_array.shape[1]      # single regression target (RUL)
model = Sequential()
model.add(LSTM(
         input_shape=(sequence_length, nb_features),
         units=100,
         return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
          units=50,
          return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=nb_out))
model.add(Activation("linear"))
model.compile(loss='mean_squared_error', optimizer='rmsprop',metrics=['mae',r2_keras])
print(model.summary())
# fit the network
# NOTE(review): with epochs=5 and patience=10, EarlyStopping can never fire
# here; only the ModelCheckpoint (best val_loss -> model_path) has an effect.
history = model.fit(seq_array, label_array, epochs=5, batch_size=10, validation_split=0.05, verbose=2,
          callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min'),
                       keras.callbacks.ModelCheckpoint(model_path,monitor='val_loss', save_best_only=True, mode='min', verbose=0)]
          )
# list all data in history
print(history.history.keys())
# +
# Save the training history
import pickle

# Persist the Keras History dict.  Using `with` guarantees the handle is
# closed even if pickling fails (the original leaked an open file, bound to
# a variable misleadingly named `filename`).
with open("./Output/rul_point_regression_model_history", "wb") as history_file:
    pickle.dump(history.history, history_file)

# Round-trip check: reload what was just written.
with open('./Output/rul_point_regression_model_history', 'rb') as history_file:
    saved_history = pickle.load(history_file)
# -
# Save model to disk
# serialize model to JSON
model_json = model.to_json()
with open("./Output/rul_point_regression_model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("./Output/rul_point_regression_model.h5")
print("Saved model to disk")
# +
###########################################
# Load model and training history from disk
###########################################
import pickle

# Reload the pickled history dict saved above; `with` closes the handle
# even if unpickling fails (the original used explicit open/close).
with open("./Output/rul_point_regression_model_history", "rb") as history_file:
    saved_history = pickle.load(history_file)

# Load model from disk
from keras.models import model_from_json

# load json and create model: architecture comes from the JSON file,
# weights from the separate HDF5 file
with open("./Output/rul_point_regression_model.json", "r") as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("./Output/rul_point_regression_model.h5")
print("Loaded model from disk")
# +
# summarize history for R^2
fig_acc = plt.figure(figsize=(10, 5))
plt.plot(history.history['r2_keras'])
plt.plot(history.history['val_r2_keras'])
plt.title('model r^2')
plt.ylabel('R^2')
plt.xlabel('epoch')
# the 'test' curve is really the 5% validation split from model.fit
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("./Output/model_r2.png")
# summarize history for MAE
fig_acc = plt.figure(figsize=(10, 10))
# NOTE(review): these history key names are Keras-version dependent (newer
# releases use 'mae') — confirm against print(history.history.keys()) above.
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('model MAE')
plt.ylabel('MAE')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("./Output/model_mae.png")
# summarize history for Loss
fig_acc = plt.figure(figsize=(10, 5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("./Output/model_regression_loss.png")
# training metrics
# scores = [loss, mae, r2_keras], per the metrics order passed to compile()
scores = model.evaluate(seq_array, label_array, verbose=1, batch_size=200)
print('\nMAE: {}'.format(scores[1]))
print('\nR^2: {}'.format(scores[2]))
y_pred = model.predict(seq_array,verbose=1, batch_size=200)
y_true = label_array
# despite the name, these are the *training* set predictions
test_set = pd.DataFrame(y_pred)
test_set.to_csv('./Output/submit_train.csv', index = None)
# +
# Re-read the raw CMAPSS FD001 test/truth files and rebuild the test labels
# (same steps as the "Prepare test data" cell above).
truth_df = pd.read_csv('./Dataset/CMAPSSData/RUL_FD001.txt', sep=" ", header=None)
truth_df.drop(truth_df.columns[[1]], axis=1, inplace=True)
test_df = pd.read_csv('./Dataset/CMAPSSData/test_FD001.txt', sep=" ", header=None)
test_df.drop(test_df.columns[[26, 27]], axis=1, inplace=True)
test_df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
test_df['cycle_norm'] = test_df['cycle']
# reuse the scaler fitted on the training data
norm_test_df = pd.DataFrame(min_max_scaler.transform(test_df[cols_normalize]),
                            columns=cols_normalize,
                            index=test_df.index)
test_join_df = test_df[test_df.columns.difference(cols_normalize)].join(norm_test_df)
test_df = test_join_df.reindex(columns = test_df.columns)
test_df = test_df.reset_index(drop=True)
print(test_df.head())
# We use the ground truth dataset to generate labels for the test data.
# generate column max for test data
rul = pd.DataFrame(test_df.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
truth_df.columns = ['more']
truth_df['id'] = truth_df.index + 1
truth_df['max'] = rul['max'] + truth_df['more']
truth_df.drop('more', axis=1, inplace=True)
# generate RUL for test data
test_df = test_df.merge(truth_df, on=['id'], how='left')
test_df['RUL'] = test_df['max'] - test_df['cycle']
test_df.drop('max', axis=1, inplace=True)
# generate label columns w0 and w1 for test data
test_df['label1'] = np.where(test_df['RUL'] <= w1, 1, 0 )
test_df['label2'] = test_df['label1']
test_df.loc[test_df['RUL'] <= w0, 'label2'] = 2
# +
##################################
# EVALUATE ON TEST DATA
##################################
# We pick the last sequence for each id in the test data
# (engines with fewer than sequence_length cycles are dropped)
seq_array_test_last = [test_df[test_df['id']==id][sequence_cols].values[-sequence_length:]
                       for id in test_df['id'].unique() if len(test_df[test_df['id']==id]) >= sequence_length]
seq_array_test_last = np.asarray(seq_array_test_last).astype(np.float32)
print("seq_array_test_last")
#print(seq_array_test_last)
print(seq_array_test_last.shape)
# Similarly, we pick the labels
#print("y_mask")
# y_mask filters the labels the same way the sequences were filtered above
y_mask = [len(test_df[test_df['id']==id]) >= sequence_length for id in test_df['id'].unique()]
label_array_test_last = test_df.groupby('id')['RUL'].nth(-1)[y_mask].values
label_array_test_last = label_array_test_last.reshape(label_array_test_last.shape[0],1).astype(np.float32)
print(label_array_test_last.shape)
print("label_array_test_last")
print(label_array_test_last)
# if best iteration's model was saved then load and use it
# NOTE(review): `estimator` is only bound inside this if; the lines below
# raise NameError when model_path does not exist.
if os.path.isfile(model_path):
    estimator = load_model(model_path,custom_objects={'r2_keras': r2_keras})
# test metrics
scores_test = estimator.evaluate(seq_array_test_last, label_array_test_last, verbose=2)
print('\nMAE: {}'.format(scores_test[1]))
print('\nR^2: {}'.format(scores_test[2]))
y_pred_test = estimator.predict(seq_array_test_last)
y_true_test = label_array_test_last
test_set = pd.DataFrame(y_pred_test)
test_set.to_csv('./Output/submit_test.csv', index = None)
# Plot in blue color the predicted data and in green color the
# actual data to verify visually the accuracy of the model.
fig_verify = plt.figure(figsize=(10, 5))
plt.plot(y_pred_test, color="blue")
plt.plot(y_true_test, color="green")
plt.title('prediction')
plt.ylabel('Remaining Useful Life')
plt.xlabel('cycles')
plt.legend(['predicted', 'actual data'], loc='upper left')
plt.show()
fig_verify.savefig("./Output/model_regression_verify.png")
# -
| notebooks/jupyter/LSTM predictions for Remaining Useful Life.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Task 3
# # Imports
# +
import json
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import plotly.express as px
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
from statsmodels.tsa.api import seasonal_decompose
import matplotlib as mpl
import joblib
# -
# ## Part 1:
# Recall the final goal of this project: we want to build and deploy ensemble machine learning models in the cloud, where the features are the outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud.
#
# **Your tasks:**
#
# 1. Read the data CSV from your s3 bucket.
# 2. Drop rows with nans.
# 3. Split the data into train (80%) and test (20%) portions with `random_state=123`.
# 4. Carry out EDA of your choice on the train split.
# 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column.
# 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models?
#
# > Recall that individual columns in the data are predictions of different climate models.
# ### Step 1: Read data
# +
## You could download it from your bucket, or you can use the file that I have in my bucket.
## You should be able to access it from my bucket using your key and secret
# aws_credentials ={"key": "","secret": ""}
# Credentials are kept in a local file to avoid accidentally committing them.
# `with` closes the file handle (the original left it open).
with open("keys.txt", "r") as f:
    aws_credentials = json.loads(f.read())
# storage_options forwards the key/secret to s3fs for the s3:// URL
df = pd.read_csv("s3://mds-s3-student34/output/ml_data_SYD.csv", index_col=0, parse_dates=True, storage_options=aws_credentials)
# -
# ### Step 2: Drop rows with nans
df = df.dropna()  # drop rows where any climate model output or the observation is missing
# ### Step 3: Split data
train_df, test_df = train_test_split(df, test_size=0.2, random_state=123, shuffle=False) # we are dealing with timeseries hence avoiding shuffling the data when splitting, although it should not matter much in this case
# ### Step 4: EDA
# EDA
# daily observed rainfall over the training period
train_df["Observed"].plot.line(xlabel="Time", ylabel="Rainfall", legend=False, figsize=(20,5));
# resample the data into months to have better view of trend and seasonality
monthly_data = train_df[["Observed"]].resample("1M").mean()
# annotate the monthly frequency on the index (the original wrote `.fred`,
# a typo that silently set a meaningless attribute instead of `.freq`)
monthly_data.index.freq = "M"
monthly_data.plot();
# decompose the timeseries into trend and seasonality (12-month period)
model = seasonal_decompose(monthly_data[["Observed"]], model="additive", period=12)
with mpl.rc_context():
    mpl.rc("figure", figsize=(7, 15))
    model.plot()
    plt.tight_layout()
train_df.describe()
# **Highlights:**
# - There is seasonality in the timeseries for every 12 months, which makes sense
# - There is no trend detected in the data
# - The prediction varies considerably among models
# ### Step 5: Train ensemble machine learning model using RandomForestRegressor
# features: per-climate-model rainfall predictions; target: observed rainfall
X_train, y_train = train_df.drop(columns="Observed"), train_df["Observed"]
X_test, y_test = test_df.drop(columns="Observed"), test_df["Observed"]
# +
# Train ensemble ML model using `RandomForestRegressor`
import os
test_mode = False # set test_mode to True to avoid re-training the model during testing stage
if (test_mode and os.path.exists("first_model.joblib")):
    model = joblib.load("first_model.joblib")
else:
    model = RandomForestRegressor(random_state=123)
    model.fit(X_train, y_train)
    dump(model, "first_model.joblib") # save model so that we can reload during testing
# -
# Evaluate
# squared=False -> root mean squared error
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ### Step 6: Results discussion
# create a column to store the prediction of the ensemble model
test_df = test_df.assign(Ensemble = model.predict(X_test))
test_df.head()
# +
# visualize the predictions of all models compared to the observed data
# there are too many models to have a clear plot hence disabling this code
# r_test_df = test_df.reset_index()
# result_df = pd.melt(r_test_df, value_vars = r_test_df.columns[1:],
#                     var_name="Model", value_name="Rainfall", id_vars = ["time"]).set_index("time")
# px.line(result_df, y="Rainfall", color="Model", width=800)
# -
# create a data frame to store MSE of all models compared to the observed data
# NOTE(review): squared=False means these are actually RMSE values, despite
# the "MSE" naming used for the dict and the column below.
mse_results = {}
cols = test_df.columns.to_list() # get the list of all model names
cols.remove("Observed")
for c in cols:
    mse_results[c] = mean_squared_error(test_df["Observed"], test_df[c], squared=False) # store RMSE of the current model to the dictionary
mse_results_df = pd.DataFrame.from_dict(mse_results, orient="index", columns=["MSE"]).sort_values(by="MSE")
mse_results_df
# > results discussion - Are you getting better results with ensemble models compared to the individual climate models?
# **Comments**: Ensemble machine learning model using RandomForestRegressor generates better predictions in terms of lower mean squared errors in comparison to the individual climate models.
# ## Part 2:
# ### Preparation for deploying model next week
# We’ve found ```n_estimators=100, max_depth=5``` to be the best hyperparameter settings with MLlib (from the task 4 from milestone3), here we then use the same hyperparameters to train a scikit-learn model.
# NOTE(review): unlike the model above, no random_state is set here, so the
# deployed model is not exactly reproducible across runs.
model = RandomForestRegressor(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
# **Uploaded `model.joblib` to s3.**
| notebooks/Milestone3-Task3.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Q#
# language: qsharp
# name: iqsharp
# ---
# # Deutsch–Jozsa Algorithm Tutorial
#
# The **Deutsch–Jozsa algorithm** is one of the most famous algorithms in quantum computing. The problem it solves has little practical value, but the algorithm itself is one of the earliest examples of a quantum algorithm that is exponentially faster than any possible deterministic algorithm for the same problem. It is also relatively simple to explain and illustrates several very important concepts (such as quantum oracles). As such, Deutsch–Jozsa algorithm is part of almost every introductory course on quantum computing.
#
# This tutorial will:
# * introduce you to the problem solved by the Deutsch–Jozsa algorithm and walk you through the classical solution to it,
# * give you a brief introduction to quantum oracles,
# * describe the idea behind the Deutsch–Jozsa algorithm and walk you through the math for it,
# * teach you how to implement the algorithm in the Q# programming language,
# * and finally help you to run your implementation of the algorithm on several quantum oracles to see for yourself how the algorithm works!
#
# Let's go!
# To begin, first prepare this notebook for execution (if you skip the first step, you'll get "Syntax does not match any known patterns" error when you try to execute Q# code in the next cells; if you skip the second step, you'll get "Invalid test name" error):
%package Microsoft.Quantum.Katas::0.7.1905.3109
# > The package versions in the output of the cell above should always match. If you are running the Notebooks locally and the versions do not match, please install the IQ# version that matches the version of the `Microsoft.Quantum.Katas` package.
# > <details>
# > <summary><u>How to install the right IQ# version</u></summary>
# > For example, if the version of `Microsoft.Quantum.Katas` package above is 0.1.2.3, the installation steps are as follows:
# >
# > 1. Stop the kernel.
# > 2. Uninstall the existing version of IQ#:
# > dotnet tool uninstall microsoft.quantum.iqsharp -g
# > 3. Install the matching version:
# > dotnet tool install microsoft.quantum.iqsharp -g --version 0.1.2.3
# > 4. Reinstall the kernel:
# > dotnet iqsharp install
# > 5. Restart the Notebook.
# > </details>
#
%workspace reload
# # Part I. Problem Statement and Classical Algorithm
#
# ## The problem
#
# You are given a classical function $f(x): \{0, 1\}^N \to \{0, 1\}$. You are guaranteed that the function $f$ is
# * either *constant* (has the same value for all inputs)
# * or *balanced* (has value 0 for half of the inputs and 1 for the other half of the inputs).
#
# The task is to figure out whether the function is constant or balanced.
#
# ## Examples
#
# * $f(x) \equiv 0$ or $f(x) \equiv 1$ are examples of constant functions (and they are actually the only constant functions in existence).
# * $f(x) = x \text{ mod } 2$ (the least significant bit of $x$) or $f(x) = 1 \text{ if the binary notation of }x \text{ has odd number of 1s and 0 otherwise}$ are examples of balanced functions.
# Indeed, for both these functions you can check that for every possible input $x$ for which $f(x) = 0$ there exists an input $x^\prime$ (equal to $x$ with the least significant bit flipped) such that $f(x^\prime) = 1$, and vice versa, which means that the function is balanced.
# There exist more complicated examples of balanced functions, but we will not need to consider them for this tutorial.
#
# ## Implementing classical functions in Q#
#
# Here is the implementation of these functions in Q#; it is pretty self-descriptory, since the functions are not only very simple but also classical.
# +
// Function 1. f(x) = 0 (constant: same output for every input).
function Function_Zero (x : Int) : Int {
    return 0;
}

// Function 2. f(x) = 1 (constant).
function Function_One (x : Int) : Int {
    return 1;
}

// Function 3. f(x) = x mod 2 (least significant bit of x; balanced).
function Function_Xmod2 (x : Int) : Int {
    return x % 2;
}

// Function 4. f(x) = 1 if the binary notation of x has odd number of 1s, and 0 otherwise (balanced).
function Function_OddNumberOfOnes (x : Int) : Int {
    mutable nOnes = 0;      // running count of 1 bits seen so far
    mutable xBits = x;      // remaining bits of x, halved (shifted right) each pass
    while (xBits > 0) {
        if (xBits % 2 > 0) {
            set nOnes += 1; // the current lowest bit is 1
        }
        set xBits /= 2;     // drop the lowest bit
    }
    return nOnes % 2;       // parity of the number of 1 bits
}
# -
# ## <span style="color:blue">Exercise 1</span>: Implement a classical function in Q#
#
# Try to implement a similar classical function in Q#!
#
# **Inputs:**
# 1. An integer $x$.
# 2. The number of bits in the input $N$ ($1 \le N \le 5$, $0 \le x \le 2^N-1$).
#
# **Output:** Return $f(x) = \text{the most significant bit of }x$.
#
# > Useful documentation: [Q# Numeric Expressions](https://docs.microsoft.com/quantum/language/expressions#numeric-expressions).
# +
%kata E1_ClassicalFunction_Test
function Function_MostSignificantBit (x : Int, N : Int) : Int {
    // Exercise 1 template: return the most significant bit of the N-bit input x,
    // i.e., 1 if x >= 2^(N-1), and 0 otherwise (1 <= N <= 5, 0 <= x <= 2^N - 1).
    // ...
}
# -
# ## Classical algorithm
#
# If we solve this problem classically, how many calls to the given function will we need?
#
# The first call will give us no information - regardless of whether it returns 0 or 1, the function could still be constant or balanced.
# In the best case scenario the second call will return a different value and we'll be able to conclude that the function is balanced in just <span style="color:red">$2$</span> calls.
# However, if we get the same value for the first two calls, we'll have to keep querying the function until we either get a different value or complete <span style="color:red">$2^{N-1}+1$</span> queries that all return the same value - in the latter case we'll know for certain that the function is constant.
#
# ## <span style="color:blue">Exercise 2</span>: Implement the classical algorithm!
#
# Q# is a domain-specific language, so it is not designed to handle arbitrary classical computations. However, this classical algorithm is so simple that you can easily implement it in Q#. Try it!
#
# **Inputs:**
# 1. The number of bits in the input $N$ ($1 \le N \le 5$).
# 2. The "black box" function that evaluates $f(x)$ on any given input $x \in [0, 2^N-1]$.
# You are guaranteed that the function implemented by the black box is either constant or balanced.
#
# **Goal:** Return `true` if the function is constant, or `false` if it is balanced.
#
# > Useful documentation: [Q# statements](https://docs.microsoft.com/quantum/language/statements).
# +
%kata E2_ClassicalAlgorithm_Test
operation IsFunctionConstant_Classical (N : Int, f : (Int -> Int)) : Bool {
    // Exercise 2 template: query f(x) classically until the answer is certain
    // (see Part I) and return true if f is constant, false if it is balanced.
    // ...
}
# -
# # Part II. Quantum Oracles
#
# ## Definition
# A quantum oracle is a "black box" operation which is used as input to another algorithm. This operation is implemented in a way which allows us to perform calculations not only on individual inputs, but also on superpositions of inputs.
#
# > This is *not* the same as being able to evaluate the function on all inputs at once, since you will not be able to extract the evaluation results!
#
# Oracles are often defined using a classical function; in the case of the Deutsch–Jozsa algorithm the function $f : \{0, 1\}^N \to \{0, 1\}$ takes an $N$-bit binary input and produces a 1-bit binary output.
#
# The oracle has to act on quantum states instead of classical values.
# To enable this, integer input $x$ is represented in binary $x = (x_{0}, x_{1}, \dots, x_{N-1})$,
# and encoded into an $N$-qubit register: $|\vec{x} \rangle = |x_{0} \rangle \otimes |x_{1} \rangle \otimes \cdots \otimes |x_{N-1} \rangle$.
#
# The type of oracles used in this tutorial are called *phase oracles*. A phase oracle $U_f$ encodes the value of the classical function $f$ it implements in the phase of the qubit state as follows:
#
# $$U_f |\vec{x} \rangle = (-1)^{f(x)} |\vec{x} \rangle$$
#
# In our case $f$ can return only two values, 0 or 1, which result in no phase change or adding a $-1$ phase, respectively.
#
# The effect of such an oracle on any single basis state is not particularly interesting: it just adds a global phase which is not something you can observe. However, if you apply this oracle to a superposition of basis states, its effect becomes noticeable.
# Remember that quantum operations are linear: if you define the effect of an operation on the basis states, you'll be able to deduce its effect on superposition states (which are just linear combinations of the basis states) using its linearity.
#
# > ## Example: Deutsch algorithm
# >
# > Consider, for example, the case of $N = 1$: there are two possible inputs to the function, $|0\rangle$ and $|1\rangle$, and we can apply the oracle to their superposition:
# >
# > $$U_f \left( \frac{1}{\sqrt2} \big( |0\rangle + |1\rangle \big) \right)
# = \frac{1}{\sqrt2} \big( U_f |0\rangle + U_f |1\rangle \big)
# = \frac{1}{\sqrt2} \big( (-1)^{f(0)} |0\rangle + (-1)^{f(1)} |1\rangle \big)$$.
# >
# > * If $f(0) = f(1)$, the phases of the two basis states are the same, and the resulting state is $|+\rangle = \frac{1}{\sqrt2} \big( |0\rangle + |1\rangle \big)$ (up to a global phase).
# > * If $f(0) \neq f(1)$, the phases of the two basis states differ by a factor of $-1$, and the resulting state is $|-\rangle = \frac{1}{\sqrt2} \big( |0\rangle - |1\rangle \big)$ (up to a global phase).
# > * The states $|+\rangle$ and $|-\rangle$ can be distinguished using measurement: if you apply the H gate to each of them, you'll get $H|+\rangle = |0\rangle$ if $f(0) = f(1)$, or $H|-\rangle = |1\rangle$ if $f(0) \neq f(1)$. This means that one oracle call does not let you calculate both $f(0)$ and $f(1)$, but it allows you to figure out whether $f(0) = f(1)$.
# >
# > This is a special case of the Deutsch–Jozsa algorithm, called the Deutsch algorithm.
#
# ## Implementing oracles in Q#
#
# Now that we've discussed the mathematical definition of the oracles, let's take a look at how to implement oracles for some classical functions in Q#. We'll consider the same 4 functions we used as an example in the first section.
# ### $f(x) \equiv 0$
#
# This is the easiest function to implement: if $f(x) \equiv 0$, $U_f |x\rangle \equiv (-1)^0 |x\rangle = |x\rangle$.
# This means that $U_f$ is an identity - a transformation which does absolutely nothing!
# This is very easy to express in Q#:
// Phase oracle for f(x) = 0: U_f is the identity, so no gates are needed.
operation PhaseOracle_Zero (x : Qubit[]) : Unit {
    // Do nothing...
}
# ### $f(x) \equiv 1$
#
# The second constant function is slightly trickier: if $f(x) \equiv 1$, $U_f |x\rangle \equiv (-1)^1 |x\rangle = - |x\rangle$.
# Now $U_f$ is a negative identity, i.e., a transformation which applies a global phase of $-1$ to the state.
# A lot of algorithms just ignore the global phase accumulated in them, since it is not observable.
# However, if we want to be really meticulous, we can use Q# library operation [Microsoft.Quantum.Intrinsic.R](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.intrinsic.r) which performs a given rotation around the given axis.
# When called with `PauliI` axis, this operation applies a global phase; since it doesn't take the input into account, it can be applied to any qubit, for example, the first qubit of the input.
# +
// Open namespace where the library function PI() is defined
open Microsoft.Quantum.Math;

// Phase oracle for f(x) = 1: multiplies the whole state by a global phase of -1.
operation PhaseOracle_One (x : Qubit[]) : Unit {
    // Apply a global phase of -1.
    // R(PauliI, theta, q) applies a global phase independent of the qubit's state,
    // so it can act on any qubit of the input - here the first one.
    R(PauliI, 2.0 * PI(), x[0]);
}
# -
# ### $f(x) = x \text{ mod } 2$
#
# In this oracle we will finally need to use the input! The binary representation of $x$ is $x = (x_{0}, x_{1}, \dots, x_{N-1})$, with the least significant bit encoded in the last bit (stored in the last qubit of the input array): $f(x) = x_{N-1}$. Let's use this in the oracle effect expression:
#
# $$U_f |x\rangle = (-1)^{f(x)} |x\rangle = (-1)^{x_{N-1}} |x\rangle = |x_{0} \rangle \otimes \cdots \otimes |x_{N-2} \rangle \otimes (-1)^{x_{N-1}} |x_{N-1}\rangle$$
#
# This means that we only need to use the last qubit in the implementation: do nothing if it is $|0\rangle$ and apply a phase of $-1$ if it is $|1\rangle$. This is exactly the effect of the [Z gate](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.intrinsic.z); as a reminder,
#
# $$Z = \begin{bmatrix} 1 & 0 \\ 0 & -1\end{bmatrix}: Z |0\rangle = |0\rangle, Z |1\rangle = -|1\rangle$$
#
# Finally, the expression for the oracle is:
#
# $$U_f = \mathbb{1} \otimes \cdots \otimes \mathbb{1} \otimes Z$$
// Phase oracle for f(x) = x mod 2: the phase depends only on the least
// significant bit (stored in the last qubit), so one Z gate implements U_f.
operation PhaseOracle_Xmod2 (x : Qubit[]) : Unit {
    // Length(x) gives you the length of the array.
    // Array elements are indexed 0 through Length(x)-1, inclusive.
    Z(x[Length(x) - 1]);
}
# ### $f(x) = 1 \text{ if x has odd number of 1s, and 0 otherwise }$
#
# In this oracle the answer depends on all bits of the input. We can write $f(x)$ as follows:
#
# $$f(x) = \bigoplus_{k=0}^{N-1} x_k$$
#
# $$U_f |x\rangle = (-1)^{f(x)} |x\rangle = \bigotimes_{k=0}^{N-1} (-1)^{x_k} |x_{k}\rangle$$
#
# As we've seen in the previous oracle, this can be achieved by applying a Z gate to each qubit; you can use library function [ApplyToEach](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.canon.applytoeach) to apply a gate to each qubit of the array.
// Phase oracle for the parity function: a Z gate on every qubit multiplies the
// basis state by -1 once per 1 bit, i.e. by (-1)^(number of 1s in x).
operation PhaseOracle_OddNumberOfOnes (x : Qubit[]) : Unit {
    ApplyToEach(Z, x);
}
# ## <span style="color:blue">Exercise 3</span>: Implement a quantum oracle in Q#
#
# You're ready to try and write some actual quantum code! Implement a quantum oracle that corresponds to the classical function from exercise 1.
#
# **Input:** A register of $N$ qubits $x$, stored as an array.
#
# **Goal:** Add a phase of $-1$ to each basis state that has the most significant bit of $x$ equal to $1$, and do nothing to the rest of the basis states.
# Remember that the most significant bit of $x$ is stored in the first qubit of the array.
# +
%kata E3_QuantumOracle_Test
operation PhaseOracle_MostSignificantBit (x : Qubit[]) : Unit {
    // Exercise 3 template: add a -1 phase to each basis state whose most
    // significant bit (stored in the first qubit, x[0]) is 1; leave the rest unchanged.
    // ...
}
# -
# # Part III. Quantum Algorithm
#
# Now that we have figured out how the oracles work and looked at the Deutsch algorithm, we can get back to solving the big problem. We'll present the algorithm in detail step-by-step and summarize it in the end.
#
# ### Inputs
#
# You are given the number of bits in the oracle input $N$ and the oracle itself - a "black box" operation $U_f$ that implements a classical function $f(x)$. You are guaranteed that the function implemented by the oracle is either constant or balanced.
#
# ### The starting state
#
# The algorithm starts with $N$ qubits in the $|0...0\rangle = |0\rangle^{\otimes N}$ state.
#
# ### Step 1. Apply Hadamard transform to each qubit
#
# Applying the H gate to one qubit in the $|0\rangle$ state converts it to the $\frac{1}{\sqrt2} \big(|0\rangle + |1\rangle \big)$ state, which is an equal superposition of both basis states on one qubit.
#
# If we apply the H gate to each of the two qubits in the $|00\rangle$ state, we'll get
#
# $$(H \otimes H) |00\rangle = \big(H |0\rangle \big) \otimes \big(H |0\rangle\big) = \left(\frac{1}{\sqrt2} \big(|0\rangle + |1\rangle \big)\right) \otimes \left(\frac{1}{\sqrt2} \big(|0\rangle + |1\rangle \big)\right) = \frac{1}{2} \big(|00\rangle + |01\rangle + |10\rangle + |11\rangle \big)$$
#
# This is just an equal superposition of all basis states on two qubits!
# We can extend the same thinking to applying the H gate to each of the $N$ qubits in the $|0...0\rangle$ state to conclude that this transforms them into a state that is an equal superposition of all basis states on $N$ qubits.
#
# Mathematically the transformation "apply H gate to each of the $N$ qubits" can be denoted as $H^{\otimes N}$. After applying this transformation we'll get the following state:
#
# $$H^{\otimes N} |0\rangle^{\otimes N} = \big( H|0\rangle \big)^{\otimes N} = \left( \frac{1}{\sqrt2} \big(|0\rangle + |1\rangle \big) \right)^{\otimes N} = \frac{1}{\sqrt{2^N}} \sum_{x=0}^{2^N-1} |x\rangle$$
#
#
# ### Step 2. Apply the oracle
#
# This step is the only step in which we use the knowledge of the classical function, given to us as the quantum oracle.
# This step will keep the amplitudes of the basis states for which $f(x) = 0$ unchanged, and multiply the amplitudes of the basis states for which $f(x) = 1$ by $-1$.
#
# Here is an example of the way the amplitudes of the states will change. After the first step the amplitudes of all basis states were the same:
#
# 
#
# Once the oracle is applied, some of the amplitudes will change to negative ones:
#
# 
#
# Mathematically the results of oracle application can be written as follows:
#
# $$U_f \left(\frac{1}{\sqrt{2^N}} \sum_{x=0}^{2^N-1} |x\rangle \right) = \frac{1}{\sqrt{2^N}} \sum_{x=0}^{2^N-1} U_f|x\rangle = \frac{1}{\sqrt{2^N}} \sum_{x=0}^{2^N-1} (-1)^{f(x)} |x\rangle$$
#
# ### Step 3. Apply Hadamard transform to each qubit again
#
# In this step, let's not worry about the whole expression for the state of the qubits after applying the H gates to them; instead let's calculate only the resulting amplitude of the basis state $|0\rangle^{\otimes N}$.
#
# Consider one of the basis states $|x\rangle$ in the expression $\sum_{x=0}^{2^N-1} (-1)^{f(x)} |x\rangle$.
# It can be written as $|x\rangle = |x_{0} \rangle \otimes \cdots \otimes |x_{N-1}\rangle$, where each $|x_k\rangle$ is either $|0\rangle$ or $|1\rangle$.
# When we apply the H gates to $|x\rangle$, we'll get $H^{\otimes N} |x\rangle = H|x_{0} \rangle \otimes \cdots \otimes H|x_{N-1}\rangle$, where each term of the tensor product is either $H|0\rangle = \frac{1}{\sqrt2}\big(|0\rangle + |1\rangle \big) = |+\rangle$ or $H|1\rangle = \frac{1}{\sqrt2}\big(|0\rangle - |1\rangle \big) = |-\rangle$.
# If we open the brackets in this tensor product, we'll get a superposition of all $N$-qubit basis states, each of them with amplitude $\frac{1}{\sqrt{2^N}}$ or $-\frac{1}{\sqrt{2^N}}$ — and, since the amplitude of the $|0\rangle$ state in both $|+\rangle$ and $|-\rangle$ is positive, we know that the amplitude of the basis state $|0\rangle^{\otimes N}$ will end up positive, i.e., $\frac{1}{\sqrt{2^N}}$.
#
# Now we can calculate the amplitude of the $|0\rangle^{\otimes N}$ state in the expression $H^{\otimes N} \left( \frac{1}{\sqrt{2^N}} \sum_{x=0}^{2^N-1} (-1)^{f(x)} |x\rangle \right)$: in each of the $2^N$ terms of the sum its amplitude is $\frac{1}{\sqrt{2^N}}$; therefore, we get the total amplitude
#
# $$\frac{1}{\sqrt{2^N}} \sum_{x=0}^{2^N-1} (-1)^{f(x)} \frac{1}{\sqrt{2^N}} = \frac{1}{2^N} \sum_{x=0}^{2^N-1} (-1)^{f(x)}$$
#
# ### Step 4. Perform measurements and interpret the result
#
# So far we did not use the fact that the function we are given is constant or balanced. Let's see how this affects the amplitude of the $|0\rangle^{\otimes N}$ state.
#
# If the function is constant, $f(x) = C$ (either always $0$ or always $1$), we get $\frac{1}{2^N} \sum_{x=0}^{2^N-1} (-1)^{f(x)} = \frac{1}{2^N} \sum_{x=0}^{2^N-1} (-1)^{C} = \frac{1}{2^N} \cdot 2^N (-1)^C = (-1)^C$.
# Since the sum of squares of amplitudes of all basis states always equals $1$, the amplitudes of the rest of the basis states have to be 0 - this means that the state of the qubits after step 3 *is* $|0\rangle^{\otimes N}$.
#
# If the function is balanced, i.e., returns $0$ for exactly half of the inputs and $1$ for the other half of the inputs, exactly half of the terms in the sum $\frac{1}{2^N} \sum_{x=0}^{2^N-1} (-1)^{f(x)}$ will be $1$ and the other half of the terms will be $-1$, and they will all cancel out, leaving the amplitude of $|0\rangle^{\otimes N}$ equal to $0$.
#
# Now, what happens when we measure all qubits? (Remember that the probability of getting a certain state as a result of measurement equals to the square of the amplitude of this state.)
#
# If the function is constant, the only measurement result we can get is all zeros - the probability of getting any other result is $0$. If the function is balanced, the probability of getting all zeros is $0$, so we'll get any measurement result except this.
#
# This is exactly the last step of the algorithm: **measure all qubits, if all measurement results are 0, the function is constant, otherwise it is balanced**.
#
# ### Summary
#
# In the end the algorithm is very straightforward:
#
# 1. Apply the H gate to each qubit.
# 2. Apply the oracle.
# 3. Apply the H gate to each qubit again.
# 4. Measure each qubit.
# 5. If all qubits were measured in $|0\rangle$ state, the function is constant, otherwise it is balanced.
#
# Note that this algorithm requires only <span style="color:green">$1$</span> oracle call, and always produces the correct result.
# ## <span style="color:blue">Exercise 4</span>: Implement the quantum algorithm!
#
# **Inputs:**
# 1. The number of bits in the input $N$ ($1 \le N \le 5$).
# 2. The "black box" oracle the implements $f(x)$.
# You are guaranteed that the function implemented by the oracle is either constant or balanced.
#
# **Goal:** Return `true` if the function is constant, or `false` if it is balanced.
#
# > Useful documentation: [Q# Control Flow](https://docs.microsoft.com/quantum/language/statements#control-flow).
# +
%kata E4_QuantumAlgorithm_Test
operation DeutschJozsaAlgorithm (N : Int, oracle : (Qubit[] => Unit)) : Bool {
    // Exercise 4 template: fill in each "..." following the Part III steps
    // (H on all qubits, oracle, H on all qubits again, measure, interpret).

    // Create a boolean variable for storing the return value.
    // You'll need to update it later, so it has to be declared as mutable.
    mutable isConstant = ...;

    // Allocate an array of N qubits for the input register x.
    using (x = Qubit[...]) {
        // Newly allocated qubits start in the |0⟩ state.

        // The first step is to prepare the qubits in the required state before calling the oracle.
        // A qubit can be transformed from the |0⟩ state to the |+⟩ state by applying a Hadamard gate H.
        // ...

        // Apply the oracle to the input register.
        // The syntax is the same as for applying any function or operation.
        // ...

        // Apply a Hadamard gate to each qubit of the input register again.
        // ...

        // Measure each qubit of the input register in the computational basis using the M operation.
        // You can use a for loop to iterate over the range of indexes 0..N-1.
        // Note that you can't return the answer in the middle of a loop,
        // you have to update the variable isConstant using the "set" keyword.
        // ...

        // Before releasing the qubits make sure they are all in the |0⟩ state
        // (otherwise you'll get a ReleasedQubitsAreNotInZeroState exception).
        // You can use the library operation Reset which measures a qubit and applies a correction if necessary.
        // The library operation ResetAll does the same for a register of qubits.
        // ...
    }

    // Return the value of the boolean variable.
    return ...;
}
# -
# # Part IV. Running the Algorithm
#
# You have implemented the quantum version of the algorithm - congratulations! The last step is to combine everything you've seen so far - run your code to check whether the oracles you've seen in part II implement constant or balanced functions.
#
# > This is an open-ended task, and is not covered by a unit test. To run the code, execute the cell with the definition of the `Run_DeutschJozsaAlgorithm` operation first; if it compiled successfully without any errors, you can run the operation by executing the next cell (`%simulate Run_DeutschJozsaAlgorithm`).
#
# > Note that this task relies on your implementations of the previous tasks. If you are getting the "No variable with that name exists." error, you might have to execute previous code cells before retrying this task. Don't forget to execute Q# code cells that define oracles in part II!
# +
open Microsoft.Quantum.Diagnostics;

// Open-ended driver: run DeutschJozsaAlgorithm against each oracle from Part II
// and check that constant/balanced verdicts match expectations.
operation Run_DeutschJozsaAlgorithm () : String {
    // You can use Fact function to check that the return value of DeutschJozsaAlgorithm operation matches the expected value.
    // Uncomment the next line to run it.
    // Fact(DeutschJozsaAlgorithm(4, PhaseOracle_Zero) == true, "f(x) = 0 not identified as constant");

    // Run the algorithm for the rest of the oracles
    // ...

    // If all tests pass, report success!
    return "Success!";
}
# -
%simulate Run_DeutschJozsaAlgorithm
# # Part V. What's Next?
#
# We hope you've enjoyed this tutorial and learned a lot from it! If you're looking to learn more about quantum computing and Q#, here are some suggestions:
#
# * The [Quantum Katas](https://github.com/microsoft/QuantumKatas/) are sets of programming exercises on quantum computing that can be solved using Q#. They cover a variety of topics, from the basics like the concepts of superposition and measurements to more interesting algorithms like Grover's search.
# * In particular, [DeutschJozsaAlgorithm kata](https://github.com/microsoft/QuantumKatas/tree/master/DeutschJozsaAlgorithm) offers you more exercises on quantum oracles, a different presentation of Deutsch–Jozsa algorithm, and a couple of similar algorithms.
| tutorials/DeutschJozsaAlgorithm/DeutschJozsaAlgorithmTutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Plot a cortical parcellation
#
#
# In this example, we download the HCP-MMP1.0 parcellation [1]_ and show it
# on ``fsaverage``.
#
# <div class="alert alert-info"><h4>Note</h4><p>The HCP-MMP dataset has license terms restricting its use.
# Of particular relevance:
#
# "I will acknowledge the use of WU-Minn HCP data and data
# derived from WU-Minn HCP data when publicly presenting any
# results or algorithms that benefitted from their use."</p></div>
#
# References
# ----------
# .. [1] Glasser MF et al. (2016) A multi-modal parcellation of human
# cerebral cortex. Nature 536:171-178.
#
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from surfer import Brain

import mne

# Locate the MNE sample data set's subjects directory and download the
# HCP-MMP parcellation files into it.
# NOTE(review): assumes data_path() returns a str (older MNE); newer versions
# return a pathlib.Path, for which '+' concatenation would fail — confirm.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir,
                                        verbose=True)

# Read the left-hemisphere labels of the HCPMMP1 annotation on fsaverage.
labels = mne.read_labels_from_annot(
    'fsaverage', 'HCPMMP1', 'lh', subjects_dir=subjects_dir)

# Show the full parcellation on the inflated left hemisphere, then highlight
# the 'L_A1_ROI-lh' label (filled, not just its borders).
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1')
aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
brain.add_label(aud_label, borders=False)
# -
# We can also plot a combined set of labels (23 per hemisphere).
#
#
# Show the coarser 'HCPMMP1_combined' annotation (23 labels per hemisphere)
# on a fresh Brain instance with the same display settings as above.
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1_combined')
| 0.17/_downloads/e4c80995312cbf444f08bae49b269d8b/plot_parcellation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import os
import re
import warnings
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
from keras.backend import tensorflow_backend
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
import tensorflow as tf
from ssd import SSD300
from ssd_utils import BBoxUtility
# %matplotlib inline
# Matplotlib defaults for the visualisations below.
plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'
np.set_printoptions(suppress=True)  # print floats without scientific notation

# Alternative: cap GPU memory at a fixed fraction instead of growing on demand.
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.45
#set_session(tf.Session(config=config))

# Let TensorFlow 1.x allocate GPU memory on demand rather than all upfront,
# and register this session as the Keras backend session.
config = tf.ConfigProto(gpu_options = tf.GPUOptions(allow_growth = True))
session = tf.Session(config = config)
tensorflow_backend.set_session(session)
# Class list: 'jinkou' (人工, man-made) and 'sizen' (自然, natural).
voc_classes = ['jinkou','sizen']
NUM_CLASSES = len(voc_classes) + 1  # +1, presumably for the SSD background class — confirm
input_shape=(300, 300, 3)

warnings.filterwarnings('ignore')

# Build the SSD300 detector and load fine-tuned weights
# (by_name=True loads only layers whose names match the checkpoint).
model = SSD300(input_shape, num_classes=NUM_CLASSES)
model.load_weights('./checkpoints/weights.100-1.97.hdf5', by_name=True)
#model.load_weights('./weights_SSD300.hdf5', by_name=True)

# Decodes raw network predictions into per-image detection arrays.
bbox_util = BBoxUtility(NUM_CLASSES)
# +
# Accumulators for the two test sets: network inputs and original images,
# for the man-made set (inputs/images) and the natural set (inputs2/images2).
inputs = []
images = []
inputs2 = []
images2 = []
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """Recursively collect paths of image files under *directory*.

    Parameters
    ----------
    directory : str
        Root directory to walk.
    ext : str
        '|'-separated, lower-case extensions to accept.

    Returns
    -------
    list of str
        Full paths of every file whose name ends in one of the extensions
        (case-insensitive).
    """
    # Bug fix: the previous pattern r'([\w]+\.(?:ext))' was unanchored at the
    # end, so names like 'photo.jpg.txt' were wrongly accepted, and the \w+
    # stem wrongly rejected names such as 'my-photo.jpg'. Anchor on the
    # extension and allow any characters in the stem.
    pattern = re.compile(r'.*\.(?:' + ext + r')$')
    return [os.path.join(root, f)
            for root, _, files in os.walk(directory) for f in files
            if pattern.match(f.lower())]
print('テストデータを読み込み中…')

# Load the man-made (jinkou) test images: keep the raw image for display and
# a 300x300 array copy for the network input.
for filepath in list_pictures('../master/data/CNN_data/test/jinkou/'):
#for filepath in list_pictures('../master/data/CNN_data/test/sizen/'):
    warnings.filterwarnings('ignore')
    img = image.load_img(filepath, target_size=(300, 300))
    img = image.img_to_array(img)
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this needs
    # an old SciPy (with Pillow) or a port to imageio.imread — confirm env.
    images.append(imread(filepath))
    inputs.append(img.copy())
inputs = preprocess_input(np.array(inputs))

# Same for the natural (sizen) test images.
for filepath in list_pictures('../master/data/CNN_data/test/sizen/'):
    warnings.filterwarnings('ignore')
    img2 = image.load_img(filepath, target_size=(300, 300))
    img2 = image.img_to_array(img2)
    images2.append(imread(filepath))
    inputs2.append(img2.copy())
inputs2 = preprocess_input(np.array(inputs2))

print('テストデータを読み込み終了')
# -
# Forward pass over both test sets (batch_size=1; verbose shows a progress bar).
preds = model.predict(inputs, batch_size=1, verbose=1)
preds2 = model.predict(inputs2, batch_size=1, verbose=1)
#preds = model.predict(inputs, batch_size=1)
#print(preds.shape)
#print(preds[0])

# Decode raw predictions into per-image detection arrays with columns
# [label, confidence, xmin, ymin, xmax, ymax] (see detector() below).
results = bbox_util.detection_out(preds)
results2 = bbox_util.detection_out(preds2)
#print(results[3])
# +
def detector(images_eva, result, img):
    """Classify each image by majority bounding-box area and print accuracy.

    For every image, detections with confidence >= 0.6 are kept; the image is
    counted as 'jinkou' (man-made) or 'sizen' (natural) depending on which
    class covers the larger total bounding-box area. Prints the percentage of
    images voted into each class.

    Parameters
    ----------
    images_eva : list of ndarray
        Evaluation images (as loaded with imread above).
    result : list
        Per-image detection arrays from BBoxUtility.detection_out();
        columns are [label, confidence, xmin, ymin, xmax, ymax].
    img : ndarray
        Unused; kept only for backward compatibility with existing callers
        (the loop below immediately rebinds the name).
    """
    no_feature = 0    # images with no detections at all
    count_jinkou = 0  # images voted man-made
    count_sizen = 0   # images voted natural
    for i, img in enumerate(images_eva):
        # Parse the outputs.
        # Bug fix: check the *parameter* `result`, not the global `results` —
        # the old code filtered the second data set (results2) against the
        # wrong detection list.
        if len(result[i]) == 0:
            no_feature += 1
            continue
        det_label = result[i][:, 0]
        det_conf = result[i][:, 1]
        det_xmin = result[i][:, 2]
        det_ymin = result[i][:, 3]
        det_xmax = result[i][:, 4]
        det_ymax = result[i][:, 5]
        # Get detections with confidence higher than 0.6.
        top_indices = [k for k, conf in enumerate(det_conf) if conf >= 0.6]
        top_conf = det_conf[top_indices]
        top_label_indices = det_label[top_indices].tolist()
        top_xmin = det_xmin[top_indices]
        top_ymin = det_ymin[top_indices]
        top_xmax = det_xmax[top_indices]
        top_ymax = det_ymax[top_indices]
        colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
        plt.imshow(img / 255.)
        currentAxis = plt.gca()
        sum_jinkou = 0
        sum_sizen = 0
        # Inner index renamed from `i` to `j` so it no longer shadows the
        # outer image index.
        for j in range(top_conf.shape[0]):
            # Convert normalized box coordinates to pixel coordinates.
            xmin = int(round(top_xmin[j] * img.shape[1]))
            ymin = int(round(top_ymin[j] * img.shape[0]))
            xmax = int(round(top_xmax[j] * img.shape[1]))
            ymax = int(round(top_ymax[j] * img.shape[0]))
            score = top_conf[j]
            label = int(top_label_indices[j])
            label_name = voc_classes[label - 1]
            # Accumulate covered area per class.
            if label_name == 'jinkou':
                sum_jinkou += ((xmax - xmin) * (ymax - ymin))
            else:
                sum_sizen += ((xmax - xmin) * (ymax - ymin))
            display_txt = '{:0.2f}, {}'.format(score, label_name)
            coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
            color = colors[label]
            #currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
            #currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
        # Majority vote by covered area; ties count for neither class.
        if sum_jinkou > sum_sizen:
            count_jinkou += 1
            #print(sum_jinkou, ':', sum_sizen, '-> jinkou', count_jinkou)
        elif sum_jinkou < sum_sizen:
            count_sizen += 1
            #print(sum_jinkou, ':', sum_sizen, '-> sizen', count_sizen)
        #plt.show()
    # NOTE(review): 1200 is a hard-coded data set size; presumably
    # len(images_eva) == 1200 for both test sets — confirm before reuse.
    print('自然物画像:', count_sizen / 1200 * 100, '%')
    print('人工物画像:', count_jinkou / 1200 * 100, '%')
# Evaluate the man-made test set; the natural-set evaluation is currently
# disabled — uncomment the last two lines to run it.
print('人工物画像の正解率')
detector(images, results, img)
#print('自然物画像の正解率')
#detector(images2, results2, img2)
# -
| SSD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Load the wine data set; 'Wine Type' is the target class.
vinos = pd.read_csv("archives/vino.csv")
vinos.head()

# Inspect the distinct classes and their frequencies.
vinos["Wine Type"].unique()
vinos["Wine Type"].value_counts()

# Split into target (y) and features (X).
y = vinos['Wine Type']
X = vinos.drop('Wine Type', axis=1)

from sklearn.model_selection import train_test_split

# 70/30 train/test split (no random_state, so results vary between runs).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

from sklearn.tree import DecisionTreeClassifier

# Fit a decision tree with default hyperparameters.
arbol = DecisionTreeClassifier()
arbol.fit(X_train, y_train)

# Predict on the held-out set and compare with the true labels.
predicciones = arbol.predict(X_test)
predicciones
np.array(y_test)

from sklearn.metrics import classification_report, confusion_matrix

# Evaluate: per-class precision/recall/F1 and the confusion matrix.
print(classification_report(y_test, predicciones))
print(confusion_matrix(y_test, predicciones))
| Machine Learning/arboles_decision.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Radio Tutorial 2: Density models and radio burst speeds
#
# ### The goal for this tutorial
# The goal will be to plot a variety of 1 dimensional electron density models of the solar atmosphere, both theoretical and empirical. The empirical models that we will use (Newkirk and Saito) are often used for estimates of radio burst altitude and speed in the corona. We'll be plotting electron density as a function of height $n(r)$.
#
# N.B. When I use the term height, altitude or distance I mean heliocentric distance i.e., 1 R$_{\odot}$ is the solar surface.
# ### First let's start with the hydrostatic plane parallel model
# Start with plotting the following equation from 1-10 solar radii
# \begin{equation}
# n(r) = n_0\mathrm{exp}\bigg(\frac{-r}{H}\bigg)
# \end{equation}
#
# where $n_0$ is the density at the base of the corona and $r$ is the distance into the solar atmosphere. $H$ is known as the scale height. And is given by
#
# \begin{equation}
# H = \frac{m_p g_{\odot}}{k_B T}
# \end{equation}
#
# where $k_B$ is Boltzmann's constant, $T$ is the temperature, $m_p$ is the proton mass, and $g_{\odot}$ is solar gravity. What is the significance of the scale height?
import numpy as np
import matplotlib.pyplot as plt
# ### First define your constants here
# Tutorial template: replace each None with the physical value before running.
n0 = None    # Electron density at 1 Rsun (cm^-3)
T = None     # Coronal temperature (MK)
kb = None    # Boltzmann constant (J/K)
mp = None    # Proton mass (kg)
Msun = None  # Solar mass (kg)
rsun = None  # Solar radius (m)
G = None     # Universal gravitational constant (m3 kg-1 s-2)
gsun = None  # Solar gravity at the surface (m/s/s)
# ### First build a 1D distance grid
# +
# Build a 1D array of distance values in units of Rsun.
# Choose a reasonable resolution e.g., <0.1 Rsun.
# -
# ### Let's look at a plane-parallel hydrostatic model
# +
# Start with defining the scale height.
# Print its value in Mm. Does this make sense? What is the meaning of this value?
# +
# Now use your distance grid and scale height to get the density values, using Equation 1 above.
# -
# And plot the result...
'''
fig, ax = plt.subplots()
ax.plot(radius, npp)
ax.set_xlabel('Heliocentric Distance (R$_{\odot}$)')
ax.set_ylabel('Electron density (cm$^{-3})$')
plt.show()
'''
# ### Increasing the temperature of the atmosphere
# Now let's see what happens when we increase the temperature of the atmosphere. Make a scale height for an atmosphere of 1 MK, 5 MK, and 10 MK. What would you expect to happen?
# +
# The most convenient way to do this is to build a function that takes your distance grid
# and temperature as input plots the density model. Fill in the function template below.
fig, ax = plt.subplots()

def plothydrostat(radius, temp, label=None):
    """Template: plot a hydrostatic density model on the shared axes `ax`.

    radius -- 1D grid of heliocentric distances (Rsun); temp -- temperature.
    """
    # Inside here calculate scale height H
    # Then get the density and plot the result
    #ax.plot(radius, n, label=label)
    return #ax
'''
plothydrostat(radius, T1, label='1 MK')
plothydrostat(radius, T2, label='5 MK')
ax = plothydrostat(radius, T3, label='10 MK')
ax.set_yscale('log')
ax.set_xlabel('Heliocentric Distance (R$_{\odot}$)')
ax.set_ylabel('Electron density (cm$^{-3})$')
plt.legend()
plt.show()
'''
# -
# ### Now let's plot the spherically symmetric solution
#
# Now we'll do something slightly more realistic. Gravity in the solar atmosphere is of course not a constant, but decreases with increasing altitude. The hydrostatic model that accounts for this is similar:
#
# \begin{equation}
# n = n_0\mathrm{exp}\bigg( \frac{1}{H}\bigg\{\frac{1}{r}-1\bigg\} \bigg)
# \end{equation}
#
# where $H$ takes on the slightly different form
#
# \begin{equation}
# H = \frac{2 k_B T R_{\odot}^2}{G M_{\odot}m_p}
# \end{equation}
# +
# Define these equations here. Take care with your units, units of Rsun or in meters etc.
# the plot up the result, and also compare to the plane parallel solution you calculated previously.
'''
plt.plot(radius, nss, label='Plane parallel')
plt.plot(radius, npp, label='Spherically symmetric')
plt.yscale('log')
plt.ylim(1e4,1e10)
plt.xlabel('Heliocentric Distance (R$_{\odot}$)')
plt.ylabel('Electron density (cm$^{-3})$')
plt.legend()
plt.show()
'''
# -
# ### What does this look like in terms of plasma frequency?
# +
# Use the plasma frequency formula from the lectures to convert from density to frequency in MHz
# Then plot as a function of radius.
# What is the radius at which you expect 100 MHz?
'''
plt.plot(radius, fplasma_ss)
plt.xlabel('Heliocentric Distance (R$_{\odot}$)')
plt.ylabel('Plasma Frequency (MHz)')
plt.yscale('log')
plt.ylim(1,300)
plt.show()
'''
# -
# ### Now let's plot some empirical density models
# Radio bursts in solar physics are most often analysed with 1D empirical density models. There are many: Newkirk, Saito, Leblanc, Baumbach-Allen, Mann. The list goes on. Each is suitable for different parts of the corona. For example, the Newkirk and Saito models can be used for the low corona, in the early stages of eruption typically below a few solar radii (although the Newkirk model is known to give slightly higher than expected densities). The Leblanc and Mann models are for distances further into the heliosphere.
#
# Here we'll take a look at the Newkirk and Saito models. Firstly, the Newkirk model:
#
# \begin{equation}
# n(r) = 4.2\times10^4 \times 10^{4.32/r}
# \end{equation}
#
# where $r$ is units of solar radii. Plot this model along side the spherically symmetric model from above and compare them.
# +
# Plot the the Newkirk and spherically symmetric model
# -
# Now plot the Saito model using the following formula
#
# \begin{equation}
# n(r) = c_1r^{-d_1} + c_2r^{-d_2}
# \end{equation}
#
# where $r$ is units of solar radii and $c1=1.38\times10^6$, $c2=1.68\times10^8$, $d1=2.14$, $d2=6.13$.
#
# +
# Use this formula to plot the Saito, overplotting Newkirk, and the hydrostatic model.
# -
# ### Now let's get the speed of the shock that caused the type II
# +
# Firstly write a function of frequency to density
def freq2dens(freq):
    """Student template: convert a plasma frequency to electron density.

    To be completed using the plasma-frequency relation from the lectures;
    presumably `freq` is in Hz or MHz -- confirm and document the choice.
    """
    ne = None
    return ne # cm^-3
# +
import numpy as np
# NOTE(review): hard-coded absolute path to a personal machine -- replace with a
# relative path (e.g. 'ft_coords.npz' next to the notebook) before sharing.
result = np.load('/Users/eoincarley/Desktop/WP4-T1/STELLAR_SSW_tutorials/day2_radio_session/ft_coords.npz', allow_pickle=True)
# Read in the frequency time points from Pearse's tutorial.
#freq = np.array([150e6, 100e6, 40e6, 30e6, 20e6])
#tsec = np.array([0, 120, 200, 360, 520])
# Convert the frequencies to densities.
# density = freq2dens(freq)
list(result.keys())
# First stored array is assumed to be the time axis -- TODO confirm against
# how ft_coords.npz was written in the other tutorial.
time = result['arr_0']
# +
# Now use one of the density models to map density to radius.
# You can rearrange the Newkirk model to get the density directly.
# However, with a polynomial like Saito, the most convenient approach would
# be to interpolate between density and radius
# Hint. Use numpy.interp.
# The results should be an array of heliocentric distances.
# +
# Plot the results of radii vs time and fit a line to the data points
# Use scipy.stats.linregress
# -
# ### Now estimate the speed using a Saito model.
# Follow the same procedure for the Saito model. Compare the distance values that you get with Saito. Compare the speed given by the Newkirk model and Saito. Which one would you trust more?
| day2_radio_session/.ipynb_checkpoints/radio2_questions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2021년 8월 23일 월요일
# ### Programmers - 직업군 추천하기 (Python)
# ### 문제 : https://programmers.co.kr/learn/courses/30/lessons/84325
# ### 블로그 : https://somjang.tistory.com/entry/Programmers-%EC%9C%84%ED%81%B4%EB%A6%AC-%EC%B1%8C%EB%A6%B0%EC%A7%80-4%EC%A3%BC%EC%B0%A8-%EC%A7%81%EC%97%85%EA%B5%B0-%EC%B6%94%EC%B2%9C%ED%95%98%EA%B8%B0-Python
# ### Solution
# +
def make_prefer_score_table(languages, preference):
    """Pair each language with its preference score and return the mapping."""
    return dict(zip(languages, preference))
def job_lang_score_table(table):
    """Parse 'JOB lang1 ... langN' rows into {job: languages in reversed order}.

    Reversing puts the lowest-priority language first, so a language's list
    index corresponds to its rank when scoring.
    """
    parsed = {}
    for row in table:
        job, *langs = row.split()
        parsed[job] = langs[::-1]
    return parsed
def get_job_score(prefer_score, job_lang_score):
    """Total each job's score: (rank index + 1) * preference for every language
    the candidate listed; languages without a preference contribute nothing."""
    totals = {}
    for job, langs in job_lang_score.items():
        totals[job] = sum(
            (rank + 1) * prefer_score[lang]
            for rank, lang in enumerate(langs)
            if lang in prefer_score
        )
    return totals
def solution(table, languages, preference):
    """Return the recommended job: highest total score, ties broken alphabetically."""
    prefer_table = make_prefer_score_table(languages=languages,
                                           preference=preference)
    job_lang_score = job_lang_score_table(table=table)
    job_score_dict = get_job_score(prefer_score=prefer_table,
                                   job_lang_score=job_lang_score)
    # min with a (-score, name) key is equivalent to sorting and taking the head.
    return min(job_score_dict, key=lambda job: (-job_score_dict[job], job))
# -
# Sample test case 1 from the problem statement.
table = ["SI JAVA JAVASCRIPT SQL PYTHON C#", "CONTENTS JAVASCRIPT JAVA PYTHON SQL C++", "HARDWARE C C++ PYTHON JAVA JAVASCRIPT", "PORTAL JAVA JAVASCRIPT PYTHON KOTLIN PHP", "GAME C++ C# JAVASCRIPT C JAVA"]
languages = ["PYTHON", "C++", "SQL"]
preference = [7, 5, 5]
solution(table=table, languages=languages, preference=preference)
# Sample test case 2: same table, different candidate preferences.
table = ["SI JAVA JAVASCRIPT SQL PYTHON C#", "CONTENTS JAVASCRIPT JAVA PYTHON SQL C++", "HARDWARE C C++ PYTHON JAVA JAVASCRIPT", "PORTAL JAVA JAVASCRIPT PYTHON KOTLIN PHP", "GAME C++ C# JAVASCRIPT C JAVA"]
languages = ["JAVA", "JAVASCRIPT"]
preference = [7, 5]
solution(table=table, languages=languages, preference=preference)
| DAY 401 ~ 500/DAY462_[Programmers] 직업군 추천하기 (Python).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import molsysmt as msm
# # Vectors from lengths and angles
# Build a unit-aware triclinic box definition (one frame) and convert it to
# box vectors with MolSysMT's PBC helper.
angles = [[60.0, 60.0, 90.0]] * msm.puw.unit('degrees')
lengths = [[2.0, 2.0, 2.0]] * msm.puw.unit('nm')
msm.pbc.box_vectors_from_box_lengths_and_angles(lengths, angles)
| docs/contents/pbc/box_vectors_box_lengths_and_angles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd0b4521bcfda93bcc98ae6ed32f504b5e2ee5889d56f563777bb9f597021a38782
# ---
# # Exercise 2
# What's the probability of rolling 3 six-sided dice and getting more than 7?
# +
# Enumerate the full sample space of three six-sided dice (6**3 = 216 outcomes).
possibility_set = tuple(
    (d1, d2, d3)
    for d1 in range(1, 7)
    for d2 in range(1, 7)
    for d3 in range(1, 7)
)
# Keep the totals that exceed 7; the probability is the favourable fraction.
sums_set = tuple(total for total in map(sum, possibility_set) if total > 7)
print(len(sums_set) / len(possibility_set))
| ch02/ex-02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload
import os, json
from types import SimpleNamespace
from experiment import run_model
from eval import calculate_stats
import pickle
from datetime import datetime
import torch
from transformers import BertTokenizer
from sklearn.metrics import classification_report
from util.tools import load_config, configEncoder
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Force CPU for this run (the GPU selection above is intentionally disabled).
device = "cpu"
config_folder = "config/"
config = load_config(config_folder)
# Quick sanity check that the config loaded (displays the dropout setting).
config.modelconfig.DROP_OUT
# Train/evaluate the configured model; returns inputs, gold labels, predictions
# and losses for the train/dev/test splits.
train_inputs, train_label_ids, train_preds, train_loss, dev_inputs, dev_label_ids, dev_loss, dev_preds,test_inputs, test_preds, \
test_labels, test_loss = run_model(config, device)
# +
# train_inputs, train_label_ids, train_preds, train_loss, dev_inputs, dev_label_ids, dev_loss, dev_preds, test_preds, test_labels
# -
print(classification_report(test_labels,test_preds ))
# +
def save_missed_cases_to_file(file_start_name, dev_preds, dev_label_ids, train_inputs):
    """Pickle mispredicted examples as [pred, gold, decoded text] triples.

    file_start_name -- filename prefix; a timestamp is appended so successive
                       runs do not overwrite each other.
    dev_preds / dev_label_ids -- predicted and gold label ids for the split.
    train_inputs -- token-id sequences decoded back to text with the tokenizer.
                    NOTE(review): inputs come from `train_inputs` while
                    labels/preds come from dev -- confirm they index the same
                    examples.

    Reads the module-level `config` for the output directory.
    """
    tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
    missed_cases = []
    # Inspect at most the first 50 examples, bounded by the shortest input so
    # short prediction lists no longer raise IndexError (was a fixed range(0, 50)).
    limit = min(50, len(dev_preds), len(dev_label_ids), len(train_inputs))
    for i in range(limit):
        if dev_label_ids[i] != dev_preds[i]:
            missed_cases.append([dev_preds[i], dev_label_ids[i],
                                 " ".join(tokenizer.convert_ids_to_tokens(train_inputs[i]))])
    # Save into a file
    missed_cases_file = config.programsettings.REPORTS_DIR + file_start_name + str(datetime.now()).replace(":", "_").replace(".", "_") + ".pkl"
    with open(missed_cases_file, "wb") as f:
        pickle.dump(missed_cases, f)

save_missed_cases_to_file("BIOBERT_fc_missedcases_" , dev_preds, dev_label_ids, train_inputs)
# -
import numpy as np
# Shape sanity check on labels vs predictions for both splits.
np.array(dev_label_ids).shape, np.array(dev_preds).shape, np.array(train_preds).shape, np.array(train_label_ids).shape
# %autoreload
# Compute MCC / F1 and per-label match tables for train and dev.
train_mcc, train_f1_score, train_df_results, train_label_matches_df = calculate_stats(train_label_ids,train_preds )
dev_mcc, dev_f1_score, dev_df_results, dev_label_matches_df = calculate_stats(dev_label_ids,dev_preds )
# Accumulate (config, losses, metrics, raw labels/preds) per experiment run.
all_experiment_results = []
all_experiment_results.append([config, train_loss, dev_loss, train_mcc, train_f1_score,dev_mcc,dev_f1_score,
dev_label_ids, dev_preds,train_label_ids,train_preds ])
all_experiment_results
dev_label_matches_df
# ## Try with BERT Sequential configuration
# ### Just change model from BIOR to BERT Sequence
config.programsettings.MODEL_NAME = "BERT_Sequence"
config.programsettings.DEBUG_PRINT = 0
# Re-run with the BERT sequential model; note this call unpacks fewer values
# than the earlier run_model call -- presumably a different config path; verify.
train_inputs, train_label_ids, train_preds, train_loss, dev_inputs, dev_label_ids, dev_loss, dev_preds = run_model(config, device)
# +
train_mcc, train_f1_score, train_df_results, train_label_matches_df = calculate_stats(train_label_ids,train_preds )
dev_mcc, dev_f1_score, dev_df_results, dev_label_matches_df = calculate_stats(dev_label_ids,dev_preds)
# -
all_experiment_results.append([config, train_loss, dev_loss, train_mcc, train_f1_score,dev_mcc,dev_f1_score,
dev_label_ids, dev_preds,train_label_ids,train_preds ])
all_experiment_results
dev_label_matches_df
# +
# Persist all accumulated experiment results with a timestamped filename.
all_model_results_pickle_file = config.programsettings.REPORTS_DIR + "multi_model_experiment_results_" + str(datetime.now()).replace(":", "_").replace(".", "_") + ".pkl"
with open(all_model_results_pickle_file, "wb") as f:
    pickle.dump(all_experiment_results, f)
# -
print(classification_report(dev_label_ids,dev_preds ))
# Save missed cases.
save_missed_cases_to_file("BERT_sequential_missedcases_" , dev_preds, dev_label_ids, train_inputs)
# # Hyper paramter tuning based on experiments in experiments_batch
# +
# import torch
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # device = "cpu"
# from experiments_batch import run_all_experiments_save
# run_all_experiments_save(device)
# -
| project_re/Experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Now You Code 3: List of numbers
#
# This example will demonstrate how to parse an input string into
# a list of numbers. You will then demonstrate it's a list of
# numbers by calculating the count, sum(), min(), max() and average
# of the numbers using Python's len(), sum(), min(), and max() functions.
#
# Example Run #1:
# ```
# Enter numbers separated by a space: 10 5 0
#
# Numbers: [10.0, 5.0, 0.0]
# Count: 3
# Min: 0.0
# Max: 10.0
# Sum: 15.0
# Average: 5.0
# ```
#
# As usual, devise your plan, write the program, THEN figure out how to handle bad input in Example run #2.
#
# HINT: Use split() to make a list from the input, use a loop to convert
# each item in the list to a float
#
# Start out your program by writing your TODO list of steps
# you'll need to solve the problem!
# ## Step 1: Problem Analysis
#
# Inputs: a string of numbers separated by a space eg. `10 5 1`
#
# Outputs: the count, sum, min, max and average of the numbers
#
# Algorithm (Steps in Program):
#
# ```
# TODO: Write algorithm here
# ```
#
#
# +
# Step 2: Write solution
# Step 2: Write solution
# Fix: the original stub never read input, never parsed numbers, and ended with
# a bare undefined name `count` (NameError). Implemented per the assignment:
# 1. prompt for space-separated numbers, 2. convert each token to float,
# 3. report count, min, max, sum, and average.
text = input("Enter numbers separated by a space: ")
numbers = [float(item) for item in text.split()]
count = len(numbers)
print("\nNumbers:", numbers)
print("Count:", count)
print("Min:", min(numbers))
print("Max:", max(numbers))
print("Sum:", sum(numbers))
print("Average:", sum(numbers) / count)
# -
# ## Step 3: Questions
#
# 1. Re-write the solution so that it handles bad input (place in a new cell):
#
# Example Run #2: (Handles bad input)
# ```
# Enter numbers separated by a space: 5 mike 3
#
# Error: mike is not a number!
# ```
#
# 2. Explain which part of the solution could be refactored into a function?
#
# Answer:
#
# 3. What would be be the input(s) and output of that function?
#
# Answer:
#
# ## Step 4: Reflection
#
# Reflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements?
#
# To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise.
#
# Keep your response to between 100 and 250 words.
#
# `--== Write Your Reflection Below Here ==--`
#
#
| content/lessons/08/Now-You-Code/NYC3-Numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Notebook for assessing stop/start and dfco impacts
# +
import sys
import os
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import importlib
import seaborn as sns
# sns.set(font_scale=2, style='whitegrid')
sns.set()
# %matplotlib inline
# -
# local modules
from fastsim import simdrive, cycle, vehicle
t0 = time.time()
# cyc = cycle.Cycle(cyc_dict=
# cycle.clip_by_times(cycle.Cycle("udds").get_cyc_dict(), 130))
cyc = cycle.Cycle('udds')
cyc_jit = cyc.get_numba_cyc()
print(f"Elapsed time: {time.time() - t0:.3e} s")
t0 = time.time()
vehno = 1
veh0 = vehicle.Vehicle(vehno).get_numba_veh()
print(f"Elapsed time: {time.time() - t0:.3e} s")
t0 = time.time()
# veh1 = vehicle.Vehicle(28).get_numba_veh()
veh1 = vehicle.Vehicle(vehno)
veh1.stopStart = True
veh1.maxMotorKw = 1
veh1.maxEssKw = 5
veh1.maxEssKwh = 1
veh1.set_init_calcs()
veh1.vehKg = veh0.vehKg
veh1 = veh1.get_numba_veh()
print(f"Elapsed time: {time.time() - t0:.3e} s")
t0 = time.time()
sim_drive0 = simdrive.SimDriveJit(cyc_jit, veh0)
sim_drive0.sim_drive()
sim_drive1 = simdrive.SimDriveJit(cyc_jit, veh1)
sim_drive1.sim_drive()
print(f"Elapsed time: {time.time() - t0:.3e} s")
# +
# Figure: instantaneous fuel power for base vs stop-start, with vehicle speed
# and the stop-start-active flag on the lower panel.
fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True, figsize=(9,5))
ax0.plot(cyc.cycSecs, sim_drive0.fcKwOutAch,
         label='base')
ax0.plot(cyc.cycSecs, sim_drive1.fcKwOutAch,
         label='stop-start', linestyle='--')
# ax.plot(cyc.cycSecs, dfco_fcKwOutAchPos, label='dfco', linestyle='--', color='blue')
ax0.legend(loc='upper left')
ax0.set_ylabel('Fuel Power [kW]')
ax0.set_ylim([0, 25])
# Twin axis (red) shows when the controller can run all-electric (SS active).
ax2 = ax1.twinx()
ax2.yaxis.label.set_color('red')
ax2.tick_params(axis='y', colors='red')
ax2.plot(cyc.cycSecs, sim_drive1.canPowerAllElectrically,
         color='red')
ax2.set_ylabel('SS active')
ax2.set_xlim(ax0.get_xlim())
ax2.grid()
# Speed trace (blue) on the shared lower panel, clipped to the first 130 s.
ax1.plot(cyc.cycSecs, cyc.cycMph)
ax1.yaxis.label.set_color('blue')
ax1.tick_params(axis='y', colors='blue')
ax1.set_ylabel('Speed [mph]')
ax1.set_ylim([0, 35])
ax1.set_xlabel('Time [s]')
ax1.set_xlim([0, 130])
plt.savefig('plots/stop-start-power.svg')
# +
# Figure: cumulative fuel energy (MJ) for base vs stop-start over the full cycle,
# then print the relative fuel saving.
fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True, figsize=(9,5))
# Integrate power over time (kW * s -> kJ), then /1e3 -> MJ.
ax0.plot(cyc.cycSecs, (sim_drive0.fcKwOutAch * cyc.secs).cumsum() / 1e3,
         label='base')
ax0.plot(cyc.cycSecs, (sim_drive1.fcKwOutAch * cyc.secs).cumsum() / 1e3,
         label='stop-start')
ax0.legend(loc='upper left')
ax0.set_ylabel('Fuel Energy [MJ]')
ax2 = ax1.twinx()
ax2.yaxis.label.set_color('red')
ax2.tick_params(axis='y', colors='red')
ax2.plot(cyc.cycSecs, sim_drive1.canPowerAllElectrically,
         color='red', alpha=0.25)
ax2.set_ylabel('SS active')
ax2.set_xlim(ax0.get_xlim())
ax2.set_yticks([0, 1])
ax2.grid()
ax1.plot(cyc.cycSecs, cyc.cycMph)
ax1.yaxis.label.set_color('blue')
ax1.tick_params(axis='y', colors='blue')
ax1.set_ylabel('Speed [mph]')
ax1.set_xlabel('Time [s]')
plt.savefig('plots/stop-start-energy.svg')
# Fractional fuel-energy reduction of stop-start relative to the baseline.
diff = ((sim_drive0.fcKwOutAch * cyc.secs).sum() -
        (sim_drive1.fcKwOutAch * cyc.secs).sum()) / (
    sim_drive0.fcKwOutAch * cyc.secs).sum()
print(f'Stop/start produces a {diff:.2%} reduction in fuel consumption.\n')
# -
| fastsim-2021a/fastsim/docs/stop_start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
#
# +
'''---------------------------------------------IMPORT PACKAGES -----------------------------------------------------'''
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
mpl.rcParams['text.usetex'] = True
import shutil
from windrose import WindroseAxes
import matplotlib.cm as cm
'''----------------------------------------------SET PATHS ---------------------------------------------------- '''
data_path = "C:/Users/tjmor/OneDrive/Research/Projects/Oregon_Carrot/Data/IOP1/SWOPC/" # Data Path
files = 'LEMSBC01.CSV' # Data file name
#LocalsaveFig_path = "../gen/" #Local Save Dir
#Finalsave_path = "C:/Users/tjmor/OneDrive/Research/Projects/Oregon_Carrot/Quick_Looks/LEMS3/7_7_2021/"
Figure_Names = ['Batt_Press_Solar', 'Soil_T_Moist', 'Air_Surface_T_RH','Sonic','Raw_windrose']
# +
'''--------------------------------------------LOAD/PREPARE DATA ----------------------------------------------------'''
df = pd.read_csv(data_path + files) #read in data
df_time = pd.DataFrame(columns=['time']) # creates the blank dataframe with column called time
# Combine the six separate date/time columns into one pandas datetime series.
df_time.time=pd.to_datetime(df['Year'].astype(str)+"-"+df['Month'].astype(str)+"-"+df['Date'].astype(str)+" "+df['Hour'].astype(str)+":"+df['Minute'].astype(str)+":"+df['Second'].astype(str),format='%Y-%m-%d %H:%M:%S') #convert the time to pandas time
# delete the old time columns
del df['Year']
del df['Month']
del df['Date']
del df['Hour']
del df['Minute']
del df['Second']
df = pd.concat([df_time.time, df ], axis=1) # Merge the time series with rest of data
# Set the time index, keep a reference to the unresampled data, then average
# into 1-minute bins (an earlier comment said 30 mins, but the code uses '1Min').
df = df.set_index('time')
df_raw = df #set aside raw data just in case
df = df.resample(rule = '1Min').mean()
print(df.index[-1])
# -
# Make Quick Looks of Raw Data
'''----------------------------------------------PLOT SETTINGS ------------------------------------------------------'''
# NOTE(review): the 'seaborn-colorblind' style name was removed in newer
# matplotlib releases (>=3.6 uses 'seaborn-v0_8-colorblind') -- confirm the
# pinned matplotlib version before upgrading.
plt.style.use('seaborn-colorblind') # Color Scheme for plots
plt.rcParams["font.family"] = "Times New Roman" # Font style
plt.rcParams.update({'font.size': 12}) #Set font size
# +
'''---------------------------------------------- Figure 1 ------------------------------------------------------'''
#Figure 1 ~ Battery, Pressure, Pressure, and Pressue Temp
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True, figsize=(10, 6))
#fig.suptitle('Aligning x-axis using sharex')
ax1.plot(df.index, df['Bat_Lvl'])
ax1.set(ylabel = 'Batt. Lvl. [V]')
# NOTE(review): `b=bool` passes the bool *type* (truthy) -- presumably `b=True`
# was intended; the `b` kwarg is also renamed `visible` in newer matplotlib.
ax1.grid(b=bool, which='major', axis='both')
ax1.autoscale(enable=True, axis='x', tight=True)
ax2.plot(df.index, df['Pressure'])
ax2.set(ylabel = 'Pressure [Pa]')
ax2.grid(b=bool, which='major', axis='both')
ax2.autoscale(enable=True, axis='x', tight=True)
ax3.plot(df.index, df['BMP_Amb'])
ax3.set(ylabel = r'Pressure T [$^{\circ}$C]')
ax3.grid(b=bool, which='major', axis='both')
ax3.autoscale(enable=True, axis='x', tight=True)
ax4.plot(df.index, df['Sunlight'])
ax4.set(ylabel = r'$SW_i$ [W m$^{-2}$]')
ax4.grid(b=bool, which='major', axis='both')
ax4.autoscale(enable=True, axis='x', tight=True)
plt.minorticks_on()
#save figure
plt.savefig('../gen/'+Figure_Names[0] +'.png', bbox_inches='tight')
# +
'''---------------------------------------------- Figure 2 ------------------------------------------------------'''
#Figure 2 ~ Soil Temp and Moist
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True, figsize=(10, 6))
#fig.suptitle('Aligning x-axis using sharex')
ax1.plot(df.index, df['Upper_Soil_Temp'])
ax1.set(ylabel = r'Up Soil T [$^{\circ}$C]')
# NOTE(review): `b=bool` passes the bool *type* (truthy); presumably `b=True`
# was intended throughout these figures.
ax1.grid(b=bool, which='major', axis='both')
#ax1.autoscale(enable=True, axis='x', tight=True)
ax2.plot(df.index, df['Upper_Soil_Mois'])
ax2.set(ylabel = 'Up Moist. []')
ax2.grid(b=bool, which='major', axis='both')
ax2.autoscale(enable=True, axis='x', tight=True)
ax3.plot(df.index, df['Lower_Soil_Temp'])
ax3.set(ylabel = r'Low Soil T [$^{\circ}$C]')
ax3.grid(b=bool, which='major', axis='both')
#ax3.autoscale(enable=True, axis='x', tight=True)
ax4.plot(df.index, df['Lower_Soil_Mois'])
ax4.set(ylabel = 'Low Moist. []')
ax4.grid(b=bool, which='major', axis='both')
ax4.autoscale(enable=True, axis='x', tight=True)
plt.minorticks_on()
#save figure
plt.savefig('../gen/' + Figure_Names[1] +'.png', bbox_inches='tight')
# +
'''---------------------------------------------- Figure 3 ------------------------------------------------------'''
#Figure 3 ~ Air Temp and RH
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True, figsize=(10, 6))
#fig.suptitle('Aligning x-axis using sharex')
ax1.plot(df.index, df['SHT_Amb_C'])
ax1.set(ylabel = r'Air T [$^{\circ}$C]')
ax1.grid(b=bool, which='major', axis='both')
ax1.autoscale(enable=True, axis='x', tight=True)
ax2.plot(df.index, df['SHT_Hum_Pct'])
ax2.set(ylabel = r'Air RH [\%]')
ax2.grid(b=bool, which='major', axis='both')
ax2.autoscale(enable=True, axis='x', tight=True)
ax3.plot(df.index, df['MLX_IR_C'])
ax3.set(ylabel = r'MLX $T_s$ [$^{\circ}$C]')
ax3.grid(b=bool, which='major', axis='both')
ax3.autoscale(enable=True, axis='x', tight=True)
ax4.plot(df.index, df['MLX_Amb_C'])
ax4.set(ylabel = r'MLX $T_A$ [$^{\circ}$C]')
ax4.grid(b=bool, which='major', axis='both')
ax4.autoscale(enable=True, axis='x', tight=True)
plt.minorticks_on()
#save figure
plt.savefig('../gen/'+Figure_Names[2] +'.png', bbox_inches='tight')
# +
'''---------------------------------------------- Figure 4 ------------------------------------------------------'''
#Figure 4 ~ Sonic Values
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True, figsize=(10, 6))
#fig.suptitle('Aligning x-axis using sharex')
ax1.plot(df.index, df['Sonic_Dir'])
ax1.set(ylabel = r'Son.Dir [$^{\circ}$]')
ax1.grid(b=bool, which='major', axis='both')
ax1.autoscale(enable=True, axis='x', tight=True)
# Only positive wind speeds are plotted here (filters sensor dropouts/invalids).
ax2.plot(df.Sonic_Spd[df.Sonic_Spd > 0 ])
ax2.set(ylabel = r'Son.Spd [m s$^{-1}$]')
ax2.grid(b=bool, which='major', axis='both')
ax2.autoscale(enable=True, axis='x', tight=True)
ax3.plot(df.index, df['Sonic_Gst'])
ax3.set(ylabel = r'Son.Gst. [m s$^{-1}$]')
ax3.grid(b=bool, which='major', axis='both')
ax3.autoscale(enable=True, axis='x', tight=True)
ax4.plot(df.index, df['Sonic_Tmp'])
ax4.set(ylabel = r'Son.T [$^{\circ}$C]')
ax4.grid(b=bool, which='major', axis='both')
ax4.autoscale(enable=True, axis='x', tight=True)
plt.minorticks_on()
#save figure
plt.savefig('../gen/'+Figure_Names[3] +'.png', bbox_inches='tight')
# +
# Calculate the proper (vector-averaged) wind direction.
# Bug fix: the original used bare mean/sin/cos/pi/arctan2, which are undefined
# here (NumPy is imported as `np`), so this cell raised NameError at runtime.
V_east = np.mean(df_raw.Sonic_Spd * np.sin(df_raw.Sonic_Dir * np.pi / 180))
V_north = np.mean(df_raw.Sonic_Spd * np.cos(df_raw.Sonic_Dir * np.pi / 180))
# atan2(E, N) gives the meteorological bearing; wrap into [0, 360).
mean_WD = np.arctan2(V_east, V_north) * 180 / np.pi
mean_WD = (360 + mean_WD) % 360
# +
'''---------------------------------------------- Figure 5 ------------------------------------------------------'''
#Wind rose figure
# Create wind speed and direction variables
#ws = np.random.random(500) * 6
#wd = np.random.random(500) * 360
# Filled + outlined wind-rose contours of the raw (unresampled) sonic data,
# binned into 1 m/s speed classes.
ax = WindroseAxes.from_ax()
ax.contourf(df_raw.Sonic_Dir,df_raw.Sonic_Spd, bins=np.arange(0, 8, 1), cmap=cm.hot)
ax.contour(df_raw.Sonic_Dir,df_raw.Sonic_Spd, bins=np.arange(0, 8, 1), colors='black')
ax.set_legend()
plt.savefig('../gen/'+Figure_Names[4] +'.png', bbox_inches='tight')
# Alternative bar-style rose, kept for reference:
'''ax = WindroseAxes.from_ax()
ax.bar(df['Sonic_Dir'], df['Sonic_Spd'], normed=True, opening=0.8, edgecolor='white', mean_values = 'True')
ax.set_legend()'''
# -
# Move all the figures to a final destination ~ Still not working
'''for ii in Figure_Names:
shutil.move("../gen/"+Figure_Names[ii]+".png", "C:/Users/tjmor/OneDrive/Research/Projects/Oregon_Carrot/Quick_Looks/LEMS2/7_7_2021/" + Figure_Names[ii] + ".png")'''
| LEMS/src/LEMs_QuickLook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
from glob import glob
from IPython.display import display
import numpy as np
import pandas as pd
from tqdm import tqdm
from common.utils import median_ensemble
# +
from common.summary_utils import EnsembleStatistics
from common.metrics import smape_1, smape_2, nd, mape
# FRED
from resources.fred.dataset import FredDataset, FredMeta
from resources.fred.evaluator import FredEvaluator
from common.timeseries import TimeseriesBundle
class FredStatistics:
    """Evaluate FRED forecasts against the dataset's standard split."""

    def __init__(self, **args):
        dataset = FredDataset(FredMeta.dataset_path)
        self.training, self.target = dataset.standard_split()
        self.evaluator = FredEvaluator(self.target, **args)

    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        cleaned = [forecast[~np.isnan(forecast)] for forecast in predictions]
        return self.evaluator.evaluate(self.training.future_values(cleaned))
# M4
from resources.m4.dataset import M4Dataset, M4Meta
from resources.m4.evaluator import M4Evaluator
class M4Statistics:
    """Evaluate M4 forecasts against the dataset's standard split."""

    def __init__(self, **args):
        dataset = M4Dataset(M4Meta.dataset_path)
        self.training, self.target = dataset.standard_split()
        self.evaluator = M4Evaluator(self.target, **args)

    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        cleaned = [forecast[~np.isnan(forecast)] for forecast in predictions]
        return self.evaluator.evaluate(self.training.future_values(cleaned))
# M3
from resources.m3.dataset import M3Dataset, M3Meta
from resources.m3.evaluator import M3Evaluator
class M3Statistics:
    """Evaluate M3 forecasts against the dataset's standard split."""

    def __init__(self, **args):
        dataset = M3Dataset(M3Meta.dataset_path)
        self.training, self.target = dataset.standard_split()
        self.evaluator = M3Evaluator(self.target, **args)

    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        cleaned = [forecast[~np.isnan(forecast)] for forecast in predictions]
        return self.evaluator.evaluate(self.training.future_values(cleaned))
# TOURISM
from resources.tourism.dataset import TourismDataset, TourismMeta
from resources.tourism.evaluator import TourismEvaluator
class TourismStatistics:
    """Evaluate Tourism forecasts against the dataset's standard split."""

    def __init__(self, **args):
        dataset = TourismDataset(TourismMeta.dataset_path)
        self.training, self.target = dataset.standard_split()
        self.evaluator = TourismEvaluator(self.target, **args)

    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        cleaned = [forecast[~np.isnan(forecast)] for forecast in predictions]
        return self.evaluator.evaluate(self.training.future_values(cleaned))
# ELECTRICITY
from resources.electricity.dataset import ElectricityDataset, ElectricityMeta
from resources.electricity.evaluator import ElectricityEvaluator
class ElectricityStatisticsDeepAR:
    """Electricity evaluation using the DeepAR time split, targeting the next 7 days."""
    def __init__(self, **args):
        """Split the cached dataset at the DeepAR boundary; keep a 24*7-step target window."""
        self.training, self.target = ElectricityDataset(ElectricityMeta.dataset_path).load_cache().split(
            lambda ts: ts.split_by_time(ElectricityMeta.deepar_split))
        self.target, _ = self.target.split(lambda ts: ts.split(24 * 7))
        self.evaluator = ElectricityEvaluator(self.target, precision=5, **args)
    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        return self.evaluator.evaluate(self.training.future_values([p[~np.isnan(p)] for p in predictions]))
class ElectricityStatisticsDeepFact:
    """Electricity evaluation using the DeepFactors time split, targeting the next 7 days."""
    def __init__(self, **args):
        """Split the cached dataset at the DeepFactors boundary; keep a 24*7-step target window."""
        self.training, self.target = ElectricityDataset(ElectricityMeta.dataset_path).load_cache().split(
            lambda ts: ts.split_by_time(ElectricityMeta.deepfact_split))
        self.target, _ = self.target.split(lambda ts: ts.split(24 * 7))
        self.evaluator = ElectricityEvaluator(self.target, precision=5, **args)
    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        return self.evaluator.evaluate(self.training.future_values([p[~np.isnan(p)] for p in predictions]))
class ElectricityStatisticsLastWindow:
    """Electricity evaluation on the dataset's standard (last-window) split."""
    def __init__(self, **args):
        self.training, self.target = ElectricityDataset(ElectricityMeta.dataset_path).standard_split()
        self.evaluator = ElectricityEvaluator(self.target, precision=5, **args)
    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        return self.evaluator.evaluate(self.training.future_values([p[~np.isnan(p)] for p in predictions]))
# TRAFFIC
from resources.traffic.dataset import TrafficDataset, TrafficMeta
from resources.traffic.evaluator import TrafficEvaluator
class TrafficStatisticsDeepAR:
    """Traffic evaluation using the DeepAR time split, targeting the next 7 days."""
    def __init__(self, **args):
        """Split the cached dataset at the DeepAR boundary; keep a 24*7-step target window."""
        self.training, self.target = TrafficDataset(TrafficMeta.dataset_path).load_cache().split(
            lambda ts: ts.split_by_time(TrafficMeta.deepar_split))
        self.target, _ = self.target.split(lambda ts: ts.split(24 * 7))
        self.evaluator = TrafficEvaluator(self.target, precision=5, **args)
    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        return self.evaluator.evaluate(self.training.future_values([p[~np.isnan(p)] for p in predictions]))
class TrafficStatisticsDeepFact:
    """Traffic evaluation using the DeepFactors time split, targeting the next 7 days."""
    def __init__(self, **args):
        """Split the cached dataset at the DeepFactors boundary; keep a 24*7-step target window."""
        self.training, self.target = TrafficDataset(TrafficMeta.dataset_path).load_cache().split(
            lambda ts: ts.split_by_time(TrafficMeta.deepfact_split))
        self.target, _ = self.target.split(lambda ts: ts.split(24 * 7))
        self.evaluator = TrafficEvaluator(self.target, precision=5, **args)
    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        return self.evaluator.evaluate(self.training.future_values([p[~np.isnan(p)] for p in predictions]))
class TrafficStatisticsLastWindow:
    """Traffic evaluation on the dataset's standard (last-window) split."""
    def __init__(self, **args):
        self.training, self.target = TrafficDataset(TrafficMeta.dataset_path).standard_split()
        self.evaluator = TrafficEvaluator(self.target, precision=5, **args)
    def evaluate(self, predictions):
        """Score predictions after stripping NaN padding from each forecast."""
        return self.evaluator.evaluate(self.training.future_values([p[~np.isnan(p)] for p in predictions]))
def collect_statistics(filter_path, evaluator):
    """Bootstrap ensemble metrics (5 repeats, 1 sample) for one results file."""
    stats = EnsembleStatistics(filter_path=filter_path, evaluator=evaluator)
    return stats.bootstrap(
        ensemble_keys=['repeats', 'lookback_period', 'loss_name'],
        bootstrap_key='repeats',
        bootstrap_size=5,
        number_of_samples=1,
    )
#######################################
def assemble_results(experiment_path):
    """Collect bootstrap statistics for each dataset/split under ``experiment_path``
    and display the per-evaluation-key mean metric table for every experiment.

    The original repeated the same collect/groupby/transpose pattern six times;
    it is now driven by a configuration table. Commented entries are kept for
    reference and can be re-enabled by adding them to ``configurations``.
    """
    # fred_bootstrap = collect_statistics(f'{experiment_path}/fred.csv', FredStatistics())
    # m4_bootstrap = collect_statistics(f'{experiment_path}/M4.csv', M4Statistics())
    # m3_bootstrap = collect_statistics(f'{experiment_path}/M3.csv', M3Statistics())
    # tourism_bootstrap = collect_statistics(f'{experiment_path}/tourism.csv', TourismStatistics())
    # (result key, csv file name, evaluator factory) — one entry per experiment.
    configurations = [
        ('electricity_deepar', 'electricity_deepar.csv', ElectricityStatisticsDeepAR),
        ('electricity_deepfact', 'electricity_deepfactors.csv', ElectricityStatisticsDeepFact),
        ('electricity_lw', 'electricity_last_window.csv', ElectricityStatisticsLastWindow),
        ('traffic_deepar', 'traffic_deepar.csv', TrafficStatisticsDeepAR),
        ('traffic_deepfact', 'traffic_deepfactors.csv', TrafficStatisticsDeepFact),
        ('traffic_lw', 'traffic_last_window.csv', TrafficStatisticsLastWindow),
    ]
    result = {}
    for key, file_name, evaluator_factory in configurations:
        bootstrap = collect_statistics(f'{experiment_path}/{file_name}', evaluator_factory())
        # Mean metric per evaluation key, shown as a single-row table.
        result[key] = bootstrap.groupby('evaluation_key').mean()[['metric']].transpose()
    for k, v in result.items():
        print(k)
        display(v)
# -
# Assemble result tables for shared/not-shared weights with M4 and FRED sources.
# NOTE(review): the four calls below are repeated verbatim — presumably the
# notebook cells were simply re-run; confirm the duplicates are intentional.
assemble_results('/project/experiments/nbeats_meta/shared/*source_dataset=M4')
assemble_results('/project/experiments/nbeats_meta/not_shared/*source_dataset=M4')
assemble_results('/project/experiments/nbeats_meta/shared/*source_dataset=FRED')
assemble_results('/project/experiments/nbeats_meta/not_shared/*source_dataset=FRED')
assemble_results('/project/experiments/nbeats_meta/shared/*source_dataset=M4')
assemble_results('/project/experiments/nbeats_meta/not_shared/*source_dataset=M4')
assemble_results('/project/experiments/nbeats_meta/shared/*source_dataset=FRED')
assemble_results('/project/experiments/nbeats_meta/not_shared/*source_dataset=FRED')
| experiments/tl/MetaPerformance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
def brightcontrast(image, value):
    """Scale pixel intensities by ``value`` (>1 brightens, <1 darkens).

    The result is clamped to the valid 8-bit range [0, 255] and returned as a
    uint8 array, matching OpenCV's grayscale-image convention. ``value`` is
    assumed non-negative, so no lower clamp beyond 0 is ever exercised.
    """
    # np.clip replaces the original's extra np.array copy + one-sided
    # boolean-mask clamp; truncation to uint8 via astype is preserved.
    scaled = image.astype(np.float64) * value
    return np.clip(scaled, 0, 255).astype('uint8')
# Preview the four brightness variants on one sample image (windows shown
# until a key is pressed) before running the batch augmentation below.
image=cv2.imread("/home/paa/COVID-19/Dataset/Normal/1.jpeg",0)  # flag 0 = grayscale
image=cv2.resize(image,(200,200))
bimg=brightcontrast(image,1.1)
cv2.imshow("bimg",bimg)
bbimg=brightcontrast(image,1.2)
cv2.imshow("bbimg",bbimg)
cimg=brightcontrast(image,0.9)
cv2.imshow("cimg",cimg)
ccimg=brightcontrast(image,0.8)
cv2.imshow("ccimg",ccimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Augment the 200 raw "Normal" images: write each resized original followed by
# four brightness variants, numbering the output files sequentially with j.
j = 1
for i in range(1, 201):
    image = cv2.imread("/home/paa/COVID-19/Raw Dataset/Normal/" + str(i) + ".jpeg", 0)
    image = cv2.resize(image, (500, 500))
    # Original first, then the scaled variants — same order as before.
    for variant in (image,
                    brightcontrast(image, 1.1),
                    brightcontrast(image, 1.2),
                    brightcontrast(image, 0.9),
                    brightcontrast(image, 0.8)):
        cv2.imwrite("/home/paa/COVID-19/Positive1/" + str(j) + ".jpeg", variant)
        j += 1
    print(i)
| Data Augmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import glob
# ## Plot the base-station distribution map
# Base-station info lives in base.csv; note the file encoding is cp936 (GBK).
base=pd.read_csv('base.csv', encoding='CP936')
base.head()
# Python 2 print statement — this notebook runs on a Python 2 kernel.
print base.dtypes
# Positioning does not need 3-D, so the Z coordinate is simply discarded here.
#TODO from a model-improvement angle: could the Z coordinate further improve the model?
# Chinese column labels: 序号 = station id, MAC地址 = MAC address, X坐标/Y坐标 = X/Y coord.
data=base.loc[:, [u'序号', u'MAC地址', u'X坐标', u'Y坐标']]
# +
import matplotlib.pyplot as plt
# Scatter every station (plotted as (Y, X)) and annotate each with its id.
x = base[u'X坐标'].tolist()
y = base[u'Y坐标'].tolist()
labels = base[u'序号'].tolist()
macs = base[u'MAC地址'].tolist()
fig, ax = plt.subplots()
ax.scatter(y, x)
for i, txt in enumerate(labels):
    ax.annotate(txt, (y[i], x[i]))
# -
# ## Build the ranging (distance) model
# ### 1. Look up base-station info by MAC address
b0mac = 'C4:BE:84:D5:3E:07'
b0idx = base.index[base[u'MAC地址'] == b0mac].tolist()
b0 = base.iloc[b0idx]
b0
# ### 2. Single-station observation-series analysis for the ranging model
# First, convert a Unix timestamp to a standard datetime.
import datetime
unix_ts = 1439111214.0
time = datetime.datetime.fromtimestamp(unix_ts) # for milliseconds, divide by 1000 first
print time
# +
# How much does the time series itself matter???
f = open(u'建立测距模型所需数据/0.6.txt', 'r')
rssi_all = [] # Bluetooth signal strengths
t_all = [] # observation times relative to the first sample, in seconds
t_min = 0
for line in f.readlines():
    t, _, mac, rssi = line.split()
    # Timestamps are in milliseconds; `long` is Python 2 only.
    t = datetime.datetime.fromtimestamp(long(t)/1000.0)
    if len(t_all)==0:
        # First sample defines the time origin.
        t_min = t
        tmp = t-t_min
        t_all.append(tmp.total_seconds())
    else:
        tmp = t-t_min
        t_all.append(tmp.total_seconds())
    rssi_all.append(int(rssi))
f.close()
rssi_all = np.array(rssi_all)
plt.hist(rssi_all, bins=10)
plt.xlabel('RSSI Bins')
plt.ylabel('Number')
# -
print('RSSI Mean: ', rssi_all.mean(), 'RSSI STD: ', rssi_all.std())
plt.plot(range(len(rssi_all)), rssi_all)
plt.xlabel('time(s)')
plt.ylabel('rssi(-dBm)')
# ### 2.1 Filtering the single-station observations
# Improve observation quality to improve the model.
# https://www.kalmanfilter.net/default.aspx
#
# ![image.png](attachment:image.png)
# ![image-2.png](attachment:image-2.png)
# Scratch cell: transposing a 0-d array is a no-op.
T = np.array(0.5)
print T.transpose()
# +
class Kalman:
    """One-dimensional Kalman filter for smoothing a scalar signal (e.g. RSSI)."""
    def __init__(self, Q, R, start):
        self.A = 1               # state transition coefficient
        self.H = 1               # observation (state -> measurement) coefficient
        self.P = 10              # estimate covariance (initial uncertainty)
        self.Q = Q               # process noise covariance, added to P each step
        self.R = R               # measurement noise covariance
        self.startValue = start  # current state estimate
    def KalmanFilter(self, value):
        """Fold one measurement into the filter and return the updated estimate."""
        # Predict step: extrapolate the state and its covariance.
        prediction = self.A * self.startValue
        self.P = self.A * self.P * self.A + self.Q
        # Update step: Kalman gain, corrected state, corrected covariance.
        gain = self.P * self.H / (self.H * self.P * self.H + self.R)
        self.startValue = prediction + gain * (value - prediction)
        self.P = (1 - gain * self.H) * self.P
        return self.startValue
# Q = 1   # could be estimated from the ranging-model observation data
# R = 300 # measurement noise covariance
def kf(inputs, Q=0.1, R=9):
    """Run a 1-D Kalman filter over ``inputs`` and return the smoothed array.

    The first sample seeds the filter and is passed through unchanged.
    """
    seed = inputs[0]
    smoother = Kalman(Q, R, seed)
    filtered = [seed]
    filtered.extend(smoother.KalmanFilter(sample) for sample in inputs[1:])
    return np.array(filtered)
# +
# Filter the single-station RSSI series and compare raw vs. filtered stats.
inputs = rssi_all
outputs = []
start = inputs[0]
outputs.append(start)
Q = 0.1 # variance of external noise in the prediction step
R = 9 # measurement variance
kalman1 = Kalman(Q, R, start)
for value in inputs[1:]:
    outputs.append(kalman1.KalmanFilter(value))
# Raw series in the default color, filtered series in red.
plt.plot(range(len(inputs)), inputs)
plt.plot(range(len(outputs)), outputs, color='r')
plt.xlabel('time(s)')
plt.ylabel('rssi(-dBm)')
print('Input: RSSI Mean: {0}. RSSI STD: {1}'.format( inputs.mean(), inputs.std()))
print('Output: RSSI Mean: {0}. RSSI STD: {1}'.format( np.mean(outputs), np.std(outputs)))
# -
# ### Multi-station ranging-model data analysis
import os
flist = glob.glob(u'建立测距模型所需数据/*.txt')
# +
# Each file name encodes the true distance in meters (e.g. "0.6.txt").
x = []        # true distances
y = []        # raw RSSI means
y_kf = []     # Kalman-filtered RSSI means
yerr = []     # raw RSSI stds
yerr_kf = []  # filtered RSSI stds
for fp in flist:
    #print(float(os.path.basename(f)[:-4]))
    x.append(float(os.path.basename(fp)[:-4]))
    f = open(fp, 'r')
    rssi_all = []
    for line in f.readlines():
        t, _, mac, rssi = line.split()
        t = datetime.datetime.fromtimestamp(long(t)/1000.0)
        #print(t.strftime('%Y-%m-%d %H:%M:%S'), rssi)
        rssi_all.append(int(rssi))
    f.close()
    rssi_all = np.array(rssi_all)
    # Kalman-filter the series before summarizing.
    rssi_all_kf = kf(rssi_all)
    y.append(rssi_all.mean())
    yerr.append(rssi_all.std())
    y_kf.append(rssi_all_kf.mean())
    yerr_kf.append(rssi_all_kf.std())
# -
# Raw (red) vs. filtered (blue) mean RSSI with error bars per distance.
fig = plt.figure()
plt.errorbar(x, y, yerr=yerr, fmt='o',ecolor='r',color='r', uplims=True, lolims=True)
plt.errorbar(x, y_kf, yerr=yerr_kf, fmt='o',ecolor='b',color='b', uplims=True, lolims=True)
plt.xlabel("Distance(m)")
plt.ylabel("RSSI(dbm)")
# ### 测距模型介绍
#
# 测距模型论文推荐了第三种。
# 针对本次比赛的情况,分析最适合的模型。
#
# 可以舍弃一部分远距离的
#
# 拟合的技术流程
# http://phy224.ca/19-curvefit/index.html
#
# rssi 平滑
# https://www.wouterbulten.nl/blog/tech/kalman-filters-explained-removing-noise-from-rssi-signals/
# 
# +
# Worked curve_fit example on synthetic data before fitting the RSSI model.
from scipy.optimize import curve_fit
iteration=0
def nonlinear_function(t, a, b, c):
    # Logs every optimizer evaluation (Python 2 prints the tuple).
    global iteration
    print (iteration, "a=",a, "b=",b, "c=",c)
    iteration = iteration+1
    return a*t**(b-1) + c
#generated "good" data
t = np.arange(10)
y1 = np.array([-0.173, 2.12, 9.42, 19.69, 37.16, 59.40, 96.59, 119.448, 158.0,201.9])
sigmaNL = np.ones(10)*0.5
iteration=0
poptNL1, pcovNL1 = curve_fit(nonlinear_function, t, y1,
                             absolute_sigma=True, sigma = sigmaNL)
plt.style.use("seaborn-whitegrid")
plt.errorbar(t, y1, yerr=sigmaNL, marker='o', linestyle='none')
plt.plot(t, nonlinear_function(t, poptNL1[0], poptNL1[1], poptNL1[2]))
plt.xlabel("Time")
plt.ylabel("Value")
# +
iteration=0
# Log-distance path-loss model: RSSI = 10*n*log(d) + A
def rssi(distance, n, A):
    global iteration
    print (iteration, "n=", n, "A=", A)
    iteration = iteration+1
    # NOTE(review): np.log is the natural log; the textbook model uses log10.
    # The fitted n absorbs the ln(10) factor, but confirm that is intended.
    return 10*n*np.log(distance)+A
# Fit against the Kalman-filtered means, weighted by their stds.
poptNL1, pcovNL1 = curve_fit(rssi, x, y_kf, absolute_sigma=False, sigma = yerr_kf)
plt.style.use("seaborn-whitegrid")
plt.errorbar(x, y_kf, yerr=yerr_kf, marker='o', linestyle='none')
# Sorted copy of x so the fitted curve plots left-to-right.
x1 = x[:]
x1.sort()
plt.plot(x1, rssi(x1, poptNL1[0], poptNL1[1]))
plt.xlabel("Distance")
plt.ylabel("RSSI")
# + [markdown] slideshow={"slide_type": "-"}
# #### 使用Reduced chi squared衡量模型拟合结果
# +
def chi2(y_measure, y_predict, errors):
    """Chi-squared statistic of a prediction against measurements with errors."""
    normalized_residuals = (y_measure - y_predict) / errors
    return np.sum(normalized_residuals ** 2)
def chi2reduced(y_measure, y_predict, errors, number_of_parameters):
    """Reduced chi-squared: chi2 divided by the degrees of freedom
    (number of measurements minus number of model parameters)."""
    degrees_of_freedom = y_measure.size - number_of_parameters
    return chi2(y_measure, y_predict, errors) / degrees_of_freedom
# Bug fix: y and yerr are in the original (unsorted) order of x, but the
# predictions were evaluated at the *sorted* copy x1, misaligning residuals
# with their measurements. Evaluate the model at x instead.
print(u"拟合模型 chi2r= {0}".format(chi2reduced(np.array(y),
                                            rssi(np.array(x), poptNL1[0], poptNL1[1]),
                                            np.array(yerr),
                                            2)))
# -
# #### Ranging-model fit result
print(u'测距模型:RSSI = 10*%0.5f*log(d) + %0.5f'%(poptNL1[0], poptNL1[1]))
# ### Positioning based on the ranging model
# #### Inverse of the ranging model
# +
from pynverse import inversefunc
# Fitted forward model: distance -> RSSI, inverted numerically below.
RSSI101 = (lambda x: 10*poptNL1[0]*np.log(x)+poptNL1[1])
x_value = inversefunc(RSSI101, y_values=-55)
print("RSSI -55 -> Disatance: ", x_value)
# Compute distance from RSSI.
def rssi2distance(rssi, model=RSSI101):
    """Invert the ranging model: return the distance (m) for a given RSSI.

    Bug fix: the original ignored the ``model`` argument and always inverted
    the global ``RSSI101``; the parameter is now honoured (default unchanged,
    so existing calls behave identically).
    """
    return inversefunc(model, y_values=rssi)
# Sanity check: should match the inversefunc call above.
print("RSSI -55 -> Disatance: ", rssi2distance(-55))
# -
# #### Look up a station id by MAC address
# Look up the station id by MAC address.
def find_by_mac(mac, mac_db=base):
    """Look up a base station by MAC address.

    Returns (station id as str, x, y, z) from the first matching row.
    """
    matches = mac_db.index[mac_db[u'MAC地址'] == mac].tolist()
    row = mac_db.iloc[matches]
    name = str(row[u'序号'].values[0])
    x = float(row[u'X坐标'].values[0])
    y = float(row[u'Y坐标'].values[0])
    z = float(row[u'Z坐标'].values[0])
    return name, x, y, z
# +
# Manual lookup, then the same via find_by_mac — results should match.
mac = 'A0:E6:F8:2D:1F:E8'
idx = base.index[base[u'MAC地址'] == mac].tolist()
record = base.iloc[idx]
name = str(record[u'序号'].values[0])
x = float(record[u'X坐标'].values[0])
y = float(record[u'Y坐标'].values[0])
z = float(record[u'Z坐标'].values[0])
print name, x, y, z
name, x, y, z = find_by_mac(mac, base)
print name, x, y, z
# -
# ### Position solution for a single point
# +
f = open(u'静态定位点/1号点.txt', 'r')
src = {}      # station id -> list of raw RSSI samples
address = {}  # station id -> [mac, (x, y, z)]
# NOTE(review): membership is tested against the *string repr* of the column;
# pandas truncates long Series reprs, so stations may be silently missed.
mac_db = str(base[u'MAC地址'])
for line in f.readlines():
    t, _, mac, rssi = line.split()
    t = datetime.datetime.fromtimestamp(long(t)/1000.0)
    mac = mac.strip()
    if mac in mac_db:
        name, x, y, z = find_by_mac(mac, base)
        if not src.has_key(name):  # Python 2 dict API
            # NOTE(review): the first RSSI sample of each station is dropped
            # here (only the entry is created) — confirm this is intended.
            src[name] = []
            address[name] = [mac, (x, y, z)]
        else:
            src[name].append(int(rssi))
    else:
        pass
        # print"Bad MAC:"+mac
f.close()
# -
# -
for k, v in src.items():
# print k,v
print(k, len(v), np.mean(v), np.std(v))
# ### 观测值的卡尔曼滤波
# +
inputs = src['B8']
outputs = kf(inputs)
plt.plot(range(len(inputs)), inputs)
plt.plot(range(len(outputs)), outputs, color='r')
plt.xlabel('time(s)')
plt.ylabel('rssi(-dBm)')
print np.mean(inputs), np.std(inputs)
print np.mean(outputs), np.std(outputs)
# -
# #### 将原始观测数据进行滤波
src_out = {}
for k, v in src.items():
src_out[k] = kf(v)
# #### 可以采用最小二乘法,解算待定位点位坐标
#
# mlat技术
# https://en.wikipedia.org/wiki/Multilateration
#
# python mlat库
# https://github.com/gsongsong/mlat/tree/master/python
from mlat import MLAT
# Convert each station's filtered RSSI to ranges; store mean range, its std,
# and the sample count in the station's address record (indices 2, 3, 4).
for k, v in address.items():
    src_ranges = [rssi2distance(r) for r in src_out[k]]
    rng = np.mean(src_ranges)
    err = np.std(src_ranges)
    if len(v)<5:
        v.append(rng)
        v.append(err)
        v.append(len(src_ranges))
    else:
        v[2] = rng
        v[3] = err
        v[4] = len(src_ranges)
# Filter stations by std to drop those with large observation error;
# double filter on both std and distance (plus a minimum sample count).
alpha = 1    # max allowed std of the range estimate
beta = 15    # max allowed mean range (m)
num = 100    # minimum number of samples
address_used = []
for k, v in address.items():
    if v[3]<=alpha and v[2]<= beta and v[4]>=num:
        # Row layout: x, y, z, mean range, range std.
        address_used.append(list(v[1])+[v[2]]+[v[3]])
address_used = np.array(address_used)
address_used
anchors = address_used[:,0:3]
print anchors
ranges = address_used[:, 3]
print ranges
errors = address_used[:, 4]
print errors # how could the observation errors be folded into the model?
# +
# Search bounds = bounding box of the anchors; elevation axis fixed at 2.
bounds = np.zeros((2, anchors.shape[1]))
for i in range(anchors.shape[1]):
    bounds[0, i] = min(anchors[:, i]) # minimum boundary of ith axis
    bounds[1, i] = max(anchors[:, i]) # maximum boundary of ith axis
# Elevation does not need to be solved — pin it to 2.
bounds[0, -1] = 2
bounds[1, -1] = 2
estimator, result = MLAT.mlat(anchors, ranges, bounds_in=bounds, n_trial=500, alpha=0.001)
print('Anchors', anchors)
print('Ranges with error:', ranges)
print('Estimator', estimator)
#print('Full result')
#print(result)
# +
# Plot the stations and the estimated position (red pentagon, labeled '1').
x = base[u'X坐标'].tolist()
y = base[u'Y坐标'].tolist()
labels = base[u'序号'].tolist()
macs = base[u'MAC地址'].tolist()
fig, ax = plt.subplots()
ax.scatter(y, x)
for i, txt in enumerate(labels):
    ax.annotate(txt, (y[i], x[i]))
ax.scatter(estimator[1], estimator[0], c = 'r',marker = 'p')
ax.annotate('1', (estimator[1], estimator[0]))
# -
estimator
# ### Positioning results for all static points
flist = glob.glob(u'静态定位点/*.txt')
def locate_by_ble(fname, base=base):
    """Estimate a 3-D position from one BLE observation file via multilateration.

    Reads (timestamp, ?, mac, rssi) rows from ``fname``, groups RSSI samples by
    base station, Kalman-filters each series, converts RSSI to ranges with the
    fitted ranging model, filters out unreliable stations, and solves with MLAT.

    Returns (estimator, anchors, ranges, errors).
    """
    f = open(fname, 'r')
    src = {}      # station id -> list of raw RSSI samples
    address = {}  # station id -> [mac, (x, y, z), mean range, range std]
    # NOTE(review): membership test against the *string repr* of the column —
    # pandas truncates long Series reprs, so stations can be silently missed.
    mac_db = str(base[u'MAC地址'])
    for line in f.readlines():
        t, _, mac, rssi = line.split()
        mac = mac.strip()
        if mac in mac_db:
            name, x, y, z = find_by_mac(mac, base)
            if not src.has_key(name):  # Python 2 dict API
                # NOTE(review): the first RSSI sample of each station is
                # dropped here (only the entry is created) — confirm intended.
                src[name] = []
                address[name] = [mac, (x, y, z)]
            else:
                src[name].append(int(rssi))
    f.close()
    # Kalman-filter each station's RSSI series (skip empty ones).
    src_out = {}
    for k, v in src.items():
        if len(v)>0:
            src_out[k] = kf(v)
        else:
            src_out[k] = v
    # Convert RSSI to distances; keep mean range and its std per station.
    for k, v in address.items():
        src_ranges = [rssi2distance(r) for r in src_out[k]]
        rng = np.mean(src_ranges)
        err = np.std(src_ranges)
        if len(v)<4:
            v.append(rng)
            v.append(err)
        else:
            v[2] = rng
            v[3] = err
    # Filter out stations with a large range or a large observation std.
    alpha = 3   # max allowed std of the range estimate
    beta = 10   # max allowed mean range (m)
    address_used = []
    for k, v in address.items():
        if v[2] <=beta and v[3]<=alpha and len(src_out[k])>= 100: # close, low variance, enough samples
            # Row layout: x, y, z, mean range, range std.
            address_used.append(list(v[1])+[v[2]]+[v[3]])
    address_used = np.array(address_used)
    anchors = address_used[:, 0:3]
    ranges = address_used[:, 3]
    errors = address_used[:, 4]
    # Search bounds = bounding box of the anchors; elevation pinned at 2.
    bounds = np.zeros((2, anchors.shape[1]))
    for i in range(anchors.shape[1]):
        bounds[0, i] = min(anchors[:, i]) # minimum boundary of ith axis
        bounds[1, i] = max(anchors[:, i]) # maximum boundary of ith axis
    bounds[0, -1] = 2
    bounds[1, -1] = 2
    estimator, result = MLAT.mlat(anchors, ranges, bounds_in=bounds, n_trial=500, alpha=0.001)
    return estimator, anchors, ranges, errors
# Solve a single known point first as a sanity check.
xyz,_,_,_ = locate_by_ble(u'静态定位点/1号点.txt', base)
xyz
# Solve every static point; store as [label, plot-x(=Y), plot-y(=X)].
result = []
for f in flist:
    txt = os.path.basename(f)[0:-4]
    print txt
    xyz,_,_,_ = locate_by_ble(f, base)
    x, y, z = xyz
    result.append([txt, y, x])
# +
# Overlay the estimated points (red pentagons) on the station map and dump
# CSV-style lines of label,Y,X for both stations and estimates.
x = base[u'X坐标'].tolist()
y = base[u'Y坐标'].tolist()
labels = base[u'序号'].tolist()
macs = base[u'MAC地址'].tolist()
fig, ax = plt.subplots()
#ax.axis('equal')
ax.scatter(y, x)
for i, txt in enumerate(labels):
    ax.annotate(txt, (y[i], x[i]))
    print ','.join([txt, str(y[i]), str(x[i])])
for i in result:
    ax.scatter(i[1], i[2], c = 'r',marker = 'p')
    ax.annotate(i[0][0:-2], (i[1], i[2]))
    print ','.join([i[0][0:-2], str(i[1]), str(i[2])])
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Clasificación
# + [markdown] slideshow={"slide_type": "slide"}
# Los modelos de regresión asumen que la variable de respuesta es cuantitativa, sin embargo, en muchas situaciones esta variable es cualitativa/categórica, por ejemplo el color de ojos. La idea de predecir variables categóricas es usualmente nombrada como _Clasificación_. Muchos de los problemas en los que se enfoca el Machine Learning están dentro de esta categoría, por lo mismo existen una serie de algoritmos y modelos con tal de obtener los mejores resultados. En esta clase introduciremos el algoritmo de clasificación más sencillo: _Regresión Logística_.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Motivación
# -
# Para comprender mejor los algoritmos de clasificación se comenzará con un ejemplo.
#
# **Space Shuttle Challege**
#
# 28 Junio 1986. A pesar de existir evidencia de funcionamiento defectuoso, se da luz verde al lanzamiento.
#
# 
#
# A los 73 segundos de vuelo, el transbordador espacial explota, matando a los 7 pasajeros.
#
# 
# Como parte del debriefing del accidente, se obtuvieron los siguientes datos
# %pycat ../data/Challenger.txt
# Es posible graficarlos para tener una idea general de estos.
# +
import numpy as np
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
from pathlib import Path
alt.themes.enable('opaque') # Para quienes utilizan temas oscuros en Jupyter Lab/Notebook
# %matplotlib inline
# -
# Load the Challenger launch data: temperature (F) and number of damaged rings.
filepath = Path().resolve().parent / "data" / "Challenger.txt"
challenger = pd.DataFrame(
    # `np.int` was deprecated and removed in NumPy 1.24 — the builtin `int`
    # produces the same dtype here.
    np.loadtxt(filepath, skiprows=1).astype(int),
    columns=["temp_f", "nm_bad_rings"]
)
challenger.head()
# Scatter of damaged-ring count vs. launch temperature (Fahrenheit).
alt.Chart(challenger).mark_circle(size=100).encode(
    x=alt.X("temp_f", scale=alt.Scale(zero=False), title="Temperature [F]"),
    y=alt.Y("nm_bad_rings", title="# Bad Rings")
).properties(
    title="Cantidad de fallas vs temperatura en lanzamiento de Challenger",
    width=600,
    height=400
)
# + [markdown] slideshow={"slide_type": "slide"}
# Nos gustaría saber en qué condiciones se produce accidente. No nos importa el número de fallas, sólo si existe falla o no.
# -
# Un poco de procesamiento de datos
# Derive Celsius temperature, a binary failure flag, and a readable label.
challenger = challenger.assign(
    temp_c=lambda x: ((x["temp_f"] - 32.) / 1.8).round(2),
    # `np.int` was removed in NumPy 1.24 — the builtin `int` is equivalent.
    is_failure=lambda x: x["nm_bad_rings"].ne(0).astype(int),
    ds_failure=lambda x: x["is_failure"].map({1: "Falla", 0:"Éxito"})
)
challenger.head()
# +
# Success/failure (0/1) vs. temperature in Celsius, colored by outcome label.
failure_chart = alt.Chart(challenger).mark_circle(size=100).encode(
    x=alt.X("temp_c:Q", scale=alt.Scale(zero=False), title="Temperature [C]"),
    y=alt.Y("is_failure:Q", scale=alt.Scale(padding=0.5), title="Success/Failure"),
    color="ds_failure:N"
).properties(
    title="Exito o Falla en lanzamiento de Challenger",
    width=600,
    height=400
)
failure_chart
# + [markdown] slideshow={"slide_type": "slide"}
# ## Regresión Logística
# -
# Recordemos que la **Regresión Lineal** considera un modelo de la siguiente forma:
#
# $$ Y \approx X \theta $$
#
# donde
# $$
# Y = \begin{bmatrix}y^{(1)} \\ y^{(2)} \\ \vdots \\ y^{(m)}\end{bmatrix}
# \qquad , \qquad
# X = \begin{bmatrix}
# 1 & x^{(1)}_1 & \dots & x^{(1)}_n \\
# 1 & x^{(2)}_1 & \dots & x^{(2)}_n \\
# \vdots & \vdots & & \vdots \\
# 1 & x^{(m)}_1 & \dots & x^{(m)}_n
# \end{bmatrix}
# \qquad y \qquad
# \theta = \begin{bmatrix}\theta_1 \\ \theta_2 \\ \vdots \\ \theta_m\end{bmatrix}
# $$
#
# y que se entrenar una función lineal
#
# $$h_{\theta}(x) = \theta_0 + \theta_1 x_1 + ... + \theta_n x_n$$
#
# deforma que se minimice
#
# $$J(\theta) = \frac{1}{2} \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right)^2$$
#
#
# La **Regresión Logística** busca entrenar la función
#
# $$h_{\theta}(x) = \frac{1}{1 + e^{-(\theta_0 + \theta_1 x_1 + ... + \theta_n x_n)}}$$
#
# de forma que se minimice
#
# $$J(\theta) = \frac{1}{2} \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right)^2$$
#
# Es decir, el objetivo es encontrar un _"buen"_ vector $\theta$ de modo que
#
# $$Y \approx g(X \theta)$$
#
# en donde $g(z)$ es la función sigmoide (_sigmoid function_),
#
# $$g(z) = \frac{1}{1+e^{-z}}$$
# ### Función Sigmoide
def sigmoid(z):
    """Logistic sigmoid g(z) = 1 / (1 + exp(-z)); works on scalars and arrays."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
# Sample the sigmoid and two transformed variants over [-5, 5] and melt the
# wide table into long form for plotting.
z = np.linspace(-5,5,100)
sigmoid_df_tmp = pd.DataFrame(
    {
        "z": z,
        "sigmoid(z)": sigmoid(z),
        "sigmoid(z*2)": sigmoid(z * 2),
        "sigmoid(z-2)": sigmoid(z - 2),
    }
)
sigmoid_df = pd.melt(
    sigmoid_df_tmp,
    id_vars="z",
    value_vars=["sigmoid(z)", "sigmoid(z*2)", "sigmoid(z-2)"],
    var_name="sigmoid_function",
    value_name="value"
)
sigmoid_df.head()
# One line per sigmoid variant, colored by function name.
alt.Chart(sigmoid_df).mark_line().encode(
    x="z:Q",
    y="value:Q",
    color="sigmoid_function:N"
).properties(
    title="Sigmoid functions",
    width=600,
    height=400
)
# La función
# sigmoide $g(z) = (1+e^{-z})^{-1}$ tiene la siguiente propiedad:
#
# $$g'(z) = g(z)(1-g(z))$$
#
# "Demostración":
#
# $$\begin{aligned}
# g'(z) &= \frac{-1}{(1+e^{-z})^2} (-e^{-z}) \\
# &= \frac{e^{-z}}{(1+e^{-z})^2} \\
# &= \frac{1}{1+e^{-z}} \frac{e^{-z}}{1+e^{-z}} \\
# &= \frac{1}{1+e^{-z}} \left(1 - \frac{1}{1+e^{-z}} \right) \\
# &= g(z)(1-g(z))\end{aligned}$$
# +
def d_sigmoid(z):
    """Derivative of the sigmoid: g'(z) = g(z) * (1 - g(z))."""
    g = sigmoid(z)
    return g * (1 - g)
# Long-form table of the sigmoid and its derivative for plotting.
sigmoid_dz_df = (
    pd.DataFrame(
        {
            "z": z,
            "sigmoid(z)": sigmoid(z),
            "d_sigmoid(z)/dz": d_sigmoid(z)
        }
    )
    .melt(
        id_vars="z",
        value_vars=["sigmoid(z)", "d_sigmoid(z)/dz"],
        var_name="function"
    )
)
sigmoid_dz_df.head()
# -
# Plot the sigmoid together with its derivative.
alt.Chart(sigmoid_dz_df).mark_line().encode(
    x="z:Q",
    y="value:Q",
    color="function:N"
).properties(
    title="Sigmoid and her derivate function",
    width=600,
    height=400
)
# ¡Es perfecta para encontrar un punto de corte y clasificar de manera binaria!
# <a id='implementation'></a>
# ## Implementación
# + [markdown] slideshow={"slide_type": "slide"}
# ### Aproximación Ingenieril
#
# ¿Cómo podemos reutilizar lo que conocemos de regresión lineal?
#
# Si buscamos minimizar
# $$J(\theta) = \frac{1}{2} \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right)^2$$
# Podemos calcular el gradiente y luego utilizar el método del máximo
# descenso para obtener $\theta$.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# El cálculo del gradiente es directo:
#
# $$\begin{aligned}
# \frac{\partial J(\theta)}{\partial \theta_k}
# &= \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) \frac{\partial}{\partial \theta_k} h_{\theta}(x^{(i)}) \\
# &= \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) \frac{\partial}{\partial \theta_k} g(\theta^T x^{(i)}) \\
# &= \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) h_{\theta}(x^{(i)}) \left(1-h_{\theta}(x^{(i)})\right) \frac{\partial}{\partial \theta_k} (\theta^T x^{(i)}) \\
# &= \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) h_{\theta}(x^{(i)}) \left(1-h_{\theta}(x^{(i)})\right) x^{(i)}_k\end{aligned}$$
#
# + [markdown] slideshow={"slide_type": "slide"}
# ¿Hay alguna forma de escribir todo esto de manera matricial? Recordemos
# que si las componentes eran
#
# $$\begin{aligned}
# \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) x^{(i)}_k = \sum_{i=1}^{m} x^{(i)}_k \left( h_{\theta}(x^{(i)}) - y^{(i)}\right)\end{aligned}$$
#
# podíamos escribirlo vectorialmente como $$X^T (X\theta - Y)$$
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# Luego, para
#
# $$\begin{aligned}
# \frac{\partial J(\theta)}{\partial \theta_k}
# &= \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) h_{\theta}(x^{(i)}) \left(1-h_{\theta}(x^{(i)})\right) x^{(i)}_k \\
# &= \sum_{i=1}^{m} x^{(i)}_k \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) h_{\theta}(x^{(i)}) \left(1-h_{\theta}(x^{(i)})\right)\end{aligned}$$
#
# podemos escribirlo vectorialmente como
# $$\nabla_{\theta} J(\theta) = X^T \Big[ (g(X\theta) - Y) \odot g(X\theta) \odot (1-g(X\theta)) \Big]$$
# donde $\odot$ es la multiplicación elemento a elemento (element-wise) o producto Hadamard.
# + [markdown] slideshow={"slide_type": "slide"}
# **Observación crucial:**
# $$\nabla_{\theta} J(\theta) = X^T \Big[ (g(X\theta) - Y) \odot g(X\theta) \odot (1-g(X\theta)) \Big]$$
# no permite construir un sistema lineal para $\theta$, por lo cual sólo
# podemos resolver iterativamente.
#
# + [markdown] slideshow={"slide_type": "slide"}
# Por
# lo tanto tenemos el algoritmo
#
# $$\begin{aligned}
# \theta^{(n+1)} & = \theta^{(n)} - \alpha \nabla_{\theta} J(\theta^{(n)}) \\
# \nabla_{\theta} J(\theta) &= X^T \Big[ (g(X\theta) - Y) \odot g(X\theta) \odot (1-g(X\theta)) \Big]\end{aligned}$$
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# El código sería el siguiente:
# -
def norm2_error_logistic_regression(X, Y, theta0, tol=1E-6, max_iter=1000000):
    """Fit logistic regression by gradient descent on the squared-error loss.

    Parameters: design matrix ``X`` (m x n), responses ``Y`` (m x 1), initial
    parameters ``theta0``; stops when the relative parameter update falls
    below ``tol`` or after ``max_iter`` iterations (new guard — the original
    ``while`` loop could spin forever if the descent never converged).
    Returns the fitted parameter vector.
    """
    alpha = 0.01 / len(Y)  # fixed step size, scaled by the sample count
    theta = theta0
    for _ in range(max_iter):
        H = sigmoid(np.dot(X, theta))
        # Gradient of 0.5 * ||g(X theta) - Y||^2 via the sigmoid derivative.
        gradient = np.dot(X.T, (H - Y) * H * (1 - H))
        new_theta = theta - alpha * gradient
        converged = np.linalg.norm(theta - new_theta) < tol * np.linalg.norm(theta)
        theta = new_theta
        if converged:
            break
    return theta
# + [markdown] slideshow={"slide_type": "slide"}
# ### Interpretación Probabilística
#
# ¿Es la derivación anterior
# probabilísticamente correcta?
#
# Asumamos que la pertenencia a los grupos está dado por
#
# $$\begin{aligned}
# \mathbb{P}[y = 1| \ x ; \theta ] & = h_\theta(x) \\
# \mathbb{P}[y = 0| \ x ; \theta ] & = 1 - h_\theta(x)\end{aligned}$$
#
# Esto es, una distribución de Bernoulli con $p=h_\theta(x)$.\
# Las expresiones anteriores pueden escribirse de manera más compacta como
#
# $$\begin{aligned}
# \mathbb{P}[y | \ x ; \theta ] & = (h_\theta(x))^y (1 - h_\theta(x))^{(1-y)} \\\end{aligned}$$
#
# + [markdown] slideshow={"slide_type": "slide"}
# La función de verosimilitud $L(\theta)$ nos
# permite entender que tan probable es encontrar los datos observados,
# para una elección del parámetro $\theta$.
#
# $$\begin{aligned}
# L(\theta)
# &= \prod_{i=1}^{m} \mathbb{P}[y^{(i)}| x^{(i)}; \theta ] \\
# &= \prod_{i=1}^{m} \Big(h_{\theta}(x^{(i)})\Big)^{y^{(i)}} \Big(1 - h_\theta(x^{(i)})\Big)^{(1-y^{(i)})}\end{aligned}$$
#
# Nos gustaría encontrar el parámetro $\theta$ que más probablemente haya
# generado los datos observados, es decir, el parámetro $\theta$ que
# maximiza la función de verosimilitud.
#
# + [markdown] slideshow={"slide_type": "slide"}
# Calculamos la log-verosimilitud:
#
# $$\begin{aligned}
# l(\theta)
# &= \log L(\theta) \\
# &= \log \prod_{i=1}^{m} (h_\theta(x^{(i)}))^{y^{(i)}} (1 - h_\theta(x^{(i)}))^{(1-y^{(i)})} \\
# &= \sum_{i=1}^{m} y^{(i)}\log (h_\theta(x^{(i)})) + (1-y^{(i)}) \log (1 - h_\theta(x^{(i)}))\end{aligned}$$
#
# No existe una fórmula cerrada que nos permita obtener el máximo de la
# log-verosimitud. Pero podemos utilizar nuevamente el método del
# gradiente máximo.
#
# + [markdown] slideshow={"slide_type": "slide"}
# Recordemos que si
#
# $$\begin{aligned}
# g(z) = \frac{1}{1+e^{-z}}\end{aligned}$$
#
# Entonces
#
# $$\begin{aligned}
# g'(z) &= g(z)(1-g(z))\end{aligned}$$
#
# y luego tenemos que
#
# $$\begin{aligned}
# \frac{\partial}{\partial \theta_k} h_\theta(x) &= h_\theta(x) (1-h_\theta(x)) x_k\end{aligned}$$
#
# + [markdown] slideshow={"slide_type": "slide"}
# $$\begin{aligned}
# \frac{\partial}{\partial \theta_k} l(\theta) &=
# \frac{\partial}{\partial \theta_k} \sum_{i=1}^{m} y^{(i)}\log (h_\theta(x^{(i)})) + (1-y^{(i)}) \log (1 - h_\theta(x^{(i)})) \\
# &= \sum_{i=1}^{m} y^{(i)}\frac{\partial}{\partial \theta_k} \log (h_\theta(x^{(i)})) + (1-y^{(i)}) \frac{\partial}{\partial \theta_k} \log (1 - h_\theta(x^{(i)})) \\
# &= \sum_{i=1}^{m} y^{(i)}\frac{1}{h_\theta(x^{(i)})}\frac{\partial h_\theta(x^{(i)})}{\partial \theta_k} + (1-y^{(i)}) \frac{1}{1 - h_\theta(x^{(i)})} \frac{\partial (1-h_\theta(x^{(i)}))}{\partial \theta_k} \\
# &= \sum_{i=1}^{m} y^{(i)}(1-h_\theta(x^{(i)})) x^{(i)}- (1-y^{(i)}) h_\theta(x^{(i)}) x^{(i)}\\
# &= \sum_{i=1}^{m} y^{(i)}x^{(i)}- y^{(i)}h_\theta(x^{(i)}) x^{(i)}- h_\theta(x^{(i)}) x^{(i)}+ y^{(i)}h_\theta(x^{(i)}) x^{(i)}\\
# &= \sum_{i=1}^{m} (y^{(i)}-h_\theta(x^{(i)})) x^{(i)}\end{aligned}$$
#
# + [markdown] slideshow={"slide_type": "slide"}
# Es decir, para maximizar la log-verosimilitud
# obtenemos igual que para la regresión lineal:
#
# $$\begin{aligned}
# \theta^{(n+1)} & = \theta^{(n)} - \alpha \nabla_{\theta} l(\theta^{(n)}) \\
# \frac{\partial l(\theta)}{\partial \theta_k}
# &= \sum_{i=1}^{m} \left( h_{\theta}(x^{(i)}) - y^{(i)}\right) x^{(i)}_k\end{aligned}$$
#
# Aunque, en el caso de regresión logística, se tiene
# $h_\theta(x)=1/(1+e^{-x^T\theta})$
# -
def likelihood_logistic_regression(X, Y, theta0, tol=1E-6, max_iter=1000000):
    """Fit logistic regression by gradient ascent on the log-likelihood.

    Same interface as ``norm2_error_logistic_regression``: stops when the
    relative parameter update falls below ``tol`` or after ``max_iter``
    iterations (new guard — the original ``while`` loop could spin forever).
    Returns the fitted parameter vector.
    """
    alpha = 0.01 / len(Y)  # fixed step size, scaled by the sample count
    theta = theta0
    for _ in range(max_iter):
        H = sigmoid(np.dot(X, theta))
        # Gradient of the log-likelihood simplifies to X^T (H - Y).
        gradient = np.dot(X.T, H - Y)
        new_theta = theta - alpha * gradient
        converged = np.linalg.norm(theta - new_theta) < tol * np.linalg.norm(theta)
        theta = new_theta
        if converged:
            break
    return theta
# ## Aplicación
# Recordemos que los datos son:
challenger.head()
# Build the design matrix X and response vector Y from the dataframe above.
# Bug fix: the column keyword was garbled as `in|tercept` (stray '|'), which
# is a syntax error; it should be `intercept`.
X = challenger.assign(intercept=1).loc[:, ["intercept", "temp_c"]].values
y = challenger.loc[:, ["is_failure"]].values
# Crude starting point for the gradient methods.
theta_0 = (y.mean() / X.mean(axis=0)).reshape(2, 1)
print(f"theta_0 =\n {theta_0}\n")
theta_J = norm2_error_logistic_regression(X, y, theta_0)
print(f"theta_J =\n {theta_J}\n")
theta_l = likelihood_logistic_regression(X, y, theta_0)
print(f"theta_l = \n{theta_l}")
# Long-form table of both fitted models' predicted failure probabilities.
predict_df = (
    pd.DataFrame(
        {
            "temp_c": X[:, 1],
            "norm_2_error_prediction": sigmoid(np.dot(X, theta_J)).ravel(),
            "likelihood_prediction": sigmoid(np.dot(X, theta_l)).ravel()
        }
    )
    .melt(
        id_vars="temp_c",
        value_vars=["norm_2_error_prediction", "likelihood_prediction"],
        var_name="prediction"
    )
)
predict_df.head()
# +
# Overlay the fitted probability curves, the data, and the 0.5 decision line.
prediction_chart = alt.Chart(predict_df).mark_line().encode(
    x=alt.X("temp_c:Q", scale=alt.Scale(zero=False), title="Temperature [C]"),
    y=alt.Y("value:Q", scale=alt.Scale(padding=0.5)),
    color="prediction:N"
)
rule = alt.Chart(pd.DataFrame({"decision_boundary": [0.5]})).mark_rule().encode(y='decision_boundary')
(prediction_chart + failure_chart + rule).properties(title="Data and Prediction Failure")
# -
# ### scikit-learn
# Scikit-learn tiene su propia implementación de Regresión Logística al igual que la Regresión Lineal. A medida que vayas leyendo e interiorizándote en la librería verás que tratan de mantener una sintaxis y API consistente en los distintos objetos y métodos.
from sklearn.linear_model import LogisticRegression
# +
# Same model via scikit-learn, using only the Celsius temperature feature.
X = challenger[["temp_c"]].values
y = challenger["is_failure"].values
# Fitting the model
Logit = LogisticRegression(solver="lbfgs")
Logit.fit(X, y)
# +
# Obtain the coefficients
print(Logit.intercept_, Logit.coef_ )
# Predicting values
y_pred = Logit.predict(X)
# -
# Procedamos a calcular la matriz de confusión.
#
# Por definición, la matriz de confusión $C$ es tal que $C_{i, j}$ es igual al número de observaciones conocidas en el grupo $i$ y predecidas en el grupo $j$.
# +
from sklearn.metrics import confusion_matrix
# C[i, j] = observations known to be in class i and predicted as class j.
cm = confusion_matrix(y, y_pred, labels=[0, 1])
print(cm)
# -
pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
Logit
# +
# NOTE(review): `plot_confusion_matrix` was deprecated and removed in
# scikit-learn 1.2 — newer versions use ConfusionMatrixDisplay.from_estimator.
from sklearn.metrics import plot_confusion_matrix # New in scikit-learn 0.22
plot_confusion_matrix(Logit, X, y);
# -
# ### Otro ejemplo: Iris Dataset
# +
from sklearn import datasets
iris = datasets.load_iris()
# print(iris.DESCR)
# +
# Tidy dataframe: features + numeric target + human-readable class name.
iris_df = (
    pd.DataFrame(iris.data, columns=iris.feature_names)
    .assign(
        target=iris.target,
        target_names=lambda x: x.target.map(dict(zip(range(3), iris.target_names)))
    )
)
iris_df.head()
# -
from sklearn.model_selection import train_test_split
# +
X = iris.data[:, :2]  # we only take the first two features.
y = iris.target
# split dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 42)
# model
iris_lclf = LogisticRegression()
iris_lclf.fit(X_train, y_train)
# +
## Inspired by https://scikit-learn.org/stable/auto_examples/linear_model/plot_iris_logistic.html#sphx-glr-auto-examples-linear-model-plot-iris-logistic-py
plt.figure(figsize=(14, 8))
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = .01  # step size in the mesh
# Evaluate the classifier on a dense grid to draw the decision regions.
xx, yy = np.meshgrid(
    np.arange(x_min, x_max, h),
    np.arange(y_min, y_max, h)
)
Z = iris_lclf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, shading="auto")
# Plot also the real points
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
# -
# Prediction, on the other hand, must be done on the held-out test set.
y_pred = iris_lclf.predict(X_test)
plot_confusion_matrix(iris_lclf, X_test, y_test);
plot_confusion_matrix(iris_lclf, X_train, y_train);
plot_confusion_matrix(iris_lclf, X, y);
| lessons/M4L03_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="NDrXKN-NHeYR"
# # Modules
# + [markdown] colab_type="text" id="i0AVpPtvHeYU"
# Modules refer to a file containing Python statements and definitions.
#
# A file containing Python code, for e.g.: abc.py, is called a module and its module name would be "abc".
# + [markdown] colab_type="text" id="Cncmoy9XHeYW"
# We use modules to break down large programs into small manageable and organized files. Furthermore, modules provide reusability of code.
#
# We can define our most used functions in a module and import it, instead of copying their definitions into different programs.
# + [markdown] colab_type="text" id="OMq5zEJZHeYY"
# # How to import a module?
# + [markdown] colab_type="text" id="-BZo1veLHeYa"
# We use the import keyword to do this.
# + colab={} colab_type="code" id="LkdtsKI-HeYb"
import example #imported example module
# + [markdown] colab_type="text" id="8QYfvfCiHeYh"
# Using the module name we can access the function using dot (.) operation.
# + colab={} colab_type="code" id="4aS7F29GHeYk" outputId="6283c968-e7a6-4450-de6a-94abff22feff"
example.add(10, 20)
# + [markdown] colab_type="text" id="UzrMkOH9HeYs"
# Python has a lot of standard modules available.
#
# https://docs.python.org/3/py-modindex.html
# + [markdown] colab_type="text" id="k-zR_tU7HeYs"
# # Examples:
# + colab={} colab_type="code" id="dRUqBrXjHeYt" outputId="add83409-8085-4540-b91b-be5580a6a8c3"
import math
print(math.pi)
# + colab={} colab_type="code" id="wr-Jyr-cHeY1" outputId="438ed43d-8c18-4da3-e929-1c4c1676ba5c"
import datetime
datetime.datetime.now()
# + [markdown] colab_type="text" id="34mCV3oJHeY4"
# # import with renaming
# + colab={} colab_type="code" id="BljTPvmRHeY5" outputId="c1213478-fa22-4cc8-cc8a-d6073e5afb1d"
import math as m
print(m.pi)
# + [markdown] colab_type="text" id="Yck3dGWaHeY-"
# # from...import statement
# + [markdown] colab_type="text" id="PgxZkgecHeY_"
# We can import specific names from a module without importing the module as a whole.
# + colab={} colab_type="code" id="SAgwJxMuHeZA" outputId="1c5c83bd-525f-483d-95dd-5813d922912b"
from datetime import datetime
datetime.now()
# + [markdown] colab_type="text" id="3FKKbMKCHeZL"
# # import all names
# + colab={} colab_type="code" id="OvEe2dxZHeZM" outputId="42a14832-f267-4af1-b712-79c1d099a760"
from math import *
print("Value of PI is " + str(pi))
# + [markdown] colab_type="text" id="dPVCLPyQHeZR"
# # dir() built in function
# + [markdown] colab_type="text" id="EPRn9wTiHeZT"
# We can use the dir() function to find out names that are defined inside a module.
# + colab={} colab_type="code" id="T9PeK-KxHeZX" outputId="ecdf1d42-e747-4e27-9a5a-5811ea885c97"
dir(example)
# + colab={} colab_type="code" id="1vvA0aSwHeZb" outputId="61b9e1ff-5334-4103-8a62-b4e40ae61002"
print(example.add.__doc__)
| Python Functions, Packages, Input_Output, Exception Handling and Debugging/6_modules.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# +
# import networkx as nx
# from IPython.core.display import Image
# from networkx.drawing.nx_pydot import to_pydot
# g1 = nx.DiGraph()
# g1.add_edge("A", "B")
# g1.add_edge("B", "C")
# d1 = to_pydot(g1)
# d1.set_dpi(300)
# d1.set_rankdir("LR")
# d1.set_margin(0.2)
# Image(d1.create_png(), width=600)
# -
# Graphviz 설치 오류 또는 경로 설정 이상으로 실행안됨
# 블로그 보고 추후 오류 수정 예정
# >- http://yeyyyyee.blogspot.com/2019/10/graphviz-dot-not-found-in-path.html
# >- https://graphviz.gitlab.io/_pages/Download/Download_windows.html
#
# ## 베이지안 네트워크 모형
# >베이지안 네트워크 모형 : 인과관계가 확실하여 방향성 그래프를 표시할 수 있는 모형
# 예)
#
# A, B, C가 각각 어떤 학생의
#
# * A: 건강 상태
# * B: 공부 시간
# * C: 시험 성적
#
# 을 나타낸 것이라고 하자. 이 확률변수는 각각 $\{0, 1, 2\}$라는 값을 가질 수 있는데 하(0), 중(1), 상(2)의 상태를 나타낸다.
# 즉 $A=0$이면 건강 상태가 안좋은 것이고 $B=1$이면 공부 시간이 보통이며 $C=2$이면 시험 성적이 좋은 것이다.
#
# 조건부 확률 분포를 표로 나타내면 다음과 같다
# +
from pgmpy.factors.discrete import TabularCPD
## 3인자 가지면 (3,1) 형태로 확률 입력
P_A = TabularCPD('A', 3, [[0.1],[0.6],[0.3]])
print(P_A)
# -
# P(B|A): conditional distribution of B (study time) given A (health).
# Probabilities are entered row by row (one row per value of B).
import numpy as np  # FIX: np is used below but was never imported in this notebook

P_B_I_A = TabularCPD('B', 3,
                     np.array([[0.6, 0.2, 0.1], [0.3, 0.5, 0.3], [0.1, 0.3, 0.6]]),
                     evidence=['A'], evidence_card=[3])
print(P_B_I_A)
# +
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
P_A = TabularCPD('A', 3, [[0.1],[0.6], [0.3]])
P_B_I_A = TabularCPD('B', 3,
np.array([[0.6, 0.2, 0.1], [0.3, 0.5, 0.3], [0.1, 0.3, 0.6]]),
evidence=['A'], evidence_card=[3])
P_C_I_B = TabularCPD('C', 3,
np.array([[0.7, 0.1, 0.1], [0.2, 0.6, 0.1], [0.1, 0.3, 0.8]]),
evidence=['B'], evidence_card=[3])
model = BayesianModel([('A', 'B'), ('B', 'C')])
model.add_cpds(P_A, P_B_I_A, P_C_I_B)
# d = to_pydot(model)
# d.set_dpi(300)
# d.set_margin(0.2)
# d.set_rankdir("LR")
# Image(d.create_png(), width=600)
# -
# CPD 객체는 `marginalize` 메서드로 특정 변수의 모든 경우의 확률을 더하는 sum-out을 할 수 있다.
#
# $$ P(B) = \sum_A P(A,B) = \sum_A P(B|A)P(A) $$
#P(A,B)
print(P_B_I_A * P_A)
# sum(P(A,B)) = P(B)
P_B = (P_B_I_A * P_A).marginalize(["A"],inplace = False)
print(P_B)
#P(C|B)
P_C_I_B = TabularCPD('C', 3,
np.array([[0.7, 0.1, 0.1], [0.2, 0.6, 0.1], [0.1, 0.3, 0.8]]),
evidence=['B'], evidence_card=[3])
print(P_C_I_B)
#P(C)= Sum(P(C,B))
P_C = (P_C_I_B* P_B).marginalize(["B"], inplace=False)
print(P_C)
# ### 조건부 확률들을 결합하여 베이지안 네트워크로 만들기
# +
from pgmpy.models import BayesianModel
model = BayesianModel([('A','B'),('B','C')])
model.add_cpds(P_A, P_B_I_A, P_C_I_B)
model.check_model()
# +
# from IPython.core.display import Image
# from networkx.drawing.nx_pydot import to_pydot
# d = to_pydot(model)
# d.set_dpi(300)
# d.set_margin(0.2)
# d.set_rankdir("LR")
# Image(d.create_png(), width=600)
# -
# 건강 상태가 좋은 경우(A=2)의 성적(C) 분포를 구하라.
# +
#P(C|A=2)
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
print(infer.query(["C"], evidence={"A": 2}))
# -
#P(A=2)
P_A2 = TabularCPD('A', 3, [[0],[0], [1]])
#P(B) = Sum(P(B,A=2))
P_B = (P_B_I_A * P_A2).marginalize(["A"], inplace=False)
#P(C)= Sum(P(C,B))
P_C = (P_C_I_B * P_B).marginalize(["B"], inplace=False)
print(P_C)
# 건강 상태는 좋지만(A=2), 공부를 하지 않는 경우(B=0)의 성적 분포를 구하라.
# +
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
#P(B=0), A는 상관없다
P_B2 = TabularCPD('B', 3, [[1],[0], [0]])
P_C = (P_C_I_B* P_B2).marginalize(["B"], inplace=False)
print(P_C)
# +
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
print(infer.query(["C"], evidence={"A": 2, "B":0}))
# +
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
print(infer.query(["A"], evidence={"B": 0}))
# -
print(infer.query(["A"], evidence={"B": 0, "C":0}))
# ## 베이지안 네트워크의 결합확률분포
# 베이지안 네트워크를 만들려면 조사 대상이 되는 확률변수를 노드(node)로 생성하고 인과관계가 있는 노드를 방향성 간선(directed edge)로 연결한다.
# 베이지안 네트워크를 구성하는 확률변수의 결합확률분포는 다음처럼 주어진다.
#
# $$ P(X_1, \cdots, X_N) = \prod_{i=1}^N P(X_i | Pa(X_i)) $$
# 
# ### 머리- 머리 결합
# - A,B가 서로 독립이다.
# - 하지만 C값을 알고있다면 A,B가 서로 종속관계가 된다
# - 수식으로 표현하면 다음과 같다
# $$ P(A,B,C) = P(A)P(B)P(C|A,B) $$
#
# $$ P(A,B) = \sum_c P(A)P(B)P(C|A,B) = P(A)P(B) $$
#
# 
# ### 방향성 분리(d-separation)정리
# - A와 B가 C에 대해서 조건부 독립인 경우 다음 조건을 만족해야한다
# 1. C가 A,B 사이의 경로상에 있는 꼬리-꼬리 결합이거나, 머리-꼬리 결합이다.
# 2. C가 A,B 사이의 경로상에 있는 머리-머리 결합이거나 혹은 이러한 노드의 자손이 아니어야 한다.
#
# ## 마코프 네트워크
# - 무방향성 그래프
# 
# ### 클리크와 팩터
# - 마코프 네트워크를 구성하는 요소
# - 클리크를 구성하는 확률변수의 분포 : 포텐셜 함수(팩터)
#
# <br>
# - 팩터(factor)
#
# > 가능한 모든 결과의 조합에 대한 결합확률분포 또는 조건부확률분포에 비례하는 함수
#
# >더해서 1이 되어야 할 필요는 없다.
#
#
# $$ p(X, Y) = \dfrac{1}{Z}\psi_1(X, Y) $$
#
# $$ p(X | Y) = \dfrac{1}{Z}\psi_2(X, Y) $$
#
#
# +
#DiscreateFactor : 팩터정의하는 함수
from pgmpy.factors.discrete import DiscreteFactor
#팩터 정의
phi = DiscreteFactor(['x1','x2','x3'],[2,2,2],np.arange(8))
print(phi)
# -
# to_factor CPD 결합분포 객체를 팩터로 변환 (결합확률분포에 비례)
print(P_B_I_A)
print(P_B_I_A.to_factor())
# +
## reduce: (factor 개체)어떤 확률변수가 특정한 값을 가지는 경우만 추출
### likelihood 여서 합이 1이 아니라는것 염두
### B=0일때 조건부확률에 비례하는 값
print(P_B_I_A.to_factor().reduce([("B",0)], inplace=False))
# -
# ## normalize : 정규화
# - 합이 1이 되도록 정규화
# - 조건부확률
# P(A|B=0) => 조건부확률
print(P_B_I_A.to_factor().reduce([("B",0)], inplace=False).normalize(inplace=False))
# ## 마코프 네트워크의 결합확률분포
# - 모든 클리크의 팩터의 곱
#
# $$ P(X) = \dfrac{1}{Z(X)} \prod_{\{C\}} \psi_C(X_C) $$
#
#
# $C$는 클리크, $X_C$는 그 클리크 안의 확률변수, $\psi_C$는 그 클리크의 팩터, $\{C\}$는 모든 클리크의 집합, $Z$는 파티션 함수(partition)함수
#
# 예)
# 3 x 3 이미지의 경우 9개의 확률변수의 결합확률분포는 다음처럼 표현할 수 있다.
#
# $$
# P(X_{11}, \ldots, X_{33}) =
# \dfrac{1}{Z}
# \prod
# \psi(X_{11}, X_{12})
# \psi(X_{11}, X_{21})
# \psi(X_{12}, X_{13})
# \cdots
# \psi(X_{23}, X_{33})
# \psi(X_{32}, X_{33})
# $$
# ## 에너지 함수
# - 벡터함수는 다음과 같은 형태로 표시할 수 있다.
#
# $$ \psi(X) = \exp(−E(X)) $$
#
# $E(X)$ : 에너지 함수(energy function). 확률이 높을수록 에너지 함수는 작아짐
#
# 예)
#
# 베르누이 확률변수(값 0,1) $X1$,$X2$
# $$ E(X_1, X_2) = -3(2X_1 - 1)(2X_2 - 1) $$
#
# - 펙터의 값
# > $$ \psi(X_1 = 1, X_2 = 1) = e^3 $$
# $$ \psi(X_1 = 0, X_2 = 0) = e^3 $$
# $$ \psi(X_1 = 1, X_2 = 0) = e^{-3} $$
# $$ \psi(X_1 = 0, X_2 = 1) = e^{-3} $$
# # 네트워크 추론
# - 확률모형에서 일부 확률변수의 값이 주어졌을 때 다른 확률 변수의 값이 얼마인지 알아내는 것
#
# > 조건부 확률분포함수 $p(X_{\text{unknown}}|\{X\}_{\text{known}})$를 알면 일부 확률변수의 값 $\{X\}_{\text{known}}$이 주어졌을 때 다른 확률변수 $X_{\text{unknown}}$의 확률 $p(X_{\text{unknown}})$을 알 수 있으므로 추론은 조건부 확률분포함수 $p(X_{\text{unknown}}|\{X\}_{\text{known}})$를 알아내는 것과 같다.
# +
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
# A :건강상태
# B : 공부시간
# c : 시험성적
## 각각 하,중,상 값을 가짐
P_A = TabularCPD('A', 3, [[0.1],[ 0.6], [0.3]])
P_B_I_A = TabularCPD('B', 3,
np.array([[0.6, 0.2, 0.1], [0.3, 0.5, 0.3], [0.1, 0.3, 0.6]]),
evidence=['A'], evidence_card=[3])
P_C_I_B = TabularCPD('C', 3,
np.array([[0.7, 0.1, 0.1], [0.2, 0.6, 0.1], [0.1, 0.3, 0.8]]),
evidence=['B'], evidence_card=[3])
print(P_A)
print('='*50)
print(P_B_I_A)
print('='*50)
print(P_C_I_B)
# -
# ### 연습문제
# 이 그래프 확률모형을 기반으로 다음과 같은 문제를 풀어보자.
#
# 1. 이 학생의 시험 성적이 어떤 확률분포를 가질 것인가? 어떤 성적을 맞을 확률이 가장 높은가?
#
# -> P(C)
#
# 2. 이 학생의 건강 상태가 좋았다. 어떤 성적을 맞을 확률이 가장 높은가?
#
# -> P(C|A=2)
#
# 3. 이 학생의 공부 시간이 적었지만 시험 성적은 좋았다. 건강 상태가 어땠을까?
#
# -> P(A|B =0, C=2)
P_B = (P_B_I_A * P_A).marginalize(["A"],inplace = False)
print(P_B)
# ## 변수제거(variable elimination)
# > 확률변수 B의 분포가 이미 계산된 상태라면 확률변수 A의 영향은 없어진다
#
# $$ P(C) = \sum_{B} P(C|B)P(B) $$
#
# >변수제거 : 이런식으로 알고있는 확률변수 혹은 무조건부 확률변수분포를 알고있는 확률변수부터 네트워크를 따라 차례대로 확률분포를 계산하는 방식
# +
# 문제 1
## 아무런 조건이 없는 경우 시험 성적의 분포
from pgmpy.inference import VariableElimination
infer = VariableElimination(model)
print(infer.query(["C"]))
# -
# 문제 2
## 건강상태가 좋았을 때 성적분포
print(infer.query(["C"], evidence= {"A":2}))
# 문제 3
## 공부시간은 적었지만, 시험성적이 좋은경우 건강상태 분포
print(infer.query(["A"], evidence={"B":0, "C":2}))
# +
## B를 알고있는 경우 A와 C는 독립이다(머리-꼬리 관계)
### 공부시간을 알고있으면 시험성적과 관계없이 건강상태 유추 가능
print(infer.query(["A"], evidence = {"B":0}))
# -
# 다른방법
print((P_B_I_A * P_A).to_factor().reduce([("B",0)], inplace=False).normalize(inplace=False))
# ### 몬티홀 문제
# +
from pgmpy.factors.discrete import TabularCPD
# 어떤문에 자동차가 있을 확률은 모두 동일하다
P_C = TabularCPD('C', 3, [[0.33], [0.33], [0.33]])
print(P_C)
# +
# 참가자가 어떤 문을 고를 확률은 모두 동일하다
P_P = TabularCPD('P', 3, [[0.33], [0.33], [0.33]])
print(P_P)
# -
## 진행자가 여는 문은 참가자의 선택에 따라 달라진다(조건부 확률)
#P(H|C,P)
#C=P 일때는 선택지가 2개이므로 확률 0.5, C!=P일때는 선택지가 1개이므로 확률 1
P_H_I_CP = TabularCPD('H',3,[[0, 0, 0, 0, 0.5, 1, 0, 1, 0.5],
[0.5, 0, 1, 0, 0, 0, 1, 0, 0.5],
[0.5, 1, 0, 1, 0.5, 0, 0, 0, 0 ]],
evidence = ['C','P'], evidence_card = [3,3]
)
print(P_H_I_CP)
# +
### 모델만들기
from pgmpy.models import BayesianModel
model_monty = BayesianModel([('C','H'),('P','H')])
model_monty.add_cpds(P_C, P_P, P_H_I_CP)
# +
# 변수제거방법으로 문제 풀기
from pgmpy.inference import VariableElimination
infer = VariableElimination(model_monty)
# -
# 참가자가 0번 문 선택할 때
posteriors = infer.query(['C','H'],evidence={'P':0})
print(posteriors)
#참가자가 0번 문을 선택하고 진행자가 1번 문을 열었을 때
# 선택 유지(0번) 보다 선택변경(2번)을 택하는 것이 차가있을 확률이 2배 높다
posterior_c = infer.query(['C'],evidence = {'P':0, 'H':1})
print(posterior_c)
# +
#참가자가 0번 문을 선택하고 진행자가 2번 문을 열었을 때
# 선택 유지(0번) 보다 선택변경(1번)을 택하는 것이 차가있을 확률이 2배 높다
posterior_c = infer.query(['C'], evidence={'P': 1, 'H': 2})
print(posterior_c)
# -
| machine_learning/13_graphical probability model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kinetic Learning Example
# ## Simulate Limonene Dynamics
from KineticLearning import learn_dynamics,read_timeseries_data,simulate_dynamics
from IPython.display import display
import pandas as pd
#Import DataFrame from CSV & Define Important Variables
controls = ['AtoB', 'GPPS', 'HMGR', 'HMGS', 'Idi','Limonene Synthase', 'MK', 'PMD', 'PMK']
states = ['Acetyl-CoA','HMG-CoA', 'Mevalonate', 'Mev-P', 'IPP/DMAPP', 'Limonene']
limonene_df = read_timeseries_data('data/limonene_data.csv',states,controls,time='Hour',strain='Strain',augment=200)
# ## Learn the Dynamics of the Limonene Pathway
model = learn_dynamics(limonene_df,generations=50,population_size=30,verbose=True)
strain_df = limonene_df.loc[limonene_df.index.get_level_values(0)=='L2']
trajectory_df = simulate_dynamics(model,strain_df,verbose=True)
import matplotlib.pyplot as plt
for metabolite in limonene_df['states'].columns:
plt.figure()
ax = plt.gca()
strain_df['states'].loc[strain_df.index.get_level_values(0)=='L2'].reset_index().plot(x='Time',y=metabolite,ax=ax)
trajectory_df.plot(x='Time',y=metabolite,ax=ax)
plt.show()
| notebooks/LearnLimoneneDynamics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Cross-hemisphere comparison
#
#
# This example illustrates how to visualize the difference between activity in
# the left and the right hemisphere. The data from the right hemisphere is
# mapped to the left hemisphere, and then the difference is plotted. For more
# information see :func:`mne.compute_source_morph`.
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne
data_dir = mne.datasets.sample.data_path()
subjects_dir = data_dir + '/subjects'
stc_path = data_dir + '/MEG/sample/sample_audvis-meg-eeg'
stc = mne.read_source_estimate(stc_path, 'sample')
# First, morph the data to fsaverage_sym, for which we have left_right
# registrations:
stc = mne.compute_source_morph(stc, 'sample', 'fsaverage_sym', smooth=5,
warn=False,
subjects_dir=subjects_dir).apply(stc)
# Compute a morph-matrix mapping the right to the left hemisphere,
# and vice-versa.
morph = mne.compute_source_morph(stc, 'fsaverage_sym', 'fsaverage_sym',
spacing=stc.vertices, warn=False,
subjects_dir=subjects_dir, xhemi=True,
verbose='error') # creating morph map
stc_xhemi = morph.apply(stc)
# Now we can subtract them and plot the result:
diff = stc - stc_xhemi
diff.plot(hemi='lh', subjects_dir=subjects_dir, initial_time=0.07,
size=(800, 600))
| stable/_downloads/e563205ad3014f3289d0b0f8ecb38abf/plot_xhemi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # アンサンブルによる学習
# ### 確率質量関数を実装
# +
from scipy.special import comb
import math
def ensemble_error(n_classifier, error):
    """Return the error rate of a majority-vote ensemble.

    Computes the probability that more than half of `n_classifier`
    independent base classifiers, each with error rate `error`, are
    wrong at the same time (binomial tail probability).
    """
    # Smallest number of wrong votes that makes the majority wrong.
    threshold = int(math.ceil(n_classifier / 2.))
    total = 0.0
    for k in range(threshold, n_classifier + 1):
        total += comb(n_classifier, k) * error**k * (1 - error)**(n_classifier - k)
    return total
ensemble_error(n_classifier=11, error=0.25)
# -
# ### アンサンブルとベース分類器の誤分類の関係を折れ線グラフとしてプロット
# +
import numpy as np
import matplotlib.pyplot as plt
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=error)
for error in error_range]
plt.plot(error_range, ens_errors, label="Ensemble error", linewidth=2)
plt.plot(error_range, error_range, linestyle="--", label="Base error", linewidth=2)
plt.xlabel("Base error")
plt.ylabel("Base/Ensemble error")
plt.legend(loc="upper left")
plt.grid(alpha=0.5)
plt.show()
# -
import numpy as np
np.argmax(np.bincount([0, 0, 1], weights=[0.2, 0.2, 0.6]))
ex = np.array([[0.9, 0.1],
[0.8, 0.2],
[0.4, 0.6]])
p = np.average(ex, axis=0, weights=[0.2, 0.2, 0.6])
p
np.argmax(p)
# ### 多数決の分類によるアルゴリズム
# +
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
import numpy as np
import operator
class MajorityVoteClassifier(BaseEstimator, ClassifierMixin):
    """A majority-vote ensemble classifier.

    Parameters
    ----------
    classifiers : array-like, shape = [n_classifiers]
        The different classifiers of the ensemble.
    vote : str, {"classlabel", "probability"} (default: "classlabel")
        If "classlabel", the prediction is based on the argmax of the
        class labels. If "probability", the argmax of the summed
        class-membership probabilities is used (recommended for
        calibrated classifiers).
    weights : array-like, shape = [n_classifiers] (optional, default=None)
        If a list of `int` or `float` values is provided, the classifiers
        are weighted by importance; uniform weights if `weights=None`.
    """

    def __init__(self, classifiers, vote="classlabel", weights=None):
        self.classifiers = classifiers
        self.named_classifiers = {key: value for key,
                                  value in _name_estimators(classifiers)}
        self.vote = vote
        self.weights = weights

    def fit(self, X, y):
        """Fit the base classifiers.

        Parameters
        ----------
        X : {array-like, sparse matrix},
            shape = [n_samples, n_features]
            Matrix of training samples.
        y : array-like, shape = [n_samples]
            Vector of class labels.

        Returns
        -------
        self : object
        """
        # Use LabelEncoder so class labels start at 0; this is important
        # for the np.argmax call in self.predict.
        self.lablenc_ = LabelEncoder()
        self.lablenc_.fit(y)
        self.classes_ = self.lablenc_.classes_
        self.classifiers_ = []
        for clf in self.classifiers:
            fitted_clf = clone(clf).fit(X, self.lablenc_.transform(y))
            self.classifiers_.append(fitted_clf)
        return self

    def predict(self, X):
        """Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Matrix of training samples.

        Returns
        -------
        maj_vote : array-like, shape = [n_samples]
            Predicted class labels.
        """
        if self.vote == "probability":
            maj_vote = np.argmax(self.predict_proba(X), axis=1)
        else:  # "classlabel" majority vote
            # Collect results of clf.predict calls.
            predictions = np.asarray([clf.predict(X)
                                      for clf in self.classifiers_]).T
            # For each sample, return the column index whose (weighted)
            # vote count is largest.
            maj_vote = np.apply_along_axis(
                lambda x: np.argmax(np.bincount(x, weights=self.weights)),
                axis=1,
                arr=predictions)
        # Map encoded labels back to the original class labels.
        maj_vote = self.lablenc_.inverse_transform(maj_vote)
        return maj_vote

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors: n_samples is the number of samples,
            n_features is the number of features.

        Returns
        -------
        avg_proba : array-like, shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
        """
        probas = np.asarray([clf.predict_proba(X)
                             for clf in self.classifiers_])
        avg_proba = np.average(probas, axis=0, weights=self.weights)
        return avg_proba

    def get_params(self, deep=True):
        """Get classifier parameter names for GridSearch."""
        if not deep:
            return super(MajorityVoteClassifier, self).get_params(deep=False)
        else:
            # Build a dict keyed by "<classifier name>__<param name>".
            # FIX: use dict.items() instead of six.iteritems —
            # sklearn.externals.six was removed in scikit-learn 0.23.
            out = self.named_classifiers.copy()
            for name, step in self.named_classifiers.items():
                for key, value in step.get_params(deep=True).items():
                    out["%s__%s" % (name, key)] = value
            return out
# -
# ### アヤメデータのデータセットを準備
# +
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X, y = iris.data[50:, [1, 2]], iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
# -
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1, stratify=y)
# ### ロジスティック回帰、決定木分類器、k近傍分類法の三種類の分類器のトレーニングを行う
# +
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
import warnings
warnings.simplefilter("ignore")
clf1 = LogisticRegression(penalty="l2", C=0.001, random_state=1)
clf2 = DecisionTreeClassifier(max_depth=1, criterion="entropy", random_state=0)
clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric="minkowski")
pipe1 = Pipeline([["sc", StandardScaler()], ["clf", clf1]])
pipe3 = Pipeline([["sc", StandardScaler()], ["clf", clf3]])
clf_labels = ["Logistic regression", "Decision tree", "KNN"]
print("10-fold cross validation:\n")
for clf, label in zip([pipe1, clf2, pipe3], clf_labels):
scores = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring="roc_auc")
print("ROC AUC: %0.2f(+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
# -
# ### 個々の分類器をMajorityVoteClassifierオブジェクトで組み合わせ
# +
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ["Majority voting"]
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring="roc_auc")
print("ROC AUC: %0.2f(+/- %0.2f)[%s]" % (scores.mean(), scores.std(), label))
# -
# # アンサンブル分類器の評価とチューニング
# +
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
colors = ["black", "orange", "blue", "green"]
linestyles = [":", "--", "-.", "-"]
for clf, label, clr, ls in zip(all_clf, clf_labels, colors, linestyles):
# 陽性クラスのラベルは1であることは前提
y_pred = clf.fit(X_train, y_train).predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_true=y_test, y_score=y_pred)
roc_auc = auc(x=fpr, y=tpr)
plt.plot(fpr, tpr, color=clr, linestyle=ls, label="%s (auc = %0.2f)" % (label, roc_auc))
plt.legend(loc="lower right")
plt.plot([0, 1], [0, 1], linestyle="--", color="gray", linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid(alpha=0.5)
plt.xlabel("False positive rate (FPR)")
plt.ylabel("True positive rate (TPR)")
plt.show()
# +
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
from itertools import product # 多重ループの省略(product)
# 決定領域を描画する最小値、最大値を作成
x_min = X_train_std[:, 0].min() - 1
x_max = X_train_std[:, 0].max() + 1
y_min = X_train_std[:, 1].min() - 1
y_max = X_train_std[:, 1].max() + 1
# グリッドポイントを作成
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
# 描画領域を2行2列に分割
f, axarr = plt.subplots(nrows=2, ncols=2, sharex="col", sharey="row", figsize=(7, 5))
# 決定領域のプロット、青や赤の散布図の作例などを実行
# 変数idxは各分類器を描画すると行と列の位置を表すタプル
for idx, clf, tt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):
clf.fit(X_train_std, y_train)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, z, alpha=0.3)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train==0, 0],
X_train_std[y_train==0, 1], c="blue", marker="^", s=50)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train==1, 0],
X_train_std[y_train==1, 1], c="green", marker="o", s=50)
axarr[idx[0], idx[1]].set_title(tt)
plt.text(-3.5, -5, s="Sepal width [standardized]", ha="center", va="center", fontsize=12)
plt.text(-12.5, 4.5, s="Petal length [standardized]", ha="center", va="center", fontsize=12, rotation=90)
# -
mv_clf.get_params(deep=False)
mv_clf.get_params()
# ### ロジスティック回帰Cと決定木の深さをチューニング
# +
from sklearn.model_selection import GridSearchCV
params = {"decisiontreeclassifier__max_depth": [1, 2],
"pipeline-1__clf__C": [0.001, 0.1, 100.0]}
grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring="roc_auc")
grid.fit(X_train, y_train)
# -
for r, _ in enumerate(grid.cv_results_["mean_test_score"]):
print("%0.3f +/- %0.2f %r"
% (grid.cv_results_["mean_test_score"][r],
grid.cv_results_["std_test_score"][r] / 2.0,
grid.cv_results_["params"][r]))
print("Best parameters: %s" % grid.best_params_)
print("Accuracy: %.2f" % grid.best_score_)
# # バギング:ブートストラップ標本を使った分類器アンサンブルの構築
# ### バギングを使ってWineデータセットのサンプルを分類する
# +
import pandas as pd
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
df_wine = pd.read_csv('https://archive.ics.uci.edu/'
'ml/machine-learning-databases/wine/wine.data',
header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
'Proline']
# クラス1を削除
df_wine = df_wine[df_wine["Class label"] != 1]
y = df_wine["Class label"].values
X = df_wine[["Alcohol", "OD280/OD315 of diluted wines"]].values
# +
# クラスラベルを二値でエンコードし、データセットを分割
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify=y)
# +
from sklearn.ensemble import BaggingClassifier
tree = DecisionTreeClassifier(criterion="entropy", max_depth=None, random_state=1)
bag = BaggingClassifier(base_estimator=tree,
n_estimators=500,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
n_jobs=1,
random_state=1)
# +
from sklearn.metrics import accuracy_score
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print("tree train of accuracy score: %.3f" % tree_train)
print("tree test of accuracy score: %.3f" % tree_test)
# -
bag = bag.fit(X_train, y_train)
y_train_pred = bag.predict(X_train)
y_test_pred = bag.predict(X_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
print("bagging train of accuracy score: %.3f" % bag_train)
print("bagging test of accuracy score: %.3f" % bag_test)
# +
# 決定領域
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=1, ncols=2, sharex="col", sharey="row", figsize=(8, 3))
for idx, clf, tt in zip([0, 1], [tree, bag], ["Decision tree", "Bagging"]):
clf.fit(X_train, y_train)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, z, alpha=0.3)
axarr[idx].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], c="blue", marker="^" )
axarr[idx].scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1], c="green", marker="o" )
axarr[idx].set_title(tt)
axarr[0].set_ylabel("Alcohol", fontsize=12)
plt.text(10.2, -0.5, s="OD280/OD315 of diluted wines", ha="center", va="center", fontsize=12)
plt.tight_layout()
plt.show()
# -
# # アダブーストによる弱学習器の活用
# ### scikit-learnを使ってアダブーストを適用する
# +
from sklearn.ensemble import AdaBoostClassifier
tree = DecisionTreeClassifier(criterion="entropy",
max_depth=1,
random_state=0)
ada = AdaBoostClassifier(base_estimator=tree,
n_estimators=500,
learning_rate=0.1,
random_state=1)
# 決定木
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print("Decision tree train/test accuracies %.3f/%.3f" % (tree_train, tree_test))
# アダブースト
ada = ada.fit(X_train, y_train)
y_train_pred = ada.predict(X_train)
y_test_pred = ada.predict(X_test)
ada_train = accuracy_score(y_train, y_train_pred)
ada_test = accuracy_score(y_test, y_test_pred)
print("AdaBoost train/test accuracies %.3f/%.3f" % (ada_train, ada_test))
# +
# 決定領域
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=1, ncols=2, sharex="col", sharey="row", figsize=(8, 3))
for idx, clf, tt in zip([0, 1], [tree, ada], ["Decision tree", "AdaBoost"]):
clf.fit(X_train, y_train)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, z, alpha=0.3)
axarr[idx].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], c="blue", marker="^" )
axarr[idx].scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1], c="green", marker="o" )
axarr[idx].set_title(tt)
axarr[0].set_ylabel("Alcohol", fontsize=12)
plt.text(10.2, -0.5, s="OD280/OD315 of diluted wines", ha="center", va="center", fontsize=12)
plt.tight_layout()
plt.show()
# -
| 7. Ensemble Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hash functions and multiplanes
#
#
# In this lab, we are going to practice the most important concepts related to the hash functions explained in the videos. You will be using these in this week's assignment.
#
# A key point for the lookup using hash functions is the calculation of the hash key or bucket id that we assign for a given entry. In this notebook, we will cover:
#
# * Basic hash tables
# * Multiplanes
# * Random planes
# ## Basic Hash tables
#
# Hash tables are data structures that allow indexing data to make lookup tasks more efficient.
# In this part, you will see the implementation of the simplest hash function.
# +
import numpy as np # library for array and matrix manipulation
import pprint # utilities for console printing
from utils_nb import plot_vectors # helper function to plot vectors
import matplotlib.pyplot as plt # visualization library
pp = pprint.PrettyPrinter(indent=4) # Instantiate a pretty printer
# -
# In the next cell, we will define a straightforward hash function for integer numbers. The function will receive a list of integer numbers and the desired amount of buckets. The function will produce a hash table stored as a dictionary, where keys contain the hash keys, and the values will provide the hashed elements of the input list.
#
# The hash function is just the remainder of the integer division between each element and the desired number of buckets.
def basic_hash_table(value_l, n_buckets):
    """Hash the integers in `value_l` into `n_buckets` buckets.

    The hash key is simply the remainder `value % n_buckets`; the result
    is a dict mapping every bucket id (0..n_buckets-1) to the list of
    values that landed in that bucket, in input order.
    """
    # Start with every bucket present and empty.
    table = {bucket: [] for bucket in range(n_buckets)}
    for item in value_l:
        table[int(item) % n_buckets].append(item)  # drop item in its bucket
    return table
# Now let's see the hash table function in action. The pretty print function (`pprint()`) will produce a visually appealing output.
value_l = [100, 10, 14, 17, 97] # Set of values to hash
hash_table_example = basic_hash_table(value_l, n_buckets=10)
pp.pprint(hash_table_example)
# In this case, the bucket key must be the rightmost digit of each number.
# ## Planes
#
# Multiplanes hash functions are other types of hash functions. Multiplanes hash functions are based on the idea of numbering every single region that is formed by the intersection of n planes. In the following code, we show the most basic forms of the multiplanes principle. First, with a single plane:
# +
P = np.array([[1, 1]])  # Define a single plane.
fig, ax1 = plt.subplots(figsize=(8, 8))  # Create a plot
plot_vectors([P], axes=[2, 2], ax=ax1)  # Plot the plane P as a vector
# Plot 10 random points, colored by which side of the plane each falls on.
for i in range(0, 10):
    v1 = np.array(np.random.uniform(-2, 2, 2))  # Get a pair of random numbers between -2 and 2
    side_of_plane = np.sign(np.dot(P, v1.T))  # NOTE: this local name is reused later for a function of the same name
    # Color the points depending on the sign of the result of np.dot(P, point.T)
    if side_of_plane == 1:
        ax1.plot([v1[0]], [v1[1]], 'bo')  # Plot blue points
    else:
        ax1.plot([v1[0]], [v1[1]], 'ro')  # Plot red points
plt.show()
# -
# The first thing to note is that the vector that defines the plane does not mark the boundary between the two sides of the plane. It marks the direction in which you find the 'positive' side of the plane. Not intuitive at all!
#
# If we want to plot the separation plane, we need to plot a line that is perpendicular to our vector `P`. We can get such a line using a $90^o$ rotation matrix.
#
# Feel free to change the direction of the plane `P`.
# +
P = np.array([[1, 2]])  # Define a single plane. You may change the direction
# Get a new vector perpendicular to P using a 90-degree rotation matrix
PT = np.dot([[0, 1], [-1, 0]], P.T).T
fig, ax1 = plt.subplots(figsize=(8, 8))  # Create a plot with custom size
plot_vectors([P], colors=['b'], axes=[2, 2], ax=ax1)  # Plot the plane P as a vector
# Plot the separation line as 2 vectors.
# We scale by 4 just to get the arrows outside the current box
plot_vectors([PT * 4, PT * -4], colors=['k', 'k'], axes=[4, 4], ax=ax1)
# Plot 20 random points.
for i in range(0, 20):
    v1 = np.array(np.random.uniform(-4, 4, 2))  # Get a pair of random numbers between -4 and 4
    side_of_plane = np.sign(np.dot(P, v1.T))  # Get the sign of the dot product with P
    # Color the points depending on the sign of the result of np.dot(P, point.T)
    if side_of_plane == 1:
        ax1.plot([v1[0]], [v1[1]], 'bo')  # Plot a blue point
    else:
        ax1.plot([v1[0]], [v1[1]], 'ro')  # Plot a red point
plt.show()
# -
# Now, let us see what is inside the code that colors the points.
P = np.array([[1, 1]]) # Single plane
v1 = np.array([[1, 2]]) # Sample point 1
v2 = np.array([[-1, 1]]) # Sample point 2
v3 = np.array([[-2, -1]]) # Sample point 3
np.dot(P, v1.T)
np.dot(P, v2.T)
np.dot(P, v3.T)
# The function below checks in which side of the plane P is located the vector `v`
def side_of_plane(P, v):
    """Return which side of plane P the vector v lies on.

    The result is the sign of P . v-transpose: +1 for the positive side,
    -1 for the negative side, and 0 when v lies exactly on the plane.
    """
    # `.item()` unwraps the 1x1 result matrix into a plain scalar.
    return np.sign(P @ v.T).item()
side_of_plane(P, v1) # In which side is [1, 2]
side_of_plane(P, v2) # In which side is [-1, 1]
side_of_plane(P, v3) # In which side is [-2, -1]
# ## Hash Function with multiple planes
#
# In the following section, we are going to define a hash function with a list of three custom planes in 2D.
# +
P1 = np.array([[1, 1]]) # First plane 2D
P2 = np.array([[-1, 1]]) # Second plane 2D
P3 = np.array([[-1, -1]]) # Third plane 2D
P_l = [P1, P2, P3] # List of arrays. It is the multi plane
# Vector to search
v = np.array([[2, 2]])
# -
# The next function creates a hash value based on a set of planes. The output value is a combination of the side of the plane where the vector is localized with respect to the collection of planes.
#
# We can think of this list of planes as a set of basic hash functions, each of which can produce only 1 or 0 as output.
def hash_multi_plane(P_l, v):
    """Combine the side-of-plane bit for each plane in P_l into one hash.

    Plane i contributes 2**i to the hash when v lies on its non-negative
    side (sign of the dot product >= 0), so the result indexes one of the
    2**len(P_l) regions cut out by the planes.
    """
    bits = []
    for plane in P_l:
        # Inlined side_of_plane: sign of plane . v-transpose as a scalar.
        side = np.sign(np.dot(plane, v.T)).item()
        bits.append(1 if side >= 0 else 0)
    return sum(bit << i for i, bit in enumerate(bits))
hash_multi_plane(P_l, v) # Find the number of the plane that containes this value
# ## Random Planes
#
# In the cell below, we create a set of three random planes
np.random.seed(0)
num_dimensions = 2 # is 300 in assignment
num_planes = 3 # is 10 in assignment
random_planes_matrix = np.random.normal(
size=(num_planes,
num_dimensions))
print(random_planes_matrix)
v = np.array([[2, 2]])
# The next function is similar to the `side_of_plane()` function, but it evaluates more than a plane each time. The result is an array with the side of the plane of `v`, for the set of planes `P`
# Side of the plane function. The result is a matrix
def side_of_plane_matrix(P, v):
    """Signs of the dot products between each row-plane of P and vector v.

    Returns a column matrix with one entry per plane: +1, -1 or 0
    depending on which side of that plane v lies.
    """
    return np.sign(P @ v.T)
# Get the side of the plane of the vector `[2, 2]` for the set of random planes.
sides_l = side_of_plane_matrix(
random_planes_matrix, v)
sides_l
# Now, let us use the former function to define our multiplane hash function
def hash_multi_plane_matrix(P, v, num_planes):
    """Multi-plane hash of vector v against the planes stacked in matrix P.

    Plane i contributes 2**i when v lies on its non-negative side, so the
    result is the bucket id of the region containing v.
    """
    # Signs of P . v-transpose, one row per plane (inlined side_of_plane_matrix).
    signs = np.sign(np.dot(P, v.T))
    bits = (1 if signs[k].item() >= 0 else 0 for k in range(num_planes))
    return sum(bit << k for k, bit in enumerate(bits))
# Print the bucket hash for the vector `v = [2, 2]`.
hash_multi_plane_matrix(random_planes_matrix, v, num_planes)
# #### Note
# This showed you how to make one set of random planes. You will make multiple sets of random planes in order to make the approximate nearest neighbors more accurate.
# ## Document vectors
#
# Before we finish this lab, remember that you can represent a document as a vector by adding up the word vectors for the words inside the document. In this example, our embedding contains only three words, each represented by a 3D array.
# +
# Toy embedding table: three known words, each a 3-D integer vector.
word_embedding = {"I": np.array([1, 0, 1]),
                  "love": np.array([-1, 0, 1]),
                  "learning": np.array([1, 0, 1])}

words_in_document = ['I', 'love', 'learning', 'not_a_word']

# The document vector is the sum of its word vectors; words missing from
# the embedding contribute the scalar 0, which broadcasts harmlessly.
document_embedding = np.array([0, 0, 0])
document_embedding = sum(
    (word_embedding.get(token, 0) for token in words_in_document),
    document_embedding)
print(document_embedding)
# -
# **Congratulations! You've now completed this lab on hash functions and multiplanes!**
| C1 Natural Language Processing with Classification/W4/Labs/2 Hash tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import string
import os
from collections import Counter
import itertools
# +
train_paths = os.listdir("../data/train/")
test_paths = os.listdir("../data/test/")
train_ids = []
train_class = []
test_ids = []
extract_id =False
extract_class = False
def generate_xml_paths(train_paths, test_paths, xml_processor=lambda x: x, i=0):
    """
    Generator over the combined train + test xml files (Python 2).

    Processes the provided paths, extracting id and class information and
    applying whatever function on the xml is desired, yielding the
    processed content of one file at a time.

    xml_processor should take an xml string and return something (a str).

    Side effects: when the module-level flags `extract_id` / `extract_class`
    are True, this appends to the module-level lists `test_ids`,
    `train_ids` and `train_class` as it walks the files.
    """
    paths = train_paths + test_paths
    print "The length of the test data is {0}, training data {1}".format(
        len(test_paths), len(train_paths)
    )
    while i < len(paths):
        abs_path = ''
        # Split the file name into a list of [id, class_name, xml]
        id_class_xml = paths[i].split('.')
        assert id_class_xml[2] == 'xml'
        # If the file is part of the test set, append the id to test_ids
        if i >= len(train_paths):
            if extract_id:
                test_ids.append(id_class_xml[0])
            # Test files are expected to carry the placeholder class 'X'
            assert id_class_xml[1] == 'X'
            abs_path = os.path.join(
                os.path.abspath("../data/test/"), paths[i])
        # Otherwise file is in training set. Append id and class
        else:
            if extract_id:
                train_ids.append(id_class_xml[0])
            if extract_class:
                train_class.append(id_class_xml[1])
            abs_path = os.path.join(
                os.path.abspath("../data/train/"), paths[i])
        # Open the file, process, and yield string
        with open(abs_path, 'r') as xml_file:
            xml_content = xml_processor(xml_file.read())
            assert type(xml_content) == str
            yield xml_content
        # Progress report every 500 files
        if (i % 500) == 0:
            print "sent file {0}, named \n {1} to processing".format(i, paths[i])
        i += 1
# -
def remove_special_xml(xmlstr):
    """Normalize raw xml text for tokenization (Python 2).

    Translates uppercase letters to lowercase and xml punctuation to
    spaces, deleting the characters '_?:' entirely.
    """
    # NOTE(review): string.maketrans requires both arguments to be the
    # same length. The source string has 10 punctuation characters plus
    # 26 letters; the replacement literal below appears to have a single
    # leading space — if whitespace was mangled in transit, verify the
    # original had ten spaces before 'abc...'.
    table = string.maketrans('=\/<>.\r\n-"ABCDEFGHIJKLMNOPQRSTUVWXYZ',(
        ' abcdefghijklmnopqrstuvwxyz'))
    delete = '_?:'
    return string.translate(xmlstr,table,delete)
# +
xml_corpus = generate_xml_paths(train_paths, test_paths,
xml_processor=remove_special_xml)
xml_tokens = [xml.split() for xml in xml_corpus]
counter = Counter(itertools.chain.from_iterable(xml_tokens))
print counter.most_common(10)
# +
def frequency_check(counts, index, cutoff):
    """Keep a word's integer id only if it is frequent enough.

    Returns `index` when `counts` >= `cutoff`, otherwise 0 (the shared
    out-of-vocabulary bucket).
    """
    return 0 if counts < cutoff else index
cutoff = 3
# most common returns word, count pairs (as a tuple) check that the num
# counts is greater than the cutoff
word_to_int = {
word[0]: frequency_check(word[1], i+1, cutoff) for i, word in enumerate(counter.most_common())}
print len(word_to_int.items())
print word_to_int.items()[:10]
print word_to_int['windows']
# -
len(counter.most_common())
# Convert every word in every token list into the correct index value.
# Should look like this:
#[ [ 1,2,3,...(sequence of ints representing words in xml file 1)]
# [(sequence of ints representing word in xml file 2)]
# ...]
xml_int_tokens = [[word_to_int[word] for word in xml_file] for xml_file in xml_tokens]
print len(xml_int_tokens[1])
print len(xml_int_tokens[2])
print xml_int_tokens[1][:400]
token_arr = np.array([np.array(xml) for xml in xml_int_tokens])
print token_arr
token_arr.shape
# +
# Save and load the array to check that this operation works
np.save("../data/features/3_cutoff_word_to_intseq.npy", token_arr)
# -
test = np.load("../data/features/3_cutoff_word_to_intseq.npy")
print test
print test.shape
# Appears to work well. Clobber both to garbage collect
test = 0
token_arr = 0
# +
# Rerun with a higher cutoff (to get a good set of options)
def frequency_check(counts, index, cutoff):
    """Return `index` if `counts` reaches `cutoff`, else 0 (the OOV bucket).

    NOTE: identical redefinition of the earlier `frequency_check` so this
    notebook cell is self-contained when rerun.
    """
    if counts < cutoff:
        return 0
    else:
        return index
cutoff = 10
# most common returns word, count pairs (as a tuple) check that the num
# counts is greater than the cutoff
word_to_int = {
word[0]: frequency_check(word[1], i+1, cutoff) for i, word in enumerate(counter.most_common())}
print len(word_to_int.items())
print word_to_int.items()[:10]
print word_to_int['windows']
# -
print xml_int_tokens[1][:400]
# Convert every word in every token list into the correct index value.
# Should look like this:
#[ [ 1,22,31,...(sequence of ints representing words in xml file 1)]
# [(sequence of ints representing word in xml file 2)]
# ...]
xml_int_tokens = [[word_to_int[word] for word in xml_file] for xml_file in xml_tokens]
print len(xml_int_tokens[1])
print len(xml_int_tokens[2])
print xml_int_tokens[1][:400]
max([max(ele) for ele in xml_int_tokens])
# This is still too big probably. I'm going to save and then try excluding
# more.
token_arr = np.array([np.array(xml) for xml in xml_int_tokens])
np.save("../data/features/10_cutoff_word_to_intseq.npy", token_arr)
token_arr = 0
token_arr = 0
# +
# Rerun with a cutoff of 50 (to get a good set of options)
def frequency_check(counts, index, cutoff):
    """Return `index` if `counts` reaches `cutoff`, else 0 (the OOV bucket).

    NOTE: identical redefinition of the earlier `frequency_check` so this
    notebook cell is self-contained when rerun.
    """
    if counts < cutoff:
        return 0
    else:
        return index
cutoff = 50
# most common returns word, count pairs (as a tuple) check that the num
# counts is greater than the cutoff
word_to_int = {
word[0]: frequency_check(word[1], i+1, cutoff) for i, word in enumerate(counter.most_common())}
print len(word_to_int.items())
print word_to_int.items()[:10]
print word_to_int['windows']
# -
# Convert every word in every token list into the correct index value.
# Should look like this:
#[ [ 1,22,31,...(sequence of ints representing words in xml file 1)]
# [(sequence of ints representing word in xml file 2)]
# ...]
xml_int_tokens = [[word_to_int[word] for word in xml_file] for xml_file in xml_tokens]
print len(xml_int_tokens[1])
print len(xml_int_tokens[2])
print xml_int_tokens[1][:400]
max([max(ele) for ele in xml_int_tokens])
# This is starting to seem like a reasonable vocabulary size
# I will do one more (shooting for ~10000 vocab size)
# This is still too big probably. I'm going to save and then try excluding
# more.
token_arr = np.array([np.array(xml) for xml in xml_int_tokens])
np.save("../data/features/50_cutoff_word_to_intseq.npy", token_arr)
token_arr = 0
# +
# Rerun with a cutoff of 50 (to get a good set of options)
def frequency_check(counts, index, cutoff):
    """Return `index` if `counts` reaches `cutoff`, else 0 (the OOV bucket).

    NOTE: identical redefinition of the earlier `frequency_check` so this
    notebook cell is self-contained when rerun.
    """
    if counts < cutoff:
        return 0
    else:
        return index
cutoff = 100
# most common returns word, count pairs (as a tuple) check that the num
# counts is greater than the cutoff
word_to_int = {
word[0]: frequency_check(word[1], i+1, cutoff) for i, word in enumerate(counter.most_common())}
print len(word_to_int.items())
print word_to_int.items()[:10]
print word_to_int['windows']
# -
# Convert every word in every token list into the correct index value.
# Should look like this:
#[ [ 1,22,31,...(sequence of ints representing words in xml file 1)]
# [(sequence of ints representing word in xml file 2)]
# ...]
xml_int_tokens = [[word_to_int[word] for word in xml_file] for xml_file in xml_tokens]
print len(xml_int_tokens[1])
print len(xml_int_tokens[2])
print xml_int_tokens[1][:400]
max([max(ele) for ele in xml_int_tokens])
# This is still too big probably. I'm going to save and then try excluding
# more.
token_arr = np.array([np.array(xml) for xml in xml_int_tokens])
np.save("../data/features/100_cutoff_alphabet_19679_word_to_intseq.npy", token_arr)
token_arr = 0
# +
# Still not quite small enough. I will try one more time, at 200
# although I feel like significant info is being lost at this point
# Rerun with a cutoff of 50 (to get a good set of options)
def frequency_check(counts, index, cutoff):
    """Return `index` if `counts` reaches `cutoff`, else 0 (the OOV bucket).

    NOTE: identical redefinition of the earlier `frequency_check` so this
    notebook cell is self-contained when rerun.
    """
    if counts < cutoff:
        return 0
    else:
        return index
cutoff = 200
# most common returns word, count pairs (as a tuple) check that the num
# counts is greater than the cutoff
word_to_int = {
word[0]: frequency_check(word[1], i+1, cutoff) for i, word in enumerate(counter.most_common())}
print len(word_to_int.items())
print word_to_int.items()[:10]
print word_to_int['windows']
# -
# Convert every word in every token list into the correct index value.
# Should look like this:
#[ [ 1,22,31,...(sequence of ints representing words in xml file 1)]
# [(sequence of ints representing word in xml file 2)]
# ...]
xml_int_tokens = [[word_to_int[word] for word in xml_file] for xml_file in xml_tokens]
print len(xml_int_tokens[1])
print len(xml_int_tokens[2])
print xml_int_tokens[1][:400]
max([max(ele) for ele in xml_int_tokens])
token_arr = np.array([np.array(xml) for xml in xml_int_tokens])
np.save("../data/features/200_cutoff_alphabet_to_intseq.npy", token_arr)
token_arr = 0
| feature_extract/explore_sequential_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras import models
from keras import layers
# + pycharm={"name": "#%%\n"}
model = models.Sequential()
# Convolution layers; the arguments mean, in order:
#  - number of feature maps produced: the first layer has 32 kernels;
#    the second has 64 kernels applied on top of the previous layer's maps
#  - kernel size
#  - activation function
#  - input shape (only given on the very first layer)
model.add(layers.Conv2D(32,(3,3),activation='relu',input_shape=(400,100,3)))
model.add(layers.MaxPool2D(2,2))
model.add(layers.Conv2D(64,(3,3),activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
# Single sigmoid output: binary classification
model.add(layers.Dense(1, activation='sigmoid'))
# + pycharm={"name": "#%%\n"}
# Configure the model's loss function, optimizer and metric names
from keras import optimizers
model.compile(loss='binary_crossentropy', # loss function
              optimizer=optimizers.RMSprop(lr=1e-4), # optimizer
              metrics=['acc']) # metric names
# + pycharm={"name": "#%%\n"}
# Training and validation image directories
train_dir = r'G:\test\normal_x\typeIII1.3\train'
validation_dir = r'G:\test\normal_x\typeIII1.3\val'
# + pycharm={"name": "#%%\n"}
# Build generators that yield the images and labels needed for training
from keras.preprocessing.image import ImageDataGenerator
# Scale pixel values into [0, 1]; source pixels are uint8, hence /255
train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
# Labels are derived from sub-directory names
# (train_dir holds the type-II and type-III images);
# each call yields batch_size images resized to target_size
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(400, 100), # generated image size
    batch_size=40, # images produced per batch
    class_mode='binary') # label type
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(400, 100), # generated image size
    batch_size=30, # images produced per batch
    class_mode='binary') # label type
# + pycharm={"name": "#%%\n"}
# Start training
history = model.fit_generator(
    train_generator, # generator feeding images and labels
    steps_per_epoch=50, # 50 batches (of 40 images) per epoch
    epochs=50, # train for 50 epochs
    validation_data=validation_generator, # validation images and labels via generator
    validation_steps=10) # 10 batches (of 30 images) per validation pass
# + pycharm={"name": "#%%\n"}
# Plot training/validation accuracy and training/validation loss
# (matplotlib is Python's plotting library, similar to MATLAB's plot).
import matplotlib.pyplot as plt

acc = history.history['acc']            # training accuracy per epoch
val_acc = history.history['val_acc']    # validation accuracy per epoch
loss = history.history['loss']          # training loss per epoch
val_loss = history.history['val_loss']  # validation loss per epoch
epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
# BUG FIX: draw the legend before saving; the original called savefig()
# first, so the saved PNGs were missing their legends.
plt.legend()
plt.savefig('accuracy_1.4_50.png')

plt.figure()  # start a new figure for the loss curves
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.savefig('loss_1.4_50.png')
plt.show()
# + pycharm={"name": "#%%\n"}
# Append the per-epoch accuracy and loss series to a text file.

def _write_series(out, label, values):
    # One labelled, space-separated run of values (no trailing newline).
    out.write(label)
    for value in values:
        out.write(str(value))
        out.write(" ")

# Use a context manager so the file is closed even if a write fails
# (the original opened/closed the handle manually).
with open('acc_loss_1.4_50.txt', 'a') as out:
    _write_series(out, '训练精度:', acc)
    out.write("\n")
    _write_series(out, '验证精度:', val_acc)
    out.write("\n")
    _write_series(out, '训练损失:', loss)
    out.write("\n")
    _write_series(out, '验证损失:', val_loss)
# + pycharm={"name": "#%%\n"}
import os
import cv2 as cv
import numpy as np
# Validation directories: type-III images and "other" (O) images
III_dir = r'G:\test\normal_x\typeIII\val\III'
O_dir = r'G:\test\normal_x\typeIII\val\O'
def my_image(path):
    """Load every image under `path`, resize, and scale pixels to [0, 1].

    cv.resize takes (width, height), so (100, 400) yields arrays matching
    the model's (400, 100, 3) input; returns a stacked numpy array.
    """
    out = []
    filenames = os.listdir(path)
    for filename in filenames:
        image = cv.imread(os.path.join(path, filename))
        image = cv.resize(image, (100, 400))
        image = image/255.0
        out.append(image)
    return np.array(out)
imgs_III = my_image(III_dir)
imgs_O = my_image(O_dir)
# predict_classes returns one [0]/[1] row per input image
ret_III = model.predict_classes(imgs_III)
ret_O = model.predict_classes(imgs_O)
ret_III = ret_III.tolist()
ret_O = ret_O.tolist()
# Class [0] is treated as "type III": count hits among the III images
# (true positives) and among the O images (false positives)
true = ret_III.count([0])
false = ret_O.count([0])
TPR = true/len(ret_III)  # true-positive rate
FPR = false/len(ret_O)   # false-positive rate
print("TPR is :{:f} ".format(TPR))
print("FPR is :{:f} ".format(FPR))
# + pycharm={"name": "#%%\n"}
| typeIII/Net1.4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
LE = LabelEncoder()
from sklearn.ensemble import RandomForestClassifier
RFC = RandomForestClassifier()
traindata = pd.read_csv("train.csv")
testdata = pd.read_csv("test.csv")
traindata = traindata.drop('index',axis=1)
testdata = testdata.drop('index',axis=1)
blank_columns = ["age","workclass","fnlwgt","education","education-num","marital-status","occupation","relationship","race","sex","capital-gain","capital-loss","hours-per-week","native-country"]
traindata = traindata.replace(np.nan,'*')
testdata = testdata.replace(np.nan,'*')
# Integer-encode each categorical column.
# BUG FIX: fit one encoder on the union of train and test values so a
# given category maps to the same integer in both frames; the original
# fit train and test separately, producing inconsistent encodings that
# silently corrupt the model's test-time predictions.
for column in blank_columns:
    LE.fit(pd.concat([traindata[column], testdata[column]], ignore_index=True))
    traindata[column] = LE.transform(traindata[column])
    testdata[column] = LE.transform(testdata[column])
x = traindata[blank_columns]
y = traindata["label"]
RFC.fit(x,y)
pred = RFC.predict(testdata)
pred = pd.DataFrame(pred)
pred = pred.reset_index()
pred.columns =['index','label']
pred.to_csv("16ME237_Pavan_27AUG_1.csv",index=False)
| salary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
auth_provider = PlainTextAuthProvider(
username='cassandra', password='<PASSWORD>')
# cluster = Cluster(contact_points=['bem.ei.team'],
# auth_provider=auth_provider)
connection.setup(hosts=['bem.ei.team'],default_keyspace='bem',auth_provider=auth_provider)
# +
import os
import uuid

from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.models import Model


class Item(Model):
    """An item row: generated UUID primary key, a name and a decimal price."""
    id = columns.UUID(primary_key=True, default=uuid.uuid4)
    name = columns.Text()
    price = columns.Decimal()


# BUG FIX: cqlengine checks the *environment variable*
# CQLENG_ALLOW_SCHEMA_MANAGEMENT before allowing sync_table(); the
# original assigned a plain Python variable of that name, which the
# library never sees.
os.environ['CQLENG_ALLOW_SCHEMA_MANAGEMENT'] = '1'
sync_table(Item)
# -
Item.create(name='Banana',price=20.0)
Item.create(name='Apple',price=30.0)
print(Item.objects.all())
for i in Item.objects.all():
print(i.id,i.name,i.price)
| 01day03_etl1/notebook_example/04 Cassandra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Principal Components
import numpy as np
import matplotlib.pyplot as plt
X1 = 1.5*np.random.randn(10000, 1)
X2 = 0.5*np.random.randn(10000, 1)
X = np.c_[X1, X2]
plt.plot(X1, X2, "r.", markersize=2)
y = np.sqrt(np.square(X1) + np.square(X2)) + 0.2*np.random.randn(10000, 1)
theta = np.radians(45)
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, s), (-s, c)))
R
Y = np.ones((200, 2))
Y.dot(R)[:10]
X_rot = X.dot(R)
plt.plot(X_rot[:, 0], X_rot[:, 1], "b.", markersize=2)
U, s, Vt = np.linalg.svd(X_rot)
c1 = Vt.T[:, 0]
c2 = Vt.T[:, 1]
# # Projecting Down to d Dimensions
W2 = Vt.T[:, :2]  # first two principal components (right-singular vectors)
# BUG FIX: the original read `R.dot(W2)` followed by a stray `a`
# (a SyntaxError), and X2D was never assigned before being plotted.
# Project the data onto the plane spanned by the first two components:
X2D = X_rot.dot(W2)
plt.plot(X2D[:, 0], X2D[:, 1], "g.", markersize=2)
# # Using Scikit-Learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D = pca.fit_transform(X_rot)
plt.plot(X2D[:, 0], X2D[:, 1], "g.", markersize=2)
# # Explained Variance Ratio
pca.explained_variance_ratio_
# # Choosing the Right Number of Dimensions
pca = PCA()
pca.fit(X_rot)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(X_rot)
X_reduced[:10]
X[:10]
# # PCA for Compression
pca = PCA(n_components = 2)
X_reduced = pca.fit_transform(X)
X_recovered = pca.inverse_transform(X_reduced)
X_recovered[:10]
# # Randomized PCA
rnd_pca = PCA(n_components=2, svd_solver="randomized")
X_reduced = rnd_pca.fit_transform(X)
# # Incremental PCA
from sklearn.decomposition import IncrementalPCA
# + jupyter={"outputs_hidden": true}
# NOTE(review): X_train is not defined anywhere in this notebook (this is
# the book's MNIST example); running this cell as-is raises NameError.
n_batches = 100
inc_pca = IncrementalPCA(n_components=154)
for X_batch in np.array_split(X_train, n_batches):
    inc_pca.partial_fit(X_batch)  # fit one mini-batch at a time
X_reduced = inc_pca.transform(X)
# + jupyter={"outputs_hidden": true}
# NOTE(review): `filename`, `m` and `n` are also undefined here — this
# cell only illustrates memory-mapped incremental PCA and needs those
# bound before it can run.
X_mm = np.memmap(filename, dtype="float32", mode="readonly", shape=(m, n))
batch_size = m // n_batches
inc_pca = IncrementalPCA(n_components=154, batch_size=batch_size)
inc_pca.fit(X_mm)
# -
# # Kernel PCA
from sklearn.decomposition import KernelPCA
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.04)
X_reduced = rbf_pca.fit_transform(X)
# # Selecting a Kernel and Tuning Hyperparameters
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
# + jupyter={"outputs_hidden": true}
clf = Pipeline([
("kpca", KernelPCA(n_components=2)),
("log_reg", LogisticRegression())])
param_grid = [{
"kpca__gamma": np.linspace(0.03, 0.05, 10),
"kpca__kernel": ["rbf", "sigmoid"]}]
grid_search = GridSearchCV(clf, param_grid, cv=3)
grid_search.fit(X, y)
# -
print(grid_search.best_params_)
# +
rbf_pca = KernelPCA(
n_components = 2,
kernel="rbf",
gamma=0.0433,
fit_inverse_transform=True)
X_reduced = rbf_pca.fit_transform(X)
X_preimage = rbf_pca.inverse_transform(X_reduced)
# -
X_preimage[:10]
X[:10]
mean_squared_error(X, X_preimage)
# # LLE
from sklearn.manifold import LocallyLinearEmbedding
lle = LocallyLinearEmbedding(n_components=2, n_neighbors=10)
X_reduced = lle.fit_transform(X)
plt.plot(X_reduced[:, 0], X_reduced[:, 1], "r.", markersize=2)
| cd08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="xLOXFOT5Q40E"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="iiQkM5ZgQ8r2"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="j6331ZSsQGY3"
# # 勾配の計算
# + [markdown] id="i9Jcnb8bQQyd"
# <table class="tfo-notebook-buttons" align="left">
# <td><a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/gradients"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/quantum/tutorials/gradients.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a></td>
#   <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/quantum/tutorials/gradients.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
# <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/quantum/tutorials/gradients.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
# </table>
# + [markdown] id="FxkQA6oblNqI"
# このチュートリアルでは、量子回路の期待値の勾配計算アルゴリズムについて説明します。
#
# 量子回路で特定のオブザーバブルの期待値の勾配を計算することは、複雑なプロセスです。行列の乗算やベクトルの加算などの従来の機械学習変換では簡単に使用できる解析的勾配式がありますが、オブザーバブルの期待値には、このような解析的勾配式は必ずしもありません。そのため、シナリオに適したさまざまな量子勾配計算方法を使用する必要があります。このチュートリアルでは、2 つの異なる微分スキームを比較対照します。
# + [markdown] id="pvG0gAJqGYJo"
# ## セットアップ
# + id="TorxE5tnkvb2"
# !pip install tensorflow==2.1.0
# + [markdown] id="OIbP5hklC338"
# TensorFlow Quantum をインストールします。
# + id="saFHsRDpkvkH"
# !pip install tensorflow-quantum
# + [markdown] id="MkTqyoSxGUfB"
# 次に、TensorFlow とモジュールの依存関係をインポートします。
# + id="enZ300Bflq80"
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools
# %matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
# + [markdown] id="b08Mmbs8lr81"
# ## 1. 予備
#
# 量子回路の勾配計算の概念をもう少し具体的に見てみましょう。次のようなパラメータ化された回路があるとします。
# + id="YkPYJ_Ak-GKu"
qubit = cirq.GridQubit(0, 0)
my_circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol('alpha'))
SVGCircuit(my_circuit)
# + [markdown] id="wgQIlCWy-MVr"
# オブザーバブルは以下のとおりです。
# + id="xurmJdFy-Jae"
pauli_x = cirq.X(qubit)
pauli_x
# + [markdown] id="j3OzKYe5NT_W"
# この演算子を見ると、$⟨Y(\alpha)| X | Y(\alpha)⟩ = \sin(\pi \alpha)$ であることが分かります。
# + id="Ps-pd2mndXs7"
def my_expectation(op, alpha):
    """Compute ⟨Y(alpha)| `op` | Y(alpha)⟩ by exact state-vector simulation."""
    params = {'alpha': alpha}
    sim = cirq.Simulator()
    # NOTE(review): `final_state` and `expectation_from_wavefunction` are
    # APIs of older cirq releases; newer cirq renamed them
    # (final_state_vector / expectation_from_state_vector) — confirm the
    # pinned cirq version before upgrading.
    final_state = sim.simulate(my_circuit, params).final_state
    return op.expectation_from_wavefunction(final_state, {qubit: 0}).real
my_alpha = 0.3
print("Expectation=", my_expectation(pauli_x, my_alpha))
print("Sin Formula=", np.sin(np.pi * my_alpha))
# + [markdown] id="zcCX109cJUaz"
# $f_{1}(\alpha) = ⟨Y(\alpha)| X | Y(\alpha)⟩$ と定義すると $f_{1}^{'}(\alpha) = \pi \cos(\pi \alpha)$ になります。確認しましょう。
# + id="VMq7EayNRyQb"
def my_grad(obs, alpha, eps=0.01):
    """Forward-difference estimate of d/d(alpha) of ⟨Y(alpha)|obs|Y(alpha)⟩.

    Args:
        obs: observable whose expectation is differentiated.
        alpha: point at which to estimate the derivative.
        eps: finite-difference step size.

    Returns:
        The real part of (f(alpha + eps) - f(alpha)) / eps.
    """
    # (Removed the unused `grad = 0` local from the original.)
    f_x = my_expectation(obs, alpha)
    f_x_prime = my_expectation(obs, alpha + eps)
    # .real guards against a stray imaginary component from the simulator.
    return ((f_x_prime - f_x) / eps).real
print('Finite difference:', my_grad(pauli_x, my_alpha))
print('Cosine formula: ', np.pi * np.cos(np.pi * my_alpha))
# + [markdown] id="-SUlLpXBeicF"
# ## 2. 微分器の必要性
#
# より大きな回路では、与えられた量子回路の勾配を正確に計算する式は必ずしもありません。単純な式では勾配を計算できない場合、`tfq.differentiators.Differentiator`クラスを使用すると、回路の勾配を計算するためのアルゴリズムを定義できます。たとえば、TensorFlow Quantum(TFQ)で上記の例を次のように再現できます。
# + id="Om76ZLu8NT_i"
expectation_calculation = tfq.layers.Expectation(
differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))
expectation_calculation(my_circuit,
operators=pauli_x,
symbol_names=['alpha'],
symbol_values=[[my_alpha]])
# + [markdown] id="lx3y2DX9NT_k"
# ただし、サンプリングに基づいて期待値を推定するように切り替えると(実際のデバイスで何が起こるか)、値が少し変わる可能性があります。これは、期待値が不正確になることを意味します。
# + id="v27rRyAHNT_l"
sampled_expectation_calculation = tfq.layers.SampledExpectation(
differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))
sampled_expectation_calculation(my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=[[my_alpha]])
# + [markdown] id="Igwa3EnzNT_p"
# これは、勾配における深刻な精度の問題につながる可能性があります。
# + id="StljXH38NT_q"
# Make input_points = [batch_size, 1] array.
input_points = np.linspace(0, 5, 200)[:, np.newaxis].astype(np.float32)
exact_outputs = expectation_calculation(my_circuit,
operators=pauli_x,
symbol_names=['alpha'],
symbol_values=input_points)
imperfect_outputs = sampled_expectation_calculation(my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=input_points)
plt.title('Forward Pass Values')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.plot(input_points, exact_outputs, label='Analytic')
plt.plot(input_points, imperfect_outputs, label='Sampled')
plt.legend()
# + id="dfXObk7KNT_t"
# Gradients are a much different story.
values_tensor = tf.convert_to_tensor(input_points)
with tf.GradientTape() as g:
g.watch(values_tensor)
exact_outputs = expectation_calculation(my_circuit,
operators=pauli_x,
symbol_names=['alpha'],
symbol_values=values_tensor)
analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)
with tf.GradientTape() as g:
g.watch(values_tensor)
imperfect_outputs = sampled_expectation_calculation(
my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=values_tensor)
sampled_finite_diff_gradients = g.gradient(imperfect_outputs, values_tensor)
plt.title('Gradient Values')
plt.xlabel('$x$')
plt.ylabel('$f^{\'}(x)$')
plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')
plt.plot(input_points, sampled_finite_diff_gradients, label='Sampled')
plt.legend()
# + [markdown] id="Ld34TJvTNT_w"
# ここでは、解析の場合は有限差分式は勾配自体を高速に計算できますが、サンプリングベースの方法の場合ではノイズが多すぎることが分かります。適切な勾配を計算するには、より注意深い手法を使用する必要があります。次に、解析的期待値の勾配計算にはあまり適していませんが、実際のサンプルベースの方法の場合ではより優れたパフォーマンスを発揮する、大幅に低速な手法を見ていきます。
# + id="JsBxH_RaNT_x"
# A smarter differentiation scheme.
gradient_safe_sampled_expectation = tfq.layers.SampledExpectation(
differentiator=tfq.differentiators.ParameterShift())
with tf.GradientTape() as g:
g.watch(values_tensor)
imperfect_outputs = gradient_safe_sampled_expectation(
my_circuit,
operators=pauli_x,
repetitions=500,
symbol_names=['alpha'],
symbol_values=values_tensor)
sampled_param_shift_gradients = g.gradient(imperfect_outputs, values_tensor)
plt.title('Gradient Values')
plt.xlabel('$x$')
plt.ylabel('$f^{\'}(x)$')
plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')
plt.plot(input_points, sampled_param_shift_gradients, label='Sampled')
plt.legend()
# + [markdown] id="0xlUlh8wNT_z"
# 上記から、特定の微分器が特定の研究シナリオに最適であることがわかります。一般に、デバイスノイズなどに対して堅牢な、低速のサンプルベースの方法は、より「現実的」設定でアルゴリズムをテストまたは実装する場合に適した微分器です。有限差分のようなより高速な方法はアルゴリズムのデバイスにおける実行可能性にはまだ関心がなく、解析的計算やより高いスループットが必要な場合に最適です。
# + [markdown] id="FaijzZ4MNT_0"
# ## 3. 複数のオブザーバブル
#
# 2 番目のオブザーバブルを使用し、TensorFlow Quantum が 1 つの回路に対して複数のオブザーバブルをサポートする方法を見てみましょう。
# + id="ytgB_DqDNT_3"
pauli_z = cirq.Z(qubit)
pauli_z
# + [markdown] id="r51TZls4NT_6"
# このオブザーバブルが以前と同じ回路で使用されている場合、$f_{2}(\alpha) = ⟨Y(\alpha)| Z | Y(\alpha)⟩ = \cos(\pi \alpha)$ および$f_{2}^{'}(\alpha) = -\pi \sin(\pi \alpha)$になります。確認します。
# + id="19FKgu0ANT_7"
test_value = 0.
print('Finite difference:', my_grad(pauli_z, test_value))
print('Sin formula: ', -np.pi * np.sin(np.pi * test_value))
# + [markdown] id="_33Y5mL0NT_-"
# (ほぼ)一致します。
#
# 次に、$g(\alpha) = f_{1}(\alpha) + f_{2}(\alpha)$ を定義すると、$g'(\alpha) = f_{1}^{'}(\alpha) + f^{'}_{2}(\alpha)$になります。TensorFlow Quantum で複数のオブザーバブルを定義して回路と共に使用するには、$g$ にさらに項を追加します。
#
# これは、回路内の特定のシンボルの勾配が、その回路に適用されたそのシンボルの各オブザーバブルに関する勾配の合計に等しいことを意味します。これは、TensorFlow の勾配取得およびバックプロパゲーションと互換性があります(特定のシンボルの勾配として、すべてのオブザーバブルの勾配の合計を指定します)。
# + id="3WFJfFEbNT_-"
sum_of_outputs = tfq.layers.Expectation(
differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))
sum_of_outputs(my_circuit,
operators=[pauli_x, pauli_z],
symbol_names=['alpha'],
symbol_values=[[test_value]])
# + [markdown] id="-ujQUu3WNUAB"
# ここで、最初のエントリは期待値 w.r.t Pauli X であり、2 番目のエントリは期待値 w.r.t Pauli Z です。勾配は以下のとおりです。
# + id="jcAQa9l0NUAB"
test_value_tensor = tf.convert_to_tensor([[test_value]])
with tf.GradientTape() as g:
g.watch(test_value_tensor)
outputs = sum_of_outputs(my_circuit,
operators=[pauli_x, pauli_z],
symbol_names=['alpha'],
symbol_values=test_value_tensor)
sum_of_gradients = g.gradient(outputs, test_value_tensor)
print(my_grad(pauli_x, test_value) + my_grad(pauli_z, test_value))
print(sum_of_gradients.numpy())
# + [markdown] id="-fZmbYGANUAE"
# ここで、各オブザーバブルの勾配の合計が実際に $\alpha$ の勾配であることを確認しました。この動作は、すべての TensorFlow Quantum 微分器によってサポートされており、TensorFlow の他の部分との互換性において重要な役割を果たします。
# + [markdown] id="lZsGG7lWNUAF"
# ## 4. 高度な使用
#
# ここでは、量子回路用に独自のカスタム微分ルーチンを定義する方法を紹介します。TensorFlowQuantum サブクラス`tfq.differentiators.Differentiator`内に存在するすべての微分器。微分器は`differentiate_analytic`と`differentiate_sampled`を実装する必要があります。
#
# 以下では、TensorFlow Quantum 構造を使用して、このチュートリアルの最初の部分の閉形式の解を実装します。
# + id="5iY4q6FKNUAG"
class MyDifferentiator(tfq.differentiators.Differentiator):
    """A Toy differentiator for <Y^alpha | X |Y^alpha>."""
    # NOTE(review): this implements the `differentiate_analytic` /
    # `differentiate_sampled` interface; newer TFQ releases differentiate via
    # `get_gradient_circuits` — confirm against the installed TFQ version.

    def __init__(self):
        # No trainable state of our own; nothing to initialize here.
        pass

    @tf.function
    def _compute_gradient(self, symbol_values):
        """Compute the gradient based on symbol_values."""
        # Closed-form derivative for this tutorial's circuit/observable pair:
        # f(x) = sin(pi * x)
        # f'(x) = pi * cos(pi * x)
        return tf.cast(tf.cos(symbol_values * np.pi) * np.pi, tf.float32)

    @tf.function
    def differentiate_analytic(self, programs, symbol_names, symbol_values,
                               pauli_sums, forward_pass_vals, grad):
        """Specify how to differentiate a circuit with analytical expectation.
        This is called at graph runtime by TensorFlow. `differentiate_analytic`
        should calculate the gradient of a batch of circuits and return it
        formatted as indicated below. See
        `tfq.differentiators.ForwardDifference` for an example.
        Args:
            programs: `tf.Tensor` of strings with shape [batch_size] containing
                the string representations of the circuits to be executed.
            symbol_names: `tf.Tensor` of strings with shape [n_params], which
                is used to specify the order in which the values in
                `symbol_values` should be placed inside of the circuits in
                `programs`.
            symbol_values: `tf.Tensor` of real numbers with shape
                [batch_size, n_params] specifying parameter values to resolve
                into the circuits specified by programs, following the ordering
                dictated by `symbol_names`.
            pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
                containing the string representation of the operators that will
                be used on all of the circuits in the expectation calculations.
            forward_pass_vals: `tf.Tensor` of real numbers with shape
                [batch_size, n_ops] containing the output of the forward pass
                through the op you are differentiating.
            grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]
                representing the gradient backpropagated to the output of the
                op you are differentiating through.
        Returns:
            A `tf.Tensor` with the same shape as `symbol_values` representing
            the gradient backpropagated to the `symbol_values` input of the op
            you are differentiating through.
        """
        # Computing gradients just based off of symbol_values.
        # Chain rule: d(loss)/d(symbol) = f'(symbol) * upstream gradient.
        return self._compute_gradient(symbol_values) * grad

    @tf.function
    def differentiate_sampled(self, programs, symbol_names, symbol_values,
                              pauli_sums, num_samples, forward_pass_vals, grad):
        """Specify how to differentiate a circuit with sampled expectation.
        This is called at graph runtime by TensorFlow. `differentiate_sampled`
        should calculate the gradient of a batch of circuits and return it
        formatted as indicated below. See
        `tfq.differentiators.ForwardDifference` for an example.
        Args:
            programs: `tf.Tensor` of strings with shape [batch_size] containing
                the string representations of the circuits to be executed.
            symbol_names: `tf.Tensor` of strings with shape [n_params], which
                is used to specify the order in which the values in
                `symbol_values` should be placed inside of the circuits in
                `programs`.
            symbol_values: `tf.Tensor` of real numbers with shape
                [batch_size, n_params] specifying parameter values to resolve
                into the circuits specified by programs, following the ordering
                dictated by `symbol_names`.
            pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]
                containing the string representation of the operators that will
                be used on all of the circuits in the expectation calculations.
            num_samples: `tf.Tensor` of positive integers representing the
                number of samples per term in each term of pauli_sums used
                during the forward pass.
            forward_pass_vals: `tf.Tensor` of real numbers with shape
                [batch_size, n_ops] containing the output of the forward pass
                through the op you are differentiating.
            grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]
                representing the gradient backpropagated to the output of the
                op you are differentiating through.
        Returns:
            A `tf.Tensor` with the same shape as `symbol_values` representing
            the gradient backpropagated to the `symbol_values` input of the op
            you are differentiating through.
        """
        # Same closed-form gradient as the analytic case; the sampling noise
        # of the forward pass is deliberately ignored (num_samples is unused).
        return self._compute_gradient(symbol_values) * grad
# + [markdown] id="bvEgw2m6NUAI"
# この新しい微分器は、既存の`tfq.layer`オブジェクトで使用できるようになりました。
# + id="QrKnkWswNUAJ"
# Instantiate the toy differentiator and attach it to an Expectation layer.
custom_dif = MyDifferentiator()
custom_grad_expectation = tfq.layers.Expectation(differentiator=custom_dif)

# Now let's get the gradients with finite diff.
with tf.GradientTape() as g:
    g.watch(values_tensor)
    exact_outputs = expectation_calculation(my_circuit,
                                            operators=[pauli_x],
                                            symbol_names=['alpha'],
                                            symbol_values=values_tensor)
analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)

# Now let's get the gradients with custom diff.
with tf.GradientTape() as g:
    g.watch(values_tensor)
    my_outputs = custom_grad_expectation(my_circuit,
                                         operators=[pauli_x],
                                         symbol_names=['alpha'],
                                         symbol_values=values_tensor)
my_gradients = g.gradient(my_outputs, values_tensor)

# Side-by-side comparison: finite-difference gradient vs the custom closed-form one.
plt.subplot(1, 2, 1)
plt.title('Exact Gradient')
plt.plot(input_points, analytic_finite_diff_gradients.numpy())
plt.xlabel('x')
plt.ylabel('f(x)')
plt.subplot(1, 2, 2)
plt.title('My Gradient')
plt.plot(input_points, my_gradients.numpy())
plt.xlabel('x')
# + [markdown] id="oXqcJWigNUAL"
# この新しい微分器を使用して、微分可能な演算を生成できるようになりました。
#
# 重要点:微分器は一度に 1 つの演算にしか接続できないため、以前に演算に接続されていた微分器は、新しい演算に接続する前に更新する必要があります。
# + id="F_WHcj3bNUAM"
# Create a noisy sample based expectation op.
expectation_sampled = tfq.get_sampled_expectation_op(
    cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.01)))

# Make it differentiable with your differentiator:
# Remember to refresh the differentiator before attaching the new op
# (a differentiator can only be attached to one op at a time).
custom_dif.refresh()
differentiable_op = custom_dif.generate_differentiable_op(
    sampled_op=expectation_sampled)

# Prep op inputs.
circuit_tensor = tfq.convert_to_tensor([my_circuit])
op_tensor = tfq.convert_to_tensor([[pauli_x]])
single_value = tf.convert_to_tensor([[my_alpha]])
num_samples_tensor = tf.convert_to_tensor([[1000]])

with tf.GradientTape() as g:
    g.watch(single_value)
    forward_output = differentiable_op(circuit_tensor, ['alpha'], single_value,
                                       op_tensor, num_samples_tensor)
my_gradients = g.gradient(forward_output, single_value)

# Compare the sampled op against the closed-form reference values.
print('---TFQ---')
print('Forward: ', forward_output.numpy())  # fixed typo: was 'Foward: '
print('Gradient:', my_gradients.numpy())
print('---Original---')
print('Forward: ', my_expectation(pauli_x, my_alpha))
print('Gradient:', my_grad(pauli_x, my_alpha))
# + [markdown] id="OGWcpqzDNUAP"
# 成功:TensorFlow Quantum が提供するすべての微分器を使用して、独自の微分器を定義できるようになりました。
| site/ja/quantum/tutorials/gradients.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
# http://pandas.pydata.org/pandas-docs/stable/visualization.html
#
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np

matplotlib.style.use('ggplot')

# Random walk: cumulative sum of daily standard-normal increments.
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
ts.plot()

# Four random walks sharing the same date index; DataFrame.plot draws one line per column.
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD'))
df = df.cumsum()
plt.figure(); df.plot();

# Plot one column against another via the x=/y= keywords.
df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum()
df3['A'] = pd.Series(list(range(len(df))))
df3.plot(x='A', y='B')
| notebooks/matplotlib/pandas_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <a href="http://cocl.us/pytorch_link_top">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/Pytochtop.png" width="750" alt="IBM Product " />
# </a>
#
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/cc-logo-square.png" width="200" alt="cognitiveclass.ai logo" />
# <h1>Using Dropout for Classification </h1>
# <h2>Table of Contents</h2>
# <p>In this lab, you will see how adding dropout to your model will decrease overfitting.</p>
#
# <ul>
# <li><a href="#Makeup_Data">Make Some Data</a></li>
# <li><a href="#Model_Cost">Create the Model and Cost Function the PyTorch way</a></li>
# <li><a href="#BGD">Batch Gradient Descent</a></li>
# </ul>
# <p>Estimated Time Needed: <strong>20 min</strong></p>
#
# <hr>
# <h2>Preparation</h2>
# We'll need the following libraries
# +
# Import the libraries we need for this lab
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from matplotlib.colors import ListedColormap
from torch.utils.data import Dataset, DataLoader
# -
# Use this function only for plotting:
# +
# The function for plotting the diagram
def plot_decision_regions_3class(data_set, model=None):
    """Plot the true decision boundary of ``data_set`` and, optionally, the
    decision regions predicted by ``model``.

    Args:
        data_set: object exposing ``x``/``y`` tensors and ``multi_dim_poly``.
        model: optional torch classifier; if given, its predicted regions are
            shaded and the true boundary is drawn on top.
    """
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#00AAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#00AAFF'])
    X = data_set.x.numpy()
    y = data_set.y.numpy()
    h = .02  # mesh step size
    x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
    y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    newdata = np.c_[xx.ravel(), yy.ravel()]
    # True boundary: sign of the generating polynomial evaluated on the mesh.
    Z = data_set.multi_dim_poly(newdata).flatten()
    f = np.zeros(Z.shape)
    f[Z > 0] = 1
    f = f.reshape(xx.shape)
    if model is not None:  # fixed: was `model != None`
        model.eval()
        XX = torch.Tensor(newdata)
        _, yhat = torch.max(model(XX), 1)
        yhat = yhat.numpy().reshape(xx.shape)
        plt.pcolormesh(xx, yy, yhat, cmap=cmap_light)
        plt.contour(xx, yy, f, cmap=plt.cm.Paired)
    else:
        plt.contour(xx, yy, f, cmap=plt.cm.Paired)
        plt.pcolormesh(xx, yy, f, cmap=cmap_light)
    plt.title("decision region vs True decision boundary")
# -
# Use this function to calculate accuracy:
# +
# The function for calculating accuracy
def accuracy(model, data_set):
    """Fraction of samples in ``data_set`` that ``model`` classifies correctly."""
    scores = model(data_set.x)
    _, predicted_labels = torch.max(scores, 1)
    hits = (predicted_labels == data_set.y).numpy()
    return hits.mean()
# -
# <!--Empty Space for separating topics-->
# <h2 id="Makeup_Data">Make Some Data</h2>
# Create a nonlinearly separable dataset:
# +
# Create data class for creating dataset object
class Data(Dataset):
    """Two-feature binary-classification dataset whose labels come from the
    sign of a fixed quadratic polynomial, with Gaussian noise added to x.

    Args:
        N_SAMPLES: number of points to generate.
        noise_std: standard deviation of the Gaussian noise added to x.
        train: if True, a second (seeded) noise draw is added so the training
            split is noisier than the validation split.
    """

    # Constructor
    def __init__(self, N_SAMPLES=1000, noise_std=0.1, train=True):
        # Polynomial coefficients: bias, linear terms, cross term, quadratic terms.
        a = np.matrix([-1, 1, 2, 1, 1, -3, 1]).T
        self.a = a
        self.x = np.matrix(np.random.rand(N_SAMPLES, 2))
        self.f = np.array(a[0] + (self.x) * a[1:3] + np.multiply(self.x[:, 0], self.x[:, 1]) * a[4] + np.multiply(self.x, self.x) * a[5:7]).flatten()
        self.y = np.zeros(N_SAMPLES)
        self.y[self.f > 0] = 1
        self.y = torch.from_numpy(self.y).type(torch.LongTensor)
        self.x = torch.from_numpy(self.x).type(torch.FloatTensor)
        self.x = self.x + noise_std * torch.randn(self.x.size())
        self.f = torch.from_numpy(self.f)
        self.len = N_SAMPLES  # fix: __len__ previously raised AttributeError
        if train:
            # Training split gets an extra reproducible noise draw.
            torch.manual_seed(1)
            self.x = self.x + noise_std * torch.randn(self.x.size())
            torch.manual_seed(0)

    # Getter
    def __getitem__(self, index):
        """Return the (features, label) pair at ``index``."""
        return self.x[index], self.y[index]

    # Get Length
    def __len__(self):
        """Number of samples in the dataset."""
        return self.len

    # Plot the diagram
    def plot(self):
        """Scatter the noisy samples and overlay the true decision boundary.

        Fix: previously read the module-level ``data_set`` instead of ``self``,
        so plotting any other instance (e.g. validation_set) showed wrong data.
        """
        X = self.x.numpy()
        h = .02
        x_min, x_max = X[:, 0].min(), X[:, 0].max()
        y_min, y_max = X[:, 1].min(), X[:, 1].max()
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
        Z = self.multi_dim_poly(np.c_[xx.ravel(), yy.ravel()]).flatten()
        f = np.zeros(Z.shape)
        f[Z > 0] = 1
        f = f.reshape(xx.shape)
        plt.title('True decision boundary and sample points with noise ')
        plt.plot(self.x[self.y == 0, 0].numpy(), self.x[self.y == 0, 1].numpy(), 'bo', label='y=0')
        plt.plot(self.x[self.y == 1, 0].numpy(), self.x[self.y == 1, 1].numpy(), 'ro', label='y=1')
        plt.contour(xx, yy, f, cmap=plt.cm.Paired)
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        plt.legend()

    # Make a multidimension polynomial function
    def multi_dim_poly(self, x):
        """Evaluate the generating quadratic polynomial at points ``x`` (n x 2)."""
        x = np.matrix(x)
        out = np.array(self.a[0] + (x) * self.a[1:3] + np.multiply(x[:, 0], x[:, 1]) * self.a[4] + np.multiply(x, x) * self.a[5:7])
        return out
# -
# Create a dataset object:
# +
# Create a dataset object
data_set = Data(noise_std=0.1)
data_set.plot()
# -
# Validation data:
# +
# Get some validation data
torch.manual_seed(0)
validation_set = Data(train=False)
# -
# <!--Empty Space for separating topics-->
# <h2 id="Model_Cost">Create the Model, Optimizer, and Total Loss Function (Cost)</h2>
# Create a custom module with three layers. <code>in_size</code> is the size of the input features, <code>n_hidden</code> is the size of the layers, and <code>out_size</code> is the size. <code>p</code> is the dropout probability. The default is 0, that is, no dropout.
#
# +
# Create Net Class
class Net(nn.Module):
    """Fully connected three-layer classifier with optional dropout.

    Args:
        in_size: number of input features.
        n_hidden: width of both hidden layers.
        out_size: number of output classes.
        p: dropout probability applied after each hidden linear layer
           (default 0, i.e. no dropout).
    """

    def __init__(self, in_size, n_hidden, out_size, p=0):
        super(Net, self).__init__()
        self.drop = nn.Dropout(p=p)
        self.linear1 = nn.Linear(in_size, n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_hidden)
        self.linear3 = nn.Linear(n_hidden, out_size)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of inputs."""
        hidden = F.relu(self.drop(self.linear1(x)))
        hidden = F.relu(self.drop(self.linear2(hidden)))
        return self.linear3(hidden)
# -
# Create two model objects: <code>model</code> had no dropout and <code>model_drop</code> has a dropout probability of 0.5:
# +
# Create two model objects: model without dropout and model with dropout
model = Net(2, 300, 2)
model_drop = Net(2, 300, 2, p=0.5)
# -
# <!--Empty Space for separating topics-->
# <h2 id="BGD">Train the Model via Mini-Batch Gradient Descent</h2>
# Set the model using dropout to training mode; this is the default mode, but it's good practice to write this in your code :
# +
# Set the model to training mode
model_drop.train()
# -
model.train()
# Train the model by using the Adam optimizer. See the unit on other optimizers. Use the Cross Entropy Loss:
# +
# Set optimizer functions and criterion functions
optimizer_ofit = torch.optim.Adam(model.parameters(), lr=0.01)
optimizer_drop = torch.optim.Adam(model_drop.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
# -
# Initialize a dictionary that stores the training and validation loss for each model:
# +
# Initialize the LOSS dictionary to store the loss
LOSS = {}
LOSS['training data no dropout'] = []
LOSS['validation data no dropout'] = []
LOSS['training data dropout'] = []
LOSS['validation data dropout'] = []
# -
# Run 500 iterations of batch gradient descent:
# +
# Train the model
epochs = 500

def train_model(epochs):
    """Train ``model`` and ``model_drop`` side by side for ``epochs`` full-batch
    steps, recording training and validation losses in the global LOSS dict.

    NOTE(review): relies on the module-level names model, model_drop, data_set,
    validation_set, criterion, optimizer_ofit, optimizer_drop and LOSS.
    """
    for epoch in range(epochs):
        # all the samples are used for training (full-batch gradient descent)
        yhat = model(data_set.x)
        yhat_drop = model_drop(data_set.x)
        loss = criterion(yhat, data_set.y)
        loss_drop = criterion(yhat_drop, data_set.y)
        # store the loss for both the training and validation data for both models
        LOSS['training data no dropout'].append(loss.item())
        model.eval()
        LOSS['validation data no dropout'].append(criterion(model(validation_set.x), validation_set.y).item())
        model.train()
        LOSS['training data dropout'].append(loss_drop.item())
        model_drop.eval()  # disable dropout while scoring validation data
        LOSS['validation data dropout'].append(criterion(model_drop(validation_set.x), validation_set.y).item())
        model_drop.train()  # re-enable dropout before the update step
        optimizer_ofit.zero_grad()
        optimizer_drop.zero_grad()
        loss.backward()
        loss_drop.backward()
        optimizer_ofit.step()
        optimizer_drop.step()

train_model(epochs)
# -
# Set the model with dropout to evaluation mode:
# +
# Set the model to evaluation mode
model_drop.eval()
# -
model.eval()
# Test the model without dropout on the validation data:
# +
# Print out the accuracy of the model without dropout
print("The accuracy of the model without dropout: ", accuracy(model, validation_set))
# -
# Test the model with dropout on the validation data:
# +
# Print out the accuracy of the model with dropout
print("The accuracy of the model with dropout: ", accuracy(model_drop, validation_set))
# -
# You see that the model with dropout performs better on the validation data.
# <h3>True Function</h3>
# Plot the decision boundary and the prediction of the networks in different colors.
# +
# Plot the decision boundary and the prediction
plot_decision_regions_3class(data_set)
# -
# Model without Dropout:
# +
# The model without dropout
plot_decision_regions_3class(data_set, model)
# -
# Model with Dropout:
# +
# The model with dropout
plot_decision_regions_3class(data_set, model_drop)
# -
# You can see that the model using dropout does better at tracking the function that generated the data.
# Plot out the loss for the training and validation data on both models, we use the log to make the difference more apparent
# +
# Plot the LOSS
plt.figure(figsize=(6.1, 10))
def plot_LOSS():
for key, value in LOSS.items():
plt.plot(np.log(np.array(value)), label=key)
plt.legend()
plt.xlabel("iterations")
plt.ylabel("Log of cost or total loss")
plot_LOSS()
# -
# You see that the model without dropout performs better on the training data, but it performs worse on the validation data. This suggests overfitting. However, the model using dropout performed better on the validation data, but worse on the training data.
# <!--Empty Space for separating topics-->
# <a href="http://cocl.us/pytorch_link_bottom">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" />
# </a>
# <h2>About the Authors:</h2>
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/"><NAME></a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
# Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/"><NAME></a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a>
# <hr>
# Copyright © 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
| 5. Dropout/5.1.1dropoutPredictin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tflow2
# language: python
# name: tflow2
# ---
# # Fertig's version of Variational Inference using normalizing flows
# +
import scipy
import numpy as np
import pandas as pd
from tqdm import trange
import time
#import tensorflow.compat.v2 as tf
#tf.enable_v2_behavior()
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
# data splitting and outlier removal
import sklearn.preprocessing # StandardScaler
from sklearn.ensemble import IsolationForest
import sklearn.model_selection # train_test_split
import colossus
from colossus.cosmology import cosmology
# Set tensor numeric type.
dtype = 'float32'
# Data Viz.
import matplotlib.pyplot as plt
import seaborn as sns
import itertools # to cycle through palette colors
if 1== 1 :
sns.set_style(
style='darkgrid',
rc={'axes.facecolor': '.9', 'grid.color': '.8'}
)
sns.set_palette(palette='deep')
#sns.set_palette(palette="Paired_r")
#sns_c = sns.color_palette(palette="Paired_r")
# %matplotlib inline
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Get TensorFlow version.
print(f'TensorFlow version: {tf.__version__}')
print(f'TensorFlow Probability version: {tfp.__version__}')
if tf.test.gpu_device_name() != '/device:GPU:0':
print('WARNING: GPU device not found.')
else:
print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))
# +
# for wide spread use
# ppd_df is the output of make_posterior_predictive_distribution
def yp_on_yt_plot(y_predictions, y_sigma, y_test, ppd_df):
    """Two-panel diagnostic plot of predictions against held-out targets.

    Left panel: y_pred vs y with error bars, a fitted regression line, and the
    posterior-predictive y=x band taken from ppd_df. Right panel: the same for
    the residuals y_pred - y (band recentered on zero).

    Args:
        y_predictions: predicted means for the test sample.
        y_sigma: per-prediction credible-interval half-widths (error bars).
        y_test: true target values.
        ppd_df: DataFrame with columns "range", "mean", "90l", "90u".
    """
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(14, 5), constrained_layout=True)
    ax[0].errorbar(x=np.array(y_test), y=y_predictions, yerr=y_sigma, fmt='o',
                   ecolor="r", mfc="r", mec="k", capsize=2, label='predictions + credible intervals')
    sns.regplot(x=np.array(y_test), y=y_predictions,
                scatter=False, line_kws=dict(alpha=0.5), fit_reg=True, label="linear regression",
                truncate=False, ax=ax[0])
    ax[0].plot(ppd_df["range"], ppd_df["mean"], color="black", label="y=x, 90% posterior predictive")
    ax[0].fill_between(ppd_df["range"], ppd_df["90l"], ppd_df["90u"], color='k', alpha=.1)
    ax[0].set(title='Model Predictions for Test Sample', xlabel='y', ylabel='y_pred');
    ax[0].legend(loc='lower right')
    ax[1].errorbar(x=np.array(y_test), y=y_predictions - np.array(y_test), yerr=y_sigma, fmt='o',
                   # ecolor=sns_c[1], mfc=sns_c[4],
                   ecolor="r", mfc="r", mec="k", capsize=2, label='predictions + credible intervals',)
    sns.regplot(x=np.array(y_test), y=y_predictions - np.array(y_test),
                scatter=False, line_kws=dict(alpha=0.5), fit_reg=True, label="linear regression",
                truncate=False, ax=ax[1])
    ax[1].plot(ppd_df["range"], np.zeros(ppd_df["mean"].size),
               color="black", label="y=x, 90% posterior predictive")
    ax[1].fill_between(ppd_df["range"], ppd_df["90l"] - ppd_df["mean"], ppd_df["90u"] - ppd_df["mean"], color='k', alpha=.1)
    ax[1].set(title='Delta Model Predictions for Test Sample', xlabel='y', ylabel='y_pred');
    ax[1].legend(loc='lower right')
def make_posterior_predictive_distribution(sigma, verbose=False, lo=14.0, hi=15.0,
                                           step=0.05, n_draws=20000):
    """Build a posterior-predictive summary table along the y=x line.

    For each grid value z in [lo, hi) draw ``n_draws`` samples from
    Normal(z, sigma) and record their mean, std, and a mean +/- 2*std band.
    Only sigma matters here because this is a posterior predictive
    distribution centered on the identity prediction.

    Args:
        sigma: predictive standard deviation (scalar).
        verbose: if True, print intermediate tensor shapes.
        lo, hi, step: evaluation grid (defaults reproduce the previously
            hard-coded 14.0-15.0 range with 0.05 spacing).
        n_draws: Monte-Carlo draws per grid point.

    Returns:
        pandas.DataFrame with columns "mean", "range", "std", "90l", "90u".
    """
    range_x = np.arange(lo, hi, step)
    if verbose:
        print("range_x: ", range_x.shape)
    range_y = tf.map_fn(
        fn=lambda z: tfd.Normal(loc=z, scale=sigma).sample(n_draws),
        elems=tf.constant(range_x, dtype=dtype))
    range_y = tf.squeeze(range_y)
    if verbose:
        print("range_y ", range_y.shape)
    range_mean = tf.math.reduce_mean(range_y, axis=1).numpy()
    range_std = tf.math.reduce_std(range_y, axis=1).numpy()
    df = pd.DataFrame({"mean": range_mean})
    df["range"] = range_x
    df["std"] = range_std
    df["90l"] = range_mean - 2 * range_std
    df["90u"] = range_mean + 2 * range_std
    return df
#
# -
cluster_data=pd.read_csv("~/Data/cluster_data_0.3-0.6-msm.csv")
clusters = pd.DataFrame( {"central_sm":cluster_data["central_sm"]} )
clusters["measured_sm"] = cluster_data["measured_sm"]
clusters["halo_mass"] = cluster_data["halo_mass"]
clusters.head()
#
# +
train,test = sklearn.model_selection.train_test_split(clusters, train_size=0.80)
X = train[train.columns[:-1]].astype(dtype)
y = train[train.columns[-1]].astype(dtype)
print("X, y:", X.shape, y.shape)
X_test = test[test.columns[:-1]].astype(dtype)
y_test = test[test.columns[-1]].astype(dtype)
all_X = clusters[clusters.columns[:-1]].astype(dtype)
all_y = clusters[clusters.columns[-1]].astype(dtype)
train = tf.data.Dataset.from_tensor_slices( (X, y)).shuffle(10000).batch(1000)
# Are these globals?
x = X
y = y
# +
#x = np.vstack([cluster_data[xname],cluster_data[x2name]]).transpose()
x = tf.convert_to_tensor(X, dtype=dtype)
y = tf.convert_to_tensor(y, dtype=dtype)
y = tf.reshape(y, (-1, 1))
print("x shape: {}, y shape: {}".format(x.shape,y.shape))
#dir(train)
#train.get_single_element()
#dir(train.batch(10))
#for i in train.batch(10).as_numpy_iterator():
#print(i)
#rtain.batch(2).get_single_element()
# -
# ## Define the physical model
jds_ab = tfd.JointDistributionNamedAutoBatched(dict(
sigma=tfd.HalfNormal(scale=[tf.cast(1.0, dtype)]),
alpha=tfd.Normal(
#loc=[tf.cast(0.0, dtype)],
loc=[tf.cast(2.5, dtype)],
scale=[tf.cast(10.0, dtype)]
),
beta=tfd.Normal(
#loc=[[tf.cast(0.0, dtype)], [tf.cast(0.0, dtype)]],
loc=[[tf.cast(0.5, dtype)], [tf.cast(0.35, dtype)]],
scale=[[tf.cast(10.0, dtype)], [tf.cast(10.0, dtype)]]
),
y=lambda beta, alpha, sigma:
tfd.Normal(
loc=tf.linalg.matmul(x, beta) + alpha,
scale=sigma
)
))
# +
#tf.keras.backend.clear_session()
# Define the probabilistic graphical model as a JointDistribution.
def out_y_point (alpha, beta, x) :
beta = tf.reshape(beta,[2,-1])
return tf.linalg.matmul(x, beta) + alpha
def out_y (alpha, beta, sigma, x) :
return tfd.Normal( loc=out_y_point(alpha, beta, x), scale=sigma, name="y")
@tfd.JointDistributionCoroutineAutoBatched
def model_standard():
alpha = yield tfd.Normal( loc=5.0, scale=3, name="alpha")
beta = yield tfd.Normal( loc=[0.5,0.5], scale=[3.0,3.0], name="beta")
sigma = yield tfd.HalfNormal(scale=0.5, name="sigma")
y = yield out_y(alpha, beta, sigma, x)
@tfd.JointDistributionCoroutineAutoBatched
def model_yyz():
alpha = yield tfd.Normal( loc=3.0, scale=6.0, name="alpha")
beta = yield tfd.Normal( loc=[0.0,0.0], scale=[3.0,3.0], name="beta")
sigma = yield tfd.HalfNormal(scale=1.0, name="sigma")
q = yield tfd.Normal ( loc=0.0, scale=1.0, name="q")
y = yield out_y(alpha, beta, (sigma + q* (out_y_point(alpha, beta, x)-12.0)), x)
@tfd.JointDistributionCoroutineAutoBatched
def model_vector():
alpha = yield tfd.Normal( loc=3.0, scale=6.0, name="alpha")
beta = yield tfd.Normal( loc=[0.0,0.0], scale=[3.0,3.0], name="beta")
sigma = yield tfd.HalfNormal(
loc=(out_y_point(alpha,beta,x)-12.0),
scale=tf.math.softplus( 0.005*(out_y_point(alpha,beta,x)-12.0)) + 0.001,
name="sigma")
y = yield out_y(alpha, beta, sigma, x)
def pack_samples(samples):
    """Flatten posterior samples into a plain dict of draws.

    Accepts either a structured-tuple sample (attributes ``alpha``, ``beta``,
    ``sigma``, with ``beta`` holding both coefficients in its last axis) or an
    already-flat mapping keyed by 'alpha', 'beta0', 'beta1', 'sigma'.

    Fixes: no longer shadows the builtin ``dict`` and replaces the bare
    ``except:`` with an explicit structure check.
    """
    if hasattr(samples, "alpha"):
        return {
            'alpha': samples.alpha,
            'beta0': samples.beta[..., 0],
            'beta1': samples.beta[..., 1],
            'sigma': samples.sigma,
        }
    # Mapping-style samples are already split into beta0/beta1.
    return {
        'alpha': samples["alpha"],
        'beta0': samples["beta0"],
        'beta1': samples["beta1"],
        'sigma': samples["sigma"],
    }
params = ["beta0", "beta1", "alpha", "sigma"];# wonder how to get this automatically
model = model_standard
target_model = model
print("model.event_shape: \n",model.event_shape)
print( target_model.event_shape_tensor())
# +
# Sample from the prior.
prior_samples = jds_ab.sample(500)['y']
prior_samples = tf.squeeze(prior_samples)
prior_mean = tf.math.reduce_mean(prior_samples, axis=0).numpy()
prior_std = tf.math.reduce_std(prior_samples, axis=0).numpy()
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(13, 6), constrained_layout=True)
sns.regplot(
x=tf.squeeze(y).numpy(),
y=prior_mean,
#scatter=False,
line_kws=dict(alpha=0.5),
label='y ~ y_pred',
truncate=False,
ax=ax
)
#print(tf.squeeze(y).numpy())
#print(prior_mean)
#ax.axline(xy1=(0,0), slope=1, linestyle='--', color=sns_c[3], label='diagonal (y = y_pred)')
ax.legend(loc='lower right')
ax.set(title='Model Prior Predictions', xlabel='y', ylabel='y_pred');
# -
# ## Variational Inference
# ### Change the form of the posterior surrogate to Inverse Autoregressive Flow surrogate posterior
#
# +
# target_model is set when one sets the model earlier in the notebook.
# Determine the `event_shape` of the posterior, and calculate the size of each
# `event_shape` component. These determine the sizes of the components of the
# underlying standard Normal distribution, and the dimensions of the blocks in
# the blockwise matrix transformation.
event_shape = target_model.event_shape_tensor()
flat_event_shape = tf.nest.flatten(event_shape)
flat_event_size = tf.nest.map_structure(tf.reduce_prod, flat_event_shape)
print("event_shape: ", event_shape)
print("\n\nflat_event_shape:")
for fes in flat_event_shape:
print(fes)
print("\n\nflat_event_size:")
for fes in flat_event_size:
print(fes)
# The `event_space_bijector` maps unconstrained values (in R^n) to the support
# of the prior -- we'll need this at the end to constrain Multivariate Normal
# samples to the prior's support.
event_space_bijector = target_model.experimental_default_event_space_bijector()
base_standard_dist = tfd.JointDistributionSequential(
[tfd.Sample(tfd.Normal(0., 1.), s) for s in flat_event_size])
#block_tril_linop = (
# tfp.experimental.vi.util.build_trainable_linear_operator_block(
# operators, flat_event_size))
#scale_bijector = tfb.ScaleMatvecLinearOperatorBlock(block_tril_linop)
loc_bijector = tfb.JointMap(
tf.nest.map_structure(
lambda s: tfb.Shift(
tf.Variable(tf.random.uniform(
(s,), minval=-2., maxval=2., dtype=tf.float32))),
flat_event_size))
reshape_bijector = tfb.JointMap(
tf.nest.map_structure(tfb.Reshape, flat_event_shape))
unflatten_bijector = tfb.Restructure(
tf.nest.pack_sequence_as(
event_shape, range(len(flat_event_shape))))
# event_space_bijector = target_model.experimental_default_event_space_bijector()
# Reshape each component to match the prior, using a nested structure of
# `Reshape` bijectors wrapped in `JointMap` to form a multipart bijector.
reshape_bijector = tfb.JointMap(
tf.nest.map_structure(tfb.Reshape, flat_event_shape))
# Restructure the flat list of components to match the prior's structure
unflatten_bijector = tfb.Restructure(
tf.nest.pack_sequence_as(
event_shape, range(len(flat_event_shape))))
# +
# Build a standard Normal with a vector `event_shape`, with length equal to the
# total number of degrees of freedom in the posterior.
base_distribution = tfd.Sample(
tfd.Normal(0., 1.), sample_shape=[tf.reduce_sum(flat_event_size)])
# Apply an IAF to the base distribution.
num_iafs = 2
iaf_bijectors = [
tfb.Invert(tfb.MaskedAutoregressiveFlow(
shift_and_log_scale_fn=tfb.AutoregressiveNetwork(
params=2, hidden_units=[256, 256], activation='relu')))
for _ in range(num_iafs)
]
# Split the base distribution's `event_shape` into components that are equal
# in size to the prior's components.
split = tfb.Split(flat_event_size)
# Chain these bijectors and apply them to the standard Normal base distribution
# to build the surrogate posterior. `event_space_bijector`,
# `unflatten_bijector`, and `reshape_bijector` are the same as in the
# multivariate Normal surrogate posterior.
iaf_surrogate_posterior = tfd.TransformedDistribution(
base_distribution,
bijector=tfb.Chain([
event_space_bijector, # constrain the surrogate to the support of the prior
unflatten_bijector, # pack the reshaped components into the `event_shape` structure of the prior
reshape_bijector, # reshape the vector-valued components to match the shapes of the prior components
split] + # Split the samples into components of the same size as the prior components
iaf_bijectors # Apply a flow model to the Tensor-valued standard Normal distribution
))
# +
start = time.time()
optimizer=tf.optimizers.Adam(learning_rate=1e-3)
iaf_loss = tfp.vi.fit_surrogate_posterior(
target_model.unnormalized_log_prob,
iaf_surrogate_posterior,
optimizer=optimizer,
#num_steps=10**4,
num_steps=10**5,
sample_size=4,
jit_compile=True)
end = time.time()
print("optimizing time: {:2f} seconds".format(end - start))
iaf_samples = iaf_surrogate_posterior.sample(1000)
iaf_final_elbo = tf.reduce_mean(
target_model.unnormalized_log_prob(*iaf_samples)
- iaf_surrogate_posterior.log_prob(iaf_samples))
print('IAF surrogate posterior ELBO: {}'.format(iaf_final_elbo))
plt.figure(figsize=(10, 4))
plt.plot(iaf_loss)
plt.xlabel('Training step')
_ = plt.ylabel('Loss value')
# +
def get_y_predictions(alpha, beta, sigma, x):
    """Posterior-predictive Normal over y for inputs x, given parameter draws."""
    slope = tf.reshape(beta, [2, -1])
    mean = tf.linalg.matmul(x, slope) + alpha
    return tfd.Normal(loc=mean, scale=sigma)
iaf_samples = iaf_surrogate_posterior.sample(10000)
#print(iaf_samples[0].numpy().mean())
alpha = iaf_samples[0].numpy()
beta = iaf_samples[1].numpy()
sigma = iaf_samples[2].numpy()
y_model = get_y_predictions(alpha, beta, sigma, tf.convert_to_tensor(X_test, dtype=dtype))
posterior_mean =( y_model.mean()).numpy().mean(axis=1)
posterior_std = ( y_model.stddev()).numpy().mean(axis=1)
chisq = (((posterior_mean-y_test)/posterior_std)**2).sum()/(y_test.size-len(params)-1)
rms = np.sqrt(((posterior_mean-y_test)**2).sum()/y_test.size)
print("\n Test data\n")
print (" posterior_std chi-sq and rms: {:6.2f} {:6.3f}".format(chisq, rms))
# posterior predictive distribution
ppd_df = make_posterior_predictive_distribution (sigma.mean())
yp_on_yt_plot (posterior_mean, posterior_std, y_test, ppd_df)
# -
| bnn14-for-stellarmass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic regression from scratch
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z); works element-wise on arrays."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
def loss(h, y):
    """Mean binary cross-entropy between predicted probabilities h and labels y."""
    per_sample = -(y * np.log(h) + (1 - y) * np.log(1 - h))
    return per_sample.mean()
# # Logistic regression using scikit-learn : digits dataset
# Loading the Data (Digits Dataset)
from sklearn.datasets import load_digits
digits = load_digits()
# Print to show there are 1797 images (8 by 8 images for a dimensionality of 64)
print("Image Data Shape" , digits.data.shape)
# Print to show there are 1797 labels (integers from 0–9)
print("Label Data Shape", digits.target.shape)
# Showing the Images and the Labels (Digits Dataset)
import numpy as np
import matplotlib.pyplot as plt
plt.figure(figsize=(20,4))
for index, (image, label) in enumerate(zip(digits.data[0:5], digits.target[0:5])):
plt.subplot(1, 5, index + 1)
plt.imshow(np.reshape(image, (8,8)), cmap=plt.cm.gray)
plt.title('Training: %i\n' % label, fontsize = 20)
# Splitting Data into Training and Test Sets (Digits Dataset)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=0)
# +
# Model creation and training
# -
from sklearn.linear_model import LogisticRegression
# all parameters not specified are set to their defaults
logisticRegr = LogisticRegression()
logisticRegr.fit(x_train, y_train)
# Returns a NumPy Array
# Predict for One Observation (image)
logisticRegr.predict(x_test[0].reshape(1,-1))
# Predict for Multiple Observations (images) at Once
logisticRegr.predict(x_test[0:10])
# Make predictions on entire test data
predictions = logisticRegr.predict(x_test)
# Measuring Model Performance (Digits Dataset)
score = logisticRegr.score(x_test, y_test)
print(score)
# +
# Confusion Matrix (Digits Dataset)
import matplotlib.pyplot as plt
from sklearn import metrics
cm = metrics.confusion_matrix(y_test, predictions)
print(cm)
# -
# # Logistic regression using scikit-learn : diabetes dataset
# ## Import necessary libraries
# +
import numpy
import pandas as pd
import matplotlib.pylab as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
# %matplotlib inline
# -
# ## Load and explore data
dataset=pd.read_csv("data/pima-indians-diabetes-data.csv")
dataset.head()
dataset.describe(include='all')
# ## Prepare training and testing data
# +
#create a dataframe with all training data except the target column
X = dataset.drop(columns=['diabetes'])
#check that the target variable has been removed
X.head()
# +
#one-hot encode target column
Y = pd.get_dummies(dataset.diabetes, drop_first=True)
# Y
# -
(trainX, testX, trainY, testY) = train_test_split(X, Y, test_size=0.25, random_state=42)
print("Number of data in training set ",len(trainX), len(trainY))
print("Number of data in tesing set ",len(testX), len(testY))
# ## Model building
# There are three steps to model something with sklearn
# 1. Set up the model
model = LogisticRegression()
# 2. Use fit
model.fit(trainX, trainY)
# 3. Check the score
model.score(testX, testY)
predY = model.predict(testX)
# plt.plot(testY, predY, '.')
# # plot a line, a perfit predict would all fall on this line
# x = np.linspace(0, 5, 5)
# y = x
# plt.plot(x, y)
# plt.show()
testX
# ## Save and Load the model / Prediction
import pickle
# save the model to disk
filename = 'logr_model'
pickle.dump(model, open(filename, 'wb'))
Xnew = [[6, 98, 58, 33, 190, 34.0, 0.430, 43]]
ynew = model.predict(Xnew)
ynew
# load the model from disk
loaded_model = pickle.load(open(filename, 'rb'))
ynew = loaded_model.predict(Xnew)
ynew
| logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 1
# Reference implementation: https://github.com/c17hawke/Perceptron_implimentation_FSDS
# !pip install joblib
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
import joblib
import numpy as np
plt.style.use('fivethirtyeight')
# -
class Perceptron:
    """A single-layer perceptron for 2-feature, binary-label problems.

    Weights are a 3-vector: two feature weights plus a bias weight (the bias
    input is fixed at -1, appended in fit/predict).  Training uses batch
    updates: w <- w + eta * X_bias^T . error, repeated for `epochs` passes.
    """

    def __init__(self, eta: float = None, epochs: int = None):
        # Small random init keeps early activations near the decision boundary.
        self.weights = np.random.randn(3) * 1e-4
        self.eta = eta  # Learning rate
        self.epochs = epochs  # iterations

    def _z_outcome(self, inputs, weights):
        """Pre-activation: dot product of (bias-augmented) inputs and weights."""
        return np.dot(inputs, weights)

    def activation_function(self, z):
        """Step activation: 1 where z > 0, else 0."""
        return np.where(z > 0, 1, 0)

    def fit(self, x, y):
        """Train on features x (n x 2) and binary labels y, printing progress."""
        self.x = x
        self.y = y
        # Append a constant -1 column so the bias is learned as a third weight.
        x_with_bias = np.c_[self.x, -np.ones((len(self.x), 1))]
        print(f"X with bias: \n{x_with_bias}")
        for epoch in range(self.epochs):
            print("--" * 10)
            print(f"for epoch >> {epoch+1}")
            print("--" * 10)
            z = self._z_outcome(x_with_bias, self.weights)
            y_hat = self.activation_function(z)
            print(f"Predicted value after forward pass: \n{y_hat}")
            self.error = self.y - y_hat
            print(f"error: \n{self.error}")
            # Batch perceptron update over all samples at once.
            self.weights = self.weights + self.eta * np.dot(x_with_bias.T, self.error)
            # BUG FIX: log message previously read "afetr".
            print(f"updated weights after epoch: {epoch+1}/{self.epochs}: \n{self.weights}")
            print(f"##" * 10)

    def predict(self, x):
        """Return 0/1 predictions for features x using the learned weights."""
        x_with_bias = np.c_[x, -np.ones((len(x), 1))]
        z = self._z_outcome(x_with_bias, self.weights)
        return self.activation_function(z)
# +
OR = {
'x1':[0,0,1,1],
'x2':[0,1,0,1],
'y':[0,1,1,1]
}
df_OR = pd.DataFrame(OR)
df_OR
# -
def prepare_data(df, target_col='y'):
    """Split *df* into features (every column except target_col) and labels."""
    labels = df[target_col]
    features = df.drop(columns=[target_col])
    return features, labels
# +
x, y = prepare_data(df_OR)
ETA = 0.1
EPOCHS = 10
model_or = Perceptron(eta=ETA, epochs=EPOCHS)
model_or.fit(x,y)
# -
model_or.predict(x=[[1,0]])
# +
AND = {
'x1':[0,0,1,1],
'x2':[0,1,0,1],
'y':[0,0,0,1]
}
df_AND = pd.DataFrame(AND)
df_AND
x, y = prepare_data(df_AND)
ETA = 0.1
EPOCHS = 10
model_and = Perceptron(eta=ETA, epochs=EPOCHS)
model_and.fit(x,y)
# +
XOR = {
'x1':[0,0,1,1],
'x2':[0,1,0,1],
'y':[0,1,1,0]
}
df_XOR = pd.DataFrame(XOR)
df_XOR
x, y = prepare_data(df_XOR)
ETA = 0.1
EPOCHS = 10
model_xor = Perceptron(eta=ETA, epochs=EPOCHS)
model_xor.fit(x,y)
# +
NAND = {
'x1':[0,0,1,1],
'x2':[0,1,0,1],
'y':[1,0,0,0]
}
df_NAND = pd.DataFrame(NAND)
df_NAND
x, y = prepare_data(df_NAND)
ETA = 0.1
EPOCHS = 10
model_nand = Perceptron(eta=ETA, epochs=EPOCHS)
model_nand.fit(x,y)
# -
| Research_env/Perceptron implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: virtualenvs-python36
# language: python
# name: virtualenvs-python36
# ---
# + [markdown] toc=true
# <h1>Initialising the client<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Makes-all-necessary-imports-and-initialise-the-Python-CVA-client" data-toc-modified-id="Makes-all-necessary-imports-and-initialise-the-Python-CVA-client-1"><span class="toc-item-num">1 </span>Makes all necessary imports and initialise the Python CVA client</a></span></li><li><span><a href="#Count-number-of-primary-elements-in-CVA" data-toc-modified-id="Count-number-of-primary-elements-in-CVA-2"><span class="toc-item-num">2 </span>Count number of primary elements in CVA</a></span></li></ul></div>
# -
# ## Makes all necessary imports and initialise the Python CVA client
#
# When initialising the client it is handy to set the log level to INFO in order to see the times that each query takes and how the URL is built.
#
# We will be importing some system libraries, the Pandas library which is used by pyark and will be explained later, some entities available in the Genomics England models in the package `protocols` and finally the `pyark` client.
# +
import getpass
import logging
import os
import sys
import pandas as pd
from collections import defaultdict, OrderedDict
import pyark
from pyark.cva_client import CvaClient
from protocols.protocol_7_2.reports import Program, Tier, Assembly
from protocols.protocol_7_2.cva import ReportEventType
# sets logging messages so the URLs that are called get printed
logging.basicConfig(level=logging.INFO)
# -
# You need three things to initialise pyark: the CVA backend URL, your user name and your password. In this example these are loaded from environment variables.
#
# The client gets a token which will contain your authorisation level. The token renews automatically if necessary. The client will also make retries in case of request failures.
# initialise CVA client and subclients
# every subclient provides access to different sets of data exposed in the API
# Credentials and endpoint are read from environment variables so they are
# never hard-coded in the notebook (as described in the markdown above).
user = os.environ.get("CVA_USER")
# BUG FIX: this line contained a redacted "<PASSWORD>" placeholder, which is a
# syntax error; the password is loaded the same way as the user name.
password = os.environ.get("CVA_PASSWORD")
url = os.environ.get("CVA_URL_BASE", "http://localhost:8090")
cva = CvaClient(url_base=url, user=user, password=password)
# Once the token is obtained we will have available a number of different subclients, each of those providing access to a different CVA entity or functionality.
cases_client = cva.cases()
pedigrees_client = cva.pedigrees()
entities_client = cva.entities()
variants_client = cva.variants()
report_events_client = cva.report_events()
transactions_client = cva.transactions()
# Check the version of your client as follows.
print("pyark version {}".format(pyark.VERSION))
# ## Count number of primary elements in CVA
#
# As the simplest usage example we can count the number of entities in CVA.
# we can count the total number of cases
cases_client.count()
# or we can count the number of cases given some criteria
cases_client.count(program=Program.rare_disease, panelNames='intellectual disability')
# count the total number of report events
report_events_client.count()
# count the number of report events given some criteria
report_events_client.count(program=Program.rare_disease, type="questionnaire")
# count the total number of variants
variants_client.count()
# count the number of variants given some criteria
variants_client.count(assembly=Assembly.GRCh38, geneSymbols="BRCA2")
| notebooks/client_initialisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/devcmgm/pytorch101/blob/master/NumpyFirstone.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="_3DOyRExRTs8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b7a5474a-7cd8-4de5-c7fd-5b5e4086b7b3"
import numpy as np
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
learning_rate = 1e-6
# Train a two-layer network (linear -> ReLU -> linear) with plain gradient
# descent, computing the backward pass by hand with numpy.
for t in range(500):
    # Forward pass: compute predicted y
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)  # ReLU nonlinearity
    y_pred = h_relu.dot(w2)
    # Compute and print loss (sum of squared errors over the whole batch)
    loss = np.square(y_pred - y).sum()
    print(t, loss)
    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)     # dL/dy_pred
    grad_w2 = h_relu.T.dot(grad_y_pred)  # dL/dw2
    grad_h_relu = grad_y_pred.dot(w2.T)  # dL/dh_relu
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0                    # ReLU gradient: zero where pre-activation < 0
    grad_w1 = x.T.dot(grad_h)            # dL/dw1
    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
| NumpyFirstone.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Algorithms
#
# In this module, the lecturer will be completing examples in Java. And I'm going to be doing the same examples, in Python.
#
# In Week 1, we're looking at common operators used in programming.
# ### Example 1: Operators
# **Increment and Decrement:** Well, Python doesn't have an increment or decrement operators so we need to write equations to perform that operation
n = 23
n = n+1
n
n = n - 1
n
# **Assignment:** Shorthand for n + or - value
n = 10
n += 1
n
n += 2
n
n -=3
n
# *Remember, the program stores the last value of n, and uses that to calculate when a code cell is run. If cells were run correctly and in order, n should equal 10. If you get a number other than 10, you should rerun the code cells in order.*
# **Equality Tests:**
# Is the variable n = 1 or 10?
n ==1
n ==10
# Which value does n NOT equal? 1 or 10
n != 1
n !=10
# **Logical AND**
(2==1) & (1==1)
# Java uses &&, to perform the logical AND operation.
# **Logical OR**
(2==1) | (1==1)
# Java uses ||, to perform the logical OR operation.
# **Inequalities**
# Remember the value of n is 10.
n >10
n>=10
n<10
n<=10
# [Precedene and Associativity in Python Code](https://www.programiz.com/python-programming/precedence-associativity)
# ### Example 2: Functions
# [Python Functions at W3schools](https://www.w3schools.com/python/python_functions.asp)
def myfunction():
    """Print a greeting — a minimal example of defining and calling a function."""
    greeting = 'hello'
    print(greeting)
myfunction()
# ### Example 3 : Control Structures
# Sequential - Runs code in the order it appears
#
# Selection - Different blocks of codes are run based on conditions eg. **if/else/elsif** and **switch** statements
#
# Iteration - Get a program to loop through a number of values until it terminates eg. **for** and **while** loops
# +
# Basic example of if, else if, and else statements in Python
# Watch syntax and indentation
i = 0
if (i ==1):
print('no')
elif (i!=0):
print ('no')
else:
print('yes')
# -
# Python doesn't have a Switch / Case statement, so we have to use functions to get around this.
# +
# To replace numbers with words
def numbers_to_strings(argument):
    """Return the English word for 0, 1 or 2; "nothing" for any other value.

    Emulates a switch/case statement with a dictionary lookup, since Python
    (pre-3.10) has no native switch construct.
    """
    words = {0: "zero", 1: "one", 2: "two"}
    if argument in words:
        return words[argument]
    return "nothing"
# -
numbers_to_strings(0)
# For Loop
for x in range (2):
print('hello world')
# Beware of infinite loops. They will never terminate, and need to be interrupted manually.
# While loop
a=0
while a < 5:
print('Happy Monday')
a = a + 1
# ### Example 4: Data Structures
# Arrays are a data structure. They have an index that allow us to find a value based on its position or index in the array. It's a list, where position matters. An array is zero indexed by default.
a = [1, 2, 3, 4, 5]
# Length of a
len(a)
# What value do I have at index 0
a[0]
# Selecting specific values
a[2:4]
# Python does not have inbuilt array support, and you may need to use a library such as numpy to perform operations on your array
#Import numpy
import numpy as np
#Add up all the values in my array
np.sum(a)
#Find the mean of a
np.mean(a)
#Randomly choose a value from a
np.random.choice(a)
# Arrays can also be used to contain strings.
string = ['hello', 'world']
| week1_algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
# ## Fluorescence Microscopy
# Our collaborator's lab studies DNA double-strand break repair in *C. elegans* worms. She uses microscopy to image the worms, using a specific fluorescence marker of double strand breaks. We are going to analyze some of her data and ask whether there is a difference in the number of double strand breaks between her control and experimental conditions.
#
# For each condition, we have a collection of images that are *slices* through a worm. If you flip through them quickly, you pass through the worm. These images are stitched into short videos below.
# %%HTML
<video width="600" height="600" controls loop autoplay>
<source src="videos/control.mp4" type="video/mp4">
</video>
# %%HTML
<video width="600" height="600" controls loop autoplay>
<source src="videos/experimental.mp4" type="video/mp4">
</video>
# ### Task
# The red globs are chromosomes inside nuclei, the green dots are double strand breaks. Your goal is to count the number of double strand breaks per nucleus under these treatment conditions.
#
# + Choose 5 nuclei from the control worm and 5 nuclei from the experimental worm.
# + Count the number of bright green puncta in each nucleus.
# + Report the mean and standard deviation of the number of puncta per nucleus between your 5 control and 5 experimental nuclei.
# + Are the differences you observed statistically significant? (A t-test seems like a reasonable way to go about this...)
#
# ### Hints:
#
# + Define a circular mask for each nucleus you want to study.
# + The *same* puncta can occur across multiple slices, but not all puncta appear in all slices. (This is a 3D chromosome blob). You'll have to make sure you don't count the same point twice.
#
# ### Data
# The collection of images are available here:
#
# https://www.dropbox.com/s/dry59l6h14cqkkj/worm-images.zip?dl=0
#
# If you unzip the directory, it has the following files and directories.
#
# + worm-images
# + experimental
# + z000.png
# + z001.png
# + ...
# + z066.png
# + control
# + z011.png
# + z012.png
# + ...
# + z054.png
#
# + Each `zxxx.png` file is a slice.
# + The R, G, and B channels have different information:
# + R: chromosome marker
# + G: double-strand break marker
# + B: nucleus marker
| labs/06_microscopy/06_counting-puncta.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Cotraining classification performance in simulated multiview scenarios
# - ### [Experimental Setup](#Performance-on-simulated-data)
# - ### [Performance when one view is totally redundant](#Performance-when-one-view-is-totally-redundant)
# - ### [Performance when one view is inseparable](#Performance-when-one-view-is-inseparable)
# - ### [Performance when labeled data is excellent](#Performance-when-labeled-data-is-excellent)
# - ### [Performance when labeled data is not very separable](#Performance-when-labeled-data-is-not-very-separable)
# - ### [Performance when data is overlapping](#Performance-when-data-is-overlapping)
# - ### [Performance as labeled data proportion (essentially sample size) is varied](#Performance-as-labeled-data-proportion-(essentially-sample-size)-is-varied)
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from mvlearn.semi_supervised import CTClassifier
from mvlearn.datasets import load_UCImultifeature
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
# -
# ### Function to create 2 class data
#
# This function is used to generate examples for 2 classes from multivariate normal distributions. Once the examples are generated, it splits them into training and testing sets and returns the needed information
def create_data(seed, class2_mean_center, view1_var, view2_var, N_per_class, view2_class2_mean_center=1):
    """Generate a two-view, two-class Gaussian dataset and split it 70/30.

    Class 0 is centered at the origin in both views; class 1 is centered at
    class2_mean_center (view 1) and view2_class2_mean_center (view 2), each
    with isotropic covariance view{1,2}_var * I.  Returns train/test splits
    for both views plus two independent copies of the training labels.
    """
    np.random.seed(seed)
    mu0_v1 = np.zeros(2,)
    mu1_v1 = class2_mean_center * np.ones(2,)
    cov_v1 = view1_var * np.eye(2)
    mu0_v2 = np.zeros(2,)
    mu1_v2 = view2_class2_mean_center * np.ones(2,)
    cov_v2 = view2_var * np.eye(2)
    # Draw order is fixed (view 1 class 0/1, then view 2 class 0/1) so the
    # global seed reproduces the original data exactly.
    draws = [
        np.random.multivariate_normal(mu0_v1, cov_v1, size=N_per_class),
        np.random.multivariate_normal(mu1_v1, cov_v1, size=N_per_class),
        np.random.multivariate_normal(mu0_v2, cov_v2, size=N_per_class),
        np.random.multivariate_normal(mu1_v2, cov_v2, size=N_per_class),
    ]
    view1 = np.concatenate(draws[:2])
    view2 = np.concatenate(draws[2:])
    labels = np.concatenate((np.zeros(N_per_class,), np.ones(N_per_class,)))
    # Identical random_state guarantees both views get the same row split.
    v1_train, v1_test, labels_train_full, labels_test_full = train_test_split(
        view1, labels, test_size=0.3, random_state=42)
    v2_train, v2_test, labels_train_full, labels_test_full = train_test_split(
        view2, labels, test_size=0.3, random_state=42)
    labels_train = labels_train_full.copy()
    labels_test = labels_test_full.copy()
    return v1_train, v2_train, labels_train, labels_train.copy(), v1_test, v2_test, labels_test
# ### Function to do predictions on single or concatenated view data
#
# This function is used create classifiers for single or concatenated views and return their predictions.
def single_view_class(v1_train, labels_train, v1_test, labels_test, v2_train, v2_test, v2_solver, v2_penalty):
    """Fit logistic-regression baselines on each view and on their concatenation.

    View 2 uses the caller-supplied solver/penalty so its classifier differs
    from view 1's (relevant for co-training view independence).  Returns the
    three test accuracies: (view 1, view 2, naive concatenation).
    """
    # Baseline 1: view 1 alone, default logistic regression.
    clf_v1 = LogisticRegression()
    clf_v1.fit(v1_train, labels_train)
    acc_v1 = accuracy_score(labels_test, clf_v1.predict(v1_test))
    # Baseline 2: view 2 alone, with the independent solver/penalty pair.
    clf_v2 = LogisticRegression(solver=v2_solver, penalty=v2_penalty)
    clf_v2.fit(v2_train, labels_train)
    acc_v2 = accuracy_score(labels_test, clf_v2.predict(v2_test))
    # Baseline 3: naive feature concatenation of both views.
    stacked_train = np.hstack((v1_train, v2_train))
    stacked_test = np.hstack((v1_test, v2_test))
    clf_both = LogisticRegression()
    clf_both.fit(stacked_train, labels_train)
    acc_both = accuracy_score(labels_test, clf_both.predict(stacked_test))
    return acc_v1, acc_v2, acc_both
# ### Function to create 2 class scatter plots with labeled data shown
#
# This function is used to create scatter plots of the 2 class data as well as show the samples that are labeled, making it easier to understand what distributions the simulations are dealing with
def scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train):
    """Scatter-plot both views of the 2-class training data, marking labeled samples.

    `not_removed` holds the indices of training rows whose labels were kept;
    those points are overdrawn with large 'X' markers so the (few) labeled
    examples are visible against the full class distributions.
    """
    idx_train_0 = np.where(labels_train_full==0)
    idx_train_1 = np.where(labels_train_full==1)
    labeled_idx_class0 = not_removed[np.where(labels_train[not_removed]==0)]
    labeled_idx_class1 = not_removed[np.where(labels_train[not_removed]==1)]
    # plot the views
    plt.figure()
    fig, ax = plt.subplots(1,2, figsize=(14,5))
    ax[0].scatter(View1_train[idx_train_0,0], View1_train[idx_train_0,1])
    ax[0].scatter(View1_train[idx_train_1,0], View1_train[idx_train_1,1])
    ax[0].scatter(View1_train[labeled_idx_class0,0], View1_train[labeled_idx_class0,1], s=300, marker='X')
    ax[0].scatter(View1_train[labeled_idx_class1,0], View1_train[labeled_idx_class1,1], s=300, marker='X')
    ax[0].set_title('One Randomization of View 1')
    ax[0].legend(('Class 0', 'Class 1', 'Labeled Class 0', 'Labeled Class 1'))
    ax[0].axes.get_xaxis().set_visible(False)
    ax[0].axes.get_yaxis().set_visible(False)
    ax[1].scatter(View2_train[idx_train_0,0], View2_train[idx_train_0,1])
    ax[1].scatter(View2_train[idx_train_1,0], View2_train[idx_train_1,1])
    # BUG FIX: the labeled-sample markers for view 2 previously took their
    # y-coordinates from View1_train (copy-paste error), plotting the view 2
    # labeled points at the wrong positions.
    ax[1].scatter(View2_train[labeled_idx_class0,0], View2_train[labeled_idx_class0,1], s=300, marker='X')
    ax[1].scatter(View2_train[labeled_idx_class1,0], View2_train[labeled_idx_class1,1], s=300, marker='X')
    ax[1].set_title('One Randomization of View 2')
    ax[1].legend(('Class 0', 'Class 1', 'Labeled Class 0', 'Labeled Class 1'))
    ax[1].axes.get_xaxis().set_visible(False)
    ax[1].axes.get_yaxis().set_visible(False)
    plt.show()
# ## Performance on simulated data
#
# ### General Experimental Setup
# - Below are the results from simulated data testing of the cotraining classifier with different classification problems (class distributions)
# - Results are averaged over 20 randomizations, where a single randomization means using a new seed to generate examples from 2 class distributions and then randomly selecting about 1% of the training data as labeled and leaving the rest unlabeled
# - 500 examples per class, with 70% used for training and 30% for testing
# - For a randomization, train 4 classifiers
# 1. Classifier trained on view 1 labeled data only
# 2. Classifier trained on view 2 labeled data only
# 3. Classifier trained on concatenation of labeled features from views 1 and 2
# 4. multivew CTClassifier trained on views 1 and 2
# - For this, test classification accuracy after different numbers of cotraining iterations to see trajectory of classification accuracy
# - Classification Method:
# - Logistic Regression
# - 'l2' penalty for view 1 and 'l1' penalty for view 2 to ensure independence between the classifiers in the views. This is important because a key aspect of cotraining is view independence, which can either be enforced by completely independent data, or by using an independent classifier for each view, such as using different parameters with the same type of classifier, or two different classification algorithms.
# ### Performance when classes are well separated and labeled examples are randomly chosen
#
# Here, the 2 class distributions are the following
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [1, 1]
# - Class 1 covariance: .2*eye(2)
#
# Labeled examples are chosen randomly from the training set
# +
randomizations = 20
N_per_class = 500
view2_penalty = 'l1'
view2_solver = 'liblinear'
N_iters = np.arange(1, 202, 15)
acc_ct = [[] for _ in N_iters]
acc_view1 = []
acc_view2 = []
acc_combined = []
for count, iters in enumerate(N_iters):
for seed in range(randomizations):
######################### Create Data ###########################
View1_train, View2_train, labels_train, labels_train_full, View1_test, View2_test, labels_test = create_data(seed, 1, .2, .2, N_per_class)
# randomly remove some labels
np.random.seed(11)
remove_idx = np.random.rand(len(labels_train),) < .99
labels_train[remove_idx] = np.nan
not_removed = np.where(remove_idx==False)[0]
# make sure both classes have at least 1 labeled example
if len(set(labels_train[not_removed])) != 2:
continue
if seed == 0 and count == 0:
scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train)
############## Single view semi-supervised learning ##############
# Only do this calculation once, since not affected by number of iterations
if count == 0:
accuracy_view1, accuracy_view2, accuracy_combined = single_view_class(View1_train[not_removed,:].squeeze(),
labels_train[not_removed],
View1_test,
labels_test,
View2_train[not_removed,:].squeeze(),
View2_test,
view2_solver,
view2_penalty)
acc_view1.append(accuracy_view1)
acc_view2.append(accuracy_view2)
acc_combined.append(accuracy_combined)
##################### Multiview ########################################
gnb0 = LogisticRegression()
gnb1 = LogisticRegression(solver=view2_solver, penalty=view2_penalty)
ctc = CTClassifier(gnb0, gnb1, num_iter=iters)
ctc.fit([View1_train, View2_train], labels_train)
y_pred_ct = ctc.predict([View1_test, View2_test])
acc_ct[count].append((accuracy_score(labels_test, y_pred_ct)))
acc_view1 = np.mean(acc_view1)
acc_view2 = np.mean(acc_view2)
acc_combined = np.mean(acc_combined)
acc_ct = [sum(row) / float(len(row)) for row in acc_ct]
# -
# make a figure from the data
plt.figure()
plt.plot(N_iters, acc_view1*np.ones(N_iters.shape))
plt.plot(N_iters, acc_view2*np.ones(N_iters.shape))
plt.plot(N_iters, acc_combined*np.ones(N_iters.shape))
plt.plot(N_iters, acc_ct)
plt.legend(('View 1', 'View 2', 'Naive Concatenated', 'multiview'))
plt.ylabel("Average Accuracy Over {} Randomizations".format(randomizations))
plt.xlabel('Iterations of Co-Training')
plt.title('When Views are Independent and Labeled Samples are Random\nCoTraining Outperforms Single Views and Naive Concatenation')
plt.show()
# ### Performance when one view is totally redundant
#
# Here, the 2 class distributions are the following
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [1, 1]
# - Class 1 covariance: .2*eye(2)
#
# Views 1 and 2 hold the exact same samples
#
# Labeled examples are chosen randomly from the training set
# +
randomizations = 20
N_per_class = 500
view2_penalty = 'l1'
view2_solver = 'liblinear'
N_iters = np.arange(1, 202, 15)
acc_ct = [[] for _ in N_iters]
acc_view1 = []
acc_view2 = []
acc_combined = []
for count, iters in enumerate(N_iters):
for seed in range(randomizations):
######################### Create Data ###########################
View1_train, View2_train, labels_train, labels_train_full, View1_test, View2_test, labels_test = create_data(seed, 1, .2, .2, N_per_class)
View2_train = View1_train.copy()
View2_test = View1_test.copy()
# randomly remove some labels
np.random.seed(11)
remove_idx = np.random.rand(len(labels_train),) < .99
labels_train[remove_idx] = np.nan
not_removed = np.where(remove_idx==False)[0]
# make sure both classes have at least 1 labeled example
if len(set(labels_train[not_removed])) != 2:
continue
if seed == 0 and count == 0:
scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train)
############## Single view semi-supervised learning ##############
# Only do this calculation once, since not affected by number of iterations
if count == 0:
accuracy_view1, accuracy_view2, accuracy_combined = single_view_class(View1_train[not_removed,:].squeeze(),
labels_train[not_removed],
View1_test,
labels_test,
View2_train[not_removed,:].squeeze(),
View2_test,
view2_solver,
view2_penalty)
acc_view1.append(accuracy_view1)
acc_view2.append(accuracy_view2)
acc_combined.append(accuracy_combined)
##################### Multiview ########################################
gnb0 = LogisticRegression()
gnb1 = LogisticRegression(solver=view2_solver, penalty=view2_penalty)
ctc = CTClassifier(gnb0, gnb1, num_iter=iters)
ctc.fit([View1_train, View2_train], labels_train)
y_pred_ct = ctc.predict([View1_test, View2_test])
acc_ct[count].append((accuracy_score(labels_test, y_pred_ct)))
acc_view1 = np.mean(acc_view1)
acc_view2 = np.mean(acc_view2)
acc_combined = np.mean(acc_combined)
acc_ct = [sum(row) / float(len(row)) for row in acc_ct]
# -
# make a figure from the data
plt.figure()
plt.plot(N_iters, acc_view1*np.ones(N_iters.shape))
plt.plot(N_iters, acc_view2*np.ones(N_iters.shape))
plt.plot(N_iters, acc_combined*np.ones(N_iters.shape))
plt.plot(N_iters, acc_ct)
plt.legend(('View 1', 'View 2', 'Naive Concatenated', 'multiview'))
plt.ylabel("Average Accuracy Over {} Randomizations".format(randomizations))
plt.xlabel('Iterations of Co-Training')
plt.title('When One View is Completely Redundant\nCoTraining Performs Worse Than\nSingle View or View Concatenation')
plt.show()
# ### Performance when one view is inseparable
#
# Here, the 2 class distributions are the following for the first view
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [1, 1]
# - Class 1 covariance: .2*eye(2)
#
# For the second view:
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [0, 0]
# - Class 1 covariance: .2*eye(2)
#
# Labeled examples are chosen randomly from the training set
# +
# Experiment: view 1 separates the classes, view 2 is pure noise (both
# classes share the same distribution).  Sweep the number of co-training
# iterations and average test accuracy over several random data draws.
randomizations = 20   # random data draws per iteration setting
N_per_class = 500     # samples per class per draw
view2_penalty = 'l1'        # logistic-regression settings for the view-2 learner
view2_solver = 'liblinear'  # liblinear is required for l1 penalty in sklearn
N_iters = np.arange(1, 202, 15)   # co-training iteration counts to sweep
acc_ct = [[] for _ in N_iters]    # per-iteration-count co-training accuracies
acc_view1 = []        # single-view / concatenated baselines (filled once)
acc_view2 = []
acc_combined = []
for count, iters in enumerate(N_iters):
    for seed in range(randomizations):
        ######################### Create Data ###########################
        # 2nd positional arg (1) separates the classes in view 1;
        # view2_class2_mean_center=0 collapses both classes onto the same
        # mean in view 2, making that view uninformative.
        View1_train, View2_train, labels_train, labels_train_full, View1_test, View2_test, labels_test = create_data(seed,
                                                                                                                     1,
                                                                                                                     .2,
                                                                                                                     .2,
                                                                                                                     N_per_class,
                                                                                                                     view2_class2_mean_center=0)
        # randomly remove some labels
        # NOTE(review): the seed is fixed at 11 (not `seed`), so the same
        # ~99% of labels are hidden on every draw — confirm this is intended.
        np.random.seed(11)
        remove_idx = np.random.rand(len(labels_train),) < .99
        labels_train[remove_idx] = np.nan   # NaN marks "unlabeled" for CTClassifier
        not_removed = np.where(remove_idx==False)[0]
        # make sure both classes have at least 1 labeled example
        if len(set(labels_train[not_removed])) != 2:
            continue
        if seed == 0 and count == 0:
            scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train)
        ############## Single view semi-supervised learning ##############
        # Only do this calculation once, since not affected by number of iterations
        if count == 0:
            accuracy_view1, accuracy_view2, accuracy_combined = single_view_class(View1_train[not_removed,:].squeeze(),
                                                                                  labels_train[not_removed],
                                                                                  View1_test,
                                                                                  labels_test,
                                                                                  View2_train[not_removed,:].squeeze(),
                                                                                  View2_test,
                                                                                  view2_solver,
                                                                                  view2_penalty)
            acc_view1.append(accuracy_view1)
            acc_view2.append(accuracy_view2)
            acc_combined.append(accuracy_combined)
        ##################### Multiview ########################################
        # One plain logistic regression per view; co-train for `iters` rounds.
        gnb0 = LogisticRegression()
        gnb1 = LogisticRegression(solver=view2_solver, penalty=view2_penalty)
        ctc = CTClassifier(gnb0, gnb1, num_iter=iters)
        ctc.fit([View1_train, View2_train], labels_train)
        y_pred_ct = ctc.predict([View1_test, View2_test])
        acc_ct[count].append((accuracy_score(labels_test, y_pred_ct)))
# Collapse per-draw results: scalars for the baselines, one mean per
# iteration count for co-training.
acc_view1 = np.mean(acc_view1)
acc_view2 = np.mean(acc_view2)
acc_combined = np.mean(acc_combined)
acc_ct = [sum(row) / float(len(row)) for row in acc_ct]
# -
# make a figure from the data
# The three baselines are scalars, so they are drawn as horizontal lines
# across the iteration sweep; co-training accuracy varies with iterations.
plt.figure()
plt.plot(N_iters, acc_view1*np.ones(N_iters.shape))
plt.plot(N_iters, acc_view2*np.ones(N_iters.shape))
plt.plot(N_iters, acc_combined*np.ones(N_iters.shape))
plt.plot(N_iters, acc_ct)
plt.legend(('View 1', 'View 2', 'Naive Concatenated', 'multiview'))
plt.ylabel("Average Accuracy Over {} Randomizations".format(randomizations))
plt.xlabel('Iterations of Co-Training')
plt.title('When One View is Uninformative\nCoTraining Performs Worse Than Single View')
plt.show()
# ### Performance when labeled data is excellent
#
# Here, the 2 class distributions are the following
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [1, 1]
# - Class 1 covariance: .2*eye(2)
#
# Labeled examples are chosen to be very close to the mean of their respective class
# - Normally distributed around their class mean with standard deviation 0.05 in both dimensions
# +
# Experiment: both views are informative, and the only labeled examples are
# synthetic "perfect" points placed very close to their class means.
randomizations = 20    # random data draws per iteration setting
N_per_class = 500      # samples per class per draw
num_perfect = 3        # perfect labeled examples per class
perfect_scale = 0.05   # std-dev of the first perfect point around its mean
view2_penalty = 'l1'
view2_solver = 'liblinear'
N_iters = np.arange(1, 202, 15)   # co-training iteration counts to sweep
acc_ct = [[] for _ in N_iters]
acc_view1 = []
acc_view2 = []
acc_combined = []
for count, iters in enumerate(N_iters):
    for seed in range(randomizations):
        ######################### Create Data ###########################
        np.random.seed(seed)
        view1_mu0 = np.zeros(2,)
        view1_mu1 = np.ones(2,)
        view1_cov = .2*np.eye(2)
        view2_mu0 = np.zeros(2,)
        view2_mu1 = np.ones(2,)
        view2_cov = .2*np.eye(2)
        # generate perfect examples
        # NOTE(review): the view-2 points reuse the view-1 means; the values
        # are identical here (both zeros/ones) so behavior is unaffected.
        perfect_class0_v1 = view1_mu0 + np.random.normal(loc=0, scale=perfect_scale, size=view1_mu0.shape)
        perfect_class0_v2 = view1_mu0 + np.random.normal(loc=0, scale=perfect_scale, size=view1_mu0.shape)
        perfect_class1_v1 = view1_mu1 + np.random.normal(loc=0, scale=perfect_scale, size=view1_mu1.shape)
        perfect_class1_v2 = view1_mu1 + np.random.normal(loc=0, scale=perfect_scale, size=view1_mu1.shape)
        # NOTE(review): subsequent perfect points use a hard-coded scale of
        # 0.01 rather than perfect_scale — confirm this is intended.
        for p in range(1, num_perfect):
            perfect_class0_v1 = np.vstack((perfect_class0_v1, view1_mu0 + np.random.normal(loc=0, scale=0.01, size=view1_mu0.shape)))
            perfect_class0_v2 = np.vstack((perfect_class0_v2, view1_mu0 + np.random.normal(loc=0, scale=0.01, size=view1_mu0.shape)))
            perfect_class1_v1 = np.vstack((perfect_class1_v1, view1_mu1 + np.random.normal(loc=0, scale=0.01, size=view1_mu1.shape)))
            perfect_class1_v2 = np.vstack((perfect_class1_v2, view1_mu1 + np.random.normal(loc=0, scale=0.01, size=view1_mu1.shape)))
        # labels for the perfect points: num_perfect zeros then num_perfect ones
        perfect_labels = np.zeros(num_perfect,)
        perfect_labels = np.concatenate((perfect_labels, np.ones(num_perfect,)))
        # bulk (unlabeled) data for both views
        view1_class0 = np.random.multivariate_normal(view1_mu0, view1_cov, size=N_per_class)
        view1_class1 = np.random.multivariate_normal(view1_mu1, view1_cov, size=N_per_class)
        view2_class0 = np.random.multivariate_normal(view2_mu0, view2_cov, size=N_per_class)
        view2_class1 = np.random.multivariate_normal(view2_mu1, view2_cov, size=N_per_class)
        View1 = np.concatenate((view1_class0, view1_class1))
        View2 = np.concatenate((view2_class0, view2_class1))
        Labels = np.concatenate((np.zeros(N_per_class,), np.ones(N_per_class,)))
        # Split both views into testing and training
        # (same random_state keeps rows aligned across the two views)
        View1_train, View1_test, labels_train_full, labels_test_full = train_test_split(View1, Labels, test_size=0.3, random_state=42)
        View2_train, View2_test, labels_train_full, labels_test_full = train_test_split(View2, Labels, test_size=0.3, random_state=42)
        labels_train = labels_train_full.copy()
        labels_test = labels_test_full.copy()
        # Add the perfect examples
        View1_train = np.vstack((View1_train, perfect_class0_v1, perfect_class1_v1))
        View2_train = np.vstack((View2_train, perfect_class0_v2, perfect_class1_v2))
        labels_train = np.concatenate((labels_train, perfect_labels))
        # randomly remove all but perfect labeled samples
        # remove_idx is a plain bool list: True for every original training
        # row, False for the 2*num_perfect appended perfect rows.
        remove_idx = [True for i in range(len(labels_train)-2*num_perfect)]
        for i in range(2*num_perfect):
            remove_idx.append(False)
        #remove_idx = [False if i < (len(labels_train)-2*num_perfect) else True for i in range(len(labels_train))]
        labels_train[remove_idx] = np.nan
        # NOTE(review): `remove_idx==False` compares a Python list to a bool
        # (scalar False), so this np.where is empty; the arange on the next
        # line supersedes it and yields the perfect-example indices.
        not_removed = np.where(remove_idx==False)[0]
        not_removed = np.arange(len(labels_train)-2*num_perfect, len(labels_train))
        # make sure both classes have at least 1 labeled example
        if len(set(labels_train[not_removed])) != 2:
            continue
        if seed == 0 and count == 0:
            scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train)
        ############## Single view semi-supervised learning ##############
        # Only once, since not affected by "num iters"
        if count == 0:
            accuracy_view1, accuracy_view2, accuracy_combined = single_view_class(View1_train[not_removed,:].squeeze(),
                                                                                  labels_train[not_removed],
                                                                                  View1_test,
                                                                                  labels_test,
                                                                                  View2_train[not_removed,:].squeeze(),
                                                                                  View2_test,
                                                                                  view2_solver,
                                                                                  view2_penalty)
            acc_view1.append(accuracy_view1)
            acc_view2.append(accuracy_view2)
            acc_combined.append(accuracy_combined)
        ##################### Multiview ########################################
        gnb0 = LogisticRegression()
        gnb1 = LogisticRegression(solver=view2_solver, penalty=view2_penalty)
        ctc = CTClassifier(gnb0, gnb1, num_iter=iters)
        ctc.fit([View1_train, View2_train], labels_train)
        y_pred_ct = ctc.predict([View1_test, View2_test])
        acc_ct[count].append((accuracy_score(labels_test, y_pred_ct)))
# Collapse per-draw results to means.
acc_view1 = np.mean(acc_view1)
acc_view2 = np.mean(acc_view2)
acc_combined = np.mean(acc_combined)
acc_ct = [sum(row) / float(len(row)) for row in acc_ct]
# -
# make a figure from the data
# Horizontal lines: single-view / concatenated baselines (scalars);
# curve: co-training accuracy vs. iteration count.
plt.figure()
plt.plot(N_iters, acc_view1*np.ones(N_iters.shape))
plt.plot(N_iters, acc_view2*np.ones(N_iters.shape))
plt.plot(N_iters, acc_combined*np.ones(N_iters.shape))
plt.plot(N_iters, acc_ct)
plt.legend(('View 1', 'View 2', 'Naive Concatenated', 'multiview'))
plt.ylabel("Average Accuracy Over {} Randomizations".format(randomizations))
plt.xlabel('Iterations of Co-Training')
plt.title('When Labeled Data is Extremely Clean\nCoTraining Outperforms Single Views\nbut Naive Concatenation Performs Better')
plt.show()
# ### Performance when labeled data is not very separable
#
# Here, the 2 class distributions are the following
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [1, 1]
# - Class 1 covariance: .2*eye(2)
#
# Labeled examples are chosen to be far from their respective means according to a uniform distribution in 2 dimensions between .2 and .75 away from the x1 or x2 coordinate of the mean
# +
# Experiment: the only labeled examples are deliberately unrepresentative —
# offset from their class mean by a uniform [uniform_min, uniform_max] shift
# toward the opposite class.
randomizations = 20   # random data draws per iteration setting
N_per_class = 500     # samples per class per draw
num_perfect = 2       # (badly placed) labeled examples per class
uniform_min = 0.2     # min / max per-coordinate offset from the class mean
uniform_max = 0.75
view2_penalty = 'l1'
view2_solver = 'liblinear'
N_iters = np.arange(1, 202, 15)   # co-training iteration counts to sweep
acc_ct = [[] for _ in N_iters]
acc_view1 = []
acc_view2 = []
acc_combined = []
for count, iters in enumerate(N_iters):
    for seed in range(randomizations):
        ######################### Create Data ###########################
        np.random.seed(seed)
        view1_mu0 = np.zeros(2,)
        view1_mu1 = np.ones(2,)
        view1_cov = .2*np.eye(2)
        view2_mu0 = np.zeros(2,)
        view2_mu1 = np.ones(2,)
        view2_cov = .2*np.eye(2)
        # generate bad examples
        # class 0 is shifted up/right (+), class 1 down/left (-): both drift
        # toward the other class, making the labeled points unrepresentative.
        perfect_class0_v1 = view1_mu0 + np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)
        perfect_class0_v2 = view1_mu0 + np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)
        perfect_class1_v1 = view1_mu1 - np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)
        perfect_class1_v2 = view1_mu1 - np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)
        for p in range(1, num_perfect):
            perfect_class0_v1 = np.vstack((perfect_class0_v1, view1_mu0 + np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)))
            perfect_class0_v2 = np.vstack((perfect_class0_v2, view1_mu0 + np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)))
            perfect_class1_v1 = np.vstack((perfect_class1_v1, view1_mu1 - np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)))
            perfect_class1_v2 = np.vstack((perfect_class1_v2, view1_mu1 - np.random.uniform(uniform_min, uniform_max, size=view1_mu0.shape)))
        # labels for the bad points: num_perfect zeros then num_perfect ones
        perfect_labels = np.zeros(num_perfect,)
        perfect_labels = np.concatenate((perfect_labels, np.ones(num_perfect,)))
        # bulk (unlabeled) data for both views
        view1_class0 = np.random.multivariate_normal(view1_mu0, view1_cov, size=N_per_class)
        view1_class1 = np.random.multivariate_normal(view1_mu1, view1_cov, size=N_per_class)
        view2_class0 = np.random.multivariate_normal(view2_mu0, view2_cov, size=N_per_class)
        view2_class1 = np.random.multivariate_normal(view2_mu1, view2_cov, size=N_per_class)
        View1 = np.concatenate((view1_class0, view1_class1))
        View2 = np.concatenate((view2_class0, view2_class1))
        Labels = np.concatenate((np.zeros(N_per_class,), np.ones(N_per_class,)))
        # Split both views into testing and training
        # (same random_state keeps rows aligned across the two views)
        View1_train, View1_test, labels_train_full, labels_test_full = train_test_split(View1, Labels, test_size=0.3, random_state=42)
        View2_train, View2_test, labels_train_full, labels_test_full = train_test_split(View2, Labels, test_size=0.3, random_state=42)
        labels_train = labels_train_full.copy()
        labels_test = labels_test_full.copy()
        # Add the perfect examples
        View1_train = np.vstack((View1_train, perfect_class0_v1, perfect_class1_v1))
        View2_train = np.vstack((View2_train, perfect_class0_v2, perfect_class1_v2))
        labels_train = np.concatenate((labels_train, perfect_labels))
        # randomly remove all but perfect labeled samples
        # (bool-list mask: True = hide label, False = keep the appended points)
        remove_idx = [True for i in range(len(labels_train)-2*num_perfect)]
        for i in range(2*num_perfect):
            remove_idx.append(False)
        labels_train[remove_idx] = np.nan
        # NOTE(review): `remove_idx==False` compares a list to a bool, so this
        # np.where is empty; the arange on the next line supersedes it.
        not_removed = np.where(remove_idx==False)[0]
        not_removed = np.arange(len(labels_train)-2*num_perfect, len(labels_train))
        # make sure both classes have at least 1 labeled example
        if len(set(labels_train[not_removed])) != 2:
            continue
        if seed == 0 and count == 0:
            scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train)
        ############## Single view semi-supervised learning ##############
        # Only once, since not affected by "num iters"
        if count == 0:
            accuracy_view1, accuracy_view2, accuracy_combined = single_view_class(View1_train[not_removed,:].squeeze(),
                                                                                  labels_train[not_removed],
                                                                                  View1_test,
                                                                                  labels_test,
                                                                                  View2_train[not_removed,:].squeeze(),
                                                                                  View2_test,
                                                                                  view2_solver,
                                                                                  view2_penalty)
            acc_view1.append(accuracy_view1)
            acc_view2.append(accuracy_view2)
            acc_combined.append(accuracy_combined)
        ##################### Multiview ########################################
        gnb0 = LogisticRegression()
        gnb1 = LogisticRegression(solver=view2_solver, penalty=view2_penalty)
        ctc = CTClassifier(gnb0, gnb1, num_iter=iters)
        ctc.fit([View1_train, View2_train], labels_train)
        y_pred_ct = ctc.predict([View1_test, View2_test])
        acc_ct[count].append((accuracy_score(labels_test, y_pred_ct)))
# Collapse per-draw results to means.
acc_view1 = np.mean(acc_view1)
acc_view2 = np.mean(acc_view2)
acc_combined = np.mean(acc_combined)
acc_ct = [sum(row) / float(len(row)) for row in acc_ct]
# -
# make a figure from the data
# Horizontal lines: single-view / concatenated baselines (scalars);
# curve: co-training accuracy vs. iteration count.
plt.figure()
plt.plot(N_iters, acc_view1*np.ones(N_iters.shape))
plt.plot(N_iters, acc_view2*np.ones(N_iters.shape))
plt.plot(N_iters, acc_combined*np.ones(N_iters.shape))
plt.plot(N_iters, acc_ct)
plt.legend(('View 1', 'View 2', 'Naive Concatenated', 'multiview'))
plt.ylabel("Average Accuracy Over {} Randomizations".format(randomizations))
plt.xlabel('Iterations of Co-Training')
plt.title('When Labeled Examples are Not Representative\nCoTraining Does Poorly, as Expected')
plt.show()
# ### Performance when data is overlapping
#
# Here, the 2 class distributions are the following
# - Class 0 mean: [0, 0]
# - Class 0 covariance: .2*eye(2)
# - Class 1 mean: [0, 0]
# - Class 1 covariance: .2*eye(2)
#
# Labeled examples are chosen randomly from the training set
# +
# Experiment: the two classes fully overlap in BOTH views (same mean and
# covariance), so no classifier can beat chance.
randomizations = 20   # random data draws per iteration setting
N_per_class = 500     # samples per class per draw
view2_penalty = 'l1'
view2_solver = 'liblinear'
class2_mean_center = 0 # 1 would make this identical to first test
N_iters = np.arange(1, 202, 15)   # co-training iteration counts to sweep
acc_ct = [[] for _ in N_iters]
acc_view1 = []
acc_view2 = []
acc_combined = []
for count, iters in enumerate(N_iters):
    for seed in range(randomizations):
        ######################### Create Data ###########################
        # 2nd positional arg (0) collapses the class means in view 1 too,
        # unlike the earlier experiments.
        View1_train, View2_train, labels_train, labels_train_full, View1_test, View2_test, labels_test = create_data(seed,
                                                                                                                     0,
                                                                                                                     .2,
                                                                                                                     .2,
                                                                                                                     N_per_class,
                                                                                                                     view2_class2_mean_center=class2_mean_center)
        # randomly remove some labels
        # NOTE(review): fixed seed 11 (not `seed`) — same labels hidden every draw.
        np.random.seed(11)
        remove_idx = np.random.rand(len(labels_train),) < .99
        labels_train[remove_idx] = np.nan   # NaN marks "unlabeled" for CTClassifier
        not_removed = np.where(remove_idx==False)[0]
        # make sure both classes have at least 1 labeled example
        if len(set(labels_train[not_removed])) != 2:
            continue
        if seed == 0 and count == 0:
            scatterplot_classes(not_removed, labels_train, labels_train_full, View1_train, View2_train)
        ############## Single view semi-supervised learning ##############
        # Only once, since not affected by "num iters"
        if count == 0:
            accuracy_view1, accuracy_view2, accuracy_combined = single_view_class(View1_train[not_removed,:].squeeze(),
                                                                                  labels_train[not_removed],
                                                                                  View1_test,
                                                                                  labels_test,
                                                                                  View2_train[not_removed,:].squeeze(),
                                                                                  View2_test,
                                                                                  view2_solver,
                                                                                  view2_penalty)
            acc_view1.append(accuracy_view1)
            acc_view2.append(accuracy_view2)
            acc_combined.append(accuracy_combined)
        ##################### Multiview ########################################
        gnb0 = LogisticRegression()
        gnb1 = LogisticRegression(solver=view2_solver, penalty=view2_penalty)
        ctc = CTClassifier(gnb0, gnb1, num_iter=iters)
        ctc.fit([View1_train, View2_train], labels_train)
        y_pred_ct = ctc.predict([View1_test, View2_test])
        acc_ct[count].append((accuracy_score(labels_test, y_pred_ct)))
# Collapse per-draw results to means.
acc_view1 = np.mean(acc_view1)
acc_view2 = np.mean(acc_view2)
acc_combined = np.mean(acc_combined)
acc_ct = [sum(row) / float(len(row)) for row in acc_ct]
# -
# make a figure from the data
# Horizontal lines: single-view / concatenated baselines (scalars);
# curve: co-training accuracy vs. iteration count.
plt.figure()
plt.plot(N_iters, acc_view1*np.ones(N_iters.shape))
plt.plot(N_iters, acc_view2*np.ones(N_iters.shape))
plt.plot(N_iters, acc_combined*np.ones(N_iters.shape))
plt.plot(N_iters, acc_ct)
plt.legend(('View 1', 'View 2', 'Naive Concatenated', 'multiview'))
plt.ylabel("Average Accuracy Over {} Randomizations".format(randomizations))
plt.xlabel('Iterations of Co-Training')
plt.title('When Both Views Have Overlapping Data\nCoTraining Performs with Chance, as Expected')
plt.show()
# ### Performance as labeled data proportion (essentially sample size) is varied
# +
# Load two classes (digits 0 and 1) from the UCI multiple-features dataset.
data, labels = load_UCImultifeature(select_labeled=[0,1])
# Use only the first 2 views as an example
View0, View1 = data[0], data[1]
# Split both views into testing and training
# (identical random_state keeps the row order aligned between views)
View0_train, View0_test, labels_train_full, labels_test_full = train_test_split(View0, labels, test_size=0.33, random_state=42)
View1_train, View1_test, labels_train_full, labels_test_full = train_test_split(View1, labels, test_size=0.33, random_state=42)
# Do PCA to visualize data
# NOTE(review): the same PCA object is refit for the second view, so
# View0_pca and View1_pca come from independent fits — fine for plotting.
pca = PCA(n_components = 2)
View0_pca = pca.fit_transform(View0_train)
View1_pca = pca.fit_transform(View1_train)
# split the projected points by class for coloring
View0_pca_class0 = View0_pca[np.where(labels_train_full==0)[0],:]
View0_pca_class1 = View0_pca[np.where(labels_train_full==1)[0],:]
View1_pca_class0 = View1_pca[np.where(labels_train_full==0)[0],:]
View1_pca_class1 = View1_pca[np.where(labels_train_full==1)[0],:]
# plot the views
# NOTE(review): plt.figure() immediately followed by plt.subplots() opens an
# extra empty figure window.
plt.figure()
fig, ax = plt.subplots(1,2, figsize=(14,5))
ax[0].scatter(View0_pca_class0[:,0], View0_pca_class0[:,1])
ax[0].scatter(View0_pca_class1[:,0], View0_pca_class1[:,1])
ax[0].set_title('2 Component PCA of Full View 1 (Fourier Coefficients) Training Data')
ax[0].legend(('Class 0', 'Class 1'))
ax[1].scatter(View1_pca_class0[:,0], View1_pca_class0[:,1])
ax[1].scatter(View1_pca_class1[:,0], View1_pca_class1[:,1])
ax[1].set_title('2 Component PCA of Full View 2 (Profile Correlations) Training Data')
ax[1].legend(('Class 0', 'Class 1'))
plt.show()
# +
# Sweep the fraction of labeled training data (i) while capping the number
# of labeled examples actually used (num); average results over `iters`
# random label-removal trials per setting.
N_labeled_full = []   # mean labeled fraction per sweep point
acc_ct_full = []      # mean co-training accuracy per sweep point
acc_v0_full = []      # mean view-0-only accuracy per sweep point
acc_v1_full = []      # mean view-1-only accuracy per sweep point
iters = 500           # trials per sweep point (NOT co-training iterations here)
for i, num in zip(np.linspace(0.03, .30, 20), (np.linspace(4, 30, 20)).astype(int)):
    N_labeled = []
    acc_ct = []
    acc_v0 = []
    acc_v1 = []
    View0_train, View0_test, labels_train_full, labels_test_full = train_test_split(View0, labels, test_size=0.33, random_state=42)
    View1_train, View1_test, labels_train_full, labels_test_full = train_test_split(View1, labels, test_size=0.33, random_state=42)
    for seed in range(iters):
        labels_train = labels_train_full.copy()
        labels_test = labels_test_full.copy()
        # Randomly remove all but a small percentage of the labels
        np.random.seed(2*seed) #6
        remove_idx = np.random.rand(len(labels_train),) < 1-i
        labels_train[remove_idx] = np.nan
        not_removed = np.where(remove_idx==False)[0]
        not_removed = not_removed[:num]   # cap the labeled count at `num`
        N_labeled.append(len(labels_train[not_removed])/len(labels_train))
        # skip trials where only one class survived the removal
        if len(set(labels_train[not_removed])) != 2:
            continue
        # NOTE(review): `Reverse_Labels` is not defined in this cell —
        # presumably set earlier in the notebook; the two index arrays
        # computed here are never used afterward. Confirm this is leftover.
        if Reverse_Labels:
            labels_one_idx = np.argwhere(labels_train == 1)
            labels_zero_idx = np.argwhere(labels_train == 0)
        ############## Single view semi-supervised learning ##############
        #-----------------------------------------------------------------
        gnb0 = GaussianNB()
        gnb1 = GaussianNB()
        # Train on only the examples with labels
        gnb0.fit(View0_train[not_removed,:].squeeze(), labels_train[not_removed])
        y_pred0 = gnb0.predict(View0_test)
        gnb1.fit(View1_train[not_removed,:].squeeze(), labels_train[not_removed])
        y_pred1 = gnb1.predict(View1_test)
        acc_v0.append(accuracy_score(labels_test, y_pred0))
        acc_v1.append(accuracy_score(labels_test, y_pred1))
        ######### Multi-view co-training semi-supervised learning #########
        #------------------------------------------------------------------
        # Train a CTClassifier on all the labeled and unlabeled training data
        ctc = CTClassifier()
        ctc.fit([View0_train, View1_train], labels_train)
        y_pred_ct = ctc.predict([View0_test, View1_test])
        acc_ct.append(accuracy_score(labels_test, y_pred_ct))
    # record the per-sweep-point means
    acc_ct_full.append(np.mean(acc_ct))
    acc_v0_full.append(np.mean(acc_v0))
    acc_v1_full.append(np.mean(acc_v1))
    N_labeled_full.append(np.mean(N_labeled))
# +
# Plot accuracy vs. labeled-data proportion for the two single-view
# baselines and the default CTClassifier (red).
matplotlib.rcParams.update({'font.size': 12})
plt.figure()
plt.plot(N_labeled_full, acc_v0_full)
plt.plot(N_labeled_full, acc_v1_full)
plt.plot(N_labeled_full, acc_ct_full,"r")
plt.legend(("Fourier Coefficients Only:\nsklearn Gaussian Naive Bayes", "Profile Correlations Only:\nsklearn Gaussian Naive Bayes", "Using Both Views:\nmultiview CTClassifier (default)"))
plt.title("Semi-Supervised Classification Accuracy with\nCTClassifier (default Naive Bayes)")
plt.xlabel("Labeled Data Proportion")
plt.ylabel("Average Accuracy on Test Data: {} Trials".format(iters))
#plt.savefig('AvgAccuracy_CTClassifier.png', bbox_inches='tight')
plt.show()
| docs/tutorials/semi_supervised/cotraining_classification_simulatedperformance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
from matplotlib import pyplot as plt
# + pycharm={"name": "#%%\n"}
# Load the image as grayscale (flag 0) and run Canny edge detection with
# hysteresis thresholds 100 (low) and 200 (high).
# NOTE(review): cv2.imread returns None if the file is missing — no check here.
img = cv2.imread('apples.jpg',0)
edges = cv2.Canny(img,100,200)
# + pycharm={"name": "#%%\n"}
# Show the original and the edge map side by side with matplotlib.
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()
# + pycharm={"name": "#%%\n"}
# Also display in a native OpenCV window; waitKey(0) blocks until a key press.
cv2.imshow("edge_image", edges)
cv2.waitKey(0)
# + pycharm={"name": "#%%\n"}
cv2.destroyAllWindows()
| opencv/core/canny_edge_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nama
# language: python
# name: nama
# ---
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
import umap
import wandb
from src.data.filesystem import fopen
from src.data.utils import load_dataset, select_frequent_k
from src.eval import metrics
from src.models.swivel import SwivelModel, get_swivel_embeddings, get_best_swivel_matches
from src.models.utils import remove_padding, add_padding
# + pycharm={"is_executing": true}
# config
# Paths and hyperparameters for visualizing pretrained swivel name embeddings.
plt.rcParams["figure.figsize"] = [12, 12]
given_surname = "given"   # "given" or (implicitly) "surname" — picks vocab size
vocab_size = 610000 if given_surname == "given" else 2100000
embed_dim = 100           # embedding dimensionality of the trained model
Config = namedtuple("Config", [
    "train_path",
    "freq_path",
    "embed_dim",
    "swivel_vocab_path",
    "swivel_model_path",
])
config = Config(
    train_path=f"s3://familysearch-names/processed/tree-hr-{given_surname}-train.csv.gz",
    freq_path=f"s3://familysearch-names/processed/tree-preferred-{given_surname}-aggr.csv.gz",
    embed_dim=embed_dim,
    swivel_vocab_path=f"s3://nama-data/data/models/fs-{given_surname}-swivel-vocab-{vocab_size}-augmented.csv",
    swivel_model_path=f"s3://nama-data/data/models/fs-{given_surname}-swivel-model-{vocab_size}-{embed_dim}-augmented.pth",
)
# + pycharm={"name": "#%%\n"}
# Start a wandb run so the notebook execution is tracked with its config.
wandb.init(
    project="nama",
    entity="nama",
    name="71_visualize_embeddings",
    group=given_surname,
    notes="",
    config=config._asdict(),
)
# -
# ### Load data
# + pycharm={"name": "#%%\n"}
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# CPU is forced here; uncomment the line above to use CUDA when available.
device="cpu"
print(device)
# -
# Load the evaluation dataset, name frequencies, vocabulary and the
# pretrained swivel model weights.
input_names_eval, weighted_actual_names_eval, candidate_names_eval = \
    load_dataset(config.train_path, is_eval=True)
freq_df = pd.read_csv(config.freq_path, na_filter=False)
print(len(freq_df))
vocab_df = pd.read_csv(fopen(config.swivel_vocab_path, "rb"))
# name -> integer id, as used by the swivel model
swivel_vocab = {name: _id for name, _id in zip(vocab_df["name"], vocab_df["index"])}
swivel_model = SwivelModel(len(swivel_vocab), config.embed_dim)
swivel_model.load_state_dict(torch.load(fopen(config.swivel_model_path, "rb")))
swivel_model.eval()
swivel_model.to(device)
# Encoder model is disabled for this analysis; embeddings come straight
# from the swivel model.
encoder_model = None
# encoder_model = SwivelEncoderModel(n_layers=encoder_layers, output_dim=config.embed_dim, device=device)
# encoder_model.load_state_dict(torch.load(fopen(config.encoder_model_path, "rb"), map_location=torch.device(device)))
# encoder_model.to(device)
# encoder_model.eval()
# ### PR Curve
# +
# input_names_sample, weighted_actual_names_sample, candidate_names_sample = \
# select_frequent_k(input_names_eval, weighted_actual_names_eval, candidate_names_eval,
# 50000)
# -
# Restrict evaluation to the 10k most frequent names (padded to match the
# padded form used in the eval set).
freq_names = set(add_padding(name) for name in freq_df["name"][:10000])
input_names_sample = []
weighted_actual_names_sample = []
# weighted_actual_names_temp = []
for pos, name in enumerate(input_names_eval):
    if name not in freq_names:
        continue
    # pos = input_names_eval.index(name)
    input_names_sample.append(name)
    weighted_actual_names_sample.append(weighted_actual_names_eval[pos])
    # weighted_actual_names_temp.append(weighted_actual_names_eval[pos])
# Alternative (disabled): re-weight and truncate actual-name lists to top 10.
# weighted_actual_names_sample = []
# candidate_names_sample = set()
# for wans in weighted_actual_names_temp:
# wans = sorted(wans, key=lambda wan: -wan[2])[:10]
# sum_freq = sum(freq for _, _, freq in wans)
# wans = [(name, freq / sum_freq, freq) for name, _, freq in wans]
# weighted_actual_names_sample.append(wans)
# candidate_names_sample.update([name for name, _, _ in wans])
# candidate_names_sample = np.array(list(candidate_names_sample))
candidate_names_sample = candidate_names_eval
# Sanity prints: sample sizes and a spot check of one name's frequency.
print(len(input_names_eval))
print(len(candidate_names_eval))
print(len(input_names_sample))
print(len(weighted_actual_names_sample))
print(len(candidate_names_sample))
print(input_names_sample[:100])
print(freq_df[freq_df["name"] == 'aagje'])
# get best matches
# NOTE: only considers as potential matches names in candidate_names_eval, not names in input_names_eval
k = 200                  # matches retrieved per input name
eval_batch_size = 1024   # batch size for embedding lookups
add_context = True
n_jobs=1
best_matches = get_best_swivel_matches(swivel_model,
                                       swivel_vocab,
                                       input_names_sample,
                                       candidate_names_sample,
                                       k,
                                       eval_batch_size,
                                       add_context=add_context,
                                       n_jobs=n_jobs)
# Precision / weighted-recall curve over similarity thresholds
# (distances=False: higher score = better match).
metrics.precision_weighted_recall_curve_at_threshold(
    weighted_actual_names_sample, best_matches, min_threshold=0.01, max_threshold=1.0, step=0.05, distances=False
)
# Pick every `step`-th sampled input name (up to `total` of them) and
# collect it plus its top ~20 matches for the embedding plot below.
input_names_graphed = set()
candidate_names_graphed = set()
step = 10
total = 20
for i, (name, matches, wans) in enumerate(zip(input_names_sample, best_matches, weighted_actual_names_sample)):
    if i % step != 0:
        continue
    print(name)
    input_names_graphed.add(name)
    candidate_names_graphed.add(name)
    # actual-name -> frequency lookup (comprehension `name` shadows the
    # loop variable only inside the comprehension scope)
    true_names = {name: freq for name, _, freq in wans}
    for j , (match, score) in enumerate(matches):
        # "*" marks matches that are true actual names
        print(" * " if match in true_names.keys() else "   ", j, match, score, true_names.get(match, 0))
        candidate_names_graphed.add(match)
        if j >= 20:
            break
    if i >= step * total:
        break
candidate_names_graphed = np.array(list(candidate_names_graphed))
print("input_names_graphed", len(input_names_graphed), input_names_graphed)
print(len(candidate_names_graphed))
candidate_names_graphed.shape
# ### Get embeddings
# Look up swivel embeddings for the selected names.
embeddings = get_swivel_embeddings(
    swivel_model,
    swivel_vocab,
    candidate_names_graphed,
)
# ### Use umap to reduce dimensionality
reducer = umap.UMAP()
reduced = reducer.fit_transform(embeddings)   # (n_names, 2)
reduced.shape
# ### Plot embeddings
# Scatter the 2-D projection and label every point with its name.
xs = list(x for x, _ in reduced)
ys = list(y for _, y in reduced)
plt.scatter(xs, ys)
for ix, name in enumerate(candidate_names_graphed):
    plt.annotate(name, xy=(xs[ix], ys[ix]), xytext=(5, 2),
                 textcoords='offset points', ha='right', va='bottom')
# + pycharm={"name": "#%%\n"}
# Close out the wandb run.
wandb.finish()
# -
| notebooks/71_analyze_embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# %config InlineBackend.figure_format = 'retina'
#plt.style.use('seaborn-dark')
# Wide, short canvas suits the road-style bird's-eye layout below.
plt.rcParams['figure.figsize'] = [20, 5]
import pickle
# example_data: dict keyed by track id; each value is a per-track dict of
# tracker outputs (see the keys accessed in the plotting cell below).
with open('example_data.pkl', 'rb') as f:
    example_data = pickle.load(f)
example_data
# Detector class index -> human-readable name (index into this list with
# the track's integer 'class_index').
class_name = [
    'BICYCLE',
    'BUS',
    'CAR',
    'CYCLIST',
    'MOTORCYCLE',
    'PEDESTRIAN',
    'TRUCK'
]
# Per-class matplotlib marker symbol used in the bird's-eye plot.
class_marker_shape = {
    'BICYCLE': '8',
    'BUS': 'p',
    'CAR': 'X',
    'CYCLIST': 'o',
    'MOTORCYCLE': 'D',
    'PEDESTRIAN': '*',
    'TRUCK': 'v',
}
# +
# Bird's-eye visualization: ego cyclist at the origin, x = forward/rear
# distance, y = left/right distance.  Each tracked object is drawn with a
# velocity arrow, class marker, label and time-to-encounter text.
cyclist_img = mpimg.imread('cyclist-top.png')
imagebox = OffsetImage(cyclist_img, zoom=0.08)
ab = AnnotationBbox(imagebox, (0.0, 0.0), frameon=False)   # ego at origin
fig, ax = plt.subplots()
plt.axis([-75,75,-5,5])
plt.xlabel('FORWARD(+) / REAR(-) distance')
plt.ylabel('LEFT(+) / RIGHT(-) distance')
ax.set_facecolor('silver')
ax.add_artist(ab)
# vertical line representing overtake line
plt.axline((0,0),(0,1), alpha=0.2, color='red', linestyle='--', linewidth=10)
# horizontal line representing road
plt.axline((0,0),(1,0), alpha=1.0, color='white', linestyle='-', linewidth=130)
for track_id in example_data:
    point = example_data[track_id]
    # Kalman-filter predicted position and velocity (first element of each series)
    filter_x = point['filter_pred_x'][0]
    filter_y = point['filter_pred_y'][0]
    filter_dxdt = point['filter_pred_dxdt'][0]
    filter_dydt = point['filter_pred_dydt'][0]
    # filter predictions arrow
    # NOTE(review): the f-prefix below is unnecessary (no placeholders), and
    # this annotate arrow ends at the ABSOLUTE point (2*dx, 2*dy) while the
    # plt.arrow below draws a RELATIVE velocity arrow from the same start —
    # confirm both arrows are intended.
    prop = dict(arrowstyle=f"-|>,head_width=1,head_length=1,width=0.5", shrinkA=0, shrinkB=0)
    plt.annotate("",
                 xy=(2*filter_dxdt,2*filter_dydt),
                 xytext=(filter_x, filter_y),
                 arrowprops=prop,
                 zorder=999)
    # velocity arrow, scaled 3x; red-ish when flagged dangerous
    plt.arrow(
        filter_x,
        filter_y,
        3*filter_dxdt,
        3*filter_dydt,
        width=0.5,
        head_width=1.5,
        head_length=1.5,
        alpha=0.6,
        fill=True,
        color = 'orangered' if point['dangerous'] else 'skyblue',
        zorder=999)
    # text on filter prediction arrow
    plt.plot(
        filter_x,
        filter_y,
        color = 'orangered' if point['dangerous'] else 'skyblue',
        marker=class_marker_shape[class_name[int(point['class_index'])]],
        markersize=15.0,
        alpha=0.6
    )
    # 'CYCLIST 11' text
    plt.annotate(
        f"{class_name[int(point['class_index'])]} {int(track_id)}",
        (filter_x-3, filter_y+2),
        fontweight=600,
        fontsize=15.0,
        alpha=0.8
    )
    # TTE text
    plt.annotate(
        f"{round(point['TTE'][0],1)}s away",
        (filter_x-2.5, filter_y-3),
        fontweight=400,
        fontsize=15.0,
        alpha=0.8,
    )
    # raw current point
    # (raw detection, drawn larger and darker than the filtered point)
    plt.plot(
        point['raw_x'],
        point['raw_y'],
        color = 'maroon' if point['dangerous'] else 'steelblue',
        marker=class_marker_shape[class_name[int(point['class_index'])]],
        markersize=20.0,
        alpha=0.9
    )
| computer_vision/subvision/intercept/birds_eye_visualization_development.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# # Importing dataset
from sklearn import datasets
# Load the classic iris dataset: 150 samples, 4 features, 3 classes (0-2).
iris = datasets.load_iris()
#print(iris)
data=iris.data      # (150, 4) feature matrix
target=iris.target  # (150,) class labels
#print(data,target)
# +
#print(data)
# -
# # Ploting the data and target values.
# +
# As seen in the iris dataset, its target only includes 3 classes (0, 1 and 2).
# Scatter the first three feature columns against the class label.
import matplotlib.pyplot as plt
plt.scatter(data[:,0], target)
plt.scatter(data[:,1], target)
plt.scatter(data[:,2], target)
plt.ylabel('Type of flowers')
plt.xlabel('Parameters')
# -
# # Dividing data into training and testing dataset
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=42)
# # Accuracy function
def accuracy(y_pred, y_test):
    """Print and return the percentage of predictions matching the truth.

    Parameters
    ----------
    y_pred : sequence
        Predicted labels.
    y_test : sequence
        True labels, compared element-wise with ``y_pred``.

    Returns
    -------
    float
        Accuracy in percent.  Returns 0.0 for empty input instead of
        raising ZeroDivisionError (fixes a crash in the original).
    """
    if len(y_pred) == 0:
        # Guard: the formula below divides by len(y_pred).
        pct = 0.0
    else:
        correct = sum(p == t for p, t in zip(y_pred, y_test))
        pct = correct / len(y_pred) * 100
    # Same message format as before, so notebook output is unchanged.
    print("Accuracy=", pct)
    return pct
from sklearn.metrics import accuracy_score,confusion_matrix, f1_score
import seaborn as sns
# NOTE(review): `y_pred` is not defined by any earlier cell in this file's
# order — this cell presumably runs AFTER one of the classifier cells below
# assigns a prediction; confirm the intended execution order.
print("accuracy score:",accuracy_score(y_test, y_pred))
print("confusion matrix:\n",confusion_matrix(y_test, y_pred))
print("f1 score:",f1_score(y_test, y_pred, average='macro'))
# using heatmap to plot the correlation between predicted and true labels
a=np.array(y_pred).reshape(-1,1)
b=np.array(y_test).reshape(-1,1)
df=pd.DataFrame(np.append(a,b,axis=1))
df.columns=["predicted_vals","true_vals"]
cor = df.corr()
sns.heatmap(cor)
#to use scatter plot uncomment the below given code
#plt.scatter(y_test,y_pred)
plt.show()
# # 1) Using RandomForestClassifier from sklearn.ensemble to generate, fit the model and predict the output.
# 100 bootstrapped trees, sqrt(n_features) features considered per split.
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100, bootstrap = True, max_features = 'sqrt')
model.fit(x_train,y_train)
y_pred_randF= model.predict(x_test)
y_pred_randF=y_pred_randF.tolist()
# # 2) Using Naive Bayes from sklearn.ensemble to generate, fit the model and predict the output.
# Gaussian NB: assumes each feature is normally distributed per class.
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred_naiveBayes = gnb.fit(x_train, y_train).predict(x_test)
# # 3) Using Support Vector Machine from sklearn.ensemble to generate, fit the model and predict the output.
# Default RBF-kernel SVC.
from sklearn import svm
clf = svm.SVC()
clf.fit(x_train, y_train)
y_pred_SVM=clf.predict(x_test)
# # 4) Using Stochastic Gradient Descent from sklearn.ensemble to generate, fit the model and predict the output.
# Linear SVM trained by SGD (hinge loss, l2 regularization, 5 epochs).
# Note: `clf` is deliberately rebound here, shadowing the SVC above.
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss="hinge", penalty="l2", max_iter=5)
clf.fit(x_train, y_train)
y_pred_SGD=clf.predict(x_test)
# # 5)Using KNN from sklearn.ensemble to generate, fit the model and predict the output.
# 2-nearest-neighbor majority vote.
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier(n_neighbors=2)
neigh.fit(x_train,y_train)
y_pred_KNN=neigh.predict(x_test)
# Print the accuracy of every model using the helper defined above.
print("Random Forest Accuracy")
accuracy(y_pred_randF,y_test)
print("\nNaive Bayes Accuracy")
accuracy(y_pred_naiveBayes,y_test)
print("\nSupport Vector Machine Accuracy")
accuracy(y_pred_SVM,y_test)
print("\nStochastic Gradient Descent Accuracy")
accuracy(y_pred_SGD,y_test)
print("\nKNN Accuracy")
accuracy(y_pred_KNN,y_test)
| Classification/1) Iris/.ipynb_checkpoints/1) Iris Dateset Solution-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import pandas as pd
import numpy as np
from random import shuffle
data = pd.read_csv('sampleData.csv', header=0)
data
dinner = data.loc[:,'Name':'VeggieGalaxy']
brunch = data.loc[:,'Area4':]
names = data['Name']
brunch = pd.concat([names,brunch], axis=1)
# +
cap = 3
kika = []
thelonious = []
smokeshop =[]
ole = []
meadhall = []
indiapav = []
veggieGalaxy = []
names = data['Name'].tolist()
assigned = []
unassigned = []
assignmentLists = [kika,thelonious,smokeshop,ole,meadhall,indiapav,veggieGalaxy]
restaurantNames = dinner.columns[1:].tolist()
assignmentDict = dict(zip(restaurantNames, assignmentLists))
zeroes = [0]*len(names)
preferenceScoreDict = dict(zip(names, zeroes))
# -
# +
for col in dinner.columns[1:]:
d = dinner[col]
i = 0
while i < len(d):
if d[i] == 1:
#assign first preference
assignmentDict[col].append(names[i])
#mark the person as assigned.
assigned.append(names[i])
#update the person's cumulative preference score
preferenceScoreDict[names[i]] += 1
i += 1
#Now all first choices have been granted, check whether any restaurants are over capacity
def dinnerCapacityCheck():
    """Classify every dinner restaurant as overassigned, underassigned, or full.

    Compares the current assignment count for each restaurant in the
    module-level `restaurantNames` (looked up in `assignmentDict`) against
    the capacity `cap`, prints the status of each restaurant, and returns
    the three name lists: (overassigned, underassigned, full).
    """
    over, under, full = [], [], []
    for rest in restaurantNames:
        count = len(assignmentDict[rest])
        if count > cap:
            print(str(rest) + ' is overassigned')
            over.append(rest)
        elif count < cap:
            print(str(rest) + ' is underassigned')
            under.append(rest)
        else:
            # count == cap: exactly at capacity
            print(str(rest) + ' is full')
            full.append(rest)
    return over, under, full
dinnerOverassigned, dinnerUnderassigned, dinnerFull = dinnerCapacityCheck()
dinnerDone=False
if len(dinnerOverassigned) == 0:
#everyone gets their first choice, ideal solution.
dinnerDone = True
for name in restaurantNames:
print("The following have been assigned to " + str(name) + ":")
for x in assignmentDict[name]:
print(str(x) + '\n')
elif len(dinnerOverassigned) > 0:
#print(dinnerOverassigned)
#print(assignmentDict)
#some people will have to be reassigned.
#for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
for rest in dinnerOverassigned:
dinnerList = assignmentDict[rest]
bumpNum = len(dinnerList)-cap
gotBumped = np.random.choice(dinnerList, bumpNum)
for b in gotBumped:
assignmentDict[rest].remove(b) #kick that dude out
assigned.remove(b) #mark them unassigned
unassigned.append(b)
preferenceScoreDict[b] += 1 #increment their preference score
#print(assignmentDict)
#assign bumped people to their second choice.
#print(unassigned)
for x in unassigned:
if x in assigned:
print(str(x) + " is already assigned.")
unassigned.remove(x)
continue
#print(x)
i = names.index(x)
row = dinner.ix[i] #this should be the row of the the bumped person
#reassign them to their second choice
j=0
#find the row index of this person who must be reassigned.
while j < len(dinner['Name']):
if dinner['Name'][j] == x:
break
j += 1
#Search for which restaurant was their second choice.
for col in dinner.columns[1:]:
d = dinner[col][j]
if d == 2:
#assign second preference
assignmentDict[col].append(names[j])
#mark the person as assigned.
if names[j] not in assigned:
assigned.append(names[j])
for a in assigned:
if a in unassigned:
unassigned.remove(a)
dinnerOverassigned, dinnerUnderassigned, dinnerFull = dinnerCapacityCheck()
# -
print(preferenceScoreDict)
# +
#now second choices have been assigned.
if len(dinnerOverassigned) == 0 and dinnerDone==False:
dinnerDone = True
for name in restaurantNames:
print("The following have been assigned to " + str(name) + ":")
for x in assignmentDict[name]:
print(str(x) + '\n')
#consider bumping people to third choice.
elif len(dinnerOverassigned) > 0 and dinnerDone==False:
#some people will have to be reassigned.
#for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
for rest in dinnerOverassigned:
dinnerList = assignmentDict[rest]
bumpNum = len(dinnerList)-cap
bumpOrder = random.shuffle(dinnerList) #want to preferentially bump people who haven't yet been bumped.
gotBumped = []
for x in bumpOrder:
if len(gotBumped) == bumpNum:
break
if preferenceScoreDict[x] == 1:
gotBumped.append(x)
else:
continue
for b in gotBumped:
assignmentDict[rest].remove(b) #kick that dude out
assigned.remove(b) #mark them unassigned
unassigned.append(b)
preferenceScoreDict[b] += 1 #increment their preference score
#print(assignmentDict)
#assign bumped people to their second choice.
#print(unassigned)
for x in unassigned:
if x in assigned:
print(str(x) + " is already assigned.")
unassigned.remove(x)
continue
#print(x)
i = names.index(x)
row = dinner.ix[i] #this should be the row of the the bumped person
#reassign them to their second choice
j=0
#find the row index of this person who must be reassigned.
while j < len(dinner['Name']):
if dinner['Name'][j] == x:
break
j += 1
#Search for which restaurant was their third choice.
for col in dinner.columns[1:]:
d = dinner[col][j]
if d == 3:
#assign second preference
assignmentDict[col].append(names[j])
#mark the person as assigned.
if names[j] not in assigned:
assigned.append(names[j])
for a in assigned:
if a in unassigned:
unassigned.remove(a)
dinnerOverassigned, dinnerUnderassigned, dinnerFull = dinnerCapacityCheck()
#if still overassigned after the second choice, run the brunch algorithm
#then minimize preference scores as a last resort.
if len(dinnerOverassigned) == 0 and dinnerDone==False:
dinnerDone = True
for name in restaurantNames:
print("The following have been assigned to " + str(name) + ":")
for x in assignmentDict[name]:
print(str(x) + '\n')
# -
brunch.columns[1:].tolist()
# +
Area4 = []
PaintedBurro = []
RusselHouse =[]
Christophers = []
Ryles = []
CambridgeCommons = []
assigned = []
unassigned = []
assignmentLists = [Area4,PaintedBurro,RusselHouse,Christophers,Ryles,CambridgeCommons]
restaurantNames = brunch.columns[1:].tolist()
assignmentDict = dict(zip(restaurantNames, assignmentLists))
#zeroes = [0]*len(names)
#preferenceScoreDict = dict(zip(names, zeroes))
# +
for col in brunch.columns[1:]:
d = brunch[col]
i = 0
while i < len(d):
if d[i] == 1:
#assign first preference
assignmentDict[col].append(names[i])
#mark the person as assigned.
assigned.append(names[i])
#update the person's cumulative preference score
preferenceScoreDict[names[i]] += 1
i += 1
#Now all first choices have been granted, check whether any restaurants are over capacity
def brunchCapacityCheck():
    """Classify every brunch restaurant as overassigned, underassigned, or full.

    Compares the current assignment count for each restaurant in the
    module-level `restaurantNames` (looked up in `assignmentDict`) against
    the capacity `cap`, prints the status of each restaurant, and returns
    the three name lists: (overassigned, underassigned, full).
    """
    over, under, full = [], [], []
    for rest in restaurantNames:
        count = len(assignmentDict[rest])
        if count > cap:
            print(str(rest) + ' is overassigned')
            over.append(rest)
        elif count < cap:
            print(str(rest) + ' is underassigned')
            under.append(rest)
        else:
            # count == cap: exactly at capacity
            print(str(rest) + ' is full')
            full.append(rest)
    return over, under, full
brunchOverassigned, brunchUnderassigned, brunchFull = brunchCapacityCheck()
brunchDone=False
if len(brunchOverassigned) == 0:
#everyone gets their first choice, ideal solution.
brunchDone = True
for name in restaurantNames:
print("The following have been assigned to " + str(name) + ":")
for x in assignmentDict[name]:
print(str(x) + '\n')
elif len(brunchOverassigned) > 0:
#print(dinnerOverassigned)
#print(assignmentDict)
#some people will have to be reassigned.
#for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
for rest in brunchOverassigned:
brunchList = assignmentDict[rest]
bumpNum = len(brunchList)-cap
gotBumped = np.random.choice(brunchList, bumpNum)
for b in gotBumped:
assignmentDict[rest].remove(b) #kick that dude out
assigned.remove(b) #mark them unassigned
unassigned.append(b)
preferenceScoreDict[b] += 1 #increment their preference score
#print(assignmentDict)
#assign bumped people to their second choice.
#print(unassigned)
for x in unassigned:
if x in assigned:
print(str(x) + " is already assigned.")
unassigned.remove(x)
continue
#print(x)
i = names.index(x)
row = brunch.ix[i] #this should be the row of the the bumped person
#reassign them to their second choice
j=0
#find the row index of this person who must be reassigned.
while j < len(brunch['Name']):
if brunch['Name'][j] == x:
break
j += 1
#Search for which restaurant was their second choice.
for col in brunch.columns[1:]:
d = brunch[col][j]
if d == 2:
#assign second preference
assignmentDict[col].append(names[j])
#mark the person as assigned.
if names[j] not in assigned:
assigned.append(names[j])
for a in assigned:
if a in unassigned:
unassigned.remove(a)
brunchOverassigned, brunchUnderassigned,brunchFull = brunchCapacityCheck()
if len(brunchOverassigned) == 0 and brunchDone==False:
brunchDone = True
for name in restaurantNames:
print("The following have been assigned to " + str(name) + ":")
for x in assignmentDict[name]:
print(str(x) + '\n')
#consider bumping people to third choice.
elif len(brunchOverassigned) > 0 and brunchDone==False:
#some people will have to be reassigned.
#for each overassigned restaurant, randomly choose the number of people required to kick out to go below the cap
for rest in brunchOverassigned:
brunchList = assignmentDict[rest]
bumpNum = len(brunchList)-cap
bumpOrder = random.shuffle(brunchList) #want to preferentially bump people who haven't yet been bumped.
gotBumped = []
for x in bumpOrder:
if len(gotBumped) == bumpNum:
break
if preferenceScoreDict[x] == 1:
gotBumped.append(x)
else:
continue
if len(gotBumped) < bumpNum:
for x in bumpOrder:
if len(gotBumped) == bumpNum:
break
if preferenceScoreDict[x] == 2:
gotBumped.append(x)
else:
continue
if len(gotBumped) < numpNum:
for x in bumpOrder:
if len(gotBumped) == bumpNum:
break
if preferenceScoreDict[x] == 3:
gotBumped.append(x)
else:
continue
for b in gotBumped:
assignmentDict[rest].remove(b) #kick that dude out
assigned.remove(b) #mark them unassigned
unassigned.append(b)
preferenceScoreDict[b] += 1 #increment their preference score
#print(assignmentDict)
#assign bumped people to their second choice.
#print(unassigned)
for x in unassigned:
if x in assigned:
print(str(x) + " is already assigned.")
unassigned.remove(x)
continue
#print(x)
i = names.index(x)
row = brunch.ix[i] #this should be the row of the the bumped person
#reassign them to their second choice
j=0
#find the row index of this person who must be reassigned.
while j < len(brunch['Name']):
if brunch['Name'][j] == x:
break
j += 1
#Search for which restaurant was their third choice.
for col in brunch.columns[1:]:
d = brunch[col][j]
if d == 3:
#assign second preference
assignmentDict[col].append(names[j])
#mark the person as assigned.
if names[j] not in assigned:
assigned.append(names[j])
for a in assigned:
if a in unassigned:
unassigned.remove(a)
brunchOverassigned, brunchUnderassigned, brunchFull = brunchCapacityCheck()
if len(brunchOverassigned) == 0 and brunchDone==False:
brunchDone = True
for name in restaurantNames:
print("The following have been assigned to " + str(name) + ":")
for x in assignmentDict[name]:
print(str(x) + '\n')
if dinnerDone == True and brunchDone == True:
print("all these motherfuckers gonna eat")
else:
print("impossible to fairly assign based on chosen preferences.")
# -
| monkfish.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## _*H2 energy plot comparing full to particle hole transformations*_
#
# This notebook demonstrates using QISKit ACQUA Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances using VQE and UCCSD with full and particle hole transformations. It is compared to the same energies as computed by the ExactEigensolver
#
# This notebook populates a dictionary, that is a programmatic representation of an input file, in order to drive the QISKit ACQUA Chemistry stack. Such a dictionary can be manipulated programmatically and this is indeed the case here where we alter the molecule supplied to the driver in each loop.
#
# This notebook has been written to use the PYQUANTE chemistry driver. See the PYQUANTE chemistry driver readme if you need to install the external PyQuante2 library that this driver requires.
# +
import numpy as np
import pylab
from qiskit_acqua_chemistry import ACQUAChemistry
# Input dictionary to configure QISKit ACQUA Chemistry for the chemistry problem.
acqua_chemistry_dict = {
'problem': {'random_seed': 50},
'driver': {'name': 'PYQUANTE'},
'PYQUANTE': {'atoms': '', 'basis': 'sto3g'},
'operator': {'name': 'hamiltonian', 'qubit_mapping': 'jordan_wigner',
'two_qubit_reduction': False},
'algorithm': {'name': ''},
'optimizer': {'name': 'COBYLA', 'maxiter': 10000 },
'variational_form': {'name': 'UCCSD'},
'initial_state': {'name': 'HartreeFock'}
}
molecule = 'H .0 .0 -{0}; H .0 .0 {0}'
algorithms = ['VQE', 'ExactEigensolver']
transformations = ['full', 'particle_hole']
start = 0.5 # Start distance
by = 0.5 # How much to increase distance by
steps = 20 # Number of steps to increase by
energies = np.empty([len(transformations), len(algorithms), steps+1])
hf_energies = np.empty(steps+1)
distances = np.empty(steps+1)
eval_counts = np.empty([len(transformations), steps+1])
print('Processing step __', end='')
for i in range(steps+1):
print('\b\b{:2d}'.format(i), end='', flush=True)
d = start + i*by/steps
acqua_chemistry_dict['PYQUANTE']['atoms'] = molecule.format(d/2)
for j in range(len(algorithms)):
acqua_chemistry_dict['algorithm']['name'] = algorithms[j]
for k in range(len(transformations)):
acqua_chemistry_dict['operator']['transformation'] = transformations[k]
solver = ACQUAChemistry()
result = solver.run(acqua_chemistry_dict)
energies[k][j][i] = result['energy']
hf_energies[i] = result['hf_energy']
if algorithms[j] == 'VQE':
eval_counts[k][i] = result['algorithm_retvals']['eval_count']
distances[i] = d
print(' --- complete')
print('Distances: ', distances)
print('Energies:', energies)
print('Hartree-Fock energies:', hf_energies)
print('VQE num evaluations:', eval_counts)
# -
pylab.plot(distances, hf_energies, label='Hartree-Fock')
for j in range(len(algorithms)):
for k in range(len(transformations)):
pylab.plot(distances, energies[k][j], label=algorithms[j]+' + '+transformations[k])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('H2 Ground State Energy')
pylab.legend(loc='upper right')
pylab.plot(distances, np.subtract(hf_energies, energies[0][1]), label='Hartree-Fock')
for k in range(len(transformations)):
pylab.plot(distances, np.subtract(energies[k][0], energies[k][1]), label='VQE + '+transformations[k])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('Energy difference from ExactEigensolver')
pylab.legend(loc='upper left')
for k in range(len(transformations)):
pylab.plot(distances, eval_counts[k], '-o', label='VQE + ' + transformations[k])
pylab.xlabel('Interatomic distance')
pylab.ylabel('Evaluations')
pylab.title('VQE number of evaluations')
pylab.legend(loc='upper left')
| chemistry/h2_particle_hole.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
Today we will be looking at exponential and polynomial regression. They aren't used nearly as much as other types of
regression, but they are still interesting. Keep in mind that they are extremely sensitive to high values with the use of
powers. """
# let's begin
import sealion as sl
from sealion.regression import ExponentialRegression, PolynomialRegression
# +
# the way exponential regression works is by essentially taking a log of the data.
# let's first just create an exponential dataset
import numpy as np
def f(x):
    """Exponential growth curve y = 1.01**x (no bias term is possible here)."""
    return 1.01 ** x
X = np.array(range(-100, 100)).reshape(200, 1)
y = np.apply_along_axis(f, 1, X).flatten() # we can just flatten it to a vector
# -
# as usual, we will want to viz it
import matplotlib.pyplot as plt
plt.plot(X, y)
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title("Exponential Data")
plt.show()
# clearly we see the data is exponential. Notice what happens when we take the log of y.
import matplotlib.pyplot as plt
plt.plot(X, np.log(y))
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title("Exponential Data Log Transformation")
plt.show()
# +
# It becomes a straight line! Here's why that matters to exponential regression.
# If we can take exponential data, do a log transformation, and then model that with the y = mx + b equation we can
# essentially model exponential data.
# first let's split the data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.15, random_state = 3)
# -
exp = ExponentialRegression()
exp.fit(X_train, y_train)
# and evaluation
exp.evaluate(X_test, y_test)
# +
# looks like it did perfectly!
#let's take a look at polynomial regression now
"""
The way polynomial regression works is by having the equation = y = bias + m1x1^1 + m2x2 ^2 ... mNxN^N.
This means it can model data to the degree of the number of features it has. This usually works well as more features
may lead to more complex curves and graphs.
"""
# first we generate data
def f(x):
    """Cubic test curve y = x**3 + 2*x**2 + x + 3, written in Horner form."""
    return ((x + 2) * x + 1) * x + 3
X = np.array(range(-100, 100)).reshape(200, 1)
y = np.apply_along_axis(f, 1, X).flatten()
# -
# visualize
import matplotlib.pyplot as plt
plt.plot(X, y)
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title("Polynomial Data")
plt.show()
# let's split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.15, random_state = 3)
# +
# this looks like a pretty polynomial curve. Next we can apply the PolynomialRegression class
polyreg = PolynomialRegression()
polyreg.fit(X_train, y_train)
# -
polyreg.evaluate(X_test, y_test) # evaluate it (r^2 metric)
# we can visualize it's evaluations. Looks like its doing pretty well for this kind of curve.
polyreg.visualize_evaluation(polyreg.predict(X_test), y_test)
# +
# It's modeling decently with the predictions, but always a little too high or short. That's it for this tutorial.
# thanks!
| examples/exponential_and_polynomial_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import shutil
from torchvision import transforms
from torchvision import models
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.optim import lr_scheduler
from torch import optim
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid
import warnings
warnings.filterwarnings("ignore")
import time
# %matplotlib inline
# +
# # !git clone https://github.com/ardamavi/Dog-Cat-Classifier.git
# -
def imshow(inp):
    """Display a normalized CHW image tensor with matplotlib.

    Undoes the ImageNet normalization (mean/std) applied by the data
    transforms, converts the tensor to an HWC numpy array, clips the
    values into [0, 1], and shows the image.
    """
    img = inp.numpy().transpose((1, 2, 0))
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    img = np.clip(imagenet_std * img + imagenet_mean, 0, 1)
    plt.imshow(img)
# + [markdown] heading_collapsed=true
# ## Create validation data set
# + hidden=true
path = 'Dog-Cat-Classifier/Data/Train_Data/'
# + hidden=true
dog_files = [f for f in glob.glob('Dog-Cat-Classifier/Data/Train_Data/dog/*.jpg')]
cat_files = [f for f in glob.glob('Dog-Cat-Classifier/Data/Train_Data/cat/*.jpg')]
files = dog_files + cat_files
print(f'Total no of images {len(files)}')
# + hidden=true
no_of_images = len(files)
# + hidden=true
no_of_images*0.8
# + hidden=true
shuffle = np.random.permutation(no_of_images)
# + hidden=true
os.mkdir(os.path.join(path,'train'))
os.mkdir(os.path.join(path,'valid'))
# -
# ## Check if GPU is present
if torch.cuda.is_available():
is_cuda = True
# ## Load data into PyTorch tensors
simple_transform = transforms.Compose([transforms.Resize((224,224))
,transforms.ToTensor()
,transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
train = ImageFolder('Dog-Cat-Classifier/Data/Train_Data/train/',simple_transform)
valid = ImageFolder('Dog-Cat-Classifier/Data/Train_Data/valid/',simple_transform)
print(train.class_to_idx)
print(train.classes)
imshow(train[50][0])
# ## Create data generators
train_data_gen = torch.utils.data.DataLoader(train,shuffle=True,batch_size=64,num_workers=3)
valid_data_gen = torch.utils.data.DataLoader(valid,batch_size=64,num_workers=3)
dataset_sizes = {'train':len(train_data_gen.dataset),'valid':len(valid_data_gen.dataset)}
dataloaders = {'train':train_data_gen,'valid':valid_data_gen}
# ## Create a network
# +
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
if torch.cuda.is_available():
model_ft = model_ft.cuda()
# -
model_ft
# Loss and Optimizer
learning_rate = 0.001
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
def train_model(model, criterion, optimizer, scheduler, num_epochs=20):
    """Train `model`, keeping the weights of the best validation epoch.

    Iterates over the module-level `dataloaders` dict ('train'/'valid'),
    stepping the LR scheduler once per epoch, and accumulates loss and
    accuracy per phase using `dataset_sizes`.  Returns the model with the
    best-validation-accuracy weights loaded.
    """
    import copy  # local import: needed to snapshot the weights below

    since = time.time()
    # state_dict() returns references to the live parameter tensors, so the
    # snapshot must be deep-copied or it is silently overwritten by training
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase
        for phase in ['train', 'valid']:
            if phase == 'train':
                scheduler.step()
                model.train(True)  # training mode (dropout/batch-norm active)
            else:
                model.train(False)  # evaluation mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for data in dataloaders[phase]:
                # get the inputs and wrap them, moving to GPU when available
                inputs, labels = data
                if torch.cuda.is_available():
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                outputs = model(inputs)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                # statistics; .item() replaces loss.data[0], which was removed
                # from PyTorch (0-dim tensor indexing raises since 0.5)
                running_loss += loss.item()
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / dataset_sizes[phase]
            # float() prevents integer tensor division truncating accuracy to 0
            epoch_acc = float(running_corrects) / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model if this is the best validation epoch so far
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=1)
| Chapter03/Chapter03b.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.1 64-bit (''pyUdemy'': conda)'
# name: python38164bitpyudemyconda8c705f49a8e643418ce4b1ca64c8ab63
# ---
my_list = [val for val in range(20) if val % 2 == 0]
print(my_list)
for i in range(len(my_list)):
print(i, my_list[i])
# (IDX,VAL)
for idx, val in enumerate(my_list):
print(idx, val)
# ITER1 ITER2
for idx, val in zip(range(len(my_list)), my_list):
print(idx, val)
# +
idx_range = range(len(my_list))
for idx, val in zip(idx_range, my_list):
print(idx, val)
# +
my_list_even_numbers = [val for val in range(20) if val % 2 == 0]
my_list_even_numbers_squared = [val**2 for val in range(10) if val % 2 == 0]
for num, num_squared in zip(my_list_even_numbers, my_list_even_numbers_squared):
print(num, num_squared)
| Chapter4_Iterables/Lists/zip_and_enumerate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import altair as alt
# run scraping script to save briefings as csv
# %run scrape_briefings.py
# import scraped csv to pandas df
briefings_df = pd.read_csv('../data/all_briefings.csv')
briefings_df
# ### Checking for null values:
briefings_df[briefings_df['text'].isnull()]
# Manually comparing with the full transcripts and videos we can see these correspond to the speaker being cut off or uttering something inaudible. Let's simply drop these three rows.
briefings_df = briefings_df.dropna(subset=['text']).reset_index(drop=True)
# ### Cleaning up speaker names
# how many paragraphs of text for the top speakers?
briefings_df['speaker'].value_counts()[:50]
# Looking at the output above and inspecting the data in more detail we can see there's opportunity to:
# - Consolidate numbered unnamed speakers and reporters, since numbering is not consistent across briefings
# - Note specific recurring reporters who are referred to by first name
# - Standardize the name used for other important speakers who have multiple spellings/titles/variations present
# replace speaker names using basic regex
briefings_df['speaker'].replace(regex={r'.*Trump.*': '<NAME>',
r'.*Pence.*': '<NAME>',
r'.*Fauci.*': 'Dr. <NAME>',
r'.*Birx.*': 'Dr. <NAME>',
r'.*Berks.*': 'Dr. <NAME>',
r'.*Pompeo.*': '<NAME>',
r'.*Report.*': 'Unnamed (Reporter)',
r'.*Audience Member.*': 'Unnamed',
r'.*Speaker .*': 'Unnamed',
r'.*Jeff\Z': 'Jeff (Reporter)',
r'.*John\Z': 'John (Reporter)',
r'.*Peter\Z': 'Peter (Reporter)',
r'.*Jim\Z': 'Jim (Reporter)',
r'.*Steve\Z': 'Steve (Reporter)',
r'.*Pete\Z': '<NAME>',
r'.*Novarro.*': '<NAME>',
r'.*Surgeon General.*': '<NAME>',
r'.*Giroir.*': '<NAME>',
r'.*Polowczyk.*': '<NAME>',
r'.*Verma.*': '<NAME>',
r'.*Azar.*': '<NAME>',
r'.*Hahn.*': 'Dr. <NAME>',
r'.*Mnuchin.*': '<NAME>'},
inplace = True)
# how many paragraphs of text per speaker?
top_speaker_counts = briefings_df['speaker'].value_counts()[:25]
top_speaker_counts
# save cleaned df to csv
briefings_df.to_csv("../data/cleaned_briefings.csv",index=False)
| src/archive/eda_cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# +
#SET A RANDOM NUMBER SEED
np.random.seed(119)
#set number of data points
npoints=50
#set x
x=np.linspace(0,10,npoints)
#set slope intercept, and scatter rms
m=2.0
b=1.0
sigma=2.0
#generate y points
y=m*x+b+np.random.normal(scale=sigma, size=npoints)
y_err=np.full(npoints,sigma)
# -
# ### Let's just plot that data first
f=plt.figure(figsize=(7,7))
plt.errorbar(x,y,sigma,fmt='o')
plt.xlabel('x')
plt.ylabel('y')
# ## Method #1, polyfit()
# +
m_fit, b_fit = np.poly1d(np.polyfit(x, y, 1, w=1./y_err))  # weight with uncertainties
print(m_fit, b_fit)
# evaluate the best-fit line y = m*x + b
# (the original multiplied by b_fit instead of adding it)
y_fit = m_fit*x + b_fit
# -
f=plt.figure(figsize=(7,7))
plt.errorbar(x,y,yerr=y_err,fmt='o',label='data')
plt.plot(x,y_fit,label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2,frameon=False)
# ## Method #2, scipy +optimize
# +
#import optimize from scipy
from scipy import optimize
#define the function to fit
def f_line(x, m, b):
    """Straight-line model y = m*x + b used as the curve_fit target."""
    return b + m * x
#perform the fit
params,params_cov=optimize.curve_fit(f_line,x,y,sigma=y_err)
m_fit=params[0]
b_fit =params[1]
print(m_fit,b_fit)
# -
# ## Plot the result
f=plt.figure(figsize=(7,7))
plt.errorbar(x,y,yerr=y_err,fmt='o',label='data')
plt.plot(x,y_fit,label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=2,frameon=False)
# ## We can perform much more complicated fits...
# +
#redefine x and y
npoints=50
x=np.linspace(0.,2*np.pi,npoints)
#make y a complicated function
a=3.4
b=2.1
c=.27
d=-1.3
sig=.6
y=a*np.sin(b*x+c)+d+np.random.normal(scale=sig,size=npoints)
y_err=np.full(npoints,sig)
f=plt.figure(figsize=(7,7))
plt.errorbar(x,y,yerr=y_err,fmt='o')
plt.xlabel('x')
plt.ylabel('y')
# -
# ## Perform a fit using scipy.optimize.curve_fit()
# +
#define the function to fit
def f_line(x, a, b, c, d):
    """Sinusoidal model y = a*sin(b*x + c) + d used as the curve_fit target."""
    return d + a * np.sin(b * x + c)
#perform the fit
params, params_cov=optimize.curve_fit(f_line,x,y,sigma=y_err,p0=[1,2.,.1,-.1])
a_fit=params[0]
b_fit=params[1]
c_fit=params[2]
d_fit=params[3]
print(a_fit,b_fit,c_fit,d_fit)
y_fit=a_fit*np.sin(b_fit*x+c_fit)+d_fit
# -
# ## Plot the fit
f=plt.figure(figsize=(7,7))
plt.errorbar(x,y,yerr=y_err,fmt='o',label='data')
plt.plot(x,y_fit,label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc=0,frameon=False)
| linear_fit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Load PyEpiDAG
import epidag as dag
import numpy as np
# ## Define a DAG
# Compose a script
#
# ```
# PCore Exp1 {
# # Definitions of nodes
# }
# ```
#
# Then, .. parse to a DAG
# +
script = '''
PCore Exp1 {
n
a = 0.5
b ~ beta(1, 1)
c = min(a, b)
y ~ binom(n, c)
z = f(a,b)
}
'''
bn = dag.bayes_net_from_script(script)
bn
# -
# ### Single value variable
#
#
# > VariableName = Number
#
# +
SingleValue = bn['a']
print('Node \'a\'')
print('Definition:', SingleValue)
print('\nFind the value')
print(SingleValue())
# -
# ### Exogenous variable
#
# > VariableName
# +
Exogenous = bn['n']
print('Node \'n\'')
print('Definition:', Exogenous)
print('\nFind the value; must append external resources')
print(Exogenous({'n': 5}))
# -
# ### Random variable
#
# > VariableName ~ p(...)
#
# ** p(...) **: a probabilidy density/mass function
# +
Random = bn['b']
print('Node \'b\'')
print('Definition:', Random)
print('\nSample a value')
print(Random())
# -
# ### Equation output
#
# > OutputName = g(...)
#
# ** g(...) **: a mathematical function
# +
Equation = bn['c']
print('Node \'c\'')
print('Definition:', Equation)
parents = {
'a': SingleValue(),
'b': Random()
}
print('\nCalculate a value')
print(Equation(parents))
# -
# ### Pseudo Node
# Pseudo nodes are the nodes which can not be implemented in simulation but in relation inference.
#
# > VariableName = f(...)
#
# ** f(...) **: a pseudo function start with ** f ** and a list of parent variables follows
# +
Pseudo = bn['z']
print('Node \'z\'')
print('Definition:', Pseudo)
parents = {
'a': SingleValue(),
'b': Random()
}
print('Parents:', Pseudo.Parents)
try:
Pseudo(parents)
except AttributeError:
print('Pseudo nodes do not provide any implementation')
# -
# ## For simulation model
#
# The **'epidag.simulation'** is used to provide tools for simulation modelling.
#
# ### Reasons to use 'epidag.simulation'
# - The parameters of my model have complicated interactions among each other.
# - My model includes many random effects, so I don't want to fix the parameters in the begining.
# - I don't want to rebuild my model after the parameters changed. (Intervention analysis).
# - My study includes Monte Carlo inference and model fitting, so I need a convenient interface.
#
#
# ### SimulationCore
# SimulationCore is a type of object carrying all the definition of a parameter model.
#
#
# ### ParameterCore
# ParameterCore is a type of object can be use directly in a simulation model. A ParameterCore can be generated from its parent ParameterCore or a SimulationCore. After a ParameterCore instantiated 1) the fixed nodes are assigned, 2) the random nodes are ready to be used.
#
# ### Purposed workflow
#
# Monte Carlo simulation
# 1. Prepare a SimulationCore
# 2. For each iteration, generate a ParameterCore
# 3. Build simulation models with the ParameterCores
# 4. Collect results and analysis (summarise or fit to data)
# ### Example 1. Basic syntex and function, Gaussian distribution
#
# This example shows a normal variable ($x$) with a fixed mean value ($mu_x$) and a standard deviation ($sd$) distributed from an exponential distribution
# #### Step 1. generate a simulation given a Bayesian network
# +
script = '''
PCore Exp1 {
mu_x = 0
sd ~ exp(1)
x ~ norm(mu_x, sd)
}
'''
bn = dag.bayes_net_from_script(script)
sc = dag.as_simulation_core(bn, dag.NodeSet('root', as_fixed=['mu_x', 'sd'], as_floating=['x']))
# -
# #### Step 2. Instantiate a ParameterCore which
#
# - Hyper-parameter ($sd$) is fixed to a certain value
# - A sampler of the leaf variable ($x$) is prepared
pc = sc.generate(nickname='exp2', exo={'mu_x': 30})
pc
pc.get_sampler('x').sample(5)
# #### Step 3. Get sampler x from the ParameterCore and provide it to a simulation model.
#
# - The sampler can be used repeatly
# - You don't need to refer to its hyper-parameters while using
x = pc.get_sampler('x')
x
x(), np.mean(x.sample(1000))
# #### Step 4. Intervention
#
# You can set impulse on the ParameterCore. Then,
# - The impulse will be passed down to its downstream variables
# - You don't need to do anything to the sample
pc.impulse({'sd': 5, 'mu_x': 100})
pc
x(), np.mean(x.sample(1000))
# ### Example 2. Random effects, Beta-Binomial model
# Example 2 is a standard example in Baysian inference. Beta-Binomial model are used to model count data ($x$) with a latent variable, probability ($prob$).
#
#
# > dag.as_simulation_core(bn, random=[...])
#
# The option **random** defined variables which we do not want then be fixed during ParameterCore instantiation
# +
script = '''
PCore BetaBinom {
alpha = 1
beta = 1
n
prob ~ beta(alpha, beta)
x ~ binom(n, prob)
}
'''
bn = dag.bayes_net_from_script(script)
sc = dag.as_simulation_core(bn, dag.NodeSet('root', as_floating=['x']))
sc.deep_print()
# -
# Since the variable $n$ is an exogenous variable, we introduce it to new ParameterCore by **exo={...}**.
#
# To be noticed that, $prob$ has been set as a random effect, so the variable will be requested whenever new $x$ is requested
pc = sc.generate('exp1', exo={'n': 100})
pc
# Again, we get a sampler $x$ for the generated ParameterCore
x = pc.get_sampler('x')
x(), np.mean(x.sample(1000))
# **list_all** option print all variables used to sample outcome variable. You can see that $prob$ is not a fixed variable
# ### Example 3. Exposed variables, Regression model
# The example is a linear regression model. Dependant variable ($y$) is composed of $x*b1$ and an intercept $b0$; $var=eps^2$ is a measure of variance.
# +
# Linear regression: y = b0 + b1*x + eps, with var = eps^2 as side information.
script = '''
PCore Regression {
b0 = 1
b1 = 0.5
x = 1
eps ~ norm(0, 1)
y = x*b1 + b0 + eps
var = pow(eps, 2)
}
'''
bn = dag.bayes_net_from_script(script)
# b0, b1, x are fixed at instantiation; y and var float (recomputed on demand).
sc = dag.as_simulation_core(bn, dag.NodeSet('root', as_fixed=['b0', 'b1', 'x'], as_floating=['var', 'y']))
sc.deep_print()
# -
# However, $var$ is not a variable for simulation but for providing external information, so it does not need to be treated as a sampler.
# Generate a ParameterCore and inspect its fixed values and its samplers.
pc = sc.generate('exp3-1')
print('Fixed variables', pc)
print('Samplers', pc.list_actors())
# ### Example 4. Hierarchy, BMI model
# Example 4 describes a parameter model of a BMI (body mass index) simulation model. The model includes three layers: country, area, and people. A country has many areas; an area has many people; there are two types of people: agA and agB.
#
# - Each area has a number of food stores which can provide food to people.
# - agA and agB perform differently in the variance of BMI
# - The simulation model requests the sex of agA individuals in order to model their behaviour
# +
# BMI model: country -> area -> individuals (agA / agB).
# NOTE(review): 'sd' is not declared in the script; it is supplied as an
# exogenous value when the ParameterCore is generated (see sc.generate below).
script = '''
PCore BMI {
b0 ~ norm(12, 1)
b1 = 0.5
pf ~ beta(8, 20)
foodstore ~ binom(100, pf)
b0r ~ norm(0, .01)
ageA ~ norm(20, 3)
ageB ~ norm(30, 2)
ps ~ beta(5, 6)
sexA ~ cat({'m': ps, 'f': 1-ps})
muA = b0 + b0r + b1*ageA
bmiA ~ norm(muA, sd)
sdB = sd * 0.5
muB = b0 + b0r + b1*ageB
bmiB ~ norm(muB, sdB)
}
'''
bn = dag.bayes_net_from_script(script)
# -
# You can define hierarchies by a dictionary with
#
# 1. parameter groups as keys and of respective parameters as values
# 2. putting their children groups in the values as well
#
# You do not need to list every variable. Variables outside the list will be optimised in the SimulationCore
# +
# Build the node hierarchy: country -> area -> {agA, agB}.
ns = dag.NodeSet('country', as_fixed=['sdB', 'b0'])
ns_area = ns.new_child('area', as_fixed=['b0r', 'ps', 'pf'], as_floating=['foodstore'])
nsa = ns_area.new_child('agA', as_fixed=['ageA', 'sexA'], as_floating=['bmiA'])
ns_area.new_child('agB', as_fixed=['ageB'], as_floating=['bmiB'])
sc = dag.as_simulation_core(bn, ns)
sc.deep_print()
# -
# You can use a SimulationCore to generate a root ParameterCore and use the root to $breed$ children ParameterCores
# +
# Generate the root ParameterCore (supplying the exogenous 'sd'), then breed
# child cores down the hierarchy: country -> area -> individuals.
pc = sc.generate('Taiwan', {'sd': 1})
pc_taipei = pc.breed('Taipei', 'area')
a1 = pc_taipei.breed('A1', 'agA')
pc_taipei.breed('A2', 'agA')
pc_taipei.breed('B1', 'agB')
pc_taipei.breed('B2', 'agB')
pc.deep_print()
# +
# Two equivalent look-ups: via the direct parent, or from the root by path.
print('Get node from it parent')
a1 = pc_taipei.get_child('A1')
a1.print()
print('\nGet a node from root: use \'@\' to link names of ParameterCores')
a1 = pc.find_descendant('Taiwan@Taipei@A1')
a1.print()
# -
# When putting intervention in a node, the impulse will be passed to its children node automatically.
# see the change of muB
# Impulse with a dict sets values; impulse with a list of names resets them.
# Watch muB change in the two printouts.
pc.impulse({'b0r': 10})
pc.deep_print()
pc.impulse(['b0r'])
pc.deep_print()
# ### Example 5. A simple agent-based model
# +
# Beta-Binomial parameter model backing the agent-based example below:
# each agent samples x ~ Binom(5, p) with a shared p fixed at the root.
script = '''
PCore BetaBin {
al = 1
be = 1
p ~ beta(al, be)
x ~ binom(5, p)
}
'''
bn = dag.bayes_net_from_script(script)
ns = dag.NodeSet('root', as_fixed=['p'])
ns.new_child('ag', as_floating=['x'])
sm = dag.as_simulation_core(bn, ns)
sm.deep_print()
# +
class Agent:
    """A single agent holding its own ParameterCore and an 'x' sampler."""
    def __init__(self, name, p):
        self.Name = name
        self.Parameters = p
        self.X = p.get_sampler('x')

    def produce(self, k):
        # Draw k values of x from this agent's sampler.
        return self.X.sample(k)
class AgentBasedModel:
    """A population of Agents bred from a shared root ParameterCore."""

    def __init__(self, pars, n_agents):
        self.Parameters = pars
        self.Agents = []
        for idx in range(n_agents):
            agent_name = 'Ag{}'.format(idx)
            child_pars = pars.breed(agent_name, 'ag')
            self.Agents.append(Agent(agent_name, child_pars))

    def product(self, k):
        """Total output of the population: k draws per agent, summed."""
        draws = [agent.produce(k) for agent in self.Agents]
        return np.array(draws).sum()
def fn_sim(pars, data):
    """Simulate total production from data['N'] agents doing data['K'] draws each."""
    model = AgentBasedModel(pars, data['N'])
    return model.product(data['K'])
def fn_mean(sim, data):
    """Pseudo-likelihood: the closer the simulated value is to the observed
    data['X'], the higher (less negative) the score."""
    deviation = abs(sim - data['X'])
    return -deviation
# +
# Build a model instance once to check the pieces fit together.
pars = sm.generate('Simu A')
abm = AgentBasedModel(pars, 5)
# -
# Observed data: N agents, K draws each, total count X.
d = {
    'N': 10,
    'K': 10,
    'X': 200
}
pars = sm.generate('Simu A')
sim = fn_sim(pars, d)
mea = fn_mean(sim, d)
sim, mea
# Wrap everything into a data model and fit by MCMC; BestFit holds the
# best posterior sample found.
sdm = dag.as_simulation_data_model(sm, d, fn_sim, fn_mean)
fit = dag.fitting.MCMC(sdm)
fit.fit(100)
fit.summarise_posterior()
fit.BestFit
| tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: py35-paddle1.2.0
# ---
# # 大脑磁共振项目
# # 2.数据预处理
# ## 2.1 数据集的准备与介绍
# ### 2.1.1把数据集解压到work目录下
# +
# # !unzip -oq /home/aistudio/data/data105078/archive.zip -d work/
# -
# ### 2.1.2 展示
# ! tree work/ -d
# ### 2.1.3样本可视化
# +
import cv2
import glob
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Simhei']  # CJK-capable font for plot titles
# %matplotlib inline
# work/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_11.tif
# work/lgg-mri-segmentation/kaggle_3m/TCGA_HT_8018_19970411/TCGA_HT_8018_19970411_14_mask.tif
work_path = 'work/kaggle_3m/'
file_path = 'TCGA_CS_4941_19960909/'
image_head = 'TCGA_CS_4941_19960909_'
# work_path = 'work/lgg-mri-segmentation/kaggle_3m/'
# file_path = 'TCGA_HT_8018_19970411/'
# image_head = 'TCGA_HT_8018_19970411_'
lable_name = '_mask'  # NOTE(review): 'lable' is a typo for 'label', kept as-is for consistency
image_path_list = []
label_path_list = []
# Glob all mask files for this case, then derive the matching image paths.
mask_str = work_path + file_path + image_head + '*' + lable_name + '.tif'
# print(mask_str)
lable_path = glob.glob(mask_str)
image_path = [path.replace('_mask', '')
              for path in lable_path]
# print(image_path)
# NOTE(review): the glob results above are immediately overwritten below; only
# slices 12-15 are collected for the visualisation.
for i in range(12, 16):
    image_path = work_path + file_path + image_head + str(i) + '.tif'
    lable_path = work_path + file_path + image_head + str(i) + lable_name + '.tif'
    image_path_list.append(image_path)
    label_path_list.append(lable_path)
print(image_path_list, label_path_list)
# +
import cv2
import matplotlib.pyplot as plt
# Show each MRI slice (left column) next to its tumour mask (right column).
plt.figure(figsize=(12, 12))
for i in range(len(image_path_list)):
    plt.subplot(len(image_path_list), 2, i*2+1)
    # plt.title(image_path_list[i])
    plt.imshow(cv2.imread(image_path_list[i]))
    plt.subplot(len(label_path_list), 2, i*2+2)
    # plt.title(label_path_list[i])
    plt.imshow(cv2.imread(label_path_list[i]))
plt.tight_layout()
plt.show()
# -
# ### 2.1.4 图像基础操作练习
# +
# Open one mask and force it to single-channel ('L') unless it is already an
# integer/grayscale mode.
img = Image.open('work/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_14_mask.tif')
# if img.mode != 'RGB':
if img.mode not in ('L', 'I;16', 'I'):
    img = img.convert('L')
print(img)
# NOTE(review): the three lines below look like leftover fragments of a
# Compose([...]) call; each is a no-op tuple/expression statement with no effect.
transforms.Resize(size=(256, 256)),  # resize to 256x256
transforms.ToTensor(),
transforms.Normalize(mean=[0.5],std=[0.5])
# +
# img_read_test = cv2.imread('work/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_11.tif')
img_read_test = cv2.imread('work/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_14_mask.tif')
# print(img_read_test.shape)
# plt.imshow(img_read_test)
# plt.show()
# Resize -> ToTensor (scales to [0, 1]) -> (x - 0.4) / 0.6; the int64 cast
# below truncates the result to integer class values.
transform = Compose([
    transforms.Resize(size=(256, 256)),  # resize to 256x256
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4],std=[0.6]),
    # transforms.Grayscale(),
    # transforms.Normalize()
])
img_read_test = transform(img_read_test)
img_read_test = np.array(img_read_test, dtype='int64')
print(img_read_test.max(), img_read_test.min())
# -
plt.imshow(img_read_test[0:200, 0:200])
plt.show()
# +
# Image resizing transform
class Resize:
    """Callable transform: resize an ndarray image to a fixed (w, h) with cv2."""
    def __init__(self, size):
        self.size = size

    def __call__(self, img):
        return cv2.resize(img, self.size)
# Resize((600, 600)): tune the tuple argument to control the output size.
resize = Resize((600, 600))
img_resize = resize(img_read_test)
print(img_resize.shape)
plt.imshow(img_resize)
# -
# +
# Image flipping transform
class Flip:
    """Callable transform: flip an image with cv2.flip using a fixed mode."""
    def __init__(self, mode):
        self.mode = mode

    def __call__(self, img):
        return cv2.flip(img, self.mode)
# Deterministic flip (not random):
# mode=0 vertical, 1 horizontal, -1 both axes.
flip = Flip(mode=-1)
img_flip = flip(img_read_test)
plt.imshow(img_flip)
# -
# ## 2.2 图像的统计分析
# +
import glob
import numpy as np
def get_mean_std(image_path_list):
    """Compute per-channel mean and std over a list of image files, scaled by
    each channel's observed value range.

    Args:
        image_path_list: paths readable by cv2.imread (3-channel images).

    Returns:
        (mean, std): two length-3 float arrays, each divided by the
        per-channel (max - min) range observed across the dataset.

    Raises:
        ValueError: if image_path_list is empty (would otherwise divide by 0).
    """
    print('Total images:', len(image_path_list))
    if not image_path_list:
        raise ValueError('image_path_list is empty')
    max_val, min_val = np.zeros(3), np.ones(3) * 255
    mean, std = np.zeros(3), np.zeros(3)
    for image_path in image_path_list:
        image = cv2.imread(image_path)
        for c in range(3):
            mean[c] += image[:, :, c].mean()
            std[c] += image[:, :, c].std()
            max_val[c] = max(max_val[c], image[:, :, c].max())
            min_val[c] = min(min_val[c], image[:, :, c].min())
    mean /= len(image_path_list)
    std /= len(image_path_list)
    # Fix: a channel that is constant across the whole dataset (e.g. an
    # all-black mask channel) made max_val - min_val zero, producing NaN/inf.
    # Fall back to a range of 1 so such channels pass through unscaled.
    value_range = np.where(max_val > min_val, max_val - min_val, 1.0)
    mean /= value_range
    std /= value_range
    return mean, std
# mean, std = get_mean_std(glob.glob('work/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_*.tif'))
# Statistics over the mask files of a single case.
mean, std = get_mean_std(glob.glob('work/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_*_mask.tif'))
print('mean:', mean)
print('std:', std)
# -
# ## 2.3 数据集类定义
# +
# 自定义数据读取器
import paddle
import glob
from paddle.vision import transforms
from paddle.vision.transforms import Compose, ColorJitter, Resize,Transpose, Normalize,BrightnessTransform,RandomVerticalFlip,RandomHorizontalFlip,RandomRotation
import cv2
import numpy as np
from PIL import Image
from paddle.io import Dataset
# Custom preprocessing: takes a raw image, returns the transformed image,
# built on paddle.vision.transforms.
def preprocess(img, mode='train'):
    """Transform pipeline for inputs ('train') and masks ('label').

    'train': resize to 224x224, random colour jitter, normalise HWC pixels
             to roughly [-1, 1].
    'label': resize to 224x224 and convert to grayscale.

    NOTE(review): any other mode leaves `transform` unbound -> NameError.
    """
    if mode == 'train':
        transform = Compose([
            transforms.Resize(size=(224, 224)),  # resize (comment said 256; code uses 224)
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),  # random brightness/contrast/saturation/hue
            #RandomHorizontalFlip(224),
            #RandomVerticalFlip(224),
            #BrightnessTransform(0.2),
            transforms.Normalize(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], data_format='HWC'),  # scale to [-1, 1]
            # transforms.Transpose(),  # would convert HWC -> CHW
        ])
    elif mode == 'label':
        transform = Compose([
            transforms.Resize(size=(224, 224)),  # resize (comment said 256; code uses 224)
            # transforms.ToTensor(),
            # transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
            transforms.Grayscale()
        ])
    img = transform(img)
    return img
class Reader(Dataset):
    """Dataset over image paths; each label is the sibling '<name>_mask.tif'."""

    def __init__(self, data, is_val=False):
        super().__init__()
        # Split the (pre-shuffled) path list: last 20% validation, first 80% train.
        self.samples = data[-int(len(data)*0.2):] if is_val else data[:-int(len(data)*0.2)]

    def _load_img(self, path, mode='image'):
        # Images: force RGB, run the 'train' pipeline, cast to float32.
        if mode == 'image':
            img = Image.open(path)
            if img.mode != 'RGB':
                img = img.convert('RGB')
            img = preprocess(img, mode='train')
            # img = img.astype("float32")
            img = np.array(img, dtype='float32')
        # Labels: force single-channel, run the 'label' pipeline, cast to int64.
        else:
            img = Image.open(path)
            # if img.mode != 'RGB':
            if img.mode not in ('L', 'I;16', 'I'):
                img = img.convert('L')
                # img = img.convert('RGB')
            img = preprocess(img, mode='label')
            # img = img.astype("int64")
            img = np.array(img, dtype='int64')
        return img

    def __getitem__(self, index):
        # Load one (image, mask) pair by index.
        img_path = self.samples[index]
        lable_path = img_path.replace('.tif', '_mask.tif')
        img = self._load_img(img_path)
        label = self._load_img(lable_path, mode='label')
        return img, label

    def __len__(self):
        # Number of samples per epoch.
        return len(self.samples)
# All image files (names ending in a digit); mask paths are derived per-file.
image_path = glob.glob('work/kaggle_3m/*/*[0-9].tif')
lable_path = [path.replace('.tif', '_mask.tif')
              for path in image_path]
# Training split instance
train_dataset = Reader(image_path, is_val=False)
# Validation split instance
eval_dataset = Reader(image_path, is_val=True)
# -
# ## 2.4 数据集类的测试
# 打印实例
print(len(train_dataset))
for data in train_dataset:
print(data[0].shape, data[1].shape)
break
# +
train_dataloader = paddle.io.DataLoader(
    train_dataset,
    batch_size=32,
    shuffle=True,
    drop_last=False)
eval_dataloader = paddle.io.DataLoader(
    eval_dataset,
    batch_size=32,
    shuffle=True,
    drop_last=False)
# Peek at the first batch only.
num = 0
for step, data in enumerate(train_dataloader):
    if num <1:
        image, label = data
        print(step, image.shape, label.shape)
        num += 1
    else:
        break
# -
# # 3.模型搭建
# +
# 自定义数据读取器
import paddle
import glob
from paddle.vision import transforms
from paddle.vision.transforms import Compose, ColorJitter, Resize,Transpose, Normalize,BrightnessTransform,RandomVerticalFlip,RandomHorizontalFlip,RandomRotation
import cv2
import numpy as np
from PIL import Image
from paddle.io import Dataset
# Custom preprocessing: takes a raw image, returns the transformed image,
# built on paddle.vision.transforms.
def preprocess(img, mode='train'):
    """Transform pipeline for inputs ('train') and masks ('label').

    'train': resize to 256x256, HWC -> CHW, scale pixels to [-1, 1].
    'label': resize to 256x256, ToTensor (scales to [0, 1]), then
             (x - 0.4) / 0.6; the int64 cast in Reader._load_img truncates
             the result to integer class ids.

    NOTE(review): any other mode leaves `transform` unbound -> NameError.
    """
    if mode == 'train':
        transform = Compose([
            transforms.Resize(size=(256, 256)),  # resize to 256x256
            transforms.Transpose(),  # HWC -> CHW
            # transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),  # random photometric jitter
            #RandomHorizontalFlip(224),
            #RandomVerticalFlip(224),
            #BrightnessTransform(0.2),
            transforms.Normalize(mean=127.5, std=127.5),  # scale to [-1, 1]
        ])
    elif mode == 'label':
        transform = Compose([
            transforms.Resize(size=(256, 256)),  # resize to 256x256
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4],std=[0.6]),
            # transforms.Grayscale(),
            # transforms.Normalize()
        ])
    img = transform(img)
    return img
class Reader(Dataset):
    """Dataset over image paths; each label is the sibling '<name>_mask.tif'."""

    def __init__(self, data, is_val=False):
        super().__init__()
        # Split the (pre-shuffled) path list: last 20% validation, first 80% train.
        self.samples = data[-int(len(data)*0.2):] if is_val else data[:-int(len(data)*0.2)]

    def _load_img(self, path, mode='image'):
        # Images: force RGB, run the 'train' pipeline, cast to float32.
        if mode == 'image':
            img = Image.open(path)
            if img.mode != 'RGB':
                img = img.convert('RGB')
            img = preprocess(img, mode='train')
            # img = img.astype("float32")
            img = np.array(img, dtype='float32')
        # Labels: force single-channel, run the 'label' pipeline, cast to int64.
        else:
            img = Image.open(path)
            # if img.mode != 'RGB':
            if img.mode not in ('L', 'I;16', 'I'):
                img = img.convert('L')
                # img = img.convert('RGB')
            img = preprocess(img, mode='label')
            # img = img.astype("int64")
            img = np.array(img, dtype='int64')
        return img

    def __getitem__(self, index):
        # Load one (image, mask) pair by index.
        img_path = self.samples[index]
        lable_path = img_path.replace('.tif', '_mask.tif')
        img = self._load_img(img_path)
        label = self._load_img(lable_path, mode='label')
        return img, label

    def __len__(self):
        # Number of samples per epoch.
        return len(self.samples)
# Rebuild the path lists for the model-training section.
image_path = glob.glob('work/kaggle_3m/*/*[0-9].tif')
lable_path = [path.replace('.tif', '_mask.tif')
              for path in image_path]
# +
from paddle.nn import functional as F
import paddle.nn as nn
import paddle
import numpy as np
class SeparableConv2D(paddle.nn.Layer):
    """Depthwise-separable 2D convolution.

    A depthwise convolution (one filter per input channel, no bias) followed
    by a 1x1 pointwise convolution with bias.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=None,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(SeparableConv2D, self).__init__()
        self._padding = padding
        self._stride = stride
        self._dilation = dilation
        self._in_channels = in_channels
        self._data_format = data_format
        # Depthwise filter: one [1, kh, kw] kernel per input channel, no bias.
        filter_shape = [in_channels, 1] + self.convert_to_list(kernel_size, 2, 'kernel_size')
        self.weight_conv = self.create_parameter(shape=filter_shape, attr=weight_attr)
        # Pointwise (1x1) filter mixing channels, with bias.
        filter_shape = [out_channels, in_channels] + self.convert_to_list(1, 2, 'kernel_size')
        self.weight_pointwise = self.create_parameter(shape=filter_shape, attr=weight_attr)
        self.bias_pointwise = self.create_parameter(shape=[out_channels],
                                                    attr=bias_attr,
                                                    is_bias=True)

    def convert_to_list(self, value, n, name, dtype=int):
        """Normalise a scalar or length-n sequence into a list of n values.

        Fix: the default was `np.int`, a deprecated alias removed in
        NumPy >= 1.24 (raising AttributeError at class-definition time);
        the builtin `int` is the exact equivalent.
        """
        if isinstance(value, dtype):
            return [value, ] * n
        else:
            try:
                value_list = list(value)
            except TypeError:
                raise ValueError("The " + name +
                                 "'s type must be list or tuple. Received: " + str(
                                     value))
            if len(value_list) != n:
                raise ValueError("The " + name + "'s length must be " + str(n) +
                                 ". Received: " + str(value))
            for single_value in value_list:
                try:
                    dtype(single_value)
                except (ValueError, TypeError):
                    raise ValueError(
                        "The " + name + "'s type must be a list or tuple of " + str(
                            n) + " " + str(dtype) + " . Received: " + str(
                            value) + " "
                        "including element " + str(single_value) + " of type" + " "
                        + str(type(single_value)))
            return value_list

    def forward(self, inputs):
        # Depthwise pass: groups == in_channels gives per-channel convolution.
        conv_out = F.conv2d(inputs,
                            self.weight_conv,
                            padding=self._padding,
                            stride=self._stride,
                            dilation=self._dilation,
                            groups=self._in_channels,
                            data_format=self._data_format)
        # Pointwise 1x1 pass mixes the channels and adds the bias.
        out = F.conv2d(conv_out,
                       self.weight_pointwise,
                       bias=self.bias_pointwise,
                       padding=0,
                       stride=1,
                       dilation=1,
                       groups=1,
                       data_format=self._data_format)
        return out
class Encoder(paddle.nn.Layer):
    """Down-sampling block: ReLU -> separable conv -> BN -> max-pool, plus a
    strided 1x1 residual projection so the shortcut matches the new shape."""

    def __init__(self, in_channels, out_channels):
        super(Encoder, self).__init__()
        self.relus = paddle.nn.ReLU()
        self.separable_conv_01 = SeparableConv2D(in_channels,
                                                 out_channels,
                                                 kernel_size=3,
                                                 padding='same')
        self.bns = paddle.nn.BatchNorm2D(out_channels)
        self.pool = paddle.nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        # 1x1 stride-2 conv aligns channels and spatial size of the shortcut.
        self.residual_conv = paddle.nn.Conv2D(in_channels,
                                              out_channels,
                                              kernel_size=1,
                                              stride=2,
                                              padding='same')

    def forward(self, inputs):
        previous_block_activation = inputs  # shortcut source
        y = self.relus(inputs)
        y = self.separable_conv_01(y)
        y = self.bns(y)
        y = self.pool(y)
        residual = self.residual_conv(previous_block_activation)
        y = paddle.add(y, residual)  # residual connection
        return y
class Decoder(paddle.nn.Layer):
    """Up-sampling block: ReLU -> transposed conv -> BN -> 2x upsample, plus a
    2x-upsampled 1x1 residual projection on the shortcut."""

    def __init__(self, in_channels, out_channels):
        super(Decoder, self).__init__()
        self.relus = paddle.nn.ReLU()
        self.conv_transpose_01 = paddle.nn.Conv2DTranspose(in_channels,
                                                           out_channels,
                                                           kernel_size=3,
                                                           padding='same')
        self.bns = paddle.nn.BatchNorm2D(out_channels)
        # Two separate Upsample layers: index 0 for the main path, 1 for the shortcut.
        self.upsamples = paddle.nn.LayerList(
            [paddle.nn.Upsample(scale_factor=2.0) for i in range(2)]
        )
        self.residual_conv = paddle.nn.Conv2D(in_channels,
                                              out_channels,
                                              kernel_size=1,
                                              padding='same')

    def forward(self, inputs):
        previous_block_activation = inputs  # shortcut source
        y = self.relus(inputs)
        y = self.conv_transpose_01(y)
        y = self.bns(y)
        y = self.upsamples[0](y)
        residual = self.upsamples[1](previous_block_activation)
        residual = self.residual_conv(residual)
        y = paddle.add(y, residual)  # residual connection
        return y
class Net(paddle.nn.Layer):
    """Encoder-decoder segmentation network: stem conv, Encoder stack,
    Decoder stack, and a final 3x3 conv producing per-pixel class logits."""

    def __init__(self, num_classes):
        super(Net, self).__init__()
        self.conv_1 = paddle.nn.Conv2D(3, 32, kernel_size=3, stride=2, padding='same')
        self.bn = paddle.nn.BatchNorm2D(32)
        self.relu = paddle.nn.ReLU()
        in_channels = 32
        self.encoders = []
        self.encoder_list = [64, 128]
        self.decoder_list = [128, 64, 32]
        # self.encoder_list = [64, 128]
        # self.decoder_list = [128, 64, 32]
        # Create the encoder sub-layers in a loop; add_sublayer registers each
        # one so its parameters are tracked even though the holding attribute
        # is a plain Python list.
        for out_channels in self.encoder_list:
            block = self.add_sublayer('encoder_{}'.format(out_channels),Encoder(in_channels, out_channels))
            self.encoders.append(block)
            in_channels = out_channels
        self.decoders = []
        # Same pattern for the decoder sub-layers.
        for out_channels in self.decoder_list:
            block = self.add_sublayer('decoder_{}'.format(out_channels), Decoder(in_channels, out_channels))
            self.decoders.append(block)
            in_channels = out_channels
        self.output_conv = paddle.nn.Conv2D(in_channels, num_classes, kernel_size=3, padding='same')

    def forward(self, inputs):
        y = self.conv_1(inputs)
        y = self.bn(y)
        y = self.relu(y)
        for encoder in self.encoders:
            y = encoder(y)
        for decoder in self.decoders:
            y = decoder(y)
        y = self.output_conv(y)  # raw logits; the loss applies softmax over axis 1
        # y = nn.MaxPool2D(2, 2)
        return y
# +
import paddle
# from unet import DoveNet
class SaveBestModel(paddle.callbacks.Callback):
    """Callback that saves the model whenever the eval loss improves on the
    best (lowest) value seen so far.

    Args:
        target: initial loss threshold; only losses below it trigger a save.
        path: prefix passed to model.save().
        verbose: kept for API compatibility; now stored (it was silently
            dropped before).
    """
    def __init__(self, target=0.5, path='./best_model', verbose=0):
        self.target = target
        self.epoch = None
        self.path = path
        self.verbose = verbose  # fix: parameter was accepted but never stored

    def on_epoch_end(self, epoch, logs=None):
        self.epoch = epoch

    def on_eval_end(self, logs=None):
        # Guard against a missing/empty logs dict instead of raising TypeError.
        loss = (logs or {}).get('loss')
        if loss and loss[0] < self.target:
            self.target = loss[0]
            self.model.save(self.path)
            print('best model is loss {} at epoch {}'.format(self.target, self.epoch))
# Training-loop plumbing: VisualDL logging + best-model checkpointing.
callback_visualdl = paddle.callbacks.VisualDL(log_dir='unet')
callback_savebestmodel = SaveBestModel(target=1, path='unet')
callbacks = [callback_visualdl, callback_savebestmodel]
train_dataset = Reader(image_path, is_val=False)
# Validation split instance
val_dataset = Reader(image_path, is_val=True)
train_loader = paddle.io.DataLoader(
    train_dataset,
    batch_size=32,
    shuffle=True,
    drop_last=False)
eval_loader = paddle.io.DataLoader(
    val_dataset,
    batch_size=32,
    shuffle=True,
    drop_last=False)
num_classes = 2
IMAGE_SIZE = (256, 256)
network = Net(num_classes)
model = paddle.Model(network)
# Momentum optimiser
optim = paddle.optimizer.Momentum(learning_rate=0.001,
                                  momentum=0.9,
                                  parameters=model.parameters())
# model.prepare(optim, paddle.nn.CrossEntropyLoss(axis=1))
# axis=1: the class dimension of the NCHW logits.
model.prepare(optim, paddle.nn.CrossEntropyLoss(axis=1))
# -
model.summary((-1, 3,) + IMAGE_SIZE)
# +
# Train for 20 epochs, evaluating on eval_loader each epoch.
model.fit(train_loader,
          eval_loader,
          epochs=20,
          callbacks=callbacks,
          verbose=1)
# model.fit(train_loader)
# model.evaluate(eval_loader,
#                verbose=1)
# model.predict(eval_loader)
# -
# ## 预测数据集准备
# Rebuild the network and load the best checkpoint saved during training.
num_classes = 2
IMAGE_SIZE = (256, 256)
network_new = Net(num_classes)
state_dict = paddle.load('unet.pdparams')
network_new.set_state_dict(state_dict)
model_new = paddle.Model(network_new)
# +
# Predict over the validation split (the last 20% of the paths).
predict_dataset = Reader(image_path, is_val=True)
# Fix: shuffle must be False here — the visualisation cell indexes
# predict_results by position and pairs it with an unshuffled file-path
# list, so a shuffled loader displayed predictions for the wrong slices.
test_loader = paddle.io.DataLoader(
    predict_dataset,
    batch_size=32,
    shuffle=False,
    drop_last=False)
# test_loader = paddle.io.DataLoader(predict_dataset, places=paddle.CUDAPlace(0), batch_size= 32)
model_new.prepare(paddle.nn.CrossEntropyLoss(axis=1))
predict_results = model_new.predict(test_loader)
# -
import numpy as np
import matplotlib.pyplot as plt
from paddle.vision.transforms import transforms as T
from PIL import Image as PilImage
# predict_dataset[1][0], predict_dataset[1][1]
image_path = glob.glob('work/kaggle_3m/*/*[0-9].tif')
# Fix: Reader(is_val=True) serves the LAST 20% of the paths, but this slice
# took the FIRST 80% (the training portion), so the displayed images did not
# correspond to predict_results. Take the same validation slice instead.
predict_set = image_path[-int(len(image_path)*0.2):]
# print(predict_samples)
predict_label = [path.replace('.tif', '_mask.tif')
                 for path in predict_set]
# +
# Visualise three examples: input slice, ground-truth mask, predicted mask.
idx = 0
for id in range(3):
    i = id+20  # offset into the prediction set
    image_path = predict_set[i]
    image_label = predict_label[i]
    resize_t = T.Compose([
        T.Resize(IMAGE_SIZE)
    ])
    image = resize_t(PilImage.open(image_path))
    label = resize_t(PilImage.open(image_label))
    image = np.array(image).astype('uint8')
    label = np.array(label).astype('uint8')
    plt.subplot(3, 3, idx + 1)
    plt.imshow(image)
    plt.title('Input Image')
    plt.axis("off")
    plt.subplot(3, 3, idx + 2)
    plt.imshow(label, cmap='gray')
    plt.title('Label')
    plt.axis("off")
    # CHW logits -> HWC, then argmax over the channel axis gives the
    # per-pixel predicted class.
    data = predict_results[0][0][i].transpose((1, 2, 0))
    mask = np.argmax(data, axis=-1)
    plt.subplot(3, 3, idx + 3)
    plt.imshow(mask.astype('uint8'), cmap='gray')
    plt.title('Predict')
    plt.axis("off")
    idx += 3
plt.show()
# -
| brain.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 稠密连接网络(DenseNet)
#
# ResNet极大地改变了如何参数化深层网络中函数的观点。
# *稠密连接网络*(DenseNet) :cite:`Huang.Liu.Van-Der-Maaten.ea.2017`在某种程度上是ResNet的逻辑扩展。让我们先从数学上了解一下。
#
# ## 从ResNet到DenseNet
#
# 回想一下任意函数的泰勒展开式(Taylor expansion),它把这个函数分解成越来越高阶的项。在$x$接近0时,
#
# $$f(x) = f(0) + f'(0) x + \frac{f''(0)}{2!} x^2 + \frac{f'''(0)}{3!} x^3 + \ldots.$$
#
# 同样,ResNet将函数展开为
#
# $$f(\mathbf{x}) = \mathbf{x} + g(\mathbf{x}).$$
#
# 也就是说,ResNet将$f$分解为两部分:一个简单的线性项和一个复杂的非线性项。
# 那么再向前拓展一步,如果我们想将$f$拓展成超过两部分的信息呢?
# 一种方案便是DenseNet。
#
# 
# :label:`fig_densenet_block`
#
# 如 :numref:`fig_densenet_block`所示,ResNet和DenseNet的关键区别在于,DenseNet输出是*连接*(用图中的$[,]$表示)而不是如ResNet的简单相加。
# 因此,在应用越来越复杂的函数序列后,我们执行从$\mathbf{x}$到其展开式的映射:
#
# $$\mathbf{x} \to \left[
# \mathbf{x},
# f_1(\mathbf{x}),
# f_2([\mathbf{x}, f_1(\mathbf{x})]), f_3([\mathbf{x}, f_1(\mathbf{x}), f_2([\mathbf{x}, f_1(\mathbf{x})])]), \ldots\right].$$
#
# 最后,将这些展开式结合到多层感知机中,再次减少特征的数量。
# 实现起来非常简单:我们不需要添加术语,而是将它们连接起来。
# DenseNet这个名字由变量之间的“稠密连接”而得来,最后一层与之前的所有层紧密相连。
# 稠密连接如 :numref:`fig_densenet`所示。
#
# 
# :label:`fig_densenet`
#
# 稠密网络主要由2部分构成:*稠密块*(dense block)和*过渡层*(transition layer)。
# 前者定义如何连接输入和输出,而后者则控制通道数量,使其不会太复杂。
#
# ## (**稠密块体**)
#
# DenseNet使用了ResNet改良版的“批量规范化、激活和卷积”架构(参见 :numref:`sec_resnet`中的练习)。
# 我们首先实现一下这个架构。
#
# + origin_pos=1 tab=["mxnet"]
from mxnet import np, npx
from mxnet.gluon import nn
from d2l import mxnet as d2l

npx.set_np()  # switch MXNet to the NumPy-compatible array interface
def conv_block(num_channels):
    """BN -> ReLU -> 3x3 same-padding conv, the unit repeated inside a dense block."""
    blk = nn.Sequential()
    blk.add(nn.BatchNorm())
    blk.add(nn.Activation('relu'))
    blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1))
    return blk
# + [markdown] origin_pos=4
# 一个*稠密块*由多个卷积块组成,每个卷积块使用相同数量的输出通道。
# 然而,在前向传播中,我们将每个卷积块的输入和输出在通道维上连结。
#
# + origin_pos=5 tab=["mxnet"]
class DenseBlock(nn.Block):
    """A stack of conv blocks whose outputs are concatenated with their inputs
    along the channel axis (the DenseNet connectivity pattern)."""

    def __init__(self, num_convs, num_channels, **kwargs):
        super().__init__(**kwargs)
        self.net = nn.Sequential()
        for _ in range(num_convs):
            self.net.add(conv_block(num_channels))

    def forward(self, X):
        for layer in self.net:
            out = layer(X)
            # Concatenate each block's input and output on the channel axis.
            X = np.concatenate((X, out), axis=1)
        return X
# + [markdown] origin_pos=8
# 在下面的例子中,我们[**定义一个**]有2个输出通道数为10的(**`DenseBlock`**)。
# 使用通道数为3的输入时,我们会得到通道数为$3+2\times 10=23$的输出。
# 卷积块的通道数控制了输出通道数相对于输入通道数的增长,因此也被称为*增长率*(growth rate)。
#
# + origin_pos=9 tab=["mxnet"]
# 2 conv blocks of 10 channels: a 3-channel input yields 3 + 2*10 = 23 channels.
blk = DenseBlock(2, 10)
blk.initialize()
X = np.random.uniform(size=(4, 3, 8, 8))
Y = blk(X)
Y.shape
# + [markdown] origin_pos=12
# ## [**过渡层**]
#
# 由于每个稠密块都会带来通道数的增加,使用过多则会过于复杂化模型。
# 而过渡层可以用来控制模型复杂度。
# 它通过$1\times 1$卷积层来减小通道数,并使用步幅为2的平均汇聚层减半高和宽,从而进一步降低模型复杂度。
#
# + origin_pos=13 tab=["mxnet"]
def transition_block(num_channels):
    """BN -> ReLU -> 1x1 conv to shrink channels, then 2x2 avg-pool to halve H/W."""
    blk = nn.Sequential()
    blk.add(nn.BatchNorm())
    blk.add(nn.Activation('relu'))
    blk.add(nn.Conv2D(num_channels, kernel_size=1))
    blk.add(nn.AvgPool2D(pool_size=2, strides=2))
    return blk
# + [markdown] origin_pos=16
# 对上一个例子中稠密块的输出[**使用**]通道数为10的[**过渡层**]。
# 此时输出的通道数减为10,高和宽均减半。
#
# + origin_pos=17 tab=["mxnet"]
# Channels drop to 10; height and width are halved.
blk = transition_block(10)
blk.initialize()
blk(Y).shape
# + [markdown] origin_pos=20
# ## [**DenseNet模型**]
#
# 我们来构造DenseNet模型。DenseNet首先使用同ResNet一样的单卷积层和最大汇聚层。
#
# + origin_pos=21 tab=["mxnet"]
# Stem: the same single conv + max-pool opening as ResNet.
net = nn.Sequential()
net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3),
        nn.BatchNorm(), nn.Activation('relu'),
        nn.MaxPool2D(pool_size=3, strides=2, padding=1))
# + [markdown] origin_pos=24
# 接下来,类似于ResNet使用的4个残差块,DenseNet使用的是4个稠密块。
# 与ResNet类似,我们可以设置每个稠密块使用多少个卷积层。
# 这里我们设成4,从而与 :numref:`sec_resnet`的ResNet-18保持一致。
# 稠密块里的卷积层通道数(即增长率)设为32,所以每个稠密块将增加128个通道。
#
# 在每个模块之间,ResNet通过步幅为2的残差块减小高和宽,DenseNet则使用过渡层来减半高和宽,并减半通道数。
#
# + origin_pos=25 tab=["mxnet"]
# num_channels: the current channel count
num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    net.add(DenseBlock(num_convs, growth_rate))
    # Output channel count of the previous dense block
    num_channels += num_convs * growth_rate
    # Between consecutive dense blocks (not after the last), add a
    # transition layer that halves the channel count
    if i != len(num_convs_in_dense_blocks) - 1:
        num_channels //= 2
        net.add(transition_block(num_channels))
# + [markdown] origin_pos=28
# 与ResNet类似,最后接上全局汇聚层和全连接层来输出结果。
#
# + origin_pos=29 tab=["mxnet"]
# Head: BN + ReLU, global average pooling, and a 10-way dense output.
net.add(nn.BatchNorm(),
        nn.Activation('relu'),
        nn.GlobalAvgPool2D(),
        nn.Dense(10))
# + [markdown] origin_pos=32
# ## [**训练模型**]
#
# 由于这里使用了比较深的网络,本节里我们将输入高和宽从224降到96来简化计算。
#
# + origin_pos=33 tab=["mxnet"]
# Train on Fashion-MNIST; 96x96 inputs keep the deep network affordable.
lr, num_epochs, batch_size = 0.1, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
# + [markdown] origin_pos=34
# ## 小结
#
# * 在跨层连接上,不同于ResNet中将输入与输出相加,稠密连接网络(DenseNet)在通道维上连结输入与输出。
# * DenseNet的主要构建模块是稠密块和过渡层。
# * 在构建DenseNet时,我们需要通过添加过渡层来控制网络的维数,从而再次减少通道的数量。
#
# ## 练习
#
# 1. 为什么我们在过渡层使用平均汇聚层而不是最大汇聚层?
# 1. DenseNet的优点之一是其模型参数比ResNet小。为什么呢?
# 1. DenseNet一个诟病的问题是内存或显存消耗过多。
# 1. 真的是这样吗?可以把输入形状换成$224 \times 224$,来看看实际的显存消耗。
# 1. 你能想出另一种方法来减少显存消耗吗?你需要如何改变框架?
# 1. 实现DenseNet论文 :cite:`Huang.Liu.Van-Der-Maaten.ea.2017`表1所示的不同DenseNet版本。
# 1. 应用DenseNet的思想设计一个基于多层感知机的模型。将其应用于 :numref:`sec_kaggle_house`中的房价预测任务。
#
# + [markdown] origin_pos=35 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/1882)
#
| submodules/resource/d2l-zh/mxnet/chapter_convolutional-modern/densenet.ipynb |