markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
Test that the number of tracks is independent of the track description
|
def compute_target_number_of_tracks(X):
    """Return, for every track (row) of X, the number of tracks in its event.

    Parameters
    ----------
    X : DataFrame-like with a ``group_column`` attribute whose value
        identifies the event a track belongs to (labels need not be
        contiguous integers).

    Returns
    -------
    numpy.ndarray, same length as X: the per-event track multiplicity
    broadcast back onto each track.
    """
    # map arbitrary group labels onto contiguous ids 0..n_groups-1
    ids = numpy.unique(X.group_column, return_inverse=True)[1]
    # Count tracks per event using the contiguous ids.  The original code
    # computed bincount(X.group_column) and indexed it by `ids`, which is
    # only correct when the group labels happen to be exactly 0..n_groups-1.
    number_of_tracks = numpy.bincount(ids)
    target = number_of_tracks[ids]
    return target
from decisiontrain import DecisionTrainRegressor
from rep.estimators import SklearnRegressor
from rep.metaml import FoldingRegressor
# base regressor shared by the data and MC track-multiplicity models
tt_base_reg = DecisionTrainRegressor(learning_rate=0.02, n_estimators=1000,
n_threads=16)
%%time
# 2-fold out-of-fold regression of per-event track multiplicity on real data;
# mask_sw_positive presumably zeroes out negative sWeights -- TODO confirm
tt_data_NT = FoldingRegressor(SklearnRegressor(tt_base_reg), n_folds=2, random_state=321,
features=features)
tt_data_NT.fit(data, compute_target_number_of_tracks(data), sample_weight=data.N_sig_sw.values * mask_sw_positive)
from sklearn.metrics import mean_squared_error
# weighted RMSE of the regressor on data
mean_squared_error(compute_target_number_of_tracks(data), tt_data_NT.predict(data),
sample_weight=data.N_sig_sw.values) ** 0.5
# baseline: weighted RMSE of always predicting the mean multiplicity
mean_squared_error(compute_target_number_of_tracks(data),
[numpy.mean(compute_target_number_of_tracks(data))] * len(data),
sample_weight=data.N_sig_sw.values) ** 0.5
%%time
# same regression on simulated (MC) events
tt_MC_NT = FoldingRegressor(SklearnRegressor(tt_base_reg), n_folds=2, random_state=321,
features=features)
tt_MC_NT.fit(MC, compute_target_number_of_tracks(MC), sample_weight=MC.N_sig_sw.values)
mean_squared_error(compute_target_number_of_tracks(MC),
tt_MC_NT.predict(MC), sample_weight=MC.N_sig_sw.values) ** 0.5
# mean-prediction baseline for MC
mean_squared_error(compute_target_number_of_tracks(MC),
[numpy.mean(compute_target_number_of_tracks(MC))] * len(MC),
sample_weight=MC.N_sig_sw.values) ** 0.5
# five most influential features of the MC multiplicity model
tt_MC_NT.get_feature_importances().sort_values(by='effect')[-5:]
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Define base estimator and B weights, labels
|
# base classifier configuration reused by all tagging models below
tt_base = DecisionTrainClassifier(learning_rate=0.02, n_estimators=1000,
n_threads=16)
# Per-event (B candidate) sign and sWeight: all tracks of one event share
# these values, so the per-group mean recovers the event-level quantity.
B_signs = data['signB'].groupby(data['group_column']).aggregate(numpy.mean)
B_weights = data['N_sig_sw'].groupby(data['group_column']).aggregate(numpy.mean)
B_signs_MC = MC['signB'].groupby(MC['group_column']).aggregate(numpy.mean)
B_weights_MC = MC['N_sig_sw'].groupby(MC['group_column']).aggregate(numpy.mean)
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
B probability computation
|
from scipy.special import logit, expit
def compute_Bprobs(X, track_proba, weights=None, normed_weights=False):
    """Combine per-track tagging probabilities into one B probability per event.

    Per-track logits, signed by the track charge and optionally weighted, are
    summed within each event (``group_column``) and mapped back through the
    logistic function.

    Parameters
    ----------
    X : DataFrame with 'group_column' (event id) and 'signTrack' columns.
    track_proba : array of per-track probabilities; non-finite entries are
        treated as 0.5 (no tagging information).
    weights : optional per-track weights (default: all ones).
    normed_weights : if True, weights are normalized to sum to 1 per event.

    Returns
    -------
    numpy.ndarray with one probability per event, ordered by sorted event id.
    """
    if weights is None:
        weights = numpy.ones(len(X))
    else:
        # work on a copy: the original code divided the caller's array in place
        weights = numpy.array(weights, dtype=float)
    _, data_ids = numpy.unique(X['group_column'], return_inverse=True)
    # Sanitize probabilities on a copy instead of mutating the caller's array.
    # NaN is already non-finite, so the second isnan pass of the original
    # code was redundant; a single mask suffices.
    track_proba = numpy.where(numpy.isfinite(track_proba), track_proba, 0.5)
    if normed_weights:
        weights_per_events = numpy.bincount(data_ids, weights=weights)
        weights /= weights_per_events[data_ids]
    predictions = numpy.bincount(data_ids, weights=logit(track_proba) * X['signTrack'] * weights)
    return expit(predictions)
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Inclusive tagging: training on data
|
# inclusive tagger trained on real data (2-fold, folds split by event)
tt_data = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
%time tt_data.fit(data, data.label, sample_weight=data.N_sig_sw.values * mask_sw_positive)
pass
# event-level ROC AUC of the data-trained tagger, evaluated on MC and on data
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC, compute_Bprobs(MC, tt_data.predict_proba(MC)[:, 1]), sample_weight=B_weights_MC),
roc_auc_score(
B_signs, compute_Bprobs(data, tt_data.predict_proba(data)[:, 1]), sample_weight=B_weights)]})
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Inclusive tagging: training on MC
|
# inclusive tagger trained on simulation (no sWeights needed for MC)
tt_MC = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
%time tt_MC.fit(MC, MC.label)
pass
# event-level ROC AUC of the MC-trained tagger, evaluated on MC and on data
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC, compute_Bprobs(MC, tt_MC.predict_proba(MC)[:, 1]), sample_weight=B_weights_MC),
roc_auc_score(
B_signs, compute_Bprobs(data, tt_MC.predict_proba(data)[:, 1]), sample_weight=B_weights)]})
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
New method
Reweighting with classifier
combine data and MC together to train a classifier
|
# pool data and MC tracks into one sample; label 0 = data, 1 = MC
combined_data_MC = pandas.concat([data, MC])
combined_label = numpy.array([0] * len(data) + [1] * len(MC))
combined_weights_data = data.N_sig_sw.values #/ numpy.bincount(data.group_column)[data.group_column.values]
combined_weights_data_passed = combined_weights_data * mask_sw_positive
combined_weights_MC = MC.N_sig_sw.values# / numpy.bincount(MC.group_column)[MC.group_column.values]
# MC weights rescaled so both classes carry equal total weight
combined_weights = numpy.concatenate([combined_weights_data_passed,
1. * combined_weights_MC / sum(combined_weights_MC) * sum(combined_weights_data_passed)])
# same, but with the unmasked data weights (used only for evaluation)
combined_weights_all = numpy.concatenate([combined_weights_data,
1. * combined_weights_MC / sum(combined_weights_MC) * sum(combined_weights_data)])
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
train classifier to distinguish data and MC
|
%%time
# larger learning rate for the data-vs-MC discriminator
tt_base_large = DecisionTrainClassifier(learning_rate=0.3, n_estimators=1000,
n_threads=20)
tt_data_vs_MC = FoldingGroupClassifier(SklearnClassifier(tt_base_large), n_folds=2, random_state=321,
train_features=features + ['label'], group_feature='group_column')
tt_data_vs_MC.fit(combined_data_MC, combined_label, sample_weight=combined_weights)
# learning curve: weighted AUC after every boosting stage
a = []
for n, p in enumerate(tt_data_vs_MC.staged_predict_proba(combined_data_MC)):
a.append(roc_auc_score(combined_label, p[:, 1], sample_weight=combined_weights))
plot(a)
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
quality
|
# p(track is MC) from the discriminator, for every pooled track
combined_p = tt_data_vs_MC.predict_proba(combined_data_MC)[:, 1]
roc_auc_score(combined_label, combined_p, sample_weight=combined_weights)
# same separation measured with the unmasked data weights
roc_auc_score(combined_label, combined_p, sample_weight=combined_weights_all)
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
calibrate probabilities (due to reweighting rule where probabilities are used)
|
from utils import calibrate_probs, plot_calibration
# calibrate p(track is MC): the reweighting rule below consumes raw
# probabilities, so miscalibration would bias the derived weights
combined_p_calib = calibrate_probs(combined_label, combined_weights, combined_p)[0]
plot_calibration(combined_p, combined_label, weight=combined_weights)
plot_calibration(combined_p_calib, combined_label, weight=combined_weights)
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
compute MC and data track weights
|
# reweight data predicted as data to MC
used_probs = combined_p_calib
data_probs_to_be_MC = used_probs[combined_label == 0]
MC_probs_to_be_MC = used_probs[combined_label == 1]
track_weights_data = numpy.ones(len(data))
# take data with probability to be data
mask_data = data_probs_to_be_MC < 0.5
# odds ratio p/(1-p) down-weights tracks the classifier is confident are data-only
track_weights_data[mask_data] = (data_probs_to_be_MC[mask_data]) / (1 - data_probs_to_be_MC[mask_data])
# reweight MC predicted as MC to data
track_weights_MC = numpy.ones(len(MC))
mask_MC = MC_probs_to_be_MC > 0.5
track_weights_MC[mask_MC] = (1 - MC_probs_to_be_MC[mask_MC]) / (MC_probs_to_be_MC[mask_MC])
# simple approach, reweight only MC
track_weights_only_MC = (1 - MC_probs_to_be_MC) / MC_probs_to_be_MC
# NOTE(review): the per-event weight normalization below is deliberately
# kept commented out in the original notebook.
# data_ids = numpy.unique(data['group_column'], return_inverse=True)[1]
# MC_ids = numpy.unique(MC['group_column'], return_inverse=True)[1]
# # event_weight_data = (numpy.bincount(data_ids, weights=data.N_sig_sw) / numpy.bincount(data_ids))[data_ids]
# # event_weight_MC = (numpy.bincount(MC_ids, weights=MC.N_sig_sw) / numpy.bincount(MC_ids))[MC_ids]
# # normalize weights for tracks in a way that sum w_track = 1 per event
# track_weights_data /= numpy.bincount(data_ids, weights=track_weights_data)[data_ids]
# track_weights_MC /= numpy.bincount(MC_ids, weights=track_weights_MC)[MC_ids]
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
reweighting plotting
|
# calibrated p(MC) distributions for MC and data with the original weights
hist(combined_p_calib[combined_label == 1], label='MC', normed=True, alpha=0.4, bins=60,
weights=combined_weights_MC)
hist(combined_p_calib[combined_label == 0], label='data', normed=True, alpha=0.4, bins=60,
weights=combined_weights_data);
legend(loc='best')
# distributions of the derived track weights themselves
hist(track_weights_MC, normed=True, alpha=0.4, bins=60, label='MC')
hist(track_weights_data, normed=True, alpha=0.4, bins=60, label='RD');
legend(loc='best')
numpy.mean(track_weights_data), numpy.mean(track_weights_MC)
# same p(MC) distributions after applying the track weights: they should agree
hist(combined_p_calib[combined_label == 1], label='MC', normed=True, alpha=0.4, bins=60,
weights=track_weights_MC * MC.N_sig_sw.values)
hist(combined_p_calib[combined_label == 0], label='data', normed=True, alpha=0.4, bins=60,
weights=track_weights_data * data.N_sig_sw.values);
legend(loc='best')
# residual separation after reweighting (0.5 = indistinguishable)
roc_auc_score(combined_label, combined_p_calib,
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values,
track_weights_MC * MC.N_sig_sw.values]))
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Check reweighting rule
train classifier to distinguish data vs MC with provided weights
|
%%time
# Retrain a fresh data-vs-MC classifier on the reweighted samples;
# an AUC near 0.5 would confirm that the reweighting removed the differences.
tt_check = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=433,
train_features=features + ['label'], group_feature='group_column')
tt_check.fit(combined_data_MC, combined_label,
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values * mask_sw_positive,
track_weights_MC * MC.N_sig_sw.values]))
roc_auc_score(combined_label, tt_check.predict_proba(combined_data_MC)[:, 1],
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values * mask_sw_positive,
track_weights_MC * MC.N_sig_sw.values]))
# * sum(track_weights_data * mask_sw_positive) / sum(track_weights_MC)
# same check evaluated with unmasked data weights
roc_auc_score(combined_label, tt_check.predict_proba(combined_data_MC)[:, 1],
sample_weight=numpy.concatenate([track_weights_data * data.N_sig_sw.values,
track_weights_MC * MC.N_sig_sw.values]))
# * sum(track_weights_data) / sum(track_weights_MC)
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Classifier trained on MC
|
# tagging classifier trained on MC with the derived track weights applied
tt_reweighted_MC = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
%time tt_reweighted_MC.fit(MC, MC.label, sample_weight=track_weights_MC * MC.N_sig_sw.values)
pass
# event-level ROC AUC on MC and data, with track weights used in B-prob combination
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_MC.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_MC.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
# NOTE(review): the DataFrame below is an exact duplicate of the one above
# (likely a re-executed notebook cell); results are identical.
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_MC.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_MC.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Classifier trained on data
|
%%time
# tagging classifier trained on real data with the derived track weights applied
tt_reweighted_data = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=321,
train_features=features, group_feature='group_column')
tt_reweighted_data.fit(data, data.label,
sample_weight=track_weights_data * data.N_sig_sw.values * mask_sw_positive)
pass
# event-level ROC AUC on MC and data, with track weights used in B-prob combination
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_data.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_data.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
# NOTE(review): exact duplicate of the DataFrame above (re-executed cell).
pandas.DataFrame({'dataset': ['MC', 'data'],
'quality': [roc_auc_score(
B_signs_MC,
compute_Bprobs(MC, tt_reweighted_data.predict_proba(MC)[:, 1],
weights=track_weights_MC, normed_weights=False),
sample_weight=B_weights_MC),
roc_auc_score(
B_signs,
compute_Bprobs(data, tt_reweighted_data.predict_proba(data)[:, 1],
weights=track_weights_data, normed_weights=False),
sample_weight=B_weights)]})
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
# NOTE(review): this first line references mc_sum_weights_per_event and
# data_sum_weights_per_event before they are defined below -- the notebook
# cells were probably executed in a different order than they were dumped.
numpy.mean(mc_sum_weights_per_event), numpy.mean(data_sum_weights_per_event)
_, data_ids = numpy.unique(data['group_column'], return_inverse=True)
# sum of track weights per event, for MC and data
mc_sum_weights_per_event = numpy.bincount(MC.group_column.values, weights=track_weights_MC)
data_sum_weights_per_event = numpy.bincount(data_ids, weights=track_weights_data)
hist(mc_sum_weights_per_event, bins=60, normed=True, alpha=0.5)
hist(data_sum_weights_per_event, bins=60, normed=True, alpha=0.5, weights=B_weights);
hist(mc_sum_weights_per_event, bins=60, normed=True, alpha=0.5)
hist(data_sum_weights_per_event, bins=60, normed=True, alpha=0.5, weights=B_weights);
# per-event track multiplicity distributions for MC and data
hist(numpy.bincount(MC.group_column), bins=81, normed=True, alpha=0.5, range=(0, 80))
hist(numpy.bincount(data.group_column), bins=81, normed=True, alpha=0.5, range=(0, 80));
# difference of event-level B probabilities: standard vs compensate method
hist(expit(p_tt_mc) - expit(p_data), bins=60, weights=B_weights, normed=True, label='standard approach',
alpha=0.5);
hist(expit(p_data_w_MC) - expit(p_data_w), bins=60, weights=B_weights, normed=True, label='compensate method',
alpha=0.5);
legend()
xlabel('$p_{MC}-p_{data}$')
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
|
Calibration
|
from utils import compute_mistag
# mistag (calibration) curves for the three taggers, percentile bins
bins_perc = [10, 20, 30, 40, 50, 60, 70, 80, 90]
compute_mistag(expit(p_data), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_perc,
uniform=False, label='data')
compute_mistag(expit(p_tt_mc), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_perc,
uniform=False, label='MC')
compute_mistag(expit(p_data_w), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_perc,
uniform=False, label='new')
legend(loc='best')
xlim(0.3, 0.5)
ylim(0.2, 0.5)
# same comparison with uniform (fixed-edge) bins
bins_edg = numpy.linspace(0.3, 0.9, 10)
compute_mistag(expit(p_data), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_edg,
uniform=True, label='data')
compute_mistag(expit(p_tt_mc), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_edg,
uniform=True, label='MC')
compute_mistag(expit(p_data_w), B_signs, B_weights, chosen=numpy.ones(len(B_signs), dtype=bool),
bins=bins_edg,
uniform=True, label='new')
legend(loc='best')
|
experiments_MC_data_reweighting/not_simulated_tracks_removing.ipynb
|
tata-antares/tagging_LHCb
|
apache-2.0
|
Setup
|
# Sample comments used to probe the insult classifier; each entry is meant
# to be one separate comment.  The original list was missing commas after
# two entries, so three strings were silently concatenated (implicit string
# literal concatenation) into a single sample -- restored as seven samples.
samples = [
    "Owen Wilson is the ugliest person I've ever seen, period.",
    "Of the things I don't like, I like bankers the least.",
    "You shouldn't listen to Sam Harris; He's an idiot.",
    "I don't like women.",
    "Alex is worse than James, though both of them are fuckheads.",
    "I just want to tell those guys to go die in a hole.",
    "You're great, but idealists are awful.",
]
insult = Insults()
results = []
# score each sample comment with the trained model
for example in samples:
    results.append(insult.rate_comment(example))
|
insults/exploration/model/non_personal_insults.ipynb
|
thundergolfer/Insults
|
gpl-3.0
|
Exploration
|
import seaborn
# distribution of insult scores over the sample comments, clipped to [0, 1]
seaborn.distplot(results, hist_kws={"range": [0,1]})
|
insults/exploration/model/non_personal_insults.ipynb
|
thundergolfer/Insults
|
gpl-3.0
|
Comparing initial point generation methods
Holger Nahrstaedt 2020
.. currentmodule:: skopt
Bayesian optimization or sequential model-based optimization uses a surrogate
model to model the expensive to evaluate function func. There are several
choices for what kind of surrogate model to use. This notebook compares the
performance of:
Halton sequence,
Hammersly sequence,
Sobol' sequence and
Latin hypercube sampling
as initial points. The purely random point generation is used as
a baseline.
|
print(__doc__)
import numpy as np
# fix the seed so the sampler comparison is reproducible
np.random.seed(123)
import matplotlib.pyplot as plt
|
dev/notebooks/auto_examples/sampler/sampling_comparison.ipynb
|
scikit-optimize/scikit-optimize.github.io
|
bsd-3-clause
|
Note that this can take a few minutes.
|
# compare convergence traces of every initial-point generator against
# the known true minimum of the benchmark function
plot = plot_convergence([("random", dummy_res),
("lhs", lhs_res),
("lhs_maximin", lhs2_res),
("sobol'", sobol_res),
("halton", halton_res),
("hammersly", hammersly_res),
("grid", grid_res)],
true_minimum=true_minimum,
yscale=yscale,
title=title)
plt.show()
|
dev/notebooks/auto_examples/sampler/sampling_comparison.ipynb
|
scikit-optimize/scikit-optimize.github.io
|
bsd-3-clause
|
Reload Parameters and test performance
|
# restore the checkpoint saved at step 3000 and measure test-set accuracy
batchSize=20
with tf.Session() as sess:
saver=tf.train.Saver()
saver.restore(sess=sess,save_path=r".\model_checkpoints\MNIST_CNN-"+str(3000))
acc=0
# accumulate the number of correct predictions over mini-batches
for batch_i in range(int(MNIST.test.num_examples/batchSize)):
x_batch,y_batch=MNIST.test.next_batch(batch_size=batchSize)
pred=sess.run(L4Out,feed_dict={X:x_batch})
acc+=sess.run(tf.reduce_sum(tf.cast(x=tf.equal(tf.argmax(input=pred,axis=1),tf.argmax(input=y_batch,axis=1)),dtype=tf.float32)))
print("Accuracy: {}".format(acc/MNIST.test.num_examples))
|
CNN101/CNN101_Test.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two tf.keras.layers.Dense layers. These are densely connected, or fully connected, neural layers. The first Dense layer has 128 nodes (or neurons). The second (and last) layer returns a logits array with length of 10. Each node contains a score that indicates the current image belongs to one of the 10 classes.
Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's compile step:
Loss function —This measures how accurate the model is during training. You want to minimize this function to "steer" the model in the right direction.
Optimizer —This is how the model is updated based on the data it sees and its loss function.
Metrics —Used to monitor the training and testing steps. The following example uses accuracy, the fraction of the images that are correctly classified.
|
# from_logits=True because the model's final Dense layer emits raw logits
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
|
notebooks/image_models/solutions/5_fashion_mnist_class.ipynb
|
GoogleCloudPlatform/asl-ml-immersion
|
apache-2.0
|
Interruptible optimization runs with checkpoints
Christian Schell, Mai 2018
Reformatted by Holger Nahrstaedt 2020
.. currentmodule:: skopt
Problem statement
Optimization runs can take a very long time and even run for multiple days.
If for some reason the process has to be interrupted results are irreversibly
lost, and the routine has to start over from the beginning.
With the help of the :class:callbacks.CheckpointSaver callback the optimizer's current state
can be saved after each iteration, allowing to restart from that point at any
time.
This is useful, for example,
if you don't know how long the process will take and cannot hog computational resources forever
if there might be system failures due to shaky infrastructure (or colleagues...)
if you want to adjust some parameters and continue with the already obtained results
|
print(__doc__)
import sys
import numpy as np
# fixed seed for reproducibility
np.random.seed(777)
import os
# The following are hacks to allow sphinx-gallery to run the example.
sys.path.insert(0, os.getcwd())
main_dir = os.path.basename(sys.modules['__main__'].__file__)
IS_RUN_WITH_SPHINX_GALLERY = main_dir != os.getcwd()
|
0.7/notebooks/auto_examples/interruptible-optimization.ipynb
|
scikit-optimize/scikit-optimize.github.io
|
bsd-3-clause
|
Simple example
We will use pretty much the same optimization problem as in the
sphx_glr_auto_examples_bayesian-optimization.py
notebook. Additionally we will instantiate the :class:callbacks.CheckpointSaver
and pass it to the minimizer:
|
from skopt import gp_minimize
from skopt import callbacks
from skopt.callbacks import CheckpointSaver
noise_level = 0.1
if IS_RUN_WITH_SPHINX_GALLERY:
# When this example is run with sphinx gallery, it breaks the pickling
# capacity for multiprocessing backend so we have to modify the way we
# define our functions. This has nothing to do with the example.
from utils import obj_fun
else:
# noisy 1-D objective used throughout the skopt examples
def obj_fun(x, noise_level=noise_level):
return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) + np.random.randn() * noise_level
# persist the optimizer state after every iteration so the run can resume
checkpoint_saver = CheckpointSaver("./checkpoint.pkl", compress=9) # keyword arguments will be passed to `skopt.dump`
gp_minimize(obj_fun, # the function to minimize
[(-20.0, 20.0)], # the bounds on each dimension of x
x0=[-20.], # the starting point
acq_func="LCB", # the acquisition function (optional)
n_calls=10, # the number of evaluations of f including at x0
n_random_starts=0, # the number of random initialization points
callback=[checkpoint_saver], # a list of callbacks including the checkpoint saver
random_state=777);
|
0.7/notebooks/auto_examples/interruptible-optimization.ipynb
|
scikit-optimize/scikit-optimize.github.io
|
bsd-3-clause
|
Generated some initial 2D data:
|
# training hyperparameters
learning_rate = 0.01
training_epochs = 1000
num_labels = 3
batch_size = 100
# three 2-D Gaussian blobs of 100 points each, one per class
x1_label0 = np.random.normal(1, 1, (100, 1))
x2_label0 = np.random.normal(1, 1, (100, 1))
x1_label1 = np.random.normal(5, 1, (100, 1))
x2_label1 = np.random.normal(4, 1, (100, 1))
x1_label2 = np.random.normal(8, 1, (100, 1))
x2_label2 = np.random.normal(0, 1, (100, 1))
# visualize the three classes
plt.scatter(x1_label0, x2_label0, c='r', marker='o', s=60)
plt.scatter(x1_label1, x2_label1, c='g', marker='x', s=60)
plt.scatter(x1_label2, x2_label2, c='b', marker='_', s=60)
plt.show()
|
ch04_classification/Concept04_softmax.ipynb
|
BinRoot/TensorFlow-Book
|
mit
|
Define the labels and shuffle the data:
|
# stack coordinates into (100, 2) per class, then a (300, 2) design matrix
xs_label0 = np.hstack((x1_label0, x2_label0))
xs_label1 = np.hstack((x1_label1, x2_label1))
xs_label2 = np.hstack((x1_label2, x2_label2))
xs = np.vstack((xs_label0, xs_label1, xs_label2))
# one-hot labels matching the stacking order of xs
labels = np.matrix([[1., 0., 0.]] * len(x1_label0) + [[0., 1., 0.]] * len(x1_label1) + [[0., 0., 1.]] * len(x1_label2))
# shuffle features and labels with the same permutation
arr = np.arange(xs.shape[0])
np.random.shuffle(arr)
xs = xs[arr, :]
labels = labels[arr, :]
|
ch04_classification/Concept04_softmax.ipynb
|
BinRoot/TensorFlow-Book
|
mit
|
We'll get back to this later, but the following are test inputs that we'll use to evaluate the model:
|
# held-out test points drawn from the same three Gaussians (10 per class);
# restyling avoided here because the exact sequence of np.random calls
# determines the values produced
test_x1_label0 = np.random.normal(1, 1, (10, 1))
test_x2_label0 = np.random.normal(1, 1, (10, 1))
test_x1_label1 = np.random.normal(5, 1, (10, 1))
test_x2_label1 = np.random.normal(4, 1, (10, 1))
test_x1_label2 = np.random.normal(8, 1, (10, 1))
test_x2_label2 = np.random.normal(0, 1, (10, 1))
test_xs_label0 = np.hstack((test_x1_label0, test_x2_label0))
test_xs_label1 = np.hstack((test_x1_label1, test_x2_label1))
test_xs_label2 = np.hstack((test_x1_label2, test_x2_label2))
# (30, 2) test matrix and matching one-hot labels
test_xs = np.vstack((test_xs_label0, test_xs_label1, test_xs_label2))
test_labels = np.matrix([[1., 0., 0.]] * 10 + [[0., 1., 0.]] * 10 + [[0., 0., 1.]] * 10)
|
ch04_classification/Concept04_softmax.ipynb
|
BinRoot/TensorFlow-Book
|
mit
|
Again, define the placeholders, variables, model, and cost function:
|
train_size, num_features = xs.shape
# placeholders for a mini-batch of features and one-hot labels
X = tf.placeholder("float", shape=[None, num_features])
Y = tf.placeholder("float", shape=[None, num_labels])
W = tf.Variable(tf.zeros([num_features, num_labels]))
b = tf.Variable(tf.zeros([num_labels]))
# softmax regression model with cross-entropy cost
y_model = tf.nn.softmax(tf.matmul(X, W) + b)
cost = -tf.reduce_sum(Y * tf.log(y_model))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# fraction of predictions whose argmax matches the label's argmax
correct_prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
|
ch04_classification/Concept04_softmax.ipynb
|
BinRoot/TensorFlow-Book
|
mit
|
Train the softmax classification model:
|
# mini-batch gradient descent over the shuffled training data
with tf.Session() as sess:
tf.global_variables_initializer().run()
for step in range(training_epochs * train_size // batch_size):
# cycle through the data in fixed-size slices
offset = (step * batch_size) % train_size
batch_xs = xs[offset:(offset + batch_size), :]
batch_labels = labels[offset:(offset + batch_size)]
err, _ = sess.run([cost, train_op], feed_dict={X: batch_xs, Y: batch_labels})
if step % 100 == 0:
print (step, err)
# learned parameters and held-out accuracy
W_val = sess.run(W)
print('w', W_val)
b_val = sess.run(b)
print('b', b_val)
print("accuracy", accuracy.eval(feed_dict={X: test_xs, Y: test_labels}))
|
ch04_classification/Concept04_softmax.ipynb
|
BinRoot/TensorFlow-Book
|
mit
|
Define AOI
Define the AOI as a geojson polygon. This can be done at geojson.io. If you use geojson.io, only copy the single aoi feature, not the entire feature collection.
|
# AOI as a GeoJSON Feature: a lon/lat polygon plus leaflet display styling
aoi = {u'geometry': {u'type': u'Polygon', u'coordinates': [[[-121.3113248348236, 38.28911976564886], [-121.3113248348236, 38.34622533958], [-121.2344205379486, 38.34622533958], [-121.2344205379486, 38.28911976564886], [-121.3113248348236, 38.28911976564886]]]}, u'type': u'Feature', u'properties': {u'style': {u'opacity': 0.5, u'fillOpacity': 0.2, u'noClip': False, u'weight': 4, u'color': u'blue', u'lineCap': None, u'dashArray': None, u'smoothFactor': 1, u'stroke': True, u'fillColor': None, u'clickable': True, u'lineJoin': None, u'fill': True}}}
# quick serialization check
json.dumps(aoi)
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Build Request
Build the Planet API Filter request for the Landsat 8 and PS Orthotile imagery taken in 2017 through August 23.
|
# define the date range for imagery
start_date = datetime.datetime(year=2017,month=1,day=1)
stop_date = datetime.datetime(year=2017,month=8,day=23)
# filters.build_search_request() item types:
# Landsat 8 - 'Landsat8L1G'
# Sentinel - 'Sentinel2L1C'
# PS Orthotile = 'PSOrthoTile'
def build_landsat_request(aoi_geom, start_date, stop_date):
"""Search request for Landsat 8 scenes intersecting the AOI in the date range."""
query = filters.and_filter(
filters.geom_filter(aoi_geom),
filters.range_filter('cloud_cover', lt=5),
# ensure has all assets, unfortunately also filters 'L1TP'
# filters.string_filter('quality_category', 'standard'),
filters.range_filter('sun_elevation', gt=0), # filter out Landsat night scenes
filters.date_range('acquired', gt=start_date),
filters.date_range('acquired', lt=stop_date)
)
return filters.build_search_request(query, ['Landsat8L1G'])
def build_ps_request(aoi_geom, start_date, stop_date):
"""Search request for PS Orthotiles; note cloud_cover here is a 0-1 fraction."""
query = filters.and_filter(
filters.geom_filter(aoi_geom),
filters.range_filter('cloud_cover', lt=0.05),
filters.date_range('acquired', gt=start_date),
filters.date_range('acquired', lt=stop_date)
)
return filters.build_search_request(query, ['PSOrthoTile'])
print(build_landsat_request(aoi['geometry'], start_date, stop_date))
print(build_ps_request(aoi['geometry'], start_date, stop_date))
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Search Planet API
The client is how we interact with the planet api. It is created with the user-specific api key, which is pulled from $PL_API_KEY environment variable. Create the client then use it to search for PS Orthotile and Landsat 8 scenes. Save a subset of the metadata provided by Planet API as our 'scene'.
|
def get_api_key():
"""Read the Planet API key from the PL_API_KEY environment variable."""
return os.environ['PL_API_KEY']
# quick check that key is defined
assert get_api_key(), "PL_API_KEY not defined."
def create_client():
"""Planet API v1 client authenticated with the user's key."""
return api.ClientV1(api_key=get_api_key())
def search_pl_api(request, limit=500):
"""Run a quick search and yield up to *limit* matching items."""
client = create_client()
result = client.quick_search(request)
# note that this returns a generator
return result.items_iter(limit=limit)
items = list(search_pl_api(build_ps_request(aoi['geometry'], start_date, stop_date)))
print(len(items))
# uncomment below to see entire metadata for a PS orthotile
# print(json.dumps(items[0], indent=4))
del items
items = list(search_pl_api(build_landsat_request(aoi['geometry'], start_date, stop_date)))
print(len(items))
# uncomment below to see entire metadata for a landsat scene
# print(json.dumps(items[0], indent=4))
del items
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
In processing the items to scenes, we are only using a small subset of the product metadata.
|
def items_to_scenes(items):
    """Flatten Planet API items into a scene DataFrame indexed by acquisition time.

    Parameters
    ----------
    items : iterable of Planet API item dicts (must provide 'properties',
        '_links.thumbnail', 'id' and 'geometry').

    Returns
    -------
    pandas.DataFrame with one row per item, all item properties as columns
    plus 'thumbnail', 'item_type', 'id' and 'footprint', sorted by and
    indexed on the parsed 'acquired' timestamp.
    """
    def _get_props(item):
        # copy so we do not mutate the caller's item dicts (the original
        # code updated item['properties'] in place); unused item_types
        # accumulator removed
        props = dict(item['properties'])
        props.update({
            'thumbnail': item['_links']['thumbnail'],
            'item_type': item['properties']['item_type'],
            'id': item['id'],
            'acquired': item['properties']['acquired'],
            'footprint': item['geometry']
        })
        return props
    scenes = pd.DataFrame(data=[_get_props(i) for i in items])
    # acquired column to index, it is unique and will be used a lot for processing
    scenes.index = pd.to_datetime(scenes['acquired'])
    del scenes['acquired']
    scenes.sort_index(inplace=True)
    return scenes
# sanity check: fetch Landsat scenes and show the first thumbnail URL
scenes = items_to_scenes(search_pl_api(build_landsat_request(aoi['geometry'],
start_date, stop_date)))
# display(scenes[:1])
print(scenes.thumbnail.tolist()[0])
del scenes
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Investigate Landsat Scenes
There are quite a few Landsat 8 scenes that are returned by our query. What do the footprints look like relative to our AOI and what is the collection time of the scenes?
|
# fetch all matching Landsat scenes for the AOI and date range
landsat_scenes = items_to_scenes(search_pl_api(build_landsat_request(aoi['geometry'],
start_date, stop_date)))
# How many Landsat 8 scenes match the query?
print(len(landsat_scenes))
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Show Landsat 8 Footprints on Map
|
def landsat_scenes_to_features_layer(scenes):
    """Turn scene footprints into GeoJSON Feature dicts for an ipyleaflet layer."""
    # one shared display style for every footprint polygon
    style = {
        'color': 'grey',
        'weight': 1,
        'fillColor': 'grey',
        'fillOpacity': 0.15}
    features = []
    for scene in scenes.itertuples():
        feature = {"geometry": scene.footprint,
                   "type": "Feature",
                   "properties": {"style": style,
                                  "wrs_path": scene.wrs_path,
                                  "wrs_row": scene.wrs_row}}
        features.append(feature)
    return features
def create_landsat_hover_handler(scenes, label):
"""On hover, show the path/row and how many scenes share that path/row."""
def hover_handler(event=None, id=None, properties=None):
wrs_path = properties['wrs_path']
wrs_row = properties['wrs_row']
path_row_query = 'wrs_path=={} and wrs_row=={}'.format(wrs_path, wrs_row)
count = len(scenes.query(path_row_query))
label.value = 'path: {}, row: {}, count: {}'.format(wrs_path, wrs_row, count)
return hover_handler
def create_landsat_feature_layer(scenes, label):
"""GeoJSON footprint layer with a hover handler wired to *label*."""
features = landsat_scenes_to_features_layer(scenes)
# Footprint feature layer
feature_collection = {
"type": "FeatureCollection",
"features": features
}
feature_layer = ipyl.GeoJSON(data=feature_collection)
feature_layer.on_hover(create_landsat_hover_handler(scenes, label))
return feature_layer
# Initialize map using parameters from above map
# and deleting map instance if it exists
try:
del fp_map
except NameError:
pass
# default view if no previous map exists
zoom = 6
center = [38.28993659801203, -120.14648437499999] # lat/lon
# Create map, adding box drawing controls
# Reuse parameters if map already exists
try:
center = fp_map.center
zoom = fp_map.zoom
print(zoom)
print(center)
except NameError:
pass
# Change tile layer to one that makes it easier to see crop features
# Layer selected using https://leaflet-extras.github.io/leaflet-providers/preview/
map_tiles = ipyl.TileLayer(url='http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png')
fp_map = ipyl.Map(
center=center,
zoom=zoom,
default_tiles = map_tiles
)
# label widget updated by the footprint hover handler
label = ipyw.Label(layout=ipyw.Layout(width='100%'))
fp_map.add_layer(create_landsat_feature_layer(landsat_scenes, label)) # landsat layer
fp_map.add_layer(ipyl.GeoJSON(data=aoi)) # aoi layer
# Display map and label
ipyw.VBox([fp_map, label])
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
This AOI is located in a region covered by 3 different path/row tiles. This means there is 3x the coverage than in regions only covered by one path/row tile. This is particularly lucky!
What about coverage within each path/row tile? How long and how consistent is the Landsat 8 collect period for each path/row?
|
def time_diff_stats(group):
    """Summarize the gaps between consecutive timestamps in *group*'s index.

    Returns a pandas Series with the median/mean/std/count/min/max of the
    index-to-index time differences (NaT for the first row is ignored by
    count()).
    """
    deltas = group.index.to_series().diff()  # gap between successive rows
    stat_names = ['median', 'mean', 'std', 'count', 'min', 'max']
    summary = {name: getattr(deltas, name)() for name in stat_names}
    return pd.Series(summary)
landsat_scenes.groupby(['wrs_path', 'wrs_row']).apply(time_diff_stats)
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
It looks like the collection period is 16 days, which lines up with the Landsat 8 mission description.
path/row 43/33 is missing one image which causes an unusually long collect period.
What this means is that we don't need to look at every Landsat 8 scene collect time to find crossovers with Planet scenes. We could look at the first scene for each path/row, then look at every 16 day increment. However, we will need to account for dropped Landsat 8 scenes in some way.
What is the time difference between the tiles?
|
def find_closest(date_time, data_frame):
    """Locate the row of *data_frame* whose index is nearest to *date_time*.

    Returns a tuple of (positional index, absolute time delta).
    Inspired by:
    https://stackoverflow.com/questions/36933725/pandas-time-series-join-by-closest-time
    """
    # Absolute distance of every index entry from the target, re-indexed
    # positionally so idxmin() yields an integer position.
    gaps = (data_frame.index - date_time).to_series()
    gaps = gaps.reset_index(drop=True).abs()
    best_pos = gaps.idxmin()
    return best_pos, gaps[best_pos]
def closest_time(group):
'''group: data frame with acquisition time as index'''
# Fixed reference date; NOTE(review): presumably chosen inside the study
# period — confirm against start_date/stop_date.
inquiry_date = datetime.datetime(year=2017,month=3,day=7)
idx, _ = find_closest(inquiry_date, group)
return group.index.to_series().iloc[idx]
# for accurate results, we look at the closest time for each path/row tile to a given time
# using just the first entry could result in a longer time gap between collects due to
# the timing of the first entries
landsat_scenes.groupby(['wrs_path', 'wrs_row']).apply(closest_time)
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
So the tiles that are in the same path are very close (24sec) together from the same day. Therefore, we would want to only use one tile and pick the best image.
Tiles that are in different paths are 7 days apart. Therefore, we want to keep tiles from different paths, as they represent unique crossovers.
Investigate PS Orthotiles
There are also quite a few PS Orthotiles that match our query. Some of those scenes may not have much overlap with our AOI. We will want to filter those out. Also, we are interested in knowing how many unique days of coverage we have, so we will group PS Orthotiles by collect day, since we may have days with more than one collect (due to multiple PS satellites collecting imagery).
|
# Fetch every PS Orthotile matching the AOI/date query from the Planet API.
all_ps_scenes = items_to_scenes(search_pl_api(build_ps_request(aoi['geometry'], start_date, stop_date)))
# How many PS scenes match query?
print(len(all_ps_scenes))
all_ps_scenes[:1]
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
What about overlap? We really only want images that overlap over 20% of the AOI.
Note: we do this calculation in WGS84, the geographic coordinate system supported by geojson. The calculation of coverage expects that the geometries entered are 2D, which WGS84 is not. This will cause a small inaccuracy in the coverage area calculation, but not enough to bother us here.
|
def aoi_overlap_percent(footprint, aoi):
    """Return the fraction of the AOI's area covered by *footprint*.

    Both arguments are GeoJSON-like; geometry math is done in WGS84, which
    introduces a small (acceptable) inaccuracy in the area ratio.
    """
    aoi_poly = sgeom.shape(aoi['geometry'])
    scene_poly = sgeom.shape(footprint)
    covered = aoi_poly.intersection(scene_poly)
    return covered.area / aoi_poly.area
# Compute per-scene AOI coverage and keep scenes overlapping > 20% of the AOI.
overlap_percent = all_ps_scenes.footprint.apply(aoi_overlap_percent, args=(aoi,))
all_ps_scenes = all_ps_scenes.assign(overlap_percent = overlap_percent)
all_ps_scenes.head()
print(len(all_ps_scenes))
ps_scenes = all_ps_scenes[all_ps_scenes.overlap_percent > 0.20]
print(len(ps_scenes))
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Ideally, PS scenes have daily coverage over all regions. How many days have PS coverage and how many PS scenes were taken on the same day?
|
# ps_scenes.index.to_series().head()
# ps_scenes.filter(items=['id']).groupby(pd.Grouper(freq='D')).agg('count')
# Use PS acquisition year, month, and day as index and group by those indices
# https://stackoverflow.com/questions/14646336/pandas-grouping-intra-day-timeseries-by-date
daily_ps_scenes = ps_scenes.index.to_series().groupby([ps_scenes.index.year,
ps_scenes.index.month,
ps_scenes.index.day])
daily_count = daily_ps_scenes.agg('count')
daily_count.index.names = ['y', 'm', 'd']
# How many days is the count greater than 1?
daily_multiple_count = daily_count[daily_count > 1]
print('Out of {} days of coverage, {} days have multiple collects.'.format( \
len(daily_count), len(daily_multiple_count)))
daily_multiple_count.head()
# For each day, list the acquisition times alongside the collect count.
def scenes_and_count(group):
entry = {'count': len(group),
'acquisition_time': group.index.tolist()}
return pd.DataFrame(entry)
daily_count_and_scenes = daily_ps_scenes.apply(scenes_and_count)
# need to rename indices because right now multiple are called 'acquired', which
# causes a bug when we try to run the query
daily_count_and_scenes.index.names = ['y', 'm', 'd', 'num']
multiplecoverage = daily_count_and_scenes.query('count > 1')
multiplecoverage.query('m == 7') # look at just occurrence in July
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Looks like the multiple collects on the same day are just a few minutes apart. They are likely crossovers between different PS satellites. Cool! Since we only want to use one PS image for a crossover, we will choose the best collect for days with multiple collects.
Find Crossovers
Now that we have the PS Orthotiles filtered to what we want and have investigated the Landsat 8 scenes, let's look for crossovers between the two.
First we find concurrent crossovers, PS and Landsat collects that occur within 1hour of each other.
|
def find_crossovers(acquired_time, landsat_scenes):
'''landsat_scenes: pandas dataframe with acquisition time as index'''
# Nearest Landsat acquisition (positional index + absolute time delta).
closest_idx, closest_delta = find_closest(acquired_time, landsat_scenes)
closest_landsat = landsat_scenes.iloc[closest_idx]
crossover = {'landsat_acquisition': closest_landsat.name,
'delta': closest_delta}
return pd.Series(crossover)
# fetch PS scenes
ps_scenes = items_to_scenes(search_pl_api(build_ps_request(aoi['geometry'],
start_date, stop_date)))
# for each PS scene, find the closest Landsat scene
crossovers = ps_scenes.index.to_series().apply(find_crossovers, args=(landsat_scenes,))
# filter to crossovers within 1hr
concurrent_crossovers = crossovers[crossovers['delta'] < pd.Timedelta('1 hours')]
print(len(concurrent_crossovers))
concurrent_crossovers
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Now that we have the crossovers, what we are really interested in is the IDs of the landsat and PS scenes, as well as how much they overlap the AOI.
|
def get_crossover_info(crossovers, aoi):
"""For each crossover, collect scene ids, thumbnails and the AOI coverage of the PS/Landsat footprint intersection. Reads module-level landsat_scenes and ps_scenes."""
def get_scene_info(acquisition_time, scenes):
scene = scenes.loc[acquisition_time]
scene_info = {'id': scene.id,
'thumbnail': scene.thumbnail,
# we are going to use the footprints as shapes so convert to shapes now
'footprint': sgeom.shape(scene.footprint)}
return pd.Series(scene_info)
landsat_info = crossovers.landsat_acquisition.apply(get_scene_info, args=(landsat_scenes,))
ps_info = crossovers.index.to_series().apply(get_scene_info, args=(ps_scenes,))
footprint_info = pd.DataFrame({'landsat': landsat_info.footprint,
'ps': ps_info.footprint})
# Geometric intersection of each PS/Landsat footprint pair.
overlaps = footprint_info.apply(lambda x: x.landsat.intersection(x.ps),
axis=1)
aoi_shape = sgeom.shape(aoi['geometry'])
# Fraction of the AOI covered by each overlap polygon.
overlap_percent = overlaps.apply(lambda x: x.intersection(aoi_shape).area / aoi_shape.area)
crossover_info = pd.DataFrame({'overlap': overlaps,
'overlap_percent': overlap_percent,
'ps_id': ps_info.id,
'ps_thumbnail': ps_info.thumbnail,
'landsat_id': landsat_info.id,
'landsat_thumbnail': landsat_info.thumbnail})
return crossover_info
crossover_info = get_crossover_info(concurrent_crossovers, aoi)
print(len(crossover_info))
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Next, we filter to overlaps that cover a significant portion of the AOI.
|
# Keep only crossovers whose footprint overlap covers >90% of the AOI.
significant_crossovers_info = crossover_info[crossover_info.overlap_percent > 0.9]
print(len(significant_crossovers_info))
significant_crossovers_info
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Browsing through the crossovers, we see that in some instances, multiple crossovers take place on the same day. Really, we are interested in 'unique crossovers', that is, crossovers that take place on unique days. Therefore, we will look at the concurrent crossovers by day.
|
def group_by_day(data_frame):
    """Group *data_frame* by the (year, month, day) of its DatetimeIndex."""
    idx = data_frame.index
    day_keys = [idx.year, idx.month, idx.day]
    return data_frame.groupby(day_keys)
# Count crossovers per calendar day to find the unique crossover days.
unique_crossover_days = group_by_day(significant_crossovers_info.index.to_series()).count()
print(len(unique_crossover_days))
print(unique_crossover_days)
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
There are 6 unique crossovers between Landsat 8 and PS that cover over 90% of our AOI between January and August in 2017. Not bad! That is definitely enough to perform comparison.
Display Crossovers
Let's take a quick look at the crossovers we found to make sure that they don't look cloudy, hazy, or have any other quality issues that would affect the comparison.
|
# https://stackoverflow.com/questions/36006136/how-to-display-images-in-a-row-with-ipython-display
def make_html(image):
    """Render *image* (a URL) as an inline <img> HTML snippet."""
    # NOTE(review): template reproduced verbatim, including the missing
    # space before the style attribute — confirm intended.
    template = '<img src="{0}" alt="{0}"style="display:inline;margin:1px"/>'
    return template.format(image)
def display_thumbnails(row):
# Print the crossover timestamp, then show PS and Landsat thumbnails side by side.
print(row.name)
display(HTML(''.join(make_html(t)
for t in (row.ps_thumbnail, row.landsat_thumbnail))))
_ = significant_crossovers_info.apply(display_thumbnails, axis=1)
|
jupyter-notebooks/crossovers/ps_l8_crossovers.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
04.02 统计《择天记》人物出场次数
我们需要收集在择天记中出现的人物姓名,以便统计人物的出场次数。主要人物的姓名抓了《择天记》百度百科中的人物列表,保存成了TXT文本,名为names.txt。
|
# Read the character names, one per line.
with open('names.txt') as f:
names = [name.strip() for name in f.readlines()]
print(names)
# Count how many times each character's name appears in the novel text.
def find_main_charecters(num = 10):
# `cont` is the list of novel lines loaded in an earlier cell.
novel = ''.join(cont)
count = []
for name in names:
count.append([name,novel.count(name)])
# Sort by occurrence count, descending; keep the top `num`.
count.sort(key = lambda v : v[1],reverse=True)
return count[:num]
find_main_charecters()
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
我们使用echarts做数据可视化的工作
|
from IPython.display import HTML
# ECharts bar chart of character appearance counts; the counts below were
# hard-coded from a previous run of find_main_charecters().
chart_header_html = """
<div id="main_charecters" style="width: 800px;height: 600px;" class="chart"></div>
<script>
require.config({
paths:{
echarts: '//cdn.bootcss.com/echarts/3.2.3/echarts.min',
}
});
require(['echarts'],function(ec){
var charectersNames = ['陈长生', '唐三十六', '徐有容', '苏离', '落落', '折袖', '苟寒食', '轩辕破', '魔君', '王破', '商行舟', '周通', '南客', '莫雨', '秋山君', '黑袍', '天海圣后', '王之策', '朱洛', '肖张']
var charectersCount = [15673, 3609, 3262, 1889, 1877, 1656, 1183, 1158, 1146, 1134, 1110, 945, 874, 746, 624, 589, 554, 532, 513, 476]
var charectersCountChart = ec.init(document.getElementById('main_charecters'))
charectersCountChart.setOption({
title:{
text: '择天记人物出场次数统计图'
},
tooltip:{},
xAxis:{
type: 'value',
data: ['出场次数']
},
yAxis:{
type: 'category',
data: charectersNames.reverse()
},
series:{
name: '主要人物',
type: 'bar',
data: charectersCount.reverse()
}
})
});
</script>
"""
# Renders in my local browser, but not on GitHub — replaced with an image there.
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
我们可以清楚的看到,《择天记》的主角为陈长生共出场接近16000次,紧随其后的是唐三十六(男性)和徐有容(有容奶大是女性)。仅从这个简单的数据,我们就可以推测唐三十六是主角陈长生的好基友,徐有容很有可能和陈长生是恋人关系。
另外,我们看到其他出场率比较相似的人物中,女性角色明显不多。我们可以大致推断《择天记》这本小说是一个单女主的小说。更进一步的说,徐有容和陈长生在整部小说中可能都很专情。
出场次数前20的人物中,可以看出一个明显的规律——主要人物的人名都非常奇葩,一看就不是普通人能叫的名字!在现实生活中不可能有人叫唐三十六,折袖,苟寒食,商行舟,南客这样的名字。所以,《择天记》的作者多半是一个很中二的人。
另外还有一个有趣的事情,就是这些人物的姓氏明显都不相同,只有王之策和王破都姓王。这一点我们可以推断,《择天记》中的故事很有可能不是关于家族之间的纷争,或者说着墨不多。我们看到魔君的出场次数达到700多次,这种名字一看就是反派,无一例外。
第一部分的结论
《择天记》男主陈长生,男配唐三十六,女主徐有容
《择天记》是单女主的小说,男主女主在整部小说中可能都很专情。
《择天记》的作者大概率是一个很中二的人
《择天记》中的故事很有可能不是关于家族之间的纷争
《择天记》中的反派BOSS可能叫做魔君
04.03 《择天记》与自然语言处理
统计人物出场次数显然是Python中最最基础的操作,那我们现在来使用更高级的算法来尝试分析《择天记》这本小说。
我们使用gensim库对《择天记》进行 Word2Vec 的操作,这种操作可以将小说中的词映射到向量空间中,从而分析出不同词汇之间的关系。另外值得一提的是,由于中文和英文不同,中文词语之间没有空格,所以我们需要使用Python第三方库结巴分词对文本进行分词。我们为了提高分词的准确性,我们需要将小说中一些专属名词添加到词库中。
中文分词
|
import jieba
# Read domain nouns (sects, cultivation realms, techniques, ...) one per line.
with open('novelitems.txt') as f:
items = [item.strip() for item in f.readlines()]
for i in items[10:20]:
print(i)
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
我们需要将这些名词添加到结巴分词的词库中。
|
# Register names and domain nouns with jieba so they survive tokenisation intact.
for name in names:
jieba.add_word(name)
for item in items:
jieba.add_word(item)
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
我们现在就可以开始使用机器学习来训练模型了。
|
novel_sentences = []
# Tokenise the novel; only the first few lines are used as a sample here.
# for line in cont:
for line in cont[:6]:
words = list(jieba.cut(line))
novel_sentences.append(words)
novel_sentences[4]
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
训练模型
|
# Train a Word2Vec model over the tokenised novel (mostly default parameters).
import gensim

# BUG FIX: the tokenised corpus built above is `novel_sentences`; the original
# passed an undefined name `sentences`, which raised a NameError. The
# `import gensim` line also ran only after gensim was first used — hoisted.
model = gensim.models.Word2Vec(novel_sentences, size=100, window=5, min_count=5, workers=4)
# Persist the trained model for later reuse.
model.save("zetianjied.model")
# Training takes roughly 20 minutes depending on hardware; the model file is
# too large for GitHub, so later runs reload it from disk instead.
model = gensim.models.Word2Vec.load("zetianjied.model")
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
寻找境界体系
首先,让我们看看《择天记》中实力境界的划分。作者在一开始告诉我们有一种境界叫做坐照境。那么,我们就通过上文中用Word2Vec 训练出来的模型找到与坐照类似的词汇。
|
# Find cultivation realms whose embeddings are closest to "坐照".
for s in model.most_similar(positive=["坐照"]):
print(s)
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
择天记中的大人物
找到择天记中和反派魔君实力水平相似的人物
|
# Top-7 characters whose embeddings are closest to the villain "魔君".
for s in model.most_similar(positive=["魔君"])[:7]:
print(s)
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
我们可以看到结果中出现了与魔君实力水平相似的前七个人物,这些人物可以与反派BOSS相提并论,肯定是站在《择天记》实力巅峰的大人物。事实上这些人物在原著中都是从圣境。
择天记中的情侣
训练出来的模型还可以找到具有相似联系的词汇,比如给定情侣关系的两个人物,模型会找到小说中的情侣关系。
我们先来测试一下模型,因为我们知道别样红和无穷碧是在小说中直接描写的情侣,所以我们给定陈长生和徐有容之间的关系,看看程序能否找出和无穷碧有情侣关系的人物。
|
# Analogy sanity check: 徐有容 is to 陈长生 as 无穷碧 is to ... (expect 别样红).
d = model.most_similar(positive=['无穷碧', '陈长生'], negative=['徐有容'])[0]
d
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
我们随便找一个人物,比如:折袖。运行程序,看一看机器眼中折袖与谁是情侣?
|
# Same analogy with 折袖: who pairs with 折袖 the way 无穷碧 pairs with 别样红?
d = model.most_similar(positive=['折袖', '无穷碧'], negative=['别样红'])[0]
d
|
Practice_03/Python与择天记.ipynb
|
Alenwang1/Python_Practice
|
gpl-3.0
|
Fitting (Predicting) Topics Distribution From Raw Text
predict function will predict the topics distributions from a given raw text. The result is a pandas dataframe, with topics ids and confidence thereof.
|
def text2vec(text):
# Bag-of-words over the noun phrases of *text* (empty list for falsy input).
if text:
return dictionary.doc2bow(TextBlob(text.lower()).noun_phrases)
else:
return []
def tokenised2vec(tokenised):
# Bag-of-words over an already-tokenised document.
if tokenised:
return dictionary.doc2bow(tokenised)
else:
return []
def predict(sometext):
# Topic distribution for raw text, sorted ascending by confidence.
vec = text2vec(sometext)
dtype = [('topic_id', int), ('confidence', float)]
topics = np.array(model[vec], dtype=dtype)
topics.sort(order="confidence")
# for topic in topics[::-1]:
# print("--------")
# print(topic[1], topic[0])
# print(model.print_topic(topic[0]))
return pd.DataFrame(topics)
def predict_vec(vec):
# Same as predict(), but for pre-tokenised input.
dtype = [('topic_id', int), ('confidence', float)]
topics = np.array(model[tokenised2vec(vec)], dtype=dtype)
topics.sort(order="confidence")
# for topic in topics[::-1]:
# print("--------")
# print(topic[1], topic[0])
# print(model.print_topic(topic[0]))
return pd.DataFrame(topics)
predict("null values are interpreted as unknown value or inapplicable value. This paper proposes a new approach for solving the unknown value problems with Implicit Predicate (IP). The IP serves as a descriptor corresponding to a set of the unknown values, thereby expressing the semantics of them. In this paper, we demonstrate that the IP is capable of (1) enhancing the semantic expressiveness of the unknown values, (2) entering incomplete information into database and (3) exploiting the information and a variety of inference rules in database to reduce the uncertainties of the unknown values.")
model.print_topic(167)
|
tutorials/Profiling_Reviewers.ipynb
|
conferency/find-my-reviewers
|
mit
|
Generate a Author's Topic Vector
The vector is a topic confidence vector for the author. The length of the vector should be the number of topics in the LDA model.
|
def update_author_vector(vec, doc_vec):
    """Accumulate a document's topic confidences into an author vector.

    *doc_vec* supplies parallel 'topic_id' and 'confidence' sequences; each
    confidence is added in place to vec[topic_id].  Returns *vec*.
    """
    topic_ids = doc_vec['topic_id']
    confidences = doc_vec['confidence']
    for tid, conf in zip(topic_ids, confidences):
        vec[tid] += conf
    return vec
def get_topic_in_list(model, topic_id):
# Parse "weight*term + ..." from the LDA model into [[weight, term], ...].
return [term.strip().split('*') for term in model.print_topic(topic_id).split("+")]
def get_author_top_topics(author_id, top=10):
# Top topics for an author; vectors are initialised to 1.0, so confidence > 1
# means at least one paper contributed to that topic.
author = authors_lib[author_id]
top_topics = []
for topic_id, confidence in enumerate(author):
if confidence > 1:
top_topics.append([topic_id, (confidence - 1) * 100])
top_topics.sort(key=lambda tup: tup[1], reverse=True)
return top_topics[:top]
def get_topic_in_string(model, topic_id, top=5):
# Human-readable label: the topic's `top` terms joined with " / ".
topic_list = get_topic_in_list(model, topic_id)
topic_string = " / ".join([i[1] for i in topic_list][:top])
return topic_string
def get_topics_in_string(model, topics, confidence=False):
# When confidence=True, *topics* is [[topic_id, confidence], ...];
# otherwise it is a flat list of topic ids.
if confidence:
topics_list = []
for topic in topics:
topic_map = {
"topic_id": topic[0],
"string": get_topic_in_string(model, topic[0]),
"confidence": topic[1]
}
topics_list.append(topic_map)
else:
topics_list = []
for topic_id in topics:
topic_map = {
"topic_id": topic_id,
"string": get_topic_in_string(model, topic_id),
}
topics_list.append(topic_map)
return topics_list
|
tutorials/Profiling_Reviewers.ipynb
|
conferency/find-my-reviewers
|
mit
|
For a author, we first get all his previous papers in our database. For each paper we get, we generate a paper's vector. At last, the sum of all vectors will be the vector (aka the position) in the interest space.
|
def profile_author(author_id, model_topics_num=None):
    """Build an author's topic-interest vector.

    Sums the topic distribution of every (English) paper the author wrote.
    author_id: key into the documents_authors join table.
    model_topics_num: vector length; defaults to the LDA model's topic count.
    Returns a plain list so the result is JSON-serialisable.
    """
    if not model_topics_num:
        model_topics_num = model.num_topics
    author_vec = np.ones(model_topics_num)  # initialize with 1s (idiomatic np.ones)
    # NOTE(review): string-built SQL; author_id is str(int) here so injection
    # risk is low, but a parameterized query would be safer.
    paper_list = pd.read_sql_query(
        "SELECT * FROM documents_authors WHERE authors_id=" + str(author_id), con)['documents_id']
    paper_list = [i for i in paper_list if i not in non_en]  # drop non-English papers
    for paper_id in paper_list:
        try:
            abstract = db_documents.loc[paper_id]["abstract"]
            vec = predict_vec(tokenised[paper_id - 1])
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit before re-raising.
            print("Error occurred on paper id " + str(paper_id))
            raise
        author_vec = update_author_vector(author_vec, vec)
    return list(author_vec)  # to make it serializable by JSON
profile_author(1)
def profile_all_authors():
"""Profile every author in db_authors; returns {author_id_str: topic_vector}."""
authors = {}
for author_id in db_authors['id']:
result = profile_author(author_id)
if len(result):
authors[str(author_id)] = result # JSON does not allow int to be the key
# print("Done: ", author_id)
# uncomment the above line to track the progress
return authors
authors_lib = profile_all_authors()
# Sanity check: every author got a profile.
len(db_authors) == len(authors_lib)
|
tutorials/Profiling_Reviewers.ipynb
|
conferency/find-my-reviewers
|
mit
|
Save the Library (aka Pool of Scholars)
We will save our profiled authors in a JSON file. It will then used by our matching algorithm.
|
save_json(authors_lib, "aisnet_600_cleaned.authors.json")
|
tutorials/Profiling_Reviewers.ipynb
|
conferency/find-my-reviewers
|
mit
|
Dealing with Conflicts
This file shows how Ply deals with shift/reduce and reduce/reduce conflicts.
The following grammar is ambiguous because it does not specify the precedence of the arithmetical operators:
expr : expr '+' expr
| expr '-' expr
| expr '*' expr
| expr '/' expr
| '(' expr ')'
| NUMBER
;
Specification of the Scanner
We implement a minimal scanner for arithmetic expressions.
|
import ply.lex as lex
# Minimal Ply scanner for arithmetic expressions.
# CAUTION: in Ply, a token function's docstring IS its regular expression.
tokens = [ 'NUMBER' ]
def t_NUMBER(t):
r'0|[1-9][0-9]*'
t.value = int(t.value)
return t
# Single-character tokens handled as literals.
literals = ['+', '-', '*', '/', '^', '(', ')']
t_ignore = ' \t'
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count('\n')
def t_error(t):
# Report and skip any character no rule matches.
print(f"Illegal character '{t.value[0]}'")
t.lexer.skip(1)
# Ply inspects __file__; set it explicitly for the notebook environment.
__file__ = 'main'
lexer = lex.lex()
|
Ply/Conflicts.ipynb
|
karlstroetmann/Formal-Languages
|
gpl-2.0
|
We can specify multiple expressions in a single rule. In this case, we have used the `pass` statement
as we just want to generate the shift/reduce conflicts that are associated with this grammar.
def p_expr(p):
"""
expr : expr '+' expr
| expr '-' expr
| expr '*' expr
| expr '/' expr
| expr '^' expr
| '(' expr ')'
| NUMBER
"""
pass
If we want to generate a parse tree, we have to do more work.
|
# Ply grammar actions building an AST of nested (operator, lhs, rhs) tuples.
# CAUTION: each function's docstring IS its grammar production — do not edit.
def p_expr_plus(p):
"expr : expr '+' expr"
p[0] = ('+', p[1], p[3])
def p_expr_minus(p):
"expr : expr '-' expr"
p[0] = ('-', p[1], p[3])
def p_expr_mult(p):
"expr : expr '*' expr"
p[0] = ('*', p[1], p[3])
def p_expr_div(p):
"expr : expr '/' expr"
p[0] = ('/', p[1], p[3])
def p_expr_power(p):
"expr : expr '^' expr"
p[0] = ('^', p[1], p[3])
def p_expr_paren(p):
# Parentheses only group; no node is created.
"expr : '(' expr ')'"
p[0] = p[2]
def p_expr_NUMBER(p):
"expr : NUMBER"
p[0] = p[1]
|
Ply/Conflicts.ipynb
|
karlstroetmann/Formal-Languages
|
gpl-2.0
|
We define `p_error` in order to prevent a warning.
|
def p_error(p):
# Ply's error hook: p is the offending token, or None at end of input.
if p:
print(f'Syntax error at {p.value}.')
else:
print('Syntax error at end of input.')
|
Ply/Conflicts.ipynb
|
karlstroetmann/Formal-Languages
|
gpl-2.0
|
Let's look at the action table that is generated. Note that all conflicts are resolved in favour of shifting.
|
# Show the generated parser debug output (`type` on Windows, `cat` on Unix).
!type parser.out
!cat parser.out
%run ../ANTLR4-Python/AST-2-Dot.ipynb
|
Ply/Conflicts.ipynb
|
karlstroetmann/Formal-Languages
|
gpl-2.0
|
The function test(s) takes a string s as its argument and tries to parse this string. If all goes well, an abstract syntax tree is returned.
If the string can't be parsed, an error message is printed by the parser.
|
def test(s):
# Parse *s*, render the resulting AST as a graphviz diagram, return the AST.
t = yacc.parse(s)
d = tuple2dot(t)
display(d)
return t
|
Ply/Conflicts.ipynb
|
karlstroetmann/Formal-Languages
|
gpl-2.0
|
The next example shows that this parser does not produce the abstract syntax that reflects the precedences of the arithmetical operators.
|
test('2^3*4+5')
|
Ply/Conflicts.ipynb
|
karlstroetmann/Formal-Languages
|
gpl-2.0
|
1.
Создайте DecisionTreeClassifier с настройками по умолчанию и измерьте качество его работы с помощью cross_val_score. Эта величина и будет ответом в пункте 1.
|
# Baseline: single decision tree, default settings, 10-fold CV accuracy.
clf = tree.DecisionTreeClassifier()
x_val_score = cross_val_score(clf, X, y, cv=10).mean()
write_answer(x_val_score, 'answer_1.txt')
print(x_val_score)
|
src/cours_2/week_4/bagging_and_rand_forest.ipynb
|
agushman/coursera
|
mit
|
2.
Воспользуйтесь BaggingClassifier из sklearn.ensemble, чтобы обучить бэггинг над DecisionTreeClassifier. Используйте в BaggingClassifier параметры по умолчанию, задав только количество деревьев равным 100.
Качество классификации новой модели - ответ в пункте 2. Обратите внимание, как соотносится качество работы композиции решающих деревьев с качеством работы одного решающего дерева.
|
# Bagging over 100 default decision trees.
bagging_clf = ensemble.BaggingClassifier(clf, n_estimators=100)
x_val_score = cross_val_score(bagging_clf, X, y, cv=10).mean()
write_answer(x_val_score, 'answer_2.txt')
print(x_val_score)
|
src/cours_2/week_4/bagging_and_rand_forest.ipynb
|
agushman/coursera
|
mit
|
3.
Теперь изучите параметры BaggingClassifier и выберите их такими, чтобы каждый базовый алгоритм обучался не на всех d признаках, а на $\sqrt d$ случайных признаков. Качество работы получившегося классификатора - ответ в пункте 3. Корень из числа признаков - часто используемая эвристика в задачах классификации, в задачах регрессии же часто берут число признаков, деленное на три. Но в общем случае ничто не мешает вам выбирать любое другое число случайных признаков.
|
# Bagging where each base tree sees only sqrt(d) randomly chosen features.
stoch_train_len = int(sqrt(X.shape[1]))
bagging_clf = ensemble.BaggingClassifier(clf, n_estimators=100, max_features=stoch_train_len)
x_val_score = cross_val_score(bagging_clf, X, y, cv=10).mean()
write_answer(x_val_score, 'answer_3.txt')
print(x_val_score)
|
src/cours_2/week_4/bagging_and_rand_forest.ipynb
|
agushman/coursera
|
mit
|
4.
Наконец, давайте попробуем выбирать случайные признаки не один раз на все дерево, а при построении каждой вершины дерева. Сделать это несложно: нужно убрать выбор случайного подмножества признаков в BaggingClassifier и добавить его в DecisionTreeClassifier. Какой параметр за это отвечает, можно понять из документации sklearn, либо просто попробовать угадать (скорее всего, у вас сразу получится). Попробуйте выбирать опять же $\sqrt d$ признаков. Качество полученного классификатора на контрольной выборке и будет ответом в пункте 4.
|
# Random-feature selection per tree node (DecisionTreeClassifier.max_features)
# instead of per tree — this is effectively a Random Forest.
stoch_clf = tree.DecisionTreeClassifier(max_features=stoch_train_len)
bagging_clf = ensemble.BaggingClassifier(stoch_clf, n_estimators=100)
x_val_score_own = cross_val_score(bagging_clf, X, y, cv=10).mean()
write_answer(x_val_score_own, 'answer_4.txt')
# BUG FIX: the original printed `x_val_score` — the stale score from the
# previous step — instead of the value just computed and written to file.
print(x_val_score_own)
|
src/cours_2/week_4/bagging_and_rand_forest.ipynb
|
agushman/coursera
|
mit
|
5.
Полученный в пункте 4 классификатор - бэггинг на рандомизированных деревьях (в которых при построении каждой вершины выбирается случайное подмножество признаков и разбиение ищется только по ним). Это в точности соответствует алгоритму Random Forest, поэтому почему бы не сравнить качество работы классификатора с RandomForestClassifier из sklearn.ensemble. Сделайте это, а затем изучите, как качество классификации на данном датасете зависит от количества деревьев, количества признаков, выбираемых при построении каждой вершины дерева, а также ограничений на глубину дерева. Для наглядности лучше построить графики зависимости качества от значений параметров, но для сдачи задания это делать не обязательно.
|
# Compare against sklearn's RandomForestClassifier.
# NOTE(review): random_state is set to stoch_train_len (sqrt of feature count),
# an odd choice for a seed — presumably arbitrary; confirm.
random_forest_clf = ensemble.RandomForestClassifier(random_state=stoch_train_len, n_estimators=100)
x_val_score_lib = cross_val_score(random_forest_clf, X, y, cv=10).mean()
print(x_val_score_lib)
answers = '2 3 4 7'
write_answer(answers, 'answer_5.txt')
|
src/cours_2/week_4/bagging_and_rand_forest.ipynb
|
agushman/coursera
|
mit
|
A very simple pipeline to show how registers are inferred.
|
class SimplePipelineExample(SimplePipeline):
"""Five-stage toy pipeline: stage0 inverts the loopback wire, stages 1-3 pass the value through (the base class infers a register per stage), stage4 feeds it back."""
def __init__(self):
# 1-bit wire closing the loop from the last stage back to the first.
self._loopback = pyrtl.WireVector(1, 'loopback')
super(SimplePipelineExample, self).__init__()
def stage0(self):
self.n = ~ self._loopback
def stage1(self):
self.n = self.n
def stage2(self):
self.n = self.n
def stage3(self):
self.n = self.n
def stage4(self):
self._loopback <<= self.n
|
ipynb-examples/example5-instrospection.ipynb
|
UCSBarchlab/PyRTL
|
bsd-3-clause
|
Simulation of the core
|
# Simulate the pipeline for 15 cycles and render the waveform.
simplepipeline = SimplePipelineExample()
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(15):
sim.step({})
sim_trace.render_trace()
|
ipynb-examples/example5-instrospection.ipynb
|
UCSBarchlab/PyRTL
|
bsd-3-clause
|
Define Mosaic Parameters
In this tutorial, we use the Planet mosaic tile service. There are many mosaics to choose from. For a list of mosaics available, visit https://api.planet.com/basemaps/v1/mosaics.
We first build the url for the xyz basemap tile service, then we add authorization in the form of the Planet API key.
|
# Planet tile server base URL (Planet Explorer Mosaics Tiles)
mosaic = 'global_monthly_2018_02_mosaic'
# Doubled braces keep {z}/{x}/{y} as literal XYZ placeholders after .format().
mosaicsTilesURL_base = 'https://tiles.planet.com/basemaps/v1/planet-tiles/{}/gmap/{{z}}/{{x}}/{{y}}.png'.format(mosaic)
mosaicsTilesURL_base
# Planet tile server url with auth
planet_api_key = os.environ['PL_API_KEY']
planet_mosaic = mosaicsTilesURL_base + '?api_key=' + planet_api_key
# url is not printed because it will show private api key
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Prepare label maker config file
This config file is pulled from the label-maker repo README.md example and then customized to utilize the Planet mosaic. The imagery url is set to the Planet mosaic url and the zoom is changed to 15, the maximum zoom supported by the Planet tile services.
See the label-maker README.md file for a description of the config entries.
|
# create data directory
data_dir = os.path.join('data', 'label-maker-mosaic')
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
# label-maker doesn't clean up, so start with a clean slate
!cd $data_dir && rm -R *
# create config file
bounding_box = [1.09725, 6.05520, 1.34582, 6.30915]
config = {
"country": "togo",
"bounding_box": bounding_box,
"zoom": 15,
"classes": [
{ "name": "Roads", "filter": ["has", "highway"] },
{ "name": "Buildings", "filter": ["has", "building"] }
],
"imagery": planet_mosaic,
"background_ratio": 1,
"ml_type": "classification"
}
# define project files and folders
config_filename = os.path.join(data_dir, 'config.json')
# write config file
with open(config_filename, 'w') as cfile:
cfile.write(json.dumps(config))
print('wrote config to {}'.format(config_filename))
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Visualize Mosaic at config area of interest
|
# calculate center of map
# bounding_box is [min_lon, min_lat, max_lon, max_lat].
bounds_lat = [bounding_box[1], bounding_box[3]]
bounds_lon = [bounding_box[0], bounding_box[2]]
def calc_center(bounds):
    """Return the midpoint of a (min, max) coordinate pair."""
    low, high = bounds[0], bounds[1]
    return low + (high - low) / 2
map_center = [calc_center(bounds_lat), calc_center(bounds_lon)] # lat/lon
print(bounding_box)
print(map_center)
# create and visualize mosaic at approximately the same bounds as defined in the config file
map_zoom = 12
layout=ipyw.Layout(width='800px', height='800px') # set map layout
mosaic_map = ipyl.Map(center=map_center, zoom=map_zoom, layout=layout)
mosaic_map.add_layer(ipyl.TileLayer(url=planet_mosaic))
mosaic_map
mosaic_map.bounds
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Download OSM tiles
In this step, label-maker downloads the OSM vector tiles for the country specified in the config file.
According to Label Maker documentation, these can be visualized with mbview. So far I have not been successful getting mbview to work. I will keep on trying and would love to hear how you got this to work!
|
!cd $data_dir && label-maker download
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Create ground-truth labels from OSM tiles
In this step, the OSM tiles are chipped into label tiles at the zoom level specified in the config file. Also, a geojson file is created for visual inspection.
|
!cd $data_dir && label-maker labels
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Visualizing classification.geojson in QGIS gives:
Although Label Maker doesn't tell us which classes line up with the labels (see the legend in the visualization for labels), it looks like the following relationships hold:
- (1,0,0) - no roads or buildings
- (0,1,1) - both roads and buildings
- (0,0,1) - only buildings
- (0,1,0) - only roads
Most of the large region with no roads or buildings at the bottom portion of the image is the water off the coast.
Preview image chips
Create a subset of the image chips for preview before creating them all. Preview chips are placed in subdirectories named after each class specified in the config file.
NOTE This section is commented out because preview fails due to imagery-offset arg. See more:
https://github.com/developmentseed/label-maker/issues/79
|
# !cd $data_dir && label-maker preview -n 3
# !ls $data_dir/data/examples
# for fclass in ('Roads', 'Buildings'):
# example_dir = os.path.join(data_dir, 'data', 'examples', fclass)
# print(example_dir)
# for img in os.listdir(example_dir):
# print(img)
# display(Image(os.path.join(example_dir, img)))
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Other than the fact that 4 tiles were created instead of the specified 3, the results look pretty good! All Road examples have roads, and all Building examples have buildings.
Create image tiles
In this step, we invoke label-maker images, which downloads and chips the mosaic into tiles that match the label tiles.
Interestingly, only 372 image tiles are downloaded, while 576 label tiles were generated. Looking at the label tile generation output (370 Road tiles, 270 Building tiles) along with the classification.geojson visualization (only two tiles that are Building and not Road), we find that there are only 372 label tiles that represent at least one of the Road/Building classes. This is why only 372 image tiles were generated.
|
# Download and chip the mosaic into image tiles matching the label tiles.
!cd $data_dir && label-maker images
# look at three tiles that were generated
tiles_dir = os.path.join(data_dir, 'data', 'tiles')
print(tiles_dir)
for img in os.listdir(tiles_dir)[:3]:
print(img)
display(Image(os.path.join(tiles_dir, img)))
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Package tiles and labels
Convert the image and label tiles into train and test datasets.
|
# will not be able to open image tiles that weren't generated because the label tiles contained no classes
!cd $data_dir && label-maker package
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
Check Package
Let's load the packaged data and look at the train and test datasets.
|
# Load the packaged train/test arrays and report the shape of each.
data_file = os.path.join(data_dir, 'data', 'data.npz')
data = np.load(data_file)
for k in data.keys():
print('data[\'{}\'] shape: {}'.format(k, data[k].shape))
|
jupyter-notebooks/label-data/label_maker_pl_mosaic.ipynb
|
planetlabs/notebooks
|
apache-2.0
|
The best feature to split on first is x3
In this tree below you will see that starting from x3 = 1, the depth of the tree is 3.
|
# Visualize the trained model — default view, then the tree diagram.
decision_tree_model.show()
decision_tree_model.show(view="Tree")
|
machine_learning/4_clustering_and_retrieval/lecture/week3/.ipynb_checkpoints/quiz-Decision Trees-checkpoint.ipynb
|
tuanavu/coursera-university-of-washington
|
mit
|
Question 3
<img src="images/lec3_quiz03.png">
Screenshot taken from Coursera
<!--TEASER_END-->
|
# Accuracy
# NOTE(review): Python 2 print statement — this cell will not run on Python 3.
print decision_tree_model.evaluate(x)['accuracy']
|
machine_learning/4_clustering_and_retrieval/lecture/week3/.ipynb_checkpoints/quiz-Decision Trees-checkpoint.ipynb
|
tuanavu/coursera-university-of-washington
|
mit
|
Here are the main Python imports for solving the ODEs, plotting and other data analysis. Import modules as needed.
|
# Python module imports
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from scipy.integrate import odeint
from IPython.display import Image
from IPython.core.display import HTML
from scipy.optimize import minimize
from scipy.optimize import curve_fit
import statsmodels.formula.api as sm
%matplotlib inline
from pdb import set_trace
# Default figure size and font sizes for all plots in this notebook.
plt.rcParams.update({'figure.figsize':(8,6),
                     'figure.titlesize': 16.,
                     'axes.labelsize':12.,
                     'xtick.labelsize':12.,
                     'legend.fontsize': 12.})
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
The following numerically integrates the reaction rate to give the exact impurity profile. Nothing needs to be modified here by the user. Scroll down to see calculation outputs and model comparisons.
|
# gas constant kcal/mol K
R = 0.00198588
# define the domain for the ODEs solution
# Integrate 10 years past the last experimental time point, stepping dt seconds.
ndays = np.max(days) + 365*10
dt = 5000.
t = np.arange(0, ndays*(24*3600), dt)
npts = t.shape[0]
# differential equation solution variable
# Columns are (Temperature, species) pairs for species P (impurity),
# I (intermediate) and D (drug).
cols = [(T, C) for T in Temperatures for C in ['P','I','D']]
concentrations = pd.DataFrame({col: np.zeros(t.shape[0]) for col in cols})
concentrations['t'] = t
# experimental results
# MultiIndex over every (Temperature, sampling day) combination.
iterables = [(T, d) for i, T in enumerate(Temperatures) for d in days[i]]
index = pd.MultiIndex.from_tuples(iterables, names=['Temperature', 'days'])
# NOTE(review): the hard-coded 12 assumes len(iterables) == 12
# (e.g. 4 temperatures x 3 time points) -- confirm against the user inputs.
results = pd.DataFrame(data=np.zeros(12), index=index, columns=['P'])
# predicted times
predictions = pd.DataFrame(data=np.zeros(len(Temperatures)), index=Temperatures, columns=['Theory'])
# calculate rate constants from the user defined Arrhenius values
def Arrhenius(A, E, T, R=0.00198588):
    """Arrhenius rate constant k = A * exp(-E / (R * T)).

    Parameters
    ----------
    A : collision (pre-exponential) factor.
    E : activation energy (kcal/mol).
    T : absolute temperature (K).
    R : gas constant (kcal/mol/K). Parameterized (instead of reading the
        module-level global) so the function is self-contained; the default
        matches the module-level value, so existing callers are unaffected.
    """
    return A*np.exp(-E/(R*T))
# calculate the rate constants for each user defined temperature
# (temperatures are given in Celsius; +273 converts to Kelvin)
k1f = [Arrhenius(A1f, E1f, T+273.) for T in Temperatures]
k1r = [Arrhenius(A1r, E1r, T+273.) for T in Temperatures]
k2 = [Arrhenius(A2, E2, T+273.) for T in Temperatures]
k3 = [Arrhenius(A3, E3, T+273.) for T in Temperatures]
# Right-hand side of the coupled ODE system for odeint; y = [P, I, D].
def deriv(y, t, k1f, k1r, k2, k3):
    """Return d[P, I, D]/dt for the degradation mechanism.

    D <-> I (k1f forward, k1r reverse); I -> P (k2); D -> P (k3).
    `t` is unused (the system is autonomous) but required by odeint.
    """
    impurity, intermediate, drug = y
    return [
        k2 * intermediate + k3 * drug,
        k1f * drug - (k1r + k2) * intermediate,
        k1r * intermediate - (k1f + k3) * drug,
    ]
# initial conditions (user defined)
yo = [Po, Io, Do]
# call solver for each user defined temperature
for i, T in enumerate(Temperatures):
    yt = odeint(deriv, yo, t, args=(k1f[i], k1r[i], k2[i], k3[i]))
    # Copy each species' trajectory into its (Temperature, species) column.
    for j, C in enumerate(['P', 'I', 'D']):
        concentrations[T,C] = yt[:,j]
# find the impurity concentration at the user defined time points
# Map each sampling day onto the nearest index of the integration grid.
index = (np.array(days)/float(ndays)*npts).astype('int')
for i, T in enumerate(Temperatures):
    for j, d in enumerate(days[i]):
        # NOTE(review): chained assignment (.loc[...].P = ...) may silently
        # write to a copy in newer pandas -- verify against the pandas version.
        results.loc[T,d].P = concentrations[T,'P'].iloc[index[i,j]]
# find the time that the reaction will produce the user-defined impurity concentration
for T in Temperatures:
    # Time (converted to years) at which P is closest to the set point.
    predictions.loc[T].Theory = t[np.argmin(np.abs(concentrations[T,"P"] - impurity_setpoint))]/(3600*24*365)
# define a "private" dictionary to contain all data
_dat = dict(kinetics={'A1f': A1f, 'E1f': E1f, 'A1r': A1r, 'E1r': E1r, 'A2': A2, 'E2': E2, 'A3':A3, 'E3': E3},
            initial={'Po': Po, 'Io': Io, 'Do':Do},
            setpt=impurity_setpoint)
_dat['const'] = {'R': R}
_dat['conc'] = concentrations
_dat['exppts'] = results
_dat['predictions'] = predictions
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
Below are the calculated results for the user-defined kinetics
|
# Plot, for each temperature, the exact impurity profile (left axis) together
# with the intermediate and drug profiles (right axis), truncated one day past
# the last experimental time point.
t = _dat['conc']['t'].values
t_days = t / (24*3600)
max_days_index = np.argmin(np.abs(t_days - (np.max(_dat['exppts'].index.levels[1].values)+1)))
t_days = t_days[:max_days_index]
c = _dat['conc'].iloc[:max_days_index]
# Common y-limit: the largest impurity concentration across all temperatures.
max_conc = c.iloc[:, c.columns.get_level_values(1)=='P'].max().max()
fig = plt.figure(figsize=(14,10))
for i, T in enumerate(Temperatures):
    ax = fig.add_subplot(2,2,i+1)
    ax.plot(t_days, c[T,'P'], color='red', label='P')
    # Experimental sampling points on top of the exact curve.
    ax.plot(_dat['exppts'].loc[T].index.values, _dat['exppts'].loc[T].P, ls="none", marker='o', color="red")
    ax2 = ax.twinx()
    ax2.plot(t_days, c[T,'I'], color='green', label='I')
    ax2.plot(t_days, c[T,'D'], color='blue', label='D')
    ax.set_ylim([0, max_conc])
    ax2.set_ylim([0,1])
    ax.set_xlabel("time (days)")
    ax.set_ylabel("impurity concentration (%)")
    ax2.set_ylabel("intermediate and drug concentration (%) ")
    ax.legend(['P'], loc="upper left")
    ax2.legend(loc="upper right")
    ax.set_title("%s C"%(Temperatures[i]))
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Concentration profiles</b>: The solid lines are the exact concentrations of the components $P$, $I$, $D$ as a function of time calculated from the defined reaction parameters. The filled symbols denote the experimental time points (defined by the user in the parameter list).
|
_dat['exppts']
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Theoretical concentration measurements</b>: The above tabulates the theoretical impurity concentrations, $P$, at the measurement time points.
|
# Same per-temperature plots as above, but over the full integration window,
# with the impurity set point drawn as a dashed horizontal line.
_dat['setpt']
t = _dat['conc']['t'].values
c = _dat['conc']
t_days = t / (24*3600)
max_conc = c.iloc[:, c.columns.get_level_values(1)=='P'].max().max()
fig = plt.figure(figsize=(14,10))
for i, T in enumerate(Temperatures):
    ax = fig.add_subplot(2,2,i+1)
    ax.plot(t_days, c[T,'P'], color='red', label='P')
    # Horizontal line marking the user defined impurity set point.
    ax.plot(t_days, [_dat['setpt']]*t_days.shape[0], color='red', ls=':')
    ax.plot(_dat['exppts'].loc[T].index.values, _dat['exppts'].loc[T].P, ls="none", marker='o', color="red")
    ax.set_ylim([0, max_conc])
    ax.set_xlabel("time (days)")
    ax.set_ylabel(r"impurity concentration (%)")
    ax.legend(['P'], loc="upper left")
    ax.set_title("%s C"%(Temperatures[i]))
    ax2 = ax.twinx()
    ax2.plot(t_days, c[T,'I'], color='green', label='I')
    ax2.plot(t_days, c[T,'D'], color='blue', label='D')
    ax2.set_ylim([0,1])
    #ax2.set_ylabel(r"drug concentration")
    ax2.legend(loc="upper right")
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Concentration profiles</b>: The plots above show the concentration profiles over a duration sufficient that the impurity concentrations reaches the user defined set point (denoted by the red dashed line).
|
_dat['predictions']
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Shelf life</b>: The above tabulates the theoretical shelf life in years calculated from the user defined kinetic parameters. The value for $25~ ^\circ C$ is the most useful as this is the number that the following models will attempt to predict.
Shelf life prediction assuming zero order kinetics
The actual reaction mechanism is unknown in advance, thus a simple reaction mechanism is assumed.
Assuming the reaction kinetics are modeled by a zeroth order on the reactive drug product:
\begin{equation}
r_P \equiv \frac{dP}{dt} = A \exp(-\frac{E}{RT})
\end{equation}
\begin{equation}
P = A \exp(-\frac{E}{RT})t
\end{equation}
\begin{equation}
\ln \left ( \frac{P}{t} \right ) = \ln A - \frac{E}{R} \frac{1}{T}
\end{equation}
The parameters, $A$ and $E$ can readily be obtained by a plot of $\ln \left ( \frac{P}{t} \right ) \equiv \ln (k_P)$ versus $\frac{1}{T}$. <br>
The time is then calculated as:
\begin{equation}
t = \frac{P}{\hat{A} \exp(-\frac{\hat{E}}{RT})}
\end{equation}
|
# Arrhenius plot for the zero-order model: regress ln(P/t) against 1/T.
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
# Drop the day-0 rows (P=0 at t=0 would make ln(P/t) undefined).
r = _dat['exppts'].iloc[_dat['exppts'].index.get_level_values(1)!=0].copy()
# Time in seconds; the +1 guards against division by zero.
r['y'] = np.log(np.divide(r['P'].values, r.index.get_level_values(1).astype('f8').values*24*3600+1))
r['x'] = 1./(r.index.get_level_values(0).astype('f8').values + 273)
ax.plot(r.x, r.y, ls='none', marker='o')
ax.set_xlabel(r'$1/T$')
ax.set_ylabel(r'$\ln (P/t)$')
# Ordinary least squares: ln(P/t) = ln(A) - (E/R)(1/T).
reg = sm.ols(formula="y ~ x", data=r).fit()
pred = reg.params[1]*r.x + reg.params[0]
ax.plot(r.x, pred)
# Back out the Arrhenius parameters from the intercept and slope.
A = np.exp(reg.params[0])
E = -1*reg.params[1]*R
print "regression fit for A = %s "%(A)
print "regression fit for E = %s"%(E)
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
The above regression provides the kinetic parameters necessary to calculate the predicted shelf-life assuming the mechanism is zeroth-order.
|
# Shelf life (years) predicted by the zero-order model: t = set point / k(T).
temp = _dat['const']['R']*(_dat['predictions'].index.astype('f8').values+273)
k = A * np.exp(-E/temp)
_dat['predictions']['zero_order'] = _dat['setpt'] / k * 1/(3600*24*365)
_dat['predictions']
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Table of predicted results</b> The above table compares the theoretical shelf-life in years (calculated from the exact reaction mechanism and user defined parameters) to the predicted shelf-life assuming zero order kinetics.
Shelf life prediction assuming first-order kinetics
Assuming a reaction mechanism as:
In this prediction model, the impurity production rate is assumed to be first-order in the total drug concentration.
\begin{equation}
r_P \equiv \frac{dP}{dt} = k D_T
\end{equation}
where $D_T$ is the total drug concentration, $P$ is the impurity concentration and $k$ is the kinetic parameters, which is assumed to have an Arrehnius temperature dependence.
\begin{equation}
k = A \exp \left ( -\frac{E}{RT} \right )
\end{equation}
The impurity concentration is linked to the drug concentration by a mass balance
\begin{equation}
D = (D_o + I_o) - P = D_{To} - P
\end{equation}
where $D_o$ is the starting crystalline drug concentration, $I_o$ is the starting amorphous drug concentration, and $D_{To}$ is the starting total drug concentration. Substituting,
\begin{equation}
\frac{dP}{dt} = k [D_{To} - P]
\end{equation}
The first-order ODE can be solved with the integrating factor.
\begin{equation}
\mu(t) = e^{\int k dt} = e^{kt}
\end{equation}
\begin{equation}
\frac{d}{dt} \left [P e^{kt} \right ] = e^{kt} kD_{To}
\end{equation}
Integrating
\begin{equation}
\left [P e^{kt} \right ] = \int_0^t k D_{To} e^{kt}\,dt = D_{To} e^{kt} - D_{To} = D_{To} (e^{kt} - 1)
\end{equation}
Rearranging:
\begin{equation}
P = D_{To} (1 - e^{-k t}) = D_{To} \left [ 1 - \exp \left ( - A \exp \left ( -\frac{E}{RT} \right ) t \right ) \right ]
\end{equation}
The above equation links the experimental data (impurity concentration, $P$, at time points, $t$) to the reaction kinetic parameters (i.e. Arrhenius parameters $A$ and $E$).
Method 1 - Linear Regression
One method to obtain the Arrhenius parameters is to a stepwise calculation. First, the impurity concentration experiments conducted at the same temperature, $T$, the kinetic parameter, $k$, at the temperature $T$ can be obtained by plotting log of the drug concentration over time (recall that $P = D_{To} - D$).
\begin{equation}
D_{To} - D = D_{To} (1-e^{-k t})
\end{equation}
\begin{equation}
D = D_{To} e^{-k t}
\end{equation}
\begin{equation}
\ln D = \ln D_{To} - k t
\end{equation}
\begin{equation}
\ln \left ( \frac{D}{D_{To}} \right ) = - k t
\end{equation}
|
# First-order method 1, step 1: regress ln(D/Do) vs t at each temperature;
# the negated slope is the rate constant k at that temperature.
D = Do + Io - _dat['exppts']['P']
data = np.log(D/(Do+Io)).to_frame('ln(D/Do)')
data['t'] = data.index.get_level_values(1)*24*3600
data['k'] = [0]*data['t'].shape[0]
colors = ['blue','green','red','orange']
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
for i, T in enumerate(_dat['exppts'].index.levels[0]):
    ax.plot(data.loc[T]['t'], data.loc[T]['ln(D/Do)'], ls='none', marker='o', color=colors[i])
    # Zero-intercept OLS: ln(D/Do) = -k t.
    slope = sm.OLS(data.loc[T]['ln(D/Do)'], data.loc[T]['t']).fit().params[0]
    data.loc[(T,slice(None)),'k'] = -1 * slope
    ax.plot(data.loc[T]['t'], data.loc[T]['t']*slope,
            ls='--', color=colors[i], label="%.2e 1/s"%(-1*slope))
ax.set_ylabel(r'ln $\left ( D/D_{To} \right )$')
ax.set_xlabel(r'$t$ (sec)')
plt.legend(loc='best')
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Table of the kinetic parameter</b>, $k$ regressed from data at the different experimental temperatures.
Once the kinetic parameter, $k$, is estimated at the different temperatures, the Arrhenius parameters, $A$ and $E$, can be obtained by plotting the logarithm of the rate constant at each experimental temperature against the reciprocal temperature.
\begin{equation}
\ln k = \ln A - \frac{E}{R} \frac{1}{T}
\end{equation}
|
_dat['exppts']
# variable data is used from above cell
# Step 2: Arrhenius plot of the per-temperature rate constants,
# ln(k) = ln(A) - (E/R)(1/T); one k value per temperature (taken
# from the day-0 rows, where each per-temperature k was stored).
df = pd.DataFrame({'x': 1./(data.index.levels[0]+273).values, 'y': np.log(data.loc[(slice(None),0),'k'])})
reg = sm.ols(formula='y~x', data=df).fit()
A = np.exp(reg.params['Intercept'])
E = -1 * R * reg.params['x']
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.plot(df.x, df.y, marker='o', ls='none', color='blue', label='data')
ax.plot(df.x, df.x*reg.params['x'] + reg.params['Intercept'], ls="--", color='blue', label='fit')
ax.set_xlabel(r"$1/T$ (1/K)")
ax.set_ylabel(r"log $k$")
ax.legend()
print "regression fit for A = %.2e"%A
print "regression fit for E = %.2f"%E
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Figure $\ln k ~\mathrm{vs}~\frac{1}{T}$:</b> to estimate the activation energy and the collision factor to use as initial guesses in the subsequent nonlinear regression.
The estimated Arrhenius parameters can now be used to estimate shelf-life.
|
# Shelf life (years) predicted by the first-order model using the
# stepwise (linear regression) Arrhenius parameters.
temp = _dat['const']['R']*(_dat['predictions'].index.astype('f8').values+273)
k = A * np.exp(-E/temp)
_dat['predictions']['first_order'] = _dat['setpt'] / k * 1/(3600*24*365)
_dat['predictions']
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
Method 2 - non-linear regression
According to a paper by Fung (Statistical prediction of drug stability based on nonlinear parameter estimation, Fung et al Journal Pharm Sci Vol 73 No 5 pg 657-662 1984), improved confidence can be obtained by performing a non-linear regression of all the data simultaneously, instead of the stepwise approach. In Fung's paper, they pre-assumed a shelf-life defined by 10% degradation of the product, which simplifies the estimation. However, the regression done here will be performed without the assumption.
\begin{equation}
D = D_{To} e^{-k t}
\end{equation}
\begin{equation}
\ln D = \ln D_{To} - kt
\end{equation}
\begin{equation}
\ln \left ( \frac{D}{D_{To}} \right ) = - A \exp \left ( -\frac{E}{RT} \right ) t
\end{equation}
\begin{equation}
\left ( \frac{\ln \left ( \frac{D}{D_{To}} \right )}{t} \right ) + A \exp \left ( -\frac{E}{RT} \right ) = 0
\end{equation}
<br>
Thus, the Arrhenius parameters are found by finding the roots to the above equation, where $A$ and $E$ are the variables.
|
# Build the regression table for the simultaneous non-linear fit:
# D* = ln(D/Do), time in seconds, absolute temperature; drop t=0 rows.
D = Do + Io - _dat['exppts']['P'].values
df = pd.DataFrame(np.log(D/(Do+Io)), columns=['D*'])
df['t'] = _dat['exppts'].index.get_level_values(1)*24*3600
df['T'] = _dat['exppts'].index.get_level_values(0) + 273.
df = df.loc[df['t']!=0]
def error(p):
    """Sum of squared residuals ln(D/Do)/t + A*exp(-E/(R*T)) over all
    experimental points; p = [A, E]. Reads the module-level table df and
    gas constant R."""
    A, E = p
    residual = df['D*'] / df['t'] + A * np.exp(-E / (R * df['T']))
    return np.sum(residual ** 2)
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Figure of square error</b> indicates that the objective function likely has some very shallow gradients near the minimum, thus inhibiting convergence to a unique value.
|
# Shelf life (years) from the non-linear first-order fit parameters.
temp = _dat['const']['R']*(_dat['predictions'].index.astype('f8').values+273)
k = A * np.exp(-E/temp)
_dat['predictions']['first_order_nonlinear'] = impurity_setpoint / k * 1/(3600*24*365)
_dat['predictions']
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
<b>Table of prediction comparison</b> suggests that zero_order was actually the most accurate of the models. It should be noted that the convergence of the non-linear regression is questionable. The under prediction of shelf-life is to be expected, because of the faster reaction of the amorphous form that gets consumed early in the process.
King, Kung, Fung method
As illustrated above, there is a weak dependence of the objective function on the kinetic parameters near the minimum. Consequently, the optimization problem has difficulty converging to an absolute minimum. The paper by King et al suggests some modification to the equations prior to the non-linear regression. First, the concentration profiles are written as a function of the temperature dependent reaction rate constant, $k$, for zero, first and second order reactions.
\begin{align}
& \mathrm{zero~order} & {\Huge |} & \mathrm{first~order} & {\Huge |} & \mathrm{second~order} \\
& C = C_o - kt & {\Huge |} & C = C_o \exp(-kt) & {\Huge |} & C = \frac{C_o}{1 + C_okt}
\end{align}
Here, $Co$ is the total starting drug concentration and $C$ is the total drug concentration at time, $t$. The rate constant is related to temperature via the Arrhenius equation.
\begin{equation}
k = Ae^{-\frac{E}{RT}}
\end{equation}
The Arrhenius relation could be substituted into the above concentration equations, and a non-linear regression be applied to estimate the parameters $A$ and $E$. However, as illustrated in the previous method example above, the gradients are very small in the vicinity of the global minimum, which makes the parameter estimation difficult. Part of the problem seems to arise because the collision factor, $A$, is on the order of $10^5$, whereas the activation energy is on the order of $10^1$. The disparity in size of the parameters may lead to some instabilities.
Fung et. al eliminate the collision parameter, $A$ by using the Arrhenius equation at a temperature of 298 K.
\begin{equation}
A = k_{298} e^{\frac{E}{R298}}
\end{equation}
Substitution gives:
\begin{equation}
k = k_{298} e^{\frac{E}{R298}} e^{-\frac{E}{RT}} = k_{298} e^{\frac{E}{R} \left ( \frac{1}{298} - \frac{1}{T} \right ) }
\end{equation}
The rate constant at 298 K, $k_{298}$, is then expressed as a function of the user defined concentration set-point to define the product shelf-life, $C_f$, and the time it will take the drug to reach the user set-point at 298 K, $t_{298}$.
\begin{equation}
k_{298} = f(C_f, t_{298})
\end{equation}
Here, $f$ is a function that depends on the reaction order. The shelf-life concentration set-point can be expressed in terms of the variable impurity-setpoint, $S_p$ as:
\begin{equation}
S_p = \frac{C_o - C_f}{C_o} \rightarrow C_f = C_o(1-S_p)
\end{equation}
Rearranging the above concentration equations for zero, first and second order reactions and defining the parameters using the 298 K subscript gives:
\begin{align}
& \mathrm{zero~order} & {\Huge |} & \mathrm{first~order} & {\Huge |} & \mathrm{second~order} \\
& k_{298} = \frac{C_o - C_f}{t^*_{298}} & {\Huge |}
& k_{298} = -\frac{\ln \frac{C_f}{C_o}}{t^*_{298}} & {\Huge |}
& k_{298} = \frac{1}{t^*_{298}} \left ( \frac{1}{C_f} - \frac{1}{C_o} \right ) \\
& k_{298} = \frac{S_p}{t^*_{298}} & {\Huge |}
& k_{298} = -\frac{\ln (1 - S_p)}{t^*_{298}} & {\Huge |}
& k_{298} = \frac{1}{C_o t^*_{298}} \left ( \frac{S_p}{1-S_p} \right )
\end{align}
Substituting the expression for the rate constant at 298 K, $k_{298}$, into the concentration equations gives:
\begin{align}
& \mathrm{zero~order} & {\Huge |} & \mathrm{first~order} & {\Huge |} & \mathrm{second~order} \\
& C = C_o \left [ 1 - \frac{S_p}{t^*_{298}} e^{\frac{E}{R} \left ( \frac{1}{298} - \frac{1}{T} \right ) } t \right ] & {\Huge |}
& C = C_o \exp \left ( \frac{\ln (1-S_p)}{t^*_{298}} e^{\frac{E}{R} \left ( \frac{1}{298} - \frac{1}{T} \right ) } t \right ) & {\Huge |}
& C = \frac{C_o}{1 + \frac{1}{t^*_{298}} \left ( \frac{S_p}{1-S_p} \right ) e^{\frac{E}{R} \left ( \frac{1}{298} - \frac{1}{T} \right ) } t }
\end{align}
In the above relationships, the fit parameters are the activation energy, $E$, and the product shelf-life at 298 K, $t^*_{298}$.
Define the variables that will be used for the subsequent non-linear regression analysis.
|
# Assemble the arrays used by the King/Kung/Fung non-linear regressions:
# total drug concentration C at each (T, t) sample, excluding t=0 rows.
Co = Do + Io
C = Co - _dat['exppts']['P'].values
t = _dat['exppts'].index.get_level_values(1).values*24*3600.
T = _dat['exppts'].index.get_level_values(0).values + 273.
C = C[t!=0]
T = T[t!=0]
t = t[t!=0]
Sp = _dat['setpt']
R = _dat['const']['R']
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
Zero order (method of King, Kung, Fung)
Define the objective function for a zero-order reaction.
|
def error(p):
    """Objective for the zero-order King/Kung/Fung fit.

    p = [t_298, E]: shelf life at 298 K (seconds) and activation energy
    (kcal/mol). Reads the module-level experimental arrays C (measured
    drug concentration), t (seconds), T (K) and scalars Co, Sp, R.
    Returns the sum of squared residuals between the measurements and the
    zero-order model C_model = Co * (1 - k(T) * t).
    """
    t_298 = p[0]
    E = p[1]
    k = np.exp(E/R * (1/298. - 1/T))
    k = Sp / t_298 * k
    # Bug fix: the residual is (observed - model). The original returned
    # sum(model**2), which ignores the data entirely and just drives the
    # model concentration toward zero.
    err = C - Co * (1 - k * t)
    return np.sum(err**2)
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
Perform the non-linear regression over a range of initial conditions.
|
# Run the minimization from a sweep of scaled initial guesses to probe
# sensitivity to the starting point (the objective has shallow gradients).
t_298 = 10.*365*24*3600
E = 10.
xo = np.array([t_298,E])
opt = []
for d in np.arange(.1,10,.1):
    opt.append(minimize(error, xo*d, tol=1e-30))
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
Plot the error of the regression as a function of the initial conditions.
|
# Squared error of each converged fit vs its initial-condition index;
# keep the parameters from the best (lowest-error) run.
e2 = np.array([error(r.x) for r in opt])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(e2)
ax.set_ylabel(r"$\sum \mathrm{error}^2$")
ax.set_xlabel("initial condition index")
t_298, E = (opt[e2.argmin()]).x
print "optimal t_298 = %.1f y" %(t_298/(365*24*3600))
print "optimal E = %.2f kcal/mol K" %E
|
notebooks/Impurity Prediction Example 1.ipynb
|
brentjm/Impurity-Predictions
|
bsd-2-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.