code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # Spark streaming with trace DSL
// This notebook uses spark streaming and trace DSL to extract features from trace data
// and publish metrics to Prometheus.
// %%loadFromPOM
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.12</artifactId>
<version>2.4.4</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.12</artifactId>
<version>2.4.4</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-10_2.12</artifactId>
<version>2.4.4</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.12</artifactId>
<version>2.3.0</version>
</dependency>
<dependency>
<groupId>io.prometheus</groupId>
<artifactId>simpleclient_httpserver</artifactId>
<version>0.7.0</version>
</dependency>
// ## Install library to the local maven repository
// This step is only needed if trace DSL source code has been modified.
// Open terminal in Jupyter and run the following command:
// ```
// cd work && ./mvnw clean install -DskipTests
// ```
// +
// %maven io.jaegertracing:jaeger-tracedsl:1.0-SNAPSHOT
// Sanity check: printing these classes proves that both the trace-DSL jar and
// the Spark dependency resolved and are loadable in this kernel.
System.out.println(io.jaegertracing.dsl.gremlin.Keys.class);
System.out.println(org.apache.spark.SparkConf.class);
// -
// ## Define connection to Kafka
// Kafka bootstrap address, the topic Jaeger publishes spans to, and the local
// port on which the Prometheus metrics HTTP server is exposed.
String kafkaServers = "192.168.42.6:32632";
String kafkaTopic = "jaeger-spans";
int prometheusPort = 9001;
// +
import io.prometheus.client.exporter.HTTPServer;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.apache.tinkerpop.gremlin.structure.Graph;
import scala.Tuple2;
import io.jaegertracing.dsl.gremlin.*;
import io.jaegertracing.dsl.gremlin.model.*;
// Expose Prometheus metrics over HTTP so they can be scraped.
HTTPServer server = new HTTPServer(prometheusPort);
// Local Spark with one worker thread per core; 5 second micro-batches.
SparkConf sparkConf = new SparkConf().setAppName("Trace DSL").setMaster("local[*]");
JavaSparkContext sc = new JavaSparkContext(sparkConf);
JavaStreamingContext ssc = new JavaStreamingContext(sc, new Duration(5000));
Set<String> topics = Collections.singleton(kafkaTopic);
Map<String, Object> kafkaParams = new HashMap<>();
kafkaParams.put("bootstrap.servers", kafkaServers);
kafkaParams.put("key.deserializer", StringDeserializer.class);
// fixed stray space: `kafkaParams. put` -> `kafkaParams.put`
kafkaParams.put("value.deserializer", ProtoSpanDeserializer.class);
// hack to start always from beginning: a fresh group.id per run means no
// committed offsets exist, so "auto.offset.reset=earliest" takes effect every time.
kafkaParams.put("group.id", "trace-aggregation-" + System.currentTimeMillis());
kafkaParams.put("auto.offset.reset", "earliest");
kafkaParams.put("enable.auto.commit", false);
kafkaParams.put("startingOffsets", "earliest");
kafkaParams.put("endingOffsets", "latest");
JavaInputDStream<ConsumerRecord<String, Span>> messages =
    KafkaUtils.createDirectStream(
        ssc,
        LocationStrategies.PreferConsistent(),
        ConsumerStrategies.Subscribe(topics, kafkaParams));
// Key every span by its trace id so all spans of one trace can be grouped.
JavaPairDStream<String, Span> traceIdSpanTuple = messages.mapToPair(record -> {
    return new Tuple2<>(record.value().traceId, record.value());
});
// Assemble the spans sharing a trace id (within a batch) into a Trace object.
JavaDStream<Trace> tracesStream = traceIdSpanTuple.groupByKey().map(traceIdSpans -> {
    Iterable<Span> spans = traceIdSpans._2();
    Trace trace = new Trace();
    trace.traceId = traceIdSpans._1();
    trace.spans = StreamSupport.stream(spans.spliterator(), false)
        .collect(Collectors.toList());
    return trace;
});
// For each trace, build its Gremlin graph and run the trace-depth calculation.
tracesStream.foreachRDD((traceRDD, time) -> {
    traceRDD.foreach(trace -> {
        Graph graph = GraphCreator.create(trace);
        TraceDepth.calculate(graph);
    });
});
ssc.start();
ssc.awaitTermination();
// -
// ## Stop
// NOTE(review): awaitTermination() blocks; in notebook use these shutdown
// statements are executed as a separate cell.
if (server != null) server.stop();
if (ssc != null) { ssc.stop(); ssc.close();}
// ## Get Prometheus metrics
//
// Open browser on the host running this notebook to see exported Prometheus metrics e.g. http://localhost:9001.
// Or configure Prometheus to scrape metrics from the host where this notebook is running.
| jupyter/spark-runner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/gimseng/99-ML-Learning-Projects/blob/master/001/solution/titanic_tf_nn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# </table>
# # Data Processing
# First we can import the relevant libraries.
import numpy as np
import pandas as pd
# Next we can download the data from github using pandas.
# Base URL of the repository and path to the Titanic CSVs inside it.
project_url = 'https://raw.githubusercontent.com/gimseng/99-ML-Learning-Projects/'
data_path = 'master/001/solution/'
train=pd.read_csv(project_url+data_path+'train.csv')
test=pd.read_csv(project_url+data_path+'test.csv')
# Since the datasets contain a lot of data that isn't really necessary we can remove those columns from both the train and testing datasets.
# Drop identifier and free-text columns that the network will not use.
# PassengerId is kept in `test` because the submission file needs it later.
del train['PassengerId']
del train['Ticket']
del train['Fare']
del train['Cabin']
del train['Name']
del test['Ticket']
del test['Fare']
del test['Cabin']
del test['Name']
# Since we want all fields to be numerical to feed into the nn we can substitute 'male' and 'female' for 0 and 1 in the 'Sex' column respectively.
# +
def getNum(sex):
    """Encode 'male' as 0 and 'female' as 1.

    Returns None for any other value (including NaN), which keeps
    unexpected entries visible as missing data after ``Series.apply``.
    The parameter was renamed from ``str`` to avoid shadowing the builtin.
    """
    if sex == 'male':
        return 0
    if sex == 'female':
        return 1
# Map the 'Sex' column to numeric codes in both datasets.
train["Sex"]=train["Sex"].apply(getNum)
test["Sex"]=test["Sex"].apply(getNum)
# -
# We can now do the same with the 'Embarked' column.
# +
def getEmbarked(port):
    """Encode embarkation port: 'S' -> 0, 'C' -> 1, 'Q' -> 2.

    Returns None for any other value (e.g. NaN), leaving unknown ports
    as missing data after ``Series.apply``. The parameter was renamed
    from ``str`` to avoid shadowing the builtin.
    """
    if port == 'S':
        return 0
    elif port == 'C':
        return 1
    elif port == 'Q':
        return 2
# Map the 'Embarked' column to numeric codes in both datasets.
train["Embarked"] = train["Embarked"].apply(getEmbarked)
test["Embarked"] = test["Embarked"].apply(getEmbarked)
# -
# Quick visual check of the processed frames.
train.head()
test.head()
# # Neural Network (PyTorch)
# First we can import pytorch.
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
# Now we can define our hyperparameters
BATCH_SIZE = 10
LR = 1e-4
LR_DECAY = 1e-6
VALIDATION_SPLIT = 0.2
EPOCHS = 800
# Then we need to convert the pandas dataframes to lists of pytorch tensors to be passed through our network
# +
# X/y hold one tensor per mini-batch; any trailing rows beyond a whole number
# of batches are dropped by the integer division below.
X = []
y = []
for cbatch in range(len(train.values)//BATCH_SIZE):
    batch_X = []
    batch_y = []
    for value in train.values[(cbatch*BATCH_SIZE):((cbatch*BATCH_SIZE)+BATCH_SIZE)]:
        # Impute missing values: column 3 (presumably Age) defaults to 30,
        # column 6 (presumably Embarked) to 0 -- TODO confirm column order.
        if np.isnan(value[3]):
            value[3] = float(30)
        if np.isnan(value[6]):
            value[6] = 0
        # Scale column 3 into roughly [0, 1].
        value[3] = round(value[3]/100, 6)
        # Column 0 (presumably Survived) is the label; the rest are features.
        batch_X.append(value[1::])
        batch_y.append(int(value[0]))
    X.append(torch.Tensor(batch_X))
    y.append(torch.Tensor(batch_y))
# -
# Now we split the dataset up into training and validation data using our VALIDATION_SPLIT hyperparameter
X_train = X[0:int(len(X)*(1-VALIDATION_SPLIT))]
y_train = y[0:int(len(X)*(1-VALIDATION_SPLIT))]
X_val = X[int(len(X)*(1-VALIDATION_SPLIT))::]
y_val = y[int(len(X)*(1-VALIDATION_SPLIT))::]
print(len(X_val))
print(len(X_train))
print(len(y_val))
print(len(y_train))
# Next we define the nn model
class Net(nn.Module):
    """Four-layer fully connected classifier: 6 features -> 2 log-probabilities."""

    def __init__(self):
        super().__init__()
        # Layer widths taper from the 6 input features down to the 2 classes.
        self.fc1 = nn.Linear(6, 8)
        self.fc2 = nn.Linear(8, 6)
        self.fc3 = nn.Linear(6, 4)
        self.fc4 = nn.Linear(4, 2)

    def forward(self, x):
        # ReLU after each hidden layer; the output layer stays linear and is
        # followed by a log-softmax over the class dimension.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        return F.log_softmax(self.fc4(x), dim=1)
# then initialize both the model and the optimizer with the learning rate hyperparameters
# +
net = Net()
# NOTE(review): Adam's `eps` is a numerical-stability constant, not a learning
# rate decay; passing LR_DECAY here almost certainly does not do what the name
# suggests -- confirm whether `weight_decay=LR_DECAY` was intended.
optimizer = optim.Adam(net.parameters(), lr=LR, eps=LR_DECAY)
# -
# Now we can train the network. I am logging the in-sample loss, validation loss and validation accuracy and storing them in a list after each epoch.
# +
# Per-epoch logs: in-sample loss, validation loss and validation accuracy.
epoch_loss = []
epoch_VAL_loss = []
epoch_VAL_acc = []
for epoch in range(EPOCHS):
    # Training pass over all mini-batches.
    for batch in range(len(X_train)):
        curr_X = X_train[batch]
        curr_y = y_train[batch]
        net.zero_grad()
        output = net(curr_X.view(-1, 6))
        loss = F.nll_loss(output, curr_y.long())
        loss.backward()
        optimizer.step()
    # Validation
    total = 0
    correct = 0
    with torch.no_grad():
        for batch in range(len(X_val)):
            curr_X = X_val[batch]
            curr_y = y_val[batch]
            output = net(curr_X.view(-1, 6))
            VAL_loss = F.nll_loss(output, curr_y.long())
            for idx, i in enumerate(output):
                if torch.argmax(i) == curr_y[idx]:
                    correct += 1
                total += 1
    # NOTE(review): `loss` and `VAL_loss` are the losses of the *last* batch of
    # the epoch, not epoch averages.
    epoch_loss.append(round(float(loss), 5))
    epoch_VAL_loss.append(round(float(VAL_loss), 5))
    epoch_VAL_acc.append(round(correct/total, 5))
    print(f"Epoch: {epoch}/{EPOCHS} Loss: {round(float(loss), 3)}, val_loss: {round(float(VAL_loss), 3)}, val_acc: {round(correct/total, 3)}")
# -
# Now we can use matplotlib to plot the results of our model
import matplotlib.pyplot as plt
# This is the in-sample loss
plt.plot(epoch_loss)
plt.show()
# this is validation loss
plt.plot(epoch_VAL_loss)
plt.show()
# and this is our validation accuracy
plt.plot(epoch_VAL_acc)
plt.show()
# Since the results look good we can now run the testing dataset through the trained network
# First we need to convert it to pytorch tensors
# +
X_test = []
# Exclude PassengerId from the features; `test` still keeps it for the
# submission file below.
X_test_df=test.drop('PassengerId', axis=1).copy()
for value in X_test_df.values:
    # Impute missing values in column 2 (presumably Age -- TODO confirm) with
    # 30 and apply the same /100 scaling used during training.
    if np.isnan(value[2]):
        value[2] = 30
    value[2] = value[2]/100
    X_test.append(torch.Tensor(value))
# -
# Then we can run the data through the network and store the predictions
predictions = []
for data in X_test:
    # argmax over the two log-probabilities gives the predicted class (0 or 1).
    prediction = torch.argmax(net(data.view(-1, 6)))
    predictions.append(int(prediction))
# And finally we can save the predictions to a csv file
output = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': predictions})
output.to_csv('prediction_pt.csv', index=False)
| 001/solution/titanic_pt_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
from pathlib import Path
import numpy as np
import os
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
#import json
import pandas as pd
import orjson as json
import re
from ue4nlp.ue_scores import *
from ue4nlp.ue_variation_ratio import ue_variation_ratio
from utils.plot_error_detection import draw_charts
from utils.plot_error_detection import create_chart_data
import matplotlib.pyplot as plt
from utils.plot_error_detection import plot_error_detection, plot_rejection_curve_aucs
def plot_roc_auc(res_path):
    """Load dev-set inference results from `res_path` and draw error-detection charts."""
    inference_file = Path(res_path) / 'dev_inference.json'
    with open(inference_file) as handle:
        outputs = json.loads(handle.read())
    probs = np.asarray(outputs['probabilities'])
    labels = np.asarray(outputs['true_labels'])
    # Swap the first two axes of the stored sampled probabilities before plotting.
    sampled = np.asarray(outputs['sampled_probabilities']).transpose(1, 0, 2)
    plot_error_detection(probs, labels, sampled)
def plot_rejection_curves(res_path):
    """Load dev-set inference results from `res_path` and plot rejection-curve AUCs."""
    inference_file = Path(res_path) / 'dev_inference.json'
    with open(inference_file) as handle:
        outputs = json.loads(handle.read())
    # Swap the first two axes of the stored sampled probabilities before plotting.
    sampled = np.asarray(outputs['sampled_probabilities']).transpose(1, 0, 2)
    plot_rejection_curve_aucs(np.asarray(outputs['probabilities']),
                              np.asarray(outputs['true_labels']),
                              sampled,
                              np.asarray(outputs['answers']), )
# +
def calc_uncertainty_regection_curve(errors, uncertainty, group_by_uncertainty=True):
    """Rejection curve: entry k is the cumulative error mass of the objects
    kept after rejecting the k most uncertain ones (entry 0 keeps everything,
    the last entry is 0).

    NOTE: this variant returns raw cumulative sums -- the division by the
    number of objects is deliberately left out (a normalised copy of this
    function appears later in the file).
    """
    n_objects = errors.shape[0]
    if group_by_uncertainty:
        frame = pd.DataFrame({"errors": errors, "uncertainty": uncertainty})
        # Objects sharing an uncertainty value are interchangeable, so each
        # receives the mean error of its uncertainty group before sorting.
        group_means = frame.groupby("uncertainty").mean()
        group_means.rename(columns={"errors": "mean_errors"}, inplace=True)
        frame = frame.join(group_means, "uncertainty")
        frame.drop("errors", axis=1, inplace=True)
        order = frame["uncertainty"].argsort()
        ranked_errors = frame["mean_errors"][order]
    else:
        order = uncertainty.argsort()
        ranked_errors = errors[order]
    curve = np.zeros(n_objects + 1)
    # Reversed cumulative sum: position k excludes the k most uncertain objects.
    curve[:-1] = np.cumsum(ranked_errors)[::-1]  # deliberately unnormalised
    return curve
def calc_aucs(errors, uncertainty):
    """Area-based summary of the rejection curve.

    Returns (rejection_ratio, uncertainty_rejection_auc), where the ratio
    rescales the measured AUC between a random ordering (0) and the ideal
    ordering that rejects the largest errors first (100).
    """
    curve = calc_uncertainty_regection_curve(errors, uncertainty)
    uncertainty_rejection_auc = curve.mean()
    # A random ordering decays linearly from the total error, so its AUC is
    # half of the curve's starting value.
    random_rejection_auc = curve[0] / 2
    ideal_rejection_auc = calc_uncertainty_regection_curve(errors, errors).mean()
    span = ideal_rejection_auc - random_rejection_auc
    rejection_ratio = (uncertainty_rejection_auc - random_rejection_auc) / span * 100.0
    return rejection_ratio, uncertainty_rejection_auc
# -
# Path to one saved inference run (model / dataset / ue-method / seed layout).
res_path = '../workdir/run_glue_for_model_series/electra_raw_sn/20newsgroups/0.0/nuq/results/42'
with open(Path(res_path) / 'dev_inference.json') as f:
    model_outputs = json.loads(f.read())
# +
# Per-object epistemic uncertainty, class probabilities and gold labels.
unc = np.asarray(model_outputs['epistemic'])
probs = np.asarray(model_outputs['probabilities'])
labels = np.asarray(model_outputs['true_labels'])
predictions = np.argmax(probs, axis=-1)
# Binary error indicator (was uint8; presumably widened to uint64 for the
# cumulative sums downstream -- see the trailing comment).
errors = (labels != predictions).astype("uint64")#.astype("uint8")
# -
calc_aucs(errors, unc)
plt.plot(calc_uncertainty_regection_curve(errors, unc))
# +
from analyze_results import *
# Rejection-curve AUC, with the curve's points returned for plotting.
res, x, y = rcc_auc(-unc, errors, return_points=True)
# -
res
plt.plot(x, np.cumsum(y)[::-1]/len(y))
# +
import pandas as pd
import numpy as np
from sklearn.metrics import *
from sklearn.utils.multiclass import type_of_target
from sklearn.utils import check_consistent_length, column_or_1d, assert_all_finite
from sklearn.utils.extmath import stable_cumsum
def calc_uncertainty_regection_curve(errors, uncertainty, group_by_uncertainty=True):
    """Normalised rejection curve.

    Entry k is the cumulative error of the objects retained after rejecting
    the k most uncertain ones, divided by the total number of objects
    (entry 0 keeps everything; the final entry is 0).
    """
    n_objects = errors.shape[0]
    if group_by_uncertainty:
        frame = pd.DataFrame({"errors": errors, "uncertainty": uncertainty})
        # Objects sharing an uncertainty value are interchangeable, so each
        # receives the mean error of its uncertainty group before sorting.
        group_means = frame.groupby("uncertainty").mean()
        group_means.rename(columns={"errors": "mean_errors"}, inplace=True)
        frame = frame.join(group_means, "uncertainty")
        frame.drop("errors", axis=1, inplace=True)
        order = frame["uncertainty"].argsort()
        ranked_errors = frame["mean_errors"][order]
    else:
        order = uncertainty.argsort()
        ranked_errors = errors[order]
    curve = np.zeros(n_objects + 1)
    # Reversed cumulative sum, normalised by the dataset size.
    curve[:-1] = np.cumsum(ranked_errors)[::-1] / n_objects
    return curve
# Sanity checks for the normalised rejection curve.
assert np.allclose(
    calc_uncertainty_regection_curve(np.array([2, 1]), np.array([1, 0])).mean(),
    2 / 3
)
assert np.allclose(
    calc_uncertainty_regection_curve(np.arange(5), np.array([0, 0, 2, 1, 1])).mean(),
    0.8
)
# With constant (zero) uncertainty every object falls in one group and gets
# the mean error, so the curve decays linearly and its mean is half the
# mean error.
debug_errors = np.random.rand(10)
assert np.allclose(
    calc_uncertainty_regection_curve(debug_errors, np.zeros_like(debug_errors)).mean(),
    debug_errors.mean() / 2
)
def calc_aucs(errors, uncertainty):
    """Summarise the rejection curve as (rejection_ratio, rejection_auc).

    The ratio linearly rescales the measured AUC so that a random ordering
    scores 0 and the oracle ordering (sorting by the true errors) scores 100.
    """
    rejection_curve = calc_uncertainty_regection_curve(errors, uncertainty)
    measured_auc = rejection_curve.mean()
    # A random ordering decays linearly from the full error level to zero.
    baseline_auc = rejection_curve[0] / 2
    oracle_auc = calc_uncertainty_regection_curve(errors, errors).mean()
    rejection_ratio = (measured_auc - baseline_auc) / (
        oracle_auc - baseline_auc) * 100.0
    return rejection_ratio, measured_auc
def prr_classification(labels, probs, measure, rev: bool):
    """Prediction-rejection ratio for classification.

    `measure` is the per-object uncertainty; pass rev=True when larger values
    of `measure` mean higher confidence rather than higher uncertainty.
    """
    uncertainty = -measure if rev else measure
    predicted = np.argmax(probs, axis=1)
    mistakes = (labels != predicted).astype("float32")
    return calc_aucs(mistakes, uncertainty)
def prr_regression(targets, preds, measure):
    """Prediction-rejection ratio for regression, with squared error as the per-object error."""
    flattened = np.squeeze(preds)
    squared_errors = (flattened - targets) ** 2
    return calc_aucs(squared_errors, measure)
def ood_detect(domain_labels, in_measure, out_measure, mode='ROC', pos_label=1):
    """Out-of-domain detection score.

    Concatenates the in-domain and out-of-domain uncertainty measures and
    scores how well they separate `domain_labels`. Returns AUPR when
    mode == 'PR', ROC-AUC when mode == 'ROC' (and None for any other mode).
    """
    scores = np.concatenate((in_measure, out_measure), axis=0)
    # np.longdouble is the portable spelling of extended precision; the
    # original np.float128 alias does not exist on all platforms (e.g.
    # Windows or most ARM builds), where it raises AttributeError.
    scores = np.asarray(scores, dtype=np.longdouble)
    if pos_label != 1:
        # Flip the sign so the positive class still receives the larger scores.
        scores *= -1.0
    if mode == 'PR':
        precision, recall, thresholds = precision_recall_curve(domain_labels, scores)
        aupr = auc(recall, precision)
        return aupr
    elif mode == 'ROC':
        roc_auc = roc_auc_score(domain_labels, scores)
        return roc_auc
def nll_regression(target, mu, var, epsilon=1e-8, raw=False):
    """Gaussian negative log-likelihood of `target` under N(mu, var).

    `epsilon` guards against zero variance. With raw=True the per-element
    NLL array is returned instead of its mean.
    """
    squared_residual = (target - mu) ** 2
    nll = (
        squared_residual / (2.0 * var + epsilon)
        + np.log(var + epsilon) / 2.0
        + np.log(2 * np.pi) / 2.0
    )
    return nll if raw else np.mean(nll)
def nll_class(target, probs, epsilon=1e-10):
    """Per-sample binary cross-entropy from two-column class probabilities."""
    neg_log = -np.log(probs + epsilon)
    # Use column 1's loss where target == 1 and column 0's where target == 0.
    return target * neg_log[:, 1] + (1 - target) * neg_log[:, 0]
def ens_nll_regression(target, preds, epsilon=1e-8, raw=False):
    """Negative log-likelihood of a Gaussian ensemble.

    `preds` has shape (models, samples, 2), holding per-model (mean, variance).
    Per-model likelihoods are averaged before taking the log, i.e. the
    ensemble is treated as a uniform mixture of Gaussians. With raw=True the
    per-sample NLL array is returned instead of its mean.
    """
    means = preds[:, :, 0]
    variances = preds[:, :, 1]
    per_model_nll = (
        (target - means) ** 2 / (2.0 * variances + epsilon)
        + np.log(variances + epsilon) / 2.0
        + np.log(2 * np.pi) / 2.0
    )
    mixture_likelihood = np.mean(np.exp(-1 * per_model_nll), axis=0)
    per_sample_nll = -1 * np.log(mixture_likelihood)
    return per_sample_nll if raw else np.mean(per_sample_nll)
def calc_rmse(preds, target, raw=False):
    """Root-mean-squared error; with raw=True return the per-element squared errors."""
    squared = (preds - target) ** 2
    if raw:
        return squared
    return np.sqrt(np.mean(squared))
def ens_rmse(target, preds, epsilon=1e-8, raw=False):
    """RMSE of the ensemble's averaged mean prediction.

    `preds` has shape (models, samples, 2); only the mean column is used.
    `epsilon` is accepted for signature symmetry with ens_nll_regression.
    """
    model_means = preds[:, :, 0]  # mean values predicted by all models
    ensemble_mean = np.mean(model_means, axis=0)  # average predicted mean value
    return calc_rmse(ensemble_mean, target, raw=raw)
def _check_pos_label_consistency(pos_label, y_true):
# ensure binary classification if pos_label is not specified
# classes.dtype.kind in ('O', 'U', 'S') is required to avoid
# triggering a FutureWarning by calling np.array_equal(a, b)
# when elements in the two arrays are not comparable.
classes = np.unique(y_true)
if (pos_label is None and (
classes.dtype.kind in 'OUS' or
not (np.array_equal(classes, [0, 1]) or
np.array_equal(classes, [-1, 1]) or
np.array_equal(classes, [0]) or
np.array_equal(classes, [-1]) or
np.array_equal(classes, [1])))):
classes_repr = ", ".join(repr(c) for c in classes)
raise ValueError(
f"y_true takes value in {{{classes_repr}}} and pos_label is not "
f"specified: either make y_true take value in {{0, 1}} or "
f"{{-1, 1}} or pass pos_label explicitly."
)
elif pos_label is None:
pos_label = 1.0
return pos_label
def _binary_clf_curve_ret(y_true, y_score, pos_label=None, sample_weight=None):
    """Cumulative (fps, tps, y_score) at every rank, sorted by decreasing score.

    Adapted from sklearn's private ``_binary_clf_curve``; the deduplication of
    tied thresholds is deliberately disabled here (see the commented-out
    ``threshold_idxs`` lines), so one point per object is returned rather
    than one per distinct score value.
    """
    # Check to make sure y_true is valid
    y_type = type_of_target(y_true)
    if not (y_type == "binary" or
            (y_type == "multiclass" and pos_label is not None)):
        raise ValueError("{0} format is not supported".format(y_type))
    check_consistent_length(y_true, y_score, sample_weight)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    assert_all_finite(y_true)
    assert_all_finite(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    pos_label = _check_pos_label_consistency(pos_label, y_true)
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values (stable mergesort keeps ties
    # in their original relative order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # distinct_value_indices = np.where(np.diff(y_score))[0]
    # threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = stable_cumsum(y_true * weight) # [threshold_idxs]
    if sample_weight is not None:
        # express fps as a cumsum to ensure fps is increasing even in
        # the presence of floating point errors
        fps = stable_cumsum((1 - y_true) * weight) # [threshold_idxs]
    else:
        fps = stable_cumsum((1 - y_true)) # [threshold_idxs]
    return fps, tps, y_score # [threshold_idxs]
def _precision_recall_curve_retention(y_true, probas_pred, *, pos_label=None,
                                      sample_weight=None):
    """Precision/recall retention curve.

    Adapted from sklearn's ``precision_recall_curve`` but computed over the
    cumulative counts at every rank (no threshold deduplication) and reversed
    over the full range rather than truncated at full recall. Returns
    (precision, recall, thresholds) with recall decreasing.
    """
    fps, tps, thresholds = _binary_clf_curve_ret(y_true, probas_pred,
                                                 pos_label=pos_label,
                                                 sample_weight=sample_weight)
    precision = tps / (tps + fps)
    # 0/0 at the head of the curve (no positives counted yet) is defined as 0.
    precision[np.isnan(precision)] = 0
    recall = tps / tps[-1]
    # Reverse the outputs so recall is decreasing and anchor the curve at
    # (precision=1, recall=0). The unused `last_ind = tps.searchsorted(...)`
    # left over from the sklearn original has been removed as dead code.
    sl = slice(-1, None, -1)
    return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def _acceptable_error(errors, threshold):
return np.asarray(errors <= threshold, dtype=np.float32)
def _calc_fbeta_regection_curve(errors, uncertainty, threshold, beta=1.0, group_by_uncertainty=True, eps=1e-10):
    """F-beta along the retention curve: precision/recall of keeping only
    predictions whose error is within `threshold`, ranked by confidence.

    `group_by_uncertainty` is accepted for API symmetry but is not used here.
    """
    acceptable = _acceptable_error(errors, threshold)
    # Negate the uncertainty so the most confident predictions rank first.
    precision, recall, _ = _precision_recall_curve_retention(acceptable, -uncertainty)
    precision = np.asarray(precision)
    recall = np.asarray(recall)
    f_scores = (1 + beta ** 2) * precision * recall / (precision * beta ** 2 + recall + eps)
    return f_scores, precision, recall
def f_beta_metrics(errors, uncertainty, threshold, beta=1.0):
    """
    :param errors: Per sample errors - array [n_samples]
    :param uncertainty: Uncertainties associated with each prediction. array [n_samples]
    :param threshold: The error threshold below which we consider the prediction acceptable
    :param beta: The beta value for the F_beta metric. Defaults to 1
    :return: fbeta_auc, fbeta_95, retention
    """
    f_scores, pr, rec = _calc_fbeta_regection_curve(errors, uncertainty, threshold, beta)
    ret = np.arange(pr.shape[0]) / pr.shape[0]
    f_auc = auc(ret[::-1], f_scores)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int performs the same truncation for the 95%-retention index.
    f95 = f_scores[::-1][int(0.95 * pr.shape[0])]
    return f_auc, f95, f_scores[::-1]
# -
# F1-retention metrics at an error threshold of 0.5.
f_auc, f95, f_scores = f_beta_metrics(errors, unc, 0.5)
f_auc
plt.plot(f_scores)
# +
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score, auc, roc_auc_score, f1_score
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import os
import json
import re
from ue4nlp.ue_scores import *
def get_score_ratio(sorted_indexes, answers, true_answers, ratio):
    """Micro-F1 after an oracle replaces the first `ratio` fraction of
    `sorted_indexes` with their ground-truth answers (rejection-style eval)."""
    cutoff = int(len(sorted_indexes) * ratio)
    rejected = sorted_indexes[:cutoff]
    kept = sorted_indexes[cutoff:]
    # Rejected predictions are substituted by their true labels; the rest
    # keep the model's answers.
    mixed_answers = true_answers[rejected].tolist() + answers[kept].tolist()
    reference = (
        true_answers[rejected].tolist() + true_answers[kept].tolist()
    )
    #score = accuracy_score(reference, mixed_answers)
    score = f1_score(reference, mixed_answers, average='micro')
    return score
predictions = np.argmax(probs, axis=-1)
errors = (labels != predictions).astype("uint8")
# Fractions of the most-uncertain predictions to replace with gold labels.
ratio_list = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
ue_scores = unc
ensemble_answers = np.asarray(probs).argmax(-1)
# Order objects from most to least uncertain.
sorted_indexes_ensemble = np.argsort(-ue_scores)
ens_scores = [
    get_score_ratio(sorted_indexes_ensemble, ensemble_answers, labels, ratio)
    for ratio in ratio_list
]
print(f"mahalanobis:", auc(ratio_list, ens_scores))
# -
plt.plot(ens_scores)
plt.plot(f_scores)
| src/exps_notebooks/analyze_arc_auc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# HIDDEN
from datascience import *
from prob140 import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# %matplotlib inline
import math
from scipy import stats
# ## Two-to-One Functions ##
# Let $X$ have density $f_X$. As you have seen, the random variable $Y = X^2$ comes up frequently in calculations. Thus far, all we have needed is $E(Y)$ which can be found by the formula for the expectation of a non-linear function of $X$. To find the density of $Y$, we can't directly use the change of variable formula of the previous section because the function $g(x) = x^2$ is not monotone. It is two-to-one because both $\sqrt{x}$ and $-\sqrt{x}$ have the same square.
#
# In this section we will find the density of $Y$ by developing a modification of the change of variable formula for the density of a monotone function of $X$. The modification extends in a straightforward manner to other two-to-one functions and also to many-to-one functions.
# ### Density of $Y = X^2$ ###
# If $X$ can take both positive and negative values, we have to account for the fact that there are two mutually exclusive ways in which the event $\{ Y \in dy \}$ can happen: either $X$ has to be near the positive square root of $y$ or near the negative square root of $y$.
# +
# NO CODE
# Plot y = x^2 and mark the two preimages (+/- sqrt(y*)) of the level
# y* = 12, illustrating that squaring is a two-to-one map.
x = np.arange(-5, 5.01, 0.01)
y = x ** 2
y_star = 12
x_star = y_star ** 0.5
neg_x_star = -1 * y_star ** 0.5
plt.plot(x, y, color='darkblue', lw=2)
plt.plot([0, 0], [0, 25], color='k', lw=1)
# Vertical segments at the two preimages, joined by the horizontal level line.
plt.plot([x_star, x_star], [0, y_star], color='k', lw=2)
plt.plot([neg_x_star, neg_x_star], [0, y_star], color='k', lw=2)
plt.plot([neg_x_star, x_star], [y_star, y_star], color='k', lw=2)
# Arrowheads pointing outward along the level line and down the verticals.
plt.scatter(2, y_star, marker='>', color='red', s=40)
plt.scatter(-2, y_star, marker='<', color='red', s=40)
plt.scatter(-2, y_star, marker='<', color='red', s=40)
plt.scatter(neg_x_star, 5, marker='v', color='red', s=40)
plt.scatter(x_star, 5, marker='v', color='red', s=40)
plt.ylim(-0.5, 25)
plt.xticks(np.arange(-5, 5.1))
plt.xlabel('$x$')
plt.ylabel('$y$', rotation=0)
plt.title('$y = x^2$');
# -
# So the density of $Y$ at $y$ has two components, as follows. For $y > 0$,
#
# $$
# f_Y(y) ~ = ~ a + b
# $$
#
# where
#
# $$
# a = \frac{f_X(x_1)}{2x_1} ~~~~ \text{at } x_1 = \sqrt{y}
# $$
#
# and
#
# $$
# b = \frac{f_X(x_2)}{\vert 2x_2 \vert} ~~~~ \text{at } x_2 = -\sqrt{y}
# $$
#
# We have used $g'(x) = 2x$ when $g(x) = x^2$.
#
# For a more formal approach, start with the cdf of $Y$:
#
# $$
# \begin{align*}
# F_Y(y) ~ &= ~ P(Y \le y) \\
# &= ~ P(\vert X \vert \le \sqrt{y}) \\
# &= ~ P(-\sqrt{y} \le X \le \sqrt{y}) \\
# &= ~ F_X(\sqrt{y}) - F_X(-\sqrt{y})
# \end{align*}
# $$
#
# Differentiate both sides to get our formula for $f_Y(y)$; keep an eye on the two minus signs in the second term and make sure you combine them correctly.
#
# This approach can be extended to any many-to-one function $g$. For every $y$, there will be one component for each value of $x$ such that $g(x) = y$.
# ### Square of the Standard Normal ###
# Let $Z$ be standard normal and let $W = Z^2$. The possible values of $W$ are non-negative. For a possible value $w \ge 0$, the formula we have derived says that the density of $W$ is given by:
#
# $$
# \begin{align*}
# f_W(w) ~ &= ~ \frac{f_Z(\sqrt{w})}{2\sqrt{w}} ~ + ~ \frac{f_Z(-\sqrt{w})}{2\sqrt{w}} \\ \\
# &= ~ \frac{\frac{1}{\sqrt{2\pi}} e^{-\frac{1}{2}w}}{2\sqrt{w}} ~ + ~ \frac{\frac{1}{\sqrt{2\pi}} e^{-\frac{1}{2}w}}{2\sqrt{w}} \\ \\
# &= \frac{1}{\sqrt{2\pi}} w^{-\frac{1}{2}} e^{-\frac{1}{2}w}
# \end{align*}
# $$
#
# By algebra, the density can be written in an equivalent form that we will use more frequently.
#
# $$
# f_W(w) ~ = ~ \frac{\frac{1}{2}^{\frac{1}{2}}}{\sqrt{\pi}} w^{\frac{1}{2} - 1} e^{-\frac{1}{2}w}
# $$
#
# This is a member of the family of *gamma* densities that we will study later in the course. In statistics, it is called the *chi squared density with one degree of freedom*.
| content/Chapter_16/03_Two_to_One_Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this tutorial, we will look at how you can register custom recommendation actions (i.e. tabs of recommendations) to display on the Lux widget. The custom actions can be globally defined and used across different dataframes. We look at the [Happy Planet Index](http://happyplanetindex.org/) dataset, which contains metrics related to well-being for 140 countries around the world.
import pandas as pd
import lux
# The dataframe initially registers a few default recommendations, such as Correlation, Enhance, Filter, etc.
# Load the Happy Planet Index dataset and add a boolean G10-membership column.
df = pd.read_csv("https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/hpi.csv")
df["G10"] = df["Country"].isin(["Belgium","Canada","France","Germany","Italy","Japan","Netherlands","United Kingdom","Switzerland","Sweden","United States"])
# Show the Lux widget (instead of the plain pandas table) by default.
lux.config.default_display = "lux"
df
# As we can see, Lux displays several recommendation actions, such as Correlation and Distributions, which is globally registered by default.
#
# ## Registering Custom Actions
# Let's define a custom function to generate the recommendations on the dataframe. In this example, we register a custom action called `G10` to generate a collection of visualizations that showcase numerical measures that differ significantly across `G10 <https://en.wikipedia.org/wiki/Group_of_Ten_(economics)>`_ and non-G10 countries. In other words, we want to understand how the G10 and non-G10 countries differ based on the measures present in the dataframe.
#
# Here, we first generate a VisList that looks at how various quantitative attributes breakdown between G10 and non-G10 countries. Then, we score and rank these visualization by calculating the percentage difference in means across G10 v.s. non-G10 countries.
from lux.vis.VisList import VisList
# +
# Visualize every quantitative attribute broken down by G10 membership.
intent = [lux.Clause("?",data_type="quantitative"),lux.Clause("G10")]
vlist = VisList(intent,df)
for vis in vlist:
    # Percentage Change Between G10 v.s. non-G10 countries
    # (rows 0 and 1 presumably hold the two group aggregates -- TODO confirm)
    a = vis.data.iloc[0,1]
    b = vis.data.iloc[1,1]
    vis.score = (b-a)/a
# Keep only the 15 highest-scoring visualizations.
vlist = vlist.topK(15)
# -
vlist
# Let's define a custom function to generate the recommendations on the dataframe. In this example, we will use G10 to generate a VisList to calculate the percentage change of means Between G10 v.s. non-G10 countries.
def G10_mean_difference(input_df): #TODO: change input_df name?
    """Custom Lux action: rank quantitative attributes by the relative
    difference of their means between G10 and non-G10 countries."""
    intent = [lux.Clause("?",data_type="quantitative"),lux.Clause("G10")]
    vis_list = VisList(intent,input_df)
    for vis in vis_list:
        # Rows 0 and 1 presumably hold the two group aggregates (non-G10 and
        # G10 respectively) -- TODO confirm; score by their percentage change.
        first_mean = vis.data.iloc[0,1]
        second_mean = vis.data.iloc[1,1]
        vis.score = (second_mean - first_mean) / first_mean
    vis_list = vis_list.topK(15)
    return {"action":"G10", "description": "Percentage Change of Means Between G10 v.s. non-G10 countries", "collection": vis_list}
# In the code below, we define a display condition function to determine whether or not we want to generate recommendations for the custom action. In this example, we simply check if we are using the HPI dataset to generate recommendations for the custom action `G10`.
def is_G10_hpi_dataset(df):
    """Return True iff *df* has exactly the HPI dataset's column schema.

    Used as the display condition for the custom ``G10`` action: the
    recommendation is only generated when the dataframe looks like the
    Happy Planet Index dataset.  Any object without a matching ``columns``
    attribute simply yields False.
    """
    expected = ['HPIRank', 'Country', 'SubRegion', 'AverageLifeExpectancy',
                'AverageWellBeing', 'HappyLifeYears', 'Footprint',
                'InequalityOfOutcomes', 'InequalityAdjustedLifeExpectancy',
                'InequalityAdjustedWellbeing', 'HappyPlanetIndex', 'GDPPerCapita',
                'Population', 'G10']
    try:
        # List equality handles a different number of columns without the
        # ValueError that elementwise `df.columns == [...]` raised; the
        # original relied on a bare `except:` to paper over that case.
        return list(df.columns) == expected
    except Exception:  # not a DataFrame-like object -> not the HPI dataset
        return False
# To register the `G10` action in Lux, we apply the `register_action` function, which takes a name and action as inputs, as well as a display condition and additional arguments as optional parameters.
lux.register_action("G10", G10_mean_difference)
# After registering the action, the G10 recommendation action is automatically generated when we display the Lux dataframe again.
#
df
# As expected, we see that G10 and non-G10 countries differs significantly in terms of their GDPPerCapita, but also in terms of their carbon footprint (Footprint) and number of expected happy year an average citizen can expect to live within a country (HappyLifeYears).
#
# Since the registered action is globally defined, the G10 action is displayed whenever the display condition is satisfied (i.e. if the data schema matches that of the HPI dataset). For example, we might want to isolate the GDPPerCapita factor and only examine countries with high GDP. We can filter to only countries with GDPPerCapita over 40k and see the difference across the various quantitative attributes for these countries.
df[df["GDPPerCapita"]>40000]
# As we can see, there is a less of a distinction between G10 and non-G10 countries across the measures when we only filter to only high GDP countries.
# ## Navigating the Action Manager
# You can inspect a list of actions that are currently registered in the Lux Action Manager. The following code displays both default and user-defined actions.
lux.actions.__getactions__()
# You can also get a single action attribute by calling this function with the action's name.
lux.actions.__getattr__("G10")
# ## Removing Custom Actions
# Let's say that we are no longer interested in the `G10` action. The `remove_action` function removes an action from Lux's action manager by its name; the action will no longer display with the Lux dataframe.
lux.remove_action("G10")
# After removing the action, when we print the dataframe again, the `G10` action is no longer displayed.
#
df
| demo/8-custom-action.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Introduction
# pandas is one of the most powerful and widely used Python libraries for efficient data engineering. It is commonly used in combination with other analytical libraries such as scikit-learn, matplotlib, and statsmodels. This Notebook teaches the essential functionalities of pandas, including:
# - Creating pandas Series and DataFrame;
# - Manipulating pandas objects (e.g., reindexing, modifying, and deleting index);
# - Selecting data with loc and iloc;
# - Sorting and ranking data; and
# - Computing descriptive statistics.
# # What is pandas?
# pandas is an open-source tool built on top of Python to make data cleaning and analysis fast and efficient. pandas leverages NumPy functionalities such as array-based computing, in particular array-based functions and a preference for data processing without `for` loops. The core difference between Numpy and pandas is that Numpy is developed for working with homogeneous numerical array data and pandas is best suited for working with tabular or heterogeneous data.
#
#
# ## Installing Pandas
# The simplest way to install not only pandas, but Python and the most popular packages that make up the SciPy stack (e.g., NumPy, Matplotlib) is with [Anaconda](https://docs.continuum.io/anaconda/), a cross-platform Python distribution for data analytics and scientific computing.
#
# To install pandas using `conda`, the package manager that the Anaconda distribution is built upon, run the following command:
# `conda install pandas`
# ## Importing Pandas
# After installation, pandas can be imported as such:
#
# `import pandas as pd`
#
# After import, whenever you see `pd.` in any of the code, it is referring to `pandas`.
# # Pandas Series
# pandas is made up of two core data structures: **Series** and **DataFrame**
# A Series is a one-dimensional array-like object that contains a sequence of values and an associated array of labels, called _index_. You can think of a Series as a fixed-length ordered dictionary, where each index is mapped to a value.
import pandas as pd
import numpy as np
pObject = pd.Series([6,3,-2,7,-2,-2,3,0,1])
pObject
# In the above example, `pObject` is a pandas Series. The index for this Series is displayed on the left (column 1) and the values are displayed on the right (column 2).
#
# Since we did not specify an index for the data, a default one consisting of integers starting at 0 to N-1 is created.
# To retrieve the array representation of the Series and index object, its values and index attributes can be used
pObject.values #get array representation of the Series
pObject.index #get index object
#
# In real-world applications, it is often useful to create a Series with an index identifying each data point with a label. Here we create a Series and specify the index using the `index=` keyword.
pObject = pd.Series([6,3,-2,7,-2,-2,3,0,1], index=['p','i','y','a','d','u','j','q','g'])
pObject
# Values in a Series can be selected using the labels in the index:
pObject['i']
pObject[['i', 'p', 'q', 'g']]
pObject['i'] = 6
pObject['i']
# ## Filtering
# It is possible to use Numpy-like operations, such as filtering with a boolean array, scalar multiplication, or applying math functions, on pandas Series. Using these operations will preserve the index-value link.
pObject[pObject >0] #filter the positive values
pObject*5 #multiply all values by 5
'c' in pObject #check if 'c' is a key in pObject
'i' in pObject #check if 'i' is a key in pObject
# ## Converting Python Dictionaries to Pandas Series
# If you already have data contained in a Python dictionary, it is very easy to create a Series from the dictionary. Simply pass the dictionary when creating the Series.
myDict = {'b': 1, 'd': 2, 'c': 3}
pObject2 = pd.Series(myDict)
pObject2
# When you're passing a dictionary, the index in the resulting Series will be the dict's keys. It is also possible to pass another list to be used as the index instead.
# +
myDict = {'b': 1, 'd': 2, 'c': 3}
customKeys = ['a', 'b', 'c']
pObject3 = pd.Series(myDict, index=customKeys)
pObject3
# -
# ## isnull()
# In the above example, the keys `b` and `c` were found in `myDict` and placed in the appropriate locations. However, as no values were found for `a`, it appears as `NaN` (not a number) in the resulting Series.
#
# The `isnull()` and `notnull()` functions in pandas can be used to detect missing data:
pd.isnull(pObject3)
# ## notnull()
pd.notnull(pObject3)
# The above function can also be called as an instance method:
pObject3.notnull()
# ## Arithmetic Operations
# A useful feature of pandas Series is that it automatically aligns by index label in arithmetic operations. This is similar to the `join` operation used in databases.
obj1 = pd.Series([6,3,-2,7,-2,-2,3,0,1], index=['p','i','y','a','d','u','j','q','w'])
obj2 = pd.Series([-4,8,8,8,6,-3,6,8,7], index=['p','i','y','a','d','u','j','q','g'])
obj1 + obj2
# ## pandas.Series.name
# In pandas, both the Series object and its index have a `name` attribute.
obj1.name = 'Sample Data' #assign series name
obj1.index.name = 'ind' #assign series index name
obj1
# # Pandas DataFrames
# The pandas DataFrame represents a rectangular table of data and contains an ordered collection of columns, each of which can be a different value type (numeric, string, boolean, etc.).
# There are many ways to create a DataFrame. A common approach is from a dictionary of equal-length lists or Numpy arrays.
data = {"country": ["Belgium", "Norway", "Australia", "India"],
"capital": ["Brussels","Oslo","Canberra","New Delhi"],
"population":[1.7, 6.4, 3.9, 210]}
myDataFrame = pd.DataFrame(data)
myDataFrame
# You can also specify the columns of DataFrame. If you pass a column that is not contained in the dictionary, it will appear with missing values in the result.
myDataFrame2 = pd.DataFrame(data, columns=["country","capital","population","happiness index"])
myDataFrame2
# ## .head()
# If you have a large DataFrame, the `.head()` method can be used to select the first five rows.
myDataFrame.head()
# ## Accessing columns of a DataFrame
# When you access a column/columns of a DataFrame, it is retrieved as a Series. Note that the column returned from indexing a DataFrame is a _view_ on the underlying data, not a copy. Thus, any in-place modifications to the Series will be reflected in the DataFrame.
myDataFrame2['population'] #accessing the population column using the dictionary-like notation
myDataFrame2.population #accessing the population column by the attribute (i.e., attribute-like access)
# ## Modifying columns of a DataFrame
# Columns of a DataFrame can be modified by assignment. For example, the empty happiness index could be assigned a scalar value or a list of values:
myDataFrame2['happiness index'] = -1
myDataFrame2
#When you are assigning lists or arrays to a column, the value’s length must match the length of the DataFrame.
myDataFrame2['happiness index'] = [8.0, 7.5, 6.0, 5.5]
myDataFrame2
# ## Deleting a column of a DataFrame
# The `del` method can be used to remove a column.
# I first create a new column called megacity
myDataFrame2["megacity"] = [False, False, False, True]
myDataFrame2
del myDataFrame2["megacity"]
myDataFrame2
# # Reindexing
# A powerful feature of pandas is the `reindex` method. `reindex` rearranges the data according to the new index, introducing missing values if any index values were not already present.
#
# ## Reindexing a Series
obj = pd.Series([6,2,9,3,7], index=['a', 'b', 'c', 'd', 'e'])
obj
obj2 = obj.reindex(['d', 'a', 'b', 'c', 'e'])
obj2
# ## Reindexing a DataFrame
# +
data = {"country": ["Belgium", "Norway", "Australia", "India"],
"capital": ["Brussels","Oslo","Canberra","New Delhi"],
"population":[1.7, 6.4, 3.9, 210]}
df = pd.DataFrame(data, index=['a','b','c','d'])
df
# -
df2 = df.reindex(['a','b','c','d','e'])
df2
# Columns can be reindexed with the `columns` keyword
columns = ['capital', 'country', 'population']
df.reindex(columns = columns)
# # Dropping Entries
# ## Dropping entries from a Series
# The `drop` method will return a **new object** with the indicated values deleted from an axis.
pObject = pd.Series([6,3,-2,7,-2,-2,3,0,1], index=['p','i','y','a','d','u','j','q','g'])
pObject
pObject.drop('p')
pObject.drop(['y', 'a'])
# ## Dropping entries from a DataFrame
# With DataFrames, index values can be deleted from either axis.
# +
data = {'capital': ['Brussels','Oslo','Canberra','New Delhi'],
'population':[1.7, 6.4, 3.9, 210]}
df = pd.DataFrame(data, index=['Belgium', 'Norway', 'Australia', 'India'])
df
# -
#Calling `drop` with a sequence of labels will drop values from the row label (axis=0)
df.drop(['Australia', 'Norway'])
#Values from the columns can be dropped by passing axis=1 or axis='columns' to the drop function
df.drop('capital', axis=1)
# It is possible to manipulate an object in-place without returning a new object using the `inplace=True` argument.
df.drop('capital', axis=1, inplace=True)
df
# # Selection with `loc` and `iloc`
# `loc` and `iloc` are special indexing operation that allow you to select a subset of rows and columns from a DataFrame using either axis labels (`loc`) or integers (`iloc`).
data = {'capital': ['Brussels','Oslo','Canberra','New Delhi', 'Jakarta', 'Wellington'],
'population':[1.7, 6.4, 3.9, 210, 105, 2.1],
'megacity': [False, False, False, True, True, False]}
df = pd.DataFrame(data, index=['Belgium', 'Norway', 'Australia', 'India', 'indonesia', 'New Zealand'])
df
# + [markdown] tags=[]
# ## loc
# -
df.loc['Australia', ['capital', 'population']] #retrive the the columns `capital` and `population` for index Australia
# + [markdown] tags=[]
# ## iloc
# -
df.iloc[2, [0,1]] #same operation as above but with `iloc`
df.iloc[[1,2,3], [0,1]] #same operation as above but with `iloc`
# ## Slicing with `loc` and `iloc`
df.loc[:'India', 'capital'] #get the capital of all countries up to `India`
df.iloc[:, :2][df.megacity == True] #get the capital and population column (:2) of all countries (:), which are classified as megacities
# # Sorting and Ranking
# ## Sorting
# Sorting a dataset by a criterion is a common operation in real-world applications. To sort alphabetically by row or column index, use the `sort_index` method. This returns a new, sorted object.
#sorting a Series alphabetically
pObject = pd.Series([6,3,-2,7,-2,-2,3,0,1], index=['p','i','y','a','d','u','j','q','g'])
pObject.sort_index()
#
#
#
# DataFrames can be sorted by index on either axis (axis 0, axis 1)
data = {'capital': ['Brussels','Oslo','Canberra','New Delhi', 'Jakarta', 'Wellington'],
'population':[1.7, 6.4, 3.9, 210, 105, 2.1],
'megacity': [False, False, False, True, True, False]}
df = pd.DataFrame(data, index=['Belgium', 'Norway', 'Australia', 'India', 'indonesia', 'New Zealand'])
df.sort_index() #sort by row index
df.sort_index(axis=1) #sort by column index
df.sort_index(axis=1, ascending=False) #sort by column index in descending order
# ## Ranking
# Ranking is a pandas operations that assigns ranks from one through to the number of valid data points in an array. By default, equal values are assigned a rank that is the average of the ranks of those values
obj = pd.Series([6,3,-2,7,-2,-2,3,0,1])
obj
obj.rank()
#
# Data can also be ranked according to the order in which they are observed.
obj.rank(method='first')
# Ranks can also be assigned in descending order
obj.rank(ascending=False)
# # Computing Descriptive Statistics
# pandas objects are equipped with a set of common mathematical and statistical methods. These methods have built-in handling for missing data.
df = pd.DataFrame([[1,2],[6,2],[-2, np.nan], [np.nan, np.nan]],
index=['a', 'b', 'c','d'],
columns=['one', 'two'])
df
# the .sum() function returns the sum of the values over the requested axis.
df.sum()
# Passing `axis=1` to the .sum() function returns the sum of the values across the columns
df.sum(axis=1)
# the .mean() function returns the mean of the values over the requested axis.
df.mean(axis=1)
# NA values are excluded unless the entire slice is NA. This can be disabled with the `skipna` option
df.mean(axis=1, skipna=False)
# +
# the .describe() function produces multiple summary statistics of the pandas object in one call
df.describe()
# -
# # Unique Values, Value Counts, and `isin()`
obj = pd.Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])
obj
# the .unique() function returns an array of the unique values in a Series
obj.unique()
# the .value_counts() function returns a Series containing the frequency of each value
obj.value_counts()
# the .isin() function can be used for filtering a dataset down to a subset of values in a Series or column in a DataFrame
obj = pd.Series(['c', 'a', 'd', 'a', 'a', 'b', 'b', 'c', 'c'])
mask = obj.isin(['a', 'd'])
mask
obj[mask]
| Intro to Data Analysis using Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from collections import namedtuple
from datetime import datetime
from pprint import pprint
from itertools import cycle
# +
# A set within a workout: how many reps of which exercises.
Set_ = namedtuple('Set_', [
    'reps',
    'exercises',
])
# A single exercise: its description and its time value (duration).
Exercise = namedtuple('Exercise', [
    'name',
    'time',
])
# Week-to-week progression of rep counts: starting value, increment,
# number of weeks between increments, and the cap.
RepsSetting = namedtuple('RepsSetting', [
    'start',
    'step',
    'step_interval',
    'max',
])
# Same progression parameters for an exercise's duration, plus a description.
ExerciseSetting = namedtuple('ExerciseSetting', [
    'description',
    'start',
    'step',
    'step_interval',
    'max',
])
# +
progress_settings = {
'runeasy': {'warmup': 0,
'warmdown': 0,
'reps': RepsSetting(1, 0, 0, 1),
'exercises': [ExerciseSetting('easy', 25, 5, 3, 100)]},
'hillsprint': {'warmup': 12,
'warmdown': 12,
'reps': RepsSetting(6, 1, 2, 8),
'exercises': [ExerciseSetting('hill', 1, 0.25, 2, 2)]},
'intervals': {'warmup': 10,
'warmdown': 10,
'reps': RepsSetting(5, 0, 0, 1),
'exercises': [ExerciseSetting('fast', 0.5, 0.25, 2, 2),
ExerciseSetting('easy', 1, 0, 0, 1)]},
'crosstrain': {'warmup': 0,
'warmdown': 0,
'reps': RepsSetting(1, 0, 0, 1),
'exercises': [ExerciseSetting('crosstrain', 30, 0, 0, 30)]},
}
plans = {
'beginner': [['runeasy', ],
['hillsprint', ],
['intervals', ]],
'intermediate': [['runeasy', ],
['hillsprint', 'runeasy'],
['intervals', ]],
'advanced': [['runeasy', ],
['hillsprint', 'intervals'],
['intervals', ],
['crosstrain', ]],
}
# -
def rest_week(week, plan_length):
    '''
    int -> boolean

    Decide whether *week* of a *plan_length*-week plan is an easier week.

    Plans work on a 4 week block, with every 4th week being an easier week.
    The runner has at least 2 weeks, and a maximum of 5 weeks, before they
    get an easier week; the leftover weeks (plan_length % 4) act as a
    build-up phase with no rest.  So on a 6 week plan the only easier week
    falls on race week.

    Returns True if rest week and False if progression week.
    '''
    build_up = plan_length % 4
    # Inside a short build-up phase (under 3 weeks) there is never a rest week.
    if week <= build_up < 3:
        return False
    # After the build-up, every 4th week is a rest week.
    return (week - build_up) % 4 == 0
# +
def create_plan(ability, length, days=[0, 1, 2]):
    """ Create bespoke training plan.

    ability: key into ``plans`` ('beginner', 'intermediate' or 'advanced').
    length: plan length in weeks (int).
    days: weekday slots, paired one-to-one with the session lists of the
        chosen plan.
    --> dict
    plan = {'wk{week}-day{day}': workout} where each workout dict holds
    'warmup', 'warmdown', 'reps' and 'exercises' entries.
    """
    bespoke = {}
    for day, plan in zip(days, plans[ability]):
        # Cycle the day's session types (e.g. alternate hillsprint/runeasy)
        # across all weeks of the plan.
        for wk, progression in zip(range(length), cycle(plan)):
            ps = progress_settings[progression]
            workout = {}
            workout['warmup'] = Exercise('easy', ps['warmup'])
            workout['warmdown'] = Exercise('easy', ps['warmdown'])
            # On a rest week, progress as if it were the previous week.
            if rest_week(wk, length):
                multiple = wk - 1
            else:
                multiple = wk
            # if wk % ps['reps'].step_interval == 0:
            #     reps = ps['reps'].start + ps['reps'].step * multiple
            workout['reps'] = min(ps['reps'].start +
                                  ps['reps'].step * multiple, ps['reps'].max)
            workout['exercises'] = []
            for es in ps['exercises']:
                description = es.description
                # Progress the duration linearly, capped at the setting's max.
                time = min(es.start + es.step * multiple, es.max)
                workout['exercises'].append(Exercise(description, time))
            bespoke[f'wk{wk}-day{day}'] = workout
    return bespoke
# -
plan = create_plan('advanced', 20, [0, 2, 4, 5])
# Print a summary per training day.  A single loop over the day tags
# replaces four copy-pasted loops; the output order is identical (all
# day0 sessions, then day2, day4, day5).
for day_tag in ('day0', 'day2', 'day4', 'day5'):
    for session in plan:
        if day_tag in session:
            pprint({session: {plan[session]['reps']: plan[session]['exercises']}})
def determine_reps_or_duration(plan_week, workout_week, start, step, maximum, interval):
    """Return the reps/duration value for a workout week, capped at *maximum*.

    Progresses *start* by *step* once every *interval* workout weeks,
    stepping back one increment on rest weeks, and lowers the cap by one
    step on a rest week as well.

    NOTE(review): reads the module-level ``length`` (total plan length)
    instead of taking it as a parameter -- confirm this is intentional.
    """
    # convert boolean returned from function to 1 or 0
    rest = int(rest_week(plan_week, length))
    result = start + (int(workout_week / interval) - rest ) * step
    return min(result, maximum - rest * step)
length = 20
progress_weeks = {'1': 0, '2': 0}
# Alternate two progressions across the plan: even weeks start at 25 min
# (+5 every 2nd progression week, cap 35), odd weeks start at 50 min
# (+5 every 3rd, cap 70).  The original called a non-existent
# `determine_value`, raising NameError; the defined function is
# `determine_reps_or_duration`.
for week in range(length):
    if week % 2 == 0:
        print(determine_reps_or_duration(week, int(week / 2), 25, 5, 35, 2))
        progress_weeks['1'] += 1
    else:
        print(determine_reps_or_duration(week, int(week / 2), 50, 5, 70, 3))
        progress_weeks['2'] += 1
# Quick sanity check of the integer-halving used for workout_week above.
for i in range(8):
    print(int(i / 2))
| notebooks/Plan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="QOEo1A2fMlCF"
# # <NAME>
# Poetry Generation in Urdu
# + id="lMimKiVEMM7M" colab={"base_uri": "https://localhost:8080/"} outputId="d27beef7-6255-44ad-b7fe-4d5135c7a1f2"
import spacy
import random
import operator
nlp = spacy.load("en_core_web_sm")
nlp = spacy.blank('ur')
# Opening the file and saving the complete text to train_data, inserting an
# Urdu placeholder token at each line start (later mapped to "<S>") and
# after each line end (later mapped to "</S>").
train_data = ""
new_line = 0
with open("ghalib.txt", "r") as my_file:  # context manager: file was never closed before
    for i in my_file:
        if new_line == 0 and len(i) != 1:  # non-blank line -> mark sentence start
            train_data += "سسسسس"  # Check start of sentence
            train_data += " "
        for index, j in enumerate(i):
            if j != "\n":
                train_data += j
            elif j == "\n" and index != 0:  # line end -> mark sentence end
                train_data += " "
                train_data += "کککککک "  # Check end of sentence
        # was `new_line == 0`: a no-op comparison; assignment was intended
        new_line = 0
# Removing unnecessary characters & inserting start/end of sentence tags.
doc = nlp(train_data)
exclude_word = ["٪","؟","‘","!","’",".",'"',")","("," ِ","،",":","'","“","%"," ِ"," ِ"," ِ","K","۔"]
doc_string = list()
for token in doc:
    token_text = str(token)
    if token_text == "سسسسس":  # Converting to Start of sentence "<S>"
        doc_string.append("<S>")
    elif token_text == "کککککک":  # Converting Check of end of sentence to "</S>"
        doc_string.append("</S>")
    elif token_text == " " or token_text in exclude_word:
        # Skip punctuation/whitespace tokens entirely.  The original
        # `pass` fell through to an unconditional append of the *previous*
        # token (stale `x`), duplicating a word for every excluded token.
        continue
    else:
        doc_string.append(token_text)
# Printing out the final list (iterate doc_string itself: after the fix it
# is shorter than `doc`, so indexing by len(doc) would overrun).
for position, token_text in enumerate(doc_string):
    print(position, " ", token_text)
# + [markdown] id="IjVra9EbeqPU"
# ## Calcuating the frequency of uni-gram in the corpus
# + id="JmkKlncQjYJO"
# Count how many times each token (uni-gram) appears in the corpus.
word_frequency = {}
for token in doc_string:
    word_frequency[token] = word_frequency.get(token, 0) + 1
# + [markdown] id="t_hJpYf9fHDg"
# ### Extracting Bigram
# + colab={"base_uri": "https://localhost:8080/"} id="qq3x-CJSiD8p" outputId="53307ba3-d4ce-4e59-a113-8c3e18a28918"
bigram = list()               # (w1, w2) pairs that do not cross a sentence boundary
list_of_firstword = []        # first token of every sentence
list_of_lastword = []         # last token of every sentence
for i in range(len(doc_string)-1):
    # Only pair tokens inside the same sentence (skip pairs that span an
    # end-marker/start-marker boundary).
    if doc_string[i] !='</S>' and doc_string[i+1] != '<S>':
        w1=doc_string[i]
        w2=doc_string[i+1]
        bigram.append(tuple((w1,w2)))
    # Extracting first words and list of last words of each sentence
    if doc_string[i] == '<S>':
        list_of_firstword.append(doc_string[i+1])
    elif doc_string[i+1] == '</S>':
        list_of_lastword.append(doc_string[i])
# print("(0,0): ", bigram[0][0],"(0,1): ",bigram[0][1])
print(bigram)
# + [markdown] id="_iNzHptzONvO"
# #Finding Frequency of bi-gram
# + colab={"base_uri": "https://localhost:8080/"} id="JYYCG2twORhl" outputId="fc55e270-2de2-4df6-dfdb-c3c5b33b70a4"
# Count how many times each bi-gram occurs in the corpus.
bigram_word_frequency = {}
for pair in bigram:
    bigram_word_frequency[pair] = bigram_word_frequency.get(pair, 0) + 1
# Print the bigrams that occur more than once.
for pair, count in bigram_word_frequency.items():
    if count >= 2:
        print(count, " ", pair)
# + [markdown] id="yFrbk75kHHYN"
# #Sentence generation using uni-Gram method
#
# + id="rZwBe2cMqx53" colab={"base_uri": "https://localhost:8080/"} outputId="060e2682-d211-4cc1-8907-b6015650f8bc"
def unigram_model(word_frequency,list_of_firstword,list_of_lastword):
    """Generate and print one line of verse by sampling frequent unigrams.

    Words are drawn from the highest-frequency buckets first (random choice
    within a bucket) until the sentence holds ~8-10 distinct words,
    preferring to stop on a word known to end a line.
    """
    words_dic ={}
    for w in word_frequency:
        # Converting diction to
        # key = freq of word
        # values = word
        if word_frequency[w] in words_dic:
            words_dic[word_frequency[w]].append(w)
        else:
            words_dic[word_frequency[w]] = [w]
    keys_in_words_dictionary = sorted(words_dic.keys())
    words_in_sent = {}   # chosen word -> its unigram probability (also dedupes)
    no_of_iteration = 0
    i = 1
    while len(words_in_sent) <10:
        # for i in range(1,len(words_dic)+1): # Loop over whole dictionary with key = Frequncy of words & value = words
        if i == len(words_dic)+1:   # all frequency buckets exhausted
            break
        for w in words_dic[keys_in_words_dictionary[-i]]: # Iterating over reverse order i.e from highest occurance value
            if w != '<S>' and w != '</S>':   # never emit the boundary markers
                choosed_word = random.choice(words_dic[keys_in_words_dictionary[-i]])
                words_in_sent[choosed_word] = word_frequency[choosed_word] / len(word_frequency)
                no_of_iteration+=1
            # Stop early once the sentence is long enough and can end on a
            # known line-final word.
            if (len(words_in_sent) >7 and (w in list_of_lastword)):
                break
        i+=1
    for i in words_in_sent:
        print(i,end = " ")
    print("")
# Generate 12 lines, with a blank line after every 4th.
for i in range (1,13):
    unigram_model(word_frequency,list_of_firstword,list_of_lastword)
    if i%4 == 0:
        print("\n")
# + [markdown] id="D2bJlo5GK3Js"
# # Remarks on the Uni-Gram Model
# Words are chosen in descending order of their probability, so very similar sentences are generated each run.
# Since a unigram's probability is directly proportional to its occurrence count, the raw count is used as the ranking reference.
#
#
# + [markdown] id="oBczi3IJQkR2"
# #Sentence generation using bi-Gram method
#
#
#
#
# + id="DldOWIi2Qj80" colab={"base_uri": "https://localhost:8080/"} outputId="c173d7cf-713b-46b6-af7a-2813852dea41"
def bi_model(bigram_word_frequency,list_of_firstword,list_of_lastword):
    """Generate and print one line left-to-right with a forward bigram model.

    Starts from a random sentence-initial word, then repeatedly picks a
    successor from the highest-count bigram bucket (random tie-break).

    NOTE(review): reads the module-level ``word_frequency`` rather than a
    parameter -- confirm this is intentional.
    """
    words_in_sent = {}   # chosen (w1, w2) pair -> conditional probability P(w2|w1)
    i = 0
    w1 = random.choice(list_of_firstword)   # random sentence opener
    final_sent = w1
    for i in range(10):
        # key = bigram count, value = candidate (w1, w2) pairs;
        # unseen pairs (and count collisions) land in bucket 0.
        bigram_words_count ={0:[]}
        for w2 in word_frequency:
            if w2 != "<S>":
                # Disallow ending too early and self-loops (w1 == w2).
                if (len(words_in_sent) < 7) and (w2 == "</S>") or (w1 == w2):
                    pass
                elif (w1,w2) not in bigram_word_frequency:
                    bigram_words_count[0].append((w1,w2))
                else:
                    if bigram_word_frequency[(w1,w2)] not in bigram_words_count:
                        bigram_words_count[bigram_word_frequency[(w1,w2)]] = [(w1,w2)]
                    else :
                        bigram_words_count[0].append((w1,w2))
        # Pick (randomly) among pairs in the highest-count bucket.
        # NOTE(review): if that bucket is 0 (an unseen pair), the lookup
        # below raises KeyError -- confirm this can't happen on real data.
        w2 = random.choice(bigram_words_count[max(bigram_words_count)])
        # print(w2)
        words_in_sent[w2] = float( bigram_word_frequency[w2] / word_frequency[w1] )
        w1 = w2[1]
        if w1 != "</S>":
            final_sent = final_sent+" "+ w1
        if (len(words_in_sent) >= 8) and (w1 == "</S>"):
            break
    print(final_sent)
# Generate 12 lines, with a blank line after every 4th.
for i in range (1,13):
    bi_model(bigram_word_frequency,list_of_firstword,list_of_lastword)
    if i%4 == 0:
        print("\n")
# + [markdown] id="kMogPi4K1n2b"
# # Backward Bigram Model
# + id="VbmrZwSL48TM" colab={"base_uri": "https://localhost:8080/"} outputId="dd74e0fa-d1f8-405e-f6b4-aed79e16cb89"
def BackwardBigramModel(bigram_word_frequency,list_of_firstword,list_of_lastword):
    """Generate and print one line right-to-left with a backward bigram model.

    Starts from a random sentence-final word and repeatedly prepends the
    predecessor w1 that maximizes P(w2|w1).

    NOTE(review): reads the module-level ``word_frequency`` rather than a
    parameter, and ``words_in_sent`` is never populated, so the early-break
    condition below can never trigger -- confirm both are intended.
    """
    words_in_sent = {}
    i = 0
    w2 = random.choice(list_of_lastword)   # random sentence-ending word
    final_sent = w2
    for i in range(10):
        # candidate (w1, w2) pair -> conditional probability P(w2|w1)
        bigram_words_count ={}
        for w1 in word_frequency:
            if w1 != "</S>":
                # Disallow starting too early and self-loops (w1 == w2).
                if (len(words_in_sent) < 7) and (w1 == "<S>") or (w1 == w2):
                    pass
                elif (w1,w2) not in bigram_word_frequency:
                    bigram_words_count[(w1,w2)] = 0
                else:
                    bigram_words_count[(w1,w2)] = float( bigram_word_frequency[w1,w2] / word_frequency[w1] )
        # Take the predecessor pair with the highest probability.
        w1 = max(bigram_words_count.items(), key=operator.itemgetter(1))[0]
        w2 = w1[0]
        if w2 != "<S>":
            final_sent = w2 +" "+ final_sent
        if (len(words_in_sent) >= 8) and (w2 == "<S>"):
            break
    print(final_sent)
# Generate 12 lines, with a blank line after every 4th.
for i in range (1,13):
    BackwardBigramModel(bigram_word_frequency,list_of_firstword,list_of_lastword)
    if i%4 == 0:
        print("\n")
# + [markdown] id="1IfxJYCujOzL"
# # Bidirectional Bigram Model
# + id="SQruV-xR0Goe"
all_word = doc_string + doc_string[::-1] # Forward and backward corpus
word_frequency = {}        # reset; rebuilt over the doubled corpus in the next cell
doc_string = all_word      # later cells now operate on the bidirectional corpus
# + id="_pDBwxqORoBc"
# Tokenizing
# Recount unigram frequencies, now over the doubled (forward + backward)
# corpus.  This cell duplicates the earlier counting/extraction cells.
for i in range(len(doc_string)):
    if doc_string[i] not in word_frequency:
        word_frequency[doc_string[i]] = 1
    else:
        word_frequency[doc_string[i]] += 1
bigram = list()
list_of_firstword = []
list_of_lastword = []
# Extracting Bi-Grams (same logic as the earlier bigram cell).
for i in range(len(doc_string)-1):
    if doc_string[i] !='</S>' and doc_string[i+1] != '<S>':
        w1=doc_string[i]
        w2=doc_string[i+1]
        bigram.append(tuple((w1,w2)))
    # Extracting first words and list of last words of each sentence
    if doc_string[i] == '<S>':
        list_of_firstword.append(doc_string[i+1])
    elif doc_string[i+1] == '</S>':
        list_of_lastword.append(doc_string[i])
# print("(0,0): ", bigram[0][0],"(0,1): ",bigram[0][1])
print(bigram)
# + id="Lb0JQKWm1dAT"
# Count each bi-gram of the doubled corpus.
bigram_word_frequency = {}
for pair in bigram:
    bigram_word_frequency[pair] = bigram_word_frequency.get(pair, 0) + 1
# Print the bigrams that occur more than once.
for pair, count in bigram_word_frequency.items():
    if count >= 2:
        print(count, " ", pair)
# + [markdown] id="ZXbBaO4GFKTv"
# # Output of Bidirectional Model
# + colab={"base_uri": "https://localhost:8080/"} id="3qm4sdZlFLK1" outputId="cbc4d77a-1957-47ae-ba17-dd45d1acff94"
# Generate 12 bidirectional samples with a blank line after every group of
# three -- the same call/print sequence as twelve explicit calls.
for sample_no in range(1, 13):
    bi_model(bigram_word_frequency, list_of_firstword, list_of_lastword)
    if sample_no % 3 == 0 and sample_no != 12:
        print("\n")
# + [markdown] id="AnLJ1raRKOCC"
# # Remarks
# ## The output of the bidirectional model is more understandable and closer to natural language.
# ## The first word of each sentence is chosen at random, so the sentences are not exactly the same, but they are quite similar.
| Poetry Generation in Urdu .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculate State Trends of COVID-19 Cases for
# Alaska, Alabama, Arkansas, Arizona, California, Colorado, Connecticut, Delaware, District of Columbia, and Florida.
# ### Read Merged Covid-19 Dataset with Variable "State" as our index.
# +
import pandas as pd
import numpy as np
# Read the merged ("super") COVID-19 dataframe, using the "State" column as the index.
Read = pd.read_csv("../../../data/output/covid.csv", index_col="State")
# -
# ### Group columns by index State, and add the cases of all counties within a state.
# +
# Group rows by the State index, summing the cases of every county within each state.
States = Read.groupby(['State']).sum()
# Keep only the most recent week of case-count columns.
Trend = States[['9/8/20_x','9/9/20_x','9/10/20_x','9/11/20_x','9/12/20_x','9/13/20_x','9/14/20_x']]
# -
# ### Outputs the first 10 states in alphabetical order.
Trend[:10:]
# ### Calculate Percentage Increase of each of the 10 states.
# Create a 'Percent' column with the week-over-week percentage increase in
# cases, for easier analysis of each state's trend.
States['Percent'] = ((States['9/14/20_x'] - States['9/8/20_x']) / States['9/8/20_x']) * 100
# Display the trend (percentage increase) for the first 10 states.
MyState_Trend = States[['Percent']]
MyState_Trend[:10:]
# # Analysis
# ## All 10 states COVID-19 cases have been increasing in the past recent week
#
# #### Alaska's COVID-19 cases have increased by 8.9%
# #### Alabama's COVID-19 cases have increased by 4.3%
# #### Arkansas' COVID-19 cases have increased by 6.9%
# #### Arizona's COVID-19 cases have increased by 1.3%
# #### California COVID-19 cases have increased by 3.0%
# #### Colorado COVID-19 cases have increased by 3.4%
# #### Connecticut COVID-19 cases have increased by 2.0%
# #### District of Columbia's COVID-19 cases have increased by 1.8%
# #### Delaware's COVID-19 cases have increased by 3.4%
# #### Florida COVID-19 cases have increased by 2.42%
| src/Stage_I/Badesha/Harinder_StateTrend.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# Import relevant libraries
import re
import requests
import numpy as np
import pandas as pd
import lxml.html as lh
from bs4 import BeautifulSoup
# Get Url Function
def get_url(num):
    """Return the NAIC CIS financial-report URL for entity id *num*.

    Args:
        num: integer entity id interpolated into the query string.
    """
    # f-string replaces the dated %-formatting; output is identical for ints.
    return f"https://eapps.naic.org/cis/financialReport.do?entityId={num}"
# Output schema for the scraped table.
columns = ['number', 'company_name', 'business_type', 'home_office', 'url']
# Real id range is 0 to 15000; limited to the first 100 here.
# Rows are accumulated in a plain list: DataFrame.append was deprecated in
# pandas 1.4 and removed in 2.0, and appending row-by-row is quadratic.
rows = []
for number in range(0, 100):
    url_ = get_url(number)
    req = requests.get(url_)
    # Only ids whose page contains "status=yes" have a financial report.
    if "status=yes" in req.text:
        soup = BeautifulSoup(req.text, 'lxml')
        tds1 = soup.findAll("td", {"colspan": 3})
        # Company name: first colspan-3 cell.
        # NOTE(review): replace(' ','') strips *all* spaces, collapsing
        # multi-word names -- confirm this is intended.
        company_name = tds1[0].text.replace('\n','').replace(' ','')
        # Business type: second colspan-3 cell, minus its label.
        business_type = tds1[1].text.replace('Business Type:\n','')
        business_type = business_type.replace('\n','').replace(' ','')
        # Home office: first colspan-2 cell, minus its label.
        tds2 = soup.findAll("td", {"colspan": 2})
        home_office = tds2[0].text.replace('Home Office:\n','')
        home_office = home_office.replace('\n','').replace(' ','')
        rows.append({'number': number, 'company_name': company_name,
                     'business_type': business_type,
                     'home_office': home_office, 'url': url_})
        print(number)
# Build the DataFrame once, after scraping.
df = pd.DataFrame(rows, columns=columns)
df.to_csv('insurance_web_scrap.csv')
| web_scrap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
#
# **If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
#
# This notebook was generated for TensorFlow 2.6.
# + [markdown] colab_type="text"
# ## Modern convnet architecture patterns
# + [markdown] colab_type="text"
# ### Modularity, hierarchy, and reuse
# + [markdown] colab_type="text"
# ### Residual connections
# + [markdown] colab_type="text"
# **Case where the target block changes the number of output filters**
# + colab_type="code"
from tensorflow import keras
from tensorflow.keras import layers
# Residual connection where the main path changes the filter count (32 -> 64).
inputs = keras.Input(shape=(32, 32, 3))
x = layers.Conv2D(32, 3, activation="relu")(inputs)
# Keep a reference to the block's input so it can be added back afterwards.
residual = x
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
# 1x1 conv projects the residual to 64 filters so shapes match for add().
residual = layers.Conv2D(64, 1)(residual)
x = layers.add([x, residual])
# + [markdown] colab_type="text"
# **Case where the target block includes a max pooling layer**
# + colab_type="code"
# Residual connection around a block that downsamples with max pooling.
inputs = keras.Input(shape=(32, 32, 3))
x = layers.Conv2D(32, 3, activation="relu")(inputs)
residual = x
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
x = layers.MaxPooling2D(2, padding="same")(x)
# strides=2 downsamples the residual to match the pooled feature map.
residual = layers.Conv2D(64, 1, strides=2)(residual)
x = layers.add([x, residual])
# + colab_type="code"
inputs = keras.Input(shape=(32, 32, 3))
# Scale pixel values from [0, 255] to [0, 1].
x = layers.experimental.preprocessing.Rescaling(1./255)(inputs)
def residual_block(x, filters, pooling=False):
    """Apply two 3x3 convolutions with a residual connection around them.

    Args:
        x: input tensor.
        filters: number of filters used by both convolutions.
        pooling: if True, downsample the output with 2x2 max pooling
            (the residual is then downsampled with a strided 1x1 conv).

    Returns:
        Output tensor with `filters` channels.
    """
    residual = x
    x = layers.Conv2D(filters, 3, activation="relu", padding="same")(x)
    x = layers.Conv2D(filters, 3, activation="relu", padding="same")(x)
    if pooling:
        x = layers.MaxPooling2D(2, padding="same")(x)
        # Strided 1x1 conv downsamples the residual to match the pooled shape.
        residual = layers.Conv2D(filters, 1, strides=2)(residual)
    elif filters != residual.shape[-1]:
        # Channel counts differ: project the residual with a 1x1 conv.
        residual = layers.Conv2D(filters, 1)(residual)
    x = layers.add([x, residual])
    return x
x = residual_block(x, filters=32, pooling=True)
x = residual_block(x, filters=64, pooling=True)
x = residual_block(x, filters=128, pooling=False)
# Average over spatial dimensions, then a binary classification head.
x = layers.GlobalAveragePooling2D()(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
# + [markdown] colab_type="text"
# ### Batch normalization
# + [markdown] colab_type="text"
# ### Depthwise separable convolutions
# + [markdown] colab_type="text"
# ### Putting it together: a mini Xception-like model
# + colab_type="code"
from google.colab import files
# Prompt for a local file upload (expects kaggle.json with API credentials).
files.upload()
# + colab_type="code"
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 ~/.kaggle/kaggle.json
# !kaggle competitions download -c dogs-vs-cats
# !unzip -qq train.zip
# + colab_type="code"
import os, shutil, pathlib
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Source images extracted from the Kaggle dogs-vs-cats archive.
original_dir = pathlib.Path("train")
# Destination root for the small train/validation/test subsets.
new_base_dir = pathlib.Path("cats_vs_dogs_small")
def make_subset(subset_name, start_index, end_index):
    """Copy cat/dog images [start_index, end_index) into a dataset subset.

    Files named ``{category}.{i}.jpg`` are copied from the module-level
    ``original_dir`` into ``new_base_dir/subset_name/category`` for both
    categories.

    Args:
        subset_name: subset directory name ("train", "validation", "test").
        start_index: first image index to copy (inclusive).
        end_index: last image index (exclusive).
    """
    for category in ("cat", "dog"):
        # Renamed from `dir` to avoid shadowing the builtin; exist_ok=True
        # makes re-running the notebook cell idempotent instead of raising.
        subset_dir = new_base_dir / subset_name / category
        os.makedirs(subset_dir, exist_ok=True)
        fnames = [f"{category}.{i}.jpg" for i in range(start_index, end_index)]
        for fname in fnames:
            shutil.copyfile(src=original_dir / fname,
                            dst=subset_dir / fname)
# 1000 train, 500 validation and 1000 test images per category.
make_subset("train", start_index=0, end_index=1000)
make_subset("validation", start_index=1000, end_index=1500)
make_subset("test", start_index=1500, end_index=2500)
# Batched datasets of 180x180 images, labelled by subdirectory name.
train_dataset = image_dataset_from_directory(
    new_base_dir / "train",
    image_size=(180, 180),
    batch_size=32)
validation_dataset = image_dataset_from_directory(
    new_base_dir / "validation",
    image_size=(180, 180),
    batch_size=32)
test_dataset = image_dataset_from_directory(
    new_base_dir / "test",
    image_size=(180, 180),
    batch_size=32)
# + colab_type="code"
# Random augmentations applied on the fly while training.
data_augmentation = keras.Sequential(
    [
        layers.experimental.preprocessing.RandomFlip("horizontal"),
        layers.experimental.preprocessing.RandomRotation(0.1),
        layers.experimental.preprocessing.RandomZoom(0.2),
    ]
)
# + colab_type="code"
# Mini Xception-like model: separable convolutions, batch normalisation
# before each activation, and a residual connection around every block.
inputs = keras.Input(shape=(180, 180, 3))
x = data_augmentation(inputs)
x = layers.experimental.preprocessing.Rescaling(1./255)(x)
# Entry convolution; use_bias=False because BatchNormalization follows.
x = layers.Conv2D(filters=32, kernel_size=5, use_bias=False)(x)
for size in [32, 64, 128, 256, 512]:
    residual = x
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.SeparableConv2D(size, 3, padding="same", use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.SeparableConv2D(size, 3, padding="same", use_bias=False)(x)
    x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
    # Strided 1x1 conv downsamples the residual to match the pooled output.
    residual = layers.Conv2D(
        size, 1, strides=2, padding="same", use_bias=False)(residual)
    x = layers.add([x, residual])
x = layers.GlobalAveragePooling2D()(x)
# Dropout regularises before the binary classification head.
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# + colab_type="code"
# Binary cross-entropy matches the single sigmoid output (cat vs dog).
model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])
history = model.fit(
    train_dataset,
    epochs=100,
    validation_data=validation_dataset)
| chapter09_part02_modern-convnet-architecture-patterns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import nltk
from pycorenlp import StanfordCoreNLP
import re
import codecs
import requests
# Read the lyrics; keep only alphanumerics, newlines and the square brackets
# that mark section headers, then lowercase everything.
with codecs.open('../data/raw/battle-cry-army-of-the-pharaohs.txt', 'rb', encoding='utf-8', errors='ignore') as f:
    lyrics = re.sub('[^a-zA-Z0-9\n\[\]]', ' ', f.read()).lower()
# Requires a Stanford CoreNLP server listening on localhost:9000.
nlp = StanfordCoreNLP('http://localhost:9000')
# Drop empty lines and '['-prefixed section headers.
lines = [l for l in lyrics.split('\n') if (len(l) > 0 and l[0] != '[')]
tokens = []
for line in lines:
    # Tokenise each line with CoreNLP (tokenize + sentence split).
    output = nlp.annotate(line, properties={
        'annotators': 'tokenize, ssplit', \
        'outputFormat': 'json' \
    })
    # Flatten per-sentence token lists into one word list per lyric line.
    tokens.append([t['word'] for s in output['sentences'] for t in s['tokens']])
tokens
# +
arpabet = nltk.corpus.cmudict.dict()
# Words missing from the CMU pronouncing dictionary are posted to the CMU
# lextool service, which generates a pronunciation dictionary for them.
base_url = 'http://www.speech.cs.cmu.edu/cgi-bin/tools/logios/lextool.pl'
file = {'wordfile': ('words.txt', "\n".join([str(t).upper() for line in tokens for t in line if t not in arpabet.keys()]))}
res = requests.post(base_url, files=file, allow_redirects=True)
# The response page links to the generated .dict file; extract its URL.
dict_re = re.compile(r"(?<=DICT ).*?\.dict")
dict_url = dict_re.search(res.text).group(0)
res_dict = requests.get(dict_url)
# -
# Parse the .dict file: word<TAB>space-separated phonemes, one entry per line.
custom_dict = {line.split('\t')[0].lower(): line.split('\t')[1].split(' ') for line in res_dict.text.split('\n') if len(line) > 1}
# +
def get_phonemes(word):
    """Look up the ARPAbet phoneme list for *word*.

    Tries the CMU pronouncing dictionary first, then the custom dictionary
    generated by the lextool service. A word found in neither is printed
    and returned unchanged (a plain string) so failures are easy to spot.
    """
    # Narrow KeyError catches replace the original bare `except:` clauses,
    # which would also have masked NameError, KeyboardInterrupt, etc.
    try:
        # First pronunciation variant from the CMU dictionary.
        return arpabet[word][0]
    except KeyError:
        try:
            return custom_dict[word]
        except KeyError:
            print(word)
            return word
# Phoneme lists for every token, line by line.
phonemes = [[get_phonemes(t) for t in line] for line in tokens]
# -
phonemes
import numpy as np
import pandas as pd
song_name = 'battle_cry'
# Frequency table of individual phonemes across the whole song.
pd.DataFrame({song_name: pd.Series([p for line in phonemes for word in line for p in word]).value_counts()})#.transpose()
# +
# change is good
# -
| notebooks/1.0-b-phonemes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/macscheffer/DS-Unit-1-Sprint-2-Data-Wrangling/blob/master/module1-scrape-and-process-data/LS_DS_121_Scrape_and_process_data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="oR4Yeg3P07gu" colab_type="text"
# _Lambda School Data Science_
#
# # Scrape and process data
#
# Objectives
# - scrape and parse web pages
# - use list comprehensions
# - select rows and columns with pandas
#
# Links
# - [Automate the Boring Stuff with Python, Chapter 11](https://automatetheboringstuff.com/chapter11/)
# - Requests
# - Beautiful Soup
# - [Python List Comprehensions: Explained Visually](https://treyhunner.com/2015/12/python-list-comprehensions-now-in-color/)
# - [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
# - Subset Observations (Rows)
# - Subset Variables (Columns)
# - Python Data Science Handbook
# - [Chapter 3.1](https://jakevdp.github.io/PythonDataScienceHandbook/03.01-introducing-pandas-objects.html), Introducing Pandas Objects
# - [Chapter 3.2](https://jakevdp.github.io/PythonDataScienceHandbook/03.02-data-indexing-and-selection.html), Data Indexing and Selection
#
# + [markdown] id="I_NRVchqgGvM" colab_type="text"
# ## Scrape the titles of PyCon 2018 talks
# + id="3elw_8Nc7Tpe" colab_type="code" colab={}
# Listing page for all accepted PyCon 2018 talks.
url = 'https://us.pycon.org/2018/schedule/talks/list/'
# + id="SFNsyjVsTU4b" colab_type="code" colab={}
# + [markdown] id="vqkNgAzYpeK7" colab_type="text"
# ## 5 ways to look at long titles
#
# Let's define a long title as greater than 80 characters
# + [markdown] id="N7tqeZh14Fws" colab_type="text"
# ### 1. For Loop
# + id="kKxs5tqDApuZ" colab_type="code" colab={}
# + [markdown] id="I21jcEnK4IN7" colab_type="text"
# ### 2. List Comprehension
# + id="qaXe9UldAs3H" colab_type="code" colab={}
# + [markdown] id="2kn8pxL-4yMG" colab_type="text"
# ### 3. Filter with named function
# + id="ywLqqFJNAvFm" colab_type="code" colab={}
# + [markdown] id="IPIT6oXz40Q3" colab_type="text"
# ### 4. Filter with anonymous function
# + id="giIcFYkiAwiR" colab_type="code" colab={}
# + [markdown] id="qj8Yod8_45z4" colab_type="text"
# ### 5. Pandas
#
# pandas documentation: [Working with Text Data](https://pandas.pydata.org/pandas-docs/stable/text.html)
# + id="yRwPEHNcAzc_" colab_type="code" colab={}
# + [markdown] id="8YaUZJvRp681" colab_type="text"
# ## Make new dataframe columns
#
# pandas documentation: [apply](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.apply.html)
# + id="DR_WZ-olA4-v" colab_type="code" colab={}
# + [markdown] id="Ua74pMrGrsZR" colab_type="text"
# ### title length
# + id="p-Euz7tgA8Fd" colab_type="code" colab={}
# + [markdown] id="OgsKArXPrz5n" colab_type="text"
# ### long title
# + id="b_WCRvvKA-IP" colab_type="code" colab={}
# + [markdown] id="TonCXYPesUsT" colab_type="text"
# ### first letter
# + id="fhO4aABpBBgA" colab_type="code" colab={}
# + [markdown] id="Etz1XeLKs6DL" colab_type="text"
# ### word count
#
# Using [`textstat`](https://github.com/shivam5992/textstat)
# + id="GVIkRWchs4zR" colab_type="code" colab={}
# !pip install textstat
# + id="mY_M_MuaBFrF" colab_type="code" colab={}
# + [markdown] id="UN_7FABhwDqc" colab_type="text"
# ## Rename column
#
# `title length` --> `title character count`
#
# pandas documentation: [rename](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.rename.html)
# + id="jvTif7sBBMpN" colab_type="code" colab={}
# + [markdown] id="ca2pDtytr5tR" colab_type="text"
# ## Analyze the dataframe
# + [markdown] id="AitNVDCFwWwc" colab_type="text"
# ### Describe
#
# pandas documentation: [describe](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.describe.html)
# + id="yPo9RdxYBQ64" colab_type="code" colab={}
# + [markdown] id="T0lc_o-xyjZU" colab_type="text"
# ### Sort values
#
# pandas documentation: [sort_values](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html)
# + [markdown] id="kxE2swJ9-cG_" colab_type="text"
# Five shortest titles, by character count
# + id="7t8DlpLhBVQa" colab_type="code" colab={}
# + [markdown] id="NOEH4Ef5-kvo" colab_type="text"
# Titles sorted reverse alphabetically
# + id="WkymeWDjBV8X" colab_type="code" colab={}
# + [markdown] id="e4wr42FB0GV-" colab_type="text"
# ### Get value counts
#
# pandas documentation: [value_counts](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.value_counts.html)
#
# + [markdown] id="D81LNGaI-6ya" colab_type="text"
# Frequency counts of first letters
# + id="AdTQYsRKBZio" colab_type="code" colab={}
# + [markdown] id="04NVokvTAwqK" colab_type="text"
# Percentage of talks with long titles
# + id="uS8qp4hrBat6" colab_type="code" colab={}
# + [markdown] id="mmYZL2QL0lgd" colab_type="text"
# ### Plot
#
# pandas documentation: [Visualization](https://pandas.pydata.org/pandas-docs/stable/visualization.html)
#
#
#
#
# + [markdown] id="c6gCotA9_B68" colab_type="text"
# Top 5 most frequent first letters
# + id="DUmcVcdXBdkw" colab_type="code" colab={}
# + [markdown] id="_Ngegk0bASty" colab_type="text"
# Histogram of title lengths, in characters
# + id="y5oLu2D4BeKw" colab_type="code" colab={}
# + [markdown] id="DiylH7LQw44u" colab_type="text"
# # Assignment
#
# **Scrape** the talk descriptions. Hint: `soup.select('.presentation-description')`
#
# **Make** new columns in the dataframe:
# - description
# - description character count
# - description word count
# - description grade level (use [this `textstat` function](https://github.com/shivam5992/textstat#the-flesch-kincaid-grade-level) to get the Flesh-Kincaid grade level)
#
# **Describe** all the dataframe's columns. What's the average description word count? The minimum? The maximum?
#
# **Answer** these questions:
# - Which descriptions could fit in a tweet?
# - What's the distribution of grade levels? Plot a histogram.
#
#
# + id="xd5PbGuB50fK" colab_type="code" colab={}
import requests
import bs4
# Fetch and parse the PyCon 2018 talk schedule page.
url = 'https://us.pycon.org/2018/schedule/talks/list/'
result = requests.get(url)
soup = bs4.BeautifulSoup(result.text)
# + id="wCpKzYtV50tc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="47a56896-df61-4345-80a2-e7ea29c5af6b"
# Peek at the first talk description.
soup.select('.presentation-description')[0].text.strip()
# + id="y2doTG3w50ya" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="bb3f554f-f0c1-4951-faff-34ef2c74651b"
# Scrape every description and title; the two counts should match.
descriptions = [tag.text.strip() for tag in soup.select('.presentation-description')]
titles = [tag.text.strip() for tag in soup.select('h2')]
len(descriptions), len(titles)
# + id="DW9SQBpD9CcB" colab_type="code" colab={}
import pandas as pd
# Widen column display so long descriptions stay readable.
pd.options.display.max_colwidth = 200
# + id="4n7wYato9Ch0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 203} outputId="2fdcb671-f836-451a-ac6b-8c6452b8e946"
df = pd.DataFrame({'description':descriptions})
df.head()
# + id="zuTuat_r9DFL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="14f1b13d-a91d-49bf-d3c9-b627aeb1c144"
# Character count per description.
df['description char count'] = df.description.apply(len)
df.head()
# + id="RM4cz71V-lpk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="37e158e8-afe3-4c24-be11-485d03f0dc3b"
# !pip install textstat
# + id="alC4RE96-zio" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="ca9ac683-5d0f-44ba-ca13-f43e31fa3f17"
import textstat
# use textstat to count words.
df['description word count'] = df.description.apply(textstat.lexicon_count)
df.head()
# + id="t7Z4ggJV_dXO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="6c3ab609-4a01-4a4b-92d1-12395da025ba"
# readability by grade level using the Flesh-Kincaid grade level
# FK grade levels 0-18
# 0-6: Basic, 7-12: Average, 12-18: Skilled
df['description FK grade level'] = df.description.apply(textstat.flesch_kincaid_grade)
df.head()
# + id="mB7m895HAHPG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 175} outputId="0faf9d60-4f3c-4d74-d5c3-7c0c79f365b3"
# looks like we have one value that is way too high. might want to categorize them.
df['description FK grade level'].describe()
# + id="_ik9STyGCRbs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="65787bb5-0328-405f-ef60-03dccb0e2324"
import numpy as np
# Bucket the FK grade level: [0,6) Basic, [6,12) Average, 12+ Skilled.
criteria = [((df['description FK grade level'] >= 0) & (df['description FK grade level'] < 6)),
((df['description FK grade level'] >= 6) & (df['description FK grade level'] < 12)),
((df['description FK grade level'] >= 12))]
values = ['Basic', 'Average', 'Skilled']
# NOTE(review): rows matching none of the conditions (e.g. a negative FK
# score) get np.select's default value 0 rather than a label -- confirm.
df['description FK category'] = np.select(criteria,values)
df.head()
# + id="yo5TsPh4Ck5K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 362} outputId="e6cb3245-6814-4b0d-9c3f-bae67e86d423"
df['description FK category'].value_counts().plot.barh(title='Counts for each FK category');
# + id="HqFeStXIH9vw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="d1435eec-65c4-45c2-bfb3-1540820fb1cd"
df.describe()
# + id="mPdV5UW6IUu8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} outputId="244d0aa7-c9e6-43e9-a3dd-477c39e18e6e"
# First description short enough for a 280-character tweet.
list(df['description'][df['description char count'] < 280])[0]
# + id="qWuOX9UXIsRI" colab_type="code" colab={}
df['tweetable description'] = df['description char count'] <= 280
# + id="9CtNafGxJLr0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 362} outputId="d29dc6b9-73c9-43ee-d874-f71e94ecd3d0"
df['description FK grade level'].plot.hist(title='distribution of FK grade levels');
# + id="SFB6DtXvS1sc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="21b68b45-712c-4727-8a4f-d1640fae5f7a"
# Two more readability indices, then their mean with the FK score.
df['description FOG grade level'] = df.description.apply(textstat.gunning_fog)
df['description SMOG grade level'] = df.description.apply(textstat.smog_index)
df.head()
# + id="DqAOr7cXTULn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 409} outputId="ac949013-8e53-44c2-e746-ec6baff9cb48"
df['mean grade level'] = (df['description FK grade level'] + df['description FOG grade level'] + df['description SMOG grade level']) / 3
df.head()
# + id="evTICBrcTXl_" colab_type="code" colab={}
# Average word length of the description.
df['description char per word'] = df['description char count'] / df['description word count']
# + id="4mzB0IFbT8Gh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="04f0faf8-101b-48ba-b2f2-81a6798adfa0"
df['description char per word'].corr(df['mean grade level'])
# + id="hWCnORb9UBN7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 365} outputId="7eb4d7b4-c135-4eba-8234-cca7a181fab0"
df.pivot_table(values = 'description char per word', index='description FK category').plot.barh()
# + id="tL4oWv-mUMRS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 409} outputId="fe01d9c3-e018-4ef8-d569-409a891f677b"
df.head()
# + id="5vIMDq8CUb32" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="8169cfae-7151-4618-ab73-ce59d1f7259e"
df.describe()
# + id="xvY0MiUbU6yz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 398} outputId="185c2065-be81-4464-d91c-283a8b971f47"
df.corr()
# + id="Dzr9UlI1VJA7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f1e8c77d-480b-4e98-af8c-e3cacf1f4fe0"
# h2 tags on the schedule page hold the talk titles.
soup.select('h2')[0].text.strip()
# + id="w73t_jz1hCrl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 148} outputId="7ff6bea8-58e3-4dc0-ecb2-e71f47ab35c1"
df.head(1)
# + id="igdSHTishGSN" colab_type="code" colab={}
df['title'] = [tag.text.strip() for tag in soup.select('h2')]
# + id="sEVgECX-hdy_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 183} outputId="a27a3f29-ef21-4394-dd84-758b4961595e"
df.head(1)
# + id="loYKhz4JhfC5" colab_type="code" colab={}
# NOTE(review): the column created above is 'title', not 'titles'; this drop
# raises KeyError unless an earlier (since-removed) cell added 'titles'.
df = df.drop(labels='titles', axis='columns')
# + id="yEN__n0jhsbm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 183} outputId="de5ad7f7-b6ef-4e10-8fe4-915419fd1e14"
df.head(1)
# + id="gXoM0MwqidGF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="27976a2c-2815-4e02-d406-26ae7d2b77a5"
# Title-derived features mirroring the description features.
df['title char count'] = df.title.apply(len)
df['first letter in title'] = df.title.str[0]
df['title word count'] = df.title.apply(textstat.lexicon_count)
df.head(1)
# + id="wHdAyV8kjFQq" colab_type="code" colab={}
df['first letter in title'] = df['first letter in title'].str.upper()
# + id="S9svCgNhjV3u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="78a7dbd6-3eb0-40ec-e21f-74c58c3c8098"
df.shape
# + id="4ho-Vg67jj9v" colab_type="code" colab={}
df['title char per word'] = df['title char count'] / df['title word count']
# + id="W1CXvjQvju_v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="e942981e-0c2c-47b2-b602-e0d76fab2185"
# True when the average title word is longer than the average description word.
df['bigger words in title'] = (df['title char per word'] > df['description char per word'])
df['bigger words in title'].describe()
# + id="5H9wVcWEkIaU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f75ffa59-84ad-4314-fdcd-80acccc584d9"
len(soup.select('b')[1::2][0])
# + id="-kv_Pyv0m9E5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 443} outputId="a33fa019-c5a7-40f5-9ddb-2dec4b4e6591"
# NOTE(review): 'speaker names' is only created in the *next* cell; this cell
# must have been re-run after it in the original session -- confirm ordering.
df = df.drop(labels='speaker names', axis='columns')
df.head(1)
# + id="Hsz0TXm7kyD0" colab_type="code" colab={}
# Even-indexed <b> tags hold speaker names, odd-indexed ones the time/place.
df['speaker names'] = [tag.text.strip() for tag in soup.select('b')[::2]]
df['time/place'] = [tag.text.strip() for tag in soup.select('b')[1::2]]
# + id="MChar1A9m6xC" colab_type="code" colab={}
import re
def split(expression):
    """Split a scraped time/place cell into [day, time, location].

    The cell text has the form ``day\\ntime\\n<other>\\nlocation``, so
    fields 0, 1 and 3 are kept, each stripped of surrounding whitespace.
    """
    # str.split is enough for a fixed one-character separator; the original
    # re.split call was overkill and the intermediate list was built by hand.
    parts = expression.split('\n')
    return [parts[0].strip(), parts[1].strip(), parts[3].strip()]
# + id="JBmk43E-qVDi" colab_type="code" colab={}
# Parse every time/place cell into [day, time, location] triples.
times_places = list(df['time/place'].apply(split))
days = []
times = []
locations = []
for item in times_places:
    days.append(item[0])
    times.append(item[1])
    locations.append(item[2])
# + id="-7hlGO34q2za" colab_type="code" colab={}
# Attach the parsed fields as columns and drop the raw combined column.
df['event day'] = days
df['event times'] = times
df['event locations'] = locations
df = df.drop(labels='time/place',axis=1)
# + id="JBHvYYu9uH0k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="ef3a24ed-a72e-4a2a-9a48-6801f9c8e7ec"
# Distribution of talks per room.
df['event locations'].value_counts()
# + id="IZyhs6odw5zA" colab_type="code" colab={}
| module1-scrape-and-process-data/LS_DS_121_Scrape_and_process_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# Ben previously noticed that there were some metabolites which had now been labelled with a double M_M_ infront of the metabolite ID. Cobra then only removes the first M_ (metabolite indicator) and leaves the second. This should be removed so all metabolites are present in the same format.
#
# The same goes for some reactions. This will be invesigated and fixed here.
#
#
import cobra
import pandas as pd
import cameo
# Load the current working model from the repository.
model = cobra.io.read_sbml_model("../model/p-thermo.xml")
# ### Fix M_M_ metabolites
# Strip the leftover 'M_' prefix from metabolites whose ids were written as
# M_M_<id>: cobra removes one M_ on read, so duplicates still start with 'M_'.
# startswith() replaces the original `met.id[:2] in prefix`, a substring test
# that would also match one-character ids 'M' or '_' and wrongly truncate them.
for met in model.metabolites:
    if met.id.startswith('M_'):
        met.id = met.id[2:]
# Now all metabolites with a double M_ prefix should have been fixed.
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')
# ### Fix R_R_ reactions
# Strip the leftover 'R_' prefix from reactions with a duplicated R_R_ id.
# startswith() replaces the fragile `rct.id[:2] in prefix` substring test,
# which would also match one-character ids 'R' or '_' and wrongly truncate.
for rct in model.reactions:
    if rct.id.startswith('R_'):
        rct.id = rct.id[2:]
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')
# After running the above code, we got rid of the reactions with the R_R_ double prefix. However it shows that there are still some reactions that have strange names associated to them: rXXXX. These I will manually fix here.
model = cobra.io.read_sbml_model('../model/p-thermo.xml')
# Inspect the notes of one of the generically named rXXXX reactions.
model.reactions.r0777.notes
# Give the remaining rXXXX reactions mnemonic ids.
# NOTE(review): ids appear to be hand-made BiGG-style abbreviations derived
# from the reaction notes -- confirm against the reaction annotations.
model.reactions.r0068.id = 'AADCOAT'
model.reactions.r0082.id = 'OAACOLY'
model.reactions.r0097.id = 'ATPACAT'
model.reactions.r0120.id = 'GTPHYDRO'
model.reactions.r0163.id = 'AKGDEHY'
model.reactions.r0422.id = 'ISOCITOR'
model.reactions.r0708.id = 'AHETDYTTPHY'
model.reactions.r0775.id = 'FAPNTPAH'
model.reactions.r0777.id = 'DAPNTPMUT'
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')
| notebooks/8. Fixing M_M_ metabolites & R_R_ reactions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-pNWScUokvZ8"
# # Challenge: Países, ciudades y monedas
# + [markdown] id="04ukMAXTkwVg"
# Para el reto de hoy se trabajará con datos de países que contiene información sobre las ciudades más pobladas del mundo, así como datos económicos a nivel de país, datos de población y datos geográficos. 🤓
#
# En este ejercicio se deberá de explorar una forma de obtener datos de las tablas de países y economías para examinar la tasa de inflación tanto para 2010 como para 2015. 😉
# + [markdown] id="VEob4ygnwQrB"
# ## Preparación de los datos
#
# Antes de comenzar, repetiremos un proceso que vimos en la leccion anterior. Sigue estos pasos para crear una nueva base de datos e importar un CSV en una tabla nueva.
# + [markdown] id="0rKp_z6t701h"
# 1. Conéctate a MySQL Server: abre MySQL Workbench y haz clic en la conexión a tu servidor local. Ingresa tu usuario y contraseña.
#
# ¿Aún no tienes hecha tu conexión? Averigua cómo hacerla [aquí](https://dev.mysql.com/doc/workbench/en/wb-mysql-connections-new.html)
# + [markdown] id="aDweLGvx8tnk"
# 2. Una vez conectado, acceda al menú de schemas del lado izquierdo:
#
# 
# + [markdown] id="0aIjj9muAAjq"
# 3. Una vez en la sección de Schemas, haz clic en un espacio en blanco y selecciona 'Create Schema...'
# 
#
# 4. Del lado derecho verás algo como lo siguiente. Allí, deberás ingresar el nombre **'master'** como el nombre de tu Schema.
# 
#
# 5. Sigue los pasos de la ventana que aparecerá hasta que veas la siguiente pantalla:
# 
# + [markdown] id="d6MvYaqIg4r0"
# 6. Verás un nuevo ícono en Schemas con el nombre de tu schema, haz clic derecho sobre él y selecciona "Table Data Import Wizard"
# 
#
# 7. Ha llegado la hora de descargar los siguientes archivos:
# * [cities](https://drive.google.com/file/d/1tSZBx32V6P6qfyGb9DOfmEmSaTtCbMtm/view?usp=sharing)
#
# * [countries](https://drive.google.com/file/d/1Js2H_Djlw8JaVOIp1vcWDigHN6gIPKR4/view?usp=sharing)
#
# * [languages](https://drive.google.com/file/d/19EC_01FqSgVd-8tlScrk96rhf3vjhGnm/view?usp=sharing)
#
# * [economies](https://drive.google.com/file/d/1lwlOI09uD3Citq1_OjsDN7jM8CiJvpKc/view?usp=sharing)
#
# * [population](https://drive.google.com/file/d/1gb9W6cgh69f_wWtlI617ZyANu1SdfJbH/view?usp=sharing)
#
#
# Una vez que lo hayas hecho, haz clic en "Browse" y selecciónalo.
# 
#
# 8. En esta pantalla solo haz clic en "Next"
# 
#
# 9. En esta pantalla, verifica que la pequeña muestra de la tabla se muestre correctamente y haz clic en "Next"
# 
#
# 10. Finalmente, clic en "Next" a esta pantalla y espera a que se importen los datos. Al terminar haz clic en "Finish".
# 
#
# 11. Ahora, en el editor de scripts del lado derecho, escribe:
#
# ```
# use master;
# show tables;
# ```
#
# Deberías ver el siguiente resultado:
#
# 
# + [markdown] id="nE4naQiA3wSW"
# ## Ahora sí, hagamos algunos joins sobre esa base de datos 🤠
#
# NOTA: En algunos de los siguientes ejercicios te vamos a pedir que utilices alias para columnas con el mismo nombre en diferentes tablas, si no está familiarizado con este tema, te dejamos este [link](https://www.w3schools.com/sql/sql_alias.asp).
# + [markdown] id="UyJcYPiikwj5"
# 1. Utiliza un INNER JOIN para cruzar la tablas `cities` y `countries`.
#
# Selecciona el nombre de la ciudad (con alias 'city'), el código de país, el nombre del país (con alias 'country') y la columna city_prop_population (población propia de la ciudad). Finalmente, ordena en orden descendente por la columna que ambas tablas tienen en común.
#
# ```
# # Tu código aquí
# ```
# Resultado esperado:
#
# 
# + [markdown] id="IFr6CSee-KZO"
# **2.** Utiliza un LEFT JOIN para cruzar la tablas `countries` y `languages`.
#
# Selecciona el país (con alias 'country'), el nombre local del país (local_name), el nombre del idioma y, finalmente, el porcentaje del idioma hablado en cada país
#
# ```
# # Tu código aquí
# ```
#
# Resultado esperado:
#
# 
#
# + [markdown] id="6cCfUJU--K28"
# 3. Utiliza nuevamente un LEFT JOIN (o RIGHT JOIN si quieres alocarte un poco 🤓) para cruzar las tablas `countries` y `economies`.
#
# Selecciona el nombre del país, región y GDP per cápita (de `economies`). Finalmente, filtra las filas para obtener solo los resultados del año 2010.
#
# ```
# # Tu código aquí
# ```
#
# Resultado esperado:
#
# 
# + [markdown] id="V-Z2JuYt-Lbt"
# **4.** Veamos si aprendiste la diferencia entre LEFT JOIN y RIGHT JOIN. Convierte el siguiente ejemplo para utilizar RIGHT JOINs pero obteniendo el mismo resultado.
#
# ```
# /*
# SELECT cities.name AS city, urbanarea_pop, countries.name AS country,
# indep_year, languages.name AS language, percent
# FROM cities
# LEFT JOIN countries
# ON cities.country_code = countries.code
# LEFT JOIN languages
# ON countries.code = languages.code
# ORDER BY city, language;
# */
#
# # Tu código aquí
# ```
#
# Resultado esperado:
#
# 
# + [markdown] id="msDEW7rVzfas"
# **5.** Haz una subconsulta en WHERE donde calcules el promedio de la expectativa de vida en la tabla `populations`, filtrando solo para el año 2015.
#
#
# ```
# # Tu código aquí
# ```
#
# Resultado esperado:
#
# 
# + id="9dvuy_kXyUUc"
| 0. Herramientas para la Ciencia de Datos/7. Subconsultas/Challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Comparing the Classification Accuracies
#
# So, you should now have a set of reference points - either the ones provided (used here) or you could have created your own set using the ClassAccuracy plugin. For this analysis we need to intersect the reference points with each of the classifications which have been produced and then collate the statistics for those classifications.
#
# ## Running Notebook
#
# The notebook has been run and saved with the outputs so you can see what the outputs should be and so the notebook and be browsed online without having to run the notebook for it to make sense.
#
# If you are running the notebook for yourself it is recommended that you clear the existing outputs which can be done by running one of the following options depending on what system you are using:
#
# **Jupyter-lab**:
#
# > \> _Edit_ \> _'Clear All Outputs'_
#
# **Jupyter-notebook**:
#
# > \> _Cell_ \> _'All Outputs'_ \> _Clear_
# # 1. Import Modules
# +
import os
# Import the pandas module
import pandas
# Import the geopandas module
import geopandas
# Import the rsgislib classification module
import rsgislib.classification
# Import rsgislib vectorutils module
import rsgislib.vectorutils
# Import the rsgislib tools utils module
import rsgislib.tools.utils
# Import the function to calculate the accuracy stats
from rsgislib.classification.classaccuracymetrics import calc_acc_metrics_vecsamples
# -
# # 2. Create Output Directories
# +
# Output directories: intersected accuracy points and per-classifier stats.
out_pts_dir = "acc_pts"
out_stats_dir = "acc_stats"
# os.makedirs(..., exist_ok=True) replaces the check-then-mkdir pattern:
# it is free of the exists/create race and also creates missing parents.
for _out_dir in (out_pts_dir, out_stats_dir):
    os.makedirs(_out_dir, exist_ok=True)
# -
# # 3. Define Input Files
# +
# Classification result images, one per (classifier, normalisation) pair.
# NOTE(review): these paths assume the earlier classification notebooks have
# already been run so the KEA files exist -- confirm before running this one.
# Group 1: classifiers trained on the raw reflectance values.
refl_cls_dir = "cls_refl_results"
refl_cls_ml_img = os.path.join(refl_cls_dir, "cls_ml_refl.kea")
refl_cls_svm_img = os.path.join(refl_cls_dir, "cls_svm_refl.kea")
refl_cls_rf_img = os.path.join(refl_cls_dir, "cls_rf_refl.kea")
refl_cls_et_img = os.path.join(refl_cls_dir, "cls_et_refl.kea")
refl_cls_gbt_img = os.path.join(refl_cls_dir, "cls_gbt_refl.kea")
refl_cls_nn_img = os.path.join(refl_cls_dir, "cls_nn_refl.kea")
# Group 2: classifiers trained on linearly normalised data.
linnorm_cls_dir = "cls_lin_norm_results"
linnorm_cls_ml_img = os.path.join(linnorm_cls_dir, "cls_ml_linnorm.kea")
linnorm_cls_svm_img = os.path.join(linnorm_cls_dir, "cls_svm_linnorm.kea")
linnorm_cls_rf_img = os.path.join(linnorm_cls_dir, "cls_rf_linnorm.kea")
linnorm_cls_et_img = os.path.join(linnorm_cls_dir, "cls_et_linnorm.kea")
linnorm_cls_gbt_img = os.path.join(linnorm_cls_dir, "cls_gbt_linnorm.kea")
linnorm_cls_nn_img = os.path.join(linnorm_cls_dir, "cls_nn_linnorm.kea")
# Group 3: classifiers trained on standard-deviation normalised data.
sdnorm_cls_dir = "cls_sdnorm_results"
sdnorm_cls_ml_img = os.path.join(sdnorm_cls_dir, "cls_ml_sdnorm.kea")
sdnorm_cls_svm_img = os.path.join(sdnorm_cls_dir, "cls_svm_sdnorm.kea")
sdnorm_cls_rf_img = os.path.join(sdnorm_cls_dir, "cls_rf_sdnorm.kea")
sdnorm_cls_et_img = os.path.join(sdnorm_cls_dir, "cls_et_sdnorm.kea")
sdnorm_cls_gbt_img = os.path.join(sdnorm_cls_dir, "cls_gbt_sdnorm.kea")
sdnorm_cls_nn_img = os.path.join(sdnorm_cls_dir, "cls_nn_sdnorm.kea")
# Reference accuracy-assessment points (file + layer name inside it).
vec_file = "../data/cls_data/cls_acc_assessment_pts_ref.geojson"
vec_lyr = "cls_acc_assessment_pts_ref"
# -
# # 4. Copy Existing Points File
#
# To avoid overwriting and editing the input file provided, we will first copy it into our output directory.
# +
# Working copy of the reference points (we edit this one, not the input).
vec_refpts_file = os.path.join(out_pts_dir, "cls_acc_assessment_pts_compare_ref.geojson")
vec_refpts_lyr = "cls_acc_assessment_pts_compare_ref"
# vector_translate copies the layer to a new GeoJSON, deleting any existing
# output first (del_exist_vec=True) so re-runs start from a clean copy.
rsgislib.vectorutils.vector_translate(vec_file, vec_lyr, vec_refpts_file, vec_refpts_lyr, out_format="GeoJSON", del_exist_vec=True)
# -
# # 5. Create `dict` look up table (LUT)
#
# To reduce the amount of code we need to write, reducing duplication and improving code reliability, it is often better to use a loop with a look-up table (LUT) for the input and output parameters. In this case we will use a `dict` to provide that LUT.
#
# +
# Build the LUT programmatically rather than with 18 hand-written
# assignments: key pattern is "<algorithm>_<norm>_cls", value is the
# matching classification image path. Insertion order matches the
# original listing (all "rl" entries, then "nln", then "nsd").
cls_info = dict()
_alg_order = ("ml", "svm", "rf", "et", "gbt", "nn")
_norm_imgs = {
    "rl": (refl_cls_ml_img, refl_cls_svm_img, refl_cls_rf_img,
           refl_cls_et_img, refl_cls_gbt_img, refl_cls_nn_img),
    "nln": (linnorm_cls_ml_img, linnorm_cls_svm_img, linnorm_cls_rf_img,
            linnorm_cls_et_img, linnorm_cls_gbt_img, linnorm_cls_nn_img),
    "nsd": (sdnorm_cls_ml_img, sdnorm_cls_svm_img, sdnorm_cls_rf_img,
            sdnorm_cls_et_img, sdnorm_cls_gbt_img, sdnorm_cls_nn_img),
}
for _norm, _imgs in _norm_imgs.items():
    for _alg, _img in zip(_alg_order, _imgs):
        cls_info[f"{_alg}_{_norm}_cls"] = _img
# -
# # 6. Populate Accuracy Reference Points
# For each classification, intersect the reference points with the image and
# write the class of the intersected pixel into a new attribute column named
# after the LUT key (so each point carries one column per classification).
for cls_col in cls_info:
    print(cls_col)
    rsgislib.classification.pop_class_info_accuracy_pts(
        input_img=cls_info[cls_col],
        vec_file=vec_refpts_file,
        vec_lyr=vec_refpts_lyr,
        rat_class_col="class_names",
        vec_class_col=cls_col,
        vec_ref_col=None,
        vec_process_col=None,
    )
# Keep the column names for the filtering step below.
cls_cols = list(cls_info.keys())
# # 7. Filter Valid Points
#
# Some classifiers can produce no data regions and if those intersect with reference points then an error will occur when calculating the accuracy statistics so we need to remove those which we will do using geopandas. For points where there is no class (i.e., no data) in the input classification then the value `"NA"` is outputted into the attribute table and it is rows with an `"NA"` value that we want to remove.
#
# +
vec_refpts_vld_file = os.path.join(out_pts_dir, "cls_acc_assessment_pts_compare_ref_vld.geojson")
vec_refpts_vld_lyr = "cls_acc_assessment_pts_compare_ref_vld"
# Re-read the populated points and keep only the rows that received a real
# class label from every classification ("NA" marks points in no-data areas).
points_gdf = geopandas.read_file(vec_refpts_file)
for cls_col in cls_cols:
    print(cls_col)
    points_gdf = points_gdf[points_gdf[cls_col] != "NA"]
points_gdf.to_file(vec_refpts_vld_file, driver="GeoJSON")
points_gdf
# -
# # 8. Calculate Classification Accuracy Stats
#
# An LUT of the classification accuracy statistics JSON files will also be created to allow post processing and summarising of the classification accuracy statistics.
#
# LUT mapping each classification column name to its output stats JSON file,
# used in the summarising step below.
cls_acc_stats_lut = dict()
for cls_col in cls_info:
    print(cls_col)
    out_json_file = os.path.join(out_stats_dir, f"{cls_col}_acc_info.json")
    out_csv_file = os.path.join(out_stats_dir, f"{cls_col}_acc_info.csv")
    # Compute accuracy metrics from the valid reference points against the
    # classification image; writes both JSON (machine) and CSV (human) output.
    calc_acc_metrics_vecsamples(
        vec_file=vec_refpts_vld_file,
        vec_lyr=vec_refpts_vld_lyr,
        ref_col="ref_pts",
        cls_col=cls_col,
        cls_img=cls_info[cls_col],
        img_cls_name_col="class_names",
        img_hist_col="Histogram",
        out_json_file=out_json_file,
        out_csv_file=out_csv_file,
    )
    cls_acc_stats_lut[cls_col] = out_json_file
# # 9. Summarise Classification Statistics
# +
# A convinent way of creating a pandas dataframe (i.e., table of data)
# is through a dict where the dict keys provide the column names.
# A convenient way of creating a pandas dataframe (i.e., table of data)
# is through a dict where the dict keys provide the column names.
cls_acc_stats = dict()
cls_acc_stats["Classifier"] = list()
cls_acc_stats["Proportion Correct"] = list()
cls_acc_stats["Allocation Disagreement"] = list()
cls_acc_stats["Quantity Disagreement"] = list()
cls_acc_stats["Overall Accuracy"] = list()
cls_acc_stats["Kappa"] = list()
cls_acc_stats["macro f1-score"] = list()
cls_acc_stats["weighted area f1-score"] = list()
# Pull one row of summary metrics out of each classification's stats JSON.
for cls_col in cls_acc_stats_lut:
    print(cls_col)
    cls_acc_stats_dict = rsgislib.tools.utils.read_json_to_dict(cls_acc_stats_lut[cls_col])
    cls_acc_stats["Classifier"].append(cls_col)
    cls_acc_stats["Proportion Correct"].append(cls_acc_stats_dict["quantity_metrics"]["Proportion Correct (C)"])
    cls_acc_stats["Allocation Disagreement"].append(cls_acc_stats_dict["quantity_metrics"]["Allocation Disagreement (A)"])
    cls_acc_stats["Quantity Disagreement"].append(cls_acc_stats_dict["quantity_metrics"]["Quantity Disagreement (Q)"])
    cls_acc_stats["Overall Accuracy"].append(cls_acc_stats_dict["accuracy"])
    cls_acc_stats["Kappa"].append(cls_acc_stats_dict["cohen_kappa"])
    cls_acc_stats["macro f1-score"].append(cls_acc_stats_dict["macro avg"]["f1-score"])
    cls_acc_stats["weighted area f1-score"].append(cls_acc_stats_dict["weighted area avg"]["f1-score"])
# Build the summary table, one row per classifier.
cls_acc_stats_df = pandas.DataFrame.from_dict(cls_acc_stats)
cls_acc_stats_df.set_index("Classifier")
# -
# # 10. Sort Summarised Results
# Rank classifiers by the area-weighted "Proportion Correct" metric.
cls_acc_stats_sort_df = cls_acc_stats_df.sort_values(by=['Proportion Correct'], ascending=False).set_index("Classifier")
cls_acc_stats_sort_df
# Rank by (non area-weighted) macro f1-score for comparison.
cls_acc_stats_sort_df = cls_acc_stats_df.sort_values(by=['macro f1-score'], ascending=False).set_index("Classifier")
cls_acc_stats_sort_df
# # 11. Conclusions
#
# So, what can we interpret from the result above?
#
# 1. The overall accuracies are much lower than the test/train scores we looked at earlier - due to poor sample data as discussed.
# 2. The order of the classifiers in terms of accuracy is very different to those using the test/train scores.
# 3. The order of the classifiers is very different when using the "Proportion Correct" (areas normalised) verses the "macro f1-score" (not area normalised)
#
| 05_further_image_classification/.ipynb_checkpoints/10_compare_cls_accuracies-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Databricks utility: stop this notebook immediately and return a single
# string value to the caller (received as the result of dbutils.notebook.run).
dbutils.notebook.exit("hello world")
| ch4 - Manage Databricks with Databricks Utilities/Notebook Workflows - Return Single Result.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %pylab inline
from scipy import integrate
def func(z, omega_m=0.3, omega_l=0.7, omega_k=0.0):
    """Dimensionless Hubble parameter E(z) = H(z)/H0 for an FLRW cosmology.

    Accepts a scalar or numpy array of redshifts ``z`` plus the matter,
    dark-energy and curvature density parameters.
    """
    # np.sqrt instead of the bare `sqrt` injected by the %pylab star-import:
    # explicit, and keeps the function usable if pylab mode is ever dropped
    # (np is also provided by %pylab, so behaviour in the notebook is the same).
    return np.sqrt(omega_m * (z + 1) ** 3 + omega_k * (z + 1) ** 2 + omega_l)
# Evaluate H(z) on a redshift grid and plot it (pylab names in scope).
n_points = 1000
z_range = linspace(0,6.0, n_points)
hubble_0 = 70.0
hubble = hubble_0 * func(z_range)
fig = plt.figure(figsize=(10, 8.0))
plot(z_range, hubble)
xlabel("redshift")
ylabel("Hubble (km/s/Mpc)")
# +
# Compare several matter densities, always paired with omega_l = 1 - omega_m.
fig = plt.figure(figsize=(10, 8.0))
omega_m = array([0.3,0.4,0.5,1.0])
omega_l = 1.0 - omega_m
# NOTE(review): with omega_l = 1 - omega_m this is identically zero
# (flat universe by construction) -- confirm that is the intent.
omega_k = 1.0 - (omega_m + omega_l)
for o_m, o_l, o_k in zip(omega_m, omega_l, omega_k):
    hubble = hubble_0 * func(z_range, omega_m = o_m, omega_l=o_l, omega_k=o_k)
    plot(z_range, hubble, label=" o_m={} o_l={}".format(o_m,o_l))
xlabel("redshift")
ylabel("Hubble (km/s/Mpc)")
legend(loc=2)
# -
| src/hubble.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''env-01'': conda)'
# name: python3
# ---
# + [markdown] id="SJEgUhyh-k4-"
# # Arize Tutorial: Logging Predictions First, Then Logging Delayed Actuals
#
# Let's get started on using Arize! ✨
#
# Arize helps you visualize your model performance, understand drift & data quality issues, and share insights learned from your models.
#
# In this tutorial, we will using our Score Categorical model for predicting if someone has breast cancer or not to showcase one of the many ways of using the `arize.pandas.log` to log (i.e. send) data from a Pandas dataframe to the Arize platform.
#
# ### Why Use Multiple `log` Calls 🤔
# Sometimes, we want to `log` predictions during production and store our `prediction_ids` right away for model tracking, but we don't have ground truth labels available until much later. Other times, they become available at the same time. Depending on your situation, you may need to use `log` differently.
#
# **In this notebook, we will show how to `log` using `prediction_ids` to log only your predictions, then follow up with delayed logging of actuals as they become avaliable 🚀**
#
# For more of our usage case tutorials, visit our other [example tutorials](https://arize.gitbook.io/arize/examples).
#
# In general, if any part if your data (including `features`) become avaliable later and you can't log them right away, Arize provides the functionality of matching them through using `prediction_ids`, which is a required input for all `log` calls.
#
# ### Running This Notebook
# 1. Save a copy in Google Drive for yourself.
# 2. Step through each section below, pressing play on the code blocks to run the cells.
# 3. In Step 2, use your own Org and API key from your Arize account.
#
# + [markdown] id="aUUdm-QfF8xG"
# ## Step 1: Load Data and Build Model
# + id="OWSc0hFn-Y4W"
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
###############################################################################
# 1 Load data and split data
data = datasets.load_breast_cancer()
X, y = datasets.load_breast_cancer(return_X_y=True)
# NOTE: We need to set y.astype(str) since BINARY expected non-integer.
X, y = X.astype(np.float32), y.astype(str)
X, y = pd.DataFrame(X, columns=data["feature_names"]), pd.Series(y)
# First split: hold out a test set; second split: carve a validation set
# out of the remaining training portion. Fixed seeds for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, random_state=42)
###############################################################################
# 2 Fit a simple logistic regression model
clf = LogisticRegression(max_iter=3000, verbose=False).fit(X_train, y_train)
# 3 Use the model to generate predictions
def predict(model, X):
    """Run *model* on *X* and return (label, score) Series aligned to X.index.

    label: the argmax class index rendered as a string (matches the string
    "0"/"1" labels used for y above); score: the probability of class 1.
    """
    proba = model.predict_proba(X)
    labels = pd.Series([str(int(row.argmax())) for row in proba], index=X.index)
    class1_scores = pd.Series([row[1] for row in proba], index=X.index)
    return labels, class1_scores
# Generate (label, score) pairs for every split; the test-set predictions
# are the ones logged to Arize as "production" data below.
y_train_pred, y_train_pred_score = predict(clf, X_train)
y_val_pred, y_val_pred_score = predict(clf, X_val)
y_test_pred, y_test_pred_score = predict(clf, X_test)
print("Step 1 ✅: Load Data & Build Model Done!")
# + [markdown] id="PcVdPGFkGF2t"
# ## Step 2: Import and Setup Arize Client
# First, copy the Arize `API_KEY` and `ORG_KEY` from your admin page linked below!
#
#
# [](https://app.arize.com/admin)
#
# <img src="https://storage.googleapis.com/arize-assets/fixtures/copy-keys.jpeg" width="600">
# + id="btoJ-OY5DW5K"
# !pip install -q arize
from arize.pandas.logger import Client, Schema
from arize.utils.types import ModelTypes, Environments
ORGANIZATION_KEY = "ORGANIZATION_KEY"
API_KEY = "API_KEY"
# Validate the credentials *before* constructing the client so the user gets
# the actionable placeholder error first, instead of building a client with
# dummy keys that would only fail later at log() time.
if ORGANIZATION_KEY == "ORGANIZATION_KEY" or API_KEY == "API_KEY":
    raise ValueError("❌ NEED TO CHANGE ORGANIZATION AND/OR API_KEY")
arize_client = Client(organization_key=ORGANIZATION_KEY, api_key=API_KEY)
# Model metadata shared by every log() call in this tutorial.
model_id = "logging_tutorial_pred_delayed_actuals"
model_version = "1.0"
model_type = ModelTypes.SCORE_CATEGORICAL
print("Step 2 ✅: Import and Setup Arize Client Done! Now we can start using Arize!")
# + [markdown] id="ZtuHsrFvg6vf"
# # Logging Tutorial
# We'll use the following helper functions to generate prediction IDs and timestamps to simulate a production environment.
# + id="qrasyM6llqW2"
import uuid
from datetime import datetime, timedelta
# Prediction ID is required for all datasets
def generate_prediction_ids(X):
    """Return a Series of fresh UUID4 strings, one per row, aligned to X.index."""
    return pd.Series([str(uuid.uuid4()) for _ in X.index], index=X.index)
# OPTIONAL: We can directly specify when inferences were made
def simulate_production_timestamps(X, days=30):
    """Evenly spaced Unix timestamps for X's rows, spanning `days` ago -> now."""
    now = datetime.now()
    start = now - timedelta(days=days)
    spaced = np.linspace(start.timestamp(), now.timestamp(), num=len(X))
    return pd.Series(spaced, index=X.index)
# + [markdown] id="TDjRIV2ijdbs"
# ## Step 3: Logging Predictions
# We can log predictions to Arize first, and match various other values such as actuals, explainability (i.e SHAP), or even features later.
#
# In this example, we will use `arize.pandas.log()` to only log the `prediction_labels` and `features` directly assuming you had it avaliable. This is to simulate predictions making in production as data become avaliable.
#
# You can see our `arize.pandas.log()` documentations by clicking the button below.
#
# [](https://docs.arize.com/arize/sdks-and-integrations/python-sdk/arize.pandas)
# + id="_GId3HJQsohr"
# For this example we need to first assemble our data into a pandas DataFrame.
# The join is on the shared index of X_test, so every row carries its
# features plus an id, timestamp, predicted label and predicted score.
production_dataset = X_test.join(
    pd.DataFrame(
        {
            "prediction_id": generate_prediction_ids(X_test),
            "prediction_ts": simulate_production_timestamps(X_test),
            "prediction_label": y_test_pred,
            "prediction_score": y_test_pred_score,
        }
    )
)
# + [markdown] id="cSV1vBHtsx2V"
# Three easy steps to log a `pandas.DataFrame`. See [docs](https://docs.arize.com/arize/api-reference/python-sdk/arize.pandas) for more details.
#
# 1. Define `Schema` to designate column names
# 2. Call `arize.pandas.log()`
# 3. Check `response.status_code`
# + id="XgpD4pBgjitA"
# Define a Schema() object for Arize to pick up data from the correct columns for logging
production_schema = Schema(
    prediction_id_column_name="prediction_id", # REQUIRED
    timestamp_column_name="prediction_ts",
    prediction_label_column_name="prediction_label",
    prediction_score_column_name="prediction_score",
    feature_column_names=data["feature_names"],
)
# arize_client.log returns a Response object from Python's requests module.
# Only predictions are sent here; the actuals are logged separately below
# and matched by prediction_id.
response = arize_client.log(
    dataframe=production_dataset,
    schema=production_schema,
    model_id=model_id,
    model_version=model_version,
    model_type=model_type,
    environment=Environments.PRODUCTION,
)
# If successful, the server will return a status_code of 200
if response.status_code != 200:
    print(
        f"❌ logging failed with response code {response.status_code}, {response.text}"
    )
else:
    print(
        f"Step 3 ✅: You have successfully logged {len(production_dataset)} data points to Arize!"
    )
# + [markdown] id="FI53QL7qpANn"
# ## Step 4: Matching Actuals
# Sometimes, actuals will become avaliable later after we already logged the predictions. If `log` calls are made separately, the shape, length, and order of the `prediction_labels` and `actual_labels` do not need to match.
#
# **IMPORTANT:** To match an actual value with a prediction, both MUST be logged with the same `prediction_id`
# + id="BNDX1mvawxwn"
# Build an actual-values DataFrame with matching prediction_ids. The join is
# on the DataFrame index, which production_dataset and y_test share via X_test.
actual_dataset = production_dataset[["prediction_id"]].join(
    pd.DataFrame({"actual_label": y_test})
)
# + [markdown] id="2UKQGgBtwy1u"
# Three easy steps to log a `pandas.DataFrame`. See [docs](https://docs.arize.com/arize/api-reference/python-sdk/arize.pandas) for more details.
#
# 1. Define `Schema` to designate column names
# 2. Call `arize.pandas.log()`
# 3. Check `response.status_code`
# + id="MThKC6qXpPX8"
# Define a Schema() object for Arize to pick up data from the correct columns for logging.
# Only the id and actual label are needed here: Arize matches this record to
# the earlier prediction via prediction_id, so no features are re-sent.
actual_schema = Schema(
    prediction_id_column_name="prediction_id", # REQUIRED
    actual_label_column_name="actual_label",
    feature_column_names=[],
)
# arize_client.log returns a Response object from Python's requests module
response = arize_client.log(
    dataframe=actual_dataset,
    schema=actual_schema,
    model_id=model_id,
    model_version=model_version,
    model_type=model_type,
    environment=Environments.PRODUCTION,
)
# If successful, the server will return a status_code of 200
if response.status_code != 200:
    print(
        f"❌ logging failed with response code {response.status_code}, {response.text}"
    )
else:
    print(
        f"Step 4 ✅: You have successfully logged {len(actual_dataset)} data points to Arize!"
    )
# + [markdown] id="ta6g2K1PMf8M"
# ### Overview
# Arize is an end-to-end ML observability and model monitoring platform. The platform is designed to help ML engineers and data science practitioners surface and fix issues with ML models in production faster with:
# - Automated ML monitoring and model monitoring
# - Workflows to troubleshoot model performance
# - Real-time visualizations for model performance monitoring, data quality monitoring, and drift monitoring
# - Model prediction cohort analysis
# - Pre-deployment model validation
# - Integrated model explainability
#
# ### Website
# Visit Us At: https://arize.com/model-monitoring/
#
# ### Additional Resources
# - [What is ML observability?](https://arize.com/what-is-ml-observability/)
# - [Playbook to model monitoring in production](https://arize.com/the-playbook-to-monitor-your-models-performance-in-production/)
# - [Using statistical distance metrics for ML monitoring and observability](https://arize.com/using-statistical-distance-metrics-for-machine-learning-observability/)
# - [ML infrastructure tools for data preparation](https://arize.com/ml-infrastructure-tools-for-data-preparation/)
# - [ML infrastructure tools for model building](https://arize.com/ml-infrastructure-tools-for-model-building/)
# - [ML infrastructure tools for production](https://arize.com/ml-infrastructure-tools-for-production-part-1/)
# - [ML infrastructure tools for model deployment and model serving](https://arize.com/ml-infrastructure-tools-for-production-part-2-model-deployment-and-serving/)
# - [ML infrastructure tools for ML monitoring and observability](https://arize.com/ml-infrastructure-tools-ml-observability/)
#
# Visit the [Arize Blog](https://arize.com/blog) and [Resource Center](https://arize.com/resource-hub/) for more resources on ML observability and model monitoring.
#
| arize/examples/tutorials/Arize_Tutorials/Log_Examples/Arize_Tutorial_Delayed_Actuals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Pandas Time Series Data
# [Documentation](https://pandas.pydata.org/docs/user_guide/timeseries.html)
import numpy as np
import pandas as pd
import datetime
import random
# ### Data Formats Supported
# Pandas datetime64 can interpret strings, Python datetime, and Numpy datetime64 objects.
# Also note, a list of pd.datetime64 objects are automatically converted to a DatetimeIndex.
# pd.to_datetime accepts a mixed list of string, datetime.datetime and
# numpy.datetime64 inputs and returns a single DatetimeIndex.
a1 = pd.to_datetime([
    '6/1/2020',
    '6-2-2020',
    datetime.datetime(2020, 6, 3),
    np.datetime64('2020-06-04'),
    np.datetime64('2020-06-05')])
a1
# Pass in a format argument for custom formatted dates (case matters).
a2 = pd.to_datetime(['2020/14/06', '2020/15/06'], format='%Y/%d/%m')
a2
# Hours and Minutes too? No problem.
a3 = pd.to_datetime(
    ['2020/6/8 14.05', '2020/6/9 06.45'], format='%Y/%d/%m %H.%M')
a3
# ### Creating a datetime sequence with fixed intervals
# freq parameters:
# D=days, W=weeks, M=months, B=business days, BW=bus weeks, BM=bus months
# Thirty uniform(0, 1) samples paired with a daily date index.
# `_` instead of an unused loop variable `i` (idiomatic for a throwaway name).
b1 = [random.random() for _ in range(30)]
b2 = pd.date_range('2020-06-01', periods=30, freq='1d')
print(b2)
df = pd.DataFrame({'M': b1}, index=b2)
# Example date-based slicing (kept for reference):
#df.loc['2020-06-18':]
df[df['M'] > 0.8]  # boolean mask: rows whose value exceeds 0.8
# 52 random values on a weekly index; slice rows by date-string range.
b3 = np.random.rand(52)
b4 = pd.date_range('2020-06-01', periods=52, freq='W')
df = pd.DataFrame(b3, index=b4)
df['2020-07-10':'2020-07-28']
# Alternative to periods, you can give start and stop dates.
# NOTE(review): this rebinds b3 from the random array to a DatetimeIndex --
# intentional in a demo notebook, but confusing; a fresh name would be clearer.
b3 = pd.date_range('2020-06-30', '2020-12-31', freq='M')
b3
# ### Dates Index to/from CSV file
# Create DataFrame with Dates as Index, Write it to a CSV file, then Read in the CSV data and put the dates as Index
# +
# Build a 7-row frame with a date index, write it to CSV, then read it back.
d1 = np.round(6 + 4 * np.random.randn(7), decimals=2)
d2 = np.random.randint(12, 30, size=7)
d3 = pd.Series(pd.date_range('2020-05-29', periods=7, freq='1d'))
df = pd.DataFrame({'alpha':d1, 'beta':d2}, index=d3)
df.to_csv('file01.csv')
df
# -
# The CSV round-trip loses the datetime dtype: the index comes back as
# strings, so it must be re-parsed with pd.to_datetime before date slicing.
df = pd.read_csv('file01.csv', index_col=0)
print(type(df.index[2]))
df.index = pd.to_datetime(df.index, format='%Y/%m/%d')
print(type(df.index[2]))
df[:'2020/05/31']
# ### Constructing Dates from Multiple Columns
# You have Month, Day and Year in separate fields, and need to combine them into a single Datetime field.
# Random year/month/day components (day capped at 28 so every month is valid).
yyyy = [random.randint(1995,2020) for i in range(100)]
mm = [random.randint(1,12) for i in range(100)]
dd = [random.randint(1,28) for i in range(100)]
data = [random.random() for i in range(100)]
print(yyyy[5], mm[5], dd[5], data[5])
# pd.to_datetime assembles a datetime Series from 'year'/'month'/'day' columns.
df1 = pd.DataFrame({'year': yyyy,'month': mm, 'day': dd})
df1 = pd.to_datetime(df1)
df2 = pd.Series(data)
df = pd.concat([df1, df2], axis=1)
df[:5]
# ### Pivot (Transpose) Rows & Columns
# You normally want dates as the row index, not the column headers.
# Flip the rows and columns using T.
# .T transposes so the dates (column headers in the CSV) become the row index.
df = pd.read_csv('pivot.csv')
df = df.T
df.head()
# ### Date Arithmetic
appointment = pd.Timestamp('2020-06-04')
appointment.day_name()
# Uh oh! my appointment is delayed 2 days.
# Here are 3 different ways to add 2 days to the date.
appointment = pd.Timestamp('2020-06-04')
appointment += pd.Timedelta('2 days')
appointment.day_name()
appointment = pd.Timestamp('2020-06-04')
appointment += pd.Timedelta(days=2)
appointment.day_name()
# Date offsets: Day, Hour, Minute, Second, Milli, Micro, Nano
appointment = pd.Timestamp('2020-06-04')
appointment += pd.offsets.Day(2)
appointment.day_name()
# NO, it's delayed 2 business days.
# BDay skips weekends, so Thu 2020-06-04 + 2 business days lands on Monday.
appointment = pd.Timestamp('2020-06-04')
appointment += pd.offsets.BDay(2)
appointment.day_name()
| Pandas/Python Pandas Time Series Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
# Two labelled bar series on one axes: hours per worker for two groups.
x = [2,4,6,8,12]
y = ["Mick","Joe","Sam","Tref","God's"]
x2 = [3,9,1,10,15]
y2=["Henery","James","<NAME>","Marley","Kate"]
# +
plt.bar(y,x, label="Hours",color="red")
plt.bar(y2,x2, label="Man",color="green")
plt.xlabel("Hours contributed")
plt.ylabel("Names of workers")
plt.title("People's Graph")
plt.legend()
# -
import numpy as np
population = np.random.randint(150,200,5)
population
# NOTE(review): the comprehension variable x shadows the module-level list x
# defined above -- harmless here, but worth renaming.
ids = [x*10 for x in range(len(population))]
ids
plt.bar(ids,population)
plt.xlabel("Ids")
plt.ylabel("population")
plt.title("Population chart")
# NOTE(review): these bar calls pass no label=, so legend() has nothing to
# show and matplotlib will warn -- confirm whether labels were intended.
plt.legend()
# Histogram of the population values using the ids list as bin edges.
plt.hist(population,ids,histtype="bar",rwidth=8)
plt.bar(ids,population)
plt.xlabel("Ids")
plt.ylabel("population")
plt.title("Population chart")
plt.legend()
| path_of_ML/Matplotlib/Histogram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
# All ORM models inherit from this declarative base class.
Base = declarative_base()
# Define our pet table
class Pet(Base):
    """ORM model mapped to the 'pet' table (one row per pet)."""
    __tablename__ = 'pet'
    # NOTE: `id` and `type` shadow builtins inside the class namespace;
    # conventional in SQLAlchemy models, but avoid relying on them as builtins here.
    id = Column(Integer, primary_key=True)
    name = Column(String)
    type = Column(String)
    age = Column(Integer)
# Right now, this table only exists in python and not in the actual database
Base.metadata.tables
# Create our database engine
engine = create_engine('sqlite:///pets.sqlite')
# This is where we create our tables in the database
Base.metadata.create_all(engine)
# The ORM’s “handle” to the database is the Session.
from sqlalchemy.orm import Session
session = Session(engine)
# ## Create Data
# Note that adding to the session does not update the table. It queues up those queries.
session.add(Pet(name='<NAME>', type='snek', age=2))
session.add(Pet(name='<NAME>', type='good boy', age=10))
session.add(Pet(name='Godzilla', type='iguana', age=1))
session.add(Pet(name='Marshmallow', type='polar bear', age=4))
# The data hasn't been added yet
engine.execute('select * from pet').fetchall()
# We can use the new attribute to see the queue of data ready to go into the database
session.new
# commit() flushes whatever remaining changes remain to the database, and commits the transaction.
session.commit()
# Nothing new to add
session.new
# query the database
session.query(Pet.name, Pet.type, Pet.age).all()
# ## Update Data
# Create a query and then run update on it
pet = session.query(Pet).filter_by(name="Marshmallow").first()
pet.age += 1
# For modifications, we can use the dirty attribute
session.dirty
# Commit Transaction
session.commit()
# Session is up-to-date
session.dirty
session.query(Pet.id, Pet.name, Pet.type, Pet.age).all()
# ## Delete Data
# Create a query and then delete the row collected
pet = session.query(Pet).filter_by(id=4).delete()
session.commit()
session.query(Pet.id, Pet.name, Pet.type, Pet.age).all()
| 2/Activities/03-Ins_Basic_Updating/Solved/Ins_Basic_Updating.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Process visualization data
#
# This notebook processes the CovidCareMap US Healthcare System Capacity data into the format used with the `viz/heathcare-system-capacity` visualization.
#
# It also move the HGHI and ventilator into the `viz/hghi-vents` app.
# +
import shutil
import os
import json
import numpy as np
from covidcaremap.data import (published_data_dir,
data_dir,
processed_data_dir,
processed_data_path)
# -
# ### Data for US Health System Capacity app
# +
# Destination directories inside the visualization app's data folder.
viz_data_dir = os.path.join(data_dir(), '../viz/us-healthcare-system-capacity/data')
viz_input_dir = os.path.join(viz_data_dir, 'input')
viz_config_dir = os.path.join(viz_data_dir, 'config')
# !mkdir -p $viz_input_dir
# !mkdir -p $viz_config_dir
# (src, dst) pairs: published GeoJSON layers go to input/, processed
# break-point configs go to config/.
data_paths = [
    (os.path.join(published_data_dir(), x), os.path.join(viz_input_dir, x))
    for x in [
        'us_healthcare_capacity-county-CovidCareMap.geojson',
        'us_healthcare_capacity-facility-CovidCareMap.geojson',
        'us_healthcare_capacity-hrr-CovidCareMap.geojson',
        'us_healthcare_capacity-state-CovidCareMap.geojson'
    ]
] + [
    (os.path.join(processed_data_dir(), x), os.path.join(viz_config_dir, x))
    for x in [
        'ccm_county_breaks.json',
        'ccm_state_breaks.json',
        'ccm_hrr_breaks.json',
        'ccm_facility_breaks.json'
    ]
]
# -
# Copy each (src, dst) pair; GeoJSON files get their NaN property values
# rewritten to null on the way through.
for src, dst in data_paths:
    print('Copying {} to {}'.format(src, dst))
    # Tippecanoe doesn't read NaN values, so convert to nulls
    if src.endswith('.geojson'):
        # json.load/json.dump stream from/to the file handle directly
        # instead of round-tripping through an intermediate string.
        with open(src) as f:
            gj = json.load(f)
        for feat in gj['features']:
            props = feat['properties']
            for prop, v in props.items():
                # json.load only produces builtin floats, so isinstance
                # is the idiomatic (and subclass-safe) check here.
                if isinstance(v, float) and np.isnan(v):
                    props[prop] = None
        with open(dst, 'w') as f:
            json.dump(gj, f, indent=2)
    else:
        shutil.copy(src, dst)
# Run the app's tile-building script over the copied data (Jupyter shell magic).
# !cd $viz_data_dir && ./process.sh
# ### Data for Ventilator Supply and Healthcare Capacity Map, by State app
# Copy the processed HGHI + ventilator GeoJSON into the hghi-vents app.
hghi_vents_data_dir = os.path.join(data_dir(), '../viz/hghi-vents/public')
hghi_vents_data = processed_data_path('hghi_state_data_with_vents.geojson')
# !cp $hghi_vents_data $hghi_vents_data_dir
| notebooks/processing/07_Process_visualization_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# 我感觉该题的题目描述有很多迷惑人的地方,比如可以删除中间字符这样的描述很容易就让人想偏了,
# 对于该题主要有两个大的分支:
# 1.字符串长度不相等:在这种情况下,因为字符串本身就是自己的子序列,且较长的字符串肯定不能是短的字符串的子序列,所以此时较长字符串的长度肯定就是结果
# 2.字符串长度相等: 此时又分为两种情况,第一个为两个字符串完全相同,输出-1,另一个两个字符串不相同,则此时最长子序列即字符串本身肯定不可能和另一个字符串一样,所以输出字符串长度。
#
# 作者:object123
# 链接:https://leetcode-cn.com/problems/longest-uncommon-subsequence-i/solution/zui-chang-zi-xu-lie-jian-dan-fen-xi-by-object123/
# 来源:力扣(LeetCode)
# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
# -
class Solution:
    def findLUSlength(self, a: str, b: str) -> int:
        """Length of the longest uncommon subsequence between a and b.

        If a == b, every subsequence of one is a subsequence of the other,
        so no uncommon subsequence exists and the answer is -1. Otherwise
        the longer string (or either string, when lengths tie but contents
        differ) is itself uncommon, so the answer is max(len(a), len(b)).
        This collapses the original's redundant length-comparison branches
        into a single expression.
        """
        return -1 if a == b else max(len(a), len(b))
| String/1228/521. Longest Uncommon Subsequence I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project: Reucurrent Neural Network
# - A project on weather predictin on time series data
# ### Step 1: Import libraries
import tensorflow as tf
import os
import pandas as pd
import numpy as np
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
# %matplotlib inline
# ### Step 2: Download dataset
# - Execute the cell below
# Download (and cache) the Jena climate dataset; extract=True unpacks the
# zip archive next to the cached download.
zip_path = tf.keras.utils.get_file(
    origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
    fname='jena_climate_2009_2016.csv.zip',
    extract=True)
# Dropping the '.zip' suffix yields the path of the extracted csv file.
csv_path, _ = os.path.splitext(zip_path)
# ### Step 3: Read the data
# - Use Pandas [read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) method to read **csv_path** (from step 2).
# - Also **parse_dates=True** and **index_col=0**
# parse_dates + index_col=0 turn the first (timestamp) column into a DatetimeIndex.
data = pd.read_csv(csv_path, parse_dates=True, index_col=0)
data.head()
len(data)
# ### Step 4: Limit dataset
# - The dataset has metrics for every 10 minutes - we will limit it to only once per hour
# - HINT: **data[5::6]** will start at 5 and step 6.
# - **a[start:stop:step]** start through not past stop, by step
# Keep every 6th row (starting at offset 5): 10-minute samples -> hourly samples.
data = data[5::6]
len(data)
data.head()
# ### Step 5: Investigate data
# - Call **corr()** on the data to see correlations
# - Inspect what columns are correlated and not
data.corr()
# ### Step 6: Remove data
# - Potential some data could be transformed **'wv (m/s)', 'max. wv (m/s)', 'wd (deg)'**
# - We will ignore it
# Drop the raw wind columns (per the Step 6 note, they could be transformed instead).
df = data.drop(['wv (m/s)', 'max. wv (m/s)', 'wd (deg)'], axis=1)
# ### Step 7: Add periodic time intervals
# - Temperature is correlated to the time of day - e.g. it is warmer at mid day than at mid night
# - Temperature is correlated to seasons (most places in the world) - e.g. it is warmer in summer than in winter
# - The datetime index is not easy for the model to interpret, hence we can transform it into sinus and cosinus curves based on day and year.
# - Do it like this
# - Assign the dataframe index to a variable, say, **timestamp_s**
# - Transform that by using **map(pd.Timestamp.timestamp)**
# - Use the period **day =** $24\times 60 \times 60$ and **year =** $(365.2425)\times$**day**
# - Make the following columns **'Day sin', 'Day cos', 'Year sin'**, and **'Year cos'** as follows:
# - e.g. **df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))**
timestamp_s = df.index
# Convert the DatetimeIndex to seconds since the epoch.
timestamp_s = timestamp_s.map(pd.Timestamp.timestamp)
timestamp_s
df.index
# Period lengths in seconds.
day = 24 * 60 * 60
year = (365.2425) * day
# Sin/cos pairs give the model a smooth, cyclic encoding of time-of-day and time-of-year.
df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))
df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day))
df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))
df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))
df.corr()
# ### Step 8: Splitting data
#
# #### About splitting
# If you want to build a solid model you have to follow that specific protocol of splitting your data into three sets: One for training, one for validation and one for final evaluation, which is the test set.
#
# The idea is that you train on your training data and tune your model with the results of metrics (accuracy, loss etc) that you get from your validation set.
#
# Your model doesn't "see" your validation set and isn't in any way trained on it, but you as the architect and master of the hyperparameters tune the model according to this data. Therefore it indirectly influences your model because it directly influences your design decisions. You nudge your model to work well with the validation data, which can introduce a subtle bias.
#
# #### What to do?
# - Use the length of data and split it into
# - 70% for training
# - 20% for validation
# - 10% for testing set
# Split chronologically: first 70% train, next 20% validation, final 10% test.
n_rows = len(df)
train_end = int(n_rows * .7)
val_end = int(n_rows * .9)
train_df = df[:train_end]
val_df = df[train_end:val_end]
test_df = df[val_end:]
# ### Step 9: Normalize data
# - Only normalize data based on training data
# - Notice you should only normalize the training data - because validation and test data could affect the normalization
# - Get the mean and standard deviation of the data
# - HINT: Use **.mean()** and **.std()** on the dataframe.
# - Normalize the data as follows
# - **train_df = (train_df - train_mean) / train_std** (assuming naming fits)
# - HINT: The transformation of validation and test data is done similarly with **train_mean** and **train_std**.
# Normalisation statistics come from the training split only, so no
# information from the validation/test sets leaks into training.
train_mean = train_df.mean()
train_std = train_df.std()
train_df, val_df, test_df = [
    (split - train_mean) / train_std for split in (train_df, val_df, test_df)
]
# ### Step 10: Create datasets
# <img src='img/data_windowing.png' width=600 align='left'>
# - Make a function with **input_width** and **offset** - assume we always use **label_width=1**.
# - Call the function **create_dataset**, which takes arguments **df, input_width=24, offset=0, predict_column='T (degC)'**
# - Let it create two empty lists **x** and **y**
# - Convert the dataframe **df** to numpy and assign it to **data_x**
# - Do the same for the **predict_column** but assign it to **data_y**
# - Iterate over the range of starting from **input_width** to **len(data_x) - offset**
# - Append to **x** with **data_x[i-input_width:i,:]**
# - Append to **y** with **data_y[i + offset]**
# - Convert **x** and **y** to numpy arrays
# - HINT: Use **np.array(...)**
# - Return the **x** and **y** (but reshape y with **reshape(-1, 1)**)
# - Apply the function on training, validation, and test data
def create_dataset(df, input_width:int=24, offset:int=0, predict_column:str='T (degC)'):
    """Slice a dataframe into (windows, targets) pairs for sequence models.

    Each sample is a window of `input_width` consecutive rows over all
    feature columns; its target is the value of `predict_column` located
    `offset` rows after the end of the window.

    Returns a tuple (x, y) where x has shape
    (n_windows, input_width, n_features) and y has shape (n_windows, 1).
    """
    feature_matrix = df.to_numpy()
    target_series = df[predict_column].to_numpy()
    window_ends = range(input_width, len(feature_matrix) - offset)
    x = np.array([feature_matrix[end - input_width:end, :] for end in window_ends])
    y = np.array([target_series[end + offset] for end in window_ends])
    return x, y.reshape(-1, 1)
# Build the supervised (x, y) pairs for each split.
train_ds = create_dataset(train_df)
val_ds = create_dataset(val_df)
test_ds = create_dataset(test_df)
# x has shape (n_windows, 24, n_features).
train_ds[0].shape
# ### Step 11: Create model
# - Create the following model
# - **model = models.Sequential()**
# - **model.add(layers.LSTM(32, return_sequences=True, input_shape=train_ds[0].shape[1:]))**
# - **model.add(layers.Dense(units=1))**
model = models.Sequential()
# return_sequences=True emits an output for every time step, so the Dense
# head below produces one prediction per step of the 24-hour window.
model.add(layers.LSTM(32, return_sequences=True, input_shape=train_ds[0].shape[1:]))
model.add(layers.Dense(units=1))
# ### Step 12: Train model
# - Compile and fit the model
# - Complie the model as follows
# - **model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])**
# - Fit the model as follows
# - **model.fit(x=train_ds[0], y=train_ds[1], validation_data=(val_ds[0], val_ds[1]), epochs=10)**
# NOTE(review): 'accuracy' is not a meaningful metric for a regression
# target trained with MSE — consider 'mae' instead.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
model.fit(x=train_ds[0], y=train_ds[1], validation_data=(val_ds[0], val_ds[1]), epochs=10)
# ### Step 13: Predict data
# - Apply the model on the test data
# - HINT: Use **model.predict(x)**, where **x** is assigned to the test data.
x, y = test_ds
# Predictions are per time step (3-D); only the last step (-1) of each
# window is compared against the target below.
y_pred = model.predict(x)
y_pred.shape
# ### Step 14: Plot the result
# - Plot a window of the data predicted together with the actual data.
# - One way:
# - **fig, ax = plt.subplots()**
# - **ax.plot(y[i:i+96*2,0], c='g')**
# - **ax.plot(pred[i:i+96*2,-1,0], c='r')**
# - It will plot a window of 96 hours, where you can index with **i** (**i=150** as an example) and **y** is the real values and **pred** are the predicted values
fig, ax = plt.subplots()
i = 200  # start index of the plotted 192-hour (96*2) window
ax.plot(y[i:i+96*2,0], c='g')
ax.plot(y_pred[i:i+96*2,-1,0], c='r')
# ### Step 15 (Optional): Calculate the correlation
# - Create a dataframe with real and predicted values.
# - Apply the **.corr()** method on the dataframe.
# +
df_c = pd.DataFrame({'real': y[:,0], 'pred': y_pred[:, -1,0]})
df_c.corr()
# -
| Machine Learning With Python/jupyter/final/10 - Project - Recurrent Neural Network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# default_exp search
# -
# # Search
#
# > Functions related to the search
# This notebook contains all functions related to searching and getting peptide-spectrum-matches (PSMs). When searching, we compare how similar an experimental spectrum is to a theoretical spectrum. As described in the FASTA notebook, we can calculate theoretical fragment masses for a given peptide sequence and get theoretical spectra. Typically, we calculate a database with all possible spectra, save it to disk, and then compare our experimental data. This allows re-using the database and saving time for this computational step. If the database is too large to be saved on disk, it can instead be generated on the fly, keeping only a subset of all FASTA entries in memory and processing them batch by batch.
# +
#hide
import warnings
from numba import NumbaPendingDeprecationWarning
# Silence numba's pending-deprecation warnings (presumably triggered by
# passing plain Python lists into @njit functions below — verify).
warnings.simplefilter("ignore", category=NumbaPendingDeprecationWarning)
# -
# ## Comparing spectra
#
# To efficiently compare two spectra, we use a pointer-based approach. We start with two sorted arrays: the `query_frag` array contains the m/z positions of the experimental query spectrum, and the `db_frag` array contains the database fragments that are compared against. The two pointers compare each m/z position with each other and check whether they are within a certain tolerance `frag_tol`. Depending on their delta, either of the pointers is advanced. The function returns an array named `hits` that is the same length as the database spectrum and encodes the hit positions.
# +
#export
import logging
from numba import njit
import numpy as np
@njit
def compare_frags(query_frag: np.ndarray, db_frag: np.ndarray, frag_tol: float, ppm:bool=False) -> np.ndarray:
    """Compare query and database fragments and find hits.
    Walks both mass arrays with two pointers, so `query_frag` and `db_frag`
    must both be sorted in ascending order.
    Args:
        query_frag (np.ndarray): Sorted array with query fragment masses.
        db_frag (np.ndarray): Sorted array with database fragment masses.
        frag_tol (float): Fragment tolerance for search.
        ppm (bool, optional): Use ppm as unit or Dalton. Defaults to False.
    Returns:
        np.ndarray: Array of len(db_frag); entry d holds the 1-based index of
            the query fragment matching db fragment d (0 means no match).
    """
    q_max = len(query_frag)
    d_max = len(db_frag)
    # NOTE(review): int16 caps storable query indices at 32767.
    hits = np.zeros(d_max, dtype=np.int16)
    q, d = 0, 0 # q > query, d > database
    while q < q_max and d < d_max:
        mass1 = query_frag[q]
        mass2 = db_frag[d]
        delta_mass = mass1 - mass2
        if ppm:
            # Relative difference in parts-per-million w.r.t. the mean mass.
            sum_mass = mass1 + mass2
            mass_difference = 2 * delta_mass / sum_mass * 1e6
        else:
            mass_difference = delta_mass
        if abs(mass_difference) <= frag_tol:
            hits[d] = q + 1 # Save query position +1 (zero-indexing)
            d += 1
            q += 1 # Only one query for each db element
        elif delta_mass < 0:
            # Query mass is below the current db mass: advance the query pointer.
            q += 1
        elif delta_mass > 0:
            # Query mass is above the current db mass: advance the db pointer.
            d += 1
    return hits
# +
import numpy as np

# Toy example: the query masses 200 and 300 fall within the tolerance of the
# second and third database fragments, so those two positions report a hit.
example_query = np.array([100, 200, 300, 400])
example_db = np.array([150, 200, 300, 450])
compare_frags(example_query, example_db, frag_tol=1)
# +
#hide
def test_compare_frags():
    """Unit checks for compare_frags: exact, boundary and duplicate matches."""
    tol = 1
    base = np.array([100, 200, 300, 400])

    def n_hits(query, db, t=tol):
        return np.sum(compare_frags(query, db, t, ppm=False) > 0)

    # Self-comparison: every fragment matches itself.
    assert n_hits(base, base.copy()) == len(base)
    # Shifted up, still inside the tolerance window.
    assert n_hits(base, base + tol - 0.01) == len(base)
    # Shifted down, still inside the tolerance window.
    assert n_hits(base, base - tol + 0.01) == len(base)
    # Shifted beyond the tolerance: nothing matches.
    assert n_hits(base, base + tol + 0.01) == 0
    # Boundary case: db contains only the first and last query mass.
    assert n_hits(base, np.array([100, 400])) == 2
    # Two query masses competing for the same db fragment: only one hit.
    assert n_hits(np.array([100, 100.5]), np.array([100, 200, 300])) == 1
    # Two db fragments competing for the same query mass: only one hit.
    assert n_hits(np.array([100, 200, 300]), np.array([100, 100.5])) == 1

test_compare_frags()
# -
# This function allows us to easily compare a query spectrum against a spectrum from a theoretical database.
# +
import matplotlib.pyplot as plt
from alphapept import constants
from alphapept.fasta import get_frag_dict, parse
import alphapept.io
import numpy as np

# %matplotlib inline

peptide = 'PEPTIDE'

# Theoretical spectrum: every fragment drawn with a dummy intensity of 100.
frag_dict = get_frag_dict(parse(peptide), constants.mass_dict)
db_frag = sorted(frag_dict.values())
db_int = [100] * len(db_frag)

# Experimental spectrum, dummy data.
query_frag = np.array([98.06, 227.10, 263.08, 548.06, 653.31])
query_int = np.array([20, 80, 30, 30, 50])

hits = compare_frags(query_frag, db_frag, frag_tol=1)
matched = hits[hits > 0] - 1  # 0-based query positions of the matches

plt.figure(figsize=(10,5))
plt.vlines(db_frag, 0, db_int, "k", label="DB", alpha=0.2)
plt.vlines(query_frag, 0, query_int, "r", label="Query", alpha=0.5)
plt.plot(query_frag[matched], query_int[matched], "ro", label="Hit", alpha=0.5)

# Annotate each theoretical fragment with its ion name.
for ion_name, ion_mass in frag_dict.items():
    plt.text(ion_mass, 104, ion_name, fontsize=12, alpha=0.8)

plt.title('Theoretical Spectrum for {}'.format(peptide))
plt.xlabel('Mass')
plt.ylabel('Intensity')
plt.legend()
plt.ylim([0,110])
plt.show()
# -
# ## Comparing Spectra
#
# To compare multiple spectra against a database, we first need some helper functions. First, we need a conversion function to convert from Dalton masses to ppm, which is implemented in the `ppm_to_dalton` function.
#
# To minimize the search space, we typically only compare spectra with precursors in the same mass range as defined by `prec_tol`. To look up the limits for search, we define the function `get_idxs`, which is a wrapper to the fast `searchsorted` method from `NumPy`.
#
# The actual search takes place in `compare_spectrum_parallel`, which utilizes the performance decorator from the performance notebook. Here we save the top matching spectra for each query spectrum. Note that for code compilation reasons, the code of the previously defined function `compare_frags` is duplicated in here.
# +
#export
@njit
def ppm_to_dalton(mass:float, prec_tol:int)->float:
    """Convert a ppm tolerance into an absolute tolerance in Dalton.
    One ppm of `mass` is mass / 1e6; the window is that unit times the
    requested number of ppm.
    Args:
        mass (float): Base mass.
        prec_tol (int): Tolerance in ppm.
    Returns:
        float: Tolerance in Dalton.
    """
    one_ppm = mass / 1e6
    return one_ppm * prec_tol
# +
#hide
def test_ppm_to_dalton():
    """20 ppm of a 200 Da mass is 0.004 Da."""
    expected = 0.004
    assert ppm_to_dalton(200, 20) == expected

test_ppm_to_dalton()
# +
#export
def get_idxs(db_masses:np.ndarray, query_masses:np.ndarray, prec_tol:float, ppm:bool)-> (np.ndarray, np.ndarray):
    """Locate, for every query mass, the slice of the sorted database masses
    that falls inside the precursor tolerance window.
    Args:
        db_masses (np.ndarray): Sorted array containing database masses.
        query_masses (np.ndarray): Array containing query masses.
        prec_tol (float): Precursor tolerance for search.
        ppm: Flag to use ppm instead of Dalton.
    Returns:
        (np.ndarray, np.ndarray): Indices to lower and upper bounds.
    """
    offset = ppm_to_dalton(query_masses, prec_tol) if ppm else prec_tol
    lower = np.searchsorted(db_masses, query_masses - offset, side="left")
    upper = np.searchsorted(db_masses, query_masses + offset, side="right")
    return lower, upper
# +
#hide
def test_get_idxs():
    """Boundary indices for a small db/query pair with 10 ppm tolerance."""
    lower, upper = get_idxs(np.array([0, 1, 2, 3]), np.array([1, 2, 3, 4]), 10, True)
    assert np.allclose(lower, [1, 2, 3, 4])
    assert np.allclose(upper, [2, 3, 4, 4])

test_get_idxs()
# +
#export
import alphapept.performance
@alphapept.performance.performance_function
def compare_spectrum_parallel(query_idx:int, query_masses:np.ndarray, idxs_lower:np.ndarray, idxs_higher:np.ndarray, query_indices:np.ndarray, query_frags:np.ndarray, query_ints:np.ndarray, db_indices:np.ndarray, db_frags:np.ndarray, best_hits:np.ndarray, score:np.ndarray, frag_tol:float, ppm:bool):
    """Compares a spectrum and writes to the best_hits and score.
    The score of a candidate is the number of matched fragments plus the
    matched fraction of the total query intensity. Duplicates the
    two-pointer loop of `compare_frags` for compilation reasons.
    NOTE(review): the top-n loop iterates over best_hits.shape[1] columns and
    writes score[query_idx, i] for each, so `score` and `best_hits` must have
    the same number of columns (as allocated in get_psms).
    Args:
        query_idx (int): Integer to the query_spectrum that should be compared.
        query_masses (np.ndarray): Array with query masses. (Not read inside
            this function.)
        idxs_lower (np.ndarray): Array with indices for lower search boundary.
        idxs_higher (np.ndarray): Array with indices for upper search boundary.
        query_indices (np.ndarray): Array with indices to the query data.
        query_frags (np.ndarray): Array with frag types of the query data.
        query_ints (np.ndarray): Array with fragment intensities from the query.
        db_indices (np.ndarray): Array with indices to the database data.
        db_frags (np.ndarray): Array with frag types of the db data.
        best_hits (np.ndarray): Reporting array which stores indices to the best hits.
        score (np.ndarray): Reporting array that stores the scores of the best hits.
        frag_tol (float): Fragment tolerance for search.
        ppm (bool): Flag to use ppm instead of Dalton.
    """
    # Candidate db entries and this query's fragment slice.
    idx_low = idxs_lower[query_idx]
    idx_high = idxs_higher[query_idx]
    query_idx_start = query_indices[query_idx]
    query_idx_end = query_indices[query_idx + 1]
    query_frag = query_frags[query_idx_start:query_idx_end]
    query_int = query_ints[query_idx_start:query_idx_end]
    # Manual loop instead of .sum(); presumably required by the cuda target.
    query_int_sum = 0
    for qi in query_int:
        query_int_sum += qi
    for db_idx in range(idx_low, idx_high):
        db_idx_start = db_indices[db_idx]
        db_idx_next = db_idx +1
        db_idx_end = db_indices[db_idx_next]
        db_frag = db_frags[db_idx_start:db_idx_end]
        q_max = len(query_frag)
        d_max = len(db_frag)
        hits = 0
        q, d = 0, 0 # q > query, d > database
        while q < q_max and d < d_max:
            mass1 = query_frag[q]
            mass2 = db_frag[d]
            delta_mass = mass1 - mass2
            if ppm:
                sum_mass = mass1 + mass2
                mass_difference = 2 * delta_mass / sum_mass * 1e6
            else:
                mass_difference = delta_mass
            if abs(mass_difference) <= frag_tol:
                # One point per matched fragment plus its intensity fraction.
                hits += 1
                hits += query_int[q]/query_int_sum
                d += 1
                q += 1 # Only one query for each db element
            elif delta_mass < 0:
                q += 1
            elif delta_mass > 0:
                d += 1
        len_ = best_hits.shape[1]
        for i in range(len_):
            if score[query_idx, i] < hits:
                # This is meant to report the hit in our top-n array
                # The code below looks weird but is necessary to be used with cuda
                # It should be equivalent to this code:
                #score_slice = score[query_idx, i:(len_-1)]
                #hit_slice = best_hits[query_idx, i:(len_-1)]
                #score[query_idx, (i+1):len_] = score_slice
                #best_hits[query_idx, (i+1):len_] = hit_slice
                j = 1
                while len_-j >= (i+1):
                    k = len_-j
                    score[query_idx, k] = score[query_idx, k-1]
                    best_hits[query_idx, k] = best_hits[query_idx, k-1]
                    j+=1
                score[query_idx, i] = hits
                best_hits[query_idx, i] = db_idx
                break
# +
#hide
def test_compare_spectrum_parallel():
    """Smoke test: three of the four query spectra have a db partner.

    NOTE(review): this test previously looked broken (see the CI TODO on the
    commented-out call below) for two reasons, both fixed here:
    * `query_indices`/`db_indices` lacked the leading 0, so processing
      spectrum 3 read `query_indices[4]` past the end of a 4-element array.
    * `score` had 4 columns while `best_hits` had 5; the kernel loops over
      `best_hits.shape[1]` columns and writes `score[query_idx, i]` for each,
      indexing past the end of `score`.
    """
    query_masses = np.array([300, 400, 500, 600])
    db_masses = np.array([300, 400, 500, 700])
    query_idxs = np.arange(len(query_masses))
    # Boundary arrays: spectrum i owns the slice indices[i]:indices[i+1].
    query_indices = np.array([0, 4, 8, 12, 16])
    query_frags = np.array([1, 2, 3, 4] * 4)
    query_ints = query_frags.copy()
    db_indices = query_indices.copy()
    db_frags = query_frags.copy()
    idxs_lower, idxs_higher = get_idxs(db_masses, query_masses, 10, True)
    # score and best_hits must share the same top-n width (see the kernel).
    top_n = 5
    best_hits = np.zeros((len(query_masses), top_n), dtype=np.int_) - 1
    score = np.zeros((len(query_masses), top_n), dtype=np.float_)
    frag_tol = 20
    ppm = True
    compare_spectrum_parallel(query_idxs, query_masses, idxs_lower, idxs_higher, query_indices, query_frags, query_ints, db_indices, db_frags, best_hits, score, frag_tol, ppm)
    # Queries 0-2 match db entries 0-2; query 3 (600) has no db partner (700).
    query_idx, db_idx = np.where(score > 1)
    sortindex = np.argsort(query_idx)
    query_idx = query_idx[sortindex]
    db_idx = db_idx[sortindex]
    assert np.allclose(query_idx, np.array([0, 1, 2]))
    assert np.allclose(db_idx, np.array([0, 0, 0]))

#test_compare_spectrum_parallel() #TODO: this causes a bug in the CI
# -
# ## Wrapper
#
# To conveniently perform peptide-spectrum matches on multiple datasets we define a wrapper `get_psms` that returns the PSMs when handing over `query_data` and `db_data`.
# +
#export
import pandas as pd
import logging
from alphapept.fasta import read_database
def query_data_to_features(query_data: dict)->pd.DataFrame:
    """Helper function to extract features from query data.
    Used when the feature finder is not run: every MS2 spectrum becomes one
    feature row, sorted by precursor mass.
    Args:
        query_data (dict): Data structure containing the query data.
    Returns:
        pd.DataFrame: Pandas dataframe so that it can be used for subsequent
            processing.
    """
    masses = query_data['prec_mass_list2']
    mzs = query_data['mono_mzs2']
    rts = query_data['rt_list_ms2']
    features = pd.DataFrame(
        np.array([masses, mzs, rts]).T,
        columns = ['mass_matched', 'mz_matched', 'rt_matched'],
    )
    features['feature_idx'] = features.index  # Index to query_data
    features['query_idx'] = np.arange(len(masses))
    return features.sort_values('mass_matched', ascending=True)
# +
#hide
def test_query_data_to_features():
    """The helper must add feature_idx and query_idx columns."""
    query_data = pd.DataFrame(
        {'prec_mass_list2': [100, 200, 300],
         'mono_mzs2': [100, 200, 300],
         'rt_list_ms2': [1, 2, 3]}
    )
    features = query_data_to_features(query_data)
    assert 'feature_idx' in features.columns
    assert 'query_idx' in features.columns

test_query_data_to_features()
# +
#export
from typing import Callable
#this wrapper function is covered by the quick_test
def get_psms(
    query_data: dict,
    db_data: dict,
    features: pd.DataFrame,
    parallel: bool,
    frag_tol: float,
    prec_tol: float,
    ppm: bool,
    min_frag_hits: int,
    callback: Callable = None,
    prec_tol_calibrated:float = None,
    frag_tol_calibrated:float = None,
    **kwargs
)->(np.ndarray, int):
    """Search all query spectra against the database and collect PSMs.

    For every query spectrum, database entries whose precursor mass lies
    within the precursor tolerance are scored against the query fragments;
    the top-5 candidates per query are kept and all candidates scoring above
    `min_frag_hits` are reported.

    Args:
        query_data (dict): Data structure containing the query data.
        db_data (dict): Database data (dict of arrays), or a path to a
            database file readable with `read_database`.
        features (pd.DataFrame): Pandas dataframe containing feature data, or
            None to search on the raw MS2 precursor masses.
        parallel (bool): Flag to use parallel processing. Not read here; the
            compilation mode of `compare_spectrum_parallel` decides.
        frag_tol (float): Fragment tolerance for search.
        prec_tol (float): Precursor tolerance for search.
        ppm (bool): Flag to use ppm instead of Dalton.
        min_frag_hits (int): Minimum score (frag hits) to report a PSM.
        callback (Callable, optional): Optional callback. Defaults to None.
            Not used in this function.
        prec_tol_calibrated (float, optional): Precursor tolerance if calibration exists. Defaults to None.
        frag_tol_calibrated (float, optional): Fragment tolerance if calibration exists. Defaults to None.
    Returns:
        np.ndarray: Numpy recordarray storing the PSMs
            (fields: query_idx, db_idx, hits).
        int: 0
    """
    if isinstance(db_data, str):
        # A string is interpreted as a path to an on-disk database.
        db_masses = read_database(db_data, array_name = 'precursors')
        db_frags = read_database(db_data, array_name = 'fragmasses')
        db_indices = read_database(db_data, array_name = 'indices')
    else:
        db_masses = db_data['precursors']
        db_frags = db_data['fragmasses']
        db_indices = db_data['indices']
    query_indices = query_data["indices_ms2"]
    query_frags = query_data['mass_list_ms2']
    query_ints = query_data['int_list_ms2']
    # NOTE(review): truthiness check — a calibrated tolerance of exactly 0
    # would be silently ignored.
    if frag_tol_calibrated:
        frag_tol = frag_tol_calibrated
    if features is not None:
        if prec_tol_calibrated:
            prec_tol = prec_tol_calibrated
            query_masses = features['corrected_mass'].values
        else:
            query_masses = features['mass_matched'].values
        query_mz = features['mz_matched'].values
        query_rt = features['rt_matched'].values
        query_selection = features['query_idx'].values
        # Rebuild the boundary index array for the selected (possibly
        # repeated) spectra so it matches the concatenated fragment arrays.
        indices = np.zeros(len(query_selection) + 1, np.int64)
        indices[1:] = np.diff(query_indices)[query_selection]
        indices = np.cumsum(indices)
        query_frags = np.concatenate(
            [
                query_frags[s: e] for s, e in zip(
                    query_indices[query_selection], query_indices[query_selection + 1]
                )
            ]
        )
        query_ints = np.concatenate(
            [
                query_ints[s: e] for s, e in zip(
                    query_indices[query_selection], query_indices[query_selection + 1]
                )
            ]
        )
        query_indices = indices
    else:
        if prec_tol_calibrated:
            prec_tol = prec_tol_calibrated
        query_masses = query_data['prec_mass_list2']
        query_mz = query_data['mono_mzs2']
        query_rt = query_data['rt_list_ms2']
    # Precursor-window boundaries into the sorted database.
    idxs_lower, idxs_higher = get_idxs(
        db_masses,
        query_masses,
        prec_tol,
        ppm
    )
    n_queries = len(query_masses)
    n_db = len(db_masses)
    top_n = 5
    if alphapept.performance.COMPILATION_MODE == "cuda":
        # Move all search arrays to the GPU; `cupy` doubles as the array
        # namespace below so the numpy path shares the same code.
        import cupy
        cupy = cupy
        idxs_lower = cupy.array(idxs_lower)
        idxs_higher = cupy.array(idxs_higher)
        query_indices = cupy.array(query_indices)
        query_ints = cupy.array(query_ints)
        query_frags = cupy.array(query_frags)
        db_indices = cupy.array(db_indices)
        db_frags = cupy.array(db_frags)
    else:
        import numpy
        cupy = numpy
    # best_hits and score must share the same top-n width (the kernel loops
    # over best_hits.shape[1] and writes both arrays per column).
    best_hits = cupy.zeros((n_queries, top_n), dtype=cupy.int_)-1
    score = cupy.zeros((n_queries, top_n), dtype=cupy.float_)
    logging.info(f'Performing search on {n_queries:,} query and {n_db:,} db entries with frag_tol = {frag_tol:.2f} and prec_tol = {prec_tol:.2f}.')
    # The second argument (query_masses) is unused by the kernel; an index
    # range is passed in its place.
    compare_spectrum_parallel(cupy.arange(n_queries), cupy.arange(n_queries), idxs_lower, idxs_higher, query_indices, query_frags, query_ints, db_indices, db_frags, best_hits, score, frag_tol, ppm)
    query_idx, db_idx_ = cupy.where(score > min_frag_hits)
    db_idx = best_hits[query_idx, db_idx_]
    score_ = score[query_idx, db_idx_]
    if cupy.__name__ != 'numpy':
        # Transfer results back from the GPU.
        query_idx = query_idx.get()
        db_idx = db_idx.get()
        score_ = score_.get()
    psms = np.array(
        list(zip(query_idx, db_idx, score_)), dtype=[("query_idx", int), ("db_idx", int), ("hits", float)]
    )
    logging.info('Found {:,} psms.'.format(len(psms)))
    return psms, 0
# -
# ## Extracting columns for scoring
#
# The basic fragment comparison only counts the number of hits and matched intensity fraction when comparing a theoretical spectrum to an experimental one. Based on this metric, we can drastically reduce the number of candidates one wants to analyze for an in-depth comparison, which requires additional features. The following section describes several functions which extract parameters to compare spectrum matches better.
# ### Frag Delta
#
# `frag_delta` subtracts the experimental fragment masses from the theoretical fragment masses for each hit.
#export
@njit
def frag_delta(query_frag:np.ndarray, db_frag:np.ndarray, hits:np.ndarray)-> (float, float):
    """Calculates the mass difference (db minus query) for the reported hits,
    in Dalton and in ppm.
    Args:
        query_frag (np.ndarray): Array with query fragments.
        db_frag (np.ndarray): Array with database fragments.
        hits (np.ndarray): Array with reported hits (1-based query indices).
    Returns:
        float: Fragment deltas in Dalton.
        float: Fragment deltas in ppm.
    """
    matched = hits > 0
    db_masses = db_frag[matched]
    query_masses = query_frag[hits[matched] - 1]
    delta_m = db_masses - query_masses
    delta_m_ppm = 2 * delta_m / (db_masses + query_masses) * 1e6
    return delta_m, delta_m_ppm
# +
#hide
def test_frag_delta():
    """frag_delta reports db - query mass differences for matched fragments."""
    tol = 10
    query = np.array([100, 200, 300, 400])
    db = np.array([101, 202, 303, 404])
    matches = compare_frags(query, db, tol, ppm=False)
    delta_m, _ = frag_delta(query, db, matches)
    # Per-hit deltas are 1, 2, 3, 4 -> their sum is 10.
    assert np.sum(delta_m) == 10

test_frag_delta()
# -
# ### Intensity Fraction
# `intensity_fraction` calculates the fraction of matched intensity. This refers to the intensity of all hits compared to the intensity of all peaks in the query spectrum.
#export
@njit
def intensity_fraction(query_int:np.ndarray, hits:np.ndarray)->float:
    """Calculate the fraction of matched intensity.
    Args:
        query_int (np.ndarray): Array with query intensities.
        hits (np.ndarray): Array with reported hits (1-based query indices).
    Returns:
        float: Fraction of the matched intensity to the total intensity
            (0 for an all-zero spectrum).
    """
    total = np.sum(query_int)
    if total == 0:
        return 0
    matched = np.sum(query_int[hits[hits > 0] - 1])
    return matched / total
# +
#hide
def test_intensity_fraction():
    """Matched peaks (100 and 300) carry 10 + 30 of the total 100 intensity."""
    tol = 1
    query = np.array([100, 200, 300, 400])
    db = np.array([100, 300, 500, 700])
    intensities = np.array([10, 20, 30, 40])
    matches = compare_frags(query, db, tol, ppm=False)
    assert intensity_fraction(intensities, matches) == 40 / 100

test_intensity_fraction()
# -
# ### File Format
#
# To have an efficient data format to store PSMs in the search. We use `numpy`-recarrays and define the utility functions `add_column` and `remove_column` to append and remove data.
# +
#export
from numpy.lib.recfunctions import append_fields, drop_fields
def add_column(recarray:np.ndarray, column:np.ndarray, name:str)->np.ndarray:
    """Return a record array with `column` stored under `name`.
    An existing field of the same name is replaced.
    Args:
        recarray (np.ndarray): NumPy record array (or plain structured array).
        column (np.ndarray): Data column that should be added to the record array.
        name (str): Name of the column in the new recordarray.
    Returns:
        np.ndarray: NumPy recordarray with new field.
    """
    # The previous `hasattr(recarray, name)` check only detects fields on
    # np.recarray (whose fields are attributes) and can also false-positive
    # on ndarray attributes like 'T' or 'size'. Plain structured arrays
    # (e.g. the PSMs built with np.array(..., dtype=[...])) slipped through
    # and made append_fields fail on the duplicate field name. Inspecting
    # the dtype covers every case.
    if recarray.dtype.names is not None and name in recarray.dtype.names:
        recarray = drop_fields(recarray, name, usemask=False, asrecarray=True)
    recarray = append_fields(
        recarray, name, column, dtypes=column.dtype, usemask=False, asrecarray=True
    )
    return recarray
def remove_column(recarray:np.ndarray, name:str)->np.ndarray:
    """Function to remove a column from a record array.
    A no-op when the field does not exist.
    Args:
        recarray (np.ndarray): NumPy record array (or plain structured array).
        name (str): Column name of the column to be removed.
    Returns:
        np.ndarray: NumPy record array with removed column.
    """
    # Check the dtype instead of `hasattr`: attribute lookup only works for
    # np.recarray, so existing fields on plain structured arrays were never
    # dropped (and ndarray attributes like 'T' could false-positive).
    if recarray.dtype.names is not None and name in recarray.dtype.names:
        recarray = drop_fields(recarray, name, usemask=False, asrecarray=True)
    return recarray
# +
#hide
def test_rec_funs():
    """Adding and then removing a column round-trips the record array."""
    base = np.array([1,2,3], dtype=[('x',float)])
    extended = add_column(base, np.array([4,5,6]), 'y')
    assert np.allclose(extended['y'], np.array([4,5,6]))
    roundtrip = remove_column(extended, 'y')
    assert np.all(base == roundtrip)

test_rec_funs()
# -
# ## Extracting features for scoring
#
# ### Indices
#
# When performing a database search, we need to know which experimental spectrum we compare with what database entry.
# We distinguish three indices:
#
# * query_idx
# * raw_idx
# * feature_idx
#
# Initially, the get_psms function accepts experimental data in the form of `query_data`. Here, the `query_idx` refers to the index to `query_data`. However, this might not be the same index as the raw data. This is due to the implementation of the matching of MS1-features to MS2 spectra. Here we allow multiple matches and implement this by repeating the respective spectrum.
#
# We then add the two columns `feature_idx` and `raw_idx` to the PSMs to later be able to distinguish where the match originated. In this case, `raw_idx` refers to the original spectrum.
#
# When not applying feature finding, `raw_idx` and `query_idx` are equivalent.
#
#
# ### Features
#
# In the `score`-function we use the pre-filtered PSMs to extract additional columns for scoring such as the offset from theoretical to experimental precursor or the number of b- and y-ion hits.
#export
from numba.typed import List
@njit
def get_hits(query_frag:np.ndarray, query_int:np.ndarray, db_frag:np.ndarray, db_int:np.ndarray, frag_type:np.ndarray, mtol:float, ppm:bool, losses:list)-> np.ndarray:
    """Function to extract the types of hits based on a single PSMs.
    Runs one `compare_frags` pass per loss offset and stacks all matches.
    The reporting array stores information about the matched fragment_ions column wise:
    Column 0: Type of the ion.
    Column 1: Ion-index refering to what ion type was matched.
    Column 2: Intensity of the matched ion.
    Column 3: Intensity of the database ion.
    Column 4: Experimental mass of the ion.
    Column 5: Theoretical mass of the ion.
    Column 6: Index to the query_frag of the ion.
    Column 7: Index to the database_frag of the ion.
    NOTE(review): the array is allocated with 9 columns but only columns 0-7
    are ever written; column 8 stays zero.
    Args:
        query_frag (np.ndarray): Array with query fragments.
        query_int (np.ndarray): Array with query intensities.
        db_frag (np.ndarray): Array with database fragments.
        db_int (np.ndarray): Array with database intensities.
        frag_type (np.ndarray): Array with fragment types.
        mtol (float): Mass tolerance.
        ppm (bool): Flag to use ppm instead of Dalton.
        losses (list): List of losses (mass offsets subtracted from db fragments).
    Returns:
        np.ndarray: NumPy array that stores ion information.
    """
    # Upper bound: every db fragment could match once per loss offset.
    max_array_size = len(db_frag)*len(losses)
    fragment_ions = np.zeros((max_array_size, 9))
    pointer = 0
    query_range = np.arange(len(query_frag))
    db_range = np.arange(len(db_frag))
    for idx, off in enumerate(losses):
        hits = compare_frags(query_frag, db_frag-off, mtol, ppm)
        n_hits = np.sum(hits>0)
        hitpos = hits[hits > 0] - 1
        hit = hits > 0
        fragment_ions[pointer:pointer+n_hits,0] = frag_type[hits>0] #type
        fragment_ions[pointer:pointer+n_hits,1] = idx #ion-index
        fragment_ions[pointer:pointer+n_hits,2] = query_int[hitpos] #query int
        fragment_ions[pointer:pointer+n_hits,3] = db_int[hit] #db int
        fragment_ions[pointer:pointer+n_hits,4] = query_frag[hitpos] #query mass
        fragment_ions[pointer:pointer+n_hits,5] = db_frag[hit]-off # db mass
        fragment_ions[pointer:pointer+n_hits,6] = query_range[hitpos] # index to query entry
        fragment_ions[pointer:pointer+n_hits,7] = db_range[hit] # index to db entry
        pointer += n_hits
    # Trim the unused tail of the pre-allocated buffer.
    fragment_ions = fragment_ions[:pointer,:]
    return fragment_ions
# +
#hide
def test_get_hits():
    """Check that get_hits reports matching ions with the correct type,
    loss index and database position."""
    tolerance = 1
    use_ppm = True
    spectrum_mz = np.array([100, 200, 300, 400])
    spectrum_int = np.array([10, 20, 30, 40])
    theo_mz = np.array([100, 300, 500, 700])
    theo_int = np.array([10, 20, 30, 40])
    ion_types = np.array([1, -1, 1, -1])
    matches = get_hits(spectrum_mz, spectrum_int, theo_mz, theo_int, ion_types, tolerance, use_ppm, [0])
    # Only the first two database fragments (100, 300) have query counterparts.
    assert np.allclose(matches[:2, 0], [1.0, -1.0])  # ion type column
    assert np.allclose(matches[:2, 1], [0, 0])       # loss index column
    assert np.allclose(matches[:2, 7], [0, 1])       # db fragment index column
test_get_hits()
# +
#export
from alphapept import constants
# Neutral-loss mass offsets shared across the search code. LOSS_DICT maps the
# loss name to its mass; LOSSES is the plain array of offsets handed to
# get_hits (index 0 corresponds to "no loss" — see the hits_* columns below).
LOSS_DICT = constants.loss_dict
LOSSES = np.array(list(LOSS_DICT.values()))
#This function is a wrapper and is tested by the quick_test
@njit
def score(
    psms: np.recarray,
    query_masses: np.ndarray,
    query_masses_raw: np.ndarray,
    query_frags: np.ndarray,
    query_ints: np.ndarray,
    query_indices: np.ndarray,
    db_masses: np.ndarray,
    db_frags: np.ndarray,
    frag_types: np.ndarray,
    mtol: float,
    db_indices: np.ndarray,
    ppm: bool,
    psms_dtype: list,
    db_ints: np.ndarray = None,
    parallel: bool = False
) -> (np.ndarray, np.ndarray):
    """Function to extract score columns when giving a recordarray with PSMs.

    For every PSM the matching fragment ions are collected via get_hits and
    summary statistics (precursor offsets, matched-intensity statistics and
    hit counts per ion series / neutral loss) are written to a new record array.

    Args:
        psms (np.recarray): Recordarray containing PSMs.
        query_masses (np.ndarray): Array with query masses.
        query_masses_raw (np.ndarray): Array with raw query masses.
        query_frags (np.ndarray): Array with frag types of the query data.
        query_ints (np.ndarray): Array with fragment intensities from the query.
        query_indices (np.ndarray): Array with indices to the query data.
        db_masses (np.ndarray): Array with database masses.
        db_frags (np.ndarray): Array with fragment masses.
        frag_types (np.ndarray): Array with fragment types.
        mtol (float): Mass tolerance.
        db_indices (np.ndarray): Array with indices to the database array.
        ppm (bool): Flag to use ppm instead of Dalton.
        psms_dtype (list): List describing the dtype of the PSMs record array.
        db_ints (np.ndarray, optional): Array with database intensities. Defaults to None.
        parallel (bool, optional): Flag to use parallel processing. Defaults to False.

    Returns:
        np.recarray: Recordarray containing PSMs with additional columns.
        np.ndarray: NumPy array containing ion information.
    """
    psms_ = np.zeros(len(psms), dtype=psms_dtype)
    ions_ = List()  # numba typed list: one fragment-ion array per PSM
    ion_count = 0
    for i in range(len(psms)):
        query_idx = psms[i]["query_idx"]
        db_idx = psms[i]["db_idx"]
        # Fragment block of this query spectrum (CSR-style start/end indexing).
        query_idx_start = query_indices[query_idx]
        query_idx_end = query_indices[query_idx + 1]
        query_frag = query_frags[query_idx_start:query_idx_end]
        query_int = query_ints[query_idx_start:query_idx_end]
        # Fragment block of the matched database entry.
        db_frag = db_frags[db_indices[db_idx]:db_indices[db_idx+1]]
        frag_type = frag_types[db_indices[db_idx]:db_indices[db_idx+1]]
        if db_ints is None:
            db_int = np.ones(len(db_frag))
        else:
            # NOTE(review): indexed by PSM position i, not db_idx — confirm intended.
            db_int = db_ints[i]
        fragment_ions = get_hits(query_frag, query_int, db_frag, db_int, frag_type, mtol, ppm, LOSSES)
        psms_['mass_db'][i] = db_masses[db_idx]
        # Precursor offsets, absolute and in ppm relative to the mean of both masses.
        psms_['prec_offset'][i] = query_masses[query_idx] - db_masses[db_idx]
        psms_['prec_offset_ppm'][i] = 2 * psms_['prec_offset'][i] / (query_masses[query_idx] + db_masses[db_idx] ) * 1e6
        psms_['prec_offset_raw'][i] = query_masses_raw[query_idx] - db_masses[db_idx]
        psms_['prec_offset_raw_ppm'][i] = 2 * psms_['prec_offset_raw'][i] / (query_masses_raw[query_idx] + db_masses[db_idx] ) * 1e6
        # Mean fragment mass error (column 4: experimental, column 5: theoretical).
        # NOTE(review): np.mean over zero matches yields NaN — presumably filtered downstream.
        psms_['delta_m'][i] = np.mean(fragment_ions[:,4]-fragment_ions[:,5])
        psms_['delta_m_ppm'][i] = np.mean(2 * psms_['delta_m'][i] / (fragment_ions[:,4] + fragment_ions[:,5] ) * 1e6)
        psms_['fragments_int_sum'][i] = np.sum(query_int)
        psms_['fragments_matched_int_sum'][i] = np.sum(fragment_ions[:,2])
        psms_['fragments_matched_int_ratio'][i] = psms_['fragments_matched_int_sum'][i] / psms_['fragments_int_sum'][i]
        psms_['fragments_int_ratio'][i] = np.mean(fragment_ions[:,2]/fragment_ions[:,3]) #3 is db_int, 2 is query_int
        # Hit counts per series: column 0 > 0 marks b-ions, < 0 marks y-ions;
        # column 1 holds the loss index (0: none, 1: -H2O, 2: -NH3).
        psms_['hits_b'][i] = np.sum(fragment_ions[fragment_ions[:,1]==0][:,0]>0)
        psms_['hits_y'][i] = np.sum(fragment_ions[fragment_ions[:,1]==0][:,0]<0)
        psms_['hits_b-H2O'][i] = np.sum(fragment_ions[fragment_ions[:,1]==1][:,0]>0)
        psms_['hits_y-H2O'][i] = np.sum(fragment_ions[fragment_ions[:,1]==1][:,0]<0)
        psms_['hits_b-NH3'][i] = np.sum(fragment_ions[fragment_ions[:,1]==2][:,0]>0)
        psms_['hits_y-NH3'][i] = np.sum(fragment_ions[fragment_ions[:,1]==2][:,0]<0)
        n_fragments_matched = len(fragment_ions)
        psms_['n_fragments_matched'][i] = n_fragments_matched
        psms_['fragment_ion_idx'][i] = ion_count  # start offset into the stacked ion array
        ion_count += n_fragments_matched
        fragment_ions[:,8] = i #Save psms index
        ions_.append(fragment_ions)
    return psms_, ions_
# +
#export
from numba.typed import Dict
def get_sequences(psms: np.recarray, db_seqs:np.ndarray)-> np.ndarray:
    """Look up the database sequence for every PSM.

    Args:
        psms (np.recarray): Recordarray containing PSMs (uses the 'db_idx' field).
        db_seqs (np.ndarray): NumPy array containing sequences.

    Returns:
        np.ndarray: NumPy array containing a subset of sequences.
    """
    database_positions = psms["db_idx"]
    return db_seqs[database_positions]
# +
#hide
def test_get_sequences():
    """get_sequences must return the sequences addressed by the 'db_idx' field."""
    psms = np.array([(1.0, 2), (3, 4)], dtype=[('x', '<f8'), ('db_idx', '<i8')])
    database_sequences = np.array(['A', 'B', 'C', 'D', 'E'])
    result = get_sequences(psms, database_sequences)
    assert result.tolist() == ['C', 'E']
test_get_sequences()
# +
#export
from typing import Union
#This function is a wrapper and is tested by the quick_test
def get_score_columns(
    psms: np.recarray,
    query_data: dict,
    db_data: Union[dict, str],
    features: pd.DataFrame,
    parallel:bool,
    frag_tol:float,
    prec_tol:float,
    ppm:bool,
    prec_tol_calibrated:Union[None, float]=None,
    frag_tol_calibrated:float = None,
    **kwargs
) -> (np.ndarray, np.ndarray):
    """Wrapper function to extract score columns.

    Loads the database (from memory or disk), aligns query spectra with the
    detected features, calls the jitted score() and attaches the resulting
    columns plus sequence/feature metadata to the PSM record array.

    Args:
        psms (np.recarray): Recordarray containing PSMs.
        query_data (dict): Data structure containing the query data.
        db_data (Union[dict, str]): Data structure containing the database data or path to database.
        features (pd.DataFrame): Pandas dataframe containing feature data.
        parallel (bool): Flag to use parallel processing.
        frag_tol (float): Fragment tolerance for search.
        prec_tol (float): Precursor tolerance for search.
        ppm (bool): Flag to use ppm instead of Dalton.
        prec_tol_calibrated (Union[None, float], optional): Calibrated offset mass. Defaults to None.
        frag_tol_calibrated (float, optional): Fragment tolerance if calibration exists. Defaults to None.

    Returns:
        np.recarray: Recordarray containing PSMs with additional columns.
        np.ndarray: NumPy array containing ion information.
    """
    logging.info('Extracting columns for scoring.')
    query_indices = query_data["indices_ms2"]
    query_charges = query_data['charge2']
    query_frags = query_data['mass_list_ms2']
    query_ints = query_data['int_list_ms2']
    query_scans = query_data['scan_list_ms2']
    # Prefer the calibrated fragment tolerance when available.
    if frag_tol_calibrated:
        frag_tol = frag_tol_calibrated
    # Bruker data additionally carries precursor ids per spectrum.
    if 'prec_id2' in query_data.keys():
        bruker = True
        query_prec_id = query_data['prec_id2']
    else:
        bruker = False
    # Database is either a path (read arrays from disk) or an in-memory dict.
    if isinstance(db_data, str):
        db_masses = read_database(db_data, array_name = 'precursors')
        db_frags = read_database(db_data, array_name = 'fragmasses')
        db_indices = read_database(db_data, array_name = 'indices')
        frag_types = read_database(db_data, array_name = 'fragtypes')
        try:
            db_ints = read_database(db_data, array_name = 'db_ints')
        except KeyError:
            db_ints = None
    else:
        db_masses = db_data['precursors']
        db_frags = db_data['fragmasses']
        db_indices = db_data['indices']
        frag_types = db_data['fragtypes']
        if 'db_ints' in db_data.keys():
            db_ints = db_data['db_ints']
        else:
            db_ints = None
    if features is not None:
        if prec_tol_calibrated:
            query_masses = features['corrected_mass'].values
        else:
            query_masses = features['mass_matched'].values
        query_masses_raw = features['mass_matched'].values
        query_mz = features['mz_matched'].values
        query_rt = features['rt_matched'].values
        query_charges = features['charge_matched'].values
        query_scans = query_scans[features['query_idx'].values]
        if bruker:
            query_prec_id = query_prec_id[features['query_idx'].values]
        # Rebuild the CSR-style index structure for the feature-selected subset
        # of spectra so that score() can slice fragments per spectrum.
        query_selection = features['query_idx'].values
        indices = np.zeros(len(query_selection) + 1, np.int64)
        indices[1:] = np.diff(query_indices)[query_selection]
        indices = np.cumsum(indices)
        query_frags = np.concatenate(
            [
                query_frags[s: e] for s, e in zip(
                    query_indices[query_selection], query_indices[query_selection + 1]
                )
            ]
        )
        query_ints = np.concatenate(
            [
                query_ints[s: e] for s, e in zip(
                    query_indices[query_selection], query_indices[query_selection + 1]
                )
            ]
        )
        query_indices = indices
    else:
        #TODO: This code is outdated, calling with features = None will crash.
        query_masses = query_data['prec_mass_list2']
        query_masses_raw = query_data['prec_mass_list2']
        query_mz = query_data['mono_mzs2']
        query_rt = query_data['rt_list_ms2']
    # Assemble the dtype of the record array filled by score().
    float_fields = ['mass_db','prec_offset', 'prec_offset_ppm', 'prec_offset_raw','prec_offset_raw_ppm','delta_m','delta_m_ppm','fragments_matched_int_ratio','fragments_int_ratio']
    int_fields = ['fragments_int_sum','fragments_matched_int_sum','n_fragments_matched','fragment_ion_idx'] + [f'hits_{a}{_}' for _ in LOSS_DICT for a in ['b','y']]
    psms_dtype = np.dtype([(_,np.float32) for _ in float_fields] + [(_,np.int64) for _ in int_fields])
    psms_, fragment_ions, = score(
        psms,
        query_masses,
        query_masses_raw,
        query_frags,
        query_ints,
        query_indices,
        db_masses,
        db_frags,
        frag_types,
        frag_tol,
        db_indices,
        ppm,
        psms_dtype)
    # Stack the per-PSM ion arrays into one matrix.
    ions_ = np.vstack(fragment_ions)
    # Merge the freshly computed score columns into the PSM record array.
    for _ in psms_.dtype.names:
        psms = add_column(psms, psms_[_], _)
    rts = np.array(query_rt)[psms["query_idx"]]
    psms = add_column(psms, rts, 'rt')
    if isinstance(db_data, str):
        db_seqs = read_database(db_data, array_name = 'seqs').astype(str)
    else:
        db_seqs = db_data['seqs']
    seqs = get_sequences(psms, db_seqs)
    del db_seqs
    psms = add_column(psms, seqs, "sequence")
    mass = np.array(query_masses)[psms["query_idx"]]
    mz = np.array(query_mz)[psms["query_idx"]]
    charge = np.array(query_charges)[psms["query_idx"]]
    psms = add_column(psms, mass, "mass")
    psms = add_column(psms, mz, "mz")
    psms = add_column(psms, charge, "charge")
    # Precursor label: "<sequence>_<charge>".
    psms = add_column(psms, np.char.add(np.char.add(psms['sequence'],"_"), psms['charge'].astype(int).astype(str)), 'precursor')
    if features is not None:
        psms = add_column(psms, features.loc[psms['query_idx']]['feature_idx'].values, 'feature_idx')
        psms = add_column(psms, features.loc[psms['query_idx']]['query_idx'].values, 'raw_idx')
        # Copy over optional feature-level columns when present.
        for key in ['ms1_int_sum_area','ms1_int_sum_apex','ms1_int_max_area','ms1_int_max_apex','rt_start','rt_apex','rt_end','fwhm','dist','mobility']:
            if key in features.keys():
                psms = add_column(psms, features.loc[psms['query_idx']][key].values, key)
    scan_no = np.array(query_scans)[psms["query_idx"]]
    if bruker:
        psms = add_column(psms, scan_no, "parent")
        psms = add_column(psms, np.array(query_prec_id)[psms["query_idx"]], 'precursor_idx')
        psms = add_column(psms, psms['feature_idx']+1, 'feature_id') #Bruker
    else:
        psms = add_column(psms, scan_no, "scan_no")
    logging.info(f'Extracted columns from {len(psms):,} spectra.')
    return psms, ions_
# -
# ## Plot
# +
#export
import matplotlib.pyplot as plt
def plot_psms(index, ms_file):
    """Plot one identified spectrum (PSM): raw query peaks plus annotated matched ions.

    Args:
        index: Row index into the 'peptide_fdr' table of ms_file.
        ms_file: MS_Data_File-like object providing read() and read_DDA_query_data().
    """
    df = ms_file.read(dataset_name='peptide_fdr')
    # Maps the stored loss index to a label suffix.
    ion_dict = {}
    ion_dict[0] = ''
    ion_dict[1] = '-H20'  # NOTE(review): label reads 'H20' (zero) — probably meant 'H2O'; confirm before changing output
    ion_dict[2] = '-NH3'
    spectrum = df.iloc[index]
    # The PSM stores an offset + count into the stacked fragment_ions table.
    start = spectrum['fragment_ion_idx']
    end = spectrum['n_fragments_matched'] + start
    query_data = ms_file.read_DDA_query_data()
    fragment_ions = ms_file.read(dataset_name="fragment_ions")
    # Positive values become b-ions, negative ones y-ions ('b-3' -> 'y3').
    # NOTE(review): 'ion_index' here holds the signed ion type and
    # 'fragment_ion_type' the loss index — the column names look swapped
    # relative to their content; verify against the ion_columns definition.
    ion = [('b'+str(int(_))).replace('b-','y') for _ in fragment_ions.iloc[start:end]['ion_index']]
    losses = [ion_dict[int(_)] for _ in fragment_ions.iloc[start:end]['fragment_ion_type']]
    ion = [a+b for a,b in zip(ion, losses)]
    ints = fragment_ions.iloc[start:end]['fragment_ion_int'].astype('int').values
    masses = fragment_ions.iloc[start:end]['fragment_ion_mass'].astype('float').values
    fragment_ion_type = fragment_ions.iloc[start:end]['fragment_ion_type'].abs().values
    query_idx = spectrum['raw_idx']
    # Slice the raw fragment block belonging to this spectrum.
    query_indices = query_data["indices_ms2"]
    query_charges = query_data['charge2']
    query_frags = query_data['mass_list_ms2']
    query_ints = query_data['int_list_ms2']
    query_idx_start = query_indices[query_idx]
    query_idx_end = query_indices[query_idx + 1]
    query_frag = query_frags[query_idx_start:query_idx_end]
    query_int = query_ints[query_idx_start:query_idx_end]
    ax = plt.figure(figsize=(15, 5))
    # Raw spectrum in black, matched peaks in red, dotted guides up to the labels.
    plt.vlines(query_frag, 0, query_int, "k", label="Query", alpha=0.5)
    plt.vlines(masses, ints, max(query_int)*(1+0.1*fragment_ion_type), "k", label="Hits", alpha=0.5, linestyle=':')
    plt.vlines(masses, 0, ints, "r", label="Hits", alpha=0.5)
    for i in range(len(masses)):
        plt.text(masses[i], (1+0.1*fragment_ion_type[i])*max(query_int), ion[i])
    figure_title = f"{spectrum['precursor']} - b-hits {spectrum['hits_b']}, y-hits {spectrum['hits_y']}, matched int {spectrum['fragments_matched_int_ratio']*100:.2f} %"
    plt.xlabel("m/z")
    plt.ylabel('Intensity')
    plt.ylim([0, (1+0.1*max(fragment_ion_type)+0.1)*max(query_int)])
    plt.legend()
    plt.title(figure_title)
    plt.show()
# -
# Example usage to plot a psms
# Example usage: plot the first PSM stored in the bundled test file.
ms_file = alphapept.io.MS_Data_File('../testfiles/test.ms_data.hdf')
plot_psms(0, ms_file)
# ## Searching with database
#
# We save intermediate results to hdf5 files
# +
#export
import os
import pandas as pd
import copy
import alphapept.io
import alphapept.fasta
from typing import Callable
#This function is a wrapper and is tested by the quick_test
def store_hdf(df: pd.DataFrame, path: str, key:str, replace:bool=False, swmr:bool = False):
    """Wrapper function to store a DataFrame in an hdf.

    Args:
        df (pd.DataFrame): DataFrame to be stored.
        path (str): Target path of the hdf file.
            NOTE(review): despite the annotation, callers in this file pass an
            alphapept.io.MS_Data_File object (its .file_name attribute is read
            below) — confirm and fix the annotation/docstring upstream.
        key (str): Name of the field to be saved.
        replace (bool, optional): Flag whether the field should be replaced. Defaults to False.
        swmr (bool, optional): Flag to use swmr(single write multiple read)-mode. Defaults to False.
    """
    ms_file = alphapept.io.MS_Data_File(path.file_name, is_overwritable=True)
    if replace:
        ms_file.write(df, dataset_name=key, swmr = swmr)
    else:
        try:
            # Presumably raises for MS_Data_File 'path' objects, falling through
            # to the concat-and-rewrite path below.
            df.to_hdf(path, key=key, append=True)
            #TODO, append is not implemented yet
        except (ValueError, AttributeError):
            try:
                old_df = ms_file.read(dataset_name=key, swmr = swmr)
                new_df = pd.concat([old_df, df])
                ms_file.write(new_df, dataset_name=key, swmr = swmr)
            except KeyError: # File is created new
                ms_file.write(df, dataset_name=key, swmr = swmr)
#This function is a wrapper and is tested by the quick_test
def search_db(to_process:tuple, callback:Callable = None, parallel:bool=False, first_search:bool = True) -> Union[bool, str]:
    """Wrapper function to perform database search to be used by a parallel pool.

    Args:
        to_process (tuple): Tuple containing an index to the file and the experiment settings.
        callback (Callable, optional): Callback function to indicate progress. Defaults to None.
        parallel (bool, optional): Flag to use parallel processing. Defaults to False.
        first_search (bool, optional): Flag to indicate this is the first search. Defaults to True.

    Returns:
        Union[bool, str]: Returns True if the search was successful, otherwise returns a string containing the Exception.
    """
    # Bound before the try so the error handler below can never raise NameError
    # (previously an exception during unpacking masked the real error).
    file_name = None
    try:
        index, settings = to_process
        file_name = settings['experiment']['file_paths'][index]
        base_file_name, ext = os.path.splitext(file_name)
        ms_file = base_file_name+".ms_data.hdf"
        skip = False
        ms_file_ = alphapept.io.MS_Data_File(
            f"{ms_file}"
        )
        if not first_search:
            # Second pass: re-use the mass calibration estimated after the first search.
            try:
                calibration = float(ms_file_.read(group_name = 'features', dataset_name='corrected_mass', attr_name='estimated_max_precursor_ppm'))
                if calibration == 0:
                    logging.info('Calibration is 0, skipping second database search.')
                    skip = True
                else:
                    settings['search']['prec_tol_calibrated'] = calibration*settings['search']['calibration_std_prec']
                    calib = settings['search']['prec_tol_calibrated']
                    logging.info(f"Found calibrated prec_tol with value {calib:.2f}")
            except KeyError as e:
                logging.info(f'{e}')
            try:
                fragment_std = float(ms_file_.read(dataset_name="estimated_max_fragment_ppm")[0])
                skip = False
                settings['search']['frag_tol_calibrated'] = fragment_std*settings['search']['calibration_std_frag']
                calib = settings['search']['frag_tol_calibrated']
                logging.info(f"Found calibrated frag_tol with value {calib:.2f}")
            except KeyError as e:
                logging.info(f'{e}')
        if not skip:
            db_data_path = settings['experiment']['database_path']
            # TODO calibrated_fragments should be included in settings
            query_data = ms_file_.read_DDA_query_data(
                calibrated_fragments=True,
                database_file_name=settings['experiment']['database_path']
            )
            features = ms_file_.read(dataset_name="features")
            psms, num_specs_compared = get_psms(query_data, db_data_path, features, **settings["search"])
            if len(psms) > 0:
                psms, fragment_ions = get_score_columns(psms, query_data, db_data_path, features, **settings["search"])
                if first_search:
                    logging.info('Saving first_search results to {}'.format(ms_file))
                    save_field = 'first_search'
                else:
                    logging.info('Saving second_search results to {}'.format(ms_file))
                    save_field = 'second_search'
                store_hdf(pd.DataFrame(psms), ms_file_, save_field, replace=True)
                ion_columns = ['ion_index','fragment_ion_type','fragment_ion_int','db_int','fragment_ion_mass','db_mass','query_idx','db_idx','psms_idx']
                store_hdf(pd.DataFrame(fragment_ions, columns = ion_columns), ms_file_, 'fragment_ions', replace=True)
            else:
                logging.info('No psms found.')
        logging.info(f'Search of file {file_name} complete.')
        return True
    except Exception as e:
        # file_name stays None if the failure happened before it was resolved.
        logging.error(f'Search of file {file_name} failed. Exception {e}.')
        return f"{e}" #Can't return exception object, cast as string
# -
# ## Searching Large Fasta and or Search Space
# +
#export
from alphapept.fasta import blocks, generate_peptides, add_to_pept_dict
from alphapept.io import list_to_numpy_f32
from alphapept.fasta import block_idx, generate_fasta_list, generate_spectra, check_peptide
from alphapept import constants
mass_dict = constants.mass_dict
import os
import alphapept.performance
#This function is a wrapper and is tested by the quick_test
def search_fasta_block(to_process:tuple) -> (list, int):
    """Search fasta block. This file digests per block and does not use a saved database.
    For searches with big fasta files or unspecific searches.

    Args:
        to_process (tuple): Tuple containing a fasta_index, fasta_block, a list of files and a list of experimental settings.

    Returns:
        list: A list of dataframes when searching the respective file.
        int: Number of new peptides that were generated in this iteration.
        Union[bool, str]: True on success, otherwise the exception text.
    """
    # Defaults bound before the try-block: the return statement below must not
    # raise NameError when the body fails early (e.g. while unpacking to_process).
    fasta_index = None
    psms_container = []
    to_add = []
    success = False
    try:
        fasta_index, fasta_block, ms_files, settings = to_process
        settings_ = settings[0]
        spectra_block = settings_['fasta']['spectra_block']
        to_add = List()
        psms_container = [list() for _ in ms_files]
        f_index = 0
        pept_dict = {}
        # Digest every fasta entry and collect peptides not seen before.
        for element in fasta_block:
            sequence = element["sequence"]
            mod_peptides = generate_peptides(sequence, **settings_['fasta'])
            pept_dict, added_peptides = add_to_pept_dict(pept_dict, mod_peptides, fasta_index+f_index)
            if len(added_peptides) > 0:
                to_add.extend(added_peptides)
            f_index += 1
        if len(to_add) > 0:
            for seq_block in blocks(to_add, spectra_block):
                # Build an in-memory database for this block of peptides.
                spectra = generate_spectra(seq_block, mass_dict)
                precmasses, seqs, fragmasses, fragtypes = zip(*spectra)
                sortindex = np.argsort(precmasses)
                fragmasses = np.array(fragmasses, dtype=object)[sortindex]
                fragtypes = np.array(fragtypes, dtype=object)[sortindex]
                # Flatten the ragged fragment lists into CSR-style arrays.
                lens = [len(_) for _ in fragmasses]
                n_frags = sum(lens)
                frags = np.zeros(n_frags, dtype=fragmasses[0].dtype)
                frag_types = np.zeros(n_frags, dtype=fragtypes[0].dtype)
                indices = np.zeros(len(lens) + 1, np.int64)
                indices[1:] = lens
                indices = np.cumsum(indices)
                #Fill data
                for _ in range(len(indices)-1):
                    start = indices[_]
                    end = indices[_+1]
                    frags[start:end] = fragmasses[_]
                    frag_types[start:end] = fragtypes[_]
                db_data = {}
                db_data["precursors"] = np.array(precmasses)[sortindex]
                db_data["seqs"] = np.array(seqs)[sortindex]
                db_data["fragmasses"] = frags
                db_data["fragtypes"] = frag_types
                db_data["indices"] = indices
                # Search every ms file against this database block.
                for file_idx, ms_file in enumerate(ms_files):
                    query_data = alphapept.io.MS_Data_File(
                        f"{ms_file}"
                    ).read_DDA_query_data(swmr=True)
                    try:
                        features = alphapept.io.MS_Data_File(
                            ms_file
                        ).read(dataset_name="features",swmr=True)
                    except FileNotFoundError:
                        features = None
                    except KeyError:
                        features = None
                    psms, num_specs_compared = get_psms(query_data, db_data, features, **settings[file_idx]["search"])
                    if len(psms) > 0:
                        #This could be sped up..
                        psms, fragment_ions = get_score_columns(psms, query_data, db_data, features, **settings[file_idx]["search"])
                        fasta_indices = [set(x for x in pept_dict[_]) for _ in psms['sequence']]
                        psms_df = pd.DataFrame(psms)
                        psms_df['fasta_index'] = fasta_indices
                        psms_container[file_idx].append(psms_df)
        success = True
    except Exception as e:
        logging.error(f'Search of block {fasta_index} failed. Exception {e}.')
        success = f"{e}"
    return psms_container, len(to_add), success
# +
#export
def filter_top_n(temp:pd.DataFrame, top_n:int = 10)-> pd.DataFrame:
    """Keep only the top_n PSMs per raw index, ranked by hits.

    Fasta indices of rows that share the same sequence are merged into one set
    before deduplication and filtering.

    Args:
        temp (pd.DataFrame): Pandas DataFrame containing PSMs.
        top_n (int, optional): Number of top-n entries to be kept. Defaults to 10.

    Returns:
        pd.DataFrame: Filtered DataFrame.
    """
    temp['temp_idx'] = np.arange(len(temp))
    # Union the fasta indices of all rows sharing a sequence.
    merged_indices = {}
    for seq, fasta_idx in temp[['sequence', 'fasta_index']].values:
        if seq not in merged_indices:
            merged_indices[seq] = set(fasta_idx)
        elif isinstance(fasta_idx, set):
            merged_indices[seq].update(fasta_idx)
        else:
            merged_indices[seq].add(fasta_idx)
    temp['fasta_index'] = [merged_indices[seq] for seq in temp['sequence']]
    deduplicated = temp.drop_duplicates(subset=['raw_idx', 'sequence', 'hits', 'feature_idx'])
    ranked = deduplicated.sort_values('hits', ascending=False)
    return ranked.groupby('raw_idx').head(top_n)
# +
#hide
def test_filter_top_n():
    """filter_top_n must keep only the three highest-scoring entries."""
    psms = pd.DataFrame({
        'sequence': ['A', 'A', 'A', 'A', 'A'],
        'fasta_index': [{1}, {2}, {3}, {4}, {5}],
        'hits': [1, 2, 3, 4, 5],
        'feature_idx': [1, 2, 3, 4, 5],
        'raw_idx': [1, 1, 1, 1, 1],
    })
    kept = filter_top_n(psms, 3)
    assert len(kept) == 3
    assert kept['hits'].tolist() == [5, 4, 3]
test_filter_top_n()
# +
#export
import psutil
import alphapept.constants as constants
from alphapept.fasta import get_fragmass, parse
def ion_extractor(df: pd.DataFrame, ms_file, frag_tol:float, ppm:bool)->(np.ndarray, np.ndarray):
    """Extracts the matched hits (fragment_ions) from a dataframe.

    Re-computes the theoretical fragments for every PSM sequence and matches
    them against the raw query spectra with get_hits.

    Args:
        df (pd.DataFrame): Pandas dataframe containing the results of the first search.
        ms_file : MsFile object used to read the raw DDA query data.
        frag_tol (float): Fragment tolerance for search.
        ppm (bool): Flag to use ppm instead of Dalton.

    Returns:
        np.ndarray: Numpy recordarray storing the PSMs.
        np.ndarray: Numpy recordarray storing the fragment_ions.
    """
    query_data = ms_file.read_DDA_query_data()
    query_indices = query_data["indices_ms2"]
    query_frags = query_data['mass_list_ms2']
    query_ints = query_data['int_list_ms2']
    psms = df.to_records()
    ion_count = 0
    ions_ = List()  # numba typed list collecting one ion array per PSM
    for i in range(len(psms)):
        query_idx = psms[i]["raw_idx"]
        db_idx = psms[i]["db_idx"]  # NOTE(review): read but unused below
        # Fragment window of this spectrum (CSR-style indexing).
        query_idx_start = query_indices[query_idx]
        query_idx_end = query_indices[query_idx + 1]
        query_frag = query_frags[query_idx_start:query_idx_end]
        query_int = query_ints[query_idx_start:query_idx_end]
        seq = psms[i]['sequence']
        # Theoretical fragment masses/types are recomputed from the sequence.
        db_frag, frag_type = get_fragmass(parse(seq), constants.mass_dict)
        db_int = np.ones_like(db_frag)
        fragment_ions = get_hits(query_frag, query_int, db_frag, db_int, frag_type, frag_tol, ppm, LOSSES)
        fragment_ions[:,-1] = i  # last column stores the PSM index
        n_fragments_matched = len(fragment_ions)
        psms['n_fragments_matched'][i] = n_fragments_matched
        psms['fragment_ion_idx'][i] = ion_count  # offset into the stacked array
        ion_count += n_fragments_matched
        ions_.append(fragment_ions)
    ions_ = np.vstack(ions_)
    return psms, ions_
#This function is a wrapper and is tested by the quick_test
def search_parallel(settings: dict, calibration:Union[list, None] = None, fragment_calibration:Union[list, None] = None, callback: Union[Callable, None] = None) -> dict:
    """Function to search multiple ms_data files in parallel.
    This function will additionally calculate fragments and precursor masses from a given FASTA file.

    Args:
        settings (dict): Settings file containing the experimental definitions.
        calibration (Union[list, None], optional): List of calibrated offsets. Defaults to None.
        fragment_calibration (Union[list, None], optional): List of calibrated fragment offsets. Defaults to None.
        callback (Union[Callable, None], optional): Callback function. Defaults to None.

    Returns:
        dict: FASTA dictionary.
    """
    fasta_list, fasta_dict = generate_fasta_list(fasta_paths = settings['experiment']['fasta_paths'], **settings['fasta'])
    fasta_block = settings['fasta']['fasta_block']
    # Derive the .ms_data.hdf path for every raw file.
    ms_file_path = []
    for _ in settings['experiment']['file_paths']:
        base, ext = os.path.splitext(_)
        ms_file_path.append(base + '.ms_data.hdf')
    # Per-file settings copies carrying the calibrated tolerances.
    if calibration:
        custom_settings = []
        for _ in calibration:
            settings_ = copy.deepcopy(settings)
            settings_["search"]["prec_tol_calibrated"] = _
            custom_settings.append(settings_)
    else:
        custom_settings = [settings for _ in ms_file_path]
    if fragment_calibration:
        for idx, _ in enumerate(fragment_calibration):
            custom_settings[idx]["search"]["frag_tol_calibrated"] = _
    logging.info(f"Number of FASTA entries: {len(fasta_list):,} - FASTA settings {settings['fasta']}")
    to_process = [(idx_start, fasta_list[idx_start:idx_end], ms_file_path, custom_settings) for idx_start, idx_end in block_idx(len(fasta_list), fasta_block)]
    # Limit worker count by available memory (~5 GB per process).
    memory_available = psutil.virtual_memory().available/1024**3
    n_processes = int(memory_available // 5 )
    logging.info(f'Setting Process limit to {n_processes}')
    n_processes = alphapept.performance.set_worker_count(
        worker_count=n_processes,
        set_global=False
    )
    n_seqs_ = 0
    df_cache = {}
    failed = []
    to_process_ = []
    with alphapept.performance.AlphaPool(n_processes) as p:
        max_ = len(to_process)
        # Ordered imap (not imap_unordered) so that the loop index i matches
        # to_process[i]; otherwise failed blocks could not be identified for
        # the retry below.
        for i, (psm_container, n_seqs, success) in enumerate(p.imap(search_fasta_block, to_process)):
            n_seqs_ += n_seqs
            logging.info(f'Block {i+1} of {max_} complete - {((i+1)/max_*100):.2f} % - created peptides {n_seqs:,} - total peptides {n_seqs_:,} ')
            for j in range(len(psm_container)): #Temporary hdf files for avoiding saving issues
                output = [_ for _ in psm_container[j]]
                if len(output) > 0:
                    psms = pd.concat(output)
                    if ms_file_path[j] in df_cache:
                        df_cache[ms_file_path[j]] = filter_top_n(pd.concat([df_cache[ms_file_path[j]], psms]))
                    else:
                        df_cache[ms_file_path[j]] = psms
            if callback:
                callback((i+1)/max_)
            # search_fasta_block returns True on success and the exception text
            # (a truthy, non-empty string) on failure — so compare identity with
            # True instead of relying on truthiness.
            if success is not True:
                failed.append(i)
                # Re-queue the original task (was to_process_[i]: IndexError on
                # the still-empty retry list).
                to_process_.append(to_process[i])
    n_failed = len(failed)
    if n_failed > 0:
        ## Retry failed with more memory per process (half the workers).
        n_processes_ = max((1, int(n_processes // 2)))
        logging.info(f'Attempting to rerun failed runs with {n_processes_} processes')
        max_ = n_failed
        with alphapept.performance.AlphaPool(n_processes_) as p:
            for i, (psm_container, n_seqs, success) in enumerate(p.imap(search_fasta_block, to_process_)):
                n_seqs_ += n_seqs
                logging.info(f'Block {i+1} of {max_} complete - {((i+1)/max_*100):.2f} % - created peptides {n_seqs:,} - total peptides {n_seqs_:,} ')
                for j in range(len(psm_container)): #Temporary hdf files for avoiding saving issues
                    output = [_ for _ in psm_container[j]]
                    if len(output) > 0:
                        psms = pd.concat(output)
                        if ms_file_path[j] in df_cache:
                            df_cache[ms_file_path[j]] = filter_top_n(pd.concat([df_cache[ms_file_path[j]], psms]))
                        else:
                            df_cache[ms_file_path[j]] = psms
    # Extract ions for the surviving PSMs and persist per file.
    for idx, _ in enumerate(ms_file_path):
        if _ in df_cache:
            x = df_cache[_]
            ms_file = alphapept.io.MS_Data_File(_)
            x['fasta_index'] = x['fasta_index'].apply(lambda x: ','.join(str(_) for _ in x))
            if 'frag_tol_calibrated' in custom_settings[idx]['search']:
                frag_tol = custom_settings[idx]['search']['frag_tol_calibrated']
            else:
                frag_tol = custom_settings[idx]['search']['frag_tol']
            ppm = custom_settings[idx]['search']['ppm']
            if not calibration:
                save_field = 'first_search'
            else:
                save_field = 'second_search'
            logging.info(f'Saving as {save_field}')
            psms, fragment_ions = ion_extractor(x, ms_file, frag_tol, ppm)
            store_hdf(pd.DataFrame(psms), ms_file, save_field, replace=True)
            ion_columns = ['ion_index','fragment_ion_type','fragment_ion_int','db_int','fragment_ion_mass','db_mass','query_idx','db_idx','psms_idx']
            store_hdf(pd.DataFrame(fragment_ions, columns = ion_columns), ms_file, 'fragment_ions', replace=True)
    #Todo? Callback
    logging.info(f'Complete. Created peptides {n_seqs_:,}')
    return fasta_dict
# -
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import *
notebook2script()
| nbs/05_search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Vlpgcc4XJDQn"
# * CycleGANを用いてピラフからカレーライスへの画像変換を行う
#
# + id="QC-OL61eJNly" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1612522995430, "user_tz": -540, "elapsed": 27375, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06659379309748930560"}} outputId="5c874007-d2a8-4a1c-9144-602dd7ed45d2"
from google.colab import drive
drive.mount('/content/drive')
# + id="LjTnVXUYJY_a"
# Add the project directory to the import path and change into the working directory.
import sys
sys.path.append(f'/content/drive/My Drive/system/')
import os
os.chdir(f'/content/drive/My Drive/system/myanswer')
# + id="Kdyuq9CVwhQD" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1612523000762, "user_tz": -540, "elapsed": 32695, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06659379309748930560"}} outputId="8dd7b55f-a4b4-4ee9-cbd5-72572e153f6d"
# !pip install git+https://www.github.com/keras-team/keras-contrib.git
# + id="VaG7zab7k12_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1612523010751, "user_tz": -540, "elapsed": 42677, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06659379309748930560"}} outputId="989fd797-fa79-44a7-f44b-bc3e3d4fc37f"
# !pip install scipy==1.1.0
# + id="hMXkiZ9vKbgl"
from __future__ import print_function, division
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from glob import glob
from PIL import Image
import datetime
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.misc
# + id="2qRvQjm0fDBc"
class DataGenerator():
    """
    Prepares data for converting images of source_name (A) into images of
    target_name (B): collects the images, splits the A and B images into
    training and test sets, and saves them into the corresponding training
    and test directories.
    """
    def __init__(self, source_name, target_name, dataset_name, img_res=(128, 128)):
        # source_name: folder of domain-A images; target_name: folder of
        # domain-B images; dataset_name: output dataset folder name;
        # img_res: target image resolution.
        self.source_name = source_name
        self.target_name = target_name
        self.dataset_name = dataset_name
        self.img_res = img_res
    def generate_data(self, indices=1000, train_percentage=0.8):
        """
        Collect the images, set up the directory layout used to run CycleGAN
        and save the collected images into those directories.

        indices (int): number of images.
        By default, of the images named by self.target_name:
            train_percentage (80%): training images
            remaining 20%: test images
        """
        os.makedirs('../figure/foodimg128/{}/trainA'.format(self.dataset_name), exist_ok=True)
        os.makedirs('../figure/foodimg128/{}/trainB'.format(self.dataset_name), exist_ok=True)
        os.makedirs('../figure/foodimg128/{}/testA'.format(self.dataset_name), exist_ok=True)
        os.makedirs('../figure/foodimg128/{}/testB'.format(self.dataset_name), exist_ok=True)
        # Collect the paths of the food images named by self.source_name.
        source_image_pathes = glob('../figure/foodimg128/{}/*.jpg'.format(self.source_name))
        # Collect the paths of the food images named by self.target_name.
        target_image_pathes = glob('../figure/foodimg128/{}/*.jpg'.format(self.target_name))
        for index in range(indices):
            source_image = Image.open(source_image_pathes[index])
            target_image = Image.open(target_image_pathes[index])
            if indices * train_percentage <= index < indices:
                source_image.save("../figure/foodimg128/{}/testA/{}.jpg".format(self.dataset_name, index)) # save test data
                target_image.save("../figure/foodimg128/{}/testB/{}.jpg".format(self.dataset_name, index))
            else:
                source_image.save("../figure/foodimg128/{}/trainA/{}.jpg".format(self.dataset_name, index)) # save training data
                target_image.save("../figure/foodimg128/{}/trainB/{}.jpg".format(self.dataset_name, index))
# + id="H55KFwxE7Sv2"
class DataLoader():
    """Load and batch images for CycleGAN training from
    ../figure/foodimg128/<dataset_name>/{trainA,trainB,testA,testB}.

    FIX: the original used scipy.misc.imread/imresize (removed in SciPy >= 1.3)
    and np.float (removed in NumPy >= 1.24).  Reading and resizing are now done
    with Pillow, which this file already imports (`from PIL import Image`).
    """
    def __init__(self, dataset_name, img_res=(128, 128)):
        self.dataset_name = dataset_name
        self.img_res = img_res  # target (rows, cols) resolution
    def _resize(self, img, res):
        """Bilinear-resize an HxWx3 array to `res` = (rows, cols).

        Drop-in replacement for scipy.misc.imresize (removed from SciPy).
        """
        pil_img = Image.fromarray(np.clip(img, 0, 255).astype(np.uint8))
        # PIL's resize takes (width, height) ordering.
        return np.asarray(pil_img.resize((res[1], res[0]), Image.BILINEAR)).astype(float)
    def load_data(self, domain, batch_size=1, is_testing=False):
        """Randomly sample `batch_size` images from domain "A" or "B",
        resized to img_res and scaled to [-1, 1]."""
        data_type = "train%s" % domain if not is_testing else "test%s" % domain
        path = glob('../figure/foodimg128/%s/%s/*.jpg' % (self.dataset_name, data_type))
        batch_images = np.random.choice(path, size=batch_size)
        imgs = []
        for img_path in batch_images:
            img = self.imread(img_path)
            if not is_testing:
                img = self._resize(img, self.img_res)
                if np.random.random() > 0.5:
                    img = np.fliplr(img)  # random horizontal flip (train-time augmentation)
            else:
                img = self._resize(img, self.img_res)
            imgs.append(img)
        imgs = np.array(imgs)/127.5 - 1.
        return imgs
    def load_batch(self, batch_size=1, is_testing=False):
        """Yield (imgs_A, imgs_B) batches covering both domains once per epoch."""
        data_type = "train" if not is_testing else "test"
        path_A = glob('../figure/foodimg128/%s/%sA/*.jpg' % (self.dataset_name, data_type))
        path_B = glob('../figure/foodimg128/%s/%sB/*.jpg' % (self.dataset_name, data_type))
        self.n_batches = int(min(len(path_A), len(path_B)) / batch_size)
        total_samples = self.n_batches * batch_size
        # Sample n_batches * batch_size from each path list so that model sees all
        # samples from both domains
        path_A = np.random.choice(path_A, total_samples, replace=False)
        path_B = np.random.choice(path_B, total_samples, replace=False)
        # NOTE(review): the last batch is skipped (range(n_batches-1)); kept
        # as-is to preserve the original training behavior.
        for i in range(self.n_batches-1):
            batch_A = path_A[i*batch_size:(i+1)*batch_size]
            batch_B = path_B[i*batch_size:(i+1)*batch_size]
            imgs_A, imgs_B = [], []
            for img_A, img_B in zip(batch_A, batch_B):
                img_A = self.imread(img_A)
                img_B = self.imread(img_B)
                img_A = self._resize(img_A, self.img_res)
                img_B = self._resize(img_B, self.img_res)
                if not is_testing and np.random.random() > 0.5:
                    # Flip both domains together so the pairing stays consistent.
                    img_A = np.fliplr(img_A)
                    img_B = np.fliplr(img_B)
                imgs_A.append(img_A)
                imgs_B.append(img_B)
            imgs_A = np.array(imgs_A)/127.5 - 1.
            imgs_B = np.array(imgs_B)/127.5 - 1.
            yield imgs_A, imgs_B
    def load_img(self, path):
        """Load a single image as a (1, rows, cols, 3) array scaled to [-1, 1]."""
        img = self.imread(path)
        img = self._resize(img, self.img_res)
        img = img/127.5 - 1.
        return img[np.newaxis, :, :, :]
    def imread(self, path):
        """Read an image file as an RGB float array.

        Replacement for scipy.misc.imread(path, mode='RGB') (removed from SciPy).
        """
        return np.asarray(Image.open(path).convert('RGB')).astype(float)
# + id="S-GGuWiJFkEe"
class CycleGAN():
    """Unpaired image-to-image translation (CycleGAN) between two food
    domains: A = pilaf, B = curry.

    Two U-Net generators (g_AB: A->B, g_BA: B->A) and two PatchGAN
    discriminators (d_A, d_B) are trained jointly with adversarial,
    cycle-consistency and identity losses.

    NOTE(review): assumes Input, Conv2D, LeakyReLU, UpSampling2D, Dropout,
    Concatenate and InstanceNormalization are imported earlier in the file
    (the latter presumably from keras_contrib) - confirm against the full file.
    """
    def __init__(self):
        # Number of images (for simplicity, assumed to be known in advance)
        self.image_num = 2000
        # Input shape
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # Configure data loader
        # Translate pilaf images into curry images
        self.dataset_name = 'pilaf2curry'
        self.source_name = "pilaf"
        self.target_name = "curry"
        self.data_generator = DataGenerator(source_name=self.source_name,
                                            target_name=self.target_name,
                                            dataset_name=self.dataset_name,
                                            img_res=(self.img_rows, self.img_cols))
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))
        # Calculate output shape of D (PatchGAN): four stride-2 conv layers
        # halve the spatial size 4 times, giving a patch grid of scores.
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)
        # Number of filters in the first layer of G and D
        self.gf = 32
        self.df = 64
        # Loss weights
        self.lambda_cycle = 10.0 # Cycle-consistency loss
        self.lambda_id = 0.1 * self.lambda_cycle # Identity loss
        optimizer = Adam(0.0002, 0.5)
        # Build and compile the discriminators
        self.d_A = self.build_discriminator()
        self.d_B = self.build_discriminator()
        self.d_A.compile(loss='mse',
                         optimizer=optimizer,
                         metrics=['accuracy'])
        self.d_B.compile(loss='mse',
                         optimizer=optimizer,
                         metrics=['accuracy'])
        #-------------------------
        # Construct Computational
        # Graph of Generators
        #-------------------------
        # Build the generators
        self.g_AB = self.build_generator()
        self.g_BA = self.build_generator()
        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # Translate images to the other domain
        fake_B = self.g_AB(img_A)
        fake_A = self.g_BA(img_B)
        # Translate images back to original domain
        reconstr_A = self.g_BA(fake_B)
        reconstr_B = self.g_AB(fake_A)
        # Identity mapping of images
        img_A_id = self.g_BA(img_A)
        img_B_id = self.g_AB(img_B)
        # For the combined model we will only train the generators
        self.d_A.trainable = False
        self.d_B.trainable = False
        # Discriminators determines validity of translated images
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)
        # Combined model trains generators to fool discriminators
        self.combined = Model(inputs=[img_A, img_B],
                              outputs=[ valid_A, valid_B,
                                        reconstr_A, reconstr_B,
                                        img_A_id, img_B_id ])
        self.combined.compile(loss=['mse', 'mse',
                                    'mae', 'mae',
                                    'mae', 'mae'],
                              loss_weights=[ 1, 1,
                                             self.lambda_cycle, self.lambda_cycle,
                                             self.lambda_id, self.lambda_id ],
                              optimizer=optimizer)
    def build_generator(self):
        """U-Net Generator"""
        def conv2d(layer_input, filters, f_size=4):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d
        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            """Layers used during upsampling"""
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            return u
        # Image input
        d0 = Input(shape=self.img_shape)
        # Downsampling
        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)
        # Upsampling (each step concatenates the matching encoder layer
        # as a U-Net skip connection)
        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)
        u4 = UpSampling2D(size=2)(u3)
        # tanh keeps outputs in [-1, 1], matching the input normalization
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)
        return Model(d0, output_img)
    def build_discriminator(self):
        """PatchGAN discriminator: maps an image to a grid of real/fake scores."""
        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d
        img = Input(shape=self.img_shape)
        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)
        return Model(img, validity)
    def generate_dataset(self):
        """Create the train/test partitions unless they already contain
        exactly self.image_num images in total."""
        partitions = ["trainA", "trainB", "testA", "testB"]
        total_images = 0
        for partition in partitions:
            images_each_partition = os.listdir("../figure/foodimg128/{}/{}".format(self.dataset_name, partition))
            total_images += len(images_each_partition)
        if total_images == self.image_num:
            return
        else:
            self.data_generator.generate_data()
    def train(self, epochs, batch_size=1, sample_interval=50):
        """Adversarial training loop.

        epochs (int): number of passes over the training data
        batch_size (int): images per domain per training step
        sample_interval (int): write sample images every N batches
        """
        start_time = datetime.datetime.now()
        # Generate data for cyclegan
        self.generate_dataset()
        # Adversarial loss ground truths (one label per PatchGAN output cell)
        valid = np.ones((batch_size,) + self.disc_patch)
        fake = np.zeros((batch_size,) + self.disc_patch)
        for epoch in range(epochs):
            for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):
                # ----------------------
                # Train Discriminators
                # ----------------------
                # Translate images to opposite domain
                fake_B = self.g_AB.predict(imgs_A)
                fake_A = self.g_BA.predict(imgs_B)
                # Train the discriminators (original images = real / translated = Fake)
                dA_loss_real = self.d_A.train_on_batch(imgs_A, valid)
                dA_loss_fake = self.d_A.train_on_batch(fake_A, fake)
                dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)
                dB_loss_real = self.d_B.train_on_batch(imgs_B, valid)
                dB_loss_fake = self.d_B.train_on_batch(fake_B, fake)
                dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)
                # Total disciminator loss
                d_loss = 0.5 * np.add(dA_loss, dB_loss)
                # ------------------
                # Train Generators
                # ------------------
                # Train the generators (targets: fool D, reconstruct inputs,
                # and keep identity mappings close to the inputs)
                g_loss = self.combined.train_on_batch([imgs_A, imgs_B],
                                                      [valid, valid,
                                                       imgs_A, imgs_B,
                                                       imgs_A, imgs_B])
                elapsed_time = datetime.datetime.now() - start_time
                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    self.sample_images(epoch, batch_i)
                    # Plot the progress
                    print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " \
                          % ( epoch+1, epochs,
                              batch_i+1, self.data_loader.n_batches,
                              d_loss[0], 100*d_loss[1],
                              g_loss[0],
                              np.mean(g_loss[1:3]),
                              np.mean(g_loss[3:5]),
                              np.mean(g_loss[5:6]),
                              elapsed_time))
                # Also sample near the end of each epoch
                elif batch_i + 2 == self.data_loader.n_batches:
                    self.sample_images(epoch, batch_i)
                    # Plot the progress
                    print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " \
                          % ( epoch+1, epochs,
                              batch_i+1, self.data_loader.n_batches,
                              d_loss[0], 100*d_loss[1],
                              g_loss[0],
                              np.mean(g_loss[1:3]),
                              np.mean(g_loss[3:5]),
                              np.mean(g_loss[5:6]),
                              elapsed_time))
    def sample_images(self, epoch, batch_i):
        """Save a 2x3 figure: original / translated / reconstructed for one
        test image from each domain."""
        os.makedirs('../result/%s' % self.dataset_name, exist_ok=True)
        r, c = 2, 3
        imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=True)
        imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=True)
        # Translate images to the other domain
        fake_B = self.g_AB.predict(imgs_A)
        fake_A = self.g_BA.predict(imgs_B)
        # Translate back to original domain
        reconstr_A = self.g_BA.predict(fake_B)
        reconstr_B = self.g_AB.predict(fake_A)
        gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B])
        # Rescale images 0 - 1 (network outputs are in [-1, 1])
        gen_imgs = 0.5 * gen_imgs + 0.5
        titles = ['Original', 'Translated', 'Reconstructed']
        fig, axs = plt.subplots(r, c, figsize=(10, 10))
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i,j].imshow(gen_imgs[cnt])
                axs[i, j].set_title(titles[j])
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("../result/{}/epoch{}_imageid{}.png".format(self.dataset_name, epoch+1, batch_i),
                    transparent=True, dpi=300, bbox_inches="tight", pad_inches=0.0)
        # NOTE(review): near the end of an epoch the same figure is saved a
        # second time under imageid batch_i+2 - presumably deliberate; confirm.
        if batch_i + 2 == self.data_loader.n_batches:
            fig.savefig("../result/{}/epoch{}_imageid{}.png".format(self.dataset_name, epoch+1, batch_i+2),
                        transparent=True, dpi=300, bbox_inches="tight", pad_inches=0.0)
        plt.close()
# + id="gEH_69mvY0M2" colab={"base_uri": "https://localhost:8080/"} outputId="6a6c1ba6-1ba2-4d40-f126-9b4211b5a1ee"
# Build the CycleGAN and train it; sample figures are written every
# `sample_interval` batches and near the end of each epoch.
cyclegan = CycleGAN()
cyclegan.train(epochs=200, batch_size=1, sample_interval=200)
| code/kadai3/cyclegan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bonus: Temperature Analysis I
import pandas as pd
from datetime import datetime as dt
# "tobs" is "temperature observations"
df = pd.read_csv('Resources/hawaii_measurements.csv')
df.head()
# Convert the date column format from string to datetime
df["date"] = pd.to_datetime(df["date"])
df.dtypes
# Set the date column as the DataFrame index
df = df.set_index("date")
df.head()
# Drop the date column
### Not sure why this is requested, since in the next steps we need the date
### column. So I make a copy of the dataframe to use the copy in the next
### set of questions. Date is the index.
# NOTE: df_copy keeps the DatetimeIndex (needed for month filtering below);
# df itself has its index dropped.
df_copy = df.copy()
df.reset_index(drop=True, inplace=True)
df.head()
# ### Compare June and December data across all years
from scipy import stats
# Filter data for desired months
june_filter = (df_copy.index.month == 6)
december_filter = (df_copy.index.month == 12)
june_df = df_copy.loc[june_filter].copy()
december_df = df_copy.loc[december_filter].copy()
june_df.head()
# Identify the average temperature for June (tobs)
june_df.mean()
# Identify the average temperature for December (tobs)
december_df.mean()
# Create collections of temperature data
## remove any null data
# BUG FIX: the original filtered with `isinstance(temp_inst, int)`, but a
# pandas Series yields numpy scalars (np.int64/np.float64), which are not
# instances of the builtin `int`, so valid observations could be dropped.
# `pd.notnull` drops only the missing values, which is the stated intent.
june_list = [temp_inst
             for temp_inst in june_df["tobs"]
             if pd.notnull(temp_inst)
            ]
december_list = [temp_inst
                 for temp_inst in december_df["tobs"]
                 if pd.notnull(temp_inst)
                ]
len(june_list), len(december_list)
# Run paired t-test
# Requires that the lists have the same shape, hence same number of values
### Trim down June list to be same quantity as December list
stats.ttest_rel(june_list[:1517], december_list[:1517])
# ### Analysis
# The average temperature in June for the 9 stations in Hawaii for multiple years is 74F. Average temperature in December for the same 9 stations in Hawaii for multiple years is 71F.
#
# I ran the recommended paired t-test because the same group of station temperatures at different points in time were being compared. Had we used two distinct groups of stations temperatures we would have used unpaired t-tests.
#
# The paired t-test requires arrays of the same size. So the June list count was pared down to match the December list count using an index range. December has 31 days while June has 30 days; one way or another, the two arrays will not match in quantity of samples, even if taking in only June ##, #### sample from station yyyyy only if the sample from December ##, #### from station yyyyy was also taken.
#
# Is there a meaningful difference between the temperature in, for example, June and December? The p-value returned was very small, below 0.05. So we reject the null hypothesis of equal averages, and the difference is statistically significant.
| temp_analysis_bonus_1_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
# These environment flags must be set before TensorFlow is imported.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # CPU only - ShallowNLP needs no GPU
os.environ['DISABLE_V2_BEHAVIOR'] = '1' # disable V2 Behavior - required for NER in TF2 right now
# # **ShallowNLP** Tutorial
#
# The **ShallowNLP** module in *ktrain* is a small collection of text-analytic utilities to help analyze text data in English, Chinese, Russian, and other languages. All methods in **ShallowNLP** are for use on a normal laptop CPU - no GPUs are required. Thus, it is well-suited to those with minimal computational resources and no GPU access.
#
# Let's begin by importing the `shallownlp` module.
from ktrain.text import shallownlp as snlp
# ## SECTION 1: Ready-to-Use Named-Entity-Recognition
#
# **ShallowNLP** includes pre-trained Named Entity Recognition (NER) for English, Chinese, and Russian.
#
# ### English NER
#
# Extracting entities from:
# ><NAME> was head of the Chinese Academy of Medical Sciences and is the current president of Nankai University.
# Load the pre-trained English NER model ('en' = language code)
ner = snlp.NER('en')
text = """
<NAME> was head of the Chinese Academy of Medical Sciences and is
the current president of Nankai University.
"""
ner.predict(text)
# The `ner.predict` method automatically merges tokens by entity. To see the unmerged results, set `merge_tokens=False`:
ner.predict(text, merge_tokens=False)
# The `ner.predict` method typically operates on single sentences, as in the example above. For multi-sentence documents, sentences can be extracted with `snlp.sent_tokenize`:
document = """<NAME> is a great actor. <NAME> is not."""
sents = []
for idx, sent in enumerate(snlp.sent_tokenize(document)):
    sents.append(sent)
    print('sentence #%d: %s' % (idx+1, sent))
ner.predict(sents[0])
ner.predict(sents[1])
# ### Chinese NER
# Extracting entities from the Chinese translation of:
# ><NAME> was head of the Chinese Academy of Medical Sciences and is the current president of Nankai University.
# 'zh' loads the pre-trained Chinese NER model
ner = snlp.NER('zh')
ner.predict('曹雪涛曾任中国医学科学院院长,现任南开大学校长。')
# Discovered entities with English translations:
# - 曹雪涛 = Cao Xuetao (PER)
# - 中国医学科学院 = Chinese Academy of Medical Sciences (ORG)
# - 南开大学 = Nankai University (ORG)
#
# The `snlp.sent_tokenize` can also be used with Chinese documents:
document = """这是关于史密斯博士的第一句话。第二句话是关于琼斯先生的。"""
for idx, sent in enumerate(snlp.sent_tokenize(document)):
    print('sentence #%d: %s' % (idx+1, sent))
# ### Russian NER
# Extracting entities from the Russian translation of:
# ><NAME>, the youngest daughter of Russian President <NAME>, was appointed head of a new artificial intelligence institute at Moscow State University.
# 'ru' loads the pre-trained Russian NER model
ner = snlp.NER('ru')
russian_sentence = """<NAME>, младшая дочь президента России Владимира Путина,
была назначена руководителем нового института искусственного интеллекта в МГУ."""
ner.predict(russian_sentence)
# ## SECTION 2: Text Classification
#
# **ShallowNLP** makes it easy to build a text classifier with minimal computational resources. **ShallowNLP** includes the following sklearn-based text classification models: a non-neural version of [NBSVM](https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf), Logistic Regression, and [Linear SVM with SGD training (SGDClassifier)](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html). Logistic regression is the default classifier. For these examples, we will use [NBSVM](https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf).
#
# A classifier can be trained with minimal effort for both English and Chinese.
#
# ### English Text Classification
#
# We'll use the IMDb movie review dataset [available here](https://ai.stanford.edu/~amaas/data/sentiment/) to build a sentiment analysis model for English.
# IMDb reviews: one folder per class under train/ and test/
datadir = r'/home/amaiya/data/aclImdb'
(x_train, y_train, label_names) = snlp.Classifier.load_texts_from_folder(datadir+'/train')
(x_test, y_test, _) = snlp.Classifier.load_texts_from_folder(datadir+'/test', shuffle=False)
print('label names: %s' % (label_names))
# ctype='nbsvm' selects the NBSVM classifier
clf = snlp.Classifier().fit(x_train, y_train, ctype='nbsvm')
print('validation accuracy: %s%%' % (round(clf.evaluate(x_test, y_test)*100, 2)))
pos_text = 'I loved this movie because it was hilarious.'
neg_text = 'I hated this movie because it was boring.'
print('prediction for "%s": %s (pos)' % (pos_text, clf.predict(pos_text)))
print('prediction for "%s": %s (neg)' % (neg_text, clf.predict(neg_text)))
# ### Chinese Text Classification
#
# We'll use the hotel review dataset [available here](https://github.com/Tony607/Chinese_sentiment_analysis/tree/master/data/ChnSentiCorp_htl_ba_6000) to build a sentiment analysis model for Chinese.
datadir = '/home/amaiya/data/ChnSentiCorp_htl_ba_6000'
(texts, labels, label_names) = snlp.Classifier.load_texts_from_folder(datadir+'/train')
print('label names: %s' % (label_names))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(texts, labels, test_size=0.1, random_state=42)
clf = snlp.Classifier().fit(x_train, y_train, ctype='nbsvm')
print('validation accuracy: %s%%' % (round(clf.evaluate(x_test, y_test)*100, 2)))
pos_text = '我喜欢这家酒店,因为它很干净。' # I loved this hotel because it was very clean.
neg_text = '我讨厌这家酒店,因为它很吵。' # I hated this hotel because it was noisy.
print('prediction for "%s": %s' % (pos_text, clf.predict(pos_text)))
print('prediction for "%s": %s' % (neg_text, clf.predict(neg_text)))
# ### Tuning Hyperparameters of a Text Classifier
#
# The hyperparameters of a particular classifier can be tuned using the `grid_search` method. Let's tune the **C** hyperparameter of a Logistic Regression model to see what is the best value for this dataset.
# +
# setup data
datadir = r'/home/amaiya/data/aclImdb'
(x_train, y_train, label_names) = snlp.Classifier.load_texts_from_folder(datadir+'/train')
(x_test, y_test, _) = snlp.Classifier.load_texts_from_folder(datadir+'/test', shuffle=False)
# initialize a model to optimize
clf = snlp.Classifier()
clf.create_model('logreg', x_train)
# create parameter space for values of C (inverse regularization strength)
parameters = {'clf__C': (1e0, 1e-1, 1e-2)}
# tune on a 5000-sample subset; n_jobs=-1 uses all CPU cores
clf.grid_search(parameters, x_train[:5000], y_train[:5000], n_jobs=-1)
# -
# -
# It looks like a value of `1.0` is best. We can then re-create the model with this hyperparameter value and proceed to train normally:
#
# ```python
# clf.create_model('logreg', x_train, hp_dict={'C':1.0})
# clf.fit(x_train, y_train)
# clf.evaluate(x_test, y_test)
# ```
#
#
# ## SECTION 3: Examples of Searching Text
#
# Here we will show some simple searches over multi-language documents.
#
# +
# Two mixed-language sample documents (English/Russian/Chinese and
# English/Arabic) used by the search examples below.
document1 ="""
Hello there,
Hope this email finds you well.
Are you available to talk about our meeting?
If so, let us plan to schedule the meeting
at the Hefei National Laboratory for Physical Sciences at the Microscale.
As I always say: живи сегодня надейся на завтра
Sincerely,
<NAME>
合肥微尺度国家物理科学实验室
"""
document2 ="""
This is a random document with Arabic about our meeting.
عش اليوم الأمل ليوم غد
Bye for now.
"""
docs = [document1, document2]
# -
# ### Searching English
#
# The `search` function returns a list of documents that match query. Each entry shows:
# 1. the ID of the document
# 2. the query (multiple queries can be supplied in a list, if desired)
# 3. the number of word hits in the document
#
snlp.search(['physical sciences', 'meeting', 'Arabic'], docs, keys=['doc1', 'doc2'])
# ### Searching Chinese
#
# The `search` function returns a list of documents that match query. Each entry shows:
# 1. the ID of the document
# 2. the query
# 3. the number of word hits in the document
#
snlp.search('合肥微尺度国家物理科学实验室', docs, keys=['doc1', 'doc2'])
# For Chinese, the number of word hits is the number of words in the query that appear in the document. Seven of the words in the string 合肥微尺度国家物理科学实验室 were found in `doc1`.
# ### Other Searches
#
# The `search` function can also be used for other languages.
#
# #### Arabic
# Each result is a (doc_id, query, hit_count) tuple.
for result in snlp.search('عش اليوم الأمل ليوم غد', docs, keys=['doc1', 'doc2']):
    print("doc id:%s"% (result[0]))
    print('query:%s' % (result[1]))
    print('# of matches in document:%s' % (result[2]))
# #### Russian
snlp.search('сегодня надейся на завтра', docs, keys=['doc1', 'doc2'])
# #### Extract Chinese, Russian, or Arabic from mixed-language documents
snlp.find_chinese(document1)
snlp.find_russian(document1)
snlp.find_arabic(document2)
| examples/text/shallownlp-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Scikit-Learn (sklearn)
#
# This notebook demonstrates some of the most useful functions of the beautiful Scikit-Learn library
#
# What we're going to cover:
# +
# Let's listify the contents
# FIX: corrected typos in the displayed strings ("And" -> "An",
# "tained" -> "trained").
what_were_covering = [
    "0. An end-to-end Scikit-learn workflow",
    "1. Getting the data ready",
    "2. Choose the right estimator/algorithm for our problems",
    "3. Fit the model/algorithm and use it to make predictions on our data",
    "4. Evaluate a model",
    "5. Improve a model",
    "6. Save and load a trained model",
    "7. Putting it all together!"]
# +
# Standards imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
# %matplotlib inline
# -
# ## 0. An end-to-end Scikit-learn workflow
# +
# 1. Get the data ready
heart_disease = pd.read_csv("../data/heart-disease.csv")
heart_disease
# +
# Create X (features matrix)
X = heart_disease.drop("target", axis=1)
# Create Y (labels)
y = heart_disease["target"]
# +
# Choose the right model and hyperparameters
clf = RandomForestClassifier()
# We'll keep the default hyperparameters
clf.get_params()
# +
# Fit the model to the training data (80/20 train/test split)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# -
clf.fit(X_train, y_train);
X_train
y_preds = clf.predict(X_test)
y_preds
y_test
# +
# 4. Evaluate the model on the training data and test data
clf.score(X_train, y_train)
# -
clf.score(X_test, y_test)
print(classification_report(y_test, y_preds))
confusion_matrix(y_test, y_preds)
accuracy_score(y_test, y_preds)
# +
# 5. Improve a model
# Try different amount of n_estimators
np.random.seed(42)
for i in range(10, 100, 10):
    print(f"Trying model with {i} estimators...")
    clf = RandomForestClassifier(n_estimators=i).fit(X_train, y_train)
    print(f"Model accuracy on test set: {clf.score(X_test, y_test) * 100:.2f}%")
    print("")
# +
# 6. Save a model and load it
# NOTE(review): the file handles from open() are never closed here;
# a `with open(...) as f:` block would be safer.
pickle.dump(clf, open("random_forest_model_1.pkl", "wb"))
# -
loaded_model = pickle.load(open("random_forest_model_1.pkl", "rb"))
loaded_model.score(X_test, y_test)
# ## 1. Getting our data ready to be used with machine learning
#
# Three main things we have to do:
# 1. Split the data into features and labels (usually `X` & `y`)
# 2. Filling (also called imputing) or disregarding missing values
# 3. Converting non-numerical values to numerical values (also called feature encoding)
heart_disease.head()
X = heart_disease.drop("target", axis=1)
X.head()
y = heart_disease["target"]
y.head()
# +
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# -
X_train.shape, X_test.shape, y_train.shape, y_test.shape
| section 2 - data science and machine learning tools/introduction-to-scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BIG DATA ANALYSIS: Matplotlib Basic
# ---
import sys
# !{sys.executable} -m pip install matplotlib
import matplotlib.pyplot as plt
# Magic command to render charts inline in the Jupyter notebook
# %matplotlib inline
import numpy as np
# ### Prepare sample data
# $y = x^2$
#
# 11 evenly spaced points between 0 and 5
x = np.linspace(0,5,11)
# y=x^2
y = x**2
x
y
# ### Drawing charts with Matplotlib
# #### 1. Functional (pyplot state-machine) style
# Easy to use, but awkward for further chart manipulation afterwards
# Plot the x, y data as a chart
plt.plot(x,y)
# Add an x-axis label
# plt.xlabel("X label")
# Add a y-axis label
# plt.ylabel("Y label")
# Add a chart title
# plt.title("title")
# Change the line color
# plt.plot(x,y,'r')
# plt.show()
# Draw several charts at the same time
# (rows, cols, index of this subplot)
plt.subplot(1,2,1)
plt.plot(x,y,'r')
# (rows, cols, index of this subplot)
plt.subplot(1,2,2)
plt.plot(y,x,'b')
# #### 2. Object-oriented style
# The style generally used in practice
# Create a blank figure (canvas) object
fig = plt.figure()
# Add an axes to the figure
axes = fig.add_axes([0.0,0.0,1.0,1.0])
# Draw the chart
axes.plot(x,y)
# Add labels and a title
axes.set_xlabel("X 라벨")
axes.set_ylabel("Y label")
axes.set_title("Title")
plt.show()
# #### 3. Displaying multiple charts
# +
# Create the figure
fig = plt.figure()
# Add axes #1 (left, bottom, width, height)
axes1 = fig.add_axes([0.0,0.0,1.0,1.0])
# Add inset axes #2
axes2 = fig.add_axes([0.1,0.5,0.4,0.4])
# axes1.plot(x,y)
# axes2.plot(y,x)
# +
# Multiple axes (1 row, 2 columns)
fig, axes = plt.subplots(nrows=1,ncols=2)
# Access each axes in turn
for current_ax in axes:
    current_ax.plot(x,y)
# axes[0].plot(x,y)
# axes[0].set_title("Chart 1")
# axes[1].plot(x,y)
# axes[1].set_title("Chart 2")
# -
axes
# +
# Adjust the figure size
fig = plt.figure(figsize=(2,3))
ax = fig.add_axes([0,0,1,1])
ax.plot(x,y)
# +
# Adjusting the figure size with multiple charts
fig, axes = plt.subplots(nrows=2,ncols=1,figsize=(10,4))
axes[0].plot(x,y)
axes[0].set_title("Chart 1")
axes[1].plot(x,y)
axes[1].set_title("Chart 2")
# fig.tight_layout()
# -
# Save the chart as an image file (dpi = dots per inch)
fig.savefig("myfig.jpg",dpi=100)
# Add a legend
fig = plt.figure()
axes = fig.add_axes([0.0,0.0,1.0,1.0])
axes.plot(x,y, label="y=x^2")
axes.plot(y,x, label="y=x^0.5")
axes.set_xlabel("X label")
axes.set_ylabel("Y label")
axes.set_title("Title")
axes.legend(loc=2)
# ## For many more chart examples, see the [Matplotlib gallery](https://matplotlib.org/gallery/index.html)
| practice/week-06/W06_1_Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# Import dependencies
# -
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Check working directory
os.chdir(os.getcwd()+'/images')
# Test image open
# BUG FIX: the original line contained escaped quotes (\".jpg\"), which is a
# SyntaxError in Python source; replaced with plain quotes.
input_paths = [file for file in os.listdir() if file.endswith(".jpg")]
# BUG FIX: cv2.imread takes an imread flag, not a color-conversion code;
# cv2.COLOR_BGR2RGB (value 4) was being interpreted as cv2.IMREAD_ANYCOLOR.
# cv2.IMREAD_COLOR loads a 3-channel BGR image, which is what the
# cvtColor(..., cv2.COLOR_BGR2RGB) display step below expects.
spirochete = cv2.imread(input_paths[0], cv2.IMREAD_COLOR)
plt.figure(figsize=(10,10))
plt.imshow(cv2.cvtColor(spirochete, cv2.COLOR_BGR2RGB))
plt.show()
# Test dimensions
spirochete.shape
# Test crop: take the central 2304x2304 square of a 3456x2304 image,
# then downscale to 960x960
a=int((3456-2304)/2)
b=int(((3456-2304)/2)+2304)
crop_img = spirochete[a:b, 0:2304]
crop_img = cv2.resize(crop_img, (960, 960), interpolation=cv2.INTER_CUBIC)
plt.figure(figsize=(10,10))
plt.imshow(cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB))
plt.show()
# + [markdown] tags=[]
# Double check dimensions. Convert all to 960 by 960
# +
# Center-crop every input image to a 2304x2304 square (using a, b computed
# above) and resize to 960x960, writing the result as <index>.jpg.
# NOTE(review): output files are written into the same directory that was
# listed for inputs, so a re-run would pick them up as inputs - confirm
# whether that is intended.
index = 0
for path in input_paths:
    index = index+1
    # NOTE(review): the second argument should be an imread flag (e.g.
    # cv2.IMREAD_COLOR), not the color-conversion code cv2.COLOR_BGR2RGB.
    img = cv2.imread(path, cv2.COLOR_BGR2RGB)
    print(img.shape)
    crop_img = img[a:b, 0:2304]
    newimg = cv2.resize(crop_img, (960, 960), interpolation=cv2.INTER_CUBIC)
    filename = str(index) + '.jpg'
    cv2.imwrite(filename, newimg)
#for gray scale image
#img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
# -
| 2_image_crop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R 4.0
# language: R
# name: ir40
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Dependencies" data-toc-modified-id="Dependencies-1">Dependencies</a></span></li><li><span><a href="#Paths" data-toc-modified-id="Paths-2">Paths</a></span></li><li><span><a href="#Main" data-toc-modified-id="Main-3">Main</a></span></li></ul></div>
# -
# # Dependencies
# Disable scientific notation in printed output
options(scipen = 999)
library(tidyverse)
# # Paths
manifestpath <- "/Users/anabbi/OneDrive - UHN/Documents/IPD2/Manifests/"
datapath <- "/Users/anabbi/OneDrive - UHN/Documents/IPD2/Data/"
plotpath <- "/Users/anabbi/OneDrive - UHN/Documents/IPD2/Plots/"
# # Main
vcfpath <- "/Users/anabbi/OneDrive - UHN/Documents/ImmPedCan/Finaldata/vcfs_DKFZ/vcfFiles4Arash/"
# All region-filtered somatic VCF files to process
vcflist <- list.files(path = "/Users/anabbi/OneDrive - UHN/Documents/ImmPedCan/Finaldata/vcfs_DKFZ/vcfFiles4Arash",
                      pattern = "CommonRegionFiltered")
# Column layouts differ between SNV rows and indel rows in these files
cols_snv <- c("Chr", "Start", "End", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "File",
              "SEQUENCE_CONTEXT", "INFO_Control", "ANNOTATION_control", "DBSNP", "1K_GENOMES",
              "ANNOVAR_FUNCTION", "GENE", "EXONIC_CLASSIFICATION","ANNOVAR_TRANSCRIPTS", ".")
cols_indel <- c("Chr", "Start", "End", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Allele1", "Allele2",
                "DBSNP", "1K_GENOMES", "ANNOVAR_FUNCTION", "GENE", "EXONIC_CLASSIFICATION", "ANNOVAR_TRANSCRIPTS",
                "Score", "CHROM", ".")
# Columns shared by both layouts that are kept downstream
get_cols <- c("Chr", "Start", "End", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT",
              "ANNOVAR_FUNCTION", "GENE", "EXONIC_CLASSIFICATION", "ANNOVAR_TRANSCRIPTS")
# Peek at the first line of each VCF and drop empty files from the list
tmp <- lapply(vcflist, function(x)readLines(paste0(vcfpath,x), n = 1))
names(tmp) <- vcflist
tmp <- tmp[sapply(tmp, function(x) length(unlist(x))) > 0]
# NOTE(review): ad-hoc inspection of the ET9 file; the result is printed
# and discarded.
read.table(paste0(vcfpath, vcflist[grepl("ET9", vcflist)]), sep = "\t", header = F, stringsAsFactors = F)
# Read each non-empty VCF, split rows into SNVs (exonic annotation in V17)
# and indels, apply the matching column names, and keep only the shared columns
all_vcfs <- lapply(names(tmp),function(f){
    vcffile1 <- read.table(paste0(vcfpath, f), sep = "\t", header = F, stringsAsFactors = F)
    snv <- vcffile1[grepl("exonic",vcffile1$V17),]
    indel <- vcffile1[!grepl("exonic",vcffile1$V17),]
    colnames(snv) <- cols_snv
    colnames(indel) <- cols_indel
    indel_snv <- rbind(snv[,get_cols], indel[, get_cols])
    return(indel_snv)
})
myfilesnames <- names(tmp)
names(all_vcfs) <- gsub("_somatic_SNVs_and_INDELs.CommonRegionFiltered.vcf", "", myfilesnames)
# Combine all samples into one table, tagging each row with its sample name
icgc_vcfs <- do.call("rbind", all_vcfs)
icgc_vcfs$filename <- rep(names(all_vcfs), sapply(all_vcfs, nrow))
rownames(icgc_vcfs) <- NULL
# strip last , from ANNOVAR_TRANSCRIPTS and separate rows to catch all transcripts
icgc_vcfs$ANNOVAR_TRANSCRIPTS <- gsub(",$", "", icgc_vcfs$ANNOVAR_TRANSCRIPTS)
icgc_vcfs_separate = separate_rows(icgc_vcfs, ANNOVAR_TRANSCRIPTS,sep = ",")
head(icgc_vcfs_separate)
# Gene symbol is the text before the first ':' in the transcript annotation
icgc_vcfs_separate$Hugo_Symbol <- gsub(":.*", "", icgc_vcfs_separate$ANNOVAR_TRANSCRIPTS)
head(icgc_vcfs_separate)
colnames(icgc_vcfs_separate)[colnames(icgc_vcfs_separate) == "filename"] <- "Tumor_Sample_Barcode"
# Protein change (HGVSp short form) is the text after the last ':p'
icgc_vcfs_separate$HGVSp_Short <- gsub(".*:p", "p", icgc_vcfs_separate$ANNOVAR_TRANSCRIPTS)
table(icgc_vcfs_separate$EXONIC_CLASSIFICATION, useNA = "always")
icgc_vcfs_separate$variant_classification <- icgc_vcfs_separate$EXONIC_CLASSIFICATION
# Write a minimal MAF-style table for downstream annotation tools
dkfz_maf <- icgc_vcfs_separate[, c("Tumor_Sample_Barcode", "Hugo_Symbol", "HGVSp_Short", "variant_classification")]
head(dkfz_maf)
write.table(dkfz_maf, file = paste0(datapath, "MAF_mat/dkfz_maf.maf"), sep = "\t", row.names = F,
            quote = F)
# make an oncotree table
dkfz_oncotree <- as.data.frame(unique(dkfz_maf$Tumor_Sample_Barcode),
                               stringsAsFactors = F)
colnames(dkfz_oncotree) <- "Tumor_Sample_Barcode"
# +
# Assign a clinical tumour-type label from the sample-barcode prefix
dkfz_oncotree$clinical <- NA
dkfz_oncotree$clinical[ grepl("ICGC_ET",dkfz_oncotree$Tumor_Sample_Barcode)] <- "ETMR"
dkfz_oncotree$clinical[ grepl("ICGC_GBM",dkfz_oncotree$Tumor_Sample_Barcode)] <- "GBM"
dkfz_oncotree$clinical[ grepl("ICGC_PA",dkfz_oncotree$Tumor_Sample_Barcode)] <- "LGG"
dkfz_oncotree$clinical[ grepl("H049",dkfz_oncotree$Tumor_Sample_Barcode)] <- "ATRT"
dkfz_oncotree$clinical[ grepl("ICGC_MB",dkfz_oncotree$Tumor_Sample_Barcode)] <- "MB"
# -
write.table(dkfz_oncotree, file = paste0(datapath, "snv/dkfz_oncotree.txt"),
            sep = "\t", quote = F, row.names = F)
| notebooks/DKFZ_oncokb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from pandas_datareader import data as web
import matplotlib.pyplot as plt

# Daily price history for JSL (ticker JSLG3 on B3/Bovespa) from Yahoo Finance.
# NOTE(review): the 'yahoo' data source needs network access and has been
# unreliable in recent pandas-datareader releases — confirm it still works.
JSL = web.DataReader('JSLG3.SA', data_source='yahoo', start='2000-1-1')

JSL.head()

JSL.tail()

# # Simple Rate of Return
# $$
# \frac{P_1 - P_0}{P_0} = \frac{P_1}{P_0} -1
# $$

# Day-over-day simple return; the first row is NaN (no previous close).
JSL['simple_return'] = (JSL['Adj Close'] / JSL['Adj Close'].shift(1)) - 1

JSL.head()

JSL['simple_return'].plot(figsize=(10, 8))
plt.show()

# Mean daily simple return (the leading NaN is skipped by .mean()).
avg_return = JSL['simple_return'].mean()
avg_return

# ### Trading days in a year vary from 249 - 252
# Annualise using an approximate 250 trading days per year.
avg_return_anual = JSL['simple_return'].mean() * 250
avg_return_anual

# BUG FIX: the original did round(avg_return_anual, 4) * 100, i.e. rounded
# BEFORE scaling to percent, which reintroduces floating-point noise in the
# printed value (e.g. "12.340000000000002 %"). Scale first, then round.
print(str(round(avg_return_anual * 100, 2)) + " %")
| finances_study/.ipynb_checkpoints/Simple Returns-checkpoint.ipynb |
# # Score
import sc3nb as scn

# Boot SuperCollider (sclang + scsynth) and get the session handle.
sc = scn.startup()

from sc3nb import Score, SynthDef

# The Score class can be used for non-realtime synthesis.
#
# * This is done by starting the SuperCollider audio server scsynth in the non-realtime mode.
# * The server will read the provided OSC file and render the sound to the specified sound file.
# * **Note** that this requires sending all needed SynthDefs and Buffers at the beginning. However you can start using the Buffers & SynthDefs immediately after the corresponding OSCMessages as the audio server will handle all messages in the specified order.
#
#
# The `Score.record_nrt` class method provides an easy interface that generates an OSC file from a dict with timings as keys and lists of `OSCMessages` as values.

help(Score.record_nrt)

# Let's create a simple SynthDef for this demonstration: a sine tone that
# decays linearly over 0.5 s and then frees itself (Done.freeSelf).
synthdef = SynthDef(
    "test",
    r"""{ |out, freq = 440|
OffsetOut.ar(out,
SinOsc.ar(freq, 0, 0.2) * Line.kr(1, 0, 0.5, doneAction: Done.freeSelf)
)
}""",
)

# For creating the messages it's recommended to use the Bundler class
with sc.server.bundler(send_on_exit=False) as bundler:
    synthdef.add()  # Send the test SynthDef
    bundler.add(0.0, "/s_new", ["test", 1003, 0, 0, "freq", 440])
    bundler.add(0.2, "/s_new", ["test", 1000, 0, 0, "freq", 440])
    bundler.add(0.4, "/s_new", ["test", 1001, 0, 0, "freq", 660])
    bundler.add(0.6, "/s_new", ["test", 1002, 0, 0, "freq", 220])
    bundler.add(1, "/c_set", [0, 0])  # The /c_set [0, 0] will close the audio file

# The corresponding messages can be seen with
bundler.messages()

# Let's start the non-realtime synthesis
Score.record_nrt(bundler.messages(), "../media/score.osc", "../media/score.wav", header_format="WAV")

# Let's listen to the created audio file with the IPython Audio class that allows to read and play audio files
from IPython.display import Audio

Audio("../media/score.wav")

# Shut the SuperCollider session down again.
sc.exit()
| examples/supercollider-objects/score-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Merlin-sdk
# language: python
# name: Merlin-sdk
# ---
# # XGBoost Sample
# ## Requirements
#
# - Authenticated to gcloud (```gcloud auth application-default login```)
# This notebook demonstrates how to create and deploy an IRIS classifier based on an xgboost model into Merlin.
# Install/refresh the sample's Python dependencies (output silenced).
# !pip install --upgrade -r requirements.txt > /dev/null

import merlin
import warnings
import os
import xgboost as xgb
from merlin.model import ModelType
from sklearn.datasets import load_iris

warnings.filterwarnings('ignore')

# ## 1. Initialize Merlin Resources

# ### 1.1 Set Merlin Server

# Point the merlin SDK at the local Merlin API server.
merlin.set_url("localhost:8080/api/merlin")

# ### 1.2 Set Active Project
#
# `project` represents a project in real life. You may have multiple models within a project.
#
# `merlin.set_project(<project_name>)` will set the active project to the name matched by the argument. You can only set it to an existing project. If you would like to create a new project, please do so from the MLP console at http://localhost:8080/projects/create.

merlin.set_project("sample")

# ### 1.3 Set Active Model
#
# `model` represents an abstract ML model. Conceptually, `model` in Merlin is similar to a class in a programming language. To instantiate a `model` you'll have to create a `model_version`.
#
# Each `model` has a type; currently the model types supported by Merlin are: sklearn, xgboost, tensorflow, pytorch, and user defined model (i.e. pyfunc model).
#
# `model_version` represents a snapshot of a particular `model` iteration. You'll be able to attach information such as metrics and tags to a given `model_version` as well as deploy it as a model service.
#
# `merlin.set_model(<model_name>, <model_type>)` will set the active model to the name given by the parameter; if the model with the given name is not found, a new model will be created.

merlin.set_model("xgboost-sample", ModelType.XGBOOST)
# ## 2. Train Model And Deploy
# ### 2.1 Create Model Version and Upload Model
# `merlin.new_model_version()` is a convenient method to create a model version and start its development process. It is equal to following codes:
#
# ```
# v = model.new_model_version()
# v.start()
# v.log_model(model_dir=model_dir)
# v.finish()
# ```
# +
model_dir = "xgboost-model"
BST_FILE = "model.bst"

# Create a new version of the model; the context manager starts the version
# on entry and finishes (or fails) it on exit.
with merlin.new_model_version() as v:
    # Train a small multi-class xgboost booster on the iris dataset.
    iris = load_iris()
    y = iris['target']
    X = iris['data']
    dtrain = xgb.DMatrix(X, label=y)
    param = {'max_depth': 6,
             'eta': 0.1,
             'silent': 1,
             'nthread': 4,
             'num_class': 10,
             'objective': 'multi:softmax'
             }
    xgb_model = xgb.train(params=param, dtrain=dtrain)
    # Serialise the booster to <model_dir>/model.bst; the directory must
    # already exist — TODO confirm it is part of the sample's checkout.
    model_file = os.path.join((model_dir), BST_FILE)
    xgb_model.save_model(model_file)

    # Upload the serialized model to Merlin
    merlin.log_model(model_dir=model_dir)
# -
# ### 2.2 Deploy Model

# Deploy the logged model version as an HTTP endpoint (blocks until ready).
endpoint = merlin.deploy(v)

# ### 2.3 Send Test Request

# + magic_args="-s \"$endpoint.url\"" language="bash"
# curl -v -X POST $1 -d '{
#   "instances": [
#     [2.8, 1.0, 6.8, 0.4],
#     [3.1, 1.4, 4.5, 1.6]
#   ]
# }'
# -

# ### 2.4 Delete Deployment

# + pycharm={"name": "#%%\n"}
# Tear the endpoint down again so the sample does not leave resources running.
merlin.undeploy(v)
| examples/xgboost/XGBoost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kaggle Titanic survival - learning curves
#
# How much data do we need? Do we have enough?
#
# One simple method is to examine how accuracy increases with training set size. Does accuracy plateau before we hit the limit to how much data we have? In that case, getting more data will not help the model significantly. Is accuracy still increasing as we reach the limit to our data size? If so we would likely benefit from more data, if we can get it.
#
# In this notebook we repeat our basic logistic regression model as previously described:
#
# https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/02_logistic_regression.ipynb
#
# But we will wrap the model in a loop to increase the training set data size (taking a different random training/test split each time, and keeping the test set the same size). We will have an inner loop to perform 10 replicates at each sample size (to reduce the variation in our results)
#
# We will go through the following steps:
#
# * Download and save pre-processed data
# * Split data into features (X) and label (y)
# * Split data into training and test sets (we will test on data that has not been used to fit the model)
# * Standardise data
# * Loop with increasing training set size:
# * Loop through 10 replicates
# * Fit a logistic regression model (from sklearn)
# * Predict survival of the test set
# * Plot the relationship between training set size and accuracy
# ## Load modules
#
# A standard Anaconda install of Python (https://www.anaconda.com/distribution/) contains all the necessary modules.
import numpy as np
import pandas as pd
# Import machine learning methods
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# ## Load data
#
# The section below downloads pre-processed data, and saves it to a subfolder (from where this code is run).
# If data has already been downloaded that cell may be skipped.
#
# Code that was used to pre-process the data ready for machine learning may be found at:
# https://github.com/MichaelAllen1966/1804_python_healthcare/blob/master/titanic/01_preprocessing.ipynb
# +
download_required = True

if download_required:
    # Download processed data:
    address = 'https://raw.githubusercontent.com/MichaelAllen1966/' + \
                '1804_python_healthcare/master/titanic/data/processed_data.csv'

    data = pd.read_csv(address)

    # Create a data subfolder if one does not already exist
    import os
    data_directory ='./data/'
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)

    # Save data
    data.to_csv(data_directory + 'processed_data.csv')
# -

# Always reload from the local copy so reruns don't need the network.
data = pd.read_csv('data/processed_data.csv')

# Make all data 'float' type
data = data.astype(float)

# The first column is a passenger index number. We will remove this, as this is not part of the original Titanic passenger data.

# +
# Drop Passengerid (axis=1 indicates we are removing a column rather than a row)
# We drop passenger ID as it is not original data
data.drop('PassengerId', inplace=True, axis=1)
# -

# ## Divide into X (features) and y (labels)
#
# We will separate out our features (the data we use to make a prediction) from our label (what we are trying to predict).
# By convention our features are called `X` (usually upper case to denote multiple features), and the label (survive or not) `y`.
X = data.drop('Survived',axis=1) # X = all 'data' except the 'survived' column
y = data['Survived'] # y = 'survived' column from 'data'
# ## Standardise data
#
# We want all of out features to be on roughly the same scale. This generally leads to a better model, and also allows us to more easily compare the importance of different features. A common method used in many machine learning methods is standardisation, where we use the mean and standard deviation of the training set of data to normalise the data. We subtract the mean of the test set values, and divide by the standard deviation of the training data. Note that the mean and standard deviation of the training data are used to standardise the test set data as well. Here we will use sklearn's `StandardScaler method`. This method also copes with problems we might otherwise have (such as if one feature has zero standard deviation in the training set).
def standardise_data(X_train, X_test):
    """Standardise features using statistics from the training set only.

    A StandardScaler is fitted on X_train (per-feature mean and standard
    deviation) and then applied to both sets, so no information from the
    test set leaks into the scaling.

    Returns (train_std, test_std) as NumPy arrays.
    """
    scaler = StandardScaler()
    scaler.fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
# ## Calculate the maximum training set size we can use
test_fraction = 0.25 # Use 25% of data for testing
data_rows = X.shape[0]
# Largest training set size available after holding out the test fraction.
max_training_size = int(data_rows * (1 - test_fraction))
print('Max training size: {}'.format(max_training_size))

# ## Loop through increasing training set sizes

# +
# Set up list to collect results
results_training_size = []
results_accuracy = []

# Grow the training set in steps of 10; at each size run 10 replicates
# (fresh random split each time) and record the mean test accuracy.
for train_size in range(10, max_training_size, 10):
    replicate_accuracy = []
    for replicate in range(10):
        # Split data into training and test
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size = test_fraction)

        # Reduce training set size (use np random choice for random index values)
        # NOTE(review): this assumes train_test_split returns exactly
        # max_training_size training rows; if its rounding differs the iloc
        # below could raise IndexError — confirm for this dataset size.
        selection_index = np.random.choice(
            max_training_size, train_size, replace=False)
        X_train = X_train.iloc[selection_index]
        y_train = y_train.iloc[selection_index]

        # Standardise
        X_train_std, X_test_std = standardise_data(X_train, X_test)

        # Fit model
        model = LogisticRegression(solver='lbfgs')
        model.fit(X_train_std,y_train)

        # Predict test set
        y_pred_test = model.predict(X_test_std)

        # Get accuracy and record results
        accuracy = np.mean(y_pred_test == y_test)
        replicate_accuracy.append(accuracy)

    # Store the mean accuracy over the 10 replicates at this training size.
    results_accuracy.append(np.mean(replicate_accuracy))
    results_training_size.append(train_size)
# -
# ## Plot learning curve
#
# We will plot the learning curve, including a moving average (the mean of 5 points). Moving averages can help show trends when data is noisy.
# +
import matplotlib.pyplot as plt
# %matplotlib inline

# Calculate moving average (of last 5 points) with np.convolve
moving_average = np.convolve(results_accuracy, np.ones((5,))/5, mode='valid')
# 'valid' convolution drops 2 points at each end, so trim x symmetrically
# to centre each mean over the points it averages.
x_moving_average = results_training_size[2:-2] # Include offset to centre mean

plt.scatter(results_training_size, results_accuracy,
         label='Accuracy')

plt.plot(x_moving_average, moving_average,
        label='Moving average',
        color='orange',
        linewidth=3)

plt.xlabel('Training set size')
plt.ylabel('Test set accuracy')
plt.legend()
plt.grid(True)
plt.show()
# -
# Accuracy appears to increase significantly up to 100 training points, and then appears to start to plateau in this model at 300-400 data points. It looks unlikely that collecting more data would significantly improve the model.
| jupyter_notebooks/11_learning_curve.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The '$' boundary matcher matches an occurrence of a character/character class/group at the end of a line.
#
# **Task**
#
# Write a RegEx to match a test string 'S', under the following conditions:
#
# 'S' should consist of only lowercase and uppercase letters (no numbers or symbols).
# 'S' should end in s.
# +
import re

# Whole string must be ASCII letters only and its last character must be 's'
# (kept as a module-level name because the exercise harness reads it).
regex_pattern = r'^[a-zA-Z]*s$'

matched = re.search(regex_pattern, input()) is not None
print(str(matched).lower())
| Regex/3. Repetitions/13. matching ending items.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EDA - All
# * 이 커널에서는 모든 피처를 하나하나씩 살펴보며 어떤 데이터 양상을 띄고 있는 지 검토할 것입니다.
# ## 라이브러리
import numpy as np
import pandas as pd
import warnings
import gc

warnings.filterwarnings("ignore")

# Widen pandas display limits so the wide value-count tables render fully.
pd.set_option('max_rows', 150)
pd.set_option('max_colwidth', 500)
pd.set_option('max_columns', 500)
# ## 데이터 로드
# + hide_input=false
# Explicit per-column dtypes for pd.read_csv: narrow integer/float widths and
# categoricals keep the very large Malware Prediction CSVs within memory.
dtypes = {
    'MachineIdentifier': 'object',
    'ProductName': 'category',
    'EngineVersion': 'category',
    'AppVersion': 'category',
    'AvSigVersion': 'category',
    'IsBeta': 'int8',
    'RtpStateBitfield': 'float16',
    'IsSxsPassiveMode': 'int8',
    'DefaultBrowsersIdentifier': 'float16',
    'AVProductStatesIdentifier': 'float32',
    'AVProductsInstalled': 'float16',
    'AVProductsEnabled': 'float16',
    'HasTpm': 'int8',
    'CountryIdentifier': 'int16',
    'CityIdentifier': 'float32',
    'OrganizationIdentifier': 'float16',
    'GeoNameIdentifier': 'float16',
    'LocaleEnglishNameIdentifier': 'int8',
    'Platform': 'category',
    'Processor': 'category',
    'OsVer': 'category',
    'OsBuild': 'int16',
    'OsSuite': 'int16',
    'OsPlatformSubRelease': 'category',
    'OsBuildLab': 'category',
    'SkuEdition': 'category',
    'IsProtected': 'float16',
    'AutoSampleOptIn': 'int8',
    'PuaMode': 'category',
    'SMode': 'float16',
    'IeVerIdentifier': 'float16',
    'SmartScreen': 'category',
    'Firewall': 'float16',
    'UacLuaenable': 'float32',
    'Census_MDC2FormFactor': 'category',
    'Census_DeviceFamily': 'category',
    'Census_OEMNameIdentifier': 'float16',
    'Census_OEMModelIdentifier': 'float32',
    'Census_ProcessorCoreCount': 'float16',
    'Census_ProcessorManufacturerIdentifier': 'float16',
    'Census_ProcessorModelIdentifier': 'float16',
    'Census_ProcessorClass': 'category',
    'Census_PrimaryDiskTotalCapacity': 'float32',
    'Census_PrimaryDiskTypeName': 'category',
    'Census_SystemVolumeTotalCapacity': 'float32',
    'Census_HasOpticalDiskDrive': 'int8',
    'Census_TotalPhysicalRAM': 'float32',
    'Census_ChassisTypeName': 'category',
    'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
    'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
    'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
    'Census_PowerPlatformRoleName': 'category',
    'Census_InternalBatteryType': 'category',
    'Census_InternalBatteryNumberOfCharges': 'float32',
    'Census_OSVersion': 'category',
    'Census_OSArchitecture': 'category',
    'Census_OSBranch': 'category',
    'Census_OSBuildNumber': 'int16',
    'Census_OSBuildRevision': 'int32',
    'Census_OSEdition': 'category',
    'Census_OSSkuName': 'category',
    'Census_OSInstallTypeName': 'category',
    'Census_OSInstallLanguageIdentifier': 'float16',
    'Census_OSUILocaleIdentifier': 'int16',
    'Census_OSWUAutoUpdateOptionsName': 'category',
    'Census_IsPortableOperatingSystem': 'int8',
    'Census_GenuineStateName': 'category',
    'Census_ActivationChannel': 'category',
    'Census_IsFlightingInternal': 'float16',
    'Census_IsFlightsDisabled': 'float16',
    'Census_FlightRing': 'category',
    'Census_ThresholdOptIn': 'float16',
    'Census_FirmwareManufacturerIdentifier': 'float16',
    'Census_FirmwareVersionIdentifier': 'float32',
    'Census_IsSecureBootEnabled': 'int8',
    'Census_IsWIMBootEnabled': 'float16',
    'Census_IsVirtualDevice': 'float16',
    'Census_IsTouchEnabled': 'int8',
    'Census_IsPenCapable': 'int8',
    'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
    'Wdft_IsGamer': 'float16',
    'Wdft_RegionIdentifier': 'float16',
    'HasDetections': 'int8'
}
# -

# Load the competition data; assumes ./data/train.csv and ./data/test.csv
# exist locally (Kaggle download).
train = pd.read_csv('./data/train.csv', dtype=dtypes)
test = pd.read_csv('./data/test.csv', dtype=dtypes)
# ## Column analysis
# The original notebook repeated the same three cells (train value share,
# test value share, detection rate) for each of 15 columns — ~250 lines of
# copy-paste. The logic is factored into two helpers driven by one loop;
# the tables produced per column are identical to the cells they replace.

def value_rate_table(df, col):
    """Value counts of ``df[col]`` (NaN included) plus their percentage
    share of all rows, sorted by share, descending."""
    counts = df[col].value_counts(dropna=False)
    table = counts.to_frame()
    table['rate'] = np.around(100 * counts / df.shape[0], 2)
    return table.sort_values('rate', ascending=False)

def detection_rate_table(df, col):
    """Per value of ``df[col]``: detection count, row count and detection
    rate in percent, sorted by detection count, descending."""
    detections = df.groupby(col)['HasDetections']
    table = detections.sum().to_frame()
    table['cnt'] = detections.count()
    table['rate'] = np.around((table.HasDetections / table.cnt) * 100, 2)
    return table.sort_values('HasDetections', ascending=False)

# Columns examined in this notebook part, in the original order.
EDA_COLUMNS = [
    'Census_ChassisTypeName',
    'Census_InternalPrimaryDiagonalDisplaySizeInInches',
    'Census_InternalPrimaryDisplayResolutionHorizontal',
    'Census_InternalPrimaryDisplayResolutionVertical',
    'Census_PowerPlatformRoleName',
    'Census_InternalBatteryType',
    'Census_InternalBatteryNumberOfCharges',
    'Census_OSVersion',
    'Census_OSArchitecture',
    'Census_OSBranch',
    'Census_OSBuildNumber',
    'Census_OSBuildRevision',
    'Census_OSEdition',
    'Census_OSSkuName',
    'Census_OSInstallTypeName',
]

# + hide_input=false
for col in EDA_COLUMNS:
    print(f"### {col}")
    print("[train]")
    print(value_rate_table(train, col))
    print("[test]")
    print(value_rate_table(test, col))
    print("[detection rate by value]")
    print(detection_rate_table(train, col))
# -
| Microsoft Malware Prediction/code/eda/01-4 EDA - All - 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# >>> import fastai
# >>> fastai.__version__
# '1.0.54'
# -
import fastai

fastai.__version__

# # !pip install -U fastai==1.0.60

# Fetch and unpack the pretrained sentiment model into ./model.
# !sh download_fastai_sentiment_model.sh
# !du -h sentiment_model.tgz
# !tar xzvf sentiment_model.tgz
# !mv storage model

from pathlib import Path
from fastai.text import load_data, text_classifier_learner, AWD_LSTM

bs=48
path=Path('./model/sentiment')

# Restore the saved DataBunch (vocab + label metadata) and rebuild the
# AWD-LSTM classifier around it.
data_clas = load_data(path, 'data_clas.pkl', bs=bs)
learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)

# pytorch model under the hood: load the trained weights ('third' checkpoint)
# and switch to inference mode.
learn.load('third')
learn.model.eval()

example_message = "I don't know why I keep focusing on negative things. What a bummer!"

learn.predict(example_message)

learn.predict('I love this workshop')
from typing import Tuple
def predict_sentiment(sentence: str) -> Tuple[str, float]:
    """Run the fastai sentiment learner on *sentence*.

    Returns a (label, confidence) pair: label is 'negative' for class 0 and
    'positive' otherwise; confidence is the predicted class's score rounded
    to 4 decimal places. Relies on the module-level `learn` loaded above.
    """
    _, class_id, scores = learn.predict(sentence)
    label = "negative" if class_id == 0 else "positive"
    return label, round(scores[class_id].item(), 4)
predict_sentiment(example_message)
# # Add step to the pipeline
import redis

# Subscribe to the pipeline channel fed by the NER step.
r = redis.Redis()
sub = r.pubsub(ignore_subscribe_messages=True)
sub.subscribe(['NER'])

import mq
from tqdm import tqdm

# Consume messages forever: score each sentence's sentiment and publish one
# enriched record per entity to the 'ready' channel.
for message in tqdm(sub.listen()):
    data = mq.read_message_data(message)
    print(data)
    # BUG FIX: the original assigned the (sentiment, score) tuple to a
    # variable named `entities` and then referenced the undefined names
    # `sentiment` and `score` inside the loop, raising NameError on the
    # first message. Unpack the tuple properly instead.
    sentiment, score = predict_sentiment(data['sentence'])
    # Assumes the upstream NER step attaches its recognised entities under
    # the 'entities' key — TODO confirm against the NER notebook's output
    # schema; messages without entities are skipped.
    for e in data.get('entities', []):
        output = dict(
            entity=e,
            sentiment=sentiment,
            score=score
        )
        r.publish('ready', mq.serialize_message_data(output))
# ### Fallback always-happy sentiment in case the WiFi fails
import random

def decide_sentiment(sentence: str) -> Tuple[str, float]:
    """Always-positive fallback classifier (used when the model/WiFi fails).

    Ignores *sentence* and returns ('positive', confidence) with a random
    confidence drawn uniformly from [0.5, 1], rounded to 3 decimal places.
    """
    confidence = random.uniform(.5, 1)
    return 'positive', round(confidence, 3)
| step3_nlp/Sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# count number of islands with only 2 functions and compare DFS and BFS solutions
import copy
import numpy as np
# 5x5 0/1 grid shared by both island counters below; islands are
# 8-connected components of 1s (this grid contains 4 of them).
test_graph = np.array([[0, 1, 1, 0, 0],
                       [0, 1, 0, 0, 1],
                       [1, 0, 0, 1, 1],
                       [0, 0, 0, 0, 1],
                       [1, 0, 1, 0, 1]])
from collections import deque

def count_islands_bfs_with_queue(test_graph):
    """Count 8-connected islands of 1s in a 2-D 0/1 array using BFS.

    Works on a copy (np.copy), so the caller's array is left untouched.
    Every unvisited 1 found by the scan starts one BFS flood-fill and
    counts as one island.
    """
    graph = np.copy(test_graph)
    N, D = graph.shape
    count = 0
    for i in range(N):
        for j in range(D):
            if graph[i][j] == 1:
                BFS_with_queue(graph, i, j)
                count += 1
    return count

def BFS_with_queue(graph, r, c):
    """Flood-fill (zero out) the island containing (r, c) via BFS.

    Mutates *graph* in place; cells are set to 0 when enqueued so no cell
    is ever enqueued twice.
    """
    N, D = graph.shape
    # The 8 neighbour offsets, hoisted out of the loop — the original
    # rebuilt this list on every dequeue.
    directions = [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]
    # deque gives O(1) popleft; the original used list.pop(0), which is
    # O(n) per dequeue.
    queue = deque()
    queue.append((r, c))
    graph[r][c] = 0
    while queue:
        r, c = queue.popleft()
        for m, k in directions:
            if 0 <= r + m < N and 0 <= c + k < D and graph[r + m][c + k] == 1:
                queue.append((r + m, c + k))
                graph[r + m][c + k] = 0
def DFS(i, j, graph, visited, N, D):
    """Recursively mark every unvisited 1-cell 8-connected to (i, j)."""
    visited[i][j] = True
    for di, dj in ((-1, -1), (-1, 0), (-1, 1), (0, -1),
                   (0, 1), (1, -1), (1, 0), (1, 1)):
        ni, nj = i + di, j + dj
        inside = 0 <= ni < N and 0 <= nj < D
        if inside and graph[ni][nj] == 1 and not visited[ni][nj]:
            DFS(ni, nj, graph, visited, N, D)
def count_islands(graph):
    """Count 8-connected islands of 1s; *graph* itself is never modified."""
    N, D = graph.shape
    visited = np.zeros([N, D], dtype=int)
    total = 0
    for i in range(N):
        for j in range(D):
            if graph[i][j] == 1 and not visited[i][j]:
                # unvisited land cell => a brand new island
                DFS(i, j, graph, visited, N, D)
                total += 1
    return total
print(count_islands(test_graph))  # DFS-based count
print(count_islands_bfs_with_queue(test_graph))  # BFS-based count; should agree
print(test_graph)  # unchanged: one counter copies, the other uses a visited array
# +
# create a densely connected test graph where there are 2 sets of nodes, colored either with 0 or 1.
# Make sure no nodes are connected to its own kind. The 1's in the graph means there is an edge between these nodes.
# Edges between vertices:
# 0 -- 1 0 -- 3 0 -- 5
# 2 -- 3 2 -- 1 2 -- 5
# 4 -- 5 4 -- 1 4 -- 3
# with the reverse edges, makes total 18 directed connections between 6 nodes (noted with 1) in the below matrix
import numpy as np
m = 6
raw_matrix = np.zeros([m, m], dtype = int)
# even-indexed vertices only connect to odd-indexed ones (and vice versa),
# which makes the resulting graph bipartite by construction
for i in range(len(raw_matrix)): # for each row
    for j in range(len(raw_matrix[0])): # for each column
        if i % 2 == 0 and j % 2 == 1: # if the i index is even and j is odd
            raw_matrix[i][j] = 1
        if i % 2 == 1 and j % 2 == 0: # if the i index is odd and j is even
            raw_matrix[i][j] = 1
print(raw_matrix)
test_matrix = raw_matrix
# +
# BFS implimentation to check bipartiteness.
# Use the test matrix above to test the function below
def check_if_bipartite(matrix):
    """Return True if the undirected graph given as an adjacency matrix is bipartite.

    Performs a BFS 2-coloring.  BUG FIX: the original started a single BFS from
    vertex 0 only, so odd cycles living in *other* connected components were
    never discovered and disconnected non-bipartite graphs were reported as
    bipartite.  Every still-uncolored vertex now seeds its own BFS.  The debug
    print of the color list was also removed.
    """
    n = len(matrix)
    color_list = [-1] * n  # -1 = uncolored; colored vertices hold 0 or 1
    for start in range(n):
        if color_list[start] != -1:
            continue  # already colored while exploring an earlier component
        color_list[start] = 1
        queue = [start]  # FIFO queue of vertices to expand
        while len(queue) > 0:
            u = queue.pop(0)
            # a self-loop can never be 2-colored
            if matrix[u][u] == 1: return False
            for v in range(n):
                # edge to an uncolored vertex: give it the opposite color
                if matrix[u][v] == 1 and color_list[v] == -1:
                    color_list[v] = 1 - color_list[u]
                    queue.append(v)
                # edge between two same-colored vertices: not bipartite
                elif matrix[u][v] == 1 and color_list[v] == color_list[u]: return False
    return True
check_if_bipartite(test_matrix)  # expect True for the 2-colorable matrix built above
# +
# Maximum flow implementation of a network of connections with the help of BFS
# Rendition of the wikipedia solution at https://en.wikipedia.org/wiki/Ford%E2%80%93Fulkerson_algorithm
import numpy as np
class Graph:
    """Edmonds-Karp maximum flow on a dense capacity matrix.

    The constructor copies the capacity matrix, and max_flow_edmonds_karp
    progressively turns that copy into the final residual network.
    """
    def __init__(self, graph):
        self.graph = np.copy(graph) # graph with max flow capacities from each vertex
    # Returns the maximum flow from s to t in the given graph
    def max_flow_edmonds_karp(self, source, sink):
        """Run Edmonds-Karp from *source* to *sink* and return the max flow.

        Also prints the reconstructed optimized flow network as a side effect.
        """
        N, D = self.graph.shape
        # initialize a parent array
        parent = -np.ones(D, dtype = int)
        # augment the flow while there is path from source to sink,
        # for each path between source and sink, the parent array is continually updated with BFS
        while self.BFS(source, sink, parent):
            # Find minimum residual capacity (or max flow) of the edges along the path filled by BFS.
            min_path_flow = np.inf
            t = sink # initialize target as sink
            # from sink to source traverse all parents and find the min flow between all vertices
            while(t != source):
                min_path_flow = min(min_path_flow, self.graph[parent[t]][t])
                t = parent[t]
            # update residual capacities of the edges and reverse edges along the path
            v = sink
            while(v != source):
                u = parent[v]
                self.graph[u][v] -= min_path_flow
                self.graph[v][u] += min_path_flow
                v = parent[v]
            # the residual network is updated
        # extract the optimized network flow from the optimized residual network
        updated_network = self.draw_the_updated_network_flow()
        print('network of the optimized graph with the max flow:')
        print(updated_network) # to view the network in equilibrium (when in is equal to out)
        # assert that total amount dispatched from source is equal to the total amount received at target
        assert(np.sum(updated_network[source , :]) == np.sum(updated_network[: , sink]))
        # the max flow into the system at equilibrium is ready to output
        return np.sum(updated_network[source, :]) # sum of all inputs from the source vertex into the network
    # simple BFS checks if there is a path from s to t, and returns T or F of a changing graph with a parent array
    def BFS(self, source, target, parent):
        """BFS over edges with remaining capacity; fills parent[] with the path tree.

        Returns True when *target* is still reachable from *source*.
        """
        # parent array is an additional feature that will keep being updated for each time BFS is run
        # Mark all the vertices as not visited
        visited =[False]*(len(self.graph))
        # Create a queue for BFS
        queue=[]
        # Mark the source node as visited and enqueue it
        queue.append(source)
        visited[source] = True
        # Standard BFS Loop
        while len(queue) > 0:
            #Dequeue a vertex from queue and print it
            u = queue.pop(0)
            visited[u] = True
            # Get all adjacent vertices of the dequeued vertex u that have not been visited,
            # mark them visited and then enqueue
            for i in range(len(self.graph[u])):
                # check if the index is not visited and graph has a value at [u][i] that is being updated
                if visited[i] == False and self.graph[u][i] > 0:
                    # append the vertex to the queue, mark it as True, update parent for all connecting vertices
                    queue.append(i)
                    parent[i] = u
        # if we reached sink in BFS starting from source, then return true, else false
        # equilibrium is reached when source vertex output == target vertex input
        # when self.graph[target vertex][vertex_at_i_index] == 0 so that visited[t] returns FALSE
        return visited[target] # returns True if visited[t] else it returns False (flow conservation)
    def draw_the_updated_network_flow(self):
        """Return the transpose of the residual matrix, i.e. the realized flows."""
        # transpose of the final residual graph would be the graph with the optimized max flow
        n , d = self.graph.shape
        updated_graph = np.zeros([d, n], dtype = float)
        for i in range(n):
            for j in range(d):
                updated_graph[j][i] = self.graph[i][j]
        return updated_graph
# classic CLRS-style 6-node flow network; expected max flow 0 -> 5 is 23
test_graph = np.array([[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0]] , dtype = float)
g = Graph(test_graph)
print('max flow: ', g.max_flow_edmonds_karp(0, 5), '\n')
# Graph() copied the matrix, so test_graph still shows the original capacities
print('original capacities:', '\n', np.array(test_graph)) # view the original flow of the network
# to view how the graph has been shaped to its final form
# The above implementation of Ford Fulkerson Algorithm is called Edmonds-Karp Algorithm.
# The idea of Edmonds-Karp is to use BFS in Ford Fulkerson implementation as BFS which always picks a path
# with minimum number of edges. When BFS is used, the worst case time complexity can be reduced to O(VE^2).
# +
# Edmonds-Karp of max flow with 2 functions:
import numpy as np
def max_flow_edmonds_karp(graph, source, sink):
    """Compute the Edmonds-Karp maximum flow from *source* to *sink*.

    *graph* is an NxD capacity matrix and is mutated in place into the final
    residual network; the return value is the total flow that was pushed.
    """
    N, D = graph.shape
    parent = -np.ones(D, dtype=int)  # BFS path tree, refreshed every round
    # keep augmenting while BFS can still reach the sink through spare capacity
    while BFS(graph, source, sink, parent):
        # bottleneck: smallest residual capacity along the discovered path
        bottleneck = np.inf
        node = sink
        while node != source:
            bottleneck = min(bottleneck, graph[parent[node]][node])
            node = parent[node]
        # push the bottleneck through the path, updating forward and reverse
        # residual capacities as we walk back to the source
        node = sink
        while node != source:
            prev = parent[node]
            graph[prev][node] -= bottleneck
            graph[node][prev] += bottleneck
            node = prev
    # reverse edges into the source now hold exactly the total pushed flow
    return np.sum(graph.T[source, :])
def BFS(graph, source, target, parent):
    """BFS over edges with remaining capacity, filling parent[] as it goes.

    Returns True when *target* is reachable from *source* in the residual
    network (i.e. another augmenting path exists).
    """
    visited = [False] * len(graph)
    frontier = [source]
    visited[source] = True
    while frontier:
        u = frontier.pop(0)
        visited[u] = True
        for v in range(len(graph[u])):
            if not visited[v] and graph[u][v] > 0:
                frontier.append(v)
                parent[v] = u
    return visited[target]
# NOTE(review): unlike the Graph class, this function mutates test_graph in
# place; re-running this cell operates on the already-consumed residual matrix.
max_flow_edmonds_karp(test_graph, 0, 5)
# +
# Python program to find maximal bipartite matching.
# Use DFS instead of BFS.
import numpy as np
class bpm:
    """Maximum bipartite matching on a square 0/1 matrix (Kuhn's algorithm).

    Rows are applicants and columns are jobs; graph[u][v] > 0 means applicant
    u is interested in job v.
    """
    def __init__(self, graph):
        self.graph = graph
    def max_bipartite_match(self):
        """Return (number of jobs filled, list mapping job index -> applicant)."""
        size = len(self.graph)
        # jobs_array[v] holds the applicant assigned to job v, or -1 if free
        jobs_array = [-1] * size
        result = 0
        for each_applicant in range(size):
            # every augmenting search starts from a fresh "seen" set
            seen_jobs = [False] * size
            if self.dfs(each_applicant, jobs_array, seen_jobs):
                result += 1
        return result, jobs_array
    def dfs(self, u, jobs_array, seen_jobs):
        """Try to seat applicant *u*, recursively re-seating current holders."""
        for v in range(len(self.graph)):
            if not (self.graph[u][v] > 0) or seen_jobs[v]:
                continue  # u is not interested, or job v already explored
            seen_jobs[v] = True
            # take the job if it is free, or if its holder can move elsewhere
            if jobs_array[v] == -1 or self.dfs(jobs_array[v], jobs_array, seen_jobs):
                jobs_array[v] = u
                print(jobs_array)  # trace every successful (re)assignment
                return True
        return False
# applicant-x-job interest matrix: rows = applicants, columns = jobs
test_matrix = np.array([[0, 0, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1]])
g = bpm(test_matrix)
print(g.graph) # reminder of the graph
g.max_bipartite_match() # max number of matches, jobs list just to view who got which job
# +
# Simplified version of maximal bipartite matching.
import numpy as np
def max_bipartite_match(graph):
    """Maximum bipartite matching (Kuhn's algorithm), function version.

    Rows of *graph* are applicants, columns are jobs; graph[u][v] > 0 means
    applicant u is interested in job v.  Returns (match count, jobs list)
    where jobs[v] holds the applicant assigned to job v, or -1 if unfilled.
    """
    jobs_array = [-1] * len(graph)  # final job -> applicant assignment
    for each_applicant in range(len(graph)):
        seen_jobs = [False] * len(graph)  # fresh visited set per augmenting search
        dfs(graph, each_applicant, jobs_array, seen_jobs)
    return len([i for i in jobs_array if i != -1]), jobs_array
def dfs(graph, u, jobs_array, seen_jobs):
    """Try to seat applicant *u*, recursively re-seating current job holders."""
    for v in range(len(graph)):
        if graph[u][v] > 0 and seen_jobs[v] == False:
            seen_jobs[v] = True
            # take job v if it is free, or if its current holder can move on
            if jobs_array[v] == -1 or dfs(graph, jobs_array[v], jobs_array, seen_jobs):
                jobs_array[v] = u
                return True
    # BUG FIX: the function previously fell off the end and implicitly
    # returned None; return False explicitly so callers get a real boolean.
    return False
# same applicant-x-job interest matrix as the class-based cell above
test_matrix = np.array([[0, 0, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1]])
max_bipartite_match(test_matrix) # max number of matches, jobs list just to view who got which job
# (6, [2, 4, 1, 0, 3, 5])
| Graph_BFS_and_DFS_Implimentations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Computing the electronic ground state energy of a hydrogen molecule
# ## Setup
#
# First, get an API key from IonQ. This will be used by the IonQ provider inside Qiskit to submit circuits to the IonQ platform.
#
# After securing an API key, install the qiskit-ionq provider at https://github.com/Qiskit-Partners/qiskit-ionq/
#
# ### (Optional) Extra Dependencies
#
# Some examples use additional Python dependencies; please make sure to `pip install` them as needed.
#
# Dependencies:
# * `matplotlib`: To run `qiskit.visualization.plot_histogram`.
#
# **NOTE**: The provider expects an API key to be supplied via the `token` keyword argument to its constructor. If no token is directly provided, the provider will check for one in the `QISKIT_IONQ_API_TOKEN` environment variable.
#
# Now that the Python package has been installed, you can import and instantiate the provider:
# +
#import Aer here, before calling qiskit_ionq_provider
from qiskit import Aer
from qiskit_ionq_provider import IonQProvider
#Call provider and set token value
provider = IonQProvider(token='My token')
# -
# The `provider` instance can now be used to create and submit circuits to IonQ.
#
# ### Backend Types
#
# The IonQ provider supports two backend types:
# * `ionq_simulator`: IonQ's simulator backend.
# * `ionq_qpu`: IonQ's QPU backend.
#
# To view all current backend types, use the `.backends` property on the provider instance:
#
provider.backends()
# ### Let's create a guess ground state
#
# We try $e^{\theta(T-T^\dagger)}$, where $\theta$ is our variational parameter and $T = a_3^\dagger a_2^\dagger a_1 a_0$ is our excitation operator. Qubits 0 and 1 denote our Hartree-Fock ground state (the good initial guess), and we are aiming to explore its vicinity by exciting a pair of electrons to the next level up, which are encoded in qubits 2 and 3. The two $NOT$ gates applied to qubits 0 and 1 at the very beginning (see below) correspond to loading of the two electrons in total to the levels commensurate with the good initial guess.
# +
from qiskit import QuantumCircuit
def load_qasm(fname):
    """Load a circuit template from a QASM file as a qiskit QuantumCircuit."""
    # load circuit template from qasm file
    circ = QuantumCircuit.from_qasm_file(fname)
    return circ
# Create the ansatz state
qc = load_qasm('ZZZZ.qasm')
# Show the circuit:
qc.draw()
# -
# Once we can generate a guess state, we need to append the circuit that generates the state by a suite of suitable basis transformation circuits that correspond to each term of the Hamiltonian, energy operator. To do so, we start by considering a sample configuration of a hydrogen molecule with the inter-core spacing of 0.712234A. We obtain (by running any choice of your favorite computational chemistry programs, such as psi4 -- we skip this step herein, as it is beyond the scope of this demo)
#
# $H_{\rm MO,electron} = -1.2703 (a_0^\dagger a_0 + a_1^\dagger a_1) -0.4586 (a_2^\dagger a_2 + a_3^\dagger a_3) - 0.6801 (a_1^\dagger a_0^\dagger a_0 a_1) - 0.1797 (a_1^\dagger a_0^\dagger a_2 a_3 + a_3^\dagger a_2^\dagger a_0 a_1) - 0.4889 (a_2^\dagger a_0^\dagger a_0 a_2 + a_3^\dagger a_1^\dagger a_1 a_3) -0.6686 (a_2^\dagger a_1^\dagger a_1 a_2 + a_3^\dagger a_0^\dagger a_0 a_3) +0.1797 (a_2^\dagger a_1^\dagger a_0 a_3) - 0.7028 (a_3^\dagger a_2^\dagger a_2 a_3),$
#
# where the subindices label the different molecular orbitals (MOs) and the included terms of length two contain nucleus-electron interaction and electron kinetic energy and the included terms of length four contain electron-electron interaction. The nucleus-nucleus interaction is computed classically and separately, which amounts in our example case to 0.7430 (Ha). Thus, the expectation value of the total energy is computed according to $\langle H_{\rm MO,electron} \rangle + 0.7430$ (Ha), where $\langle .. \rangle$ denotes an expectation value associated with a prepared ansatz state (state-dependent notations omitted for brevity).
#
# To now evaluate $\langle H_{\rm MO,electron} \rangle + 0.7430$ on a quantum computer, we apply the JW transformation introduced in the slides earlier. We obtain
#
# $H_{\rm MO, electron, JW} + 0.7430 = -0.0597 I_0 I_1 I_2 I_3 -0.0449 X_0 X_1 Y_2 Y_3 + 0.0449 X_0 Y_1 Y_2 X_3 + 0.0449 Y_0 X_1 X_2 Y_3 - 0.0449 Y_0 Y_1 X_2 X_3 + 0.1758 Z_0 I_1 I_2 I_3 + 0.1700 Z_0 Z_1 I_2 I_3 + 0.1222 Z_0 I_1 Z_2 I_3 + 0.1671 Z_0 I_1 I_2 Z_3 + 0.1756 I_0 Z_1 I_2 I_3 + 0.1671 I_0 Z_1 Z_2 I_3 + 0.1222 I_0 Z_1 I_2 Z_3 -0.2367 I_0 I_1 Z_2 I_3 + 0.1757 I_0 I_1 Z_2 Z_3 -0.2367 I_0 I_1 I_2 Z_3.$
#
# Each term is of the form of a product of pauli matrices $X,Y,Z$. As discussed in the slides, to evalualte an expectation value of $X_k,Y_k,Z_k$, applied to say a $k$th qubit, we append $H, S^{\dagger}H, I$ gate(s) to the $k$th qubit. For a product of pauli matrices, applied to multiple qubits, we append corresponding gates to the multiple qubits appropriately. For instance, to compute the expectation value of $X_0 X_1 X_2 X_3$, applied to qubits 0 through 3, we append $H$ gates to qubits 0 through 3. All circuits required for each term shown in $H_{\rm MO, electron, JW}$ are included as QASM files. Note most of the terms admit the same circuit appendix, namely, the pauli strings with either $I$ or $Z$ as its only multiplicands. Further note the very first term with the string $I_0 I_1 I_2 I_3$ does not require an evaluation by the quantum computer, as its expectation value is always 1.
# We are now ready to run the circuits. We start with the simulator.
# +
def cpar(c,a,nn):
    """Rebuild circuit *c* on *nn* qubits with its rz angles scaled by *a*.

    Every rz instruction gets its stored angle multiplied by a/2**(nn-1);
    all other instructions are copied over unchanged.
    NOTE(review): iterating the circuit yields (instruction, qargs, cargs)
    tuples -- confirm against the installed qiskit version.
    """
    # parametrize the circuit
    n = QuantumCircuit(nn)
    for g in c:
        if g[0].name=='rz':
            #print(g[0].params[0])
            n.rz(g[0].params[0]*a/2**(nn-1),g[1][0].index)
        else:
            n.append(g[0],[q.index for q in g[1]])
    return n
def zbin(x, ss, nn):
    """Return the +/-1 parity of the bits of *x* listed in *ss*.

    Bit q of *x* maps to +1 when it is 0 and to -1 when it is 1 (the
    eigenvalue convention of a Pauli-Z measurement); the signs for the qubit
    indices in *ss* are multiplied together.
    """
    # BUG FIX: one sign per qubit suffices, so iterate range(nn). The original
    # used range(2**nn), building 2**nn table entries of which only the first
    # nn were ever indexed by the call sites (all ss entries are < nn).
    mm = [-(((x & (2 ** q)) // 2 ** q) * 2 - 1) for q in range(nn)]
    r = 1
    for s in ss:
        r = r * mm[s]
    return r
def get_ham4(ress):
    """Assemble the 4-qubit H2 energy expectation from measured populations.

    *ress* holds five length-16 population lists, one per measurement basis
    (ZZZZ, XXYY, XYYX, YXXY, YYXX in that order); the hard-coded weights are
    the Jordan-Wigner coefficients quoted in the notebook text (in Hartree).
    """
    ham = -0.0597
    # the four XY-mixed Pauli strings each come from their own circuit run
    ham -= 0.0449*sum([zbin(x,[0,1,2,3],4)*ress[1][x] for x in range(16)])
    ham += 0.0449*sum([zbin(x,[0,1,2,3],4)*ress[2][x] for x in range(16)])
    ham += 0.0449*sum([zbin(x,[0,1,2,3],4)*ress[3][x] for x in range(16)])
    ham -= 0.0449*sum([zbin(x,[0,1,2,3],4)*ress[4][x] for x in range(16)])
    # every Z-only string is evaluated from the computational-basis (ZZZZ) run
    ham += 0.1758*sum([zbin(x,[0],4)*ress[0][x] for x in range(16)])
    ham += 0.1700*sum([zbin(x,[0,1],4)*ress[0][x] for x in range(16)])
    ham += 0.1222*sum([zbin(x,[0,2],4)*ress[0][x] for x in range(16)])
    ham += 0.1671*sum([zbin(x,[0,3],4)*ress[0][x] for x in range(16)])
    ham += 0.1756*sum([zbin(x,[1],4)*ress[0][x] for x in range(16)])
    ham += 0.1671*sum([zbin(x,[1,2],4)*ress[0][x] for x in range(16)])
    ham += 0.1222*sum([zbin(x,[1,3],4)*ress[0][x] for x in range(16)])
    ham -= 0.2367*sum([zbin(x,[2],4)*ress[0][x] for x in range(16)])
    ham += 0.1757*sum([zbin(x,[2,3],4)*ress[0][x] for x in range(16)])
    ham -= 0.2367*sum([zbin(x,[3],4)*ress[0][x] for x in range(16)])
    return ham
def get_pops(res, nn, n):
    """Convert a counts dict {bitstring: hits} into a dense probability list.

    Index i of the result is the population of basis state i over *n* shots;
    basis states absent from *res* keep probability 0.
    """
    pops = [0] * 2 ** nn
    for bits, hits in res.items():
        pops[int(bits, 2)] = hits / n
    return pops
# one QASM template per measurement basis: ZZZZ plus the four XY-mixed bases
fqsm4 = ['ZZZZ.qasm','XXYY.qasm','XYYX.qasm','YXXY.qasm','YYXX.qasm']
# +
from qiskit.providers.jobstatus import JobStatus
from qiskit import Aer, execute
from qiskit import ClassicalRegister
# Import parametrizable circuits
circs4 = []
for fname in fqsm4:
    circs4.append(load_qasm(fname))
# Set the parameter $\theta$ (0.0 corresponds to the Hartree-Fock guess)
#theta = 0.2144802815837562
theta = 0.0
# Choose the simulator backend
backend = provider.get_backend("ionq_simulator")
# Run the circuit:
def run_jobs(backend,circs,theta,nn,nshots):
    """Parametrize each template with *theta*, add measurements, and submit.

    Returns the list of submitted backend jobs, one per circuit in *circs*.
    """
    jobs = []
    job_ids = []  # collected for debugging only; not returned
    qcs = []  # collected for debugging only; not returned
    cr = ClassicalRegister(nn,'c')
    for circ in circs:
        qc = cpar(circ,theta,nn)
        qc.add_register(cr)
        qc.measure(range(nn),range(nn))  # measure all nn qubits into cr
        #print(qc.draw())
        qcs.append(qc)
        job = backend.run(qc, shots=nshots)
        jobs.append(job)
        job_ids.append(job.job_id())
    return jobs
jobs4 = run_jobs(backend,circs4,theta,4,1000)
# Fetch the result
def get_jobs(jobs,nn,nshots):
    """Wait for each job and return its counts as a dense probability list."""
    results = []
    for i in range(len(jobs)):
        result = jobs[i].result()  # blocks until the backend finishes this job
        results.append(get_pops(result.data()['counts'],nn,nshots))
    return results
results4 = get_jobs(jobs4,4,1000)
# Compute the expectation value $\langle H_{\rm MO,electron} \rangle + 0.7430 = \langle H_{\rm MO,electron,JW} \rangle$.
ham4 = get_ham4(results4)
# Print the total energy $\langle H_{\rm MO,electron,JW} \rangle$ (Ha).
print(ham4)
# -
# We have now computed an expectation value of the total energy of a hydrogen molecule with the inter-core spacing of 0.712234A. Specifically, we computed the expectation value for a prepared guess state that corresponds to $\theta = 0$, which coincides with the Hartree-Fock ground state, i.e., the good initial guess state. To find the ground state, which is our goal here, we rely on the following strategy. Note our quantum computer efficiently calculates the expectation value of the energy of a prepared guess state. A simple method to take advantage of this may be to consider the well-known Nelder-Mead method. Roughly speaking, this iterative method keeps track of, for an $N$ variable optimization problem, $N+1$ sets of $N$ variables, while updating the elements of the set(s) one iteration at a time, based on the values of the optimization function for the $N+1$ sets of $N$ variables at a given iteration. With an appropriate convergence criterion, if the problem admits a convergence, the method converges to a local minimum. The location of the local minimum that the method converges to depends in general on the initial choice of the $N+1$ sets of $N$ variables.
#
# Below, we explicitly work out an example of this. Our initial choice of angles $\theta$ are in principle arbitrary. Note small $\theta$ values would be a good choice should our initial good guess (Hartree-Fock) is indeed ``good''.
# +
from scipy.optimize import minimize
# Nelder-Mead implementation
def obj4(backend,circs,theta):
    """Optimizer objective: 4-qubit energy expectation at angle *theta*."""
    print(theta)  # trace each angle the optimizer tries
    jobs = run_jobs(backend,circs,theta,4,100000)
    results = get_jobs(jobs,4,100000)
    return get_ham4(results)
# Show the convergence?
result = minimize(lambda x: obj4(backend,circs4,x[0]),0.0,method='nelder-mead',tol=0.0001)
print(result)
theta_opt = result.x[0]
print(obj4(backend,circs4,theta_opt))
# -
# To do this on a quantum computer, we slightly modify the commands as follows. We evaluate the energy expectation values at both the good initial guess and the converged points (see above for the convergence obtained via the simulator). Note we have optimized our framework at various levels, as today's quantum computer has relatively large noise.
# +
fqsm2 = ['ZZ.qasm','XX.qasm','YY.qasm']
def get_ham2(ress):
    """Assemble the reduced 2-qubit H2 energy from measured populations.

    *ress* holds three length-4 population lists for the ZZ, XX and YY runs.
    NOTE(review): the range(1, 3) sums keep only basis states 01 and 10 and
    renormalize over them -- this looks like post-selection on the
    single-excitation subspace to mitigate hardware noise; confirm.
    """
    ham = 0.28604714
    ham -= 0.47331*sum([zbin(x,[1],2)*ress[0][x] for x in range(1,3)])/sum([ress[0][x] for x in range(1,3)])
    ham += 0.35151*sum([zbin(x,[0],2)*ress[0][x] for x in range(1,3)])/sum([ress[0][x] for x in range(1,3)])
    ham -= 0.57874
    # XX and YY terms use the full (unpost-selected) populations
    ham += 0.08984*sum([zbin(x,[0,1],2)*ress[1][x] for x in range(4)])
    ham += 0.08984*sum([zbin(x,[0,1],2)*ress[2][x] for x in range(4)])
    for i in range(3):
        print(ress[i])  # debug: dump the three population lists
    return ham
# Import parametrizable circuits
circs = []
for fname in fqsm2:
    circs.append(load_qasm(fname))
# Switch the backend to run circuits on a quantum computer
qpu_backend = provider.get_backend("ionq_qpu")
# submit both the Hartree-Fock point (theta=0) and the simulator-optimized point
jobs_zero = run_jobs(qpu_backend,circs,0.0,2,1000)
jobs_opt = run_jobs(qpu_backend,circs,theta_opt,2,1000)
# -
# The job will queue, and results will arrive once it's executed!
# +
#Check if jobs are done
for i in range(len(jobs_zero)):
print(jobs_zero[i].status())
for i in range(len(jobs_opt)):
print(jobs_opt[i].status())
# +
# Fetch the result
results_zero = get_jobs(jobs_zero,2,1000)
results_opt = get_jobs(jobs_opt,2,1000)
# Compute the expectation value $\langle H_{\rm MO,electron} \rangle + 0.7430 = \langle H_{\rm MO,electron,JW} \rangle$.
ham2_zero = get_ham2(results_zero)
ham2_opt = get_ham2(results_opt)
# Print the total energy $\langle H_{\rm MO,electron,JW} \rangle$ (Ha).
print(ham2_zero,ham2_opt)
# -
| vqe/hydrogen_molecule.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (phathom)
# language: python
# name: phathom
# ---
import os
from phathom import io
from phathom.registration.registration import detect_blobs_parallel
import matplotlib.pyplot as plt
import numpy as np
# open the full-resolution moving-image zarr stack read-only
working_dir = '/home/jswaney/coregistration'
zarr_path = 'moving/zarr_stack/1_1_1'
image = io.zarr.open(os.path.join(working_dir,
zarr_path),
nested=True,
mode='r')
image.shape
# pull a 1000^3-voxel region of interest into memory
chunk = image[500:1500, 6800:7800, 3000:4000]
# +
# persist the ROI as its own chunked zarr array for the parallel detector
chunk_path = 'moving/chunk'
chunks = (100, 100, 100)
z_arr_out = io.zarr.new_zarr(os.path.join(working_dir,
chunk_path),
chunk.shape,
chunks,
chunk.dtype)
z_arr_out[:] = chunk
# +
# Detect blobs
sigma = (1.2, 2.0, 2.0) # (1.2, 2.0, 2.0) previously
min_distance = 2 # 3 previously
min_intensity = 500
nb_workers = 8
overlap = 8
blobs = detect_blobs_parallel(z_arr_out,
sigma,
min_distance,
min_intensity,
nb_workers,
overlap)
# NOTE(review): this chunk comes from the *moving* image, so the message
# text "fixed image" looks stale -- confirm which image is intended.
print('found {} blobs in fixed image'.format(len(blobs)))
# -
# %matplotlib notebook
# overlay the blobs within d slices of plane z on top of that plane
z = 500
d = 3
idx = np.where(np.abs(blobs[:, 0]-z) < d)
slice_blobs = blobs[idx]
# NOTE(review): `mip` (max-intensity projection) is computed but never shown;
# presumably plt.imshow(mip) was intended instead of chunk[z] -- confirm.
mip = chunk[z-d:z+d].max(axis=0)
plt.imshow(chunk[z])
plt.plot(slice_blobs[:,2], slice_blobs[:,1], 'r*')
plt.show()
| notebooks/test_blob_detection_params.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sos
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SoS
# language: sos
# name: sos
# ---
# + [markdown] kernel="SoS" tags=[]
# # Action `download`
# + [markdown] kernel="SoS" tags=[]
# * **Difficulty level**: easy
# * **Time needed to learn**: 10 minutes or less
# * **Key points**:
# * Action `download` download listed URLs in parallel
#
# + [markdown] kernel="SoS" tags=[]
# ## Action `download`
# + [markdown] kernel="SoS" tags=[]
# Action `download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5)` download files from specified URLs, which can be a list of URLs, or a string with tab, space or newline separated URLs.
#
# * If `dest_file` is specified, only one URL is allowed and the URL can have any form.
# * Otherwise all files will be downloaded to `dest_dir`. Filenames are determined from URLs so the URLs must have the last portion as the filename to save.
# * If `decompress` is True, `.zip` files, compressed or plain `tar` (e.g. `.tar.gz`) files, and `.gz` files will be decompressed to the same directory as the downloaded file.
# * `max_jobs` controls the maximum number of concurrent connection to **each domain** across instances of the `download` action. That is to say, if multiple steps from multiple workflows download files from the same website, at most `max_jobs` connections will be made. This option can therefore be used to throttle downloads to websites.
#
# For example,
#
# ```
# [10]
# GATK_RESOURCE_DIR = '/path/to/resource'
# GATK_URL = 'ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/2.8/hg19/'
#
# download: dest_dir=GATK_RESOURCE_DIR, expand=True
# {GATK_URL}/1000G_omni2.5.hg19.sites.vcf.gz
# {GATK_URL}/1000G_omni2.5.hg19.sites.vcf.gz.md5
# {GATK_URL}/1000G_omni2.5.hg19.sites.vcf.idx.gz
# {GATK_URL}/1000G_omni2.5.hg19.sites.vcf.idx.gz.md5
# ```
#
# download the specified files to `GATK_RESOURCE_DIR`. The `.md5` files will be automatically used to validate the content of the associated files. Note that
#
# SoS automatically saves signatures of downloaded and decompressed files so the files will not be re-downloaded if the action is called multiple times. You can, however, still specify the input and output of the step to use step signatures
#
#
# ```
# [10]
# GATK_RESOURCE_DIR = '/path/to/resource'
# GATK_URL = 'ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/2.8/hg19/'
# GATK_RESOURCE_FILES = '''1000G_omni2.5.hg19.sites.vcf.gz
# 1000G_omni2.5.hg19.sites.vcf.gz.md5
# 1000G_omni2.5.hg19.sites.vcf.idx.gz
# 1000G_omni2.5.hg19.sites.vcf.idx.gz.md5'''.split()
# input: []
# output: [os.path.join(GATK_RESOURCE_DIR, x) for x in GATK_RESOURCE_FILES]
# download([f'{GATK_URL}/{x}' for x in GATK_RESOURCE_FILES], dest_dir=GATK_RESOURCE_DIR)
# ```
#
# Note that the `download` action uses up to 5 processes to download files. You can change this number by adjusting system configuration `sos_download_processes`.
# + [markdown] kernel="SoS" tags=[]
# ## Further reading
#
# * [SoS Actions](sos_actions.html)
| src/user_guide/download_actions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Using Keras to run the wine quality dataset
#
# In this notebook, we show how to use Keras to run a regression on the red wine quality dataset. I will introduce 4 different methods and go through each of them by grid search on 4 different combinations of parameters.
#
# ### First, we load the dependencies.
import pandas as pd
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
import keras
# ### Test GPU support
tf.test.gpu_device_name()
# ## Load in dataset using Pandas
#
# We use `read_csv` to the read in and also select the X and y matrices.
# Load the red-wine quality data; 'quality' is the regression target and the
# remaining 11 physico-chemical measurements form the feature matrix.
data = pd.read_csv('winequality-red.csv', header=0)
# select X and y matrices.
# BUG FIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported replacement and returns the same ndarray.
y = data['quality'].to_numpy()
X = data.drop(labels='quality', axis=1).to_numpy()
# check the shape of X and y
y.shape, X.shape
# ## Start training and parameter selection
#
# We the use the API from `scikit-learn` to conduct a grid search on parameters. (Here I use 5-fold cross-validation to save training time.) We also need `KerasRegressor` to use sklearn together with Keras
% time
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split
# ## 5-fold cross-validation
#
# Here `new2_advanced_hidden_model` is the name for my best model.
# +
% time
def new2_advanced_hidden_model():
    """Build and compile the 5-hidden-layer MLP regressor (11 features -> 1 output)."""
    model = Sequential()
    model.add(Dense(12, activation='relu', input_dim=11))
    # NOTE(review): input_dim on the layers below looks redundant -- in a
    # Sequential model each layer infers its input size from the previous
    # layer's output; confirm before removing.
    model.add(Dense(18, activation='relu', input_dim=12))
    model.add(Dense(13, activation='relu', input_dim=18))
    model.add(Dense(10, activation='relu', input_dim=13))
    model.add(Dense(7, activation='relu', input_dim=10))
    # linear output head: single continuous quality score
    model.add(Dense(1, activation='linear', input_dim=7))
    # model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
# -
## Here are the params for grid search
parameters = {
'epochs' : [800, 700],
'batch_size' : [150, 140], # The larger the batch size, the faster the model will run.
'verbose' : [0]
}
# fit the network
reg_hidden_new2 = GridSearchCV(KerasRegressor(build_fn=new2_advanced_hidden_model), parameters, cv = 5, scoring = 'neg_mean_absolute_error', return_train_score = True)
reg_hidden_new2.fit(X, y)
# use pandas to check the cv results
pd.DataFrame(reg_hidden_new2.cv_results_)
# ## Here are our best scores with best parameters
# BUG FIX: the best CV score was accidentally displayed twice in a tuple;
# show it once (best_params_ is displayed in the next cell).
reg_hidden_new2.best_score_
reg_hidden_new2.best_params_
| finalproj_allmodels_winequalityprediction/regression/wine-keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "An In-Depth Look at Logistic Regression"
# > "Understanding the ins and outs of a logistic regression is non-trivial. Many sources either only touch the theoretical side or the implementation side respectively. In this post, I would like to create a one-stop-shop for the theoretical basis and the practical implementations of the logistic regression."
#
# - toc: false
# - branch: master
# - badges: true
# - comments: false
# - categories: [classification, theory]
# - hide: false
# - search_exclude: false
# Understanding the ins and outs of a logistic regression is non-trivial. Many sources either only touch the theoretical side or the implementation side respectively. In this post, I would like to create a one-stop-shop for the theoretical basis and the practical implementations of the logistic regression.
# # Our Dataset
# We will use the well-known iris dataset as basis for our discussion. It is simple enough to still employ some intuition when trying to understand the logistic regression. When loading the dataset from sklearn, we get 150 observations of measurements of some of the plants' features. Then botanists have classified the irises into three subtypes with fancy latin names, but since I am no botanist, we will just pretend that - based on those measurements - we can determine if the flower blooms red, green or blue. The measurements will end up in the set of observations $X$, with each observation consisting of 4 measurements each and the classification ends up in the vector $y$ with entries being 0 = red, 1 = green, 2 = blue.
# +
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
# Load the iris measurements (X) and the true class labels (y).
X, y = load_iris(return_X_y=True)
# Human-readable names for the four measured features; reused by later plots.
columns = ('Measurement 1', 'Measurement 2', 'Measurement 3', 'Measurement 4')
# Tabular view of the raw measurements together with each specimen's class.
dataset = pd.DataFrame(X, columns=columns)
dataset["True Classification"] = y
dataset
# +
# One translucent line per specimen, coloured by its true class, so dense
# regions show where each class concentrates ("spaghetti plot").
colors = ['red', 'green', 'blue']
for features, label in zip(X, y):
    plt.plot(features, colors[label], alpha=0.1)
plt.xticks(np.arange(4), columns)
plt.show()
# -
# We can clearly see that measurement 3 carries a lot of information. It separates neatly between the red flowers and the rest, and it even seems to be a good guide to separate the greens from the blues. Same goes for measurement 4, while 1 and 2 seem to give no (or very little) hint as to which class a certain specimen belongs to.
# Fit a multi-class logistic regression on the complete dataset.
clf = LogisticRegression(random_state=42).fit(X, y)
# After having trained the model, the "predict" function allows us to get a prediction for each input tuple. The classes are numbered not named, but can of course be converted to names using the appropriate mapping.
# 0 = red, 1 = green, 2 = blue
clf.predict(X)
# Besides a simple class prediction, we can also get the probability for each class for each input data sample.
clf.predict_proba(X)[0:5] # abbreviated output
# For the first example above, we assign an 88% chance of it belonging to class '0', a 12% chance of it belonging to class '1' and (almost) 0% chance of it belonging to class '2'. Using the `argmax` function, we could then map the probabilities to the exact outcome of the `predict` function above.
# So, for the first part, we have examined the dataset and we got an idea, how well it will be able to separate the classes, based on the features given. Also, we have generated the output of the sklearn logistic regression that a training on the complete dataset is providing and how to interpret that.
# # Evaluating the Model
# A common metric to evaluate the quality of predictions is the 'score', which - according to the sklearn help - is:
# > In multi-label classification, this is the subset accuracy
# which is a harsh metric since you require for each sample that
# each label set be correctly predicted.
# Mean accuracy of the classifier on the data it was trained on.
clf.score(X, y)
# Basically that means we count all correctly classified labels as a percentage like so:
# +
# Predict once, then build the 1 (correct) / 0 (incorrect) indicator list with
# a comprehension instead of a manual append loop.
predictions = clf.predict(X)
correct = [1 if y_true == y_pred else 0 for y_true, y_pred in zip(y, predictions)]
sum(correct) / len(correct)
# -
# Given that we used the complete dataset for training, that's just okay. A more detailed approach at evaluating the quality of our model would be based on the classification report.
# +
from sklearn.metrics import classification_report
# Per-class precision / recall / f1 (plus support), evaluated on the training data.
print(classification_report(y_pred=clf.predict(X), y_true=y))
# -
# While the class "0 = red" has perfect precision and recall, because classes "1 = green" and "2 = blue" somewhat overlap in their features, the algorithm mixes them up. This is shown by the recall 0.9 of class "1", which means only 90% of all green flowers were classified as green and the precision of class "2" which means only 91% of all flowers classified as blue were actually blue.
# Scatter true class vs. predicted class; a small random jitter spreads the
# overlapping integer-valued points apart, so off-diagonal dots show
# misclassifications.
def _jitter(size):
    return np.random.rand(size) * 0.25
y_true_with_jitter = y + _jitter(y.shape[0])
y_classified_with_jitter = clf.predict(X) + _jitter(y.shape[0])
plt.xticks(np.arange(3), ('true red', 'true green', 'true blue'))
plt.yticks(np.arange(3), ('class. red', 'class. green', 'class. blue'))
point_colors = [colors[label] for label in y]
plt.scatter(y_true_with_jitter, y_classified_with_jitter, color=point_colors)
plt.show()
# Is it a good model? Well, on one hand, the bare values of our statistics are really quite good, on the other we have a tiny dataset and we trained on the complete dataset as well, as we did not keep a holdout dataset for testing. Since we do not pay too much attention to the actual result of flower's colors being predicted correctly though and only want to understand how to arrive at the predictions,we will give it a pass. Just make a mental note never to consider that a good result in a real-world application.
# # A Look at the Inner Workings of the sklearn Logistic Regression
# When looking at the variables of the log regression classifier after training, we find three sets of coefficients and three different intercepts. That is because log regression is essentially binary, i.e. does only a yes/no or 1/0 classification. If we have $n > 2$ classes, we need split this problem into $n$ separate "1 vs. rest" classification problems. Each set of coefficients and each intercept belongs to one of these sub-classifications.
# sklearn exposes the fitted parameters as public attributes — prefer these
# over reaching into clf.__dict__ (running clf.__dict__ still shows everything at once)
clf.coef_  # one row of coefficients per "class vs. rest" sub-problem
clf.intercept_  # one intercept per sub-problem
# If we now feed a set of features $x^i$ into the trained classifier, we can calculate the probabilities of $x^i$ belonging to a class vs. not belonging to that class via:
# $$p(x_i)=\frac{1}{1+e^{-(\beta_{0} + \beta_{1}x^{i}_1 + \beta_{2}x^{i}_2 + \beta_{3}x^{i}_3 + \beta_{4}x^{i}_4)}}$$
# where $\beta_{0}$ is an intercept and $\beta_{1}..\beta_{4}$ are the coefficients for each entry in the feature vector $x^i = (x^{i}_1,x^{i}_2,x^{i}_3,x^{i}_4)$. We will later explore why this term is the correct one. Let us calculate the above for our very first observation in the dataset:
# @ is shorthand for matrix multiplication
def _sigmoid(z):
    # logistic link: p = 1 / (1 + e^-z)
    return 1 / (1 + np.exp(-z))
p_0 = _sigmoid(clf.intercept_[0] + X[0] @ clf.coef_[0])
p_1 = _sigmoid(clf.intercept_[1] + X[0] @ clf.coef_[1])
p_2 = _sigmoid(clf.intercept_[2] + X[0] @ clf.coef_[2])
print('p_0 =', p_0)
print('p_1 =', p_1)
print('p_2 =', p_2)
# With this calculation, we have now determined that our $x^0$ has a $0.98$ chance of belonging to class "0 = red" vs. any of the other classes - _either_ green or blue.
# As we can see though, these three probabilities do not add up to 100% and why should they? These probabilities belong to three mathematically independent problems:
# - Does $x^i$ belong to class 0 vs. not to class 0?
# - Does $x^i$ belong to class 1 vs. not to class 1?
# - Does $x^i$ belong to class 2 vs. not to class 2?
#
# What happens, if we linearly scale those probabilities though so that they add up to 1?
# Normalise the three independent one-vs-rest probabilities so they sum to 1.
p_sum = p_0 + p_1 + p_2
print("p_0_scaled =", p_0 / p_sum)
print("p_1_scaled =", p_1 / p_sum)
print("p_2_scaled =", p_2 / p_sum)
# We have seen these exact numbers before and can make our choice for a prediction using `argmax`:
# The scaled values match sklearn's predict_proba output for the first sample.
clf.predict_proba(X)[0]
np.argmax(clf.predict_proba(X)[0]) # 0 = red, 1 = green, 2 = blue
# Now we have an understanding of how to interpret the data generated by the training process of sklearn and we have looked beyond the `clf.predict` function to understand how the predictions are picked in the trained model.
# # The Mathematical Background
# We used the formula
# $$p(x^i)=\frac{1}{1+e^{-(\beta_{0} + \beta_{1}x^{i}_{1} + \beta_{2}x^{i}_{2} + \beta_{3}x^{i}_{3} + \beta_{4}x^{i}_{4})}}$$
# above, which is technically a choice, not a mathematical coercion (there are, in fact, others that work as well), but why does that make sense?
#
# In order to understand that, we need to understand odds and their relationship with probabilities first. If an event has a 50% chance of occurring, the odds of it happening are 1:1 (1 time it happens, 1 time it does not). If an event has a 33.3% chance of happening, the odds are 1:2 (1 time it happens, 2 times it does not), 25% represents odds of 1:3, and 20% represents odds of 1:4. Or as a general formula:
# $$ Odds = \frac{p}{1-p} $$
# e.g. for $p = 0.25$ that evaluates to $ Odds = \frac{0.25}{1-0.25} = 0.333... $, i.e. for every 3 times the event under scrutiny does not happen, there will be 1 time where it happens or 1 success : 3 failures.
# Substituting the $p$ in the odds formula with the $p(x^i)$ from above, we get:
# $$ Odds = e^{\beta_{0} + \beta_{1}x^{i}_{1} + \beta_{2}x^{i}_{2} + \beta_{3}x^{i}_{3} + \beta_{4}x^{i}_{4}} $$
# or
# $$ log(Odds) = \beta_{0} + \beta_{1}x^{i}_{1} + \beta_{2}x^{i}_{2} + \beta_{3}x^{i}_{3} + \beta_{4}x^{i}_{4} $$
# Thereby we have created a link to our feature space $X$ and we can map any observation of features to a probability $ p \in (0,1)$. All we need then is a somewhat arbitrary cutoff rule, usually $p>.5$.
# Furthermore, we get interpretability for free: the coefficient $\beta_i$ describes the change in odds when we increase $x_i$ by one unit. Look again at the spaghetti diagram with the colors above. The greater the value of measurement 3, the smaller the chance we have a red specimen at hand. In fact, the $\beta$ coefficient of measurement 3 in our "red vs. rest" problem is $-2.26$ which means that a unit increase of measurement 3 decreases the odds of the specimen being red by a factor of $exp(-2.26) \approx .104$, which is very roughly 1 in 9.5.
# Let us draw this function $p(x)$ to see what it looks like:
# +
import matplotlib.pyplot as plt
def map_to_p(log_odds):
    """Sigmoid link: map log-odds to a probability in (0, 1)."""
    odds = np.exp(log_odds)
    return odds / (1 + odds)
# Sample the sigmoid densely over [-10, 10] and draw it once; both arrays are
# reused by the later plots.
lots_of_log_odds_for_drawing = np.linspace(-10, 10, num=1000)
mapped_to_p = [map_to_p(log_odds) for log_odds in lots_of_log_odds_for_drawing]
plt.xlabel("log(Odds)")
plt.ylabel("p")
plt.plot(lots_of_log_odds_for_drawing, mapped_to_p);
# -
# This function is called the sigmoid function, which - in terms of our logistic regression model - is the so called link function, as it links a predictor, the $\log(Odds)$ linear combination, to a response in $p \in (0,1)$.
# # Fitting the Parameters
# Fitting the parameters is a bit tricky, as we cannot employ a least squares regression as in a linear case. We have to use numerical methods like the maximum likelihood estimation (MLE). Intuitively we want our $\beta$ in such a way that the linear combinations with the feature vectors $x$ are as far away from the middle of the sigmoid and as far to one side for "successes" (usually the positive) and as far to the other for "failures". "Success" means being a member of a certain class $i$ and failure means a higher probability for any other class.
#
# This beta, once we have found it, we will call $\hat\beta$.
#
# Let's have a look at the sigmoid again and use the parameters for the class "0 = red" from the sklearn logistic regression to distribute our $x$s:
# +
colors = ['red', 'green', 'blue']
# Linear predictor b_0 + b @ x for the "red vs. rest" sub-problem
# (clf is still the trained sklearn classifier from above).
log_odds_from_our_dataset = [clf.intercept_[0] + x @ clf.coef_[0] for x in X]
plt.xlabel("log(Odds)")
plt.ylabel("p")
plt.plot(lots_of_log_odds_for_drawing, mapped_to_p)
# Place every observation onto the sigmoid, coloured by its true class.
for log_odds, true_class in zip(log_odds_from_our_dataset, y):
    plt.scatter(log_odds, map_to_p(log_odds), s=10, c=colors[true_class])
plt.show()
# -
# We see now that all of the red dots, which represent the members of class $0$ fall on the right side of 0 (and therefore have $p > .5$), and the other two classes fall on the left side. So, basically, we need to choose $\hat\beta$ in such a way the sigmoid function is as close to zero for some $x$ and as close to 1 for some other $x$. This is a very awkward problem to solve. Luckily, we are working with the interval of $p \in (0,1)$, which means we know that the maximum is 1. So, we can flip those $x$ that are supposed to result in a value of $p$ close to 0 around by calculating $(1-p)$. Now we have a maximization problem for all $x$ in our domain.
# Also, a word on the intercept: while $\beta_1 .. \beta_4$ essentially "stretch" our $x$ out in such a way that there is as little overlap between the groups as possible, $\beta_0$ changes the position of the whole set of dots, so that they can be nicely centered around 0. We do actually not need to give special treatment to the intercept, as we can just augment our feature vector $x$ with a static 1 like so $x^i = (1, x^{i}_{1}, x^{i}_{2},x^{i}_{3},x^{i}_{4})$. Using these augmented vectors in the following steps, will simplify things a lot.
# Furthermore, as we want to maximize all our individual $p$ and $1-p$ respectively, we can also try to maximize the product of all of them and as we want all the terms of the product to be as close to 1 as possible, that means we want the whole product to be as close to 1 as possible.
# In our input data, the class a certain specimen belongs to is denoted by $y_i \in \{0,1,2\}$ with these numbers representing red, green and blue respectively. Now if we want to translate that into a binary problem, we need a $y_{binary, i} \in \{1,0\}$, were e.g. for the first classification "red vs. non-red" we denote a success with "1" (flower is red) vs. "0" (flower is not red). I will omit the "binary" for brevity's sake, but please make a big mental note that the $y_i$s we are working with from now on are not the same ones as above any more.
# In mathematical terms, we want to find our estimator $\hat\beta$ that maximizes the likelihood function:
# $$ l(\beta) = \prod_{x_i; y_i = 1} p(x_i) \times \prod_{x_i; y_i = 0} (1-p(x_i)) $$
# or:
# $$ \hat\beta = \arg\max_{\beta} l(\beta) $$
# Which can then be simplified as follows:
# $$ l(\beta) = \prod_{i} p(x_i)^{y_i} (1-p(x_i))^{1-y_i}$$
# We need to introduce the next concept now - in order to tackle this maximization problem, we use the fact that $log(a)$ is increasing monotonically with $a$ so maximizing $log(a)$ is equivalent to maximizing $a$, therefore:
# $$ \hat\beta = \arg\max_{\beta} l(\beta) \iff \hat\beta = \arg\max_{\beta} \log l(\beta) $$
# and using this property, we can transform the multiplication in $l(\beta)$ to a summation in $\log l(\beta)$ or $ll(\beta)$ for short.
# $$ ll(\beta) = \sum_{i} y_i \log(p(x^i)) + (1-y_i)\log(1-p(x^i))$$
# We will now simplify this equation further and we will start with the log probability in the first term (blue highlighting will become clear further down the line):
# $$ y_i\log(p(x^i)) = y_i\log\frac{1}{1+e^{-\beta x^i}} = \color{blue}{-y_i\log(1+e^{-\beta x^i})} $$
# While that was fairly simple, the second term is a bit more challenging. For now, we will omit the term $(1-y_i)$ and focus on the log inverse probability in the second term:
# $$ \log(1-p(x^i)) = \log(1-\frac{1}{1+e^{-\beta x^i}}) = \log(\frac{e^{-\beta x^i}}{1+e^{-\beta x^i}})$$
# To proceed, we have to make a choice what to do with the term in the brackets: we can either take the $e^{-\beta x_i}$ in the numerator and bring it down into the denominator or use the log rule for fractions to separate the fraction into a subtraction of two fractionless logarithms. It turns out, we need to do both in order to get to the simplest possible form of whole combined equation and after distributing the terms from the $(1-y_i)$ we need to treat each with a different strategy. For the $1$ term (note that the minus sign in the denominator has disappeared):
# $$ 1 \log(\frac{e^{-\beta x^i}}{1+e^{-\beta x^i}}) = \log(\frac{1}{e^{\beta x^i}(1+e^{-\beta x^i})}) = \log(\frac{1}{e^{\beta x^i}+1}) = \color{green}{ -\log(e^{\beta x^i}+1)} $$
# and for the $-y_i$ term:
# $$ -y_i \log(\frac{e^{-\beta x^i}}{1+e^{-\beta x^i}}) = -y_i (\log(e^{-\beta x^i}) - \log(1+e^{-\beta x^i})) $$
# $$ = \color{green}{ -y_i(-\beta x^i)} \color{blue}{ + y_i \log(1+e^{-\beta x^i})} $$
# When we now reassemble the puzzle pieces, the blue terms cancel each other out and the green terms are left
# $$ ll(\beta) = \sum_{i} \color{blue}{-y_i\log(1+e^{-\beta x^i})} \color{green}{ -\log(e^{\beta x^i}+1)} \color{green}{ -y_i(-\beta x^i)} \color{blue}{ + y_i \log(1+e^{-\beta x^i})} $$
# $$ = \sum_{i} y_i\beta x^i-\log(e^{\beta x^i}+1) $$
# To jog our memory:
# - $y_i \in \{1,0\}$, representing that an $x^i$ belongs to a certain class with 1 or not with 0
# - $x^i \in \mathbb{R}^5$ with the first element set fixed to $1$, the feature vector of an observation
# - $\beta \in \mathbb{R}^5$ the vector of coefficients, with the first entry representing the intercept
# We have now managed to state our optimization problem in comparatively simple terms, as the only thing that is missing now is the $\beta$ that will maximize the last expression above, but all the other variables are clearly defined. We cannot compute the optimal $\beta$ algebraically though and have to rely on numerical methods.
# ## A Naive Example
# In order to prepare for the next steps of actually fitting the $\beta$ coefficients, we need to translate the theoretical maths into python code. Also translating the three-class problem of red, green and blue flowers into multiple binary problems like "flower is red vs. flower is not red" is necessary. First the translation of the log-likelihood function.
def log_likelihood(x, y, beta):
    """Log-likelihood of the logistic model with coefficients *beta*.
    x    : iterable of feature vectors (leading 1 entry for the intercept)
    y    : iterable of binary class indicators (1 = success, 0 = failure)
    beta : coefficient vector, same length as each feature vector
    Implements  sum_i  y_i * (beta @ x_i) - log(1 + exp(beta @ x_i)).
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    log_odds = x @ beta
    # np.logaddexp(0, z) == log(1 + e^z) computed without overflowing for
    # large z, unlike the naive np.log(np.exp(z) + 1); the sum is also
    # vectorised instead of accumulating one observation at a time.
    return float(np.sum(y * log_odds - np.logaddexp(0.0, log_odds)))
# We split the three class problem into 3 binary sub-problems, so we need to modify the class vector in such a way that $y$ only has "1" entries for that single class we are testing for and "0" entries for the other two classes:
# The array comparison (e.g. y == 0) yields a boolean mask; int() converts each
# flag to the 0/1 indicator directly, replacing the roundabout
# ``1 if flag == True else 0`` comparison-to-True anti-idiom.
y_binary_red = [int(is_red) for is_red in y == 0]
y_binary_green = [int(is_green) for is_green in y == 1]
y_binary_blue = [int(is_blue) for is_blue in y == 2]
# Furthermore, we need to add a "1" in the beginning of each feature vector $x$, in order to account for the intercept.
# make a vector with just "1"s and glue it to the left side of X
# (the constant 1 column lets beta[0] act as the intercept term)
X_with_intercept = np.hstack((np.ones((X.shape[0], 1)), X))
# Now we leave the cosy realm of algebraic certainty and need to employ numerical methods to get our estimate $\hat\beta$. But before we do that, let us see what we want to accomplish in principle by using a brute force algorithm. We start with random $\beta$ - I cheated here as I already roughly know in which area to find the variables. Given this semi-random beta, we calculate the log-likelihood function, which can assume values between $(-\infty, 0)$ as it is a $log$ of a probability $p \in (0,1)$. If the random $\beta$ increases our likelihood we keep it, otherwise we throw it out and choose another random $\beta$. In order to visualize the results, we program a little helper function first.
# define a plot function to visualize the result
def plot_separation(x, beta, y, color='red'):
    """Scatter each observation onto the sigmoid curve.
    x     : feature vectors (leading 1 entry for the intercept)
    y     : binary class indicators; 0 is drawn grey, 1 in *color*
    color : matplotlib color for the "success" class
    """
    # don't shadow the *color* parameter with the lookup list
    palette = ['grey', color]
    for x_el, y_el in zip(x, y):
        log_odds = beta @ x_el
        # reuse the linear predictor instead of recomputing beta @ x_el
        plt.scatter(log_odds, map_to_p(log_odds), c=palette[y_el])
    plt.plot(lots_of_log_odds_for_drawing, mapped_to_p)
    plt.show()
# +
# choose a random, but very small likelihood as basis
ll_hat = -1000000
for step in range(10001):
    # choose "random" beta vector 10.000 times
    # each entry will be uniform in [-3, 3)
    beta_random = 6 * np.random.random(5) - 3
    ll = log_likelihood(X_with_intercept, y_binary_red, beta_random)
    # if our log-likelihood has improved, keep the candidate beta and raise
    # the bar for further iterations
    if ll > ll_hat:
        beta_hat = beta_random
        ll_hat = ll
    # draw the result every 5000 steps
    if step % 5000 == 0:
        print("Step:", step, ", beta_hat:", beta_hat, ", ll_hat:", ll_hat)
        plot_separation(X_with_intercept, beta_hat, y_binary_red)
        print()
# -
# As we can see, this neatly separates the red from the non-red dots onto the "1" and "0" side of the sigmoid curve. To be honest, that seems to work only because the red specimen are somewhat neatly separated from the rest from the get go. If you try the other colors, the results will not be that good.
# But we have seen the general principle. By wildly choosing random $\beta$s and keeping the ones that increase likelihood, we push the log odds of our "success" class as far to the right as possible, while we keep the "failures" on the left. Now the only step that is left is transitioning from random guessing into a process that is more sophisticated.
# ## Fitting the Parameters with Gradient Descent
# As a next step, we will replace the brute force method with a numerical optimization method like Gradient Descent or Newton-Raphson. Today, we are going to use the Gradient Descent method. In simple terms, we will move $\beta$ in small steps towards the direction that minimizes our error function, which is the true $y$ minus our calculated result for $y$ under our guess for $\beta$. This direction happens to be the negative gradient. The true $y$ is simply our input data for $y$.
def gradient_descent(X, y, steps, learning_rate):
    """Maximise the logistic log-likelihood by full-batch gradient ascent.
    X             : observation matrix, one row per sample, leading 1-column
                    for the intercept
    y             : binary class indicators (1 = success, 0 = failure)
    steps         : number of update steps to perform
    learning_rate : step-size multiplier for each update
    Returns the fitted coefficient vector beta.
    """
    beta = np.zeros(X.shape[1])
    # convert once outside the loop instead of per iteration
    y = np.asarray(y, dtype=float)
    # range(steps) fixes the original off-by-one: range(1, steps) performed
    # one step fewer than requested
    for _ in range(steps):
        # linear predictor for all samples at once
        log_odds = X @ beta
        # element-wise sigmoid; 1/(1+exp(-z)) saturates to 1.0 for large z
        # instead of overflowing like exp(z)/(1+exp(z)) would
        tentative_y = 1 / (1 + np.exp(-log_odds))
        # residual between truth and current estimate
        error = y - tentative_y
        # gradient of the log-likelihood: (y - p(x)) X  (derived in the text)
        gradient = error @ X
        # step *along* the gradient — we are maximising the likelihood
        beta += learning_rate * gradient
    return beta
# So what is the gradient of $ll(\beta) $ with regards to $\beta$?
# $$ \nabla_{\beta} ll(\beta) = \sum_{i} \nabla_{\beta} y_i\beta x_i-\nabla_{\beta}\log(e^{\beta x_i}+1) $$
# $$ = \sum_{i} y_i x_i-\nabla_{\beta}\log(e^{\beta x_i}+1) $$
# $$ = \sum_{i} y_i x_i - x_i e^{\beta x_i} \frac{1}{e^{\beta x_i} +1} $$
# $$ = \sum_{i} y_i x_i - x_i \frac{1}{1 + e^{-\beta x_i}} $$
# $$ = \sum_{i} y_i x_i - x_i p(x_i) = \sum_{i} (y_i -p(x_i)) x_i $$
# Which is nothing else than the true $y_i$ minus the calculated approximation for $y_i$ which is $p(x_i)$ times the feature vector for each observation, or in matrix form:
# $$ \nabla_{\beta} ll(\beta) = (y_{true} - y_{estimate}(\beta))X $$
# To mix things up a bit, let us try using the gradient descent method to identify the blue specimen instead of the red ones:
# Fit "blue vs. rest" with our own gradient routine (learning_rate=10e-5
# is 1e-4) and plot how well it separates the classes.
beta_hat = gradient_descent(X_with_intercept,
                            y_binary_blue,
                            steps=100001,
                            learning_rate=10e-5)
plot_separation(X_with_intercept, beta_hat, y_binary_blue, color='blue')
# And there we have a separation of the blue dots towards the positive real numbers and the rest towards the negative ones and their respective probabilities going to 1 and 0. As we can see though, the separation does not work as well as in the "red vs. rest" problem.
# However, if we try the same with the green specimen, it does not work very well at all. But that was somewhat expected, as we have seen in the very beginning. We should take solace in the fact, that the sklearn implementation does also not fare very well, which can be seen in the confusion matrix, and if we plot the result for the sub-problem "green vs. rest" we can barely differentiate between the green and the grey dots.
# Parameters for all three sub-problems from the sklearn log-reg (the old
# comment said "for green" but all three classes are extracted); use the
# public intercept_/coef_ attributes instead of reaching into clf.__dict__.
beta_0 = np.hstack((clf.intercept_[0], clf.coef_[0]))  # red
beta_1 = np.hstack((clf.intercept_[1], clf.coef_[1]))  # green
beta_2 = np.hstack((clf.intercept_[2], clf.coef_[2]))  # blue
plot_separation(X_with_intercept, beta_0, y_binary_red, color='red')
plot_separation(X_with_intercept, beta_1, y_binary_green, color='green')
plot_separation(X_with_intercept, beta_2, y_binary_blue, color='blue')
# Finally, we can observe that while our solution is not as good as the version implemented in sklearn, it provides results which are quite close already. One difference between the two algorithms is that sklearn penalizes solutions with large coefficients in its optimizer.
# Sources:
# - https://beckernick.github.io/logistic-regression-from-scratch/
# - https://www.youtube.com/watch?v=YMJtsYIp4kg
| _notebooks/2020-08-25-logistic-regression-in-depth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
from torch.utils.data import Dataset
import torchvision
import os
import h5py
import pickle # TODO or use h5py instead?
import trimesh
import open3d as o3d
import glob
import config as cfg
import dataset.augmentation as Transforms
# -
class CustomDataset(Dataset):
    """Point clouds uniformly sampled from .obj meshes under ``cfg.CUSTOM_PATH``."""
    def __init__(self, split, noise_type):
        # split: 'train' | 'val' | 'test'; noise_type: 'clean' | 'jitter'
        dataset_path = cfg.CUSTOM_PATH
        self.samples, self.labels = self.get_samples(dataset_path, split)
        self.transforms = self.get_transforms(split, noise_type)
    def __len__(self):
        # number of point clouds loaded for this split
        return self.samples.shape[0]
    def __getitem__(self, item):
        # Package one cloud; the augmentation pipeline is applied on the fly.
        sample = {'points': self.samples[item, :, :], 'label': self.labels[item], 'idx': np.array(item, dtype=np.int32)}
        if self.transforms:
            sample = self.transforms(sample)
        return sample
    def get_transforms(self, split, noise_type):
        """Build the augmentation pipeline for the given split / noise type."""
        if noise_type == "clean":
            # 1-1 correspondence for each point (resample first before splitting), no noise
            if split == "train":
                transforms = [Transforms.Resampler(2048),
                              Transforms.SplitSourceRef(),
                              Transforms.Scale(), Transforms.Shear(), Transforms.Mirror(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.ShufflePoints()]
            else:
                transforms = [Transforms.SetDeterministic(),
                              Transforms.FixedResampler(2048),
                              Transforms.SplitSourceRef(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.ShufflePoints()]
        elif noise_type == "jitter":
            # Points randomly sampled (might not have perfect correspondence), gaussian noise to position
            if split == "train":
                transforms = [Transforms.SplitSourceRef(),
                              Transforms.Scale(), Transforms.Shear(), Transforms.Mirror(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.Resampler(2048),
                              Transforms.RandomJitter(),
                              Transforms.ShufflePoints()]
            else:
                transforms = [Transforms.SetDeterministic(),
                              Transforms.SplitSourceRef(),
                              Transforms.RandomTransformSE3_euler(),
                              Transforms.Resampler(2048),
                              Transforms.RandomJitter(),
                              Transforms.ShufflePoints()]
        else:
            raise ValueError(f"Noise type {noise_type} not supported for CustomData.")
        return torchvision.transforms.Compose(transforms)
    def get_samples(self, dataset_path, split):
        """Sample 2048 points uniformly from every .obj mesh of the split."""
        if split == 'train':
            path = os.path.join(dataset_path, 'train_data')
        elif split == 'val':
            path = os.path.join(dataset_path, 'val_data')
        else:
            path = os.path.join(dataset_path, 'test_data')
        all_data = []
        all_labels = []
        # sorted() makes the sample order deterministic across platforms —
        # glob.glob returns files in arbitrary, OS-dependent order otherwise,
        # so dataset indices would not be reproducible between runs/machines
        for item in sorted(glob.glob(path + '/*.obj')):
            mesh = o3d.io.read_triangle_mesh(item)
            pcd = mesh.sample_points_uniformly(number_of_points=2048)
            xyz = np.array(pcd.points)
            data = xyz.astype(np.float32)
            # single-class dataset: every cloud gets label 0
            labels = 0
            all_data.append(data)
            all_labels.append(labels)
        return np.array(all_data), np.array(all_labels)
# Smoke test: build the training split and print its size and first sample.
if __name__ == '__main__':
    dataset = CustomDataset(split = 'train', noise_type='clean')
    print(len(dataset))
    print(dataset[0])
| dataset_custom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nburkhal/DS-Unit-1-Sprint-3-Statistical-Tests-and-Experiments/blob/master/LS_DS_131_Statistics_Probability_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="S98qoMgJLR5o" colab_type="text"
# <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
# <br></br>
# <br></br>
#
# ## *Data Science Unit 1 Sprint 3 Assignment 1*
#
# # Apply the t-test to real data
#
# Your assignment is to determine which issues have "statistically significant" differences between political parties in this [1980s congressional voting data](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records). The data consists of 435 instances (one for each congressperson), a class (democrat or republican), and 16 binary attributes (yes or no for voting for or against certain issues). Be aware - there are missing values!
#
# Your goals:
#
# 1. Load and clean the data (or determine the best method to drop observations when running tests)
# 2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01
# 3. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01
# 4. Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)
#
# Note that this data will involve *2 sample* t-tests, because you're comparing averages across two groups (republicans and democrats) rather than a single group against a null hypothesis.
#
# Stretch goals:
#
# 1. Refactor your code into functions so it's easy to rerun with arbitrary variables
# 2. Apply hypothesis testing to your personal project data (for the purposes of this notebook you can type a summary of the hypothesis you formed and tested)
# + id="aAon_fFALR5v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="41ea5b41-e282-43bf-c0f8-6ce4ab95d050"
### YOUR CODE STARTS HERE
import pandas as pd
import numpy as np
# 1984 congressional voting records (UCI); the raw file has no header row.
DATA_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data'
vote_df = pd.read_csv(DATA_URL, header=None)
vote_df.head()
# + id="So8nru5cjvQw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f042f942-ae24-4e69-8c29-fa06120a3342"
# Replace ? with NaN
# Check to see if there are any leading/trailing whitespaces
# (spot-check one raw cell value before cleaning)
vote_df[1][1]
# + id="Ut6oYnUykeb1" colab_type="code" colab={}
# Replace ? with np.nan
# ('?' is this dataset's marker for a missing / unrecorded vote)
vote_df = vote_df.replace('?', np.nan)
# + id="eHMXG6LSkrGE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 226} outputId="c06362df-a955-4bac-e4c0-18a0477d9a64"
# Add column names to dataframe
# First column is the party; the remaining 16 are the roll-call votes.
issue_columns = ['handicapped_infants', 'water_project_cost_sharing',
                 'adoption_of_budget_resolution', 'physician_fee_freeze',
                 'el_salvador_aid', 'religious_groups_in_schools',
                 'anti_satellite_test_ban', 'aid_to_nicaraguan_contras',
                 'mx_missile', 'immigration', 'synfuels_corp_cutback',
                 'education_spending', 'superfund_right_to_sue', 'crime',
                 'duty_free_exports', 'export_admin_act_south_africa']
vote_df.columns = ['party'] + issue_columns
vote_df.head()
# + id="VnrydTQIo9mS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 226} outputId="fbc0c4d6-2c25-4b74-cac2-8f9e23d9b7c1"
# Change y & n to 1 & 0
# (numeric encoding lets us take per-issue means and run t-tests)
vote_df = vote_df.replace(['y', 'n'], [1, 0])
vote_df.head()
# + id="md3hvsR4rRUu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 330} outputId="2bd8e7cc-659c-4858-e53b-1c0041a930f2"
# Check dataframe dtypes
vote_df.dtypes
# + id="CPa9x-0MmMtM" colab_type="code" colab={}
# Create political party samples
# (the two sub-populations compared by the 2-sample t-tests below)
gop = vote_df[vote_df['party'] == 'republican']
dems = vote_df[vote_df['party'] == 'democrat']
# + id="sHdu85zJmm1I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 539} outputId="38773bb2-b1a8-4f74-f361-24ed9a012c93"
# Sanity-check the republican subset: dtypes and first rows.
print(gop.dtypes)
gop.head()
# + id="70NshXJfmoZk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 539} outputId="d648479c-c93e-4bb3-c6ff-fef619a68ce7"
# Same sanity check for the democrat subset.
print(dems.dtypes)
dems.head()
# + id="uFQthtKemptR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="210e7e05-e80b-45a6-faf0-1ad968790ab4"
# Check descriptive statistics - see which parties voted for more on what issues
# (each issue's mean is the share of "yes" votes within that party)
gop.describe()
# + id="sciMQjeOoTum" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="b958a417-920b-40d1-fac2-99244b87367b"
dems.describe()
# + id="XSLaeLwRvm30" colab_type="code" colab={}
# Define function to calculate 2 sample t-tests
def two_tail_ttest(x, y, z='omit'):
    """Independent two-sample t-test; *z* is scipy's nan_policy ('omit' drops NaNs)."""
    from scipy.stats import ttest_ind
    stats_result = ttest_ind(x, y, nan_policy=z)
    return stats_result
# + id="oSuvjdmWo4uK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="84324317-c876-4045-ffc6-61f849aa3ca9"
# 2 sample t-test for the adoption of the budget
# mean democrat = 0.88
# mean republican = 0.13
# significance threshold p < 0.01
from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel
# two_tail_ttest already wraps ttest_ind(..., nan_policy='omit'), so the
# previous duplicate direct ttest_ind call computed the same test twice;
# a single call suffices.
two_tail_ttest(dems['adoption_of_budget_resolution'], gop['adoption_of_budget_resolution'])
# + [markdown] id="wsVnuMNSsKZd" colab_type="text"
# Due to the calculated t-statistic of 23.2127, which corresponds to a p-value of 2.07e-77, we reject the null hypothesis, at the 99% confidence level, that the mean of votes between the democrats and republicans regarding the adoption of a budget resolution is equal, and accept the alternative hypothesis that they are different.
# + id="KigrPyb_rMFl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dc038305-c0f8-4a6a-c3ea-f081900423a2"
# 2 sample t-test for religious groups in schools
# mean democrat = 0.477
# mean republican = 0.898
# p-value = 0.01
# (0.01 is presumably the significance level alpha; the observed p-value is reported below.)
ttest_ind(gop['religious_groups_in_schools'], dems['religious_groups_in_schools'], nan_policy='omit')
two_tail_ttest(gop['religious_groups_in_schools'], dems['religious_groups_in_schools'])
# + [markdown] id="fqhP978ttxto" colab_type="text"
# Due to the calculated t-statistic of 9.7376, which corresponds to a p-value of 2.39e-20, we reject the null hypothesis, at the 99% confidence level, that the mean of votes between the democrats and republicans regarding religious groups in schools is equal, and accept the alternative hypothesis that they are different.
# + id="sk0GYL2lttjQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e7f20348-6037-460e-fc74-b417baa00415"
# 2 sample t-test for water project cost sharing
# mean democrat = 0.502
# mean republican = 0.507
# p-value = 0.01
# (0.01 is presumably the significance level alpha; the observed p-value is reported below.)
ttest_ind(dems['water_project_cost_sharing'], gop['water_project_cost_sharing'], nan_policy='omit')
# + [markdown] id="1x2HyMLCuUET" colab_type="text"
# Due to the calculated t-statistic of -0.0889, which corresponds to a p-value of 0.9291, we fail to reject the null hypothesis, at the 99% confidence level, that the mean of votes between the democrats and republicans regarding water project cost sharing is equal.
# + id="R1q28nSbuRDR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f6f2734d-7576-4377-b97e-e071f41ec60e"
# Testing to see if our function truly works
# (should match a direct scipy ttest_ind call on the same two samples)
two_tail_ttest(gop['crime'], dems['crime'])
# + [markdown] id="aKpxeoxUwibr" colab_type="text"
# Due to calculating a t-statistic of 16.34 which corresponds to a p-value of 9.95e-47, we reject the null hypothesis, using a 99% confidence interval, that the mean of votes between the republicans and democrats regarding crime is equal, and suggest the alternative hypothesis that they are different.
# + id="ZIfvP8rVwapM" colab_type="code" colab={}
# Define 1-sample t-test
def one_sample_ttest(data, popmean, z='omit'):
    """One-sample t-test of *data* against the hypothesised mean *popmean*.

    *z* is forwarded to scipy as ``nan_policy`` (default ``'omit'``).
    Returns the scipy result object with ``statistic`` and ``pvalue``.
    """
    from scipy import stats
    return stats.ttest_1samp(data, popmean, nan_policy=z)
# + id="PSF-QNP9xmEp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d216254a-ee7d-4416-f523-da53bd21a3ec"
# Perform one_sample t-test function test
# Do democrats favor synfuels corporate cutbacks, or is the vote split down the middle?
# (popmean = 0.5 encodes "evenly split" since votes are coded 0/1)
one_sample_ttest(dems['synfuels_corp_cutback'], 0.5)
# + [markdown] id="w5LQ0a1DyOAX" colab_type="text"
# Due to the calculated t-statistic of 0.1875, which corresponds to a p-value of 0.8514, we fail to reject the null hypothesis, at the 99% confidence level, that the mean of votes among democrats regarding synfuels corporate cutbacks does not sway in a particular direction.
# + id="_i09lVF1yEL9" colab_type="code" colab={}
| LS_DS_131_Statistics_Probability_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Problem of Overfitting
#
# Consider the problem of predicting $y$ from $x \in R$. The leftmost figure below shows the result of fitting a $y = \theta_0+\theta_1x$ to a dataset. We see that the data doesn’t really lie on straight line, and so the fit is not very good.
#
# 
#
# Instead, if we had added an extra feature $x^2$ , and fit $y=\theta_0+\theta_1x+\theta_2x^2$ , then we obtain a slightly better fit to the data (See middle figure). Naively, it might seem that the more features we add, the better.
#
# However, there is also a **danger** in adding too many features: The rightmost figure is the result of fitting a 5th order polynomial $y = \sum_{j=0} ^5 \theta_j x^j$. We see that even though the fitted curve passes through the data *perfectly*, we would not expect this to be a very good predictor of, say, housing prices (y) for different living areas (x).
#
# Without formally defining what these terms mean, we’ll say the figure on the left shows an instance of **underfitting**—in which the data clearly shows structure not captured by the model—and the figure on the right is an example of **overfitting**.
#
# **Underfitting**, or high bias, is when the form of our hypothesis function h maps poorly to the trend of the data. It is usually caused by a function that is too simple or uses too few features.
#
# At the other extreme, **overfitting**, or high variance, is caused by a hypothesis function that fits the available data but does not generalize well to predict new data. It is usually caused by a complicated function that creates a lot of unnecessary curves and angles unrelated to the data.
#
# This terminology is applied to both linear and logistic regression. There are two main options to address the issue of overfitting:
#
# ### วิธีการแก้ปัญหา Overfitting ทำได้โดย
#
# 1) Reduce the number of features:
# - Manually select which features to keep.
# - Use a model selection algorithm (studied later in the course).
#
# 2) Regularization
# - Keep all the features, but reduce the magnitude of parameters $\theta_j$.
# - Regularization works well when we have a lot of slightly useful features.
#
#
#
#
# # Cost Function
#
# If we have overfitting from our hypothesis function, we can reduce the weight that some of the terms in our function carry by increasing their cost.
#
# Say we wanted to make the following function more quadratic:
#
# $\theta_0 + \theta_1x + \theta_2x^2 + \theta_3x^3 + \theta_4x^4$
#
# We'll want to eliminate the influence of $\theta_3x^3$ and $\theta_4x^4$ . Without actually getting rid of these features or changing the form of our hypothesis, we can instead modify our **cost function**:
#
# $$min_\theta\ \dfrac{1}{2m}\sum_{i=1}^m (h_\theta(x^{(i)}) - y^{(i)})^2 + 1000\cdot\theta_3^2 + 1000\cdot\theta_4^2$$
#
# We've added two extra terms at the end to inflate the cost of $\theta_3$ and $\theta_4$. Now, in order for the cost function to get close to zero, we will have to reduce the values of $\theta_3$ and $\theta_4$ to near zero. This will in turn greatly reduce the values of $\theta_3x^3$ and $\theta_4x^4$ in our hypothesis function. As a result, we see that the new hypothesis (depicted by the pink curve) looks like a quadratic function but fits the data better due to the extra small terms $\theta_3x^3$ and $\theta_4x^4$.
#
# 
#
# We could also regularize all of our theta parameters in a single summation as:
#
# $$min_\theta\ \dfrac{1}{2m}\ \sum_{i=1}^m (h_\theta(x^{(i)}) - y^{(i)})^2 + \lambda\ \sum_{j=1}^n \theta_j^2$$
#
# The $\lambda$, or lambda, is the **regularization parameter**. It determines how much the costs of our theta parameters are inflated.
#
# Using the above cost function with the extra summation, we can smooth the output of our hypothesis function to reduce overfitting. **If lambda is chosen to be too large, it may smooth out the function too much and cause underfitting.** Hence, what would happen if λ=0 or is too small ? --> Overfitting
#
# > มันเป็นการ trade-off ระหว่างจะ underfitting หรือ overfitting ถ้า $\lambda$ ใหญ่จะ under ตรงกันข้ามถ้า $\lambda$ เล็กก็จะ over
# # Regularized Linear Regression
# We can apply regularization to both linear regression and logistic regression. We will approach linear regression first.
#
# ### Gradient Descent
# We will modify our gradient descent function to **separate out $\theta_0$ from the rest of the parameters** because we do not want to penalize $\theta_0$.
#
# $\begin{align*} & \text{Repeat}\ \lbrace \newline & \ \ \ \ \theta_0 := \theta_0 - \alpha\ \frac{1}{m}\ \sum_{i=1}^m (h_\theta(x^{(i)}) - y^{(i)})x_0^{(i)} \newline & \ \ \ \ \theta_j := \theta_j - \alpha\ \left[ \left( \frac{1}{m}\ \sum_{i=1}^m (h_\theta(x^{(i)}) - y^{(i)})x_j^{(i)} \right) + \frac{\lambda}{m}\theta_j \right] &\ \ \ \ \ \ \ \ \ \ j \in \lbrace 1,2...n\rbrace\newline & \rbrace \end{align*}$
#
# (ลอง Prove ด้วย Calculus ดู)
#
# The term $\frac{\lambda}{m}\theta_j$ performs our regularization. With some manipulation our update rule can also be represented as:
#
# $\theta_j := \theta_j(1 - \alpha\frac{\lambda}{m}) - \alpha\frac{1}{m}\sum_{i=1}^m(h_\theta(x^{(i)}) - y^{(i)})x_j^{(i)}$
#
# The first term in the above equation,** $1 - \alpha\frac{\lambda}{m}$ will always be less than 1**. Intuitively you can see it as reducing the value of $\theta_j$ by some amount on every update. Notice that the second term is now exactly the same as it was before.
#
# ### Normal Equation
#
# Now let's approach regularization using the alternate method of the non-iterative normal equation.
#
# To add in regularization, the equation is the same as our original, except that we add another term inside the parentheses:
#
# $\begin{align*}& \theta = \left( X^TX + \lambda \cdot L \right)^{-1} X^Ty \newline& \text{where}\ \ L = \begin{bmatrix} 0 & & & & \newline & 1 & & & \newline & & 1 & & \newline & & & \ddots & \newline & & & & 1 \newline\end{bmatrix} \in \mathbb{R}^{(n+1)x(n+1)}\end{align*}$
#
# L is a matrix with 0 at the top left and 1's down the diagonal, with 0's everywhere else. It should have dimension (n+1)×(n+1). Intuitively, this is the identity matrix (though we are not including x0), multiplied with a single real number $λ$.
#
# Recall that if m < n, then $X^TX$ is non-invertible. However, when we add the term $λ⋅L$, then $X^TX + λ⋅L$ becomes invertible.
# # Regularized Logistic Regression
# We can regularize logistic regression in a similar way that we regularize linear regression. As a result, we can avoid overfitting. The following image shows how the regularized function, displayed by the pink line, is less likely to overfit than the non-regularized function represented by the blue line:
#
# 
#
# ### Cost Function
# Recall that our cost function for logistic regression was:
#
# $J(\theta) = - \frac{1}{m} \sum_{i=1}^m \large[ y^{(i)}\ \log (h_\theta (x^{(i)})) + (1 - y^{(i)})\ \log (1 - h_\theta(x^{(i)})) \large]$
#
# We can regularize this equation by adding a term to the end:
#
# $J(\theta) = - \frac{1}{m} \sum_{i=1}^m \large[ y^{(i)}\ \log (h_\theta (x^{(i)})) + (1 - y^{(i)})\ \log (1 - h_\theta(x^{(i)}))\large] + \frac{\lambda}{2m}\sum_{j=1}^n \theta_j^2$
#
# *Note : Prove หน่อย*
#
# The second sum, $\sum_{j=1}^n \theta_j^2$ **means to explicitly** exclude the bias term, $\theta_0$. I.e. the $\theta$ vector is indexed from 0 to n (holding n+1 values, $\theta_0$ through $\theta_n$), and this sum explicitly skips $\theta_0$, by running from 1 to n, skipping 0. Thus, when computing the equation, we should continuously update the two following equations:
#
# 
#
# ### Prove + Code ส่วนนี้ด้วย
#
# 
# # ====================== CODE =========================
# ข้อมูลชิพที่ผ่าน และไม่ผ่าน คุณภาพการผลิต
# +
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
import numpy as np
# Load the microchip QA dataset: two test scores plus a 0/1 'Accepted' label.
data2 = pd.read_csv('programing/machine-learning-ex2/ex2/ex2data2.txt',names=['Test 1','Test 2','Accepted'])
# Split into the two classes for plotting (module-level; reused by the
# decision-boundary plot functions further down).
positive = data2[data2['Accepted'].isin([1])]
negative = data2[data2['Accepted'].isin([0])]
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(positive['Test 1'], positive['Test 2'], s=50, c='b', marker='o', label='Accepted')
ax.scatter(negative['Test 1'], negative['Test 2'], s=50, c='r', marker='x', label='Rejected')
ax.legend()
ax.set_xlabel('Test 1 Score')
ax.set_ylabel('Test 2 Score')
plt.show()
# -
# -
# จากรูป ดูทรงแล้ว Decision Boundary น่าจะเป็นสมการกำลังเลขคู่ (2,4,6) ถ้าลองที่กำลัง 6 จะได้
#
# $$
# \begin{align}
# z = \theta_0 + \theta_1x_1 + \theta_2x_2 + \theta_3x_1^2 + \theta_4x_1x_2 + \theta_5x_2^2 + \theta_6x_1^3 + \theta_7x_1^2x_2 + \theta_8x_1x_2^2 + \theta_9x_2^3 + \theta_{10}x_1^4 + \theta_{11}x_1^3x_2 + \theta_{12}x_1^2x_2^2 + \theta_{13}x_1x_2^3 + \theta_{14}x_2^4 + \theta_{15}x_1^5 + \theta_{16}x_1^4x_2^1 + \theta_{17}x_1^3x_2^2 + \theta_{18}x_1^2x_2^3 + \theta_{19}x_1x_2^4 + \theta_{20}x_2^5 + \theta_{21}x_1^6 + \theta_{22}x_1^5x_2^1 + \theta_{23}x_1^4x_2^2 + \theta_{24}x_1^3x_2^3 + \theta_{25}x_1^2x_2^4 + \theta_{26}x_1x_2^5 + \theta_{27}x_2^6
# \end{align}
# $$
#
# จะเห็นว่าสมการมันไม่ linear อยู่ แปลงให้เป็น linear จะได้
#
# $$
# \begin{align}
# z = \theta_0 + \theta_1x_1 + \theta_2x_2 + \theta_3x_3 + \theta_4x_4 + \theta_5x_5 + \theta_6x_6 + \theta_7x_7 + \theta_8x_8 + \theta_9x_9 + \theta_{10}x_{10} + \theta_{11}x_{11} + \theta_{12}x_{12} + \theta_{13}x_{13} + \theta_{14}x_{14} + \theta_{15}x_{15} + \theta_{16}x_{16} + \theta_{17}x_{17} + \theta_{18}x_{18} + \theta_{19}x_{19} + \theta_{20}x_{20} + \theta_{21}x_{21} + \theta_{22}x_{22} + \theta_{23}x_{23} + \theta_{24}x_{24} + \theta_{25}x_{25} + \theta_{26}x_{26} + \theta_{27}x_{27}
# \end{align}
# $$
#
# ดังนั้นจาก ค่า $x_1,x_2$ ที่เรามีอยู่แล้ว เราต้องหาค่า $x_3 - x_{27}$ เพิ่มด้วย
#
# สร้างฟังก์ชั่นสำหรับแปลง $x_1,x_2$ เป็น $x_1 - x_n$ (จำนวน $n$ ขึ้นกับ degree ของสมการ เช่นที่ degree 6 $n$ คือ 27)
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^(-z)).

    Accepts a scalar or numpy array; values map into the open interval (0, 1).
    """
    exp_neg_z = np.exp(-z)
    return 1.0 / (1.0 + exp_neg_z)
def mapFeature(degree, x1, x2):
    """Expand two input columns into a polynomial feature DataFrame.

    The result starts with a bias column ('Ones') followed by every
    monomial x1^(i-j) * x2^j for i = 1..degree, j = 0..i, with column
    labels 'Fij'.  x1 and x2 must have the same length.
    """
    features = pd.DataFrame({'Ones': np.ones(len(x1))})
    for total in range(1, degree + 1):
        for p2 in range(total + 1):
            label = 'F' + str(total) + str(p2)
            features[label] = np.power(x1, total - p2) * np.power(x2, p2)
    return features
x1 = data2['Test 1']
x2 = data2['Test 2']
# Expand the two raw scores into all degree <= 6 polynomial terms
# (1 bias column + 27 monomials = 28 features).
features = mapFeature(6,x1,x2)
features.head()
# เมื่อ regularize cost function เป็นแบบนี้
#
# $J(\theta) = - \frac{1}{m} \sum_{i=1}^m \large[ y^{(i)}\ \log (h_\theta (x^{(i)})) + (1 - y^{(i)})\ \log (1 - h_\theta(x^{(i)}))\large] + \frac{\lambda}{2m}\sum_{j=1}^n \theta_j^2$
#
# จะได้
def costReg(theta, X, y, learningRate):
    """Regularised logistic-regression cost.

    J(theta) = (1/m) * sum( -y*log(h) - (1-y)*log(1-h) )
               + lambda/(2m) * sum_{j>=1} theta_j^2

    theta, X, y are array-likes (converted to np.matrix internally);
    `learningRate` is the regularisation strength lambda.  The bias
    term theta[0] is never penalised.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    # BUG FIX: the regulariser is lambda / (2m).  The original expression
    # `learningRate / 2 * len(X)` evaluates as (lambda/2)*m — a missing
    # pair of parentheses that multiplied by m instead of dividing by 2m.
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / (len(X)) + reg
def gradientReg(theta, X, y, learningRate):
    """Gradient of the regularised logistic-regression cost.

    Returns a flat float ndarray of length n+1 (same as the original):
    (1/m) * X^T (h - y) plus (lambda/m)*theta_j for every j >= 1; the
    bias component theta[0] is not regularised.

    FIX: the original looped over parameters and assigned a 1x1
    np.matrix (`theta[:,i]`) into a float slot of a 1-D array — a
    construct deprecated (and an error) in modern NumPy.  This version
    computes the same quantities vectorised.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    m = len(X)
    error = sigmoid(X * theta.T) - y              # (m, 1) residuals
    # Unregularised gradient as a (1, n+1) row: (1/m) * X^T (h - y).
    grad = (X.T * error).T / m
    # Add the regularisation term to every component ...
    grad = grad + (learningRate / m) * theta
    # ... then recompute the bias component, which is never penalised.
    grad[0, 0] = np.sum(np.multiply(error, X[:, 0])) / m
    return np.asarray(grad).ravel()
# เตรียม Data ให้ format ถูก ลองใช้ `costReg`
# +
# set X and y
X2 = features.iloc[:,:]
y2 = data2.iloc[:,2:3]
# convert to numpy arrays and initialize the parameter array theta
X2 = np.array(X2.values)
y2 = np.array(y2.values)
theta2 = np.zeros(len(X2[0]))
learningRate = 1
# Sanity-check the cost at theta = 0 before optimising.
costReg(theta2, X2, y2, learningRate)
# -
# Find the decision-boundary parameters with `fmin_tnc`
result2 = opt.fmin_tnc(func=costReg, x0=theta2, fprime=gradientReg, args=(X2, y2, learningRate))
theta_min = result2[0]
# จากพารามิเตอร์ที่ได้มา ลอง plot ดู decision boundary ดังนี้ (ที่ power = 6)
#
# $$
# \begin{align}
# z = \theta_0 + \theta_1x_1 + \theta_2x_2 + \theta_3x_1^2 + \theta_4x_1x_2 + \theta_5x_2^2 + \theta_6x_1^3 + \theta_7x_1^2x_2 + \theta_8x_1x_2^2 + \theta_9x_2^3 + \theta_{10}x_1^4 + \theta_{11}x_1^3x_2 + \theta_{12}x_1^2x_2^2 + \theta_{13}x_1x_2^3 + \theta_{14}x_2^4 + \theta_{15}x_1^5 + \theta_{16}x_1^4x_2^1 + \theta_{17}x_1^3x_2^2 + \theta_{18}x_1^2x_2^3 + \theta_{19}x_1x_2^4 + \theta_{20}x_2^5 + \theta_{21}x_1^6 + \theta_{22}x_1^5x_2^1 + \theta_{23}x_1^4x_2^2 + \theta_{24}x_1^3x_2^3 + \theta_{25}x_1^2x_2^4 + \theta_{26}x_1x_2^5 + \theta_{27}x_2^6
# \end{align}
# $$
#
# ดูจากทรงมันยากที่เราจะวาดเส้น decision boundary จาก การแก้สมการนี้ คิดว่าที่เป็นไปได้ก็คือ แทน x1,x2 ไปเลยในช่วงทั้งหมด แล้วน่าจะเห็นเส้นที่ z = 0 เส้นนั้นแหละคือ decision boundary
def plotDecisionBoundary(theta):
    """Scatter the accepted/rejected points and overlay the decision boundary.

    Evaluates z = theta . mapFeature(6, x1, x2) on a coarse grid over
    [-1, 1.5) and draws the z == 0 contour in yellow.  Relies on the
    module-level `positive`/`negative` DataFrames and `mapFeature`.

    NOTE(review): z is filled as z[index_of_test1, index_of_test2], but
    meshgrid/contour expect row = Y, column = X — verify the orientation
    (the near-symmetric grid and data make a transpose hard to spot).
    """
    # Here is the grid range
    test1 = np.arange(-1,1.5,0.1)
    test2 = np.arange(-1,1.5,0.1)
    z = np.zeros((len(test1),len(test2)))
    # Evaluate z = theta*x over the grid
    for t1 in range(len(test1)):
        for t2 in range(len(test2)):
            z[t1,t2] = mapFeature(6,np.array([test1[t1]]),np.array([test2[t2]]) ).values.dot(theta)[0]
    T1, T2 = np.meshgrid(test1, test2)
    fig, ax = plt.subplots(figsize=(8,5))
    # Data Plot
    ax.scatter(positive['Test 1'], positive['Test 2'], s=50, c='b', marker='o', label='Accepted')
    ax.scatter(negative['Test 1'], negative['Test 2'], s=50, c='r', marker='x', label='Rejected')
    # Decision Boundary (contour at level 0 — presumably the z == 0 curve; confirm
    # how matplotlib interprets a float scalar for the `levels` argument)
    CS = plt.contour(T1, T2, z,0.00000000,colors='y')
    ax.legend()
    ax.set_xlabel('Test 1 Score')
    ax.set_ylabel('Test 2 Score')
    plt.show()
plotDecisionBoundary(theta_min)
# In summary: at lambda = 1 the decision boundary looks like the figure above.
# ## Predict
# After obtaining the decision-boundary parameters, use them to predict whether the outcome is 0 or 1, as follows:
#
# $
# h_{\theta}(x) = g(z) = \frac{1}{1 + e^{-z}}
# $
#
# If $z>0$ then $g(z)$ tends toward 1; conversely, if $z<0$ then $g(z)$ tends toward 0.
def predict(theta, X):
    """Return a boolean array: True where the model predicts class 1.

    The decision rule h(x) >= 0.5 is equivalent to z = X . theta^T >= 0,
    so no sigmoid evaluation is needed.
    """
    return X.dot(theta.T) >= 0
# Evaluate training-set accuracy of the fitted model.
theta_min = np.matrix(result2[0])
predictions = predict(theta_min, X2)
correct = (y2 == predictions)
# BUG FIX: the original computed `sum(correct)[0,0] % len(correct)` — the
# modulo operator reported "count of correct predictions mod m" (i.e. just
# the raw count, mislabelled as a percentage).  Divide and scale instead.
accuracy = sum(correct)[0,0] / len(correct) * 100
print('accuracy = {0}%'.format(accuracy))
# จากโปรแกรมข้างบนเราทดลองที่ lambda = 1 อย่างเดียว อยากรู้ว่า decision boundary จะเป็นอย่างไรเมื่อ lambda เป็นค่าอื่นๆ
#
# ก่อนอื่นเราฟังชั่นเก่ามาโมก่อน
def plotDecisionBoundaryVaryLambda(X,y,lamb):
    """Refit the regularised model with regularisation strength *lamb*
    and plot the resulting decision boundary over the data.

    Same grid/contour approach as `plotDecisionBoundary`, but the
    parameters are re-optimised here (via fmin_tnc on costReg /
    gradientReg) instead of being passed in.  Relies on module-level
    `positive`/`negative`, `mapFeature`, `costReg`, `gradientReg`, `opt`.
    """
    theta2 = np.zeros(len(X[0]))
    result2 = opt.fmin_tnc(func=costReg, x0=theta2, fprime=gradientReg, args=(X, y, lamb))
    theta_min = result2[0]
    # Here is the grid range
    test1 = np.arange(-1,1.5,0.1)
    test2 = np.arange(-1,1.5,0.1)
    z = np.zeros((len(test1),len(test2)))
    # Evaluate z = theta*x over the grid
    for t1 in range(len(test1)):
        for t2 in range(len(test2)):
            z[t1,t2] = mapFeature(6,np.array([test1[t1]]),np.array([test2[t2]]) ).values.dot(theta_min)[0]
    T1, T2 = np.meshgrid(test1, test2)
    fig, ax = plt.subplots(figsize=(8,5))
    # Data Plot
    ax.scatter(positive['Test 1'], positive['Test 2'], s=50, c='b', marker='o', label='Accepted')
    ax.scatter(negative['Test 1'], negative['Test 2'], s=50, c='r', marker='x', label='Rejected')
    # Decision Boundary (z == 0 contour)
    CS = plt.contour(T1, T2, z,0.00000000,colors='y')
    ax.legend()
    ax.set_xlabel('Test 1 Score')
    ax.set_ylabel('Test 2 Score')
    plt.show()
# At lambda = 0 (no regularisation)
plotDecisionBoundaryVaryLambda(X2,y2,0)
# Notice that it overfits slightly.
#
# At lambda = 100
plotDecisionBoundaryVaryLambda(X2,y2,100)
# Notice that it underfits badly.
| 6 Regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Recurrent Neural Nets
#
# This notebook explores RNN for flow prediction
import datetime
import calendar
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN
# # Load and prepare data
project_folder = '../../datasets/thorium-medium/'
# Load flow and rainfall series, index by timestamp, fill gaps with 0,
# and resample both onto a common 5-minute grid (forward-fill).
flow = pd.read_csv(project_folder + 'flow1.csv', parse_dates=['time'])
flow = flow.set_index('time')['flow'].fillna(0)
flow = flow.resample('5T').pad()
rainfall = pd.read_csv(project_folder + 'rainfall1.csv', parse_dates=['time'])
rainfall = rainfall.set_index('time')['rainfall'].fillna(0)
rainfall = rainfall.resample('5T').pad()
# Keep only timestamps present in both series.
flow_rain = pd.concat([flow, rainfall], axis=1).dropna()
print(flow_rain.head())
print(flow_rain.tail())
# Replace DateTime with some features
flow_rain['day'] = flow_rain.index.map(lambda x : x.dayofyear)
flow_rain['minute'] = flow_rain.index.map(lambda x : x.hour*60 + x.minute)
input_data = flow_rain[['day', 'minute', 'flow', 'rainfall']]
input_data.head()
# # Extract features
#
# Keras RNN model as an input requires tensor with the following dimensions:
# ```
# (batch_size, #steps, #features)
# ```
# For this notebook we will use the following parameters
input_steps = 3*12 # 3 hours
output_steps = 24*12 # 24 hours
feature_count = 2 # time of the day and flow
# NOTE(review): output_steps and feature_count are not used below in this
# chunk — presumably consumed by the model definition further on; confirm.
# ## Create dataset
#
# Convert input_data into input required by Keras and target value tensor
batch_size = input_data.shape[0] - input_steps
# We will use only 2 input features
a = input_data[['minute', 'flow']].values
# Sliding windows: X[i] covers rows i .. i+input_steps-1; the target y[i]
# is the flow at the step immediately after each window.
X = np.array([a[i:i+input_steps, :] for i in range(batch_size)])
y = input_data.flow.values[input_steps:]
print(X.shape)
print(y.shape)
# # Create Keras model
| notebooks/tensorflow/RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Useful additional packages
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
from math import pi
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import available_backends, execute, register, get_backend
from qiskit.tools.visualization import circuit_drawer
from qiskit.tools.qi.qi import state_fidelity
from qiskit import IBMQ
from qiskit import Aer
from qiskit.wrapper.jupyter import *
# import state tomography functions
from qiskit.tools.visualization import plot_histogram, plot_state
# IBM Q Experience credentials — fill in your personal API token.
# NOTE(review): never commit a real token to version control.
QX_TOKEN = ""
QX_URL = "https://quantumexperience.ng.bluemix.net/api"
IBMQ.enable_account(QX_TOKEN, QX_URL)
IBMQ.backends()
# Use the 5-qubit 'ibmqx4' device for all jobs below.
backend5q = IBMQ.get_backend('ibmqx4')
# +
# Test cSWAP only
# Create a Quantum Register with 3 qubit
q = QuantumRegister(3)
c = ClassicalRegister(3)
# Create Quantum circuits for Testing simple QF
qcs = QuantumCircuit(q,c)
# Prepare initial state: control in superposition, qubit 1 flipped to |1>.
qcs.h(q[0])
qcs.x(q[1])
# Controlled-SWAP of qubits 1 and 2, conditioned on qubit 0.
qcs.cswap(q[0],q[1],q[2])
# Measure
qcs.measure(q[0], c[0])
qcs.measure(q[1], c[1])
qcs.measure(q[2], c[2])
circuit_drawer(qcs)
# -
# Run on the real device and plot the outcome distribution.
test_job=execute(qcs,backend5q,shots=8192,max_credits=5)
test_job.status()
test_result=test_job.result()
plot_histogram(test_result.get_counts(qcs))
def qf1_rtest(axis, theta):
    """Build the 3-qubit controlled-SWAP rotation-sweep circuit.

    Qubit 1 is rotated by *theta* about *axis* ('x', 'y' or 'z'); any
    other axis string applies no rotation.  The circuit sandwiches an
    identity + Hadamard between two controlled-SWAPs and measures only
    qubit 1 into a single classical bit.
    """
    qreg = QuantumRegister(3)
    creg = ClassicalRegister(1)
    circ = QuantumCircuit(qreg, creg)
    # Control qubit in superposition, then rotate the probe qubit.
    circ.h(qreg[0])
    rotations = {'x': circ.rx, 'y': circ.ry, 'z': circ.rz}
    if axis in rotations:
        rotations[axis](theta, qreg[1])
    circ.barrier()
    circ.cswap(qreg[0], qreg[1], qreg[2])
    circ.barrier()
    circ.iden(qreg[1])
    circ.h(qreg[2])
    circ.barrier()
    circ.cswap(qreg[0], qreg[1], qreg[2])
    # Measure only the probe qubit.
    circ.measure(qreg[1], creg[0])
    return circ
# Sweep angles from 0 to 2*pi in pi/8 steps (17 points).
t=np.arange(0,2*pi+pi/8,pi/8)
print(t)
QF_test = qf1_rtest('y',t[0])
#Draw quantum circuit
circuit_drawer(QF_test)
# One circuit per sweep angle for the y-rotation.
ry_0=qf1_rtest('y',t[0])
ry_1=qf1_rtest('y',t[1])
ry_2=qf1_rtest('y',t[2])
ry_3=qf1_rtest('y',t[3])
ry_4=qf1_rtest('y',t[4])
ry_5=qf1_rtest('y',t[5])
ry_6=qf1_rtest('y',t[6])
ry_7=qf1_rtest('y',t[7])
ry_8=qf1_rtest('y',t[8])
ry_9=qf1_rtest('y',t[9])
ry_10=qf1_rtest('y',t[10])
ry_11=qf1_rtest('y',t[11])
ry_12=qf1_rtest('y',t[12])
ry_13=qf1_rtest('y',t[13])
ry_14=qf1_rtest('y',t[14])
ry_15=qf1_rtest('y',t[15])
ry_16=qf1_rtest('y',t[16])
rtest_y0=execute(ry_0,backend5q,shots=8192)
rtest_y1=execute(ry_1,backend5q,shots=8192)
rtest_y2=execute(ry_2,backend5q,shots=8192)
# NOTE(review): the next two lines reference jobs (rtest_y15, rtest_y5) that
# are only defined further down — the notebook cells were executed out of
# order; straight top-to-bottom execution would raise NameError here.
rtest_y15.status()
y5=rtest_y5.result().get_counts(ry_5)
print(y5)
rtest_y3=execute(ry_3,backend5q,shots=8192)
rtest_y4=execute(ry_4,backend5q,shots=8192)
rtest_y5=execute(ry_5,backend5q,shots=8192)
rtest_y6=execute(ry_6,backend5q,shots=8192)
rtest_y7=execute(ry_7,backend5q,shots=8192)
rtest_y8=execute(ry_8,backend5q,shots=8192)
rtest_y9=execute(ry_9,backend5q,shots=8192)
rtest_y10=execute(ry_10,backend5q,shots=8192)
rtest_y11=execute(ry_11,backend5q,shots=8192)
rtest_y12=execute(ry_12,backend5q,shots=8192)
rtest_y13=execute(ry_13,backend5q,shots=8192)
rtest_y14=execute(ry_14,backend5q,shots=8192)
rtest_y15=execute(ry_15,backend5q,shots=8192)
rtest_y16=execute(ry_16,backend5q,shots=8192)
y0=rtest_y0.result().get_counts(ry_0)
y1=rtest_y1.result().get_counts(ry_1)
y2=rtest_y2.result().get_counts(ry_2)
y3=rtest_y3.result().get_counts(ry_3)
y4=rtest_y4.result().get_counts(ry_4)
y5=rtest_y5.result().get_counts(ry_5)
y6=rtest_y6.result().get_counts(ry_6)
y7=rtest_y7.result().get_counts(ry_7)
y8=rtest_y8.result().get_counts(ry_8)
y9=rtest_y9.result().get_counts(ry_9)
y10=rtest_y10.result().get_counts(ry_10)
y11=rtest_y11.result().get_counts(ry_11)
y12=rtest_y12.result().get_counts(ry_12)
y13=rtest_y13.result().get_counts(ry_13)
y14=rtest_y14.result().get_counts(ry_14)
y15=rtest_y15.result().get_counts(ry_15)
y16=rtest_y16.result().get_counts(ry_16)
# Signed count difference (#'0' - #'1') per angle.
# NOTE(review): indexing yN['0']/yN['1'] raises KeyError if either outcome
# never occurs — presumably safe at 8192 shots on a noisy device, but verify.
RyMxz=np.array([y0['0']-y0['1'],y1['0']-y1['1'],y2['0']-y2['1'],y3['0']-y3['1'],y4['0']-y4['1'],y5['0']-y5['1'],
                y6['0']-y6['1'],y7['0']-y7['1'],y8['0']-y8['1'],y9['0']-y9['1'],y10['0']-y10['1'],y11['0']-y11['1'],
                y12['0']-y12['1'],y13['0']-y13['1'],y14['0']-y14['1'],y15['0']-y15['1'],y16['0']-y16['1']])
# Theoretical curves for comparison with the measured expectation values.
x=np.arange(0,2*pi+0.0001,pi/8)
RyMxz_th=(np.cos(x)+np.sin(x))/2
RxMxz_th=np.cos(x)/2
# Repeat the sweep with x-rotations on the probe qubit.
rx_0=qf1_rtest('x',t[0])
rx_1=qf1_rtest('x',t[1])
rx_2=qf1_rtest('x',t[2])
rx_3=qf1_rtest('x',t[3])
rx_4=qf1_rtest('x',t[4])
rx_5=qf1_rtest('x',t[5])
rx_6=qf1_rtest('x',t[6])
rx_7=qf1_rtest('x',t[7])
rx_8=qf1_rtest('x',t[8])
rx_9=qf1_rtest('x',t[9])
rx_10=qf1_rtest('x',t[10])
rx_11=qf1_rtest('x',t[11])
rx_12=qf1_rtest('x',t[12])
rx_13=qf1_rtest('x',t[13])
rx_14=qf1_rtest('x',t[14])
rx_15=qf1_rtest('x',t[15])
rx_16=qf1_rtest('x',t[16])
#Draw quantum circuit
circuit_drawer(rx_10)
rtest_x0=execute(rx_0,backend5q,shots=8192)
rtest_x1=execute(rx_1,backend5q,shots=8192)
rtest_x2=execute(rx_2,backend5q,shots=8192)
rtest_x3=execute(rx_3,backend5q,shots=8192)
rtest_x4=execute(rx_4,backend5q,shots=8192)
rtest_x5=execute(rx_5,backend5q,shots=8192)
rtest_x6=execute(rx_6,backend5q,shots=8192)
rtest_x7=execute(rx_7,backend5q,shots=8192)
rtest_x8=execute(rx_8,backend5q,shots=8192)
rtest_x9=execute(rx_9,backend5q,shots=8192)
rtest_x10=execute(rx_10,backend5q,shots=8192)
rtest_x11=execute(rx_11,backend5q,shots=8192)
rtest_x12=execute(rx_12,backend5q,shots=8192)
rtest_x13=execute(rx_13,backend5q,shots=8192)
rtest_x14=execute(rx_14,backend5q,shots=8192)
rtest_x15=execute(rx_15,backend5q,shots=8192)
rtest_x16=execute(rx_16,backend5q,shots=8192)
# Poll the last job before collecting results.
rtest_x16.status()
x0=rtest_x0.result().get_counts(rx_0)
x1=rtest_x1.result().get_counts(rx_1)
x2=rtest_x2.result().get_counts(rx_2)
x3=rtest_x3.result().get_counts(rx_3)
x4=rtest_x4.result().get_counts(rx_4)
x5=rtest_x5.result().get_counts(rx_5)
x6=rtest_x6.result().get_counts(rx_6)
x7=rtest_x7.result().get_counts(rx_7)
x8=rtest_x8.result().get_counts(rx_8)
x9=rtest_x9.result().get_counts(rx_9)
x10=rtest_x10.result().get_counts(rx_10)
x11=rtest_x11.result().get_counts(rx_11)
x12=rtest_x12.result().get_counts(rx_12)
x13=rtest_x13.result().get_counts(rx_13)
x14=rtest_x14.result().get_counts(rx_14)
x15=rtest_x15.result().get_counts(rx_15)
x16=rtest_x16.result().get_counts(rx_16)
# Signed count difference (#'0' - #'1') per angle for the x-rotation sweep.
RxMxz=np.array([x0['0']-x0['1'],x1['0']-x1['1'],x2['0']-x2['1'],x3['0']-x3['1'],x4['0']-x4['1'],x5['0']-x5['1'],
                x6['0']-x6['1'],x7['0']-x7['1'],x8['0']-x8['1'],x9['0']-x9['1'],x10['0']-x10['1'],x11['0']-x11['1'],
                x12['0']-x12['1'],x13['0']-x13['1'],x14['0']-x14['1'],x15['0']-x15['1'],x16['0']-x16['1']])
# Measured (dashed) vs theoretical (solid) curves for the y and x sweeps.
plt.plot(x,2*RyMxz/8192,'--b',x,RyMxz_th,'b',x,2*RxMxz/8192,'--g',x,RxMxz_th,'g')
plt.show()
rz_0=qf1_rtest('z',t[0])
rz_1=qf1_rtest('z',t[1])
rz_2=qf1_rtest('z',t[2])
rz_3=qf1_rtest('z',t[3])
rz_4=qf1_rtest('z',t[4])
rz_5=qf1_rtest('z',t[5])
rz_6=qf1_rtest('z',t[6])
rz_7=qf1_rtest('z',t[7])
rz_8=qf1_rtest('z',t[8])
rz_9=qf1_rtest('z',t[9])
rz_10=qf1_rtest('z',t[10])
rz_11=qf1_rtest('z',t[11])
rz_12=qf1_rtest('z',t[12])
rz_13=qf1_rtest('z',t[13])
rz_14=qf1_rtest('z',t[14])
rz_15=qf1_rtest('z',t[15])
rz_16=qf1_rtest('z',t[16])
#Draw quantum circuit
circuit_drawer(rz_12)
# Submit the z-rotation circuits to the 5-qubit backend (8192 shots each).
rtest_z0=execute(rz_0,backend5q,shots=8192)
rtest_z1=execute(rz_1,backend5q,shots=8192)
rtest_z2=execute(rz_2,backend5q,shots=8192)
rtest_z3=execute(rz_3,backend5q,shots=8192)
rtest_z4=execute(rz_4,backend5q,shots=8192)
rtest_z5=execute(rz_5,backend5q,shots=8192)
rtest_z6=execute(rz_6,backend5q,shots=8192)
rtest_z7=execute(rz_7,backend5q,shots=8192)
rtest_z8=execute(rz_8,backend5q,shots=8192)
rtest_z9=execute(rz_9,backend5q,shots=8192)
rtest_z10=execute(rz_10,backend5q,shots=8192)
rtest_z11=execute(rz_11,backend5q,shots=8192)
rtest_z12=execute(rz_12,backend5q,shots=8192)
rtest_z13=execute(rz_13,backend5q,shots=8192)
rtest_z14=execute(rz_14,backend5q,shots=8192)
rtest_z15=execute(rz_15,backend5q,shots=8192)
rtest_z16=execute(rz_16,backend5q,shots=8192)
# BUG FIX: rtest_z15.status() was previously called before rtest_z15 was
# assigned (a stale interactive-execution-order artifact that raises NameError
# when run top-to-bottom); poll the job only after submission.
rtest_z15.status()
# Fetch measurement counts for each z-rotation job (blocks until completion).
z0=rtest_z0.result().get_counts(rz_0)
z1=rtest_z1.result().get_counts(rz_1)
z2=rtest_z2.result().get_counts(rz_2)
z3=rtest_z3.result().get_counts(rz_3)
z4=rtest_z4.result().get_counts(rz_4)
z5=rtest_z5.result().get_counts(rz_5)
z6=rtest_z6.result().get_counts(rz_6)
z7=rtest_z7.result().get_counts(rz_7)
z8=rtest_z8.result().get_counts(rz_8)
z9=rtest_z9.result().get_counts(rz_9)
z10=rtest_z10.result().get_counts(rz_10)
z11=rtest_z11.result().get_counts(rz_11)
z12=rtest_z12.result().get_counts(rz_12)
z13=rtest_z13.result().get_counts(rz_13)
z14=rtest_z14.result().get_counts(rz_14)
z15=rtest_z15.result().get_counts(rz_15)
z16=rtest_z16.result().get_counts(rz_16)
# Count difference N('0') - N('1') per angle for the z-rotation runs.
RzMxz=np.array([z0['0']-z0['1'],z1['0']-z1['1'],z2['0']-z2['1'],z3['0']-z3['1'],z4['0']-z4['1'],z5['0']-z5['1'],
z6['0']-z6['1'],z7['0']-z7['1'],z8['0']-z8['1'],z9['0']-z9['1'],z10['0']-z10['1'],z11['0']-z11['1'],
z12['0']-z12['1'],z13['0']-z13['1'],z14['0']-z14['1'],z15['0']-z15['1'],z16['0']-z16['1']])
# Experiment (dashed) vs theory (solid) for all three rotation axes.
plt.plot(x,RyMxz/8192,'--b',x,RyMxz_th,'b',x,RxMxz/8192,'--g',x,RxMxz_th,'g',x,RzMxz/8192,'--r',x,np.ones(len(x))/2,'r')
plt.show()
# Now run $\theta$ backwards, y-rotation (resubmit the same circuits in reverse
# angle order to check for drift / order dependence).
rtest_y16_2=execute(ry_16,backend5q,shots=8192)
rtest_y14_2=execute(ry_14,backend5q,shots=8192)
rtest_y13_2=execute(ry_13,backend5q,shots=8192)
rtest_y15_2=execute(ry_15,backend5q,shots=8192)
rtest_y12_2=execute(ry_12,backend5q,shots=8192)
rtest_y11_2=execute(ry_11,backend5q,shots=8192)
rtest_y10_2=execute(ry_10,backend5q,shots=8192)
rtest_y9_2=execute(ry_9,backend5q,shots=8192)
rtest_y8_2=execute(ry_8,backend5q,shots=8192)
rtest_y7_2=execute(ry_7,backend5q,shots=8192)
rtest_y6_2=execute(ry_6,backend5q,shots=8192)
rtest_y5_2=execute(ry_5,backend5q,shots=8192)
rtest_y4_2=execute(ry_4,backend5q,shots=8192)
rtest_y3_2=execute(ry_3,backend5q,shots=8192)
rtest_y2_2=execute(ry_2,backend5q,shots=8192)
rtest_y1_2=execute(ry_1,backend5q,shots=8192)
rtest_y0_2=execute(ry_0,backend5q,shots=8192)
# BUG FIX: rtest_y0_2 was polled and read before it was assigned (stale
# interactive-cell ordering, NameError when run top-to-bottom); check the last
# submitted job only after submission.
rtest_y0_2.status()
rtest_y0_2.result().get_counts(ry_0)
# Fetch counts from the reversed-order y-rotation runs.
y02=rtest_y0_2.result().get_counts(ry_0)
y12=rtest_y1_2.result().get_counts(ry_1)
y22=rtest_y2_2.result().get_counts(ry_2)
y32=rtest_y3_2.result().get_counts(ry_3)
y42=rtest_y4_2.result().get_counts(ry_4)
y52=rtest_y5_2.result().get_counts(ry_5)
y62=rtest_y6_2.result().get_counts(ry_6)
y72=rtest_y7_2.result().get_counts(ry_7)
y82=rtest_y8_2.result().get_counts(ry_8)
y92=rtest_y9_2.result().get_counts(ry_9)
y102=rtest_y10_2.result().get_counts(ry_10)
y112=rtest_y11_2.result().get_counts(ry_11)
y122=rtest_y12_2.result().get_counts(ry_12)
y132=rtest_y13_2.result().get_counts(ry_13)
y142=rtest_y14_2.result().get_counts(ry_14)
y152=rtest_y15_2.result().get_counts(ry_15)
y162=rtest_y16_2.result().get_counts(ry_16)
# Count difference N('0') - N('1') per angle for the second y-rotation pass.
RyMxz2=np.array([y02['0']-y02['1'],y12['0']-y12['1'],y22['0']-y22['1'],y32['0']-y32['1'],y42['0']-y42['1'],
y52['0']-y52['1'],y62['0']-y62['1'],y72['0']-y72['1'],y82['0']-y82['1'],y92['0']-y92['1'],
y102['0']-y102['1'],y112['0']-y112['1'],y122['0']-y122['1'],y132['0']-y132['1'],y142['0']-y142['1'],
y152['0']-y152['1'],y162['0']-y162['1']])
# Compare two y-axis rotation experimental results
plt.plot(x,RyMxz/8192,'--b',x,RyMxz2/8192,'--g')
plt.show()
# Run $\theta$ backwards, x-rotation.
rtest_x16_2=execute(rx_16,backend5q,shots=8192)
rtest_x15_2=execute(rx_15,backend5q,shots=8192)
rtest_x14_2=execute(rx_14,backend5q,shots=8192)
rtest_x13_2=execute(rx_13,backend5q,shots=8192)
rtest_x12_2=execute(rx_12,backend5q,shots=8192)
rtest_x11_2=execute(rx_11,backend5q,shots=8192)
rtest_x10_2=execute(rx_10,backend5q,shots=8192)
rtest_x9_2=execute(rx_9,backend5q,shots=8192)
rtest_x8_2=execute(rx_8,backend5q,shots=8192)
rtest_x7_2=execute(rx_7,backend5q,shots=8192)
rtest_x6_2=execute(rx_6,backend5q,shots=8192)
# BUG FIX: rx_5 was submitted twice on consecutive lines; the duplicate job
# submission was redundant (its handle was immediately overwritten) and is removed.
rtest_x5_2=execute(rx_5,backend5q,shots=8192)
rtest_x3_2=execute(rx_3,backend5q,shots=8192)
rtest_x4_2=execute(rx_4,backend5q,shots=8192)
rtest_x2_2=execute(rx_2,backend5q,shots=8192)
rtest_x1_2=execute(rx_1,backend5q,shots=8192)
rtest_x0_2=execute(rx_0,backend5q,shots=8192)
# BUG FIX: rtest_x0_2 was polled/read before assignment (stale interactive
# ordering, NameError when run top-to-bottom); moved after submission.
rtest_x0_2.status()
rtest_x0_2.result().get_counts(rx_0)
# Fetch counts from the reversed-order x-rotation runs.
x02=rtest_x0_2.result().get_counts(rx_0)
x12=rtest_x1_2.result().get_counts(rx_1)
x22=rtest_x2_2.result().get_counts(rx_2)
x32=rtest_x3_2.result().get_counts(rx_3)
x42=rtest_x4_2.result().get_counts(rx_4)
x52=rtest_x5_2.result().get_counts(rx_5)
x62=rtest_x6_2.result().get_counts(rx_6)
x72=rtest_x7_2.result().get_counts(rx_7)
x82=rtest_x8_2.result().get_counts(rx_8)
x92=rtest_x9_2.result().get_counts(rx_9)
x102=rtest_x10_2.result().get_counts(rx_10)
x112=rtest_x11_2.result().get_counts(rx_11)
x122=rtest_x12_2.result().get_counts(rx_12)
x132=rtest_x13_2.result().get_counts(rx_13)
x142=rtest_x14_2.result().get_counts(rx_14)
x152=rtest_x15_2.result().get_counts(rx_15)
x162=rtest_x16_2.result().get_counts(rx_16)
# Count difference N('0') - N('1') per angle for the second x-rotation pass.
RxMxz2=np.array([x02['0']-x02['1'],x12['0']-x12['1'],x22['0']-x22['1'],x32['0']-x32['1'],x42['0']-x42['1'],
x52['0']-x52['1'],x62['0']-x62['1'],x72['0']-x72['1'],x82['0']-x82['1'],x92['0']-x92['1'],
x102['0']-x102['1'],x112['0']-x112['1'],x122['0']-x122['1'],x132['0']-x132['1'],x142['0']-x142['1'],
x152['0']-x152['1'],x162['0']-x162['1']])
# Compare two x-axis rotation results
plt.plot(x,RxMxz/8192,'--b',x,RxMxz2/8192,'--g')
plt.show()
# Run $\theta$ backwards, z-axis rotation.
rtest_z16_2=execute(rz_16,backend5q,shots=8192)
rtest_z15_2=execute(rz_15,backend5q,shots=8192)
rtest_z14_2=execute(rz_14,backend5q,shots=8192)
rtest_z13_2=execute(rz_13,backend5q,shots=8192)
rtest_z12_2=execute(rz_12,backend5q,shots=8192)
rtest_z11_2=execute(rz_11,backend5q,shots=8192)
rtest_z10_2=execute(rz_10,backend5q,shots=8192)
rtest_z9_2=execute(rz_9,backend5q,shots=8192)
rtest_z8_2=execute(rz_8,backend5q,shots=8192)
rtest_z7_2=execute(rz_7,backend5q,shots=8192)
rtest_z6_2=execute(rz_6,backend5q,shots=8192)
rtest_z5_2=execute(rz_5,backend5q,shots=8192)
rtest_z4_2=execute(rz_4,backend5q,shots=8192)
rtest_z3_2=execute(rz_3,backend5q,shots=8192)
rtest_z2_2=execute(rz_2,backend5q,shots=8192)
rtest_z1_2=execute(rz_1,backend5q,shots=8192)
rtest_z0_2=execute(rz_0,backend5q,shots=8192)
# BUG FIX: rtest_z1_2/rtest_z2_2 were polled/read before assignment (stale
# interactive ordering, NameError when run top-to-bottom); moved after submission.
rtest_z1_2.status()
rtest_z2_2.result().get_counts(rz_2)
# Fetch counts from the reversed-order z-rotation runs.
z02=rtest_z0_2.result().get_counts(rz_0)
z12=rtest_z1_2.result().get_counts(rz_1)
z22=rtest_z2_2.result().get_counts(rz_2)
z32=rtest_z3_2.result().get_counts(rz_3)
z42=rtest_z4_2.result().get_counts(rz_4)
z52=rtest_z5_2.result().get_counts(rz_5)
z62=rtest_z6_2.result().get_counts(rz_6)
z72=rtest_z7_2.result().get_counts(rz_7)
z82=rtest_z8_2.result().get_counts(rz_8)
z92=rtest_z9_2.result().get_counts(rz_9)
z102=rtest_z10_2.result().get_counts(rz_10)
z112=rtest_z11_2.result().get_counts(rz_11)
z122=rtest_z12_2.result().get_counts(rz_12)
z132=rtest_z13_2.result().get_counts(rz_13)
z142=rtest_z14_2.result().get_counts(rz_14)
z152=rtest_z15_2.result().get_counts(rz_15)
z162=rtest_z16_2.result().get_counts(rz_16)
# Count difference N('0') - N('1') per angle for the second z-rotation pass.
RzMxz2=np.array([z02['0']-z02['1'],z12['0']-z12['1'],z22['0']-z22['1'],z32['0']-z32['1'],z42['0']-z42['1'],
z52['0']-z52['1'],z62['0']-z62['1'],z72['0']-z72['1'],z82['0']-z82['1'],z92['0']-z92['1'],
z102['0']-z102['1'],z112['0']-z112['1'],z122['0']-z122['1'],z132['0']-z132['1'],z142['0']-z142['1'],
z152['0']-z152['1'],z162['0']-z162['1']])
# Forward vs backward z-rotation passes.
plt.plot(x,RzMxz/8192,'--r',x,RzMxz2/8192,'--b')
plt.show()
# Theory curves for reference.
plt.plot(x,RxMxz_th,'r',x,np.ones(len(x))/2,'b',x,RyMxz_th,'g')
plt.show()
# Experimental results (averaged over the forward and backward passes)
plt.plot(x,(RxMxz+RxMxz2)/2/8192,'--r',x,(RzMxz+RzMxz2)/2/8192,'--b',x,(RyMxz+RyMxz2)/2/8192,'--g')
plt.show()
# NOTE(review): this plot is an exact duplicate of the one above.
plt.plot(x,(RxMxz+RxMxz2)/2/8192,'--r',x,(RzMxz+RzMxz2)/2/8192,'--b',x,(RyMxz+RyMxz2)/2/8192,'--g')
plt.show()
# Bare expressions: display the raw count-difference arrays in the notebook.
RxMxz
RzMxz
RyMxz
# Now run $\theta$ in random order: 3, 11, 8, 1, 14, 13, 16, 9, 2, 5, 4, 12, 10, 6, 7, 15, 0
rtest_x3r=execute(rx_3,backend5q,shots=8192)
rtest_y3r=execute(ry_3,backend5q,shots=8192)
rtest_z3r=execute(rz_3,backend5q,shots=8192)
rtest_x11r=execute(rx_11,backend5q,shots=8192)
rtest_y11r=execute(ry_11,backend5q,shots=8192)
rtest_z11r=execute(rz_11,backend5q,shots=8192)
rtest_x8r=execute(rx_8,backend5q,shots=8192)
rtest_y8r=execute(ry_8,backend5q,shots=8192)
rtest_z8r=execute(rz_8,backend5q,shots=8192)
rtest_x1r=execute(rx_1,backend5q,shots=8192)
rtest_y1r=execute(ry_1,backend5q,shots=8192)
rtest_z1r=execute(rz_1,backend5q,shots=8192)
rtest_x14r=execute(rx_14,backend5q,shots=8192)
rtest_y14r=execute(ry_14,backend5q,shots=8192)
rtest_z14r=execute(rz_14,backend5q,shots=8192)
rtest_x13r=execute(rx_13,backend5q,shots=8192)
rtest_y13r=execute(ry_13,backend5q,shots=8192)
rtest_z13r=execute(rz_13,backend5q,shots=8192)
rtest_x16r=execute(rx_16,backend5q,shots=8192)
rtest_y16r=execute(ry_16,backend5q,shots=8192)
rtest_z16r=execute(rz_16,backend5q,shots=8192)
rtest_x9r=execute(rx_9,backend5q,shots=8192)
rtest_y9r=execute(ry_9,backend5q,shots=8192)
rtest_z9r=execute(rz_9,backend5q,shots=8192)
rtest_x2r=execute(rx_2,backend5q,shots=8192)
rtest_y2r=execute(ry_2,backend5q,shots=8192)
rtest_z2r=execute(rz_2,backend5q,shots=8192)
rtest_x5r=execute(rx_5,backend5q,shots=8192)
rtest_y5r=execute(ry_5,backend5q,shots=8192)
rtest_z5r=execute(rz_5,backend5q,shots=8192)
rtest_x4r=execute(rx_4,backend5q,shots=8192)
rtest_y4r=execute(ry_4,backend5q,shots=8192)
rtest_z4r=execute(rz_4,backend5q,shots=8192)
rtest_x12r=execute(rx_12,backend5q,shots=8192)
rtest_y12r=execute(ry_12,backend5q,shots=8192)
rtest_z12r=execute(rz_12,backend5q,shots=8192)
rtest_x10r=execute(rx_10,backend5q,shots=8192)
rtest_y10r=execute(ry_10,backend5q,shots=8192)
rtest_z10r=execute(rz_10,backend5q,shots=8192)
rtest_x6r=execute(rx_6,backend5q,shots=8192)
rtest_y6r=execute(ry_6,backend5q,shots=8192)
rtest_z6r=execute(rz_6,backend5q,shots=8192)
rtest_x7r=execute(rx_7,backend5q,shots=8192)
rtest_y7r=execute(ry_7,backend5q,shots=8192)
rtest_z7r=execute(rz_7,backend5q,shots=8192)
rtest_x15r=execute(rx_15,backend5q,shots=8192)
rtest_y15r=execute(ry_15,backend5q,shots=8192)
rtest_z15r=execute(rz_15,backend5q,shots=8192)
rtest_x0r=execute(rx_0,backend5q,shots=8192)
rtest_y0r=execute(ry_0,backend5q,shots=8192)
rtest_z0r=execute(rz_0,backend5q,shots=8192)
# BUG FIX: rtest_y16r was polled/read before assignment (stale interactive
# ordering, NameError when run top-to-bottom); moved after all submissions.
rtest_y16r.status()
rtest_y16r.result().get_counts(ry_16)
# Fetch counts from the randomized-order runs (all three rotation axes).
x0r=rtest_x0r.result().get_counts(rx_0)
x1r=rtest_x1r.result().get_counts(rx_1)
x2r=rtest_x2r.result().get_counts(rx_2)
x3r=rtest_x3r.result().get_counts(rx_3)
x4r=rtest_x4r.result().get_counts(rx_4)
x5r=rtest_x5r.result().get_counts(rx_5)
x6r=rtest_x6r.result().get_counts(rx_6)
x7r=rtest_x7r.result().get_counts(rx_7)
x8r=rtest_x8r.result().get_counts(rx_8)
x9r=rtest_x9r.result().get_counts(rx_9)
x10r=rtest_x10r.result().get_counts(rx_10)
x11r=rtest_x11r.result().get_counts(rx_11)
x12r=rtest_x12r.result().get_counts(rx_12)
x13r=rtest_x13r.result().get_counts(rx_13)
x14r=rtest_x14r.result().get_counts(rx_14)
x15r=rtest_x15r.result().get_counts(rx_15)
x16r=rtest_x16r.result().get_counts(rx_16)
y0r=rtest_y0r.result().get_counts(ry_0)
y1r=rtest_y1r.result().get_counts(ry_1)
y2r=rtest_y2r.result().get_counts(ry_2)
y3r=rtest_y3r.result().get_counts(ry_3)
y4r=rtest_y4r.result().get_counts(ry_4)
y5r=rtest_y5r.result().get_counts(ry_5)
y6r=rtest_y6r.result().get_counts(ry_6)
y7r=rtest_y7r.result().get_counts(ry_7)
y8r=rtest_y8r.result().get_counts(ry_8)
y9r=rtest_y9r.result().get_counts(ry_9)
y10r=rtest_y10r.result().get_counts(ry_10)
y11r=rtest_y11r.result().get_counts(ry_11)
y12r=rtest_y12r.result().get_counts(ry_12)
y13r=rtest_y13r.result().get_counts(ry_13)
y14r=rtest_y14r.result().get_counts(ry_14)
y15r=rtest_y15r.result().get_counts(ry_15)
y16r=rtest_y16r.result().get_counts(ry_16)
z0r=rtest_z0r.result().get_counts(rz_0)
z1r=rtest_z1r.result().get_counts(rz_1)
z2r=rtest_z2r.result().get_counts(rz_2)
z3r=rtest_z3r.result().get_counts(rz_3)
z4r=rtest_z4r.result().get_counts(rz_4)
z5r=rtest_z5r.result().get_counts(rz_5)
z6r=rtest_z6r.result().get_counts(rz_6)
z7r=rtest_z7r.result().get_counts(rz_7)
z8r=rtest_z8r.result().get_counts(rz_8)
z9r=rtest_z9r.result().get_counts(rz_9)
z10r=rtest_z10r.result().get_counts(rz_10)
z11r=rtest_z11r.result().get_counts(rz_11)
z12r=rtest_z12r.result().get_counts(rz_12)
z13r=rtest_z13r.result().get_counts(rz_13)
z14r=rtest_z14r.result().get_counts(rz_14)
z15r=rtest_z15r.result().get_counts(rz_15)
z16r=rtest_z16r.result().get_counts(rz_16)
# Count differences N('0') - N('1') per angle for the randomized pass.
RxMxzr=np.array([x0r['0']-x0r['1'],x1r['0']-x1r['1'],x2r['0']-x2r['1'],x3r['0']-x3r['1'],x4r['0']-x4r['1'],
x5r['0']-x5r['1'],x6r['0']-x6r['1'],x7r['0']-x7r['1'],x8r['0']-x8r['1'],x9r['0']-x9r['1'],
x10r['0']-x10r['1'],x11r['0']-x11r['1'],x12r['0']-x12r['1'],x13r['0']-x13r['1'],x14r['0']-x14r['1'],
x15r['0']-x15r['1'],x16r['0']-x16r['1']])
RyMxzr=np.array([y0r['0']-y0r['1'],y1r['0']-y1r['1'],y2r['0']-y2r['1'],y3r['0']-y3r['1'],y4r['0']-y4r['1'],
y5r['0']-y5r['1'],y6r['0']-y6r['1'],y7r['0']-y7r['1'],y8r['0']-y8r['1'],y9r['0']-y9r['1'],
y10r['0']-y10r['1'],y11r['0']-y11r['1'],y12r['0']-y12r['1'],y13r['0']-y13r['1'],y14r['0']-y14r['1'],
y15r['0']-y15r['1'],y16r['0']-y16r['1']])
RzMxzr=np.array([z0r['0']-z0r['1'],z1r['0']-z1r['1'],z2r['0']-z2r['1'],z3r['0']-z3r['1'],z4r['0']-z4r['1'],
z5r['0']-z5r['1'],z6r['0']-z6r['1'],z7r['0']-z7r['1'],z8r['0']-z8r['1'],z9r['0']-z9r['1'],
z10r['0']-z10r['1'],z11r['0']-z11r['1'],z12r['0']-z12r['1'],z13r['0']-z13r['1'],z14r['0']-z14r['1'],
z15r['0']-z15r['1'],z16r['0']-z16r['1']])
# Average over all three passes (forward, backward, random) per axis.
plt.plot(x,(RxMxz+RxMxz2+RxMxzr)/3/8192,'--r',x,(RzMxz+RzMxz2+RzMxzr)/3/8192,'--b',x,(RyMxz+RyMxz2+RyMxzr)/3/8192,'--g')
plt.show()
| QForking_NJP_IBMQ.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W1D5_Regularization/W1D5_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Tutorial 1: Regularization techniques part 1
# **Week 1, Day 5: Regularization**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>
#
# __Content editors:__ <NAME>, <NAME>
#
# __Production editors:__ <NAME>, <NAME>
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# ---
# # Tutorial Objectives
#
# 1. Big ANNs are efficient universal approximators due to their adaptive basis functions
# 2. ANNs memorize some but generalize well
# 3. Regularization as shrinkage of overparameterized models: early stopping
# + cellView="form"
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial
# Embed the slide deck hosted on OSF into the notebook.
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/mf79a/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# -
# ---
# # Setup
# Note that some of the code for today can take up to an hour to run. We have therefore "hidden" the code and shown the resulting outputs.
#
# + cellView="form"
# @title Install dependencies
# !sudo apt-get install -y ffmpeg --quiet
# !pip install imageio-ffmpeg --quiet
# !pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# generate airtable form (tracks tutorial interaction events)
atform = AirtableForm('appn7VdPRseSoMXEG','W1D5_T1','https://portal.neuromatchacademy.org/api/redirect/to/a76f99c1-9005-4566-8bcd-bed4e53d21f1')
# +
# Imports
from __future__ import print_function
import time
import copy
import torch
import pathlib
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from tqdm.auto import tqdm
from IPython.display import HTML, display
from torchvision import transforms
from torchvision.datasets import ImageFolder
# + cellView="form"
# @title Figure Settings
import ipywidgets as widgets
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# Apply the shared NMA matplotlib style sheet.
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# + cellView="form"
# @title Loading Animal Faces data
import requests, os
from zipfile import ZipFile
print("Start downloading and unzipping `AnimalFaces` dataset...")
name = 'afhq'
fname = f"{name}.zip"
url = f"https://osf.io/kgfvj/download"
# Download the archive only if it is not already present.
if not os.path.exists(fname):
  r = requests.get(url, allow_redirects=True)
  with open(fname, 'wb') as fh:
    fh.write(r.content)
# Extract and delete the archive once downloaded.
# NOTE(review): the HTTP status is never checked — a failed download would
# write an error page to the zip file; consider r.raise_for_status().
if os.path.exists(fname):
  with ZipFile(fname, 'r') as zfile:
    zfile.extractall(f".")
    os.remove(fname)
print("Download completed.")
# + cellView="form"
# @title Loading Animal Faces Randomized data
from IPython.display import clear_output
print("Start downloading and unzipping `Randomized AnimalFaces` dataset...")
names = ['afhq_random_32x32', 'afhq_10_32x32']
urls = ["https://osf.io/9sj7p/download",
        "https://osf.io/wvgkq/download"]
# Same download-if-missing / extract / cleanup pattern per dataset.
for i, name in enumerate(names):
  url = urls[i]
  fname = f"{name}.zip"
  if not os.path.exists(fname):
    r = requests.get(url, allow_redirects=True)
    with open(fname, 'wb') as fh:
      fh.write(r.content)
  if os.path.exists(fname):
    with ZipFile(fname, 'r') as zfile:
      zfile.extractall(f".")
      os.remove(fname)
print("Download completed.")
# + cellView="form"
# @title Plotting functions
def imshow(img):
  """Display a normalized CHW image tensor with matplotlib."""
  # Undo the (-1, 1) normalization applied by the data transforms.
  unnormalized = img / 2 + 0.5
  # Matplotlib expects HWC ordering; torch tensors are CHW.
  plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
  plt.axis(False)
  plt.show()
def plot_weights(norm, labels, ws, title='Weight Size Measurement'):
  """Bar-plot per-layer Frobenius norms with the total model norm as a reference line."""
  plt.figure(figsize=[8, 6])
  plt.title(title)
  plt.xlabel('Model Layers')
  plt.ylabel('Frobenius Norm Value')
  plt.bar(labels, ws)
  # Horizontal dashed line marking the whole-model norm for comparison.
  plt.axhline(y=norm, linewidth=1, color='r', ls='--', label='Total Model F-Norm')
  plt.legend()
  plt.show()
def early_stop_plot(train_acc_earlystop, val_acc_earlystop, best_epoch):
  """Plot train/validation accuracy curves and mark the early-stopping epoch."""
  plt.figure(figsize=(8, 6))
  # Validation dashed, training solid, both in red.
  plt.plot(val_acc_earlystop, label='Val - Early', c='red', ls='dashed')
  plt.plot(train_acc_earlystop, label='Train - Early', c='red', ls='solid')
  # Vertical marker at the epoch with the best validation accuracy.
  plt.axvline(x=best_epoch, c='green', ls='dashed',
              label='Epoch for Max Val Accuracy')
  plt.title('Early Stopping')
  plt.xlabel('Epoch')
  plt.ylabel('Accuracy (%)')
  plt.legend()
  plt.show()
# + cellView="form"
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL its critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
  """Seed the `random`, NumPy and (optionally) PyTorch RNGs for reproducibility.

  Args:
    seed: integer seed; when None a seed is drawn at random.
    seed_torch: when True also seed torch (CPU and all CUDA devices) and
      force deterministic cuDNN behavior.
  """
  seed = np.random.choice(2 ** 32) if seed is None else seed
  random.seed(seed)
  np.random.seed(seed)
  if seed_torch:
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
  print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
  """DataLoader `worker_init_fn`: derive per-worker numpy/random seeds from torch's seed."""
  derived_seed = torch.initial_seed() % 2**32
  random.seed(derived_seed)
  np.random.seed(derived_seed)
# + cellView="form"
# @title Set device (GPU or CPU). Execute `set_device()`
# especially if torch modules used.
# inform the user if the notebook uses GPU or CPU.
def set_device():
  """Return 'cuda' when a GPU is available, else 'cpu', printing a hint either way."""
  if torch.cuda.is_available():
    print("GPU is enabled in this notebook.")
    return "cuda"
  print("WARNING: For this notebook to perform best, "
        "if possible, in the menu under `Runtime` -> "
        "`Change runtime type.` select `GPU` ")
  return "cpu"
# -
# Fix the global seed and pick the compute device for the whole tutorial.
SEED = 2021
set_seed(seed=SEED)
DEVICE = set_device()
# ---
# # Section 0: Defining useful functions
# Let's start the tutorial by defining some functions which we will use frequently today, such as: `AnimalNet`, `train`, `test` and `main`.
# Network Class - Animal Faces
class AnimalNet(nn.Module):
  """3-layer MLP classifier: flattened 3x32x32 image -> 128 -> 32 -> 3 log-probabilities."""

  def __init__(self):
    super(AnimalNet, self).__init__()
    self.fc1 = nn.Linear(3 * 32 * 32, 128)
    self.fc2 = nn.Linear(128, 32)
    self.fc3 = nn.Linear(32, 3)

  def forward(self, x):
    # Flatten everything except the batch dimension.
    flat = x.view(x.shape[0], -1)
    hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
    # Log-probabilities over the 3 animal classes.
    return F.log_softmax(self.fc3(hidden), dim=1)
# The train function takes in the current model, along with the train_loader and loss function, and updates the parameters for a single pass of the entire dataset. The test function takes in the current model after every epoch and calculates the accuracy on the test dataset.
#
# +
def train(args, model, train_loader, optimizer,
          reg_function1=None, reg_function2=None, criterion=F.nll_loss):
  """Train `model` for one pass over `train_loader`, updating parameters in place.

  Args:
    args: dict with 'device' and, when regularizers are given, 'lambda' or
      'lambda1'/'lambda2' penalty weights.
    model: the network to train.
    train_loader: iterable of (data, target) batches.
    optimizer: torch optimizer over model.parameters().
    reg_function1, reg_function2: optional callables model -> penalty tensor.
    criterion: loss function (defaults to negative log-likelihood).

  Returns:
    The (mutated) trained model.
  """
  device = args['device']
  model.train()
  # FIX: the batch index from enumerate() was never used; iterate batches directly.
  for data, target in train_loader:
    data, target = data.to(device), target.to(device)
    optimizer.zero_grad()
    output = model(data)
    # Base loss plus any requested regularization penalties.
    if reg_function1 is None:
      loss = criterion(output, target)
    elif reg_function2 is None:
      loss = criterion(output, target) + args['lambda']*reg_function1(model)
    else:
      loss = criterion(output, target) + args['lambda1']*reg_function1(model) + args['lambda2']*reg_function2(model)
    loss.backward()
    optimizer.step()
  return model
def test(model, test_loader, criterion=F.nll_loss, device='cpu'):
"""
Tests the current Model
"""
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
return 100. * correct / len(test_loader.dataset)
def main(args, model, train_loader, val_loader,
         reg_function1=None, reg_function2=None):
  """
  Trains the model with train_loader and tests the learned model using val_loader

  Returns (val_acc_list, train_acc_list, param_norm_list, trained_model).
  NOTE(review): this relies on whichever `calculate_frobenius_norm` is currently
  defined — a later cell redefines it to return a (norm, ws, labels) tuple, in
  which case param_norm_list holds tuples; confirm intended definition order.
  """
  device = args['device']
  model = model.to(device)
  # Plain SGD with momentum; hyperparameters come from the args dict.
  optimizer = optim.SGD(model.parameters(), lr=args['lr'],
                        momentum=args['momentum'])
  val_acc_list, train_acc_list,param_norm_list = [], [], []
  for epoch in tqdm(range(args['epochs'])):
    # One full pass over the training data (optionally regularized).
    trained_model = train(args, model, train_loader, optimizer,
                          reg_function1=reg_function1,
                          reg_function2=reg_function2)
    # Track accuracy on both splits plus the model's parameter norm per epoch.
    train_acc = test(trained_model, train_loader, device=device)
    val_acc = test(trained_model, val_loader, device=device)
    param_norm = calculate_frobenius_norm(trained_model)
    train_acc_list.append(train_acc)
    val_acc_list.append(val_acc)
    param_norm_list.append(param_norm)
  return val_acc_list, train_acc_list, param_norm_list, trained_model
# -
# ---
# # Section 1: Regularization is Shrinkage
#
# *Time estimate: ~20 mins*
# + cellView="form"
# @title Video 1: Introduction to Regularization
# Tabbed video player: YouTube (tab 0) and Bilibili (tab 1) mirrors.
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
  class BiliVideo(IFrame):
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id=f"BV1mo4y1X76E", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"jhQAnIHTR6A", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Introduction to Regularization')
display(out)
# -
# A key idea of neural nets, is that they use models that are "too complex" - complex enough to fit all the noise in the data. One then needs to "regularize" them to make the models fit complex enough, but not too complex. The more complex the model, the better it fits the training data, but if it is too complex, it generalizes less well; it memorizes the training data but is less accurate on future test data.
# + cellView="form"
# @title Video 2: Regularization as Shrinkage
# Tabbed video player: YouTube (tab 0) and Bilibili (tab 1) mirrors.
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
  class BiliVideo(IFrame):
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id=f"BV1YL411H7Dv", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"mhVbJ74upnQ", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Regularization as Shrinkage')
display(out)
# -
# One way to think about Regularization is to think in terms of the magnitude of the overall weights of the model. A model with big weights can fit more data perfectly, whereas a model with smaller weights tends to underperform on the train set but can surprisingly do very well on the test set. Having the weights too small can also be an issue as it can then underfit the model.
#
# This week we use the sum of Frobenius Norm of all the tensors in the model as a measure of the "size of the model".
# ## Coding Exercise 1: Frobenius Norm
# Before we start, let's define the Frobenius norm, sometimes also called the Euclidean norm of an $m×n$ matrix $A$ as the square root of the sum of the absolute squares of its elements.
#
# <br>
#
# \begin{equation}
# ||A||_F= \sqrt{\sum_{i=1}^m\sum_{j=1}^n|a_{ij}|^2}
# \end{equation}
#
# This is just a measure of how big the matrix is, analogous to how big a vector is.
# **Hint:** Use functions `model.parameters()` or `model.named_parameters()`
#
# +
def calculate_frobenius_norm(model):
  """Exercise stub: return the Frobenius norm over all parameters of `model`.

  The `...` placeholders are intentional — students fill them in and then
  remove the NotImplementedError below.
  """
  ####################################################################
  # Fill in all missing code below (...),
  # then remove or comment the line below to test your function
  raise NotImplementedError("Define `calculate_frobenius_norm` function")
  ####################################################################
  norm = 0.0
  # Sum the square of all parameters
  for param in model.parameters():
    norm += ...
  # Take a square root of the sum of squares of all the parameters
  norm = ...
  return norm
# add event to airtable
atform.add_event('Coding Exercise 1: Frobenius Norm')
# Seed added for reproducibility
set_seed(seed=SEED)
## uncomment below to test your code
# net = nn.Linear(10, 1)
# print(f'Frobenius Norm of Single Linear Layer: {calculate_frobenius_norm(net)}')
# +
# to_remove solution
def calculate_frobenius_norm(model):
  """Return the Frobenius norm over all parameters of `model` (as a 0-d tensor)."""
  # Sum of squared entries across every parameter tensor, then square-root.
  squared_total = sum(torch.sum(param ** 2) for param in model.parameters())
  return squared_total ** 0.5
# add event to airtable
atform.add_event('Coding Exercise 1: Frobenius Norm')
# Seed added for reproducibility
set_seed(seed=SEED)
# Quick sanity check of the solution on a single linear layer.
net = nn.Linear(10, 1)
print(f'Frobenius Norm of Single Linear Layer: {calculate_frobenius_norm(net)}')
# -
# ```
# Random seed 2021 has been set.
# Frobenius Norm of Single Linear Layer: 0.6572162508964539
# ```
# Apart from calculating the weight size for an entire model, we could also determine the weight size in every layer. For this, we can modify our `calculate_frobenius_norm` function as shown below.
#
# **Have a look how it works!!**
# +
# Frobenius Norm per Layer
def calculate_frobenius_norm(model):
  """Compute the model's Frobenius norm plus per-layer norms and layer names.

  Returns:
    (total_norm, per_layer_norms, layer_names) where the norms are numpy values.
  """
  total_squared = 0.0
  per_layer, layer_names = [], []
  for layer_name, tensor in model.named_parameters():
    layer_squared = torch.sum(tensor**2)
    total_squared += layer_squared
    per_layer.append((layer_squared**0.5).cpu().detach().numpy())
    layer_names.append(layer_name)
  # Square-root of the grand total gives the whole-model norm.
  return (total_squared**0.5).cpu().detach().numpy(), per_layer, layer_names
# Demonstrate the per-layer norm on a single linear layer.
set_seed(SEED)
net = nn.Linear(10,1)
norm, ws, labels = calculate_frobenius_norm(net)
print(f'Frobenius Norm of Single Linear Layer: {norm:.4f}')
# Plots the weights
plot_weights(norm, labels, ws)
# -
# Using the last function `calculate_frobenius_norm`, we can also obtain the Frobenius Norm per layer for a whole NN model and use the `plot_weights` function to visualize them.
# +
set_seed(seed=SEED)
# Creates a new model
model = AnimalNet()
# Calculates the Frobenius norm per layer
norm, ws, labels = calculate_frobenius_norm(model)
print(f'Frobenius Norm of Models weights: {norm:.4f}')
# Plots the weights
plot_weights(norm, labels, ws)
# -
# ---
# # Section 2: Overfitting
#
# *Time estimate: ~15 mins*
#
# + cellView="form"
# @title Video 3: Overparameterization and Overfitting
# Tabbed video player: YouTube (tab 0) and Bilibili (tab 1) mirrors.
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
  class BiliVideo(IFrame):
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id=f"BV1NX4y1A73i", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"-HJ_9HxY38g", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Overparameterization and Overfitting')
display(out)
# -
# ## Section 2.1: Visualizing Overfitting
#
#
# Let's create some synthetic dataset that we will use to illustrate overfitting in neural networks.
# +
set_seed(seed=SEED)
# creating train data
# input: 10 random points in [0, 1)
X = torch.rand((10, 1))
# output: linear trend y = 2x plus Gaussian noise (std 2 after scaling)
Y = 2*X + 2*torch.empty((X.shape[0], 1)).normal_(mean=0, std=1) # adding small error in the data
# visualizing train data
plt.figure(figsize=(8, 6))
plt.scatter(X.numpy(),Y.numpy())
plt.xlabel('input (x)')
plt.ylabel('output(y)')
plt.title('toy dataset')
plt.show()
# creating test dataset: 40 evenly spaced points, reshaped to (40, 1, 1) batches
X_test = torch.linspace(0, 1, 40)
X_test = X_test.reshape((40, 1, 1))
# -
# Let's create an overparametrized Neural Network that can fit on the dataset that we just created and train it.
#
# First, let's build the model architecture:
# Network Class - 2D
class Net(nn.Module):
  """Deliberately overparameterized MLP for 1-D regression: 1 -> 300 -> 500 -> 1."""

  def __init__(self):
    super(Net, self).__init__()
    self.fc1 = nn.Linear(1, 300)
    self.fc2 = nn.Linear(300, 500)
    self.fc3 = nn.Linear(500, 1)

  def forward(self, x):
    hidden = F.leaky_relu(self.fc2(F.leaky_relu(self.fc1(x))))
    return self.fc3(hidden)
# Next, let's define the different parameters for training our model:
#
# +
set_seed(seed=SEED)

# train the network on toy dataset
model = Net()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)
# Counter for the prediction snapshots stored during training (see next cell).
iters = 0

# Calculates the Frobenius norm (whole model, per layer, layer names)
# before training, for comparison with the post-training values.
normi, wsi, label = calculate_frobenius_norm(model)
# -
# At this point, we can now train our model.
# +
set_seed(seed=SEED)

# initializing variables
# losses
train_loss = []
test_loss = []
# model norm (whole-model Frobenius norm, one entry per epoch)
model_norm = []
# Initializing variables to store weights (per-layer norms, one list per epoch)
norm_per_layer = []

max_epochs = 10000
# One column per stored snapshot: every 500 epochs plus the final epoch.
running_predictions = np.empty((40, int(max_epochs / 500 + 1)))

for epoch in tqdm(range(max_epochs)):
    # frobenius norm per epoch (recorded before this epoch's weight update)
    norm, pl, layer_names = calculate_frobenius_norm(model)

    # training
    model_norm.append(norm)
    norm_per_layer.append(pl)
    model.train()
    optimizer.zero_grad()
    predictions = model(X)
    loss = criterion(predictions, Y)
    loss.backward()
    optimizer.step()
    train_loss.append(loss.data)

    # Evaluation: test loss compares against the noiseless target 2*x,
    # so it measures distance from the true underlying function.
    model.eval()
    Y_test = model(X_test)
    loss = criterion(Y_test, 2*X_test)
    test_loss.append(loss.data)

    # Snapshot predictions every 500 epochs (and at the last epoch) for the
    # animation cell below; `iters` indexes the snapshot column.
    if (epoch % 500 == 0 or epoch == max_epochs - 1):
        running_predictions[:, iters] = Y_test[:, 0, 0].detach().numpy()
        iters += 1
# -
# Now that we have finished training, let's see how the model has evolved over the training process.
# + cellView="form"
# @title Animation (Run Me!)
set_seed(seed=SEED)

# create a figure and axes
fig = plt.figure(figsize=(14, 5))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)

# organizing subplots
plot1, = ax1.plot([], [])
plot2 = ax2.bar([], [])


def frame(i):
    """Draw animation frame i: model fit (left) and layer norms (right).

    Frame i corresponds to training epoch i * 500, matching how
    `running_predictions` was sampled during training.
    """
    ax1.clear()
    title1 = ax1.set_title('')
    ax1.set_xlabel("Input(x)")
    ax1.set_ylabel("Output(y)")

    ax2.clear()
    ax2.set_xlabel('Layer names')
    ax2.set_ylabel('Frobenius norm')
    # Typo fixed: was 'Forbenius Norm'.
    title2 = ax2.set_title('Weight Measurement: Frobenius Norm')

    # Left panel: training points plus the model's fit on the test grid.
    ax1.scatter(X.numpy(), Y.numpy())
    plot1 = ax1.plot(X_test[:, 0, :].detach().numpy(), running_predictions[:, i])
    title1.set_text(f'Epochs: {i * 500}')

    # Right panel: per-layer norms as bars, whole-model norm as a dashed line.
    plot2 = ax2.bar(label, norm_per_layer[i * 500])
    plt.axhline(y=model_norm[i * 500], linewidth=1,
                color='r', ls='--',
                label=f'Norm: {model_norm[i * 500]:.2f}')
    plt.legend()
    return plot1, plot2


anim = animation.FuncAnimation(fig, frame, frames=range(20),
                               blit=False, repeat=False,
                               repeat_delay=10000)
html_anim = HTML(anim.to_html5_video())
plt.close()
display(html_anim)
# + cellView="form"
# @title Plot the train and test losses
plt.figure(figsize=(8, 6))
# Plot both curves in the same order as before so the colors stay stable.
for curve, curve_name in ((train_loss, 'train_loss'), (test_loss, 'test_loss')):
    plt.plot(curve, label=curve_name)
plt.ylabel('loss')
plt.xlabel('epochs')
plt.title('loss vs epoch')
plt.legend()
plt.show()
# -
# ### Think! 2.1: Interpreting losses
#
# Regarding the train and test graph above, discuss among yourselves:
#
# * What trend do you see w.r.t to train and test losses ( Where do you see the minimum of these losses?)
# * What does it tell us about the model we trained?
#
#
#
# + cellView="form"
# @title Student Response
from ipywidgets import widgets

# Free-text answer box plus a submit button that logs the answer to Airtable.
text = widgets.Textarea(
    value='Type answer here and Push submit',
    placeholder='Type something',
    description='',
    disabled=False,
)
button = widgets.Button(description="Submit!")
display(text, button)


def _submit_answer(b):
    # Record the student's answer under question id 'q1'.
    atform.add_answer('q1', text.value)
    print("Submission successful!")


button.on_click(_submit_answer)
# -
# to_remove explanation
"""
1. The train and test losses are inversely proportional to each other with increased number of epochs.
The minimum loss in the train set is reached at the final epochs. Meanwhile, in the test set the minimum loss is found in the first epochs.
2. This can tell us that the model is memorizing the training set and not generalizing the test set.
"""
# Now let's visualize the Frobenius norm of the model as we trained. You should see that the norm of the weights increases over the epochs.
# + cellView="form"
# @markdown Frobenius norm of the model
plt.figure(figsize=(8, 6))
plt.plot(model_norm)
plt.ylabel('norm of the model')
plt.xlabel('epochs')
# Title updated per the TODO that used to live on this line
# ("Change title to Frobenious norm of the model"), with the typo corrected.
plt.title('Frobenius norm of the model')
plt.show()
# -
# Finally, you can compare the Frobenius norm per layer in the model, before and after training.
# + cellView="form"
# @markdown Frobenius norm per layer before and after training
# Recompute the norms after training and plot them next to the pre-training
# values (normi/wsi were captured before the training loop ran).
normf, wsf, label = calculate_frobenius_norm(model)
plot_weights(float(normi), label, wsi,
             title='Weight Size Before Training')
plot_weights(float(normf), label, wsf,
             title='Weight Size After Training')
# -
# ## Section 2.2: Overfitting on Test Dataset
#
#
# In principle we should not touch our test set until after we have chosen all our hyperparameters. Were we to use the test data in the model selection process, there is a risk that we might overfit the test data. Then we would be in serious trouble. If we overfit our training data, there is always the evaluation on test data to keep us honest. But if we overfit the test data, how would we ever know?
#
# Note that there is another kind of overfitting: you do "honest" fitting on one set of images or posts, or medical records, but it may not generalize to other sets of images, posts or medical records.
#
# #### Validation Dataset
# A common practice to address this problem is to split our data in three ways, using a validation dataset (or validation set) to tune the hyperparameters. Ideally, we would only touch the test data once, to assess the very best model or to compare a small number of models to each other. In practice, however, real-world test data is seldom discarded after just one use.
#
#
# ---
# # Section 3: Memorization
#
# *Time estimate: ~25 mins*
#
# Given sufficiently large networks and enough training, Neural Networks can achieve almost 100% train accuracy.
#
# In this section we train three MLPs; one each on:
#
#
# 1. Animal Faces Dataset
# 2. A Completely Noisy Dataset (Random Shuffling of all labels)
# 3. A partially Noisy Dataset (Random Shuffling of 15% labels)
#
# Now, think for a couple of minutes as to what the train and test accuracies of each of these models might be, given that you train for sufficient time and use a powerful network.
# First, let's create the required dataloaders for all three datasets. Notice how we split the data. We train on a fraction of the dataset as it will be faster to train and will overfit more clearly.
# +
# Dataloaders for the Dataset
batch_size = 128
classes = ('cat', 'dog', 'wild')

# defining number of examples for train, val, test
# (train/val are tiny on purpose so the network can memorize quickly)
len_train, len_val, len_test = 100, 100, 14430

train_transform = transforms.Compose([
    transforms.ToTensor(),
    # Rescale each channel from [0, 1] to [-1, 1].
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

data_path = pathlib.Path('.')/'afhq'  # using pathlib to be compatible with all OS's
img_dataset = ImageFolder(data_path/'train', transform=train_transform)
# +
# Dataloaders for the Original Dataset

# For reproducibility
# NOTE(review): g_seed is only passed to the DataLoaders below; it is NOT
# handed to random_split, so the split itself depends on the global torch
# RNG state — confirm this is intended.
g_seed = torch.Generator()
g_seed.manual_seed(SEED)

img_train_data, img_val_data,_ = torch.utils.data.random_split(img_dataset,
                                                               [len_train,
                                                                len_val,
                                                                len_test])

# Creating train_loader and Val_loader
train_loader = torch.utils.data.DataLoader(img_train_data,
                                           batch_size=batch_size,
                                           num_workers=2,
                                           worker_init_fn=seed_worker,
                                           generator=g_seed)
val_loader = torch.utils.data.DataLoader(img_val_data,
                                         batch_size=1000,
                                         num_workers=2,
                                         worker_init_fn=seed_worker,
                                         generator=g_seed)
# +
# Dataloaders for the Random Dataset (all labels shuffled)

# For reproducibility
# NOTE(review): g_seed only seeds the DataLoaders; random_split below draws
# from the global torch RNG — confirm this is intended.
g_seed = torch.Generator()
g_seed.manual_seed(SEED + 1)

# splitting randomized data into training and validation data
data_path = pathlib.Path('.')/'afhq_random_32x32/afhq_random'  # using pathlib to be compatible with all OS's
img_dataset = ImageFolder(data_path/'train', transform=train_transform)

random_img_train_data, random_img_val_data,_ = torch.utils.data.random_split(img_dataset, [len_train, len_val, len_test])

# Randomized train and validation dataloader
rand_train_loader = torch.utils.data.DataLoader(random_img_train_data,
                                                batch_size=batch_size,
                                                num_workers=2,
                                                worker_init_fn=seed_worker,
                                                generator=g_seed)
rand_val_loader = torch.utils.data.DataLoader(random_img_val_data,
                                              batch_size=1000,
                                              num_workers=2,
                                              worker_init_fn=seed_worker,
                                              generator=g_seed)
# +
# Dataloaders for the Partially Random Dataset (15% of labels shuffled)

# For reproducibility
# NOTE(review): as above, g_seed only seeds the DataLoaders, not random_split.
g_seed = torch.Generator()
g_seed.manual_seed(SEED + 1)

# Splitting data between training and validation dataset for partially randomized data
data_path = pathlib.Path('.')/'afhq_10_32x32/afhq_10'  # using pathlib to be compatible with all OS's
img_dataset = ImageFolder(data_path/'train', transform=train_transform)

partially_random_train_data, partially_random_val_data,_ = torch.utils.data.random_split(img_dataset, [len_train, len_val, len_test])

# Training and Validation loader for partially randomized data
partial_rand_train_loader = torch.utils.data.DataLoader(partially_random_train_data,
                                                        batch_size=batch_size,
                                                        num_workers=2,
                                                        worker_init_fn=seed_worker,
                                                        generator=g_seed)
partial_rand_val_loader = torch.utils.data.DataLoader(partially_random_val_data,
                                                      batch_size=1000,
                                                      num_workers=2,
                                                      worker_init_fn=seed_worker,
                                                      generator=g_seed)
# -
# Now let's define a model which has many parameters compared to the training dataset size, and train it on these datasets.
# Network Class - Animal Faces
class BigAnimalNet(nn.Module):
    """MLP classifier for flattened 3x32x32 animal-face images.

    Heavily overparameterized relative to the 100 training images, so it
    can memorize even fully shuffled labels.
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(3 * 32 * 32, 124)
        self.fc2 = nn.Linear(124, 64)
        self.fc3 = nn.Linear(64, 3)

    def forward(self, x):
        """Flatten the image batch and return per-class log-probabilities."""
        flat = x.view(x.shape[0], -1)
        hidden = F.leaky_relu(self.fc1(flat))
        hidden = F.leaky_relu(self.fc2(hidden))
        logits = self.fc3(hidden)
        return F.log_softmax(logits, dim=1)
# Before training our `BigAnimalNet()`, calculate the Frobenius norm again.
set_seed(seed=SEED)
# Frobenius norm of a freshly initialized network (pre-training baseline).
normi, wsi, label = calculate_frobenius_norm(BigAnimalNet())
# Now, train our `BigAnimalNet()` model
# +
# Here we have 100 true (unshuffled) train data points.

# Set the arguments
args = {
    'epochs': 200,
    'lr': 5e-3,
    'momentum': 0.9,
    'device': DEVICE
}

# Initialize the network
set_seed(seed=SEED)
model = BigAnimalNet()

start_time = time.time()
# Train the network (returns per-epoch accuracies and the trained model)
val_acc_pure, train_acc_pure, _, model = main(args=args,
                                              model=model,
                                              train_loader=train_loader,
                                              val_loader=val_loader)
end_time = time.time()
print(f"Time to memorize the dataset: {end_time - start_time}")

# Train and Test accuracy plot
plt.figure(figsize=(8, 6))
plt.plot(val_acc_pure, label='Val Accuracy Pure', c='red', ls='dashed')
plt.plot(train_acc_pure, label='Train Accuracy Pure', c='red', ls='solid')
plt.axhline(y=max(val_acc_pure), c='green', ls='dashed',
            label='max Val accuracy pure')
plt.title('Memorization')
plt.ylabel('Accuracy (%)')
plt.xlabel('Epoch')
plt.legend()
plt.show()
# + cellView="form"
# @markdown #### Frobenius norm for AnimalNet before and after training
normf, wsf, label = calculate_frobenius_norm(model)
plot_weights(float(normi), label, wsi, title='Weight Size Before Training')
plot_weights(float(normf), label, wsf, title='Weight Size After Training')
# -
# ## Coding Exercise 3: Data Visualizer
# Before we train the model on a data with random labels, let's visualize and verify for ourselves that the data is random. Here, we have classes = ("cat", "dog", "wild").
#
# **Hint:** Use `.permute()` method. `plt.imshow()` expects input to be in numpy format and in the format (Px, Py, 3), where Px and Py are the number of pixels along axis x and y respectively.
# +
def visualize_data(dataloader):
    """Display one datapoint (image plus class name) from each mini-batch."""
    ####################################################################
    # Fill in all missing code below (...),
    # then remove or comment the line below to test your function
    # The dataloader here gives out mini batches of 100 data points.
    raise NotImplementedError("Complete the Visualize_random_data function")
    ####################################################################
    for idx, (data,label) in enumerate(dataloader):
        plt.figure(idx)
        # Choose the datapoint you would like to visualize
        index = ...
        # choose that datapoint using index and permute the dimensions
        # (C, H, W) -> (H, W, C) and bring the pixel values between [0, 1]
        data = ...
        # Convert the torch tensor into numpy
        data = ...
        plt.imshow(data)
        plt.axis(False)
        image_class = classes[...]
        print(f'The image belongs to : {image_class}')
        plt.show()


# add event to airtable
atform.add_event('Coding Exercise 3: Data Visualizer')

## uncomment to run the function
# visualize_data(rand_train_loader)
# +
# to_remove solution
def visualize_data(dataloader):
    """Show one fixed image (index 22) from every mini-batch with its label."""
    for batch_id, (images, labels) in enumerate(dataloader):
        plt.figure(batch_id)
        # Fixed datapoint within each batch to display.
        sample = 22
        # (C, H, W) -> (H, W, C), then undo Normalize((0.5,)*3, (0.5,)*3)
        # so pixel values are back in [0, 1] for imshow.
        std = torch.tensor([0.5, 0.5, 0.5])
        mean = torch.tensor([0.5, 0.5, 0.5])
        img = images[sample].permute(1, 2, 0) * std + mean
        plt.imshow(img.numpy())
        plt.axis(False)
        image_class = classes[labels[sample].item()]
        print(f'The image belongs to : {image_class}')
        plt.show()
# add event to airtable
atform.add_event('Coding Exercise 3: Data Visualizer')
## uncomment to run the function
with plt.xkcd():
visualize_data(rand_train_loader)
# -
# Now let's train the network on the shuffled data and see if it memorizes.
# +
# Here we have 100 completely shuffled train data points.

# Set the arguments
args = {
    'epochs': 200,
    'lr': 5e-3,
    'momentum': 0.9,
    'device': DEVICE
}

# Initialize the model
set_seed(seed=SEED)
model = BigAnimalNet()

# Train the model on fully label-shuffled data; validation still uses the
# clean validation loader.
val_acc_random, train_acc_random, _, model = main(args,
                                                  model,
                                                  rand_train_loader,
                                                  val_loader)

# Train and Test accuracy plot
plt.figure(figsize=(8, 6))
plt.plot(val_acc_random, label='Val Accuracy random', c='red', ls='dashed')
plt.plot(train_acc_random, label='Train Accuracy random', c='red', ls='solid')
plt.axhline(y=max(val_acc_random), c='green', ls='dashed',
            label='Max Val Accuracy random')
plt.title('Memorization')
plt.ylabel('Accuracy (%)')
plt.xlabel('Epoch')
plt.legend()
plt.show()
# -
# Finally lets train on a partially shuffled dataset where 15% of the labels are noisy.
# +
# Here we have 100 partially shuffled train data points (15% noisy labels).

# Set the arguments
args = {
    'epochs': 200,
    'lr': 5e-3,
    'momentum': 0.9,
    'device': DEVICE
}

# Initialize the model
set_seed(seed=SEED)
model = BigAnimalNet()

# Train the model; the trained model itself is discarded here — only the
# accuracy histories are kept for plotting.
val_acc_shuffle, train_acc_shuffle, _, _, = main(args,
                                                 model,
                                                 partial_rand_train_loader,
                                                 val_loader)

# train and test acc plot
plt.figure(figsize=(8, 6))
plt.plot(val_acc_shuffle, label='Val Accuracy shuffle', c='red', ls='dashed')
plt.plot(train_acc_shuffle, label='Train Accuracy shuffle', c='red', ls='solid')
plt.axhline(y=max(val_acc_shuffle), c='green', ls='dashed', label='Max Val Accuracy shuffle')
plt.title('Memorization')
plt.ylabel('Accuracy (%)')
plt.xlabel('Epoch')
plt.legend()
plt.show()
# + cellView="form"
#@markdown #### Plotting them all together (Run Me!)
plt.figure(figsize=(8, 6))
# Each (validation, train) pair shares a color; dashed = validation.
# Plot order matches the original cell so legend order is unchanged.
curve_sets = (
    (val_acc_pure, train_acc_pure, 'Pure', 'red'),
    (val_acc_random, train_acc_random, 'Random', 'blue'),
    (val_acc_shuffle, train_acc_shuffle, 'shuffle', 'y'),
)
for val_curve, train_curve, tag, color in curve_sets:
    plt.plot(val_curve, label=f'Val - {tag}', c=color, ls='dashed')
    plt.plot(train_curve, label=f'Train - {tag}', c=color, ls='solid')
plt.title('Memorization')
plt.ylabel('Accuracy (%)')
plt.xlabel('Epoch')
plt.legend()
plt.show()
# -
# ## Think! 3: Does it Generalize?
# Given that the Neural Network fit/memorize the training data perfectly:
#
# * Do you think it generalizes well?
# * What makes you think it does or doesn't?
#
# + cellView="form"
# @title Student Response
from ipywidgets import widgets

# Free-text answer box; the button logs the response to Airtable as 'q2'.
text=widgets.Textarea(
    value='Type answer here and Push submit',
    placeholder='Type something',
    description='',
    disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)

def on_button_clicked(b):
    # Record the student's answer under question id 'q2'.
    atform.add_answer('q2', text.value)
    print("Submission successful!")

button.on_click(on_button_clicked)
# +
# to_remove explanation
"""
1. It seems to be like none of the three models generalize
2. When compared with the training accuracy (100%), the validation is well below for all three cases.
"""
# -
# Isn't it surprising to see that the NN was able to achieve 100% training accuracy on randomly shuffled labels? This is one of the reasons why training accuracy is not a good indicator of model performance.
#
# Also it is interesting to note that sometimes the model trained on slightly shuffled data does slightly better than the one trained on pure data. Shuffling some of the data is a form of regularization--one of many ways of adding noise to the training data.
# ---
# # Section 4: Early Stopping
#
# *Time estimate: ~20 mins*
# + cellView="form"
# @title Video 4: Early Stopping
from ipywidgets import widgets

# Output widget holding the Bilibili player (mirror for viewers in China).
out2 = widgets.Output()
with out2:
    from IPython.display import IFrame

    # Thin IFrame wrapper that builds the Bilibili embed URL from a video id.
    class BiliVideo(IFrame):
        def __init__(self, id, page=1, width=400, height=300, **kwargs):
            self.id=id
            src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
            super(BiliVideo, self).__init__(src, width, height, **kwargs)

    video = BiliVideo(id=f"BV1cB4y1K777", width=854, height=480, fs=1)
    print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
    display(video)

# Output widget holding the YouTube player.
out1 = widgets.Output()
with out1:
    from IPython.display import YouTubeVideo
    video = YouTubeVideo(id=f"72IG2bX5l30", width=854, height=480, fs=1, rel=0)
    print("Video available at https://youtube.com/watch?v=" + video.id)
    display(video)

# Show both players as tabs so students can pick either source.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

# add event to airtable
atform.add_event('Video 4: Early Stopping')
display(out)
# -
#
# Now that we have established that the validation accuracy reaches the peak well before the model overfits, we want to somehow stop the training early. You should have also observed from the above plots that the train/test loss on real data is not very smooth and hence you might guess that the choice of epoch can play a very large role on the val/test accuracy of your model.
#
# Early stopping stops training when the validation accuracies stop increasing.
#
# <center><img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/static/early-stopping-machine-learning-5422207.jpg" alt="Overfitting" width="600"/></center>
# ## Coding Exercise 4: Early Stopping
# Reimplement the main function to include early stopping as described above. Then run the code below to validate your implementation.
# +
def early_stopping_main(args, model, train_loader, val_loader):
    """Train with early stopping; return accuracies, best model, best epoch."""
    ####################################################################
    # Fill in all missing code below (...),
    # then remove or comment the line below to test your function
    raise NotImplementedError("Complete the early_stopping_main function")
    ####################################################################
    device = args['device']
    model = model.to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args['lr'],
                          momentum=args['momentum'])

    best_acc = 0.0
    best_epoch = 0

    # Number of successive epochs that you want to wait before stopping training process
    patience = 20
    # Keeps track of number of epochs during which the val_acc was less than best_acc
    wait = 0

    val_acc_list, train_acc_list = [], []
    for epoch in tqdm(range(args['epochs'])):
        # train the model (one epoch)
        trained_model = ...
        # calculate training accuracy
        train_acc = ...
        # calculate validation accuracy
        val_acc = ...

        # Track the best validation accuracy seen so far and reset the
        # patience counter whenever it improves.
        if (val_acc > best_acc):
            best_acc = val_acc
            best_epoch = epoch
            best_model = copy.deepcopy(trained_model)
            wait = 0
        else:
            wait += 1
        if (wait > patience):
            print(f'early stopped on epoch: {epoch}')
            break

        train_acc_list.append(train_acc)
        val_acc_list.append(val_acc)

    return val_acc_list, train_acc_list, best_model, best_epoch


# add event to airtable
atform.add_event('Coding Exercise 4: Early Stopping')

# Set the arguments
args = {
    'epochs': 200,
    'lr': 5e-4,
    'momentum': 0.99,
    'device': DEVICE
}

# Initialize the model
set_seed(seed=SEED)
model = AnimalNet()

## Uncomment to test
# val_acc_earlystop, train_acc_earlystop, best_model, best_epoch = early_stopping_main(args, model, train_loader, val_loader)
# print(f'Maximum Validation Accuracy is reached at epoch: {best_epoch:2d}')
# early_stop_plot(train_acc_earlystop, val_acc_earlystop, best_epoch)
# +
# to_remove solution
def early_stopping_main(args, model, train_loader, val_loader):
    """Train `model`, stopping early once validation accuracy stops improving.

    Args:
        args: dict with 'device', 'lr', 'momentum' and 'epochs'.
        model: the network to train (moved onto args['device']).
        train_loader: DataLoader over the training set.
        val_loader: DataLoader over the validation set.

    Returns:
        Tuple (val_acc_list, train_acc_list, best_model, best_epoch):
        per-epoch accuracies for the epochs actually run, a deepcopy of the
        best-performing model, and the epoch at which it was observed.
    """
    device = args['device']
    model = model.to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args['lr'],
                          momentum=args['momentum'])

    best_acc = 0.0
    best_epoch = 0
    # Bug fix: start from the initial weights so `best_model` is always
    # bound at the return statement, even if validation accuracy never
    # rises above 0.0 during training.
    best_model = copy.deepcopy(model)

    # Number of successive epochs that you want to wait before stopping training process
    patience = 20
    # Keeps track of number of epochs during which the val_acc was less than best_acc
    wait = 0

    val_acc_list, train_acc_list = [], []
    for epoch in tqdm(range(args['epochs'])):
        # train the model for one epoch
        trained_model = train(args, model, train_loader, optimizer)
        # calculate training accuracy
        train_acc = test(trained_model, train_loader, device=device)
        # calculate validation accuracy
        val_acc = test(trained_model, val_loader, device=device)

        # Keep a snapshot of the best model; reset patience on improvement.
        if (val_acc > best_acc):
            best_acc = val_acc
            best_epoch = epoch
            best_model = copy.deepcopy(trained_model)
            wait = 0
        else:
            wait += 1
        if (wait > patience):
            print(f'early stopped on epoch: {epoch}')
            break

        train_acc_list.append(train_acc)
        val_acc_list.append(val_acc)

    return val_acc_list, train_acc_list, best_model, best_epoch
# add event to airtable
atform.add_event('Coding Exercise 4: Early Stopping')

# Set the arguments
args = {
    'epochs': 200,
    'lr': 5e-4,
    'momentum': 0.99,
    'device': DEVICE
}

# Initialize the model
set_seed(seed=SEED)
model = AnimalNet()

## Run the solution, report the best epoch, and plot where early stopping
## kicked in relative to the accuracy curves.
val_acc_earlystop, train_acc_earlystop, best_model, best_epoch = early_stopping_main(args, model, train_loader, val_loader)
print(f'Maximum Validation Accuracy is reached at epoch: {best_epoch:2d}')
with plt.xkcd():
    early_stop_plot(train_acc_earlystop, val_acc_earlystop, best_epoch)
# -
# ## Think! 4: Early Stopping
#
# Discuss among your pod why or why not:
#
# * Do you think early stopping can be harmful for training your network?
# + cellView="form"
# @title Student Response
from ipywidgets import widgets

# Free-text answer box; the button logs the response to Airtable as 'q3'.
text=widgets.Textarea(
    value='Type answer here and Push submit',
    placeholder='Type something',
    description='',
    disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)

def on_button_clicked(b):
    # Record the student's answer under question id 'q3'.
    atform.add_answer('q3', text.value)
    print("Submission successful!")

button.on_click(on_button_clicked)
# -
# to_remove explanation
""""
* It can be harmful if we stop the training phase before the model reaches its maximun validation accuracy.
"""
# ---
# # Summary
#
# In this tutorial, you have been introduced to the regularization technique, where we have described it as shrinkage. We have learned about overfitting, one of the worst caveats in deep learning, and finally we learned a method of reducing overfitting in our models called early-stopping.
# + cellView="form"
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/AirtableSubmissionButton.png?raw=1"
alt="button link to Airtable" style="width:410px"></a>
</div>""" )
| tutorials/W1D5_Regularization/W1D5_Tutorial1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Selecting Subsets of Data in Pandas
#
# This notebook is also available as a [blog post on Medium](https://medium.com/dunder-data/selecting-subsets-of-data-in-pandas-6fcd0170be9c).
#
# ## Part 1: Selection with `[]`, `.loc` and `.iloc`
#
# This is the beginning of a seven-part series on how to select subsets of data from a pandas DataFrame or Series. Pandas offers a wide variety of options for subset selection which necessitates multiple articles. This series is broken down into the following 7 topics.
#
# 1. Selection with `[]`, `.loc` and `.iloc`
# 1. Boolean indexing
# 1. Selection with a MultiIndex
# 1. Selecting subsets of data with methods
# 1. Selections with other Index types
# 1. Internals of indexing
# 1. Miscellaneous and Conclusion
#
# # Assumptions before we begin
# These series of articles assume you have no knowledge of pandas, but that you understand the fundamentals of the Python programming language. It also assumes that you have installed pandas on your machine.
#
# The easiest way to get pandas along with Python and the rest of the main scientific computing libraries is to install the [Anaconda distribution](https://www.anaconda.com/download/).
#
# If you have no knowledge of Python then I suggest completing the following two books cover to cover before even touching pandas. They are both free.
# * [Think Python](http://greenteapress.com/wp/think-python-2e/) by <NAME>
# * [Automate the Boring Stuff](https://automatetheboringstuff.com/) by <NAME>
#
# # The importance of making subset selections
# You might be wondering why there needs to be so many articles on selecting subsets of data. This topic is extremely important to pandas and it's unfortunate that it is fairly complicated because subset selection happens frequently during an actual analysis. Because you are frequently making subset selections, you need to master it in order to make your life with pandas easier.
#
# I will also be doing a follow-up series on index alignment which is another extremely important topic that requires you to understand subset selection.
#
# # Always reference the documentation
# The material in this article is also covered in the official pandas documentation on [Indexing and Selecting Data](http://pandas.pydata.org/pandas-docs/stable/indexing.html). I highly recommend that you read that part of the documentation along with this tutorial. In fact, the documentation is one of the primary means for mastering pandas. I wrote a step-by-step article, [How to Learn Pandas](https://medium.com/dunder-data/how-to-learn-pandas-108905ab4955), which gives suggestions on how to use the documentation as you master pandas.
#
# # The anatomy of a DataFrame and a Series
# The pandas library has two primary containers of data, the DataFrame and the Series. You will spend nearly all your time working with both of the objects when you use pandas. The DataFrame is used more than the Series, so let's take a look at an image of it first.
#
# 
#
# This images comes with some added illustrations to highlight its components. At first glance, the DataFrame looks like any other two-dimensional table of data that you have seen. It has rows and it has columns. Technically, there are three main components of the DataFrame.
#
# ## The three components of a DataFrame
# A DataFrame is composed of three different components, the **index**, **columns**, and the **data**. The data is also known as the **values**.
#
# The index represents the sequence of values on the far left-hand side of the DataFrame. All the values in the index are in **bold** font. Each individual value of the index is called a **label**. Sometimes the index is referred to as the **row labels**. In the example above, the row labels are not very interesting and are just the integers beginning from 0 up to n-1, where n is the number of rows in the table. Pandas defaults DataFrames with this simple index.
#
# The columns are the sequence of values at the very top of the DataFrame. They are also in **bold** font. Each individual value of the columns is called a **column**, but can also be referred to as **column name** or **column label**.
#
# Everything else not in bold font is the data or values. You will sometimes hear DataFrames referred to as **tabular** data. This is just another name for a rectangular table data with rows and columns.
#
# ## Axis and axes
# It is also common terminology to refer to the rows or columns as an **axis**. Collectively, we call them **axes**. So, a row is an axis and a column is another axis.
#
# The word axis appears as a parameter in many DataFrame methods. Pandas allows you to choose the direction of how the method will work with this parameter. This has nothing to do with subset selection so you can just ignore it for now.
#
#
# ### Each row has a label and each column has a label
# The main takeaway from the DataFrame anatomy is that each row has a label and each column has a label. These labels are used to refer to specific rows or columns in the DataFrame. It's the same as how humans use names to refer to specific people.
#
# # What is subset selection?
#
# Before we start doing subset selection, it might be good to define what it is. Subset selection is simply selecting particular rows and columns of data from a DataFrame (or Series). This could mean selecting all the rows and some of the columns, some of the rows and all of the columns, or some of each of the rows and columns.
#
#
# ### Example selecting some columns and all rows
# Let's see some images of subset selection. We will first look at a sample DataFrame with fake data.
#
# 
#
# Let's say we want to select just the columns `color`, `age`, and `height` but keep all the rows.
#
# 
#
# Our final DataFrame would look like this:
#
# 
#
# ### Example selecting some rows and all columns
# We can also make selections that select just some of the rows. Let's select the rows with labels `Aaron` and `Dean` along with all of the columns:
#
# 
#
# Our final DataFrame would look like this:
#
# 
#
# ### Example selecting some rows and some columns
# Let's combine the selections from above and select the columns `color`, `age`, and `height` for only the rows with labels `Aaron` and `Dean`.
#
# 
#
# Our final DataFrame would look like this:
#
# 
#
#
# # Pandas dual references: by label and by integer location
# We already mentioned that each row and each column have a specific label that can be used to reference them. This is displayed in bold font in the DataFrame.
#
# But, what hasn't been mentioned, is that each row and column may be referenced by an integer as well. I call this **integer location**. The integer location begins at 0 and ends at n-1 for each row and column. Take a look above at our sample DataFrame one more time.
#
# The rows with labels `Aaron` and `Dean` can also be referenced by their respective integer locations 2 and 4. Similarly, the columns `color`, `age` and `height` can be referenced by their integer locations 1, 3, and 4.
#
# The documentation refers to integer location as **position**. I don't particularly like this terminology as it's not as explicit as integer location. The key term here is INTEGER.
#
# # What's the difference between indexing and selecting subsets of data?
# The documentation uses the term **indexing** frequently. This term is essentially just a one-word phrase to say 'subset selection'. I prefer the term subset selection as, again, it is more descriptive of what is actually happening. Indexing is also the term used in the official Python documentation.
#
# # Focusing only on `[]`, `.loc`, and `.iloc`
# There are many ways to select subsets of data, but in this article we will only cover the usage of the square brackets (**`[]`**), **`.loc`** and **`.iloc`**. Collectively, they are called the **indexers**. These are by far the most common ways to select data. A different part of this Series will discuss a few methods that can be used to make subset selections.
#
# If you have a DataFrame, `df`, your subset selection will look something like the following:
#
# ```
# df[ ]
# df.loc[ ]
# df.iloc[ ]
# ```
#
# A real subset selection will have something inside of the square brackets. All selections in this article will take place inside of those square brackets.
#
# Notice that the square brackets also follow `.loc` and `.iloc`. All indexing in Python happens inside of these square brackets.
#
# # A term for just those square brackets
# The term **indexing operator** is used to refer to the square brackets following an object. The **`.loc`** and **`.iloc`** indexers also use the indexing operator to make selections. I will use the term **just the indexing operator** to refer to **`df[]`**. This will distinguish it from **`df.loc[]`** and **`df.iloc[]`**.
#
# # Read in data into a DataFrame with `read_csv`
# Let's begin using pandas to read in a DataFrame, and from there, use the indexing operator by itself to select subsets of data. All the data for these tutorials are in the **data** directory.
#
# We will use the **`read_csv`** function to read in data into a DataFrame. We pass the path to the file as the first argument to the function. We will also use the **`index_col`** parameter to select the first column of data as the index (more on this later).
import pandas as pd
import numpy as np
df = pd.read_csv('../../data/sample_data.csv', index_col=0)  # index_col=0: first column becomes the row index
df
# # Extracting the individual DataFrame components
# Earlier, we mentioned the three components of the DataFrame. The index, columns and data (values). We can extract each of these components into their own variables. Let's do that and then inspect them:
index = df.index  # row labels
columns = df.columns  # column labels
values = df.values  # underlying data
index
columns
values
# # Data types of the components
# Let's output the type of each component to understand exactly what kind of object they are.
type(index)  # pandas Index
type(columns)  # pandas Index
type(values)  # numpy.ndarray
# # Understanding these types
# Interestingly, both the index and the columns are the same type. They are both a pandas **`Index`** object. This object is quite powerful in itself, but for now you can just think of it as a sequence of labels for either the rows or the columns.
#
# The values are a NumPy **`ndarray`**, which stands for n-dimensional array, and is the primary container of data in the NumPy library. Pandas is built directly on top of NumPy and it's this array that is responsible for the bulk of the workload.
# # Beginning with just the indexing operator on DataFrames
# We will begin our journey of selecting subsets by using just the indexing operator on a DataFrame. Its main purpose is to select a single column or multiple columns of data.
#
# ## Selecting a single column as a Series
# To select a single column of data, simply put the name of the column in-between the brackets. Let's select the food column:
df['food']  # single column name -> returns a Series
# # Anatomy of a Series
# Selecting a single column of data returns the other pandas data container, the Series. A Series is a one-dimensional sequence of labeled data. There are two main components of a Series, the **index** and the **data**(or **values**). There are NO columns in a Series.
#
# The visual display of a Series is just plain text, as opposed to the nicely styled table for DataFrames. The sequence of person names on the left is the index. The sequence of food items on the right is the values.
#
# You will also notice two extra pieces of data on the bottom of the Series. The **name** of the Series becomes the old-column name. You will also see the data type or **`dtype`** of the Series. You can ignore both these items for now.
#
# # Selecting multiple columns with just the indexing operator
# It's possible to select multiple columns with just the indexing operator by passing it a list of column names. Let's select `color`, `food`, and `score`:
df[['color', 'food', 'score']]  # list of column names -> returns a DataFrame
# # Selecting multiple columns returns a DataFrame
# Selecting multiple columns returns a DataFrame. You can actually select a single column as a DataFrame with a one-item list:
df[['food']]  # one-item list -> one-column DataFrame, not a Series
# Although, this resembles the Series from above, it is technically a DataFrame, a different object.
#
# # Column order doesn't matter
# When selecting multiple columns, you can select them in any order that you choose. It doesn't have to be the same order as the original DataFrame. For instance, let's select `height` and `color`.
df[['height', 'color']]  # columns may be selected in any order
# # Exceptions
# There are a couple common exceptions that arise when doing selections with just the indexing operator.
# * If you misspell a word, you will get a **`KeyError`**
# * If you forgot to use a list to contain multiple columns you will also get a **`KeyError`**
df['hight']  # intentionally raises KeyError - misspelled column name
df['color', 'age'] # should be: df[['color', 'age']]
# # Summary of just the indexing operator
# * Its primary purpose is to select columns by the column names
# * Select a single column as a Series by passing the column name directly to it: **`df['col_name']`**
# * Select multiple columns as a DataFrame by passing a **list** to it: **`df[['col_name1', 'col_name2']]`**
# * You actually can select rows with it, but this will not be shown here as it is confusing and not used often.
# # Getting started with `.loc`
# The **`.loc`** indexer selects data in a different way than just the indexing operator. It can select subsets of rows or columns. It can also simultaneously select subsets of rows and columns. Most importantly, it only selects data by the **LABEL** of the rows and columns.
#
# # Select a single row as a Series with `.loc`
# The **`.loc`** indexer will return a single row as a Series when given a single row label. Let's select the row for **`Niko`**.
df.loc['Niko']  # single row label -> Series
# We now have a Series, where the old column names are now the index labels. The **`name`** of the Series has become the old index label, **`Niko`** in this case.
# # Select multiple rows as a DataFrame with `.loc`
# To select multiple rows, put all the row labels you want to select in a list and pass that to **`.loc`**. Let's select `Niko` and `Penelope`.
df.loc[['Niko', 'Penelope']]  # list of row labels -> DataFrame
# # Use slice notation to select a range of rows with `.loc`
# It is possible to 'slice' the rows of a DataFrame with `.loc` by using **slice notation**. Slice notation uses a colon to separate **start**, **stop** and **step** values. For instance we can select all the rows from `Niko` through `Dean` like this:
df.loc['Niko':'Dean']  # label slice - inclusive of 'Dean'
# # `.loc` includes the last value with slice notation
# Notice that the row labeled with `Dean` was kept. In other data containers such as Python lists, the last value is excluded.
#
# # Other slices
# You can use slice notation similarly to how you use it with lists. Let's slice from the beginning through `Aaron`:
df.loc[:'Aaron']  # from the beginning through 'Aaron'
# Slice from `Niko` to `Christina` stepping by 2:
df.loc['Niko':'Christina':2]
# Slice from `Dean` to the end:
df.loc['Dean':]
# # Selecting rows and columns simultaneously with `.loc`
# Unlike just the indexing operator, it is possible to select rows and columns simultaneously with `.loc`. You do it by separating your row and column selections by a **comma**. It will look something like this:
#
# ```
# df.loc[row_selection, column_selection]
# ```
#
# ## Select two rows and three columns
# For instance, if we wanted to select the rows `Dean` and `Cornelia` along with the columns `age`, `state` and `score` we would do this:
df.loc[['Dean', 'Cornelia'], ['age', 'state', 'score']]  # row selection first, then column selection
# # Use any combination of selections for either row or columns for `.loc`
# Row or column selections can be any of the following as we have already seen:
# * A single label
# * A list of labels
# * A slice with labels
#
# We can use any of these three for either row or column selections with **`.loc`**. Let's see some examples.
#
# Let's select two rows and a single column:
df.loc[['Dean', 'Aaron'], 'food']  # two rows, one column -> Series
# Select a slice of rows and a list of columns:
df.loc['Jane':'Penelope', ['state', 'color']]  # slice of rows, list of columns
# Select a single row and a single column. This returns a scalar value.
df.loc['Jane', 'age']  # single row, single column -> scalar value
# Select a slice of rows and columns
df.loc[:'Dean', 'height':]  # slices for both rows and columns
# ## Selecting all of the rows and some columns
# It is possible to select all of the rows by using a single colon. You can then select columns as normal:
df.loc[:, ['food', 'color']]  # lone colon selects all rows
# You can also use this notation to select all of the columns:
df.loc[['Penelope','Cornelia'], :]
# But, it isn't necessary as we have seen, so you can leave out that last colon:
df.loc[['Penelope','Cornelia']]  # same result as above
# # Assign row and column selections to variables
# It might be easier to assign row and column selections to variables before you use `.loc`. This is useful if you are selecting many rows or columns:
rows = ['Jane', 'Niko', 'Dean', 'Penelope', 'Christina']  # row labels to select
cols = ['state', 'age', 'height', 'score']  # column labels to select
df.loc[rows, cols]
# # Summary of `.loc`
#
# * Only uses labels
# * Can select rows and columns simultaneously
# * Selection can be a single label, a list of labels or a slice of labels
# * Put a comma between row and column selections
# # Getting started with `.iloc`
# The `.iloc` indexer is very similar to `.loc` but only uses integer locations to make its selections. The word `.iloc` itself stands for integer location, so that should help you remember what it does.
#
# # Selecting a single row with `.iloc`
# By passing a single integer to `.iloc`, it will select one row as a Series:
df.iloc[3]  # row at integer position 3 -> Series
# # Selecting multiple rows with `.iloc`
# Use a list of integers to select multiple rows:
df.iloc[[5, 2, 4]] # remember, don't do df.iloc[5, 2, 4] Error!
# # Use slice notation to select a range of rows with `.iloc`
# Slice notation works just like a list in this instance and is exclusive of the last element
df.iloc[3:5]  # positions 3 and 4 - position 5 is excluded
# Select 3rd position until end:
df.iloc[3:]
# Select 3rd position to end by 2:
df.iloc[3::2]
# # Selecting rows and columns simultaneously with `.iloc`
# Just like with `.loc`, any combination of a single integer, lists of integers or slices can be used to select rows and columns simultaneously. Just remember to separate the selections with a **comma**.
#
# Select two rows and two columns:
df.iloc[[2,3], [0, 4]]  # row positions, then column positions
# Select a slice of the rows and two columns:
df.iloc[3:6, [1, 4]]
# Select slices for both
df.iloc[2:5, 2:5]
# Select a single row and column
df.iloc[0, 2]  # -> scalar value
# Select all the rows and a single column
df.iloc[:, 5]  # -> Series
# # Deprecation of `.ix`
# Early in the development of pandas, there existed another indexer, **`ix`**. This indexer was capable of selecting both by label and by integer location. While it was versatile, it caused lots of confusion because it's not explicit. Sometimes integers can also be labels for rows or columns. Thus there were instances where it was ambiguous.
#
# You can still call **`.ix`**, but it has been deprecated, so please **never use it**.
# # Selecting subsets of Series
#
# We can also, of course, do subset selection with a Series. Earlier I recommended using just the indexing operator for column selection on a DataFrame. Since Series do not have columns, I suggest using only **`.loc`** and **`.iloc`**. You can use just the indexing operator, but it's ambiguous as it can take both labels and integers. I will come back to this at the end of the tutorial.
#
# Typically, you will create a Series by selecting a single column from a DataFrame. Let's select the **`food`** column:
food = df['food']  # single column -> Series
food
# # Series selection with `.loc`
# Series selection with `.loc` is quite simple, since we are only dealing with a single dimension. You can again use a single row label, a list of row labels or a slice of row labels to make your selection. Let's see several examples.
#
# Let's select a single value:
food.loc['Aaron']  # single label -> scalar value
# Select three different values. This returns a Series:
food.loc[['Dean', 'Niko', 'Cornelia']]
# Slice from `Niko` to `Christina` - is inclusive of last index
food.loc['Niko':'Christina']
# Slice from `Penelope` to the end:
food.loc['Penelope':]
# Select a single value in a list which returns a Series
food.loc[['Aaron']]
# # Series selection with `.iloc`
# Series subset selection with **`.iloc`** happens similarly to **`.loc`** except it uses integer location. You can use a single integer, a list of integers or a slice of integers. Let's see some examples.
#
# Select a single value:
food.iloc[0]  # single integer -> scalar value
# Use a list of integers to select multiple values:
food.iloc[[4, 1, 3]]
# Use a slice - is exclusive of last integer
food.iloc[4:6]
# # Comparison to Python lists and dictionaries
# It may be helpful to compare pandas ability to make selections by label and integer location to that of Python lists and dictionaries.
#
# Python lists allow for selection of data only through integer location. You can use a single integer or slice notation to make the selection but NOT a list of integers.
#
# Let's see examples of subset selection of lists using integers:
some_list = ['a', 'two', 10, 4, 0, 'asdf', 'mgmt', 434, 99]
some_list[5]  # single integer location
some_list[-1]  # negative index counts from the end
some_list[:4]  # slice - last position excluded
some_list[3:]
some_list[2:6:3]  # start:stop:step
# ### Selection by label with Python dictionaries
# All values in each dictionary are labeled by a **key**. We use this key to make single selections. Dictionaries only allow selection with a single label. Slices and lists of labels are not allowed.
d = {'a':1, 'b':2, 't':20, 'z':26, 'A':27}
d['a']  # selection by a single key (label)
d['A']  # keys are case-sensitive
# ### Pandas has power of lists and dictionaries
# DataFrames and Series are able to make selections with integers like a list and with labels like a dictionary.
# # Extra Topics
# There are a few more items that are important and belong in this tutorial and will be mentioned now.
# # Using just the indexing operator to select rows from a DataFrame - Confusing!
# Above, I used just the indexing operator to select a column or columns from a DataFrame. But, it can also be used to select rows using a **slice**. This behavior is very confusing in my opinion. The entire operation changes completely when a slice is passed.
#
# Let's use an integer slice as our first example:
df[3:6]  # an integer slice selects ROWS, not columns
# To add to this confusion, you can slice by labels as well.
df['Aaron':'Christina']  # a label slice also selects rows
# # I recommend not doing this!
# This feature is not deprecated and completely up to you whether you wish to use it. But, I highly prefer not to select rows in this manner as it can be ambiguous, especially if you have integers in your index.
#
# Using **`.iloc`** and **`.loc`** is explicit and clearly tells the person reading the code what is going to happen. Let's rewrite the above using **`.iloc`** and **`.loc`**.
df.iloc[3:6] # More explicit than df[3:6]
df.loc['Aaron':'Christina'] # more explicit than df['Aaron':'Christina']
# # Cannot simultaneously select rows and columns with `[]`
# An exception will be raised if you try and select rows and columns simultaneously with just the indexing operator. You must use **`.loc`** or **`.iloc`** to do so.
df[3:6, 'Aaron':'Christina']  # intentionally raises an exception - use .loc or .iloc instead
# # Using just the indexing operator to select rows from a Series - Confusing!
# You can also use just the indexing operator with a Series. Again, this is confusing because it can accept integers or labels. Let's see some examples
food
food[2:4]  # integer slice
food['Niko':'Dean']  # label slice
# Since Series don't have columns you can use a single label and list of labels to make selections as well
food['Dean']
food[['Dean', 'Christina', 'Aaron']]
# Again, I recommend against doing this and always use **`.iloc`** or **`.loc`**
# # Importing data without choosing an index column
# We imported data by choosing the first column to be the index with the **`index_col`** parameter of the **`read_csv`** function. This is not typically how most DataFrames are read into pandas.
#
# Usually, all the columns in the csv file become DataFrame columns. Pandas will use the integers 0 to n-1 as the labels. See the example data below with a slightly different dataset:
df2 = pd.read_csv('../../data/sample_data2.csv')  # no index_col - pandas creates a default RangeIndex
df2
# # The default `RangeIndex`
# If you don't specify a column to be the index when first reading in the data, pandas will use the integers 0 to n-1 as the index. This technically creates a **`RangeIndex`** object. Let's take a look at it.
df2.index  # a RangeIndex object
# This object is similar to Python **`range`** objects. Let's create one:
range(7)
# Converting both of these objects to a list produces the exact same thing:
list(df2.index)
list(range(7))
# For now, it's not at all important that you have a **`RangeIndex`**. Selections from it happen just the same with **`.loc`** and **`.iloc`**. Let's look at some examples.
df2.loc[[2, 4, 5], ['food', 'color']]  # integers are the labels here
df2.iloc[[2, 4, 5], [3,2]]
# There is a subtle difference when using a slice. **`.iloc`** excludes the last value, while **`.loc`** includes it:
df2.iloc[:3]  # rows 0-2: position 3 excluded
df2.loc[:3]  # rows 0-3: label 3 included
# # Setting an index from a column after reading in data
# It is common to see pandas code that reads in a DataFrame with a RangeIndex and then sets the index to be one of the columns. This is typically done with the **`set_index`** method:
df2_idx = df2.set_index('Names')  # returns a new DataFrame with 'Names' as the index
df2_idx
# ### The index has a name
# Notice that this DataFrame does not look exactly like our first one from the very top of this tutorial. Directly above the index is the bold-faced word **`Names`**. This is technically the **name** of the index. Our original DataFrame had no name for its index. You can ignore this small detail for now. Subset selections will happen in the same fashion.
# # DataFrame column selection with dot notation
# Pandas allows you to select a single column as a Series by using **dot notation**. This is also referred to as **attribute access**. You simply place the name of the column without quotes following a dot and the DataFrame like this:
df.state  # attribute access - same result as df['state']
df.age
# # Pros and cons when selecting columns by attribute access
# The best benefit of selecting columns like this is that you get help when chaining methods after selection. For instance, if you place another dot after the column name and press **tab**, a list of all the Series methods will appear in a pop-up menu. It will look like this:
#
# 
#
# This help disappears when you use just the indexing operator:
#
# 
#
# The biggest drawback is that you cannot select columns that have spaces or other characters that are not valid as Python identifiers (variable names).
# # Selecting the same column twice?
# This is rather peculiar, but you can actually select the same column more than once:
df[['age', 'age', 'age']]  # duplicate column selections are allowed
# # Summary of Part 1
# We covered an incredible amount of ground. Let's summarize all the main points:
#
# * Before learning pandas, ensure you have the fundamentals of Python
# * Always refer to the documentation when learning new pandas operations
# * The DataFrame and the Series are the containers of data
# * A DataFrame is two-dimensional, tabular data
# * A Series is a single dimension of data
# * The three components of a DataFrame are the **index**, the **columns** and the **data** (or **values**)
# * Each row and column of the DataFrame is referenced by both a **label** and an **integer location**
# * There are three primary ways to select subsets from a DataFrame - **`[]`**, **`.loc`** and **`.iloc`**
# * I use the term **just the indexing operator** to refer to **`[]`** immediately following a DataFrame/Series
# * Just the indexing operator's primary purpose is to select a column or columns from a DataFrame
# * Using a single column name to just the indexing operator returns a single column of data as a Series
# * Passing multiple columns in a list to just the indexing operator returns a DataFrame
# * A Series has two components, the **index** and the **data** (**values**). It has no columns
# * **`.loc`** makes selections **only by label**
# * **`.loc`** can simultaneously select rows and columns
# * **`.loc`** can make selections with either a single label, a list of labels, or a slice of labels
# * **`.loc`** makes row selections first followed by column selections: **`df.loc[row_selection, col_selection]`**
# * **`.iloc`** is analogous to **`.loc`** but uses only **integer location** to refer to rows or columns.
# * **`.ix`** is deprecated and should never be used
# * **`.loc`** and **`.iloc`** work the same for Series except they only select based on the index as there are no columns
# * Pandas combines the power of python lists (selection via integer location) and dictionaries (selection by label)
# * You can use just the indexing operator to select rows from a DataFrame, but I recommend against this and instead sticking with the explicit **`.loc`** and **`.iloc`**
# * Normally data is imported without setting an index. Use the **`set_index`** method to use a column as an index.
# * You can select a single column as a Series from a DataFrame with dot notation
# # Way more to the story
#
# This is only part 1 of the series, so there is much more to cover on how to select subsets of data in pandas. Some of the explanations in this part will be expanded to include other possibilities.
# # Exercises
# The best way to learn pandas is to practice on your own. The following exercises will help cement your understanding of the material that was just covered. All these exercises will use the Chicago food inspections dataset. The dataset was found [here at data.world](https://data.world/cityofchicago/food-inspections-map).
#
# ### Read in the data with the following command before completing the exercises
df = pd.read_csv('../../data/food_inspections.csv')
df.head()  # first 5 rows only, to keep the output short
# # Tip!
# Append the **`head`** method at the end of your statements to prevent long output as was done above.
#
# # Solutions
# Make sure you check your answers with the [solutions notebook](./Solutions.ipynb).
# ### Exercise 1
# <span style="color:green; font-size:16px">The current DataFrame has a simple `RangeIndex`. Let make the **`DBA Name`** column the index to make it more meaningful. Save the result to variable **`df`** and output the first five rows with the **`head`** method.</span>
# +
# your code here
# -
# ### Exercise 2
# <span style="color:green; font-size:16px">Select the **`Risk`** column as a Series with just the indexing operator. Also select it with attribute access.</span>
# +
# your code here
# -
# ### Exercise 3
# <span style="color:green; font-size:16px">Select the **`Risk`** and **`Results`** columns</span>
# +
# your code here
# -
# ### Exercise 4
# <span style="color:green; font-size:16px">Select a single column as a DataFrame</span>
# +
# your code here
# -
# ### Exercise 5
# <span style="color:green; font-size:16px">Select the row for the restaurant **`WILD GOOSE BAR & GRILL`**. What object is returned?</span>
# +
# your code here
# -
# ### Exercise 6
# <span style="color:green; font-size:16px">Select the rows for the restaurants **`WILD GOOSE BAR & GRILL`** and **`TAQUERIA HACIENDA TAPATIA`** along with columns **`Risk`** and **`Results`**.</span>
# +
# your code here
# -
# ### Exercise 7
# <span style="color:green; font-size:16px">What is the risk of restaurant **`SCRUB A DUB`**?</span>
# +
# your code here
# -
# ### Exercise 8
# <span style="color:green; font-size:16px">Select every 3,000th restaurant from **`THRESHOLD SCHOOL`** to **`SCRUB A DUB`** and the columns from **`Inspection Type`** on to the end of the DataFrame.</span>
# +
# your code here
# -
# ### Exercise 9
# <span style="color:green; font-size:16px">Select all columns from the 500th restaurant to the 510th</span>
# +
# your code here
# -
# ### Exercise 10
# <span style="color:green; font-size:16px">Select restaurants 100, 1,000 and 10,000 along with columns 5, 3, and 1</span>
# +
# your code here
# -
# ### Exercise 11
# <span style="color:green; font-size:16px">Select the **`Risk`** column and save it to a Series</span>
# +
# your code here
# -
# ### Exercise 12
# <span style="color:green; font-size:16px">Using the risk Series, select **`ARBYS`** and **`POPEYES FAMOUS FRIED CHICKEN`**</span>
# +
# your code here
| Learn-Pandas/Selecting Subsets/01 Selecting Subsets with [ ], .loc and .iloc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Custom observation models
#
# While *bayesloop* provides a number of observation models like `Poisson` or `AR1`, many applications call for different distributions, possibly with some parameters set to fixed values (e.g. with a mean value set to zero). The [sympy.stats](http://docs.sympy.org/dev/modules/stats.html) and the [scipy.stats](http://docs.scipy.org/doc/scipy/reference/stats.html) modules include a large number of continuous as well as discrete probability distributions. The observation model classes `SciPy` and `SymPy` allow to create observation models to be used in *bayesloop* studies on-the-fly, just by passing the desired `scipy.stats` distribution (and setting values for fixed parameters, if necessary), or by providing a `sympy.stats` random variable, respectively. Note that these classes can only be used to model statistically independent observations.
#
# In cases where neither `scipy.stats` nor `sympy.stats` provide the needed model, one can further define a custom observation model by stating a likelihood function in terms of arbitrary [NumPy](http://www.numpy.org/) functions, using the `NumPy` class.
#
# ## Sympy.stats random variables
# The [SymPy](http://www.sympy.org/en/index.html) module introduces symbolic mathematics to Python. Its sub-module [sympy.stats](http://docs.sympy.org/dev/modules/stats.html) covers a wide range of discrete and continuous random variables. In the following, we re-define the observation model of the coal mining study `S` defined above, but this time use the `sympy.stats` version of the Poisson distribution:
# +
import bayesloop as bl
import numpy as np
import sympy.stats
from sympy import Symbol
rate = Symbol('lambda', positive=True)  # positive=True is required for SymPy to define the Poisson distribution correctly
poisson = sympy.stats.Poisson('poisson', rate)
L = bl.om.SymPy(poisson, 'lambda', bl.oint(0, 6, 1000))  # open parameter interval ]0, 6[ with 1000 grid points
# -
# First, we specify the only parameter of the Poisson distribution (denoted $\lambda$) symbolically as a positive real number. Note that providing the keyword argument `positive=True` is important for SymPy to define the Poisson distribution correctly (not setting the keyword argument correctly results in an error). Having defined the parameter, a random variable based on the Poisson distribution is defined. This random variable is then passed to the `SymPy` class of the *bayesloop* observation models. Just as for the built-in observation models of *bayesloop*, one has to specify the parameter names and values (in this case, `lambda` is the only parameter).
#
# Note that upon creating an instance of the observation model, *bayesloop* automatically determines the correct [Jeffreys prior](https://en.wikipedia.org/wiki/Jeffreys_prior) for the Poisson model:
#
# $$
# p(\lambda) \propto 1/\sqrt{\lambda}
# $$
#
# This calculation is done symbolically and therefore represents an important advantage of using the `SymPy` module within *bayesloop*. This behavior can be turned off using the keyword argument `determineJeffreysPrior`, in case one wants to use a flat parameter prior instead or in the case that the automatic determination of the prior takes too long:
# ```
# M = bl.om.SymPy(poisson, 'lambda', bl.oint(0, 6, 1000), determineJeffreysPrior=False)
# ```
# Alternatively, you can of course provide a custom prior via the keyword argument `prior`. This will switch off the automatic determination of the Jeffreys prior as well:
# ```
# M = bl.om.SymPy(poisson, 'lambda', bl.oint(0, 6, 1000), prior=lambda x: 1/x)
# ```
# See also [this tutorial](priordistributions.html) for further information on prior distributions. Having defined the observation model, it can be used for any type of study introduced above. Here, we reproduce the result of the [regime-switching example](changepointstudy.html#Exploring-possible-change-points) we discussed before. We find that the parameter distributions as well as the model evidence is identical - as expected:
# +
# %matplotlib inline
import matplotlib.pyplot as plt # plotting
import seaborn as sns # nicer plots
sns.set_style('whitegrid') # plot styling
S = bl.Study()
S.loadExampleData()  # the coal mining example data set
T = bl.tm.RegimeSwitch('log10pMin', -7)  # regime-switching transition model
S.set(L, T)
S.fit()
plt.figure(figsize=(8, 4))
plt.bar(S.rawTimestamps, S.rawData, align='center', facecolor='r', alpha=.5)
S.plot('lambda')  # inferred rate parameter over time
plt.xlim([1851, 1962])
plt.xlabel('year');
# -
# Finally, it is important to note that the `SymPy` module can also be used to create random variables for which some parameters have user-defined fixed values. The following example creates a normally distributed random variable with a fixed mean value $\mu = 4$, leaving only the standard deviation as a free parameter of the resulting observation model (which is assigned the parameter interval ]0, 3[):
# ```
# mu = 4
# std = Symbol('stdev', positive=True)
#
# normal = sympy.stats.Normal('normal', mu, std)
# L = bl.om.SymPy(normal, 'stdev', bl.oint(0, 3, 1000))
# ```
#
# ## Scipy.stats probability distributions
# We continue by describing the use of probability distributions of the `scipy.stats` module. Before we show some usage examples, it is important to note here that `scipy.stats` does not use the canonical parameter names for probability distributions. Instead, all continuous distributions have two parameters denoted `loc` (for shifting the distribution) and `scale` (for scaling the distribution). Discrete distributions only support `loc`. While some distributions may have additional parameters, `loc` and `scale` often take the role of known parameters, like *mean* and *standard deviation* in case of the normal distribution. In `scipy.stats`, you do not have to set `loc` or `scale`, as they have default values `loc=0` and `scale=1`. In *bayesloop*, however, you will have to provide values for these parameters, if you want either of them to be fixed and not treated as a variable.
#
# As a first example, we re-define the observation model of the coal mining study `S` defined above, but this time use the `scipy.stats` version of the Poisson distribution. First, we check the parameter names:
# +
import scipy.stats
scipy.stats.poisson.shapes  # shape parameters ('mu'); note 'loc' is NOT listed here
# -
# In `scipy.stats`, the rate of events in one time interval of the Poisson distribution is called *mu*. Additionally, as a discrete distribution, `stats.poisson` has an additional parameter `loc` (which is **not** shown by `.shapes` attribute!). As we do not want to shift the distribution, we have to set this parameter to zero in *bayesloop* by passing a dictionary for fixed parameters when initializing the class instance. As for the SymPy model, we have to pass the names and values of all free parameters of the model (here only `mu`):
# +
L = bl.om.SciPy(scipy.stats.poisson, 'mu', bl.oint(0, 6, 1000), fixedParameters={'loc': 0})  # fix loc=0: no shift of the distribution
S.set(L)
S.fit()
plt.figure(figsize=(8, 4))
plt.bar(S.rawTimestamps, S.rawData, align='center', facecolor='r', alpha=.5)
S.plot('mu')  # 'mu' is scipy's name for the Poisson rate
plt.xlim([1851, 1962])
plt.xlabel('year');
# -
# Comparing this result with the [regime-switching example](changepointstudy.html#Exploring-possible-change-points), we find that the model evidence value obtained using the `scipy.stats` implementation of the Poisson distribution is *different* from the value obtained using the built-in implementation or the `sympy.stats` version. The deviation is explained by a different prior distribution for the parameter $\lambda$. While both the built-in version and the `sympy.stats` version use the [Jeffreys prior](https://en.wikipedia.org/wiki/Jeffreys_prior) of the Poisson model, the `scipy.stats` implementation uses a flat prior instead. Since the `scipy.stats` module does not provide symbolic representations of probability distributions, *bayesloop* cannot determine the correct Jeffreys prior in this case. Custom priors are still possible, using the keyword argument `prior`.
#
# ## NumPy likelihood functions
#
# In some cases, the data at hand cannot be described by a common statistical distribution contained in either `scipy.stats` or `sympy.stats`. In the following example, we assume normally distributed data points with known standard deviation $\sigma$, but unknown mean $\mu$. Additionally, we suspect that the data points may be serially correlated and that the correlation coefficient $\rho$ possibly changes over time. For this multivariate problem with the known standard deviation as "extra" data points, we need more flexibility than either the `SymPy` or the `SciPy` class of `bayesloop` can offer. Instead, we may define the likelihood function of the observation model directly, with the help of [NumPy](http://www.numpy.org/) functions.
#
# First, we simulate $1000$ random variates with $\mu=3$, $\sigma=1$, and a linearly varying correlation coefficient $\rho$:
# +
n = 1000  # number of data points
# parameters
mean = 3
sigma = 1
rho = np.concatenate([np.linspace(-0.5, 0.9, 500), np.linspace(0.9, -0.5, 499)])  # linearly varying serial correlation
# tri-diagonal covariance matrix: adjacent data points are correlated with coefficient rho
cov = np.diag(np.ones(n)*sigma**2.) + np.diag(np.ones(n-1)*rho*sigma**2., 1) + np.diag(np.ones(n-1)*rho*sigma**2., -1)
# random variates
np.random.seed(123456)  # fixed seed for a reproducible simulation
obs_data = np.random.multivariate_normal([mean]*n, cov)
plt.figure(figsize=(8, 4))
plt.plot(obs_data, c='r', alpha=0.7, lw=2)
plt.xlim([0, 1000])
plt.xlabel('time')
plt.ylabel('data');
# -
# Before we create an observation model to be used by `bayesloop`, we define a pure Python function that takes a segment of data as the first argument, and NumPy arrays with parameter grids as further arguments. Here, one data segment includes two subsequent data points `x1` and `x2`, and their known standard deviations `s1` and `s2`. The likelihood function we evaluate states the probability of observing the current data point `x2`, given the previous data point `x1`, the known standard deviations `s2`, `s1` and the parameters $\mu$ and $\rho$:
#
# $$P(x_2~|~x_1, s_2, s_1, \mu, \rho) = \frac{P(x_2, x_1~|~s_2, s_1, \mu, \rho)}{P(x_1~|~s_1, \mu)}~,$$
#
# where $P(x_2, x_1~|~s_2, s_1, \mu, \rho)$ denotes the [bivariate normal distribution](http://mathworld.wolfram.com/BivariateNormalDistribution.html), and $P(x_1~|~s_1, \mu)$ is the marginal, univariate normal distribution of $x_1$. The resulting distribution is expressed as a Python function below. Note that all mathematical functions use NumPy functions, as the function needs to work with arrays as input arguments for the parameters:
def likelihood(data, mu, rho):
    """Conditional likelihood P(x2 | x1, s2, s1, mu, rho) of the current point.

    `data` is the segment (x2, x1, s2, s1): current point, previous point and
    their known standard deviations.  `mu` and `rho` may be NumPy parameter
    grids, so all math below is vectorized NumPy.
    """
    x2, x1, s2, s1 = data
    one_minus_rho2 = 1 - rho**2.
    # quadratic form of the bivariate normal, divided by the marginal of x1
    quad_form = ((x1-mu)*rho/s1)**2. - (2*rho*(x1-mu)*(x2-mu))/(s1*s2) + ((x2-mu)/s2)**2.
    normalization = np.sqrt(2*np.pi)*s2*np.sqrt(one_minus_rho2)
    return np.exp(-quad_form / (2*one_minus_rho2)) / normalization
# As `bayesloop` still needs to know about the parameter boundaries and discrete values of the parameters $\mu$ and $\rho$, we need to create an observation model from the custom likelihood function defined above. This can be done with the `NumPy` class:
L = bl.om.NumPy(likelihood, 'mu', bl.cint(0, 6, 100), 'rho', bl.oint(-1, 1, 100))
# Before we can load the data into a `Study` instance, we have to format data segments in the order defined by the likelihood function:
# ```
# [[x1, x0, s1, s0],
# [x2, x1, s2, s1],
# [x3, x2, s3, s2],
# ...]
# ```
# Note that in this case, the standard deviation $\sigma = 1$ for all time steps.
# Each row is one segment (x2, x1, s2, s1) as required by `likelihood`.
# NOTE(review): `input_data` aliases the same array as `data_segments` and is
# not used in this section -- presumably referenced elsewhere; confirm before removing.
data_segments = input_data = np.array([obs_data[1:], obs_data[:-1], [sigma]*(n-1), [sigma]*(n-1)]).T
# Finally, we create a new `Study` instance, load the formatted data, set the custom observation model, set a suitable transition model, and fit the model parameters:
# +
# Build and run the study: the custom NumPy observation model plus a Gaussian
# random walk on rho (step size 0.03) as the transition model.
S = bl.Study()
S.loadData(data_segments)
S.set(L)
T = bl.tm.GaussianRandomWalk('d_rho', 0.03, target='rho')
S.set(T)
S.fit()
# -
# Plotting the true values of $\rho$ used in the simulation of the data together with the inferred distribution (and posterior mean values) below, we see that the custom model accurately infers the time-varying serial correlation in the data.
plt.figure(figsize=(8, 4))
S.plot('rho', label='mean inferred')
plt.plot(rho, c='r', alpha=0.7, lw=2, label='true')
plt.legend()
plt.ylim([-.6, 1]);
# Finally, we note that the `NumPy` observation model allows to access multiple data points at once, as we can pass arbitrary data segments to it (in the example above, each data segment contained the current and the previous data point). This also means that there is no check against looking at the data points twice, and the user has to make sure that the likelihood function at time $t$ always states the probability of **only the current** data point:
#
# $$ P(\text{data}_{t}~|~\{\text{data}_{t'}\}_{t'<t}, \text{parameters})$$
#
# If the left side of this conditional probability contains data points from more than one time step, the algorithm will look at each data point more than once, and this generally results in an underestimation of the uncertainty tied to the model parameters!
| docs/source/tutorials/customobservationmodels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.3 64-bit
# metadata:
# interpreter:
# hash: e534e48711db4d1e1c48977d0d14ff85b1f16d41bcc4fdfd88268a329b3c9d66
# name: python3
# ---
# Import the libraries
import os
import pandas as pd
import geopandas as gpd
import sqlalchemy as sql
from geoalchemy2 import Geometry, WKTElement
# Load the environment variables and constants
# Using postgres database to store the data
# Database connection settings, read from the environment so no secrets are
# committed.  NOTE(review): the constant names misspell "DATABASE" as
# "DATBASE"; kept as-is because the connection-string cell below uses them.
DATBASE_HOST = os.getenv('ENVISO_INSIGHT_DATABASE_HOSTNAME')
DATBASE_NAME = 'spatial_db_project'
DATBASE_USER = os.getenv('ENVISO_INSIGHT_DATABASE_USERNAME')
DATBASE_PASS = os.getenv('ENVISO_INSIGHT_DATABASE_PASSWORD')
# Load the house price data
df_house_price = pd.read_csv('dataset/house_price_data.csv')
df_house_price.head()
df_house_price.shape
# Check for NULL values within the dataset
df_house_price.isna().sum()
# To keep things simple, we will start by removing the fields that have NULL values
# This will make our life easier
# Prepare a list of columns to be dropped
list_columns_dropped = ['id_string','project','dev_name','furnishing','area','bathroom_num','floor_count','floor_num','poster_name','trans','url']
df_house_price = df_house_price.drop(labels = list_columns_dropped, axis = 1)
df_house_price.head()
# Check the datatype of the column post_date
df_house_price.dtypes
# Convert the post_date datatype from object to timestamp
# object datatype in pandas is string in basic python
df_house_price['post_date'] = pd.to_datetime(df_house_price['post_date'])
df_house_price.head()
# Convert the pandas dataframe to a geopandas dataframe
# This will make a geometry column based on the latitude and longitude
gdf_house_price = gpd.GeoDataFrame(df_house_price, geometry = gpd.points_from_xy(df_house_price.longitude, df_house_price.latitude), crs="EPSG:4326")
gdf_house_price.head()
# +
# Insert the raw data into postgres table
# Create the database engine first
# Build a SQLAlchemy engine from the environment-provided credentials.
connection_string = f'postgresql://{DATBASE_USER}:{DATBASE_PASS}@{DATBASE_HOST}:5432/{DATBASE_NAME}'
db_engine = sql.create_engine(connection_string)
# Write the GeoDataFrame (geometry column included) to PostGIS, replacing any
# existing table of the same name.
gdf_house_price.to_postgis(
    name = 'mumbai_house_price_raw',
    con = db_engine,
    if_exists = 'replace',
    index = False
)
# -
| 01_data_cleaning_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# CSCI 183 Lab 2 - Numpy. <NAME>, worked with <NAME>
# # Numpy Reference Guide
#
# Sources:
#
# http://www.engr.ucsb.edu/~shell/che210d/numpy.pdf
#
# https://github.com/hallr/DAT_SF_19/blob/master/code/04_numpy.py
import numpy as np
import matplotlib.pyplot as plt
# From here on, np can be used for numpy and plt for matplotlib.pyplot
# # Lists, Arrays, List of Lists
data1 = [1,2,2.5,4] #This is a simple list
print(data1)
data1[2] #refers to the 3rd element of data1
array1 = np.array(data1) #we just created a 1d list
print(array1)
array1[1] #refers to the second element of array1
data2 = 2*data1
print(data2) # pay attention to output
array2 = 2*array1
print(array2) # pay attention to output
ArrayToList = array2.tolist() #that is how you convert arrays to list
print(ArrayToList) # pay attention to commas
data3 = [[1,4,7],range(1,4)] #list of lists - look at the output of range()
print(data3)
data3[1][2]
data3[0]
data3[1:]
y = data3[1:]
print(y[0][1])
array2 = np.array(data3)
print(array2)
array2[1][2] #alternative command is array2[1,2]
print(2*data3) #pay attention to output
print(2*array2) #look out the output of array
print(array2[1,2])
array3 = array2[:,0] #First Column is returned
print(array3)
array4 = array2[0,:] #First Row is returned
print(array4)
array5 = array2[0, 0:2] #the first two elements of the first row is returned
print(array5)
# # Examining Arrays
print(array2.dtype) #Returns int64 since all elements of array are integer numbers
print(array2.ndim) #it is a 2-dimentional array - Rows and Columns
array1d = np.array([1,2,3])
print(array1d.ndim) #this was a 1-dementional array thus the output is 1
print(array2.shape) #returns (2,3) - there are 2 rows and 3 columns array2.shape[0] is number of rows
print(array2.size) #returns 6 (total number of elements in this array 2*3 = 6)
print(len(array2)) #returns number of rows i.e. 2 - this is usually number of observations
# # Some handy short-cuts to create special arrays
x1 = np.zeros(5) #it creates a one-dimentional array with 5 elements all equal to zero
print("x1 = ")
print(x1)
x2 = np.zeros((2, 4)) #it creates a two-dimentional array with 2 rows and 4 columns. All elements are set to zero
print("x2 = ")
print(x2)
x3 = np.ones(6)
print("x3 = ") #it creates a one-dimentional array with 5 elements all equal to one
print(x3)
x4 = np.linspace(0,6,4) #it generates 4 equally distanced points from 0 to 6 (inclusive) - i.e. 0,2,4,6
print("x4 = ")
print(x4)
x5 = np.logspace(1,3,4) #on logarithmic scale, it generates 4 equally distanced points from 10^1 to 10^3 (inclusive)
print("x5 = ")
print(x5)
x6 =np.arange(5) #it generates a one dimentional array with 5 elements starting from 0
print("x6 = ")
print(x6)
x7 = x6.astype(float) #we just change array type from Integer to Float
print("x7 = ")
print(x7)
# # Logical expressions and Boolean Arrays
cities = np.array(['SF', 'Seattle', 'SF', 'NY'])
Boolean1 = cities == 'SF' # Pay attention to the difference between = and ==
print("Boolean1 = ")
print(Boolean1) #returns True and False
print(cities[~Boolean1]) #returns all cities that are not SF - i.e. 'Seattle' and 'NY'
Boolean2 = (cities == 'SF') | (cities == 'NY')
print("Boolean2 = ")
print(Boolean2) #returns true for elements that are either equal to 'SF' or 'NY'
print(cities[~Boolean2]) #returns 'Seattle'
print(np.unique(cities)) #returns unique values in this array - i.e. 'NY' 'SF' and 'Seattle'
# # Mathematical and Statistical Operations
ArrayTest = np.arange(10)
print(ArrayTest*10) # multiplies each element by 10
ArrayTest = ArrayTest ** 2.5 # Array Test to the power of 2.5
print(ArrayTest)
np.rint(ArrayTest) #round into the nearest integer
np.ceil(ArrayTest) #round up
np.floor(ArrayTest) #Round down
Boolean3 = np.isnan(ArrayTest) #Returns true when an element has value NaN - very handy in cleaning data
print(Boolean3) #it returns all False since all elements had values
np.argmax(ArrayTest) #returns the index of the maximum element in the array. Also try argmin
np.max(ArrayTest) #return maximum value of the array. Also try min
rnd = np.random.randn(4,2)
"""returns a 2d array with 4 rows and 2 columns. Each element is a sample from a standard normal distribution.
Standard Normal distribution has mean zero and sd 1. """
print(rnd)
rnd.mean() #returns mean of all elements
rnd.std() #returns standard deviation of all elements
rnd.var() #returns variance of all elements
rnd.sum() #returns sum of all elements
rnd.sum(axis=0) #returns sum of columns
rnd.sum(axis=1) #returns sum of rows
# # Scatter Plots
SampleSize = 20
x = np.arange(SampleSize)
print(x)
error = np.random.randn(1,SampleSize)
y = -2 + 3*x + 10*error
plt.scatter(x,y)
plt.show()
# # Conditional Logic
Logical = np.where(rnd > 0, 2, -2) #Checks condition, if true then returns 2, if false returns -2
print(Logical)
Logical = np.where(rnd > 0, rnd, -2) #Checks condition, if true then returns the rnd number, if false returns -2
print(Logical)
(rnd > 1).sum() #counts numer of elements that are more than 1
(rnd > 1).any() #Checks if any value is more than 1, if it is, then returns True, if all values are <=1 returns False
(rnd > 1).all() #Checks if all values are more than 1, if it is, then returns True, otherwise False
# # Random Numbers
np.random.seed(213) #Seed number is set to 213
np.random.rand(2,4) #Give you a 2 by 4 array of random numbers. Each element is between 0 and 1
np.random.randn(5) # returns 5 random numbers based on standard normal distribution
np.random.seed() #if you do not specify seed - then the current system time is used
np.random.rand(2,4)
# # Reshaping, Transposing, and Flattening arrays
Initial_1D_Array = np.arange(20)
print(" Initial_1D_Array = ")
print(Initial_1D_Array)
ReShaped_2D = Initial_1D_Array.reshape(5,4) #Reshape our original array to a 5 by 4 two-Dimenational Array
print("ReShaped_2D =")
print(ReShaped_2D)
Flatten_Array = ReShaped_2D.flatten() #our 2-D array is flatten now
print("Flatten_Array = ")
print(Flatten_Array)
Transposed_array = ReShaped_2D.T #We just transposed our 5 by 4 array to a 4 by 5 array
print("Transposed_array = ")
print(Transposed_array )
# Using IPython
# ### Review Python Basics
#
# Test your skills by answering the following questions:
# #### Question 1. Divide 10 by 20 and put it in Variable A.
### Insert your code here and then uncomment | print A | when you are ready to test it.
A = 10/20
print A
#### If you did not get a float (decimals), alter your equation to get the desired result (0.5)
#### If you did not get a float (decimals) alter your equation to get the desired result (0.5)
A = 10.0/20
print A
# #### Question 2. Create a function called division that will divide any two numbers and prints the result (with decimals).
# Call your function. Confirm that the results are as expected.
# +
# Remember functions start with def
def division(a, b):
    """Divide a by b, returning a float result even when both are ints."""
    return a / float(b)
print division(5,3)
print division(25,5)
print division(27.7,92)
# -
# #### Question 3. Using .split() split my string into separate words in a variable named words
# +
my_string = "the cow jumped over the moon"
#put your code here it should return ['the', 'cow', 'jumped', 'over', 'the', 'moon']
words = my_string.split()
#returns ['the', 'cow', 'jumped', 'over', 'the', 'moon']
print words
#print words
# -
# #### Question 4. How many words are in my_string?
#
word_count = len(words)
#returns the number of words- 6
print word_count
# #### Question 5. Use a list comprehension to find the length of each word
#
# result: [3, 3, 6, 4, 3, 4]
length_of_each_word = [len(word) for word in words]
print length_of_each_word
# #### Question 6. Put the words back together in a variable called sentence using .join()
# result:
# the cow jumped over the moon
#put them back together with join
sentance = " ".join(words)
print sentance
# #### Bonus: Add a "||" between each word
# result:
# the||cow||jumped||over||the||moon
#the " " puts the space in between the words. or you could put anything else in
alternate_sentance = "||".join(words)
print alternate_sentance
| CSCI 183 - Data Science/CSCI 183 Lab 2 - Numpy Nicholas Fong.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python3
# ---
# +
import re
import string
import pandas as pd
# -
lampung_df = pd.read_csv("datasets/lampung.csv", encoding='cp1252')
lampung_df.columns = ["words"]
lampung_df['label'] = "lampung"
indonesia_df = pd.read_csv("datasets/indonesia.csv", encoding='cp1252')
indonesia_df.columns = ["words"]
indonesia_df['label'] = "indonesia"
dataframe_df = pd.concat([lampung_df, indonesia_df])
dataframe_df = dataframe_df.sample(frac=1).reset_index(drop=True)
dataframe_df
# +
def case_folding(text):
    """Normalize a text string for classification.

    Lowercases, strips surrounding whitespace, removes @mentions, URLs and
    any character that is not alphanumeric/space/tab, then drops remaining
    punctuation.
    """
    text = text.lower()  # lowercase everything
    text = text.strip()  # trim surrounding white space
    # drop mentions, URLs, leading "rt", and non-alphanumeric characters
    text = re.sub(r"(@\[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)|^rt|http.+?", "", text)
    text = text.translate(str.maketrans("", "", string.punctuation))
    return text

# BUG FIX: the cleaning was previously applied to the 'label' column (a no-op,
# since labels are already clean lowercase words) while the raw text in
# 'words' was left uncleaned.  Apply it to the 'words' column instead.
dataframe_df['words'] = dataframe_df['words'].apply(case_folding).values
# -
dataframe_df.head()
dataframe_df.to_csv('datasets/raw_dataset.csv', index=False)
| create_raw_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="ECVON7mE-whX"
# < [Distance and Similarity](../ica06/Distance_and_Similarity.ipynb) | Contents (TODO) | [Neural Networks](../ica08/Neural_Networks.ipynb) >
#
# <a href="https://colab.research.google.com/github/stephenbaek/bigdata/blob/master/in-class-assignments/ica07/Cluster_Analysis.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
# + [markdown] colab_type="text" id="oyVNuRfc-whb"
# # k-means Clustering and the Lloyd's Algorithm
#
# Clustering algorithms are a category of unsupervised learning algorithms which seek to learn an optimal grouping of data points. One of the most widely used clustering algorithms is known as *k-means clustering*. k-means clustering algorithm is a typical example of NP-hard problem, whose solution is unknown. Luckily, a method called the *Lloyd's algorithm* is known to converge to a local minimum of the solution (not the global minimum though) and can be quite useful in many cases.
# + [markdown] colab_type="text" id="yjfNDUwGBd5p"
# To begin with, we first generate some simulated data samples using `make_blobs` function available in Scikit-Learn.
# + colab={} colab_type="code" id="Ijy06oA0BUyV"
# BUG FIX: `sklearn.datasets.samples_generator` was a private module that was
# deprecated in scikit-learn 0.22 and removed in 0.24; `make_blobs` is part of
# the public `sklearn.datasets` API.
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

N = 1000
# Three well-separated Gaussian blobs; fixed random_state keeps runs reproducible.
x, y = make_blobs(n_samples=N, centers=3, cluster_std=0.5, random_state=0)
plt.scatter(x[:, 0], x[:, 1]);
# + [markdown] colab_type="text" id="9tqeUfXGCGS6"
# As the name indicates, k-means clustering algorithm seeks to find '*means*' or '*centroids*' for k of each cluster. Here, we define three centroids that are randomly initialized.
# + colab={} colab_type="code" id="bRyK0T9sCF_a"
import numpy as np
K = 3 # user-defined parameter k
centroids = np.random.uniform(-3, 3, size=(K, 2))
plt.scatter(x[:, 0], x[:, 1]);
plt.scatter(centroids[:, 0], centroids[:, 1], c='black', s=300, alpha=0.5);
# + [markdown] colab_type="text" id="sL7LbleIDR2z"
# With the centroids initialized as above, we can now evaluate the cluster membership of each data point based on their distances to the centroids.
# + colab={} colab_type="code" id="FCplm32JB8rT"
# Assignment step: label each point with the index of its nearest centroid
# (Euclidean distance), then plot the resulting membership.
y_pred = -np.ones(N)
for i in range(N):
    d = np.zeros(K)
    for j in range(K):
        # distance from point i to centroid j
        d[j] = np.sqrt(np.sum((x[i] - centroids[j])**2))
    y_pred[i] = np.argmin(d)
plt.scatter(x[:, 0], x[:, 1], c = y_pred);
plt.scatter(centroids[:, 0], centroids[:, 1], c='black', s=300, alpha=0.5);
# + [markdown] colab_type="text" id="l2AmF2cCE7B6"
# Now, based on the cluster membership, we will update the positions of the centroids: to the real center of the cluster, not the randomly initialized positions. The code below does the update:
# + colab={} colab_type="code" id="wZkFepoBDxuL"
# Update step: move each centroid to the mean of the points assigned to it.
for i in range(K):
    centroids[i] = [0, 0]
for i in range(N):
    centroids[ int(y_pred[i]) ] += x[i]
for i in range(K):
    # NOTE(review): divides by the cluster size; a cluster that lost all its
    # points would divide by zero -- fine for this demo, worth guarding in real code.
    centroids[i] /= np.sum(y_pred == i)
plt.scatter(x[:, 0], x[:, 1], c = y_pred);
plt.scatter(centroids[:, 0], centroids[:, 1], c='black', s=300, alpha=0.5);
# + [markdown] colab_type="text" id="pNTd_IVUF7ge"
# Now, the displacement of the centroids causes the change of the group membership. So we copy and paste the group membership code a few cells above and reuse it below. Notice the updated membership.
# + colab={} colab_type="code" id="p946c4ejF7Fz"
# Assignment step again (deliberate repetition for the tutorial): membership
# changes because the centroids moved in the previous cell.
y_pred = -np.ones(N)
for i in range(N):
    d = np.zeros(K)
    for j in range(K):
        d[j] = np.sqrt(np.sum((x[i] - centroids[j])**2))
    y_pred[i] = np.argmin(d)
plt.scatter(x[:, 0], x[:, 1], c = y_pred);
plt.scatter(centroids[:, 0], centroids[:, 1], c='black', s=300, alpha=0.5);
# + [markdown] colab_type="text" id="obfKBbCTGVqQ"
# Again, the change of group membership requires update of centroid locations. Similar to the above, we will copy and paste exactly the same code we used earlier.
# + colab={} colab_type="code" id="hP0VwY79GlD4"
# Update step again (deliberate repetition for the tutorial): recompute the
# centroids from the refreshed membership.
for i in range(K):
    centroids[i] = [0, 0]
for i in range(N):
    centroids[ int(y_pred[i]) ] += x[i]
for i in range(K):
    centroids[i] /= np.sum(y_pred == i)
plt.scatter(x[:, 0], x[:, 1], c = y_pred);
plt.scatter(centroids[:, 0], centroids[:, 1], c='black', s=300, alpha=0.5);
# + [markdown] colab_type="text" id="Mp0RxvWNGnnG"
# You may now realize that the clusters are being updated and the algorithm begins to group the data correctly. As such, k-means clustering algorithm (Lloyd's algorithm) is simply a repetition of the membership update and centroid update back and forth. Therefore, we may benefit from modularizing the above code cells into functions:
# + colab={} colab_type="code" id="3Zi0vM94HF_C"
# Assignment: Implement functions to modularize the above steps of the Lloyd's algorithm.
def update_membership(points, centers):
    """Assignment step: return an array with the nearest-centroid index for
    every point.  NOTE: `clusters` is undefined until the student fills this in."""
    # YOUR CODE HERE
    return clusters
def update_centroids(points, clusters):
    """Update step: return the centroids recomputed as cluster means.
    NOTE: `centers` is undefined until the student fills this in."""
    # YOUR CODE HERE
    return centers
def plot_clusters(points, clusters, centers):
    """Scatter the points colored by cluster, with centroids as large grey dots."""
    plt.scatter(points[:, 0], points[:, 1], c = clusters);
    plt.scatter(centers[:, 0], centers[:, 1], c='black', s=300, alpha=0.5);
# + [markdown] colab_type="text" id="c9duMhgaIa_W"
# Now, with the functions defined above, we can run the cell below multiple times (just hit the play button one after another) to complete the Lloyd's algorithm. Each time you run it, see how the cluster updates.
# + colab={} colab_type="code" id="lVU67_uzHfRs"
# One Lloyd iteration: reassign memberships, then recompute centroids.
# Re-run this cell repeatedly and watch the clusters converge.
y_pred = update_membership(x, centroids)
centroids = update_centroids(x, y_pred)
plot_clusters(x, y_pred, centroids)
# + [markdown] colab_type="text" id="dS5pJ0oCx1oL"
# Finally, we just need one more component: some criteria to check when to terminate the iteration.
#
# **Assignment** Search online for the convergence criteria of the Lloyd's algorithm. Implement a function named `kmeans(points, k)` that internally calls `update_membership` and `update_centroids` functions above, repeatedly. In the implementation, let the function determine when the convergence is achieved and terminate.
#
# + [markdown] colab_type="text" id="2sbl92kW-wkE"
# < [Distance and Similarity](../ica06/Distance_and_Similarity.ipynb) | Contents (TODO) | [Neural Networks](../ica08/Neural_Networks.ipynb) >
#
# <a href="https://colab.research.google.com/github/stephenbaek/bigdata/blob/master/in-class-assignments/ica07/Cluster_Analysis.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
| in-class-assignments/ica07/Cluster_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bar charts of violations by state, year, contaminant code(s)
# +
import os
import numpy as np
import pandas as pd
import MySQLdb
# %matplotlib inline
data_dir = '../../../data'
# -
# ## Connect to MySQL cloud db
# 1. slack @fpaupier for db_user and password
# 2.1 Download Google SDK:
# Linux: https://cloud.google.com/sdk/docs/quickstart-linux
# Windows: https://cloud.google.com/sdk/docs/quickstart-windows
# macOS: https://cloud.google.com/sdk/docs/quickstart-macos
# 2.2 from command line `gcloud auth login`
# 2.3 from command line `gcloud init`
# 3.1 install https://cloud.google.com/sql/docs/mysql/quickstart-proxy-test#install-proxy
# 3.2 run sql cloud proxy ./cloud_sql_proxy -instances=safe-water-235819:us-east1:safe-water-db=tcp:3306
# 4. pip install MySQLdb
# Read un and pwd from local file that is not deployed!
# first line un, second line pwd
# Alternatively just enter un, pwd in cell below but don't check it in!
# Read the DB username (line 1) and password (line 2) from a local file that
# is never committed or deployed.
my_settings = []
with open(os.path.join(data_dir, 'my_settings.txt')) as f:
    for line in f:
        my_settings.append(line)
un = my_settings[0].rstrip('\n')
pwd = my_settings[1].rstrip('\n')
db_user = un
# BUG FIX: this line previously held a '<PASSWORD>' redaction placeholder,
# which is not valid Python; the password read from my_settings.txt above is
# what the connection below needs.
password = pwd
db = MySQLdb.connect(host="127.0.0.1", # your host, usually localhost
user=db_user, # your username
passwd=password, # your password
port=3306,
db = 'safe_water') # name of the data base
# ## Fetch data into DataFrames
cmd = "select PWSID, PWS_TYPE_CODE, STATE_CODE from WATER_SYSTEM"
water_sys = pd.read_sql(cmd, db)
water_sys.head()
# get a zero padded string for contaminant code
# e.g. 400 to '0400'
def convert_code_to_string(row):
    """Return the row's CONTAMINANT_CODE as a zero-padded 4-char string, e.g. 400 -> '0400'."""
    code = row['CONTAMINANT_CODE']
    return str(code).rjust(4, "0")
# +
# uncomment 2nd line if you want only health based violations
cmd = "select distinct PWSID, CONTAMINANT_CODE, COMPL_PER_BEGIN_DATE from VIOLATION " #\
# + "where IS_HEALTH_BASED_IND = 'Y'"
viol = pd.read_sql(cmd, db)
# this is a workaround, ideally CONTAMINANT_CODE should be a string
viol.loc[:, 'CONTAMINANT_CODE_STR'] = viol.apply(convert_code_to_string, axis=1)
viol.head()
# -
# ## Join violations to dictionary of contaminants, to get names
contaminant_codes = pd.read_csv(os.path.join(data_dir, 'sdwis/contaminant-codes.csv'), sep=',', \
dtype={'CODE': np.str})
contaminant_codes = contaminant_codes.rename(index=str, columns={"NAME": "CONTAMINANT"})
contaminant_codes.head()
viol = pd.merge(viol, contaminant_codes, left_on='CONTAMINANT_CODE_STR', right_on='CODE', how='left')
viol = viol[['PWSID', 'CONTAMINANT_CODE', 'CONTAMINANT', 'COMPL_PER_BEGIN_DATE']].drop_duplicates()
viol.head()
def get_year_for_violation(row):
    """Return the year of COMPL_PER_BEGIN_DATE, or 0 when the date is missing."""
    begin_date = row['COMPL_PER_BEGIN_DATE']
    return begin_date.year if begin_date is not None else 0
viol.loc[:, 'VIOLATION_YEAR'] = viol.apply(get_year_for_violation, axis=1)
water_sys_viol = pd.merge(water_sys, viol, on='PWSID')
water_sys_viol.head()
def get_num_violations(df, state, min_year, max_year, contaminant_codes=None):
    """Group violations for one state by (CONTAMINANT, VIOLATION_YEAR).

    Parameters
    ----------
    df : water-system-violation DataFrame (needs STATE_CODE, VIOLATION_YEAR,
         CONTAMINANT_CODE and CONTAMINANT columns)
    state : two-letter state code, e.g. 'VT'
    min_year, max_year : inclusive year range
    contaminant_codes : optional list of codes to filter on; None/empty = all

    Returns a pandas GroupBy; call .size() on it for the counts.

    Rewritten to build the shared boolean mask once instead of duplicating the
    whole filter expression in both branches, and to avoid the mutable-default
    argument ([]) of the original.
    """
    mask = (df['STATE_CODE'] == state) & \
           (df['VIOLATION_YEAR'] >= min_year) & \
           (df['VIOLATION_YEAR'] <= max_year)
    if contaminant_codes:
        mask &= df['CONTAMINANT_CODE'].isin(contaminant_codes)
    return df[mask].groupby(['CONTAMINANT', 'VIOLATION_YEAR'])
#
# ## Plot number of violations on a bar chart
# +
# Violations in the state between 2008 and 2018
contaminant_codes = [5000] # pass this optionally to filter by contaminant
df = get_num_violations(water_sys_viol, 'VT', 2008, 2018, contaminant_codes)
s1 = df.size()
# Unstack the (CONTAMINANT, YEAR) counts into a contaminant-by-year table and
# draw one stacked bar per year; guard against an empty selection.
if len(s1.index) > 0:
    df3 = s1.unstack()
    df3.T.plot.bar(stacked=True, figsize = (25,15), title='Number of violations', fontsize=14)
else:
    print ('No data to chart')
# -
| code/python/notebooks/eda_charts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Hyper-Spherical Echo State Network reservoir computing
# Learning a [Mackey-Glass](http://www.scholarpedia.org/article/Mackey-Glass_equation) system
# Implemented and tested here:
# "Echo State Networks with Self-Normalizing Activations on the Hyper-Sphere"
#
# Read the paper here:
# https://arxiv.org/abs/1903.11691
# The original pyESN.py demo has an error of 0.139603909616.
#
# Can we beat it using HyperSpherical projection, and a good amount of hyperparameter tuning?
#
# We quickly re-run the orginal pyESN example using the new projection technique, and compare how it does.
# This provides a worked example of how to run the code.
#
# Note: In theory, the Hyper-Sphere projection should stabilise the ESN at the "edge of chaos", keeping it stable across many spectral radii and thereby offering a greater search space from which to tune and build great models.
#
# However, the new "spherical radius" now also needs tuning - in relation to the input data values, rather than tuning the topology of the network itself (which is what the "spectral radius" is for).
#
# This additional hyperparameter now adds to the tuning challenge overall, so:
# In the sister notebook in this repo, I've shown how to tune the model using DEAP "Distributed Evolutionary Algorithms in Python" which uses genetic search over parameters to find the best performing ESN architecture.
#
# It seems to work very well, but your mileage may vary.
#
# +
import numpy as np
from pyESN import ESN
from matplotlib import pyplot as plt
# %matplotlib inline
# Reservoir size (number of hidden units).
size = 1000
# Mackey-Glass (tau=17) series, the same data set as the original pyESN demo.
data = np.load('mackey_glass_t17.npy') # http://minds.jacobs-university.de/mantas/code
# Hyper-spherical ESN configuration.  `projection = 1` enables the
# hyper-sphere projection; `sphere_radius` is the extra hyper-parameter it
# introduces, tuned relative to the input data values.
esn = ESN(n_inputs = 1,
n_outputs = 1,
n_reservoir = size,
projection = 1,
noise = 0.0,
rectifier = 1,
steepness = 2,
sparsity = 0.6,
sphere_radius = 38 ,
teacher_forcing = True,
random_state=21,
spectral_radius = 1.3,
)
# Train on the first 2000 samples, then run the ESN freely for the next 2000.
trainlen = 2000
future = 2000
pred_training = esn.fit(np.ones(trainlen),data[:trainlen])
prediction = esn.predict(np.ones(future))
# RMSE of the free-running prediction against the held-out continuation.
print("test error: \n"+str(np.sqrt(np.mean((prediction.flatten() - data[trainlen:trainlen+future])**2))))
plt.figure(figsize=(19,6.5))
plt.plot(range(0,trainlen+future),data[0:trainlen+future],'k',label="target system")
plt.plot(range(trainlen,trainlen+future),prediction,'r', label="free running ESN")
lo,hi = plt.ylim()
# vertical dotted line marking the train/predict boundary
plt.plot([trainlen,trainlen],[lo+np.spacing(1),hi-np.spacing(1)],'k:')
plt.legend(loc=(0.61,1.1),fontsize='small')
# -
print("our tuned test error: \n"+str(np.sqrt(np.mean((prediction.flatten() - data[trainlen:trainlen+future])**2))))
# +
# The original pyESN.py demo has an error of 0.139603909616.
# Below are some settings that score highly (tuned using DEAP) that you could test out:
# -----------
# CONCLUSION: The original pyESN.py demo has an error of 0.139603909616, versus this tuned ESN with an MSE error of
# 0.0006627234400759625
# These are the parameters that get this score:
# ---------------------------------------------
# n_reservoir = 1073
# projection = 1
# noise = 1e-11
# rectifier = 1
# steepness = 2
# sparsity = 0.8672020622440834
# sphere_radius = 32.406030224796055
# teacher_forcing = True
# random_state = 169
# spectral_radius = 1.400208448263032
# -
| mackey.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Matplotlib图鉴——基础散点图
#
# ## 公众号:可视化图鉴
import matplotlib
print(matplotlib.__version__) # check the Matplotlib version
import pandas as pd
print(pd.__version__) # check the pandas version
import numpy as np
print(np.__version__) # check the numpy version
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # use the SimHei font so Chinese labels render correctly
# 注意,代码在以下环境全部通过测试:
# - Python 3.7.1
# - Matplotlib == 3.3.2
# - pandas == 1.2.0
# - numpy == 1.19.2
#
# 因版本不同,可能会有部分语法差异,如有报错,请先检查拼写及版本是否一致!
# ### 基础散点图
# +
x = list(np.arange(0,5,0.2))
y = np.random.rand(25)
plt.figure(figsize=(10,7))# set the figure size
plt.scatter(x,y)
plt.title("基础散点图",fontsize =20) # figure title ("basic scatter plot")
plt.xlabel("我是x轴",fontsize = 14) # x-axis label
plt.ylabel("我是y轴",fontsize = 14) # y-axis label
plt.show()
# -
# 上图为最基本的Matplotlib散点图绘制,有关`plt.scatter`其他参数说明如下:
#
# >matplotlib.pyplot.scatter(x, y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, verts=<deprecated parameter>, edgecolors=None, *, plotnonfinite=False, data=None, **kwargs)
#
# - x: array 数组数据
# - y: array 数组数据
# - s: 散点的大小
# - c: 调整颜色
# - marker: 形状
# - alpha: 0~1 透明度
# - linewidths: 标记边缘的线宽
# - edgecolors: 散点边的颜色
# - cmap: 颜色是浮点数的时候设置
# - norm: 0-1范围内缩放颜色的范围
# - vmin: 颜色的最小取值
# - vmax: 颜色的最大取值
#
# 下面我们将稍微添加/修改部分参数,制作其他样式的**基本散点图**
# ### 基础散点图—修改颜色、边框、大小
# +
x = list(np.arange(0,5,0.2))
y = np.random.rand(25)
plt.figure(figsize = (10,7))
# white-filled circles with a pink edge, marker area 150
plt.scatter(x,y,marker ='o',color = 'w',edgecolor = 'deeppink',s =150)
plt.title("基础散点图——修改颜色、边框、大小",fontsize =20) # title: basic scatter -- custom color, edge, size
plt.xlabel("我是x轴",fontsize = 14) # x-axis label
plt.ylabel("我是y轴",fontsize = 14) # y-axis label
plt.show()
# -
# ### 基础散点图—修改形状、透明度、颜色
# +
x = list(np.arange(0,5,0.2))
y = np.random.rand(25)
plt.figure(figsize = (10,7))
# triangles with 0.7 alpha; c= an array of floats maps each point onto the colormap
plt.scatter(x,y,s =200,marker ='^',alpha = 0.7,c = np.random.rand(25))
plt.title("基础散点图——修改形状、透明度",fontsize =20) # title: basic scatter -- custom marker, transparency
plt.xlabel("我是x轴",fontsize = 14) # x-axis label
plt.ylabel("我是y轴",fontsize = 14) # y-axis label
plt.show()
| D-散点图/基础散点图MA_D_01/MA_D_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 + Jaspy
# language: python
# name: jaspy
# ---
# # Plots from workflow logs
#
# This notebook demonstrates the use of `pandas` to analyse logs from the `wflogger` LOTUS workflow logging tool.
# +
import sys
sys.path.insert(0, "..")
import wflogger.analysis as wfa
# Reloading is required when developing the: wflogger.analysis library (but not in standard use)
import importlib
import wflogger.analysis
wfa = importlib.reload(wflogger.analysis)
# -
# ## Decide which workflow and tags you want to look at
#
# I want to compare two runs:
#
#
# +
sel_1 = {
"workflow": "my-model-1",
"tag": "idl-version"
}
sel_2 = {
"workflow": "my-model-1",
"tag": "python-version"
}
# -
df1 = wfa.get_results(workflow=sel_1["workflow"], tag=sel_1["tag"])
df2 = wfa.get_results(workflow=sel_2["workflow"], tag=sel_2["tag"])
# + tags=[]
df2.head()
# -
wfa = importlib.reload(wflogger.analysis)
# ## Let's view the different tags in separate plots
wfa.plot_stage_durations_by_iteration(df1)
wfa.plot_stage_durations_by_iteration(df2)
# ### Let's compare the maximum durations of each stage for each tag
#
# In order to view the differences better, we are plotting a log scale on the y-axis:
wfa.plot_comparison_of_two_workflow_tags(df1, df2, "max", yscale="log")
# ### Let's look for outliers
#
# We can identify outliers as those beyond N standard deviations from the mean.
# +
import numpy as np
def get_outliers(df, n_std_deviations=3):
    """Return the rows of *df* whose ``duration`` lies more than
    *n_std_deviations* standard deviations away from the mean duration.

    :param df: DataFrame with a numeric ``duration`` column.
    :param n_std_deviations: outlier threshold in standard deviations (default 3).
    :return: DataFrame restricted to the outlying rows.
    """
    # Bug fix: the original used ``<=`` here, which kept the *inliers*
    # despite the function's name; ``>`` keeps only the outliers.
    deviation = np.abs(df.duration - df.duration.mean())
    return df[deviation > (n_std_deviations * df.duration.std())]
df1_outliers = get_outliers(df1)
# -
df1_outliers.head()
print(df1_outliers.iloc[3].duration)
print(df1_outliers.duration.mean())
# Which hosts are the outliers on?
print(sorted(df1_outliers.hostname.unique()))
df1_outliers["hostname"].value_counts().sort_values(ascending=False)
df1_outliers.loc[:, ["hostname", "duration"]].sort_values("duration", ascending=False)
wfa.plot_bar_chart_comparing_tags(df1, df2)
# ## Next thoughts
#
# Some things we can do with hostname:
# 1. Look at outliers and whether certain hosts are the problem
# 2. Add host groups to the dataframes:
# - by architecture?
# - by numbered groups: e.g. `host100-199`, `host200-299`, etc.
# 3. Then compare the durations by different groups.
# +
# some testing...
def add_hostset_column(df):
    """Attach a ``hostset`` column bucketing hosts by the hundreds digit of
    the number embedded in their name (e.g. ``host123`` -> ``100``).
    Mutates *df* in place and returns None.
    """
    hundreds_digit = df["hostname"].str[4]
    df["hostset"] = hundreds_digit + "00"
add_hostset_column(df1)
# -
df1.head()
def add_hostgroup_column(df):
    """Placeholder for grouping hosts (e.g. by architecture); not implemented yet."""
    return
# NOTE(review): indexing the groupby with the tuple ("duration", "stage") looks
# unintended -- a tuple selects a single (likely missing) column label rather
# than two columns; verify this cell runs, a list may be what was meant.
boxplot = df1.groupby("hostset")[("duration", "stage")].boxplot(column=["duration"]) #column=['Col1', 'Col2', 'Col3'])
# +
# Or, better in a single plot
df1_stats = df1.loc[:, ["stage_number", "stage", "hostset", "duration"]].sort_values("stage_number") #.drop("stage_number")
def get_stage_name(row):
    """Build a sortable label from a row: the zero-padded stage number, a
    colon, then the stage name (e.g. ``04:process``)."""
    number = row.stage_number
    label = row.stage
    return "{:02d}:{}".format(number, label)
df1_stats["stage_name"] = df1_stats.apply(get_stage_name, axis=1)
# -
df1_stats.head()
df1_stats.groupby(["stage_name"]).boxplot(column=["duration"], by="stage_name", layout=(1, 4))
df1_stats.groupby(["stage_name"]).boxplot(column=["duration"], by="hostset", layout=(1, 4), figsize=(12, 8))
# ## Wow, the hostset 100-199 seems to have higher values!
| notebooks/wflog-plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The DataFrame data structure is the heart of the Panda's library. It's a primary object that you'll be working
# with in data analysis and cleaning tasks.
#
# The DataFrame is conceptually a two-dimensional series object, where there's an index and multiple columns of
# content, with each column having a label. In fact, the distinction between a column and a row is really only a
# conceptual distinction. And you can think of the DataFrame itself as simply a two-axes labeled array.
# Lets start by importing our pandas library
import pandas as pd
# I'm going to jump in with an example. Lets create three school records for students and their
# class grades. I'll create each as a series which has a student name, the class name, and the score.
record1 = pd.Series({'Name': 'Alice',
'Class': 'Physics',
'Score': 85})
record2 = pd.Series({'Name': 'Jack',
'Class': 'Chemistry',
'Score': 82})
record3 = pd.Series({'Name': 'Helen',
'Class': 'Biology',
'Score': 90})
# +
# Like a Series, the DataFrame object is indexed. Here I'll use a group of series, where each series
# represents a row of data. Just like the Series function, we can pass in our individual items
# in an array, and we can pass in our index values as a second argument
df = pd.DataFrame([record1, record2, record3],
index=['school1', 'school2', 'school1'])
# And just like the Series we can use the head() function to see the first several rows of the
# dataframe, including indices from both axes, and we can use this to verify the columns and the rows
df.head()
# +
# You'll notice here that Jupyter creates a nice bit of HTML to render the results of the
# dataframe. So we have the index, which is the leftmost column and is the school name, and
# then we have the rows of data, where each row has a column header which was given in our initial
# record dictionaries
# +
# An alternative method is that you could use a list of dictionaries, where each dictionary
# represents a row of data.
students = [{'Name': 'Alice',
'Class': 'Physics',
'Score': 85},
{'Name': 'Jack',
'Class': 'Chemistry',
'Score': 82},
{'Name': 'Helen',
'Class': 'Biology',
'Score': 90}]
# Then we pass this list of dictionaries into the DataFrame function
df = pd.DataFrame(students, index=['school1', 'school2', 'school1'])
# And lets print the head again
df.head()
# +
# Similar to the series, we can extract data using the .iloc and .loc attributes. Because the
# DataFrame is two-dimensional, passing a single value to the loc indexing operator will return
# the series if there's only one row to return.
# For instance, if we wanted to select data associated with school2, we would just query the
# .loc attribute with one parameter.
df.loc['school2']
# +
# You'll note that the name of the series is returned as the index value, while the column
# name is included in the output.
# We can check the data type of the return using the python type function.
type(df.loc['school2'])
# +
# It's important to remember that the indices and column names along either axes horizontal or
# vertical, could be non-unique. In this example, we see two records for school1 as different rows.
# If we use a single value with the DataFrame loc attribute, multiple rows of the DataFrame will
# return, not as a new series, but as a new DataFrame.
# Lets query for school1 records
df.loc['school1']
# -
# And we can see that the type of this is different too
type(df.loc['school1'])
# +
# One of the powers of the Panda's DataFrame is that you can quickly select data based on multiple axes.
# For instance, if you wanted to just list the student names for school1, you would supply two
# parameters to .loc, one being the row index and the other being the column name.
# For instance, if we are only interested in school1's student names
df.loc['school1', 'Name']
# +
# Remember, just like the Series, the pandas developers have implemented this using the indexing
# operator and not as parameters to a function.
# What would we do if we just wanted to select a single column though? Well, there are a few
# mechanisms. Firstly, we could transpose the matrix. This pivots all of the rows into columns
# and all of the columns into rows, and is done with the T attribute
df.T
# -
# Then we can call .loc on the transpose to get the student names only
df.T.loc['Name']
# However, since iloc and loc are used for row selection, Panda reserves the indexing operator
# directly on the DataFrame for column selection. In a Panda's DataFrame, columns always have a name.
# So this selection is always label based, and is not as confusing as it was when using the square
# bracket operator on the series objects. For those familiar with relational databases, this operator
# is analogous to column projection.
df['Name']
# In practice, this works really well since you're often trying to add or drop new columns. However,
# this also means that you get a key error if you try and use .loc with a column name
df.loc['Name']
# Note too that the result of a single column projection is a Series object
type(df['Name'])
# Since the result of using the indexing operator is either a DataFrame or Series, you can chain
# operations together. For instance, we can select all of the rows which related to school1 using
# .loc, then project the name column from just those rows
df.loc['school1']['Name']
# If you get confused, use type to check the responses from resulting operations
print(type(df.loc['school1'])) #should be a DataFrame
print(type(df.loc['school1']['Name'])) #should be a Series
# +
# Chaining, by indexing on the return type of another index, can come with some costs and is
# best avoided if you can use another approach. In particular, chaining tends to cause Pandas
# to return a copy of the DataFrame instead of a view on the DataFrame.
# For selecting data, this is not a big deal, though it might be slower than necessary.
# If you are changing data though this is an important distinction and can be a source of error.
# +
# Here's another approach. As we saw, .loc does row selection, and it can take two parameters,
# the row index and the list of column names. The .loc attribute also supports slicing.
# If we wanted to select all rows, we can use a colon to indicate a full slice from beginning to end.
# This is just like slicing characters in a list in python. Then we can add the column name as the
# second parameter as a string. If we wanted to include multiple columns, we could do so in a list.
# and Pandas will bring back only the columns we have asked for.
# Here's an example, where we ask for all the names and scores for all schools using the .loc operator.
df.loc[:,['Name', 'Score']]
# +
# Take a look at that again. The colon means that we want to get all of the rows, and the list
# in the second argument position is the list of columns we want to get back
# +
# That's selecting and projecting data from a DataFrame based on row and column labels. The key
# concepts to remember are that the rows and columns are really just for our benefit. Underneath
# this is just a two axes labeled array, and transposing the columns is easy. Also, consider the
# issue of chaining carefully, and try to avoid it, as it can cause unpredictable results, where
# your intent was to obtain a view of the data, but instead Pandas returns to you a copy.
# +
# Before we leave the discussion of accessing data in DataFrames, lets talk about dropping data.
# It's easy to delete data in Series and DataFrames, and we can use the drop function to do so.
# This function takes a single parameter, which is the index or row label, to drop. This is another
# tricky place for new users -- the drop function doesn't change the DataFrame by default! Instead,
# the drop function returns to you a copy of the DataFrame with the given rows removed.
df.drop('school1')
# -
# But if we look at our original DataFrame we see the data is still intact.
df
# +
# Drop has two interesting optional parameters. The first is called inplace, and if it's
# set to true, the DataFrame will be updated in place, instead of a copy being returned.
# The second parameter is the axes, which should be dropped. By default, this value is 0,
# indicating the row axis. But you could change it to 1 if you want to drop a column.
# For example, lets make a copy of a DataFrame using .copy()
copy_df = df.copy()
# Now lets drop the name column in this copy
copy_df.drop("Name", inplace=True, axis=1)
copy_df
# -
# There is a second way to drop a column, and that's directly through the use of the indexing
# operator, using the del keyword. This way of dropping data, however, takes immediate effect
# on the DataFrame and does not return a view.
del copy_df['Class']
copy_df
# +
# Finally, adding a new column to the DataFrame is as easy as assigning it to some value using
# the indexing operator. For instance, if we wanted to add a class ranking column with default
# value of None, we could do so by using the assignment operator after the square brackets.
# This broadcasts the default value to the new column immediately.
df['ClassRanking'] = None
df
# -
# In this lecture you've learned about the data structure you'll use the most in pandas, the DataFrame. The
# dataframe is indexed both by row and column, and you can easily select individual rows and project the columns
# you're interested in using the familiar indexing methods from the Series class. You'll be gaining a lot of
# experience with the DataFrame in the content to come.
| Course - 1: Introduction to Data Science in Python/resources/week-2/DataFrameDataStructure_ed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <span style=color:blue>Compact Python wrapper library for commonly used _R-style_ functions</span>
# <p style="color:brown;font-family:verdana;font-size:15px">
# Basic functional programming nature of R provides users with extremely simple and compact interface for quick calculations of probabilities and essential descriptive/inferential statistics for a data analysis problem. On the other hand, Python scripting ability allows the analyst to use those statistics in a wide variety of analytics pipeline with limitless sophistication and creativity. To combine the advantage of both worlds, one needs a simple <b><i>Python-based wrapper library</b></i> which contains some basic functions pertaining to <b><i>probability distributions and descriptive statistics defined in R-style</b></i> so that users can call those functions fast without having to go to the proper Python statistical libraries and figure out the whole list of methods and arguments.
# </p>
# <p style="color:brown;font-family:verdana;font-size:15px">
# Goal of this library is to provide simple Python sub-routines mimicing R-style statistical functions for quickly calculating density/point estimates, cumulative distributions, quantiles, and generating random variates for various important probability distributions. To maintain the spirit of R styling, no class hiararchy was used and just raw functions are defined in this file so that user can import this one Python script and use all the functions whenever he/she needs them with a single name call.
# </p>
# ## <span style=color:blue>Basic descriptive stats</span>
def mean(array):
"""
Calculates the mean of an array/vector
"""
import numpy as np
array=np.array(array)
result= np.mean(array)
return result
def sd(array):
    """
    Calculates the standard deviation of an array/vector
    """
    # NOTE(review): np.std defaults to the population SD (ddof=0), while R's
    # sd() returns the sample SD (ddof=1) -- confirm which is intended for
    # this R-style wrapper before relying on the value.
    import numpy as np
    array=np.array(array)
    result= np.std(array)
    return result
def median(array):
"""
Calculates the median of an array/vector
"""
import numpy as np
array=np.array(array)
result= np.median(array)
return result
def var(array):
    """
    Calculates the variance of an array/vector
    """
    # NOTE(review): np.var defaults to the population variance (ddof=0),
    # while R's var() uses the sample variance (ddof=1) -- confirm which is
    # intended for this R-style wrapper.
    import numpy as np
    array=np.array(array)
    result= np.var(array)
    return result
def cov(x,y=None):
    """
    Calculates the covariance between two arrays/vectors or of a single matrix

    With two equal-shape vectors, returns their (sample, ddof=1) covariance;
    with a single 1-D vector, returns its variance as a float; with a single
    matrix, returns the full covariance matrix. Returns None (after printing
    an error) when the two inputs have incompatible shapes.
    """
    import numpy as np
    array1=np.array(x)
    # Bug fix: ``y is not None`` replaces the original ``y != None`` -- with a
    # numpy array argument the ``!=`` form compares element-wise and raises
    # "truth value of an array is ambiguous" when used in an ``if``.
    if y is not None:
        array2=np.array(y)
        if array1.shape!=array2.shape:
            print("Error: incompatible dimensions")
            return None
        # Off-diagonal entry of the 2x2 covariance matrix is cov(x, y).
        covmat=np.cov(array1,array2)
        result=covmat[0][1]
    elif len(array1.shape)==1:
        # A single 1-D vector: np.cov yields a 0-d array holding its variance.
        result=float(np.cov(array1))
    else:
        # A single matrix: return the full covariance matrix, as R's cov() does.
        result=np.cov(array1)
    return result
def fivenum(array):
    """
    Calculates the Tukey five-number summary of an array/vector, returned in
    the order (min, 1st quartile, median, 3rd quartile, max).
    """
    import numpy as np
    data = np.array(array)
    summary = [
        np.min(data),
        np.percentile(data, 25),
        np.median(data),
        np.percentile(data, 75),
        np.max(data),
    ]
    return np.array(summary)
def IQR(array):
    """
    Calculates the inter-quartile range (Q3 - Q1) of an array/vector
    """
    import numpy as np
    data = np.array(array)
    q1, q3 = np.percentile(data, [25, 75])
    return q3 - q1
# ## <span style=color:blue>Probability distributions</span>
# ### Uniform distribution
def dunif(x, minimum=0,maximum=1):
"""
Calculates the point estimate of the uniform distribution
"""
from scipy.stats import uniform
result=uniform.pdf(x=x,loc=minimum,scale=maximum-minimum)
return result
def punif(q, minimum=0,maximum=1):
"""
Calculates the cumulative of the uniform distribution
"""
from scipy.stats import uniform
result=uniform.cdf(x=q,loc=minimum,scale=maximum-minimum)
return result
def qunif(p, minimum=0,maximum=1):
"""
Calculates the quantile function of the uniform distribution
"""
from scipy.stats import uniform
result=uniform.ppf(q=p,loc=minimum,scale=maximum-minimum)
return result
def runif(n, minimum=0,maximum=1):
"""
Generates random variables from the uniform distribution
"""
from scipy.stats import uniform
result=uniform.rvs(size=n,loc=minimum,scale=maximum-minimum)
return result
# ### Binomial distribution
def dbinom(x,size,prob=0.5):
"""
Calculates the point estimate of the binomial distribution
"""
from scipy.stats import binom
result=binom.pmf(k=x,n=size,p=prob,loc=0)
return result
def pbinom(q,size,prob=0.5):
"""
Calculates the cumulative of the binomial distribution
"""
from scipy.stats import binom
result=binom.cdf(k=q,n=size,p=prob,loc=0)
return result
def qbinom(p, size, prob=0.5):
"""
Calculates the quantile function from the binomial distribution
"""
from scipy.stats import binom
result=binom.ppf(q=p,n=size,p=prob,loc=0)
return result
def rbinom(n,size,prob=0.5):
"""
Generates random variables from the binomial distribution
"""
from scipy.stats import binom
result=binom.rvs(n=size,p=prob,size=n)
return result
# ### Normal distribution
def dnorm(x,mean=0,sd =1):
"""
Calculates the density of the Normal distribution
"""
from scipy.stats import norm
result=norm.pdf(x,loc=mean,scale=sd)
return result
def pnorm(q,mean=0,sd=1):
"""
Calculates the cumulative of the normal distribution
"""
from scipy.stats import norm
result=norm.cdf(x=q,loc=mean,scale=sd)
return result
def qnorm(p,mean=0,sd=1):
"""
Calculates the quantile function of the normal distribution
"""
from scipy.stats import norm
result=norm.ppf(q=p,loc=mean,scale=sd)
return result
def rnorm(n,mean=0,sd=1):
"""
Generates random variables from the normal distribution
"""
from scipy.stats import norm
result=norm.rvs(size=n,loc=mean,scale=sd)
return result
# ### Poisson distribution
def dpois(x,mu):
"""
Calculates the density/point estimate of the Poisson distribution
"""
from scipy.stats import poisson
result=poisson.pmf(k=x,mu=mu)
return result
def ppois(q,mu):
"""
Calculates the cumulative of the Poisson distribution
"""
from scipy.stats import poisson
result=poisson.cdf(k=q,mu=mu)
return result
def qpois(p,mu):
"""
Calculates the quantile function of the Poisson distribution
"""
from scipy.stats import poisson
result=poisson.ppf(q=p,mu=mu)
return result
def rpois(n,mu):
"""
Generates random variables from the Poisson distribution
"""
from scipy.stats import poisson
result=poisson.rvs(size=n,mu=mu)
return result
# ### $\chi^2-$ distribution
def dchisq(x,df,ncp=0):
"""
Calculates the density/point estimate of the chi-square distribution
"""
from scipy.stats import chi2,ncx2
if ncp==0:
result=chi2.pdf(x=x,df=df,loc=0,scale=1)
else:
result=ncx2.pdf(x=x,df=df,nc=ncp,loc=0,scale=1)
return result
def pchisq(q,df,ncp=0):
"""
Calculates the cumulative of the chi-square distribution
"""
from scipy.stats import chi2,ncx2
if ncp==0:
result=chi2.cdf(x=q,df=df,loc=0,scale=1)
else:
result=ncx2.cdf(x=q,df=df,nc=ncp,loc=0,scale=1)
return result
def qchisq(p,df,ncp=0):
"""
Calculates the quantile function of the chi-square distribution
"""
from scipy.stats import chi2,ncx2
if ncp==0:
result=chi2.ppf(q=p,df=df,loc=0,scale=1)
else:
result=ncx2.ppf(q=p,df=df,nc=ncp,loc=0,scale=1)
return result
def rchisq(n,df,ncp=0):
"""
Generates random variables from the chi-square distribution
"""
from scipy.stats import chi2,ncx2
if ncp==0:
result=chi2.rvs(size=n,df=df,loc=0,scale=1)
else:
result=ncx2.rvs(size=n,df=df,nc=ncp,loc=0,scale=1)
return result
# ### Student's <i>t</i>-distribution
def dt(x,df,ncp=0):
"""
Calculates the density/point estimate of the t-distribution
"""
from scipy.stats import t,nct
if ncp==0:
result=t.pdf(x=x,df=df,loc=0,scale=1)
else:
result=nct.pdf(x=x,df=df,nc=ncp,loc=0,scale=1)
return result
def pt(q,df,ncp=0):
"""
Calculates the cumulative of the t-distribution
"""
from scipy.stats import t,nct
if ncp==0:
result=t.cdf(x=q,df=df,loc=0,scale=1)
else:
result=nct.cdf(x=q,df=df,nc=ncp,loc=0,scale=1)
return result
def qt(p,df,ncp=0):
"""
Calculates the quantile function of the t-distribution
"""
from scipy.stats import t,nct
if ncp==0:
result=t.ppf(q=p,df=df,loc=0,scale=1)
else:
result=nct.ppf(q=p,df=df,nc=ncp,loc=0,scale=1)
return result
def rt(n,df,ncp=0):
"""
Generates random variables from the t-distribution
"""
from scipy.stats import t,nct
if ncp==0:
result=t.rvs(size=n,df=df,loc=0,scale=1)
else:
result=nct.rvs(size=n,df=df,nc=ncp,loc=0,scale=1)
return result
# ### <i>F</i>-distribution
def df(x,df1,df2,ncp=0):
"""
Calculates the density/point estimate of the F-distribution
"""
from scipy.stats import f,ncf
if ncp==0:
result=f.pdf(x=x,dfn=df1,dfd=df2,loc=0,scale=1)
else:
result=ncf.pdf(x=x,dfn=df1,dfd=df2,nc=ncp,loc=0,scale=1)
return result
def pf(q,df1,df2,ncp=0):
"""
Calculates the cumulative of the F-distribution
"""
from scipy.stats import f,ncf
if ncp==0:
result=f.cdf(x=q,dfn=df1,dfd=df2,loc=0,scale=1)
else:
result=ncf.cdf(x=q,dfn=df1,dfd=df2,nc=ncp,loc=0,scale=1)
return result
def qf(p,df1,df2,ncp=0):
"""
Calculates the quantile function of the F-distribution
"""
from scipy.stats import f,ncf
if ncp==0:
result=f.ppf(q=p,dfn=df1,dfd=df2,loc=0,scale=1)
else:
result=ncf.ppf(q=p,dfn=df1,dfd=df2,nc=ncp,loc=0,scale=1)
return result
def rf(n,df1,df2,ncp=0):
    """
    Generates random variables from the F-distribution
    """
    # ncp is the noncentrality parameter; ncp=0 selects the central F.
    from scipy.stats import f,ncf
    if ncp==0:
        result=f.rvs(size=n,dfn=df1,dfd=df2,loc=0,scale=1)
    else:
        result=ncf.rvs(size=n,dfn=df1,dfd=df2,nc=ncp,loc=0,scale=1)
    return result
# ### Beta distribution
def dbeta(x,shape1,shape2):
"""
Calculates the density/point estimate of the Beta-distribution
"""
from scipy.stats import beta
result=beta.pdf(x=x,a=shape1,b=shape2,loc=0,scale=1)
return result
def pbeta(q,shape1,shape2):
"""
Calculates the cumulative of the Beta-distribution
"""
from scipy.stats import beta
result=beta.cdf(x=q,a=shape1,b=shape2,loc=0,scale=1)
return result
def qbeta(p,shape1,shape2):
    """
    Calculates the quantile function of the Beta-distribution
    """
    from scipy.stats import beta
    result=beta.ppf(q=p,a=shape1,b=shape2,loc=0,scale=1)
    return result
def rbeta(n,shape1,shape2):
    """
    Generates random variables from the Beta-distribution
    """
    from scipy.stats import beta
    result=beta.rvs(size=n,a=shape1,b=shape2,loc=0,scale=1)
    return result
# ### Gamma distribution
def dgamma(x,shape,rate=1):
    """
    Calculates the density/point estimate of the Gamma-distribution
    """
    from scipy.stats import gamma
    # R parameterizes the Gamma by rate; scipy by scale = 1/rate.  Density
    # transform: f(x; shape, rate) = rate * f_std(rate * x; shape).
    result=rate*gamma.pdf(x=rate*x,a=shape,loc=0,scale=1)
    return result
def pgamma(q,shape,rate=1):
    """
    Calculates the cumulative of the Gamma-distribution
    """
    from scipy.stats import gamma
    # CDF transform: F(q; shape, rate) = F_std(rate * q; shape).
    result=gamma.cdf(x=rate*q,a=shape,loc=0,scale=1)
    return result
def qgamma(p,shape,rate=1):
    """
    Calculates the quantile function of the Gamma-distribution
    """
    from scipy.stats import gamma
    # Quantile transform: Q(p; shape, rate) = Q_std(p; shape) / rate.
    result=(1/rate)*gamma.ppf(q=p,a=shape,loc=0,scale=1)
    return result
def rgamma(n,shape,rate=1):
    """
    Generates random variables from the Gamma-distribution
    """
    from scipy.stats import gamma
    # Bug fix: the original ignored ``rate`` (always scale=1).  Consistent
    # with dgamma/pgamma/qgamma in this file, a rate-parameterized draw is a
    # standard (scale=1) draw divided by the rate, i.e. scale = 1/rate.
    result=gamma.rvs(size=n,a=shape,loc=0,scale=1.0/rate)
    return result
| R-style Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Sparrow0hawk/crime_sim_toolkit/blob/develop/data_manipulation/CrimeDes_toCSS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="vysrCucw3HVU" colab_type="text"
# ## Map Crime Descriptions to Crime Severity Scores
#
#
# + id="vH9I1j4b3UpT" colab_type="code" outputId="f36eac8d-a58a-4220-a581-67aaf1ca601c" colab={"base_uri": "https://localhost:8080/", "height": 243}
# get the crime seversity score xlsx file
# !wget -O cssdatatool.xls https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/crimeandjustice/datasets/crimeseverityscoreexperimentalstatistics/current/cssdatatool.xls
# + id="wslNTUsV32Cj" colab_type="code" outputId="ea1eafc6-fbb6-4cab-b310-d7c50768bcff" colab={"base_uri": "https://localhost:8080/", "height": 36}
# check its in /content
# !ls
# + id="pt2pkJDC3LVT" colab_type="code" colab={}
import pandas as pd
import numpy as np
# + id="tpzexe-B3QBy" colab_type="code" colab={}
# open files including existing crime description
crime_des_data = pd.read_csv('https://raw.githubusercontent.com/Sparrow0hawk/crime_sim_toolkit/master/crime_sim_toolkit/src/simple_policeuk_perc_offence.csv', index_col=0)
CSS_score_xls = pd.ExcelFile('cssdatatool.xls')
# + id="3CfqgwOnVnSe" colab_type="code" colab={}
# set crime descriptions all to lowercase for easier string matching
crime_des_data.Offence_Description = crime_des_data.Offence_Description.str.lower()
# specify all crime codes as strings
crime_des_data.Offence_Code = crime_des_data.Offence_Code.astype(str)
# + id="rvXQV4yi5-15" colab_type="code" colab={}
# get excel sheet with weights
weights = CSS_score_xls.parse('List of weights', skiprows=4)
# + id="YMGGbfxK6j1u" colab_type="code" outputId="072ae8f8-3d63-436b-92f2-518c45a94eb6" colab={"base_uri": "https://localhost:8080/", "height": 206}
# visualise
weights.head()
# + id="6qHXlOb46Hhv" colab_type="code" outputId="f1332ea2-43a5-4c93-e318-603a1f4f0782" colab={"base_uri": "https://localhost:8080/", "height": 206}
# ignore first column and visualise
weights = weights.iloc[:,1:]
weights.head()
# + id="E_TbD-0t6wrY" colab_type="code" colab={}
# rename columns
weights.columns = ['Offence_cd','Offence_Description','Weight']
# set all strings to lower and type string
weights.Offence_Description = weights.Offence_Description.str.lower()
weights.Offence_cd = weights.Offence_cd.astype(str)
# remove rows with NAs
weights.dropna(inplace=True)
# + id="-G5opWN_7SYg" colab_type="code" outputId="23e362a2-32e2-480e-f746-7496c52bef8b" colab={"base_uri": "https://localhost:8080/", "height": 206}
# visualise
weights.head()
# + [markdown] id="jV5_e8Z_qAfv" colab_type="text"
# Start constructing dataframe that includes weights matched by either crime code or string match to crime description
# + id="DIv1vv26Q-D_" colab_type="code" outputId="2b368354-0183-4115-cddc-ce20894cb10d" colab={"base_uri": "https://localhost:8080/", "height": 36}
# create a copy of the crime description dataframe we can work with
basic_frame = crime_des_data.copy()
# remove the duplicates sections for each local authority to get just all 132 crime descriptions
basic_frame = basic_frame.drop_duplicates(subset=['Offence_Description'])
# check shape is as expected
basic_frame.shape
# + id="rc1VEmYpRPBm" colab_type="code" outputId="06ee11c9-ade7-44e3-c0d7-7be0e74b1866" colab={"base_uri": "https://localhost:8080/", "height": 206}
# select only useful columns and visualise
basic_frame = basic_frame[['Policeuk_Cat', 'Offence_Group', 'Offence_Subgroup',
'Offence_Description', 'Offence_Code']]
basic_frame.head()
# + id="l431U_BJUlFD" colab_type="code" colab={}
# join the weights column from weights dataset based on string matches of Offence_Descriptions
weights_frame = basic_frame.set_index('Offence_Description').join(weights[weights.Offence_Description.isin(crime_des_data.Offence_Description.unique().tolist())].set_index('Offence_Description')['Weight'])
# reset the index back to numeric
weights_frame.reset_index(inplace=True)
# + id="pG9wyTGJWcy9" colab_type="code" outputId="97627b11-85bf-4eb8-bfa7-12eb25c69c5c" colab={"base_uri": "https://localhost:8080/", "height": 206}
# visualise
weights_frame.head()
# + id="an-Jv-X-Yd8A" colab_type="code" colab={}
# add a new column for weights matched by offence code
weights_frame = weights_frame.set_index('Offence_Code').join(weights[weights.Offence_cd.isin(crime_des_data.Offence_Code.unique().tolist())].set_index('Offence_cd')['Weight'], rsuffix='_cd')
weights_frame.reset_index(inplace=True)
# rename columns
weights_frame.columns = ['Offence_code','Offence_Description','Policeuk_Cat','Offence_Group','Offence_Subgroup','Weight','Weight_cd']
# + id="ACWzQRxmYt8H" colab_type="code" outputId="bd1ef920-12b3-46c5-d24a-88f77b45d7e9" colab={"base_uri": "https://localhost:8080/", "height": 206}
# fill NaNs with 0
weights_frame.fillna(0, inplace=True)
# condense weight columns
weights_frame['Weight'] = weights_frame.apply(lambda x: x['Weight'] + x['Weight_cd'] if x['Weight'] == 0 else x['Weight'], axis=1)
# drop the weights_cd column (now redundant as we've combined weights)
weights_frame.drop('Weight_cd', axis=1, inplace=True)
# check
weights_frame.head()
# + id="TVkW0L-3anjZ" colab_type="code" outputId="c8d67ac3-d151-4429-e1c6-b4265e376443" colab={"base_uri": "https://localhost:8080/", "height": 237}
# unmatched crime categories
weights_frame[weights_frame.Weight == 0]
# + id="8xR7r8Vfauq5" colab_type="code" outputId="0c2ec10e-5995-4a05-b353-e16210f7655e" colab={"base_uri": "https://localhost:8080/", "height": 112}
# the two burglary categories are due to shared crime codes which I can explode into individual rows
# we'll hack our way to setting these values
weights_frame[(weights_frame.Offence_Description.str.contains('burg')) & (weights_frame.Weight == 0)]
# + id="3AoiKTANfBmJ" colab_type="code" colab={}
# this will set value for offence code 28A
# and will generate a warning
weights_frame.loc[weights_frame.Offence_code == '28A','Weight'] = weights[weights.Offence_cd.str.contains('28A')]['Weight'].tolist()[0]
# + id="U7H5TuAMl-lJ" colab_type="code" colab={}
# this will set value for offence code 30A
# NOTE(review): the lookup below filters on '28A', not '30A'.  That may be
# deliberate (the two burglary categories were said to share crime codes),
# but confirm it is not a copy-paste slip before relying on the 30A weight.
weights_frame.loc[weights_frame.Offence_code == '30A','Weight'] = weights[weights.Offence_cd.str.contains('28A')]['Weight'].tolist()[0]
# + id="73wbun8TmaYv" colab_type="code" outputId="9024752f-21b3-4236-d6e8-7ad95b400e20" colab={"base_uri": "https://localhost:8080/", "height": 175}
# unmatched crime categories
weights_frame[weights_frame.Weight == 0]
# + id="BEWX5KOnkoCk" colab_type="code" outputId="3636a4d8-6bd0-4cfd-b375-7dc45be3df3a" colab={"base_uri": "https://localhost:8080/", "height": 36}
# for now we'll just calculate the mean weight of all fraud crimes
weights[weights.Offence_Description.str.contains('fraud')].Weight.mean()
# + id="UBDpATCUmjRM" colab_type="code" colab={}
# this will set value for all remaining 0 weight crimes shown above
weights_frame.loc[weights_frame.Weight == 0,'Weight'] = weights[weights.Offence_Description.str.contains('fraud')].Weight.mean()
# + id="xjO0K88hpoUP" colab_type="code" outputId="701634a7-4659-4089-a6b5-d579bb989c5b" colab={"base_uri": "https://localhost:8080/", "height": 49}
# unmatched crime categories
weights_frame[weights_frame.Weight == 0]
# + id="R0KuIzANppB3" colab_type="code" colab={}
weights_frame = weights_frame[['Policeuk_Cat', 'Offence_Group','Offence_Subgroup','Offence_Description','Offence_code', 'Weight']]
# + id="J-tusFRHpq2t" colab_type="code" outputId="13b3be12-d440-481f-d76d-9fe15086b3cb" colab={"base_uri": "https://localhost:8080/", "height": 1000}
weights_frame
# + id="6pzQ3wFXp2NU" colab_type="code" colab={}
weights_frame.to_csv('crime_des_CSSweights.csv')
# + id="7g43ATt6qmcE" colab_type="code" colab={}
| data_manipulation/CrimeDes_toCSS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qiskit import *
from math import pi
# +
# CCX or AND gate
qc = QuantumCircuit(3)
qc.h(range(2))
qc.barrier()
qc.ccx(0, 1, 2)
qc.measure_all() # qubits not reversed!
qc.draw('mpl')
# -
backend = Aer.get_backend('qasm_simulator')
execution = execute(qc, backend = backend, shots = 1024)
results = execution.result()
counts = results.get_counts()
print(counts)
# +
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
pass_ = Unroller(['u3', 'cx'])
pm = PassManager(pass_)
new_circuit = pm.run(qc)
new_circuit.draw('mpl')
# -
new_circuit.count_ops()
# +
# CCX Alternative
# Toffoli (CCX) built from controlled-phase gates and CNOTs, following the
# Barenco et al. decomposition (see the reference at the end of the notebook).
qc = QuantumCircuit(3)
qc.h(range(2))
# Sandwich qubit 2 between Hadamards so the controlled-phase ladder below
# acts as a controlled-X on it (HZH = X).
qc.h(2)
# U = V^2 = V^2^(n-2)
# here, n = 3 qubits
# Therefore, number of V = 2, with V = sqrt(Z), i.e. a pi/2 phase rotation.
# V, controlled on qubit 1
qc.cu1(pi/2, 1, 2)
# XOR the controls so the next V-dagger fires on the parity of qubits 0 and 1
qc.cx(0, 1)
# V-dagger
qc.cu1(-pi/2, 1, 2)
# Revert XOR
qc.cx(0, 1)
# V, controlled on qubit 0
qc.cu1(pi/2, 0, 2)
qc.h(2)
# HZH ends
# NOTE(review): cu1 was deprecated in later Qiskit releases in favour of cp.
qc.measure_all()
qc.draw('mpl')
# -
backend = Aer.get_backend('qasm_simulator')
execution = execute(qc, backend = backend, shots = 1024)
results = execution.result()
counts = results.get_counts()
print(counts)
# +
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
pass_ = Unroller(['u3', 'cx'])
pm = PassManager(pass_)
new_circuit = pm.run(qc)
new_circuit.draw('mpl')
# -
new_circuit.count_ops()
# +
# MAKE CHANGES!!!
# # CCX Alternative-2
# qc = QuantumCircuit(3)
# qc.h(range(2))
# # HZH
# qc.h(2)
# # U = V^2 = V^2^(n-2)
# # here, n = 3 qubits
# # V
# qc.cu1(pi/2, 1, 2)
# # V
# qc.cu1(pi/2, 0, 2)
# # XOR
# qc.cx(0, 1)
# # V-dagger
# qc.cu1(-pi/2, 1, 2)
# # Revert XOR
# qc.cx(0, 1)
# qc.h(2)
# # HZH ends
# qc.measure_all()
# qc.draw('mpl')
# +
# backend = Aer.get_backend('qasm_simulator')
# execution = execute(qc, backend = backend, shots = 1024)
# results = execution.result()
# counts = results.get_counts()
# print(counts)
# +
# from qiskit.transpiler import PassManager
# from qiskit.transpiler.passes import Unroller
# pass_ = Unroller(['cx', 'u3'])
# pm = PassManager(pass_)
# new_circuit = pm.run(qc)
# new_circuit.draw('mpl')
# +
# new_circuit.count_ops()
# -
# Refer: A. Barenco, C. H. Bennett, R. Cleve, D. P. DiVincenzo, N. Margolus, P. Shor, T. Sleator, J. A. Smolin, H. Weinfurter. Elementary gates for quantum computation. Physical Review A. 1995 Nov 1;52(5):3457.
#
# https://quantumcomputing.stackexchange.com/questions/7082/how-to-reduce-circuit-elements-of-a-decomposed-c2u-operation
| qiskit-ibm/circuit-decomposition/CCX.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Concise implementation of linear regression
# + attributes={"classes": [], "id": "", "n": "2"}
import d2l
from mxnet import autograd, np, npx, gluon
npx.set_np()
# Ground-truth parameters used to generate a synthetic regression dataset.
true_w = np.array([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
# + [markdown] slideshow={"slide_type": "slide"}
# Read the data.
# + attributes={"classes": [], "id": "", "n": "3"}
def load_array(data_arrays, batch_size, is_train=True):
    """Wrap in-memory arrays in a Gluon DataLoader yielding minibatches.

    Shuffling is enabled for training and disabled otherwise.
    """
    return gluon.data.DataLoader(
        gluon.data.ArrayDataset(*data_arrays), batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
# Peek at the first minibatch to confirm the loader works.
for X, y in data_iter:
    # Fix: the original format string '%sy =' printed X's repr and "y ="
    # run together on one line; the added \n separates them.
    print('X =\n%s\ny =\n%s' % (X, y))
    break
# + [markdown] slideshow={"slide_type": "slide"}
# Define the model and initialize its parameters.
# + attributes={"classes": [], "id": "", "n": "5"} slideshow={"slide_type": "-"}
from mxnet.gluon import nn
from mxnet import init
# Single dense layer = linear regression; weights drawn from N(0, 0.01^2).
net = nn.Sequential()
net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01))
# + [markdown] slideshow={"slide_type": "slide"}
# Define the loss function and the optimizer.
# + attributes={"classes": [], "id": "", "n": "8"}
from mxnet import gluon
# L2Loss is squared error divided by 2.
loss = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(),
                        'sgd', {'learning_rate': 0.03})
# + [markdown] slideshow={"slide_type": "slide"}
# Training.
# + attributes={"classes": [], "id": "", "n": "10"}
for epoch in range(1, 4):
    for X, y in data_iter:
        # Record the forward pass so gradients can be computed.
        with autograd.record():
            l = loss(net(X), y)
        l.backward()
        # step() scales the gradient by 1/batch_size.
        trainer.step(batch_size)
    # Report full-dataset loss at the end of each epoch.
    l = loss(net(features), labels)
    print('epoch %d, loss: %f' % (epoch, l.mean()))
# Compare learned parameters against the generating ground truth.
w = net[0].weight.data()
print('Error in estimating w', true_w.reshape(w.shape) - w)
b = net[0].bias.data()
print('Error in estimating b', true_b - b)
| notebooks-1/4-linear-regression-gluon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
# # Amazon SageMaker
import base64
import json
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# ## Boilerplate
# ### Session
# +
# NOTE(review): json is imported twice (also above); harmless but redundant.
import boto3, time, json
sess = boto3.Session()
# SageMaker control-plane client (create_model, transform jobs, endpoints).
sm = sess.client("sagemaker")
region = sess.region_name
# Current AWS account id; used below to address the ECR image.
account = boto3.client("sts").get_caller_identity().get("Account")
# -
# ### IAM Role
#
# **Note**: make sure the IAM role has:
# - `AmazonS3FullAccess`
# - `AmazonEC2ContainerRegistryFullAccess`
# - `AmazonSageMakerFullAccess`
# +
import sagemaker
# Execution role assumed by SageMaker when running the container.
role = sagemaker.get_execution_role()
role
# -
# ### Amazon Elastic Container Registry (ECR)
#
# **Note**: create ECR if it doesn't exist
registry_name = "fastai-torchserve-sagemaker"
# # !aws ecr create-repository --repository-name {registry_name}
# Fully qualified ECR image URI for the serving container.
image = f"{account}.dkr.ecr.{region}.amazonaws.com/{registry_name}:latest"
image
# ### Pytorch Model Artifact
#
# Create a compressed `*.tar.gz` file from the `*.mar` file per requirement of Amazon SageMaker and upload the model to your Amazon S3 bucket.
model_file_name = "fastunet"
s3_bucket_name = "<YOUR-S3-BUCKET-NAME>"
# # !tar cvzf {model_file_name}.tar.gz fastunet.mar
# # !aws s3 cp {model_file_name}.tar.gz s3://{s3_bucket_name}/
# ### Build a FastAI+TorchServe Docker container and push it to Amazon ECR
# !aws ecr get-login-password --region {region} | docker login --username AWS --password-stdin {account}.dkr.ecr.{region}.amazonaws.com
# !docker build -t {registry_name} ../
# !docker tag {registry_name}:latest {image}
# !docker push {image}
# ### Model
# +
model_data = f"s3://{s3_bucket_name}/{model_file_name}.tar.gz"
sm_model_name = "fastai-unet-torchserve-sagemaker"
# A SageMaker Model pairs the serving image with the S3 model artifact.
container = {"Image": image, "ModelDataUrl": model_data}
create_model_response = sm.create_model(
    ModelName=sm_model_name, ExecutionRoleArn=role, PrimaryContainer=container
)
print(create_model_response["ModelArn"])
# -
# ## Batch Transform
# ### S3 Input and Output
batch_input = f"s3://{s3_bucket_name}/batch_transform_fastai_torchserve_sagemaker/"
batch_output = f"s3://{s3_bucket_name}/batch_transform_fastai_torchserve_sagemaker_output/"
# !aws s3 ls {batch_input}
# +
import time
# Timestamped job name keeps transform jobs unique across reruns.
batch_job_name = 'fastunet-batch' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
batch_job_name
# -
# ### Batch transform jobs
request = {
    "ModelClientConfig": {
        "InvocationsTimeoutInSeconds": 3600,
        "InvocationsMaxRetries": 1,
    },
    "TransformJobName": batch_job_name,
    "ModelName": sm_model_name,
    # MultiRecord batches several records into one container invocation.
    "BatchStrategy": "MultiRecord",
    "TransformOutput": {"S3OutputPath": batch_output, "AssembleWith": "Line"},
    "TransformInput": {
        "DataSource": {
            "S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}
        },
        "CompressionType": "None",
    },
    "TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}
# +
# %%time
sm.create_transform_job(**request)
# Poll every 30 s until the job reaches a terminal state.
while True:
    response = sm.describe_transform_job(TransformJobName=batch_job_name)
    status = response["TransformJobStatus"]
    if status == "Completed":
        print("Transform job ended with status: " + status)
        break
    if status == "Failed":
        message = response["FailureReason"]
        print("Transform failed with the following error: {}".format(message))
        raise Exception("Transform job failed")
    print("Transform job is still in status: " + status)
    time.sleep(30)
# -
# ### Testing
s3 = boto3.resource("s3")
# Fetch the job's output (.out) and the original input image for comparison.
s3.Bucket(f"{s3_bucket_name}").download_file(
    "batch_transform_fastai_torchserve_sagemaker_output/street_view_of_a_small_neighborhood.png.out",
    "street_view_of_a_small_neighborhood.txt",
)
s3.Bucket(f"{s3_bucket_name}").download_file(
    "batch_transform_fastai_torchserve_sagemaker/street_view_of_a_small_neighborhood.png",
    "street_view_of_a_small_neighborhood.png",
)
# +
with open("street_view_of_a_small_neighborhood.txt") as f:
    results = f.read()
response = json.loads(results)
# -
# Decode the base64-encoded mask and reshape to the 96x128 prediction grid.
pred_decoded_byte = base64.decodebytes(bytes(response["base64_prediction"], encoding="utf-8"))
pred_decoded = np.reshape(
    np.frombuffer(pred_decoded_byte, dtype=np.uint8), (96, 128)
)
plt.imshow(pred_decoded);
# ## Inference Endpoint
# ### Endpoint configuration
#
# **Note**: choose your preferred `InstanceType`: https://aws.amazon.com/sagemaker/pricing/
# +
import time
# Timestamped names keep configs/endpoints unique across reruns.
endpoint_config_name = "torchserve-endpoint-config-" + time.strftime(
    "%Y-%m-%d-%H-%M-%S", time.gmtime()
)
print(endpoint_config_name)
create_endpoint_config_response = sm.create_endpoint_config(
    EndpointConfigName=endpoint_config_name,
    ProductionVariants=[
        {
            "InstanceType": "ml.g4dn.xlarge",
            "InitialVariantWeight": 1,
            "InitialInstanceCount": 1,
            "ModelName": sm_model_name,
            "VariantName": "AllTraffic",
        }
    ],
)
print("Endpoint Config Arn: " + create_endpoint_config_response["EndpointConfigArn"])
# -
# ### Endpoint
# +
endpoint_name = "fastunet-torchserve-endpoint-" + time.strftime(
    "%Y-%m-%d-%H-%M-%S", time.gmtime()
)
print(endpoint_name)
create_endpoint_response = sm.create_endpoint(
    EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name
)
print(create_endpoint_response["EndpointArn"])
# +
# %%time
# Block until the endpoint leaves the "Creating" state.
resp = sm.describe_endpoint(EndpointName=endpoint_name)
status = resp["EndpointStatus"]
print("Status: " + status)
while status == "Creating":
    time.sleep(60)
    resp = sm.describe_endpoint(EndpointName=endpoint_name)
    status = resp["EndpointStatus"]
    print("Status: " + status)
print("Arn: " + resp["EndpointArn"])
print("Status: " + status)
# -
# ### Testing
# +
file_name = "../sample/street_view_of_a_small_neighborhood.png"
with open(file_name, 'rb') as f:
    payload = f.read()
Image.open(file_name)
# -
# %%time
# Invoke the live endpoint with the raw image bytes.
client = boto3.client("runtime.sagemaker")
response = client.invoke_endpoint(
    EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)
response = json.loads(response["Body"].read())
# Decode the base64-encoded mask and reshape to the 96x128 prediction grid.
pred_decoded_byte = base64.decodebytes(bytes(response["base64_prediction"], encoding="utf-8"))
pred_decoded = np.reshape(
    np.frombuffer(pred_decoded_byte, dtype=np.uint8), (96, 128)
)
plt.imshow(pred_decoded);
# ### Cleanup
# Tear down billable resources: model, endpoint, and endpoint config.
client = boto3.client("sagemaker")
client.delete_model(ModelName=sm_model_name)
client.delete_endpoint(EndpointName=endpoint_name)
client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
| notebook/04_SageMaker.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## DEMO Part 01 join data using Pandas Dataframe merge
from datetime import datetime as dt
# Record start time for rough timing of this notebook run.
t0 = dt.utcnow()
# %run "__DEMO Part 00.ipynb"
# ### Join dataframes on Site ID with df.merge()
# ## massage type of SiteDB_ID column
# Coerce the key to numeric so both join columns share a dtype; values that
# fail to parse become NaN and will not match in the inner join.
sites_gdf["SiteDB_ID"] = pd.to_numeric(sites_gdf["SiteDB_ID"], downcast='integer', errors='coerce')
merged_by_siteid = pd.merge(sites_gdf, slip_rate_df, left_on="SiteDB_ID", right_on='Site Data_Site DB ID', how='inner')
# Show only the identifying and location columns of the joined frame.
merged_by_siteid.filter(items=['SiteDB_ID', "Site Data_Site DB ID", 'POINT_X', 'geometry',
                               "Site Data_Site Name",
                               'Site Data_Easting (NZTM)', 'Site Data_Northing (NZTM)'])
| docker/fs/home/jovyan/DEMO/__DEMO Part 01 join using Panda DF.merge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## The Art of Data Science
# ## <NAME>, Data Scientist
#
# > “Jack of all trades, master of none, though oft times better than master of one."
#
# **Motivation for this task**
#
# - Solve a business problem
# - Understand the end-to-end approach
# - Build a data-driven Machine Learning application on the cloud
#
#
# **Approach**: we take a case-driven task to showcase this, aiming to go wide rather than deep. The approach will be both practical and scalable. Let's start by understanding the overall steps involved in building a data-driven application.
#
# 
#
# ## FRAME
#
# > "Doing data science requires more time thinking than doing."
#
# A financial institution provides loans to consumers and has been doing so for a number of years. It now plans to adopt a data-driven lens to its loan portfolio. What are the **type of questions** it can ask?
# - What is the trend in loan defaults?
# - Do older customers have more loan defaults?
# - Which customers are likely to have a loan default?
# - Why do customers default on their loans?
#
#
# ### Type of data-driven analytics
# - **Descriptive**: Understand historic patterns, trends, deviations, relationships and outlier
# - **Inquisitive**: Conduct hypothesis testing
# - **Predictive**: Make a prediction
#
# **Case Question: What is the probability of a loan default?**
# ## ACQUIRE
#
# > "Data is the new oil"
#
# **Ways to acquire data** (typical data source)
#
# - Download from an internal system
# - Obtained from client, or other 3rd party
# - Extracted from a web-based API
# - Scraped from a website
# - Extracted from a PDF file
# - Gathered manually and recorded
#
# **Data Formats**: flat files (e.g. csv, tsv, xls), databases (e.g. MySQL), streaming (e.g. json), storage (e.g. HDFS)
# +
# Load the libraries and configuration
import numpy as np
import pandas as pd
import warnings
# NOTE(review): this silences ALL warnings for the whole notebook — fine for
# a demo, but it also hides deprecation and data-quality warnings.
warnings.filterwarnings('ignore')
# Load the data and see head
data = pd.read_csv("loan.csv")
data.head()
# -
# *Target*
# - **default**: whether the applicant defaulted (1) or not (0)?
#
# *Features*
# - **age**: age of the applicant
# - **income**: annual income of the applicant
# - **year**: no. of years of employment
# - **ownership**: type of house owned
# - **amount** : amount of loan requested by the applicant
# - **grade**: credit grade of the applicant
# ## REFINE
#
# > "Data is messy"
#
# - **Remove** e.g. remove redundant data from the dataframe
# - **Derive** e.g. state and city from the location field
# - **Parse** e.g. extract date from year and month column
#
# Also, you need to check for consistency and quality of the data
# - **Missing** e.g. Check for missing or incomplete data
# - **Quality** e.g. Check for duplicates, accuracy, unusual data
#
# Let's check for missing values in our data
# Per-column count of NaNs.
data.isnull().sum()
# ### Handling missing values
#
# - **REMOVE** - NaN rows or columns
# - **IMPUTE** - Replace them with something: mean, median, a fixed number (based on domain), or a high number (e.g. 999, though this could cause issues later)
# - **BIN** - Convert to categorical variable and "missing becomes a category"
# - **DOMAIN SPECIFIC** - Entry error, pipeline, etc.
#
# In our case, let's replace missing values for years with mean
#
# Let us replace the NaNs with mean for years
np.mean(data.years)
# There is a fillna function for missing data
# NOTE(review): the mean is computed over the non-NaN values only, which is
# the intended imputation value here.
data.years = data.years.fillna(np.mean(data.years))
# ## EXPLORE
#
# > "I don't know, what I don't know"
#
# ### Data Types
#
# - **Categorical**
# - *Nominal*: home owner [rent, own, mortgage]
# - *Ordinal*: credit grade [A > B > C > D > E]
# - **Continuous**
# - *Interval*: approval date [20/04/16, 19/11/15]
# - *Ratio*: loan amount [3000, 10000]
#
# ### Visual Exploration
# - Explore **one-dimensional visualisation**
# - Explore **two-dimensional visualisation**
# - Explore **multi-dimensional visualisation**
# +
# Load the plotting libraries
from plotnine import *
# %matplotlib inline
from plotnine.themes import theme_538
# Convert `default` to categorical variable
# Plotting on a copy keeps the numeric `default` column intact for modelling.
data_plot = data.copy()
data_plot['default'] = data_plot['default'].astype('category')
# -
# ### Two Dimension Exploration
#
# We expect the default rate to go up as the credit score of the customer go down.
# Let's see the relationship between `grade` and `default`
# position='fill' normalises each bar to 1, showing default *proportion* per grade.
(ggplot(data_plot) + aes('grade', fill ="default") +
    geom_bar(position = 'fill') + theme_538())
# ### Three Dimension Exploration
#
# We would like to understand what impact does age and income have on the default rate
# Let us see the relationship between `age`, `income` and `default`
# Log income axis; one 2-D histogram panel per default class.
( ggplot(data_plot) + aes('age', 'income', color='default') +
    geom_bin2d() + scale_y_log10() +
    facet_wrap("default") + theme_538()
)
# ## TRANSFORM
#
# > "What is measured may not help answer what is needed "
#
# **Scale Transformation** e.g.
# - Log Transform
# - Sqrt Transform
#
# **Mutate & Summarize** e.g.
# - **Convert** e.g. free text to coded value
# - **Calculate** e.g. percentages, proportion
# - **Merge** e.g. first and surname for full name
# - **Aggregate** e.g. rollup by year, cluster by area
# - **Filter** e.g. exclude based on location
# - **Sample** e.g. extract a representative data
# - **Summary** e.g. show summary stats like mean
#
# **Categorical Encodings** e.g.
# - Label Encoding
# - One Hot Encoding
# Two of the columns are categorical in nature - `grade` and `ownership`. To build models, we need all of the features to be numeric. There exists a number of ways to transform categorical variables to numeric values.
#
# We will use one of the popular options: `LabelEncoding`
#
#
# +
# Load the library for preprocessing
from sklearn.preprocessing import LabelEncoder
# Let's not modify the original dataset. Let's copy it in another dataset
data_encoded = data.copy()
# +
# instantiate label encoder
le_grade = LabelEncoder()
le_ownership = LabelEncoder()
# fit label encoder
le_grade = le_grade.fit(data_encoded["grade"])
# NOTE(review): fitted on `data` while the grade encoder uses `data_encoded`;
# both hold identical values here, but the inconsistency is worth confirming.
le_ownership = le_ownership.fit(data["ownership"])
# Transform the label
data_encoded.grade = le_grade.transform(data_encoded.grade)
data_encoded.ownership = le_ownership.transform(data_encoded.ownership)
# Lets see the encoded data now
data_encoded.head()
# -
# ## MODEL
#
#
# > "All models are wrong, Some of them are useful"
#
#
# ### Supervised Learning
#
# Given a set of **feature** `X`, to predict the value of **target** `y`
# - If `y` is *continuous* - **Regression**
# - If `y` is *categorical* - **Classification**
#
# **Model Family**
# - Linear
# - Tree-Based
# - Kernel-Based
# - Neural Network
#
# **Choosing a Model**
#
# 1. Interpretability
# 2. Run-time
# 3. Model complexity
# 4. Scalability
#
# Let's build two tree-based classifier - Decision Tree & Random Forest
# +
# Load the library
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# Setup the features and target
# Column 0 is the target (`default`); the remaining columns are features.
X = data_encoded.iloc[:,1:]
y = data_encoded.iloc[:,0]
# -
# Save the prediction class and probabilities
def prediction(clf, X, y):
    """Fit *clf* on (X, y) and return a frame of actual vs. predicted labels.

    The returned DataFrame has three columns: `actual` and `predicted`
    (both cast to categorical) and `probability`, the predicted
    probability of the positive class.
    """
    fitted = clf.fit(X, y)
    predicted_class = fitted.predict(X)
    positive_proba = fitted.predict_proba(X)[:, 1]
    result = pd.DataFrame({"actual": np.array(y),
                           "predicted": predicted_class,
                           "probability": positive_proba})
    result['predicted'] = result['predicted'].astype('category')
    result['actual'] = result['actual'].astype('category')
    return result
# +
# Build a Decision Tree Classifier
clf_tree = DecisionTreeClassifier(max_depth=10)
prediction_tree = prediction(clf_tree, X, y)
# Build a Random Forest Classifier
# NOTE: these predictions are in-sample (fit and predict on the same data);
# generalisation is assessed later via cross-validation.
clf_forest = RandomForestClassifier(n_estimators=40)
prediction_forest = prediction(clf_forest, X, y)
# -
# Let us see how well the classifiers are performing in separating the two classes
# Plotting predicted probability VS. actuals for Decision Tree Classifier
# Well-separated density humps per class indicate good discrimination.
(ggplot(prediction_tree) + aes('probability', fill='actual') +
    geom_density(alpha = 0.5) + theme_538()
)
# Plotting predicted probability VS. actuals for Random Forest Classifier
(ggplot(prediction_forest) + aes('probability', fill='actual') +
    geom_density(alpha = 0.5) + theme_538()
)
# ## INSIGHT
#
# > "The purpose of data science is to create insight"
#
# While we have created many models, we still don't have a *measure* of how good each model is or which one we should pick. We need to measure some accuracy metric of the model and have confidence that it will generalize well. We should be confident that when we put the model in production (real life), the accuracy we get from the model results will mirror the metrics we obtained when we built the model.
#
# - Choosing an Error Metric: `Area Under Curve`
# - Cross Validation: How well will the model generalize on unseen data
# ### Cross Validation using AUC
#
# We will use `StratifiedKFold`. This ensures that in each fold, the proportion of positive class and negative class remain similar to the original dataset. This is the process we will follow to get the mean cv-score
#
# 1. Generate k-fold
# 2. Train the model using k-1 fold
# 3. Predict for the kth fold
# 4. Find the accuracy.
# 5. Append it to the array
# 6. Repeat 2-5 for different validation folds
# 7. Report the mean cross validation score
# Load the libraries
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.model_selection import StratifiedKFold
# Setup a function to conduct cross-validation
def cross_validation(clf, X, y, k):
    """Print per-fold and mean AUC from stratified k-fold cross-validation.

    Stratification keeps the class balance of *y* in every fold.
    """
    fold_scores = []
    for train_idx, test_idx in StratifiedKFold(n_splits=k).split(X, y):
        fitted = clf.fit(X.iloc[train_idx], y.iloc[train_idx])
        # Probability of the positive class on the held-out fold.
        fold_proba = fitted.predict_proba(X.iloc[test_idx])[:, 1]
        score = roc_auc_score(y.iloc[test_idx], fold_proba)
        print(score)
        fold_scores.append(score)
    print("Mean K Fold CV:", np.mean(fold_scores))
# Lets get the cross-validation score for Decision Tree Classifier
cross_validation(clf_tree, X, y, 5)
# Lets get the cross-validation score for Random Forest Classifier
cross_validation(clf_forest, X, y, 5)
# ## DEPLOY
#
# > "What you build - you test, you ship and you maintain"
#
# Once the final model has been selected, we need to ensure that other data application can access the model and use it in their process. This requires us to do two important tasks.
#
# - Serialising the Model (e.g. `pickle`)
# - Serving the ML Model as a Service
#
# Build the selected model
# Fit the chosen Random Forest on the full dataset for deployment.
loan_default_model = RandomForestClassifier(n_estimators=40).fit(X, y)
# ### Model Serialization
#
# We will need to serialize both the model and the encoders used to create them
# +
# Use joblib to serialize the model
# sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed
# in 0.23; prefer the standalone joblib package, falling back only on very
# old installations.
try:
    import joblib
except ImportError:  # legacy scikit-learn (< 0.23)
    from sklearn.externals import joblib

joblib.dump(loan_default_model, "loan_default_model.pkl")
joblib.dump(le_grade, "le_grade.pkl")
joblib.dump(le_ownership, "le_ownership.pkl");
# -
# ### ML as a service
#
# While we can package the model with the application and use it, that creates tight coupling between the two. Every time the model changes, the application will have to change. What if more than one application uses the same model?
#
# It is lot simpler to deploy the ML model as a service exposing it's functionality through an HTTP API.
#
# In this task we are going to use a tool called firefly for running the model as a service.
# +
# %%file loan_default_api.py
"""Service to expose the credit risk model as an API.
"""
# sklearn.externals.joblib was removed in scikit-learn 0.23; use the
# standalone joblib package, falling back for legacy installations.
try:
    import joblib
except ImportError:  # legacy scikit-learn (< 0.23)
    from sklearn.externals import joblib

# read the encoders and the model
grade_encoder = joblib.load("le_grade.pkl")
ownership_encoder = joblib.load("le_ownership.pkl")
model = joblib.load("loan_default_model.pkl")

def predict(amount, years, age, ownership, income, grade):
    """Returns the probability of default for given features.
    """
    # encoders work on a vector. Wrapping in a list as we only have a single value
    ownership_code = ownership_encoder.transform([ownership])[0]
    grade_code = grade_encoder.transform([grade])[0]
    # important to pass the features in the same order as we built the model
    features = [amount, grade_code, years, ownership_code, income, age]
    # probability for not-defaulting and defaulting
    # Again, wrapping in a list as a list of features is expected
    p0, p1 = model.predict_proba([features])[0]
    return p1
# -
# ### Start the ML Service
# Run the following command in your terminal
#
# firefly loan_default_api.predict
#
# <br>
# ## BUILD
#
# > "The role of the data scientist is to build a data-driven product solution."
#
# Now that we have a prediction API, this can be consumed as part of many applications to provide insight and help in decision making.
#
# - Dashboards
# - Web / Mobile Application
# - IoT Applications
# +
# Load the libraries
from firefly.client import Client
# Access the predict function from the jupyter notebook
# Assumes the firefly service started in the previous step is listening here.
loan_default_api = Client("http://127.0.0.1:8000")
# -
# Example 1
loan_default_api.predict(amount=100000, years=2, age=35, ownership='RENT', income=12345, grade='A')
# Example 2
loan_default_api.predict(amount=100000, years=2, age=35, ownership='RENT', income=12345, grade='G')
# Example 3
loan_default_api.predict(amount=100, years=2, age=35, ownership='RENT', income=12345, grade='G')
| loandefault.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Descarga de datos por parte del usuario
# ====
# Descarga en tres archivos para facilitar la integración posterior.
# * Archivo `demo-citation.csv`
# 
# * Archivo `demo-abstract.csv`
# 
# * Archivo `demo-references.csv`
# 
| part-01-data-download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocessing
#
# 1. Extract sequence features
# 2. Encode numerical feature vector
# 3. Save to `tsv` and `hdf` files
import os
import re
import import_ipynb
import numpy as np
import pandas as pd
from utils import load_sequences
from tqdm import tqdm
from functools import reduce
# +
# set your configurations here
# Flip the literal below (1 -> 0) to switch between training and testing sets.
if 1: ## for training set
    sequences_dir = "../Data_files/Sequences_data/Training Set/"
    ftrs_and_labels_csv = "../Data_files/features&labels/training_set.csv"
    ftrs_and_labels_h5 = "../Data_files/features&labels/training_set.h5"
    chunk_size = 21
else: ## for testing set
    sequences_dir = "../Data_files/Sequences_data/Testing Set/"
    ftrs_and_labels_csv = "../Data_files/features&labels/testing_set.csv"
    ftrs_and_labels_h5 = "../Data_files/features&labels/testing_set.h5"
    chunk_size = 21

# NOTE(review): newer pandas requires the 'display.' prefix
# ('display.max_colwidth') — confirm against the pinned pandas version.
pd.set_option('max_colwidth',20)
pd.set_option('display.max_columns', None)
# -
# ## Feature extraction
# +
sequences_df = load_sequences(sequences_dir)
df = sequences_df
# Number of flanking bases on each side of a splice site.
hlfchs = int((chunk_size - 1) / 2)
tqdm.pandas(desc="processing")
# Donor/acceptor positions parsed from the CDS join string "start..end,start..end,...".
df['Donor_sites'] = df['CDSjoin'].apply(lambda x: {int(i.split('..')[1]) for i in x.split(',')})
df['Accpt_sites'] = df['CDSjoin'].apply(lambda x: {int(i.split('..')[0]) for i in x.split(',')})
# Every remaining interior position is a negative (non-site) example.
df['Other_sites'] = df.progress_apply(lambda x: list(set(range(hlfchs + 1, len(x['Sequence']) - hlfchs)) -
                                                     x['Accpt_sites'] - x['Donor_sites']),
                                      axis=1)
# Extract a chunk_size-wide window centred on each site.
df['Donor_seqs'] = df.progress_apply(lambda x: np.array([x['Sequence'][i-hlfchs:i+hlfchs+1] for i in x['Donor_sites']]), axis=1)
df['Accpt_seqs'] = df.progress_apply(lambda x: np.array([x['Sequence'][i-hlfchs:i+hlfchs+1] for i in x['Accpt_sites']]), axis=1)
df['Other_seqs'] = df.progress_apply(lambda x: np.array([x['Sequence'][i-hlfchs:i+hlfchs+1] for i in x['Other_sites']]), axis=1)
df[0:10]
# -
# ## Feature vector encoding
# +
def onehot_enc(onehot_matrix, dna_seq):
    """One-hot encode a lowercase DNA string using rows of *onehot_matrix*.

    Bases a/c/g/t map to rows 0-3; the per-base rows are concatenated into
    a single flat vector.
    """
    code_by_replc = dna_seq.replace('a', '0').replace('c', '1').replace('g', '2').replace('t', '3')
    code_by_onehot = np.concatenate([onehot_matrix[int(i)] for i in code_by_replc])
    return code_by_onehot

# Matches any residual letter left after a/c/g/t were replaced by digits,
# i.e. any non-standard base such as 'n'.
extra_base = re.compile(pattern='[a-z]')
enc_by_replc = lambda x: x.replace('a', '0').replace('c', '1').replace('g', '2').replace('t', '3')

def sequence_filter(sequences, regex_compiled, length=21):
    """Keep sequences of the expected length containing only a/c/g/t.

    Fix: the *regex_compiled* argument was previously ignored in favour of
    the module-level ``extra_base``; it is now honoured. The window length
    is parameterised (default 21, the previous hard-coded value).
    """
    return [seq for seq in sequences
            if len(seq) == length and regex_compiled.findall(string=enc_by_replc(seq)) == []]
# -
# Drop malformed windows (wrong length or non-acgt bases) from each class.
for col in ['Donor_seqs', 'Accpt_seqs', 'Other_seqs']:
    df[col] = [sequence_filter(seq, regex_compiled=extra_base) for seq in tqdm(df[col])]
# +
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# ``int`` yields the same platform default integer dtype.
oh_matrix = np.eye(4, dtype=int)
# Flat one-hot feature vector (length 4 * chunk_size) for every window.
df['Donor_ftrs'] = df.progress_apply(lambda x: np.array([onehot_enc(onehot_matrix=oh_matrix, dna_seq=i) for i in x['Donor_seqs']]), axis=1)
df['Accpt_ftrs'] = df.progress_apply(lambda x: np.array([onehot_enc(onehot_matrix=oh_matrix, dna_seq=i) for i in x['Accpt_seqs']]), axis=1)
df['Other_ftrs'] = df.progress_apply(lambda x: np.array([onehot_enc(onehot_matrix=oh_matrix, dna_seq=i) for i in x['Other_seqs']]), axis=1)
df[0:10]
# -
# ## Reformatting
# +
samples = pd.DataFrame()
concat = np.concatenate
concatd_doseqs = concat(df['Donor_seqs'])
concatd_acseqs = concat(df['Accpt_seqs'])
concatd_otseqs = concat(df['Other_seqs'])
# Boolean mask selecting 1e5 negatives uniformly at random (class downsampling).
choose_ix = concat((np.ones(int(1e5), dtype=int), np.zeros(len(concatd_otseqs) - int(1e5), dtype=int))).astype(bool)
np.random.shuffle(choose_ix)
samples['Seq. features'] = concat((concatd_doseqs, concatd_acseqs, concatd_otseqs[choose_ix]))
# +
concatd_donums = concat(df['Donor_ftrs'])
concatd_acnums = concat(df['Accpt_ftrs'])
concatd_otnums = concat(df['Other_ftrs'])
# NOTE(review): reusing choose_ix assumes Other_seqs and Other_ftrs rows are
# aligned — true as long as both were built from the same filtered sites.
samples['Num. features'] = concat((concatd_donums, concatd_acnums, concatd_otnums[choose_ix]), axis=0).tolist()
nsamples = samples.shape[0]
ndoseqs = concatd_doseqs.shape[0]
nacseqs = concatd_acseqs.shape[0]
# One-vs-rest labels: first ndoseqs rows are donors, next nacseqs acceptors.
samples['IsDonor'] = concat((np.ones(ndoseqs), np.zeros(nsamples - ndoseqs)))
samples['IsAcceptor'] = concat((np.zeros(ndoseqs), np.ones(nacseqs), np.zeros(nsamples - ndoseqs - nacseqs)))
# -
pd.set_option('max_colwidth',150)
samples[0:10]
# ## Saving to disk
# +
# Create the output folders if they do not exist yet.
folders = set([os.path.split(ftrs_and_labels_h5)[0], os.path.split(ftrs_and_labels_csv)[0]])
for folder in folders:
    if not os.path.isdir(folder):
        os.mkdir(folder)
# NOTE(review): only the first 10000 samples are persisted — confirm this
# truncation is intentional.
samples[0:10000].to_hdf(ftrs_and_labels_h5, key='data')
samples[0:10000].to_csv(ftrs_and_labels_csv)
# -
# ## Report generation
# NOTE(review): the converted name suggests this notebook was previously
# called Preprocessor.ipynb — confirm the filename below still matches.
# ! jupyter nbconvert --to html Preprocessor.ipynb --output ../Presentation/Preprocessor.html
| Source/Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.7 64-bit
# language: python
# name: python36764bit72d3d421fe404776abad78b8443317dc
# ---
# # ADC sampling and quantization
#
# This example shows how to use **fxpmath** to model *signal quantization* in an analog-to-digital conversion.
# +
import numpy as np
import matplotlib.pyplot as plt
from fxpmath import Fxp
# -
# ## signal
#
# We use a single sinusoidal signal
# +
fs = 1e5 # sampling frequency
n_samples = 2**10 # number of samples
f_sig = 2 # signal frequency
A = 0.8 # amplitude
n = np.arange(n_samples)
t = n / fs
# NOTE(review): the phase uses n/n_samples, so f_sig is cycles per window
# rather than Hz; fs and t are not used to generate the signal — confirm intent.
sig = A * np.sin(2*np.pi*f_sig*n/n_samples)
# -
plt.figure(figsize=(12,6))
plt.plot(n, sig)
plt.show()
# ## Quantization with 8 bits, 7 fractional (fxp-s8/7)
# +
signed = True
n_word = 8
n_frac = 7
# Resolution is 2**-7; at 8 bits this is nearly indistinguishable from the input.
sig_fxp = Fxp(sig, signed=signed, n_word=n_word, n_frac=n_frac)
# -
plt.figure(figsize=(12,6))
plt.plot(n, sig)
plt.plot(n, sig_fxp)
plt.show()
# ## Quantization with 8 bits, 4 fractional (fxp-s8/4)
# +
signed = True
n_word = 8
n_frac = 4
# Resolution is 2**-4 = 0.0625; quantization steps become visible.
sig_fxp = Fxp(sig, signed=signed, n_word=n_word, n_frac=n_frac)
# -
plt.figure(figsize=(12,6))
plt.plot(n, sig)
plt.plot(n, sig_fxp)
plt.show()
# ## Quantization with 8 bits, 2 fractional (fxp-s8/2)
# +
signed = True
n_word = 8
n_frac = 2
# Resolution is 2**-2 = 0.25; the signal collapses onto a coarse staircase.
sig_fxp = Fxp(sig, signed=signed, n_word=n_word, n_frac=n_frac)
# -
plt.figure(figsize=(12,6))
plt.plot(n, sig)
plt.plot(n, sig_fxp)
plt.show()
# ## fxp-s8/2 with several rounding methods
# +
signed = True
n_word = 8
n_frac = 2
# Compare how each rounding mode maps the same samples onto the coarse grid.
roundings = ['ceil', 'floor', 'around', 'fix', 'trunc']
plt.figure(figsize=(12,6))
plt.plot(n, sig)
for r in roundings:
    sig_fxp = Fxp(sig, signed=signed, n_word=n_word, n_frac=n_frac, rounding=r)
    plt.plot(n, sig_fxp, label=r)
plt.legend()
plt.show()
| examples/ADC_sampling_quantization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [mrb00l34n](http://github.com/mrb00l34n). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Counting Ways of Making Change.
#
# * [Explanation](#Explanation)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Explanation
#
# How many ways are there of making change for n, given an array of distinct coins? For example:
#
# Input: n = 4, coins = [1, 2]
#
# Output: 3. 1+1+1+1, 1+2+1, 2+2, would be the ways of making change.
#
# Note that a coin can be used any number of times, and we are counting unique combinations.
# ## Test Cases
#
# * Input: n = 0, coins = [1, 2] -> Output: 0
# * Input: n = 100, coins = [1, 2, 3] -> Output: 884
# * Input: n = 1000, coins = [1, 2, 3...99, 100] -> Output: 15658181104580771094597751280645
#
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/recursion_dynamic/coin_change_ways/coin_change_ways_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
def change_ways(n, coins):
    """Count the distinct combinations of coins that sum to n.

    Args:
        n: Target amount (non-negative int). Per the notebook's test cases,
           n == 0 yields 0 (the empty combination is not counted).
        coins: Iterable of distinct positive coin denominations; each coin
           may be used any number of times.

    Returns:
        Number of unique multisets of coins summing to n.
    """
    if n <= 0:
        # Test cases above define change_ways(0, ...) == 0.
        return 0
    # Bottom-up DP: ways[a] = number of combinations summing to a.
    # Iterating coins in the OUTER loop counts combinations (order-independent),
    # not permutations.
    ways = [1] + [0] * n
    for coin in coins:
        for amount in range(coin, n + 1):
            ways[amount] += ways[amount - coin]
    return ways[n]
# ## Unit Test
#
#
#
# **The following unit test is expected to fail until you solve the challenge.**
# +
# # %load test_coin_change_ways.py
from nose.tools import assert_equal
class Challenge(object):
    """Unit-test harness for a coin-change ``solution`` callable."""

    def test_coin_change_ways(self, solution):
        """Check ``solution(n, coins)`` against the known expected counts."""
        cases = (
            ((0, [1, 2]), 0),
            ((100, [1, 2, 3]), 884),
            ((1000, range(1, 101)), 15658181104580771094597751280645),
        )
        for args, expected in cases:
            assert_equal(solution(*args), expected)
        print('Success: test_coin_change_ways')
def main():
    """Run the challenge's unit test against change_ways."""
    Challenge().test_coin_change_ways(change_ways)


if __name__ == '__main__':
    main()
# -
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/recursion_dynamic/coin_change_ways/coin_change_ways_solution.ipynb) for a discussion on algorithms and code solutions.
| recursion_dynamic/coin_change_ways/coin_change_ways_challenge.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # [XOR decryption](https://projecteuler.net/problem=59)
#
# > Each character on a computer is assigned a unique code and the preferred standard is ASCII (American Standard Code for Information Interchange). For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
# >
# > A modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, taken from a secret key. The advantage with the XOR function is that using the same encryption key on the cipher text, restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
# >
# > For unbreakable encryption, the key is the same length as the plain text message, and the key is made up of random bytes. The user would keep the encrypted message and the encryption key in different locations, and without both "halves", it is impossible to decrypt the message.
# >
# > Unfortunately, this method is impractical for most users, so the modified method is to use a password as a key. If the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message. The balance for this method is using a sufficiently long password key for security, but short enough to be memorable.
# >
# > Your task has been made easy, as the encryption key consists of three lower case characters. Using p059_cipher.txt (right click and 'Save Link/Target As...'), a file containing the encrypted ASCII codes, and the knowledge that the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.
#
# ASCII has been supplanted by UTF-8 as “the preferred standard” for character encoding and is what Julia uses, but UTF-8 was designed for backward compatibility with ASCII. As long as we are using characters with code points less than 128 (0x80), we don’t have to differentiate between the two standards.
using DelimitedFiles
# Read the comma-separated cipher bytes; readdlm with UInt8 yields the raw code points.
C = readdlm("p059_cipher.txt", ',', UInt8) # vector of bytes
# Sanity check: min/max byte values of the cipher text.
extrema(C)
# We’re told that the encryption key is three characters long. We’ll reshape the cipher text so we can do frequency analysis on each row—data that was all encrypted with the same character.
keysize = 3
# One row per key character. NOTE(review): reshape requires length(C) to be an
# exact multiple of keysize — cld only rounds the column count up; confirm the
# cipher length is divisible by 3.
C′ = reshape(C, (keysize, cld(length(C), keysize)))
# Now, for each row, we’ll find the most frequent entry. We know the plain text is made up of common English words. Those will be separated by space characters (0x20), so this will be the most frequently occuring character in any reasonably long span of text. XORing the character code we identify in the cipher with 0x20 should recover the key character.
K = [] # key code points
for R in eachrow(C′)
    # Frequency table for this row: code point => occurrences.
    F = Dict(i => count(isequal(i), R) for i in unique(R))
    m = argmax(F) # most frequent code point in cipher
    # XOR with 0x20 (space) recovers the key byte, assuming space is the most
    # common plaintext character in this row.
    k = m ⊻ 0x20 # the key for this row
    println(m => Char(k))
    push!(K, k)
end
K
# It’s a good sign that all three results are lower-case letters, as hinted in the problem statement. Now that we have the letters of the key, ‘exp’, we can turn our attention to decrypting the cipher text. We’ll repeat the letters we found so that the key is the same length as the cipher text
# Tile the 3-byte key to at least the cipher length (cld rounds up).
Kᵣ = repeat(K, cld(length(C), length(K)))
# Now we can compute the bitwise XOR (Julia’s `⊻` operator—`\xor` or `\veebar`) for each element of the cipher and the key. Converting those code points back into characters reveals the plaintext. Adding them up gives the number required by the problem.
# map over two collections stops at the shorter one, so any excess key bytes are ignored.
P = map(⊻, C, Kᵣ)
print(join(map(Char, P)))
# Final answer: sum of the plaintext ASCII values.
Int(sum(P))
| work/problem059.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualisation and analysis of the results from the DESI-MS linear mixed models
# ### Import required packages
import pandas as pds
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib.colors import ListedColormap
# ## Read the raw DESI-MS data
# ### VMET2 Cohort
# +
# Load positive/negative ion-mode DESI-MS matrices for the VMET2 cohort and
# attach the CST (community state type) metadata by sample ID.
desi_pos_vmet2 = pds.read_csv('../../Data/VMET2_DESI-MS_POS.csv')
desi_neg_vmet2 = pds.read_csv('../../Data/VMET2_DESI-MS_NEG.csv')
vmet2_metadata = pds.read_csv('../../Data/VMET2_CSTAssignment.csv')
desi_pos_vmet2 = vmet2_metadata.merge(desi_pos_vmet2, on='Seq_ID')
desi_neg_vmet2 = vmet2_metadata.merge(desi_neg_vmet2, on='Seq_ID')
# -
# ### VMET Cohort
# +
# Load positive/negative ion-mode DESI-MS matrices for the VMET cohort and
# attach the CST metadata by sample ID.
# BUG FIX: the positive-mode frame previously read 'VMET_DESI-MS_NEG.csv'
# (copy-paste error), making desi_pos_vmet a duplicate of desi_neg_vmet.
desi_pos_vmet = pds.read_csv('../../Data/VMET_DESI-MS_POS.csv')
desi_neg_vmet = pds.read_csv('../../Data/VMET_DESI-MS_NEG.csv')
vmet_metadata = pds.read_csv('../../Data/VMET_CSTAssignment.csv')
desi_pos_vmet = vmet_metadata.merge(desi_pos_vmet, left_on='Seq_ID', right_on='Seq_ID')
desi_neg_vmet = vmet_metadata.merge(desi_neg_vmet, left_on='Seq_ID', right_on='Seq_ID')
# -
# ## Replication of results across the VMET2 and VMET studies
# Find matching peaks (peaks within a certain m/z error from each other) from the VMET and VMET2 datasets which were found to be statistically significant in the linear mixed model analysis of both datasets.
# +
# Define these 2 utility functions to do the matching and comparison of output files from the
# "Linear mixed effect models DESI-MS.Rmd" notebook.
def checkResults(file1, file2, excelFile, datasetNames, sigLevel=0.05, tolerance=5):
    """Compare two linear-mixed-model result CSVs and export their overlap.

    For every p-value contrast column present in both files, collect the
    metabolites (m/z values) significant at `sigLevel`, match them across
    files within `tolerance` ppm via matchPeaks, and write shared /
    file1-only / file2-only sheets to `excelFile`.

    Parameters
    ----------
    file1, file2 : str
        Paths to the two LME result CSVs (must contain a 'Metabolite'
        column plus '*_pval' contrast columns).
    excelFile : str
        Output .xlsx path.
    datasetNames : sequence of 2 str
        Labels used for sheet names and output column prefixes.
    sigLevel : float
        Significance cut-off applied to each contrast's p-values.
    tolerance : float
        Maximum relative m/z error (ppm) for two peaks to count as matched.

    Returns
    -------
    (match_list, file1_only_list, file2_only_list) : three lists of
        [contrast, DataFrame] pairs.
    """
    file1_res = pds.read_csv(file1)
    file2_res = pds.read_csv(file2)
    contrasts_file1_names = file1_res.columns
    contrasts_file2_names = file2_res.columns
    # Only contrasts present in both result files can be compared.
    contrast_names = contrasts_file1_names[contrasts_file1_names.isin(contrasts_file2_names)]
    contrast_names = [x for x in contrast_names if '_pval' in x]
    match_list = list()
    file1_only_list = list()
    file2_only_list = list()
    with pds.ExcelWriter(excelFile) as writer:
        for contrast in contrast_names:
            # Metabolites significant for this contrast in each file.
            file1_sig_names = file1_res.loc[file1_res[contrast] < sigLevel, 'Metabolite']
            file2_sig_names = file2_res.loc[file2_res[contrast] < sigLevel, 'Metabolite']
            matches = matchPeaks(file1_sig_names, file2_sig_names, datasetNames, tolerance)
            matches.to_excel(writer, sheet_name=contrast + 'Shared')
            match_list.append([contrast, matches])
            # Significant in one dataset but with no ppm-level match in the other.
            file1_only = pds.DataFrame(file1_sig_names[~file1_sig_names.isin(matches[datasetNames[0] + '_Metabolite'])])
            file2_only = pds.DataFrame(file2_sig_names[~file2_sig_names.isin(matches[datasetNames[1] + '_Metabolite'])])
            file1_only_list.append([contrast, file1_only])
            file2_only_list.append([contrast, file2_only])
            file1_only.to_excel(writer, sheet_name=contrast + ' ' + datasetNames[0])
            file2_only.to_excel(writer, sheet_name=contrast + ' ' + datasetNames[1])
    return match_list, file1_only_list, file2_only_list
def matchPeaks(vec1, vec2, datasetNames, tolerance=5):
    """Pair each peak in *vec1* with its nearest m/z neighbour in *vec2*.

    A pair is kept only when the relative mass error of the closest
    candidate is below *tolerance* (parts per million). Returns a DataFrame
    with the positional indices and m/z values of every matched pair,
    columns prefixed by the two *datasetNames*.
    """
    left = vec1.reset_index(drop=True)
    right = vec2.reset_index(drop=True)
    rows = []
    for pos, mz in enumerate(left):
        deltas = np.abs(mz - right)
        nearest = np.argmin(deltas)
        # ppm error of the closest candidate decides whether the pair counts.
        if deltas[nearest] / mz * 1e6 < tolerance:
            rows.append((pos, mz, nearest, right[nearest]))
    columns = [datasetNames[0] + '_idx', datasetNames[0] + '_Metabolite',
               datasetNames[1] + '_idx', datasetNames[1] + '_Metabolite']
    return pds.DataFrame(rows, columns=columns)
# -
# ## Identify statistically significant features shared between the VMET2 and VMET datasets
#
# By default a metabolic feature is considered replicated in both VMET and VMET2 if for a statistically significant marker (Benjamini-Hochberg FDR q-value < 0.05) there is another signal within less than 5 ppm error in the other dataset which also has a BH q-value < 0.05.
#
# Two files with the positive and negative ion mode overlap are exported, **DESI_NEG_BH_LinearModelReplication.xlsx** and **DESI_POS_BH_LinearModelReplication.xlsx**.
# +
# Paths to the LME result files: raw p-values and Benjamini-Hochberg (BH)
# corrected q-values, for each cohort (VMET2/VMET) and ion mode (NEG/POS).
vmet2_res_neg = './LinearMixedModels_DESI-MS/VMET2_DESI_NEG_LME_CST.csv'
vmet_res_neg = './LinearMixedModels_DESI-MS/VMET_DESI_NEG_LME_CST.csv'
vmet2_res_pos = './LinearMixedModels_DESI-MS/VMET2_DESI_POS_LME_CST.csv'
vmet_res_pos = './LinearMixedModels_DESI-MS/VMET_DESI_POS_LME_CST.csv'
vmet2_res_pos_bh = './LinearMixedModels_DESI-MS/VMET2_DESI_POS_LME_CST_BH.csv'
vmet_res_pos_bh = './LinearMixedModels_DESI-MS/VMET_DESI_POS_LME_CST_BH.csv'
vmet2_res_neg_bh = './LinearMixedModels_DESI-MS/VMET2_DESI_NEG_LME_CST_BH.csv'
vmet_res_neg_bh = './LinearMixedModels_DESI-MS/VMET_DESI_NEG_LME_CST_BH.csv'
# Cross-cohort replication: shared / cohort-only significant peaks per contrast.
dneg_matches = checkResults(vmet2_res_neg, vmet_res_neg, './LinearMixedModels_DESI-MS/DESI_NEG_LinearModelReplication.xlsx', ['VMET2', 'VMET'])
dpos_matches = checkResults(vmet2_res_pos, vmet_res_pos, './LinearMixedModels_DESI-MS/DESI_POS_LinearModelReplication.xlsx', ['VMET2', 'VMET'])
dneg_bh_matches = checkResults(vmet2_res_neg_bh, vmet_res_neg_bh, './LinearMixedModels_DESI-MS/DESI_NEG_BH_LinearModelReplication.xlsx', ['VMET2', 'VMET'])
dpos_bh_matches = checkResults(vmet2_res_pos_bh, vmet_res_pos_bh, './LinearMixedModels_DESI-MS/DESI_POS_BH_LinearModelReplication.xlsx', ['VMET2', 'VMET'])
# +
# Prepare the feature selection vectors
# Commented code can be used to account for floating point precision differences
# between exported results and raw data frame if required.
# NOTE(review): the [0][15][1] indexing pulls [match_list][16th contrast][DataFrame]
# from checkResults' return value — confirm index 15 is the intended contrast.
Dataset = 'VMET2'
dnegMetLDep_vmet2 = dneg_bh_matches[0][15][1][Dataset + '_Metabolite']
dposMetLDep_vmet2 = dpos_bh_matches[0][15][1][Dataset + '_Metabolite']
# Round metabolites and convert to string to allow column indexing by "mz"
#dnegMetLDep_vmet2 = dnegMetLDep_vmet2.round(8)
#dnegMetLDep_vmet2 = dnegMetLDep_vmet2.astype(str)
#dposMetLDep_vmet2 = dposMetLDep_vmet2.round(2)
#dposMetLDep_vmet2 = dposMetLDep_vmet2.astype(str)
Dataset = 'VMET'
dnegMetLDep_vmet = dneg_bh_matches[0][15][1][Dataset + '_Metabolite']
dposMetLDep_vmet = dpos_bh_matches[0][15][1][Dataset + '_Metabolite']
# Round metabolites and convert to string to allow column indexing by "mz"
#dnegMetLDep_vmet = dnegMetLDep_vmet.round(10)
#dnegMetLDep_vmet = dnegMetLDep_vmet.astype(str)
#dposMetLDep_vmet = dposMetLDep_vmet.round(10)
#dposMetLDep_vmet = dposMetLDep_vmet.astype(str)
# +
# Metabolite m/z values are stored as column names; columns 8:-1 are the
# intensity columns (earlier columns are metadata from the merge).
desi_neg_vmet2_metabolites = np.array([float(x) for x in desi_neg_vmet2.columns[8:-1]])
desi_pos_vmet2_metabolites = np.array([float(x) for x in desi_pos_vmet2.columns[8:-1]])
desi_neg_vmet_metabolites = np.array([float(x) for x in desi_neg_vmet.columns[8:-1]])
desi_pos_vmet_metabolites = np.array([float(x) for x in desi_pos_vmet.columns[8:-1]])
# Snap each selected m/z back to the nearest raw column value, then stringify
# so the raw data frames can be indexed by column name.
dnegMetLDep_vmet2 = np.array([desi_neg_vmet2_metabolites[np.argmin(np.abs(desi_neg_vmet2_metabolites - float(x)))] for x in dnegMetLDep_vmet2])
dposMetLDep_vmet2 = np.array([desi_pos_vmet2_metabolites[np.argmin(np.abs(desi_pos_vmet2_metabolites - float(x)))] for x in dposMetLDep_vmet2])
dnegMetLDep_vmet = np.array([desi_neg_vmet_metabolites[np.argmin(np.abs(desi_neg_vmet_metabolites - float(x)))] for x in dnegMetLDep_vmet])
dposMetLDep_vmet = np.array([desi_pos_vmet_metabolites[np.argmin(np.abs(desi_pos_vmet_metabolites - float(x)))] for x in dposMetLDep_vmet])
dnegMetLDep_vmet2 = dnegMetLDep_vmet2.astype(str)
dposMetLDep_vmet2 = dposMetLDep_vmet2.astype(str)
dnegMetLDep_vmet = dnegMetLDep_vmet.astype(str)
dposMetLDep_vmet = dposMetLDep_vmet.astype(str)
# -
# ### Generate the heatmap plots from the VMET and VMET2 linear mixed model analysis, as seen in Figure 1 B
# Helper function to re-order the dataframes according to CST for easier comparison
def assembleMat(dataframe, covariate, categoryOrder=['I', 'II', 'III', 'V', 'VI', 'IV']):
    """Reorder *dataframe* rows so samples are grouped by `covariate` value.

    Rows are stacked in the order given by `categoryOrder`; rows whose
    `covariate` value is not listed in `categoryOrder` are dropped.
    (Default never mutated, so the mutable default list is safe here.)

    Parameters
    ----------
    dataframe : pandas.DataFrame
    covariate : str
        Column holding the grouping category (e.g. 'CST').
    categoryOrder : list of str
        Desired top-to-bottom order of the category values.

    Returns
    -------
    pandas.DataFrame with rows regrouped in `categoryOrder`.
    """
    # Removed an unused `pds.Categorical` local and the dead commented-out
    # loop over its categories; behavior is unchanged.
    chunks = [dataframe.loc[dataframe[covariate] == category, :]
              for category in categoryOrder]
    return pds.concat(chunks, axis=0)
# +
# Add a LDominant vs LDepleted information variable
# Add a Lactobacillus Dominant vs Depleted label derived from the CST:
# CSTs I/II/III/V/VII are Lactobacillus-dominated; VI and IV are depleted.
desi_neg_vmet2['LDom'] = 'NA'
desi_neg_vmet2.loc[desi_neg_vmet2['CST'].isin(['I', 'II', 'III', 'V', 'VII']), 'LDom'] = 'Lactobacillus Dominant'
desi_neg_vmet2.loc[desi_neg_vmet2['CST'].isin(['VI', 'IV']), 'LDom'] = 'Lactobacillus Depleted'
desi_neg_vmet['LDom'] = 'NA'
desi_neg_vmet.loc[desi_neg_vmet['CST'].isin(['I', 'II', 'III', 'V', 'VII']), 'LDom'] = 'Lactobacillus Dominant'
desi_neg_vmet.loc[desi_neg_vmet['CST'].isin(['VI', 'IV']), 'LDom'] = 'Lactobacillus Depleted'
# -
# The plotHeatmap function is defined first and then used with the results from VMET and VMET2 to generate the heatmaps shown in Figure 1 B.
def plotHeatmap(dataset, features, row_covariate, row_covariate2=None, categoryOrder=['I', 'II', 'III', 'V', 'VII', 'VI', 'IV'], row_cluster=True):
    """Draw a seaborn clustermap of `features` with samples grouped by CST.

    Samples (columns of the heatmap) are ordered by `row_covariate`
    following `categoryOrder`; features are optionally Ward-clustered.
    Column-color strips annotate `row_covariate` and, when given, a
    secondary covariate `row_covariate2` (e.g. 'LDom').

    Returns the seaborn ClusterGrid so callers can save/inspect the figure.
    """
    nColors = len(dataset[row_covariate].unique())
    cmap = ListedColormap(sns.color_palette("deep", nColors))
    # Regroup samples by category so columns appear in categoryOrder.
    dataset_ordered = assembleMat(dataset, row_covariate, categoryOrder)
    Xmat = dataset_ordered.loc[:, features]
    col_colors_vec = dataset_ordered[row_covariate]
    col_colors = [col_colors_vec.astype('category').cat.codes.map(cmap)]
    if row_covariate2 is not None:
        nColors2 = len(dataset[row_covariate2].unique())
        cmap_cat2 = ListedColormap(sns.color_palette("Set1", nColors2))
        col_colors_vec_2 = dataset_ordered[row_covariate2]
        col_colors.append(col_colors_vec_2.astype('category').cat.codes.map(cmap_cat2))
    # z_score=0 standardizes each feature (row of Xmat.T) before plotting.
    metaboMap = sns.clustermap(Xmat.T, row_cluster=row_cluster, method='ward', z_score=0,
                               yticklabels=False, col_colors=col_colors, cmap='RdPu',
                               xticklabels=False, col_cluster=False)
    # Invisible bars only register legend entries for the category colors.
    for idx, label in enumerate(col_colors_vec.astype('category').cat.categories):
        metaboMap.ax_col_dendrogram.bar(0, 0, color=cmap(idx), label=label, linewidth=0)
    metaboMap.ax_col_dendrogram.legend(loc='center', ncol=8)
    if row_covariate2 is not None:
        # BUG FIX: this loop and legend previously ran unconditionally and
        # raised NameError (col_colors_vec_2/cmap_cat2 undefined) whenever
        # row_covariate2 was None.
        for idx, label in enumerate(col_colors_vec_2.astype('category').cat.categories):
            metaboMap.ax_col_colors.bar(0, 0, color=cmap_cat2(idx), label=label, linewidth=0)
        metaboMap.ax_col_colors.legend(loc='upper center', bbox_to_anchor=(0.5, +2.5), ncol=2)
    metaboMap.cax.set_ylabel('z(intensity)')
    return metaboMap
# Generate the VMET2 heatmap
# VMET2: cluster features (rows) and save; the resulting row order is reused
# below so the two cohorts' heatmaps are directly comparable.
image_vmet2 = plotHeatmap(desi_neg_vmet2, dnegMetLDep_vmet2, 'CST', 'LDom')
image_vmet2.savefig('./LinearMixedModels_DESI-MS/ExampleFeaturesVMET2_Heatmap_Figure1.png', dpi=150)
#image_vmet2.savefig('VMET2_Heatmap_Figure1.eps', dpi=150)
#image_vmet2.savefig('VMET2_Heatmap_Figure1.svg', dpi=150)
# Generate the VMET heatmap
# VMET features are reordered by VMET2's dendrogram (row_cluster=False keeps
# that order), so matched features line up across the two figures.
image_vmet = plotHeatmap(desi_neg_vmet, dnegMetLDep_vmet[image_vmet2.dendrogram_row.reordered_ind], 'CST', 'LDom', row_cluster=False)
image_vmet.savefig('./LinearMixedModels_DESI-MS/VMET_Heatmap_Figure1.png', dpi=150)
#image_vmet.savefig('VMET_Heatmap_Figure1.eps', dpi=150)
#image_vmet.savefig('VMET_Heatmap_Figure1.svg', dpi=150)
| Code/CST Typing by DESI-MS/Linear mixed models - replication in VMETV and MET2 Heatmaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
import re, multiprocessing, os
import spacy
from spacy.tokens.doc import Doc
from gensim.models import Word2Vec
# ### Data Import
# Grab the data from: https://www.kaggle.com/c/word2vec-nlp-tutorial/data
# IMDB sentiment training set (Kaggle word2vec-nlp-tutorial), tab-separated.
train = pd.read_csv('data/labeledTrainData.tsv', sep='\t')
print(train.shape)
train.head()
# Distribution of review lengths (in whitespace tokens).
train['review'].apply(lambda x: len(x.split())).hist(bins=50)
# ### Write Text to Disk (Optional)
# +
# Dump each review (optionally POS-tagged via spaCy) one sentence per line.
# NOTE(review): transform_doc is defined in the "Helper Functions" section at
# the bottom of this notebook — run that cell first when USE_SPACY_POS=True.
# NOTE(review): mode='a' appends, so re-running this cell duplicates the corpus.
count = 0
USE_SPACY_POS = False
VERBOSE = False
with open('data/plain_text/train_sentences.txt', mode='a', encoding='utf-8') as f:
    for i, v in train['review'].iteritems():
        try:
            d = transform_doc(v) if USE_SPACY_POS else v
            for line in d.split('\n'):
                if len(line) > 0:
                    f.write(line+'\n')
        except Exception as e:
            print(e)
            if VERBOSE:
                print('could not parse: {}'.format(v))
        count += 1
        if count % 1000 == 0:
            print('Finished: {}'.format(count))
# -
# ### Train W2V Model
# Use generator if data is too big for memory, otherwise just put in a list and iterate.
class ParsedSentenceGenerator(object):
    """Stream whitespace-tokenised lines from every .txt file in a directory.

    Restartable iterable (gensim Word2Vec re-iterates it per epoch), so the
    corpus never has to fit in memory.
    """

    def __init__(self, dirname, lower_words=False, verbose=False):
        self.dirname = dirname          # directory containing the .txt corpus files
        self.verbose = verbose          # if True, print lines that fail to tokenise
        self.lower_words = lower_words  # if True, lowercase every token

    def __iter__(self):
        """Yield one list of tokens per non-empty corpus line."""
        for fname in os.listdir(self.dirname):
            if fname[-4:] == '.txt':
                for line in open(os.path.join(self.dirname, fname), encoding='utf-8'):
                    try:
                        split_line = line.split()
                        if self.lower_words:
                            split_line = [x.lower() for x in split_line]
                        yield split_line
                    except Exception as e:
                        print(e)
                        # BUG FIX: was a bare `verbose` (NameError on any
                        # exception) — must read the instance attribute.
                        if self.verbose:
                            print('Line Failed: ' + line)
# Restartable corpus iterator over the plain-text dump written above.
s = ParsedSentenceGenerator(dirname='data/plain_text/', lower_words=True)
N_WORKERS = multiprocessing.cpu_count() - 1
print('Number of workers: '+ str(N_WORKERS))
EMBED_SIZE = 32
# CBOW (sg=0) with negative sampling (hs=0, negative=7).
# NOTE(review): `size`/`iter` are the gensim 3.x parameter names — they were
# renamed vector_size/epochs in gensim 4; pin gensim accordingly.
w2v_model = Word2Vec(sentences=s,
                     size=EMBED_SIZE,
                     window=9,
                     min_count=3,
                     sample=0.001,
                     seed=42,
                     workers=N_WORKERS,
                     sg=0,
                     hs=0,
                     negative=7,
                     iter=14,
                     compute_loss=True
                     )
len(w2v_model.wv.vocab)
w2v_model.most_similar(positive='movie')
# NOTE(review): 'word|POS' keys only exist when the corpus was written with
# USE_SPACY_POS=True; on the plain corpus these two lookups raise KeyError.
w2v_model.most_similar(positive='movie|NOUN')
w2v_model.most_similar(positive='good|ADJ')
w2v_model.save('models/w2v_model_'+str(EMBED_SIZE)+'_plaintext')
# Useful Docs:
# * https://radimrehurek.com/gensim/models/word2vec.html
# Useful Papers:
# * word2vec Parameter Learning Explained: https://arxiv.org/abs/1411.2738
# * Comparative study of word embedding methods in topic segmentation https://ac.els-cdn.com/S1877050917313480/1-s2.0-S1877050917313480-main.pdf?_tid=610efaa2-171a-11e8-a5b4-00000aacb360&acdnat=1519226450_2f4ec995830aa2e27b566f56ad81af8e
# ### Helper Functions
# +
nlp = spacy.load('en_core_web_sm')
# Collapse spaCy's fine-grained entity labels into coarser tags: every
# "named thing" type maps to ENT; numeric/temporal types keep their own tag.
# Used by represent_word as 'token|TAG' suffixes.
LABELS = {
    'ENT': 'ENT',
    'PERSON': 'ENT',
    'NORP': 'ENT',
    'FAC': 'ENT',
    'ORG': 'ENT',
    'GPE': 'ENT',
    'LOC': 'ENT',
    'LAW': 'ENT',
    'PRODUCT': 'ENT',
    'EVENT': 'ENT',
    'WORK_OF_ART': 'ENT',
    'LANGUAGE': 'ENT',
    'DATE': 'DATE',
    'TIME': 'TIME',
    'PERCENT': 'PERCENT',
    'MONEY': 'MONEY',
    'QUANTITY': 'QUANTITY',
    'ORDINAL': 'ORDINAL',
    'CARDINAL': 'CARDINAL'
}
pre_format_re = re.compile(r'^[\`\*\~]')   # one leading markdown formatting char
post_format_re = re.compile(r'[\`\*\~]$')  # one trailing markdown formatting char
url_re = re.compile(r'\[([^]]+)\]\(%%URL\)')  # markdown link whose target was already masked as %%URL
link_re = re.compile(r'\[([^]]+)\]\(https?://[^\)]+\)')  # markdown link with a literal http(s) target
def strip_meta(text):
    """Strip markdown links/formatting and unescape HTML entities from *text*.

    Non-string input (e.g. NaN from pandas) yields the empty string.
    """
    # isinstance instead of `type(text) == str`: idiomatic and subclass-safe.
    if not isinstance(text, str):
        return ''
    text = link_re.sub(r'\1', text)  # [label](http...) -> label
    # BUG FIX: the entity names had been HTML-unescaped in the source itself,
    # turning these into no-op replace('>', '>') calls; restore the intended
    # &gt;/&lt; unescaping.
    text = text.replace('&gt;', '>').replace('&lt;', '<')
    text = pre_format_re.sub('', text)
    text = post_format_re.sub('', text)
    return text
def represent_word(word):
    """Encode a spaCy token as ``text|TAG``; URLs collapse to ``%%URL|X``.

    TAG is the coarse entity label from LABELS when the token carries an
    entity type, otherwise its part-of-speech, falling back to '?'.
    """
    if word.like_url:
        return '%%URL|X'
    # Replace any internal whitespace so the token stays one w2v "word".
    cleaned = re.sub(r'\s', '_', word.text)
    label = LABELS.get(word.ent_type_, word.pos_) or '?'
    return f'{cleaned}|{label}'
def merge_clean_sentence(nlp, text, collapse_punctuation=True, collapse_phrases=True):
    """Parse *text* with spaCy and merge punctuation runs / noun chunks
    into single tokens.

    Parameters
    ----------
    nlp : spaCy Language pipeline.
    text : str to parse.
    collapse_punctuation : merge each word with the punctuation run that
        immediately follows it, keeping the word's tag/lemma/ent_type.
    collapse_phrases : merge noun chunks into one token carrying the chunk
        root's tag/lemma/ent_type.

    Returns the (mutated) spaCy Doc.
    NOTE(review): Doc.merge/Span.merge belong to the spaCy 1.x/2.x API
    (removed in spaCy 3, replaced by Doc.retokenize) — confirm pinned version.
    """
    doc = nlp(text)
    if collapse_punctuation:
        spans = []
        for word in doc[:-1]:
            # Keep only words directly followed by punctuation.
            if word.is_punct:
                continue
            if not word.nbor(1).is_punct:
                continue
            start = word.i
            end = word.i + 1
            # Extend the span across the whole run of trailing punctuation.
            while end < len(doc) and doc[end].is_punct:
                end += 1
            span = doc[start : end]
            # Record char offsets first; merging while iterating would
            # invalidate token indices.
            spans.append(
                (span.start_char, span.end_char,
                 {'tag': word.tag_, 'lemma': word.lemma_, 'ent_type': word.ent_type_})
            )
        for start, end, attrs in spans:
            doc.merge(start, end, **attrs)
    if collapse_phrases:
        # list(...) snapshots the chunks because merging mutates the doc.
        for np in list(doc.noun_chunks):
            np.merge(tag=np.root.tag_, lemma=np.root.lemma_, ent_type=np.root.ent_type_)
    return doc
def transform_doc(text):
    """Convert raw text into newline-separated sentences of ``word|TAG`` tokens.

    Noun chunks are collapsed first (punctuation is left as-is); blank
    sentences are skipped. Returns '' when nothing remains, otherwise the
    joined sentences with a trailing newline.
    """
    parsed = merge_clean_sentence(nlp, text, collapse_punctuation=False, collapse_phrases=True)
    sentences = [
        ' '.join(represent_word(tok) for tok in sent if not tok.is_space)
        for sent in parsed.sents
        if sent.text.strip()
    ]
    return '\n'.join(sentences) + '\n' if sentences else ''
# -
# Example:
# Smoke test of the pipeline. The <NAME> placeholders come from upstream
# anonymisation of the example text.
sentence = "<NAME> is a musician. He was born in 1941. He is not related to <NAME>, who was the former President of the United States of America."
print(sentence)
print(transform_doc(sentence))
| 00_word_embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import os
import pickle as pkl
from sklearn.linear_model import Lasso, LassoCV, LogisticRegressionCV, LogisticRegression
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, KFold, cross_val_predict
from sklearn.metrics import r2_score, explained_variance_score, normalized_mutual_info_score, \
mutual_info_score, mean_absolute_error, make_scorer
from sklearn.dummy import DummyRegressor
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.ensemble import IsolationForest
from skll.metrics import spearman, pearson
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, Normalize
import seaborn as sns
from IPython.display import SVG,display,set_matplotlib_formats
set_matplotlib_formats('svg')
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
from pylab import rcParams
rcParams['figure.figsize'] = 5,5
# -
# Custom model implementations and functions are stored in `src/dairyml.py`
from dairyml import PerfectClassifierMeanRegressor, plot_r2, BoundedLasso, BoundedLassoPlusLogReg, plot_coefficients, DummyRegressorCustom
# ## Import the Data
# Load the data from the csv file, and set FoodCode to be the data index
# Training table keyed by the USDA FoodCode identifier.
data = pd.read_csv('../data/training_for_GS_122118.csv')
data = data.set_index('FoodCode')
# Get the indices of all columns except description and lactose content - these columns are the numerical features that will be used as model input
#
#
numerical_features = [col for col in data.columns if col not in ['Food_Description','lac.per.100g']]
# print(numerical_features)
# ## Assess correlation of features with target variable
# ### Spearman's rank correlation coefficient
correlations = pd.DataFrame()
# Spearman correlation of every feature against the lactose target column.
correlations['SRC'] = data[list(numerical_features) + ['lac.per.100g']].corr(method='spearman')['lac.per.100g']
# ### Mutual Information
# Raw and normalized mutual information, one feature at a time.
for col in numerical_features:
    correlations.loc[col,'MI'] = mutual_info_score(data[col],data['lac.per.100g'])
    correlations.loc[col,'NMI'] = normalized_mutual_info_score(data[col],data['lac.per.100g'])
# ### Report average values for each correlation measure
correlations.abs().mean(axis=0)
# +
# correlations.to_excel('../data/correlations.xlsx')
# -
# ## Prepare data for modelling
# ### Standardize the input features
# - Use only the numerical features as model input `X`
# - Use sklearn's `StandardScaler` on these features: this standardizes features by removing the mean and scaling to unit variance
# - Convert the output of `StandardScaler` back to a dataframe for convenience
ss = StandardScaler()
ss.fit(data[numerical_features])
X = (data[numerical_features] - ss.mean_) / np.sqrt(ss.var_) #done by hand rather than fit_transform just to use same syntax as test script
# Save mean and variance to file for use in test script
np.savetxt('../scaling/train_feature_means.csv',ss.mean_,delimiter=',')
np.savetxt('../scaling/train_feature_variances.csv',ss.var_,delimiter=',')
# ### Remove outliers
# `contamination` parameter was hand-tuned based on the appearance of PCA & t-SNE visualizations below, increasing until data points that appeared to be outliers were removed
iso = IsolationForest(contamination=.013,random_state=7)
# fit_predict labels inliers as 1 and outliers as -1.
outliers = iso.fit_predict(X)
print('Outliers removed: {}'.format(sum(outliers == -1)))
# ### Use lactose as prediction target `Y`
Y = data['lac.per.100g']
# Keep only inlier rows for modelling; persist the filtered table.
X_in = X[outliers == 1]
Y_in = Y[outliers == 1]
data_outliers_removed = data.loc[outliers == 1,:]
data_outliers_removed.to_csv('../data/training_for_GS_122118_outliers_removed.csv')
# ## Distribution of target variable, class imbalance
# Binarize lactose into lactose / non-lactose classes for imbalance checks.
Y_binary = (Y_in != 0).replace(True,'lactose').replace(False,'non-lactose')
# #### Class absolute numbers
Y_binary.value_counts()
# #### Class proportions
Y_binary.value_counts().divide(Y_binary.value_counts().sum()).round(2)
# #### Distribution of all lactose values
ax = sns.distplot(Y_in,bins=20)
_ = ax.set_title("Distribution of all lactose values")
ax.figure.savefig('../graphics/lactose_distribution_all.png')
# reload and save as tiff
img = Image.open(open('../graphics/lactose_distribution_all.png','rb'))
img.save('../graphics/lactose_distribution_all.tiff',dpi=(300,300))
# #### Distribution of nonzero lactose values
ax = sns.distplot(Y_in[Y_in!=0],bins=20)
_ = ax.set_title("Distribution of nonzero lactose values")
ax.figure.savefig('../graphics/lactose_distribution_nonzero.png')
# reload and save as tiff
img = Image.open(open('../graphics/lactose_distribution_nonzero.png','rb'))
img.save('../graphics/lactose_distribution_nonzero.tiff',dpi=(300,300))
# ## Visualization: PCA & t-SNE
# Create axes to plot visualization results
f, axes = plt.subplots(2, 2, figsize=(10,5))
# Create a colormap to outline the outliers
# +
# Two-entry colormap used for scatter edge colors: presumably index -1
# (outlier) renders red and 1 (inlier) transparent — verify against plots.
red = np.array([212/256, 17/256, 17/256, 1])
nocolor = np.array([1,1,1,0])
colors = np.array([red,nocolor])
newcmp = ListedColormap(colors)
# -
newcmp
# ### PCA
# Here we use PCA to visualize the features along two axes. Two plots are produced. In the first, the data points are colored by lactose value. In the second, the data points are colored by the logarithm of the lactose values. Since most lactose values are low, with a few high exceptions, coloring by lactose value directly obscures the differences on the lower end of the distribution, so coloring by log lactose attempts to fix that.
pca = PCA(n_components=2)
# Fit on inliers only, but project ALL samples so outliers can be shown too.
pca.fit(X_in)
pca_results = pd.DataFrame(pca.transform(X),columns=['PC1','PC2'],index=X.index)
pca_results['outlier_status'] = outliers
pca_results['lac.per.100g'] = Y
# +0.001 avoids log(0); the "nonzero" column deliberately leaves zeros as
# -inf so they drop out of that coloring.
pca_results['log.lac.per.100g'] = np.log(pca_results['lac.per.100g'] + .001)
pca_results['log_nonzero.lac.per.100g'] = np.log(pca_results['lac.per.100g'])
pca_results = pca_results.rename(columns={'lac.per.100g':'Lactose (g/100g)',
                                          'log.lac.per.100g':'Log Lactose (g/100g)',
                                          'log_nonzero.lac.per.100g':'Log Lactose (g/100g) (Nonzero)'})
# cmap = cm.get_cmap('Spectral')
ax = pca_results.plot.scatter('PC1','PC2',c='Log Lactose (g/100g) (Nonzero)',colormap='winter',ax=axes[0,0], edgecolors=newcmp(outliers))
axes[0,0].set_xlabel('PC1')
axes[0,0].set_title('PCA, colored by log lactose (nonzero only)')
plt.show()
ax = pca_results.plot.scatter('PC1','PC2',c='Log Lactose (g/100g)',colormap='winter',ax=axes[0,1], edgecolors=newcmp(outliers))
axes[0,1].set_xlabel('PC1')
axes[0,1].set_title('PCA, colored by log lactose')
# ### t-SNE
# The same plots are created using t-SNE, a different visualization technique.
tsne = TSNE(n_components=2,perplexity=30,random_state=7)
# t-SNE embeds all samples (outliers included) into 2D.
tsne_results = pd.DataFrame(tsne.fit_transform(X),columns=['axis 1','axis 2'],index=X.index)
tsne_results['outlier_status'] = outliers
# +
tsne_results['lac.per.100g'] = Y
# Same log-coloring scheme as the PCA plots above.
tsne_results['log.lac.per.100g'] = np.log(tsne_results['lac.per.100g'] + .001)
tsne_results['log_nonzero.lac.per.100g'] = np.log(tsne_results['lac.per.100g'])
tsne_results = tsne_results.rename(columns={'lac.per.100g':'Lactose (g/100g)',
                                            'log.lac.per.100g':'Log Lactose (g/100g)',
                                            'log_nonzero.lac.per.100g':'Log Lactose (g/100g) (Nonzero)'})
ax = tsne_results.plot.scatter('axis 1','axis 2',c='Log Lactose (g/100g) (Nonzero)',colormap='winter',ax=axes[1,0], edgecolors=newcmp(outliers))
axes[1,0].set_xlabel('axis 1')
axes[1,0].set_title('t-SNE, colored by log lactose (nonzero only)')
ax = tsne_results.plot.scatter('axis 1','axis 2',c='Log Lactose (g/100g)',colormap='winter',ax=axes[1,1], edgecolors=newcmp(outliers))
axes[1,1].set_xlabel('axis 1')
axes[1,1].set_title('t-SNE, colored by log lactose')
# -
f.tight_layout()
f
f.savefig('../graphics/pca_tsne_plots.png')
# reload and save as tiff
img = Image.open(open('../graphics/pca_tsne_plots.png','rb'))
img.save('../graphics/pca_tsne_plots.tiff',dpi=(300,300))
# Export pca and tsne results to file
pca_results.to_csv('../graphics/pca_results.csv')
tsne_results.to_csv('../graphics/tsne_results.csv')
# ### Save preprocessed data to pickle files
# +
data_to_save = [X_in, Y_in]
data_dir = '../pkl/data/'
try:
    os.makedirs(data_dir)
except FileExistsError:
    pass
save_string = "data_outliers_removed"
# NOTE(review): `f` here shadows the matplotlib figure bound above.
with open(data_dir + save_string, "wb" ) as f:
    pkl.dump(data_to_save,f)
print('saved to {}'.format(data_dir + save_string))
| src/preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# + [markdown] colab_type="text" id="NjHnSAbHrInP"
# # Single Arbitrary Rotation Gate
# The Arbitrary Rotation Gate can be rotated at any angle you like.
#
# ## What we'll learn this time
# 1. About Rx,Ry,Rz,U1,U2,U3 gates
# 2. Make circuit
#
# ## Install Blueqat
# Install Blueqat from pip.
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="Z2z6oBnToiqa" outputId="413f3f56-1b22-40f7-fcff-4621af4596d4"
# !pip install blueqat
# -
# ### Rx, Ry, Rz
# Rx, Ry, Rz gates can be implemented by the following.
#
# | Rx | Ry | Rz |
# |:-:|:-:|:-:|
# |`rx(θ)`|`ry(θ)`|`rz(θ)`|
#
# $\theta$ is the rotation angle for each axis.
#
# ###### Rx gate
# +
from blueqat import Circuit
import math
# Rotate qubit 0 by pi/2 around the X axis, measure, sample 100 shots.
Circuit().rx(math.pi/2)[0].m[:].run(shots=100)
# -
# ###### Ry gate
# +
from blueqat import Circuit
import math
# Rotate qubit 0 by pi/2 around the Y axis, measure, sample 100 shots.
Circuit().ry(math.pi/2)[0].m[:].run(shots=100)
# -
# ###### Rz gate
# +
from blueqat import Circuit
import math
# Rotate qubit 0 by pi/2 around the Z axis (phase only), measure, sample 100 shots.
Circuit().rz(math.pi/2)[0].m[:].run(shots=100)
# -
# ## U1, U2, U3 gates
# U1, U2, U3 gates can be implemented by the following.
#
# | U1 | U2 | U3 |
# |:-:|:-:|:-:|
# |`u1(λ)`or`phase(λ)`|`u2(φ,λ)`|`u3(φ,λ,θ)`|
# ###### U1 gate
# +
from blueqat import Circuit
import math
# u1(lambda) and phase(lambda) are aliases for the same phase gate.
Circuit().u1(math.pi)[0].m[:].run(shots=100)
#or
Circuit().phase(math.pi)[0].m[:].run(shots=100)
# -
# ###### U2 gate
# +
from blueqat import Circuit
import math
# u2(phi, lambda): two-parameter single-qubit gate.
Circuit().u2(0, math.pi)[0].m[:].run(shots=100)
# -
# ###### U3 gate
# +
from blueqat import Circuit
import math
# u3(theta, phi, lambda): fully general single-qubit rotation.
Circuit().u3(math.pi/2,0,math.pi)[0].m[:].run(shots=100)
# -
# ## Rx, Ry, Rz gates
# These gate rotates the bit around each axis.
#
# <img src="../tutorial-ja/img/007/007_02_5.png" width="80%">
#
# All the states of one qubit can be composed of this gate combination because it can be rotated around each axis.
# ## U1, U2, U3 gates
# U1, U2, U3 are represented by the following, respectively.
#
# <img src="../tutorial-ja/img/007/007_02_6.png" width="85%">
#
# The U1 gate is a general phase gate.
# U2 uses 2 parameters and U3 uses 3 parameters, so all states can be created.
| tutorial/007_rotation_en.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append('..')
sys.path.append('../..')
from stats import *
from sentiment_stats import *
from peewee import SQL
from database.models import RawFacebookComments, RawTwitterComments, RawInstagramComments, RawYouTubeComments, RawHashtagComments
# Social network under analysis and the peewee model holding its comments.
rede_social = 'YouTube'
modelo = RawYouTubeComments
# Color palettes: emotions (two orderings) and valence (two orderings).
cores = ['#FFA726', '#66BB6A', '#42A5F5', '#FFEE58', '#EF5350', '#AB47BC', '#C8C8C8']
cores2 = ['#FFA726', '#AB47BC', '#FFEE58', '#C8C8C8', '#EF5350', '#66BB6A', '#42A5F5']
cores_val = ['#EF5350', '#C8C8C8', '#66BB6A']
cores_val2 = ['#66BB6A', '#EF5350', '#C8C8C8']
# Emotion labels (Portuguese): joy, surprise, sadness, fear, anger, disgust, neutral.
sentimentos = ['ALEGRIA', 'SURPRESA', 'TRISTEZA', 'MEDO', 'RAIVA', 'DESGOSTO', 'NEUTRO']
valencia = ['POSITIVO', 'NEGATIVO', 'NEUTRO']
# Zero-initialized ordered counters.  NOTE(review): OrderedDict is presumably
# re-exported by the `stats`/`sentiment_stats` wildcard imports — confirm.
valencia_dict = OrderedDict()
for val in valencia:
    valencia_dict[val] = 0
sentimentos_dict = OrderedDict()
for sentimento in sentimentos:
    sentimentos_dict[sentimento] = 0
# Raw-SQL fragments combined with AND into WHERE clauses below.
default_clause = [
    SQL('length(clean_comment) > 0'),
]
positivo_clause = [
    SQL('length(emotion) > 0 AND length(valence) > 0'),
    SQL('emotion in ("ALEGRIA", "SURPRESA") AND valence = "POSITIVO"')
]
negativo_clause = [
    SQL('length(emotion) > 0 AND length(valence) > 0'),
    SQL('emotion in ("TRISTEZA", "RAIVA", "MEDO", "DESGOSTO") AND valence = "NEGATIVO"')
]
neutro_clause = [
    SQL('length(emotion) > 0 AND length(valence) > 0'),
    SQL('emotion in ("NEUTRO") AND valence = "NEUTRO"')
]
# Union of the three emotion/valence combinations above.
general = default_clause + [
    SQL('length(emotion) > 0 AND length(valence) > 0'),
    SQL("""
    (emotion in ("ALEGRIA", "SURPRESA") AND valence = "POSITIVO")
    OR
    (emotion in ("TRISTEZA", "RAIVA", "MEDO", "DESGOSTO") AND valence = "NEGATIVO")
    OR
    (emotion in ("NEUTRO") AND valence = "NEUTRO")
    """)
]
# -
# ### Emoções gerais dos comentários : YouTube
# +
# Overall counts and per-valence query sets for the whole network.
# NOTE(review): this .where() receives the clause list directly, unlike the
# reduce(operator.and_, ...) pattern used everywhere else — confirm peewee
# treats both forms identically.
total_comentarios = modelo.select() \
                          .where(default_clause) \
                          .count()
comentarios_positivos = modelo.select() \
                          .where(reduce(operator.and_, default_clause + positivo_clause)) \
                          .order_by(modelo.hash)
comentarios_negativos = modelo.select() \
                          .where(reduce(operator.and_, default_clause + negativo_clause)) \
                          .order_by(modelo.hash)
comentarios_neutros = modelo.select() \
                          .where(reduce(operator.and_, default_clause + neutro_clause)) \
                          .order_by(modelo.hash)
comentarios = modelo.select() \
                          .where(reduce(operator.and_, general)) \
                          .order_by(modelo.hash)
# -
# Per-emotion counts, summary print-out and totals charts.
alegria, surpresa, tristeza, medo, raiva, desgosto, positivo, negativo, neutro = load_emocoes_comentarios(comentarios_positivos, comentarios_negativos, comentarios_neutros)
print_statistics(rede_social, total_comentarios, comentarios_positivos, comentarios_negativos, comentarios_neutros)
# #### Contagem total de comentários : Valência
graph_valence_total(rede_social, cores_val2, valencia, positivo, negativo, neutro)
# #### Contagem total de comentários : Emoções
graph_sentimentos_total(rede_social, cores, sentimentos, alegria, surpresa, tristeza, medo, raiva, desgosto, neutro)
# ### Emoções por candidato : YouTube
# +
def analisar_candidato(nome_candidato):
    """Run the full per-candidate sentiment report.

    Filters the comments of a single candidate, loads the emotion/valence
    counts and draws the valence and emotion totals charts.  Replaces four
    copy-pasted cells that differed only in the candidate name.

    Args:
        nome_candidato: value of the ``candidate`` column to filter on.
    """
    candidato_c = [modelo.candidate == nome_candidato]
    total_comentarios = modelo.select() \
                              .where(reduce(operator.and_, default_clause + candidato_c)) \
                              .count()
    comentarios_positivos = modelo.select() \
                              .where(reduce(operator.and_, default_clause + positivo_clause + candidato_c)) \
                              .order_by(modelo.hash)
    comentarios_negativos = modelo.select() \
                              .where(reduce(operator.and_, default_clause + negativo_clause + candidato_c)) \
                              .order_by(modelo.hash)
    comentarios_neutros = modelo.select() \
                              .where(reduce(operator.and_, default_clause + neutro_clause + candidato_c)) \
                              .order_by(modelo.hash)
    alegria, surpresa, tristeza, medo, raiva, desgosto, positivo, negativo, neutro = \
        load_emocoes_comentarios(comentarios_positivos, comentarios_negativos, comentarios_neutros)
    print_statistics(rede_social, total_comentarios, comentarios_positivos, comentarios_negativos, comentarios_neutros)
    # Contagem total de comentários : Valência
    graph_valence_total(rede_social, cores_val2, valencia, positivo, negativo, neutro)
    # Contagem total de comentários : Emoções
    graph_sentimentos_total(rede_social, cores, sentimentos, alegria, surpresa, tristeza, medo, raiva, desgosto, neutro)
# NOTE(review): the original cells also built an unused `comentarios` query
# from the `general` clause; it was never consumed, so it is not recreated.
# -
# #### <NAME>
analisar_candidato('<NAME>')
# #### <NAME>
analisar_candidato('<NAME>')
# #### <NAME>
analisar_candidato('<NAME>')
# #### <NAME>
analisar_candidato('<NAME>')
| src/statistics/sentiments/notebooks/statistics_sentimento_youtube.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %matplotlib inline
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
sns.set(style='ticks', font_scale=1.5)
# -
# Group membership per subject (used to split/color the plots below).
group_info = pd.read_csv('/Volumes/group/awagner/sgagnon/SST/scripts/subjects_groups.csv')
group_info.head()
filepath='/Volumes/group/awagner/sgagnon/SST/nav_data/route_coding_sarah.csv'
d = pd.read_csv(filepath)
d.head()
# Sanity check: environment counts per subject.
d.groupby(['subid']).mean().env.value_counts()
# Exclude trials where no map was shown.
d = d.loc[d.code1 != 'NO MAP SHOWN',:]
codes = d.code1.unique(); codes
# Trial counts per subject/run/rep/route-code, pivoted to one column per code.
data = d.groupby(['subid', 'run_type', 'rep', 'code1']).count().reset_index()
data = pd.pivot_table(data, index=['subid', 'run_type', 'rep'], columns='code1', values='env', fill_value=0).reset_index()
# Parenthesized print is identical in Python 2 for a single argument and keeps
# the notebook forward-compatible with Python 3.
print(data.shape)
data.head()
data_counts = d.groupby(['subid', 'run_type', 'rep']).count().reset_index()
print(data_counts.shape)
data_counts.head()
# Total trials per subject/run/rep, then convert the counts to proportions.
data['total_trials'] = data_counts.env
data.head()
d1 = data.loc[:,codes[:4]]
d2 = data.loc[:,'total_trials']
proportions = d1.divide(d2, axis='rows')
proportions.head()
proportions[['subid', 'run_type', 'rep']] = data[['subid', 'run_type', 'rep']]
proportions.head()
# Long format plus group labels for plotting.
proportions = pd.melt(proportions, id_vars=['subid', 'run_type', 'rep'])
proportions.head()
proportions = proportions.merge(group_info)
proportions.head()
# +
# Habit runs only, excluding two pilot subjects; overlay per-subject points.
data = proportions.loc[(proportions.run_type =='habit') & (~proportions.subid.isin(['sst01', 'sst02']))]
sns.factorplot(x='code1', y='value',
               ci=68, units='subid', data=data, aspect=2)
sns.stripplot(x='code1', y='value', jitter=True, hue='group',
              palette=['dodgerblue', 'orange'],size=7,linewidth=1,
              data=data)
plt.ylabel('Proportion habit trials')
# -
# Distribution of the habit-trial proportion, and subjects below 50%.
proportions.loc[(proportions.run_type=='habit') &
                (proportions.code1 == 'habit'), 'value'].hist()
proportions.loc[(proportions.run_type=='habit') &
                (proportions.code1 == 'habit') &
                (proportions.value < .5)]
sns.factorplot(x='code1', y='value', col='run_type',
               ci=68, units='subid', data=proportions)
# +
# Shortcut runs: mean proportion per route code.
x_order = ['shortcut', 'backtrack', 'other', 'habit']
p = sns.factorplot(x='code1', y='value', order=x_order,
                   ci=68, units='subid', aspect=1.5,
                   data=proportions.loc[proportions.run_type == 'shortcut'])
p.set_xlabels('')
p.set_ylabels('proportion of trials')
sns.despine(trim=True)
# plt.savefig('/Volumes/group/awagner/sgagnon/SST/nav_data/group/results/shortcut_choices.png', dpi=300)
# -
data.merge(group_info).mean()
# +
# Same breakdown split by group and repetition.
data = proportions.loc[proportions.run_type == 'shortcut']
p = sns.factorplot(x='code1', y='value', hue='group',col='rep',
                   ci=68, units='subid', order=x_order, kind='bar',
                   aspect=1.5, dodge=.1, palette=['dodgerblue', 'orange'],
                   data=data.merge(group_info))
p.set_xlabels('')
p.set_ylabels('proportion of trials')
sns.despine(trim=True)
# plt.savefig('/Volumes/group/awagner/sgagnon/SST/nav_data/group/results/shortcut_choices_bygroup.png', dpi=300)
# -
| analysis/.ipynb_checkpoints/Routecoding_byrater-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import division
get_ipython().magic(u'matplotlib inline')
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.io as io
import pickle
import scipy.stats
# -
#SBJ = sys.argv[1]#raw_input('Enter SBJ ID to process:')#'EEG01'
# Subject ID to process (hard-coded here; scriptable via argv above).
SBJ = 'EEG01'
# In[3]:
#prj_dir = '/Volumes/hoycw_clust/PRJ_Error_eeg/'
# Project layout: results and per-subject data directories.
prj_dir = '/Users/sheilasteiner/Desktop/Knight_Lab/PRJ_Error_eeg/'
results_dir = prj_dir+'results/'
fig_type = '.png'
data_dir = prj_dir+'data/'
sbj_dir = data_dir+SBJ+'/'
# Paradigm variables (pickled dict) and behavioral log for the oddball task.
prdm_fname = os.path.join(sbj_dir,'03_events',SBJ+'_odd_prdm_vars.pkl')
with open(prdm_fname, 'rb') as f:
    prdm = pickle.load(f)
behav_fname = os.path.join(sbj_dir,'03_events',SBJ+'_behav_oddball.csv')
data = pd.read_csv(behav_fname)
# # Compute Accuracy
# +
# Per-block, per-condition accuracy.  Targets count hits; standards and
# oddballs count non-misses.
accurate_tar_ratio = []
accurate_std_ratio = []
accurate_odd_ratio = []
block_range = np.arange(np.max(data['Block']+1))
condition_labels = ['tar', 'std', 'odd']
accurate_trials = np.zeros((3,1))
# NOTE(review): the ratio array is hard-coded to 3 blocks; this breaks if
# np.max(data['Block']) > 2 — confirm the task always has 3 blocks.
accurate_ratio = np.zeros((3,3))
# Compute accuracy per condition
for ix in block_range:
    accurate_trials = np.zeros((3,1))
    for cond_ix in range(len(condition_labels)):
        block_data = data[data['Block'] == ix] #get all the trials in a certain block
        cond_trials = block_data[block_data['Condition'] == condition_labels[cond_ix]].index # find all the target trials
        for x in cond_trials:
            if condition_labels[cond_ix] == 'tar':
                if data.loc[x,'Hit'] == 1: #if got it right (Hit), add it to the list of correct ones
                    accurate_trials[cond_ix] +=1
            else:
                if data.loc[x,'Miss'] == 0: # same thing but count 1- number of misses/total as measure of accuracy
                    accurate_trials[cond_ix] +=1
        accurate_ratio[cond_ix,ix] = (accurate_trials[cond_ix]/np.size(cond_trials))# add the ratio of right/all to those (1 value for each block)
# Keep the full log before excluding training trials.
data_all = data
# Exclude: Training/Examples, first trial of each block
data = data[(data['Block']!=-1) & (data['ITI']>0)]
# -
# # Plot Accuracy
# plot for each block the number correct, separate by condition
f, axes = plt.subplots(1,3)
axes[0].plot(block_range, accurate_ratio[0,:], 'o')
plt.subplots_adjust(top=0.8,wspace=0.8)
axes[1].plot(block_range, accurate_ratio[1,:], 'o')
plt.subplots_adjust(top=0.8,wspace=0.8)
axes[2].plot(block_range, accurate_ratio[2,:], 'o')
plt.subplots_adjust(top=0.8,wspace=0.8)
# +
# NOTE(review): lineplots are only overlaid on axes[1] and axes[2] (not
# axes[0]), and the positional seaborn call style is the pre-0.12 API —
# confirm this is intentional / pinned to an old seaborn.
sns.lineplot(block_range, accurate_ratio[1,:], ax=axes[1], markers = 'True', marker = "o")
plt.subplots_adjust(top=0.8,wspace=0.8)
sns.lineplot(block_range, accurate_ratio[2,:], ax=axes[2], markers = 'True', marker = "o")
plt.subplots_adjust(top=0.8,wspace=0.8)
# Shared cosmetics: ticks, labels, limits and per-condition titles.
axes[0].set_xticks([0,1,2])
axes[1].set_xticks([0,1,2])
axes[2].set_xticks([0,1,2])
axes[0].set_xlabel('Block Number')
axes[1].set_xlabel('Block Number')
axes[2].set_xlabel('Block Number')
axes[0].set_ylabel('Accuracy Rate')
axes[1].set_ylabel('Accuracy Rate')
axes[2].set_ylabel('Accuracy Rate')
axes[0].set_ylim(0, 1.05)
axes[1].set_ylim(0, 1.05)
axes[2].set_ylim(0, 1.05)
axes[0].set_title('Target')
axes[1].set_title('Standard')
axes[2].set_title('Oddball')
f.suptitle(SBJ + ' Condition and Accuracy in Oddball Task') # can also get the figure from plt.gcf()
# Create the output directory on first use, then save the figure.
if os.path.isdir(results_dir + 'BHV/ODD/accuracy/') == False:
    os.makedirs(results_dir + 'BHV/ODD/accuracy/')
plt.savefig(results_dir+'BHV/ODD/accuracy/'+SBJ+'_acc_condition'+fig_type)
# -
| ODD01_prelim_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pwlf
# Synthetic data: 100 samples of a slow sine over [0, 1024].
x = np.linspace(0, 1024, 100)
y = np.sin(.01*x)
plt.figure()
plt.plot(x, y)
from GPyOpt.methods import BayesianOptimization
# Piecewise fit with quadratic segments; 4 segments -> 3 interior breakpoints.
my_pwlf = pwlf.PiecewiseLinFit(x, y, degree=2)
number_of_line_segments = 4
my_pwlf.use_custom_opt(number_of_line_segments)
# define domain for possible breakpoints
bounds = [{'name': 'break_1', 'type': 'discrete',
           'domain': np.arange(1, 1023)},
          {'name': 'break_2', 'type': 'discrete',
           'domain': np.arange(1, 1023)},
          {'name': 'break_3', 'type': 'discrete',
           'domain': np.arange(1, 1023)}]
max_iter = 120
def my_obj(x):
    """Objective for GPyOpt: sum-of-squared-residuals of the pwlf fit.

    Each row of ``x`` is one candidate set of interior breakpoints proposed
    by the optimizer; the returned vector holds the corresponding residual
    for the module-level ``my_pwlf`` model.
    """
    return np.array([my_pwlf.fit_with_breaks_opt(candidate) for candidate in x])
# Bayesian optimization over the discrete breakpoint domain defined above.
myBopt = BayesianOptimization(my_obj,
                              domain=bounds, model_type='GP',
                              initial_design_numdata=20,
                              initial_design_type='latin',
                              exact_feval=True, verbosity=True,
                              verbosity_model=False)
myBopt.run_optimization(max_iter=max_iter, verbosity=True)
print('\n \n Opt found \n')
print('Optimum number of line segments:', myBopt.x_opt)
print('Function value:', myBopt.fx_opt)
myBopt.plot_convergence()
# +
# perform the fit for the optimum
# The endpoints must be appended because the optimizer only proposes the
# interior breakpoints.
x_opt = list(myBopt.x_opt)
x_opt.append(x.min())
x_opt.append(x.max())
ssr = my_pwlf.fit_with_breaks(x_opt)
# predict for the determined points
xHat = np.linspace(min(x), max(x), num=10000)
yHat = my_pwlf.predict(xHat)
# plot the results
plt.figure()
plt.plot(x, y, 'o')
plt.plot(xHat, yHat, '-')
plt.show()
# -
ssr
| examples/EGO_integer_only.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="WA3KENYcLSy0" outputId="44e5737e-7daf-48d9-e077-a78a755e865b" colab={"base_uri": "https://localhost:8080/", "height": 493}
from google.colab import drive
drive.mount('/content/drive')
# !ln -s /content/drive/My\ Drive /mydrive
# !ls /mydrive
# !nvidia-smi
# + id="C_jVl7qY-RLU"
# %%time
# %%bash
# INSTALL MINICONDA
MINICONDA_INSTALLER_SCRIPT=Miniconda3-latest-Linux-x86_64.sh
MINICONDA_PREFIX=/usr/local
wget https://repo.continuum.io/miniconda/$MINICONDA_INSTALLER_SCRIPT
chmod +x $MINICONDA_INSTALLER_SCRIPT
# -b: batch (no prompts), -f: force over existing files, -p: install prefix.
./$MINICONDA_INSTALLER_SCRIPT -b -f -p $MINICONDA_PREFIX
which conda
# INSTALL system packages
# Audio tooling (sox, ffmpeg, libsndfile1) needed by the ASR pipeline.
apt-get update && apt-get install -qq bc tree sox ffmpeg libsndfile1
# + id="GRp0Hm-rLOXx" outputId="cd7684e5-d3bc-49ec-ae4b-37331cbe4608" colab={"base_uri": "https://localhost:8080/", "height": 85}
# %%time
# %%bash
set -e
# Restore the conda env from a tarball cached on Drive if present; otherwise
# build (or update) it from environment.yml and cache the result.
ENV_FILE=/mydrive/NeMo/environment.yml
# Env name is parsed from the first line of the YAML ("name: <env>").
ENV_NAME=$(head -n 1 $ENV_FILE | cut -d ':' -f 2 | cut -d ' ' -f 2)
ZIPFILE_NAME=$ENV_NAME-env.tar.gz
ENVS_PATH=/mydrive/envs
ZIPFILE=$ENVS_PATH/$ZIPFILE_NAME
# echo $FILE_NAME
if [ -f $ZIPFILE ]; then
  # Cache hit: unpack the env straight into /usr/local.
  echo "found $ZIPFILE"
  if [ ! -f "$ZIPFILE_NAME" ]; then cp $ZIPFILE ./ ; fi
  tar xzf $ZIPFILE_NAME -C /usr/local/
  #found /mydrive/envs/nemo-env.tar.gz
  #CPU times: user 7.84 ms, sys: 3.72 ms, total: 11.6 ms
  #Wall time: 2min 28s
else
  echo "/usr/local/envs/$ENV_NAME"
  if [ -d "/usr/local/envs/$ENV_NAME" ]; then
    echo "updating env $ENV_NAME"
    conda env update -f $ENV_FILE
  else
    echo "creating env from $ENV_FILE"
    conda env create -f $ENV_FILE
  fi
  # Cache the freshly built env back to Drive for the next session.
  cd /usr/local
  tar -czf $ZIPFILE_NAME envs
  cp $ZIPFILE_NAME $ENVS_PATH/
fi
# + id="-PHNPFF-81mk" outputId="327a67ba-fec2-4b0d-91ca-e477092b832c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
# %%bash
# INSTALL NeMo
# Create a dedicated Python 3.8 env; the pip installs below are kept
# commented out (run manually when NeMo itself needs (re)installing).
conda create -n nemo python=3.8 -y
conda init bash
source activate nemo
# cd /mydrive/NeMo/ && pip install -r requirements/requirements.txt
# cd /mydrive/NeMo/ && pip install -e .[asr]
#Wall time: 1min 59s
# + id="4LM9xfHkU7Qj" outputId="40ef69da-cd8d-4f5e-e213-00be0669fc10" colab={"base_uri": "https://localhost:8080/", "height": 340}
# %%time
# %%bash
source activate nemo
# GET LibriSpeech DATA
# Same Drive-caching pattern as the conda env: download + preprocess once,
# then reuse the tarball on later sessions.
# ls -alth /content/
FOLDER_NAME="LibriSpeech"
ZIPFILE=$FOLDER_NAME.tar.gz
if [ ! -f "/mydrive/data/$ZIPFILE" ]; then
  echo "download + process LibriSpeech"
  python /mydrive/NeMo/scripts/get_librispeech_data.py --data_root "/content" --data_sets "dev_clean,dev_other"
  tar -czf $ZIPFILE $FOLDER_NAME
  cp $ZIPFILE /mydrive/data/
  # Wall time: 3min 51s
else
  echo "found $ZIPFILE"
  cp /mydrive/data/$ZIPFILE ./
  tar xzf $ZIPFILE -C /content/
  # Wall time: 44.4 s
fi
# + id="38aGRV1MXi4-" outputId="ab02d546-8022-4754-acce-a9938ae80f2b" colab={"base_uri": "https://localhost:8080/", "height": 51} language="bash"
# source activate nemo
# #pip install wget wandb
# python -c "import torch; print(torch.cuda.is_available())"
# + id="xkTwfEnME4VV" outputId="9e72f261-5be6-4468-89ea-2170152aacd2" colab={"base_uri": "https://localhost:8080/", "height": 1000} language="bash"
# # TRAIN & EVALUATE
#
# source activate nemo
# python --version
# which python
# head -n 10 dev_clean.json > dummy_train.json
#
# export HOME=/mydrive # cause nemo cache-dir is in HOME folder, that where pretrained models are stored
#
# export WANDB_API_KEY=$(head -n 1 /mydrive/wandb.key | cut -d '=' -f 2 )
# export WANDB_PROJECT=nemo
# python -m wandb init --project $WANDB_PROJECT
#
# python
#
# import sys
# nemo_path = "/mydrive/NeMo"
# if nemo_path not in sys.path:
# sys.path.append(nemo_path)
# print(sys.path)
#
# from pprint import pprint
# import yaml
#
# import nemo.collections.asr as nemo_asr
# import pytorch_lightning as pl
# from nemo.collections.asr.models import EncDecCTCModel
#
# config_path = '/mydrive/NeMo/examples/asr/conf/config.yaml'
# with open(config_path, 'r') as stream:
# cfg = yaml.safe_load(stream)
#
# cfg["model"]["train_ds"]["manifest_filepath"]="/content/dummy_train.json"
# cfg["model"]["validation_ds"]["manifest_filepath"]="/content/dev_other.json"
# cfg["trainer"]["max_epochs"]=1
# cfg["trainer"]["gpus"]=1
#
# from pytorch_lightning.loggers import WandbLogger
#
# logger = WandbLogger(name="evaluate_QuartzNet5x5LS-En_devother_GPU", project="nemo")
# print(cfg["trainer"].pop("logger"))
# trainer = pl.Trainer(logger=logger,**cfg["trainer"])
#
# asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet5x5LS-En")
# asr_model.setup_training_data(train_data_config=cfg["model"]['train_ds'])
# asr_model.setup_validation_data(val_data_config=cfg["model"]['validation_ds'])
#
# print(f"num trainable params: {asr_model.num_weights}")
# trainer.fit(asr_model)
#
| nemo_asr/nemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Capstone Project: Create a Customer Segmentation Report for Arvato Financial Services
#
# In this project, you will analyze demographics data for customers of a mail-order sales company in Germany, comparing it against demographics information for the general population. You'll use unsupervised learning techniques to perform customer segmentation, identifying the parts of the population that best describe the core customer base of the company. Then, you'll apply what you've learned on a third dataset with demographics information for targets of a marketing campaign for the company, and use a model to predict which individuals are most likely to convert into becoming customers for the company. The data that you will use has been provided by our partners at Bertelsmann Arvato Analytics, and represents a real-life data science task.
#
# If you completed the first term of this program, you will be familiar with the first part of this project, from the unsupervised learning project. The versions of those two datasets used in this project will include many more features and has not been pre-cleaned. You are also free to choose whatever approach you'd like to analyzing the data rather than follow pre-determined steps. In your work on this project, make sure that you carefully document your steps and decisions, since your main deliverable for this project will be a blog post reporting your findings.
# ## Importing libraries
# %matplotlib inline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split, GridSearchCV
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
import xgboost as xgb
import numpy as np
import pandas as pd
import os
import time
import sys
# ## Part 0: Get to Know the Data
#
# There are four data files associated with this project:
#
# - `Udacity_AZDIAS_052018.csv`: Demographics data for the general population of Germany; 891 211 persons (rows) x 366 features (columns).
# - `Udacity_CUSTOMERS_052018.csv`: Demographics data for customers of a mail-order company; 191 652 persons (rows) x 369 features (columns).
# - `Udacity_MAILOUT_052018_TRAIN.csv`: Demographics data for individuals who were targets of a marketing campaign; 42 982 persons (rows) x 367 (columns).
# - `Udacity_MAILOUT_052018_TEST.csv`: Demographics data for individuals who were targets of a marketing campaign; 42 833 persons (rows) x 366 (columns).
#
# Each row of the demographics files represents a single person, but also includes information outside of individuals, including information about their household, building, and neighborhood. Use the information from the first two files to figure out how customers ("CUSTOMERS") are similar to or differ from the general population at large ("AZDIAS"), then use your analysis to make predictions on the other two files ("MAILOUT"), predicting which recipients are most likely to become a customer for the mail-order company.
#
# The "CUSTOMERS" file contains three extra columns ('CUSTOMER_GROUP', 'ONLINE_PURCHASE', and 'PRODUCT_GROUP'), which provide broad information about the customers depicted in the file. The original "MAILOUT" file included one additional column, "RESPONSE", which indicated whether or not each recipient became a customer of the company. For the "TRAIN" subset, this column has been retained, but in the "TEST" subset it has been removed; it is against that withheld column that your final predictions will be assessed in the Kaggle competition.
#
# Otherwise, all of the remaining columns are the same between the three data files. For more information about the columns depicted in the files, you can refer to two Excel spreadsheets provided in the workspace. [One of them](./DIAS Information Levels - Attributes 2017.xlsx) is a top-level list of attributes and descriptions, organized by informational category. [The other](./DIAS Attributes - Values 2017.xlsx) is a detailed mapping of data values for each feature in alphabetical order.
#
# In the below cell, we've provided some initial code to load in the first two datasets. Note for all of the `.csv` data files in this project that they're semicolon (`;`) delimited, so an additional argument in the [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) call has been included to read in the data properly. Also, considering the size of the datasets, it may take some time for them to load completely.
#
# You'll notice when the data is loaded in that a warning message will immediately pop up. Before you really start digging into the modeling and analysis, you're going to need to perform some cleaning. Take some time to browse the structure of the data and look over the informational spreadsheets to understand the data values. Make some decisions on which features to keep, which features to drop, and if any revisions need to be made on data formats. It'll be a good idea to create a function with pre-processing steps, since you'll need to clean all of the datasets before you work with them.
# ### Data Exploration
#
# load the data
# All project CSVs are semicolon-delimited; the Excel files carry the
# attribute documentation (values start on the second header row).
azdias = pd.read_csv('data/Udacity_AZDIAS_052018.csv', sep=';')
customers = pd.read_csv('data/Udacity_CUSTOMERS_052018.csv', sep=';')
attributes_values = pd.read_excel('data/DIAS Attributes - Values 2017.xlsx', header=1)
attributes_desc = pd.read_excel('data/DIAS Information Levels - Attributes 2017.xlsx', header=1)
# ##### I received a warning when loading the data. It happened because of mixed data type in 2 columns. I handled the mixed type by replacing ‘X’ with nan value.
# Inspect the two mixed-type columns flagged by the read_csv warning.
azdias.iloc[:,18:20].columns
azdias[['CAMEO_INTL_2015','CAMEO_DEUG_2015']].head()
azdias['CAMEO_INTL_2015'].value_counts()
azdias['CAMEO_DEUG_2015'].value_counts()
# All columns that ended up with dtype object.
azdias[azdias.columns[azdias.dtypes == 'object']]
# ##### The problem of mixed types in columns is because there is actually a mix of int and string values. I will remove the string value and convert the column datatypt to float.
def handle_mixed_types(df):
    """Normalize the mixed-type CAMEO columns of a demographics dataframe.

    The raw files encode unknowns in the CAMEO columns as 'X'/'XX', which
    forces the columns to dtype ``object``.  Those markers are replaced by
    ``numpy.nan`` and the two numeric CAMEO columns are cast to float.

    Args:
        df: demographics dataframe (modified in place).

    Returns:
        The same dataframe, with the CAMEO columns cleaned.
    """
    cameo_columns = ['CAMEO_DEUG_2015', 'CAMEO_INTL_2015', 'CAMEO_DEU_2015']
    numeric_cameo = ['CAMEO_DEUG_2015', 'CAMEO_INTL_2015']
    # Only touch frames that actually carry all three CAMEO columns.
    if all(name in df.columns for name in cameo_columns):
        df[cameo_columns] = df[cameo_columns].replace({'X': np.nan, 'XX': np.nan})
        df[numeric_cameo] = df[numeric_cameo].astype(float)
    return df
# Clean both frames and spot-check the resulting CAMEO distributions.
customers = handle_mixed_types(customers)
customers.groupby("CAMEO_DEUG_2015")["CAMEO_DEUG_2015"].count()
customers.groupby("CAMEO_INTL_2015")["CAMEO_INTL_2015"].count()
customers['CAMEO_DEU_2015'].unique()
azdias = handle_mixed_types(azdias)
# ##### Looking at the attributes files
# Drop the spreadsheet index column; forward-fill the merged Excel cells so
# every value row carries its attribute name.
attributes_desc.drop(columns=['Unnamed: 0'], inplace=True)
attributes_values.drop(columns=['Unnamed: 0'], inplace=True)
attributes_values['Attribute'] = attributes_values['Attribute'].ffill()
attributes_values['Description'] = attributes_values['Description'].ffill()
# ### Imputation: Replace missing values
# Map attribute -> list of integer codes documented as "unknown" (the Value
# cell may hold several codes, e.g. "-1, 9").
unknown = {}
for i, row in attributes_values.iterrows():
    if "unknown" in str(row['Meaning']):
        unknown[row['Attribute']] = [int(num) for num in str(row['Value']).split(', ')]
# There are 2 types of the missing data:
# 1. NaN in the dataframe
# 2. X and XX values in columns 18 and 19, that we converted to NANs.
def replace_unknown(df, dictionary):
    """Replace encoded "unknown" values with ``numpy.nan``, in place.

    Args:
        df: demographics dataframe, modified in place.
        dictionary: maps column name -> list of values that encode
            "unknown" for that column.  Columns absent from ``df`` are
            ignored.

    Returns:
        None
    """
    for column, unknown_values in dictionary.items():
        if column in df.columns:
            # One vectorized isin() pass per column instead of one boolean
            # scan per encoded value.
            df.loc[df[column].isin(unknown_values), column] = np.nan
# Work on copies so the raw frames stay available for comparison.
filtered_customers = customers.copy()
filtered_azdias = azdias.copy()
replace_unknown(filtered_customers, unknown)
replace_unknown(filtered_azdias, unknown)
# ### Outliers
#
# I want to verify if there are some outliers in the dataset. I can detect outliers using values in the DIAS Attributes - Values 2017.xlsx file. Everyting which is beyond those descriptions can be considered as outliers.
def get_unique_values(df):
    """Collect the distinct values actually present in each column.

    The record id column "LNR" is skipped because it is an identifier
    rather than a demographic attribute.

    Args:
        df: demographics dataframe.

    Returns:
        dict mapping column name -> array of unique values in that column.
    """
    return {column: df[column].unique()
            for column in df.columns if column != "LNR"}
def get_ranges():
    """Build the documented value range for every attribute.

    Walks the module-level ``attributes_values`` spreadsheet and groups
    the listed values by attribute name.

    Returns:
        dict mapping attribute name -> list of documented values, in
        spreadsheet order.
    """
    documented_ranges = {}
    for _, row in attributes_values.iterrows():
        documented_ranges.setdefault(row['Attribute'], []).append(row['Value'])
    return documented_ranges
# ##### From this, I can see that CAMEO_DEU_2015, CAMEO_DEUINTL_2015, LP_LEBENSPHASE_FEIN, PRAEGENDE_JUGENDJAHRE are definitely overcategorised.
def get_difference(actual_entries_dictionary):
    """Find values that fall outside each column's documented range.

    Compares the values observed in the data against the module-level
    ``attributes_proper_range`` mapping (presumably the output of
    ``get_ranges()`` -- TODO confirm where it is assigned) and keeps
    whatever is undocumented.

    Args:
        actual_entries_dictionary: column name -> observed unique values.

    Returns:
        dict mapping column name -> list of undocumented (outlier) values;
        columns with no documented range are skipped entirely.
    """
    return {
        column: [entry for entry in observed
                 if entry not in attributes_proper_range[column]]
        for column, observed in actual_entries_dictionary.items()
        if column in attributes_proper_range
    }
# +
# Extra "missing" encodings found by the range comparison above but not
# documented as unknown in the attributes spreadsheet.
additional_missing_values = {'KBA05_MODTEMP': [6.0], 'LP_FAMILIE_FEIN': [0.0], 'LP_FAMILIE_GROB': [0.0],
                             'LP_LEBENSPHASE_FEIN': [0.0], 'LP_LEBENSPHASE_GROB': [0.0], 'ORTSGR_KLS9': [0.0], 'GEBURTSJAHR': [0]}
replace_unknown(filtered_customers, additional_missing_values)
replace_unknown(filtered_azdias, additional_missing_values)
# -
def remove_outliers(df, sub_df):
    """Drop rows whose values fall outside the 1.5*IQR Tukey fences.

    Bug fix: the original version filtered into a local variable and
    implicitly returned None, so callers never received the filtered
    frame and the filtering had no effect.

    Args:
        df: dataframe to filter.
        sub_df: subset of df's numeric columns used to detect outliers;
            must share df's index.

    Returns:
        A view of ``df`` without the rows where any ``sub_df`` column
        lies below Q1 - 1.5*IQR or above Q3 + 1.5*IQR.
    """
    q1 = sub_df.quantile(0.25)
    q3 = sub_df.quantile(0.75)
    iqr = q3 - q1
    # A row is an outlier if ANY monitored column is outside the fences.
    outlier_mask = ((sub_df < (q1 - 1.5 * iqr)) | (sub_df > (q3 + 1.5 * iqr))).any(axis=1)
    return df[~outlier_mask]
# ##### Removing outliers in the next step
# NOTE(review): the results of these two calls are not assigned back, so
# filtered_azdias / filtered_customers are NOT actually modified here —
# verify intent and assign the return values if filtering is wanted.
remove_outliers(filtered_azdias, filtered_azdias[['ANZ_HAUSHALTE_AKTIV', 'ANZ_HH_TITEL', 'ANZ_PERSONEN', 'ANZ_TITEL', 'KBA13_ANZAHL_PKW']])
# NOTE(review): this call filters filtered_customers using columns taken from
# filtered_azdias — looks like a copy-paste; presumably the sub-frame should
# come from filtered_customers. TODO confirm.
remove_outliers(filtered_customers, filtered_azdias[['ANZ_HAUSHALTE_AKTIV', 'ANZ_HH_TITEL', 'ANZ_PERSONEN', 'ANZ_TITEL', 'KBA13_ANZAHL_PKW']])
# ### Detect missing data
def detect_missing_data(df, row_threshold=183):
    """Summarise missing data per column and flag heavily-missing rows.

    Args:
        df: dataframe to inspect.
        row_threshold: a row is reported when it has strictly more than
            this many missing values (default 183, the original
            hard-coded cut-off, now a parameter so the function works
            for frames with a different number of columns).

    Returns:
        Tuple ``(missing_cols_df, missing_rows_df)``; each frame has a
        ``num`` column (count of missing values) and a ``percentage``
        column (share of missing values, rounded to 2 decimals).
    """
    missing_cols_df = pd.Series(df.isna().sum(), name='num').to_frame()
    missing_cols_df = missing_cols_df.assign(percentage=lambda x: round((x.num * 100 / df.shape[0]), 2))
    # Count NaNs per row once instead of computing the same sum twice.
    row_missing = df.isna().sum(axis=1)
    missing_rows_df = pd.Series(row_missing[row_missing > row_threshold], name='num').to_frame()
    missing_rows_df = missing_rows_df.assign(percentage=lambda x: round((x.num * 100 / df.shape[1]), 2))
    return missing_cols_df, missing_rows_df
# Missing-data summaries for both demographic frames.
azdias_missing_cols_df, azdias_missing_rows_df = detect_missing_data(filtered_azdias)
customers_missing_cols_df, customers_missing_rows_df = detect_missing_data(filtered_customers)
# #### Columns having extremely high percentage of the missing data in "Azdias" dataset
azdias_missing_cols_df[azdias_missing_cols_df.percentage < 20].percentage[:20].plot(figsize = (20, 10), kind = 'bar');
plt.show()
# #### Columns having extremely high percentage of the missing data in "Customers" dataset
customers_missing_cols_df[customers_missing_cols_df.percentage < 30].percentage[:20].plot(figsize = (20, 10), kind = 'bar');
plt.show()
# ##### Comparing the missing data from AZDIAS with CUSTOMERS. In order to do this, merging dataframes by column.
merged = azdias_missing_cols_df.reset_index()
merged.rename(columns={'percentage':'azdias_%'}, inplace=True)
del merged["num"]
merged
temp_cus = customers_missing_cols_df.reset_index()
temp_cus.rename(columns={'percentage':'customers_%'}, inplace=True)
del temp_cus["num"]
temp_cus
# Inner-join on the column name so each row shows the missing percentage
# in both datasets side by side.
merged = merged.merge(temp_cus, on='index')
merged
merged_unique = set(list(merged['index']))
# Drop columns missing in more than 30% of rows in BOTH datasets.
cols_to_drop = list(merged.loc[(merged['azdias_%'] > 30) & (merged['customers_%'] > 30)]['index'])
cols_to_drop
filtered_azdias = filtered_azdias.drop(cols_to_drop, axis = 1)
filtered_customers = filtered_customers.drop(cols_to_drop, axis = 1)
print(filtered_azdias.shape)
print(filtered_customers.shape)
customers_unique = set(list(customers.columns))
azdias_unique = set(list(azdias.columns))
attributes_desc_unique = set(list(attributes_desc.Attribute))
attributes_values_unique = set(list(attributes_values.Attribute))
common = customers_unique & azdias_unique & attributes_desc_unique
# Also drop shared columns that have no entry in the DIAS attribute
# description sheet (no documented meaning).
cols_to_drop = list((set(filtered_azdias.columns) & set(filtered_customers.columns)).difference(attributes_desc_unique))
filtered_azdias = filtered_azdias.drop(cols_to_drop, axis = 1)
filtered_customers = filtered_customers.drop(cols_to_drop, axis = 1)
# Recompute the summaries after the column drops.
azdias_missing_cols_df, azdias_missing_rows_df = detect_missing_data(filtered_azdias)
customers_missing_cols_df, customers_missing_rows_df = detect_missing_data(filtered_customers)
def drop_rows(df, missing_rows):
    '''Remove the rows flagged as having too much missing data.

    Args:
        df: demographic dataframe
        missing_rows: dataframe whose index lists the rows to remove
    returns: updated dataframe without the flagged rows
    '''
    keep_mask = ~df.index.isin(missing_rows.index)
    return df[keep_mask]
# Remove the heavily-missing rows identified above from both frames.
filtered_azdias = drop_rows(filtered_azdias, azdias_missing_rows_df)
filtered_customers = drop_rows(filtered_customers, customers_missing_rows_df)
# ### Feature Engineering
#
# * `CAMEO_INTL_2015` into `CAMEO_INTL_2015_wealth_type` and `CAMEO_INTL_2015_family_type`
# * `LP_LEBENSPHASE_FEIN` into `LP_LEBENSPHASE_FEIN_family_type`, `LP_LEBENSPHASE_FEIN_earner_type`, `LP_LEBENSPHASE_FEIN_age_group`
# * `PRAEGENDE_JUGENDJAHRE` by movement type (avantgarde or mainstream) and decade
#
def engineering(df):
    '''Impute missing values and encode mixed/categorical columns in place.

    The designated mixed-type columns (CAMEO_INTL_2015,
    LP_LEBENSPHASE_FEIN, LP_FAMILIE_GROB, LP_STATUS_GROB,
    PRAEGENDE_JUGENDJAHRE) are split into interpretable components,
    remaining missing values are imputed (mode for the listed object
    columns, median otherwise), and the derived/object columns are
    label-encoded so the frame is fully numeric afterwards.

    Bug fixes vs. the original: the guards before the
    LP_FAMILIE_GROB_family_type and LP_STATUS_GROB_earner_type
    encodings checked for 'LP_LEBENSPHASE_FEIN_family_type' instead of
    the column actually being transformed (copy-paste error), which
    could either skip the encoding or raise a KeyError depending on
    which source columns survived earlier cleaning steps.

    Args:
        df: demographic dataframe, modified in place
    returns: none
    '''
    labelencoder = LabelEncoder()
    # Numeric count columns: impute with the mean before the generic pass.
    df['ANZ_HAUSHALTE_AKTIV'].fillna(df['ANZ_HAUSHALTE_AKTIV'].mean(), inplace=True)
    print('Replaced ANZ_HAUSHALTE_AKTIV')
    df['KBA13_ANZAHL_PKW'].fillna(df['KBA13_ANZAHL_PKW'].mean(), inplace=True)
    print('Replaced KBA13_ANZAHL_PKW')
    # CAMEO_INTL_2015 is a two-digit code: tens digit = wealth band,
    # units digit = family life stage.
    cameo_wealth_type = {
        11:'Wealthy', 12:'Wealthy',13:'Wealthy', 14:'Wealthy', 15:'Wealthy',
        21:'Prosperous', 22:'Prosperous', 23:'Prosperous', 24:'Prosperous', 25:'Prosperous',
        31:'Comfortable', 32:'Comfortable', 33:'Comfortable', 34:'Comfortable', 35:'Comfortable',
        41:'Less Affluent', 42:'Less Affluent', 43:'Less Affluent', 44:'Less Affluent', 45:'Less Affluent',
        51:'Poorer', 52:'Poorer', 53:'Poorer', 54:'Poorer', 55:'Poorer'
    }
    cameo_family_type = {
        11:'Pre-Family Couples & Singles',
        12:'Young Couples With Children',
        13:'Families With School Age Children',
        14:'Older Families & Mature Couples',
        15:'Elders In Retirement',
        21:'Pre-Family Couples & Singles',
        22:'Young Couples With Children',
        23:'Families With School Age Children',
        24:'Older Families & Mature Couples',
        25:'Elders In Retirement',
        31:'Pre-Family Couples & Singles',
        32:'Young Couples With Children',
        33:'Families With School Age Children',
        34:'Older Families & Mature Couples',
        35:'Elders In Retirement',
        41:'Pre-Family Couples & Singles',
        42:'Young Couples With Children',
        43:'Families With School Age Children',
        44:'Older Families & Mature Couples',
        45:'Elders In Retirement',
        51:'Pre-Family Couples & Singles',
        52:'Young Couples With Children',
        53:'Families With School Age Children',
        54:'Older Families & Mature Couples',
        55:'Elders In Retirement',
    }
    if 'CAMEO_INTL_2015' in df.columns:
        df['CAMEO_INTL_2015_wealth_type'] = df['CAMEO_INTL_2015'].map(cameo_wealth_type)
        df['CAMEO_INTL_2015_family_type'] = df['CAMEO_INTL_2015'].map(cameo_family_type)
    # LP_LEBENSPHASE_FEIN encodes family situation, income band and age
    # group in one fine-grained code; split it into three columns.
    family_type_dict = {
        1: 'single', 2: 'single', 3: 'single', 4: 'single', 5: 'single', 6: 'single', 7: 'single',
        8: 'single', 9: 'single', 10:'single', 11:'single', 12:'single', 13:'single', 14:'couples',
        15:'couples', 16:'couples', 17:'couples', 18:'couples', 19:'couples', 20:'couples', 21:'single_parent',
        22:'single_parent', 23:'single_parent', 24:'families', 25:'families', 26:'families', 27:'families', 28:'families',
        29:'mulitperson_households', 30:'mulitperson_households', 31:'mulitperson_households', 32:'mulitperson_households',
        33:'mulitperson_households', 34:'mulitperson_households', 35:'mulitperson_households', 36:'mulitperson_households',
        37:'mulitperson_households', 38:'mulitperson_households', 39:'mulitperson_households', 40:'mulitperson_households'
    }
    earner_type_dict = {
        1: 'low', 2: 'low', 3: 'average', 4: 'average', 5: 'low', 6: 'low', 7: 'average',
        8: 'average', 9: 'independant', 10:'wealthy', 11:'homeowners', 12:'homeowners', 13:'top', 14:'low',
        15: 'low', 16:'average', 17: 'independant', 18:'wealthy', 19:'homeowners', 20:'top', 21:'low',
        22: 'average', 23:'high', 24:'low', 25:'average', 26: 'independant', 27:'homeowners', 28:'top',
        29: 'low', 30:'average', 31:'low', 32:'average',
        33: 'independant', 34:'homeowners', 35:'top', 36: 'independant',
        37: 'homeowners', 38:'homeowners', 39:'top', 40:'top'
    }
    age_group_dict = {
        1: 'younger_age', 2: 'middle_age', 3: 'younger_age', 4: 'middle_age', 5: 'advanced_age', 6: 'retirement_age', 7: 'advanced_age',
        8: 'retirement_age', 9: np.nan, 10: np.nan, 11:'advanced_age', 12:'retirement_age', 13:'higher_age', 14:'younger_age',
        15: 'higher_age', 16:'higher_age', 17: np.nan, 18:'younger_age', 19:'higher_age', 20:'higher_age', 21: np.nan,
        22: np.nan, 23:np.nan, 24:np.nan, 25:np.nan, 26: np.nan, 27:np.nan, 28:np.nan,
        29: 'younger_age', 30:'younger_age', 31:'higher_age', 32:'higher_age',
        33: 'younger_age', 34:'younger_age', 35:'younger_age', 36: 'higher_age',
        37: 'advanced_age', 38:'retirement_age', 39:'middle_age', 40:'retirement_age'
    }
    if 'LP_LEBENSPHASE_FEIN' in df.columns:
        df['LP_LEBENSPHASE_FEIN_family_type'] = df['LP_LEBENSPHASE_FEIN'].map(family_type_dict)
        df['LP_LEBENSPHASE_FEIN_earner_type'] = df['LP_LEBENSPHASE_FEIN'].map(earner_type_dict)
        df['LP_LEBENSPHASE_FEIN_age_group'] = df['LP_LEBENSPHASE_FEIN'].map(age_group_dict)
    # Coarse family type from LP_FAMILIE_GROB.
    family_type_dict = {
        1: 'single', 2: 'couple', 3: 'single_parent', 4: 'single_parent', 5: 'single_parent',
        6: 'family', 7: 'family', 8: 'family', 9: 'mulitperson_households', 10: 'mulitperson_households',
        11: 'mulitperson_households'
    }
    if 'LP_FAMILIE_GROB' in df.columns:
        df['LP_FAMILIE_GROB_family_type'] = df['LP_FAMILIE_GROB'].map(family_type_dict)
    # Coarse earner type from LP_STATUS_GROB.
    earner_type_dict = {
        1: 'low', 2: 'low', 3: 'average', 4: 'average', 5: 'average',
        6: 'independant', 7: 'independant', 8: 'houseowners', 9: 'houseowners', 10: 'top'
    }
    if 'LP_STATUS_GROB' in df.columns:
        df['LP_STATUS_GROB_earner_type'] = df['LP_STATUS_GROB'].map(earner_type_dict)
    # PRAEGENDE_JUGENDJAHRE combines youth movement and decade.
    movement_dict = {
        1: 'mainstream', 2: 'avantgarde', 3: 'mainstream', 4: 'avantgarde',
        5: 'mainstream', 6: 'avantgarde', 7: 'avantgarde', 8: 'mainstream', 9: 'avantgarde',
        10: 'mainstream', 11: 'avantgarde', 12: 'mainstream', 13: 'avantgarde', 14: 'mainstream',
        15: 'avantgarde'
    }
    if 'PRAEGENDE_JUGENDJAHRE' in df.columns:
        df['PRAEGENDE_JUGENDJAHRE_movement'] = df['PRAEGENDE_JUGENDJAHRE'].map(movement_dict)
        print('created PRAEGENDE_JUGENDJAHRE_movement')
    decade_dict = {
        1: '40s', 2: '40s', 3: '50s', 4: '50s',
        5: '60s', 6: '60s', 7: '60s', 8: '70s', 9: '70s',
        10: '80s', 11: '80s', 12: '80s', 13: '80s', 14: '90s',
        15: '90s'
    }
    if 'PRAEGENDE_JUGENDJAHRE' in df.columns:
        df['PRAEGENDE_JUGENDJAHRE_decade'] = df['PRAEGENDE_JUGENDJAHRE'].map(decade_dict)
        print('created PRAEGENDE_JUGENDJAHRE_decade')
    # Keep only the insertion year of the timestamp column.
    if 'EINGEFUEGT_AM' in df.columns:
        df['EINGEFUEGT_AM'] = pd.to_datetime(df['EINGEFUEGT_AM']).dt.year
    # Object/categorical columns are imputed with the mode; everything
    # else with the median.
    list_of_obj_cols = ['CAMEO_INTL_2015', 'CAMEO_INTL_2015_wealth_type','CAMEO_INTL_2015_family_type',
                        'LP_LEBENSPHASE_FEIN', 'LP_LEBENSPHASE_FEIN_family_type', 'LP_LEBENSPHASE_FEIN_earner_type',
                        'LP_LEBENSPHASE_FEIN_age_group', 'LP_FAMILIE_GROB', 'LP_FAMILIE_GROB_family_type',
                        'LP_STATUS_GROB', 'LP_STATUS_GROB_earner_type', 'PRAEGENDE_JUGENDJAHRE',
                        'PRAEGENDE_JUGENDJAHRE_movement', 'PRAEGENDE_JUGENDJAHRE_decade', 'EINGEFUEGT_AM',
                        'CAMEO_INTL_2015', 'OST_WEST_KZ', 'CAMEO_DEU_2015', 'PRODUCT_GROUP', 'CUSTOMER_GROUP',
                        'D19_LETZTER_KAUF_BRANCHE']
    for col in df.columns:
        if col in list_of_obj_cols:
            df[col].fillna(df[col].value_counts().idxmax(), inplace=True)
        else:
            print(col)
            df[col].fillna(df[col].median(), inplace=True)
            print(f'Imputed {col}')
    # Label-encode every derived categorical column that is present.
    if 'CAMEO_INTL_2015' in df.columns:
        df['CAMEO_INTL_2015_wealth_type'] = labelencoder.fit_transform(df['CAMEO_INTL_2015_wealth_type'])
        df['CAMEO_INTL_2015_family_type'] = labelencoder.fit_transform(df['CAMEO_INTL_2015_family_type'])
    if 'LP_LEBENSPHASE_FEIN_family_type' in df.columns:
        df['LP_LEBENSPHASE_FEIN_family_type'] = labelencoder.fit_transform(df['LP_LEBENSPHASE_FEIN_family_type'])
        df['LP_LEBENSPHASE_FEIN_earner_type'] = labelencoder.fit_transform(df['LP_LEBENSPHASE_FEIN_earner_type'])
        df['LP_LEBENSPHASE_FEIN_age_group'] = labelencoder.fit_transform(df['LP_LEBENSPHASE_FEIN_age_group'])
        print('Created LP_LEBENSPHASE_FEIN_family_type, LP_LEBENSPHASE_FEIN_earner_type and LP_LEBENSPHASE_FEIN_age_group')
    # Fixed guard: check the column actually being encoded.
    if 'LP_FAMILIE_GROB_family_type' in df.columns:
        df['LP_FAMILIE_GROB_family_type'] = labelencoder.fit_transform(df['LP_FAMILIE_GROB_family_type'])
        print('Transformed LP_FAMILIE_GROB_family_type')
    # Fixed guard: check the column actually being encoded.
    if 'LP_STATUS_GROB_earner_type' in df.columns:
        df['LP_STATUS_GROB_earner_type'] = labelencoder.fit_transform(df['LP_STATUS_GROB_earner_type'])
        print('Transformed LP_STATUS_GROB_earner_type')
    # Original mixed-type columns are no longer needed once split/encoded.
    drop_cols = ['LP_LEBENSPHASE_FEIN', 'LP_FAMILIE_GROB', 'LP_STATUS_GROB', 'PRAEGENDE_JUGENDJAHRE',
                 'CAMEO_INTL_2015', 'D19_LETZTER_KAUF_BRANCHE']
    df.drop(drop_cols, axis = 1, inplace = True, errors='ignore')
    if 'OST_WEST_KZ' in df.columns:
        df['OST_WEST_KZ'] = labelencoder.fit_transform(df['OST_WEST_KZ'])
        print('Transformed OST_WEST_KZ')
    if 'CAMEO_DEU_2015' in df.columns:
        df['CAMEO_DEU_2015'] = labelencoder.fit_transform(df['CAMEO_DEU_2015'])
        print('Transformed CAMEO_DEU_2015')
    for value in ['PRODUCT_GROUP', 'CUSTOMER_GROUP']:
        if value in df.columns:
            df[value] = labelencoder.fit_transform(df[value])
            print(f'Transformed {value}')
    if 'PRAEGENDE_JUGENDJAHRE_movement' in df.columns:
        df['PRAEGENDE_JUGENDJAHRE_movement'] = labelencoder.fit_transform(df['PRAEGENDE_JUGENDJAHRE_movement'])
        df['PRAEGENDE_JUGENDJAHRE_decade'] = labelencoder.fit_transform(df['PRAEGENDE_JUGENDJAHRE_decade'])
# Run the in-place feature-engineering pipeline on both frames.
engineering(filtered_azdias)
engineering(filtered_customers)
# ##### I will use feature correlation across the entire dataset to determine which features are too highly-correlated.
def drop_cols(df):
    """Identify highly correlated columns (|corr| > 0.95) to drop.

    Builds the absolute correlation matrix, masks everything except its
    strict upper triangle (so each pair is considered once), and returns
    the columns correlated above 0.95 with an earlier column.

    Bug fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in
    1.24; the builtin ``bool`` is the correct dtype here.

    Args:
        df: numeric dataframe to analyse.

    Returns:
        List of column names to drop.
    """
    corr_matrix = df.corr().abs().round(2)
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
    cols_to_drop = [column for column in upper.columns if any(upper[column] > 0.95)]
    return cols_to_drop
# #### Columns with pairwise correlation above the 0.95 threshold will be removed.
cols_to_drop = drop_cols(filtered_azdias)
filtered_azdias = filtered_azdias.drop(cols_to_drop, axis = 1)
filtered_customers = filtered_customers.drop(cols_to_drop, axis = 1)
def perform_log_transform(df):
    """Apply a log(1 + x) transform to every column of the dataframe.

    Vectorised with ``np.log1p`` instead of the original per-column
    Python loop; log1p is also more accurate for small x. The result
    keeps the same columns and index.

    Args:
        df: dataframe of non-negative numeric features.

    Returns:
        New dataframe with ``log(1 + x)`` applied element-wise.
    """
    return np.log1p(df)
# Keep log-transformed copies alongside the raw (cleaned) frames.
azdias_log_transformed = perform_log_transform(filtered_azdias)
customers_log_transformed = perform_log_transform(filtered_customers)
azdias_log_transformed.head()
# ###### Checking values in filtered_azdias dataframe
filtered_azdias.head()
# ### Scaling Features
def feature_scaling(df):
    """Rescale every feature into the [0, 1] range.

    Fits a fresh MinMaxScaler on the given dataframe and returns the
    scaled values as a new dataframe with the original column names.
    """
    scaler = MinMaxScaler()
    rescaled_values = scaler.fit_transform(df)
    return pd.DataFrame(rescaled_values, columns=df.columns)
# Scale all four working frames into [0, 1].
filtered_azdias = feature_scaling(filtered_azdias)
filtered_customers = feature_scaling(filtered_customers)
azdias_log_transformed = feature_scaling(azdias_log_transformed)
customers_log_transformed = feature_scaling(customers_log_transformed)
filtered_customers.info()
# ### Save the pre-processed data
# +
# Commenting out these steps to save time for testing and debugging for later runs.
# NOTE(review): the last commented line references `customers_log`, which is
# not defined — presumably `customers_log_transformed` was meant; verify
# before re-enabling these saves.
#filtered_azdias.to_csv('data/Udacity_AZDIAS_clean.csv', index=False, sep=';')
#filtered_customers.to_csv('data/Udacity_CUSTOMERS_clean.csv', index=False, sep=';')
#azdias_log_transformed.to_csv('data/Udacity_AZDIAS_log_clean.csv', index=False, sep=';')
#customers_log.to_csv('data/Udacity_CUSTOMERS_log_clean.csv', index=False, sep=';')
# -
# ## Part 1: Customer Segmentation Report
#
# The main bulk of your analysis will come in this part of the project. Here, you should use unsupervised learning techniques to describe the relationship between the demographics of the company's existing customers and the general population of Germany. By the end of this part, you should be able to describe parts of the general population that are more likely to be part of the mail-order company's main customer base, and which parts of the general population are less so.
# Reload the DIAS reference sheets (attribute descriptions and value codes).
attributes_desc = pd.read_excel('./data/DIAS Information Levels - Attributes 2017.xlsx', header=1)
attributes_values = pd.read_excel('./data/DIAS Attributes - Values 2017.xlsx', header=1)
# Attribute/Description cells are merged in the sheet; forward-fill them.
attributes_values['Attribute'] = attributes_values['Attribute'].ffill()
attributes_values['Description'] = attributes_values['Description'].ffill()
attributes_values
attributes_desc.drop(columns=['Unnamed: 0'], inplace=True)
# ### PCA
# * For the dimensionality reduction algorithm PCA it is better to look at a data variance to decide how many top components to include.
#
#
# * PCA is a method that uses simple matrix operations from linear algebra and statistics to calculate a projection of the original data into the same number or fewer dimensions.
def pca_model(data, n_components=None):
    '''Fit a scikit-learn PCA model and project the data onto it.

    Args:
        data: demographic dataframe
        n_components: number of components (None keeps all of them)
    returns:
        pca: fitted pca model
        data_transformed: data projected onto the principal components
    '''
    model = PCA(n_components)
    projected = model.fit_transform(data)
    return model, projected
def pca_variance(pca):
    '''Plot the cumulative explained variance ratio of a fitted PCA model.

    Args:
        pca: fitted pca model
    returns: none
    '''
    exp_variance = np.cumsum(pca.explained_variance_ratio_)
    plt.plot(exp_variance)
    plt.xlabel('No of Components')
    plt.ylabel('Explained Variance')
    plt.title('Explained Variance Ratio')
    # Matplotlib >= 3.5 removed the old `b` keyword; `visible` replaces it.
    plt.grid(visible=True)
    # plt.show() returns None, so binding its result served no purpose.
    plt.show()
# Fit PCA with all components retained to inspect the variance profile.
azdias_pca_model, pca_transformed = pca_model(filtered_azdias)
def elbow_curve(data):
    '''Fit one KMeans model per candidate k and plot the elbow graph.

    For every k the magnitude of the KMeans score (negative inertia,
    i.e. distance to the closest centroid) is recorded so a suitable
    cluster count can be read off the resulting elbow plot.

    NOTE(review): the loop currently only tries k = 2 and 3
    (``K = range(2,4)``); widen the range to explore more cluster
    counts — the original docstring mentioned k from 2 to 15.

    Args:
        data: demographic dataframe
    returns: none
    '''
    points = np.array([])
    K = range(2,4)
    for k in K:
        t = time.time()
        kmeans = KMeans(k)
        km = kmeans.fit(data)
        # score() returns the negative inertia; store its magnitude.
        points = np.append(points, np.abs(km.score(data)))
        print("Done in {:.2f} sec.".format(time.time()-t))
    plt.plot(K, points, 'bx-')
    plt.xlabel('no of K')
    plt.ylabel('avg distance to centroid')
    plt.title('Elbow Graph')
    plt.show()
pca_variance(azdias_pca_model)
# #### Around 125 component cover 90% of the data.
# Refit PCA keeping only the first 125 components.
azdias_pca_model, azdias_pca_transformed_125 = pca_model(filtered_azdias, 125)
# ### Elbow Method
elbow_curve(azdias_pca_transformed_125)
# ## Part 2: Supervised Learning Model
#
# Now that you've found which parts of the population are more likely to be customers of the mail-order company, it's time to build a prediction model. Each of the rows in the "MAILOUT" data files represents an individual that was targeted for a mailout campaign. Ideally, we should be able to use the demographic information from each individual to decide whether or not it will be worth it to include that person in the campaign.
#
# The "MAILOUT" data has been split into two approximately equal parts, each with almost 43 000 data rows. In this part, you can verify your model with the "TRAIN" partition, which includes a column, "RESPONSE", that states whether or not a person became a customer of the company following the campaign. In the next part, you'll need to create predictions on the "TEST" partition, where the "RESPONSE" column has been withheld.
mailout_train = pd.read_csv('data/Udacity_MAILOUT_052018_TRAIN.csv', sep=';')
mailout_train.head()
# ##### Huge Imbalance in class
sns.countplot(x="RESPONSE",data=mailout_train);
# ### Data preprocessing
def data_preprocessing(df, mailout_train, test=False):
    '''Run the full cleaning pipeline on a MAILOUT dataframe.

    Applies the same steps used for the demographic frames: mixed-type
    handling, unknown-value replacement, dropping sparse and highly
    correlated columns, feature engineering and scaling.

    Args:
        df: demographic dataframe
        mailout_train: training dataframe (currently unused; kept for
            interface compatibility)
        test: flag (currently unused; kept for interface compatibility)
    returns: preprocessed dataframe
    '''
    df = handle_mixed_types(df)
    replace_unknown(df, unknown)
    extra_unknown_codes = {'KBA05_MODTEMP': [6.0], 'LP_FAMILIE_FEIN': [0.0], 'LP_FAMILIE_GROB': [0.0],
                           'LP_LEBENSPHASE_FEIN': [0.0], 'LP_LEBENSPHASE_GROB': [0.0], 'ORTSGR_KLS9': [0.0], 'GEBURTSJAHR': [0]}
    replace_unknown(df, extra_unknown_codes)
    # Drop columns missing in more than 30% of rows.
    missing_cols_df, _ = detect_missing_data(df)
    sparse_cols = missing_cols_df.index[missing_cols_df['percentage'] > 30].tolist()
    df = df.drop(sparse_cols, axis=1)
    # Drop highly correlated columns, but always keep these two.
    correlated_cols = [c for c in drop_cols(df)
                       if c not in ('D19_GESAMT_ANZ_24', 'D19_VERSAND_OFFLINE_DATUM')]
    df = df.drop(correlated_cols, axis=1)
    engineering(df)
    df = feature_scaling(df)
    return df
# ##### Applying preprocessing steps on mailout_train dataset
mailout_train = data_preprocessing(mailout_train, mailout_train)
mailout_train.head()
# Sanity check: any remaining non-numeric columns after preprocessing.
mailout_train[mailout_train.columns[mailout_train.dtypes == 'object']]
mailout_train_Y=pd.DataFrame(mailout_train['RESPONSE'].values,columns=['RESPONSE'])
mailout_train_X=mailout_train.drop(['RESPONSE', 'LNR'],axis=1)
X_train, X_val, y_train, y_val = train_test_split(mailout_train_X, mailout_train_Y, test_size=0.2)
X_train.info()
# Baseline model: plain logistic regression.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(random_state=42)
lr.fit(X_train, y_train.values.ravel())
y_pred = lr.predict_proba(X_val)[:,1]
print('Accuracy of logistic regression classifier on validation set: {:.2f}'.format(lr.score(X_val, y_val)))
print('Logistic regression ROC-AUC: {:.2f}'.format(roc_auc_score(y_val, y_pred)))
# ##### This is a binary classification problem with highly imbalanced data. Therefore, I have chosen the Area Under the Curve Receiver Operating Characteristics (AUC-ROC) as the evaluation metric.
#
# * The AUC-ROC is used to visualize the True Positive Rate against False Positive Rate. If AUC equals 1, it means that True Positives and True Negatives are disjoint and perfectly distinguishable, while AUC equal to 0 means that the model makes the exact opposite classification (all true negatives are classified as positives and vice versa).
#
#
# * AUC has a range of [0, 1]. The greater the value, the better is the performance of our model.
def train_model(model, X_train, y_train, X_val, y_val):
    '''Fit a classifier and score it on the validation split.

    Args:
        model: instantiated model from the list of the classifiers
        X_train: training data
        y_train: training labels
        X_val: validation data
        y_val: validation labels
    returns: ROC-AUC score, training time
    '''
    start = time.time()
    fitted = model.fit(X_train, y_train.values.ravel())
    positive_probs = fitted.predict_proba(X_val)[:, 1]
    roc_score = roc_auc_score(y_val, positive_probs)
    elapsed = time.time() - start
    return roc_score, elapsed
# Candidate boosting classifiers to compare, all seeded for reproducibility.
classifiers = [
    ("XGBClassifier",xgb.XGBClassifier(random_state=42)),
    ("AdaBoost", AdaBoostClassifier(random_state=42)),
    ("GradientBoostingClassifier", GradientBoostingClassifier(random_state=42))
]
def get_scores(classifiers, X_train, y_train, X_val, y_val):
    """Train every candidate classifier and tabulate ROC-AUC and runtime.

    Args:
        classifiers: iterable of (name, estimator) pairs.
        X_train, y_train: training split.
        X_val, y_val: validation split.
    Returns: dataframe with one row per classifier and the columns
        'classifier', 'score' and 'train_time'.
    """
    names, scores, times = [], [], []
    for name, classifier in classifiers:
        score, elapsed = train_model(classifier, X_train, y_train, X_val, y_val)
        names.append(name)
        scores.append(score)
        times.append(elapsed)
    result = {'classifier': names, 'score': scores, 'train_time': times}
    results_df = pd.DataFrame.from_dict(result, orient='index').transpose()
    return results_df
# Compare the candidate classifiers on the raw feature split.
get_scores(classifiers, X_train, y_train, X_val, y_val)
mailout_train_X.isnull().sum().sort_values(ascending=False)
# ##### Performing PCA
mailout_train_X_pca_model, mailout_train_X_pca_transformed = pca_model(mailout_train_X, 190)
X_train, X_val, y_train, y_val = train_test_split(mailout_train_X_pca_transformed, mailout_train_Y, test_size=0.2)
lr.fit(X_train, y_train.values.ravel())
y_pred = lr.predict_proba(X_val)[:,1]
print('Accuracy on validation set: {:.2f}'.format(lr.score(X_val, y_val)))
print('ROC-AUC: {:.2f}'.format(roc_auc_score(y_val, y_pred)))
# Compare the candidate classifiers again on the PCA-reduced features.
get_scores(classifiers, X_train, y_train, X_val, y_val)
# ##### Splitting dataset
# Re-split on the original (non-PCA) features for hyper-parameter tuning.
X_train, X_val, y_train, y_val = train_test_split(mailout_train_X, mailout_train_Y, test_size=0.2)
# ##### Grid Search
# * I have also used Grid Search technique to find the best model parameters and fine tune the model.
# * Grid search is an approach to hyperparameter tuning that will methodically build and evaluate a model for each combination of algorithm parameters specified in a grid.
# +
ada_param_dict = {
    'n_estimators': [15, 30],
    'learning_rate' : [0.02,0.04] }
adagrid = GridSearchCV(estimator = AdaBoostClassifier(random_state=42),
                       param_grid = ada_param_dict,
                       scoring = "roc_auc",
                       cv = 2,
                       verbose=2)
# -
adagrid.fit(X_train, y_train.values.ravel())
adagrid.best_score_, adagrid.best_params_
XGB_param_dict = {
    'n_estimators': [200, 400],
    'max_depth': [4, 6]
}
xgbgrid = GridSearchCV(estimator = xgb.XGBClassifier(random_state=42),
                       param_grid = XGB_param_dict,
                       scoring = "roc_auc",
                       cv = 2,
                       verbose=1)
xgbgrid.fit(X_train, y_train.values.ravel())
xgbgrid.best_score_, xgbgrid.best_params_
# ## Part 3: Kaggle Competition
#
# Now that you've created a model to predict which individuals are most likely to respond to a mailout campaign, it's time to test that model in competition through Kaggle. If you click on the link [here](http://www.kaggle.com/t/21e6d45d4c574c7fa2d868f0e8c83140), you'll be taken to the competition page where, if you have a Kaggle account, you can enter. If you're one of the top performers, you may have the chance to be contacted by a hiring manager from Arvato or Bertelsmann for an interview!
#
# Your entry to the competition should be a CSV file with two columns. The first column should be a copy of "LNR", which acts as an ID number for each individual in the "TEST" partition. The second column, "RESPONSE", should be some measure of how likely each individual became a customer – this might not be a straightforward probability. As you should have found in Part 2, there is a large output class imbalance, where most individuals did not respond to the mailout. Thus, predicting individual classes and using accuracy does not seem to be an appropriate performance evaluation method. Instead, the competition will be using AUC to evaluate performance. The exact values of the "RESPONSE" column do not matter as much: only that the higher values try to capture as many of the actual customers as possible, early in the ROC curve sweep.
mailout_test = pd.read_csv('./data/Udacity_MAILOUT_052018_TEST.csv', sep=';')
mailout_test['D19_VERSAND_OFFLINE_DATUM']
# Keep the IDs aside before preprocessing drops/reorders rows.
mailout_test_LNR = mailout_test["LNR"]
mailout_test_LNR
mailout_test = data_preprocessing(mailout_test, mailout_train, test=True)
mailout_test
mailout_test=mailout_test.drop(['LNR'],axis=1)
mailout_test.info()
# Use the best AdaBoost configuration found by the grid search.
ada_model = adagrid.best_estimator_
# ##### Fitting AdaBoost model
ada_model.fit(X_train, y_train.values.ravel())
AdaBoost_preds = ada_model.predict_proba(mailout_test)[:,1]
# ##### Finding features having high impact on the model
def feature_imp(model, mailout_train, num_features=10):
    """Print and plot the strongest feature importances of a fitted model.

    Generalisation: the original hard-coded the top-10 cut-off (and left
    an unused ``num_features`` local); it is now a keyword parameter
    with the same default, so existing calls behave identically.

    Args:
        model: fitted estimator exposing ``feature_importances_``.
        mailout_train: dataframe whose columns name the features.
        num_features: how many of the strongest features to show.
    """
    cols_names = mailout_train.columns
    importances = pd.DataFrame({'feature': cols_names, 'value': model.feature_importances_}).sort_values(by='value', ascending = False).reset_index(drop = True)
    print(importances.head(num_features))
    importances[:num_features].plot(kind='barh')
feature_imp(ada_model, X_train)
# Assemble the Kaggle submission: LNR id as index, predicted probability as RESPONSE.
data = {'RESPONSE': AdaBoost_preds}
final_solution = pd.DataFrame(index=mailout_test_LNR, data=data)
final_solution.to_csv("submissions/Aravato_Submission.csv")
| .ipynb_checkpoints/Arvato Project-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Working with Unknown Dataset Sizes
#
# This notebook will demonstrate the features built into SmartNoise to handle unknown or private dataset sizes.
# ### Set up libraries and load exemplar dataset
# +
# load libraries
import os
import opendp.smartnoise.core as sn
import numpy as np
import math
import statistics
# establish data information
data_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000', 'data.csv')
var_names = ["age", "sex", "educ", "race", "income", "married"]
# Load the CSV with named columns taken from its header row.
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
print("Dimension of dataset: " + str(data.shape))
print("Names of variables: " + str(data.dtype.names))
# -
# We see above this dataset has 1000 observations (rows). Oftentimes the number of observations is public information. For example, a researcher might run a random poll of 1000 respondents and publicly announce the sample size.
#
# However, there are cases where simply the number of observations itself can leak private information. For example, if a dataset contained all the individuals with a rare disease in a community, then knowing the size of the dataset would reveal how many people in the community had that condition. In general, a dataset may be composed of some defined subset of a population, and the dataset size is then equivalent to a count query on that subset, and so we should protect it like any other query we want to provide privacy guarantees for.
#
# SmartNoise assumes the sample size is private information. If it happens that you actually know the dataset size, then that information will be valuable if you add it into your analysis graph. However, SmartNoise will not assume you truthfully or correctly know the size of the dataset. (Moreover, it cannot directly send you an error message if you get this value incorrect, or this would permit an attack whereby an analyst keeps guessing different dataset sizes until the error message goes away, thereby leaking the exact dataset size.)
#
# If we know the dataset size, we can incorporate it into the analysis as below, where we provide `data_n` as an argument to the release of a mean on age:
# +
# get mean of age, using correct dataset size
with sn.Analysis() as analysis:
    # load data
    data = sn.Dataset(path = data_path, column_names = var_names)
    # get mean of age
    age_mean = sn.dp_mean(data = sn.to_float(data['age']),
                          privacy_usage = {'epsilon': .1},
                          data_lower = 0.,
                          data_upper = 100.,
                          data_rows = 1000 # Here we're providing the correct value of n.
                          )
analysis.release()
print("DP mean of age: {0}".format(age_mean.value))
print("Privacy usage: {0}".format(analysis.privacy_usage))
# -
# ### Providing incorrect dataset size values
#
# However, if we provide an incorrect value of `data_n` we still receive an answer as we see below:
# +
# get mean of age, deliberately using incorrect dataset sizes
with sn.Analysis() as analysis:
    # load data
    data = sn.Dataset(path = data_path, column_names = var_names)
    # get mean of age using too small n
    age_mean_low_n = sn.dp_mean(data = sn.to_float(data['age']),
                                privacy_usage = {'epsilon': .1},
                                data_lower = 0.,
                                data_upper = 100.,
                                data_rows = 900 # Here we're setting n below the actual value in the dataset.
                                )
    # get mean of age using too large n
    age_mean_high_n = sn.dp_mean(data = sn.to_float(data['age']),
                                 privacy_usage = {'epsilon': .1},
                                 data_lower = 0.,
                                 data_upper = 100.,
                                 data_rows = 1100 # Here we're setting n above the actual value in the dataset.
                                 )
analysis.release()
print("DP mean of age: {0}".format(age_mean_low_n.value))
print("DP mean of age: {0}".format(age_mean_high_n.value))
print("Privacy usage: {0}".format(analysis.privacy_usage))
# -
# Let's examine what is actually happening when these values are provided. When we provide all of the metadata arguments (`data_lower`, `data_upper`, `data_n`) to the function `sn.dp_mean`, it works as a convenience method that knits together a number of library components to provide a mean. A clamping, imputation and resize step are run on the dataset, in order for the validator to certify the analysis is privacy preserving (for more detail see the notebook "data_analysis_tutorial").
# +
# Manual version of what dp_mean does internally: clamp, impute, resize.
with sn.Analysis() as analysis:
    # load data
    data = sn.Dataset(path = data_path, column_names = var_names)
    # establish data
    age_dt = sn.to_float(data['age'])
    # clamp data to range and impute missing values
    age_dt = sn.clamp(data = age_dt, lower = 0., upper = 100.)
    age_dt = sn.impute(data = age_dt, distribution = 'Gaussian',
                       lower = 0., upper = 100., shift = 45., scale = 10.)
    # ensure data are consistent with proposed n
    # NOTE(review): `upper = 1000.` here is inconsistent with the 0-100
    # clamp/impute bounds above — presumably should be 100.; verify.
    age_dt = sn.resize(data = age_dt, number_rows = 1000, distribution = 'Gaussian',
                       lower = 0., upper = 1000., shift = 45., scale = 10.)
    # calculate differentially private mean of age
    age_mean = sn.dp_mean(data = age_dt, privacy_usage = {'epsilon': .1})
analysis.release()
# -
# The `resize()` step takes `data` and `n` as arguments and builds a new dataset from the original data, with the number of rows equal to the supplied `n`. This is done by two rules that guarantee the privacy of the analysis:
# 1. If `n` less than or equal to the size of the supplied data, sample `n` rows (without replacement) from the supplied dataset.
# 2. If `n` greater than the size of the supplied data, create a new dataset by using the entire supplied dataset, and adding the remaining required rows by imputing new observations as if they were missing values.
#
# Thus, in general, we get the first `n` rows from the supplied dataset, and any additional rows are treated as if they were in the original data, but missing values.
#
# (Note, we would get a better answer if we resampled any extra rows we needed from the dataset, instead of imputing them. However, then an individual could appear in the dataset twice or more, and this would change all of our worst-case sensitivity calculations in the differentially private mechanisms. We plan to add this as an optional parameter to allow a user to inflate the sensitivity by a maximum value, and pay the corresponding privacy loss in their budget, in a future release.)
#
# We'll demonstrate the effects that resizing can have on differentially private releases at the end of this notebook, but first we show how to deal with the case where `data_n` is simply unknown.
# ### Analysis with no provided dataset size
# If we do not believe we have an accurate estimate for `data_n` we can instead pay for a query on the dataset to release a differentially private value of the dataset size. Then we can use that estimate in the rest of the analysis. Here is an example:
# get mean of age, using DP release of dataset size as estimate
with sn.Analysis(dynamic=True) as analysis:
    # load data
    data = sn.Dataset(path = data_path, column_names = var_names)
    age_dt = sn.to_float(data['age'])
    # get dp release of the size of the dataset
    dp_num_records = sn.dp_count(data= age_dt,
                                 privacy_usage={'epsilon': .05},
                                 lower=0,
                                 upper=10000
                                 )
    # get mean of age
    age_mean = sn.dp_mean(data = age_dt,
                          privacy_usage = {'epsilon': .1},
                          data_lower = 0.,
                          data_upper = 100.,
                          data_rows = dp_num_records # Here we're providing the DP release of n.
                          )
analysis.release()
print("DP number of records: {0}".format(dp_num_records.value))
print("DP mean of age: {0}".format(age_mean.value))
print("Privacy usage: {0}".format(analysis.privacy_usage))
# Note that our privacy usage has increased because we apportioned some epsilon for both the release count of the dataset, and the mean of the dataset.
#
# The DP released estimate of the dataset size is a noisy answer. As we saw above (in section [Providing incorrect dataset size values](#Providing-incorrect-dataset-size-values)), if the DP released estimate happens to be too small, we will create a new dataset by subsampling from the `age` variable. If the DP released estimate happens to be too large, we will add missing values to the column and impute. Likely, the mean of the imputations is not the mean of the variable, so in this case, the answer will have some bias. We can see this in simulation by plotting the number of records against the DP released mean through many simulations.
#
# (Note, we're adjusting the epsilon for the dp count downwards to intentionally increase the range of released datasets sizes for dramatic visual effect in this simulation.)
# +
import matplotlib
import matplotlib.pyplot as plt
n_sims = 1000
# history columns: [DP count release, DP mean release] for each simulation
history = np.zeros(shape=(n_sims, 2), dtype=float)
# loop over the previous release to accumulate many simulations
for i in range(n_sims):
    with sn.Analysis(dynamic=True) as analysis:
        data = sn.Dataset(path = data_path, column_names = var_names)
        age_dt = sn.to_float(data['age'])
        # get dp release of the size of the dataset
        # (epsilon lowered to .05 to widen the spread of released sizes)
        dp_num_records = sn.dp_count(data= age_dt,
                                     privacy_usage={'epsilon': .05},
                                     lower=0,
                                     upper=10000
                                    )
        # get mean of age
        age_mean = sn.dp_mean(data = age_dt,
                              privacy_usage = {'epsilon': 1},
                              data_lower = 0.,
                              data_upper = 100.,
                              data_rows = dp_num_records # Here we're providing the DP release of n.
                             )
        analysis.release()
    # record this simulation's pair of releases
    history[i,0] = dp_num_records.value
    history[i,1] = age_mean.value
# +
## plot the simulated values,
## as well as their numerical average (for any dataset size > 6 simulations),
## and finally the expected value.
# plot raw simulations
plt.plot(history[:, 0],history[:, 1], 'o', fillstyle='none', color = 'cornflowerblue')
min_count = int(min(history[:, 0]))  # NOTE(review): min_count/max_count are unused; count_range recomputes them
max_count = int(max(history[:, 0]))
count_range = range(int(min(history[:, 0])), int(max(history[:, 0])))
expected_values = np.zeros(shape=(len(count_range), 2), dtype=float)
expected_values[:, 0] = list(count_range)
# recompute the true (non-private) mean of age for reference
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
true_mean_age = statistics.mean(age)
counter = 0
for count in count_range:
    # all simulations whose DP count release equals this count
    flag = history[:,0] == count
    subhistory = history[flag,1]
    if len(subhistory)>6:
        mean_sim = np.mean(subhistory)
        # plot average simulation by dp release of dataset size
        plt.plot(count, mean_sim, 'o', color = 'indigo')
    if count<=1000:
        # counts at or below the true size only subsample, so the expectation is the true mean
        expected_values[counter, 1] = true_mean_age
    else:
        # counts above the true size pad with imputed rows centred at 50 ((0+100)/2),
        # pulling the expected mean toward 50
        expected_values[counter, 1] = ((true_mean_age* 1000) + ((count-1000)*50))/count
    counter += 1
# plot the expected value by dp release of dataset size
plt.plot(expected_values[:, 0], expected_values[:, 1], linestyle='--', color = 'tomato')
# NOTE(review): original commented-out labels were swapped -- here x is the DP count (n), y is the DP mean
#plt.xlabel('n')
#plt.ylabel('DP Release of Age')
plt.show()
# -
# Note that the banding is an artifact of the snapping mechanism. This provides protection against floating-point attacks. We observe a similar trend across a larger range of sample sizes.
# + pycharm={"is_executing": true}
import pandas as pd
import seaborn as sns
# initialize data
# sweep assumed dataset sizes n from 100 to 2000 in steps of 200
n_range = range(100, 2001, 200)
n_sims = 50
ns = []
releases = []
# build all releases inside one analysis; .release() computes them all at once
with sn.Analysis(dynamic=True) as analysis:
    data = sn.Dataset(path = data_path, column_names = var_names)
    age_dt = sn.to_float(data['age'])
    for n in n_range:
        for index in range(n_sims):
            # get mean of age
            ns.append(n)
            releases.append(sn.dp_mean(data = age_dt,
                            privacy_usage = {'epsilon': 1},
                            data_lower = 0.,
                            data_upper = 100.,
                            data_rows = n))
analysis.release()
# get released values
values = [release.value for release in releases]
df = pd.DataFrame({'n': ns, 'release': values})
# + pycharm={"is_executing": true}
# get true mean
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
true_mean_age = statistics.mean(age)
# plot distribution of releases by n; horizontal line marks the true mean
plot = sns.boxplot(x = 'n', y = 'release', data = df)
plot.axhline(true_mean_age)
plt.show()
# -
# ### SmartNoise `resize` vs. other approaches
# The standard formula for the mean of a variable is:
# $\bar{x} = \frac{\sum{x}}{n}$
#
# The conventional, and simpler, approach in the differential privacy literature, is to:
#
# 1. compute a DP sum of the variable for the numerator
# 2. compute a DP count of the dataset rows for the denominator
# 3. take their ratio
#
# This is sometimes called a 'plug-in' approach, as we are plugging-in differentially private answers for each of the terms in the original formula, without any additional modifications, and using the resulting answer as our estimate while ignoring the noise processes of differential privacy. While this 'plug-in' approach does result in a differentially private value, the utility here is generally lower than the solution in SmartNoise. Because the number of terms summed in the numerator does not agree with the value in the denominator, the variance is increased and the resulting distribution becomes both biased and asymmetrical, which is visually noticeable in smaller samples. Here's an example:
# + pycharm={"is_executing": true}
n_sims = 1000
# history columns: [plug-in mean, SmartNoise (resize) mean] per simulation
history = np.zeros(shape=(n_sims, 2), dtype=float)
for i in range(n_sims):
    # conventional 'plug-in' estimator: single dp_mean with implementation="plug-in"
    with sn.Analysis() as analysis_plug_in:
        data = sn.Dataset(path = data_path, column_names = var_names)
        age = sn.to_float(data['age'])
        dp_mean = sn.dp_mean(
            data=sn.to_float(data['age']),
            privacy_usage={"epsilon": 1.0},
            implementation="plug-in",
            data_lower=0.,
            data_upper=100.)
    # NOTE(review): no explicit analysis_plug_in.release() before reading .value --
    # presumably exiting the context or the .value access triggers it; confirm.
    dp_plugin_mean = dp_mean.value
    # SmartNoise approach: DP count of rows, then resize + DP mean
    with sn.Analysis() as analysis_smartnoise:
        # load data
        data = sn.Dataset(path = data_path, column_names = var_names)
        age_dt = sn.to_float(data['age'])
        # get dp release of the size of the dataset
        dp_num_records = sn.dp_count(data= age_dt,
                                     privacy_usage={'epsilon': 0.5},
                                     lower=0,
                                     upper=10000
                                    )
        # get mean of age
        age_mean = sn.dp_mean(data = age_dt,
                              privacy_usage = {'epsilon': 0.5},
                              data_lower = 0.,
                              data_upper = 100.,
                              data_rows = dp_num_records # Here we're providing the DP release of n.
                             )
        analysis_smartnoise.release()
    dp_smartnoise_mean = age_mean.value
    # store simulation results
    history[i,0] = dp_plugin_mean
    history[i,1] = dp_smartnoise_mean
print("Plug-in mean privacy usage: {0}".format(analysis_plug_in.privacy_usage))
print("SmartNoise mean privacy usage: {0}".format(analysis_smartnoise.privacy_usage))
# + pycharm={"is_executing": true}
import seaborn as sns
# recompute the true (non-private) mean for the reference line
data = np.genfromtxt(data_path, delimiter=',', names=True)
age = list(data[:]['age'])
true_mean_age = statistics.mean(age)
fig, ax = plt.subplots()
# density of the SmartNoise (resize) releases vs the plug-in releases
sns.kdeplot(history[:, 1], fill=True, linewidth=3,
            label = 'SmartNoise Mean')
sns.kdeplot(history[:, 0], fill=True, linewidth=3,
            label = 'Plug-in Mean')
# dashed vertical line at the true dataset mean
ax.plot([true_mean_age,true_mean_age], [0,2], linestyle='--', color = 'forestgreen')
#plt.xlabel('DP Release of Age')
#plt.ylabel('Density')
leg = ax.legend()
# -
# We see that for the same privacy loss, the distribution of answers from SmartNoise's resizing approach to the mean is tighter around the true dataset value (thus lower in error) than the conventional plug-in approach.
#
# *Note, in these simulations, we've shown equal division of the epsilon for all constituent releases, but higher utility (lower error) can be generally gained by moving more of the epsilon into the sum, and using less in the count of the dataset rows, as in earlier examples.*
| analysis/unknown_dataset_size.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from whoosh.index import create_in
from whoosh.fields import *
import os
# Schema: title/path are stored (returned with hits); content is indexed only.
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)
# BUG FIX: the directory created ("out/index") and the directory passed to
# create_in ("out/indexdir") did not match, so create_in failed whenever
# "out/indexdir" did not already exist (Whoosh requires an existing dir).
# Use a single path for both, and makedirs so the "out/" parent is created too.
index_dir = "out/index"
if not os.path.exists(index_dir):
    os.makedirs(index_dir)
ix = create_in(index_dir, schema)
writer = ix.writer()
# +
from whoosh.qparser import QueryParser
# Index two sample documents; commit() flushes them to disk and closes the writer.
writer.add_document(title=u"First document", path=u"/a",
                    content=u"This is the first document we've added!")
writer.add_document(title=u"Second document", path=u"/b",
                    content=u"The second one is even more interesting!")
writer.commit()
# Search the "content" field; only stored fields (title, path) appear in hits.
with ix.searcher() as searcher:
    query = QueryParser("content", ix.schema).parse("first")
    results = searcher.search(query)
    print(results[0])
# -
# Same query again: searchers are cheap, one per search session is the pattern.
with ix.searcher() as searcher:
    query = QueryParser("content", ix.schema).parse("first")
    results = searcher.search(query)
    print(results[0])
# +
from __future__ import unicode_literals
from jieba.analyse import ChineseAnalyzer
# Chinese-aware tokenizer from jieba so CJK queries can match.
analyzer = ChineseAnalyzer()
schema = Schema(title=TEXT(stored=True), path=ID(stored=True),
                content=TEXT(stored=True, analyzer=analyzer))
## rewrite mode
# NOTE(review): os.mkdir fails if the parent "out/" does not exist -- confirm an
# earlier cell created it, or switch to os.makedirs.
if not os.path.exists("out/test"):
    os.mkdir("out/test")
idx = create_in("out/test", schema)
writer = idx.writer()
writer.add_document(
    title="first test-document",
    path="/c",
    content="This is the document for test, 水果和米饭."
)
writer.commit()
searcher = idx.searcher()
parser = QueryParser("content", schema=idx.schema)
# Probe a mix of Chinese and English keywords; highlights show matched snippets.
for keyword in ("水果","你","first", 'test',"中文","交换机","交换"):
    print("result of ",keyword)
    q = parser.parse(keyword)
    results = searcher.search(q)
    for hit in results:
        print(hit.highlights("content"))
    print("="*10)
# +
from __future__ import unicode_literals
from whoosh.index import open_dir
from whoosh.index import create_in
from whoosh.fields import *
from jieba.analyse import ChineseAnalyzer
from whoosh.qparser import QueryParser
analyzer = ChineseAnalyzer()
# NOTE(review): this schema object is unused here -- open_dir reuses the schema
# stored in the existing index at "out/test".
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT(stored=True, analyzer=analyzer))
## append mode
idx = open_dir("out/test")
writer = idx.writer()
writer.add_document(
    title="test-document-2",
    path="/b",
    content="This is the document for test, 水果和大蒜."
)
writer.commit()
searcher = idx.searcher()
parser = QueryParser("content", schema=idx.schema)
# Same probe keywords as the rewrite-mode cell, now matching both documents.
for keyword in ("水果","你","first", 'test',"中文","交换机","交换"):
    print("result of ",keyword)
    q = parser.parse(keyword)
    results = searcher.search(q)
    for hit in results:
        print(hit.highlights("content"))
    print("="*10)
# +
from __future__ import unicode_literals
from jieba.analyse import ChineseAnalyzer
from sagas.ofbiz.resources import ResourceDigester
# Parse an OFBiz i18n label file and index each label's translations.
rd=ResourceDigester()
resource=rd.process_resource(xml_file='data/i18n/SagasUiLabels.xml')
analyzer = ChineseAnalyzer()
# One document per label key; only the Chinese field gets the jieba analyzer.
schema = Schema(en=TEXT(stored=True),
                fr=TEXT(stored=True),
                key=ID(stored=True),
                zh=TEXT(stored=True, analyzer=analyzer))
## rewrite mode
out_dir='out/labels'
if not os.path.exists(out_dir):
    os.mkdir(out_dir)
idx = create_in(out_dir, schema)
writer = idx.writer()
# NOTE(review): assumes every property has 'en', 'zh' and 'fr' values --
# a missing locale would raise KeyError; confirm against the label file.
for key, prop in resource.properties.items():
    writer.add_document(
        key=key,
        en=prop.values['en'],
        zh=prop.values['zh'],
        fr=prop.values['fr']
    )
writer.commit()
searcher = idx.searcher()
parser = QueryParser("zh", schema=idx.schema)
for keyword in ("中文","组成部分","交换"):
    print("result of ",keyword)
    q = parser.parse(keyword)
    results = searcher.search(q)
    for hit in results:
        print(hit.highlights("zh"))
    print("="*10)
| notebook/procs-whoosh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Soccerstats Predictions v1.4
# The changelog from v1.3:
# * Implement stock `StandardScalar` from PySpark.
# * Clean `team_difference` column.
# * Implement `dist-keras` trainer and evaluator.
# ## A. Data Cleaning & Preparation
# ### 1. Read csv file
# load and cache data: one row per fixture with per-team aggregate stats
stat_df = sqlContext.read\
    .format("com.databricks.spark.csv")\
    .options(header = True)\
    .load("data/teamFixtures.csv")\
    .cache()
# +
# from pyspark.sql.functions import isnan, when, count, col
# count hyphen nulls ("-") per column
# stat_df.select([count(when(stat_df[c] == "-", c)).alias(c) for c in stat_df.columns]).show()
# -
# ### 2. Filter-out "gameFtScore" column values
# +
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
# replace non-"-" values with null: gameFtScore
# (only unplayed fixtures, marked "-", are kept; played ones are nulled and dropped)
nullify_ft_scores = udf(
    lambda row_value: None if row_value != "-" else row_value,
    StringType()
)
# replace "-" values with null: HTS_teamAvgOpponentPPG, ATS_teamAvgOpponentPPG
nullify_hyphen_cols = udf(
    lambda row_value: None if row_value == "-" else row_value,
    StringType()
)
stat_df = (stat_df.withColumn("gameFtScore", nullify_ft_scores(stat_df.gameFtScore)))
stat_df = (stat_df.withColumn("HTS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.HTS_teamAvgOpponentPPG))
           .withColumn("ATS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.ATS_teamAvgOpponentPPG))
          )
# drop Null values (any row nulled by either rule above)
stat_df = stat_df.dropna()
stat_df.select("gameFtScore", "HTS_teamAvgOpponentPPG", "ATS_teamAvgOpponentPPG").show(5)
print("Total rows: {}".format(stat_df.count()))
# -
# ### 3. Write-out new dataframe to Json
# +
# optional: save to file
# stat_df.coalesce(1).write.format('json').save('sstats_fixtures.json')
# -
# ### 4. Read fixtures Json to dataframe
# Load the scraped full-time scores keyed by fixture_id.
fx_df = spark.read.json('data/fixtures1.json')
fx_df.printSchema()
# ### 5. Encode "fixture_id" on stat_df dataframe
# +
import hashlib
from pyspark.sql.functions import array
def encode_string(value):
    """Return the hex SHA-1 digest of *value* (a unicode string)."""
    digest = hashlib.sha1(value.encode("utf-8"))
    return digest.hexdigest()
# add an encoded col to "stat_df"; fixture_id
# Deterministic SHA-1 of the identifying fields, used as the join key to fx_df.
# NOTE(review): fields are concatenated with no separator, so distinct tuples
# could in principle collide -- confirm acceptable.
fxcol_df = udf(
    lambda row_value: encode_string(u"".join([x for x in row_value])),
    StringType()
)
stat_df = (stat_df.withColumn("fixture_id", fxcol_df(array(
    "leagueName",
    "leagueDivisionName",
    "gamePlayDate",
    "gameHomeTeamName",
    "gameAwayTeamName"
))))
# -
# display some encoded fixtures
stat_df.select("fixture_id").show(5, False)
# ### 6. Concat the two dataframes: "stat_df" and "fx_df"
# +
from pyspark.sql.functions import col
# use "left-outer-join" to concat
# keep every stat row and attach ft_score where a fixture match exists
full_df = stat_df.alias("a")\
    .join(fx_df, stat_df.fixture_id == fx_df.fixture_id, "left_outer")\
    .select(*[col("a."+c) for c in stat_df.columns] + [fx_df.ft_score])
full_df.select("leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName", "ft_score").show(5, False)
# -
# ### 7. Assess damage on "ft_score " nulls
# +
# count nulls per column
# NOTE(review): the parameter name `col` shadows pyspark.sql.functions.col
# imported above -- harmless here, but worth renaming.
def count_null(df, col):
    """Return the number of rows where column `col` of `df` is null."""
    return df.where(df[col].isNull()).count()
print("Total rows: {}".format(full_df.count()))
print("Ft_score nulls: {}".format(count_null(full_df, "ft_score")))
# +
# drop null values in ft_Score (fixtures with no scraped score)
full_df = full_df.dropna()
print("Total rows: {}".format(full_df.count()))
print("Ft_score nulls: {}".format(count_null(full_df, "ft_score")))
# -
# ## B. Deep Learning
# ### 1. Clean data
# drop unnecessary columns (identifiers and leakage-prone fields; keep only features + ft_score)
ml_df = full_df.drop(
    "gameID", "gamePlayDate", "gamePlayTime", "gameHomeTeamName",
    "gameAwayTeamName", "gameHomeTeamID","gameAwayTeamID", "leagueName",
    "leagueDivisionName", "gameFtScore", "fixture_id"
)
# separate col types: double & string
# These lists drive the type casts below: dtype -> double, stype ("NN%" strings)
# -> fraction, itype -> int.
# double type features
dtype_features = [
    "leagueCompletion", "HTS_teamPosition", "HTS_teamGamesPlayed", "HTS_teamGamesWon",
    "HTS_teamGamesDraw", "HTS_teamGamesLost", "HTS_teamGoalsScored", "HTS_teamGoalsConceded",
    "HTS_teamPoints", "HTS_teamPointsPerGame", "HTS_teamPPGlast8", "HTS_homeGamesWon",
    "HTS_homeGamesDraw", "HTS_homeGamesLost", "HTS_homeGamesPlayed", "HTS_awayGamesWon",
    "HTS_awayGamesDraw", "HTS_awayGamesLost", "HTS_awayGamesPlayed", "HTS_teamPPGHome",
    "HTS_teamPPGAway", "HTS_teamAvgOpponentPPG", "HTS_homeGoalMargin_by1_wins",
    "HTS_homeGoalMargin_by1_losses", "HTS_homeGoalMargin_by2_wins", "HTS_homeGoalMargin_by2_losses",
    "HTS_homeGoalMargin_by3_wins", "HTS_homeGoalMargin_by3_losses", "HTS_homeGoalMargin_by4p_wins",
    "HTS_homeGoalMargin_by4p_losses", "HTS_awayGoalMargin_by1_wins", "HTS_awayGoalMargin_by1_losses",
    "HTS_awayGoalMargin_by2_wins", "HTS_awayGoalMargin_by2_losses", "HTS_awayGoalMargin_by3_wins",
    "HTS_awayGoalMargin_by3_losses", "HTS_awayGoalMargin_by4p_wins", "HTS_awayGoalMargin_by4p_losses",
    "HTS_totalGoalMargin_by1_wins", "HTS_totalGoalMargin_by1_losses", "HTS_totalGoalMargin_by2_wins",
    "HTS_totalGoalMargin_by2_losses", "HTS_totalGoalMargin_by3_wins", "HTS_totalGoalMargin_by3_losses",
    "HTS_totalGoalMargin_by4p_wins", "HTS_totalGoalMargin_by4p_losses", "HTS_homeGoalsScored",
    "HTS_homeGoalsConceded", "HTS_homeGoalsScoredPerMatch", "HTS_homeGoalsConcededPerMatch",
    "HTS_homeScored_ConcededPerMatch", "HTS_awayGoalsScored", "HTS_awayGoalsConceded",
    "HTS_awayGoalsScoredPerMatch", "HTS_awayGoalsConcededPerMatch", "HTS_awayScored_ConcededPerMatch",
    "ATS_teamPosition", "ATS_teamGamesPlayed", "ATS_teamGamesWon", "ATS_teamGamesDraw", "ATS_teamGamesLost",
    "ATS_teamGoalsScored", "ATS_teamGoalsConceded", "ATS_teamPoints", "ATS_teamPointsPerGame",
    "ATS_teamPPGlast8", "ATS_homeGamesWon", "ATS_homeGamesDraw", "ATS_homeGamesLost",
    "ATS_homeGamesPlayed", "ATS_awayGamesWon", "ATS_awayGamesDraw", "ATS_awayGamesLost",
    "ATS_awayGamesPlayed", "ATS_teamPPGHome", "ATS_teamPPGAway", "ATS_teamAvgOpponentPPG",
    "ATS_homeGoalMargin_by1_wins", "ATS_homeGoalMargin_by1_losses", "ATS_homeGoalMargin_by2_wins",
    "ATS_homeGoalMargin_by2_losses", "ATS_homeGoalMargin_by3_wins", "ATS_homeGoalMargin_by3_losses",
    "ATS_homeGoalMargin_by4p_wins", "ATS_homeGoalMargin_by4p_losses", "ATS_awayGoalMargin_by1_wins",
    "ATS_awayGoalMargin_by1_losses", "ATS_awayGoalMargin_by2_wins", "ATS_awayGoalMargin_by2_losses",
    "ATS_awayGoalMargin_by3_wins", "ATS_awayGoalMargin_by3_losses", "ATS_awayGoalMargin_by4p_wins",
    "ATS_awayGoalMargin_by4p_losses", "ATS_totalGoalMargin_by1_wins", "ATS_totalGoalMargin_by1_losses",
    "ATS_totalGoalMargin_by2_wins", "ATS_totalGoalMargin_by2_losses", "ATS_totalGoalMargin_by3_wins",
    "ATS_totalGoalMargin_by3_losses", "ATS_totalGoalMargin_by4p_wins", "ATS_totalGoalMargin_by4p_losses",
    "ATS_homeGoalsScored", "ATS_homeGoalsConceded", "ATS_homeGoalsScoredPerMatch", "ATS_homeGoalsConcededPerMatch",
    "ATS_homeScored_ConcededPerMatch", "ATS_awayGoalsScored", "ATS_awayGoalsConceded", "ATS_awayGoalsScoredPerMatch",
    "ATS_awayGoalsConcededPerMatch", "ATS_awayScored_ConcededPerMatch"
]
# string type features (percentages like "42%", converted to fractions below)
stype_features = [
    "HTS_teamCleanSheetPercent", "HTS_homeOver1_5GoalsPercent",
    "HTS_homeOver2_5GoalsPercent", "HTS_homeOver3_5GoalsPercent", "HTS_homeOver4_5GoalsPercent",
    "HTS_awayOver1_5GoalsPercent", "HTS_awayOver2_5GoalsPercent", "HTS_awayOver3_5GoalsPercent",
    "HTS_awayOver4_5GoalsPercent", "HTS_homeCleanSheets", "HTS_homeWonToNil", "HTS_homeBothTeamsScored",
    "HTS_homeFailedToScore", "HTS_homeLostToNil", "HTS_awayCleanSheets", "HTS_awayWonToNil",
    "HTS_awayBothTeamsScored", "HTS_awayFailedToScore", "HTS_awayLostToNil", "HTS_homeScored_ConcededBy_0",
    "HTS_homeScored_ConcededBy_1", "HTS_homeScored_ConcededBy_2", "HTS_homeScored_ConcededBy_3",
    "HTS_homeScored_ConcededBy_4", "HTS_homeScored_ConcededBy_5p", "HTS_homeScored_ConcededBy_0_or_1",
    "HTS_homeScored_ConcededBy_2_or_3", "HTS_homeScored_ConcededBy_4p", "HTS_awayScored_ConcededBy_0",
    "HTS_awayScored_ConcededBy_1", "HTS_awayScored_ConcededBy_2", "HTS_awayScored_ConcededBy_3",
    "HTS_awayScored_ConcededBy_4", "HTS_awayScored_ConcededBy_5p", "HTS_awayScored_ConcededBy_0_or_1",
    "HTS_awayScored_ConcededBy_2_or_3", "HTS_awayScored_ConcededBy_4p",
    "ATS_teamCleanSheetPercent", "ATS_homeOver1_5GoalsPercent", "ATS_homeOver2_5GoalsPercent",
    "ATS_homeOver3_5GoalsPercent", "ATS_homeOver4_5GoalsPercent", "ATS_awayOver1_5GoalsPercent",
    "ATS_awayOver2_5GoalsPercent", "ATS_awayOver3_5GoalsPercent", "ATS_awayOver4_5GoalsPercent",
    "ATS_homeCleanSheets", "ATS_homeWonToNil", "ATS_homeBothTeamsScored", "ATS_homeFailedToScore",
    "ATS_homeLostToNil", "ATS_awayCleanSheets", "ATS_awayWonToNil", "ATS_awayBothTeamsScored",
    "ATS_awayFailedToScore", "ATS_awayLostToNil", "ATS_homeScored_ConcededBy_0", "ATS_homeScored_ConcededBy_1",
    "ATS_homeScored_ConcededBy_2", "ATS_homeScored_ConcededBy_3", "ATS_homeScored_ConcededBy_4",
    "ATS_homeScored_ConcededBy_5p", "ATS_homeScored_ConcededBy_0_or_1", "ATS_homeScored_ConcededBy_2_or_3",
    "ATS_homeScored_ConcededBy_4p", "ATS_awayScored_ConcededBy_0", "ATS_awayScored_ConcededBy_1",
    "ATS_awayScored_ConcededBy_2", "ATS_awayScored_ConcededBy_3", "ATS_awayScored_ConcededBy_4",
    "ATS_awayScored_ConcededBy_5p", "ATS_awayScored_ConcededBy_0_or_1", "ATS_awayScored_ConcededBy_2_or_3",
    "ATS_awayScored_ConcededBy_4p"
]
# integer type features
itype_features = ["HTS_teamGoalsDifference", "ATS_teamGoalsDifference"]
# +
from pyspark.sql.types import DoubleType, IntegerType
# cast types to columns: doubles
ml_df = ml_df.select(*[col(c).cast("double").alias(c) for c in dtype_features] + stype_features + itype_features + [ml_df.ft_score])
# convert "HTS_teamGoalsDifference" & "ATS_teamGoalsDifference" to integer
int_udf = udf(
    lambda r: int(r),
    IntegerType()
)
# cast types to columns: integers (string values like "5"/"-3" parsed by int())
ml_df = ml_df.select(*[int_udf(col(col_name)).name(col_name) for col_name in itype_features] + stype_features + dtype_features + [ml_df.ft_score])
# convert percent cols to float (e.g. "42%" -> 0.42)
percent_udf = udf(
    lambda r: float(r.split("%")[0])/100,
    DoubleType()
)
# cast types to columns: strings
# Note each select reorders columns; the already-cast groups are carried through unchanged.
ml_df = ml_df.select(*[percent_udf(col(col_name)).name(col_name) for col_name in stype_features] + itype_features + dtype_features + [ml_df.ft_score])
# -
# add extra column; over/under
# Parses "H-A" scores; label is "over" when total goals >= 4 (i.e. over 3.5 goals).
over_under_udf = udf(
    lambda r: "over" if (int(r.split("-")[0]) + int(r.split("-")[1])) > 3 else "under",
    StringType()
)
ml_df = (ml_df.withColumn("over_under", over_under_udf(ml_df.ft_score)))
ml_df.select("ft_score", "over_under").show(5)
# drop "ft_score" (now encoded in the label; keeping it would leak the target)
ml_df = ml_df.drop("ft_score")
# ### 2. Some featurization
# +
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorIndexer, VectorAssembler
from pyspark.sql import Row
from pyspark.ml import Pipeline
# index the label; "over_under"
# StringIndexer maps the two string labels to 0.0/1.0 (most frequent gets 0.0).
si = StringIndexer(inputCol = "over_under", outputCol = "over_under_indx")
df_indexed = si\
    .fit(ml_df)\
    .transform(ml_df)\
    .drop("over_under")\
    .withColumnRenamed("over_under_indx", "over_under")
# -
df_indexed.select("HTS_teamGoalsDifference").show(5)
# +
from pyspark.ml.linalg import Vectors
from pyspark.sql import Row
feature_cols = dtype_features + stype_features + itype_features
# reorder so the label is the last column; r[:-1] below then picks all features
df_indexed = df_indexed[feature_cols + ["over_under"]]
# vectorize labels and features
row = Row("label", "features")
label_fts = df_indexed.rdd.map(
    lambda r: (row(r[-1], Vectors.dense(r[:-1])))
).toDF()
label_fts.show(5)
# -
label_fts.select("features").take(1)
# +
from pyspark.ml.feature import StandardScaler
# apply feature normalization to transform a feature to have
# a mean of 0 and standard deviation 1.
label_fts = StandardScaler(
    inputCol="features",
    outputCol="features_norm",
    withStd=True,
    withMean=True)\
    .fit(label_fts)\
    .transform(label_fts)
# +
from distkeras.transformers import OneHotTransformer
nb_classes = 2 # over/under
nb_features = len(feature_cols)
# one-hot-encode the label (indexed label -> length-2 vector for softmax output)
label_fts = OneHotTransformer(
    output_dim=nb_classes,
    input_col="label",
    output_col="label_indx")\
    .transform(label_fts)
# -
label_fts.select("label", "label_indx").show(5)
# +
# split train/test values (random 80/20)
train, test = label_fts.randomSplit([0.8, 0.2])
# split train/validate values (90/10 of the training portion)
train, validate = train.randomSplit([0.9, 0.1])
print("Train shape: '{}, {}'".format(train.count(), len(train.columns)))
print("Test shape: '{}, {}'".format(test.count(), len(test.columns)))
print("Validate shape: '{}, {}'".format(validate.count(), len(validate.columns)))
# -
# ### 3. Compose Neural-network
# get some Keras essentials
from keras.models import Sequential
from keras.layers import Dense, Dropout
# build model: dense 60 -> dropout 0.4 -> dense 50 -> softmax over the 2 classes
model = Sequential()
model.add(Dense(60, activation="relu", input_dim=nb_features))
model.add(Dropout(0.4))
model.add(Dense(50, activation="relu"))
# output layer
model.add(Dense(nb_classes, activation="softmax"))
# +
from distkeras.evaluators import AccuracyEvaluator
from distkeras.predictors import ModelPredictor
from distkeras.transformers import LabelIndexTransformer
# model evaluation function
def evaluate_accuracy(model, test_set):
    """Return the accuracy of *model* on *test_set*.

    Predicts with the given Keras model, converts the prediction vector to an
    indexed label, and scores it against the "label" column.
    """
    # Allocate a Distributed Keras Accuracy evaluator.
    evaluator = AccuracyEvaluator(prediction_col="prediction_index", label_col="label")
    # Clear the prediction column from the testset.
    test_set = test_set.select("features_norm", "label", "label_indx")
    # Apply a prediction from a trained model.
    # BUG FIX: use the `model` argument rather than the global `trained_model`
    # (defined later in the notebook), so the function evaluates the model it
    # was actually given. Dead timing code (import time / s = time.time()) removed.
    predictor = ModelPredictor(keras_model=model, features_col="features_norm")
    test_set = predictor.predict(test_set)
    # Allocate an index transformer.
    index_transformer = LabelIndexTransformer(output_dim=nb_classes)
    # Transform the prediction vector to an indexed label.
    test_set = index_transformer.transform(test_set)
    # Fetch the score.
    return evaluator.evaluate(test_set)
# -
def add_result(trainer, accuracy, dt):
    """Record accuracy and training time for *trainer* in the global
    `results` dict and print a short summary."""
    global results
    # Store the metrics under the trainer's key.
    results[trainer] = {'accuracy': accuracy, 'time_spent': dt}
    # Display the metrics.
    for line in ("Trainer: " + str(trainer),
                 " - Accuracy: " + str(accuracy),
                 " - Training time: " + str(dt)):
        print(line)
# Dictionary containing our benchmarking results in the form of
# a mapping from distributed trainer instance --> metric name --> metric value
results = {}
# Number of training epochs to run for each Trainer
TRAIN_EPOCHS = 1
# +
from distkeras.trainers import SingleTrainer
# Baseline: train on a single worker with adagrad + categorical cross-entropy.
trainer = SingleTrainer(keras_model=model, worker_optimizer="adagrad",
                        loss="categorical_crossentropy", features_col="features_norm",
                        label_col="label_indx", num_epoch=TRAIN_EPOCHS, batch_size=32)
trained_model = trainer.train(train)
# -
# Fetch the evaluation metrics.
accuracy = evaluate_accuracy(trained_model, test)
dt = trainer.get_training_time()
# Add the metrics to the results.
add_result('single', accuracy, dt)
| _archived/sstats/sstats-v1.4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
using Pkg; Pkg.activate("/home/roscar/work/cfgrib-project/cfgrib.jl")
using DataStructures
# +
# GRIB keys promoted to file-level (global) attributes.
const GLOBAL_ATTRIBUTES_KEYS = ["edition", "centre", "centreDescription", "subCentre"]
# GRIB keys attached to each data variable as attributes.
const DATA_ATTRIBUTES_KEYS = [
    "paramId",
    "shortName",
    "units",
    "name",
    "cfName",
    "cfVarName",
    "dataType",
    "missingValue",
    "numberOfPoints",
    "totalNumber",
    "numberOfDirections",
    "numberOfFrequencies",
    "typeOfLevel",
    "NV",
    "stepUnits",
    "stepType",
    "gridType",
    "gridDefinitionDescription",
]
# Geometry keys to read for each supported gridType value.
const GRID_TYPE_MAP = Dict(
    "regular_ll" => [
        "Nx",
        "iDirectionIncrementInDegrees",
        "iScansNegatively",
        "longitudeOfFirstGridPointInDegrees",
        "longitudeOfLastGridPointInDegrees",
        "Ny",
        "jDirectionIncrementInDegrees",
        "jPointsAreConsecutive",
        "jScansPositively",
        "latitudeOfFirstGridPointInDegrees",
        "latitudeOfLastGridPointInDegrees",
    ],
    "rotated_ll" => [
        "Nx",
        "Ny",
        "angleOfRotationInDegrees",
        "iDirectionIncrementInDegrees",
        "iScansNegatively",
        "jDirectionIncrementInDegrees",
        "jPointsAreConsecutive",
        "jScansPositively",
        "latitudeOfFirstGridPointInDegrees",
        "latitudeOfLastGridPointInDegrees",
        "latitudeOfSouthernPoleInDegrees",
        "longitudeOfFirstGridPointInDegrees",
        "longitudeOfLastGridPointInDegrees",
        "longitudeOfSouthernPoleInDegrees",
    ],
    "reduced_ll" => [
        "Ny",
        "jDirectionIncrementInDegrees",
        "jPointsAreConsecutive",
        "jScansPositively",
        "latitudeOfFirstGridPointInDegrees",
        "latitudeOfLastGridPointInDegrees",
    ],
    "regular_gg" => [
        "Nx",
        "iDirectionIncrementInDegrees",
        "iScansNegatively",
        "longitudeOfFirstGridPointInDegrees",
        "longitudeOfLastGridPointInDegrees",
        "N",
        "Ny",
    ],
    "rotated_gg" => [
        "Nx",
        "Ny",
        "angleOfRotationInDegrees",
        "iDirectionIncrementInDegrees",
        "iScansNegatively",
        "jPointsAreConsecutive",
        "jScansPositively",
        "latitudeOfFirstGridPointInDegrees",
        "latitudeOfLastGridPointInDegrees",
        "latitudeOfSouthernPoleInDegrees",
        "longitudeOfFirstGridPointInDegrees",
        "longitudeOfLastGridPointInDegrees",
        "longitudeOfSouthernPoleInDegrees",
        "N",
    ],
    "lambert" => [
        "LaDInDegrees",
        "LoVInDegrees",
        "iScansNegatively",
        "jPointsAreConsecutive",
        "jScansPositively",
        "latitudeOfFirstGridPointInDegrees",
        "latitudeOfSouthernPoleInDegrees",
        "longitudeOfFirstGridPointInDegrees",
        "longitudeOfSouthernPoleInDegrees",
        "DyInMetres",
        "DxInMetres",
        "Latin2InDegrees",
        "Latin1InDegrees",
        "Ny",
        "Nx",
    ],
    "reduced_gg" => ["N", "pl"],
    "sh" => ["M", "K", "J"],
)
# De-duplicated union of all grid geometry keys.
const GRID_TYPE_KEYS = unique(vcat(values(GRID_TYPE_MAP)...))
# Keys that become header dimensions/coordinates of the output dataset.
const ENSEMBLE_KEYS = ["number"]
const VERTICAL_KEYS = ["level"]
const DATA_TIME_KEYS = ["dataDate", "dataTime", "endStep"]
const ALL_REF_TIME_KEYS = ["time", "step", "valid_time", "verifying_time", "forecastMonth"]
const SPECTRA_KEYS = ["directionNumber", "frequencyNumber"]
const ALL_HEADER_DIMS = vcat(ENSEMBLE_KEYS, VERTICAL_KEYS, DATA_TIME_KEYS, ALL_REF_TIME_KEYS, SPECTRA_KEYS)
# Sorted union of every key the reader may request from a GRIB message.
const ALL_KEYS = sort(unique(vcat(GLOBAL_ATTRIBUTES_KEYS, DATA_ATTRIBUTES_KEYS, GRID_TYPE_KEYS, ALL_HEADER_DIMS)))
# -
# Scratch cells: re-evaluate the same expressions as the consts above so the
# REPL displays their values (the assignment shadows the const binding).
ALL_HEADER_DIMS = vcat(ENSEMBLE_KEYS, VERTICAL_KEYS, DATA_TIME_KEYS, ALL_REF_TIME_KEYS, SPECTRA_KEYS)
sort(unique(vcat(GLOBAL_ATTRIBUTES_KEYS, DATA_ATTRIBUTES_KEYS, GRID_TYPE_KEYS, ALL_HEADER_DIMS)))
vcat(values(GRID_TYPE_MAP)...) |> unique
vcat(GLOBAL_ATTRIBUTES_KEYS, DATA_ATTRIBUTES_KEYS, GRID_TYPE_KEYS, ALL_HEADER_DIMS) |> unique |> sort
| dev/scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Keithleene/CPEN-21A-ECE-2-1/blob/main/Operations_and_Expressions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="a9G8w2cs3sGT"
# ##Boolean Operator
# + id="fCR9vV4Rb1Jt" colab={"base_uri": "https://localhost:8080/"} outputId="ad401b6d-836b-4115-b952-0b1234ea334a"
x=1
y=2
print(x>y)     # False
print(10>11)   # False
print(10==10)  # True
print(10!=11)  # True
# + colab={"base_uri": "https://localhost:8080/"} id="q1fyWXKK4qFF" outputId="a3f778bc-e305-4955-92e7-f6740043682a"
#using bool()function
print(bool('Hi'))    # True  -- non-empty string
print(bool(15))      # True  -- non-zero number
print(bool(True))    # True
print(bool(False))   # False
print(bool(None))    # False
print(bool(0))       # False
print(bool([]))      # False -- empty container
# + [markdown] id="1piWagxd5ZWs"
# #Functions can return Boolean
# + colab={"base_uri": "https://localhost:8080/"} id="uNFdenbT5eVD" outputId="2addc56f-6a86-4bcc-af25-3baa97e5ac55"
# A function's return value can be a boolean.
def myFunction():return False
print(myFunction())
# + colab={"base_uri": "https://localhost:8080/"} id="M3ZUgXL155uA" outputId="c794fee7-a20a-4146-fa71-d2de68a80ed3"
def myFunction():return False
# Branch on the boolean return value; prints "No" since it returns False.
if myFunction():
  print("Yes!")
else:
  print("No")
# + [markdown] id="UjOD-v968Qc5"
# #You Try!
# + colab={"base_uri": "https://localhost:8080/"} id="Wba6PbuD6ZLE" outputId="dd051505-7c1d-4b80-dbaf-3b9d974df52d"
a=6
b=7
print(a==b)
print(a!=a)  # NOTE(review): always False; `a!=b` was probably intended -- confirm
# + [markdown] id="jL25j9Sx8Z1J"
# #Arithmetic Operators
# + colab={"base_uri": "https://localhost:8080/"} id="nG0j6OCX8dqu" outputId="6777eade-9d4c-40a7-cf61-a44d1b2f52c4"
print(10+5)    # 15
print(10-5)    # 5
print(10*5)    # 50
print(10/5)    # 2.0 -- true division returns float
print(10%5)#modulo division, remainder
print(10//5)#floor division
print(10//3)#floor division
print(10%3)#3*3=9+1
# + [markdown] id="q3C6nCFo9bpA"
# ##Bitwise Operators
# + colab={"base_uri": "https://localhost:8080/"} id="72B6ma079jCl" outputId="7f0ff3ac-3b9e-48d9-a885-b895766067b6"
a=60 #0011 1100
b=13 #0000 1101
print(a&b)  # 12: 0000 1100
print(a|b)  # 61: 0011 1101
print(a^b)  # 49: 0011 0001
print(~a)   # -61 (two's complement)
print(a<<1) #0111 1000
print(a<<2) #1111 0000
print(b>>1) #0000 0110 (= 6; original comment "1000 0110" was wrong)
print(b>>2) #0000 0011 (= 3; bits shifted out: 01)
# + [markdown] id="hfkh3CSpBziJ"
# ##Python Assignment Operators
# + colab={"base_uri": "https://localhost:8080/"} id="Vi5cLkMIB4yg" outputId="fd0f8d48-a64c-401c-ed6a-31c9150b2f0e"
a+=3 #Same As a = a + 3
#Same As a = 60 + 3, a = 63
# NOTE: relies on `a` still being 60 from the bitwise-operators cell above.
print(a)
# + [markdown] id="7CX0fj1QClID"
# ##Logical Operators
# + colab={"base_uri": "https://localhost:8080/"} id="8pLOoMK2CoSa" outputId="01d11685-77d8-4d56-9715-a86cd0e146e8"
#and logical operator
a=True
b=False
print(a and b)       # False
print(not(a and b))  # True
print(a or b)        # True
print(not(a or b))   # False
# + colab={"base_uri": "https://localhost:8080/"} id="wXV7S_QHDOOu" outputId="8965d261-88a7-4c20-c8e7-fad2ab094748"
# `is` compares object identity, not equality.
print(a is b)        # False -- True and False are different objects
print( a is not b)   # True
| Operations_and_Expressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <p style="text-align:center"><img src="https://github.com/WindIO-Bachelorthesis/Shortterm_Forecast/blob/main/images/header.PNG?raw=true"></p>
# <p style="text-align:center"><img src="https://camo.githubusercontent.com/4a300a0747dc7b5c69a83cc94ff2e2b5ad1f0f937ed3d142b171c0d4a6f8e164/68747470733a2f2f7777772e67652e636f6d2f72656e657761626c65656e657267792f73697465732f64656661756c742f66696c65732f323032302d30312f6f6e73686f72652d6865726f352e6a7067"/></p>
#
# <h1 align="center">This script transfers the preprocessed data to the CE4IoT WindIO instance.</h1>
import ast
import pandas as pd
import requests
import json
# #### Please fill the needed information.
pre_path = "./transfer/measurement_windio_msb-0004-a_2021-10-21.csv"
json_path = "./transfer/measurement_windio_msb-0004-a_2021-10-21.json"
instance_url = "https://windio-contact.northeurope.cloudapp.azure.com:8443/"
user_name = 'caddok'
password = '<PASSWORD>'
asset_name = "AS-000003-0003"
# NOTE(review): the time-series name says msb-0002 while the CSV/JSON paths
# say msb-0004 — confirm this is intentional before transferring.
time_series_name = "measurement_windio_msb-0002-a_2021-10-21"
# #### Execute this cell to transfer your data to the CE4IoT instance.
# +
# Convert the measurement CSV to a JSON records file on disk, then load it back.
df = pd.read_csv(pre_path, parse_dates=["time"])
df["time"] = pd.to_datetime(df["time"], unit='s')
# NOTE(review): to_json with a path argument writes the file and returns None,
# so `df` is rebound to None here — harmless, but the DataFrame is lost.
df = df.to_json(json_path, orient = 'records')
with open(json_path) as json_file:
    json_data = json.load(json_file)
def get_csrf_token(session):
    """Return the server's CSRF token cookie, priming the session if needed.

    If the session has no 'CSRFToken' cookie yet, a GET to the session-info
    endpoint is issued first so the server sets one.
    """
    token = session.cookies.get('CSRFToken')
    if token is not None:
        return token
    # First contact with the server sets the CSRF cookie on the session.
    session.get(instance_url + "/server/sessioninfo")
    return session.cookies.get('CSRFToken')
# Authenticate, obtain a CSRF token, POST the records to the asset's time
# series, then terminate the server-side session.
with requests.Session() as session:
    session.auth = (user_name, password)
    headers = {
        "X-Csrf-Token": get_csrf_token(session)
    }
    # NOTE(review): only the first 10 records are sent (json_data[:10]) —
    # presumably a smoke test; remove the slice for a full transfer.
    resp = session.post(instance_url + "/iot_api/v1/assets/" + asset_name + "/time_series/" + time_series_name + "/data", json=json_data[:10], headers=headers)
    print(resp)
    # Explicitly end the server session.
    resp = session.get(instance_url + '/server/__quit__')
| data/Post_CE4IoT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def iter_example():
    """Generator demo: yields 1, 2, 3, 1, printing markers along the way.

    "one" is printed before the first value is yielded, "X" between the
    second and third values.
    """
    print("one")
    for value in (1, 2):
        yield value
    print("X")
    for value in (3, 1):
        yield value
# Calling a generator function only creates the iterator; the body does not
# run until the first next().
it = iter_example()
# +
next(it)  # prints "one", yields 1
# -
next(it)  # yields 2
next(it)  # prints "X", yields 3
next(it)  # yields the final value (1)
next(it)  # generator exhausted: raises StopIteration (intentional demo)
def iter_example_loop(X):
    """Yield the integers 0 .. X-1 one at a time (lazy range)."""
    yield from range(X)
it2 = iter_example_loop(5)
print(next(it2))  # 0
print(next(it2))  # 1
print(next(it2))  # 2
import random
from mxnet import autograd, nd
def data_iter(batch_size, num_examples):
    """Yield minibatches of example indices, as NDArrays, in random order.

    The last batch may be smaller than `batch_size` when `num_examples`
    is not a multiple of it.
    """
    order = list(range(num_examples))
    # The examples are read at random, in no particular order
    random.shuffle(order)
    for start in range(0, num_examples, batch_size):
        stop = min(start + batch_size, num_examples)
        yield nd.array(order[start:stop])
# 1000 examples in batches of 10: each next() yields one batch of indices.
it_data = data_iter(10,1000)
next(it_data)
next(it_data)
| notebooks/yield_example1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
LICENSE MIT
2021
<NAME>
Website : http://www.covidtracker.fr
Mail : <EMAIL>
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphes.
I'm currently cleaning the code, please ask me if something is not clear enough.
The charts are exported to 'charts/images/france'.
Data is download to/imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
import pandas as pd
import numpy as np
import cv2
import plotly.graph_objects as go
import france_data_management as data
import plotly
# Repository root, relative to this script's location.
PATH = '../../'
from datetime import datetime
import locale
# French locale so strftime('%B') renders month names in French.
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
# Refresh the raw data files from data.gouv.fr.
data.download_data()
data.download_data_hosp_fra_clage()
# Vaccination coverage per age class and département (Améli open data).
df_ameli = pd.read_csv("https://datavaccin-covid.ameli.fr/explore/dataset/donnees-vaccination-par-tranche-dage-type-de-vaccin-et-departement/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B", sep=";")
# Keep only: all ages combined, latest date, per-département rows, all vaccines.
df_ameli_filtre = df_ameli[df_ameli["classe_age"]=="TOUT_AGE"]
df_ameli_filtre = df_ameli_filtre[df_ameli_filtre["date"]==df_ameli_filtre["date"].max()]
df_ameli_filtre = df_ameli_filtre[df_ameli_filtre["libelle_departement"] != "FRANCE"]
df_ameli_filtre = df_ameli_filtre[df_ameli_filtre["type_vaccin"] == "Tout vaccin"]
df_a_vacsi_a_france = data.import_data_vacsi_a_fra()
df_hosp_fra_clage = data.import_data_hosp_fra_clage()
clage_spf = pd.read_csv(PATH+"data/france/clage_spf.csv", sep=";")
#df_a_vacsi_a_france = df_a_vacsi_a_france.merge(clage_spf, left_on="clage_vacsi", right_on="code_spf")
df_new = data.import_data_new()
df_tests_viros = data.import_data_tests_viros()
# cl_age90 == 0 is the all-ages aggregate row.
df_tests_viros = df_tests_viros[df_tests_viros["cl_age90"] == 0]
#df_tests_viros["taux_incid"] = df_tests_viros["P"].rolling(window=7).sum()
#df_tests_viros = df_tests_viros[df_tests_viros["jour"] == df_tests_viros["jour"].max()]
df_tests_viros
# +
from sklearn import datasets, linear_model

# Scatter chart: one point per département — y = latest 7-day hospital-death
# incidence, x = first-dose vaccination coverage — plus a linear regression
# fitted over all départements.
fig = go.Figure()
xes = []
yes = []
for dep in df_ameli_filtre["departement_residence"]:
    if dep in df_tests_viros["dep"].values:
        df_tests_viros_dep = df_tests_viros[df_tests_viros["dep"] == dep]
        # NOTE(review): writing into a filtered slice triggers pandas'
        # SettingWithCopyWarning; a .copy() before mutation would be safer.
        df_tests_viros_dep["taux_incid"] = df_tests_viros_dep["P"].rolling(window=7).sum() / df_tests_viros_dep["pop"] * 100000
        df_new_dep = df_new[df_new["dep"] == dep]
        df_new_dep["incid_dc"] = df_new_dep["incid_dc"].rolling(window=7).sum() / df_tests_viros_dep["pop"].values[-1] * 100000
        #yes.append(df_new_dep["incid_hosp"].values[-1])
        # NOTE(review): y uses incid_dc (deaths) while the title below says
        # "Admissions à l'hôpital" — confirm which metric is intended.
        yes.append(df_new_dep["incid_dc"].values[-1])
        #xes.append(df_tests_viros_dep["taux_incid"].values[-10])
        xes.append(df_ameli_filtre[df_ameli_filtre["departement_residence"]==dep]["taux_cumu_1_inj"].values[-1]*100)
        # One single-point trace per département so each keeps its hover text.
        fig.add_trace(go.Scatter(
            y=[yes[-1]],
            x=[xes[-1]], #[df_tests_viros_dep["taux_incid"].values[-1]], #
            showlegend=False,
            text=dep,
            line=dict(color="red", width=4)
        ))

# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(np.array(xes).reshape(-1, 1), np.array(yes).reshape(-1, 1))
y_pred = regr.predict(np.array([0, 70]).reshape(-1, 1))
score = regr.score(np.array(xes).reshape(-1, 1), np.array(yes).reshape(-1, 1))
a = regr.coef_[0][0]
b = regr.intercept_[0]
# The string literals below are disabled trace blocks kept for reference.
"""fig.add_trace(go.Scatter(
    x=[0, 70],
    y=[y[0] for y in y_pred],
    mode="lines",
    marker_color="black",
    line=dict(dash="dot"),
    opacity=0.5,
    text="Corrélation",
    showlegend=False
))"""
"""fig.add_trace(go.Scatter(
    x=[0, 500],
    y=[0, 50],
    mode="lines",
    marker_color="red",
    line=dict(dash="dot"),
    opacity=0.5,
    text="Corrélation",
    showlegend=False
))
fig.add_trace(go.Scatter(
    x=[0, 500],
    y=[0, 25],
    mode="lines",
    marker_color="orange",
    line=dict(dash="dot"),
    opacity=0.5,
    text="Corrélation",
    showlegend=False
))
fig.add_trace(go.Scatter(
    x=[0, 500],
    y=[0, 12,5],
    mode="lines",
    marker_color="green",
    line=dict(dash="dot"),
    opacity=0.5,
    text="Corrélation",
    showlegend=False
))"""
fig.update_layout(
    title=dict(
        y=0.92, x=0.5,
        font = dict(
            size=20, color="black"),
        text="Admissions à l'hôpital en fonction de la couverture vaccinale"),
    xaxis=dict(
        title="<b>Taux de vaccination</b> (au moins une dose)",
        ticksuffix=" %"
    ),
    yaxis=dict(
        title="<b>Admissions à l'hôpital</b> pour 100k hab.",
        ticksuffix=""
    ),
    annotations = [
        dict(
            x=0.5,
            y=1.07,
            xref='paper',
            yref='paper',
            font=dict(color="black"),
            # NOTE(review): the date is hard-coded ("02 août") and the extra
            # "" argument to .format() is unused.
            text='Par département de résidence. Date : {}. Données : Améli - Santé publique France. Auteur : @guillaumerozier covidtracker.fr.'.format("02 août", ""),
            showarrow = False
        )]
)
# Regression equation and R² printed in the chart corner.
fig.add_annotation(x=4, y=0.1,
    text="y = {} x + {} ; R2 = {}".format(round(a, 2), round(b, 2), round(score, 2)),
    font=dict(size=8),
    showarrow=False,
    yshift=10)
fig.write_image(PATH + "images/charts/france/cas_vaccination_dep_comp.jpeg", scale=2, width=900, height=600)
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/cas_vaccination_dep_comp.html', auto_open=False)
# -
# +
# [80+ age group] hospitalised patients (7-day mean, left axis, red) vs.
# share fully vaccinated (right axis, blue).
df_a_vacsi_a_france_80 = df_a_vacsi_a_france[df_a_vacsi_a_france.clage_vacsi==80]
# NOTE(review): cl_age90 >= 89 keeps the 89 and 90 class codes — presumably
# 80-89 and 90+ in the SPF coding; confirm.
df_hosp_fra_clage_80 = df_hosp_fra_clage[df_hosp_fra_clage.cl_age90 >= 89].groupby(["jour"]).sum().reset_index()
fig = go.Figure()
fig.add_trace(go.Scatter(
    x=df_hosp_fra_clage_80.jour,
    y=df_hosp_fra_clage_80.hosp.rolling(window=7).mean(),
    showlegend=False,
    line=dict(color="red", width=4)
))
fig.add_trace(go.Scatter(
    x=df_a_vacsi_a_france_80.jour,
    # 4156974 is presumably the French 80+ population — TODO confirm source.
    y=df_a_vacsi_a_france_80.n_cum_complet/4156974*100,
    line=dict(width=4, color="#1f77b4"),
    showlegend=False,
    yaxis="y2"
))
fig.update_layout(
    title=dict(
        y=0.90, x=0.5,
        font = dict(
            size=20, color="black"),
        text="<b>[+ de 80 ans] <span style='color:red;'>personnes hospitalisées</span> et <span style='color:#1f77b4;'>vaccinées</span></b>"),
    yaxis=dict(
        title="<b>Personnes hospitalisées</b>",
        titlefont=dict(
            color="red"
        ),
        tickfont=dict(
            color="red"
        )
    ),
    yaxis2=dict(
        range=[0, 100],
        title="<b>% vaccinés</b> (2 doses)",
        titlefont=dict(
            color="#1f77b4"
        ),
        ticksuffix=" %",
        tickfont=dict(
            color="#1f77b4"
        ),
        anchor="free",
        overlaying="y",
        side="right",
        position=1
    ),
    annotations = [
        dict(
            x=0.5,
            y=1.07,
            xref='paper',
            yref='paper',
            font=dict(color="black"),
            text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier covidtracker.fr.'.format(datetime.strptime(max(df_hosp_fra_clage_80.jour), '%Y-%m-%d').strftime('%d %B %Y')),
            showarrow = False
        )]
)
fig.write_image(PATH + "images/charts/france/hosp_vacsi_p80.jpeg", scale=2, width=800, height=500)
# NOTE(review): the HTML export is named dc_vacsi_p80 while the image is
# hosp_vacsi_p80 — likely a copy-paste slip; confirm the intended name.
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/dc_vacsi_p80.html', auto_open=False)
# +
# [0-79 age group] hospitalised patients (7-day mean, left axis, red) vs.
# share fully vaccinated (right axis, blue).
df_a_vacsi_a_france_80 = df_a_vacsi_a_france[df_a_vacsi_a_france.clage_vacsi!=80].groupby(["jour"]).sum().reset_index()
df_hosp_fra_clage_80 = df_hosp_fra_clage[df_hosp_fra_clage.cl_age90 < 89].groupby(["jour"]).sum().reset_index()
fig = go.Figure()
fig.add_trace(go.Scatter(
    x=df_hosp_fra_clage_80.jour,
    y=df_hosp_fra_clage_80.hosp.rolling(window=7).mean(),
    showlegend=False,
    line=dict(color="red", width=4)
))
fig.add_trace(go.Scatter(
    x=df_a_vacsi_a_france_80.jour,
    # Denominator: presumably total population minus the 80+ population.
    y=df_a_vacsi_a_france_80.n_cum_complet/(66990000-4156974)*100,
    line=dict(width=4, color="#1f77b4"),
    showlegend=False,
    yaxis="y2"
))
fig.update_layout(
    title=dict(
        y=0.90, x=0.5,
        font = dict(
            size=20, color="black"),
        text="<b>[0 - 79 ans] <span style='color:red;'>personnes hospitalisées</span> et <span style='color:#1f77b4;'>vaccinées</span></b>"),
    yaxis=dict(
        title="<b>Personnes hospitalisées</b>",
        titlefont=dict(
            color="red"
        ),
        tickfont=dict(
            color="red"
        )
    ),
    yaxis2=dict(
        range=[0, 100],
        title="<b>% vaccinés</b> (2 doses)",
        titlefont=dict(
            color="#1f77b4"
        ),
        ticksuffix=" %",
        tickfont=dict(
            color="#1f77b4"
        ),
        anchor="free",
        overlaying="y",
        side="right",
        position=1
    ),
    annotations = [
        dict(
            x=0.5,
            y=1.07,
            xref='paper',
            yref='paper',
            font=dict(color="black"),
            text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier covidtracker.fr.'.format(datetime.strptime(max(df_hosp_fra_clage_80.jour), '%Y-%m-%d').strftime('%d %B %Y')),
            showarrow = False
        )]
)
fig.write_image(PATH + "images/charts/france/hosp_vacsi_m80.jpeg", scale=2, width=800, height=500)
# NOTE(review): HTML name dc_vacsi_m80 vs. image hosp_vacsi_m80 — confirm.
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/dc_vacsi_m80.html', auto_open=False)
# +
# [80+ age group] daily hospital deaths (7-day mean of the cumulative-count
# diff, left axis, red) vs. share with at least one dose (right axis, blue).
df_a_vacsi_a_france_80 = df_a_vacsi_a_france[df_a_vacsi_a_france.clage_vacsi==80]
df_hosp_fra_clage_80 = df_hosp_fra_clage[df_hosp_fra_clage.cl_age90 >= 89].groupby(["jour"]).sum().reset_index()
fig = go.Figure()
fig.add_trace(go.Scatter(
    x=df_hosp_fra_clage_80.jour,
    # dc is cumulative, so diff() gives daily deaths before smoothing.
    y=df_hosp_fra_clage_80.dc.diff().rolling(window=7).mean(),
    showlegend=False,
    line=dict(color="red", width=4)
))
fig.add_trace(go.Scatter(
    x=df_a_vacsi_a_france_80.jour,
    y=df_a_vacsi_a_france_80.n_cum_dose1/4156974*100,
    line=dict(width=4, color="#1f77b4"),
    showlegend=False,
    yaxis="y2"
))
fig.update_layout(
    title=dict(
        y=0.90, x=0.5,
        font = dict(
            size=20, color="black"),
        text="<b>[+ de 80 ans] <span style='color:red;'>décès hospitaliers</span> et <span style='color:#1f77b4;'>vaccinations</span></b>"),
    yaxis=dict(
        title="<b>Décès hospitaliers</b>",
        titlefont=dict(
            color="red"
        ),
        tickfont=dict(
            color="red"
        )
    ),
    yaxis2=dict(
        range=[0, 100],
        title="<b>% vaccinés</b> (au moins 1 dose)",
        titlefont=dict(
            color="#1f77b4"
        ),
        ticksuffix=" %",
        tickfont=dict(
            color="#1f77b4"
        ),
        anchor="free",
        overlaying="y",
        side="right",
        position=1
    ),
    annotations = [
        dict(
            x=0.5,
            y=1.07,
            xref='paper',
            yref='paper',
            font=dict(color="black"),
            text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier covidtracker.fr.'.format(datetime.strptime(max(df_hosp_fra_clage_80.jour), '%Y-%m-%d').strftime('%d %B %Y')),
            showarrow = False
        )]
)
fig.write_image(PATH + "images/charts/france/dc_vacsi_p80.jpeg", scale=2, width=800, height=500)
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/dc_vacsi_p80.html', auto_open=False)
# +
# [0-79 age group] daily hospital deaths (7-day mean, left axis, red) vs.
# share with at least one dose (right axis, blue).
df_a_vacsi_a_france_80 = df_a_vacsi_a_france[df_a_vacsi_a_france.clage_vacsi!=80].groupby(["jour"]).sum().reset_index()
df_hosp_fra_clage_80 = df_hosp_fra_clage[df_hosp_fra_clage.cl_age90 < 89].groupby(["jour"]).sum().reset_index()
fig = go.Figure()
fig.add_trace(go.Scatter(
    x=df_hosp_fra_clage_80.jour,
    y=df_hosp_fra_clage_80.dc.diff().rolling(window=7).mean(),
    showlegend=False,
    line=dict(color="red", width=4)
))
fig.add_trace(go.Scatter(
    x=df_a_vacsi_a_france_80.jour,
    y=df_a_vacsi_a_france_80.n_cum_dose1/(66990000-4156974)*100,
    line=dict(width=4, color="#1f77b4"),
    showlegend=False,
    yaxis="y2"
))
fig.update_layout(
    title=dict(
        y=0.90, x=0.5,
        font = dict(
            size=20, color="black"),
        text="<b>[0 - 79 ans] <span style='color:red;'>décès hospitaliers</span> et <span style='color:#1f77b4;'>vaccinations</span></b>"),
    yaxis=dict(
        title="<b>Décès hospitaliers</b>",
        titlefont=dict(
            color="red"
        ),
        tickfont=dict(
            color="red"
        )
    ),
    yaxis2=dict(
        range=[0, 100],
        title="<b>% vaccinés</b> (au moins 1 dose)",
        titlefont=dict(
            color="#1f77b4"
        ),
        ticksuffix=" %",
        tickfont=dict(
            color="#1f77b4"
        ),
        anchor="free",
        overlaying="y",
        side="right",
        position=1
    ),
    annotations = [
        dict(
            x=0.5,
            y=1.07,
            xref='paper',
            yref='paper',
            font=dict(color="black"),
            text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier covidtracker.fr.'.format(datetime.strptime(max(df_hosp_fra_clage_80.jour), '%Y-%m-%d').strftime('%d %B %Y')),
            showarrow = False
        )]
)
fig.write_image(PATH + "images/charts/france/dc_vacsi_m80.jpeg", scale=2, width=800, height=500)
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/dc_vacsi_m80.html', auto_open=False)
# +
# Daily hospital deaths, under-80 (red) vs. 80+ (blue), on one chart.
# NOTE(review): the vaccination dataframe built on the next line is never
# plotted in this cell; only the hospital data is used.
df_a_vacsi_a_france_80 = df_a_vacsi_a_france[df_a_vacsi_a_france.clage_vacsi!=80].groupby(["jour"]).sum().reset_index()
df_hosp_fra_clage_80 = df_hosp_fra_clage[df_hosp_fra_clage.cl_age90 < 89].groupby(["jour"]).sum().reset_index()
fig = go.Figure()
fig.add_trace(go.Scatter(
    x=df_hosp_fra_clage_80.jour,
    y=df_hosp_fra_clage_80.dc.diff().rolling(window=7).mean(),
    showlegend=False,
    line=dict(color="red", width=4)
))
df_a_vacsi_a_france_80 = df_a_vacsi_a_france[df_a_vacsi_a_france.clage_vacsi==80]
df_hosp_fra_clage_80 = df_hosp_fra_clage[df_hosp_fra_clage.cl_age90 >= 89].groupby(["jour"]).sum().reset_index()
fig.add_trace(go.Scatter(
    x=df_hosp_fra_clage_80.jour,
    y=df_hosp_fra_clage_80.dc.diff().rolling(window=7).mean(),
    showlegend=False,
    line=dict(color="#1f77b4", width=4)
))
fig.update_layout(
    title=dict(
        y=0.90, x=0.5,
        font = dict(
            size=20, color="black"),
        text="<b>Décès hospitaliers des <span style='color:#1f77b4;'>+ de 80 ans</span> et des <span style='color:red;'>- de 80 ans</span></b>"),
    yaxis=dict(
        title="<b>Décès hospitaliers</b>",
        titlefont=dict(
            color="red"
        ),
        tickfont=dict(
            color="red"
        )
    ),
    # NOTE(review): yaxis2 is configured but no trace uses it in this cell —
    # leftover from the copied cell above.
    yaxis2=dict(
        range=[0, 100],
        title="<b>% vaccinés</b> (au moins 1 dose)",
        titlefont=dict(
            color="#1f77b4"
        ),
        ticksuffix=" %",
        tickfont=dict(
            color="#1f77b4"
        ),
        anchor="free",
        overlaying="y",
        side="right",
        position=1
    ),
    annotations = [
        dict(
            x=0.5,
            y=1.07,
            xref='paper',
            yref='paper',
            font=dict(color="black"),
            text='Date : {}. Données : Santé publique France. Auteur : @guillaumerozier covidtracker.fr.'.format(datetime.strptime(max(df_hosp_fra_clage_80.jour), '%Y-%m-%d').strftime('%d %B %Y')),
            showarrow = False
        )]
)
# NOTE(review): this overwrites dc_vacsi_m80.jpeg written by the previous
# cell — the filename probably should be dc_vacsi_m80_p80.jpeg.
fig.write_image(PATH + "images/charts/france/dc_vacsi_m80.jpeg", scale=2, width=800, height=500)
plotly.offline.plot(fig, filename = PATH + 'images/html_exports/france/dc_vacsi_m80_p80.html', auto_open=False)
# -
def dc_hosp_clage(df_hosp_fra_clage, lastday="", minday=""):
    """Horizontal bar chart: share of each age class among hospitalised
    patients on `lastday`, with `minday`'s distribution overlaid as an
    outline for comparison. Writes a JPEG named after `lastday`.
    """
    #lastday = df_hosp_fra_clage.jour.max()
    #lastday="2020-09-01"
    df_hosp_fra_clage_lastday = df_hosp_fra_clage[df_hosp_fra_clage.jour == lastday]
    df_hosp_fra_clage_minday = df_hosp_fra_clage[df_hosp_fra_clage.jour == minday]
    sum_hosp = df_hosp_fra_clage_lastday["hosp"].sum()
    fig = go.Figure()
    # Outline-only bars for the reference day (transparent fill, black border).
    fig.add_trace(go.Bar(
        y=[str(age-9) + " - " + str(age) +" ans" for age in df_hosp_fra_clage_lastday["cl_age90"].values[:-1]] + ["+ 90 ans"],
        x=df_hosp_fra_clage_minday["hosp"]/df_hosp_fra_clage_minday["hosp"].sum()*100,
        marker_color='rgba(0,0,0,0)',
        marker_line_width=2,
        marker_line_color="black",
        orientation='h',
        name=minday,
        showlegend=False
    ))
    x=df_hosp_fra_clage_lastday["hosp"]/sum_hosp*100
    # Filled red bars for the day of interest, labelled with percentages.
    fig.add_trace(go.Bar(
        y=[str(age-9) + " - " + str(age) +" ans" for age in df_hosp_fra_clage_lastday["cl_age90"].values[:-1]] + ["+ 90 ans"],
        x=x,
        orientation='h',
        marker_line_width=1.5,
        marker_color="red",
        marker_line_color="red",
        text=[str(int(val)) + " %" for val in round(x)],
        textposition='auto',
        name=lastday
    ))
    value_90 = int(round((df_hosp_fra_clage_lastday["hosp"]/sum_hosp*100).values[-1]))
    # Same outline trace again, this time with a legend entry (the first
    # copy has showlegend=False).
    fig.add_trace(go.Bar(
        y=[str(age-9) + " - " + str(age) +" ans" for age in df_hosp_fra_clage_lastday["cl_age90"].values[:-1]] + ["+ 90 ans"],
        x=df_hosp_fra_clage_minday["hosp"]/df_hosp_fra_clage_minday["hosp"].sum()*100,
        marker_color='rgba(0,0,0,0)',
        marker_line_width=2,
        marker_line_color="black",
        orientation='h',
        name=minday
    ))
    fig.update_layout(
        annotations=[
            dict(
                x=0.5,
                y=1.12,
                xref='paper',
                yref='paper',
                font=dict(size=11),
                text="Lecture : les plus de 90 ans représentent {}% des personnes hospitalisées".format(value_90),
                showarrow=False
            ),
        ],
        legend_orientation="h",
        barmode='overlay',
        xaxis=dict(ticksuffix=" %"),
        title=dict(
            # NOTE(review): .format(lastday) is a no-op here — the title
            # string has no placeholder.
            text="Part de chaque tranche d'âge dans les hospitalisations".format(lastday),
            x=0.5
        ),
        bargap=0.2
    )
    fig.write_image(PATH + "images/charts/france/dc_hosp_clage/{}.jpeg".format(lastday), scale=2, width=500, height=500)
def vacsi_clage(df_a_vacsi_a_france, lastday=""):
    """Horizontal bar chart of first-dose vaccination coverage per age
    class on `lastday`. Writes a JPEG named after `lastday`.
    """
    #lastday = df_a_vacsi_a_france.jour.max()
    #lastday="2020-09-01"
    df_a_vacsi_a_france_lastday = df_a_vacsi_a_france[df_a_vacsi_a_france.jour == lastday].sort_values(["clage_vacsi"])
    # NOTE(review): sum_hosp is computed but never used.
    sum_hosp = df_a_vacsi_a_france_lastday["n_cum_dose1"].sum()
    fig = go.Figure()
    x=df_a_vacsi_a_france_lastday["couv_dose1"]
    fig.add_trace(go.Bar(
        y=df_a_vacsi_a_france_lastday.clage_vacsi_text,
        x=x,
        text=[str(int(val)) + " %" for val in round(x)],
        textposition='auto',
        orientation='h',
    ))
    # Last row is the 80+ class (rows sorted by clage_vacsi above).
    value_80 = int(round((x.values[-1])))
    fig.update_layout(
        annotations=[
            dict(
                x=0.5,
                y=1.12,
                xref='paper',
                yref='paper',
                font=dict(size=11),
                text="Lecture : {}% des plus de 80 ans ont reçu une dose de vaccin".format(value_80),
                showarrow=False
            ),
        ],
        title=dict(
            text="Couverture vaccinale {}".format(lastday),
            x=0.5
        ),
        xaxis=dict(range=[0, 100], ticksuffix=" %"),
        bargap=0
    )
    fig.write_image(PATH + "images/charts/france/vacsi_clage/{}.jpeg".format(lastday), scale=2, width=500, height=500)
def assemble_images(date):
    """Stitch the vaccination and hospitalisation charts for `date`
    side by side into images/charts/france/vacsi_hosp_comp/<date>.jpeg.

    Fix: removed an unused `import numpy as np` inside the function.
    """
    PATH = "../../"
    im1 = cv2.imread(PATH+'images/charts/france/vacsi_clage/{}.jpeg'.format(date))
    im2 = cv2.imread(PATH+'images/charts/france/dc_hosp_clage/{}.jpeg'.format(date))
    # hconcat requires both inputs to share the same height (both charts
    # are rendered at height=500 above).
    im_h = cv2.hconcat([im1, im2])
    cv2.imwrite(PATH+'images/charts/france/vacsi_hosp_comp/{}.jpeg'.format(date), im_h)
def build_video(dates):
    """Assemble the per-day comparison charts into an MP4 slideshow.

    The first frame is repeated 6 extra times and the last 12 extra times
    so the video lingers at both ends. After writing, tries to recompress
    with ffmpeg and delete the original.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt)
    is narrowed to OSError — the realistic failure here is ffmpeg/rm
    missing or not executable; a dead `#import glob` was removed.
    """
    for (folder, fps) in [("vacsi_hosp_comp", 6),]:
        img_array = []
        size = None
        for i in range(len(dates)):
            img = cv2.imread((PATH + "images/charts/france/{}/{}.jpeg").format(folder, dates[i]))
            height, width, layers = img.shape
            size = (width, height)  # VideoWriter uses the last frame's size
            img_array.append(img)
            if i == len(dates) - 1:
                # Hold the final frame (~2 s at 6 fps).
                img_array.extend([img] * 12)
            if i == 0:
                # Hold the opening frame (~1 s at 6 fps).
                img_array.extend([img] * 6)
        out = cv2.VideoWriter(PATH + 'images/charts/france/{}.mp4'.format(folder), cv2.VideoWriter_fourcc(*'MP4V'), fps, size)
        for frame in img_array:
            out.write(frame)
        out.release()
        try:
            import subprocess
            subprocess.run(["ffmpeg", "-y", "-i", PATH + "images/charts/france/{}.mp4".format(folder), PATH + "images/charts/france/{}_opti.mp4".format(folder)])
            subprocess.run(["rm", PATH + "images/charts/france/{}.mp4".format(folder)])
        except OSError:
            print("error conversion h265")
# +
# Map SPF vaccination age-class codes (upper bound of each bracket) to
# human-readable labels for the chart y-axis.
dict_clage = {
    4:"0 - 4 ans",
    9:"5 - 9 ans",
    11:"10 - 11 ans",
    17:"12 - 17 ans",
    24:"18 - 24 ans",
    29:"25 - 29 ans",
    39:"30 - 39 ans",
    49:"40 - 49 ans",
    59:"50 - 59 ans",
    69:"60 - 69 ans",
    74:"70 - 74 ans",
    79:"75 - 79 ans",
    80:"> 80 ans"
}
df_a_vacsi_a_france["clage_vacsi_text"] = df_a_vacsi_a_france["clage_vacsi"].map(dict_clage)
# -
days = sorted(df_a_vacsi_a_france.jour.unique()) #[-100:]
# For every day: render the vaccination chart and the hospitalisation chart
# (compared against the first day), stitch them, then build the video.
for date in days:
    print(date)
    vacsi_clage(df_a_vacsi_a_france, date)
    dc_hosp_clage(df_hosp_fra_clage, date, minday=days[0])
    assemble_images(date)
build_video(days)
| src/france/covid19_france_vacsin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import sqlite3
import numpy as np
# Open the local results database and pull every Mega-Sena draw.
# NOTE(review): the connection is never closed; consider a context manager.
sqliteConnection = sqlite3.connect('lottery.db')
cursor = sqliteConnection.cursor()
sqlite_select_query = """SELECT contest, first_number, second_number, third_number, fourth_number, fifth_number, sixth_number from mega_sena"""
cursor.execute(sqlite_select_query)
records = cursor.fetchall()
records
contests = pd.DataFrame(records)
contests.columns = ('contest', 'first_number', 'second_number', 'third_number', 'fourth_number', 'fifth_number', 'sixth_number')
contests.set_index('contest', inplace=True)
contests
contests.describe()
# Frequency table for each drawn position, most common values first.
first_number = contests[['first_number']]
first_count = first_number.groupby(['first_number'])['first_number'].count()
first_count = first_count.reset_index(name='count')
first_count.set_index('first_number', inplace=True)
first_count.sort_values(['count'], ascending=False).head(20)
second_number = contests[['second_number']]
second_count = second_number.groupby(['second_number'])['second_number'].count()
second_count = second_count.reset_index(name='count')
second_count.set_index('second_number', inplace=True)
second_count.sort_values(['count'], ascending=False).head(20)
third_number = contests[['third_number']]
third_count = third_number.groupby(['third_number'])['third_number'].count()
third_count = third_count.reset_index(name='count')
third_count.set_index('third_number', inplace=True)
third_count.sort_values(['count'], ascending=False).head(20)
fourth_number = contests[['fourth_number']]
fourth_count = fourth_number.groupby(['fourth_number'])['fourth_number'].count()
fourth_count = fourth_count.reset_index(name='count')
fourth_count.set_index('fourth_number', inplace=True)
fourth_count.sort_values(['count'], ascending=False).head(20)
fifth_number = contests[['fifth_number']]
fifth_count = fifth_number.groupby(['fifth_number'])['fifth_number'].count()
fifth_count = fifth_count.reset_index(name='count')
fifth_count.set_index('fifth_number', inplace=True)
fifth_count.sort_values(['count'], ascending=False).head(20)
sixth_number = contests[['sixth_number']]
sixth_count = sixth_number.groupby(['sixth_number'])['sixth_number'].count()
sixth_count = sixth_count.reset_index(name='count')
sixth_count.set_index('sixth_number', inplace=True)
sixth_count.sort_values(['count'], ascending=False).head(20)
# Pick one value at random among each position's 20 most frequent numbers.
first_top = first_count.sort_values(['count'], ascending=False).head(20)
first_choice = np.random.choice(first_top.index.values)
first_choice
second_top = second_count.sort_values(['count'], ascending=False).head(20)
second_choice = np.random.choice(second_top.index.values)
second_choice
third_top = third_count.sort_values(['count'], ascending=False).head(20)
third_choice = np.random.choice(third_top.index.values)
third_choice
fourth_top = fourth_count.sort_values(['count'], ascending=False).head(20)
fourth_choice = np.random.choice(fourth_top.index.values)
fourth_choice
fifth_top = fifth_count.sort_values(['count'], ascending=False).head(20)
fifth_choice = np.random.choice(fifth_top.index.values)
fifth_choice
sixth_top = sixth_count.sort_values(['count'], ascending=False).head(20)
sixth_choice = np.random.choice(sixth_top.index.values)
sixth_choice
numbers = [first_choice, second_choice, third_choice, fourth_choice, fifth_choice, sixth_choice]
numbers = sorted(numbers)
numbers
'{0}, {1}, {2}, {3}, {4}, {5}'.format(numbers[0], numbers[1], numbers[2], numbers[3], numbers[4], numbers[5])
# Check whether this exact combination was ever drawn.
# NOTE(review): the values are interpolated into the SQL string; fine for
# these internally generated integers, but a parameterised query ("?"
# placeholders with cursor.execute(query, numbers)) would be safer.
sqlite_select_query = "SELECT contest from mega_sena where first_number = {0} and second_number = {1} and third_number = {2} and fourth_number = {3} and fifth_number = {4} and sixth_number = {5}".format(numbers[0], numbers[1], numbers[2], numbers[3], numbers[4], numbers[5])
# sqlite_select_query = "SELECT contest from mega_sena where first_number = {0} and second_number = {1} and third_number = {2} and fourth_number = {3} and fifth_number = {4} and sixth_number = {5}".format(4, 5, 30, 33, 41, 52)
cursor.execute(sqlite_select_query)
teste_contest = cursor.fetchall()
teste_contest
# Collect the units digit of every distinct first-drawn number.
# Fix: the original loop raised AttributeError (`int` has no `.zfill` —
# the call order was `first_number.item().zfill(2)` instead of
# `str(...).zfill(2)`) and discarded the result of `np.append`, which
# returns a new array rather than mutating in place; it also shadowed
# the `first_number` DataFrame with its loop variable.
first_unit = np.array([str(num).zfill(2)[1] for num in first_count.index.values])
first_unit
| Lottery_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: capstone
# language: python
# name: capstone
# ---
# # Appendix
# A. Creating custom list of stop words.
# +
# Basic data science packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import joblib
from sklearn.feature_extraction import text
# -
# load the cleaned whisky-review dataset
data = pd.read_pickle('data/cleaned_data.pkl')
data.head()
# ***
# ## Adding adjectives and proper nouns to list of stop words
# Some adjectives are positive or negative by definition. These words are probably highly predictive of a high or low rating, but do not tell us anything else about the whisky specifically. Therefore, to learn more about whisky specific language, these words should be ignored when tokenizing. Likewise, the name of the distiller in a review does not tell us anything about the whisky. These too will be added to the list of stopwords. We will start with creating a list of adjectives.
# Generic sentiment adjectives to exclude from tokenization.
adj_to_remove = [
    'best', 'good', 'love', 'incredible', 'remarkable', 'excellent', 'stunning', 'great', 'fantastic', 'wonderful',
    'outstanding', 'superb', 'magnificent', 'exceptional', 'marvelous', 'superior', 'awesome',
    'bad', 'terrible', 'worst', 'poor', 'unpleasant', 'inferior', 'unsatisfactory', 'inadequate', 'lousy', 'atrocious',
    'deficient', 'awful'
]
# We can inspect the names of the whiskys to see the names of each distiller, and then manually add them to a list
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    print(data['name'].sort_values())
# actual proper nouns that I need to remove - manually inspect proper nouns list and names of the whiskeus
proper_nouns_list = [
    'aberfeldy', 'aberlour', 'alltabhainne', 'ardbeg', 'auchentoshan', 'auchentoshans', 'asyla', 'alexander', 'ardmore',
    'arran', 'auchroisk', 'aultmore', 'askaig', 'antiquary', 'adelphi',
    'ballechin', 'balvenie', 'benachie', 'benriachs', 'bladnoch', 'bladnocha', 'brora', 'broras', 'bowmore', 'bull',
    'bruichladdich', 'bruichladdichit', 'bruichladdichs', 'bunnahabhain', 'balblair', 'ballantine', 'nevis', 'benriach',
    'benrinnes', 'benromach', 'balmenach', 'blackadder', 'blair', 'boutique', 'box', 'binnys', "binny's",
    'cardhu', 'chivas', 'clynelishs', 'clynelish', 'craigellachie', 'cragganmore', 'cadenhead', 'caol', 'ila',
    'chieftain', 'compass', 'cuatro', 'cutty', 'collection',
    'deanston', 'dailuaine', 'dalmore', 'dalwhinnie', 'dewars', 'dewar', 'deveron', 'douglas', 'duncan', 'dufftown',
    'edradour', 'edradours', 'ellen', 'editors',
    'farclas',
    'garioch', 'gariochs', 'glenallechie','glenburgie', 'glencadam', 'glencraig', 'glendronach', 'glendronachs',
    'glenfiddich', 'glenfiddichs', 'glengoyne', 'glenisla', 'glenkeir', 'glenrothes', 'glenlivet', 'glenturret',
    'glenfarclas', 'glenglassaugh', 'glenkinchie', 'glenmorangie', 'glenugie', 'gordon',
    'hart', 'hazelburn', 'highland', 'hazelwood', 'hunter',
    'inchmurrin',
    'johnnie', 'jura', 'juras',
    'keith', 'kensington', 'kilchomanfree', 'kilchomans', 'kildalton', 'kinchie', 'kininvie', 'kirkland',
    'lochnager', 'lochranza', 'lagavulin', 'littlemill', 'linkwood', 'longmorn', 'linlithgow', 'laphroig', 'ledaig',
    'lomand', 'lombard', 'lonach', 'longrow',
    'macduff', 'macmillans', 'magdelene', 'macallan', 'mortlach', 'monnochmore', 'macdougall', 'mossman', 'mackillops',
    'mackinlays', 'master', 'murray', 'mcdavid',
    'oban',
    'park', 'pulteney', 'peerless',
    'scapa', 'shackleton', 'shieldaig', 'skye', 'springbank', 'springbanks', 'strathclyde', 'strathisla', 'scotia',
    'signatory', 'scotts', 'singleton', 'speyburn', 'strathmill', 'sovereign',
    'talisker', 'tomintoul', 'turasmara', 'teaninich', 'taylor', 'tobermory', 'tomatin', 'tormore', 'tullibardine',
    'uigeadail', 'usquaebach',
    'valinch',
    'walker', 'wemyss'
]
# +
# Adding adjectives and names of distillers to the list of stop words
my_stop_words = text.ENGLISH_STOP_WORDS.union(proper_nouns_list, adj_to_remove)
# Saving custom stop words to disk for reuse by the modelling notebooks
joblib.dump(my_stop_words, 'data/my_stop_words.pkl')
# -
| Appendix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import k3d

# Three 2-D text overlays anchored at fractional screen coordinates:
# two LaTeX formulas and one raw-HTML label (is_html=True).
plot = k3d.plot()
plot += k3d.text2d('\\int_{-\\infty}^\\infty \\hat f(\\xi)\\,e^{2 \\pi i \\xi x} \\,d\\xi',
                   [0.75,0.5], color=0, size=1.5, reference_point='lt')
plot += k3d.text2d('{(1,1,\\frac{5}{\\pi})}', [0.25, 0.5],
                   color=0, size=1.5, reference_point='rb')
plot += k3d.text2d('<h1 style="color: red;">Important!</h1>Hello<strong>World</strong>', [0.5, 0.5],
                   color=0, size=1.5, is_html=True, reference_point='rb')
plot.display()
# -
# Hide the background box around the first text object.
plot.objects[0].label_box = False
| examples/text2d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Effect Size
# ===
#
# Examples and exercises for a tutorial on statistical inference.
#
# Copyright 2016 <NAME>
#
# License: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)
# +
from __future__ import print_function, division
import numpy
import scipy.stats
import matplotlib.pyplot as pyplot
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# seed the random number generator so we all get the same results
numpy.random.seed(17)
# some nice colors from http://colorbrewer2.org/
COLOR1 = '#7fc97f'
COLOR2 = '#beaed4'
COLOR3 = '#fdc086'
COLOR4 = '#ffff99'
COLOR5 = '#386cb0'
# %matplotlib inline
# -
# ## Part One
#
# To explore statistics that quantify effect size, we'll look at the difference in height between men and women. I used data from the Behavioral Risk Factor Surveillance System (BRFSS) to estimate the mean and standard deviation of height in cm for adult women and men in the U.S.
#
# I'll use `scipy.stats.norm` to represent the distributions. The result is an `rv` object (which stands for random variable).
mu1, sig1 = 178, 7.7  # male height mean / SD in cm (BRFSS-based estimate)
male_height = scipy.stats.norm(mu1, sig1)
mu2, sig2 = 163, 7.3  # female height mean / SD in cm
female_height = scipy.stats.norm(mu2, sig2)
# The following function evaluates the normal (Gaussian) probability density function (PDF) within 4 standard deviations of the mean. It takes and rv object and returns a pair of NumPy arrays.
def eval_pdf(rv, num=4):
    """Evaluate the PDF of *rv* on a grid spanning `num` stds around its mean.

    rv: scipy.stats frozen distribution (an "rv" object)
    num: half-width of the grid, in standard deviations

    returns: (xs, ys) pair of NumPy arrays — 100 grid points and PDF values
    """
    center = rv.mean()
    spread = num * rv.std()
    xs = numpy.linspace(center - spread, center + spread, 100)
    return xs, rv.pdf(xs)
# Here's what the two distributions look like.
# +
# Evaluate each population PDF on its own grid and overlay the two curves.
xs, ys = eval_pdf(male_height)
pyplot.plot(xs, ys, label='male', linewidth=4, color=COLOR2)
xs, ys = eval_pdf(female_height)
pyplot.plot(xs, ys, label='female', linewidth=4, color=COLOR3)
pyplot.xlabel('height (cm)')
# Trailing `None` suppresses the notebook's echo of the last expression.
None
# -
# Let's assume for now that those are the true distributions for the population.
#
# I'll use `rvs` to generate random samples from the population distributions. Note that these are totally random, totally representative samples, with no measurement error!
# Draw perfectly representative random samples (no measurement error).
male_sample = male_height.rvs(1000)
female_sample = female_height.rvs(1000)
# Both samples are NumPy arrays. Now we can compute sample statistics like the mean and standard deviation.
mean1, std1 = male_sample.mean(), male_sample.std()
mean1, std1
# The sample mean is close to the population mean, but not exact, as expected.
mean2, std2 = female_sample.mean(), female_sample.std()
mean2, std2
# And the results are similar for the female sample.
#
# Now, there are many ways to describe the magnitude of the difference between these distributions. An obvious one is the difference in the means:
# Raw difference of sample means — in the original units (cm).
difference_in_means = male_sample.mean() - female_sample.mean()
difference_in_means # in cm
# On average, men are 14--15 centimeters taller. For some applications, that would be a good way to describe the difference, but there are a few problems:
#
# * Without knowing more about the distributions (like the standard deviations) it's hard to interpret whether a difference like 15 cm is a lot or not.
#
# * The magnitude of the difference depends on the units of measure, making it hard to compare across different studies.
#
# There are a number of ways to quantify the difference between distributions. A simple option is to express the difference as a percentage of the mean.
#
# **Exercise 1**: what is the relative difference in means, expressed as a percentage?
# +
# Solution goes here
# -
# **STOP HERE**: We'll regroup and discuss before you move on.
# ## Part Two
#
# An alternative way to express the difference between distributions is to see how much they overlap. To define overlap, we choose a threshold between the two means. The simple threshold is the midpoint between the means:
# Midpoint between the two sample means — the simple classification threshold.
simple_thresh = (mean1 + mean2) / 2
simple_thresh
# A better, but slightly more complicated threshold is the place where the PDFs cross.
# Where the two normal PDFs cross (std-weighted average of the means).
thresh = (std1 * mean2 + std2 * mean1) / (std1 + std2)
thresh
# In this example, there's not much difference between the two thresholds.
#
# Now we can count how many men are below the threshold:
male_below_thresh = sum(male_sample < thresh)
male_below_thresh
# And how many women are above it:
female_above_thresh = sum(female_sample > thresh)
female_above_thresh
# The "overlap" is the area under the curves that ends up on the wrong side of the threshold.
# Fractions of each sample on the "wrong" side of the threshold.
male_overlap = male_below_thresh / len(male_sample)
female_overlap = female_above_thresh / len(female_sample)
male_overlap, female_overlap
# In practical terms, you might report the fraction of people who would be misclassified if you tried to use height to guess sex, which is the average of the male and female overlap rates:
misclassification_rate = (male_overlap + female_overlap) / 2
misclassification_rate
# Another way to quantify the difference between distributions is what's called "probability of superiority", which is a problematic term, but in this context it's the probability that a randomly-chosen man is taller than a randomly-chosen woman.
#
# **Exercise 2**: Suppose I choose a man and a woman at random. What is the probability that the man is taller?
#
# HINT: You can `zip` the two samples together and count the number of pairs where the male is taller, or use NumPy array operations.
# +
# Solution goes here
# +
# Solution goes here
# -
# Overlap (or misclassification rate) and "probability of superiority" have two good properties:
#
# * As probabilities, they don't depend on units of measure, so they are comparable between studies.
#
# * They are expressed in operational terms, so a reader has a sense of what practical effect the difference makes.
#
# ### Cohen's effect size
#
# There is one other common way to express the difference between distributions. Cohen's $d$ is the difference in means, standardized by dividing by the standard deviation. Here's the math notation:
#
# $ d = \frac{\bar{x}_1 - \bar{x}_2} s $
#
# where $s$ is the pooled standard deviation:
#
# $s = \sqrt{\frac{(n_1-1)s^2_1 + (n_2-1)s^2_2}{n_1+n_2 - 2}}$
#
# Here's a function that computes it:
#
def CohenEffectSize(group1, group2):
    """Compute Cohen's d for two groups.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float

    Note: the pooled variance here weights each group's variance by its
    full sample size n (not n-1), a common simplification of the
    textbook pooled-standard-deviation formula.
    """
    n1, n2 = len(group1), len(group2)
    mean_diff = group1.mean() - group2.mean()
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    return mean_diff / numpy.sqrt(pooled_var)
# Computing the denominator is a little complicated; in fact, people have proposed several ways to do it. This implementation uses a "pooled standard deviation", a weighted average of the two group variances — note that it weights by the full sample sizes $n_1$ and $n_2$, a slight simplification of the $(n-1)$-weighted formula shown above (the difference is negligible for large samples).
#
# And here's the result for the difference in height between men and women.
CohenEffectSize(male_sample, female_sample)
# Most people don't have a good sense of how big $d=1.9$ is, so let's make a visualization to get calibrated.
#
# Here's a function that encapsulates the code we already saw for computing overlap and probability of superiority.
def overlap_superiority(control, treatment, n=1000):
    """Estimate overlap and probability of superiority from random samples.

    control: scipy.stats rv object
    treatment: scipy.stats rv object
    n: sample size

    returns: (overlap, superiority) — overlap is the total fraction of
    draws falling on the "wrong" side of the midpoint threshold;
    superiority is the fraction of pairs where treatment > control.
    """
    # Sample control first, then treatment (order matters for the
    # shared NumPy random state).
    sample_control = control.rvs(n)
    sample_treatment = treatment.rvs(n)
    midpoint = (control.mean() + treatment.mean()) / 2
    wrong_side = sum(sample_control > midpoint) + sum(sample_treatment < midpoint)
    overlap = wrong_side / n
    superiority = (sample_treatment > sample_control).mean()
    return overlap, superiority
# Here's the function that takes Cohen's $d$, plots normal distributions with the given effect size, and prints their overlap and superiority.
def plot_pdfs(cohen_d=2):
    """Plot two unit-variance normal PDFs whose means differ by `cohen_d`
    standard deviations, then print their overlap and superiority.

    cohen_d: number of standard deviations between the means
    """
    control = scipy.stats.norm(0, 1)
    treatment = scipy.stats.norm(cohen_d, 1)
    # Draw control first so treatment overlays it, as in the original.
    for rv, name, shade in ((control, 'control', COLOR3),
                            (treatment, 'treatment', COLOR2)):
        xs, ys = eval_pdf(rv)
        pyplot.fill_between(xs, ys, label=name, color=shade, alpha=0.7)
    o, s = overlap_superiority(control, treatment)
    print('overlap', o)
    print('superiority', s)
# Here's an example that demonstrates the function:
# Demonstrate with d = 2: draws both PDFs and prints overlap/superiority.
plot_pdfs(2)
# And an interactive widget you can use to visualize what different values of $d$ mean:
slider = widgets.FloatSlider(min=0, max=4, value=2)
interact(plot_pdfs, cohen_d=slider)
# Trailing `None` suppresses the echo of the interact() return value.
None
# Cohen's $d$ has a few nice properties:
#
# * Because mean and standard deviation have the same units, their ratio is dimensionless, so we can compare $d$ across different studies.
#
# * In fields that commonly use $d$, people are calibrated to know what values should be considered big, surprising, or important.
#
# * Given $d$ (and the assumption that the distributions are normal), you can compute overlap, superiority, and related statistics.
# In summary, the best way to report effect size depends on the audience and your goals. There is often a tradeoff between summary statistics that have good technical properties and statistics that are meaningful to a general audience.
| effect_size.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Condicionales
# Los condicionales nos permiten realizar una acción si se cumple algún atributo deseado. Los temas que se verán en este submódulo son:
# 1. Flujos condicionales (if/else)
# 2. Loops (ciclos)
#
# **Video de la clase:**
# - If, else, elif, or, and, for, while, funciones, clases: https://www.youtube.com/watch?v=ttnpUy3E1AI&list=PL3IqEvXBv5p8AYLpqC9t3crl-JSfEBYHR&index=5&ab_channel=Dr.ArrigoCoen
# ### IF/ELSE
#
# La estructura de un _if/else_ es:
#
# if _condición_ **:**
#
# acción_1 #Con sangría
#
# acción_2 #Con sangría
#
# acción_3 #Con sangría
#
# else **:**
#
# acción_A #Con sangría
#
# acción_B #Con sangría
#
# acción_C #Con sangría
#
# Si se cumple _condición_ entonces se realizan las acciones 1, 2, 3,...
#
# Si no, entonces se realizan las acciones A, B, C,...
# +
# Simulate one biased coin flip: heads (A, "Aguilas") with probability 0.6,
# tails (S, "Soles") otherwise.
import random
num_al = random.uniform(0,1)  # uniform draw in [0, 1)
A = 0  # heads counter
S = 0  # tails counter
if num_al < 0.6:
    A += 1 # A = A + 1
else:
    S += 1
print("El número aleatorio fue: ",round(num_al,2),"\nHay ",A," Águilas y ",S," Soles")
# -
# ### ELIF
#
# Nos permite poner más condiciones en caso de que no se cumpla la primera.
#
# La estructura es:
#
# if _condición_1_ **:**
#
# acciones_1 #Con sangría
#
# elif _condición_2_**:**
#
# acciones_2 #Con sangría
#
# else **:**
#
# acciones_3 #Con sangría
# +
# Compare a and b: prints "1" if equal, "2" if a < b, "3" otherwise.
a,b = 10,5
if a == b:
    print("1")
elif a < b: # else if
    print("2")
else:
    print("3")
# -
# ### OR
#
# Nos permite evaluar dos expresiones. La condición es verdadera si al menos una de las expresiones es verdadera.
#
# Se define con el comando _or_.
# +
# OR demo: message prints when the draw is far from 0.5 (>= 0.8 or < 0.2).
num_al = random.uniform(0,1)
if(num_al >= 0.8) or (num_al <0.2):
    print("El número ",round(num_al,2)," no está cerca de 0.5")
# -
# ### AND
#
# Nos permite evaluar dos expresiones. La condición es verdadera si ambas expresiones son verdaderas.
#
# Se define con el comando _and_.
# +
# AND demo: prints only when the first draw is >= 0.6 AND the second is < 0.5.
num_1 = random.uniform(0,1)
num_2 = random.uniform(0,1)
if(num_1 >= 0.6) and (num_2 < 0.5):
    print("Los números son: ",round(num_1,2)," y ",round(num_2,2))
# -
# # Ciclos
# Se utilizan para repetir varias veces una o más acciones.
# ### FOR
# La condición se realiza "n" veces. Se define previamente "n".
#
# La estructura de un _for_ es:
#
# for _variable_ **in** _vector_ **:**
#
# acciones #Con sangría
# +
# FOR demo: flip the biased coin 10 times and tally heads (A) vs tails (S).
A = 0
S = 0
for i in range(1,11): # [1,11)
    num_al = random.uniform(0,1)
    if num_al < 0.6:
        A += 1
    else:
        S += 1
print("Hubo ",A," Águilas y ",S," Soles")
# +
import numpy as np
m = np.arange(1, 9).reshape(2,4)  # 2x4 matrix filled with 1..8
print(m)
# Example of a nested for loop: visit every entry, column by column.
for c in range(m.shape[1]): # iterate over columns
    for r in range(m.shape[0]): # iterate over rows
        print("La entrada [",r+1,",",c+1,"] de la matriz es: ",m[r,c])
# -
# ### WHILE
# Se repite el ciclo mientras se cumpla una o más condiciones.
# WHILE demo 1: keep drawing until a value >= 0.8 appears, with a safety
# `break` capping the loop at 15 iterations.
num_al = random.uniform(0,1)
cont = 0
while num_al < 0.8:
    print("cont = ",cont)
    num_al = random.uniform(0,1)
    print(round(num_al,3))
    if cont == 15:
        break # leave the loop once the cont variable reaches 15
    cont += 1
# WHILE demo 2: same idea, but the iteration cap is part of the condition.
num_al = random.uniform(0,1)
cont = 0
while (num_al < 0.8) and (cont <= 15):
    print("cont = ",cont)
    num_al = random.uniform(0,1)
    print(round(num_al,3))
    cont += 1
# while can be combined with else: the else block runs when the condition
# becomes false (it would be skipped by a break)
i = 1
while i <= 5:
    print(i)
    i += 1
else:
    print("i es mayor que 5")
| M1/1_4 Condicionales y control de flujo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="csBvdoKNp57h"
# # "제목!!"
# > "요약!!"
#
# - toc:true
# - branch: master
# - badges: true
# - comments: true
# - author: ANAKI
# - categories: [python, turtle]
# + colab={"base_uri": "https://localhost:8080/"} id="yGB-U5okn2_F" outputId="360b3665-ef84-4da0-c586-6966419a355e"
# !pip3 install colabTurtle
# + id="E832D_g7DuFd"
# !pip install ColabTurtle
from ColabTurtle.Turtle import *
initializeTurtle()
# Draw a small regular pentagon (exterior angle 360/5 = 72 degrees).
for i in range(5):
    forward(50)
    left(72)
# Draw a five-pointed star (turning 144 degrees at each point).
for i in range(5):
    forward(200)
    left(144)
# + colab={"base_uri": "https://localhost:8080/", "height": 520} id="nDTVtoNNn-4j" outputId="7177aa1e-c3bb-46b1-dba6-68a824cbce40"
import ColabTurtle.Turtle as t
#t.initializeTurtle()
t.bgcolor('skyblue')
t.color('blue')
t.speed(3)
# Blue equilateral triangle (exterior angle 120 degrees).
for i in range(3):
    t.fd(150)
    t.rt(120)
# Lift the pen, jump to (200, 150), then draw a gold square.
t.pu()
t.goto(200,150)
t.pd()
t.color('gold')
for i in range(1,5):
    t.fd(100)
    t.rt(90)
# Lift the pen, jump to (300, 400), then draw an orange five-pointed star.
t.pu()
t.goto(300,400)
t.pd()
t.color('orange')
for i in range(1,6):
    t.fd(100)
    t.rt(144)
| _notebooks/2021_07_03_turtle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MARATONA BEHIND THE CODE 2020
#
# ## DESAFIO 6 - LIT
# <hr>
# ## Installing Libs
# %%time
# !pip install scikit-learn==0.22.0 --upgrade
# !pip install scikit-learn --upgrade
# !pip install xgboost --upgrade
# <hr>
# ## Imports
# +
import pandas as pd
# -
# ### Download training data (csv) to Pandas Dataframe
# #!wget --no-check-certificate --content-disposition https://raw.githubusercontent.com/vanderlei-test/dataset-3/master/training_dataset.csv
#df_training_dataset = pd.read_csv(r'training_dataset.csv')
# NOTE(review): hard-coded absolute Windows path — only runs on this machine;
# prefer the relative path in the commented line above.
df_training_dataset = pd.read_csv(r'C:/Users/MARCOSPAULOFerreira/Anaconda2/MARATONA2020/desafio-6-2020/dataset/training_dataset.csv')
df_training_dataset.tail()
df_training_dataset.nunique()
# Show the missing-value count per column before any cleaning.
print("Valores nulos no df_training_dataset antes das transformaçoes: \n\n{}\n".format(df_training_dataset.isnull().sum(axis = 0)))
# ## Pre-processando o dataset antes do treinamento
# +
# Replace NaN with 0 in the numeric engagement columns.
zero_cols = ['pretende_fazer_cursos_lit', 'interesse_mba_lit',
             'importante_ter_certificado', 'modulos_finalizados', 'certificados']
df_training_dataset.fillna(value=dict.fromkeys(zero_cols, 0), inplace=True)

# Fill missing weekly study hours with the column mean.
df_training_dataset['horas_semanais_estudo'].fillna((df_training_dataset['horas_semanais_estudo'].mean()), inplace=True)

# If modulos_iniciados is NaN, assume it equals modulos_finalizados.
df_training_dataset.modulos_iniciados.fillna(df_training_dataset.modulos_finalizados, inplace=True)

# If total_modulos is NaN, assume it equals modulos_iniciados.
df_training_dataset.total_modulos.fillna(df_training_dataset.modulos_iniciados, inplace=True)

# Replace NaN with a 'Nao Informado' placeholder in the categorical columns.
text_cols = ['graduacao', 'universidade', 'profissao', 'organizacao', 'como_conheceu_lit']
df_training_dataset.fillna(value=dict.fromkeys(text_cols, 'Nao Informado'), inplace=True)

# Show the remaining missing values after the transformations.
print("Valores nulos no df_training_dataset apos transformaçoes: \n\n{}\n".format(df_training_dataset.isnull().sum(axis = 0)))
# -
# ### Tratamento de de variáveis categóricas
# #### Label Encoding
from sklearn.preprocessing import LabelEncoder
df_train = df_training_dataset.copy(deep=True)
# +
# Encode the categorical variables with one LabelEncoder per column.
# BUG FIX: the original reused `le_graduacao` for every column, leaving the
# four other encoders unfitted and `le_graduacao` fitted to the *last*
# column only — any later inverse_transform would be wrong.
le_graduacao = LabelEncoder()
le_universidade = LabelEncoder()
le_profissao = LabelEncoder()
le_organizacao = LabelEncoder()
le_como_conheceu_lit = LabelEncoder()

df_train["code_graduacao"] = le_graduacao.fit_transform(df_train["graduacao"])
df_train["code_universidade"] = le_universidade.fit_transform(df_train["universidade"])
df_train["code_profissao"] = le_profissao.fit_transform(df_train["profissao"])
df_train["code_organizacao"] = le_organizacao.fit_transform(df_train["organizacao"])
df_train["code_como_conheceu_lit"] = le_como_conheceu_lit.fit_transform(df_train["como_conheceu_lit"])

df_train.head()
# -
# ## Treinando um classificador XGBOOST
# ### Selecionando FEATURES e definindo a variável TARGET
df_train.columns
# Feature columns: numeric engagement metrics plus the label-encoded
# categorical codes produced above.
features = [ 'pretende_fazer_cursos_lit', 'interesse_mba_lit',
       'importante_ter_certificado', 'horas_semanais_estudo',
       'total_modulos', 'modulos_iniciados', 'modulos_finalizados',
       'certificados', 'code_graduacao', 'code_universidade',
       'code_profissao', 'code_organizacao', 'code_como_conheceu_lit'
]
target = ['categoria'] ## DO NOT RENAME THE TARGET VARIABLE.
# Prepare the arguments for the ``scikit-learn`` estimator methods.
X = df_train[features]
y = df_train[target]
# +
# %%time
# Compare several classifiers with 10-fold cross-validation and report the
# mean/std accuracy of each.
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
outcome = []      # per-model arrays of fold scores
model_names = []  # parallel list of model labels
models = [
    ('LR ', LogisticRegression(solver='lbfgs',max_iter=10000)),
    ('DTC', DecisionTreeClassifier()),
    ('KNN', KNeighborsClassifier()),
    #('SVM', SVC(gamma='auto')),
    ('LDA', LinearDiscriminantAnalysis()),
    ('GNB', GaussianNB()),
    ('RFC', RandomForestClassifier(n_estimators=100)),
    ('XGB', XGBClassifier()),
    ('PAC', PassiveAggressiveClassifier(max_iter=1000, tol=1e-3))
]
for model_name, model in models:
    # NOTE(review): a fresh shuffled KFold is built per model with no
    # random_state, so each model is scored on different folds — fix the
    # seed for a strictly fair comparison. TODO confirm intent.
    k_fold_validation = model_selection.KFold(n_splits=10, shuffle=True)
    results = model_selection.cross_val_score(model, X, y.values.ravel(), cv=k_fold_validation, scoring='accuracy')
    outcome.append(results)
    model_names.append(model_name)
    output_message = "%s| Mean=%f STD=%f" % (model_name, results.mean(), results.std())
    print(output_message)
# -
# #### 1st - RFC
# #### 2nd - XGB
# +
#GridSearchCV for Hyperparm tuning
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score, make_scorer, f1_score, precision_score, recall_score, accuracy_score
from sklearn.model_selection import StratifiedKFold
# Hyperparameter tuning for the random forest via exhaustive grid search.
rfc = RandomForestClassifier()
param_grid = {
    'n_estimators': [200, 500, 700, 1000, 1500, 2000],
    'max_features': ['auto', 'sqrt', 'log2'],
    'max_depth': [2, 3, 4, 5, 6, 7, 8, 10],
    'criterion': ['gini', 'entropy'],
}
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5, verbose=1, n_jobs=-1)
CV_rfc.fit(X, y.values.ravel())

# BUG FIX: the original read `grid.best_params_` / `grid.best_score_`, but no
# object named `grid` exists (NameError) — the fitted search is `CV_rfc`.
best_pars = CV_rfc.best_params_
best_score = CV_rfc.best_score_
print(best_pars)
print(best_score)
# -
# Dictionary of best parameters found by the grid search, and its CV score.
best_pars = CV_rfc.best_params_
best_score = CV_rfc.best_score_
print(best_pars)
print(best_score)
# ## Scoring dos dados necessários para entregar a solução
# Como entrega da sua solução, esperamos os resultados classificados no seguinte dataset chamado "to_be_scored.csv":
# ### Download da "folha de respostas"
# !wget --no-check-certificate --content-disposition https://raw.githubusercontent.com/vanderlei-test/dataset-3/master/to_be_scored.csv
# Load the "answer sheet" downloaded above; it has no 'categoria' column.
df_to_be_scored = pd.read_csv(r'to_be_scored.csv')
df_to_be_scored.tail()
# # Atenção!
#
# O dataframe ``to_be_scored`` é a sua "folha de respostas". Note que a coluna "categoria" não existe nessa amostra, que não pode ser então utilizada para treino de modelos de aprendizado supervisionado.
df_to_be_scored.info()
# <hr>
#
# # Atenção!
#
# # Para poder aplicar seu modelo e classificar a folha de respostas, você precisa primeiro aplicar as mesmas transformações com colunas que você aplicou no dataset de treino.
#
# # Não remova ou adicione linhas na folha de respostas.
#
# # Não altere a ordem das linhas na folha de respostas.
#
# # Ao final, as 1000 entradas devem estar classificadas, com os valores previstos em uma coluna chamada "target"
#
# <hr>
# Na célula abaixo, repetimos rapidamente os mesmos passos de pré-processamento usados no exemplo dado com árvore de decisão
# +
# 1 - Drop rows with NaN in "certificados" or "total_modulos".
df_to_be_scored_1 = df_to_be_scored.dropna(axis='index', how='any', subset=['certificados', 'total_modulos'])

# 2 - Impute zeros into the remaining missing values.
# NOTE(review): `impute_zeros` is never defined in this notebook — this cell
# raises NameError as-is. It presumably comes from the challenge template
# (e.g. a sklearn SimpleImputer(strategy='constant', fill_value=0)); define
# it before running. TODO confirm against the original template notebook.
impute_zeros.fit(X=df_to_be_scored_1)

df_to_be_scored_2 = pd.DataFrame.from_records(
    data=impute_zeros.transform(
        X=df_to_be_scored_1
    ),
    columns=df_to_be_scored_1.columns
)

# 3 - Column removal (identifier and unused categoricals).
df_to_be_scored_3 = df_to_be_scored_2.drop(columns=['id', 'graduacao', 'universidade', 'organizacao', 'como_conheceu_lit'], inplace=False)

# 4 - Encoding with "dummy variables".
# NOTE(review): dropping rows in step 1 conflicts with the instruction above
# not to remove rows from the answer sheet — verify before submitting.
df_to_be_scored_4 = pd.get_dummies(df_to_be_scored_3, columns=['profissao'])

df_to_be_scored_4.tail()
# -
# <hr>
#
# Pode ser verificado abaixo que as colunas da folha de resposta agora são idênticas às que foram usadas para treinar o modelo:
# NOTE(review): `df_training` is never defined in this notebook (the training
# frame here is `df_train`, which was label-encoded, not dummy-encoded) —
# this cell raises NameError as-is; it appears to be leftover from the
# template's decision-tree example. TODO confirm the intended dataframe.
df_training[
    [
        'pretende_fazer_cursos_lit', 'interesse_mba_lit',
        'importante_ter_certificado', 'horas_semanais_estudo', 'total_modulos',
        'modulos_iniciados', 'modulos_finalizados', 'certificados',
        'profissao_0', 'profissao_Advogado', 'profissao_Analista',
        'profissao_Analista Senior', 'profissao_Assessor',
        'profissao_Coordenador', 'profissao_Diretor', 'profissao_Engenheiro',
        'profissao_Gerente', 'profissao_Outros', 'profissao_SEM EXPERIÊNCIA',
        'profissao_Supervisor', 'profissao_Sócio/Dono/Proprietário'
    ]
].columns
df_to_be_scored_4.columns
# # Atenção
#
# Para todas colunas que não existirem no "df_to_be_scored", você pode usar a técnica abaixo para adicioná-las:
# Add a training-time dummy column missing from the scoring frame (all zeros).
df_to_be_scored_4['profissao_0'] = 0
# NOTE(review): `dtc` (a fitted classifier) is never defined in this notebook —
# the search fitted above is `CV_rfc`; predict with the chosen fitted model.
y_pred = dtc.predict(df_to_be_scored_4)
df_to_be_scored_4['target'] = y_pred
df_to_be_scored_4.tail()
# ### Salvando a folha de respostas como um arquivo .csv para ser submetido
# NOTE(review): `project` is the Watson Studio project-lib handle; this call
# only works inside a Watson Studio project.
project.save_data(file_name="results.csv", data=df_to_be_scored_4.to_csv(index=False))
# # Atenção
#
# # A execução da célula acima irá criar um novo "data asset" no seu projeto no Watson Studio. Você precisará realizar o download deste arquivo juntamente com este notebook e criar um arquivo zip com os arquivos **results.csv** e **notebook.ipynb** para submissão. (os arquivos devem estar nomeados desta forma)
# <hr>
#
# ## Parabéns!
#
# Se você já está satisfeito com a sua solução, vá até a página abaixo e envie os arquivos necessários para submissão.
#
# # https://lit.maratona.dev
#
| notebook_analisys.ipynb |